### What problem does this PR solve?

Fix: can't upload image in Ollama model. #10447

### Type of change

- [X] Bug Fix (non-breaking change which fixes an issue)

### Change all `images=[]` to `images=None`

Changing `images=[]` to `images=None` avoids Python's mutable default argument pitfall. A default list is created once, at function definition time, so if you keep `images=[]`, all calls share the same list, and mutating it (e.g., `images.append(...)`) affects later calls. Using `images=None` and creating a new list inside the function ensures each call is independent. This change does not affect current behavior; it simply makes the code safer and more predictable.
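The pitfall is easy to reproduce. Below is a minimal, standalone sketch (illustrative only, not repository code) showing the shared-state bug and the `None`-default fix:

```python
# Demonstration of the mutable default argument pitfall and its fix.
def broken(prompt, images=[]):    # default list is created once, at def time
    images.append(prompt)
    return images

print(broken("a"))  # ['a']
print(broken("b"))  # ['a', 'b']  <- state leaked from the previous call


def fixed(prompt, images=None):   # None default: build a fresh list per call
    if images is None:
        images = []
    images.append(prompt)
    return images

print(fixed("a"))  # ['a']
print(fixed("b"))  # ['b']  <- calls stay independent
```

Callers that pass an explicit list are unaffected; only the behavior of the omitted default changes.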
```diff
@@ -22,7 +22,7 @@ import secrets
 import time
 from datetime import datetime
 
-from flask import redirect, request, session, Response
+from flask import redirect, request, session, make_response
 from flask_login import current_user, login_required, login_user, logout_user
 from werkzeug.security import check_password_hash, generate_password_hash
 
```
```diff
@@ -866,7 +866,9 @@ def forget_get_captcha():
     from captcha.image import ImageCaptcha
     image = ImageCaptcha(width=300, height=120, font_sizes=[50, 60, 70])
     img_bytes = image.generate(captcha_text).read()
-    return Response(img_bytes, mimetype="image/png")
+    response = make_response(img_bytes)
+    response.headers.set("Content-Type", "image/JPEG")
+    return response
 
 
 @manager.route("/forget/otp", methods=["POST"])  # noqa: F821
```
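For reference, `make_response` wraps raw bytes in a mutable `Response` object whose headers can then be set explicitly. A minimal Flask sketch of the pattern (route name and MIME type are illustrative, not the repository's):

```python
# Minimal sketch of the make_response pattern (not the repository route).
from flask import Flask, make_response

app = Flask(__name__)

@app.route("/captcha")
def captcha():
    img_bytes = b"..."  # placeholder for ImageCaptcha-generated PNG bytes
    response = make_response(img_bytes)
    response.headers.set("Content-Type", "image/png")  # should match the image format
    return response
```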
```diff
@@ -210,19 +210,18 @@ class LLMBundle(LLM4Tenant):
     def _clean_param(chat_partial, **kwargs):
         func = chat_partial.func
         sig = inspect.signature(func)
-        keyword_args = []
         support_var_args = False
+        allowed_params = set()
 
         for param in sig.parameters.values():
-            if param.kind == inspect.Parameter.VAR_KEYWORD or param.kind == inspect.Parameter.VAR_POSITIONAL:
+            if param.kind == inspect.Parameter.VAR_KEYWORD:
                 support_var_args = True
-            elif param.kind == inspect.Parameter.KEYWORD_ONLY:
-                keyword_args.append(param.name)
+            elif param.kind in (inspect.Parameter.POSITIONAL_OR_KEYWORD, inspect.Parameter.KEYWORD_ONLY):
+                allowed_params.add(param.name)
 
-        use_kwargs = kwargs
-        if not support_var_args:
-            use_kwargs = {k: v for k, v in kwargs.items() if k in keyword_args}
-        return use_kwargs
+        if support_var_args:
+            return kwargs
+        else:
+            return {k: v for k, v in kwargs.items() if k in allowed_params}
 
     def chat(self, system: str, history: list, gen_conf: dict = {}, **kwargs) -> str:
         if self.langfuse:
             generation = self.langfuse.start_generation(trace_context=self.trace_context, name="chat", model=self.llm_name, input={"system": system, "history": history})
```
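The rewritten `_clean_param` keeps all kwargs when the wrapped callable accepts `**kwargs`, and otherwise drops anything its signature cannot bind. A standalone sketch of the same filtering idea (the `chat` function here is a stand-in, not the repository's):

```python
import inspect
from functools import partial


def clean_param(chat_partial, **kwargs):
    # Keep everything if the callee accepts **kwargs; otherwise keep only
    # kwargs that match named parameters in its signature.
    sig = inspect.signature(chat_partial.func)
    support_var_args = False
    allowed_params = set()
    for param in sig.parameters.values():
        if param.kind == inspect.Parameter.VAR_KEYWORD:
            support_var_args = True
        elif param.kind in (inspect.Parameter.POSITIONAL_OR_KEYWORD, inspect.Parameter.KEYWORD_ONLY):
            allowed_params.add(param.name)
    if support_var_args:
        return kwargs
    return {k: v for k, v in kwargs.items() if k in allowed_params}


def chat(system, history, gen_conf, images=None):  # no **kwargs in this signature
    return f"{len(history)} turns, images={images}"


bound = partial(chat, "sys", [])
# "images" is bindable and survives; "video_bytes" is not and is dropped.
print(clean_param(bound, images=["img1"], video_bytes=b"raw"))  # {'images': ['img1']}
```

Including `POSITIONAL_OR_KEYWORD` in the allowed kinds matters here: parameters like `images` are ordinary positional-or-keyword parameters, so the old `KEYWORD_ONLY`-only check silently discarded them.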
```diff
@@ -50,7 +50,7 @@ class Base(ABC):
     def describe_with_prompt(self, image, prompt=None):
         raise NotImplementedError("Please implement encode method!")
 
-    def _form_history(self, system, history, images=[]):
+    def _form_history(self, system, history, images=None):
         hist = []
         if system:
             hist.append({"role": "system", "content": system})
```
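With `images=None`, the method body is expected to normalize the default before use. A hedged sketch of that guard (the body shown is an assumed shape, not the exact repository code):

```python
# Hedged sketch of the None-default guard in a _form_history-style helper.
def form_history(system, history, images=None):
    if images is None:      # normalize the default; each call gets its own list
        images = []
    hist = [{"role": "system", "content": system}] if system else []
    hist.extend(history)
    if images and hist and hist[-1]["role"] == "user":
        hist[-1] = {**hist[-1], "images": images}  # attach images to the last user turn
    return hist

# Two calls never share state through the default parameter:
print(form_history("sys", [{"role": "user", "content": "hi"}]))
print(form_history("sys", [{"role": "user", "content": "hi"}], images=["img.png"]))
```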
```diff
@@ -78,7 +78,7 @@ class Base(ABC):
             })
         return pmpt
 
-    def chat(self, system, history, gen_conf, images=[], **kwargs):
+    def chat(self, system, history, gen_conf, images=None, **kwargs):
         try:
             response = self.client.chat.completions.create(
                 model=self.model_name,
```
```diff
@@ -89,7 +89,7 @@ class Base(ABC):
         except Exception as e:
             return "**ERROR**: " + str(e), 0
 
-    def chat_streamly(self, system, history, gen_conf, images=[], **kwargs):
+    def chat_streamly(self, system, history, gen_conf, images=None, **kwargs):
         ans = ""
         tk_count = 0
         try:
```
```diff
@@ -228,7 +228,7 @@ class QWenCV(GptV4):
             base_url = "https://dashscope.aliyuncs.com/compatible-mode/v1"
         super().__init__(key, model_name, lang=lang, base_url=base_url, **kwargs)
 
-    def chat(self, system, history, gen_conf, images=[], video_bytes=None, filename=""):
+    def chat(self, system, history, gen_conf, images=None, video_bytes=None, filename=""):
         if video_bytes:
             try:
                 summary, summary_num_tokens = self._process_video(video_bytes, filename)
```
```diff
@@ -506,7 +506,7 @@ class OllamaCV(Base):
             options["frequency_penalty"] = gen_conf["frequency_penalty"]
         return options
 
-    def _form_history(self, system, history, images=[]):
+    def _form_history(self, system, history, images=None):
         hist = deepcopy(history)
         if system and hist[0]["role"] == "user":
             hist.insert(0, {"role": "system", "content": system})
```
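Since the linked issue concerns image upload through Ollama, it may help to see the message shape the Ollama client expects. A hedged sketch (host, model, and file name are placeholders; the subscript access assumes the ollama-python client's response type):

```python
# Hedged sketch of the call shape OllamaCV's _form_history builds toward
# (not repository code). The ollama client accepts an "images" key on a
# message; values may be bytes, base64 strings, or file paths.
import ollama

client = ollama.Client(host="http://localhost:11434")  # assumed local server
response = client.chat(
    model="llava",  # placeholder vision-capable model
    messages=[
        {"role": "system", "content": "Describe images briefly."},
        {"role": "user", "content": "What is in this picture?", "images": ["photo.jpg"]},
    ],
)
print(response["message"]["content"])
```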
```diff
@@ -547,7 +547,7 @@ class OllamaCV(Base):
         except Exception as e:
             return "**ERROR**: " + str(e), 0
 
-    def chat(self, system, history, gen_conf, images=[]):
+    def chat(self, system, history, gen_conf, images=None):
         try:
             response = self.client.chat(
                 model=self.model_name,
```
```diff
@@ -561,7 +561,7 @@ class OllamaCV(Base):
         except Exception as e:
             return "**ERROR**: " + str(e), 0
 
-    def chat_streamly(self, system, history, gen_conf, images=[]):
+    def chat_streamly(self, system, history, gen_conf, images=None):
         ans = ""
         try:
             response = self.client.chat(
```
```diff
@@ -596,7 +596,7 @@ class GeminiCV(Base):
         self.lang = lang
         Base.__init__(self, **kwargs)
 
-    def _form_history(self, system, history, images=[]):
+    def _form_history(self, system, history, images=None):
         hist = []
         if system:
             hist.append({"role": "user", "parts": [system, history[0]["content"]]})
```
```diff
@@ -633,7 +633,7 @@ class GeminiCV(Base):
         return res.text, total_token_count_from_response(res)
 
 
-    def chat(self, system, history, gen_conf, images=[], video_bytes=None, filename=""):
+    def chat(self, system, history, gen_conf, images=None, video_bytes=None, filename=""):
         if video_bytes:
             try:
                 summary, summary_num_tokens = self._process_video(video_bytes, filename)
```
```diff
@@ -651,7 +651,7 @@ class GeminiCV(Base):
         except Exception as e:
             return "**ERROR**: " + str(e), 0
 
-    def chat_streamly(self, system, history, gen_conf, images=[]):
+    def chat_streamly(self, system, history, gen_conf, images=None):
         ans = ""
         response = None
         try:
```
```diff
@@ -782,7 +782,7 @@ class NvidiaCV(Base):
             total_token_count_from_response(response)
         )
 
-    def chat(self, system, history, gen_conf, images=[], **kwargs):
+    def chat(self, system, history, gen_conf, images=None, **kwargs):
         try:
             response = self._request(self._form_history(system, history, images), gen_conf)
             return (
```
```diff
@@ -792,7 +792,7 @@ class NvidiaCV(Base):
         except Exception as e:
             return "**ERROR**: " + str(e), 0
 
-    def chat_streamly(self, system, history, gen_conf, images=[], **kwargs):
+    def chat_streamly(self, system, history, gen_conf, images=None, **kwargs):
         total_tokens = 0
         try:
             response = self._request(self._form_history(system, history, images), gen_conf)
```
```diff
@@ -858,7 +858,7 @@ class AnthropicCV(Base):
             gen_conf["max_tokens"] = self.max_tokens
         return gen_conf
 
-    def chat(self, system, history, gen_conf, images=[]):
+    def chat(self, system, history, gen_conf, images=None):
         gen_conf = self._clean_conf(gen_conf)
         ans = ""
         try:
```
```diff
@@ -879,7 +879,7 @@ class AnthropicCV(Base):
         except Exception as e:
             return ans + "\n**ERROR**: " + str(e), 0
 
-    def chat_streamly(self, system, history, gen_conf, images=[]):
+    def chat_streamly(self, system, history, gen_conf, images=None):
         gen_conf = self._clean_conf(gen_conf)
         total_tokens = 0
         try:
```
```diff
@@ -963,13 +963,13 @@ class GoogleCV(AnthropicCV, GeminiCV):
         else:
             return GeminiCV.describe_with_prompt(self, image, prompt)
 
-    def chat(self, system, history, gen_conf, images=[]):
+    def chat(self, system, history, gen_conf, images=None):
         if "claude" in self.model_name:
             return AnthropicCV.chat(self, system, history, gen_conf, images)
         else:
             return GeminiCV.chat(self, system, history, gen_conf, images)
 
-    def chat_streamly(self, system, history, gen_conf, images=[]):
+    def chat_streamly(self, system, history, gen_conf, images=None):
         if "claude" in self.model_name:
             for ans in AnthropicCV.chat_streamly(self, system, history, gen_conf, images):
                 yield ans
```