mirror of https://github.com/infiniflow/ragflow.git
Fix: gemini cv model chat issue. (#10799)
### What problem does this PR solve?

#10787 #10781

### Type of change

- [x] Bug Fix (non-breaking change which fixes an issue)
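Every hunk below makes the same kind of change: it widens a `chat`/`chat_streamly` signature with `**kwargs`. The patch suggests that generic call sites forward provider-specific keyword arguments (such as `video_bytes` or `filename`) to every CV model uniformly, so any signature that did not accept them raised `TypeError`. A minimal sketch of that failure mode and the fix, using hypothetical stand-in classes (`StrictCV` and `LenientCV` are illustrations, not ragflow classes):

```python
# Hypothetical stand-ins illustrating the TypeError that extra keyword
# arguments cause, and how **kwargs absorbs them.

class StrictCV:
    def chat(self, system, history, gen_conf, images=None):
        return "ok"


class LenientCV:
    def chat(self, system, history, gen_conf, images=None, **kwargs):
        # Unexpected keywords (e.g. video_bytes, filename) land in kwargs
        # and can be ignored by providers that do not support them.
        return "ok"


try:
    StrictCV().chat("sys", [], {}, images=None, video_bytes=b"\x00")
except TypeError as e:
    print(e)  # chat() got an unexpected keyword argument 'video_bytes'

print(LenientCV().chat("sys", [], {}, images=None, video_bytes=b"\x00"))  # ok
```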
@@ -228,7 +228,7 @@ class QWenCV(GptV4):
         base_url = "https://dashscope.aliyuncs.com/compatible-mode/v1"
         super().__init__(key, model_name, lang=lang, base_url=base_url, **kwargs)

-    def chat(self, system, history, gen_conf, images=None, video_bytes=None, filename=""):
+    def chat(self, system, history, gen_conf, images=None, video_bytes=None, filename="", **kwargs):
         if video_bytes:
             try:
                 summary, summary_num_tokens = self._process_video(video_bytes, filename)
@@ -547,7 +547,7 @@ class OllamaCV(Base):
         except Exception as e:
             return "**ERROR**: " + str(e), 0

-    def chat(self, system, history, gen_conf, images=None):
+    def chat(self, system, history, gen_conf, images=None, **kwargs):
         try:
             response = self.client.chat(
                 model=self.model_name,
@@ -561,7 +561,7 @@ class OllamaCV(Base):
         except Exception as e:
             return "**ERROR**: " + str(e), 0

-    def chat_streamly(self, system, history, gen_conf, images=None):
+    def chat_streamly(self, system, history, gen_conf, images=None, **kwargs):
         ans = ""
         try:
             response = self.client.chat(
@@ -633,7 +633,7 @@ class GeminiCV(Base):
         return res.text, total_token_count_from_response(res)


-    def chat(self, system, history, gen_conf, images=None, video_bytes=None, filename=""):
+    def chat(self, system, history, gen_conf, images=None, video_bytes=None, filename="", **kwargs):
         if video_bytes:
             try:
                 summary, summary_num_tokens = self._process_video(video_bytes, filename)
@@ -651,7 +651,7 @@ class GeminiCV(Base):
         except Exception as e:
             return "**ERROR**: " + str(e), 0

-    def chat_streamly(self, system, history, gen_conf, images=None):
+    def chat_streamly(self, system, history, gen_conf, images=None, **kwargs):
         ans = ""
         response = None
         try:
@@ -858,7 +858,7 @@ class AnthropicCV(Base):
             gen_conf["max_tokens"] = self.max_tokens
         return gen_conf

-    def chat(self, system, history, gen_conf, images=None):
+    def chat(self, system, history, gen_conf, images=None, **kwargs):
         gen_conf = self._clean_conf(gen_conf)
         ans = ""
         try:
@@ -879,7 +879,7 @@ class AnthropicCV(Base):
         except Exception as e:
             return ans + "\n**ERROR**: " + str(e), 0

-    def chat_streamly(self, system, history, gen_conf, images=None):
+    def chat_streamly(self, system, history, gen_conf, images=None, **kwargs):
         gen_conf = self._clean_conf(gen_conf)
         total_tokens = 0
         try:
@@ -963,13 +963,13 @@ class GoogleCV(AnthropicCV, GeminiCV):
         else:
             return GeminiCV.describe_with_prompt(self, image, prompt)

-    def chat(self, system, history, gen_conf, images=None):
+    def chat(self, system, history, gen_conf, images=None, **kwargs):
         if "claude" in self.model_name:
             return AnthropicCV.chat(self, system, history, gen_conf, images)
         else:
             return GeminiCV.chat(self, system, history, gen_conf, images)

-    def chat_streamly(self, system, history, gen_conf, images=None):
+    def chat_streamly(self, system, history, gen_conf, images=None, **kwargs):
         if "claude" in self.model_name:
             for ans in AnthropicCV.chat_streamly(self, system, history, gen_conf, images):
                 yield ans
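Note that `GoogleCV` keeps its explicit-dispatch pattern: it inherits from both `AnthropicCV` and `GeminiCV` and selects the parent implementation by model name rather than relying on the MRO. A condensed, self-contained sketch of that pattern (the class bodies here are stand-ins, not the real implementations):

```python
class AnthropicCV:
    def chat(self, system, history, gen_conf, images=None, **kwargs):
        return "anthropic"


class GeminiCV:
    def chat(self, system, history, gen_conf, images=None, **kwargs):
        return "gemini"


class GoogleCV(AnthropicCV, GeminiCV):
    def __init__(self, model_name):
        self.model_name = model_name

    def chat(self, system, history, gen_conf, images=None, **kwargs):
        # Route Claude models to the Anthropic implementation and everything
        # else to Gemini; extra keywords are accepted but not forwarded,
        # mirroring the diff above.
        if "claude" in self.model_name:
            return AnthropicCV.chat(self, system, history, gen_conf, images)
        return GeminiCV.chat(self, system, history, gen_conf, images)


print(GoogleCV("claude-3-5-sonnet").chat("sys", [], {}))  # anthropic
print(GoogleCV("gemini-2.0-flash").chat("sys", [], {}))   # gemini
```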
@@ -755,7 +755,7 @@ async def run_toc_from_text(chunks, chat_mdl, callback=None):

    # Merge structure and content (by index)
    prune = len(toc_with_levels) > 512
-    max_lvl = sorted([t.get("level", "0") for t in toc_with_levels])[-1]
+    max_lvl = sorted([t.get("level", "0") for t in toc_with_levels if isinstance(t, dict)])[-1]
     merged = []
     for _ , (toc_item, src_item) in enumerate(zip(toc_with_levels, filtered)):
         if prune and toc_item.get("level", "0") >= max_lvl:
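The last hunk touches a different file and hardens `run_toc_from_text`: `toc_with_levels` is assembled from LLM output, so it may contain non-dict entries, and calling `.get` on one raised `AttributeError`. A small illustration with made-up data:

```python
# Made-up TOC data: LLM-generated lists can contain malformed entries.
toc_with_levels = [
    {"title": "Intro", "level": "1"},
    "garbage",  # a bare string instead of a dict
    {"title": "Setup", "level": "2"},
]

# Before the fix, "garbage".get(...) raises AttributeError. The isinstance
# guard skips non-dict items when computing the deepest level:
max_lvl = sorted([t.get("level", "0") for t in toc_with_levels if isinstance(t, dict)])[-1]
print(max_lvl)  # "2"
```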