Feature/feat1017 (#2872)
### What problem does this PR solve?

1. fix: mind map rendering error in the knowledge graph, caused by an `@antv/g6` version change
2. feat: support configuring the number of concurrent threads in the graph extractor
3. fix: used-token count not updated for the tenant
4. feat: support a configurable timeout for LLM calls
5. fix: regex error in the graph extractor
6. feat: Qwen rerank (`gte-rerank`) support
7. fix: timeout handling in the knowledge graph indexing process; chat now streams its output, and streaming is configurable
8. feat: `qwen-long` model configuration

### Type of change

- [x] Bug Fix (non-breaking change which fixes an issue)
- [x] New Feature (non-breaking change which adds functionality)

---------

Co-authored-by: chongchuanbing <chongchuanbing@gmail.com>
Co-authored-by: Kevin Hu <kevinhu.sh@gmail.com>
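Two of the new behaviors are driven by environment variables that appear in the diff below. A minimal sketch of how a deployment might set them before the service starts; the variable names come from the diff, while the values here are purely illustrative:

```python
import os

# Illustrative value, not the PR's default: cap each OpenAI-compatible LLM
# request at 5 minutes (the diff falls back to 600 seconds when unset).
os.environ["LM_TIMEOUT_SECONDS"] = "300"

# QWEN_CHAT_BY_STREAM defaults to 'true' in the diff; 'false' makes
# QWenChat.chat() use the blocking Generation API instead of streaming.
os.environ["QWEN_CHAT_BY_STREAM"] = "false"
```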
@@ -122,7 +122,8 @@ RerankModel = {
     "TogetherAI": TogetherAIRerank,
     "SILICONFLOW": SILICONFLOWRerank,
     "BaiduYiyan": BaiduYiyanRerank,
-    "Voyage AI": VoyageRerank
+    "Voyage AI": VoyageRerank,
+    "Tongyi-Qianwen": QWenRerank,
 }

 Seq2txtModel = {
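`RerankModel` is a name-to-class registry, so adding the `"Tongyi-Qianwen": QWenRerank` entry is all that is needed to make the new reranker selectable by provider name. A hedged sketch of the lookup pattern; the API key and texts are placeholders, not taken from this PR:

```python
# Hypothetical use of the registry: resolve the provider name to its class,
# then instantiate it with a (placeholder) DashScope API key.
reranker_cls = RerankModel["Tongyi-Qianwen"]   # -> QWenRerank
reranker = reranker_cls(key="sk-dashscope-...", model_name="gte-rerank")
scores, used_tokens = reranker.similarity("what is RAG?", ["passage one", "passage two"])
```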
@@ -31,7 +31,8 @@ import asyncio

 class Base(ABC):
     def __init__(self, key, model_name, base_url):
-        self.client = OpenAI(api_key=key, base_url=base_url)
+        timeout = int(os.environ.get('LM_TIMEOUT_SECONDS', 600))
+        self.client = OpenAI(api_key=key, base_url=base_url, timeout=timeout)
         self.model_name = model_name

     def chat(self, system, history, gen_conf):
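The `timeout` argument here is the standard per-request timeout of the `openai` v1 client, so setting it once at construction covers every chat call made through `self.client`. The same pattern in isolation, with placeholder credentials:

```python
import os
from openai import OpenAI

# Process-wide knob with the diff's 10-minute default.
timeout = int(os.environ.get("LM_TIMEOUT_SECONDS", 600))

# Placeholder key and endpoint; any OpenAI-compatible server behaves the same.
client = OpenAI(api_key="sk-...", base_url="https://example.com/v1", timeout=timeout)
```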
@@ -216,28 +217,39 @@ class QWenChat(Base):
         self.model_name = model_name

     def chat(self, system, history, gen_conf):
-        from http import HTTPStatus
-        if system:
-            history.insert(0, {"role": "system", "content": system})
-        response = Generation.call(
-            self.model_name,
-            messages=history,
-            result_format='message',
-            **gen_conf
-        )
-        ans = ""
-        tk_count = 0
-        if response.status_code == HTTPStatus.OK:
-            ans += response.output.choices[0]['message']['content']
-            tk_count += response.usage.total_tokens
-            if response.output.choices[0].get("finish_reason", "") == "length":
-                ans += "...\nFor the content length reason, it stopped, continue?" if is_english(
-                    [ans]) else "······\n由于长度的原因,回答被截断了,要继续吗?"
-            return ans, tk_count
-
-        return "**ERROR**: " + response.message, tk_count
-
-    def chat_streamly(self, system, history, gen_conf):
+        stream_flag = str(os.environ.get('QWEN_CHAT_BY_STREAM', 'true')).lower() == 'true'
+        if not stream_flag:
+            from http import HTTPStatus
+            if system:
+                history.insert(0, {"role": "system", "content": system})
+
+            response = Generation.call(
+                self.model_name,
+                messages=history,
+                result_format='message',
+                **gen_conf
+            )
+            ans = ""
+            tk_count = 0
+            if response.status_code == HTTPStatus.OK:
+                ans += response.output.choices[0]['message']['content']
+                tk_count += response.usage.total_tokens
+                if response.output.choices[0].get("finish_reason", "") == "length":
+                    ans += "...\nFor the content length reason, it stopped, continue?" if is_english(
+                        [ans]) else "······\n由于长度的原因,回答被截断了,要继续吗?"
+                return ans, tk_count
+
+            return "**ERROR**: " + response.message, tk_count
+        else:
+            g = self._chat_streamly(system, history, gen_conf, incremental_output=True)
+            result_list = list(g)
+            error_msg_list = [item for item in result_list if str(item).find("**ERROR**") >= 0]
+            if len(error_msg_list) > 0:
+                return "**ERROR**: " + "".join(error_msg_list), 0
+            else:
+                return "".join(result_list[:-1]), result_list[-1]
+
+    def _chat_streamly(self, system, history, gen_conf, incremental_output=False):
         from http import HTTPStatus
         if system:
             history.insert(0, {"role": "system", "content": system})
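The refactor relies on a simple generator contract: `_chat_streamly` yields answer chunks as strings and, as its final item, the total token count as an integer. That is why the non-streaming `chat` path can split `result_list[:-1]` (text) from `result_list[-1]` (tokens). A minimal consumer following that contract; the instance and messages are placeholders:

```python
# Hypothetical caller; `model` is an already-constructed QWenChat instance.
# incremental_output=True makes every yielded chunk a delta, so plain
# concatenation reassembles the answer.
events = list(model._chat_streamly(
    "You are a helpful assistant.",
    [{"role": "user", "content": "Hello"}],
    {},
    incremental_output=True,
))
answer, token_count = "".join(events[:-1]), events[-1]
```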
@@ -249,6 +261,7 @@ class QWenChat(Base):
             messages=history,
             result_format='message',
             stream=True,
+            incremental_output=incremental_output,
             **gen_conf
         )
         for resp in response:
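`incremental_output` controls what each DashScope stream event carries: with `True`, an event holds only the newly generated delta, so chunks can be concatenated directly (which the blocking `chat` path above depends on); with the default `False`, each event repeats the whole answer generated so far. A sketch of the difference on the consumer side, with events reduced to plain strings:

```python
def assemble(chunks: list[str], incremental: bool) -> str:
    # incremental_output=True: each chunk is a delta; join them all.
    if incremental:
        return "".join(chunks)
    # incremental_output=False: each chunk is the full prefix; keep the last.
    return chunks[-1] if chunks else ""
```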
@@ -267,6 +280,9 @@ class QWenChat(Base):

         yield tk_count

+    def chat_streamly(self, system, history, gen_conf):
+        return self._chat_streamly(system, history, gen_conf)
+

 class ZhipuChat(Base):
     def __init__(self, key, model_name="glm-3-turbo", **kwargs):
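Keeping the public `chat_streamly` as a thin wrapper that leaves `incremental_output` at its `False` default looks deliberate: existing callers retain the old full-prefix streaming behavior, while the blocking `chat` path opts into incremental deltas internally.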
@@ -390,3 +390,27 @@ class VoyageRerank(Base):
         for r in res.results:
             rank[r.index] = r.relevance_score
         return rank, res.total_tokens
+
+class QWenRerank(Base):
+    def __init__(self, key, model_name='gte-rerank', base_url=None, **kwargs):
+        import dashscope
+        self.api_key = key
+        self.model_name = dashscope.TextReRank.Models.gte_rerank if model_name is None else model_name
+
+    def similarity(self, query: str, texts: list):
+        import dashscope
+        from http import HTTPStatus
+        resp = dashscope.TextReRank.call(
+            api_key=self.api_key,
+            model=self.model_name,
+            query=query,
+            documents=texts,
+            top_n=len(texts),
+            return_documents=False
+        )
+        rank = np.zeros(len(texts), dtype=float)
+        if resp.status_code == HTTPStatus.OK:
+            for r in resp.output.results:
+                rank[r.index] = r.relevance_score
+            return rank, resp.usage.total_tokens
+        return rank, 0
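End to end, the new reranker only needs a DashScope API key; `similarity` returns a NumPy array of relevance scores aligned with the input order, plus the tokens billed (or an all-zero array and 0 on failure). A short usage sketch with placeholder inputs:

```python
# Requires the `dashscope` and `numpy` packages and a (placeholder) API key.
reranker = QWenRerank(key="sk-dashscope-...")
scores, tokens = reranker.similarity(
    query="how do I configure the LLM timeout?",
    texts=["Set LM_TIMEOUT_SECONDS in the environment.", "Unrelated passage."],
)
best_idx = int(scores.argmax())  # scores[i] scores texts[i], so order is preserved
```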