Mirror of https://github.com/infiniflow/ragflow.git (synced 2025-12-08 20:42:30 +08:00)
Fix: rerank_model and pdf_parser bugs | Update: session API (#2601)
### What problem does this PR solve?

Fix: rerank_model and pdf_parser bugs | Update: session API

#2575 #2559

### Type of change

- [x] Bug Fix (non-breaking change which fixes an issue)
- [x] Refactoring

---------

Co-authored-by: liuhua <10215101452@stu.ecun.edu.cn>
@@ -26,9 +26,11 @@ from api.utils.file_utils import get_home_cache_dir
 from rag.utils import num_tokens_from_string, truncate
+import json


 def sigmoid(x):
     return 1 / (1 + np.exp(-x))


 class Base(ABC):
     def __init__(self, key, model_name):
         pass
@@ -59,16 +61,19 @@ class DefaultRerank(Base):
         with DefaultRerank._model_lock:
             if not DefaultRerank._model:
                 try:
-                    DefaultRerank._model = FlagReranker(os.path.join(get_home_cache_dir(), re.sub(r"^[a-zA-Z]+/", "", model_name)), use_fp16=torch.cuda.is_available())
+                    DefaultRerank._model = FlagReranker(
+                        os.path.join(get_home_cache_dir(), re.sub(r"^[a-zA-Z]+/", "", model_name)),
+                        use_fp16=torch.cuda.is_available())
                 except Exception as e:
-                    model_dir = snapshot_download(repo_id= model_name,
-                                      local_dir=os.path.join(get_home_cache_dir(), re.sub(r"^[a-zA-Z]+/", "", model_name)),
+                    model_dir = snapshot_download(repo_id=model_name,
+                                                  local_dir=os.path.join(get_home_cache_dir(),
+                                                                         re.sub(r"^[a-zA-Z]+/", "", model_name)),
                                       local_dir_use_symlinks=False)
                     DefaultRerank._model = FlagReranker(model_dir, use_fp16=torch.cuda.is_available())
         self._model = DefaultRerank._model

     def similarity(self, query: str, texts: list):
-        pairs = [(query,truncate(t, 2048)) for t in texts]
+        pairs = [(query, truncate(t, 2048)) for t in texts]
         token_count = 0
         for _, t in pairs:
             token_count += num_tokens_from_string(t)
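The hunk above only reflows the model-loading call, but the control flow it wraps is worth spelling out: try the locally cached FlagReranker weights first, and fall back to downloading them from Hugging Face on failure. A minimal standalone sketch of that pattern, assuming FlagEmbedding and huggingface_hub are installed; the load_reranker helper and CACHE_DIR constant are illustrative stand-ins, not part of the commit:

```python
import os
import re

import torch
from FlagEmbedding import FlagReranker
from huggingface_hub import snapshot_download

# Stand-in for get_home_cache_dir(); any writable directory works.
CACHE_DIR = os.path.expanduser("~/.ragflow")


def load_reranker(model_name: str) -> FlagReranker:
    """Load a cross-encoder reranker from the local cache, downloading it on a miss."""
    # Drop the "org/" prefix so the cache path is just the model name.
    local_path = os.path.join(CACHE_DIR, re.sub(r"^[a-zA-Z]+/", "", model_name))
    try:
        # Fast path: the weights are already on disk.
        return FlagReranker(local_path, use_fp16=torch.cuda.is_available())
    except Exception:
        # Cache miss (or broken cache): pull the repo from Hugging Face, then retry.
        model_dir = snapshot_download(repo_id=model_name,
                                      local_dir=local_path,
                                      local_dir_use_symlinks=False)
        return FlagReranker(model_dir, use_fp16=torch.cuda.is_available())
```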
@@ -77,8 +82,10 @@ class DefaultRerank(Base):
         for i in range(0, len(pairs), batch_size):
             scores = self._model.compute_score(pairs[i:i + batch_size], max_length=2048)
             scores = sigmoid(np.array(scores)).tolist()
-            if isinstance(scores, float): res.append(scores)
-            else: res.extend(scores)
+            if isinstance(scores, float):
+                res.append(scores)
+            else:
+                res.extend(scores)
         return np.array(res), token_count
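The expanded if/else above matters because FlagReranker.compute_score appears to return a bare float for a single pair and a list for a batch, and the sigmoid squashes raw cross-encoder logits into (0, 1) so scores from different batches stay comparable. A hedged sketch of the scoring loop as a standalone helper; batched_scores, its defaults, and the single-pair behaviour of compute_score are assumptions for illustration:

```python
import numpy as np


def sigmoid(x):
    # Map raw cross-encoder logits onto (0, 1) so batches are comparable.
    return 1 / (1 + np.exp(-x))


def batched_scores(model, pairs, batch_size=32, max_length=2048):
    """Score (query, text) pairs in batches; returns one score per pair, in input order."""
    res = []
    for i in range(0, len(pairs), batch_size):
        scores = model.compute_score(pairs[i:i + batch_size], max_length=max_length)
        scores = sigmoid(np.array(scores)).tolist()
        # A single pair yields a float, a batch yields a list, hence the branch.
        if isinstance(scores, float):
            res.append(scores)
        else:
            res.extend(scores)
    return np.array(res)
```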
@@ -101,7 +108,10 @@ class JinaRerank(Base):
             "top_n": len(texts)
         }
         res = requests.post(self.base_url, headers=self.headers, json=data).json()
-        return np.array([d["relevance_score"] for d in res["results"]]), res["usage"]["total_tokens"]
+        rank = np.zeros(len(texts), dtype=float)
+        for d in res["results"]:
+            rank[d["index"]] = d["relevance_score"]
+        return rank, res["usage"]["total_tokens"]


 class YoudaoRerank(DefaultRerank):
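This Jina hunk is the shape of the fix repeated through the rest of the file: the old code returned scores in whatever order the rerank API listed its results (typically sorted by relevance), while callers expect one score per input text, in input order. Scattering each score back to its original index restores that alignment. A small self-contained illustration with a made-up response:

```python
import numpy as np

# Hypothetical rerank response: results arrive sorted by relevance,
# each carrying the index of the document it refers to.
results = [
    {"index": 2, "relevance_score": 0.91},
    {"index": 0, "relevance_score": 0.40},
    {"index": 1, "relevance_score": 0.12},
]
texts = ["doc a", "doc b", "doc c"]

# Old behaviour: scores come out ordered by relevance, so position i
# no longer corresponds to texts[i].
old_rank = np.array([d["relevance_score"] for d in results])
print(old_rank)  # [0.91 0.4  0.12]

# Fixed behaviour: scatter each score back to its original position.
rank = np.zeros(len(texts), dtype=float)
for d in results:
    rank[d["index"]] = d["relevance_score"]
print(rank)  # [0.4  0.12 0.91] -> aligned with texts
```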
@@ -124,7 +134,7 @@ class YoudaoRerank(DefaultRerank):
                     "maidalun1020", "InfiniFlow"))

         self._model = YoudaoRerank._model

     def similarity(self, query: str, texts: list):
         pairs = [(query, truncate(t, self._model.max_length)) for t in texts]
         token_count = 0
@@ -135,8 +145,10 @@ class YoudaoRerank(DefaultRerank):
         for i in range(0, len(pairs), batch_size):
             scores = self._model.compute_score(pairs[i:i + batch_size], max_length=self._model.max_length)
             scores = sigmoid(np.array(scores)).tolist()
-            if isinstance(scores, float): res.append(scores)
-            else: res.extend(scores)
+            if isinstance(scores, float):
+                res.append(scores)
+            else:
+                res.extend(scores)
         return np.array(res), token_count
@@ -162,7 +174,10 @@ class XInferenceRerank(Base):
             "documents": texts
         }
         res = requests.post(self.base_url, headers=self.headers, json=data).json()
-        return np.array([d["relevance_score"] for d in res["results"]]), res["meta"]["tokens"]["input_tokens"]+res["meta"]["tokens"]["output_tokens"]
+        rank = np.zeros(len(texts), dtype=float)
+        for d in res["results"]:
+            rank[d["index"]] = d["relevance_score"]
+        return rank, res["meta"]["tokens"]["input_tokens"] + res["meta"]["tokens"]["output_tokens"]


 class LocalAIRerank(Base):
@@ -175,7 +190,7 @@ class LocalAIRerank(Base):

 class NvidiaRerank(Base):
     def __init__(
         self, key, model_name, base_url="https://ai.api.nvidia.com/v1/retrieval/nvidia/"
     ):
         if not base_url:
             base_url = "https://ai.api.nvidia.com/v1/retrieval/nvidia/"
@@ -208,9 +223,10 @@ class NvidiaRerank(Base):
             "top_n": len(texts),
         }
         res = requests.post(self.base_url, headers=self.headers, json=data).json()
-        rank = np.array([d["logit"] for d in res["rankings"]])
-        indexs = [d["index"] for d in res["rankings"]]
-        return rank[indexs], token_count
+        rank = np.zeros(len(texts), dtype=float)
+        for d in res["rankings"]:
+            rank[d["index"]] = d["logit"]
+        return rank, token_count


 class LmStudioRerank(Base):
@@ -247,9 +263,10 @@ class CoHereRerank(Base):
             top_n=len(texts),
             return_documents=False,
         )
-        rank = np.array([d.relevance_score for d in res.results])
-        indexs = [d.index for d in res.results]
-        return rank[indexs], token_count
+        rank = np.zeros(len(texts), dtype=float)
+        for d in res.results:
+            rank[d.index] = d.relevance_score
+        return rank, token_count


 class TogetherAIRerank(Base):
@@ -262,7 +279,7 @@ class TogetherAIRerank(Base):

 class SILICONFLOWRerank(Base):
     def __init__(
         self, key, model_name, base_url="https://api.siliconflow.cn/v1/rerank"
     ):
         if not base_url:
             base_url = "https://api.siliconflow.cn/v1/rerank"
@@ -287,10 +304,11 @@ class SILICONFLOWRerank(Base):
         response = requests.post(
             self.base_url, json=payload, headers=self.headers
         ).json()
-        rank = np.array([d["relevance_score"] for d in response["results"]])
-        indexs = [d["index"] for d in response["results"]]
+        rank = np.zeros(len(texts), dtype=float)
+        for d in response["results"]:
+            rank[d["index"]] = d["relevance_score"]
         return (
-            rank[indexs],
+            rank,
             response["meta"]["tokens"]["input_tokens"] + response["meta"]["tokens"]["output_tokens"],
         )
@@ -312,9 +330,10 @@ class BaiduYiyanRerank(Base):
             documents=texts,
             top_n=len(texts),
         ).body
-        rank = np.array([d["relevance_score"] for d in res["results"]])
-        indexs = [d["index"] for d in res["results"]]
-        return rank[indexs], res["usage"]["total_tokens"]
+        rank = np.zeros(len(texts), dtype=float)
+        for d in res["results"]:
+            rank[d["index"]] = d["relevance_score"]
+        return rank, res["usage"]["total_tokens"]


 class VoyageRerank(Base):
@@ -328,6 +347,7 @@ class VoyageRerank(Base):
         res = self.client.rerank(
             query=query, documents=texts, model=self.model_name, top_k=len(texts)
         )
-        rank = np.array([r.relevance_score for r in res.results])
-        indexs = [r.index for r in res.results]
-        return rank[indexs], res.total_tokens
+        rank = np.zeros(len(texts), dtype=float)
+        for r in res.results:
+            rank[r.index] = r.relevance_score
+        return rank, res.total_tokens
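The same scatter-by-index rewrite is applied to every remote reranker above (NVIDIA, Cohere, SiliconFlow, Baidu Yiyan, Voyage). It is not part of this commit, but the repeated block could plausibly be factored into one small helper along these lines:

```python
from typing import Iterable, Tuple

import numpy as np


def align_scores(results: Iterable[Tuple[int, float]], n_texts: int) -> np.ndarray:
    """Place each (index, score) pair at its original document position.

    Hypothetical helper, not part of the commit: each provider's results
    would first be adapted into (index, score) tuples.
    """
    rank = np.zeros(n_texts, dtype=float)
    for idx, score in results:
        rank[idx] = score
    return rank


# JSON-style responses:
#   rank = align_scores(((d["index"], d["relevance_score"]) for d in res["results"]), len(texts))
# SDK result objects:
#   rank = align_scores(((r.index, r.relevance_score) for r in res.results), len(texts))
```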
||||