add rerank model (#969)

### What problem does this PR solve?

feat: add rerank models to the project #724 #162

### Type of change

- [x] New Feature (non-breaking change which adds functionality)
rag/llm/__init__.py

@@ -16,18 +16,19 @@
 from .embedding_model import *
 from .chat_model import *
 from .cv_model import *
+from .rerank_model import *


 EmbeddingModel = {
     "Ollama": OllamaEmbed,
     "OpenAI": OpenAIEmbed,
     "Xinference": XinferenceEmbed,
-    "Tongyi-Qianwen": DefaultEmbedding, #QWenEmbed,
+    "Tongyi-Qianwen": DefaultEmbedding,  # QWenEmbed,
     "ZHIPU-AI": ZhipuEmbed,
     "FastEmbed": FastEmbed,
     "Youdao": YoudaoEmbed,
     "DeepSeek": DefaultEmbedding,
-    "BaiChuan": BaiChuanEmbed
+    "BaiChuan": BaiChuanEmbed,
+    "BAAI": DefaultEmbedding
 }
@@ -52,3 +53,9 @@ ChatModel = {
     "BaiChuan": BaiChuanChat
 }


+RerankModel = {
+    "BAAI": DefaultRerank,
+    "Jina": JinaRerank,
+    "Youdao": YoudaoRerank,
+}
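For context, these registries are keyed by provider name: a caller looks up the configured provider and instantiates the class with the tenant's API key and model name. A minimal sketch of that flow, assuming the BAAI entries above (the model names are illustrative, and local BAAI models ignore the `key` argument):

```python
# Minimal usage sketch of the factory dicts added above.
from rag.llm import EmbeddingModel, RerankModel

embd_mdl = EmbeddingModel["BAAI"](key="", model_name="BAAI/bge-large-zh-v1.5")
rerank_mdl = RerankModel["BAAI"](key="", model_name="BAAI/bge-reranker-v2-m3")

vecs, tok = embd_mdl.encode(["hello", "world"])            # (2, dim) array + token count
scores, tok = rerank_mdl.similarity("query", ["a", "b"])   # one relevance score per text
```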
rag/llm/embedding_model.py

@@ -13,8 +13,10 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 #
+import re
 from typing import Optional

 import requests
 from huggingface_hub import snapshot_download
 from zhipuai import ZhipuAI
 import os
@@ -26,21 +28,9 @@ from FlagEmbedding import FlagModel
 import torch
 import numpy as np

-from api.utils.file_utils import get_project_base_directory, get_home_cache_dir
+from api.utils.file_utils import get_home_cache_dir
 from rag.utils import num_tokens_from_string, truncate

-try:
-    flag_model = FlagModel(os.path.join(get_home_cache_dir(), "bge-large-zh-v1.5"),
-                           query_instruction_for_retrieval="为这个句子生成表示以用于检索相关文章:",
-                           use_fp16=torch.cuda.is_available())
-except Exception as e:
-    model_dir = snapshot_download(repo_id="BAAI/bge-large-zh-v1.5",
-                                  local_dir=os.path.join(get_home_cache_dir(), "bge-large-zh-v1.5"),
-                                  local_dir_use_symlinks=False)
-    flag_model = FlagModel(model_dir,
-                           query_instruction_for_retrieval="为这个句子生成表示以用于检索相关文章:",
-                           use_fp16=torch.cuda.is_available())


 class Base(ABC):
     def __init__(self, key, model_name):
@@ -54,7 +44,9 @@ class Base(ABC):


 class DefaultEmbedding(Base):
-    def __init__(self, *args, **kwargs):
+    _model = None
+
+    def __init__(self, key, model_name, **kwargs):
         """
         If you have trouble downloading HuggingFace models, -_^ this might help!!
@@ -66,7 +58,18 @@ class DefaultEmbedding(Base):
         ^_-

         """
-        self.model = flag_model
+        if not DefaultEmbedding._model:
+            try:
+                DefaultEmbedding._model = FlagModel(os.path.join(get_home_cache_dir(), re.sub(r"^[a-zA-Z]+/", "", model_name)),
+                                                    query_instruction_for_retrieval="为这个句子生成表示以用于检索相关文章:",
+                                                    use_fp16=torch.cuda.is_available())
+            except Exception as e:
+                model_dir = snapshot_download(repo_id="BAAI/bge-large-zh-v1.5",
+                                              local_dir=os.path.join(get_home_cache_dir(), re.sub(r"^[a-zA-Z]+/", "", model_name)),
+                                              local_dir_use_symlinks=False)
+                DefaultEmbedding._model = FlagModel(model_dir,
+                                                    query_instruction_for_retrieval="为这个句子生成表示以用于检索相关文章:",
+                                                    use_fp16=torch.cuda.is_available())
+        self._model = DefaultEmbedding._model

     def encode(self, texts: list, batch_size=32):
         texts = [truncate(t, 2048) for t in texts]
@@ -75,12 +78,12 @@ class DefaultEmbedding(Base):
             token_count += num_tokens_from_string(t)
         res = []
         for i in range(0, len(texts), batch_size):
-            res.extend(self.model.encode(texts[i:i + batch_size]).tolist())
+            res.extend(self._model.encode(texts[i:i + batch_size]).tolist())
         return np.array(res), token_count

     def encode_queries(self, text: str):
         token_count = num_tokens_from_string(text)
-        return self.model.encode_queries([text]).tolist()[0], token_count
+        return self._model.encode_queries([text]).tolist()[0], token_count


 class OpenAIEmbed(Base):
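The hunks above replace the import-time `flag_model` global with a class-level `_model`, so the BGE weights load lazily on first use and are shared by later instances instead of being downloaded when the module is imported. A distilled sketch of the pattern (the class and loader names are placeholders, not ragflow code):

```python
class LazySharedModel:
    _model = None  # class-level cache, shared by every instance

    def __init__(self, model_path):
        if not LazySharedModel._model:
            # The expensive load runs once, on first instantiation,
            # instead of at module import time.
            LazySharedModel._model = load_weights(model_path)  # placeholder loader
        self._model = LazySharedModel._model
```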
@@ -189,16 +192,19 @@ class OllamaEmbed(Base):


 class FastEmbed(Base):
+    _model = None
+
     def __init__(
-        self,
-        key: Optional[str] = None,
-        model_name: str = "BAAI/bge-small-en-v1.5",
-        cache_dir: Optional[str] = None,
-        threads: Optional[int] = None,
-        **kwargs,
+            self,
+            key: Optional[str] = None,
+            model_name: str = "BAAI/bge-small-en-v1.5",
+            cache_dir: Optional[str] = None,
+            threads: Optional[int] = None,
+            **kwargs,
     ):
         from fastembed import TextEmbedding
-        self._model = TextEmbedding(model_name, cache_dir, threads, **kwargs)
+        if not FastEmbed._model:
+            FastEmbed._model = TextEmbedding(model_name, cache_dir, threads, **kwargs)

     def encode(self, texts: list, batch_size=32):
         # Using the internal tokenizer to encode the texts and get the total
@@ -265,3 +271,29 @@ class YoudaoEmbed(Base):
     def encode_queries(self, text):
         embds = YoudaoEmbed._client.encode([text])
         return np.array(embds[0]), num_tokens_from_string(text)
+
+
+class JinaEmbed(Base):
+    def __init__(self, key, model_name="jina-embeddings-v2-base-zh",
+                 base_url="https://api.jina.ai/v1/embeddings"):
+        self.base_url = "https://api.jina.ai/v1/embeddings"
+        self.headers = {
+            "Content-Type": "application/json",
+            "Authorization": f"Bearer {key}"
+        }
+        self.model_name = model_name
+
+    def encode(self, texts: list, batch_size=None):
+        texts = [truncate(t, 8196) for t in texts]
+        data = {
+            "model": self.model_name,
+            "input": texts,
+            "encoding_type": "float"
+        }
+        res = requests.post(self.base_url, headers=self.headers, json=data).json()
+        return np.array([d["embedding"] for d in res["data"]]), res["usage"]["total_tokens"]
+
+    def encode_queries(self, text):
+        embds, cnt = self.encode([text])
+        return np.array(embds[0]), cnt
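JinaEmbed is a thin HTTP client: it truncates the inputs, posts them to the hosted embeddings endpoint, and reads the vectors and token usage out of the JSON response. A hedged usage sketch (the API key is a placeholder and the shape is only indicative):

```python
from rag.llm.embedding_model import JinaEmbed

mdl = JinaEmbed(key="jina_xxxxxxxx")  # placeholder API key
vecs, used_tokens = mdl.encode(["hello world", "你好世界"])
print(vecs.shape, used_tokens)  # e.g. (2, 768) for jina-embeddings-v2-base-zh
```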
rag/llm/rerank_model.py (new file, 113 lines)

@@ -0,0 +1,113 @@
+#
+# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+import re
+import requests
+import torch
+from FlagEmbedding import FlagReranker
+from huggingface_hub import snapshot_download
+import os
+from abc import ABC
+import numpy as np
+
+from api.utils.file_utils import get_home_cache_dir
+from rag.utils import num_tokens_from_string, truncate
+
+
+class Base(ABC):
+    def __init__(self, key, model_name):
+        pass
+
+    def similarity(self, query: str, texts: list):
+        raise NotImplementedError("Please implement similarity method!")
+class DefaultRerank(Base):
+    _model = None
+
+    def __init__(self, key, model_name, **kwargs):
+        """
+        If you have trouble downloading HuggingFace models, -_^ this might help!!
+
+        For Linux:
+        export HF_ENDPOINT=https://hf-mirror.com
+
+        For Windows:
+        Good luck
+        ^_-
+
+        """
+        if not DefaultRerank._model:
+            try:
+                DefaultRerank._model = FlagReranker(os.path.join(get_home_cache_dir(), re.sub(r"^[a-zA-Z]+/", "", model_name)),
+                                                    use_fp16=torch.cuda.is_available())
+            except Exception as e:
+                model_dir = snapshot_download(repo_id=model_name,
+                                              local_dir=os.path.join(get_home_cache_dir(),
+                                                                     re.sub(r"^[a-zA-Z]+/", "", model_name)),
+                                              local_dir_use_symlinks=False)
+                DefaultRerank._model = FlagReranker(model_dir,
+                                                    use_fp16=torch.cuda.is_available())
+        self._model = DefaultRerank._model
+
+    def similarity(self, query: str, texts: list):
+        pairs = [(query, truncate(t, 2048)) for t in texts]
+        token_count = 0
+        for _, t in pairs:
+            token_count += num_tokens_from_string(t)
+        batch_size = 32
+        res = []
+        for i in range(0, len(pairs), batch_size):
+            scores = self._model.compute_score(pairs[i:i + batch_size], max_length=2048)
+            res.extend(scores)
+        return np.array(res), token_count
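DefaultRerank pairs the query with each truncated candidate, scores the pairs in batches of 32 through FlagReranker.compute_score, and returns the raw scores plus a token count. A minimal usage sketch (the model name is illustrative and `key` is unused for local models):

```python
from rag.llm.rerank_model import DefaultRerank

mdl = DefaultRerank(key="", model_name="BAAI/bge-reranker-v2-m3")
scores, used_tokens = mdl.similarity(
    "what is ragflow?",
    ["RAGFlow is an open-source RAG engine.", "The weather is nice today."])
# `scores` is a numpy array with one unnormalized relevance score per text
```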
+
+
+class JinaRerank(Base):
+    def __init__(self, key, model_name="jina-reranker-v1-base-en",
+                 base_url="https://api.jina.ai/v1/rerank"):
+        self.base_url = "https://api.jina.ai/v1/rerank"
+        self.headers = {
+            "Content-Type": "application/json",
+            "Authorization": f"Bearer {key}"
+        }
+        self.model_name = model_name
+
+    def similarity(self, query: str, texts: list):
+        texts = [truncate(t, 8196) for t in texts]
+        data = {
+            "model": self.model_name,
+            "query": query,
+            "documents": texts,
+            "top_n": len(texts)
+        }
+        res = requests.post(self.base_url, headers=self.headers, json=data).json()
+        return np.array([d["relevance_score"] for d in res["results"]]), res["usage"]["total_tokens"]
+
+
+class YoudaoRerank(DefaultRerank):
+    _model = None
+
+    def __init__(self, key=None, model_name="maidalun1020/bce-reranker-base_v1", **kwargs):
+        from BCEmbedding import RerankerModel
+        if not YoudaoRerank._model:
+            try:
+                print("LOADING BCE...")
+                YoudaoRerank._model = RerankerModel(model_name_or_path=os.path.join(
+                    get_home_cache_dir(),
+                    re.sub(r"^[a-zA-Z]+/", "", model_name)))
+            except Exception as e:
+                YoudaoRerank._model = RerankerModel(
+                    model_name_or_path=model_name.replace(
+                        "maidalun1020", "InfiniFlow"))
rag/nlp/query.py

@@ -54,7 +54,8 @@ class EsQueryer:
         if not self.isChinese(txt):
             tks = rag_tokenizer.tokenize(txt).split(" ")
             tks_w = self.tw.weights(tks)
-            q = [re.sub(r"[ \\\"']+", "", tk)+"^{:.4f}".format(w) for tk, w in tks_w]
+            tks_w = [(re.sub(r"[ \\\"']+", "", tk), w) for tk, w in tks_w]
+            q = ["{}^{:.4f}".format(tk, w) for tk, w in tks_w if tk]
             for i in range(1, len(tks_w)):
                 q.append("\"%s %s\"^%.4f" % (tks_w[i - 1][0], tks_w[i][0], max(tks_w[i - 1][1], tks_w[i][1])*2))
             if not q:
@@ -136,7 +137,11 @@ class EsQueryer:
         from sklearn.metrics.pairwise import cosine_similarity as CosineSimilarity
         import numpy as np
         sims = CosineSimilarity([avec], bvecs)
+        tksim = self.token_similarity(atks, btkss)
+        return np.array(sims[0]) * vtweight + \
+            np.array(tksim) * tkweight, tksim, sims[0]
+
+    def token_similarity(self, atks, btkss):
         def toDict(tks):
             d = {}
             if isinstance(tks, str):
@@ -149,9 +154,7 @@ class EsQueryer:

         atks = toDict(atks)
         btkss = [toDict(tks) for tks in btkss]
-        tksim = [self.similarity(atks, btks) for btks in btkss]
-        return np.array(sims[0]) * vtweight + \
-            np.array(tksim) * tkweight, tksim, sims[0]
+        return [self.similarity(atks, btks) for btks in btkss]

     def similarity(self, qtwt, dtwt):
         if isinstance(dtwt, type("")):
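This refactor extracts the token-overlap computation into token_similarity so that rerank_by_model (added below in rag/nlp/search.py) can reuse it without recomputing cosine similarity. A simplified sketch of the idea, with uniform weights standing in for EsQueryer's learned term weights:

```python
# Simplified illustration: each side becomes a token -> weight dict, and the
# score is the overlapping query weight mass over the total query weight mass.
def to_dict(tokens):
    d = {}
    for t in tokens:
        d[t] = d.get(t, 0) + 1
    return d

def token_sim(qtwt, dtwt):
    hit = sum(w for t, w in qtwt.items() if t in dtwt)
    return hit / (sum(qtwt.values()) + 1e-9)

q = to_dict("what is ragflow".split())
d = to_dict("ragflow is a rag engine".split())
print(round(token_sim(q, d), 3))  # 0.667: two of the three query tokens overlap
```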
rag/nlp/rag_tokenizer.py

@@ -241,11 +241,14 @@ class RagTokenizer:
         return self.score_(res[::-1])

+    def english_normalize_(self, tks):
+        return [self.stemmer.stem(self.lemmatizer.lemmatize(t)) if re.match(r"[a-zA-Z_-]+$", t) else t for t in tks]
+
     def tokenize(self, line):
         line = self._strQ2B(line).lower()
         line = self._tradi2simp(line)
         zh_num = len([1 for c in line if is_chinese(c)])
-        if zh_num < len(line) * 0.2:
+        if zh_num == 0:
             return " ".join([self.stemmer.stem(self.lemmatizer.lemmatize(t)) for t in word_tokenize(line)])

         arr = re.split(self.SPLIT_CHAR, line)
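english_normalize_ lemmatizes and then stems English-looking tokens while leaving everything else untouched; the two hunks below route the tokenizer's final token lists through it. A standalone sketch of its behavior (assumes NLTK with WordNet data installed; output is illustrative):

```python
import re
from nltk.stem import PorterStemmer, WordNetLemmatizer

stemmer, lemmatizer = PorterStemmer(), WordNetLemmatizer()

def english_normalize(tks):
    # lemmatize ("studies" -> "study"), then stem ("study" -> "studi");
    # tokens that are not purely alphabetic pass through unchanged
    return [stemmer.stem(lemmatizer.lemmatize(t))
            if re.match(r"[a-zA-Z_-]+$", t) else t for t in tks]

print(english_normalize(["studies", "children", "中文"]))
# illustrative output: ['studi', 'child', '中文']
```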
@@ -293,7 +296,7 @@ class RagTokenizer:
             i = e + 1

-        res = " ".join(res)
+        res = " ".join(self.english_normalize_(res))
         if self.DEBUG:
             print("[TKS]", self.merge_(res))
         return self.merge_(res)
@@ -336,7 +339,7 @@ class RagTokenizer:
             res.append(stk)

-        return " ".join(res)
+        return " ".join(self.english_normalize_(res))


 def is_chinese(s):
rag/nlp/search.py

@@ -71,8 +71,8 @@ class Dealer:
         s = Search()
         pg = int(req.get("page", 1)) - 1
-        ps = int(req.get("size", 1000))
+        topk = int(req.get("topk", 1024))
+        ps = int(req.get("size", topk))
         src = req.get("fields", ["docnm_kwd", "content_ltks", "kb_id", "img_id", "title_tks", "important_kwd",
                                  "image_id", "doc_id", "q_512_vec", "q_768_vec", "position_int",
                                  "q_1024_vec", "q_1536_vec", "available_int", "content_with_weight"])
@@ -311,6 +311,26 @@ class Dealer:
                             ins_tw, tkweight, vtweight)
         return sim, tksim, vtsim

+    def rerank_by_model(self, rerank_mdl, sres, query, tkweight=0.3,
+                        vtweight=0.7, cfield="content_ltks"):
+        _, keywords = self.qryr.question(query)
+
+        for i in sres.ids:
+            if isinstance(sres.field[i].get("important_kwd", []), str):
+                sres.field[i]["important_kwd"] = [sres.field[i]["important_kwd"]]
+        ins_tw = []
+        for i in sres.ids:
+            content_ltks = sres.field[i][cfield].split(" ")
+            title_tks = [t for t in sres.field[i].get("title_tks", "").split(" ") if t]
+            important_kwd = sres.field[i].get("important_kwd", [])
+            tks = content_ltks + title_tks + important_kwd
+            ins_tw.append(tks)
+
+        tksim = self.qryr.token_similarity(keywords, ins_tw)
+        vtsim, _ = rerank_mdl.similarity(" ".join(keywords), [rmSpace(" ".join(tks)) for tks in ins_tw])
+
+        return tkweight * np.array(tksim) + vtweight * vtsim, tksim, vtsim
+
     def hybrid_similarity(self, ans_embd, ins_embd, ans, inst):
         return self.qryr.hybrid_similarity(ans_embd,
                                            ins_embd,
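rerank_by_model keeps the hybrid shape of the existing rerank: the token-overlap term stays, but the vector-cosine term is replaced by the rerank model's relevance scores. A worked example of the blend with the default weights (the per-chunk scores are illustrative):

```python
import numpy as np

tkweight, vtweight = 0.3, 0.7        # defaults of rerank_by_model
tksim = np.array([0.50, 0.10])       # token-overlap similarity per chunk
vtsim = np.array([0.90, 0.20])       # rerank-model score per chunk

sim = tkweight * tksim + vtweight * vtsim
print(sim)  # [0.78 0.17] -> the first chunk ranks highest
```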
@@ -318,17 +338,22 @@ class Dealer:
                                            rag_tokenizer.tokenize(inst).split(" "))

     def retrieval(self, question, embd_mdl, tenant_id, kb_ids, page, page_size, similarity_threshold=0.2,
-                  vector_similarity_weight=0.3, top=1024, doc_ids=None, aggs=True):
+                  vector_similarity_weight=0.3, top=1024, doc_ids=None, aggs=True, rerank_mdl=None):
         ranks = {"total": 0, "chunks": [], "doc_aggs": {}}
         if not question:
             return ranks
         req = {"kb_ids": kb_ids, "doc_ids": doc_ids, "size": page_size,
                "question": question, "vector": True, "topk": top,
-               "similarity": similarity_threshold}
+               "similarity": similarity_threshold,
+               "available_int": 1}
         sres = self.search(req, index_name(tenant_id), embd_mdl)

-        sim, tsim, vsim = self.rerank(
-            sres, question, 1 - vector_similarity_weight, vector_similarity_weight)
+        if rerank_mdl:
+            sim, tsim, vsim = self.rerank_by_model(rerank_mdl,
+                                                   sres, question, 1 - vector_similarity_weight, vector_similarity_weight)
+        else:
+            sim, tsim, vsim = self.rerank(
+                sres, question, 1 - vector_similarity_weight, vector_similarity_weight)
         idx = np.argsort(sim * -1)

         dim = len(sres.query_vector)
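Callers opt into model-based reranking by passing rerank_mdl; with the default of None, retrieval behaves exactly as before. A hedged end-to-end sketch (the connection object, tenant and kb identifiers, and model names are placeholders):

```python
from rag.nlp.search import Dealer
from rag.llm import EmbeddingModel, RerankModel

dealer = Dealer(es_conn)  # assumes an existing Elasticsearch connection wrapper
embd_mdl = EmbeddingModel["BAAI"]("", "BAAI/bge-large-zh-v1.5")
rerank_mdl = RerankModel["BAAI"]("", "BAAI/bge-reranker-v2-m3")

ranks = dealer.retrieval("what is ragflow?", embd_mdl,
                         tenant_id="tenant_0", kb_ids=["kb_0"],
                         page=1, page_size=10,
                         rerank_mdl=rerank_mdl)  # omit to keep the old behavior
```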