Support Xinference (#320)
### What problem does this PR solve?

Issue link: #299

### Type of change

- [x] New Feature (non-breaking change which adds functionality)
```diff
@@ -21,6 +21,7 @@ from .cv_model import *
 EmbeddingModel = {
     "Ollama": OllamaEmbed,
     "OpenAI": OpenAIEmbed,
+    "Xinference": XinferenceEmbed,
     "Tongyi-Qianwen": HuEmbedding, #QWenEmbed,
     "ZHIPU-AI": ZhipuEmbed,
     "Moonshot": HuEmbedding
@@ -30,6 +31,7 @@ EmbeddingModel = {
 CvModel = {
     "OpenAI": GptV4,
     "Ollama": OllamaCV,
+    "Xinference": XinferenceCV,
     "Tongyi-Qianwen": QWenCV,
     "ZHIPU-AI": Zhipu4V,
     "Moonshot": LocalCV
@@ -41,6 +43,7 @@ ChatModel = {
     "ZHIPU-AI": ZhipuChat,
     "Tongyi-Qianwen": QWenChat,
     "Ollama": OllamaChat,
+    "Xinference": XinferenceChat,
     "Moonshot": MoonshotChat
 }

```
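For context, these dicts are simple factory registries: the provider name stored with a model configuration selects the implementation class. A minimal sketch of the lookup for the new provider, assuming the `ChatModel` registry above; the model name and endpoint below are hypothetical, not part of this PR:

```python
from rag.llm import ChatModel  # the registry defined in the hunk above

chat_cls = ChatModel["Xinference"]                  # resolve class by provider name
mdl = chat_cls(model_name="qwen-chat",              # hypothetical Xinference model UID
               base_url="http://localhost:9997/v1") # hypothetical local endpoint

answer, tokens = mdl.chat(
    system="You are a helpful assistant.",
    history=[{"role": "user", "content": "Hello!"}],
    gen_conf={"temperature": 0.7},
)
```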
```diff
@@ -158,6 +158,28 @@ class OllamaChat(Base):
             return "**ERROR**: " + str(e), 0


+class XinferenceChat(Base):
+    def __init__(self, key=None, model_name="", base_url=""):
+        self.client = OpenAI(api_key="xxx", base_url=base_url)
+        self.model_name = model_name
+
+    def chat(self, system, history, gen_conf):
+        if system:
+            history.insert(0, {"role": "system", "content": system})
+        try:
+            response = self.client.chat.completions.create(
+                model=self.model_name,
+                messages=history,
+                **gen_conf)
+            ans = response.choices[0].message.content.strip()
+            if response.choices[0].finish_reason == "length":
+                ans += "...\nFor the content length reason, it stopped, continue?" if is_english(
+                    [ans]) else "······\n由于长度的原因,回答被截断了,要继续吗?"
+            return ans, response.usage.completion_tokens
+        except openai.APIError as e:
+            return "**ERROR**: " + str(e), 0
+
+
 class LocalLLM(Base):
     class RPCProxy:
         def __init__(self, host, port):
```
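`XinferenceChat` works because Xinference serves an OpenAI-compatible REST API, which is why the placeholder `api_key="xxx"` is enough. A minimal sketch of the same call made directly with the stock `openai` client; the endpoint URL and model UID are hypothetical:

```python
from openai import OpenAI

# Any non-empty api_key works: Xinference's OpenAI-compatible server does not check it.
client = OpenAI(api_key="xxx", base_url="http://localhost:9997/v1")  # hypothetical endpoint
resp = client.chat.completions.create(
    model="my-model-uid",  # hypothetical Xinference model UID
    messages=[{"role": "user", "content": "Ping?"}],
    temperature=0.2,
)
print(resp.choices[0].message.content, resp.usage.completion_tokens)
```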
```diff
@@ -161,6 +161,22 @@ class OllamaCV(Base):
         except Exception as e:
             return "**ERROR**: " + str(e), 0

+class XinferenceCV(Base):
+    def __init__(self, key, model_name="", lang="Chinese", base_url=""):
+        self.client = OpenAI(api_key=key, base_url=base_url)
+        self.model_name = model_name
+        self.lang = lang
+
+    def describe(self, image, max_tokens=300):
+        b64 = self.image2base64(image)
+
+        res = self.client.chat.completions.create(
+            model=self.model_name,
+            messages=self.prompt(b64),
+            max_tokens=max_tokens,
+        )
+        return res.choices[0].message.content.strip(), res.usage.total_tokens


 class LocalCV(Base):
     def __init__(self, key, model_name="glm-4v", lang="Chinese", **kwargs):
```
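A sketch of driving the new vision class, assuming the `Base` helpers `image2base64()` and `prompt()` defined elsewhere in the same module; the module path, model name, and endpoint are assumptions, not confirmed by this diff:

```python
from PIL import Image
from rag.llm.cv_model import XinferenceCV  # module path assumed from the hunk context

cv = XinferenceCV(key="xxx",
                  model_name="qwen-vl-chat",           # hypothetical vision model UID
                  base_url="http://localhost:9997/v1") # hypothetical endpoint

image = Image.open("page.png")                         # any local image
caption, tokens = cv.describe(image, max_tokens=300)
print(caption, tokens)
```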
```diff
@@ -170,3 +170,20 @@ class OllamaEmbed(Base):
         res = self.client.embeddings(prompt=text,
                                      model=self.model_name)
         return np.array(res["embedding"]), 128
+
+
+class XinferenceEmbed(Base):
+    def __init__(self, key, model_name="", base_url=""):
+        self.client = OpenAI(api_key="xxx", base_url=base_url)
+        self.model_name = model_name
+
+    def encode(self, texts: list, batch_size=32):
+        res = self.client.embeddings.create(input=texts,
+                                            model=self.model_name)
+        return np.array([d.embedding for d in res.data]
+                        ), res.usage.total_tokens
+
+    def encode_queries(self, text):
+        res = self.client.embeddings.create(input=[text],
+                                            model=self.model_name)
+        return np.array(res.data[0].embedding), res.usage.total_tokens
```
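A quick sketch of exercising the new embedder; the module path, model name, and endpoint are assumptions, and any embedding model already launched in Xinference would do:

```python
from rag.llm.embedding_model import XinferenceEmbed  # module path assumed from hunk context

emb = XinferenceEmbed(key="xxx",
                      model_name="bge-base-en",            # hypothetical embedding model
                      base_url="http://localhost:9997/v1") # hypothetical endpoint

vecs, tokens = emb.encode(["hello world", "retrieval-augmented generation"])
print(vecs.shape, tokens)   # e.g. (2, 768) plus the server-reported token count

qvec, qtokens = emb.encode_queries("what is ragflow?")
print(qvec.shape, qtokens)
```

Note that `encode` never uses `batch_size` and `__init__` ignores `key`, hard-coding a placeholder for the OpenAI client, again because Xinference does not check credentials.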
```diff
@@ -34,7 +34,7 @@ LoggerFactory.set_directory(
         "logs",
         "rag"))
 # {CRITICAL: 50, FATAL:50, ERROR:40, WARNING:30, WARN:30, INFO:20, DEBUG:10, NOTSET:0}
-LoggerFactory.LEVEL = 10
+LoggerFactory.LEVEL = 30

 es_logger = getLogger("es")
 minio_logger = getLogger("minio")
```
```diff
@@ -24,6 +24,8 @@ import sys
 import time
 import traceback
 from functools import partial
+
+from api.db.db_models import close_connection
 from rag.settings import database_logger
 from rag.settings import cron_logger, DOC_MAXIMUM_SIZE
 from multiprocessing import Pool
@@ -302,3 +304,4 @@ if __name__ == "__main__":
     comm = MPI.COMM_WORLD
     while True:
         main(int(sys.argv[2]), int(sys.argv[1]))
+        close_connection()
```
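The trailing `close_connection()` releases the database connection at the end of every pass through the worker loop, so the long-running task executor does not accumulate stale connections. A minimal sketch of the same pattern with bare peewee; the database and table of work are hypothetical, not ragflow's actual models:

```python
from peewee import SqliteDatabase

db = SqliteDatabase("tasks.db")  # hypothetical DB; ragflow wraps its own pooled database

def close_connection():
    # Hand the connection back (or close it) if one is currently open.
    if not db.is_closed():
        db.close()

for _ in range(3):                   # the real executor loops forever (while True)
    db.connect(reuse_if_open=True)
    # ... fetch and process one batch of tasks against the database ...
    close_connection()               # release between iterations, as the diff does
```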