Mirror of https://github.com/infiniflow/ragflow.git, synced 2025-12-08 20:42:30 +08:00
Add dockerfile for CUDA environment. Refine table search strategy. (#123)
rag/llm/__init__.py

@@ -19,18 +19,20 @@ from .cv_model import *


 EmbeddingModel = {
-    "local": HuEmbedding,
+    "Local": HuEmbedding,
     "OpenAI": OpenAIEmbed,
     "通义千问": HuEmbedding, #QWenEmbed,
-    "智谱AI": ZhipuEmbed
+    "智谱AI": ZhipuEmbed,
+    "Moonshot": HuEmbedding
 }


 CvModel = {
     "OpenAI": GptV4,
-    "local": LocalCV,
+    "Local": LocalCV,
     "通义千问": QWenCV,
-    "智谱AI": Zhipu4V
+    "智谱AI": Zhipu4V,
+    "Moonshot": LocalCV
 }

@@ -38,6 +40,7 @@ ChatModel = {
     "OpenAI": GptTurbo,
     "智谱AI": ZhipuChat,
     "通义千问": QWenChat,
-    "local": LocalLLM
+    "Local": LocalLLM,
+    "Moonshot": MoonshotChat
 }

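These dictionaries are provider registries: the factory name stored with a tenant's model configuration is used as the lookup key for the concrete client class, which is why this commit normalizes "local" to "Local" and appends "Moonshot" entries. A minimal dispatch sketch; build_chat_model, the factory string, and the credentials are hypothetical stand-ins for the real call sites elsewhere in ragflow:

    # Hypothetical dispatch helper; ChatModel is the registry from the diff above.
    def build_chat_model(factory: str, api_key: str, model_name: str):
        if factory not in ChatModel:
            raise ValueError("Unknown LLM factory: " + factory)
        # Keys are matched exactly, so "local" vs. "Local" matters.
        return ChatModel[factory](api_key, model_name)

    chat_mdl = build_chat_model("Moonshot", "sk-...", "moonshot-v1-8k")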
rag/llm/chat_model.py

@@ -14,11 +14,8 @@
 # limitations under the License.
 #
 from abc import ABC
-from copy import deepcopy
-
 from openai import OpenAI
 import openai
-
 from rag.nlp import is_english
 from rag.utils import num_tokens_from_string

@@ -52,6 +49,12 @@ class GptTurbo(Base):
             return "**ERROR**: "+str(e), 0


+class MoonshotChat(GptTurbo):
+    def __init__(self, key, model_name="moonshot-v1-8k"):
+        self.client = OpenAI(api_key=key, base_url="https://api.moonshot.cn/v1",)
+        self.model_name = model_name
+
+
 from dashscope import Generation
 class QWenChat(Base):
     def __init__(self, key, model_name=Generation.Models.qwen_turbo):

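MoonshotChat can subclass GptTurbo and reuse its chat() unchanged because Moonshot serves an OpenAI-compatible API; only the client's base_url and default model differ. A standalone sketch of what the inherited call path reduces to, with a placeholder key and prompt:

    # What MoonshotChat's inherited chat() boils down to; the key is a placeholder.
    from openai import OpenAI

    client = OpenAI(api_key="sk-...", base_url="https://api.moonshot.cn/v1")
    resp = client.chat.completions.create(
        model="moonshot-v1-8k",
        messages=[{"role": "user", "content": "Hello"}],
    )
    print(resp.choices[0].message.content)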
rag/llm/rpc_server.py

@@ -4,7 +4,7 @@ import random
 import time
 from multiprocessing.connection import Listener
 from threading import Thread
+import torch
 from transformers import AutoModelForCausalLM, AutoTokenizer
-

 class RPCHandler:

@@ -47,14 +47,27 @@ tokenizer = None
 def chat(messages, gen_conf):
     global tokenizer
     model = Model()
-    roles = {"system":"System", "user": "User", "assistant": "Assistant"}
-    line = ["{}: {}".format(roles[m["role"].lower()], m["content"]) for m in messages]
-    line = "\n".join(line) + "\nAssistant: "
-    tokens = tokenizer([line], return_tensors='pt')
-    tokens = {k: tokens[k].to(model.device) if isinstance(tokens[k], torch.Tensor) else tokens[k] for k in
-              tokens.keys()}
-    res = [tokenizer.decode(t) for t in model.generate(**tokens, **gen_conf)][0]
-    return res.split("Assistant: ")[-1]
+    try:
+        conf = {"max_new_tokens": int(gen_conf.get("max_tokens", 256)), "temperature": float(gen_conf.get("temperature", 0.1))}
+        print(messages, conf)
+        text = tokenizer.apply_chat_template(
+            messages,
+            tokenize=False,
+            add_generation_prompt=True
+        )
+        model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
+
+        generated_ids = model.generate(
+            model_inputs.input_ids,
+            **conf
+        )
+        generated_ids = [
+            output_ids[len(input_ids):] for input_ids, output_ids in zip(model_inputs.input_ids, generated_ids)
+        ]
+
+        return tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
+    except Exception as e:
+        return str(e)


 def Model():
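The rewrite drops the hand-rolled "User:/Assistant:" prompt in favor of the tokenizer's built-in chat template, then slices the prompt tokens off the front of each output sequence so only the completion is decoded. A self-contained sketch of the same pattern; the model name here is illustrative, not the one the RPC server actually loads:

    from transformers import AutoModelForCausalLM, AutoTokenizer

    name = "Qwen/Qwen1.5-0.5B-Chat"  # illustrative chat model
    tok = AutoTokenizer.from_pretrained(name)
    mdl = AutoModelForCausalLM.from_pretrained(name, torch_dtype="auto", device_map="auto")

    messages = [{"role": "user", "content": "Say hi in one word."}]
    # Render the messages in the model's expected prompt format, ending with
    # an open assistant turn so generation continues from there.
    text = tok.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
    inputs = tok([text], return_tensors="pt").to(mdl.device)

    out = mdl.generate(inputs.input_ids, max_new_tokens=32, do_sample=True, temperature=0.1)
    # Strip the prompt: keep only the tokens generated after each input.
    completion_ids = [o[len(i):] for i, o in zip(inputs.input_ids, out)]
    print(tok.batch_decode(completion_ids, skip_special_tokens=True)[0])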
@@ -71,20 +84,13 @@ if __name__ == "__main__":
     handler = RPCHandler()
     handler.register_function(chat)

-    from transformers import AutoModelForCausalLM, AutoTokenizer
-    from transformers.generation.utils import GenerationConfig
-
     models = []
-    for _ in range(2):
+    for _ in range(1):
         m = AutoModelForCausalLM.from_pretrained(args.model_name,
                                                  device_map="auto",
-                                                 torch_dtype='auto',
-                                                 trust_remote_code=True)
-        m.generation_config = GenerationConfig.from_pretrained(args.model_name)
-        m.generation_config.pad_token_id = m.generation_config.eos_token_id
+                                                 torch_dtype='auto')
         models.append(m)
-    tokenizer = AutoTokenizer.from_pretrained(args.model_name, use_fast=False,
-                                              trust_remote_code=True)
+    tokenizer = AutoTokenizer.from_pretrained(args.model_name)

     # Run the server
     rpc_server(handler, ('0.0.0.0', args.port), authkey=b'infiniflow-token4kevinhu')
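On the other side of this Listener sits a client that connects with the same authkey and asks the handler to run chat(). The wire format is whatever RPCHandler implements, which this hunk does not show; assuming the common cookbook convention of pickling (function_name, args, kwargs) over multiprocessing.connection, a client call might look like the sketch below (port and payload are placeholders):

    # Illustrative client; assumes RPCHandler speaks the cookbook-style
    # (function_name, args, kwargs) -> result convention over pickle.
    from multiprocessing.connection import Client

    conn = Client(("127.0.0.1", 7860), authkey=b"infiniflow-token4kevinhu")
    conn.send(("chat",
               ([{"role": "user", "content": "Hello"}],),   # messages
               {"gen_conf": {"max_tokens": 64, "temperature": 0.1}}))
    print(conn.recv())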