Mirror of https://github.com/infiniflow/ragflow.git (synced 2025-12-08 20:42:30 +08:00)
add support for LocalAI (#1608)
### What problem does this PR solve?

#762

### Type of change

- [x] New Feature (non-breaking change which adds functionality)

---------

Co-authored-by: Zhedong Cen <cenzhedong2@126.com>
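For a quick sense of the new API, here is a minimal usage sketch of the `LocalAIChat` class added in the diff below. It is a sketch under assumptions, not part of the PR: the key, model name, and URL are illustrative, and it assumes a LocalAI server reachable on its default port 8080. Note that `key` is accepted by the constructor but never used.

```python
# Hypothetical usage sketch for the LocalAIChat class added in this PR.
# Host, model name, and key are illustrative; LocalAI listens on :8080 by default.
bot = LocalAIChat(
    key="not-used",                    # accepted but ignored by the class
    model_name="local-model",          # anything after "___" is stripped
    base_url="http://localhost:8080",  # "/v1/chat/completions" is appended
)
answer, total_tokens = bot.chat(
    system="You are a helpful assistant.",
    history=[{"role": "user", "content": "What is RAG?"}],
    gen_conf={"temperature": 0.7, "max_tokens": 256},  # other keys are dropped
)
print(answer, total_tokens)
```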
```diff
@@ -348,6 +348,82 @@ class OllamaChat(Base):
         yield 0
 
 
+class LocalAIChat(Base):
+    def __init__(self, key, model_name, base_url):
+        if base_url[-1] == "/":
+            base_url = base_url[:-1]
+        self.base_url = base_url + "/v1/chat/completions"
+        self.model_name = model_name.split("___")[0]
+
+    def chat(self, system, history, gen_conf):
+        if system:
+            history.insert(0, {"role": "system", "content": system})
+        for k in list(gen_conf.keys()):
+            if k not in ["temperature", "top_p", "max_tokens"]:
+                del gen_conf[k]
+        headers = {
+            "Content-Type": "application/json",
+        }
+        payload = json.dumps(
+            {"model": self.model_name, "messages": history, **gen_conf}
+        )
+        try:
+            response = requests.request(
+                "POST", url=self.base_url, headers=headers, data=payload
+            )
+            response = response.json()
+            ans = response["choices"][0]["message"]["content"].strip()
+            if response["choices"][0]["finish_reason"] == "length":
+                ans += (
+                    "...\nFor the content length reason, it stopped, continue?"
+                    if is_english([ans])
+                    else "······\n由于长度的原因,回答被截断了,要继续吗?"
+                )
+            return ans, response["usage"]["total_tokens"]
+        except Exception as e:
+            return "**ERROR**: " + str(e), 0
+
+    def chat_streamly(self, system, history, gen_conf):
+        if system:
+            history.insert(0, {"role": "system", "content": system})
+        ans = ""
+        total_tokens = 0
+        try:
+            headers = {
+                "Content-Type": "application/json",
+            }
+            payload = json.dumps(
+                {
+                    "model": self.model_name,
+                    "messages": history,
+                    "stream": True,
+                    **gen_conf,
+                }
+            )
+            response = requests.request(
+                "POST",
+                url=self.base_url,
+                headers=headers,
+                data=payload,
+            )
+            for resp in response.content.decode("utf-8").split("\n\n"):
+                if "choices" not in resp:
+                    continue
+                resp = json.loads(resp[6:])
+                if "delta" in resp["choices"][0]:
+                    text = resp["choices"][0]["delta"]["content"]
+                else:
+                    continue
+                ans += text
+                total_tokens += 1
+                yield ans
+
+        except Exception as e:
+            yield ans + "\n**ERROR**: " + str(e)
+
+        yield total_tokens
+
+
 class LocalLLM(Base):
     class RPCProxy:
         def __init__(self, host, port):
```
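The streaming path is worth a note. `chat_streamly` parses the response body as server-sent events: it splits on blank lines and uses `resp[6:]` to strip the 6-character `data: ` prefix before decoding each JSON chunk, and `total_tokens` is incremented once per chunk rather than per real token. Also, since `requests.request` is called without `stream=True`, the body is parsed only after it has fully arrived. The method is a generator that yields the accumulated answer after each delta and yields the counter last, so a consumer has to separate the two. A minimal consumption sketch, assuming the hypothetical `bot` instance from the sketch above:

```python
# Hypothetical consumption sketch for the streaming method; each string yield
# is the full answer so far, and the final yield is the integer chunk counter.
final_answer, chunk_count = "", 0
for piece in bot.chat_streamly(
    system="You are a helpful assistant.",
    history=[{"role": "user", "content": "Write a haiku about retrieval."}],
    gen_conf={"temperature": 0.7},
):
    if isinstance(piece, str):
        final_answer = piece  # overwrite: yields are cumulative, not deltas
    else:
        chunk_count = piece   # last yield is total_tokens (a chunk count here)
print(final_answer, chunk_count)
```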