Refa: ollama keep alive issue. (#8216)
### What problem does this PR solve?

#8122

### Type of change

- [x] Refactoring
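The change passes `keep_alive=-1` to the Ollama chat calls so the server keeps the model loaded in memory between requests instead of unloading it after its default 5-minute idle timeout. A minimal sketch of the flag with the official `ollama` Python client, assuming a local server at the default port and a hypothetical model name, not RAGFlow's own code:

```python
import ollama

# Hypothetical host and model name for illustration only.
client = ollama.Client(host="http://localhost:11434")

# keep_alive=-1 asks the server to keep the model resident in memory
# indefinitely; 0 unloads it right after the request, and the server
# default is a 5-minute idle window.
response = client.chat(
    model="llama3",
    messages=[{"role": "user", "content": "Hello"}],
    keep_alive=-1,
)
print(response["message"]["content"])
```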
```diff
@@ -871,7 +871,7 @@ class OllamaChat(Base):
             ctx_size = self._calculate_dynamic_ctx(history)
 
             gen_conf["num_ctx"] = ctx_size
-            response = self.client.chat(model=self.model_name, messages=history, options=gen_conf)
+            response = self.client.chat(model=self.model_name, messages=history, options=gen_conf, keep_alive=-1)
             ans = response["message"]["content"].strip()
             token_count = response.get("eval_count", 0) + response.get("prompt_eval_count", 0)
             return ans, token_count
```
```diff
@@ -898,7 +898,7 @@ class OllamaChat(Base):
 
             ans = ""
             try:
-                response = self.client.chat(model=self.model_name, messages=history, stream=True, options=options)
+                response = self.client.chat(model=self.model_name, messages=history, stream=True, options=options, keep_alive=-1)
                 for resp in response:
                     if resp["done"]:
                         token_count = resp.get("prompt_eval_count", 0) + resp.get("eval_count", 0)
```
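On the streaming path the flag works the same way; a minimal sketch under the same assumptions as above, mirroring the dict-style access used in the hunk (the final chunk has `done=True` and carries the prompt/eval token counters):

```python
# Hypothetical usage, not RAGFlow's code.
stream = client.chat(
    model="llama3",
    messages=[{"role": "user", "content": "Hello"}],
    stream=True,
    keep_alive=-1,
)
ans = ""
for resp in stream:
    ans += resp["message"]["content"]
    if resp["done"]:
        token_count = resp.get("prompt_eval_count", 0) + resp.get("eval_count", 0)
```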