Fix: self-deployed LLM error (#9217)

### What problem does this PR solve?

Close #9197
Close #9145

### Type of change

- [x] Refactoring
- [x] Bug fixing
This commit is contained in:
Kevin Hu
2025-08-05 09:49:47 +08:00
committed by GitHub
parent 7724acbadb
commit 6ec3f18e22
4 changed files with 14 additions and 8 deletions

View File

@@ -173,7 +173,8 @@ def completion(tenant_id, agent_id, session_id=None, **kwargs):
conv.message.append({"role": "assistant", "content": txt, "created_at": time.time(), "id": message_id})
conv.reference = canvas.get_reference()
conv.errors = canvas.error
API4ConversationService.append_message(conv.id, conv.to_dict())
conv = conv.to_dict()
API4ConversationService.append_message(conv["id"], conv)
def completionOpenAI(tenant_id, agent_id, question, session_id=None, stream=True, **kwargs):

View File

@@ -225,6 +225,9 @@ class TenantLLMService(CommonService):
if llm_id == llm["llm_name"]:
return llm["model_type"].split(",")[-1]
for llm in LLMService.query(llm_name=llm_id):
return llm.model_type
class LLMBundle:
def __init__(self, tenant_id, llm_type, llm_name=None, lang="Chinese", **kwargs):