Fix: correct llm_id for graphrag (#13032)

### What problem does this PR solve?

Fix: correct `llm_id` selection for GraphRAG (#13030). RAPTOR and GraphRAG tasks now read `llm_id` from the knowledge base's `kb_parser_config` (falling back to the task's default `llm_id`), while standard chunking keeps using the document-level `parser_config`.

### Type of change

- [x] Bug Fix (non-breaking change which fixes an issue)
Author: Magicbook1108
Date: 2026-02-06 14:05:32 +08:00 (committed by GitHub)
Parent: 6a17e8cc85
Commit: 4b0d65f089


```diff
@@ -960,8 +960,9 @@ async def do_handle_task(task):
     task_tenant_id = task["tenant_id"]
     task_embedding_id = task["embd_id"]
     task_language = task["language"]
-    task_llm_id = task["parser_config"].get("llm_id") or task["llm_id"]
-    task["llm_id"] = task_llm_id
+    doc_task_llm_id = task["parser_config"].get("llm_id") or task["llm_id"]
+    kb_task_llm_id = task['kb_parser_config'].get("llm_id") or task["llm_id"]
+    task['llm_id'] = kb_task_llm_id
     task_dataset_id = task["kb_id"]
     task_doc_id = task["doc_id"]
     task_document_name = task["name"]
@@ -1032,7 +1033,7 @@ async def do_handle_task(task):
             return
         # bind LLM for raptor
-        chat_model = LLMBundle(task_tenant_id, LLMType.CHAT, llm_name=task_llm_id, lang=task_language)
+        chat_model = LLMBundle(task_tenant_id, LLMType.CHAT, llm_name=kb_task_llm_id, lang=task_language)
         # run RAPTOR
         async with kg_limiter:
             chunks, token_count = await run_raptor_for_kb(
@@ -1076,7 +1077,7 @@ async def do_handle_task(task):
         graphrag_conf = kb_parser_config.get("graphrag", {})
         start_ts = timer()
-        chat_model = LLMBundle(task_tenant_id, LLMType.CHAT, llm_name=task_llm_id, lang=task_language)
+        chat_model = LLMBundle(task_tenant_id, LLMType.CHAT, llm_name=kb_task_llm_id, lang=task_language)
         with_resolution = graphrag_conf.get("resolution", False)
         with_community = graphrag_conf.get("community", False)
         async with kg_limiter:
@@ -1101,6 +1102,7 @@ async def do_handle_task(task):
         return
     else:
         # Standard chunking methods
+        task['llm_id'] = doc_task_llm_id
         start_ts = timer()
         chunks = await build_chunks(task, progress_callback)
         logging.info("Build document {}: {:.2f}s".format(task_document_name, timer() - start_ts))
```