Fix: context exceeding the LLM length limit during knowledge graph (KG) extraction. (#5723)

### What problem does this PR solve?

Fixes #5088: the system prompt passed to the LLM during KG extraction could exceed the model's maximum context length; it is now truncated via `message_fit_in` before the chat call.

### Type of change

- [x] Bug Fix (non-breaking change which fixes an issue)
This commit is contained in:
Kevin Hu
2025-03-06 19:21:07 +08:00
committed by GitHub
parent 8fb8374dfc
commit 9fc7174612
2 changed files with 4 additions and 2 deletions

View File

@ -24,6 +24,7 @@ from graphrag.general.graph_prompt import SUMMARIZE_DESCRIPTIONS_PROMPT
from graphrag.utils import get_llm_cache, set_llm_cache, handle_single_entity_extraction, \
handle_single_relationship_extraction, split_string_by_multi_markers, flat_uniq_list, chat_limiter
from rag.llm.chat_model import Base as CompletionLLM
from rag.prompts import message_fit_in
from rag.utils import truncate
GRAPH_FIELD_SEP = "<SEP>"
@ -58,7 +59,8 @@ class Extractor:
response = get_llm_cache(self._llm.llm_name, system, hist, conf)
if response:
return response
response = self._llm.chat(system, hist, conf)
_, system_msg = message_fit_in([{"role": "system", "content": system}], int(self._llm.max_length * 0.97))
response = self._llm.chat(system_msg[0]["content"], hist, conf)
response = re.sub(r"<think>.*</think>", "", response, flags=re.DOTALL)
if response.find("**ERROR**") >= 0:
raise Exception(response)