Mirror of https://github.com/infiniflow/ragflow.git (synced 2025-12-08 20:42:30 +08:00)
Cache the result from llm for graphrag and raptor (#4051)
### What problem does this PR solve?

#4045

### Type of change

- [x] New Feature (non-breaking change which adds functionality)
graphrag/__init__.py (new file, 0 lines)
@@ -16,6 +16,7 @@ from typing import Any
 import tiktoken

 from graphrag.claim_prompt import CLAIM_EXTRACTION_PROMPT, CONTINUE_PROMPT, LOOP_PROMPT
+from graphrag.extractor import Extractor
 from rag.llm.chat_model import Base as CompletionLLM
 from graphrag.utils import ErrorHandlerFn, perform_variable_replacements
@@ -33,10 +34,9 @@ class ClaimExtractorResult:
     source_docs: dict[str, Any]


-class ClaimExtractor:
+class ClaimExtractor(Extractor):
     """Claim extractor class definition."""

-    _llm: CompletionLLM
     _extraction_prompt: str
     _summary_prompt: str
     _output_formatter_prompt: str
@@ -169,7 +169,7 @@
         }
         text = perform_variable_replacements(self._extraction_prompt, variables=variables)
         gen_conf = {"temperature": 0.5}
-        results = self._llm.chat(text, [{"role": "user", "content": "Output:"}], gen_conf)
+        results = self._chat(text, [{"role": "user", "content": "Output:"}], gen_conf)
         claims = results.strip().removesuffix(completion_delimiter)
         history = [{"role": "system", "content": text}, {"role": "assistant", "content": results}]
@@ -177,7 +177,7 @@
         for i in range(self._max_gleanings):
             text = perform_variable_replacements(CONTINUE_PROMPT, history=history, variables=variables)
             history.append({"role": "user", "content": text})
-            extension = self._llm.chat("", history, gen_conf)
+            extension = self._chat("", history, gen_conf)
             claims += record_delimiter + extension.strip().removesuffix(
                 completion_delimiter
             )
@@ -188,7 +188,7 @@

             history.append({"role": "assistant", "content": extension})
             history.append({"role": "user", "content": LOOP_PROMPT})
-            continuation = self._llm.chat("", history, self._loop_args)
+            continuation = self._chat("", history, self._loop_args)
             if continuation != "YES":
                 break
@@ -15,6 +15,7 @@ import networkx as nx
 import pandas as pd
 from graphrag import leiden
 from graphrag.community_report_prompt import COMMUNITY_REPORT_PROMPT
+from graphrag.extractor import Extractor
 from graphrag.leiden import add_community_info2graph
 from rag.llm.chat_model import Base as CompletionLLM
 from graphrag.utils import ErrorHandlerFn, perform_variable_replacements, dict_has_keys_with_types
@@ -30,10 +31,9 @@ class CommunityReportsResult:
     structured_output: list[dict]


-class CommunityReportsExtractor:
+class CommunityReportsExtractor(Extractor):
     """Community reports extractor class definition."""

-    _llm: CompletionLLM
     _extraction_prompt: str
     _output_formatter_prompt: str
     _on_error: ErrorHandlerFn
@@ -74,7 +74,7 @@
         text = perform_variable_replacements(self._extraction_prompt, variables=prompt_variables)
         gen_conf = {"temperature": 0.3}
         try:
-            response = self._llm.chat(text, [{"role": "user", "content": "Output:"}], gen_conf)
+            response = self._chat(text, [{"role": "user", "content": "Output:"}], gen_conf)
             token_count += num_tokens_from_string(text + response)
             response = re.sub(r"^[^\{]*", "", response)
             response = re.sub(r"[^\}]*$", "", response)
@@ -8,6 +8,7 @@ Reference:
 import json
 from dataclasses import dataclass

+from graphrag.extractor import Extractor
 from graphrag.utils import ErrorHandlerFn, perform_variable_replacements
 from rag.llm.chat_model import Base as CompletionLLM
@@ -42,10 +43,9 @@ class SummarizationResult:
     description: str


-class SummarizeExtractor:
+class SummarizeExtractor(Extractor):
     """Unipartite graph extractor class definition."""

-    _llm: CompletionLLM
     _entity_name_key: str
     _input_descriptions_key: str
     _summarization_prompt: str
@@ -143,4 +143,4 @@
             self._input_descriptions_key: json.dumps(sorted(descriptions)),
         }
         text = perform_variable_replacements(self._summarization_prompt, variables=variables)
-        return self._llm.chat("", [{"role": "user", "content": text}])
+        return self._chat("", [{"role": "user", "content": text}])
@@ -21,6 +21,8 @@ from dataclasses import dataclass
 from typing import Any

 import networkx as nx
+
+from graphrag.extractor import Extractor
 from rag.nlp import is_english
 import editdistance
 from graphrag.entity_resolution_prompt import ENTITY_RESOLUTION_PROMPT
@@ -39,10 +41,9 @@ class EntityResolutionResult:
     output: nx.Graph


-class EntityResolution:
+class EntityResolution(Extractor):
     """Entity resolution class definition."""

-    _llm: CompletionLLM
     _resolution_prompt: str
     _output_formatter_prompt: str
     _on_error: ErrorHandlerFn
@@ -117,7 +118,7 @@
                 }
                 text = perform_variable_replacements(self._resolution_prompt, variables=variables)

-                response = self._llm.chat(text, [{"role": "user", "content": "Output:"}], gen_conf)
+                response = self._chat(text, [{"role": "user", "content": "Output:"}], gen_conf)
                 result = self._process_results(len(candidate_resolution_i[1]), response,
                                                prompt_variables.get(self._record_delimiter_key,
                                                                     DEFAULT_RECORD_DELIMITER),
graphrag/extractor.py (new file, 34 lines)
@@ -0,0 +1,34 @@
+#
+# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+from graphrag.utils import get_llm_cache, set_llm_cache
+from rag.llm.chat_model import Base as CompletionLLM
+
+
+class Extractor:
+    _llm: CompletionLLM
+
+    def __init__(self, llm_invoker: CompletionLLM):
+        self._llm = llm_invoker
+
+    def _chat(self, system, history, gen_conf):
+        response = get_llm_cache(self._llm.llm_name, system, history, gen_conf)
+        if response:
+            return response
+        response = self._llm.chat(system, history, gen_conf)
+        if response.find("**ERROR**") >= 0:
+            raise Exception(response)
+        set_llm_cache(self._llm.llm_name, system, response, history, gen_conf)
+        return response
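The `_chat` helper above is where the caching happens: it probes the Redis-backed cache added to `graphrag/utils.py` before calling the model, and stores the reply on a miss. Below is a minimal sketch of how a derived extractor picks this up, assuming the ragflow environment with a reachable `REDIS_CONN`; `FakeChatModel` and `DemoExtractor` are hypothetical stand-ins used only for illustration, not part of the repository.

```python
# Hypothetical illustration only: FakeChatModel stands in for rag.llm.chat_model.Base.
# Requires the ragflow environment (graphrag.utils needs a reachable REDIS_CONN).
from graphrag.extractor import Extractor


class FakeChatModel:
    """Counts real LLM calls so the caching effect is visible."""

    llm_name = "fake-model"

    def __init__(self):
        self.calls = 0

    def chat(self, system, history, gen_conf):
        self.calls += 1
        return "entity_a<|>entity_b<|>related"


class DemoExtractor(Extractor):
    """Hypothetical subclass; the real extractors build their prompts with perform_variable_replacements."""

    def run(self, prompt: str) -> str:
        gen_conf = {"temperature": 0.3}
        return self._chat(prompt, [{"role": "user", "content": "Output:"}], gen_conf)


llm = FakeChatModel()
extractor = DemoExtractor(llm)
extractor.run("Extract relations from: ...")  # cache miss: calls the model, then set_llm_cache()
extractor.run("Extract relations from: ...")  # cache hit: served from Redis via get_llm_cache()
print(llm.calls)  # 1 — the second, identical request never reached the model
```

Because the cache key covers the model name, system prompt, history and generation config, the chat sites rewritten in this commit (`ClaimExtractor`, `CommunityReportsExtractor`, `GraphExtractor`, `MindMapExtractor`, ...) only get a hit when the whole request is identical.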
@@ -12,6 +12,8 @@ import traceback
 from typing import Any, Callable, Mapping
 from dataclasses import dataclass
 import tiktoken
+
+from graphrag.extractor import Extractor
 from graphrag.graph_prompt import GRAPH_EXTRACTION_PROMPT, CONTINUE_PROMPT, LOOP_PROMPT
 from graphrag.utils import ErrorHandlerFn, perform_variable_replacements, clean_str
 from rag.llm.chat_model import Base as CompletionLLM
@@ -34,10 +36,9 @@ class GraphExtractionResult:
     source_docs: dict[Any, Any]


-class GraphExtractor:
+class GraphExtractor(Extractor):
     """Unipartite graph extractor class definition."""

-    _llm: CompletionLLM
     _join_descriptions: bool
     _tuple_delimiter_key: str
     _record_delimiter_key: str
@@ -165,9 +166,7 @@
         token_count = 0
         text = perform_variable_replacements(self._extraction_prompt, variables=variables)
         gen_conf = {"temperature": 0.3}
-        response = self._llm.chat(text, [{"role": "user", "content": "Output:"}], gen_conf)
-        if response.find("**ERROR**") >= 0:
-            raise Exception(response)
+        response = self._chat(text, [{"role": "user", "content": "Output:"}], gen_conf)
         token_count = num_tokens_from_string(text + response)

         results = response or ""
@@ -177,9 +176,7 @@
         for i in range(self._max_gleanings):
             text = perform_variable_replacements(CONTINUE_PROMPT, history=history, variables=variables)
             history.append({"role": "user", "content": text})
-            response = self._llm.chat("", history, gen_conf)
-            if response.find("**ERROR**") >= 0:
-                raise Exception(response)
+            response = self._chat("", history, gen_conf)
            results += response or ""

             # if this is the final glean, don't bother updating the continuation flag
@@ -187,7 +184,7 @@
                 break
             history.append({"role": "assistant", "content": response})
             history.append({"role": "user", "content": LOOP_PROMPT})
-            continuation = self._llm.chat("", history, self._loop_args)
+            continuation = self._chat("", history, self._loop_args)
             if continuation != "YES":
                 break
@@ -23,6 +23,7 @@ from typing import Any
 from concurrent.futures import ThreadPoolExecutor
 from dataclasses import dataclass

+from graphrag.extractor import Extractor
 from graphrag.mind_map_prompt import MIND_MAP_EXTRACTION_PROMPT
 from graphrag.utils import ErrorHandlerFn, perform_variable_replacements
 from rag.llm.chat_model import Base as CompletionLLM
@@ -37,8 +38,7 @@ class MindMapResult:
     output: dict


-class MindMapExtractor:
-    _llm: CompletionLLM
+class MindMapExtractor(Extractor):
     _input_text_key: str
     _mind_map_prompt: str
     _on_error: ErrorHandlerFn
@@ -190,7 +190,7 @@
         }
         text = perform_variable_replacements(self._mind_map_prompt, variables=variables)
         gen_conf = {"temperature": 0.5}
-        response = self._llm.chat(text, [{"role": "user", "content": "Output:"}], gen_conf)
+        response = self._chat(text, [{"role": "user", "content": "Output:"}], gen_conf)
         response = re.sub(r"```[^\n]*", "", response)
         logging.debug(response)
         logging.debug(self._todict(markdown_to_json.dictify(response)))
@@ -6,9 +6,15 @@ Reference:
 """

 import html
+import json
 import re
 from typing import Any, Callable

+import numpy as np
+import xxhash
+
+from rag.utils.redis_conn import REDIS_CONN
+
 ErrorHandlerFn = Callable[[BaseException | None, str | None, dict | None], None]
@@ -60,3 +66,49 @@ def dict_has_keys_with_types(
             return False
     return True
+
+
+def get_llm_cache(llmnm, txt, history, genconf):
+    hasher = xxhash.xxh64()
+    hasher.update(str(llmnm).encode("utf-8"))
+    hasher.update(str(txt).encode("utf-8"))
+    hasher.update(str(history).encode("utf-8"))
+    hasher.update(str(genconf).encode("utf-8"))
+
+    k = hasher.hexdigest()
+    bin = REDIS_CONN.get(k)
+    if not bin:
+        return
+    return bin.decode("utf-8")
+
+
+def set_llm_cache(llmnm, txt, v: str, history, genconf):
+    hasher = xxhash.xxh64()
+    hasher.update(str(llmnm).encode("utf-8"))
+    hasher.update(str(txt).encode("utf-8"))
+    hasher.update(str(history).encode("utf-8"))
+    hasher.update(str(genconf).encode("utf-8"))
+
+    k = hasher.hexdigest()
+    REDIS_CONN.set(k, v.encode("utf-8"), 24*3600)
+
+
+def get_embed_cache(llmnm, txt):
+    hasher = xxhash.xxh64()
+    hasher.update(str(llmnm).encode("utf-8"))
+    hasher.update(str(txt).encode("utf-8"))
+
+    k = hasher.hexdigest()
+    bin = REDIS_CONN.get(k)
+    if not bin:
+        return
+    return np.array(json.loads(bin.decode("utf-8")))
+
+
+def set_embed_cache(llmnm, txt, arr):
+    hasher = xxhash.xxh64()
+    hasher.update(str(llmnm).encode("utf-8"))
+    hasher.update(str(txt).encode("utf-8"))
+
+    k = hasher.hexdigest()
+    arr = json.dumps(arr.tolist() if isinstance(arr, np.ndarray) else arr)
+    REDIS_CONN.set(k, arr.encode("utf-8"), 24*3600)
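The embedding helpers at the end of that hunk (`get_embed_cache` / `set_embed_cache`) are the raptor-facing half of the PR; their call sites are not part of this excerpt. A rough sketch of how they could wrap an embedding call follows, assuming a hypothetical `embed_model` object that exposes `llm_name` and an `encode_one(text)` method (placeholders, not actual ragflow APIs):

```python
# Hypothetical usage sketch; `embed_model` and `encode_one` are placeholders, not ragflow APIs.
import numpy as np

from graphrag.utils import get_embed_cache, set_embed_cache


def cached_embedding(embed_model, text: str) -> np.ndarray:
    """Return the embedding for `text`, reusing the Redis-cached vector when present."""
    vec = get_embed_cache(embed_model.llm_name, text)
    if vec is not None:                  # hit: the JSON-encoded vector comes back as an np.array
        return vec
    vec = embed_model.encode_one(text)   # miss: compute the embedding (placeholder call)
    set_embed_cache(embed_model.llm_name, text, vec)  # stored with a 24h TTL, like the chat cache
    return vec
```

The key is the xxhash64 of the model name plus the text, and the vector round-trips through JSON, which is why `get_embed_cache` rebuilds an `np.array` on the way out.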