Mirror of https://github.com/infiniflow/ragflow.git, synced 2025-12-08 20:42:30 +08:00
Made task_executor async to speed up parsing (#5530)
### What problem does this PR solve?

Made task_executor async to speed up parsing.

### Type of change

- [x] Performance Improvement
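
The pattern the diff below applies: keep the synchronous LLM call (`self._chat`), but run it on a worker thread with `trio.to_thread.run_sync` and gate it with `chat_limiter` so only a bounded number of chat requests are in flight at once. Here is a minimal, self-contained sketch of that pattern, not ragflow code: `blocking_chat` is a placeholder for the real chat call, and `chat_limiter` is assumed to be a shared `trio.CapacityLimiter`.

```python
import time

import trio

# Assumption: graphrag.utils.chat_limiter plays this role in ragflow; here it is
# recreated locally so the sketch runs on its own.
chat_limiter = trio.CapacityLimiter(4)  # at most 4 chat calls in flight

def blocking_chat(prompt: str) -> str:
    """Placeholder for a synchronous LLM request (GraphExtractor._chat in the diff)."""
    time.sleep(0.5)  # simulated network latency; runs on a worker thread below
    return f"answer to: {prompt}"

async def process_chunk(chunk: str, out_results: list) -> None:
    # Off-load the blocking call to a thread; the trio event loop stays free
    # to schedule other chunks while this one waits on the LLM.
    async with chat_limiter:
        answer = await trio.to_thread.run_sync(lambda: blocking_chat(chunk))
    out_results.append(answer)

async def main() -> None:
    results: list[str] = []
    async with trio.open_nursery() as nursery:
        for i in range(8):
            nursery.start_soon(process_chunk, f"chunk-{i}", results)
    print(results)  # 8 chunks finish in roughly 2 x 0.5 s instead of 8 x 0.5 s

if __name__ == "__main__":
    trio.run(main)
```

In the commit itself the same `async with chat_limiter:` / `await trio.to_thread.run_sync(...)` pair wraps every `self._chat(...)` call, as the hunks below show.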
@@ -4,16 +4,16 @@
 Reference:
  - [graphrag](https://github.com/microsoft/graphrag)
 """
 import logging
 import re
 from typing import Any, Callable
 from dataclasses import dataclass
 from graphrag.general.extractor import Extractor, ENTITY_EXTRACTION_MAX_GLEANINGS
 from graphrag.light.graph_prompt import PROMPTS
-from graphrag.utils import pack_user_ass_to_openai_messages, split_string_by_multi_markers
+from graphrag.utils import pack_user_ass_to_openai_messages, split_string_by_multi_markers, chat_limiter
 from rag.llm.chat_model import Base as CompletionLLM
 import networkx as nx
 from rag.utils import num_tokens_from_string
+import trio
 
 
 @dataclass
@@ -82,7 +82,7 @@ class GraphExtractor(Extractor):
         )
         self._left_token_count = max(llm_invoker.max_length * 0.6, self._left_token_count)
 
-    def _process_single_content(self, chunk_key_dp: tuple[str, str]):
+    async def _process_single_content(self, chunk_key_dp: tuple[str, str], chunk_seq: int, num_chunks: int, out_results):
         token_count = 0
         chunk_key = chunk_key_dp[0]
         content = chunk_key_dp[1]
@@ -90,38 +90,39 @@ class GraphExtractor(Extractor):
             **self._context_base, input_text="{input_text}"
         ).format(**self._context_base, input_text=content)
 
-        try:
-            gen_conf = {"temperature": 0.8}
-            final_result = self._chat(hint_prompt, [{"role": "user", "content": "Output:"}], gen_conf)
-            token_count += num_tokens_from_string(hint_prompt + final_result)
-            history = pack_user_ass_to_openai_messages("Output:", final_result, self._continue_prompt)
-            for now_glean_index in range(self._max_gleanings):
-                glean_result = self._chat(hint_prompt, history, gen_conf)
-                history.extend([{"role": "assistant", "content": glean_result}, {"role": "user", "content": self._continue_prompt}])
-                token_count += num_tokens_from_string("\n".join([m["content"] for m in history]) + hint_prompt + self._continue_prompt)
-                final_result += glean_result
-                if now_glean_index == self._max_gleanings - 1:
-                    break
+        gen_conf = {"temperature": 0.8}
+        async with chat_limiter:
+            final_result = await trio.to_thread.run_sync(lambda: self._chat(hint_prompt, [{"role": "user", "content": "Output:"}], gen_conf))
+        token_count += num_tokens_from_string(hint_prompt + final_result)
+        history = pack_user_ass_to_openai_messages("Output:", final_result, self._continue_prompt)
+        for now_glean_index in range(self._max_gleanings):
+            async with chat_limiter:
+                glean_result = await trio.to_thread.run_sync(lambda: self._chat(hint_prompt, history, gen_conf))
+            history.extend([{"role": "assistant", "content": glean_result}, {"role": "user", "content": self._continue_prompt}])
+            token_count += num_tokens_from_string("\n".join([m["content"] for m in history]) + hint_prompt + self._continue_prompt)
+            final_result += glean_result
+            if now_glean_index == self._max_gleanings - 1:
+                break
 
-                if_loop_result = self._chat(self._if_loop_prompt, history, gen_conf)
-                token_count += num_tokens_from_string("\n".join([m["content"] for m in history]) + if_loop_result + self._if_loop_prompt)
-                if_loop_result = if_loop_result.strip().strip('"').strip("'").lower()
-                if if_loop_result != "yes":
-                    break
+            async with chat_limiter:
+                if_loop_result = await trio.to_thread.run_sync(lambda: self._chat(self._if_loop_prompt, history, gen_conf))
+            token_count += num_tokens_from_string("\n".join([m["content"] for m in history]) + if_loop_result + self._if_loop_prompt)
+            if_loop_result = if_loop_result.strip().strip('"').strip("'").lower()
+            if if_loop_result != "yes":
+                break
 
-            records = split_string_by_multi_markers(
-                final_result,
-                [self._context_base["record_delimiter"], self._context_base["completion_delimiter"]],
-            )
-            rcds = []
-            for record in records:
-                record = re.search(r"\((.*)\)", record)
-                if record is None:
-                    continue
-                rcds.append(record.group(1))
-            records = rcds
-            maybe_nodes, maybe_edges = self._entities_and_relations(chunk_key, records, self._context_base["tuple_delimiter"])
-            return maybe_nodes, maybe_edges, token_count
-        except Exception as e:
-            logging.exception("error extracting graph")
-            return e, None, None
+        records = split_string_by_multi_markers(
+            final_result,
+            [self._context_base["record_delimiter"], self._context_base["completion_delimiter"]],
+        )
+        rcds = []
+        for record in records:
+            record = re.search(r"\((.*)\)", record)
+            if record is None:
+                continue
+            rcds.append(record.group(1))
+        records = rcds
+        maybe_nodes, maybe_edges = self._entities_and_relations(chunk_key, records, self._context_base["tuple_delimiter"])
+        out_results.append((maybe_nodes, maybe_edges, token_count))
+        if self.callback:
+            self.callback(0.5+0.1*len(out_results)/num_chunks, msg = f"Entities extraction of chunk {chunk_seq} {len(out_results)}/{num_chunks} done, {len(maybe_nodes)} nodes, {len(maybe_edges)} edges, {token_count} tokens.")
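
The call site of the new coroutine is not part of these hunks, so the driver below is only an illustration of how the changed signature (`chunk_seq`, `num_chunks`, `out_results`) fits together: chunks are fanned out in a trio nursery, each finished chunk appends its result to the shared `out_results` list (so results arrive in completion order, not submission order), and the progress callback maps completion onto the 0.5-0.6 band via `0.5 + 0.1 * len(out_results) / num_chunks`. `FakeExtractor` is a stand-in, not ragflow's `GraphExtractor`.

```python
import trio

class FakeExtractor:
    """Stand-in that only mimics the changed method's signature and callback."""

    def __init__(self, callback=None):
        self.callback = callback

    async def _process_single_content(self, chunk_key_dp, chunk_seq, num_chunks, out_results):
        chunk_key, content = chunk_key_dp
        await trio.sleep(0.1)  # stands in for the thread-offloaded chat calls
        maybe_nodes, maybe_edges, token_count = {chunk_key: []}, {}, len(content)
        out_results.append((maybe_nodes, maybe_edges, token_count))
        if self.callback:
            # Each completed chunk nudges overall progress through the 0.5-0.6 range.
            self.callback(0.5 + 0.1 * len(out_results) / num_chunks,
                          msg=f"chunk {chunk_seq}: {len(out_results)}/{num_chunks} done")

async def extract(chunk_pairs):
    extractor = FakeExtractor(callback=lambda prog, msg: print(f"{prog:.3f} {msg}"))
    out_results = []
    async with trio.open_nursery() as nursery:
        for seq, pair in enumerate(chunk_pairs):
            nursery.start_soon(extractor._process_single_content,
                               pair, seq, len(chunk_pairs), out_results)
    return out_results  # completion order, not submission order

if __name__ == "__main__":
    chunks = [("doc-1#0", "first chunk text"), ("doc-1#1", "second chunk text")]
    print(trio.run(extract, chunks))
```

Returning results through a shared list rather than a return value is a natural fit for `nursery.start_soon`, which discards return values; and since the per-chunk try/except was removed, exceptions now propagate to whatever opens the nursery.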