mirror of
https://github.com/infiniflow/ragflow.git
synced 2025-12-08 20:42:30 +08:00
Made task_executor async to speed up parsing (#5530)
### What problem does this PR solve? Made task_executor async to speed up parsing. ### Type of change - [x] Performance Improvement
This commit is contained in:
@ -17,6 +17,7 @@ import json
|
||||
import logging
|
||||
from functools import reduce, partial
|
||||
import networkx as nx
|
||||
import trio
|
||||
|
||||
from api import settings
|
||||
from graphrag.general.community_reports_extractor import CommunityReportsExtractor
|
||||
@ -41,18 +42,24 @@ class Dealer:
|
||||
embed_bdl=None,
|
||||
callback=None
|
||||
):
|
||||
docids = list(set([docid for docid,_ in chunks]))
|
||||
self.tenant_id = tenant_id
|
||||
self.kb_id = kb_id
|
||||
self.chunks = chunks
|
||||
self.llm_bdl = llm_bdl
|
||||
self.embed_bdl = embed_bdl
|
||||
ext = extractor(self.llm_bdl, language=language,
|
||||
self.ext = extractor(self.llm_bdl, language=language,
|
||||
entity_types=entity_types,
|
||||
get_entity=partial(get_entity, tenant_id, kb_id),
|
||||
set_entity=partial(set_entity, tenant_id, kb_id, self.embed_bdl),
|
||||
get_relation=partial(get_relation, tenant_id, kb_id),
|
||||
set_relation=partial(set_relation, tenant_id, kb_id, self.embed_bdl)
|
||||
)
|
||||
ents, rels = ext(chunks, callback)
|
||||
self.graph = nx.Graph()
|
||||
self.callback = callback
|
||||
|
||||
async def __call__(self):
|
||||
docids = list(set([docid for docid, _ in self.chunks]))
|
||||
ents, rels = await self.ext(self.chunks, self.callback)
|
||||
for en in ents:
|
||||
self.graph.add_node(en["entity_name"], entity_type=en["entity_type"])#, description=en["description"])
|
||||
|
||||
@ -64,16 +71,16 @@ class Dealer:
|
||||
#description=rel["description"]
|
||||
)
|
||||
|
||||
with RedisDistributedLock(kb_id, 60*60):
|
||||
old_graph, old_doc_ids = get_graph(tenant_id, kb_id)
|
||||
with RedisDistributedLock(self.kb_id, 60*60):
|
||||
old_graph, old_doc_ids = get_graph(self.tenant_id, self.kb_id)
|
||||
if old_graph is not None:
|
||||
logging.info("Merge with an exiting graph...................")
|
||||
self.graph = reduce(graph_merge, [old_graph, self.graph])
|
||||
update_nodes_pagerank_nhop_neighbour(tenant_id, kb_id, self.graph, 2)
|
||||
update_nodes_pagerank_nhop_neighbour(self.tenant_id, self.kb_id, self.graph, 2)
|
||||
if old_doc_ids:
|
||||
docids.extend(old_doc_ids)
|
||||
docids = list(set(docids))
|
||||
set_graph(tenant_id, kb_id, self.graph, docids)
|
||||
set_graph(self.tenant_id, self.kb_id, self.graph, docids)
|
||||
|
||||
|
||||
class WithResolution(Dealer):
|
||||
@ -84,47 +91,50 @@ class WithResolution(Dealer):
|
||||
embed_bdl=None,
|
||||
callback=None
|
||||
):
|
||||
self.tenant_id = tenant_id
|
||||
self.kb_id = kb_id
|
||||
self.llm_bdl = llm_bdl
|
||||
self.embed_bdl = embed_bdl
|
||||
|
||||
with RedisDistributedLock(kb_id, 60*60):
|
||||
self.graph, doc_ids = get_graph(tenant_id, kb_id)
|
||||
self.callback = callback
|
||||
async def __call__(self):
|
||||
with RedisDistributedLock(self.kb_id, 60*60):
|
||||
self.graph, doc_ids = await trio.to_thread.run_sync(lambda: get_graph(self.tenant_id, self.kb_id))
|
||||
if not self.graph:
|
||||
logging.error(f"Faild to fetch the graph. tenant_id:{kb_id}, kb_id:{kb_id}")
|
||||
if callback:
|
||||
callback(-1, msg="Faild to fetch the graph.")
|
||||
logging.error(f"Faild to fetch the graph. tenant_id:{self.kb_id}, kb_id:{self.kb_id}")
|
||||
if self.callback:
|
||||
self.callback(-1, msg="Faild to fetch the graph.")
|
||||
return
|
||||
|
||||
if callback:
|
||||
callback(msg="Fetch the existing graph.")
|
||||
if self.callback:
|
||||
self.callback(msg="Fetch the existing graph.")
|
||||
er = EntityResolution(self.llm_bdl,
|
||||
get_entity=partial(get_entity, tenant_id, kb_id),
|
||||
set_entity=partial(set_entity, tenant_id, kb_id, self.embed_bdl),
|
||||
get_relation=partial(get_relation, tenant_id, kb_id),
|
||||
set_relation=partial(set_relation, tenant_id, kb_id, self.embed_bdl))
|
||||
reso = er(self.graph)
|
||||
get_entity=partial(get_entity, self.tenant_id, self.kb_id),
|
||||
set_entity=partial(set_entity, self.tenant_id, self.kb_id, self.embed_bdl),
|
||||
get_relation=partial(get_relation, self.tenant_id, self.kb_id),
|
||||
set_relation=partial(set_relation, self.tenant_id, self.kb_id, self.embed_bdl))
|
||||
reso = await er(self.graph)
|
||||
self.graph = reso.graph
|
||||
logging.info("Graph resolution is done. Remove {} nodes.".format(len(reso.removed_entities)))
|
||||
if callback:
|
||||
callback(msg="Graph resolution is done. Remove {} nodes.".format(len(reso.removed_entities)))
|
||||
update_nodes_pagerank_nhop_neighbour(tenant_id, kb_id, self.graph, 2)
|
||||
set_graph(tenant_id, kb_id, self.graph, doc_ids)
|
||||
if self.callback:
|
||||
self.callback(msg="Graph resolution is done. Remove {} nodes.".format(len(reso.removed_entities)))
|
||||
await trio.to_thread.run_sync(lambda: update_nodes_pagerank_nhop_neighbour(self.tenant_id, self.kb_id, self.graph, 2))
|
||||
await trio.to_thread.run_sync(lambda: set_graph(self.tenant_id, self.kb_id, self.graph, doc_ids))
|
||||
|
||||
settings.docStoreConn.delete({
|
||||
await trio.to_thread.run_sync(lambda: settings.docStoreConn.delete({
|
||||
"knowledge_graph_kwd": "relation",
|
||||
"kb_id": kb_id,
|
||||
"kb_id": self.kb_id,
|
||||
"from_entity_kwd": reso.removed_entities
|
||||
}, search.index_name(tenant_id), kb_id)
|
||||
settings.docStoreConn.delete({
|
||||
}, search.index_name(self.tenant_id), self.kb_id))
|
||||
await trio.to_thread.run_sync(lambda: settings.docStoreConn.delete({
|
||||
"knowledge_graph_kwd": "relation",
|
||||
"kb_id": kb_id,
|
||||
"kb_id": self.kb_id,
|
||||
"to_entity_kwd": reso.removed_entities
|
||||
}, search.index_name(tenant_id), kb_id)
|
||||
settings.docStoreConn.delete({
|
||||
}, search.index_name(self.tenant_id), self.kb_id))
|
||||
await trio.to_thread.run_sync(lambda: settings.docStoreConn.delete({
|
||||
"knowledge_graph_kwd": "entity",
|
||||
"kb_id": kb_id,
|
||||
"kb_id": self.kb_id,
|
||||
"entity_kwd": reso.removed_entities
|
||||
}, search.index_name(tenant_id), kb_id)
|
||||
}, search.index_name(self.tenant_id), self.kb_id))
|
||||
|
||||
|
||||
class WithCommunity(Dealer):
|
||||
@ -136,38 +146,41 @@ class WithCommunity(Dealer):
|
||||
callback=None
|
||||
):
|
||||
|
||||
self.tenant_id = tenant_id
|
||||
self.kb_id = kb_id
|
||||
self.community_structure = None
|
||||
self.community_reports = None
|
||||
self.llm_bdl = llm_bdl
|
||||
self.embed_bdl = embed_bdl
|
||||
|
||||
with RedisDistributedLock(kb_id, 60*60):
|
||||
self.graph, doc_ids = get_graph(tenant_id, kb_id)
|
||||
self.callback = callback
|
||||
async def __call__(self):
|
||||
with RedisDistributedLock(self.kb_id, 60*60):
|
||||
self.graph, doc_ids = get_graph(self.tenant_id, self.kb_id)
|
||||
if not self.graph:
|
||||
logging.error(f"Faild to fetch the graph. tenant_id:{kb_id}, kb_id:{kb_id}")
|
||||
if callback:
|
||||
callback(-1, msg="Faild to fetch the graph.")
|
||||
logging.error(f"Faild to fetch the graph. tenant_id:{self.kb_id}, kb_id:{self.kb_id}")
|
||||
if self.callback:
|
||||
self.callback(-1, msg="Faild to fetch the graph.")
|
||||
return
|
||||
if callback:
|
||||
callback(msg="Fetch the existing graph.")
|
||||
if self.callback:
|
||||
self.callback(msg="Fetch the existing graph.")
|
||||
|
||||
cr = CommunityReportsExtractor(self.llm_bdl,
|
||||
get_entity=partial(get_entity, tenant_id, kb_id),
|
||||
set_entity=partial(set_entity, tenant_id, kb_id, self.embed_bdl),
|
||||
get_relation=partial(get_relation, tenant_id, kb_id),
|
||||
set_relation=partial(set_relation, tenant_id, kb_id, self.embed_bdl))
|
||||
cr = cr(self.graph, callback=callback)
|
||||
get_entity=partial(get_entity, self.tenant_id, self.kb_id),
|
||||
set_entity=partial(set_entity, self.tenant_id, self.kb_id, self.embed_bdl),
|
||||
get_relation=partial(get_relation, self.tenant_id, self.kb_id),
|
||||
set_relation=partial(set_relation, self.tenant_id, self.kb_id, self.embed_bdl))
|
||||
cr = await cr(self.graph, callback=self.callback)
|
||||
self.community_structure = cr.structured_output
|
||||
self.community_reports = cr.output
|
||||
set_graph(tenant_id, kb_id, self.graph, doc_ids)
|
||||
await trio.to_thread.run_sync(lambda: set_graph(self.tenant_id, self.kb_id, self.graph, doc_ids))
|
||||
|
||||
if callback:
|
||||
callback(msg="Graph community extraction is done. Indexing {} reports.".format(len(cr.structured_output)))
|
||||
if self.callback:
|
||||
self.callback(msg="Graph community extraction is done. Indexing {} reports.".format(len(cr.structured_output)))
|
||||
|
||||
settings.docStoreConn.delete({
|
||||
await trio.to_thread.run_sync(lambda: settings.docStoreConn.delete({
|
||||
"knowledge_graph_kwd": "community_report",
|
||||
"kb_id": kb_id
|
||||
}, search.index_name(tenant_id), kb_id)
|
||||
"kb_id": self.kb_id
|
||||
}, search.index_name(self.tenant_id), self.kb_id))
|
||||
|
||||
for stru, rep in zip(self.community_structure, self.community_reports):
|
||||
obj = {
|
||||
@ -183,7 +196,7 @@ class WithCommunity(Dealer):
|
||||
"weight_flt": stru["weight"],
|
||||
"entities_kwd": stru["entities"],
|
||||
"important_kwd": stru["entities"],
|
||||
"kb_id": kb_id,
|
||||
"kb_id": self.kb_id,
|
||||
"source_id": doc_ids,
|
||||
"available_int": 0
|
||||
}
|
||||
@ -193,5 +206,5 @@ class WithCommunity(Dealer):
|
||||
# chunk["q_%d_vec" % len(ebd[0])] = ebd[0]
|
||||
#except Exception as e:
|
||||
# logging.exception(f"Fail to embed entity relation: {e}")
|
||||
settings.docStoreConn.insert([{"id": chunk_id(chunk), **chunk}], search.index_name(tenant_id))
|
||||
await trio.to_thread.run_sync(lambda: settings.docStoreConn.insert([{"id": chunk_id(chunk), **chunk}], search.index_name(self.tenant_id)))
|
||||
|
||||
|
||||
Reference in New Issue
Block a user