refine mindmap (#1817)

### What problem does this PR solve?

#1594

### Type of change

- [x] Refactoring

Authored by Kevin Hu on 2024-08-06 09:24:53 +08:00, committed by GitHub.
Commit 3fd7db40ea (parent 5650442b0b): 3 changed files with 39 additions and 24 deletions.

```diff
@@ -21,6 +21,7 @@ from typing import List
 import networkx as nx
 from api.db import LLMType
 from api.db.services.llm_service import LLMBundle
+from api.db.services.user_service import TenantService
 from graphrag.community_reports_extractor import CommunityReportsExtractor
 from graphrag.entity_resolution import EntityResolution
 from graphrag.graph_extractor import GraphExtractor
```

```diff
@@ -30,6 +31,11 @@ from rag.utils import num_tokens_from_string
 def be_children(obj: dict, keyset:set):
+    if isinstance(obj, str):
+        obj = [obj]
+    if isinstance(obj, list):
+        for i in obj: keyset.add(i)
+        return [{"id": i, "children":[]} for i in obj]
     arr = []
     for k,v in obj.items():
         k = re.sub(r"\*+", "", k)
```
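
For context on this hunk: the added branches suggest that `be_children` previously only walked dicts, so a mind map whose values were plain strings or lists produced no leaf nodes. Below is a minimal runnable sketch of the refined helper; the dict branch past `k = re.sub(...)` is truncated in the hunk, so its `keyset.add` / `arr.append` tail here is a reconstructed assumption, not taken from the diff:

```python
import re

def be_children(obj, keyset: set):
    # A bare string becomes a single-item list, then falls through below.
    if isinstance(obj, str):
        obj = [obj]
    # New in this hunk: a list maps straight to leaf nodes.
    if isinstance(obj, list):
        for i in obj:
            keyset.add(i)
        return [{"id": i, "children": []} for i in obj]
    # Dict case: one node per key, recursing into each value. Only the first
    # lines of this branch appear in the hunk; the rest is reconstructed.
    arr = []
    for k, v in obj.items():
        k = re.sub(r"\*+", "", k)  # strip markdown emphasis from the key
        keyset.add(k)
        arr.append({"id": k, "children": be_children(v, keyset)})
    return arr

# Example: an LLM-produced mind map with a bold key and a list of leaves.
seen = set()
tree = be_children({"**Root**": ["leaf A", "leaf B"]}, seen)
# tree == [{'id': 'Root', 'children': [{'id': 'leaf A', 'children': []},
#                                      {'id': 'leaf B', 'children': []}]}]
# seen == {'Root', 'leaf A', 'leaf B'}
```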

```diff
@@ -65,7 +71,8 @@ def graph_merge(g1, g2):
 def build_knowlege_graph_chunks(tenant_id: str, chunks: List[str], callback, entity_types=["organization", "person", "location", "event", "time"]):
-    llm_bdl = LLMBundle(tenant_id, LLMType.CHAT)
+    _, tenant = TenantService.get_by_id(tenant_id)
+    llm_bdl = LLMBundle(tenant_id, LLMType.CHAT, tenant.llm_id)
     ext = GraphExtractor(llm_bdl)
     left_token_count = llm_bdl.max_length - ext.prompt_token_count - 1024
     left_token_count = max(llm_bdl.max_length * 0.8, left_token_count)
```
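
Two things change in this last hunk: the chat model is now resolved from the tenant's configured default (`tenant.llm_id`) rather than the bundle's fallback, and the remaining token budget for input chunks is floored at 80% of the model window. A small sketch of that budget arithmetic follows; only the formula comes from the hunk, and the two input numbers are illustrative assumptions, not values from the PR:

```python
# Sketch of the chunk token budget computed above.
max_length = 8192          # llm_bdl.max_length: the model's context window
prompt_token_count = 2000  # ext.prompt_token_count: tokens the prompt consumes

# Tokens left for input chunks after the prompt, minus a 1024-token reserve
# (presumably headroom for the model's response).
left_token_count = max_length - prompt_token_count - 1024   # 5168
# New in this hunk: never budget less than 80% of the window for chunks.
left_token_count = max(max_length * 0.8, left_token_count)  # 6553.6
print(left_token_count)
```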