Mirror of https://github.com/infiniflow/ragflow.git (synced 2025-12-08 20:42:30 +08:00)
Light GraphRAG (#4585)
### What problem does this PR solve?

#4543

### Type of change

- [x] New Feature (non-breaking change which adds functionality)
0    graphrag/general/__init__.py    Normal file
268  graphrag/general/claim_extractor.py    Normal file
@@ -0,0 +1,268 @@
# Copyright (c) 2024 Microsoft Corporation.
# Licensed under the MIT License
"""
Reference:
 - [graphrag](https://github.com/microsoft/graphrag)
"""

import logging
import argparse
import json
import re
import traceback
from dataclasses import dataclass
from typing import Any

import tiktoken

from graphrag.general.claim_prompt import CLAIM_EXTRACTION_PROMPT, CONTINUE_PROMPT, LOOP_PROMPT
from graphrag.general.extractor import Extractor
from rag.llm.chat_model import Base as CompletionLLM
from graphrag.utils import ErrorHandlerFn, perform_variable_replacements

DEFAULT_TUPLE_DELIMITER = "<|>"
DEFAULT_RECORD_DELIMITER = "##"
DEFAULT_COMPLETION_DELIMITER = "<|COMPLETE|>"
CLAIM_MAX_GLEANINGS = 1


@dataclass
class ClaimExtractorResult:
    """Claim extractor result class definition."""

    output: list[dict]
    source_docs: dict[str, Any]


class ClaimExtractor(Extractor):
    """Claim extractor class definition."""

    _extraction_prompt: str
    _summary_prompt: str
    _output_formatter_prompt: str
    _input_text_key: str
    _input_entity_spec_key: str
    _input_claim_description_key: str
    _tuple_delimiter_key: str
    _record_delimiter_key: str
    _completion_delimiter_key: str
    _max_gleanings: int
    _on_error: ErrorHandlerFn

    def __init__(
        self,
        llm_invoker: CompletionLLM,
        extraction_prompt: str | None = None,
        input_text_key: str | None = None,
        input_entity_spec_key: str | None = None,
        input_claim_description_key: str | None = None,
        input_resolved_entities_key: str | None = None,
        tuple_delimiter_key: str | None = None,
        record_delimiter_key: str | None = None,
        completion_delimiter_key: str | None = None,
        encoding_model: str | None = None,
        max_gleanings: int | None = None,
        on_error: ErrorHandlerFn | None = None,
    ):
        """Init method definition."""
        self._llm = llm_invoker
        self._extraction_prompt = extraction_prompt or CLAIM_EXTRACTION_PROMPT
        self._input_text_key = input_text_key or "input_text"
        self._input_entity_spec_key = input_entity_spec_key or "entity_specs"
        self._tuple_delimiter_key = tuple_delimiter_key or "tuple_delimiter"
        self._record_delimiter_key = record_delimiter_key or "record_delimiter"
        self._completion_delimiter_key = (
            completion_delimiter_key or "completion_delimiter"
        )
        self._input_claim_description_key = (
            input_claim_description_key or "claim_description"
        )
        self._input_resolved_entities_key = (
            input_resolved_entities_key or "resolved_entities"
        )
        self._max_gleanings = (
            max_gleanings if max_gleanings is not None else CLAIM_MAX_GLEANINGS
        )
        self._on_error = on_error or (lambda _e, _s, _d: None)

        # Construct the looping arguments
        encoding = tiktoken.get_encoding(encoding_model or "cl100k_base")
        yes = encoding.encode("YES")
        no = encoding.encode("NO")
        self._loop_args = {"logit_bias": {yes[0]: 100, no[0]: 100}, "max_tokens": 1}

    def __call__(
        self, inputs: dict[str, Any], prompt_variables: dict | None = None
    ) -> ClaimExtractorResult:
        """Call method definition."""
        if prompt_variables is None:
            prompt_variables = {}
        texts = inputs[self._input_text_key]
        entity_spec = str(inputs[self._input_entity_spec_key])
        claim_description = inputs[self._input_claim_description_key]
        resolved_entities = inputs.get(self._input_resolved_entities_key, {})
        source_doc_map = {}

        prompt_args = {
            self._input_entity_spec_key: entity_spec,
            self._input_claim_description_key: claim_description,
            self._tuple_delimiter_key: prompt_variables.get(self._tuple_delimiter_key)
            or DEFAULT_TUPLE_DELIMITER,
            self._record_delimiter_key: prompt_variables.get(self._record_delimiter_key)
            or DEFAULT_RECORD_DELIMITER,
            self._completion_delimiter_key: prompt_variables.get(
                self._completion_delimiter_key
            )
            or DEFAULT_COMPLETION_DELIMITER,
        }

        all_claims: list[dict] = []
        for doc_index, text in enumerate(texts):
            document_id = f"d{doc_index}"
            try:
                claims = self._process_document(prompt_args, text, doc_index)
                all_claims += [
                    self._clean_claim(c, document_id, resolved_entities) for c in claims
                ]
                source_doc_map[document_id] = text
            except Exception as e:
                logging.exception("error extracting claim")
                self._on_error(
                    e,
                    traceback.format_exc(),
                    {"doc_index": doc_index, "text": text},
                )
                continue

        return ClaimExtractorResult(
            output=all_claims,
            source_docs=source_doc_map,
        )

    def _clean_claim(
        self, claim: dict, document_id: str, resolved_entities: dict
    ) -> dict:
        # clean the parsed claims to remove any claims with status = False
        obj = claim.get("object_id", claim.get("object"))
        subject = claim.get("subject_id", claim.get("subject"))

        # If subject or object in resolved entities, then replace with resolved entity
        obj = resolved_entities.get(obj, obj)
        subject = resolved_entities.get(subject, subject)
        claim["object_id"] = obj
        claim["subject_id"] = subject
        claim["doc_id"] = document_id
        return claim

    def _process_document(
        self, prompt_args: dict, doc, doc_index: int
    ) -> list[dict]:
        record_delimiter = prompt_args.get(
            self._record_delimiter_key, DEFAULT_RECORD_DELIMITER
        )
        completion_delimiter = prompt_args.get(
            self._completion_delimiter_key, DEFAULT_COMPLETION_DELIMITER
        )
        variables = {
            self._input_text_key: doc,
            **prompt_args,
        }
        text = perform_variable_replacements(self._extraction_prompt, variables=variables)
        gen_conf = {"temperature": 0.5}
        results = self._chat(text, [{"role": "user", "content": "Output:"}], gen_conf)
        claims = results.strip().removesuffix(completion_delimiter)
        history = [{"role": "system", "content": text}, {"role": "assistant", "content": results}]

        # Repeat to ensure we maximize entity count
        for i in range(self._max_gleanings):
            text = perform_variable_replacements(CONTINUE_PROMPT, history=history, variables=variables)
            history.append({"role": "user", "content": text})
            extension = self._chat("", history, gen_conf)
            claims += record_delimiter + extension.strip().removesuffix(
                completion_delimiter
            )

            # If this isn't the last loop, check to see if we should continue
            if i >= self._max_gleanings - 1:
                break

            history.append({"role": "assistant", "content": extension})
            history.append({"role": "user", "content": LOOP_PROMPT})
            continuation = self._chat("", history, self._loop_args)
            if continuation != "YES":
                break

        result = self._parse_claim_tuples(claims, prompt_args)
        for r in result:
            r["doc_id"] = f"{doc_index}"
        return result

    def _parse_claim_tuples(
        self, claims: str, prompt_variables: dict
    ) -> list[dict[str, Any]]:
        """Parse claim tuples."""
        record_delimiter = prompt_variables.get(
            self._record_delimiter_key, DEFAULT_RECORD_DELIMITER
        )
        completion_delimiter = prompt_variables.get(
            self._completion_delimiter_key, DEFAULT_COMPLETION_DELIMITER
        )
        tuple_delimiter = prompt_variables.get(
            self._tuple_delimiter_key, DEFAULT_TUPLE_DELIMITER
        )

        def pull_field(index: int, fields: list[str]) -> str | None:
            return fields[index].strip() if len(fields) > index else None

        result: list[dict[str, Any]] = []
        claims_values = (
            claims.strip().removesuffix(completion_delimiter).split(record_delimiter)
        )
        for claim in claims_values:
            claim = claim.strip().removeprefix("(").removesuffix(")")
            claim = re.sub(r".*Output:", "", claim)

            # Ignore the completion delimiter
            if claim == completion_delimiter:
                continue

            claim_fields = claim.split(tuple_delimiter)
            o = {
                "subject_id": pull_field(0, claim_fields),
                "object_id": pull_field(1, claim_fields),
                "type": pull_field(2, claim_fields),
                "status": pull_field(3, claim_fields),
                "start_date": pull_field(4, claim_fields),
                "end_date": pull_field(5, claim_fields),
                "description": pull_field(6, claim_fields),
                "source_text": pull_field(7, claim_fields),
                "doc_id": pull_field(8, claim_fields),
            }
            if not o["subject_id"] or not o["object_id"] or o["subject_id"].lower() == "none" or o["object_id"].lower() == "none":
                continue
            result.append(o)
        return result


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('-t', '--tenant_id', default=False, help="Tenant ID", action='store', required=True)
    parser.add_argument('-d', '--doc_id', default=False, help="Document ID", action='store', required=True)
    args = parser.parse_args()

    from api.db import LLMType
    from api.db.services.llm_service import LLMBundle
    from api import settings
    from api.db.services.knowledgebase_service import KnowledgebaseService

    kb_ids = KnowledgebaseService.get_kb_ids(args.tenant_id)

    ex = ClaimExtractor(LLMBundle(args.tenant_id, LLMType.CHAT))
    docs = [d["content_with_weight"] for d in settings.retrievaler.chunk_list(args.doc_id, args.tenant_id, kb_ids, max_count=12, fields=["content_with_weight"])]
    info = {
        "input_text": docs,
        "entity_specs": "organization, person",
        "claim_description": ""
    }
    claim = ex(info)
    logging.info(json.dumps(claim.output, ensure_ascii=False, indent=2))
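Note: `_parse_claim_tuples` above relies on the record format defined in `claim_prompt.py` below. A minimal sketch of how one returned record maps onto the parsed dict when the default delimiters are in play (illustrative only; the record text is made up and this snippet is not part of the diff):

```python
# Illustrative sketch: how one claim record is split by ClaimExtractor._parse_claim_tuples
DEFAULT_TUPLE_DELIMITER = "<|>"

record = (
    "(COMPANY A<|>GOVERNMENT AGENCY B<|>ANTI-COMPETITIVE PRACTICES<|>TRUE"
    "<|>2022-01-10T00:00:00<|>2022-01-10T00:00:00<|>Company A was fined for bid rigging<|>quoted source text)"
)
fields = record.strip().removeprefix("(").removesuffix(")").split(DEFAULT_TUPLE_DELIMITER)
claim = {
    "subject_id": fields[0],   # "COMPANY A"
    "object_id": fields[1],    # "GOVERNMENT AGENCY B"
    "type": fields[2],         # "ANTI-COMPETITIVE PRACTICES"
    "status": fields[3],       # "TRUE"
    "start_date": fields[4],
    "end_date": fields[5],
    "description": fields[6],
    "source_text": fields[7],
}
print(claim["subject_id"], claim["status"])
```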
71  graphrag/general/claim_prompt.py    Normal file
@@ -0,0 +1,71 @@
# Copyright (c) 2024 Microsoft Corporation.
# Licensed under the MIT License
"""
Reference:
 - [graphrag](https://github.com/microsoft/graphrag)
"""

CLAIM_EXTRACTION_PROMPT = """
################
-Target activity-
################
You are an intelligent assistant that helps a human analyst to analyze claims against certain entities presented in a text document.

################
-Goal-
################
Given a text document that is potentially relevant to this activity, an entity specification, and a claim description, extract all entities that match the entity specification and all claims against those entities.

################
-Steps-
################
- 1. Extract all named entities that match the predefined entity specification. Entity specification can either be a list of entity names or a list of entity types.
- 2. For each entity identified in step 1, extract all claims associated with the entity. Claims need to match the specified claim description, and the entity should be the subject of the claim.
For each claim, extract the following information:
- Subject: name of the entity that is subject of the claim, capitalized. The subject entity is one that committed the action described in the claim. Subject needs to be one of the named entities identified in step 1.
- Object: name of the entity that is object of the claim, capitalized. The object entity is one that either reports/handles or is affected by the action described in the claim. If object entity is unknown, use **NONE**.
- Claim Type: overall category of the claim, capitalized. Name it in a way that can be repeated across multiple text inputs, so that similar claims share the same claim type
- Claim Status: **TRUE**, **FALSE**, or **SUSPECTED**. TRUE means the claim is confirmed, FALSE means the claim is found to be False, SUSPECTED means the claim is not verified.
- Claim Description: Detailed description explaining the reasoning behind the claim, together with all the related evidence and references.
- Claim Date: Period (start_date, end_date) when the claim was made. Both start_date and end_date should be in ISO-8601 format. If the claim was made on a single date rather than a date range, set the same date for both start_date and end_date. If date is unknown, return **NONE**.
- Claim Source Text: List of **all** quotes from the original text that are relevant to the claim.

- 3. Format each claim as (<subject_entity>{tuple_delimiter}<object_entity>{tuple_delimiter}<claim_type>{tuple_delimiter}<claim_status>{tuple_delimiter}<claim_start_date>{tuple_delimiter}<claim_end_date>{tuple_delimiter}<claim_description>{tuple_delimiter}<claim_source>)
- 4. Return output in language of the 'Text' as a single list of all the claims identified in steps 1 and 2. Use **{record_delimiter}** as the list delimiter.
- 5. If there is nothing that satisfies the above requirements, just keep the output empty.
- 6. When finished, output {completion_delimiter}

################
-Examples-
################
Example 1:
Entity specification: organization
Claim description: red flags associated with an entity
Text: According to an article on 2022/01/10, Company A was fined for bid rigging while participating in multiple public tenders published by Government Agency B. The company is owned by Person C who was suspected of engaging in corruption activities in 2015.
Output:
(COMPANY A{tuple_delimiter}GOVERNMENT AGENCY B{tuple_delimiter}ANTI-COMPETITIVE PRACTICES{tuple_delimiter}TRUE{tuple_delimiter}2022-01-10T00:00:00{tuple_delimiter}2022-01-10T00:00:00{tuple_delimiter}Company A was found to engage in anti-competitive practices because it was fined for bid rigging in multiple public tenders published by Government Agency B according to an article published on 2022/01/10{tuple_delimiter}According to an article published on 2022/01/10, Company A was fined for bid rigging while participating in multiple public tenders published by Government Agency B.)
{completion_delimiter}

###########################
Example 2:
Entity specification: Company A, Person C
Claim description: red flags associated with an entity
Text: According to an article on 2022/01/10, Company A was fined for bid rigging while participating in multiple public tenders published by Government Agency B. The company is owned by Person C who was suspected of engaging in corruption activities in 2015.
Output:
(COMPANY A{tuple_delimiter}GOVERNMENT AGENCY B{tuple_delimiter}ANTI-COMPETITIVE PRACTICES{tuple_delimiter}TRUE{tuple_delimiter}2022-01-10T00:00:00{tuple_delimiter}2022-01-10T00:00:00{tuple_delimiter}Company A was found to engage in anti-competitive practices because it was fined for bid rigging in multiple public tenders published by Government Agency B according to an article published on 2022/01/10{tuple_delimiter}According to an article published on 2022/01/10, Company A was fined for bid rigging while participating in multiple public tenders published by Government Agency B.)
{record_delimiter}
(PERSON C{tuple_delimiter}NONE{tuple_delimiter}CORRUPTION{tuple_delimiter}SUSPECTED{tuple_delimiter}2015-01-01T00:00:00{tuple_delimiter}2015-12-30T00:00:00{tuple_delimiter}Person C was suspected of engaging in corruption activities in 2015{tuple_delimiter}The company is owned by Person C who was suspected of engaging in corruption activities in 2015)
{completion_delimiter}

################
-Real Data-
################
Use the following input for your answer.
Entity specification: {entity_specs}
Claim description: {claim_description}
Text: {input_text}
Output:"""


CONTINUE_PROMPT = "MANY entities were missed in the last extraction. Add them below using the same format(see 'Steps', start with the 'Output').\nOutput: "
LOOP_PROMPT = "It appears some entities may have still been missed. Answer YES {tuple_delimiter} NO if there are still entities that need to be added.\n"
158  graphrag/general/community_report_prompt.py    Normal file
@@ -0,0 +1,158 @@
# Copyright (c) 2024 Microsoft Corporation.
# Licensed under the MIT License
"""
Reference:
 - [graphrag](https://github.com/microsoft/graphrag)
"""

COMMUNITY_REPORT_PROMPT = """
You are an AI assistant that helps a human analyst to perform general information discovery. Information discovery is the process of identifying and assessing relevant information associated with certain entities (e.g., organizations and individuals) within a network.

# Goal
Write a comprehensive report of a community, given a list of entities that belong to the community as well as their relationships and optional associated claims. The report will be used to inform decision-makers about information associated with the community and their potential impact. The content of this report includes an overview of the community's key entities, their legal compliance, technical capabilities, reputation, and noteworthy claims.

# Report Structure

The report should include the following sections:

- TITLE: community's name that represents its key entities - title should be short but specific. When possible, include representative named entities in the title.
- SUMMARY: An executive summary of the community's overall structure, how its entities are related to each other, and significant information associated with its entities.
- IMPACT SEVERITY RATING: a float score between 0-10 that represents the severity of IMPACT posed by entities within the community. IMPACT is the scored importance of a community.
- RATING EXPLANATION: Give a single sentence explanation of the IMPACT severity rating.
- DETAILED FINDINGS: A list of 5-10 key insights about the community. Each insight should have a short summary followed by multiple paragraphs of explanatory text grounded according to the grounding rules below. Be comprehensive.

Return output as a well-formed JSON-formatted string with the following format(in language of 'Text' content):
{{
    "title": <report_title>,
    "summary": <executive_summary>,
    "rating": <impact_severity_rating>,
    "rating_explanation": <rating_explanation>,
    "findings": [
        {{
            "summary":<insight_1_summary>,
            "explanation": <insight_1_explanation>
        }},
        {{
            "summary":<insight_2_summary>,
            "explanation": <insight_2_explanation>
        }}
    ]
}}

# Grounding Rules

Points supported by data should list their data references as follows:

"This is an example sentence supported by multiple data references [Data: <dataset name> (record ids); <dataset name> (record ids)]."

Do not list more than 5 record ids in a single reference. Instead, list the top 5 most relevant record ids and add "+more" to indicate that there are more.

For example:
"Person X is the owner of Company Y and subject to many allegations of wrongdoing [Data: Reports (1), Entities (5, 7); Relationships (23); Claims (7, 2, 34, 64, 46, +more)]."

where 1, 5, 7, 23, 2, 34, 46, and 64 represent the id (not the index) of the relevant data record.

Do not include information where the supporting evidence for it is not provided.


# Example Input
-----------
Text:

-Entities-

id,entity,description
5,VERDANT OASIS PLAZA,Verdant Oasis Plaza is the location of the Unity March
6,HARMONY ASSEMBLY,Harmony Assembly is an organization that is holding a march at Verdant Oasis Plaza

-Relationships-

id,source,target,description
37,VERDANT OASIS PLAZA,UNITY MARCH,Verdant Oasis Plaza is the location of the Unity March
38,VERDANT OASIS PLAZA,HARMONY ASSEMBLY,Harmony Assembly is holding a march at Verdant Oasis Plaza
39,VERDANT OASIS PLAZA,UNITY MARCH,The Unity March is taking place at Verdant Oasis Plaza
40,VERDANT OASIS PLAZA,TRIBUNE SPOTLIGHT,Tribune Spotlight is reporting on the Unity march taking place at Verdant Oasis Plaza
41,VERDANT OASIS PLAZA,BAILEY ASADI,Bailey Asadi is speaking at Verdant Oasis Plaza about the march
43,HARMONY ASSEMBLY,UNITY MARCH,Harmony Assembly is organizing the Unity March

Output:
{{
    "title": "Verdant Oasis Plaza and Unity March",
    "summary": "The community revolves around the Verdant Oasis Plaza, which is the location of the Unity March. The plaza has relationships with the Harmony Assembly, Unity March, and Tribune Spotlight, all of which are associated with the march event.",
    "rating": 5.0,
    "rating_explanation": "The impact severity rating is moderate due to the potential for unrest or conflict during the Unity March.",
    "findings": [
        {{
            "summary": "Verdant Oasis Plaza as the central location",
            "explanation": "Verdant Oasis Plaza is the central entity in this community, serving as the location for the Unity March. This plaza is the common link between all other entities, suggesting its significance in the community. The plaza's association with the march could potentially lead to issues such as public disorder or conflict, depending on the nature of the march and the reactions it provokes. [Data: Entities (5), Relationships (37, 38, 39, 40, 41,+more)]"
        }},
        {{
            "summary": "Harmony Assembly's role in the community",
            "explanation": "Harmony Assembly is another key entity in this community, being the organizer of the march at Verdant Oasis Plaza. The nature of Harmony Assembly and its march could be a potential source of threat, depending on their objectives and the reactions they provoke. The relationship between Harmony Assembly and the plaza is crucial in understanding the dynamics of this community. [Data: Entities(6), Relationships (38, 43)]"
        }},
        {{
            "summary": "Unity March as a significant event",
            "explanation": "The Unity March is a significant event taking place at Verdant Oasis Plaza. This event is a key factor in the community's dynamics and could be a potential source of threat, depending on the nature of the march and the reactions it provokes. The relationship between the march and the plaza is crucial in understanding the dynamics of this community. [Data: Relationships (39)]"
        }},
        {{
            "summary": "Role of Tribune Spotlight",
            "explanation": "Tribune Spotlight is reporting on the Unity March taking place in Verdant Oasis Plaza. This suggests that the event has attracted media attention, which could amplify its impact on the community. The role of Tribune Spotlight could be significant in shaping public perception of the event and the entities involved. [Data: Relationships (40)]"
        }}
    ]
}}


# Real Data

Use the following text for your answer. Do not make anything up in your answer.

Text:

-Entities-
{entity_df}

-Relationships-
{relation_df}

The report should include the following sections:

- TITLE: community's name that represents its key entities - title should be short but specific. When possible, include representative named entities in the title.
- SUMMARY: An executive summary of the community's overall structure, how its entities are related to each other, and significant information associated with its entities.
- IMPACT SEVERITY RATING: a float score between 0-10 that represents the severity of IMPACT posed by entities within the community. IMPACT is the scored importance of a community.
- RATING EXPLANATION: Give a single sentence explanation of the IMPACT severity rating.
- DETAILED FINDINGS: A list of 5-10 key insights about the community. Each insight should have a short summary followed by multiple paragraphs of explanatory text grounded according to the grounding rules below. Be comprehensive.

Return output as a well-formed JSON-formatted string with the following format(in language of 'Text' content):
{{
    "title": <report_title>,
    "summary": <executive_summary>,
    "rating": <impact_severity_rating>,
    "rating_explanation": <rating_explanation>,
    "findings": [
        {{
            "summary":<insight_1_summary>,
            "explanation": <insight_1_explanation>
        }},
        {{
            "summary":<insight_2_summary>,
            "explanation": <insight_2_explanation>
        }}
    ]
}}

# Grounding Rules

Points supported by data should list their data references as follows:

"This is an example sentence supported by multiple data references [Data: <dataset name> (record ids); <dataset name> (record ids)]."

Do not list more than 5 record ids in a single reference. Instead, list the top 5 most relevant record ids and add "+more" to indicate that there are more.

For example:
"Person X is the owner of Company Y and subject to many allegations of wrongdoing [Data: Reports (1), Entities (5, 7); Relationships (23); Claims (7, 2, 34, 64, 46, +more)]."

where 1, 5, 7, 23, 2, 34, 46, and 64 represent the id (not the index) of the relevant data record.

Do not include information where the supporting evidence for it is not provided.

Output:"""
141  graphrag/general/community_reports_extractor.py    Normal file
@@ -0,0 +1,141 @@
# Copyright (c) 2024 Microsoft Corporation.
# Licensed under the MIT License
"""
Reference:
 - [graphrag](https://github.com/microsoft/graphrag)
"""

import logging
import json
import re
import traceback
from typing import Callable
from dataclasses import dataclass
import networkx as nx
import pandas as pd
from graphrag.general import leiden
from graphrag.general.community_report_prompt import COMMUNITY_REPORT_PROMPT
from graphrag.general.extractor import Extractor
from graphrag.general.leiden import add_community_info2graph
from rag.llm.chat_model import Base as CompletionLLM
from graphrag.utils import ErrorHandlerFn, perform_variable_replacements, dict_has_keys_with_types
from rag.utils import num_tokens_from_string
from timeit import default_timer as timer


@dataclass
class CommunityReportsResult:
    """Community reports result class definition."""

    output: list[str]
    structured_output: list[dict]


class CommunityReportsExtractor(Extractor):
    """Community reports extractor class definition."""

    _extraction_prompt: str
    _output_formatter_prompt: str
    _on_error: ErrorHandlerFn
    _max_report_length: int

    def __init__(
        self,
        llm_invoker: CompletionLLM,
        get_entity: Callable | None = None,
        set_entity: Callable | None = None,
        get_relation: Callable | None = None,
        set_relation: Callable | None = None,
        max_report_length: int | None = None,
    ):
        super().__init__(llm_invoker, get_entity=get_entity, set_entity=set_entity, get_relation=get_relation, set_relation=set_relation)
        """Init method definition."""
        self._llm = llm_invoker
        self._extraction_prompt = COMMUNITY_REPORT_PROMPT
        self._max_report_length = max_report_length or 1500

    def __call__(self, graph: nx.Graph, callback: Callable | None = None):
        for node_degree in graph.degree:
            graph.nodes[str(node_degree[0])]["rank"] = int(node_degree[1])

        communities: dict[str, dict[str, list]] = leiden.run(graph, {})
        total = sum([len(comm.items()) for _, comm in communities.items()])
        res_str = []
        res_dict = []
        over, token_count = 0, 0
        st = timer()
        for level, comm in communities.items():
            logging.info(f"Level {level}: Community: {len(comm.keys())}")
            for cm_id, ents in comm.items():
                weight = ents["weight"]
                ents = ents["nodes"]
                ent_df = pd.DataFrame(self._get_entity_(ents)).dropna()  # [{"entity": n, **graph.nodes[n]} for n in ents])
                ent_df["entity"] = ent_df["entity_name"]
                del ent_df["entity_name"]
                rela_df = pd.DataFrame(self._get_relation_(list(ent_df["entity"]), list(ent_df["entity"]), 10000))
                rela_df["source"] = rela_df["src_id"]
                rela_df["target"] = rela_df["tgt_id"]
                del rela_df["src_id"]
                del rela_df["tgt_id"]

                prompt_variables = {
                    "entity_df": ent_df.to_csv(index_label="id"),
                    "relation_df": rela_df.to_csv(index_label="id")
                }
                text = perform_variable_replacements(self._extraction_prompt, variables=prompt_variables)
                gen_conf = {"temperature": 0.3}
                try:
                    response = self._chat(text, [{"role": "user", "content": "Output:"}], gen_conf)
                    token_count += num_tokens_from_string(text + response)
                    response = re.sub(r"^[^\{]*", "", response)
                    response = re.sub(r"[^\}]*$", "", response)
                    response = re.sub(r"\{\{", "{", response)
                    response = re.sub(r"\}\}", "}", response)
                    logging.debug(response)
                    response = json.loads(response)
                    if not dict_has_keys_with_types(response, [
                        ("title", str),
                        ("summary", str),
                        ("findings", list),
                        ("rating", float),
                        ("rating_explanation", str),
                    ]):
                        continue
                    response["weight"] = weight
                    response["entities"] = ents
                except Exception as e:
                    logging.exception("CommunityReportsExtractor got exception")
                    self._on_error(e, traceback.format_exc(), None)
                    continue

                add_community_info2graph(graph, ents, response["title"])
                res_str.append(self._get_text_output(response))
                res_dict.append(response)
                over += 1
                if callback:
                    callback(msg=f"Communities: {over}/{total}, elapsed: {timer() - st}s, used tokens: {token_count}")

        return CommunityReportsResult(
            structured_output=res_dict,
            output=res_str,
        )

    def _get_text_output(self, parsed_output: dict) -> str:
        title = parsed_output.get("title", "Report")
        summary = parsed_output.get("summary", "")
        findings = parsed_output.get("findings", [])

        def finding_summary(finding: dict):
            if isinstance(finding, str):
                return finding
            return finding.get("summary")

        def finding_explanation(finding: dict):
            if isinstance(finding, str):
                return ""
            return finding.get("explanation")

        report_sections = "\n\n".join(
            f"## {finding_summary(f)}\n\n{finding_explanation(f)}" for f in findings
        )
        return f"# {title}\n\n{summary}\n\n{report_sections}"
66  graphrag/general/entity_embedding.py    Normal file
@@ -0,0 +1,66 @@
# Copyright (c) 2024 Microsoft Corporation.
# Licensed under the MIT License
"""
Reference:
 - [graphrag](https://github.com/microsoft/graphrag)
"""

from typing import Any
import numpy as np
import networkx as nx
from dataclasses import dataclass
from graphrag.general.leiden import stable_largest_connected_component
import graspologic as gc


@dataclass
class NodeEmbeddings:
    """Node embeddings class definition."""

    nodes: list[str]
    embeddings: np.ndarray


def embed_nod2vec(
    graph: nx.Graph | nx.DiGraph,
    dimensions: int = 1536,
    num_walks: int = 10,
    walk_length: int = 40,
    window_size: int = 2,
    iterations: int = 3,
    random_seed: int = 86,
) -> NodeEmbeddings:
    """Generate node embeddings using Node2Vec."""
    # generate embedding
    lcc_tensors = gc.embed.node2vec_embed(  # type: ignore
        graph=graph,
        dimensions=dimensions,
        window_size=window_size,
        iterations=iterations,
        num_walks=num_walks,
        walk_length=walk_length,
        random_seed=random_seed,
    )
    return NodeEmbeddings(embeddings=lcc_tensors[0], nodes=lcc_tensors[1])


def run(graph: nx.Graph, args: dict[str, Any]) -> dict[str, list[float]]:
    """Run method definition."""
    if args.get("use_lcc", True):
        graph = stable_largest_connected_component(graph)

    # create graph embedding using node2vec
    embeddings = embed_nod2vec(
        graph=graph,
        dimensions=args.get("dimensions", 1536),
        num_walks=args.get("num_walks", 10),
        walk_length=args.get("walk_length", 40),
        window_size=args.get("window_size", 2),
        iterations=args.get("iterations", 3),
        random_seed=args.get("random_seed", 86),
    )

    pairs = zip(embeddings.nodes, embeddings.embeddings.tolist(), strict=True)
    sorted_pairs = sorted(pairs, key=lambda x: x[0])

    return dict(sorted_pairs)
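A minimal usage sketch for the node2vec helper above, assuming `graspologic` is installed and a populated `networkx` graph; the example graph and parameter values are made up and this snippet is not part of the diff:

```python
# Illustrative sketch: embedding a small graph with the run() helper above
import networkx as nx

from graphrag.general.entity_embedding import run

g = nx.Graph()
g.add_edge("COMPANY A", "GOVERNMENT AGENCY B")
g.add_edge("COMPANY A", "PERSON C")

# run() keeps the largest connected component by default, then applies node2vec
node_vectors = run(g, {"dimensions": 64, "num_walks": 10, "walk_length": 40})
# node_vectors maps each node name to its embedding vector (a list of floats)
print(sorted(node_vectors.keys()))
```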
245  graphrag/general/extractor.py    Normal file
@@ -0,0 +1,245 @@
#
#  Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
#
#  Licensed under the Apache License, Version 2.0 (the "License");
#  you may not use this file except in compliance with the License.
#  You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
#  Unless required by applicable law or agreed to in writing, software
#  distributed under the License is distributed on an "AS IS" BASIS,
#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#  See the License for the specific language governing permissions and
#  limitations under the License.
#
import logging
import os
from collections import defaultdict, Counter
from concurrent.futures import ThreadPoolExecutor
from copy import deepcopy
from typing import Callable

from graphrag.general.graph_prompt import SUMMARIZE_DESCRIPTIONS_PROMPT
from graphrag.utils import get_llm_cache, set_llm_cache, handle_single_entity_extraction, \
    handle_single_relationship_extraction, split_string_by_multi_markers, flat_uniq_list
from rag.llm.chat_model import Base as CompletionLLM
from rag.utils import truncate

GRAPH_FIELD_SEP = "<SEP>"
DEFAULT_ENTITY_TYPES = ["organization", "person", "geo", "event", "category"]
ENTITY_EXTRACTION_MAX_GLEANINGS = 2


class Extractor:
    _llm: CompletionLLM

    def __init__(
        self,
        llm_invoker: CompletionLLM,
        language: str | None = "English",
        entity_types: list[str] | None = None,
        get_entity: Callable | None = None,
        set_entity: Callable | None = None,
        get_relation: Callable | None = None,
        set_relation: Callable | None = None,
    ):
        self._llm = llm_invoker
        self._language = language
        self._entity_types = entity_types or DEFAULT_ENTITY_TYPES
        self._get_entity_ = get_entity
        self._set_entity_ = set_entity
        self._get_relation_ = get_relation
        self._set_relation_ = set_relation

    def _chat(self, system, history, gen_conf):
        hist = deepcopy(history)
        conf = deepcopy(gen_conf)
        response = get_llm_cache(self._llm.llm_name, system, hist, conf)
        if response:
            return response
        response = self._llm.chat(system, hist, conf)
        if response.find("**ERROR**") >= 0:
            raise Exception(response)
        set_llm_cache(self._llm.llm_name, system, response, history, gen_conf)
        return response

    def _entities_and_relations(self, chunk_key: str, records: list, tuple_delimiter: str):
        maybe_nodes = defaultdict(list)
        maybe_edges = defaultdict(list)
        ent_types = [t.lower() for t in self._entity_types]
        for record in records:
            record_attributes = split_string_by_multi_markers(
                record, [tuple_delimiter]
            )

            if_entities = handle_single_entity_extraction(
                record_attributes, chunk_key
            )
            if if_entities is not None and if_entities.get("entity_type", "unknown").lower() in ent_types:
                maybe_nodes[if_entities["entity_name"]].append(if_entities)
                continue

            if_relation = handle_single_relationship_extraction(
                record_attributes, chunk_key
            )
            if if_relation is not None:
                maybe_edges[(if_relation["src_id"], if_relation["tgt_id"])].append(
                    if_relation
                )
        return dict(maybe_nodes), dict(maybe_edges)

    def __call__(
        self, chunks: list[tuple[str, str]],
        callback: Callable | None = None
    ):

        results = []
        max_workers = int(os.environ.get('GRAPH_EXTRACTOR_MAX_WORKERS', 50))
        with ThreadPoolExecutor(max_workers=max_workers) as exe:
            threads = []
            for i, (cid, ck) in enumerate(chunks):
                threads.append(
                    exe.submit(self._process_single_content, (cid, ck)))

            for i, _ in enumerate(threads):
                n, r, tc = _.result()
                if not isinstance(n, Exception):
                    results.append((n, r))
                    if callback:
                        callback(0.5 + 0.1 * i / len(threads), f"Entities extraction progress ... {i + 1}/{len(threads)} ({tc} tokens)")
                elif callback:
                    callback(msg="Knowledge graph extraction error:{}".format(str(n)))

        maybe_nodes = defaultdict(list)
        maybe_edges = defaultdict(list)
        for m_nodes, m_edges in results:
            for k, v in m_nodes.items():
                maybe_nodes[k].extend(v)
            for k, v in m_edges.items():
                maybe_edges[tuple(sorted(k))].extend(v)
        logging.info("Inserting entities into storage...")
        all_entities_data = []
        for en_nm, ents in maybe_nodes.items():
            all_entities_data.append(self._merge_nodes(en_nm, ents))

        logging.info("Inserting relationships into storage...")
        all_relationships_data = []
        for (src, tgt), rels in maybe_edges.items():
            all_relationships_data.append(self._merge_edges(src, tgt, rels))

        if not len(all_entities_data) and not len(all_relationships_data):
            logging.warning(
                "Didn't extract any entities and relationships, maybe your LLM is not working"
            )

        if not len(all_entities_data):
            logging.warning("Didn't extract any entities")
        if not len(all_relationships_data):
            logging.warning("Didn't extract any relationships")

        return all_entities_data, all_relationships_data

    def _merge_nodes(self, entity_name: str, entities: list[dict]):
        if not entities:
            return
        already_entity_types = []
        already_source_ids = []
        already_description = []

        already_node = self._get_entity_(entity_name)
        if already_node:
            already_entity_types.append(already_node["entity_type"])
            already_source_ids.extend(already_node["source_id"])
            already_description.append(already_node["description"])

        entity_type = sorted(
            Counter(
                [dp["entity_type"] for dp in entities] + already_entity_types
            ).items(),
            key=lambda x: x[1],
            reverse=True,
        )[0][0]
        description = GRAPH_FIELD_SEP.join(
            sorted(set([dp["description"] for dp in entities] + already_description))
        )
        already_source_ids = flat_uniq_list(entities, "source_id")
        description = self._handle_entity_relation_summary(
            entity_name, description
        )
        node_data = dict(
            entity_type=entity_type,
            description=description,
            source_id=already_source_ids,
        )
        node_data["entity_name"] = entity_name
        self._set_entity_(entity_name, node_data)
        return node_data

    def _merge_edges(
        self,
        src_id: str,
        tgt_id: str,
        edges_data: list[dict]
    ):
        if not edges_data:
            return
        already_weights = []
        already_source_ids = []
        already_description = []
        already_keywords = []

        relation = self._get_relation_(src_id, tgt_id)
        if relation:
            already_weights = [relation["weight"]]
            already_source_ids = relation["source_id"]
            already_description = [relation["description"]]
            already_keywords = relation["keywords"]

        weight = sum([dp["weight"] for dp in edges_data] + already_weights)
        description = GRAPH_FIELD_SEP.join(
            sorted(set([dp["description"] for dp in edges_data] + already_description))
        )
        keywords = flat_uniq_list(edges_data, "keywords") + already_keywords
        source_id = flat_uniq_list(edges_data, "source_id") + already_source_ids

        for need_insert_id in [src_id, tgt_id]:
            if self._get_entity_(need_insert_id):
                continue
            self._set_entity_(need_insert_id, {
                "source_id": source_id,
                "description": description,
                "entity_type": 'UNKNOWN'
            })
        description = self._handle_entity_relation_summary(
            f"({src_id}, {tgt_id})", description
        )
        edge_data = dict(
            src_id=src_id,
            tgt_id=tgt_id,
            description=description,
            keywords=keywords,
            weight=weight,
            source_id=source_id
        )
        self._set_relation_(src_id, tgt_id, edge_data)

        return edge_data

    def _handle_entity_relation_summary(
        self,
        entity_or_relation_name: str,
        description: str
    ) -> str:
        summary_max_tokens = 512
        use_description = truncate(description, summary_max_tokens)
        prompt_template = SUMMARIZE_DESCRIPTIONS_PROMPT
        context_base = dict(
            entity_name=entity_or_relation_name,
            description_list=use_description.split(GRAPH_FIELD_SEP),
            language=self._language,
        )
        use_prompt = prompt_template.format(**context_base)
        logging.info(f"Trigger summary: {entity_or_relation_name}")
        summary = self._chat(use_prompt, [{"role": "assistant", "content": "Output: "}], {"temperature": 0.8})
        return summary
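For orientation, the base `__call__` above fans `(chunk_id, text)` tuples out to `_process_single_content` (implemented by subclasses such as `GraphExtractor` below) and then merges the per-chunk nodes and edges through the injected get/set callables. A rough usage sketch, assuming the `LLMBundle`/`LLMType` helpers used elsewhere in this PR and simple in-memory dictionaries for storage (illustrative only, not part of the diff):

```python
# Illustrative sketch: driving an Extractor subclass over (chunk_id, text) tuples
from api.db import LLMType
from api.db.services.llm_service import LLMBundle
from graphrag.general.graph_extractor import GraphExtractor

tenant_id = "your-tenant-id"  # placeholder
entities, relations = {}, {}

ex = GraphExtractor(
    LLMBundle(tenant_id, LLMType.CHAT),
    get_entity=lambda name: entities.get(name),
    set_entity=lambda name, data: entities.update({name: data}),
    get_relation=lambda src, tgt: relations.get((src, tgt)),
    set_relation=lambda src, tgt, data: relations.update({(src, tgt): data}),
)

chunks = [("chunk-0", "Company A was fined for bid rigging in tenders published by Government Agency B.")]
# The callback is invoked either with (progress, message) or with msg=..., so accept both forms.
all_entities, all_relations = ex(chunks, callback=lambda prog=None, msg="": print(prog, msg))
```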
154  graphrag/general/graph_extractor.py    Normal file
@@ -0,0 +1,154 @@
# Copyright (c) 2024 Microsoft Corporation.
# Licensed under the MIT License
"""
Reference:
 - [graphrag](https://github.com/microsoft/graphrag)
"""

import logging
import re
from typing import Any, Callable
from dataclasses import dataclass
import tiktoken

from graphrag.general.extractor import Extractor, ENTITY_EXTRACTION_MAX_GLEANINGS, DEFAULT_ENTITY_TYPES
from graphrag.general.graph_prompt import GRAPH_EXTRACTION_PROMPT, CONTINUE_PROMPT, LOOP_PROMPT
from graphrag.utils import ErrorHandlerFn, perform_variable_replacements
from rag.llm.chat_model import Base as CompletionLLM
import networkx as nx
from rag.utils import num_tokens_from_string

DEFAULT_TUPLE_DELIMITER = "<|>"
DEFAULT_RECORD_DELIMITER = "##"
DEFAULT_COMPLETION_DELIMITER = "<|COMPLETE|>"


@dataclass
class GraphExtractionResult:
    """Unipartite graph extraction result class definition."""

    output: nx.Graph
    source_docs: dict[Any, Any]


class GraphExtractor(Extractor):
    """Unipartite graph extractor class definition."""

    _join_descriptions: bool
    _tuple_delimiter_key: str
    _record_delimiter_key: str
    _entity_types_key: str
    _input_text_key: str
    _completion_delimiter_key: str
    _entity_name_key: str
    _input_descriptions_key: str
    _extraction_prompt: str
    _summarization_prompt: str
    _loop_args: dict[str, Any]
    _max_gleanings: int
    _on_error: ErrorHandlerFn

    def __init__(
        self,
        llm_invoker: CompletionLLM,
        language: str | None = "English",
        entity_types: list[str] | None = None,
        get_entity: Callable | None = None,
        set_entity: Callable | None = None,
        get_relation: Callable | None = None,
        set_relation: Callable | None = None,
        tuple_delimiter_key: str | None = None,
        record_delimiter_key: str | None = None,
        input_text_key: str | None = None,
        entity_types_key: str | None = None,
        completion_delimiter_key: str | None = None,
        join_descriptions=True,
        max_gleanings: int | None = None,
        on_error: ErrorHandlerFn | None = None,
    ):
        super().__init__(llm_invoker, language, entity_types, get_entity, set_entity, get_relation, set_relation)
        """Init method definition."""
        # TODO: streamline construction
        self._llm = llm_invoker
        self._join_descriptions = join_descriptions
        self._input_text_key = input_text_key or "input_text"
        self._tuple_delimiter_key = tuple_delimiter_key or "tuple_delimiter"
        self._record_delimiter_key = record_delimiter_key or "record_delimiter"
        self._completion_delimiter_key = (
            completion_delimiter_key or "completion_delimiter"
        )
        self._entity_types_key = entity_types_key or "entity_types"
        self._extraction_prompt = GRAPH_EXTRACTION_PROMPT
        self._max_gleanings = (
            max_gleanings
            if max_gleanings is not None
            else ENTITY_EXTRACTION_MAX_GLEANINGS
        )
        self._on_error = on_error or (lambda _e, _s, _d: None)
        self.prompt_token_count = num_tokens_from_string(self._extraction_prompt)

        # Construct the looping arguments
        encoding = tiktoken.get_encoding("cl100k_base")
        yes = encoding.encode("YES")
        no = encoding.encode("NO")
        self._loop_args = {"logit_bias": {yes[0]: 100, no[0]: 100}, "max_tokens": 1}

        # Wire defaults into the prompt variables
        self._prompt_variables = {
            "entity_types": entity_types,
            self._tuple_delimiter_key: DEFAULT_TUPLE_DELIMITER,
            self._record_delimiter_key: DEFAULT_RECORD_DELIMITER,
            self._completion_delimiter_key: DEFAULT_COMPLETION_DELIMITER,
            self._entity_types_key: ",".join(DEFAULT_ENTITY_TYPES),
        }

    def _process_single_content(self,
                                chunk_key_dp: tuple[str, str]
                                ):
        token_count = 0

        chunk_key = chunk_key_dp[0]
        content = chunk_key_dp[1]
        variables = {
            **self._prompt_variables,
            self._input_text_key: content,
        }
        try:
            gen_conf = {"temperature": 0.3}
            hint_prompt = perform_variable_replacements(self._extraction_prompt, variables=variables)
            response = self._chat(hint_prompt, [{"role": "user", "content": "Output:"}], gen_conf)
            token_count += num_tokens_from_string(hint_prompt + response)

            results = response or ""
            history = [{"role": "system", "content": hint_prompt}, {"role": "assistant", "content": response}]

            # Repeat to ensure we maximize entity count
            for i in range(self._max_gleanings):
                text = perform_variable_replacements(CONTINUE_PROMPT, history=history, variables=variables)
                history.append({"role": "user", "content": text})
                response = self._chat("", history, gen_conf)
                token_count += num_tokens_from_string("\n".join([m["content"] for m in history]) + response)
                results += response or ""

                # if this is the final glean, don't bother updating the continuation flag
                if i >= self._max_gleanings - 1:
                    break
                history.append({"role": "assistant", "content": response})
                history.append({"role": "user", "content": LOOP_PROMPT})
                continuation = self._chat("", history, self._loop_args)
                token_count += num_tokens_from_string("\n".join([m["content"] for m in history]) + response)
                if continuation != "YES":
                    break

            record_delimiter = variables.get(self._record_delimiter_key, DEFAULT_RECORD_DELIMITER)
            tuple_delimiter = variables.get(self._tuple_delimiter_key, DEFAULT_TUPLE_DELIMITER)
            records = [re.sub(r"^\(|\)$", "", r.strip()) for r in results.split(record_delimiter)]
            records = [r for r in records if r.strip()]
            maybe_nodes, maybe_edges = self._entities_and_relations(chunk_key, records, tuple_delimiter)
            return maybe_nodes, maybe_edges, token_count
        except Exception as e:
            logging.exception("error extracting graph")
            return e, None, None
124  graphrag/general/graph_prompt.py    Normal file
@@ -0,0 +1,124 @@
# Copyright (c) 2024 Microsoft Corporation.
# Licensed under the MIT License
"""
Reference:
 - [graphrag](https://github.com/microsoft/graphrag)
"""

GRAPH_EXTRACTION_PROMPT = """
-Goal-
Given a text document that is potentially relevant to this activity and a list of entity types, identify all entities of those types from the text and all relationships among the identified entities.

-Steps-
1. Identify all entities. For each identified entity, extract the following information:
- entity_name: Name of the entity, capitalized, in language of 'Text'
- entity_type: One of the following types: [{entity_types}]
- entity_description: Comprehensive description of the entity's attributes and activities in language of 'Text'
Format each entity as ("entity"{tuple_delimiter}<entity_name>{tuple_delimiter}<entity_type>{tuple_delimiter}<entity_description>

2. From the entities identified in step 1, identify all pairs of (source_entity, target_entity) that are *clearly related* to each other.
For each pair of related entities, extract the following information:
- source_entity: name of the source entity, as identified in step 1
- target_entity: name of the target entity, as identified in step 1
- relationship_description: explanation as to why you think the source entity and the target entity are related to each other in language of 'Text'
- relationship_strength: a numeric score indicating strength of the relationship between the source entity and target entity
Format each relationship as ("relationship"{tuple_delimiter}<source_entity>{tuple_delimiter}<target_entity>{tuple_delimiter}<relationship_description>{tuple_delimiter}<relationship_strength>)

3. Return output as a single list of all the entities and relationships identified in steps 1 and 2. Use **{record_delimiter}** as the list delimiter.

4. When finished, output {completion_delimiter}

######################
-Examples-
######################
Example 1:

Entity_types: [person, technology, mission, organization, location]
Text:
while Alex clenched his jaw, the buzz of frustration dull against the backdrop of Taylor's authoritarian certainty. It was this competitive undercurrent that kept him alert, the sense that his and Jordan's shared commitment to discovery was an unspoken rebellion against Cruz's narrowing vision of control and order.

Then Taylor did something unexpected. They paused beside Jordan and, for a moment, observed the device with something akin to reverence. “If this tech can be understood..." Taylor said, their voice quieter, "It could change the game for us. For all of us.”

The underlying dismissal earlier seemed to falter, replaced by a glimpse of reluctant respect for the gravity of what lay in their hands. Jordan looked up, and for a fleeting heartbeat, their eyes locked with Taylor's, a wordless clash of wills softening into an uneasy truce.

It was a small transformation, barely perceptible, but one that Alex noted with an inward nod. They had all been brought here by different paths
################
Output:
("entity"{tuple_delimiter}"Alex"{tuple_delimiter}"person"{tuple_delimiter}"Alex is a character who experiences frustration and is observant of the dynamics among other characters."){record_delimiter}
("entity"{tuple_delimiter}"Taylor"{tuple_delimiter}"person"{tuple_delimiter}"Taylor is portrayed with authoritarian certainty and shows a moment of reverence towards a device, indicating a change in perspective."){record_delimiter}
("entity"{tuple_delimiter}"Jordan"{tuple_delimiter}"person"{tuple_delimiter}"Jordan shares a commitment to discovery and has a significant interaction with Taylor regarding a device."){record_delimiter}
("entity"{tuple_delimiter}"Cruz"{tuple_delimiter}"person"{tuple_delimiter}"Cruz is associated with a vision of control and order, influencing the dynamics among other characters."){record_delimiter}
("entity"{tuple_delimiter}"The Device"{tuple_delimiter}"technology"{tuple_delimiter}"The Device is central to the story, with potential game-changing implications, and is revered by Taylor."){record_delimiter}
("relationship"{tuple_delimiter}"Alex"{tuple_delimiter}"Taylor"{tuple_delimiter}"Alex is affected by Taylor's authoritarian certainty and observes changes in Taylor's attitude towards the device."{tuple_delimiter}7){record_delimiter}
("relationship"{tuple_delimiter}"Alex"{tuple_delimiter}"Jordan"{tuple_delimiter}"Alex and Jordan share a commitment to discovery, which contrasts with Cruz's vision."{tuple_delimiter}6){record_delimiter}
("relationship"{tuple_delimiter}"Taylor"{tuple_delimiter}"Jordan"{tuple_delimiter}"Taylor and Jordan interact directly regarding the device, leading to a moment of mutual respect and an uneasy truce."{tuple_delimiter}8){record_delimiter}
("relationship"{tuple_delimiter}"Jordan"{tuple_delimiter}"Cruz"{tuple_delimiter}"Jordan's commitment to discovery is in rebellion against Cruz's vision of control and order."{tuple_delimiter}5){record_delimiter}
("relationship"{tuple_delimiter}"Taylor"{tuple_delimiter}"The Device"{tuple_delimiter}"Taylor shows reverence towards the device, indicating its importance and potential impact."{tuple_delimiter}9){completion_delimiter}
#############################
Example 2:

Entity_types: [person, technology, mission, organization, location]
Text:
They were no longer mere operatives; they had become guardians of a threshold, keepers of a message from a realm beyond stars and stripes. This elevation in their mission could not be shackled by regulations and established protocols—it demanded a new perspective, a new resolve.

Tension threaded through the dialogue of beeps and static as communications with Washington buzzed in the background. The team stood, a portentous air enveloping them. It was clear that the decisions they made in the ensuing hours could redefine humanity's place in the cosmos or condemn them to ignorance and potential peril.

Their connection to the stars solidified, the group moved to address the crystallizing warning, shifting from passive recipients to active participants. Mercer's latter instincts gained precedence— the team's mandate had evolved, no longer solely to observe and report but to interact and prepare. A metamorphosis had begun, and Operation: Dulce hummed with the newfound frequency of their daring, a tone set not by the earthly
#############
Output:
("entity"{tuple_delimiter}"Washington"{tuple_delimiter}"location"{tuple_delimiter}"Washington is a location where communications are being received, indicating its importance in the decision-making process."){record_delimiter}
("entity"{tuple_delimiter}"Operation: Dulce"{tuple_delimiter}"mission"{tuple_delimiter}"Operation: Dulce is described as a mission that has evolved to interact and prepare, indicating a significant shift in objectives and activities."){record_delimiter}
("entity"{tuple_delimiter}"The team"{tuple_delimiter}"organization"{tuple_delimiter}"The team is portrayed as a group of individuals who have transitioned from passive observers to active participants in a mission, showing a dynamic change in their role."){record_delimiter}
("relationship"{tuple_delimiter}"The team"{tuple_delimiter}"Washington"{tuple_delimiter}"The team receives communications from Washington, which influences their decision-making process."{tuple_delimiter}7){record_delimiter}
("relationship"{tuple_delimiter}"The team"{tuple_delimiter}"Operation: Dulce"{tuple_delimiter}"The team is directly involved in Operation: Dulce, executing its evolved objectives and activities."{tuple_delimiter}9){completion_delimiter}
#############################
Example 3:

Entity_types: [person, role, technology, organization, event, location, concept]
Text:
their voice slicing through the buzz of activity. "Control may be an illusion when facing an intelligence that literally writes its own rules," they stated stoically, casting a watchful eye over the flurry of data.

"It's like it's learning to communicate," offered Sam Rivera from a nearby interface, their youthful energy boding a mix of awe and anxiety. "This gives talking to strangers' a whole new meaning."

Alex surveyed his team—each face a study in concentration, determination, and not a small measure of trepidation. "This might well be our first contact," he acknowledged, "And we need to be ready for whatever answers back."

Together, they stood on the edge of the unknown, forging humanity's response to a message from the heavens. The ensuing silence was palpable—a collective introspection about their role in this grand cosmic play, one that could rewrite human history.

The encrypted dialogue continued to unfold, its intricate patterns showing an almost uncanny anticipation
#############
Output:
("entity"{tuple_delimiter}"Sam Rivera"{tuple_delimiter}"person"{tuple_delimiter}"Sam Rivera is a member of a team working on communicating with an unknown intelligence, showing a mix of awe and anxiety."){record_delimiter}
("entity"{tuple_delimiter}"Alex"{tuple_delimiter}"person"{tuple_delimiter}"Alex is the leader of a team attempting first contact with an unknown intelligence, acknowledging the significance of their task."){record_delimiter}
("entity"{tuple_delimiter}"Control"{tuple_delimiter}"concept"{tuple_delimiter}"Control refers to the ability to manage or govern, which is challenged by an intelligence that writes its own rules."){record_delimiter}
|
||||
("entity"{tuple_delimiter}"Intelligence"{tuple_delimiter}"concept"{tuple_delimiter}"Intelligence here refers to an unknown entity capable of writing its own rules and learning to communicate."){record_delimiter}
|
||||
("entity"{tuple_delimiter}"First Contact"{tuple_delimiter}"event"{tuple_delimiter}"First Contact is the potential initial communication between humanity and an unknown intelligence."){record_delimiter}
|
||||
("entity"{tuple_delimiter}"Humanity's Response"{tuple_delimiter}"event"{tuple_delimiter}"Humanity's Response is the collective action taken by Alex's team in response to a message from an unknown intelligence."){record_delimiter}
|
||||
("relationship"{tuple_delimiter}"Sam Rivera"{tuple_delimiter}"Intelligence"{tuple_delimiter}"Sam Rivera is directly involved in the process of learning to communicate with the unknown intelligence."{tuple_delimiter}9){record_delimiter}
|
||||
("relationship"{tuple_delimiter}"Alex"{tuple_delimiter}"First Contact"{tuple_delimiter}"Alex leads the team that might be making the First Contact with the unknown intelligence."{tuple_delimiter}10){record_delimiter}
|
||||
("relationship"{tuple_delimiter}"Alex"{tuple_delimiter}"Humanity's Response"{tuple_delimiter}"Alex and his team are the key figures in Humanity's Response to the unknown intelligence."{tuple_delimiter}8){record_delimiter}
|
||||
("relationship"{tuple_delimiter}"Control"{tuple_delimiter}"Intelligence"{tuple_delimiter}"The concept of Control is challenged by the Intelligence that writes its own rules."{tuple_delimiter}7){completion_delimiter}
|
||||
#############################
|
||||
-Real Data-
|
||||
######################
|
||||
Entity_types: {entity_types}
|
||||
Text: {input_text}
|
||||
######################
|
||||
Output:"""
|
||||
|
||||
CONTINUE_PROMPT = "MANY entities were missed in the last extraction. Add them below using the same format:\n"
|
||||
LOOP_PROMPT = "It appears some entities may have still been missed. Answer YES | NO if there are still entities that need to be added.\n"
|
||||
|
||||
SUMMARIZE_DESCRIPTIONS_PROMPT = """
|
||||
You are a helpful assistant responsible for generating a comprehensive summary of the data provided below.
|
||||
Given one or two entities, and a list of descriptions, all related to the same entity or group of entities.
|
||||
Please concatenate all of these into a single, comprehensive description. Make sure to include information collected from all the descriptions.
|
||||
If the provided descriptions are contradictory, please resolve the contradictions and provide a single, coherent summary.
|
||||
Make sure it is written in third person, and include the entity names so we the have full context.
|
||||
Use {language} as output language.
|
||||
|
||||
#######
|
||||
-Data-
|
||||
Entities: {entity_name}
|
||||
Description List: {description_list}
|
||||
#######
|
||||
"""
197
graphrag/general/index.py
Normal file
@ -0,0 +1,197 @@
#
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import json
import logging
from functools import reduce, partial
import networkx as nx

from api import settings
from graphrag.general.community_reports_extractor import CommunityReportsExtractor
from graphrag.entity_resolution import EntityResolution
from graphrag.general.extractor import Extractor
from graphrag.general.graph_extractor import DEFAULT_ENTITY_TYPES
from graphrag.utils import graph_merge, set_entity, get_relation, set_relation, get_entity, get_graph, set_graph, \
    chunk_id, update_nodes_pagerank_nhop_neighbour
from rag.nlp import rag_tokenizer, search
from rag.utils.redis_conn import RedisDistributedLock


# Builds a knowledge graph from extracted entities/relations and merges it into the KB's existing graph.
class Dealer:
    def __init__(self,
                 extractor: Extractor,
                 tenant_id: str,
                 kb_id: str,
                 llm_bdl,
                 chunks: list[tuple[str, str]],
                 language,
                 entity_types=DEFAULT_ENTITY_TYPES,
                 embed_bdl=None,
                 callback=None
                 ):
        docids = list(set([docid for docid, _ in chunks]))
        self.llm_bdl = llm_bdl
        self.embed_bdl = embed_bdl
        ext = extractor(self.llm_bdl, language=language,
                        entity_types=entity_types,
                        get_entity=partial(get_entity, tenant_id, kb_id),
                        set_entity=partial(set_entity, tenant_id, kb_id, self.embed_bdl),
                        get_relation=partial(get_relation, tenant_id, kb_id),
                        set_relation=partial(set_relation, tenant_id, kb_id, self.embed_bdl)
                        )
        ents, rels = ext(chunks, callback)
        self.graph = nx.Graph()
        for en in ents:
            self.graph.add_node(en["entity_name"], entity_type=en["entity_type"])  # , description=en["description"])

        for rel in rels:
            self.graph.add_edge(
                rel["src_id"],
                rel["tgt_id"],
                weight=rel["weight"],
                # description=rel["description"]
            )

        with RedisDistributedLock(kb_id, 60 * 60):
            old_graph, old_doc_ids = get_graph(tenant_id, kb_id)
            if old_graph is not None:
                logging.info("Merging with the existing graph...")
                self.graph = reduce(graph_merge, [old_graph, self.graph])
            update_nodes_pagerank_nhop_neighbour(tenant_id, kb_id, self.graph, 2)
            if old_doc_ids:
                docids.extend(old_doc_ids)
                docids = list(set(docids))
            set_graph(tenant_id, kb_id, self.graph, docids)


# Runs entity resolution over the stored graph and removes the resolved-away entities from the doc store.
class WithResolution(Dealer):
    def __init__(self,
                 tenant_id: str,
                 kb_id: str,
                 llm_bdl,
                 embed_bdl=None,
                 callback=None
                 ):
        self.llm_bdl = llm_bdl
        self.embed_bdl = embed_bdl

        with RedisDistributedLock(kb_id, 60 * 60):
            self.graph, doc_ids = get_graph(tenant_id, kb_id)
            if not self.graph:
                logging.error(f"Failed to fetch the graph. tenant_id:{tenant_id}, kb_id:{kb_id}")
                if callback:
                    callback(-1, msg="Failed to fetch the graph.")
                return

            if callback:
                callback(msg="Fetched the existing graph.")
            er = EntityResolution(self.llm_bdl,
                                  get_entity=partial(get_entity, tenant_id, kb_id),
                                  set_entity=partial(set_entity, tenant_id, kb_id, self.embed_bdl),
                                  get_relation=partial(get_relation, tenant_id, kb_id),
                                  set_relation=partial(set_relation, tenant_id, kb_id, self.embed_bdl))
            reso = er(self.graph)
            self.graph = reso.graph
            logging.info("Graph resolution is done. Removed {} nodes.".format(len(reso.removed_entities)))
            if callback:
                callback(msg="Graph resolution is done. Removed {} nodes.".format(len(reso.removed_entities)))
            update_nodes_pagerank_nhop_neighbour(tenant_id, kb_id, self.graph, 2)
            set_graph(tenant_id, kb_id, self.graph, doc_ids)

            settings.docStoreConn.delete({
                "knowledge_graph_kwd": "relation",
                "kb_id": kb_id,
                "from_entity_kwd": reso.removed_entities
            }, search.index_name(tenant_id), kb_id)
            settings.docStoreConn.delete({
                "knowledge_graph_kwd": "relation",
                "kb_id": kb_id,
                "to_entity_kwd": reso.removed_entities
            }, search.index_name(tenant_id), kb_id)
            settings.docStoreConn.delete({
                "knowledge_graph_kwd": "entity",
                "kb_id": kb_id,
                "entity_kwd": reso.removed_entities
            }, search.index_name(tenant_id), kb_id)


# Detects communities over the stored graph, generates community reports, and indexes them as chunks.
class WithCommunity(Dealer):
    def __init__(self,
                 tenant_id: str,
                 kb_id: str,
                 llm_bdl,
                 embed_bdl=None,
                 callback=None
                 ):

        self.community_structure = None
        self.community_reports = None
        self.llm_bdl = llm_bdl
        self.embed_bdl = embed_bdl

        with RedisDistributedLock(kb_id, 60 * 60):
            self.graph, doc_ids = get_graph(tenant_id, kb_id)
            if not self.graph:
                logging.error(f"Failed to fetch the graph. tenant_id:{tenant_id}, kb_id:{kb_id}")
                if callback:
                    callback(-1, msg="Failed to fetch the graph.")
                return
            if callback:
                callback(msg="Fetched the existing graph.")

            cr = CommunityReportsExtractor(self.llm_bdl,
                                           get_entity=partial(get_entity, tenant_id, kb_id),
                                           set_entity=partial(set_entity, tenant_id, kb_id, self.embed_bdl),
                                           get_relation=partial(get_relation, tenant_id, kb_id),
                                           set_relation=partial(set_relation, tenant_id, kb_id, self.embed_bdl))
            cr = cr(self.graph, callback=callback)
            self.community_structure = cr.structured_output
            self.community_reports = cr.output
            set_graph(tenant_id, kb_id, self.graph, doc_ids)

            if callback:
                callback(msg="Graph community extraction is done. Indexing {} reports.".format(len(cr.structured_output)))

            settings.docStoreConn.delete({
                "knowledge_graph_kwd": "community_report",
                "kb_id": kb_id
            }, search.index_name(tenant_id), kb_id)

            for stru, rep in zip(self.community_structure, self.community_reports):
                obj = {
                    "report": rep,
                    "evidences": "\n".join([f["explanation"] for f in stru["findings"]])
                }
                chunk = {
                    "docnm_kwd": stru["title"],
                    "title_tks": rag_tokenizer.tokenize(stru["title"]),
                    "content_with_weight": json.dumps(obj, ensure_ascii=False),
                    "content_ltks": rag_tokenizer.tokenize(obj["report"] + " " + obj["evidences"]),
                    "knowledge_graph_kwd": "community_report",
                    "weight_flt": stru["weight"],
                    "entities_kwd": stru["entities"],
                    "important_kwd": stru["entities"],
                    "kb_id": kb_id,
                    "source_id": doc_ids,
                    "available_int": 0
                }
                chunk["content_sm_ltks"] = rag_tokenizer.fine_grained_tokenize(chunk["content_ltks"])
                # try:
                #     ebd, _ = self.embed_bdl.encode([", ".join(community["entities"])])
                #     chunk["q_%d_vec" % len(ebd[0])] = ebd[0]
                # except Exception as e:
                #     logging.exception(f"Fail to embed entity relation: {e}")
                settings.docStoreConn.insert([{"id": chunk_id(chunk), **chunk}], search.index_name(tenant_id))
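Note: a minimal sketch of the shapes Dealer expects from its extractor. The field names (entity_name, entity_type, src_id, tgt_id, weight) are taken from the loops above; the sample values are made up.

# Illustrative only: the node/edge construction Dealer performs, with made-up data.
import networkx as nx

ents = [{"entity_name": "RAGFLOW", "entity_type": "organization"},
        {"entity_name": "GRAPHRAG", "entity_type": "technology"}]
rels = [{"src_id": "RAGFLOW", "tgt_id": "GRAPHRAG", "weight": 8}]

g = nx.Graph()
for en in ents:
    g.add_node(en["entity_name"], entity_type=en["entity_type"])
for rel in rels:
    g.add_edge(rel["src_id"], rel["tgt_id"], weight=rel["weight"])

print(g.number_of_nodes(), g.number_of_edges())  # 2 1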
145
graphrag/general/leiden.py
Normal file
@ -0,0 +1,145 @@
# Copyright (c) 2024 Microsoft Corporation.
# Licensed under the MIT License
"""
Reference:
 - [graphrag](https://github.com/microsoft/graphrag)
"""

import logging
import html
from typing import Any, cast
from graspologic.partition import hierarchical_leiden
from graspologic.utils import largest_connected_component
import networkx as nx
from networkx import is_empty


def _stabilize_graph(graph: nx.Graph) -> nx.Graph:
    """Ensure an undirected graph with the same relationships will always be read the same way."""
    fixed_graph = nx.DiGraph() if graph.is_directed() else nx.Graph()

    sorted_nodes = graph.nodes(data=True)
    sorted_nodes = sorted(sorted_nodes, key=lambda x: x[0])

    fixed_graph.add_nodes_from(sorted_nodes)
    edges = list(graph.edges(data=True))

    # If the graph is undirected, we create the edges in a stable way, so we get the same results
    # for example:
    # A -> B
    # in graph theory is the same as
    # B -> A
    # in an undirected graph
    # however, this can lead to downstream issues because sometimes
    # consumers read graph.nodes() which ends up being [A, B] and sometimes it's [B, A]
    # but they base some of their logic on the order of the nodes, so the order ends up being important
    # so we sort the nodes in the edge in a stable way, so that we always get the same order
    if not graph.is_directed():

        def _sort_source_target(edge):
            source, target, edge_data = edge
            if source > target:
                source, target = target, source
            return source, target, edge_data

        edges = [_sort_source_target(edge) for edge in edges]

    def _get_edge_key(source: Any, target: Any) -> str:
        return f"{source} -> {target}"

    edges = sorted(edges, key=lambda x: _get_edge_key(x[0], x[1]))

    fixed_graph.add_edges_from(edges)
    return fixed_graph


def normalize_node_names(graph: nx.Graph | nx.DiGraph) -> nx.Graph | nx.DiGraph:
    """Normalize node names."""
    node_mapping = {node: html.unescape(node.upper().strip()) for node in graph.nodes()}  # type: ignore
    return nx.relabel_nodes(graph, node_mapping)


def stable_largest_connected_component(graph: nx.Graph) -> nx.Graph:
    """Return the largest connected component of the graph, with nodes and edges sorted in a stable way."""
    graph = graph.copy()
    graph = cast(nx.Graph, largest_connected_component(graph))
    graph = normalize_node_names(graph)
    return _stabilize_graph(graph)


def _compute_leiden_communities(
        graph: nx.Graph | nx.DiGraph,
        max_cluster_size: int,
        use_lcc: bool,
        seed=0xDEADBEEF,
) -> dict[int, dict[str, int]]:
    """Return Leiden root communities."""
    results: dict[int, dict[str, int]] = {}
    if is_empty(graph):
        return results
    if use_lcc:
        graph = stable_largest_connected_component(graph)

    community_mapping = hierarchical_leiden(
        graph, max_cluster_size=max_cluster_size, random_seed=seed
    )
    for partition in community_mapping:
        results[partition.level] = results.get(partition.level, {})
        results[partition.level][partition.node] = partition.cluster

    return results


def run(graph: nx.Graph, args: dict[str, Any]) -> dict[int, dict[str, dict]]:
    """Run method definition."""
    max_cluster_size = args.get("max_cluster_size", 12)
    use_lcc = args.get("use_lcc", True)
    if args.get("verbose", False):
        logging.debug(
            "Running leiden with max_cluster_size=%s, lcc=%s", max_cluster_size, use_lcc
        )
    if not graph.nodes():
        return {}

    node_id_to_community_map = _compute_leiden_communities(
        graph=graph,
        max_cluster_size=max_cluster_size,
        use_lcc=use_lcc,
        seed=args.get("seed", 0xDEADBEEF),
    )
    levels = args.get("levels")

    # If they don't pass in levels, use them all
    if levels is None:
        levels = sorted(node_id_to_community_map.keys())

    results_by_level: dict[int, dict[str, dict]] = {}
    for level in levels:
        result = {}
        results_by_level[level] = result
        for node_id, raw_community_id in node_id_to_community_map[level].items():
            community_id = str(raw_community_id)
            if community_id not in result:
                result[community_id] = {"weight": 0, "nodes": []}
            result[community_id]["nodes"].append(node_id)
            result[community_id]["weight"] += graph.nodes[node_id].get("rank", 0) * graph.nodes[node_id].get("weight", 1)
        weights = [comm["weight"] for _, comm in result.items()]
        if not weights:
            continue
        max_weight = max(weights)
        if max_weight == 0:
            continue
        for _, comm in result.items():
            comm["weight"] /= max_weight

    return results_by_level


def add_community_info2graph(graph: nx.Graph, nodes: list[str], community_title):
    for n in nodes:
        if "communities" not in graph.nodes[n]:
            graph.nodes[n]["communities"] = []
        graph.nodes[n]["communities"].append(community_title)
        graph.nodes[n]["communities"] = list(set(graph.nodes[n]["communities"]))
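Note: a hedged usage sketch of run() above, assuming graspologic is installed. The toy graph and its rank/weight attributes are invented; per the loop above, rank defaults to 0 and weight to 1 when computing community weights, and weights are normalized by the per-level maximum.

# Illustrative only: cluster a small toy graph with run().
import networkx as nx

toy = nx.Graph()
toy.add_edges_from([("A", "B"), ("B", "C"), ("A", "C"), ("D", "E")])
toy.add_node("A", rank=2, weight=1)  # rank/weight feed the community weight sum

communities = run(toy, {"max_cluster_size": 12})  # use_lcc defaults to True
for level, comms in communities.items():
    for cid, info in comms.items():
        print(level, cid, info["nodes"], round(info["weight"], 3))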
197
graphrag/general/mind_map_extractor.py
Normal file
@ -0,0 +1,197 @@
#
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

import logging
import collections
import os
import re
import traceback
from typing import Any
from concurrent.futures import ThreadPoolExecutor
from dataclasses import dataclass

from graphrag.general.extractor import Extractor
from graphrag.general.mind_map_prompt import MIND_MAP_EXTRACTION_PROMPT
from graphrag.utils import ErrorHandlerFn, perform_variable_replacements
from rag.llm.chat_model import Base as CompletionLLM
import markdown_to_json
from functools import reduce
from rag.utils import num_tokens_from_string


@dataclass
class MindMapResult:
    """Unipartite Mind Graph result class definition."""
    output: dict


class MindMapExtractor(Extractor):
    _input_text_key: str
    _mind_map_prompt: str
    _on_error: ErrorHandlerFn

    def __init__(
            self,
            llm_invoker: CompletionLLM,
            prompt: str | None = None,
            input_text_key: str | None = None,
            on_error: ErrorHandlerFn | None = None,
    ):
        """Init method definition."""
        # TODO: streamline construction
        self._llm = llm_invoker
        self._input_text_key = input_text_key or "input_text"
        self._mind_map_prompt = prompt or MIND_MAP_EXTRACTION_PROMPT
        self._on_error = on_error or (lambda _e, _s, _d: None)

    def _key(self, k):
        return re.sub(r"\*+", "", k)

    def _be_children(self, obj: dict, keyset: set):
        # Convert a nested dict/list parsed from markdown into an {"id", "children"} tree, deduplicating keys.
        if isinstance(obj, str):
            obj = [obj]
        if isinstance(obj, list):
            keyset.update(obj)
            obj = [re.sub(r"\*+", "", i) for i in obj]
            return [{"id": i, "children": []} for i in obj if i]
        arr = []
        for k, v in obj.items():
            k = self._key(k)
            if k and k not in keyset:
                keyset.add(k)
                arr.append(
                    {
                        "id": k,
                        "children": self._be_children(v, keyset)
                    }
                )
        return arr

    def __call__(
            self, sections: list[str], prompt_variables: dict[str, Any] | None = None
    ) -> MindMapResult:
        """Call method definition."""
        if prompt_variables is None:
            prompt_variables = {}

        try:
            res = []
            max_workers = int(os.environ.get('MINDMAP_EXTRACTOR_MAX_WORKERS', 12))
            with ThreadPoolExecutor(max_workers=max_workers) as exe:
                threads = []
                token_count = max(self._llm.max_length * 0.8, self._llm.max_length - 512)
                texts = []
                cnt = 0
                # Pack sections into batches that fit the model's context window.
                for i in range(len(sections)):
                    section_cnt = num_tokens_from_string(sections[i])
                    if cnt + section_cnt >= token_count and texts:
                        threads.append(exe.submit(self._process_document, "".join(texts), prompt_variables))
                        texts = []
                        cnt = 0
                    texts.append(sections[i])
                    cnt += section_cnt
                if texts:
                    threads.append(exe.submit(self._process_document, "".join(texts), prompt_variables))

                for job in threads:
                    res.append(job.result())

            if not res:
                return MindMapResult(output={"id": "root", "children": []})

            merge_json = reduce(self._merge, res)
            if len(merge_json) > 1:
                keys = [re.sub(r"\*+", "", k) for k, v in merge_json.items() if isinstance(v, dict)]
                keyset = set(i for i in keys if i)
                merge_json = {
                    "id": "root",
                    "children": [
                        {
                            "id": self._key(k),
                            "children": self._be_children(v, keyset)
                        }
                        for k, v in merge_json.items() if isinstance(v, dict) and self._key(k)
                    ]
                }
            else:
                k = self._key(list(merge_json.keys())[0])
                merge_json = {"id": k, "children": self._be_children(list(merge_json.items())[0][1], {k})}

        except Exception as e:
            logging.exception("Mind map extraction error")
            self._on_error(
                e,
                traceback.format_exc(), None
            )
            merge_json = {"error": str(e)}

        return MindMapResult(output=merge_json)

    def _merge(self, d1, d2):
        for k in d1:
            if k in d2:
                if isinstance(d1[k], dict) and isinstance(d2[k], dict):
                    self._merge(d1[k], d2[k])
                elif isinstance(d1[k], list) and isinstance(d2[k], list):
                    d2[k].extend(d1[k])
                else:
                    d2[k] = d1[k]
            else:
                d2[k] = d1[k]

        return d2

    def _list_to_kv(self, data):
        for key, value in data.items():
            if isinstance(value, dict):
                self._list_to_kv(value)
            elif isinstance(value, list):
                new_value = {}
                for i in range(len(value)):
                    if isinstance(value[i], list) and i > 0:
                        new_value[value[i - 1]] = value[i][0]
                data[key] = new_value
            else:
                continue
        return data

    def _todict(self, layer: collections.OrderedDict):
        to_ret = layer
        if isinstance(layer, collections.OrderedDict):
            to_ret = dict(layer)

        try:
            for key, value in to_ret.items():
                to_ret[key] = self._todict(value)
        except AttributeError:
            pass

        return self._list_to_kv(to_ret)

    def _process_document(
            self, text: str, prompt_variables: dict[str, str]
    ) -> str:
        variables = {
            **prompt_variables,
            self._input_text_key: text,
        }
        text = perform_variable_replacements(self._mind_map_prompt, variables=variables)
        gen_conf = {"temperature": 0.5}
        response = self._chat(text, [{"role": "user", "content": "Output:"}], gen_conf)
        response = re.sub(r"```[^\n]*", "", response)
        logging.debug(response)
        result = self._todict(markdown_to_json.dictify(response))
        logging.debug(result)
        return result
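Note: a hedged sketch of the data flow above. The model's markdown answer is parsed with markdown_to_json.dictify into nested dicts, and _be_children turns those dicts into the {"id", "children"} tree carried by MindMapResult.output. The markdown headings below are invented, and the result is only a rough illustration of the shape, not an exact trace.

# Illustrative only: a (hypothetical) markdown answer such as
#   # RAGFlow
#   ## Indexing
#   ### Chunking
#   ## Retrieval
# roughly becomes, after dictify + _todict + _be_children:
expected_shape = {
    "id": "RAGFlow",
    "children": [
        {"id": "Indexing", "children": [{"id": "Chunking", "children": []}]},
        {"id": "Retrieval", "children": []},
    ],
}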
35
graphrag/general/mind_map_prompt.py
Normal file
@ -0,0 +1,35 @@
#
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

MIND_MAP_EXTRACTION_PROMPT = """
- Role: You are a talented text processor that summarizes a piece of text into a mind map.

- Steps of the task:
  1. Generate a title for the user's 'TEXT'.
  2. Classify the 'TEXT' into sections of a mind map.
  3. If the subject matter is really complex, split it into sub-sections and sub-subsections.
  4. Add a short content summary to each bottom-level section.

- Output requirements:
  - Generate at least 4 levels.
  - Always try to maximize the number of sub-sections.
  - Write in the language of the 'TEXT'.
  - THE OUTPUT MUST BE IN MARKDOWN FORMAT.

-TEXT-
{input_text}

"""
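Note: a hedged example of the markdown shape the prompt asks for (a title, at least four levels, short summaries at the bottom). The topic and wording are invented for illustration only.

# Illustrative only: a made-up response in the requested markdown shape.
EXAMPLE_MIND_MAP_MARKDOWN = """
# Light GraphRAG
## Indexing
### Entity extraction
#### Summary: entities and relations are pulled from each chunk by the LLM.
## Retrieval
### Community reports
#### Summary: Leiden communities are summarized into report chunks.
"""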
63
graphrag/general/smoke.py
Normal file
@ -0,0 +1,63 @@
#
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

import argparse
import json

import networkx as nx

from api import settings
from api.db import LLMType
from api.db.services.document_service import DocumentService
from api.db.services.knowledgebase_service import KnowledgebaseService
from api.db.services.llm_service import LLMBundle
from api.db.services.user_service import TenantService
from graphrag.general.index import WithCommunity, Dealer, WithResolution
from graphrag.light.graph_extractor import GraphExtractor
from rag.utils.redis_conn import RedisDistributedLock

settings.init_settings()

if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('-t', '--tenant_id', default=False, help="Tenant ID", action='store', required=True)
    parser.add_argument('-d', '--doc_id', default=False, help="Document ID", action='store', required=True)
    args = parser.parse_args()
    e, doc = DocumentService.get_by_id(args.doc_id)
    if not e:
        raise LookupError("Document not found.")
    kb_id = doc.kb_id

    chunks = [d["content_with_weight"] for d in
              settings.retrievaler.chunk_list(args.doc_id, args.tenant_id, [kb_id], max_count=6,
                                              fields=["content_with_weight"])]
    chunks = [("x", c) for c in chunks]

    RedisDistributedLock.clean_lock(kb_id)

    _, tenant = TenantService.get_by_id(args.tenant_id)
    llm_bdl = LLMBundle(args.tenant_id, LLMType.CHAT, tenant.llm_id)
    _, kb = KnowledgebaseService.get_by_id(kb_id)
    embed_bdl = LLMBundle(args.tenant_id, LLMType.EMBEDDING, kb.embd_id)

    dealer = Dealer(GraphExtractor, args.tenant_id, kb_id, llm_bdl, chunks, "English", embed_bdl=embed_bdl)
    print(json.dumps(nx.node_link_data(dealer.graph), ensure_ascii=False, indent=2))

    dealer = WithResolution(args.tenant_id, kb_id, llm_bdl, embed_bdl)
    dealer = WithCommunity(args.tenant_id, kb_id, llm_bdl, embed_bdl)

    print("------------------ COMMUNITY REPORT ----------------------\n", dealer.community_reports)
    print(json.dumps(dealer.community_structure, ensure_ascii=False, indent=2))