rename some attributes in document sdk (#2481)

### What problem does this PR solve?

#1102

### Type of change

- [x] Performance Improvement

---------

Co-authored-by: Kevin Hu <kevinhu.sh@gmail.com>
Committed by GitHub · parent 01acc3fd5a · commit 2b0dc01a88
```diff
@@ -22,7 +22,7 @@ class Chunk(Base):
         Delete the chunk in the document.
         """
         res = self.post('/doc/chunk/rm',
-                        {"doc_id": self.document_id, 'chunk_ids': [self.id]})
+                        {"document_id": self.document_id, 'chunk_ids': [self.id]})
         res = res.json()
         if res.get("retmsg") == "success":
             return True
@@ -34,13 +34,13 @@ class Chunk(Base):
         """
         res = self.post('/doc/chunk/set',
                         {"chunk_id": self.id,
-                         "kb_id": self.knowledgebase_id,
+                         "knowledgebase_id": self.knowledgebase_id,
                          "name": self.document_name,
-                         "content_with_weight": self.content,
-                         "important_kwd": self.important_keywords,
+                         "content": self.content,
+                         "important_keywords": self.important_keywords,
                          "create_time": self.create_time,
                          "create_timestamp_flt": self.create_timestamp_float,
-                         "doc_id": self.document_id,
+                         "document_id": self.document_id,
                          "status": self.status,
                          })
         res = res.json()
```
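For orientation, a minimal usage sketch of the renamed keys as seen from the SDK side. `RAGFlow`, `get_document`, and `add_chunk` appear in the test hunks further down; the `save()`/`delete()` method names, the import path, and the credential placeholders are assumptions, not verified against this SDK version:

```python
# Hedged sketch: exercises the renamed payload keys through the Chunk methods
# changed above. save()/delete() method names are assumed from the docstrings.
from ragflow import RAGFlow  # assumed import path

API_KEY = "<api-key>"            # placeholder
HOST_ADDRESS = "<host-address>"  # placeholder

rag = RAGFlow(API_KEY, HOST_ADDRESS)
doc = rag.get_document(name='story.txt')

chunk = doc.add_chunk(content="hello ragflow")
chunk.content = "updated content"
chunk.save()    # POST /doc/chunk/set now carries "document_id", "knowledgebase_id", "content", "important_keywords"
chunk.delete()  # POST /doc/chunk/rm now carries {"document_id": ..., "chunk_ids": [...]}
```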
```diff
@@ -65,7 +65,7 @@ class DataSet(Base):
         """
         # Construct the request payload for listing documents
         payload = {
-            "kb_id": self.id,
+            "knowledgebase_id": self.id,
             "keywords": keywords,
             "offset": offset,
             "limit": limit
```
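The same rename shows up in the body `DataSet` builds when listing documents; a sketch of the new payload shape (`dataset_id` is a placeholder standing in for `self.id`):

```python
# New list-documents payload shape; "knowledgebase_id" replaces "kb_id".
dataset_id = "<dataset-id>"  # placeholder for DataSet.id

payload = {
    "knowledgebase_id": dataset_id,  # was "kb_id"
    "keywords": "",
    "offset": 0,
    "limit": 10,
}
```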
```diff
@@ -34,10 +34,10 @@ class Document(Base):
         Save the document details to the server.
         """
         res = self.post('/doc/save',
-                        {"id": self.id, "name": self.name, "thumbnail": self.thumbnail, "kb_id": self.knowledgebase_id,
-                         "parser_id": self.parser_method, "parser_config": self.parser_config.to_json(),
+                        {"id": self.id, "name": self.name, "thumbnail": self.thumbnail, "knowledgebase_id": self.knowledgebase_id,
+                         "parser_method": self.parser_method, "parser_config": self.parser_config.to_json(),
                          "source_type": self.source_type, "type": self.type, "created_by": self.created_by,
-                         "size": self.size, "token_num": self.token_count, "chunk_num": self.chunk_count,
+                         "size": self.size, "token_count": self.token_count, "chunk_count": self.chunk_count,
                          "progress": self.progress, "progress_msg": self.progress_msg,
                          "process_begin_at": self.process_begin_at, "process_duation": self.process_duration
                          })
```
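The save payload gathers most of the renames in one place. As a quick reference, the old-to-new key mapping collected from the hunks in this diff (not necessarily an exhaustive list of SDK attributes):

```python
# Old-to-new payload keys introduced by this commit, per the hunks shown here.
RENAMED_KEYS = {
    "doc_id": "document_id",
    "doc_ids": "document_ids",
    "kb_id": "knowledgebase_id",
    "parser_id": "parser_method",
    "content_with_weight": "content",
    "important_kwd": "important_keywords",
    "token_num": "token_count",
    "chunk_num": "chunk_count",
}
```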
```diff
@@ -51,7 +51,7 @@ class Document(Base):
         Delete the document from the server.
         """
         res = self.rm('/doc/delete',
-                      {"doc_id": self.id})
+                      {"document_id": self.id})
         res = res.json()
         if res.get("retmsg") == "success":
             return True
@@ -83,7 +83,7 @@ class Document(Base):
         """
         try:
             # Construct request data including document ID and run status (assuming 1 means to run)
-            data = {"doc_ids": [self.id], "run": 1}
+            data = {"document_ids": [self.id], "run": 1}
 
             # Send a POST request to the specified parsing status endpoint to start parsing
             res = self.post(f'/doc/run', data)
@@ -112,7 +112,7 @@ class Document(Base):
         start_time = time.time()
         while time.time() - start_time < timeout:
             # Check the parsing status
-            res = self.get(f'/doc/{self.id}/status', {"doc_ids": [self.id]})
+            res = self.get(f'/doc/{self.id}/status', {"document_ids": [self.id]})
             res_data = res.json()
             data = res_data.get("data", [])
 
```
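A self-contained sketch of the poll loop above, outside the SDK class, using the renamed `document_ids` key. `BASE_URL` and `HEADERS` are placeholders, and the diff shows nothing of the response schema beyond the `data` list, so interpreting the status is left to the caller:

```python
import time
import requests

BASE_URL = "http://127.0.0.1:9380/v1"            # placeholder server root
HEADERS = {"Authorization": "Bearer <API_KEY>"}  # placeholder credentials

def poll_parse_status(document_id: str, timeout: float = 60.0, interval: float = 1.0):
    """Yield raw status payloads until timeout; the diff does not show the
    response schema, so deciding when parsing is 'done' is up to the caller."""
    start_time = time.time()
    while time.time() - start_time < timeout:
        res = requests.get(f"{BASE_URL}/doc/{document_id}/status",
                           params={"document_ids": [document_id]},
                           headers=HEADERS)
        yield res.json().get("data", [])
        time.sleep(interval)
```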
```diff
@@ -133,7 +133,7 @@ class Document(Base):
         """
         try:
             # Construct request data, including document ID and action to cancel (assuming 2 means cancel)
-            data = {"doc_ids": [self.id], "run": 2}
+            data = {"document_ids": [self.id], "run": 2}
 
             # Send a POST request to the specified parsing status endpoint to cancel parsing
             res = self.post(f'/doc/run', data)
@@ -162,7 +162,7 @@ class Document(Base):
             list: A list of chunks returned from the API.
         """
         data = {
-            "doc_id": self.id,
+            "document_id": self.id,
             "page": page,
             "size": size,
             "keywords": keywords,
@@ -188,7 +188,7 @@ class Document(Base):
             raise Exception(f"API request failed with status code {res.status_code}")
 
     def add_chunk(self, content: str):
-        res = self.post('/doc/chunk/create', {"doc_id": self.id, "content_with_weight":content})
+        res = self.post('/doc/chunk/create', {"document_id": self.id, "content":content})
         if res.status_code == 200:
             res_data = res.json().get("data")
             chunk_data = res_data.get("chunk")
```
```diff
@@ -150,14 +150,11 @@ class RAGFlow:
         files = {
             'file': (name, blob)
         }
-        data = {
-            'kb_id': ds.id
-        }
         headers = {
             'Authorization': f"Bearer {ds.rag.user_key}"
         }
 
-        response = requests.post(self.api_url + url, data=data, files=files,
+        response = requests.post(self.api_url + url, files=files,
                                  headers=headers)
 
         if response.status_code == 200 and response.json().get('retmsg') == 'success':
```
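Note that the upload hunk drops the `kb_id` form field entirely rather than renaming it. A standalone sketch of the new request, mirroring the `requests.post` call above (argument names here are placeholders):

```python
import requests

def upload_file(api_url: str, url: str, name: str, blob: bytes, user_key: str):
    # The old 'kb_id' form field is no longer sent as form data; only the
    # file part and the auth header remain, per the hunk above.
    files = {'file': (name, blob)}
    headers = {'Authorization': f"Bearer {user_key}"}
    return requests.post(api_url + url, files=files, headers=headers)
```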
```diff
@@ -184,7 +181,7 @@ class RAGFlow:
             if not doc_ids or not isinstance(doc_ids, list):
                 raise ValueError("doc_ids must be a non-empty list of document IDs")
 
-            data = {"doc_ids": doc_ids, "run": 1}
+            data = {"document_ids": doc_ids, "run": 1}
 
             res = self.post(f'/doc/run', data)
 
@@ -206,7 +203,7 @@ class RAGFlow:
         try:
             if not doc_ids or not isinstance(doc_ids, list):
                 raise ValueError("doc_ids must be a non-empty list of document IDs")
-            data = {"doc_ids": doc_ids, "run": 2}
+            data = {"document_ids": doc_ids, "run": 2}
             res = self.post(f'/doc/run', data)
 
             if res.status_code != 200:
```
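Both batch endpoints share the same body shape; per the comments in the hunks, `"run": 1` starts parsing and `"run": 2` cancels it, now keyed by `document_ids`:

```python
# Request bodies for POST /doc/run after the rename.
doc_ids = ["<document-id>"]  # placeholder list of document IDs

start_parsing = {"document_ids": doc_ids, "run": 1}   # 1 = run (per comment above)
cancel_parsing = {"document_ids": doc_ids, "run": 2}  # 2 = cancel (per comment above)
```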
```diff
@@ -252,7 +249,7 @@ class RAGFlow:
             "similarity_threshold": similarity_threshold,
             "vector_similarity_weight": vector_similarity_weight,
             "top_k": top_k,
-            "kb_id": datasets,
+            "knowledgebase_id": datasets,
         }
 
         # Send a POST request to the backend service (using requests library as an example, actual implementation may vary)
 
```
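In the retrieval request, the dataset list now also travels under `knowledgebase_id`. A sketch of the resulting body, with field names taken from the hunk above; the numeric values are placeholders, not documented defaults:

```python
# Retrieval request body after the rename; numeric values are placeholders.
dataset_ids = ["<dataset-id>"]  # placeholder list of dataset IDs

retrieval_body = {
    "similarity_threshold": 0.2,
    "vector_similarity_weight": 0.3,
    "top_k": 1024,
    "knowledgebase_id": dataset_ids,  # was "kb_id"
}
```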
```diff
@@ -255,14 +255,14 @@ class TestDocument(TestSdk):
     def test_add_chunk_to_chunk_list(self):
         rag = RAGFlow(API_KEY, HOST_ADDRESS)
         doc = rag.get_document(name='story.txt')
-        chunk = doc.add_chunk(content="assss")
+        chunk = doc.add_chunk(content="assssdd")
         assert chunk is not None, "Chunk is None"
         assert isinstance(chunk, Chunk), "Chunk was not added to chunk list"
 
     def test_delete_chunk_of_chunk_list(self):
         rag = RAGFlow(API_KEY, HOST_ADDRESS)
         doc = rag.get_document(name='story.txt')
-        chunk = doc.add_chunk(content="assss")
+        chunk = doc.add_chunk(content="assssdd")
         assert chunk is not None, "Chunk is None"
         assert isinstance(chunk, Chunk), "Chunk was not added to chunk list"
         doc = rag.get_document(name='story.txt')
@@ -274,7 +274,7 @@ class TestDocument(TestSdk):
     def test_update_chunk_content(self):
         rag = RAGFlow(API_KEY, HOST_ADDRESS)
         doc = rag.get_document(name='story.txt')
-        chunk = doc.add_chunk(content="assssd")
+        chunk = doc.add_chunk(content="assssddd")
         assert chunk is not None, "Chunk is None"
         assert isinstance(chunk, Chunk), "Chunk was not added to chunk list"
         chunk.content = "ragflow123"
```