Mirror of https://github.com/infiniflow/ragflow.git (synced 2025-12-08 20:42:30 +08:00)
Test: fix test cases and improve document parsing validation (#8179)
### What problem does this PR solve?

- Update chat assistant tests to use dataset.id directly in payloads
- Enhance document parsing tests with better condition checking
- Add explicit type hints and improve timeout handling

### Type of change

- [x] Bug Fix (non-breaking change which fixes an issue)
```diff
@@ -49,11 +49,11 @@ class TestChatAssistantUpdate:
 
     @pytest.mark.p3
     def test_avatar(self, client, add_chat_assistants_func, tmp_path):
-        _, _, chat_assistants = add_chat_assistants_func
+        dataset, _, chat_assistants = add_chat_assistants_func
         chat_assistant = chat_assistants[0]
 
         fn = create_image_file(tmp_path / "ragflow_test.png")
-        payload = {"name": "avatar_test", "avatar": encode_avatar(fn), "dataset_ids": chat_assistant.dataset_ids}
+        payload = {"name": "avatar_test", "avatar": encode_avatar(fn), "dataset_ids": [dataset.id]}
 
         chat_assistant.update(payload)
         updated_chat = client.list_chats(id=chat_assistant.id)[0]
```
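The updated tests unpack the dataset from the `add_chat_assistants_func` fixture and build payloads from `dataset.id` rather than echoing back `chat_assistant.dataset_ids`. A minimal sketch of the fixture contract this assumes follows; only the `(dataset, _, chat_assistants)` tuple shape comes from the diff, while the fixture body, the `add_dataset_func` / `bulk_upload_documents` dependencies, and the `client.create_chat` call are illustrative assumptions.

```python
# Hypothetical sketch of the fixture contract assumed by the updated tests.
import pytest


@pytest.fixture
def add_chat_assistants_func(client, add_dataset_func, tmp_path):
    """Yield (dataset, documents, chat_assistants) for the chat assistant tests."""
    dataset = add_dataset_func                               # dataset fixture from the suite (assumed)
    documents = bulk_upload_documents(dataset, 1, tmp_path)  # helper from the suite (assumed)
    chat_assistants = [
        client.create_chat(name=f"test_chat_{i}", dataset_ids=[dataset.id])  # assumed SDK call
        for i in range(3)
    ]
    # Yield the dataset alongside the assistants so tests can build payloads
    # from dataset.id without re-reading chat_assistant.dataset_ids.
    yield dataset, documents, chat_assistants
```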
```diff
@@ -96,9 +96,9 @@ class TestChatAssistantUpdate:
         ],
     )
     def test_llm(self, client, add_chat_assistants_func, llm, expected_message):
-        _, _, chat_assistants = add_chat_assistants_func
+        dataset, _, chat_assistants = add_chat_assistants_func
         chat_assistant = chat_assistants[0]
-        payload = {"name": "llm_test", "dataset_ids": chat_assistant.dataset_ids, "llm": llm}
+        payload = {"name": "llm_test", "llm": llm, "dataset_ids": [dataset.id]}
 
         if expected_message:
             with pytest.raises(Exception) as excinfo:
```
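The `],` and `)` context lines close a `@pytest.mark.parametrize` block that supplies `llm` and `expected_message`; the actual cases and the assertions after `pytest.raises` sit outside this hunk. A hedged sketch of the overall pattern, with hypothetical cases and an assumed `excinfo` check:

```python
# Hedged sketch of the parametrize/raises pattern around this hunk; the cases
# and the final assertions are illustrative, not taken from the commit.
import pytest


class TestChatAssistantUpdate:
    @pytest.mark.parametrize(
        "llm, expected_message",
        [
            pytest.param({"model_name": "glm-4"}, "", id="valid_llm"),               # hypothetical case
            pytest.param({"model_name": "unknown"}, "not supported", id="bad_llm"),  # hypothetical case
        ],
    )
    def test_llm(self, client, add_chat_assistants_func, llm, expected_message):
        dataset, _, chat_assistants = add_chat_assistants_func
        chat_assistant = chat_assistants[0]
        payload = {"name": "llm_test", "llm": llm, "dataset_ids": [dataset.id]}

        if expected_message:
            with pytest.raises(Exception) as excinfo:
                chat_assistant.update(payload)
            assert expected_message in str(excinfo.value)  # assumed check
        else:
            chat_assistant.update(payload)
```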
```diff
@@ -173,9 +173,9 @@ class TestChatAssistantUpdate:
         ],
     )
     def test_prompt(self, client, add_chat_assistants_func, prompt, expected_message):
-        _, _, chat_assistants = add_chat_assistants_func
+        dataset, _, chat_assistants = add_chat_assistants_func
         chat_assistant = chat_assistants[0]
-        payload = {"name": "prompt_test", "dataset_ids": chat_assistant.dataset_ids, "prompt": prompt}
+        payload = {"name": "prompt_test", "prompt": prompt, "dataset_ids": [dataset.id]}
 
         if expected_message:
             with pytest.raises(Exception) as excinfo:
```
```diff
@@ -22,7 +22,7 @@ from utils import wait_for
 
 
 @wait_for(30, 1, "Document parsing timeout")
-def condition(_dataset: DataSet, _document_ids=None):
+def condition(_dataset: DataSet, _document_ids: list[str] = None):
    documents = _dataset.list_documents(page_size=1000)
 
    if _document_ids is None:
```
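`wait_for` is imported from the suite's `utils` module and is not shown in this diff. A minimal sketch of the polling behavior the tests rely on, written as an assumption rather than the project's actual implementation:

```python
# Assumed behavior of utils.wait_for: re-run the wrapped predicate until it
# returns a truthy value or the timeout elapses, then fail with the message.
# This is a sketch, not the project's actual implementation.
import functools
import time


def wait_for(timeout: float, interval: float, error_message: str):
    def decorator(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            deadline = time.monotonic() + timeout
            while time.monotonic() < deadline:
                if func(*args, **kwargs):
                    return True
                time.sleep(interval)           # poll every `interval` seconds
            raise TimeoutError(error_message)  # e.g. "Document parsing timeout"
        return wrapper
    return decorator
```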
```diff
@@ -116,17 +116,34 @@ class TestDocumentsParse:
 
 @pytest.mark.p3
 def test_parse_100_files(add_dataset_func, tmp_path):
+    @wait_for(100, 1, "Document parsing timeout")
+    def condition(_dataset: DataSet, _count: int):
+        documents = _dataset.list_documents(page_size=_count * 2)
+        for document in documents:
+            if document.run != "DONE":
+                return False
+        return True
+
+    count = 100
     dataset = add_dataset_func
-    documents = bulk_upload_documents(dataset, 100, tmp_path)
+    documents = bulk_upload_documents(dataset, count, tmp_path)
     document_ids = [doc.id for doc in documents]
 
     dataset.async_parse_documents(document_ids=document_ids)
-    condition(dataset, document_ids)
+    condition(dataset, count)
     validate_document_details(dataset, document_ids)
 
 
 @pytest.mark.p3
 def test_concurrent_parse(add_dataset_func, tmp_path):
+    @wait_for(120, 1, "Document parsing timeout")
+    def condition(_dataset: DataSet, _count: int):
+        documents = _dataset.list_documents(page_size=_count * 2)
+        for document in documents:
+            if document.run != "DONE":
+                return False
+        return True
+
+    count = 100
     dataset = add_dataset_func
     documents = bulk_upload_documents(dataset, count, tmp_path)
```
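Both tests now define an identical count-based completion check inline, differing only in the `@wait_for` timeout (100 s vs 120 s for the concurrent case). If that duplication ever needs trimming, the predicate could be lifted into a shared helper along these lines (a sketch, not part of the commit):

```python
# Sketch only: equivalent to the condition defined inside both tests,
# expressed as a shared predicate without the @wait_for timeout baked in.
from ragflow_sdk import DataSet  # exact import path assumed; the test module imports DataSet itself


def all_documents_done(_dataset: DataSet, _count: int) -> bool:
    # page_size is doubled so a single page covers every uploaded document
    documents = _dataset.list_documents(page_size=_count * 2)
    return all(document.run == "DONE" for document in documents)
```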
```diff
@@ -141,5 +158,5 @@ def test_concurrent_parse(add_dataset_func, tmp_path):
     responses = list(as_completed(futures))
     assert len(responses) == count, responses
 
-    condition(dataset, document_ids)
+    condition(dataset, count)
     validate_document_details(dataset, document_ids)
```
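The `futures` consumed by `as_completed` above are built outside this hunk, presumably by a `concurrent.futures` pool submitting one parse request per document. A hedged sketch of that surrounding test body; the executor setup and `max_workers` value are assumptions, while the names and the final calls mirror the diff:

```python
# Hedged sketch of the part of test_concurrent_parse outside this hunk.
# The inline @wait_for condition shown in the previous hunk is omitted here,
# and bulk_upload_documents / validate_document_details are suite helpers.
from concurrent.futures import ThreadPoolExecutor, as_completed


def test_concurrent_parse(add_dataset_func, tmp_path):
    count = 100
    dataset = add_dataset_func
    documents = bulk_upload_documents(dataset, count, tmp_path)
    document_ids = [doc.id for doc in documents]

    with ThreadPoolExecutor(max_workers=5) as executor:  # assumed pool size
        futures = [
            executor.submit(dataset.async_parse_documents, document_ids=[doc_id])
            for doc_id in document_ids
        ]
    responses = list(as_completed(futures))
    assert len(responses) == count, responses

    condition(dataset, count)                         # count-based check from the diff
    validate_document_details(dataset, document_ids)
```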