Mirror of https://github.com/infiniflow/ragflow.git (synced 2025-12-08 20:42:30 +08:00)
Fix: Increase timeouts for document parsing and model checks (#8996)
### What problem does this PR solve?

- Extended the embedding model timeout from 3 to 10 seconds in api_utils.py
- Added more time for large file batches and concurrent parsing operations to prevent test flakiness
- Imported from #8940
- CI run: https://github.com/infiniflow/ragflow/actions/runs/16422052652

### Type of change

- [x] Bug Fix (non-breaking change which fixes an issue)
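Both test suites gate on a `wait_for(timeout, interval, message)` decorator that re-runs a polling condition until it returns truthy or the timeout elapses, so the diff below is purely about widening that time budget (100 s and 120 s → 200 s). Below is a minimal sketch of such a decorator, assuming a simple retry loop; the real helper lives in the test utilities and may differ:

```python
import time
from functools import wraps


def wait_for(timeout=10, interval=1, error_msg="condition not met"):
    """Sketch of a polling decorator: retry the wrapped condition until it
    returns truthy or `timeout` seconds have passed. Illustrative only; the
    actual helper in the ragflow test utilities may be implemented differently."""
    def decorator(func):
        @wraps(func)
        def wrapper(*args, **kwargs):
            deadline = time.monotonic() + timeout
            while time.monotonic() < deadline:
                if func(*args, **kwargs):
                    return True
                time.sleep(interval)  # wait `interval` seconds between polls
            raise TimeoutError(error_msg)
        return wrapper
    return decorator
```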
@@ -167,7 +167,7 @@ class TestDocumentsParse:
 
 @pytest.mark.p3
 def test_parse_100_files(HttpApiAuth, add_dataset_func, tmp_path):
-    @wait_for(100, 1, "Document parsing timeout")
+    @wait_for(200, 1, "Document parsing timeout")
     def condition(_auth, _dataset_id, _document_num):
         res = list_documents(_auth, _dataset_id, {"page_size": _document_num})
         for doc in res["data"]["docs"]:
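The hunk is truncated inside the polling condition; a plausible completion is sketched below so the flow is easier to follow. The `run` field and the `"DONE"` value are assumptions about the document status payload, not something shown in this diff:

```python
@wait_for(200, 1, "Document parsing timeout")
def condition(_auth, _dataset_id, _document_num):
    res = list_documents(_auth, _dataset_id, {"page_size": _document_num})
    for doc in res["data"]["docs"]:
        if doc.get("run") != "DONE":  # assumed completion status value
            return False
    return True
```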
@@ -188,7 +188,7 @@ def test_parse_100_files(HttpApiAuth, add_dataset_func, tmp_path):
 
 @pytest.mark.p3
 def test_concurrent_parse(HttpApiAuth, add_dataset_func, tmp_path):
-    @wait_for(120, 1, "Document parsing timeout")
+    @wait_for(200, 1, "Document parsing timeout")
     def condition(_auth, _dataset_id, _document_num):
         res = list_documents(_auth, _dataset_id, {"page_size": _document_num})
         for doc in res["data"]["docs"]:
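test_concurrent_parse presumably issues parse requests in parallel before falling back to the same polling condition, which is why queued jobs can need the larger 200-second budget. A rough sketch of that pattern, where `parse_documents` is a hypothetical stand-in for the test suite's HTTP helper:

```python
from concurrent.futures import ThreadPoolExecutor, as_completed


def parse_in_parallel(auth, dataset_id, document_ids, workers=5):
    """Submit one parse request per document from a small thread pool.
    Sketch only: `parse_documents` is a hypothetical helper, and the real
    test may batch or structure its requests differently."""
    with ThreadPoolExecutor(max_workers=workers) as pool:
        futures = [
            pool.submit(parse_documents, auth, dataset_id, {"document_ids": [doc_id]})
            for doc_id in document_ids
        ]
        # Surface any submission error; parsing itself is asynchronous on the
        # server, which is why the test then polls with @wait_for.
        for future in as_completed(futures):
            future.result()
```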
@@ -116,7 +116,7 @@ class TestDocumentsParse:
 
 @pytest.mark.p3
 def test_parse_100_files(add_dataset_func, tmp_path):
-    @wait_for(100, 1, "Document parsing timeout")
+    @wait_for(200, 1, "Document parsing timeout")
     def condition(_dataset: DataSet, _count: int):
         documents = _dataset.list_documents(page_size=_count * 2)
         for document in documents:
@@ -136,7 +136,7 @@ def test_parse_100_files(add_dataset_func, tmp_path):
 
 @pytest.mark.p3
 def test_concurrent_parse(add_dataset_func, tmp_path):
-    @wait_for(120, 1, "Document parsing timeout")
+    @wait_for(200, 1, "Document parsing timeout")
     def condition(_dataset: DataSet, _count: int):
         documents = _dataset.list_documents(page_size=_count * 2)
         for document in documents:
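The SDK-side tests poll through `DataSet.list_documents` and inspect document objects rather than raw JSON. A plausible completion of the truncated condition, again assuming a `run` attribute with a `"DONE"` terminal value:

```python
@wait_for(200, 1, "Document parsing timeout")
def condition(_dataset: DataSet, _count: int):
    documents = _dataset.list_documents(page_size=_count * 2)
    for document in documents:
        if document.run != "DONE":  # assumed completion status value
            return False
    return True
```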