Fix: Increase timeouts for document parsing and model checks (#8996)

### What problem does this PR solve?

- Extended the embedding model timeout from 3 to 10 seconds in api_utils.py (a hedged sketch of this kind of change follows this list)
- Raised the `wait_for` timeouts for the 100-file batch and concurrent parsing tests (from 100 s and 120 s to 200 s) to prevent test flakiness
- Imported from #8940
- Related CI run: https://github.com/infiniflow/ragflow/actions/runs/16422052652
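The api_utils.py hunk is not included in the diffs below, so the snippet here is only a hedged illustration of the kind of change the first bullet describes; the constant, function, and endpoint parameter (`EMBEDDING_CHECK_TIMEOUT`, `embedding_model_available`, `probe_url`) are made up for illustration and are not ragflow's actual code.

```python
import requests

# Hypothetical sketch only: the names below are illustrative placeholders,
# not the actual api_utils.py helper.
EMBEDDING_CHECK_TIMEOUT = 10  # raised from 3 seconds, per the first bullet


def embedding_model_available(probe_url: str, api_key: str) -> bool:
    """Return True if the embedding-model endpoint answers within the timeout."""
    try:
        res = requests.get(
            probe_url,
            headers={"Authorization": f"Bearer {api_key}"},
            timeout=EMBEDDING_CHECK_TIMEOUT,
        )
    except requests.RequestException:  # covers connection errors and timeouts
        return False
    return res.status_code == 200
```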

### Type of change

- [x] Bug Fix (non-breaking change which fixes an issue)
Commit b4b6d296ea (parent d16505691c), authored by Liu An and committed via GitHub on 2025-07-23 15:08:36 +08:00.
3 changed files with 8 additions and 10 deletions.

```diff
@@ -167,7 +167,7 @@ class TestDocumentsParse:
 @pytest.mark.p3
 def test_parse_100_files(HttpApiAuth, add_dataset_func, tmp_path):
-    @wait_for(100, 1, "Document parsing timeout")
+    @wait_for(200, 1, "Document parsing timeout")
     def condition(_auth, _dataset_id, _document_num):
         res = list_documents(_auth, _dataset_id, {"page_size": _document_num})
         for doc in res["data"]["docs"]:
@@ -188,7 +188,7 @@ def test_parse_100_files(HttpApiAuth, add_dataset_func, tmp_path):
 @pytest.mark.p3
 def test_concurrent_parse(HttpApiAuth, add_dataset_func, tmp_path):
-    @wait_for(120, 1, "Document parsing timeout")
+    @wait_for(200, 1, "Document parsing timeout")
     def condition(_auth, _dataset_id, _document_num):
         res = list_documents(_auth, _dataset_id, {"page_size": _document_num})
         for doc in res["data"]["docs"]:
```


```diff
@@ -116,7 +116,7 @@ class TestDocumentsParse:
 @pytest.mark.p3
 def test_parse_100_files(add_dataset_func, tmp_path):
-    @wait_for(100, 1, "Document parsing timeout")
+    @wait_for(200, 1, "Document parsing timeout")
     def condition(_dataset: DataSet, _count: int):
         documents = _dataset.list_documents(page_size=_count * 2)
         for document in documents:
@@ -136,7 +136,7 @@ def test_parse_100_files(add_dataset_func, tmp_path):
 @pytest.mark.p3
 def test_concurrent_parse(add_dataset_func, tmp_path):
-    @wait_for(120, 1, "Document parsing timeout")
+    @wait_for(200, 1, "Document parsing timeout")
     def condition(_dataset: DataSet, _count: int):
         documents = _dataset.list_documents(page_size=_count * 2)
         for document in documents:
```
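The `wait_for` helper itself is not part of this diff. The following is a minimal sketch, assuming it behaves the way its call sites above suggest: `wait_for(timeout, interval, message)` re-evaluates the decorated condition until it returns a truthy value, and raises with the given message once the timeout elapses. This is an assumption about the test suite's helper, not its actual implementation.

```python
import functools
import time


# Sketch of a polling decorator consistent with the call sites above
# (assumed behavior, not ragflow's actual wait_for).
def wait_for(timeout: float, interval: float, message: str):
    def decorator(condition):
        @functools.wraps(condition)
        def wrapper(*args, **kwargs):
            deadline = time.monotonic() + timeout
            while time.monotonic() < deadline:
                result = condition(*args, **kwargs)
                if result:  # condition reports parsing is finished
                    return result
                time.sleep(interval)  # poll again after the given interval
            raise TimeoutError(message)
        return wrapper
    return decorator
```

Under that reading, raising the first argument from 100 or 120 to 200 simply gives slow CI runners up to 200 seconds of one-second polls before the parsing tests are declared failed.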