mirror of
https://github.com/infiniflow/ragflow.git
synced 2025-12-08 20:42:30 +08:00
Test: Refactor test concurrency handling and add SDK chunk management tests (#8112)
### What problem does this PR solve?

- Improve concurrent test cases by using `as_completed` for better reliability
- Rename variables for clarity (`chunk_num` -> `count`)
- Add new SDK API test suite for chunk management operations
- Update HTTP API tests with consistent concurrency patterns

### Type of change

- [x] Add test cases
- [x] Refactoring
This commit is contained in:
@ -22,11 +22,7 @@ from utils.file_utils import create_txt_file
|
||||
|
||||
# DATASET MANAGEMENT
|
||||
def batch_create_datasets(client: RAGFlow, num: int) -> list[DataSet]:
    """Bulk-create datasets through the SDK client.

    Creates ``num`` datasets named ``dataset_0`` .. ``dataset_{num-1}`` and
    returns the resulting DataSet objects in creation order.
    """
    names = (f"dataset_{index}" for index in range(num))
    return [client.create_dataset(name=dataset_name) for dataset_name in names]
|
||||
|
||||
|
||||
# FILE MANAGEMENT WITHIN DATASET
|
||||
@ -39,3 +35,8 @@ def bulk_upload_documents(dataset: DataSet, num: int, tmp_path: Path) -> list[Do
|
||||
document_infos.append({"display_name": fp.name, "blob": blob})
|
||||
|
||||
return dataset.upload_documents(document_infos)
|
||||
|
||||
|
||||
# CHUNK MANAGEMENT WITHIN DATASET
|
||||
def batch_add_chunks(document: Document, num: int):
    """Append ``num`` test chunks to *document*.

    Each chunk's content is ``chunk test {i}`` for ``i`` in ``0..num-1``;
    returns whatever the SDK's ``add_chunk`` yields, in insertion order.
    """
    contents = (f"chunk test {i}" for i in range(num))
    return [document.add_chunk(content=text) for text in contents]
|
||||
|
||||
Reference in New Issue
Block a user