Test: Added test cases for Create Chat Assistant HTTP API (#6763)

### What problem does this PR solve?

Covers the [create chat assistant](https://ragflow.io/docs/v0.17.2/http_api_reference#create-chat-assistant) endpoint.
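
For context, a minimal sketch of the request shape these cases exercise, following the linked v0.17.2 reference (the base URL and API key below are placeholders, not test configuration):

```python
# Sketch only: the Create Chat Assistant request covered by these tests,
# assuming a local RAGFlow instance. BASE_URL and API_KEY are placeholders.
import requests

BASE_URL = "http://localhost:9380"
API_KEY = "<YOUR_API_KEY>"

res = requests.post(
    f"{BASE_URL}/api/v1/chats",
    headers={"Authorization": f"Bearer {API_KEY}"},
    json={"name": "test_chat_assistant"},
)
assert res.json()["code"] == 0  # RAGFlow responses use code 0 for success
```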

### Type of change

- [x] add test cases
Author: liu an
Date: 2025-04-02 18:49:59 +08:00
Committed by: GitHub
Parent: 6c77ef5a5e
Commit: 0d1c5fdd2f
4 changed files with 284 additions and 19 deletions


@@ -15,7 +15,8 @@
 #
 import pytest
-from common import batch_create_datasets, bulk_upload_documents, delete_datasets
+from common import add_chunk, batch_create_datasets, bulk_upload_documents, delete_chat_assistants, delete_datasets, list_documnets, parse_documnets
+from libs.utils import wait_for
 from libs.utils.file_utils import (
     create_docx_file,
     create_eml_file,
@@ -30,12 +31,27 @@ from libs.utils.file_utils import (
 )
 
 
+@wait_for(30, 1, "Document parsing timeout")
+def condition(_auth, _dataset_id):
+    res = list_documnets(_auth, _dataset_id)
+    for doc in res["data"]["docs"]:
+        if doc["run"] != "DONE":
+            return False
+    return True
+
+
 @pytest.fixture(scope="function")
 def clear_datasets(get_http_api_auth):
     yield
     delete_datasets(get_http_api_auth)
+
+
+@pytest.fixture(scope="function")
+def clear_chat_assistants(get_http_api_auth):
+    yield
+    delete_chat_assistants(get_http_api_auth)
 
 
 @pytest.fixture
 def generate_test_files(request, tmp_path):
     file_creators = {
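
`wait_for` is imported from `libs.utils` but not shown in this diff. A plausible sketch, assuming it polls the decorated predicate every `interval` seconds until it returns truthy or `timeout` seconds elapse (signature inferred from the `wait_for(30, 1, "Document parsing timeout")` usage above):

```python
# Hypothetical sketch of the wait_for helper used above; the real
# implementation lives in libs/utils and may differ.
import functools
import time

def wait_for(timeout, interval, error_msg):
    def decorator(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            deadline = time.monotonic() + timeout
            while time.monotonic() < deadline:
                if func(*args, **kwargs):  # predicate: parsing finished?
                    return True
                time.sleep(interval)
            raise TimeoutError(error_msg)  # e.g. "Document parsing timeout"
        return wrapper
    return decorator
```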
@@ -92,3 +108,21 @@ def add_document(get_http_api_auth, add_dataset, ragflow_tmp_dir):
     dataset_id = add_dataset
     document_ids = bulk_upload_documents(get_http_api_auth, dataset_id, 1, ragflow_tmp_dir)
     return dataset_id, document_ids[0]
+
+
+@pytest.fixture(scope="class")
+def add_chunks(get_http_api_auth, add_document):
+    dataset_id, document_id = add_document
+    parse_documnets(get_http_api_auth, dataset_id, {"document_ids": [document_id]})
+    condition(get_http_api_auth, dataset_id)
+
+    chunk_ids = []
+    for i in range(4):
+        res = add_chunk(get_http_api_auth, dataset_id, document_id, {"content": f"chunk test {i}"})
+        chunk_ids.append(res["data"]["chunk"]["id"])
+
+    # Workaround for issues/6487: wait briefly after adding chunks
+    from time import sleep
+
+    sleep(1)
+    return dataset_id, document_id, chunk_ids
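
For illustration, a test consuming these fixtures might look like the sketch below; `create_chat_assistant` is a hypothetical helper named by analogy with `add_chunk`, and the actual cases live in the test file this commit adds:

```python
# Hypothetical usage sketch, not the committed tests: create a chat
# assistant over the dataset prepared by add_chunks, and rely on the
# clear_chat_assistants fixture for teardown.
import pytest

from common import create_chat_assistant  # assumed helper

@pytest.mark.usefixtures("clear_chat_assistants")
def test_create_chat_assistant(get_http_api_auth, add_chunks):
    dataset_id, _document_id, _chunk_ids = add_chunks
    res = create_chat_assistant(
        get_http_api_auth,
        {"name": "test_chat", "dataset_ids": [dataset_id]},
    )
    assert res["code"] == 0
```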