Test: Refactor test fixtures to use HttpApiAuth naming consistently (#8180)

### What problem does this PR solve?

- Rename the `api_key` fixture to `HttpApiAuth` across all test files
- Update all dependent fixtures and test cases to use the new name
- Keep the same functionality while improving naming clarity

The rename better reflects the fixture's purpose as an HTTP API
authentication helper rather than just an API key.
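
For illustration, a minimal self-contained sketch of the pattern (the `FakeAuth` class and dummy token are hypothetical stand-ins for `RAGFlowHttpApiAuth`; the real fixtures appear in the diff below). Because pytest injects fixtures by parameter name, renaming the fixture definition also means renaming every dependent fixture and test parameter:

```python
import pytest


class FakeAuth:
    """Hypothetical stand-in for RAGFlowHttpApiAuth; wraps a token."""

    def __init__(self, token: str):
        self.token = token


@pytest.fixture(scope="session")
def HttpApiAuth():
    # Previously named `api_key`; the object is an auth helper, not a raw key string.
    return FakeAuth("dummy-token")


def test_uses_renamed_fixture(HttpApiAuth):
    # pytest resolves fixtures by argument name, so test signatures must change too.
    assert HttpApiAuth.token == "dummy-token"
```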

### Type of change

- [x] Refactoring
Authored by Liu An on 2025-06-11 14:25:40 +08:00, committed by GitHub
parent f29d9fa3f9 · commit 6aff3e052a
30 changed files with 756 additions and 756 deletions

View File

@@ -83,31 +83,31 @@ def ragflow_tmp_dir(request, tmp_path_factory):
 @pytest.fixture(scope="session")
-def api_key(token):
+def HttpApiAuth(token):
     return RAGFlowHttpApiAuth(token)

 @pytest.fixture(scope="function")
-def clear_datasets(request, api_key):
+def clear_datasets(request, HttpApiAuth):
     def cleanup():
-        delete_datasets(api_key, {"ids": None})
+        delete_datasets(HttpApiAuth, {"ids": None})
     request.addfinalizer(cleanup)

 @pytest.fixture(scope="function")
-def clear_chat_assistants(request, api_key):
+def clear_chat_assistants(request, HttpApiAuth):
     def cleanup():
-        delete_chat_assistants(api_key)
+        delete_chat_assistants(HttpApiAuth)
     request.addfinalizer(cleanup)

 @pytest.fixture(scope="function")
-def clear_session_with_chat_assistants(request, api_key, add_chat_assistants):
+def clear_session_with_chat_assistants(request, HttpApiAuth, add_chat_assistants):
     def cleanup():
         for chat_assistant_id in chat_assistant_ids:
-            delete_session_with_chat_assistants(api_key, chat_assistant_id)
+            delete_session_with_chat_assistants(HttpApiAuth, chat_assistant_id)
     request.addfinalizer(cleanup)

@@ -115,51 +115,51 @@ def clear_session_with_chat_assistants(request, api_key, add_chat_assistants):
 @pytest.fixture(scope="class")
-def add_dataset(request, api_key):
+def add_dataset(request, HttpApiAuth):
     def cleanup():
-        delete_datasets(api_key, {"ids": None})
+        delete_datasets(HttpApiAuth, {"ids": None})
     request.addfinalizer(cleanup)
-    dataset_ids = batch_create_datasets(api_key, 1)
+    dataset_ids = batch_create_datasets(HttpApiAuth, 1)
     return dataset_ids[0]

 @pytest.fixture(scope="function")
-def add_dataset_func(request, api_key):
+def add_dataset_func(request, HttpApiAuth):
     def cleanup():
-        delete_datasets(api_key, {"ids": None})
+        delete_datasets(HttpApiAuth, {"ids": None})
     request.addfinalizer(cleanup)
-    return batch_create_datasets(api_key, 1)[0]
+    return batch_create_datasets(HttpApiAuth, 1)[0]

 @pytest.fixture(scope="class")
-def add_document(api_key, add_dataset, ragflow_tmp_dir):
+def add_document(HttpApiAuth, add_dataset, ragflow_tmp_dir):
     dataset_id = add_dataset
-    document_ids = bulk_upload_documents(api_key, dataset_id, 1, ragflow_tmp_dir)
+    document_ids = bulk_upload_documents(HttpApiAuth, dataset_id, 1, ragflow_tmp_dir)
     return dataset_id, document_ids[0]

 @pytest.fixture(scope="class")
-def add_chunks(api_key, add_document):
+def add_chunks(HttpApiAuth, add_document):
     dataset_id, document_id = add_document
-    parse_documents(api_key, dataset_id, {"document_ids": [document_id]})
-    condition(api_key, dataset_id)
-    chunk_ids = batch_add_chunks(api_key, dataset_id, document_id, 4)
+    parse_documents(HttpApiAuth, dataset_id, {"document_ids": [document_id]})
+    condition(HttpApiAuth, dataset_id)
+    chunk_ids = batch_add_chunks(HttpApiAuth, dataset_id, document_id, 4)
     sleep(1)  # issues/6487
     return dataset_id, document_id, chunk_ids

 @pytest.fixture(scope="class")
-def add_chat_assistants(request, api_key, add_document):
+def add_chat_assistants(request, HttpApiAuth, add_document):
     def cleanup():
-        delete_chat_assistants(api_key)
+        delete_chat_assistants(HttpApiAuth)
     request.addfinalizer(cleanup)
     dataset_id, document_id = add_document
-    parse_documents(api_key, dataset_id, {"document_ids": [document_id]})
-    condition(api_key, dataset_id)
-    return dataset_id, document_id, batch_create_chat_assistants(api_key, 5)
+    parse_documents(HttpApiAuth, dataset_id, {"document_ids": [document_id]})
+    condition(HttpApiAuth, dataset_id)
+    return dataset_id, document_id, batch_create_chat_assistants(HttpApiAuth, 5)

View File

@@ -28,13 +28,13 @@ def condition(_auth, _dataset_id):
 @pytest.fixture(scope="function")
-def add_chat_assistants_func(request, api_key, add_document):
+def add_chat_assistants_func(request, HttpApiAuth, add_document):
     def cleanup():
-        delete_chat_assistants(api_key)
+        delete_chat_assistants(HttpApiAuth)
     request.addfinalizer(cleanup)
     dataset_id, document_id = add_document
-    parse_documents(api_key, dataset_id, {"document_ids": [document_id]})
-    condition(api_key, dataset_id)
-    return dataset_id, document_id, batch_create_chat_assistants(api_key, 5)
+    parse_documents(HttpApiAuth, dataset_id, {"document_ids": [document_id]})
+    condition(HttpApiAuth, dataset_id)
+    return dataset_id, document_id, batch_create_chat_assistants(HttpApiAuth, 5)

View File

@@ -54,14 +54,14 @@ class TestChatAssistantCreate:
             ({"name": "case insensitive"}, 102, "Duplicated chat name in creating chat."),
         ],
     )
-    def test_name(self, api_key, add_chunks, payload, expected_code, expected_message):
+    def test_name(self, HttpApiAuth, add_chunks, payload, expected_code, expected_message):
         payload["dataset_ids"] = []  # issues/
         if payload["name"] == "duplicated_name":
-            create_chat_assistant(api_key, payload)
+            create_chat_assistant(HttpApiAuth, payload)
         elif payload["name"] == "case insensitive":
-            create_chat_assistant(api_key, {"name": payload["name"].upper()})
-        res = create_chat_assistant(api_key, payload)
+            create_chat_assistant(HttpApiAuth, {"name": payload["name"].upper()})
+        res = create_chat_assistant(HttpApiAuth, payload)
         assert res["code"] == expected_code, res
         if expected_code == 0:
             assert res["data"]["name"] == payload["name"]

@@ -78,7 +78,7 @@ class TestChatAssistantCreate:
             ("invalid_dataset_id", 102, "You don't own the dataset i"),
         ],
     )
-    def test_dataset_ids(self, api_key, add_chunks, dataset_ids, expected_code, expected_message):
+    def test_dataset_ids(self, HttpApiAuth, add_chunks, dataset_ids, expected_code, expected_message):
         dataset_id, _, _ = add_chunks
         payload = {"name": "ragflow test"}
         if callable(dataset_ids):

@@ -86,7 +86,7 @@ class TestChatAssistantCreate:
         else:
             payload["dataset_ids"] = dataset_ids
-        res = create_chat_assistant(api_key, payload)
+        res = create_chat_assistant(HttpApiAuth, payload)
         assert res["code"] == expected_code, res
         if expected_code == 0:
             assert res["data"]["name"] == payload["name"]

@@ -94,10 +94,10 @@ class TestChatAssistantCreate:
             assert res["message"] == expected_message

     @pytest.mark.p3
-    def test_avatar(self, api_key, tmp_path):
+    def test_avatar(self, HttpApiAuth, tmp_path):
         fn = create_image_file(tmp_path / "ragflow_test.png")
         payload = {"name": "avatar_test", "avatar": encode_avatar(fn), "dataset_ids": []}
-        res = create_chat_assistant(api_key, payload)
+        res = create_chat_assistant(HttpApiAuth, payload)
         assert res["code"] == 0

     @pytest.mark.p2

@@ -135,10 +135,10 @@ class TestChatAssistantCreate:
             pytest.param({"unknown": "unknown"}, 0, "", marks=pytest.mark.skip),
         ],
     )
-    def test_llm(self, api_key, add_chunks, llm, expected_code, expected_message):
+    def test_llm(self, HttpApiAuth, add_chunks, llm, expected_code, expected_message):
         dataset_id, _, _ = add_chunks
         payload = {"name": "llm_test", "dataset_ids": [dataset_id], "llm": llm}
-        res = create_chat_assistant(api_key, payload)
+        res = create_chat_assistant(HttpApiAuth, payload)
         assert res["code"] == expected_code
         if expected_code == 0:
             if llm:

@@ -202,10 +202,10 @@ class TestChatAssistantCreate:
             pytest.param({"unknown": "unknown"}, 0, "", marks=pytest.mark.skip),
         ],
     )
-    def test_prompt(self, api_key, add_chunks, prompt, expected_code, expected_message):
+    def test_prompt(self, HttpApiAuth, add_chunks, prompt, expected_code, expected_message):
         dataset_id, _, _ = add_chunks
         payload = {"name": "prompt_test", "dataset_ids": [dataset_id], "prompt": prompt}
-        res = create_chat_assistant(api_key, payload)
+        res = create_chat_assistant(HttpApiAuth, payload)
         assert res["code"] == expected_code
         if expected_code == 0:
             if prompt:

@@ -233,9 +233,9 @@ class TestChatAssistantCreate:
 class TestChatAssistantCreate2:
     @pytest.mark.p2
-    def test_unparsed_document(self, api_key, add_document):
+    def test_unparsed_document(self, HttpApiAuth, add_document):
         dataset_id, _ = add_document
         payload = {"name": "prompt_test", "dataset_ids": [dataset_id]}
-        res = create_chat_assistant(api_key, payload)
+        res = create_chat_assistant(HttpApiAuth, payload)
         assert res["code"] == 102
         assert "doesn't own parsed file" in res["message"]

View File

@@ -52,16 +52,16 @@ class TestChatAssistantsDelete:
             pytest.param(lambda r: {"ids": r}, 0, "", 0, marks=pytest.mark.p1),
         ],
     )
-    def test_basic_scenarios(self, api_key, add_chat_assistants_func, payload, expected_code, expected_message, remaining):
+    def test_basic_scenarios(self, HttpApiAuth, add_chat_assistants_func, payload, expected_code, expected_message, remaining):
         _, _, chat_assistant_ids = add_chat_assistants_func
         if callable(payload):
             payload = payload(chat_assistant_ids)
-        res = delete_chat_assistants(api_key, payload)
+        res = delete_chat_assistants(HttpApiAuth, payload)
         assert res["code"] == expected_code
         if res["code"] != 0:
             assert res["message"] == expected_message
-        res = list_chat_assistants(api_key)
+        res = list_chat_assistants(HttpApiAuth)
         assert len(res["data"]) == remaining

     @pytest.mark.parametrize(

@@ -72,55 +72,55 @@ class TestChatAssistantsDelete:
             pytest.param(lambda r: {"ids": r + ["invalid_id"]}, marks=pytest.mark.p3),
         ],
     )
-    def test_delete_partial_invalid_id(self, api_key, add_chat_assistants_func, payload):
+    def test_delete_partial_invalid_id(self, HttpApiAuth, add_chat_assistants_func, payload):
         _, _, chat_assistant_ids = add_chat_assistants_func
         if callable(payload):
             payload = payload(chat_assistant_ids)
-        res = delete_chat_assistants(api_key, payload)
+        res = delete_chat_assistants(HttpApiAuth, payload)
         assert res["code"] == 0
         assert res["data"]["errors"][0] == "Assistant(invalid_id) not found."
         assert res["data"]["success_count"] == 5
-        res = list_chat_assistants(api_key)
+        res = list_chat_assistants(HttpApiAuth)
         assert len(res["data"]) == 0

     @pytest.mark.p3
-    def test_repeated_deletion(self, api_key, add_chat_assistants_func):
+    def test_repeated_deletion(self, HttpApiAuth, add_chat_assistants_func):
         _, _, chat_assistant_ids = add_chat_assistants_func
-        res = delete_chat_assistants(api_key, {"ids": chat_assistant_ids})
+        res = delete_chat_assistants(HttpApiAuth, {"ids": chat_assistant_ids})
         assert res["code"] == 0
-        res = delete_chat_assistants(api_key, {"ids": chat_assistant_ids})
+        res = delete_chat_assistants(HttpApiAuth, {"ids": chat_assistant_ids})
         assert res["code"] == 102
         assert "not found" in res["message"]

     @pytest.mark.p3
-    def test_duplicate_deletion(self, api_key, add_chat_assistants_func):
+    def test_duplicate_deletion(self, HttpApiAuth, add_chat_assistants_func):
         _, _, chat_assistant_ids = add_chat_assistants_func
-        res = delete_chat_assistants(api_key, {"ids": chat_assistant_ids + chat_assistant_ids})
+        res = delete_chat_assistants(HttpApiAuth, {"ids": chat_assistant_ids + chat_assistant_ids})
         assert res["code"] == 0
         assert "Duplicate assistant ids" in res["data"]["errors"][0]
         assert res["data"]["success_count"] == 5
-        res = list_chat_assistants(api_key)
+        res = list_chat_assistants(HttpApiAuth)
         assert res["code"] == 0

     @pytest.mark.p3
-    def test_concurrent_deletion(self, api_key):
+    def test_concurrent_deletion(self, HttpApiAuth):
         count = 100
-        ids = batch_create_chat_assistants(api_key, count)
+        ids = batch_create_chat_assistants(HttpApiAuth, count)
         with ThreadPoolExecutor(max_workers=5) as executor:
-            futures = [executor.submit(delete_chat_assistants, api_key, {"ids": ids[i : i + 1]}) for i in range(count)]
+            futures = [executor.submit(delete_chat_assistants, HttpApiAuth, {"ids": ids[i : i + 1]}) for i in range(count)]
         responses = list(as_completed(futures))
         assert len(responses) == count, responses
         assert all(future.result()["code"] == 0 for future in futures)

     @pytest.mark.p3
-    def test_delete_10k(self, api_key):
-        ids = batch_create_chat_assistants(api_key, 1_000)
-        res = delete_chat_assistants(api_key, {"ids": ids})
+    def test_delete_10k(self, HttpApiAuth):
+        ids = batch_create_chat_assistants(HttpApiAuth, 1_000)
+        res = delete_chat_assistants(HttpApiAuth, {"ids": ids})
         assert res["code"] == 0
-        res = list_chat_assistants(api_key)
+        res = list_chat_assistants(HttpApiAuth)
         assert len(res["data"]) == 0

View File

@@ -43,8 +43,8 @@ class TestAuthorization:
 @pytest.mark.usefixtures("add_chat_assistants")
 class TestChatAssistantsList:
     @pytest.mark.p1
-    def test_default(self, api_key):
-        res = list_chat_assistants(api_key)
+    def test_default(self, HttpApiAuth):
+        res = list_chat_assistants(HttpApiAuth)
         assert res["code"] == 0
         assert len(res["data"]) == 5

@@ -73,8 +73,8 @@ class TestChatAssistantsList:
            ),
         ],
     )
-    def test_page(self, api_key, params, expected_code, expected_page_size, expected_message):
-        res = list_chat_assistants(api_key, params=params)
+    def test_page(self, HttpApiAuth, params, expected_code, expected_page_size, expected_message):
+        res = list_chat_assistants(HttpApiAuth, params=params)
         assert res["code"] == expected_code
         if expected_code == 0:
             assert len(res["data"]) == expected_page_size

@@ -108,13 +108,13 @@ class TestChatAssistantsList:
     )
     def test_page_size(
         self,
-        api_key,
+        HttpApiAuth,
         params,
         expected_code,
         expected_page_size,
         expected_message,
     ):
-        res = list_chat_assistants(api_key, params=params)
+        res = list_chat_assistants(HttpApiAuth, params=params)
         assert res["code"] == expected_code
         if expected_code == 0:
             assert len(res["data"]) == expected_page_size

@@ -146,13 +146,13 @@ class TestChatAssistantsList:
     )
     def test_orderby(
         self,
-        api_key,
+        HttpApiAuth,
         params,
         expected_code,
         assertions,
         expected_message,
     ):
-        res = list_chat_assistants(api_key, params=params)
+        res = list_chat_assistants(HttpApiAuth, params=params)
         assert res["code"] == expected_code
         if expected_code == 0:
             if callable(assertions):

@@ -183,13 +183,13 @@ class TestChatAssistantsList:
     )
     def test_desc(
         self,
-        api_key,
+        HttpApiAuth,
         params,
         expected_code,
         assertions,
         expected_message,
     ):
-        res = list_chat_assistants(api_key, params=params)
+        res = list_chat_assistants(HttpApiAuth, params=params)
         assert res["code"] == expected_code
         if expected_code == 0:
             if callable(assertions):

@@ -207,8 +207,8 @@ class TestChatAssistantsList:
             ({"name": "unknown"}, 102, 0, "The chat doesn't exist"),
         ],
     )
-    def test_name(self, api_key, params, expected_code, expected_num, expected_message):
-        res = list_chat_assistants(api_key, params=params)
+    def test_name(self, HttpApiAuth, params, expected_code, expected_num, expected_message):
+        res = list_chat_assistants(HttpApiAuth, params=params)
         assert res["code"] == expected_code
         if expected_code == 0:
             if params["name"] in [None, ""]:

@@ -230,7 +230,7 @@ class TestChatAssistantsList:
     )
     def test_id(
         self,
-        api_key,
+        HttpApiAuth,
         add_chat_assistants,
         chat_assistant_id,
         expected_code,

@@ -243,7 +243,7 @@ class TestChatAssistantsList:
         else:
             params = {"id": chat_assistant_id}
-        res = list_chat_assistants(api_key, params=params)
+        res = list_chat_assistants(HttpApiAuth, params=params)
         assert res["code"] == expected_code
         if expected_code == 0:
             if params["id"] in [None, ""]:

@@ -265,7 +265,7 @@ class TestChatAssistantsList:
     )
     def test_name_and_id(
         self,
-        api_key,
+        HttpApiAuth,
        add_chat_assistants,
         chat_assistant_id,
         name,

@@ -279,7 +279,7 @@ class TestChatAssistantsList:
         else:
             params = {"id": chat_assistant_id, "name": name}
-        res = list_chat_assistants(api_key, params=params)
+        res = list_chat_assistants(HttpApiAuth, params=params)
         assert res["code"] == expected_code
         if expected_code == 0:
             assert len(res["data"]) == expected_num

@@ -287,27 +287,27 @@ class TestChatAssistantsList:
             assert res["message"] == expected_message

     @pytest.mark.p3
-    def test_concurrent_list(self, api_key):
+    def test_concurrent_list(self, HttpApiAuth):
         count = 100
         with ThreadPoolExecutor(max_workers=5) as executor:
-            futures = [executor.submit(list_chat_assistants, api_key) for i in range(count)]
+            futures = [executor.submit(list_chat_assistants, HttpApiAuth) for i in range(count)]
         responses = list(as_completed(futures))
         assert len(responses) == count, responses
         assert all(future.result()["code"] == 0 for future in futures)

     @pytest.mark.p3
-    def test_invalid_params(self, api_key):
+    def test_invalid_params(self, HttpApiAuth):
         params = {"a": "b"}
-        res = list_chat_assistants(api_key, params=params)
+        res = list_chat_assistants(HttpApiAuth, params=params)
         assert res["code"] == 0
         assert len(res["data"]) == 5

     @pytest.mark.p2
-    def test_list_chats_after_deleting_associated_dataset(self, api_key, add_chat_assistants):
+    def test_list_chats_after_deleting_associated_dataset(self, HttpApiAuth, add_chat_assistants):
         dataset_id, _, _ = add_chat_assistants
-        res = delete_datasets(api_key, {"ids": [dataset_id]})
+        res = delete_datasets(HttpApiAuth, {"ids": [dataset_id]})
         assert res["code"] == 0
-        res = list_chat_assistants(api_key)
+        res = list_chat_assistants(HttpApiAuth)
         assert res["code"] == 0
         assert len(res["data"]) == 5

View File

@@ -51,13 +51,13 @@ class TestChatAssistantUpdate:
             pytest.param({"name": "TEST_CHAT_ASSISTANT_1"}, 102, "Duplicated chat name in updating chat.", marks=pytest.mark.p3),
         ],
     )
-    def test_name(self, api_key, add_chat_assistants_func, payload, expected_code, expected_message):
+    def test_name(self, HttpApiAuth, add_chat_assistants_func, payload, expected_code, expected_message):
         _, _, chat_assistant_ids = add_chat_assistants_func
-        res = update_chat_assistant(api_key, chat_assistant_ids[0], payload)
+        res = update_chat_assistant(HttpApiAuth, chat_assistant_ids[0], payload)
         assert res["code"] == expected_code, res
         if expected_code == 0:
-            res = list_chat_assistants(api_key, {"id": chat_assistant_ids[0]})
+            res = list_chat_assistants(HttpApiAuth, {"id": chat_assistant_ids[0]})
             assert res["data"][0]["name"] == payload.get("name")
         else:
             assert res["message"] == expected_message

@@ -71,7 +71,7 @@ class TestChatAssistantUpdate:
             pytest.param("invalid_dataset_id", 102, "You don't own the dataset i", marks=pytest.mark.p3),
         ],
     )
-    def test_dataset_ids(self, api_key, add_chat_assistants_func, dataset_ids, expected_code, expected_message):
+    def test_dataset_ids(self, HttpApiAuth, add_chat_assistants_func, dataset_ids, expected_code, expected_message):
         dataset_id, _, chat_assistant_ids = add_chat_assistants_func
         payload = {"name": "ragflow test"}
         if callable(dataset_ids):

@@ -79,20 +79,20 @@ class TestChatAssistantUpdate:
         else:
             payload["dataset_ids"] = dataset_ids
-        res = update_chat_assistant(api_key, chat_assistant_ids[0], payload)
+        res = update_chat_assistant(HttpApiAuth, chat_assistant_ids[0], payload)
         assert res["code"] == expected_code, res
         if expected_code == 0:
-            res = list_chat_assistants(api_key, {"id": chat_assistant_ids[0]})
+            res = list_chat_assistants(HttpApiAuth, {"id": chat_assistant_ids[0]})
             assert res["data"][0]["name"] == payload.get("name")
         else:
             assert res["message"] == expected_message

     @pytest.mark.p3
-    def test_avatar(self, api_key, add_chat_assistants_func, tmp_path):
+    def test_avatar(self, HttpApiAuth, add_chat_assistants_func, tmp_path):
         dataset_id, _, chat_assistant_ids = add_chat_assistants_func
         fn = create_image_file(tmp_path / "ragflow_test.png")
         payload = {"name": "avatar_test", "avatar": encode_avatar(fn), "dataset_ids": [dataset_id]}
-        res = update_chat_assistant(api_key, chat_assistant_ids[0], payload)
+        res = update_chat_assistant(HttpApiAuth, chat_assistant_ids[0], payload)
         assert res["code"] == 0

     @pytest.mark.p3

@@ -130,13 +130,13 @@ class TestChatAssistantUpdate:
             pytest.param({"unknown": "unknown"}, 0, "", marks=pytest.mark.skip),
         ],
     )
-    def test_llm(self, api_key, add_chat_assistants_func, llm, expected_code, expected_message):
+    def test_llm(self, HttpApiAuth, add_chat_assistants_func, llm, expected_code, expected_message):
         dataset_id, _, chat_assistant_ids = add_chat_assistants_func
         payload = {"name": "llm_test", "dataset_ids": [dataset_id], "llm": llm}
-        res = update_chat_assistant(api_key, chat_assistant_ids[0], payload)
+        res = update_chat_assistant(HttpApiAuth, chat_assistant_ids[0], payload)
         assert res["code"] == expected_code
         if expected_code == 0:
-            res = list_chat_assistants(api_key, {"id": chat_assistant_ids[0]})
+            res = list_chat_assistants(HttpApiAuth, {"id": chat_assistant_ids[0]})
             if llm:
                 for k, v in llm.items():
                     assert res["data"][0]["llm"][k] == v

@@ -198,13 +198,13 @@ class TestChatAssistantUpdate:
             pytest.param({"unknown": "unknown"}, 0, "", marks=pytest.mark.skip),
         ],
     )
-    def test_prompt(self, api_key, add_chat_assistants_func, prompt, expected_code, expected_message):
+    def test_prompt(self, HttpApiAuth, add_chat_assistants_func, prompt, expected_code, expected_message):
         dataset_id, _, chat_assistant_ids = add_chat_assistants_func
         payload = {"name": "prompt_test", "dataset_ids": [dataset_id], "prompt": prompt}
-        res = update_chat_assistant(api_key, chat_assistant_ids[0], payload)
+        res = update_chat_assistant(HttpApiAuth, chat_assistant_ids[0], payload)
         assert res["code"] == expected_code
         if expected_code == 0:
-            res = list_chat_assistants(api_key, {"id": chat_assistant_ids[0]})
+            res = list_chat_assistants(HttpApiAuth, {"id": chat_assistant_ids[0]})
             if prompt:
                 for k, v in prompt.items():
                     if k == "keywords_similarity_weight":

View File

@@ -32,16 +32,16 @@ def condition(_auth, _dataset_id):
 @pytest.fixture(scope="function")
-def add_chunks_func(request, api_key, add_document):
+def add_chunks_func(request, HttpApiAuth, add_document):
     def cleanup():
-        delete_chunks(api_key, dataset_id, document_id, {"chunk_ids": []})
+        delete_chunks(HttpApiAuth, dataset_id, document_id, {"chunk_ids": []})
     request.addfinalizer(cleanup)
     dataset_id, document_id = add_document
-    parse_documents(api_key, dataset_id, {"document_ids": [document_id]})
-    condition(api_key, dataset_id)
-    chunk_ids = batch_add_chunks(api_key, dataset_id, document_id, 4)
+    parse_documents(HttpApiAuth, dataset_id, {"document_ids": [document_id]})
+    condition(HttpApiAuth, dataset_id)
+    chunk_ids = batch_add_chunks(HttpApiAuth, dataset_id, document_id, 4)
     # issues/6487
     sleep(1)
     return dataset_id, document_id, chunk_ids

View File

@@ -68,17 +68,17 @@ class TestAddChunk:
             ({"content": "\n!?。;!?\"'"}, 0, ""),
         ],
     )
-    def test_content(self, api_key, add_document, payload, expected_code, expected_message):
+    def test_content(self, HttpApiAuth, add_document, payload, expected_code, expected_message):
         dataset_id, document_id = add_document
-        res = list_chunks(api_key, dataset_id, document_id)
+        res = list_chunks(HttpApiAuth, dataset_id, document_id)
         if res["code"] != 0:
             assert False, res
         chunks_count = res["data"]["doc"]["chunk_count"]
-        res = add_chunk(api_key, dataset_id, document_id, payload)
+        res = add_chunk(HttpApiAuth, dataset_id, document_id, payload)
         assert res["code"] == expected_code
         if expected_code == 0:
             validate_chunk_details(dataset_id, document_id, payload, res)
-            res = list_chunks(api_key, dataset_id, document_id)
+            res = list_chunks(HttpApiAuth, dataset_id, document_id)
             if res["code"] != 0:
                 assert False, res
             assert res["data"]["doc"]["chunk_count"] == chunks_count + 1

@@ -101,17 +101,17 @@ class TestAddChunk:
             ({"content": "chunk test", "important_keywords": 123}, 102, "`important_keywords` is required to be a list"),
         ],
     )
-    def test_important_keywords(self, api_key, add_document, payload, expected_code, expected_message):
+    def test_important_keywords(self, HttpApiAuth, add_document, payload, expected_code, expected_message):
         dataset_id, document_id = add_document
-        res = list_chunks(api_key, dataset_id, document_id)
+        res = list_chunks(HttpApiAuth, dataset_id, document_id)
         if res["code"] != 0:
             assert False, res
         chunks_count = res["data"]["doc"]["chunk_count"]
-        res = add_chunk(api_key, dataset_id, document_id, payload)
+        res = add_chunk(HttpApiAuth, dataset_id, document_id, payload)
         assert res["code"] == expected_code
         if expected_code == 0:
             validate_chunk_details(dataset_id, document_id, payload, res)
-            res = list_chunks(api_key, dataset_id, document_id)
+            res = list_chunks(HttpApiAuth, dataset_id, document_id)
             if res["code"] != 0:
                 assert False, res
             assert res["data"]["doc"]["chunk_count"] == chunks_count + 1

@@ -130,19 +130,19 @@ class TestAddChunk:
             ({"content": "chunk test", "questions": 123}, 102, "`questions` is required to be a list"),
         ],
     )
-    def test_questions(self, api_key, add_document, payload, expected_code, expected_message):
+    def test_questions(self, HttpApiAuth, add_document, payload, expected_code, expected_message):
         dataset_id, document_id = add_document
-        res = list_chunks(api_key, dataset_id, document_id)
+        res = list_chunks(HttpApiAuth, dataset_id, document_id)
         if res["code"] != 0:
             assert False, res
         chunks_count = res["data"]["doc"]["chunk_count"]
-        res = add_chunk(api_key, dataset_id, document_id, payload)
+        res = add_chunk(HttpApiAuth, dataset_id, document_id, payload)
         assert res["code"] == expected_code
         if expected_code == 0:
             validate_chunk_details(dataset_id, document_id, payload, res)
             if res["code"] != 0:
                 assert False, res
-            res = list_chunks(api_key, dataset_id, document_id)
+            res = list_chunks(HttpApiAuth, dataset_id, document_id)
             assert res["data"]["doc"]["chunk_count"] == chunks_count + 1
         else:
             assert res["message"] == expected_message

@@ -161,14 +161,14 @@ class TestAddChunk:
     )
     def test_invalid_dataset_id(
         self,
-        api_key,
+        HttpApiAuth,
         add_document,
         dataset_id,
         expected_code,
         expected_message,
     ):
         _, document_id = add_document
-        res = add_chunk(api_key, dataset_id, document_id, {"content": "a"})
+        res = add_chunk(HttpApiAuth, dataset_id, document_id, {"content": "a"})
         assert res["code"] == expected_code
         assert res["message"] == expected_message

@@ -184,49 +184,49 @@ class TestAddChunk:
            ),
         ],
     )
-    def test_invalid_document_id(self, api_key, add_document, document_id, expected_code, expected_message):
+    def test_invalid_document_id(self, HttpApiAuth, add_document, document_id, expected_code, expected_message):
         dataset_id, _ = add_document
-        res = add_chunk(api_key, dataset_id, document_id, {"content": "chunk test"})
+        res = add_chunk(HttpApiAuth, dataset_id, document_id, {"content": "chunk test"})
         assert res["code"] == expected_code
         assert res["message"] == expected_message

     @pytest.mark.p3
-    def test_repeated_add_chunk(self, api_key, add_document):
+    def test_repeated_add_chunk(self, HttpApiAuth, add_document):
         payload = {"content": "chunk test"}
         dataset_id, document_id = add_document
-        res = list_chunks(api_key, dataset_id, document_id)
+        res = list_chunks(HttpApiAuth, dataset_id, document_id)
         if res["code"] != 0:
             assert False, res
         chunks_count = res["data"]["doc"]["chunk_count"]
-        res = add_chunk(api_key, dataset_id, document_id, payload)
+        res = add_chunk(HttpApiAuth, dataset_id, document_id, payload)
         assert res["code"] == 0
         validate_chunk_details(dataset_id, document_id, payload, res)
-        res = list_chunks(api_key, dataset_id, document_id)
+        res = list_chunks(HttpApiAuth, dataset_id, document_id)
         if res["code"] != 0:
             assert False, res
         assert res["data"]["doc"]["chunk_count"] == chunks_count + 1
-        res = add_chunk(api_key, dataset_id, document_id, payload)
+        res = add_chunk(HttpApiAuth, dataset_id, document_id, payload)
         assert res["code"] == 0
         validate_chunk_details(dataset_id, document_id, payload, res)
-        res = list_chunks(api_key, dataset_id, document_id)
+        res = list_chunks(HttpApiAuth, dataset_id, document_id)
         if res["code"] != 0:
             assert False, res
         assert res["data"]["doc"]["chunk_count"] == chunks_count + 2

     @pytest.mark.p2
-    def test_add_chunk_to_deleted_document(self, api_key, add_document):
+    def test_add_chunk_to_deleted_document(self, HttpApiAuth, add_document):
         dataset_id, document_id = add_document
-        delete_documents(api_key, dataset_id, {"ids": [document_id]})
-        res = add_chunk(api_key, dataset_id, document_id, {"content": "chunk test"})
+        delete_documents(HttpApiAuth, dataset_id, {"ids": [document_id]})
+        res = add_chunk(HttpApiAuth, dataset_id, document_id, {"content": "chunk test"})
         assert res["code"] == 102
         assert res["message"] == f"You don't own the document {document_id}."

     @pytest.mark.skip(reason="issues/6411")
-    def test_concurrent_add_chunk(self, api_key, add_document):
+    def test_concurrent_add_chunk(self, HttpApiAuth, add_document):
         count = 50
         dataset_id, document_id = add_document
-        res = list_chunks(api_key, dataset_id, document_id)
+        res = list_chunks(HttpApiAuth, dataset_id, document_id)
         if res["code"] != 0:
             assert False, res
         chunks_count = res["data"]["doc"]["chunk_count"]

@@ -235,7 +235,7 @@ class TestAddChunk:
             futures = [
                 executor.submit(
                     add_chunk,
-                    api_key,
+                    HttpApiAuth,
                     dataset_id,
                     document_id,
                     {"content": f"chunk test {i}"},

@@ -245,7 +245,7 @@ class TestAddChunk:
         responses = list(as_completed(futures))
         assert len(responses) == count, responses
         assert all(future.result()["code"] == 0 for future in futures)
-        res = list_chunks(api_key, dataset_id, document_id)
+        res = list_chunks(HttpApiAuth, dataset_id, document_id)
         if res["code"] != 0:
             assert False, res
         assert res["data"]["doc"]["chunk_count"] == chunks_count + count

View File

@@ -52,9 +52,9 @@ class TestChunksDeletion:
            ),
         ],
     )
-    def test_invalid_dataset_id(self, api_key, add_chunks_func, dataset_id, expected_code, expected_message):
+    def test_invalid_dataset_id(self, HttpApiAuth, add_chunks_func, dataset_id, expected_code, expected_message):
         _, document_id, chunk_ids = add_chunks_func
-        res = delete_chunks(api_key, dataset_id, document_id, {"chunk_ids": chunk_ids})
+        res = delete_chunks(HttpApiAuth, dataset_id, document_id, {"chunk_ids": chunk_ids})
         assert res["code"] == expected_code
         assert res["message"] == expected_message

@@ -66,9 +66,9 @@ class TestChunksDeletion:
             ("invalid_document_id", 100, """LookupError("Can't find the document with ID invalid_document_id!")"""),
         ],
     )
-    def test_invalid_document_id(self, api_key, add_chunks_func, document_id, expected_code, expected_message):
+    def test_invalid_document_id(self, HttpApiAuth, add_chunks_func, document_id, expected_code, expected_message):
         dataset_id, _, chunk_ids = add_chunks_func
-        res = delete_chunks(api_key, dataset_id, document_id, {"chunk_ids": chunk_ids})
+        res = delete_chunks(HttpApiAuth, dataset_id, document_id, {"chunk_ids": chunk_ids})
         assert res["code"] == expected_code
         assert res["message"] == expected_message

@@ -80,56 +80,56 @@ class TestChunksDeletion:
             pytest.param(lambda r: {"chunk_ids": r + ["invalid_id"]}, marks=pytest.mark.p3),
         ],
     )
-    def test_delete_partial_invalid_id(self, api_key, add_chunks_func, payload):
+    def test_delete_partial_invalid_id(self, HttpApiAuth, add_chunks_func, payload):
         dataset_id, document_id, chunk_ids = add_chunks_func
         if callable(payload):
             payload = payload(chunk_ids)
-        res = delete_chunks(api_key, dataset_id, document_id, payload)
+        res = delete_chunks(HttpApiAuth, dataset_id, document_id, payload)
         assert res["code"] == 102
         assert res["message"] == "rm_chunk deleted chunks 4, expect 5"
-        res = list_chunks(api_key, dataset_id, document_id)
+        res = list_chunks(HttpApiAuth, dataset_id, document_id)
         if res["code"] != 0:
             assert False, res
         assert len(res["data"]["chunks"]) == 1
         assert res["data"]["total"] == 1

     @pytest.mark.p3
-    def test_repeated_deletion(self, api_key, add_chunks_func):
+    def test_repeated_deletion(self, HttpApiAuth, add_chunks_func):
         dataset_id, document_id, chunk_ids = add_chunks_func
         payload = {"chunk_ids": chunk_ids}
-        res = delete_chunks(api_key, dataset_id, document_id, payload)
+        res = delete_chunks(HttpApiAuth, dataset_id, document_id, payload)
         assert res["code"] == 0
-        res = delete_chunks(api_key, dataset_id, document_id, payload)
+        res = delete_chunks(HttpApiAuth, dataset_id, document_id, payload)
         assert res["code"] == 102
         assert res["message"] == "rm_chunk deleted chunks 0, expect 4"

     @pytest.mark.p3
-    def test_duplicate_deletion(self, api_key, add_chunks_func):
+    def test_duplicate_deletion(self, HttpApiAuth, add_chunks_func):
         dataset_id, document_id, chunk_ids = add_chunks_func
-        res = delete_chunks(api_key, dataset_id, document_id, {"chunk_ids": chunk_ids * 2})
+        res = delete_chunks(HttpApiAuth, dataset_id, document_id, {"chunk_ids": chunk_ids * 2})
         assert res["code"] == 0
         assert "Duplicate chunk ids" in res["data"]["errors"][0]
         assert res["data"]["success_count"] == 4
-        res = list_chunks(api_key, dataset_id, document_id)
+        res = list_chunks(HttpApiAuth, dataset_id, document_id)
         if res["code"] != 0:
             assert False, res
         assert len(res["data"]["chunks"]) == 1
         assert res["data"]["total"] == 1

     @pytest.mark.p3
-    def test_concurrent_deletion(self, api_key, add_document):
+    def test_concurrent_deletion(self, HttpApiAuth, add_document):
         count = 100
         dataset_id, document_id = add_document
-        chunk_ids = batch_add_chunks(api_key, dataset_id, document_id, count)
+        chunk_ids = batch_add_chunks(HttpApiAuth, dataset_id, document_id, count)
         with ThreadPoolExecutor(max_workers=5) as executor:
             futures = [
                 executor.submit(
                     delete_chunks,
-                    api_key,
+                    HttpApiAuth,
                     dataset_id,
                     document_id,
                     {"chunk_ids": chunk_ids[i : i + 1]},

@@ -141,20 +141,20 @@ class TestChunksDeletion:
         assert all(future.result()["code"] == 0 for future in futures)

     @pytest.mark.p3
-    def test_delete_1k(self, api_key, add_document):
+    def test_delete_1k(self, HttpApiAuth, add_document):
         chunks_num = 1_000
         dataset_id, document_id = add_document
-        chunk_ids = batch_add_chunks(api_key, dataset_id, document_id, chunks_num)
+        chunk_ids = batch_add_chunks(HttpApiAuth, dataset_id, document_id, chunks_num)
         # issues/6487
         from time import sleep
         sleep(1)
-        res = delete_chunks(api_key, dataset_id, document_id, {"chunk_ids": chunk_ids})
+        res = delete_chunks(HttpApiAuth, dataset_id, document_id, {"chunk_ids": chunk_ids})
         assert res["code"] == 0
-        res = list_chunks(api_key, dataset_id, document_id)
+        res = list_chunks(HttpApiAuth, dataset_id, document_id)
         if res["code"] != 0:
             assert False, res
         assert len(res["data"]["chunks"]) == 1

@@ -173,7 +173,7 @@ class TestChunksDeletion:
     )
     def test_basic_scenarios(
         self,
-        api_key,
+        HttpApiAuth,
         add_chunks_func,
         payload,
         expected_code,

@@ -183,12 +183,12 @@ class TestChunksDeletion:
         dataset_id, document_id, chunk_ids = add_chunks_func
         if callable(payload):
             payload = payload(chunk_ids)
-        res = delete_chunks(api_key, dataset_id, document_id, payload)
+        res = delete_chunks(HttpApiAuth, dataset_id, document_id, payload)
         assert res["code"] == expected_code
         if res["code"] != 0:
             assert res["message"] == expected_message
-        res = list_chunks(api_key, dataset_id, document_id)
+        res = list_chunks(HttpApiAuth, dataset_id, document_id)
         if res["code"] != 0:
             assert False, res
         assert len(res["data"]["chunks"]) == remaining

View File

@@ -54,9 +54,9 @@ class TestChunksList:
             pytest.param({"page": "a", "page_size": 2}, 100, 0, """ValueError("invalid literal for int() with base 10: \'a\'")""", marks=pytest.mark.skip),
         ],
     )
-    def test_page(self, api_key, add_chunks, params, expected_code, expected_page_size, expected_message):
+    def test_page(self, HttpApiAuth, add_chunks, params, expected_code, expected_page_size, expected_message):
         dataset_id, document_id, _ = add_chunks
-        res = list_chunks(api_key, dataset_id, document_id, params=params)
+        res = list_chunks(HttpApiAuth, dataset_id, document_id, params=params)
         assert res["code"] == expected_code
         if expected_code == 0:
             assert len(res["data"]["chunks"]) == expected_page_size

@@ -77,9 +77,9 @@ class TestChunksList:
             pytest.param({"page_size": "a"}, 100, 0, """ValueError("invalid literal for int() with base 10: \'a\'")""", marks=pytest.mark.skip),
         ],
     )
-    def test_page_size(self, api_key, add_chunks, params, expected_code, expected_page_size, expected_message):
+    def test_page_size(self, HttpApiAuth, add_chunks, params, expected_code, expected_page_size, expected_message):
         dataset_id, document_id, _ = add_chunks
-        res = list_chunks(api_key, dataset_id, document_id, params=params)
+        res = list_chunks(HttpApiAuth, dataset_id, document_id, params=params)
         assert res["code"] == expected_code
         if expected_code == 0:
             assert len(res["data"]["chunks"]) == expected_page_size

@@ -98,9 +98,9 @@ class TestChunksList:
             ({"keywords": "unknown"}, 0),
         ],
     )
-    def test_keywords(self, api_key, add_chunks, params, expected_page_size):
+    def test_keywords(self, HttpApiAuth, add_chunks, params, expected_page_size):
         dataset_id, document_id, _ = add_chunks
-        res = list_chunks(api_key, dataset_id, document_id, params=params)
+        res = list_chunks(HttpApiAuth, dataset_id, document_id, params=params)
         assert res["code"] == 0
         assert len(res["data"]["chunks"]) == expected_page_size

@@ -116,7 +116,7 @@ class TestChunksList:
     )
     def test_id(
         self,
-        api_key,
+        HttpApiAuth,
         add_chunks,
         chunk_id,
         expected_code,

@@ -128,7 +128,7 @@ class TestChunksList:
             params = {"id": chunk_id(chunk_ids)}
         else:
             params = {"id": chunk_id}
-        res = list_chunks(api_key, dataset_id, document_id, params=params)
+        res = list_chunks(HttpApiAuth, dataset_id, document_id, params=params)
         assert res["code"] == expected_code
         if expected_code == 0:
             if params["id"] in [None, ""]:

@@ -139,35 +139,35 @@ class TestChunksList:
             assert res["message"] == expected_message

     @pytest.mark.p3
-    def test_invalid_params(self, api_key, add_chunks):
+    def test_invalid_params(self, HttpApiAuth, add_chunks):
         dataset_id, document_id, _ = add_chunks
         params = {"a": "b"}
-        res = list_chunks(api_key, dataset_id, document_id, params=params)
+        res = list_chunks(HttpApiAuth, dataset_id, document_id, params=params)
         assert res["code"] == 0
         assert len(res["data"]["chunks"]) == 5

     @pytest.mark.p3
-    def test_concurrent_list(self, api_key, add_chunks):
+    def test_concurrent_list(self, HttpApiAuth, add_chunks):
         dataset_id, document_id, _ = add_chunks
         count = 100
         with ThreadPoolExecutor(max_workers=5) as executor:
-            futures = [executor.submit(list_chunks, api_key, dataset_id, document_id) for i in range(count)]
+            futures = [executor.submit(list_chunks, HttpApiAuth, dataset_id, document_id) for i in range(count)]
         responses = list(as_completed(futures))
         assert len(responses) == count, responses
         assert all(len(future.result()["data"]["chunks"]) == 5 for future in futures)

     @pytest.mark.p1
-    def test_default(self, api_key, add_document):
+    def test_default(self, HttpApiAuth, add_document):
         dataset_id, document_id = add_document
-        res = list_chunks(api_key, dataset_id, document_id)
+        res = list_chunks(HttpApiAuth, dataset_id, document_id)
         chunks_count = res["data"]["doc"]["chunk_count"]
-        batch_add_chunks(api_key, dataset_id, document_id, 31)
+        batch_add_chunks(HttpApiAuth, dataset_id, document_id, 31)
         # issues/6487
         from time import sleep
         sleep(3)
-        res = list_chunks(api_key, dataset_id, document_id)
+        res = list_chunks(HttpApiAuth, dataset_id, document_id)
         assert res["code"] == 0
         assert len(res["data"]["chunks"]) == 30
         assert res["data"]["doc"]["chunk_count"] == chunks_count + 31

@@ -184,9 +184,9 @@ class TestChunksList:
            ),
         ],
     )
-    def test_invalid_dataset_id(self, api_key, add_chunks, dataset_id, expected_code, expected_message):
+    def test_invalid_dataset_id(self, HttpApiAuth, add_chunks, dataset_id, expected_code, expected_message):
         _, document_id, _ = add_chunks
-        res = list_chunks(api_key, dataset_id, document_id)
+        res = list_chunks(HttpApiAuth, dataset_id, document_id)
         assert res["code"] == expected_code
         assert res["message"] == expected_message

@@ -202,8 +202,8 @@ class TestChunksList:
            ),
         ],
     )
-    def test_invalid_document_id(self, api_key, add_chunks, document_id, expected_code, expected_message):
+    def test_invalid_document_id(self, HttpApiAuth, add_chunks, document_id, expected_code, expected_message):
         dataset_id, _, _ = add_chunks
-        res = list_chunks(api_key, dataset_id, document_id)
+        res = list_chunks(HttpApiAuth, dataset_id, document_id)
         assert res["code"] == expected_code
         assert res["message"] == expected_message

View File

@@ -54,13 +54,13 @@ class TestChunksRetrieval:
             ({"question": "chunk"}, 102, 0, "`dataset_ids` is required."),
         ],
     )
-    def test_basic_scenarios(self, api_key, add_chunks, payload, expected_code, expected_page_size, expected_message):
+    def test_basic_scenarios(self, HttpApiAuth, add_chunks, payload, expected_code, expected_page_size, expected_message):
         dataset_id, document_id, _ = add_chunks
         if "dataset_ids" in payload:
             payload["dataset_ids"] = [dataset_id]
         if "document_ids" in payload:
             payload["document_ids"] = [document_id]
-        res = retrieval_chunks(api_key, payload)
+        res = retrieval_chunks(HttpApiAuth, payload)
         assert res["code"] == expected_code
         if expected_code == 0:
             assert len(res["data"]["chunks"]) == expected_page_size

@@ -104,10 +104,10 @@ class TestChunksRetrieval:
            ),
         ],
     )
-    def test_page(self, api_key, add_chunks, payload, expected_code, expected_page_size, expected_message):
+    def test_page(self, HttpApiAuth, add_chunks, payload, expected_code, expected_page_size, expected_message):
         dataset_id, _, _ = add_chunks
         payload.update({"question": "chunk", "dataset_ids": [dataset_id]})
-        res = retrieval_chunks(api_key, payload)
+        res = retrieval_chunks(HttpApiAuth, payload)
         assert res["code"] == expected_code
         if expected_code == 0:
             assert len(res["data"]["chunks"]) == expected_page_size

@@ -139,11 +139,11 @@ class TestChunksRetrieval:
            ),
         ],
     )
-    def test_page_size(self, api_key, add_chunks, payload, expected_code, expected_page_size, expected_message):
+    def test_page_size(self, HttpApiAuth, add_chunks, payload, expected_code, expected_page_size, expected_message):
         dataset_id, _, _ = add_chunks
         payload.update({"question": "chunk", "dataset_ids": [dataset_id]})
-        res = retrieval_chunks(api_key, payload)
+        res = retrieval_chunks(HttpApiAuth, payload)
         assert res["code"] == expected_code
         if expected_code == 0:
             assert len(res["data"]["chunks"]) == expected_page_size

@@ -166,10 +166,10 @@ class TestChunksRetrieval:
            ),
         ],
     )
-    def test_vector_similarity_weight(self, api_key, add_chunks, payload, expected_code, expected_page_size, expected_message):
+    def test_vector_similarity_weight(self, HttpApiAuth, add_chunks, payload, expected_code, expected_page_size, expected_message):
         dataset_id, _, _ = add_chunks
         payload.update({"question": "chunk", "dataset_ids": [dataset_id]})
-        res = retrieval_chunks(api_key, payload)
+        res = retrieval_chunks(HttpApiAuth, payload)
         assert res["code"] == expected_code
         if expected_code == 0:
             assert len(res["data"]["chunks"]) == expected_page_size

@@ -218,10 +218,10 @@ class TestChunksRetrieval:
            ),
         ],
     )
-    def test_top_k(self, api_key, add_chunks, payload, expected_code, expected_page_size, expected_message):
+    def test_top_k(self, HttpApiAuth, add_chunks, payload, expected_code, expected_page_size, expected_message):
         dataset_id, _, _ = add_chunks
         payload.update({"question": "chunk", "dataset_ids": [dataset_id]})
-        res = retrieval_chunks(api_key, payload)
+        res = retrieval_chunks(HttpApiAuth, payload)
         assert res["code"] == expected_code
         if expected_code == 0:
             assert len(res["data"]["chunks"]) == expected_page_size

@@ -236,10 +236,10 @@ class TestChunksRetrieval:
             pytest.param({"rerank_id": "unknown"}, 100, "LookupError('Model(unknown) not authorized')", marks=pytest.mark.skip),
         ],
     )
-    def test_rerank_id(self, api_key, add_chunks, payload, expected_code, expected_message):
+    def test_rerank_id(self, HttpApiAuth, add_chunks, payload, expected_code, expected_message):
         dataset_id, _, _ = add_chunks
         payload.update({"question": "chunk", "dataset_ids": [dataset_id]})
-        res = retrieval_chunks(api_key, payload)
+        res = retrieval_chunks(HttpApiAuth, payload)
         assert res["code"] == expected_code
         if expected_code == 0:
             assert len(res["data"]["chunks"]) > 0

@@ -257,10 +257,10 @@ class TestChunksRetrieval:
             ({"keyword": None}, 0, 5, ""),
         ],
     )
-    def test_keyword(self, api_key, add_chunks, payload, expected_code, expected_page_size, expected_message):
+    def test_keyword(self, HttpApiAuth, add_chunks, payload, expected_code, expected_page_size, expected_message):
         dataset_id, _, _ = add_chunks
         payload.update({"question": "chunk test", "dataset_ids": [dataset_id]})
-        res = retrieval_chunks(api_key, payload)
+        res = retrieval_chunks(HttpApiAuth, payload)
         assert res["code"] == expected_code
         if expected_code == 0:
             assert len(res["data"]["chunks"]) == expected_page_size

@@ -278,10 +278,10 @@ class TestChunksRetrieval:
             pytest.param({"highlight": None}, 0, False, "", marks=pytest.mark.skip(reason="issues/6648")),
         ],
     )
-    def test_highlight(self, api_key, add_chunks, payload, expected_code, expected_highlight, expected_message):
+    def test_highlight(self, HttpApiAuth, add_chunks, payload, expected_code, expected_highlight, expected_message):
dataset_id, _, _ = add_chunks dataset_id, _, _ = add_chunks
payload.update({"question": "chunk", "dataset_ids": [dataset_id]}) payload.update({"question": "chunk", "dataset_ids": [dataset_id]})
res = retrieval_chunks(api_key, payload) res = retrieval_chunks(HttpApiAuth, payload)
assert res["code"] == expected_code assert res["code"] == expected_code
if expected_highlight: if expected_highlight:
for chunk in res["data"]["chunks"]: for chunk in res["data"]["chunks"]:
@ -294,21 +294,21 @@ class TestChunksRetrieval:
assert res["message"] == expected_message assert res["message"] == expected_message
@pytest.mark.p3 @pytest.mark.p3
def test_invalid_params(self, api_key, add_chunks): def test_invalid_params(self, HttpApiAuth, add_chunks):
dataset_id, _, _ = add_chunks dataset_id, _, _ = add_chunks
payload = {"question": "chunk", "dataset_ids": [dataset_id], "a": "b"} payload = {"question": "chunk", "dataset_ids": [dataset_id], "a": "b"}
res = retrieval_chunks(api_key, payload) res = retrieval_chunks(HttpApiAuth, payload)
assert res["code"] == 0 assert res["code"] == 0
assert len(res["data"]["chunks"]) == 4 assert len(res["data"]["chunks"]) == 4
@pytest.mark.p3 @pytest.mark.p3
def test_concurrent_retrieval(self, api_key, add_chunks): def test_concurrent_retrieval(self, HttpApiAuth, add_chunks):
dataset_id, _, _ = add_chunks dataset_id, _, _ = add_chunks
count = 100 count = 100
payload = {"question": "chunk", "dataset_ids": [dataset_id]} payload = {"question": "chunk", "dataset_ids": [dataset_id]}
with ThreadPoolExecutor(max_workers=5) as executor: with ThreadPoolExecutor(max_workers=5) as executor:
futures = [executor.submit(retrieval_chunks, api_key, payload) for i in range(count)] futures = [executor.submit(retrieval_chunks, HttpApiAuth, payload) for i in range(count)]
responses = list(as_completed(futures)) responses = list(as_completed(futures))
assert len(responses) == count, responses assert len(responses) == count, responses
assert all(future.result()["code"] == 0 for future in futures) assert all(future.result()["code"] == 0 for future in futures)
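Annotation (not part of the diff): the fixture handed to `retrieval_chunks` and the other helpers above is a `requests`-style auth object rather than a raw key string, which is what the rename is meant to convey. A minimal sketch of how such a helper might consume it follows; the endpoint, base URL, header format, and auth-class internals are assumptions for illustration only.

```python
# Illustrative sketch only; the real helper and auth class live in the shared
# test utilities. Endpoint, base URL, and header format are assumptions.
import requests
from requests.auth import AuthBase


class RAGFlowHttpApiAuth(AuthBase):  # assumed shape of what the fixture returns
    def __init__(self, token: str):
        self.token = token

    def __call__(self, request):
        # Attach the bearer token to every outgoing request.
        request.headers["Authorization"] = f"Bearer {self.token}"
        return request


def retrieval_chunks(auth, payload, base_url="http://127.0.0.1:9380"):
    # The fixture is forwarded verbatim as the requests auth handler, which is
    # why "HttpApiAuth" describes it better than "api_key".
    res = requests.post(f"{base_url}/api/v1/retrieval", json=payload, auth=auth)
    return res.json()
```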

View File

@ -69,9 +69,9 @@ class TestUpdatedChunk:
({"content": "\n!?。;!?\"'"}, 0, ""), ({"content": "\n!?。;!?\"'"}, 0, ""),
], ],
) )
def test_content(self, api_key, add_chunks, payload, expected_code, expected_message): def test_content(self, HttpApiAuth, add_chunks, payload, expected_code, expected_message):
dataset_id, document_id, chunk_ids = add_chunks dataset_id, document_id, chunk_ids = add_chunks
res = update_chunk(api_key, dataset_id, document_id, chunk_ids[0], payload) res = update_chunk(HttpApiAuth, dataset_id, document_id, chunk_ids[0], payload)
assert res["code"] == expected_code assert res["code"] == expected_code
if expected_code != 0: if expected_code != 0:
assert res["message"] == expected_message assert res["message"] == expected_message
@ -88,9 +88,9 @@ class TestUpdatedChunk:
({"important_keywords": 123}, 102, "`important_keywords` should be a list"), ({"important_keywords": 123}, 102, "`important_keywords` should be a list"),
], ],
) )
def test_important_keywords(self, api_key, add_chunks, payload, expected_code, expected_message): def test_important_keywords(self, HttpApiAuth, add_chunks, payload, expected_code, expected_message):
dataset_id, document_id, chunk_ids = add_chunks dataset_id, document_id, chunk_ids = add_chunks
res = update_chunk(api_key, dataset_id, document_id, chunk_ids[0], payload) res = update_chunk(HttpApiAuth, dataset_id, document_id, chunk_ids[0], payload)
assert res["code"] == expected_code assert res["code"] == expected_code
if expected_code != 0: if expected_code != 0:
assert res["message"] == expected_message assert res["message"] == expected_message
@ -107,9 +107,9 @@ class TestUpdatedChunk:
({"questions": 123}, 102, "`questions` should be a list"), ({"questions": 123}, 102, "`questions` should be a list"),
], ],
) )
def test_questions(self, api_key, add_chunks, payload, expected_code, expected_message): def test_questions(self, HttpApiAuth, add_chunks, payload, expected_code, expected_message):
dataset_id, document_id, chunk_ids = add_chunks dataset_id, document_id, chunk_ids = add_chunks
res = update_chunk(api_key, dataset_id, document_id, chunk_ids[0], payload) res = update_chunk(HttpApiAuth, dataset_id, document_id, chunk_ids[0], payload)
assert res["code"] == expected_code assert res["code"] == expected_code
if expected_code != 0: if expected_code != 0:
assert res["message"] == expected_message assert res["message"] == expected_message
@ -128,14 +128,14 @@ class TestUpdatedChunk:
) )
def test_available( def test_available(
self, self,
api_key, HttpApiAuth,
add_chunks, add_chunks,
payload, payload,
expected_code, expected_code,
expected_message, expected_message,
): ):
dataset_id, document_id, chunk_ids = add_chunks dataset_id, document_id, chunk_ids = add_chunks
res = update_chunk(api_key, dataset_id, document_id, chunk_ids[0], payload) res = update_chunk(HttpApiAuth, dataset_id, document_id, chunk_ids[0], payload)
assert res["code"] == expected_code assert res["code"] == expected_code
if expected_code != 0: if expected_code != 0:
assert res["message"] == expected_message assert res["message"] == expected_message
@ -149,9 +149,9 @@ class TestUpdatedChunk:
pytest.param("invalid_dataset_id", 102, "Can't find this chunk", marks=pytest.mark.skipif(os.getenv("DOC_ENGINE") in [None, "opensearch", "elasticsearch"], reason="elasticsearch")), pytest.param("invalid_dataset_id", 102, "Can't find this chunk", marks=pytest.mark.skipif(os.getenv("DOC_ENGINE") in [None, "opensearch", "elasticsearch"], reason="elasticsearch")),
], ],
) )
def test_invalid_dataset_id(self, api_key, add_chunks, dataset_id, expected_code, expected_message): def test_invalid_dataset_id(self, HttpApiAuth, add_chunks, dataset_id, expected_code, expected_message):
_, document_id, chunk_ids = add_chunks _, document_id, chunk_ids = add_chunks
res = update_chunk(api_key, dataset_id, document_id, chunk_ids[0]) res = update_chunk(HttpApiAuth, dataset_id, document_id, chunk_ids[0])
assert res["code"] == expected_code assert res["code"] == expected_code
assert expected_message in res["message"] assert expected_message in res["message"]
@ -167,9 +167,9 @@ class TestUpdatedChunk:
), ),
], ],
) )
def test_invalid_document_id(self, api_key, add_chunks, document_id, expected_code, expected_message): def test_invalid_document_id(self, HttpApiAuth, add_chunks, document_id, expected_code, expected_message):
dataset_id, _, chunk_ids = add_chunks dataset_id, _, chunk_ids = add_chunks
res = update_chunk(api_key, dataset_id, document_id, chunk_ids[0]) res = update_chunk(HttpApiAuth, dataset_id, document_id, chunk_ids[0])
assert res["code"] == expected_code assert res["code"] == expected_code
assert res["message"] == expected_message assert res["message"] == expected_message
@ -185,19 +185,19 @@ class TestUpdatedChunk:
), ),
], ],
) )
def test_invalid_chunk_id(self, api_key, add_chunks, chunk_id, expected_code, expected_message): def test_invalid_chunk_id(self, HttpApiAuth, add_chunks, chunk_id, expected_code, expected_message):
dataset_id, document_id, _ = add_chunks dataset_id, document_id, _ = add_chunks
res = update_chunk(api_key, dataset_id, document_id, chunk_id) res = update_chunk(HttpApiAuth, dataset_id, document_id, chunk_id)
assert res["code"] == expected_code assert res["code"] == expected_code
assert res["message"] == expected_message assert res["message"] == expected_message
@pytest.mark.p3 @pytest.mark.p3
def test_repeated_update_chunk(self, api_key, add_chunks): def test_repeated_update_chunk(self, HttpApiAuth, add_chunks):
dataset_id, document_id, chunk_ids = add_chunks dataset_id, document_id, chunk_ids = add_chunks
res = update_chunk(api_key, dataset_id, document_id, chunk_ids[0], {"content": "chunk test 1"}) res = update_chunk(HttpApiAuth, dataset_id, document_id, chunk_ids[0], {"content": "chunk test 1"})
assert res["code"] == 0 assert res["code"] == 0
res = update_chunk(api_key, dataset_id, document_id, chunk_ids[0], {"content": "chunk test 2"}) res = update_chunk(HttpApiAuth, dataset_id, document_id, chunk_ids[0], {"content": "chunk test 2"})
assert res["code"] == 0 assert res["code"] == 0
@pytest.mark.p3 @pytest.mark.p3
@ -209,16 +209,16 @@ class TestUpdatedChunk:
pytest.param(None, 100, """TypeError("argument of type \'NoneType\' is not iterable")""", marks=pytest.mark.skip), pytest.param(None, 100, """TypeError("argument of type \'NoneType\' is not iterable")""", marks=pytest.mark.skip),
], ],
) )
def test_invalid_params(self, api_key, add_chunks, payload, expected_code, expected_message): def test_invalid_params(self, HttpApiAuth, add_chunks, payload, expected_code, expected_message):
dataset_id, document_id, chunk_ids = add_chunks dataset_id, document_id, chunk_ids = add_chunks
res = update_chunk(api_key, dataset_id, document_id, chunk_ids[0], payload) res = update_chunk(HttpApiAuth, dataset_id, document_id, chunk_ids[0], payload)
assert res["code"] == expected_code assert res["code"] == expected_code
if expected_code != 0: if expected_code != 0:
assert res["message"] == expected_message assert res["message"] == expected_message
@pytest.mark.p3 @pytest.mark.p3
@pytest.mark.skipif(os.getenv("DOC_ENGINE") == "infinity", reason="issues/6554") @pytest.mark.skipif(os.getenv("DOC_ENGINE") == "infinity", reason="issues/6554")
def test_concurrent_update_chunk(self, api_key, add_chunks): def test_concurrent_update_chunk(self, HttpApiAuth, add_chunks):
count = 50 count = 50
dataset_id, document_id, chunk_ids = add_chunks dataset_id, document_id, chunk_ids = add_chunks
@ -226,7 +226,7 @@ class TestUpdatedChunk:
futures = [ futures = [
executor.submit( executor.submit(
update_chunk, update_chunk,
api_key, HttpApiAuth,
dataset_id, dataset_id,
document_id, document_id,
chunk_ids[randint(0, 3)], chunk_ids[randint(0, 3)],
@ -239,9 +239,9 @@ class TestUpdatedChunk:
assert all(future.result()["code"] == 0 for future in futures) assert all(future.result()["code"] == 0 for future in futures)
@pytest.mark.p3 @pytest.mark.p3
def test_update_chunk_to_deleted_document(self, api_key, add_chunks): def test_update_chunk_to_deleted_document(self, HttpApiAuth, add_chunks):
dataset_id, document_id, chunk_ids = add_chunks dataset_id, document_id, chunk_ids = add_chunks
delete_documents(api_key, dataset_id, {"ids": [document_id]}) delete_documents(HttpApiAuth, dataset_id, {"ids": [document_id]})
res = update_chunk(api_key, dataset_id, document_id, chunk_ids[0]) res = update_chunk(HttpApiAuth, dataset_id, document_id, chunk_ids[0])
assert res["code"] == 102 assert res["code"] == 102
assert res["message"] == f"Can't find this chunk {chunk_ids[0]}" assert res["message"] == f"Can't find this chunk {chunk_ids[0]}"

View File

@ -20,20 +20,20 @@ from common import batch_create_datasets, delete_datasets
@pytest.fixture(scope="class") @pytest.fixture(scope="class")
def add_datasets(api_key, request): def add_datasets(HttpApiAuth, request):
def cleanup(): def cleanup():
delete_datasets(api_key, {"ids": None}) delete_datasets(HttpApiAuth, {"ids": None})
request.addfinalizer(cleanup) request.addfinalizer(cleanup)
return batch_create_datasets(api_key, 5) return batch_create_datasets(HttpApiAuth, 5)
@pytest.fixture(scope="function") @pytest.fixture(scope="function")
def add_datasets_func(api_key, request): def add_datasets_func(HttpApiAuth, request):
def cleanup(): def cleanup():
delete_datasets(api_key, {"ids": None}) delete_datasets(HttpApiAuth, {"ids": None})
request.addfinalizer(cleanup) request.addfinalizer(cleanup)
return batch_create_datasets(api_key, 3) return batch_create_datasets(HttpApiAuth, 3)
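Annotation (not part of the diff): for orientation, the sketch below is a yield-based equivalent of the function-scoped fixture above, shown for contrast only. The PR keeps the `addfinalizer` style, which registers cleanup before creation, so teardown still runs even if `batch_create_datasets` fails part-way; a yield fixture would skip teardown in that case.

```python
# For contrast only: a yield-based equivalent of add_datasets_func above.
# Not part of this change; shown to illustrate the design choice.
import pytest

from common import batch_create_datasets, delete_datasets


@pytest.fixture(scope="function")
def add_datasets_func_yield(HttpApiAuth):
    ids = batch_create_datasets(HttpApiAuth, 3)
    yield ids
    # Runs only if setup succeeded; the addfinalizer form above does not have
    # that limitation because cleanup is registered first.
    delete_datasets(HttpApiAuth, {"ids": None})
```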

View File

@ -48,9 +48,9 @@ class TestAuthorization:
class TestRquest: class TestRquest:
@pytest.mark.p3 @pytest.mark.p3
def test_content_type_bad(self, api_key): def test_content_type_bad(self, HttpApiAuth):
BAD_CONTENT_TYPE = "text/xml" BAD_CONTENT_TYPE = "text/xml"
res = create_dataset(api_key, {"name": "bad_content_type"}, headers={"Content-Type": BAD_CONTENT_TYPE}) res = create_dataset(HttpApiAuth, {"name": "bad_content_type"}, headers={"Content-Type": BAD_CONTENT_TYPE})
assert res["code"] == 101, res assert res["code"] == 101, res
assert res["message"] == f"Unsupported content type: Expected application/json, got {BAD_CONTENT_TYPE}", res assert res["message"] == f"Unsupported content type: Expected application/json, got {BAD_CONTENT_TYPE}", res
@ -63,8 +63,8 @@ class TestRquest:
], ],
ids=["malformed_json_syntax", "invalid_request_payload_type"], ids=["malformed_json_syntax", "invalid_request_payload_type"],
) )
def test_payload_bad(self, api_key, payload, expected_message): def test_payload_bad(self, HttpApiAuth, payload, expected_message):
res = create_dataset(api_key, data=payload) res = create_dataset(HttpApiAuth, data=payload)
assert res["code"] == 101, res assert res["code"] == 101, res
assert res["message"] == expected_message, res assert res["message"] == expected_message, res
@ -72,17 +72,17 @@ class TestRquest:
@pytest.mark.usefixtures("clear_datasets") @pytest.mark.usefixtures("clear_datasets")
class TestCapability: class TestCapability:
@pytest.mark.p3 @pytest.mark.p3
def test_create_dataset_1k(self, api_key): def test_create_dataset_1k(self, HttpApiAuth):
for i in range(1_000): for i in range(1_000):
payload = {"name": f"dataset_{i}"} payload = {"name": f"dataset_{i}"}
res = create_dataset(api_key, payload) res = create_dataset(HttpApiAuth, payload)
assert res["code"] == 0, f"Failed to create dataset {i}" assert res["code"] == 0, f"Failed to create dataset {i}"
@pytest.mark.p3 @pytest.mark.p3
def test_create_dataset_concurrent(self, api_key): def test_create_dataset_concurrent(self, HttpApiAuth):
count = 100 count = 100
with ThreadPoolExecutor(max_workers=5) as executor: with ThreadPoolExecutor(max_workers=5) as executor:
futures = [executor.submit(create_dataset, api_key, {"name": f"dataset_{i}"}) for i in range(count)] futures = [executor.submit(create_dataset, HttpApiAuth, {"name": f"dataset_{i}"}) for i in range(count)]
responses = list(as_completed(futures)) responses = list(as_completed(futures))
assert len(responses) == count, responses assert len(responses) == count, responses
assert all(future.result()["code"] == 0 for future in futures) assert all(future.result()["code"] == 0 for future in futures)
@ -94,8 +94,8 @@ class TestDatasetCreate:
@given(name=valid_names()) @given(name=valid_names())
@example("a" * 128) @example("a" * 128)
@settings(max_examples=20) @settings(max_examples=20)
def test_name(self, api_key, name): def test_name(self, HttpApiAuth, name):
res = create_dataset(api_key, {"name": name}) res = create_dataset(HttpApiAuth, {"name": name})
assert res["code"] == 0, res assert res["code"] == 0, res
assert res["data"]["name"] == name, res assert res["data"]["name"] == name, res
@ -111,49 +111,49 @@ class TestDatasetCreate:
], ],
ids=["empty_name", "space_name", "too_long_name", "invalid_name", "None_name"], ids=["empty_name", "space_name", "too_long_name", "invalid_name", "None_name"],
) )
def test_name_invalid(self, api_key, name, expected_message): def test_name_invalid(self, HttpApiAuth, name, expected_message):
payload = {"name": name} payload = {"name": name}
res = create_dataset(api_key, payload) res = create_dataset(HttpApiAuth, payload)
assert res["code"] == 101, res assert res["code"] == 101, res
assert expected_message in res["message"], res assert expected_message in res["message"], res
@pytest.mark.p3 @pytest.mark.p3
def test_name_duplicated(self, api_key): def test_name_duplicated(self, HttpApiAuth):
name = "duplicated_name" name = "duplicated_name"
payload = {"name": name} payload = {"name": name}
res = create_dataset(api_key, payload) res = create_dataset(HttpApiAuth, payload)
assert res["code"] == 0, res assert res["code"] == 0, res
res = create_dataset(api_key, payload) res = create_dataset(HttpApiAuth, payload)
assert res["code"] == 103, res assert res["code"] == 103, res
assert res["message"] == f"Dataset name '{name}' already exists", res assert res["message"] == f"Dataset name '{name}' already exists", res
@pytest.mark.p3 @pytest.mark.p3
def test_name_case_insensitive(self, api_key): def test_name_case_insensitive(self, HttpApiAuth):
name = "CaseInsensitive" name = "CaseInsensitive"
payload = {"name": name.upper()} payload = {"name": name.upper()}
res = create_dataset(api_key, payload) res = create_dataset(HttpApiAuth, payload)
assert res["code"] == 0, res assert res["code"] == 0, res
payload = {"name": name.lower()} payload = {"name": name.lower()}
res = create_dataset(api_key, payload) res = create_dataset(HttpApiAuth, payload)
assert res["code"] == 103, res assert res["code"] == 103, res
assert res["message"] == f"Dataset name '{name.lower()}' already exists", res assert res["message"] == f"Dataset name '{name.lower()}' already exists", res
@pytest.mark.p2 @pytest.mark.p2
def test_avatar(self, api_key, tmp_path): def test_avatar(self, HttpApiAuth, tmp_path):
fn = create_image_file(tmp_path / "ragflow_test.png") fn = create_image_file(tmp_path / "ragflow_test.png")
payload = { payload = {
"name": "avatar", "name": "avatar",
"avatar": f"data:image/png;base64,{encode_avatar(fn)}", "avatar": f"data:image/png;base64,{encode_avatar(fn)}",
} }
res = create_dataset(api_key, payload) res = create_dataset(HttpApiAuth, payload)
assert res["code"] == 0, res assert res["code"] == 0, res
@pytest.mark.p2 @pytest.mark.p2
def test_avatar_exceeds_limit_length(self, api_key): def test_avatar_exceeds_limit_length(self, HttpApiAuth):
payload = {"name": "avatar_exceeds_limit_length", "avatar": "a" * 65536} payload = {"name": "avatar_exceeds_limit_length", "avatar": "a" * 65536}
res = create_dataset(api_key, payload) res = create_dataset(HttpApiAuth, payload)
assert res["code"] == 101, res assert res["code"] == 101, res
assert "String should have at most 65535 characters" in res["message"], res assert "String should have at most 65535 characters" in res["message"], res
@ -168,55 +168,55 @@ class TestDatasetCreate:
], ],
ids=["empty_prefix", "missing_comma", "unsupported_mine_type", "invalid_mine_type"], ids=["empty_prefix", "missing_comma", "unsupported_mine_type", "invalid_mine_type"],
) )
def test_avatar_invalid_prefix(self, api_key, tmp_path, name, prefix, expected_message): def test_avatar_invalid_prefix(self, HttpApiAuth, tmp_path, name, prefix, expected_message):
fn = create_image_file(tmp_path / "ragflow_test.png") fn = create_image_file(tmp_path / "ragflow_test.png")
payload = { payload = {
"name": name, "name": name,
"avatar": f"{prefix}{encode_avatar(fn)}", "avatar": f"{prefix}{encode_avatar(fn)}",
} }
res = create_dataset(api_key, payload) res = create_dataset(HttpApiAuth, payload)
assert res["code"] == 101, res assert res["code"] == 101, res
assert expected_message in res["message"], res assert expected_message in res["message"], res
@pytest.mark.p3 @pytest.mark.p3
def test_avatar_unset(self, api_key): def test_avatar_unset(self, HttpApiAuth):
payload = {"name": "avatar_unset"} payload = {"name": "avatar_unset"}
res = create_dataset(api_key, payload) res = create_dataset(HttpApiAuth, payload)
assert res["code"] == 0, res assert res["code"] == 0, res
assert res["data"]["avatar"] is None, res assert res["data"]["avatar"] is None, res
@pytest.mark.p3 @pytest.mark.p3
def test_avatar_none(self, api_key): def test_avatar_none(self, HttpApiAuth):
payload = {"name": "avatar_none", "avatar": None} payload = {"name": "avatar_none", "avatar": None}
res = create_dataset(api_key, payload) res = create_dataset(HttpApiAuth, payload)
assert res["code"] == 0, res assert res["code"] == 0, res
assert res["data"]["avatar"] is None, res assert res["data"]["avatar"] is None, res
@pytest.mark.p2 @pytest.mark.p2
def test_description(self, api_key): def test_description(self, HttpApiAuth):
payload = {"name": "description", "description": "description"} payload = {"name": "description", "description": "description"}
res = create_dataset(api_key, payload) res = create_dataset(HttpApiAuth, payload)
assert res["code"] == 0, res assert res["code"] == 0, res
assert res["data"]["description"] == "description", res assert res["data"]["description"] == "description", res
@pytest.mark.p2 @pytest.mark.p2
def test_description_exceeds_limit_length(self, api_key): def test_description_exceeds_limit_length(self, HttpApiAuth):
payload = {"name": "description_exceeds_limit_length", "description": "a" * 65536} payload = {"name": "description_exceeds_limit_length", "description": "a" * 65536}
res = create_dataset(api_key, payload) res = create_dataset(HttpApiAuth, payload)
assert res["code"] == 101, res assert res["code"] == 101, res
assert "String should have at most 65535 characters" in res["message"], res assert "String should have at most 65535 characters" in res["message"], res
@pytest.mark.p3 @pytest.mark.p3
def test_description_unset(self, api_key): def test_description_unset(self, HttpApiAuth):
payload = {"name": "description_unset"} payload = {"name": "description_unset"}
res = create_dataset(api_key, payload) res = create_dataset(HttpApiAuth, payload)
assert res["code"] == 0, res assert res["code"] == 0, res
assert res["data"]["description"] is None, res assert res["data"]["description"] is None, res
@pytest.mark.p3 @pytest.mark.p3
def test_description_none(self, api_key): def test_description_none(self, HttpApiAuth):
payload = {"name": "description_none", "description": None} payload = {"name": "description_none", "description": None}
res = create_dataset(api_key, payload) res = create_dataset(HttpApiAuth, payload)
assert res["code"] == 0, res assert res["code"] == 0, res
assert res["data"]["description"] is None, res assert res["data"]["description"] is None, res
@ -230,9 +230,9 @@ class TestDatasetCreate:
], ],
ids=["builtin_baai", "builtin_youdao", "tenant_zhipu"], ids=["builtin_baai", "builtin_youdao", "tenant_zhipu"],
) )
def test_embedding_model(self, api_key, name, embedding_model): def test_embedding_model(self, HttpApiAuth, name, embedding_model):
payload = {"name": name, "embedding_model": embedding_model} payload = {"name": name, "embedding_model": embedding_model}
res = create_dataset(api_key, payload) res = create_dataset(HttpApiAuth, payload)
assert res["code"] == 0, res assert res["code"] == 0, res
assert res["data"]["embedding_model"] == embedding_model, res assert res["data"]["embedding_model"] == embedding_model, res
@ -247,9 +247,9 @@ class TestDatasetCreate:
], ],
ids=["unknown_llm_name", "unknown_llm_factory", "tenant_no_auth_default_tenant_llm", "tenant_no_auth"], ids=["unknown_llm_name", "unknown_llm_factory", "tenant_no_auth_default_tenant_llm", "tenant_no_auth"],
) )
def test_embedding_model_invalid(self, api_key, name, embedding_model): def test_embedding_model_invalid(self, HttpApiAuth, name, embedding_model):
payload = {"name": name, "embedding_model": embedding_model} payload = {"name": name, "embedding_model": embedding_model}
res = create_dataset(api_key, payload) res = create_dataset(HttpApiAuth, payload)
assert res["code"] == 101, res assert res["code"] == 101, res
if "tenant_no_auth" in name: if "tenant_no_auth" in name:
assert res["message"] == f"Unauthorized model: <{embedding_model}>", res assert res["message"] == f"Unauthorized model: <{embedding_model}>", res
@ -268,9 +268,9 @@ class TestDatasetCreate:
], ],
ids=["missing_at", "empty_model_name", "empty_provider", "whitespace_only_model_name", "whitespace_only_provider"], ids=["missing_at", "empty_model_name", "empty_provider", "whitespace_only_model_name", "whitespace_only_provider"],
) )
def test_embedding_model_format(self, api_key, name, embedding_model): def test_embedding_model_format(self, HttpApiAuth, name, embedding_model):
payload = {"name": name, "embedding_model": embedding_model} payload = {"name": name, "embedding_model": embedding_model}
res = create_dataset(api_key, payload) res = create_dataset(HttpApiAuth, payload)
assert res["code"] == 101, res assert res["code"] == 101, res
if name == "missing_at": if name == "missing_at":
assert "Embedding model identifier must follow <model_name>@<provider> format" in res["message"], res assert "Embedding model identifier must follow <model_name>@<provider> format" in res["message"], res
@ -278,16 +278,16 @@ class TestDatasetCreate:
assert "Both model_name and provider must be non-empty strings" in res["message"], res assert "Both model_name and provider must be non-empty strings" in res["message"], res
@pytest.mark.p2 @pytest.mark.p2
def test_embedding_model_unset(self, api_key): def test_embedding_model_unset(self, HttpApiAuth):
payload = {"name": "embedding_model_unset"} payload = {"name": "embedding_model_unset"}
res = create_dataset(api_key, payload) res = create_dataset(HttpApiAuth, payload)
assert res["code"] == 0, res assert res["code"] == 0, res
assert res["data"]["embedding_model"] == "BAAI/bge-large-zh-v1.5@BAAI", res assert res["data"]["embedding_model"] == "BAAI/bge-large-zh-v1.5@BAAI", res
@pytest.mark.p2 @pytest.mark.p2
def test_embedding_model_none(self, api_key): def test_embedding_model_none(self, HttpApiAuth):
payload = {"name": "embedding_model_none", "embedding_model": None} payload = {"name": "embedding_model_none", "embedding_model": None}
res = create_dataset(api_key, payload) res = create_dataset(HttpApiAuth, payload)
assert res["code"] == 101, res assert res["code"] == 101, res
assert "Input should be a valid string" in res["message"], res assert "Input should be a valid string" in res["message"], res
@ -303,9 +303,9 @@ class TestDatasetCreate:
], ],
ids=["me", "team", "me_upercase", "team_upercase", "whitespace"], ids=["me", "team", "me_upercase", "team_upercase", "whitespace"],
) )
def test_permission(self, api_key, name, permission): def test_permission(self, HttpApiAuth, name, permission):
payload = {"name": name, "permission": permission} payload = {"name": name, "permission": permission}
res = create_dataset(api_key, payload) res = create_dataset(HttpApiAuth, payload)
assert res["code"] == 0, res assert res["code"] == 0, res
assert res["data"]["permission"] == permission.lower().strip(), res assert res["data"]["permission"] == permission.lower().strip(), res
@ -319,23 +319,23 @@ class TestDatasetCreate:
], ],
ids=["empty", "unknown", "type_error"], ids=["empty", "unknown", "type_error"],
) )
def test_permission_invalid(self, api_key, name, permission): def test_permission_invalid(self, HttpApiAuth, name, permission):
payload = {"name": name, "permission": permission} payload = {"name": name, "permission": permission}
res = create_dataset(api_key, payload) res = create_dataset(HttpApiAuth, payload)
assert res["code"] == 101 assert res["code"] == 101
assert "Input should be 'me' or 'team'" in res["message"] assert "Input should be 'me' or 'team'" in res["message"]
@pytest.mark.p2 @pytest.mark.p2
def test_permission_unset(self, api_key): def test_permission_unset(self, HttpApiAuth):
payload = {"name": "permission_unset"} payload = {"name": "permission_unset"}
res = create_dataset(api_key, payload) res = create_dataset(HttpApiAuth, payload)
assert res["code"] == 0, res assert res["code"] == 0, res
assert res["data"]["permission"] == "me", res assert res["data"]["permission"] == "me", res
@pytest.mark.p3 @pytest.mark.p3
def test_permission_none(self, api_key): def test_permission_none(self, HttpApiAuth):
payload = {"name": "permission_none", "permission": None} payload = {"name": "permission_none", "permission": None}
res = create_dataset(api_key, payload) res = create_dataset(HttpApiAuth, payload)
assert res["code"] == 101, res assert res["code"] == 101, res
assert "Input should be 'me' or 'team'" in res["message"], res assert "Input should be 'me' or 'team'" in res["message"], res
@ -358,9 +358,9 @@ class TestDatasetCreate:
], ],
ids=["naive", "book", "email", "laws", "manual", "one", "paper", "picture", "presentation", "qa", "table", "tag"], ids=["naive", "book", "email", "laws", "manual", "one", "paper", "picture", "presentation", "qa", "table", "tag"],
) )
def test_chunk_method(self, api_key, name, chunk_method): def test_chunk_method(self, HttpApiAuth, name, chunk_method):
payload = {"name": name, "chunk_method": chunk_method} payload = {"name": name, "chunk_method": chunk_method}
res = create_dataset(api_key, payload) res = create_dataset(HttpApiAuth, payload)
assert res["code"] == 0, res assert res["code"] == 0, res
assert res["data"]["chunk_method"] == chunk_method, res assert res["data"]["chunk_method"] == chunk_method, res
@ -374,23 +374,23 @@ class TestDatasetCreate:
], ],
ids=["empty", "unknown", "type_error"], ids=["empty", "unknown", "type_error"],
) )
def test_chunk_method_invalid(self, api_key, name, chunk_method): def test_chunk_method_invalid(self, HttpApiAuth, name, chunk_method):
payload = {"name": name, "chunk_method": chunk_method} payload = {"name": name, "chunk_method": chunk_method}
res = create_dataset(api_key, payload) res = create_dataset(HttpApiAuth, payload)
assert res["code"] == 101, res assert res["code"] == 101, res
assert "Input should be 'naive', 'book', 'email', 'laws', 'manual', 'one', 'paper', 'picture', 'presentation', 'qa', 'table' or 'tag'" in res["message"], res assert "Input should be 'naive', 'book', 'email', 'laws', 'manual', 'one', 'paper', 'picture', 'presentation', 'qa', 'table' or 'tag'" in res["message"], res
@pytest.mark.p2 @pytest.mark.p2
def test_chunk_method_unset(self, api_key): def test_chunk_method_unset(self, HttpApiAuth):
payload = {"name": "chunk_method_unset"} payload = {"name": "chunk_method_unset"}
res = create_dataset(api_key, payload) res = create_dataset(HttpApiAuth, payload)
assert res["code"] == 0, res assert res["code"] == 0, res
assert res["data"]["chunk_method"] == "naive", res assert res["data"]["chunk_method"] == "naive", res
@pytest.mark.p3 @pytest.mark.p3
def test_chunk_method_none(self, api_key): def test_chunk_method_none(self, HttpApiAuth):
payload = {"name": "chunk_method_none", "chunk_method": None} payload = {"name": "chunk_method_none", "chunk_method": None}
res = create_dataset(api_key, payload) res = create_dataset(HttpApiAuth, payload)
assert res["code"] == 101, res assert res["code"] == 101, res
assert "Input should be 'naive', 'book', 'email', 'laws', 'manual', 'one', 'paper', 'picture', 'presentation', 'qa', 'table' or 'tag'" in res["message"], res assert "Input should be 'naive', 'book', 'email', 'laws', 'manual', 'one', 'paper', 'picture', 'presentation', 'qa', 'table' or 'tag'" in res["message"], res
@ -404,9 +404,9 @@ class TestDatasetCreate:
], ],
ids=["min", "mid", "max"], ids=["min", "mid", "max"],
) )
def test_pagerank(self, api_key, name, pagerank): def test_pagerank(self, HttpApiAuth, name, pagerank):
payload = {"name": name, "pagerank": pagerank} payload = {"name": name, "pagerank": pagerank}
res = create_dataset(api_key, payload) res = create_dataset(HttpApiAuth, payload)
assert res["code"] == 0, res assert res["code"] == 0, res
assert res["data"]["pagerank"] == pagerank, res assert res["data"]["pagerank"] == pagerank, res
@ -419,23 +419,23 @@ class TestDatasetCreate:
], ],
ids=["min_limit", "max_limit"], ids=["min_limit", "max_limit"],
) )
def test_pagerank_invalid(self, api_key, name, pagerank, expected_message): def test_pagerank_invalid(self, HttpApiAuth, name, pagerank, expected_message):
payload = {"name": name, "pagerank": pagerank} payload = {"name": name, "pagerank": pagerank}
res = create_dataset(api_key, payload) res = create_dataset(HttpApiAuth, payload)
assert res["code"] == 101, res assert res["code"] == 101, res
assert expected_message in res["message"], res assert expected_message in res["message"], res
@pytest.mark.p3 @pytest.mark.p3
def test_pagerank_unset(self, api_key): def test_pagerank_unset(self, HttpApiAuth):
payload = {"name": "pagerank_unset"} payload = {"name": "pagerank_unset"}
res = create_dataset(api_key, payload) res = create_dataset(HttpApiAuth, payload)
assert res["code"] == 0, res assert res["code"] == 0, res
assert res["data"]["pagerank"] == 0, res assert res["data"]["pagerank"] == 0, res
@pytest.mark.p3 @pytest.mark.p3
def test_pagerank_none(self, api_key): def test_pagerank_none(self, HttpApiAuth):
payload = {"name": "pagerank_unset", "pagerank": None} payload = {"name": "pagerank_unset", "pagerank": None}
res = create_dataset(api_key, payload) res = create_dataset(HttpApiAuth, payload)
assert res["code"] == 101, res assert res["code"] == 101, res
assert "Input should be a valid integer" in res["message"], res assert "Input should be a valid integer" in res["message"], res
@ -543,9 +543,9 @@ class TestDatasetCreate:
"raptor_random_seed_min", "raptor_random_seed_min",
], ],
) )
def test_parser_config(self, api_key, name, parser_config): def test_parser_config(self, HttpApiAuth, name, parser_config):
payload = {"name": name, "parser_config": parser_config} payload = {"name": name, "parser_config": parser_config}
res = create_dataset(api_key, payload) res = create_dataset(HttpApiAuth, payload)
assert res["code"] == 0, res assert res["code"] == 0, res
for k, v in parser_config.items(): for k, v in parser_config.items():
if isinstance(v, dict): if isinstance(v, dict):
@ -670,16 +670,16 @@ class TestDatasetCreate:
"parser_config_type_invalid", "parser_config_type_invalid",
], ],
) )
def test_parser_config_invalid(self, api_key, name, parser_config, expected_message): def test_parser_config_invalid(self, HttpApiAuth, name, parser_config, expected_message):
payload = {"name": name, "parser_config": parser_config} payload = {"name": name, "parser_config": parser_config}
res = create_dataset(api_key, payload) res = create_dataset(HttpApiAuth, payload)
assert res["code"] == 101, res assert res["code"] == 101, res
assert expected_message in res["message"], res assert expected_message in res["message"], res
@pytest.mark.p2 @pytest.mark.p2
def test_parser_config_empty(self, api_key): def test_parser_config_empty(self, HttpApiAuth):
payload = {"name": "parser_config_empty", "parser_config": {}} payload = {"name": "parser_config_empty", "parser_config": {}}
res = create_dataset(api_key, payload) res = create_dataset(HttpApiAuth, payload)
assert res["code"] == 0, res assert res["code"] == 0, res
assert res["data"]["parser_config"] == { assert res["data"]["parser_config"] == {
"chunk_token_num": 128, "chunk_token_num": 128,
@ -690,9 +690,9 @@ class TestDatasetCreate:
}, res }, res
@pytest.mark.p2 @pytest.mark.p2
def test_parser_config_unset(self, api_key): def test_parser_config_unset(self, HttpApiAuth):
payload = {"name": "parser_config_unset"} payload = {"name": "parser_config_unset"}
res = create_dataset(api_key, payload) res = create_dataset(HttpApiAuth, payload)
assert res["code"] == 0, res assert res["code"] == 0, res
assert res["data"]["parser_config"] == { assert res["data"]["parser_config"] == {
"chunk_token_num": 128, "chunk_token_num": 128,
@ -703,9 +703,9 @@ class TestDatasetCreate:
}, res }, res
@pytest.mark.p3 @pytest.mark.p3
def test_parser_config_none(self, api_key): def test_parser_config_none(self, HttpApiAuth):
payload = {"name": "parser_config_none", "parser_config": None} payload = {"name": "parser_config_none", "parser_config": None}
res = create_dataset(api_key, payload) res = create_dataset(HttpApiAuth, payload)
assert res["code"] == 0, res assert res["code"] == 0, res
assert res["data"]["parser_config"] == { assert res["data"]["parser_config"] == {
"chunk_token_num": 128, "chunk_token_num": 128,
@ -733,7 +733,7 @@ class TestDatasetCreate:
{"name": "unknown_field", "unknown_field": "unknown_field"}, {"name": "unknown_field", "unknown_field": "unknown_field"},
], ],
) )
def test_unsupported_field(self, api_key, payload): def test_unsupported_field(self, HttpApiAuth, payload):
res = create_dataset(api_key, payload) res = create_dataset(HttpApiAuth, payload)
assert res["code"] == 101, res assert res["code"] == 101, res
assert "Extra inputs are not permitted" in res["message"], res assert "Extra inputs are not permitted" in res["message"], res

View File

@ -47,9 +47,9 @@ class TestAuthorization:
class TestRquest: class TestRquest:
@pytest.mark.p3 @pytest.mark.p3
def test_content_type_bad(self, api_key): def test_content_type_bad(self, HttpApiAuth):
BAD_CONTENT_TYPE = "text/xml" BAD_CONTENT_TYPE = "text/xml"
res = delete_datasets(api_key, headers={"Content-Type": BAD_CONTENT_TYPE}) res = delete_datasets(HttpApiAuth, headers={"Content-Type": BAD_CONTENT_TYPE})
assert res["code"] == 101, res assert res["code"] == 101, res
assert res["message"] == f"Unsupported content type: Expected application/json, got {BAD_CONTENT_TYPE}", res assert res["message"] == f"Unsupported content type: Expected application/json, got {BAD_CONTENT_TYPE}", res
@ -62,35 +62,35 @@ class TestRquest:
], ],
ids=["malformed_json_syntax", "invalid_request_payload_type"], ids=["malformed_json_syntax", "invalid_request_payload_type"],
) )
def test_payload_bad(self, api_key, payload, expected_message): def test_payload_bad(self, HttpApiAuth, payload, expected_message):
res = delete_datasets(api_key, data=payload) res = delete_datasets(HttpApiAuth, data=payload)
assert res["code"] == 101, res assert res["code"] == 101, res
assert res["message"] == expected_message, res assert res["message"] == expected_message, res
@pytest.mark.p3 @pytest.mark.p3
def test_payload_unset(self, api_key): def test_payload_unset(self, HttpApiAuth):
res = delete_datasets(api_key, None) res = delete_datasets(HttpApiAuth, None)
assert res["code"] == 101, res assert res["code"] == 101, res
assert res["message"] == "Malformed JSON syntax: Missing commas/brackets or invalid encoding", res assert res["message"] == "Malformed JSON syntax: Missing commas/brackets or invalid encoding", res
class TestCapability: class TestCapability:
@pytest.mark.p3 @pytest.mark.p3
def test_delete_dataset_1k(self, api_key): def test_delete_dataset_1k(self, HttpApiAuth):
ids = batch_create_datasets(api_key, 1_000) ids = batch_create_datasets(HttpApiAuth, 1_000)
res = delete_datasets(api_key, {"ids": ids}) res = delete_datasets(HttpApiAuth, {"ids": ids})
assert res["code"] == 0, res assert res["code"] == 0, res
res = list_datasets(api_key) res = list_datasets(HttpApiAuth)
assert len(res["data"]) == 0, res assert len(res["data"]) == 0, res
@pytest.mark.p3 @pytest.mark.p3
def test_concurrent_deletion(self, api_key): def test_concurrent_deletion(self, HttpApiAuth):
count = 1_000 count = 1_000
ids = batch_create_datasets(api_key, count) ids = batch_create_datasets(HttpApiAuth, count)
with ThreadPoolExecutor(max_workers=5) as executor: with ThreadPoolExecutor(max_workers=5) as executor:
futures = [executor.submit(delete_datasets, api_key, {"ids": ids[i : i + 1]}) for i in range(count)] futures = [executor.submit(delete_datasets, HttpApiAuth, {"ids": ids[i : i + 1]}) for i in range(count)]
responses = list(as_completed(futures)) responses = list(as_completed(futures))
assert len(responses) == count, responses assert len(responses) == count, responses
assert all(future.result()["code"] == 0 for future in futures) assert all(future.result()["code"] == 0 for future in futures)
@ -106,64 +106,64 @@ class TestDatasetsDelete:
], ],
ids=["single_dataset", "multiple_datasets"], ids=["single_dataset", "multiple_datasets"],
) )
def test_ids(self, api_key, add_datasets_func, func, expected_code, expected_message, remaining): def test_ids(self, HttpApiAuth, add_datasets_func, func, expected_code, expected_message, remaining):
dataset_ids = add_datasets_func dataset_ids = add_datasets_func
if callable(func): if callable(func):
payload = func(dataset_ids) payload = func(dataset_ids)
res = delete_datasets(api_key, payload) res = delete_datasets(HttpApiAuth, payload)
assert res["code"] == expected_code, res assert res["code"] == expected_code, res
res = list_datasets(api_key) res = list_datasets(HttpApiAuth)
assert len(res["data"]) == remaining, res assert len(res["data"]) == remaining, res
@pytest.mark.p1 @pytest.mark.p1
@pytest.mark.usefixtures("add_dataset_func") @pytest.mark.usefixtures("add_dataset_func")
def test_ids_empty(self, api_key): def test_ids_empty(self, HttpApiAuth):
payload = {"ids": []} payload = {"ids": []}
res = delete_datasets(api_key, payload) res = delete_datasets(HttpApiAuth, payload)
assert res["code"] == 0, res assert res["code"] == 0, res
res = list_datasets(api_key) res = list_datasets(HttpApiAuth)
assert len(res["data"]) == 1, res assert len(res["data"]) == 1, res
@pytest.mark.p1 @pytest.mark.p1
@pytest.mark.usefixtures("add_datasets_func") @pytest.mark.usefixtures("add_datasets_func")
def test_ids_none(self, api_key): def test_ids_none(self, HttpApiAuth):
payload = {"ids": None} payload = {"ids": None}
res = delete_datasets(api_key, payload) res = delete_datasets(HttpApiAuth, payload)
assert res["code"] == 0, res assert res["code"] == 0, res
res = list_datasets(api_key) res = list_datasets(HttpApiAuth)
assert len(res["data"]) == 0, res assert len(res["data"]) == 0, res
@pytest.mark.p2 @pytest.mark.p2
@pytest.mark.usefixtures("add_dataset_func") @pytest.mark.usefixtures("add_dataset_func")
def test_id_not_uuid(self, api_key): def test_id_not_uuid(self, HttpApiAuth):
payload = {"ids": ["not_uuid"]} payload = {"ids": ["not_uuid"]}
res = delete_datasets(api_key, payload) res = delete_datasets(HttpApiAuth, payload)
assert res["code"] == 101, res assert res["code"] == 101, res
assert "Invalid UUID1 format" in res["message"], res assert "Invalid UUID1 format" in res["message"], res
res = list_datasets(api_key) res = list_datasets(HttpApiAuth)
assert len(res["data"]) == 1, res assert len(res["data"]) == 1, res
@pytest.mark.p3 @pytest.mark.p3
@pytest.mark.usefixtures("add_dataset_func") @pytest.mark.usefixtures("add_dataset_func")
def test_id_not_uuid1(self, api_key): def test_id_not_uuid1(self, HttpApiAuth):
payload = {"ids": [uuid.uuid4().hex]} payload = {"ids": [uuid.uuid4().hex]}
res = delete_datasets(api_key, payload) res = delete_datasets(HttpApiAuth, payload)
assert res["code"] == 101, res assert res["code"] == 101, res
assert "Invalid UUID1 format" in res["message"], res assert "Invalid UUID1 format" in res["message"], res
@pytest.mark.p2 @pytest.mark.p2
@pytest.mark.usefixtures("add_dataset_func") @pytest.mark.usefixtures("add_dataset_func")
def test_id_wrong_uuid(self, api_key): def test_id_wrong_uuid(self, HttpApiAuth):
payload = {"ids": ["d94a8dc02c9711f0930f7fbc369eab6d"]} payload = {"ids": ["d94a8dc02c9711f0930f7fbc369eab6d"]}
res = delete_datasets(api_key, payload) res = delete_datasets(HttpApiAuth, payload)
assert res["code"] == 108, res assert res["code"] == 108, res
assert "lacks permission for dataset" in res["message"], res assert "lacks permission for dataset" in res["message"], res
res = list_datasets(api_key) res = list_datasets(HttpApiAuth)
assert len(res["data"]) == 1, res assert len(res["data"]) == 1, res
@pytest.mark.p2 @pytest.mark.p2
@ -175,46 +175,46 @@ class TestDatasetsDelete:
lambda r: {"ids": r + ["d94a8dc02c9711f0930f7fbc369eab6d"]}, lambda r: {"ids": r + ["d94a8dc02c9711f0930f7fbc369eab6d"]},
], ],
) )
def test_ids_partial_invalid(self, api_key, add_datasets_func, func): def test_ids_partial_invalid(self, HttpApiAuth, add_datasets_func, func):
dataset_ids = add_datasets_func dataset_ids = add_datasets_func
if callable(func): if callable(func):
payload = func(dataset_ids) payload = func(dataset_ids)
res = delete_datasets(api_key, payload) res = delete_datasets(HttpApiAuth, payload)
assert res["code"] == 108, res assert res["code"] == 108, res
assert "lacks permission for dataset" in res["message"], res assert "lacks permission for dataset" in res["message"], res
res = list_datasets(api_key) res = list_datasets(HttpApiAuth)
assert len(res["data"]) == 3, res assert len(res["data"]) == 3, res
@pytest.mark.p2 @pytest.mark.p2
def test_ids_duplicate(self, api_key, add_datasets_func): def test_ids_duplicate(self, HttpApiAuth, add_datasets_func):
dataset_ids = add_datasets_func dataset_ids = add_datasets_func
payload = {"ids": dataset_ids + dataset_ids} payload = {"ids": dataset_ids + dataset_ids}
res = delete_datasets(api_key, payload) res = delete_datasets(HttpApiAuth, payload)
assert res["code"] == 101, res assert res["code"] == 101, res
assert "Duplicate ids:" in res["message"], res assert "Duplicate ids:" in res["message"], res
res = list_datasets(api_key) res = list_datasets(HttpApiAuth)
assert len(res["data"]) == 3, res assert len(res["data"]) == 3, res
@pytest.mark.p2 @pytest.mark.p2
def test_repeated_delete(self, api_key, add_datasets_func): def test_repeated_delete(self, HttpApiAuth, add_datasets_func):
dataset_ids = add_datasets_func dataset_ids = add_datasets_func
payload = {"ids": dataset_ids} payload = {"ids": dataset_ids}
res = delete_datasets(api_key, payload) res = delete_datasets(HttpApiAuth, payload)
assert res["code"] == 0, res assert res["code"] == 0, res
res = delete_datasets(api_key, payload) res = delete_datasets(HttpApiAuth, payload)
assert res["code"] == 108, res assert res["code"] == 108, res
assert "lacks permission for dataset" in res["message"], res assert "lacks permission for dataset" in res["message"], res
@pytest.mark.p2 @pytest.mark.p2
@pytest.mark.usefixtures("add_dataset_func") @pytest.mark.usefixtures("add_dataset_func")
def test_field_unsupported(self, api_key): def test_field_unsupported(self, HttpApiAuth):
payload = {"unknown_field": "unknown_field"} payload = {"unknown_field": "unknown_field"}
res = delete_datasets(api_key, payload) res = delete_datasets(HttpApiAuth, payload)
assert res["code"] == 101, res assert res["code"] == 101, res
assert "Extra inputs are not permitted" in res["message"], res assert "Extra inputs are not permitted" in res["message"], res
res = list_datasets(api_key) res = list_datasets(HttpApiAuth)
assert len(res["data"]) == 1, res assert len(res["data"]) == 1, res

View File

@ -43,10 +43,10 @@ class TestAuthorization:
class TestCapability: class TestCapability:
@pytest.mark.p3 @pytest.mark.p3
def test_concurrent_list(self, api_key): def test_concurrent_list(self, HttpApiAuth):
count = 100 count = 100
with ThreadPoolExecutor(max_workers=5) as executor: with ThreadPoolExecutor(max_workers=5) as executor:
futures = [executor.submit(list_datasets, api_key) for i in range(count)] futures = [executor.submit(list_datasets, HttpApiAuth) for i in range(count)]
responses = list(as_completed(futures)) responses = list(as_completed(futures))
assert len(responses) == count, responses assert len(responses) == count, responses
assert all(future.result()["code"] == 0 for future in futures) assert all(future.result()["code"] == 0 for future in futures)
@ -55,14 +55,14 @@ class TestCapability:
@pytest.mark.usefixtures("add_datasets") @pytest.mark.usefixtures("add_datasets")
class TestDatasetsList: class TestDatasetsList:
@pytest.mark.p1 @pytest.mark.p1
def test_params_unset(self, api_key): def test_params_unset(self, HttpApiAuth):
res = list_datasets(api_key, None) res = list_datasets(HttpApiAuth, None)
assert res["code"] == 0, res assert res["code"] == 0, res
assert len(res["data"]) == 5, res assert len(res["data"]) == 5, res
@pytest.mark.p2 @pytest.mark.p2
def test_params_empty(self, api_key): def test_params_empty(self, HttpApiAuth):
res = list_datasets(api_key, {}) res = list_datasets(HttpApiAuth, {})
assert res["code"] == 0, res assert res["code"] == 0, res
assert len(res["data"]) == 5, res assert len(res["data"]) == 5, res
@ -78,8 +78,8 @@ class TestDatasetsList:
], ],
ids=["normal_middle_page", "normal_last_partial_page", "beyond_max_page", "string_page_number", "full_data_single_page"], ids=["normal_middle_page", "normal_last_partial_page", "beyond_max_page", "string_page_number", "full_data_single_page"],
) )
def test_page(self, api_key, params, expected_page_size): def test_page(self, HttpApiAuth, params, expected_page_size):
res = list_datasets(api_key, params) res = list_datasets(HttpApiAuth, params)
assert res["code"] == 0, res assert res["code"] == 0, res
assert len(res["data"]) == expected_page_size, res assert len(res["data"]) == expected_page_size, res
@ -92,15 +92,15 @@ class TestDatasetsList:
], ],
ids=["page_0", "page_a"], ids=["page_0", "page_a"],
) )
def test_page_invalid(self, api_key, params, expected_code, expected_message): def test_page_invalid(self, HttpApiAuth, params, expected_code, expected_message):
res = list_datasets(api_key, params=params) res = list_datasets(HttpApiAuth, params=params)
assert res["code"] == expected_code, res assert res["code"] == expected_code, res
assert expected_message in res["message"], res assert expected_message in res["message"], res
@pytest.mark.p2 @pytest.mark.p2
def test_page_none(self, api_key): def test_page_none(self, HttpApiAuth):
params = {"page": None} params = {"page": None}
res = list_datasets(api_key, params) res = list_datasets(HttpApiAuth, params)
assert res["code"] == 0, res assert res["code"] == 0, res
assert len(res["data"]) == 5, res assert len(res["data"]) == 5, res
@ -116,8 +116,8 @@ class TestDatasetsList:
], ],
ids=["min_valid_page_size", "medium_page_size", "page_size_equals_total", "page_size_exceeds_total", "string_type_page_size"], ids=["min_valid_page_size", "medium_page_size", "page_size_equals_total", "page_size_exceeds_total", "string_type_page_size"],
) )
def test_page_size(self, api_key, params, expected_page_size): def test_page_size(self, HttpApiAuth, params, expected_page_size):
res = list_datasets(api_key, params) res = list_datasets(HttpApiAuth, params)
assert res["code"] == 0, res assert res["code"] == 0, res
assert len(res["data"]) == expected_page_size, res assert len(res["data"]) == expected_page_size, res
@ -129,15 +129,15 @@ class TestDatasetsList:
({"page_size": "a"}, 101, "Input should be a valid integer, unable to parse string as an integer"), ({"page_size": "a"}, 101, "Input should be a valid integer, unable to parse string as an integer"),
], ],
) )
def test_page_size_invalid(self, api_key, params, expected_code, expected_message): def test_page_size_invalid(self, HttpApiAuth, params, expected_code, expected_message):
res = list_datasets(api_key, params) res = list_datasets(HttpApiAuth, params)
assert res["code"] == expected_code, res assert res["code"] == expected_code, res
assert expected_message in res["message"], res assert expected_message in res["message"], res
@pytest.mark.p2 @pytest.mark.p2
def test_page_size_none(self, api_key): def test_page_size_none(self, HttpApiAuth):
params = {"page_size": None} params = {"page_size": None}
res = list_datasets(api_key, params) res = list_datasets(HttpApiAuth, params)
assert res["code"] == 0, res assert res["code"] == 0, res
assert len(res["data"]) == 5, res assert len(res["data"]) == 5, res
@ -153,8 +153,8 @@ class TestDatasetsList:
], ],
ids=["orderby_create_time", "orderby_update_time", "orderby_create_time_upper", "orderby_update_time_upper", "whitespace"], ids=["orderby_create_time", "orderby_update_time", "orderby_create_time_upper", "orderby_update_time_upper", "whitespace"],
) )
def test_orderby(self, api_key, params, assertions): def test_orderby(self, HttpApiAuth, params, assertions):
res = list_datasets(api_key, params) res = list_datasets(HttpApiAuth, params)
assert res["code"] == 0, res assert res["code"] == 0, res
if callable(assertions): if callable(assertions):
assert assertions(res), res assert assertions(res), res
@ -168,15 +168,15 @@ class TestDatasetsList:
], ],
ids=["empty", "unknown"], ids=["empty", "unknown"],
) )
def test_orderby_invalid(self, api_key, params): def test_orderby_invalid(self, HttpApiAuth, params):
res = list_datasets(api_key, params) res = list_datasets(HttpApiAuth, params)
assert res["code"] == 101, res assert res["code"] == 101, res
assert "Input should be 'create_time' or 'update_time'" in res["message"], res assert "Input should be 'create_time' or 'update_time'" in res["message"], res
@pytest.mark.p3 @pytest.mark.p3
def test_orderby_none(self, api_key): def test_orderby_none(self, HttpApiAuth):
params = {"orderby": None} params = {"orderby": None}
res = list_datasets(api_key, params) res = list_datasets(HttpApiAuth, params)
assert res["code"] == 0, res assert res["code"] == 0, res
assert is_sorted(res["data"], "create_time", True), res assert is_sorted(res["data"], "create_time", True), res
@ -197,8 +197,8 @@ class TestDatasetsList:
], ],
ids=["desc=True", "desc=False", "desc=true", "desc=false", "desc=1", "desc=0", "desc=yes", "desc=no", "desc=y", "desc=n"], ids=["desc=True", "desc=False", "desc=true", "desc=false", "desc=1", "desc=0", "desc=yes", "desc=no", "desc=y", "desc=n"],
) )
def test_desc(self, api_key, params, assertions): def test_desc(self, HttpApiAuth, params, assertions):
res = list_datasets(api_key, params) res = list_datasets(HttpApiAuth, params)
assert res["code"] == 0, res assert res["code"] == 0, res
if callable(assertions): if callable(assertions):
assert assertions(res), res assert assertions(res), res
@ -212,88 +212,88 @@ class TestDatasetsList:
], ],
ids=["empty", "unknown"], ids=["empty", "unknown"],
) )
def test_desc_invalid(self, api_key, params): def test_desc_invalid(self, HttpApiAuth, params):
res = list_datasets(api_key, params) res = list_datasets(HttpApiAuth, params)
assert res["code"] == 101, res assert res["code"] == 101, res
assert "Input should be a valid boolean, unable to interpret input" in res["message"], res assert "Input should be a valid boolean, unable to interpret input" in res["message"], res
@pytest.mark.p3 @pytest.mark.p3
def test_desc_none(self, api_key): def test_desc_none(self, HttpApiAuth):
params = {"desc": None} params = {"desc": None}
res = list_datasets(api_key, params) res = list_datasets(HttpApiAuth, params)
assert res["code"] == 0, res assert res["code"] == 0, res
assert is_sorted(res["data"], "create_time", True), res assert is_sorted(res["data"], "create_time", True), res
@pytest.mark.p1 @pytest.mark.p1
def test_name(self, api_key): def test_name(self, HttpApiAuth):
params = {"name": "dataset_1"} params = {"name": "dataset_1"}
res = list_datasets(api_key, params) res = list_datasets(HttpApiAuth, params)
assert res["code"] == 0, res assert res["code"] == 0, res
assert len(res["data"]) == 1, res assert len(res["data"]) == 1, res
assert res["data"][0]["name"] == "dataset_1", res assert res["data"][0]["name"] == "dataset_1", res
@pytest.mark.p2 @pytest.mark.p2
def test_name_wrong(self, api_key): def test_name_wrong(self, HttpApiAuth):
params = {"name": "wrong name"} params = {"name": "wrong name"}
res = list_datasets(api_key, params) res = list_datasets(HttpApiAuth, params)
assert res["code"] == 108, res assert res["code"] == 108, res
assert "lacks permission for dataset" in res["message"], res assert "lacks permission for dataset" in res["message"], res
@pytest.mark.p2 @pytest.mark.p2
def test_name_empty(self, api_key): def test_name_empty(self, HttpApiAuth):
params = {"name": ""} params = {"name": ""}
res = list_datasets(api_key, params) res = list_datasets(HttpApiAuth, params)
assert res["code"] == 0, res assert res["code"] == 0, res
assert len(res["data"]) == 5, res assert len(res["data"]) == 5, res
@pytest.mark.p2 @pytest.mark.p2
def test_name_none(self, api_key): def test_name_none(self, HttpApiAuth):
params = {"name": None} params = {"name": None}
res = list_datasets(api_key, params) res = list_datasets(HttpApiAuth, params)
assert res["code"] == 0, res assert res["code"] == 0, res
assert len(res["data"]) == 5, res assert len(res["data"]) == 5, res
@pytest.mark.p1 @pytest.mark.p1
def test_id(self, api_key, add_datasets): def test_id(self, HttpApiAuth, add_datasets):
dataset_ids = add_datasets dataset_ids = add_datasets
params = {"id": dataset_ids[0]} params = {"id": dataset_ids[0]}
res = list_datasets(api_key, params) res = list_datasets(HttpApiAuth, params)
assert res["code"] == 0 assert res["code"] == 0
assert len(res["data"]) == 1 assert len(res["data"]) == 1
assert res["data"][0]["id"] == dataset_ids[0] assert res["data"][0]["id"] == dataset_ids[0]
@pytest.mark.p2 @pytest.mark.p2
def test_id_not_uuid(self, api_key): def test_id_not_uuid(self, HttpApiAuth):
params = {"id": "not_uuid"} params = {"id": "not_uuid"}
res = list_datasets(api_key, params) res = list_datasets(HttpApiAuth, params)
assert res["code"] == 101, res assert res["code"] == 101, res
assert "Invalid UUID1 format" in res["message"], res assert "Invalid UUID1 format" in res["message"], res
@pytest.mark.p2 @pytest.mark.p2
def test_id_not_uuid1(self, api_key): def test_id_not_uuid1(self, HttpApiAuth):
params = {"id": uuid.uuid4().hex} params = {"id": uuid.uuid4().hex}
res = list_datasets(api_key, params) res = list_datasets(HttpApiAuth, params)
assert res["code"] == 101, res assert res["code"] == 101, res
assert "Invalid UUID1 format" in res["message"], res assert "Invalid UUID1 format" in res["message"], res
@pytest.mark.p2 @pytest.mark.p2
def test_id_wrong_uuid(self, api_key): def test_id_wrong_uuid(self, HttpApiAuth):
params = {"id": "d94a8dc02c9711f0930f7fbc369eab6d"} params = {"id": "d94a8dc02c9711f0930f7fbc369eab6d"}
res = list_datasets(api_key, params) res = list_datasets(HttpApiAuth, params)
assert res["code"] == 108, res assert res["code"] == 108, res
assert "lacks permission for dataset" in res["message"], res assert "lacks permission for dataset" in res["message"], res
@pytest.mark.p2 @pytest.mark.p2
def test_id_empty(self, api_key): def test_id_empty(self, HttpApiAuth):
params = {"id": ""} params = {"id": ""}
res = list_datasets(api_key, params) res = list_datasets(HttpApiAuth, params)
assert res["code"] == 101, res assert res["code"] == 101, res
assert "Invalid UUID1 format" in res["message"], res assert "Invalid UUID1 format" in res["message"], res
@pytest.mark.p2 @pytest.mark.p2
def test_id_none(self, api_key): def test_id_none(self, HttpApiAuth):
params = {"id": None} params = {"id": None}
res = list_datasets(api_key, params) res = list_datasets(HttpApiAuth, params)
assert res["code"] == 0, res assert res["code"] == 0, res
assert len(res["data"]) == 5, res assert len(res["data"]) == 5, res
@@ -306,11 +306,11 @@ class TestDatasetsList:
], ],
ids=["name_and_id_match", "name_and_id_mismatch"], ids=["name_and_id_match", "name_and_id_mismatch"],
) )
def test_name_and_id(self, api_key, add_datasets, func, name, expected_num): def test_name_and_id(self, HttpApiAuth, add_datasets, func, name, expected_num):
dataset_ids = add_datasets dataset_ids = add_datasets
if callable(func): if callable(func):
params = {"id": func(dataset_ids), "name": name} params = {"id": func(dataset_ids), "name": name}
res = list_datasets(api_key, params) res = list_datasets(HttpApiAuth, params)
assert res["code"] == 0, res assert res["code"] == 0, res
assert len(res["data"]) == expected_num, res assert len(res["data"]) == expected_num, res
@@ -323,19 +323,19 @@ class TestDatasetsList:
], ],
ids=["name", "id"], ids=["name", "id"],
) )
def test_name_and_id_wrong(self, api_key, add_datasets, dataset_id, name): def test_name_and_id_wrong(self, HttpApiAuth, add_datasets, dataset_id, name):
dataset_ids = add_datasets dataset_ids = add_datasets
if callable(dataset_id): if callable(dataset_id):
params = {"id": dataset_id(dataset_ids), "name": name} params = {"id": dataset_id(dataset_ids), "name": name}
else: else:
params = {"id": dataset_id, "name": name} params = {"id": dataset_id, "name": name}
res = list_datasets(api_key, params) res = list_datasets(HttpApiAuth, params)
assert res["code"] == 108, res assert res["code"] == 108, res
assert "lacks permission for dataset" in res["message"], res assert "lacks permission for dataset" in res["message"], res
@pytest.mark.p2 @pytest.mark.p2
def test_field_unsupported(self, api_key): def test_field_unsupported(self, HttpApiAuth):
params = {"unknown_field": "unknown_field"} params = {"unknown_field": "unknown_field"}
res = list_datasets(api_key, params) res = list_datasets(HttpApiAuth, params)
assert res["code"] == 101, res assert res["code"] == 101, res
assert "Extra inputs are not permitted" in res["message"], res assert "Extra inputs are not permitted" in res["message"], res
@@ -49,10 +49,10 @@ class TestAuthorization:
class TestRquest: class TestRquest:
@pytest.mark.p3 @pytest.mark.p3
def test_bad_content_type(self, api_key, add_dataset_func): def test_bad_content_type(self, HttpApiAuth, add_dataset_func):
dataset_id = add_dataset_func dataset_id = add_dataset_func
BAD_CONTENT_TYPE = "text/xml" BAD_CONTENT_TYPE = "text/xml"
res = update_dataset(api_key, dataset_id, {"name": "bad_content_type"}, headers={"Content-Type": BAD_CONTENT_TYPE}) res = update_dataset(HttpApiAuth, dataset_id, {"name": "bad_content_type"}, headers={"Content-Type": BAD_CONTENT_TYPE})
assert res["code"] == 101, res assert res["code"] == 101, res
assert res["message"] == f"Unsupported content type: Expected application/json, got {BAD_CONTENT_TYPE}", res assert res["message"] == f"Unsupported content type: Expected application/json, got {BAD_CONTENT_TYPE}", res
@@ -65,34 +65,34 @@ class TestRquest:
], ],
ids=["malformed_json_syntax", "invalid_request_payload_type"], ids=["malformed_json_syntax", "invalid_request_payload_type"],
) )
def test_payload_bad(self, api_key, add_dataset_func, payload, expected_message): def test_payload_bad(self, HttpApiAuth, add_dataset_func, payload, expected_message):
dataset_id = add_dataset_func dataset_id = add_dataset_func
res = update_dataset(api_key, dataset_id, data=payload) res = update_dataset(HttpApiAuth, dataset_id, data=payload)
assert res["code"] == 101, res assert res["code"] == 101, res
assert res["message"] == expected_message, res assert res["message"] == expected_message, res
@pytest.mark.p2 @pytest.mark.p2
def test_payload_empty(self, api_key, add_dataset_func): def test_payload_empty(self, HttpApiAuth, add_dataset_func):
dataset_id = add_dataset_func dataset_id = add_dataset_func
res = update_dataset(api_key, dataset_id, {}) res = update_dataset(HttpApiAuth, dataset_id, {})
assert res["code"] == 101, res assert res["code"] == 101, res
assert res["message"] == "No properties were modified", res assert res["message"] == "No properties were modified", res
@pytest.mark.p3 @pytest.mark.p3
def test_payload_unset(self, api_key, add_dataset_func): def test_payload_unset(self, HttpApiAuth, add_dataset_func):
dataset_id = add_dataset_func dataset_id = add_dataset_func
res = update_dataset(api_key, dataset_id, None) res = update_dataset(HttpApiAuth, dataset_id, None)
assert res["code"] == 101, res assert res["code"] == 101, res
assert res["message"] == "Malformed JSON syntax: Missing commas/brackets or invalid encoding", res assert res["message"] == "Malformed JSON syntax: Missing commas/brackets or invalid encoding", res
class TestCapability: class TestCapability:
@pytest.mark.p3 @pytest.mark.p3
def test_update_dateset_concurrent(self, api_key, add_dataset_func): def test_update_dateset_concurrent(self, HttpApiAuth, add_dataset_func):
dataset_id = add_dataset_func dataset_id = add_dataset_func
count = 100 count = 100
with ThreadPoolExecutor(max_workers=5) as executor: with ThreadPoolExecutor(max_workers=5) as executor:
futures = [executor.submit(update_dataset, api_key, dataset_id, {"name": f"dataset_{i}"}) for i in range(count)] futures = [executor.submit(update_dataset, HttpApiAuth, dataset_id, {"name": f"dataset_{i}"}) for i in range(count)]
responses = list(as_completed(futures)) responses = list(as_completed(futures))
assert len(responses) == count, responses assert len(responses) == count, responses
assert all(future.result()["code"] == 0 for future in futures) assert all(future.result()["code"] == 0 for future in futures)
@@ -100,23 +100,23 @@ class TestCapability:
class TestDatasetUpdate: class TestDatasetUpdate:
@pytest.mark.p3 @pytest.mark.p3
def test_dataset_id_not_uuid(self, api_key): def test_dataset_id_not_uuid(self, HttpApiAuth):
payload = {"name": "not uuid"} payload = {"name": "not uuid"}
res = update_dataset(api_key, "not_uuid", payload) res = update_dataset(HttpApiAuth, "not_uuid", payload)
assert res["code"] == 101, res assert res["code"] == 101, res
assert "Invalid UUID1 format" in res["message"], res assert "Invalid UUID1 format" in res["message"], res
@pytest.mark.p3 @pytest.mark.p3
def test_dataset_id_not_uuid1(self, api_key): def test_dataset_id_not_uuid1(self, HttpApiAuth):
payload = {"name": "not uuid1"} payload = {"name": "not uuid1"}
res = update_dataset(api_key, uuid.uuid4().hex, payload) res = update_dataset(HttpApiAuth, uuid.uuid4().hex, payload)
assert res["code"] == 101, res assert res["code"] == 101, res
assert "Invalid UUID1 format" in res["message"], res assert "Invalid UUID1 format" in res["message"], res
@pytest.mark.p3 @pytest.mark.p3
def test_dataset_id_wrong_uuid(self, api_key): def test_dataset_id_wrong_uuid(self, HttpApiAuth):
payload = {"name": "wrong uuid"} payload = {"name": "wrong uuid"}
res = update_dataset(api_key, "d94a8dc02c9711f0930f7fbc369eab6d", payload) res = update_dataset(HttpApiAuth, "d94a8dc02c9711f0930f7fbc369eab6d", payload)
assert res["code"] == 108, res assert res["code"] == 108, res
assert "lacks permission for dataset" in res["message"], res assert "lacks permission for dataset" in res["message"], res
@@ -124,13 +124,13 @@ class TestDatasetUpdate:
@given(name=valid_names()) @given(name=valid_names())
@example("a" * 128) @example("a" * 128)
@settings(max_examples=20, suppress_health_check=[HealthCheck.function_scoped_fixture]) @settings(max_examples=20, suppress_health_check=[HealthCheck.function_scoped_fixture])
def test_name(self, api_key, add_dataset_func, name): def test_name(self, HttpApiAuth, add_dataset_func, name):
dataset_id = add_dataset_func dataset_id = add_dataset_func
payload = {"name": name} payload = {"name": name}
res = update_dataset(api_key, dataset_id, payload) res = update_dataset(HttpApiAuth, dataset_id, payload)
assert res["code"] == 0, res assert res["code"] == 0, res
res = list_datasets(api_key) res = list_datasets(HttpApiAuth)
assert res["code"] == 0, res assert res["code"] == 0, res
assert res["data"][0]["name"] == name, res assert res["data"][0]["name"] == name, res
@@ -146,50 +146,50 @@ class TestDatasetUpdate:
], ],
ids=["empty_name", "space_name", "too_long_name", "invalid_name", "None_name"], ids=["empty_name", "space_name", "too_long_name", "invalid_name", "None_name"],
) )
def test_name_invalid(self, api_key, add_dataset_func, name, expected_message): def test_name_invalid(self, HttpApiAuth, add_dataset_func, name, expected_message):
dataset_id = add_dataset_func dataset_id = add_dataset_func
payload = {"name": name} payload = {"name": name}
res = update_dataset(api_key, dataset_id, payload) res = update_dataset(HttpApiAuth, dataset_id, payload)
assert res["code"] == 101, res assert res["code"] == 101, res
assert expected_message in res["message"], res assert expected_message in res["message"], res
@pytest.mark.p3 @pytest.mark.p3
def test_name_duplicated(self, api_key, add_datasets_func): def test_name_duplicated(self, HttpApiAuth, add_datasets_func):
dataset_ids = add_datasets_func[0] dataset_ids = add_datasets_func[0]
name = "dataset_1" name = "dataset_1"
payload = {"name": name} payload = {"name": name}
res = update_dataset(api_key, dataset_ids, payload) res = update_dataset(HttpApiAuth, dataset_ids, payload)
assert res["code"] == 102, res assert res["code"] == 102, res
assert res["message"] == f"Dataset name '{name}' already exists", res assert res["message"] == f"Dataset name '{name}' already exists", res
@pytest.mark.p3 @pytest.mark.p3
def test_name_case_insensitive(self, api_key, add_datasets_func): def test_name_case_insensitive(self, HttpApiAuth, add_datasets_func):
dataset_id = add_datasets_func[0] dataset_id = add_datasets_func[0]
name = "DATASET_1" name = "DATASET_1"
payload = {"name": name} payload = {"name": name}
res = update_dataset(api_key, dataset_id, payload) res = update_dataset(HttpApiAuth, dataset_id, payload)
assert res["code"] == 102, res assert res["code"] == 102, res
assert res["message"] == f"Dataset name '{name}' already exists", res assert res["message"] == f"Dataset name '{name}' already exists", res
@pytest.mark.p2 @pytest.mark.p2
def test_avatar(self, api_key, add_dataset_func, tmp_path): def test_avatar(self, HttpApiAuth, add_dataset_func, tmp_path):
dataset_id = add_dataset_func dataset_id = add_dataset_func
fn = create_image_file(tmp_path / "ragflow_test.png") fn = create_image_file(tmp_path / "ragflow_test.png")
payload = { payload = {
"avatar": f"data:image/png;base64,{encode_avatar(fn)}", "avatar": f"data:image/png;base64,{encode_avatar(fn)}",
} }
res = update_dataset(api_key, dataset_id, payload) res = update_dataset(HttpApiAuth, dataset_id, payload)
assert res["code"] == 0, res assert res["code"] == 0, res
res = list_datasets(api_key) res = list_datasets(HttpApiAuth)
assert res["code"] == 0, res assert res["code"] == 0, res
assert res["data"][0]["avatar"] == f"data:image/png;base64,{encode_avatar(fn)}", res assert res["data"][0]["avatar"] == f"data:image/png;base64,{encode_avatar(fn)}", res
@pytest.mark.p2 @pytest.mark.p2
def test_avatar_exceeds_limit_length(self, api_key, add_dataset_func): def test_avatar_exceeds_limit_length(self, HttpApiAuth, add_dataset_func):
dataset_id = add_dataset_func dataset_id = add_dataset_func
payload = {"avatar": "a" * 65536} payload = {"avatar": "a" * 65536}
res = update_dataset(api_key, dataset_id, payload) res = update_dataset(HttpApiAuth, dataset_id, payload)
assert res["code"] == 101, res assert res["code"] == 101, res
assert "String should have at most 65535 characters" in res["message"], res assert "String should have at most 65535 characters" in res["message"], res
@@ -204,52 +204,52 @@ class TestDatasetUpdate:
], ],
ids=["empty_prefix", "missing_comma", "unsupported_mine_type", "invalid_mine_type"], ids=["empty_prefix", "missing_comma", "unsupported_mine_type", "invalid_mine_type"],
) )
def test_avatar_invalid_prefix(self, api_key, add_dataset_func, tmp_path, avatar_prefix, expected_message): def test_avatar_invalid_prefix(self, HttpApiAuth, add_dataset_func, tmp_path, avatar_prefix, expected_message):
dataset_id = add_dataset_func dataset_id = add_dataset_func
fn = create_image_file(tmp_path / "ragflow_test.png") fn = create_image_file(tmp_path / "ragflow_test.png")
payload = {"avatar": f"{avatar_prefix}{encode_avatar(fn)}"} payload = {"avatar": f"{avatar_prefix}{encode_avatar(fn)}"}
res = update_dataset(api_key, dataset_id, payload) res = update_dataset(HttpApiAuth, dataset_id, payload)
assert res["code"] == 101, res assert res["code"] == 101, res
assert expected_message in res["message"], res assert expected_message in res["message"], res
@pytest.mark.p3 @pytest.mark.p3
def test_avatar_none(self, api_key, add_dataset_func): def test_avatar_none(self, HttpApiAuth, add_dataset_func):
dataset_id = add_dataset_func dataset_id = add_dataset_func
payload = {"avatar": None} payload = {"avatar": None}
res = update_dataset(api_key, dataset_id, payload) res = update_dataset(HttpApiAuth, dataset_id, payload)
assert res["code"] == 0, res assert res["code"] == 0, res
res = list_datasets(api_key) res = list_datasets(HttpApiAuth)
assert res["code"] == 0, res assert res["code"] == 0, res
assert res["data"][0]["avatar"] is None, res assert res["data"][0]["avatar"] is None, res
@pytest.mark.p2 @pytest.mark.p2
def test_description(self, api_key, add_dataset_func): def test_description(self, HttpApiAuth, add_dataset_func):
dataset_id = add_dataset_func dataset_id = add_dataset_func
payload = {"description": "description"} payload = {"description": "description"}
res = update_dataset(api_key, dataset_id, payload) res = update_dataset(HttpApiAuth, dataset_id, payload)
assert res["code"] == 0 assert res["code"] == 0
res = list_datasets(api_key, {"id": dataset_id}) res = list_datasets(HttpApiAuth, {"id": dataset_id})
assert res["code"] == 0, res assert res["code"] == 0, res
assert res["data"][0]["description"] == "description" assert res["data"][0]["description"] == "description"
@pytest.mark.p2 @pytest.mark.p2
def test_description_exceeds_limit_length(self, api_key, add_dataset_func): def test_description_exceeds_limit_length(self, HttpApiAuth, add_dataset_func):
dataset_id = add_dataset_func dataset_id = add_dataset_func
payload = {"description": "a" * 65536} payload = {"description": "a" * 65536}
res = update_dataset(api_key, dataset_id, payload) res = update_dataset(HttpApiAuth, dataset_id, payload)
assert res["code"] == 101, res assert res["code"] == 101, res
assert "String should have at most 65535 characters" in res["message"], res assert "String should have at most 65535 characters" in res["message"], res
@pytest.mark.p3 @pytest.mark.p3
def test_description_none(self, api_key, add_dataset_func): def test_description_none(self, HttpApiAuth, add_dataset_func):
dataset_id = add_dataset_func dataset_id = add_dataset_func
payload = {"description": None} payload = {"description": None}
res = update_dataset(api_key, dataset_id, payload) res = update_dataset(HttpApiAuth, dataset_id, payload)
assert res["code"] == 0, res assert res["code"] == 0, res
res = list_datasets(api_key, {"id": dataset_id}) res = list_datasets(HttpApiAuth, {"id": dataset_id})
assert res["code"] == 0, res assert res["code"] == 0, res
assert res["data"][0]["description"] is None assert res["data"][0]["description"] is None
@@ -263,13 +263,13 @@ class TestDatasetUpdate:
], ],
ids=["builtin_baai", "builtin_youdao", "tenant_zhipu"], ids=["builtin_baai", "builtin_youdao", "tenant_zhipu"],
) )
def test_embedding_model(self, api_key, add_dataset_func, embedding_model): def test_embedding_model(self, HttpApiAuth, add_dataset_func, embedding_model):
dataset_id = add_dataset_func dataset_id = add_dataset_func
payload = {"embedding_model": embedding_model} payload = {"embedding_model": embedding_model}
res = update_dataset(api_key, dataset_id, payload) res = update_dataset(HttpApiAuth, dataset_id, payload)
assert res["code"] == 0, res assert res["code"] == 0, res
res = list_datasets(api_key) res = list_datasets(HttpApiAuth)
assert res["code"] == 0, res assert res["code"] == 0, res
assert res["data"][0]["embedding_model"] == embedding_model, res assert res["data"][0]["embedding_model"] == embedding_model, res
@@ -284,10 +284,10 @@ class TestDatasetUpdate:
], ],
ids=["unknown_llm_name", "unknown_llm_factory", "tenant_no_auth_default_tenant_llm", "tenant_no_auth"], ids=["unknown_llm_name", "unknown_llm_factory", "tenant_no_auth_default_tenant_llm", "tenant_no_auth"],
) )
def test_embedding_model_invalid(self, api_key, add_dataset_func, name, embedding_model): def test_embedding_model_invalid(self, HttpApiAuth, add_dataset_func, name, embedding_model):
dataset_id = add_dataset_func dataset_id = add_dataset_func
payload = {"name": name, "embedding_model": embedding_model} payload = {"name": name, "embedding_model": embedding_model}
res = update_dataset(api_key, dataset_id, payload) res = update_dataset(HttpApiAuth, dataset_id, payload)
assert res["code"] == 101, res assert res["code"] == 101, res
if "tenant_no_auth" in name: if "tenant_no_auth" in name:
assert res["message"] == f"Unauthorized model: <{embedding_model}>", res assert res["message"] == f"Unauthorized model: <{embedding_model}>", res
@@ -306,10 +306,10 @@ class TestDatasetUpdate:
], ],
ids=["missing_at", "empty_model_name", "empty_provider", "whitespace_only_model_name", "whitespace_only_provider"], ids=["missing_at", "empty_model_name", "empty_provider", "whitespace_only_model_name", "whitespace_only_provider"],
) )
def test_embedding_model_format(self, api_key, add_dataset_func, name, embedding_model): def test_embedding_model_format(self, HttpApiAuth, add_dataset_func, name, embedding_model):
dataset_id = add_dataset_func dataset_id = add_dataset_func
payload = {"name": name, "embedding_model": embedding_model} payload = {"name": name, "embedding_model": embedding_model}
res = update_dataset(api_key, dataset_id, payload) res = update_dataset(HttpApiAuth, dataset_id, payload)
assert res["code"] == 101, res assert res["code"] == 101, res
if name == "missing_at": if name == "missing_at":
assert "Embedding model identifier must follow <model_name>@<provider> format" in res["message"], res assert "Embedding model identifier must follow <model_name>@<provider> format" in res["message"], res
@@ -317,10 +317,10 @@ class TestDatasetUpdate:
assert "Both model_name and provider must be non-empty strings" in res["message"], res assert "Both model_name and provider must be non-empty strings" in res["message"], res
@pytest.mark.p2 @pytest.mark.p2
def test_embedding_model_none(self, api_key, add_dataset_func): def test_embedding_model_none(self, HttpApiAuth, add_dataset_func):
dataset_id = add_dataset_func dataset_id = add_dataset_func
payload = {"embedding_model": None} payload = {"embedding_model": None}
res = update_dataset(api_key, dataset_id, payload) res = update_dataset(HttpApiAuth, dataset_id, payload)
assert res["code"] == 101, res assert res["code"] == 101, res
assert "Input should be a valid string" in res["message"], res assert "Input should be a valid string" in res["message"], res
@@ -336,13 +336,13 @@ class TestDatasetUpdate:
], ],
ids=["me", "team", "me_upercase", "team_upercase", "whitespace"], ids=["me", "team", "me_upercase", "team_upercase", "whitespace"],
) )
def test_permission(self, api_key, add_dataset_func, permission): def test_permission(self, HttpApiAuth, add_dataset_func, permission):
dataset_id = add_dataset_func dataset_id = add_dataset_func
payload = {"permission": permission} payload = {"permission": permission}
res = update_dataset(api_key, dataset_id, payload) res = update_dataset(HttpApiAuth, dataset_id, payload)
assert res["code"] == 0, res assert res["code"] == 0, res
res = list_datasets(api_key) res = list_datasets(HttpApiAuth)
assert res["code"] == 0, res assert res["code"] == 0, res
assert res["data"][0]["permission"] == permission.lower().strip(), res assert res["data"][0]["permission"] == permission.lower().strip(), res
@@ -356,18 +356,18 @@ class TestDatasetUpdate:
], ],
ids=["empty", "unknown", "type_error"], ids=["empty", "unknown", "type_error"],
) )
def test_permission_invalid(self, api_key, add_dataset_func, permission): def test_permission_invalid(self, HttpApiAuth, add_dataset_func, permission):
dataset_id = add_dataset_func dataset_id = add_dataset_func
payload = {"permission": permission} payload = {"permission": permission}
res = update_dataset(api_key, dataset_id, payload) res = update_dataset(HttpApiAuth, dataset_id, payload)
assert res["code"] == 101 assert res["code"] == 101
assert "Input should be 'me' or 'team'" in res["message"] assert "Input should be 'me' or 'team'" in res["message"]
@pytest.mark.p3 @pytest.mark.p3
def test_permission_none(self, api_key, add_dataset_func): def test_permission_none(self, HttpApiAuth, add_dataset_func):
dataset_id = add_dataset_func dataset_id = add_dataset_func
payload = {"permission": None} payload = {"permission": None}
res = update_dataset(api_key, dataset_id, payload) res = update_dataset(HttpApiAuth, dataset_id, payload)
assert res["code"] == 101, res assert res["code"] == 101, res
assert "Input should be 'me' or 'team'" in res["message"], res assert "Input should be 'me' or 'team'" in res["message"], res
@@ -390,13 +390,13 @@ class TestDatasetUpdate:
], ],
ids=["naive", "book", "email", "laws", "manual", "one", "paper", "picture", "presentation", "qa", "table", "tag"], ids=["naive", "book", "email", "laws", "manual", "one", "paper", "picture", "presentation", "qa", "table", "tag"],
) )
def test_chunk_method(self, api_key, add_dataset_func, chunk_method): def test_chunk_method(self, HttpApiAuth, add_dataset_func, chunk_method):
dataset_id = add_dataset_func dataset_id = add_dataset_func
payload = {"chunk_method": chunk_method} payload = {"chunk_method": chunk_method}
res = update_dataset(api_key, dataset_id, payload) res = update_dataset(HttpApiAuth, dataset_id, payload)
assert res["code"] == 0, res assert res["code"] == 0, res
res = list_datasets(api_key) res = list_datasets(HttpApiAuth)
assert res["code"] == 0, res assert res["code"] == 0, res
assert res["data"][0]["chunk_method"] == chunk_method, res assert res["data"][0]["chunk_method"] == chunk_method, res
@@ -410,30 +410,30 @@ class TestDatasetUpdate:
], ],
ids=["empty", "unknown", "type_error"], ids=["empty", "unknown", "type_error"],
) )
def test_chunk_method_invalid(self, api_key, add_dataset_func, chunk_method): def test_chunk_method_invalid(self, HttpApiAuth, add_dataset_func, chunk_method):
dataset_id = add_dataset_func dataset_id = add_dataset_func
payload = {"chunk_method": chunk_method} payload = {"chunk_method": chunk_method}
res = update_dataset(api_key, dataset_id, payload) res = update_dataset(HttpApiAuth, dataset_id, payload)
assert res["code"] == 101, res assert res["code"] == 101, res
assert "Input should be 'naive', 'book', 'email', 'laws', 'manual', 'one', 'paper', 'picture', 'presentation', 'qa', 'table' or 'tag'" in res["message"], res assert "Input should be 'naive', 'book', 'email', 'laws', 'manual', 'one', 'paper', 'picture', 'presentation', 'qa', 'table' or 'tag'" in res["message"], res
@pytest.mark.p3 @pytest.mark.p3
def test_chunk_method_none(self, api_key, add_dataset_func): def test_chunk_method_none(self, HttpApiAuth, add_dataset_func):
dataset_id = add_dataset_func dataset_id = add_dataset_func
payload = {"chunk_method": None} payload = {"chunk_method": None}
res = update_dataset(api_key, dataset_id, payload) res = update_dataset(HttpApiAuth, dataset_id, payload)
assert res["code"] == 101, res assert res["code"] == 101, res
assert "Input should be 'naive', 'book', 'email', 'laws', 'manual', 'one', 'paper', 'picture', 'presentation', 'qa', 'table' or 'tag'" in res["message"], res assert "Input should be 'naive', 'book', 'email', 'laws', 'manual', 'one', 'paper', 'picture', 'presentation', 'qa', 'table' or 'tag'" in res["message"], res
@pytest.mark.p2 @pytest.mark.p2
@pytest.mark.parametrize("pagerank", [0, 50, 100], ids=["min", "mid", "max"]) @pytest.mark.parametrize("pagerank", [0, 50, 100], ids=["min", "mid", "max"])
def test_pagerank(self, api_key, add_dataset_func, pagerank): def test_pagerank(self, HttpApiAuth, add_dataset_func, pagerank):
dataset_id = add_dataset_func dataset_id = add_dataset_func
payload = {"pagerank": pagerank} payload = {"pagerank": pagerank}
res = update_dataset(api_key, dataset_id, payload) res = update_dataset(HttpApiAuth, dataset_id, payload)
assert res["code"] == 0 assert res["code"] == 0
res = list_datasets(api_key, {"id": dataset_id}) res = list_datasets(HttpApiAuth, {"id": dataset_id})
assert res["code"] == 0, res assert res["code"] == 0, res
assert res["data"][0]["pagerank"] == pagerank assert res["data"][0]["pagerank"] == pagerank
@@ -446,18 +446,18 @@ class TestDatasetUpdate:
], ],
ids=["min_limit", "max_limit"], ids=["min_limit", "max_limit"],
) )
def test_pagerank_invalid(self, api_key, add_dataset_func, pagerank, expected_message): def test_pagerank_invalid(self, HttpApiAuth, add_dataset_func, pagerank, expected_message):
dataset_id = add_dataset_func dataset_id = add_dataset_func
payload = {"pagerank": pagerank} payload = {"pagerank": pagerank}
res = update_dataset(api_key, dataset_id, payload) res = update_dataset(HttpApiAuth, dataset_id, payload)
assert res["code"] == 101, res assert res["code"] == 101, res
assert expected_message in res["message"], res assert expected_message in res["message"], res
@pytest.mark.p3 @pytest.mark.p3
def test_pagerank_none(self, api_key, add_dataset_func): def test_pagerank_none(self, HttpApiAuth, add_dataset_func):
dataset_id = add_dataset_func dataset_id = add_dataset_func
payload = {"pagerank": None} payload = {"pagerank": None}
res = update_dataset(api_key, dataset_id, payload) res = update_dataset(HttpApiAuth, dataset_id, payload)
assert res["code"] == 101, res assert res["code"] == 101, res
assert "Input should be a valid integer" in res["message"], res assert "Input should be a valid integer" in res["message"], res
@@ -565,13 +565,13 @@ class TestDatasetUpdate:
"raptor_random_seed_min", "raptor_random_seed_min",
], ],
) )
def test_parser_config(self, api_key, add_dataset_func, parser_config): def test_parser_config(self, HttpApiAuth, add_dataset_func, parser_config):
dataset_id = add_dataset_func dataset_id = add_dataset_func
payload = {"parser_config": parser_config} payload = {"parser_config": parser_config}
res = update_dataset(api_key, dataset_id, payload) res = update_dataset(HttpApiAuth, dataset_id, payload)
assert res["code"] == 0, res assert res["code"] == 0, res
res = list_datasets(api_key) res = list_datasets(HttpApiAuth)
assert res["code"] == 0, res assert res["code"] == 0, res
for k, v in parser_config.items(): for k, v in parser_config.items():
if isinstance(v, dict): if isinstance(v, dict):
@@ -696,21 +696,21 @@ class TestDatasetUpdate:
"parser_config_type_invalid", "parser_config_type_invalid",
], ],
) )
def test_parser_config_invalid(self, api_key, add_dataset_func, parser_config, expected_message): def test_parser_config_invalid(self, HttpApiAuth, add_dataset_func, parser_config, expected_message):
dataset_id = add_dataset_func dataset_id = add_dataset_func
payload = {"parser_config": parser_config} payload = {"parser_config": parser_config}
res = update_dataset(api_key, dataset_id, payload) res = update_dataset(HttpApiAuth, dataset_id, payload)
assert res["code"] == 101, res assert res["code"] == 101, res
assert expected_message in res["message"], res assert expected_message in res["message"], res
@pytest.mark.p2 @pytest.mark.p2
def test_parser_config_empty(self, api_key, add_dataset_func): def test_parser_config_empty(self, HttpApiAuth, add_dataset_func):
dataset_id = add_dataset_func dataset_id = add_dataset_func
payload = {"parser_config": {}} payload = {"parser_config": {}}
res = update_dataset(api_key, dataset_id, payload) res = update_dataset(HttpApiAuth, dataset_id, payload)
assert res["code"] == 0, res assert res["code"] == 0, res
res = list_datasets(api_key) res = list_datasets(HttpApiAuth)
assert res["code"] == 0, res assert res["code"] == 0, res
assert res["data"][0]["parser_config"] == { assert res["data"][0]["parser_config"] == {
"chunk_token_num": 128, "chunk_token_num": 128,
@@ -721,13 +721,13 @@ class TestDatasetUpdate:
}, res }, res
@pytest.mark.p3 @pytest.mark.p3
def test_parser_config_none(self, api_key, add_dataset_func): def test_parser_config_none(self, HttpApiAuth, add_dataset_func):
dataset_id = add_dataset_func dataset_id = add_dataset_func
payload = {"parser_config": None} payload = {"parser_config": None}
res = update_dataset(api_key, dataset_id, payload) res = update_dataset(HttpApiAuth, dataset_id, payload)
assert res["code"] == 0, res assert res["code"] == 0, res
res = list_datasets(api_key, {"id": dataset_id}) res = list_datasets(HttpApiAuth, {"id": dataset_id})
assert res["code"] == 0, res assert res["code"] == 0, res
assert res["data"][0]["parser_config"] == { assert res["data"][0]["parser_config"] == {
"chunk_token_num": 128, "chunk_token_num": 128,
@@ -738,35 +738,35 @@ class TestDatasetUpdate:
}, res }, res
@pytest.mark.p3 @pytest.mark.p3
def test_parser_config_empty_with_chunk_method_change(self, api_key, add_dataset_func): def test_parser_config_empty_with_chunk_method_change(self, HttpApiAuth, add_dataset_func):
dataset_id = add_dataset_func dataset_id = add_dataset_func
payload = {"chunk_method": "qa", "parser_config": {}} payload = {"chunk_method": "qa", "parser_config": {}}
res = update_dataset(api_key, dataset_id, payload) res = update_dataset(HttpApiAuth, dataset_id, payload)
assert res["code"] == 0, res assert res["code"] == 0, res
res = list_datasets(api_key) res = list_datasets(HttpApiAuth)
assert res["code"] == 0, res assert res["code"] == 0, res
assert res["data"][0]["parser_config"] == {"raptor": {"use_raptor": False}}, res assert res["data"][0]["parser_config"] == {"raptor": {"use_raptor": False}}, res
@pytest.mark.p3 @pytest.mark.p3
def test_parser_config_unset_with_chunk_method_change(self, api_key, add_dataset_func): def test_parser_config_unset_with_chunk_method_change(self, HttpApiAuth, add_dataset_func):
dataset_id = add_dataset_func dataset_id = add_dataset_func
payload = {"chunk_method": "qa"} payload = {"chunk_method": "qa"}
res = update_dataset(api_key, dataset_id, payload) res = update_dataset(HttpApiAuth, dataset_id, payload)
assert res["code"] == 0, res assert res["code"] == 0, res
res = list_datasets(api_key) res = list_datasets(HttpApiAuth)
assert res["code"] == 0, res assert res["code"] == 0, res
assert res["data"][0]["parser_config"] == {"raptor": {"use_raptor": False}}, res assert res["data"][0]["parser_config"] == {"raptor": {"use_raptor": False}}, res
@pytest.mark.p3 @pytest.mark.p3
def test_parser_config_none_with_chunk_method_change(self, api_key, add_dataset_func): def test_parser_config_none_with_chunk_method_change(self, HttpApiAuth, add_dataset_func):
dataset_id = add_dataset_func dataset_id = add_dataset_func
payload = {"chunk_method": "qa", "parser_config": None} payload = {"chunk_method": "qa", "parser_config": None}
res = update_dataset(api_key, dataset_id, payload) res = update_dataset(HttpApiAuth, dataset_id, payload)
assert res["code"] == 0, res assert res["code"] == 0, res
res = list_datasets(api_key, {"id": dataset_id}) res = list_datasets(HttpApiAuth, {"id": dataset_id})
assert res["code"] == 0, res assert res["code"] == 0, res
assert res["data"][0]["parser_config"] == {"raptor": {"use_raptor": False}}, res assert res["data"][0]["parser_config"] == {"raptor": {"use_raptor": False}}, res
@@ -788,24 +788,24 @@ class TestDatasetUpdate:
{"unknown_field": "unknown_field"}, {"unknown_field": "unknown_field"},
], ],
) )
def test_field_unsupported(self, api_key, add_dataset_func, payload): def test_field_unsupported(self, HttpApiAuth, add_dataset_func, payload):
dataset_id = add_dataset_func dataset_id = add_dataset_func
res = update_dataset(api_key, dataset_id, payload) res = update_dataset(HttpApiAuth, dataset_id, payload)
assert res["code"] == 101, res assert res["code"] == 101, res
assert "Extra inputs are not permitted" in res["message"], res assert "Extra inputs are not permitted" in res["message"], res
@pytest.mark.p2 @pytest.mark.p2
def test_field_unset(self, api_key, add_dataset_func): def test_field_unset(self, HttpApiAuth, add_dataset_func):
dataset_id = add_dataset_func dataset_id = add_dataset_func
res = list_datasets(api_key) res = list_datasets(HttpApiAuth)
assert res["code"] == 0, res assert res["code"] == 0, res
original_data = res["data"][0] original_data = res["data"][0]
payload = {"name": "default_unset"} payload = {"name": "default_unset"}
res = update_dataset(api_key, dataset_id, payload) res = update_dataset(HttpApiAuth, dataset_id, payload)
assert res["code"] == 0, res assert res["code"] == 0, res
res = list_datasets(api_key) res = list_datasets(HttpApiAuth)
assert res["code"] == 0, res assert res["code"] == 0, res
assert res["data"][0]["avatar"] == original_data["avatar"], res assert res["data"][0]["avatar"] == original_data["avatar"], res
assert res["data"][0]["description"] == original_data["description"], res assert res["data"][0]["description"] == original_data["description"], res
@@ -20,33 +20,33 @@ from common import bulk_upload_documents, delete_documents
@pytest.fixture(scope="function") @pytest.fixture(scope="function")
def add_document_func(request, api_key, add_dataset, ragflow_tmp_dir): def add_document_func(request, HttpApiAuth, add_dataset, ragflow_tmp_dir):
def cleanup(): def cleanup():
delete_documents(api_key, dataset_id, {"ids": None}) delete_documents(HttpApiAuth, dataset_id, {"ids": None})
request.addfinalizer(cleanup) request.addfinalizer(cleanup)
dataset_id = add_dataset dataset_id = add_dataset
return dataset_id, bulk_upload_documents(api_key, dataset_id, 1, ragflow_tmp_dir)[0] return dataset_id, bulk_upload_documents(HttpApiAuth, dataset_id, 1, ragflow_tmp_dir)[0]
@pytest.fixture(scope="class") @pytest.fixture(scope="class")
def add_documents(request, api_key, add_dataset, ragflow_tmp_dir): def add_documents(request, HttpApiAuth, add_dataset, ragflow_tmp_dir):
def cleanup(): def cleanup():
delete_documents(api_key, dataset_id, {"ids": None}) delete_documents(HttpApiAuth, dataset_id, {"ids": None})
request.addfinalizer(cleanup) request.addfinalizer(cleanup)
dataset_id = add_dataset dataset_id = add_dataset
return dataset_id, bulk_upload_documents(api_key, dataset_id, 5, ragflow_tmp_dir) return dataset_id, bulk_upload_documents(HttpApiAuth, dataset_id, 5, ragflow_tmp_dir)
@pytest.fixture(scope="function") @pytest.fixture(scope="function")
def add_documents_func(request, api_key, add_dataset_func, ragflow_tmp_dir): def add_documents_func(request, HttpApiAuth, add_dataset_func, ragflow_tmp_dir):
def cleanup(): def cleanup():
delete_documents(api_key, dataset_id, {"ids": None}) delete_documents(HttpApiAuth, dataset_id, {"ids": None})
request.addfinalizer(cleanup) request.addfinalizer(cleanup)
dataset_id = add_dataset_func dataset_id = add_dataset_func
return dataset_id, bulk_upload_documents(api_key, dataset_id, 3, ragflow_tmp_dir) return dataset_id, bulk_upload_documents(HttpApiAuth, dataset_id, 3, ragflow_tmp_dir)
@@ -65,7 +65,7 @@ class TestDocumentsDeletion:
) )
def test_basic_scenarios( def test_basic_scenarios(
self, self,
api_key, HttpApiAuth,
add_documents_func, add_documents_func,
payload, payload,
expected_code, expected_code,
@@ -75,12 +75,12 @@ class TestDocumentsDeletion:
dataset_id, document_ids = add_documents_func dataset_id, document_ids = add_documents_func
if callable(payload): if callable(payload):
payload = payload(document_ids) payload = payload(document_ids)
res = delete_documents(api_key, dataset_id, payload) res = delete_documents(HttpApiAuth, dataset_id, payload)
assert res["code"] == expected_code assert res["code"] == expected_code
if res["code"] != 0: if res["code"] != 0:
assert res["message"] == expected_message assert res["message"] == expected_message
res = list_documents(api_key, dataset_id) res = list_documents(HttpApiAuth, dataset_id)
assert len(res["data"]["docs"]) == remaining assert len(res["data"]["docs"]) == remaining
assert res["data"]["total"] == remaining assert res["data"]["total"] == remaining
@@ -96,9 +96,9 @@ class TestDocumentsDeletion:
), ),
], ],
) )
def test_invalid_dataset_id(self, api_key, add_documents_func, dataset_id, expected_code, expected_message): def test_invalid_dataset_id(self, HttpApiAuth, add_documents_func, dataset_id, expected_code, expected_message):
_, document_ids = add_documents_func _, document_ids = add_documents_func
res = delete_documents(api_key, dataset_id, {"ids": document_ids[:1]}) res = delete_documents(HttpApiAuth, dataset_id, {"ids": document_ids[:1]})
assert res["code"] == expected_code assert res["code"] == expected_code
assert res["message"] == expected_message assert res["message"] == expected_message
@@ -111,52 +111,52 @@ class TestDocumentsDeletion:
lambda r: {"ids": r + ["invalid_id"]}, lambda r: {"ids": r + ["invalid_id"]},
], ],
) )
def test_delete_partial_invalid_id(self, api_key, add_documents_func, payload): def test_delete_partial_invalid_id(self, HttpApiAuth, add_documents_func, payload):
dataset_id, document_ids = add_documents_func dataset_id, document_ids = add_documents_func
if callable(payload): if callable(payload):
payload = payload(document_ids) payload = payload(document_ids)
res = delete_documents(api_key, dataset_id, payload) res = delete_documents(HttpApiAuth, dataset_id, payload)
assert res["code"] == 102 assert res["code"] == 102
assert res["message"] == "Documents not found: ['invalid_id']" assert res["message"] == "Documents not found: ['invalid_id']"
res = list_documents(api_key, dataset_id) res = list_documents(HttpApiAuth, dataset_id)
assert len(res["data"]["docs"]) == 0 assert len(res["data"]["docs"]) == 0
assert res["data"]["total"] == 0 assert res["data"]["total"] == 0
@pytest.mark.p2 @pytest.mark.p2
def test_repeated_deletion(self, api_key, add_documents_func): def test_repeated_deletion(self, HttpApiAuth, add_documents_func):
dataset_id, document_ids = add_documents_func dataset_id, document_ids = add_documents_func
res = delete_documents(api_key, dataset_id, {"ids": document_ids}) res = delete_documents(HttpApiAuth, dataset_id, {"ids": document_ids})
assert res["code"] == 0 assert res["code"] == 0
res = delete_documents(api_key, dataset_id, {"ids": document_ids}) res = delete_documents(HttpApiAuth, dataset_id, {"ids": document_ids})
assert res["code"] == 102 assert res["code"] == 102
assert "Documents not found" in res["message"] assert "Documents not found" in res["message"]
@pytest.mark.p2 @pytest.mark.p2
def test_duplicate_deletion(self, api_key, add_documents_func): def test_duplicate_deletion(self, HttpApiAuth, add_documents_func):
dataset_id, document_ids = add_documents_func dataset_id, document_ids = add_documents_func
res = delete_documents(api_key, dataset_id, {"ids": document_ids + document_ids}) res = delete_documents(HttpApiAuth, dataset_id, {"ids": document_ids + document_ids})
assert res["code"] == 0 assert res["code"] == 0
assert "Duplicate document ids" in res["data"]["errors"][0] assert "Duplicate document ids" in res["data"]["errors"][0]
assert res["data"]["success_count"] == 3 assert res["data"]["success_count"] == 3
res = list_documents(api_key, dataset_id) res = list_documents(HttpApiAuth, dataset_id)
assert len(res["data"]["docs"]) == 0 assert len(res["data"]["docs"]) == 0
assert res["data"]["total"] == 0 assert res["data"]["total"] == 0
@pytest.mark.p3 @pytest.mark.p3
def test_concurrent_deletion(api_key, add_dataset, tmp_path): def test_concurrent_deletion(HttpApiAuth, add_dataset, tmp_path):
count = 100 count = 100
dataset_id = add_dataset dataset_id = add_dataset
document_ids = bulk_upload_documents(api_key, dataset_id, count, tmp_path) document_ids = bulk_upload_documents(HttpApiAuth, dataset_id, count, tmp_path)
with ThreadPoolExecutor(max_workers=5) as executor: with ThreadPoolExecutor(max_workers=5) as executor:
futures = [ futures = [
executor.submit( executor.submit(
delete_documents, delete_documents,
api_key, HttpApiAuth,
dataset_id, dataset_id,
{"ids": document_ids[i : i + 1]}, {"ids": document_ids[i : i + 1]},
) )
@@ -168,15 +168,15 @@ def test_concurrent_deletion(api_key, add_dataset, tmp_path):
@pytest.mark.p3 @pytest.mark.p3
def test_delete_1k(api_key, add_dataset, tmp_path): def test_delete_1k(HttpApiAuth, add_dataset, tmp_path):
documents_num = 1_000 documents_num = 1_000
dataset_id = add_dataset dataset_id = add_dataset
document_ids = bulk_upload_documents(api_key, dataset_id, documents_num, tmp_path) document_ids = bulk_upload_documents(HttpApiAuth, dataset_id, documents_num, tmp_path)
res = list_documents(api_key, dataset_id) res = list_documents(HttpApiAuth, dataset_id)
assert res["data"]["total"] == documents_num assert res["data"]["total"] == documents_num
res = delete_documents(api_key, dataset_id, {"ids": document_ids}) res = delete_documents(HttpApiAuth, dataset_id, {"ids": document_ids})
assert res["code"] == 0 assert res["code"] == 0
res = list_documents(api_key, dataset_id) res = list_documents(HttpApiAuth, dataset_id)
assert res["data"]["total"] == 0 assert res["data"]["total"] == 0
@@ -63,14 +63,14 @@ class TestAuthorization:
], ],
indirect=True, indirect=True,
) )
def test_file_type_validation(api_key, add_dataset, generate_test_files, request): def test_file_type_validation(HttpApiAuth, add_dataset, generate_test_files, request):
dataset_id = add_dataset dataset_id = add_dataset
fp = generate_test_files[request.node.callspec.params["generate_test_files"]] fp = generate_test_files[request.node.callspec.params["generate_test_files"]]
res = upload_documents(api_key, dataset_id, [fp]) res = upload_documents(HttpApiAuth, dataset_id, [fp])
document_id = res["data"][0]["id"] document_id = res["data"][0]["id"]
res = download_document( res = download_document(
api_key, HttpApiAuth,
dataset_id, dataset_id,
document_id, document_id,
fp.with_stem("ragflow_test_download"), fp.with_stem("ragflow_test_download"),
@@ -94,10 +94,10 @@ class TestDocumentDownload:
), ),
], ],
) )
def test_invalid_document_id(self, api_key, add_documents, tmp_path, document_id, expected_code, expected_message): def test_invalid_document_id(self, HttpApiAuth, add_documents, tmp_path, document_id, expected_code, expected_message):
dataset_id, _ = add_documents dataset_id, _ = add_documents
res = download_document( res = download_document(
api_key, HttpApiAuth,
dataset_id, dataset_id,
document_id, document_id,
tmp_path / "ragflow_test_download_1.txt", tmp_path / "ragflow_test_download_1.txt",
@@ -120,10 +120,10 @@ class TestDocumentDownload:
), ),
], ],
) )
def test_invalid_dataset_id(self, api_key, add_documents, tmp_path, dataset_id, expected_code, expected_message): def test_invalid_dataset_id(self, HttpApiAuth, add_documents, tmp_path, dataset_id, expected_code, expected_message):
_, document_ids = add_documents _, document_ids = add_documents
res = download_document( res = download_document(
api_key, HttpApiAuth,
dataset_id, dataset_id,
document_ids[0], document_ids[0],
tmp_path / "ragflow_test_download_1.txt", tmp_path / "ragflow_test_download_1.txt",
@@ -135,12 +135,12 @@ class TestDocumentDownload:
assert response_json["message"] == expected_message assert response_json["message"] == expected_message
@pytest.mark.p3 @pytest.mark.p3
def test_same_file_repeat(self, api_key, add_documents, tmp_path, ragflow_tmp_dir): def test_same_file_repeat(self, HttpApiAuth, add_documents, tmp_path, ragflow_tmp_dir):
num = 5 num = 5
dataset_id, document_ids = add_documents dataset_id, document_ids = add_documents
for i in range(num): for i in range(num):
res = download_document( res = download_document(
api_key, HttpApiAuth,
dataset_id, dataset_id,
document_ids[0], document_ids[0],
tmp_path / f"ragflow_test_download_{i}.txt", tmp_path / f"ragflow_test_download_{i}.txt",
@@ -153,16 +153,16 @@ class TestDocumentDownload:
@pytest.mark.p3 @pytest.mark.p3
def test_concurrent_download(api_key, add_dataset, tmp_path): def test_concurrent_download(HttpApiAuth, add_dataset, tmp_path):
count = 20 count = 20
dataset_id = add_dataset dataset_id = add_dataset
document_ids = bulk_upload_documents(api_key, dataset_id, count, tmp_path) document_ids = bulk_upload_documents(HttpApiAuth, dataset_id, count, tmp_path)
with ThreadPoolExecutor(max_workers=5) as executor: with ThreadPoolExecutor(max_workers=5) as executor:
futures = [ futures = [
executor.submit( executor.submit(
download_document, download_document,
api_key, HttpApiAuth,
dataset_id, dataset_id,
document_ids[i], document_ids[i],
tmp_path / f"ragflow_test_download_{i}.txt", tmp_path / f"ragflow_test_download_{i}.txt",
@@ -42,9 +42,9 @@ class TestAuthorization:
class TestDocumentsList: class TestDocumentsList:
@pytest.mark.p1 @pytest.mark.p1
def test_default(self, api_key, add_documents): def test_default(self, HttpApiAuth, add_documents):
dataset_id, _ = add_documents dataset_id, _ = add_documents
res = list_documents(api_key, dataset_id) res = list_documents(HttpApiAuth, dataset_id)
assert res["code"] == 0 assert res["code"] == 0
assert len(res["data"]["docs"]) == 5 assert len(res["data"]["docs"]) == 5
assert res["data"]["total"] == 5 assert res["data"]["total"] == 5
@@ -61,8 +61,8 @@ class TestDocumentsList:
), ),
], ],
) )
def test_invalid_dataset_id(self, api_key, dataset_id, expected_code, expected_message): def test_invalid_dataset_id(self, HttpApiAuth, dataset_id, expected_code, expected_message):
res = list_documents(api_key, dataset_id) res = list_documents(HttpApiAuth, dataset_id)
assert res["code"] == expected_code assert res["code"] == expected_code
assert res["message"] == expected_message assert res["message"] == expected_message
@@ -93,7 +93,7 @@ class TestDocumentsList:
) )
def test_page( def test_page(
self, self,
api_key, HttpApiAuth,
add_documents, add_documents,
params, params,
expected_code, expected_code,
@@ -101,7 +101,7 @@ class TestDocumentsList:
expected_message, expected_message,
): ):
dataset_id, _ = add_documents dataset_id, _ = add_documents
res = list_documents(api_key, dataset_id, params=params) res = list_documents(HttpApiAuth, dataset_id, params=params)
assert res["code"] == expected_code assert res["code"] == expected_code
if expected_code == 0: if expected_code == 0:
assert len(res["data"]["docs"]) == expected_page_size assert len(res["data"]["docs"]) == expected_page_size
@@ -136,7 +136,7 @@ class TestDocumentsList:
) )
def test_page_size( def test_page_size(
self, self,
api_key, HttpApiAuth,
add_documents, add_documents,
params, params,
expected_code, expected_code,
@@ -144,7 +144,7 @@ class TestDocumentsList:
expected_message, expected_message,
): ):
dataset_id, _ = add_documents dataset_id, _ = add_documents
res = list_documents(api_key, dataset_id, params=params) res = list_documents(HttpApiAuth, dataset_id, params=params)
assert res["code"] == expected_code assert res["code"] == expected_code
if expected_code == 0: if expected_code == 0:
assert len(res["data"]["docs"]) == expected_page_size assert len(res["data"]["docs"]) == expected_page_size
@@ -164,7 +164,7 @@ class TestDocumentsList:
) )
def test_orderby( def test_orderby(
self, self,
api_key, HttpApiAuth,
add_documents, add_documents,
params, params,
expected_code, expected_code,
@@ -172,7 +172,7 @@ class TestDocumentsList:
expected_message, expected_message,
): ):
dataset_id, _ = add_documents dataset_id, _ = add_documents
res = list_documents(api_key, dataset_id, params=params) res = list_documents(HttpApiAuth, dataset_id, params=params)
assert res["code"] == expected_code assert res["code"] == expected_code
if expected_code == 0: if expected_code == 0:
if callable(assertions): if callable(assertions):
@@ -197,7 +197,7 @@ class TestDocumentsList:
) )
def test_desc( def test_desc(
self, self,
api_key, HttpApiAuth,
add_documents, add_documents,
params, params,
expected_code, expected_code,
@@ -205,7 +205,7 @@ class TestDocumentsList:
expected_message, expected_message,
): ):
dataset_id, _ = add_documents dataset_id, _ = add_documents
res = list_documents(api_key, dataset_id, params=params) res = list_documents(HttpApiAuth, dataset_id, params=params)
assert res["code"] == expected_code assert res["code"] == expected_code
if expected_code == 0: if expected_code == 0:
if callable(assertions): if callable(assertions):
@@ -224,9 +224,9 @@ class TestDocumentsList:
({"keywords": "unknown"}, 0), ({"keywords": "unknown"}, 0),
], ],
) )
def test_keywords(self, api_key, add_documents, params, expected_num): def test_keywords(self, HttpApiAuth, add_documents, params, expected_num):
dataset_id, _ = add_documents dataset_id, _ = add_documents
res = list_documents(api_key, dataset_id, params=params) res = list_documents(HttpApiAuth, dataset_id, params=params)
assert res["code"] == 0 assert res["code"] == 0
assert len(res["data"]["docs"]) == expected_num assert len(res["data"]["docs"]) == expected_num
assert res["data"]["total"] == expected_num assert res["data"]["total"] == expected_num
@@ -248,7 +248,7 @@ class TestDocumentsList:
) )
def test_name( def test_name(
self, self,
api_key, HttpApiAuth,
add_documents, add_documents,
params, params,
expected_code, expected_code,
@@ -256,7 +256,7 @@ class TestDocumentsList:
expected_message, expected_message,
): ):
dataset_id, _ = add_documents dataset_id, _ = add_documents
res = list_documents(api_key, dataset_id, params=params) res = list_documents(HttpApiAuth, dataset_id, params=params)
assert res["code"] == expected_code assert res["code"] == expected_code
if expected_code == 0: if expected_code == 0:
if params["name"] in [None, ""]: if params["name"] in [None, ""]:
@@ -278,7 +278,7 @@ class TestDocumentsList:
) )
def test_id( def test_id(
self, self,
api_key, HttpApiAuth,
add_documents, add_documents,
document_id, document_id,
expected_code, expected_code,
@@ -290,7 +290,7 @@ class TestDocumentsList:
params = {"id": document_id(document_ids)} params = {"id": document_id(document_ids)}
else: else:
params = {"id": document_id} params = {"id": document_id}
res = list_documents(api_key, dataset_id, params=params) res = list_documents(HttpApiAuth, dataset_id, params=params)
assert res["code"] == expected_code assert res["code"] == expected_code
if expected_code == 0: if expected_code == 0:
@@ -319,7 +319,7 @@ class TestDocumentsList:
) )
def test_name_and_id( def test_name_and_id(
self, self,
api_key, HttpApiAuth,
add_documents, add_documents,
document_id, document_id,
name, name,
@@ -333,27 +333,27 @@ class TestDocumentsList:
else: else:
params = {"id": document_id, "name": name} params = {"id": document_id, "name": name}
res = list_documents(api_key, dataset_id, params=params) res = list_documents(HttpApiAuth, dataset_id, params=params)
if expected_code == 0: if expected_code == 0:
assert len(res["data"]["docs"]) == expected_num assert len(res["data"]["docs"]) == expected_num
else: else:
assert res["message"] == expected_message assert res["message"] == expected_message
@pytest.mark.p3 @pytest.mark.p3
def test_concurrent_list(self, api_key, add_documents): def test_concurrent_list(self, HttpApiAuth, add_documents):
dataset_id, _ = add_documents dataset_id, _ = add_documents
count = 100 count = 100
with ThreadPoolExecutor(max_workers=5) as executor: with ThreadPoolExecutor(max_workers=5) as executor:
futures = [executor.submit(list_documents, api_key, dataset_id) for i in range(count)] futures = [executor.submit(list_documents, HttpApiAuth, dataset_id) for i in range(count)]
responses = list(as_completed(futures)) responses = list(as_completed(futures))
assert len(responses) == count, responses assert len(responses) == count, responses
assert all(future.result()["code"] == 0 for future in futures) assert all(future.result()["code"] == 0 for future in futures)
@pytest.mark.p3 @pytest.mark.p3
def test_invalid_params(self, api_key, add_documents): def test_invalid_params(self, HttpApiAuth, add_documents):
dataset_id, _ = add_documents dataset_id, _ = add_documents
params = {"a": "b"} params = {"a": "b"}
res = list_documents(api_key, dataset_id, params=params) res = list_documents(HttpApiAuth, dataset_id, params=params)
assert res["code"] == 0 assert res["code"] == 0
assert len(res["data"]["docs"]) == 5 assert len(res["data"]["docs"]) == 5
@@ -83,17 +83,17 @@ class TestDocumentsParse:
pytest.param(lambda r: {"document_ids": r}, 0, "", marks=pytest.mark.p1), pytest.param(lambda r: {"document_ids": r}, 0, "", marks=pytest.mark.p1),
], ],
) )
def test_basic_scenarios(self, api_key, add_documents_func, payload, expected_code, expected_message): def test_basic_scenarios(self, HttpApiAuth, add_documents_func, payload, expected_code, expected_message):
dataset_id, document_ids = add_documents_func dataset_id, document_ids = add_documents_func
if callable(payload): if callable(payload):
payload = payload(document_ids) payload = payload(document_ids)
res = parse_documents(api_key, dataset_id, payload) res = parse_documents(HttpApiAuth, dataset_id, payload)
assert res["code"] == expected_code assert res["code"] == expected_code
if expected_code != 0: if expected_code != 0:
assert res["message"] == expected_message assert res["message"] == expected_message
if expected_code == 0: if expected_code == 0:
condition(api_key, dataset_id, payload["document_ids"]) condition(HttpApiAuth, dataset_id, payload["document_ids"])
validate_document_details(api_key, dataset_id, payload["document_ids"]) validate_document_details(HttpApiAuth, dataset_id, payload["document_ids"])
@pytest.mark.p3 @pytest.mark.p3
@pytest.mark.parametrize( @pytest.mark.parametrize(
@@ -109,14 +109,14 @@ class TestDocumentsParse:
) )
def test_invalid_dataset_id( def test_invalid_dataset_id(
self, self,
api_key, HttpApiAuth,
add_documents_func, add_documents_func,
dataset_id, dataset_id,
expected_code, expected_code,
expected_message, expected_message,
): ):
_, document_ids = add_documents_func _, document_ids = add_documents_func
res = parse_documents(api_key, dataset_id, {"document_ids": document_ids}) res = parse_documents(HttpApiAuth, dataset_id, {"document_ids": document_ids})
assert res["code"] == expected_code assert res["code"] == expected_code
assert res["message"] == expected_message assert res["message"] == expected_message
@@ -128,44 +128,44 @@ class TestDocumentsParse:
pytest.param(lambda r: {"document_ids": r + ["invalid_id"]}, marks=pytest.mark.p3), pytest.param(lambda r: {"document_ids": r + ["invalid_id"]}, marks=pytest.mark.p3),
], ],
) )
def test_parse_partial_invalid_document_id(self, api_key, add_documents_func, payload): def test_parse_partial_invalid_document_id(self, HttpApiAuth, add_documents_func, payload):
dataset_id, document_ids = add_documents_func dataset_id, document_ids = add_documents_func
if callable(payload): if callable(payload):
payload = payload(document_ids) payload = payload(document_ids)
res = parse_documents(api_key, dataset_id, payload) res = parse_documents(HttpApiAuth, dataset_id, payload)
assert res["code"] == 102 assert res["code"] == 102
assert res["message"] == "Documents not found: ['invalid_id']" assert res["message"] == "Documents not found: ['invalid_id']"
condition(api_key, dataset_id) condition(HttpApiAuth, dataset_id)
validate_document_details(api_key, dataset_id, document_ids) validate_document_details(HttpApiAuth, dataset_id, document_ids)
@pytest.mark.p3 @pytest.mark.p3
def test_repeated_parse(self, api_key, add_documents_func): def test_repeated_parse(self, HttpApiAuth, add_documents_func):
dataset_id, document_ids = add_documents_func dataset_id, document_ids = add_documents_func
res = parse_documents(api_key, dataset_id, {"document_ids": document_ids}) res = parse_documents(HttpApiAuth, dataset_id, {"document_ids": document_ids})
assert res["code"] == 0 assert res["code"] == 0
condition(api_key, dataset_id) condition(HttpApiAuth, dataset_id)
res = parse_documents(api_key, dataset_id, {"document_ids": document_ids}) res = parse_documents(HttpApiAuth, dataset_id, {"document_ids": document_ids})
assert res["code"] == 0 assert res["code"] == 0
@pytest.mark.p3 @pytest.mark.p3
def test_duplicate_parse(self, api_key, add_documents_func): def test_duplicate_parse(self, HttpApiAuth, add_documents_func):
dataset_id, document_ids = add_documents_func dataset_id, document_ids = add_documents_func
res = parse_documents(api_key, dataset_id, {"document_ids": document_ids + document_ids}) res = parse_documents(HttpApiAuth, dataset_id, {"document_ids": document_ids + document_ids})
assert res["code"] == 0 assert res["code"] == 0
assert "Duplicate document ids" in res["data"]["errors"][0] assert "Duplicate document ids" in res["data"]["errors"][0]
assert res["data"]["success_count"] == 3 assert res["data"]["success_count"] == 3
condition(api_key, dataset_id) condition(HttpApiAuth, dataset_id)
validate_document_details(api_key, dataset_id, document_ids) validate_document_details(HttpApiAuth, dataset_id, document_ids)
@pytest.mark.p3 @pytest.mark.p3
def test_parse_100_files(api_key, add_dataset_func, tmp_path): def test_parse_100_files(HttpApiAuth, add_dataset_func, tmp_path):
@wait_for(100, 1, "Document parsing timeout") @wait_for(100, 1, "Document parsing timeout")
def condition(_auth, _dataset_id, _document_num): def condition(_auth, _dataset_id, _document_num):
res = list_documents(_auth, _dataset_id, {"page_size": _document_num}) res = list_documents(_auth, _dataset_id, {"page_size": _document_num})
@@ -176,17 +176,17 @@ def test_parse_100_files(api_key, add_dataset_func, tmp_path):
document_num = 100 document_num = 100
dataset_id = add_dataset_func dataset_id = add_dataset_func
document_ids = bulk_upload_documents(api_key, dataset_id, document_num, tmp_path) document_ids = bulk_upload_documents(HttpApiAuth, dataset_id, document_num, tmp_path)
res = parse_documents(api_key, dataset_id, {"document_ids": document_ids}) res = parse_documents(HttpApiAuth, dataset_id, {"document_ids": document_ids})
assert res["code"] == 0 assert res["code"] == 0
condition(api_key, dataset_id, document_num) condition(HttpApiAuth, dataset_id, document_num)
validate_document_details(api_key, dataset_id, document_ids) validate_document_details(HttpApiAuth, dataset_id, document_ids)
@pytest.mark.p3 @pytest.mark.p3
def test_concurrent_parse(api_key, add_dataset_func, tmp_path): def test_concurrent_parse(HttpApiAuth, add_dataset_func, tmp_path):
@wait_for(120, 1, "Document parsing timeout") @wait_for(120, 1, "Document parsing timeout")
def condition(_auth, _dataset_id, _document_num): def condition(_auth, _dataset_id, _document_num):
res = list_documents(_auth, _dataset_id, {"page_size": _document_num}) res = list_documents(_auth, _dataset_id, {"page_size": _document_num})
@@ -197,13 +197,13 @@ def test_concurrent_parse(api_key, add_dataset_func, tmp_path):
count = 100 count = 100
dataset_id = add_dataset_func dataset_id = add_dataset_func
document_ids = bulk_upload_documents(api_key, dataset_id, count, tmp_path) document_ids = bulk_upload_documents(HttpApiAuth, dataset_id, count, tmp_path)
with ThreadPoolExecutor(max_workers=5) as executor: with ThreadPoolExecutor(max_workers=5) as executor:
futures = [ futures = [
executor.submit( executor.submit(
parse_documents, parse_documents,
api_key, HttpApiAuth,
dataset_id, dataset_id,
{"document_ids": document_ids[i : i + 1]}, {"document_ids": document_ids[i : i + 1]},
) )
@@ -213,6 +213,6 @@ def test_concurrent_parse(api_key, add_dataset_func, tmp_path):
assert len(responses) == count, responses assert len(responses) == count, responses
assert all(future.result()["code"] == 0 for future in futures) assert all(future.result()["code"] == 0 for future in futures)
condition(api_key, dataset_id, count) condition(HttpApiAuth, dataset_id, count)
validate_document_details(api_key, dataset_id, document_ids) validate_document_details(HttpApiAuth, dataset_id, document_ids)
@@ -75,7 +75,7 @@ class TestDocumentsParseStop:
pytest.param(lambda r: {"document_ids": r}, 0, "", marks=pytest.mark.p1), pytest.param(lambda r: {"document_ids": r}, 0, "", marks=pytest.mark.p1),
], ],
) )
def test_basic_scenarios(self, api_key, add_documents_func, payload, expected_code, expected_message): def test_basic_scenarios(self, HttpApiAuth, add_documents_func, payload, expected_code, expected_message):
@wait_for(10, 1, "Document parsing timeout") @wait_for(10, 1, "Document parsing timeout")
def condition(_auth, _dataset_id, _document_ids): def condition(_auth, _dataset_id, _document_ids):
for _document_id in _document_ids: for _document_id in _document_ids:
@@ -85,20 +85,20 @@ class TestDocumentsParseStop:
return True return True
dataset_id, document_ids = add_documents_func dataset_id, document_ids = add_documents_func
parse_documents(api_key, dataset_id, {"document_ids": document_ids}) parse_documents(HttpApiAuth, dataset_id, {"document_ids": document_ids})
if callable(payload): if callable(payload):
payload = payload(document_ids) payload = payload(document_ids)
res = stop_parse_documents(api_key, dataset_id, payload) res = stop_parse_documents(HttpApiAuth, dataset_id, payload)
assert res["code"] == expected_code assert res["code"] == expected_code
if expected_code != 0: if expected_code != 0:
assert res["message"] == expected_message assert res["message"] == expected_message
else: else:
completed_document_ids = list(set(document_ids) - set(payload["document_ids"])) completed_document_ids = list(set(document_ids) - set(payload["document_ids"]))
condition(api_key, dataset_id, completed_document_ids) condition(HttpApiAuth, dataset_id, completed_document_ids)
validate_document_parse_cancel(api_key, dataset_id, payload["document_ids"]) validate_document_parse_cancel(HttpApiAuth, dataset_id, payload["document_ids"])
validate_document_parse_done(api_key, dataset_id, completed_document_ids) validate_document_parse_done(HttpApiAuth, dataset_id, completed_document_ids)
@pytest.mark.p3 @pytest.mark.p3
@pytest.mark.parametrize( @pytest.mark.parametrize(
@@ -114,15 +114,15 @@ class TestDocumentsParseStop:
) )
def test_invalid_dataset_id( def test_invalid_dataset_id(
self, self,
api_key, HttpApiAuth,
add_documents_func, add_documents_func,
invalid_dataset_id, invalid_dataset_id,
expected_code, expected_code,
expected_message, expected_message,
): ):
dataset_id, document_ids = add_documents_func dataset_id, document_ids = add_documents_func
parse_documents(api_key, dataset_id, {"document_ids": document_ids}) parse_documents(HttpApiAuth, dataset_id, {"document_ids": document_ids})
res = stop_parse_documents(api_key, invalid_dataset_id, {"document_ids": document_ids}) res = stop_parse_documents(HttpApiAuth, invalid_dataset_id, {"document_ids": document_ids})
assert res["code"] == expected_code assert res["code"] == expected_code
assert res["message"] == expected_message assert res["message"] == expected_message
@@ -135,63 +135,63 @@ class TestDocumentsParseStop:
lambda r: {"document_ids": r + ["invalid_id"]}, lambda r: {"document_ids": r + ["invalid_id"]},
], ],
) )
def test_stop_parse_partial_invalid_document_id(self, api_key, add_documents_func, payload): def test_stop_parse_partial_invalid_document_id(self, HttpApiAuth, add_documents_func, payload):
dataset_id, document_ids = add_documents_func dataset_id, document_ids = add_documents_func
parse_documents(api_key, dataset_id, {"document_ids": document_ids}) parse_documents(HttpApiAuth, dataset_id, {"document_ids": document_ids})
if callable(payload): if callable(payload):
payload = payload(document_ids) payload = payload(document_ids)
res = stop_parse_documents(api_key, dataset_id, payload) res = stop_parse_documents(HttpApiAuth, dataset_id, payload)
assert res["code"] == 102 assert res["code"] == 102
assert res["message"] == "You don't own the document invalid_id." assert res["message"] == "You don't own the document invalid_id."
validate_document_parse_cancel(api_key, dataset_id, document_ids) validate_document_parse_cancel(HttpApiAuth, dataset_id, document_ids)
@pytest.mark.p3 @pytest.mark.p3
def test_repeated_stop_parse(self, api_key, add_documents_func): def test_repeated_stop_parse(self, HttpApiAuth, add_documents_func):
dataset_id, document_ids = add_documents_func dataset_id, document_ids = add_documents_func
parse_documents(api_key, dataset_id, {"document_ids": document_ids}) parse_documents(HttpApiAuth, dataset_id, {"document_ids": document_ids})
res = stop_parse_documents(api_key, dataset_id, {"document_ids": document_ids}) res = stop_parse_documents(HttpApiAuth, dataset_id, {"document_ids": document_ids})
assert res["code"] == 0 assert res["code"] == 0
res = stop_parse_documents(api_key, dataset_id, {"document_ids": document_ids}) res = stop_parse_documents(HttpApiAuth, dataset_id, {"document_ids": document_ids})
assert res["code"] == 102 assert res["code"] == 102
assert res["message"] == "Can't stop parsing document with progress at 0 or 1" assert res["message"] == "Can't stop parsing document with progress at 0 or 1"
@pytest.mark.p3 @pytest.mark.p3
def test_duplicate_stop_parse(self, api_key, add_documents_func): def test_duplicate_stop_parse(self, HttpApiAuth, add_documents_func):
dataset_id, document_ids = add_documents_func dataset_id, document_ids = add_documents_func
parse_documents(api_key, dataset_id, {"document_ids": document_ids}) parse_documents(HttpApiAuth, dataset_id, {"document_ids": document_ids})
res = stop_parse_documents(api_key, dataset_id, {"document_ids": document_ids + document_ids}) res = stop_parse_documents(HttpApiAuth, dataset_id, {"document_ids": document_ids + document_ids})
assert res["code"] == 0 assert res["code"] == 0
assert res["data"]["success_count"] == 3 assert res["data"]["success_count"] == 3
assert f"Duplicate document ids: {document_ids[0]}" in res["data"]["errors"] assert f"Duplicate document ids: {document_ids[0]}" in res["data"]["errors"]
@pytest.mark.skip(reason="unstable") @pytest.mark.skip(reason="unstable")
def test_stop_parse_100_files(api_key, add_dataset_func, tmp_path): def test_stop_parse_100_files(HttpApiAuth, add_dataset_func, tmp_path):
document_num = 100 document_num = 100
dataset_id = add_dataset_func dataset_id = add_dataset_func
document_ids = bulk_upload_documents(api_key, dataset_id, document_num, tmp_path) document_ids = bulk_upload_documents(HttpApiAuth, dataset_id, document_num, tmp_path)
parse_documents(api_key, dataset_id, {"document_ids": document_ids}) parse_documents(HttpApiAuth, dataset_id, {"document_ids": document_ids})
sleep(1) sleep(1)
res = stop_parse_documents(api_key, dataset_id, {"document_ids": document_ids}) res = stop_parse_documents(HttpApiAuth, dataset_id, {"document_ids": document_ids})
assert res["code"] == 0 assert res["code"] == 0
validate_document_parse_cancel(api_key, dataset_id, document_ids) validate_document_parse_cancel(HttpApiAuth, dataset_id, document_ids)
@pytest.mark.skip(reason="unstable") @pytest.mark.skip(reason="unstable")
def test_concurrent_parse(api_key, add_dataset_func, tmp_path): def test_concurrent_parse(HttpApiAuth, add_dataset_func, tmp_path):
document_num = 50 document_num = 50
dataset_id = add_dataset_func dataset_id = add_dataset_func
document_ids = bulk_upload_documents(api_key, dataset_id, document_num, tmp_path) document_ids = bulk_upload_documents(HttpApiAuth, dataset_id, document_num, tmp_path)
parse_documents(api_key, dataset_id, {"document_ids": document_ids}) parse_documents(HttpApiAuth, dataset_id, {"document_ids": document_ids})
with ThreadPoolExecutor(max_workers=5) as executor: with ThreadPoolExecutor(max_workers=5) as executor:
futures = [ futures = [
executor.submit( executor.submit(
stop_parse_documents, stop_parse_documents,
api_key, HttpApiAuth,
dataset_id, dataset_id,
{"document_ids": document_ids[i : i + 1]}, {"document_ids": document_ids[i : i + 1]},
) )
@@ -199,4 +199,4 @@ def test_concurrent_parse(api_key, add_dataset_func, tmp_path):
] ]
responses = [f.result() for f in futures] responses = [f.result() for f in futures]
assert all(r["code"] == 0 for r in responses) assert all(r["code"] == 0 for r in responses)
validate_document_parse_cancel(api_key, dataset_id, document_ids) validate_document_parse_cancel(HttpApiAuth, dataset_id, document_ids)
@@ -82,12 +82,12 @@ class TestDocumentsUpdated:
), ),
], ],
) )
def test_name(self, api_key, add_documents, name, expected_code, expected_message): def test_name(self, HttpApiAuth, add_documents, name, expected_code, expected_message):
dataset_id, document_ids = add_documents dataset_id, document_ids = add_documents
res = update_document(api_key, dataset_id, document_ids[0], {"name": name}) res = update_document(HttpApiAuth, dataset_id, document_ids[0], {"name": name})
assert res["code"] == expected_code assert res["code"] == expected_code
if expected_code == 0: if expected_code == 0:
res = list_documents(api_key, dataset_id, {"id": document_ids[0]}) res = list_documents(HttpApiAuth, dataset_id, {"id": document_ids[0]})
assert res["data"]["docs"][0]["name"] == name assert res["data"]["docs"][0]["name"] == name
else: else:
assert res["message"] == expected_message assert res["message"] == expected_message
@@ -104,9 +104,9 @@ class TestDocumentsUpdated:
), ),
], ],
) )
def test_invalid_document_id(self, api_key, add_documents, document_id, expected_code, expected_message): def test_invalid_document_id(self, HttpApiAuth, add_documents, document_id, expected_code, expected_message):
dataset_id, _ = add_documents dataset_id, _ = add_documents
res = update_document(api_key, dataset_id, document_id, {"name": "new_name.txt"}) res = update_document(HttpApiAuth, dataset_id, document_id, {"name": "new_name.txt"})
assert res["code"] == expected_code assert res["code"] == expected_code
assert res["message"] == expected_message assert res["message"] == expected_message
@@ -122,9 +122,9 @@ class TestDocumentsUpdated:
), ),
], ],
) )
def test_invalid_dataset_id(self, api_key, add_documents, dataset_id, expected_code, expected_message): def test_invalid_dataset_id(self, HttpApiAuth, add_documents, dataset_id, expected_code, expected_message):
_, document_ids = add_documents _, document_ids = add_documents
res = update_document(api_key, dataset_id, document_ids[0], {"name": "new_name.txt"}) res = update_document(HttpApiAuth, dataset_id, document_ids[0], {"name": "new_name.txt"})
assert res["code"] == expected_code assert res["code"] == expected_code
assert res["message"] == expected_message assert res["message"] == expected_message
@@ -133,11 +133,11 @@ class TestDocumentsUpdated:
"meta_fields, expected_code, expected_message", "meta_fields, expected_code, expected_message",
[({"test": "test"}, 0, ""), ("test", 102, "meta_fields must be a dictionary")], [({"test": "test"}, 0, ""), ("test", 102, "meta_fields must be a dictionary")],
) )
def test_meta_fields(self, api_key, add_documents, meta_fields, expected_code, expected_message): def test_meta_fields(self, HttpApiAuth, add_documents, meta_fields, expected_code, expected_message):
dataset_id, document_ids = add_documents dataset_id, document_ids = add_documents
res = update_document(api_key, dataset_id, document_ids[0], {"meta_fields": meta_fields}) res = update_document(HttpApiAuth, dataset_id, document_ids[0], {"meta_fields": meta_fields})
if expected_code == 0: if expected_code == 0:
res = list_documents(api_key, dataset_id, {"id": document_ids[0]}) res = list_documents(HttpApiAuth, dataset_id, {"id": document_ids[0]})
assert res["data"]["docs"][0]["meta_fields"] == meta_fields assert res["data"]["docs"][0]["meta_fields"] == meta_fields
else: else:
assert res["message"] == expected_message assert res["message"] == expected_message
@@ -167,12 +167,12 @@ class TestDocumentsUpdated:
), ),
], ],
) )
def test_chunk_method(self, api_key, add_documents, chunk_method, expected_code, expected_message): def test_chunk_method(self, HttpApiAuth, add_documents, chunk_method, expected_code, expected_message):
dataset_id, document_ids = add_documents dataset_id, document_ids = add_documents
res = update_document(api_key, dataset_id, document_ids[0], {"chunk_method": chunk_method}) res = update_document(HttpApiAuth, dataset_id, document_ids[0], {"chunk_method": chunk_method})
assert res["code"] == expected_code assert res["code"] == expected_code
if expected_code == 0: if expected_code == 0:
res = list_documents(api_key, dataset_id, {"id": document_ids[0]}) res = list_documents(HttpApiAuth, dataset_id, {"id": document_ids[0]})
if chunk_method != "": if chunk_method != "":
assert res["data"]["docs"][0]["chunk_method"] == chunk_method assert res["data"]["docs"][0]["chunk_method"] == chunk_method
else: else:
@@ -287,14 +287,14 @@ class TestDocumentsUpdated:
) )
def test_invalid_field( def test_invalid_field(
self, self,
api_key, HttpApiAuth,
add_documents, add_documents,
payload, payload,
expected_code, expected_code,
expected_message, expected_message,
): ):
dataset_id, document_ids = add_documents dataset_id, document_ids = add_documents
res = update_document(api_key, dataset_id, document_ids[0], payload) res = update_document(HttpApiAuth, dataset_id, document_ids[0], payload)
assert res["code"] == expected_code assert res["code"] == expected_code
assert res["message"] == expected_message assert res["message"] == expected_message
@@ -515,7 +515,7 @@ class TestUpdateDocumentParserConfig:
) )
def test_parser_config( def test_parser_config(
self, self,
api_key, HttpApiAuth,
add_documents, add_documents,
chunk_method, chunk_method,
parser_config, parser_config,
@@ -524,14 +524,14 @@ class TestUpdateDocumentParserConfig:
): ):
dataset_id, document_ids = add_documents dataset_id, document_ids = add_documents
res = update_document( res = update_document(
api_key, HttpApiAuth,
dataset_id, dataset_id,
document_ids[0], document_ids[0],
{"chunk_method": chunk_method, "parser_config": parser_config}, {"chunk_method": chunk_method, "parser_config": parser_config},
) )
assert res["code"] == expected_code assert res["code"] == expected_code
if expected_code == 0: if expected_code == 0:
res = list_documents(api_key, dataset_id, {"id": document_ids[0]}) res = list_documents(HttpApiAuth, dataset_id, {"id": document_ids[0]})
if parser_config != {}: if parser_config != {}:
for k, v in parser_config.items(): for k, v in parser_config.items():
assert res["data"]["docs"][0]["parser_config"][k] == v assert res["data"]["docs"][0]["parser_config"][k] == v
@@ -47,10 +47,10 @@ class TestAuthorization:
class TestDocumentsUpload: class TestDocumentsUpload:
@pytest.mark.p1 @pytest.mark.p1
def test_valid_single_upload(self, api_key, add_dataset_func, tmp_path): def test_valid_single_upload(self, HttpApiAuth, add_dataset_func, tmp_path):
dataset_id = add_dataset_func dataset_id = add_dataset_func
fp = create_txt_file(tmp_path / "ragflow_test.txt") fp = create_txt_file(tmp_path / "ragflow_test.txt")
res = upload_documents(api_key, dataset_id, [fp]) res = upload_documents(HttpApiAuth, dataset_id, [fp])
assert res["code"] == 0 assert res["code"] == 0
assert res["data"][0]["dataset_id"] == dataset_id assert res["data"][0]["dataset_id"] == dataset_id
assert res["data"][0]["name"] == fp.name assert res["data"][0]["name"] == fp.name
@@ -72,10 +72,10 @@ class TestDocumentsUpload:
], ],
indirect=True, indirect=True,
) )
def test_file_type_validation(self, api_key, add_dataset_func, generate_test_files, request): def test_file_type_validation(self, HttpApiAuth, add_dataset_func, generate_test_files, request):
dataset_id = add_dataset_func dataset_id = add_dataset_func
fp = generate_test_files[request.node.callspec.params["generate_test_files"]] fp = generate_test_files[request.node.callspec.params["generate_test_files"]]
res = upload_documents(api_key, dataset_id, [fp]) res = upload_documents(HttpApiAuth, dataset_id, [fp])
assert res["code"] == 0 assert res["code"] == 0
assert res["data"][0]["dataset_id"] == dataset_id assert res["data"][0]["dataset_id"] == dataset_id
assert res["data"][0]["name"] == fp.name assert res["data"][0]["name"] == fp.name
@@ -85,33 +85,33 @@ class TestDocumentsUpload:
"file_type", "file_type",
["exe", "unknown"], ["exe", "unknown"],
) )
def test_unsupported_file_type(self, api_key, add_dataset_func, tmp_path, file_type): def test_unsupported_file_type(self, HttpApiAuth, add_dataset_func, tmp_path, file_type):
dataset_id = add_dataset_func dataset_id = add_dataset_func
fp = tmp_path / f"ragflow_test.{file_type}" fp = tmp_path / f"ragflow_test.{file_type}"
fp.touch() fp.touch()
res = upload_documents(api_key, dataset_id, [fp]) res = upload_documents(HttpApiAuth, dataset_id, [fp])
assert res["code"] == 500 assert res["code"] == 500
assert res["message"] == f"ragflow_test.{file_type}: This type of file has not been supported yet!" assert res["message"] == f"ragflow_test.{file_type}: This type of file has not been supported yet!"
@pytest.mark.p2 @pytest.mark.p2
def test_missing_file(self, api_key, add_dataset_func): def test_missing_file(self, HttpApiAuth, add_dataset_func):
dataset_id = add_dataset_func dataset_id = add_dataset_func
res = upload_documents(api_key, dataset_id) res = upload_documents(HttpApiAuth, dataset_id)
assert res["code"] == 101 assert res["code"] == 101
assert res["message"] == "No file part!" assert res["message"] == "No file part!"
@pytest.mark.p3 @pytest.mark.p3
def test_empty_file(self, api_key, add_dataset_func, tmp_path): def test_empty_file(self, HttpApiAuth, add_dataset_func, tmp_path):
dataset_id = add_dataset_func dataset_id = add_dataset_func
fp = tmp_path / "empty.txt" fp = tmp_path / "empty.txt"
fp.touch() fp.touch()
res = upload_documents(api_key, dataset_id, [fp]) res = upload_documents(HttpApiAuth, dataset_id, [fp])
assert res["code"] == 0 assert res["code"] == 0
assert res["data"][0]["size"] == 0 assert res["data"][0]["size"] == 0
@pytest.mark.p3 @pytest.mark.p3
def test_filename_empty(self, api_key, add_dataset_func, tmp_path): def test_filename_empty(self, HttpApiAuth, add_dataset_func, tmp_path):
dataset_id = add_dataset_func dataset_id = add_dataset_func
fp = create_txt_file(tmp_path / "ragflow_test.txt") fp = create_txt_file(tmp_path / "ragflow_test.txt")
url = f"{HOST_ADDRESS}{FILE_API_URL}".format(dataset_id=dataset_id) url = f"{HOST_ADDRESS}{FILE_API_URL}".format(dataset_id=dataset_id)
@@ -120,33 +120,33 @@ class TestDocumentsUpload:
res = requests.post( res = requests.post(
url=url, url=url,
headers={"Content-Type": m.content_type}, headers={"Content-Type": m.content_type},
auth=api_key, auth=HttpApiAuth,
data=m, data=m,
) )
assert res.json()["code"] == 101 assert res.json()["code"] == 101
assert res.json()["message"] == "No file selected!" assert res.json()["message"] == "No file selected!"
@pytest.mark.p2 @pytest.mark.p2
def test_filename_exceeds_max_length(self, api_key, add_dataset_func, tmp_path): def test_filename_exceeds_max_length(self, HttpApiAuth, add_dataset_func, tmp_path):
dataset_id = add_dataset_func dataset_id = add_dataset_func
# filename_length = 129 # filename_length = 129
fp = create_txt_file(tmp_path / f"{'a' * (DOCUMENT_NAME_LIMIT - 3)}.txt") fp = create_txt_file(tmp_path / f"{'a' * (DOCUMENT_NAME_LIMIT - 3)}.txt")
res = upload_documents(api_key, dataset_id, [fp]) res = upload_documents(HttpApiAuth, dataset_id, [fp])
assert res["code"] == 101 assert res["code"] == 101
assert res["message"] == "File name should be less than 128 bytes." assert res["message"] == "File name should be less than 128 bytes."
@pytest.mark.p2 @pytest.mark.p2
def test_invalid_dataset_id(self, api_key, tmp_path): def test_invalid_dataset_id(self, HttpApiAuth, tmp_path):
fp = create_txt_file(tmp_path / "ragflow_test.txt") fp = create_txt_file(tmp_path / "ragflow_test.txt")
res = upload_documents(api_key, "invalid_dataset_id", [fp]) res = upload_documents(HttpApiAuth, "invalid_dataset_id", [fp])
assert res["code"] == 100 assert res["code"] == 100
assert res["message"] == """LookupError("Can\'t find the dataset with ID invalid_dataset_id!")""" assert res["message"] == """LookupError("Can\'t find the dataset with ID invalid_dataset_id!")"""
@pytest.mark.p2 @pytest.mark.p2
def test_duplicate_files(self, api_key, add_dataset_func, tmp_path): def test_duplicate_files(self, HttpApiAuth, add_dataset_func, tmp_path):
dataset_id = add_dataset_func dataset_id = add_dataset_func
fp = create_txt_file(tmp_path / "ragflow_test.txt") fp = create_txt_file(tmp_path / "ragflow_test.txt")
res = upload_documents(api_key, dataset_id, [fp, fp]) res = upload_documents(HttpApiAuth, dataset_id, [fp, fp])
assert res["code"] == 0 assert res["code"] == 0
assert len(res["data"]) == 2 assert len(res["data"]) == 2
for i in range(len(res["data"])): for i in range(len(res["data"])):
@@ -157,11 +157,11 @@ class TestDocumentsUpload:
assert res["data"][i]["name"] == expected_name assert res["data"][i]["name"] == expected_name
@pytest.mark.p2 @pytest.mark.p2
def test_same_file_repeat(self, api_key, add_dataset_func, tmp_path): def test_same_file_repeat(self, HttpApiAuth, add_dataset_func, tmp_path):
dataset_id = add_dataset_func dataset_id = add_dataset_func
fp = create_txt_file(tmp_path / "ragflow_test.txt") fp = create_txt_file(tmp_path / "ragflow_test.txt")
for i in range(3): for i in range(3):
res = upload_documents(api_key, dataset_id, [fp]) res = upload_documents(HttpApiAuth, dataset_id, [fp])
assert res["code"] == 0 assert res["code"] == 0
assert len(res["data"]) == 1 assert len(res["data"]) == 1
assert res["data"][0]["dataset_id"] == dataset_id assert res["data"][0]["dataset_id"] == dataset_id
@@ -171,7 +171,7 @@ class TestDocumentsUpload:
assert res["data"][0]["name"] == expected_name assert res["data"][0]["name"] == expected_name
@pytest.mark.p3 @pytest.mark.p3
def test_filename_special_characters(self, api_key, add_dataset_func, tmp_path): def test_filename_special_characters(self, HttpApiAuth, add_dataset_func, tmp_path):
dataset_id = add_dataset_func dataset_id = add_dataset_func
illegal_chars = '<>:"/\\|?*' illegal_chars = '<>:"/\\|?*'
translation_table = str.maketrans({char: "_" for char in illegal_chars}) translation_table = str.maketrans({char: "_" for char in illegal_chars})
@@ -179,28 +179,28 @@ class TestDocumentsUpload:
fp = tmp_path / f"{safe_filename}.txt" fp = tmp_path / f"{safe_filename}.txt"
fp.write_text("Sample text content") fp.write_text("Sample text content")
res = upload_documents(api_key, dataset_id, [fp]) res = upload_documents(HttpApiAuth, dataset_id, [fp])
assert res["code"] == 0 assert res["code"] == 0
assert len(res["data"]) == 1 assert len(res["data"]) == 1
assert res["data"][0]["dataset_id"] == dataset_id assert res["data"][0]["dataset_id"] == dataset_id
assert res["data"][0]["name"] == fp.name assert res["data"][0]["name"] == fp.name
@pytest.mark.p1 @pytest.mark.p1
def test_multiple_files(self, api_key, add_dataset_func, tmp_path): def test_multiple_files(self, HttpApiAuth, add_dataset_func, tmp_path):
dataset_id = add_dataset_func dataset_id = add_dataset_func
expected_document_count = 20 expected_document_count = 20
fps = [] fps = []
for i in range(expected_document_count): for i in range(expected_document_count):
fp = create_txt_file(tmp_path / f"ragflow_test_{i}.txt") fp = create_txt_file(tmp_path / f"ragflow_test_{i}.txt")
fps.append(fp) fps.append(fp)
res = upload_documents(api_key, dataset_id, fps) res = upload_documents(HttpApiAuth, dataset_id, fps)
assert res["code"] == 0 assert res["code"] == 0
res = list_datasets(api_key, {"id": dataset_id}) res = list_datasets(HttpApiAuth, {"id": dataset_id})
assert res["data"][0]["document_count"] == expected_document_count assert res["data"][0]["document_count"] == expected_document_count
@pytest.mark.p3 @pytest.mark.p3
def test_concurrent_upload(self, api_key, add_dataset_func, tmp_path): def test_concurrent_upload(self, HttpApiAuth, add_dataset_func, tmp_path):
dataset_id = add_dataset_func dataset_id = add_dataset_func
count = 20 count = 20
@@ -210,10 +210,10 @@ class TestDocumentsUpload:
fps.append(fp) fps.append(fp)
with ThreadPoolExecutor(max_workers=5) as executor: with ThreadPoolExecutor(max_workers=5) as executor:
futures = [executor.submit(upload_documents, api_key, dataset_id, fps[i : i + 1]) for i in range(count)] futures = [executor.submit(upload_documents, HttpApiAuth, dataset_id, fps[i : i + 1]) for i in range(count)]
responses = list(as_completed(futures)) responses = list(as_completed(futures))
assert len(responses) == count, responses assert len(responses) == count, responses
assert all(future.result()["code"] == 0 for future in futures) assert all(future.result()["code"] == 0 for future in futures)
res = list_datasets(api_key, {"id": dataset_id}) res = list_datasets(HttpApiAuth, {"id": dataset_id})
assert res["data"][0]["document_count"] == count assert res["data"][0]["document_count"] == count
@@ -18,24 +18,24 @@ from common import batch_add_sessions_with_chat_assistant, delete_session_with_c
@pytest.fixture(scope="class") @pytest.fixture(scope="class")
def add_sessions_with_chat_assistant(request, api_key, add_chat_assistants): def add_sessions_with_chat_assistant(request, HttpApiAuth, add_chat_assistants):
def cleanup(): def cleanup():
for chat_assistant_id in chat_assistant_ids: for chat_assistant_id in chat_assistant_ids:
delete_session_with_chat_assistants(api_key, chat_assistant_id) delete_session_with_chat_assistants(HttpApiAuth, chat_assistant_id)
request.addfinalizer(cleanup) request.addfinalizer(cleanup)
_, _, chat_assistant_ids = add_chat_assistants _, _, chat_assistant_ids = add_chat_assistants
return chat_assistant_ids[0], batch_add_sessions_with_chat_assistant(api_key, chat_assistant_ids[0], 5) return chat_assistant_ids[0], batch_add_sessions_with_chat_assistant(HttpApiAuth, chat_assistant_ids[0], 5)
@pytest.fixture(scope="function") @pytest.fixture(scope="function")
def add_sessions_with_chat_assistant_func(request, api_key, add_chat_assistants): def add_sessions_with_chat_assistant_func(request, HttpApiAuth, add_chat_assistants):
def cleanup(): def cleanup():
for chat_assistant_id in chat_assistant_ids: for chat_assistant_id in chat_assistant_ids:
delete_session_with_chat_assistants(api_key, chat_assistant_id) delete_session_with_chat_assistants(HttpApiAuth, chat_assistant_id)
request.addfinalizer(cleanup) request.addfinalizer(cleanup)
_, _, chat_assistant_ids = add_chat_assistants _, _, chat_assistant_ids = add_chat_assistants
return chat_assistant_ids[0], batch_add_sessions_with_chat_assistant(api_key, chat_assistant_ids[0], 5) return chat_assistant_ids[0], batch_add_sessions_with_chat_assistant(HttpApiAuth, chat_assistant_ids[0], 5)
@@ -53,14 +53,14 @@ class TestSessionWithChatAssistantCreate:
({"name": "case insensitive"}, 0, ""), ({"name": "case insensitive"}, 0, ""),
], ],
) )
def test_name(self, api_key, add_chat_assistants, payload, expected_code, expected_message): def test_name(self, HttpApiAuth, add_chat_assistants, payload, expected_code, expected_message):
_, _, chat_assistant_ids = add_chat_assistants _, _, chat_assistant_ids = add_chat_assistants
if payload["name"] == "duplicated_name": if payload["name"] == "duplicated_name":
create_session_with_chat_assistant(api_key, chat_assistant_ids[0], payload) create_session_with_chat_assistant(HttpApiAuth, chat_assistant_ids[0], payload)
elif payload["name"] == "case insensitive": elif payload["name"] == "case insensitive":
create_session_with_chat_assistant(api_key, chat_assistant_ids[0], {"name": payload["name"].upper()}) create_session_with_chat_assistant(HttpApiAuth, chat_assistant_ids[0], {"name": payload["name"].upper()})
res = create_session_with_chat_assistant(api_key, chat_assistant_ids[0], payload) res = create_session_with_chat_assistant(HttpApiAuth, chat_assistant_ids[0], payload)
assert res["code"] == expected_code, res assert res["code"] == expected_code, res
if expected_code == 0: if expected_code == 0:
assert res["data"]["name"] == payload["name"] assert res["data"]["name"] == payload["name"]
@@ -76,16 +76,16 @@ class TestSessionWithChatAssistantCreate:
("invalid_chat_assistant_id", 102, "You do not own the assistant."), ("invalid_chat_assistant_id", 102, "You do not own the assistant."),
], ],
) )
def test_invalid_chat_assistant_id(self, api_key, chat_assistant_id, expected_code, expected_message): def test_invalid_chat_assistant_id(self, HttpApiAuth, chat_assistant_id, expected_code, expected_message):
res = create_session_with_chat_assistant(api_key, chat_assistant_id, {"name": "valid_name"}) res = create_session_with_chat_assistant(HttpApiAuth, chat_assistant_id, {"name": "valid_name"})
assert res["code"] == expected_code assert res["code"] == expected_code
assert res["message"] == expected_message assert res["message"] == expected_message
@pytest.mark.p3 @pytest.mark.p3
def test_concurrent_create_session(self, api_key, add_chat_assistants): def test_concurrent_create_session(self, HttpApiAuth, add_chat_assistants):
count = 1000 count = 1000
_, _, chat_assistant_ids = add_chat_assistants _, _, chat_assistant_ids = add_chat_assistants
res = list_session_with_chat_assistants(api_key, chat_assistant_ids[0]) res = list_session_with_chat_assistants(HttpApiAuth, chat_assistant_ids[0])
if res["code"] != 0: if res["code"] != 0:
assert False, res assert False, res
sessions_count = len(res["data"]) sessions_count = len(res["data"])
@@ -94,7 +94,7 @@ class TestSessionWithChatAssistantCreate:
futures = [ futures = [
executor.submit( executor.submit(
create_session_with_chat_assistant, create_session_with_chat_assistant,
api_key, HttpApiAuth,
chat_assistant_ids[0], chat_assistant_ids[0],
{"name": f"session with chat assistant test {i}"}, {"name": f"session with chat assistant test {i}"},
) )
@@ -103,16 +103,16 @@ class TestSessionWithChatAssistantCreate:
responses = list(as_completed(futures)) responses = list(as_completed(futures))
assert len(responses) == count, responses assert len(responses) == count, responses
assert all(future.result()["code"] == 0 for future in futures) assert all(future.result()["code"] == 0 for future in futures)
res = list_session_with_chat_assistants(api_key, chat_assistant_ids[0], {"page_size": count * 2}) res = list_session_with_chat_assistants(HttpApiAuth, chat_assistant_ids[0], {"page_size": count * 2})
if res["code"] != 0: if res["code"] != 0:
assert False, res assert False, res
assert len(res["data"]) == sessions_count + count assert len(res["data"]) == sessions_count + count
@pytest.mark.p3 @pytest.mark.p3
def test_add_session_to_deleted_chat_assistant(self, api_key, add_chat_assistants): def test_add_session_to_deleted_chat_assistant(self, HttpApiAuth, add_chat_assistants):
_, _, chat_assistant_ids = add_chat_assistants _, _, chat_assistant_ids = add_chat_assistants
res = delete_chat_assistants(api_key, {"ids": [chat_assistant_ids[0]]}) res = delete_chat_assistants(HttpApiAuth, {"ids": [chat_assistant_ids[0]]})
assert res["code"] == 0 assert res["code"] == 0
res = create_session_with_chat_assistant(api_key, chat_assistant_ids[0], {"name": "valid_name"}) res = create_session_with_chat_assistant(HttpApiAuth, chat_assistant_ids[0], {"name": "valid_name"})
assert res["code"] == 102 assert res["code"] == 102
assert res["message"] == "You do not own the assistant." assert res["message"] == "You do not own the assistant."
@@ -52,9 +52,9 @@ class TestSessionWithChatAssistantDelete:
), ),
], ],
) )
def test_invalid_chat_assistant_id(self, api_key, add_sessions_with_chat_assistant_func, chat_assistant_id, expected_code, expected_message): def test_invalid_chat_assistant_id(self, HttpApiAuth, add_sessions_with_chat_assistant_func, chat_assistant_id, expected_code, expected_message):
_, session_ids = add_sessions_with_chat_assistant_func _, session_ids = add_sessions_with_chat_assistant_func
res = delete_session_with_chat_assistants(api_key, chat_assistant_id, {"ids": session_ids}) res = delete_session_with_chat_assistants(HttpApiAuth, chat_assistant_id, {"ids": session_ids})
assert res["code"] == expected_code assert res["code"] == expected_code
assert res["message"] == expected_message assert res["message"] == expected_message
@@ -66,54 +66,54 @@ class TestSessionWithChatAssistantDelete:
pytest.param(lambda r: {"ids": r + ["invalid_id"]}, marks=pytest.mark.p3), pytest.param(lambda r: {"ids": r + ["invalid_id"]}, marks=pytest.mark.p3),
], ],
) )
def test_delete_partial_invalid_id(self, api_key, add_sessions_with_chat_assistant_func, payload): def test_delete_partial_invalid_id(self, HttpApiAuth, add_sessions_with_chat_assistant_func, payload):
chat_assistant_id, session_ids = add_sessions_with_chat_assistant_func chat_assistant_id, session_ids = add_sessions_with_chat_assistant_func
if callable(payload): if callable(payload):
payload = payload(session_ids) payload = payload(session_ids)
res = delete_session_with_chat_assistants(api_key, chat_assistant_id, payload) res = delete_session_with_chat_assistants(HttpApiAuth, chat_assistant_id, payload)
assert res["code"] == 0 assert res["code"] == 0
assert res["data"]["errors"][0] == "The chat doesn't own the session invalid_id" assert res["data"]["errors"][0] == "The chat doesn't own the session invalid_id"
res = list_session_with_chat_assistants(api_key, chat_assistant_id) res = list_session_with_chat_assistants(HttpApiAuth, chat_assistant_id)
if res["code"] != 0: if res["code"] != 0:
assert False, res assert False, res
assert len(res["data"]) == 0 assert len(res["data"]) == 0
@pytest.mark.p3 @pytest.mark.p3
def test_repeated_deletion(self, api_key, add_sessions_with_chat_assistant_func): def test_repeated_deletion(self, HttpApiAuth, add_sessions_with_chat_assistant_func):
chat_assistant_id, session_ids = add_sessions_with_chat_assistant_func chat_assistant_id, session_ids = add_sessions_with_chat_assistant_func
payload = {"ids": session_ids} payload = {"ids": session_ids}
res = delete_session_with_chat_assistants(api_key, chat_assistant_id, payload) res = delete_session_with_chat_assistants(HttpApiAuth, chat_assistant_id, payload)
assert res["code"] == 0 assert res["code"] == 0
res = delete_session_with_chat_assistants(api_key, chat_assistant_id, payload) res = delete_session_with_chat_assistants(HttpApiAuth, chat_assistant_id, payload)
assert res["code"] == 102 assert res["code"] == 102
assert "The chat doesn't own the session" in res["message"] assert "The chat doesn't own the session" in res["message"]
@pytest.mark.p3 @pytest.mark.p3
def test_duplicate_deletion(self, api_key, add_sessions_with_chat_assistant_func): def test_duplicate_deletion(self, HttpApiAuth, add_sessions_with_chat_assistant_func):
chat_assistant_id, session_ids = add_sessions_with_chat_assistant_func chat_assistant_id, session_ids = add_sessions_with_chat_assistant_func
res = delete_session_with_chat_assistants(api_key, chat_assistant_id, {"ids": session_ids * 2}) res = delete_session_with_chat_assistants(HttpApiAuth, chat_assistant_id, {"ids": session_ids * 2})
assert res["code"] == 0 assert res["code"] == 0
assert "Duplicate session ids" in res["data"]["errors"][0] assert "Duplicate session ids" in res["data"]["errors"][0]
assert res["data"]["success_count"] == 5 assert res["data"]["success_count"] == 5
res = list_session_with_chat_assistants(api_key, chat_assistant_id) res = list_session_with_chat_assistants(HttpApiAuth, chat_assistant_id)
if res["code"] != 0: if res["code"] != 0:
assert False, res assert False, res
assert len(res["data"]) == 0 assert len(res["data"]) == 0
@pytest.mark.p3 @pytest.mark.p3
def test_concurrent_deletion(self, api_key, add_chat_assistants): def test_concurrent_deletion(self, HttpApiAuth, add_chat_assistants):
count = 100 count = 100
_, _, chat_assistant_ids = add_chat_assistants _, _, chat_assistant_ids = add_chat_assistants
session_ids = batch_add_sessions_with_chat_assistant(api_key, chat_assistant_ids[0], count) session_ids = batch_add_sessions_with_chat_assistant(HttpApiAuth, chat_assistant_ids[0], count)
with ThreadPoolExecutor(max_workers=5) as executor: with ThreadPoolExecutor(max_workers=5) as executor:
futures = [ futures = [
executor.submit( executor.submit(
delete_session_with_chat_assistants, delete_session_with_chat_assistants,
api_key, HttpApiAuth,
chat_assistant_ids[0], chat_assistant_ids[0],
{"ids": session_ids[i : i + 1]}, {"ids": session_ids[i : i + 1]},
) )
@@ -124,15 +124,15 @@ class TestSessionWithChatAssistantDelete:
assert all(future.result()["code"] == 0 for future in futures) assert all(future.result()["code"] == 0 for future in futures)
@pytest.mark.p3 @pytest.mark.p3
def test_delete_1k(self, api_key, add_chat_assistants): def test_delete_1k(self, HttpApiAuth, add_chat_assistants):
sessions_num = 1_000 sessions_num = 1_000
_, _, chat_assistant_ids = add_chat_assistants _, _, chat_assistant_ids = add_chat_assistants
session_ids = batch_add_sessions_with_chat_assistant(api_key, chat_assistant_ids[0], sessions_num) session_ids = batch_add_sessions_with_chat_assistant(HttpApiAuth, chat_assistant_ids[0], sessions_num)
res = delete_session_with_chat_assistants(api_key, chat_assistant_ids[0], {"ids": session_ids}) res = delete_session_with_chat_assistants(HttpApiAuth, chat_assistant_ids[0], {"ids": session_ids})
assert res["code"] == 0 assert res["code"] == 0
res = list_session_with_chat_assistants(api_key, chat_assistant_ids[0]) res = list_session_with_chat_assistants(HttpApiAuth, chat_assistant_ids[0])
if res["code"] != 0: if res["code"] != 0:
assert False, res assert False, res
assert len(res["data"]) == 0 assert len(res["data"]) == 0
@@ -150,7 +150,7 @@ class TestSessionWithChatAssistantDelete:
) )
def test_basic_scenarios( def test_basic_scenarios(
self, self,
api_key, HttpApiAuth,
add_sessions_with_chat_assistant_func, add_sessions_with_chat_assistant_func,
payload, payload,
expected_code, expected_code,
@@ -160,12 +160,12 @@ class TestSessionWithChatAssistantDelete:
chat_assistant_id, session_ids = add_sessions_with_chat_assistant_func chat_assistant_id, session_ids = add_sessions_with_chat_assistant_func
if callable(payload): if callable(payload):
payload = payload(session_ids) payload = payload(session_ids)
res = delete_session_with_chat_assistants(api_key, chat_assistant_id, payload) res = delete_session_with_chat_assistants(HttpApiAuth, chat_assistant_id, payload)
assert res["code"] == expected_code assert res["code"] == expected_code
if res["code"] != 0: if res["code"] != 0:
assert res["message"] == expected_message assert res["message"] == expected_message
res = list_session_with_chat_assistants(api_key, chat_assistant_id) res = list_session_with_chat_assistants(HttpApiAuth, chat_assistant_id)
if res["code"] != 0: if res["code"] != 0:
assert False, res assert False, res
assert len(res["data"]) == remaining assert len(res["data"]) == remaining
@@ -54,9 +54,9 @@ class TestSessionsWithChatAssistantList:
pytest.param({"page": "a", "page_size": 2}, 100, 0, """ValueError("invalid literal for int() with base 10: \'a\'")""", marks=pytest.mark.skip), pytest.param({"page": "a", "page_size": 2}, 100, 0, """ValueError("invalid literal for int() with base 10: \'a\'")""", marks=pytest.mark.skip),
], ],
) )
def test_page(self, api_key, add_sessions_with_chat_assistant, params, expected_code, expected_page_size, expected_message): def test_page(self, HttpApiAuth, add_sessions_with_chat_assistant, params, expected_code, expected_page_size, expected_message):
chat_assistant_id, _ = add_sessions_with_chat_assistant chat_assistant_id, _ = add_sessions_with_chat_assistant
res = list_session_with_chat_assistants(api_key, chat_assistant_id, params=params) res = list_session_with_chat_assistants(HttpApiAuth, chat_assistant_id, params=params)
assert res["code"] == expected_code assert res["code"] == expected_code
if expected_code == 0: if expected_code == 0:
assert len(res["data"]) == expected_page_size assert len(res["data"]) == expected_page_size
@@ -76,9 +76,9 @@ class TestSessionsWithChatAssistantList:
pytest.param({"page_size": "a"}, 100, 0, """ValueError("invalid literal for int() with base 10: \'a\'")""", marks=pytest.mark.skip), pytest.param({"page_size": "a"}, 100, 0, """ValueError("invalid literal for int() with base 10: \'a\'")""", marks=pytest.mark.skip),
], ],
) )
def test_page_size(self, api_key, add_sessions_with_chat_assistant, params, expected_code, expected_page_size, expected_message): def test_page_size(self, HttpApiAuth, add_sessions_with_chat_assistant, params, expected_code, expected_page_size, expected_message):
chat_assistant_id, _ = add_sessions_with_chat_assistant chat_assistant_id, _ = add_sessions_with_chat_assistant
res = list_session_with_chat_assistants(api_key, chat_assistant_id, params=params) res = list_session_with_chat_assistants(HttpApiAuth, chat_assistant_id, params=params)
assert res["code"] == expected_code assert res["code"] == expected_code
if expected_code == 0: if expected_code == 0:
assert len(res["data"]) == expected_page_size assert len(res["data"]) == expected_page_size
@@ -98,7 +98,7 @@ class TestSessionsWithChatAssistantList:
) )
def test_orderby( def test_orderby(
self, self,
api_key, HttpApiAuth,
add_sessions_with_chat_assistant, add_sessions_with_chat_assistant,
params, params,
expected_code, expected_code,
@@ -106,7 +106,7 @@ class TestSessionsWithChatAssistantList:
expected_message, expected_message,
): ):
chat_assistant_id, _ = add_sessions_with_chat_assistant chat_assistant_id, _ = add_sessions_with_chat_assistant
res = list_session_with_chat_assistants(api_key, chat_assistant_id, params=params) res = list_session_with_chat_assistants(HttpApiAuth, chat_assistant_id, params=params)
assert res["code"] == expected_code assert res["code"] == expected_code
if expected_code == 0: if expected_code == 0:
if callable(assertions): if callable(assertions):
@@ -131,7 +131,7 @@ class TestSessionsWithChatAssistantList:
) )
def test_desc( def test_desc(
self, self,
api_key, HttpApiAuth,
add_sessions_with_chat_assistant, add_sessions_with_chat_assistant,
params, params,
expected_code, expected_code,
@@ -139,7 +139,7 @@ class TestSessionsWithChatAssistantList:
expected_message, expected_message,
): ):
chat_assistant_id, _ = add_sessions_with_chat_assistant chat_assistant_id, _ = add_sessions_with_chat_assistant
res = list_session_with_chat_assistants(api_key, chat_assistant_id, params=params) res = list_session_with_chat_assistants(HttpApiAuth, chat_assistant_id, params=params)
assert res["code"] == expected_code assert res["code"] == expected_code
if expected_code == 0: if expected_code == 0:
if callable(assertions): if callable(assertions):
@@ -157,9 +157,9 @@ class TestSessionsWithChatAssistantList:
({"name": "unknown"}, 0, 0, ""), ({"name": "unknown"}, 0, 0, ""),
], ],
) )
def test_name(self, api_key, add_sessions_with_chat_assistant, params, expected_code, expected_num, expected_message): def test_name(self, HttpApiAuth, add_sessions_with_chat_assistant, params, expected_code, expected_num, expected_message):
chat_assistant_id, _ = add_sessions_with_chat_assistant chat_assistant_id, _ = add_sessions_with_chat_assistant
res = list_session_with_chat_assistants(api_key, chat_assistant_id, params=params) res = list_session_with_chat_assistants(HttpApiAuth, chat_assistant_id, params=params)
assert res["code"] == expected_code assert res["code"] == expected_code
if expected_code == 0: if expected_code == 0:
if params["name"] != "session_with_chat_assistant_1": if params["name"] != "session_with_chat_assistant_1":
@@ -179,14 +179,14 @@ class TestSessionsWithChatAssistantList:
("unknown", 0, 0, "The chat doesn't exist"), ("unknown", 0, 0, "The chat doesn't exist"),
], ],
) )
def test_id(self, api_key, add_sessions_with_chat_assistant, session_id, expected_code, expected_num, expected_message): def test_id(self, HttpApiAuth, add_sessions_with_chat_assistant, session_id, expected_code, expected_num, expected_message):
chat_assistant_id, session_ids = add_sessions_with_chat_assistant chat_assistant_id, session_ids = add_sessions_with_chat_assistant
if callable(session_id): if callable(session_id):
params = {"id": session_id(session_ids)} params = {"id": session_id(session_ids)}
else: else:
params = {"id": session_id} params = {"id": session_id}
res = list_session_with_chat_assistants(api_key, chat_assistant_id, params=params) res = list_session_with_chat_assistants(HttpApiAuth, chat_assistant_id, params=params)
assert res["code"] == expected_code assert res["code"] == expected_code
if expected_code == 0: if expected_code == 0:
if params["id"] != session_ids[0]: if params["id"] != session_ids[0]:
@@ -206,14 +206,14 @@ class TestSessionsWithChatAssistantList:
("id", "session_with_chat_assistant_0", 0, 0, ""), ("id", "session_with_chat_assistant_0", 0, 0, ""),
], ],
) )
def test_name_and_id(self, api_key, add_sessions_with_chat_assistant, session_id, name, expected_code, expected_num, expected_message): def test_name_and_id(self, HttpApiAuth, add_sessions_with_chat_assistant, session_id, name, expected_code, expected_num, expected_message):
chat_assistant_id, session_ids = add_sessions_with_chat_assistant chat_assistant_id, session_ids = add_sessions_with_chat_assistant
if callable(session_id): if callable(session_id):
params = {"id": session_id(session_ids), "name": name} params = {"id": session_id(session_ids), "name": name}
else: else:
params = {"id": session_id, "name": name} params = {"id": session_id, "name": name}
res = list_session_with_chat_assistants(api_key, chat_assistant_id, params=params) res = list_session_with_chat_assistants(HttpApiAuth, chat_assistant_id, params=params)
assert res["code"] == expected_code assert res["code"] == expected_code
if expected_code == 0: if expected_code == 0:
assert len(res["data"]) == expected_num assert len(res["data"]) == expected_num
@@ -221,29 +221,29 @@ class TestSessionsWithChatAssistantList:
assert res["message"] == expected_message assert res["message"] == expected_message
@pytest.mark.p3 @pytest.mark.p3
def test_concurrent_list(self, api_key, add_sessions_with_chat_assistant): def test_concurrent_list(self, HttpApiAuth, add_sessions_with_chat_assistant):
count = 100 count = 100
chat_assistant_id, _ = add_sessions_with_chat_assistant chat_assistant_id, _ = add_sessions_with_chat_assistant
with ThreadPoolExecutor(max_workers=5) as executor: with ThreadPoolExecutor(max_workers=5) as executor:
futures = [executor.submit(list_session_with_chat_assistants, api_key, chat_assistant_id) for i in range(count)] futures = [executor.submit(list_session_with_chat_assistants, HttpApiAuth, chat_assistant_id) for i in range(count)]
responses = list(as_completed(futures)) responses = list(as_completed(futures))
assert len(responses) == count, responses assert len(responses) == count, responses
assert all(future.result()["code"] == 0 for future in futures) assert all(future.result()["code"] == 0 for future in futures)
@pytest.mark.p3 @pytest.mark.p3
def test_invalid_params(self, api_key, add_sessions_with_chat_assistant): def test_invalid_params(self, HttpApiAuth, add_sessions_with_chat_assistant):
chat_assistant_id, _ = add_sessions_with_chat_assistant chat_assistant_id, _ = add_sessions_with_chat_assistant
params = {"a": "b"} params = {"a": "b"}
res = list_session_with_chat_assistants(api_key, chat_assistant_id, params=params) res = list_session_with_chat_assistants(HttpApiAuth, chat_assistant_id, params=params)
assert res["code"] == 0 assert res["code"] == 0
assert len(res["data"]) == 5 assert len(res["data"]) == 5
@pytest.mark.p3 @pytest.mark.p3
def test_list_chats_after_deleting_associated_chat_assistant(self, api_key, add_sessions_with_chat_assistant): def test_list_chats_after_deleting_associated_chat_assistant(self, HttpApiAuth, add_sessions_with_chat_assistant):
chat_assistant_id, _ = add_sessions_with_chat_assistant chat_assistant_id, _ = add_sessions_with_chat_assistant
res = delete_chat_assistants(api_key, {"ids": [chat_assistant_id]}) res = delete_chat_assistants(HttpApiAuth, {"ids": [chat_assistant_id]})
assert res["code"] == 0 assert res["code"] == 0
res = list_session_with_chat_assistants(api_key, chat_assistant_id) res = list_session_with_chat_assistants(HttpApiAuth, chat_assistant_id)
assert res["code"] == 102 assert res["code"] == 102
assert "You don't own the assistant" in res["message"] assert "You don't own the assistant" in res["message"]
@@ -52,17 +52,17 @@ class TestSessionWithChatAssistantUpdate:
pytest.param({"name": "case insensitive"}, 0, "", marks=pytest.mark.p3), pytest.param({"name": "case insensitive"}, 0, "", marks=pytest.mark.p3),
], ],
) )
def test_name(self, api_key, add_sessions_with_chat_assistant_func, payload, expected_code, expected_message): def test_name(self, HttpApiAuth, add_sessions_with_chat_assistant_func, payload, expected_code, expected_message):
chat_assistant_id, session_ids = add_sessions_with_chat_assistant_func chat_assistant_id, session_ids = add_sessions_with_chat_assistant_func
if payload["name"] == "duplicated_name": if payload["name"] == "duplicated_name":
update_session_with_chat_assistant(api_key, chat_assistant_id, session_ids[0], payload) update_session_with_chat_assistant(HttpApiAuth, chat_assistant_id, session_ids[0], payload)
elif payload["name"] == "case insensitive": elif payload["name"] == "case insensitive":
update_session_with_chat_assistant(api_key, chat_assistant_id, session_ids[0], {"name": payload["name"].upper()}) update_session_with_chat_assistant(HttpApiAuth, chat_assistant_id, session_ids[0], {"name": payload["name"].upper()})
res = update_session_with_chat_assistant(api_key, chat_assistant_id, session_ids[0], payload) res = update_session_with_chat_assistant(HttpApiAuth, chat_assistant_id, session_ids[0], payload)
assert res["code"] == expected_code, res assert res["code"] == expected_code, res
if expected_code == 0: if expected_code == 0:
res = list_session_with_chat_assistants(api_key, chat_assistant_id, {"id": session_ids[0]}) res = list_session_with_chat_assistants(HttpApiAuth, chat_assistant_id, {"id": session_ids[0]})
assert res["data"][0]["name"] == payload["name"] assert res["data"][0]["name"] == payload["name"]
else: else:
assert res["message"] == expected_message assert res["message"] == expected_message
@@ -75,9 +75,9 @@ class TestSessionWithChatAssistantUpdate: @@ -75,9 +75,9 @@ class TestSessionWithChatAssistantUpdate:
pytest.param("invalid_chat_assistant_id", 102, "Session does not exist", marks=pytest.mark.skip(reason="issues/")), pytest.param("invalid_chat_assistant_id", 102, "Session does not exist", marks=pytest.mark.skip(reason="issues/")),
], ],
) )
def test_invalid_chat_assistant_id(self, api_key, add_sessions_with_chat_assistant_func, chat_assistant_id, expected_code, expected_message): def test_invalid_chat_assistant_id(self, HttpApiAuth, add_sessions_with_chat_assistant_func, chat_assistant_id, expected_code, expected_message):
_, session_ids = add_sessions_with_chat_assistant_func _, session_ids = add_sessions_with_chat_assistant_func
res = update_session_with_chat_assistant(api_key, chat_assistant_id, session_ids[0], {"name": "valid_name"}) res = update_session_with_chat_assistant(HttpApiAuth, chat_assistant_id, session_ids[0], {"name": "valid_name"})
assert res["code"] == expected_code assert res["code"] == expected_code
assert res["message"] == expected_message assert res["message"] == expected_message
@@ -89,19 +89,19 @@ class TestSessionWithChatAssistantUpdate: @@ -89,19 +89,19 @@ class TestSessionWithChatAssistantUpdate:
("invalid_session_id", 102, "Session does not exist"), ("invalid_session_id", 102, "Session does not exist"),
], ],
) )
def test_invalid_session_id(self, api_key, add_sessions_with_chat_assistant_func, session_id, expected_code, expected_message): def test_invalid_session_id(self, HttpApiAuth, add_sessions_with_chat_assistant_func, session_id, expected_code, expected_message):
chat_assistant_id, _ = add_sessions_with_chat_assistant_func chat_assistant_id, _ = add_sessions_with_chat_assistant_func
res = update_session_with_chat_assistant(api_key, chat_assistant_id, session_id, {"name": "valid_name"}) res = update_session_with_chat_assistant(HttpApiAuth, chat_assistant_id, session_id, {"name": "valid_name"})
assert res["code"] == expected_code assert res["code"] == expected_code
assert res["message"] == expected_message assert res["message"] == expected_message
@pytest.mark.p3 @pytest.mark.p3
def test_repeated_update_session(self, api_key, add_sessions_with_chat_assistant_func): def test_repeated_update_session(self, HttpApiAuth, add_sessions_with_chat_assistant_func):
chat_assistant_id, session_ids = add_sessions_with_chat_assistant_func chat_assistant_id, session_ids = add_sessions_with_chat_assistant_func
res = update_session_with_chat_assistant(api_key, chat_assistant_id, session_ids[0], {"name": "valid_name_1"}) res = update_session_with_chat_assistant(HttpApiAuth, chat_assistant_id, session_ids[0], {"name": "valid_name_1"})
assert res["code"] == 0 assert res["code"] == 0
res = update_session_with_chat_assistant(api_key, chat_assistant_id, session_ids[0], {"name": "valid_name_2"}) res = update_session_with_chat_assistant(HttpApiAuth, chat_assistant_id, session_ids[0], {"name": "valid_name_2"})
assert res["code"] == 0 assert res["code"] == 0
@pytest.mark.p3 @pytest.mark.p3
@@ -113,15 +113,15 @@ class TestSessionWithChatAssistantUpdate: @@ -113,15 +113,15 @@ class TestSessionWithChatAssistantUpdate:
pytest.param(None, 100, "TypeError", marks=pytest.mark.skip), pytest.param(None, 100, "TypeError", marks=pytest.mark.skip),
], ],
) )
def test_invalid_params(self, api_key, add_sessions_with_chat_assistant_func, payload, expected_code, expected_message): def test_invalid_params(self, HttpApiAuth, add_sessions_with_chat_assistant_func, payload, expected_code, expected_message):
chat_assistant_id, session_ids = add_sessions_with_chat_assistant_func chat_assistant_id, session_ids = add_sessions_with_chat_assistant_func
res = update_session_with_chat_assistant(api_key, chat_assistant_id, session_ids[0], payload) res = update_session_with_chat_assistant(HttpApiAuth, chat_assistant_id, session_ids[0], payload)
assert res["code"] == expected_code assert res["code"] == expected_code
if expected_code != 0: if expected_code != 0:
assert expected_message in res["message"] assert expected_message in res["message"]
@pytest.mark.p3 @pytest.mark.p3
def test_concurrent_update_session(self, api_key, add_sessions_with_chat_assistant_func): def test_concurrent_update_session(self, HttpApiAuth, add_sessions_with_chat_assistant_func):
count = 50 count = 50
chat_assistant_id, session_ids = add_sessions_with_chat_assistant_func chat_assistant_id, session_ids = add_sessions_with_chat_assistant_func
@@ -129,7 +129,7 @@ class TestSessionWithChatAssistantUpdate: @@ -129,7 +129,7 @@ class TestSessionWithChatAssistantUpdate:
futures = [ futures = [
executor.submit( executor.submit(
update_session_with_chat_assistant, update_session_with_chat_assistant,
api_key, HttpApiAuth,
chat_assistant_id, chat_assistant_id,
session_ids[randint(0, 4)], session_ids[randint(0, 4)],
{"name": f"update session test {i}"}, {"name": f"update session test {i}"},
@@ -141,9 +141,9 @@ class TestSessionWithChatAssistantUpdate: @@ -141,9 +141,9 @@ class TestSessionWithChatAssistantUpdate:
assert all(future.result()["code"] == 0 for future in futures) assert all(future.result()["code"] == 0 for future in futures)
@pytest.mark.p3 @pytest.mark.p3
def test_update_session_to_deleted_chat_assistant(self, api_key, add_sessions_with_chat_assistant_func): def test_update_session_to_deleted_chat_assistant(self, HttpApiAuth, add_sessions_with_chat_assistant_func):
chat_assistant_id, session_ids = add_sessions_with_chat_assistant_func chat_assistant_id, session_ids = add_sessions_with_chat_assistant_func
delete_chat_assistants(api_key, {"ids": [chat_assistant_id]}) delete_chat_assistants(HttpApiAuth, {"ids": [chat_assistant_id]})
res = update_session_with_chat_assistant(api_key, chat_assistant_id, session_ids[0], {"name": "valid_name"}) res = update_session_with_chat_assistant(HttpApiAuth, chat_assistant_id, session_ids[0], {"name": "valid_name"})
assert res["code"] == 102 assert res["code"] == 102
assert res["message"] == "You do not own the session" assert res["message"] == "You do not own the session"