Fix: Create dataset performance unmatched between HTTP api and web ui (#10960)
### What problem does this PR solve?

Fix: Create dataset performance unmatched between HTTP api and web ui #10925

### Type of change

- [x] Bug Fix (non-breaking change which fixes an issue)
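For orientation (an editor's sketch, not part of the commit): the fix routes both creation paths through one service helper, so the web UI endpoint (`kb_app.create`) and the HTTP API endpoint apply the same name validation, per-tenant deduplication, and default `parser_config`. A minimal sketch of the shared call shape, with names taken from the diff below and error handling elided:

```python
# Sketch only: both kb_app.create (web UI) and the /datasets HTTP endpoint
# now build the dataset row via the shared service helper from this diff.
payload = KnowledgebaseService.create_with_name(
    name="my_dataset",    # trimmed, length-checked, de-duplicated per tenant
    tenant_id=tenant_id,  # current_user.id on the web UI path
    parser_id=None,       # falls back to "naive" when omitted
)
KnowledgebaseService.save(**payload)  # persists the prepared row
```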
@@ -22,7 +22,6 @@ from flask_login import login_required, current_user
import numpy as np

from api.db import LLMType
from api.db.services import duplicate_name
from api.db.services.llm_service import LLMBundle
from api.db.services.document_service import DocumentService, queue_raptor_o_graphrag_tasks
from api.db.services.file2document_service import File2DocumentService
@@ -31,7 +30,6 @@ from api.db.services.pipeline_operation_log_service import PipelineOperationLogS
from api.db.services.task_service import TaskService, GRAPH_RAPTOR_FAKE_DOC_ID
from api.db.services.user_service import TenantService, UserTenantService
from api.utils.api_utils import get_error_data_result, server_error_response, get_data_error_result, validate_request, not_allowed_parameters
from common.misc_utils import get_uuid
from api.db import PipelineTaskType, StatusEnum, FileSource, VALID_FILE_TYPES, VALID_TASK_STATUS
from api.db.services.knowledgebase_service import KnowledgebaseService
from api.db.db_models import File
@@ -50,63 +48,17 @@ from rag.utils.doc_store_conn import OrderByExpr
@validate_request("name")
def create():
    req = request.json
    dataset_name = req["name"]
    if not isinstance(dataset_name, str):
        return get_data_error_result(message="Dataset name must be string.")
    if dataset_name.strip() == "":
        return get_data_error_result(message="Dataset name can't be empty.")
    if len(dataset_name.encode("utf-8")) > DATASET_NAME_LIMIT:
        return get_data_error_result(
            message=f"Dataset name length is {len(dataset_name)} which is larger than {DATASET_NAME_LIMIT}")
    req = KnowledgebaseService.create_with_name(
        name=req.pop("name", None),
        tenant_id=current_user.id,
        parser_id=req.pop("parser_id", None),
        **req
    )

    dataset_name = dataset_name.strip()
    dataset_name = duplicate_name(
        KnowledgebaseService.query,
        name=dataset_name,
        tenant_id=current_user.id,
        status=StatusEnum.VALID.value)
    try:
        req["id"] = get_uuid()
        req["name"] = dataset_name
        req["tenant_id"] = current_user.id
        req["created_by"] = current_user.id
        if not req.get("parser_id"):
            req["parser_id"] = "naive"
        e, t = TenantService.get_by_id(current_user.id)
        if not e:
            return get_data_error_result(message="Tenant not found.")

        req["parser_config"] = {
            "layout_recognize": "DeepDOC",
            "chunk_token_num": 512,
            "delimiter": "\n",
            "auto_keywords": 0,
            "auto_questions": 0,
            "html4excel": False,
            "topn_tags": 3,
            "raptor": {
                "use_raptor": True,
                "prompt": "Please summarize the following paragraphs. Be careful with the numbers, do not make things up. Paragraphs as following:\n {cluster_content}\nThe above is the content you need to summarize.",
                "max_token": 256,
                "threshold": 0.1,
                "max_cluster": 64,
                "random_seed": 0
            },
            "graphrag": {
                "use_graphrag": True,
                "entity_types": [
                    "organization",
                    "person",
                    "geo",
                    "event",
                    "category"
                ],
                "method": "light"
            }
        }
        if not KnowledgebaseService.save(**req):
            return get_data_error_result()
        return get_json_result(data={"kb_id": req["id"]})
    except Exception as e:
        return server_error_response(e)
@@ -28,12 +28,10 @@ from api.db.services.file2document_service import File2DocumentService
from api.db.services.file_service import FileService
from api.db.services.knowledgebase_service import KnowledgebaseService
from api.db.services.user_service import TenantService
from common.misc_utils import get_uuid
from api.utils.api_utils import (
    deep_merge,
    get_error_argument_result,
    get_error_data_result,
    get_error_operating_result,
    get_error_permission_result,
    get_parser_config,
    get_result,
@@ -80,29 +78,28 @@ def create(tenant_id):
            properties:
              name:
                type: string
                description: Name of the dataset.
                description: Dataset name (required).
              avatar:
                type: string
                description: Base64 encoding of the avatar.
                description: Optional base64-encoded avatar image.
              description:
                type: string
                description: Description of the dataset.
                description: Optional dataset description.
              embedding_model:
                type: string
                description: Embedding model Name.
                description: Optional embedding model name; if omitted, the tenant's default embedding model is used.
              permission:
                type: string
                enum: ['me', 'team']
                description: Dataset permission.
                description: Visibility of the dataset (private to me or shared with team).
              chunk_method:
                type: string
                enum: ["naive", "book", "email", "laws", "manual", "one", "paper",
                    "picture", "presentation", "qa", "table", "tag"
                ]
                description: Chunking method.
                    "picture", "presentation", "qa", "table", "tag"]
                description: Chunking method; if omitted, defaults to "naive".
              parser_config:
                type: object
                description: Parser configuration.
                description: Optional parser configuration; server-side defaults will be applied.
        responses:
          200:
            description: Successful operation.
@@ -117,44 +114,43 @@ def create(tenant_id):
    # |----------------|-------------|
    # | embedding_model| embd_id     |
    # | chunk_method   | parser_id   |

    req, err = validate_and_parse_json_request(request, CreateDatasetReq)
    if err is not None:
        return get_error_argument_result(err)

    req = KnowledgebaseService.create_with_name(
        name=req.pop("name", None),
        tenant_id=tenant_id,
        parser_id=req.pop("parser_id", None),
        **req
    )

    # Insert embedding model(embd id)
    ok, t = TenantService.get_by_id(tenant_id)
    if not ok:
        return get_error_permission_result(message="Tenant not found")
    if not req.get("embd_id"):
        req["embd_id"] = t.embd_id
    else:
        ok, err = verify_embedding_availability(req["embd_id"], tenant_id)
        if not ok:
            return err

    try:
        if KnowledgebaseService.get_or_none(name=req["name"], tenant_id=tenant_id, status=StatusEnum.VALID.value):
            return get_error_operating_result(message=f"Dataset name '{req['name']}' already exists")

        req["parser_config"] = get_parser_config(req["parser_id"], req["parser_config"])
        req["id"] = get_uuid()
        req["tenant_id"] = tenant_id
        req["created_by"] = tenant_id

        ok, t = TenantService.get_by_id(tenant_id)
        if not ok:
            return get_error_permission_result(message="Tenant not found")

        if not req.get("embd_id"):
            req["embd_id"] = t.embd_id
        else:
            ok, err = verify_embedding_availability(req["embd_id"], tenant_id)
            if not ok:
                return err

        if not KnowledgebaseService.save(**req):
            return get_error_data_result(message="Create dataset error.(Database error)")

        ok, k = KnowledgebaseService.get_by_id(req["id"])
        if not ok:
            return get_error_data_result(message="Dataset created failed")

        response_data = remap_dictionary_keys(k.to_dict())
        return get_result(data=response_data)
    except OperationalError as e:
        if not KnowledgebaseService.save(**req):
            return get_error_data_result()
        ok, k = KnowledgebaseService.get_by_id(req["id"])
        if not ok:
            return get_error_data_result(message="Dataset created failed")

        response_data = remap_dictionary_keys(k.to_dict())
        return get_result(data=response_data)
    except Exception as e:
        logging.exception(e)
        return get_error_data_result(message="Database operation failed")


@manager.route("/datasets", methods=["DELETE"])  # noqa: F821
@token_required
def delete(tenant_id):
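For illustration (an editor's sketch, not part of the diff): a minimal call against the creation endpoint documented above, assuming a local RAGFlow deployment. The base URL and token handling here are assumptions; the field names follow the swagger block.

```python
import requests

BASE_URL = "http://localhost:9380/api/v1"  # assumed local deployment
API_KEY = "<your-api-key>"                 # placeholder token

resp = requests.post(
    f"{BASE_URL}/datasets",
    headers={"Authorization": f"Bearer {API_KEY}"},
    json={
        "name": "my_dataset",     # required
        "chunk_method": "naive",  # optional; defaults to "naive"
    },
)
print(resp.status_code, resp.json())
```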
@@ -21,7 +21,11 @@ from api.db import StatusEnum, TenantPermission
from api.db.db_models import DB, Document, Knowledgebase, User, UserTenant, UserCanvas
from api.db.services.common_service import CommonService
from common.time_utils import current_timestamp, datetime_format

from api.db.services import duplicate_name
from api.db.services.user_service import TenantService
from common.misc_utils import get_uuid
from api.constants import DATASET_NAME_LIMIT
from api.utils.api_utils import get_parser_config, get_data_error_result

class KnowledgebaseService(CommonService):
    """Service class for managing knowledge base operations.
@@ -363,6 +367,64 @@ class KnowledgebaseService(CommonService):
        # List of all knowledge base IDs
        return [m["id"] for m in cls.model.select(cls.model.id).dicts()]

    @classmethod
    @DB.connection_context()
    def create_with_name(
        cls,
        *,
        name: str,
        tenant_id: str,
        parser_id: str | None = None,
        **kwargs
    ):
        """Create a dataset (knowledgebase) by name with kb_app defaults.

        This encapsulates the creation logic used in kb_app.create so other callers
        (including RESTful endpoints) can reuse the same behavior.

        Returns:
            On success, the payload dict ready to be persisted via
            KnowledgebaseService.save; on failure, an error result (or a
            (False, error_message) tuple when the tenant is missing).
        """
        # Validate name
        if not isinstance(name, str):
            return get_data_error_result(message="Dataset name must be string.")
        dataset_name = name.strip()
        if dataset_name == "":
            return get_data_error_result(message="Dataset name can't be empty.")
        if len(dataset_name.encode("utf-8")) > DATASET_NAME_LIMIT:
            return get_data_error_result(message=f"Dataset name length is {len(dataset_name)} which is larger than {DATASET_NAME_LIMIT}")

        # Deduplicate name within tenant
        dataset_name = duplicate_name(
            cls.query,
            name=dataset_name,
            tenant_id=tenant_id,
            status=StatusEnum.VALID.value,
        )

        # Verify tenant exists
        ok, _t = TenantService.get_by_id(tenant_id)
        if not ok:
            return False, "Tenant not found."

        # Build payload
        kb_id = get_uuid()
        payload = {
            "id": kb_id,
            "name": dataset_name,
            "tenant_id": tenant_id,
            "created_by": tenant_id,
            "parser_id": (parser_id or "naive"),
            **kwargs
        }

        # Default parser_config (aligned with kb_app.create); falls back to the
        # per-method defaults when the caller supplies none
        payload["parser_config"] = get_parser_config(parser_id, kwargs.get("parser_config"))
        return payload

    @classmethod
    @DB.connection_context()
    def get_list(cls, joined_tenant_ids, user_id,
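One thing worth noting about the helper above (an editor's sketch, not part of the diff): its return type is mixed, so a caller that wants to branch on the outcome has to distinguish the payload dict from the error cases. A minimal defensive sketch, using only the shapes visible in the method body:

```python
# Hypothetical caller; `tenant_id` is assumed to be in scope.
result = KnowledgebaseService.create_with_name(name="demo", tenant_id=tenant_id)
if isinstance(result, tuple):
    # (False, "Tenant not found.") when the tenant lookup fails
    ok, msg = result
elif isinstance(result, dict):
    # The prepared row; persist it as both endpoints in this diff do
    KnowledgebaseService.save(**result)
else:
    # A ready-made error response from the name-validation branch
    pass
```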
@@ -298,8 +298,34 @@ def get_parser_config(chunk_method, parser_config):

    # Define default configurations for each chunking method
    key_mapping = {
        "naive": {"chunk_token_num": 512, "delimiter": r"\n", "html4excel": False, "layout_recognize": "DeepDOC",
                  "raptor": {"use_raptor": False}, "graphrag": {"use_graphrag": False}},
        "naive": {
            "layout_recognize": "DeepDOC",
            "chunk_token_num": 512,
            "delimiter": "\n",
            "auto_keywords": 0,
            "auto_questions": 0,
            "html4excel": False,
            "topn_tags": 3,
            "raptor": {
                "use_raptor": True,
                "prompt": "Please summarize the following paragraphs. Be careful with the numbers, do not make things up. Paragraphs as following:\n {cluster_content}\nThe above is the content you need to summarize.",
                "max_token": 256,
                "threshold": 0.1,
                "max_cluster": 64,
                "random_seed": 0,
            },
            "graphrag": {
                "use_graphrag": True,
                "entity_types": [
                    "organization",
                    "person",
                    "geo",
                    "event",
                    "category",
                ],
                "method": "light",
            },
        },
        "qa": {"raptor": {"use_raptor": False}, "graphrag": {"use_graphrag": False}},
        "tag": None,
        "resume": None,
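For illustration (an editor's sketch, not part of the diff): judging from the tests later in this commit, where empty, unset, and None parser_config all come back as the defaults, the helper appears to fall back to the per-method defaults whenever the caller supplies no config. A minimal sketch of that behavior under those assumptions; the real get_parser_config in api/utils/api_utils.py may differ in detail:

```python
def get_parser_config_sketch(chunk_method, parser_config, key_mapping):
    # A caller-supplied config wins outright; {} and None fall through.
    if parser_config:
        return parser_config
    # Unset or unknown methods default to "naive" in this sketch.
    defaults = key_mapping.get(chunk_method or "naive")
    # Methods mapped to None (e.g. "tag", "resume") carry no defaults.
    return defaults
```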
@@ -101,10 +101,10 @@ def test_invalid_name_dataset(get_auth):
    # create dataset
    # with pytest.raises(Exception) as e:
    res = create_dataset(get_auth, 0)
    assert res['code'] == 102
    assert res['code'] == 100

    res = create_dataset(get_auth, "")
    assert res['code'] == 102
    assert res['code'] == 100

    long_string = ""

@@ -112,7 +112,7 @@ def test_invalid_name_dataset(get_auth):
        long_string += random.choice(string.ascii_letters + string.digits)

    res = create_dataset(get_auth, long_string)
    assert res['code'] == 102
    assert res['code'] == 100
    print(res)
@@ -34,3 +34,32 @@ DATASET_NAME_LIMIT = 128
DOCUMENT_NAME_LIMIT = 255
CHAT_ASSISTANT_NAME_LIMIT = 255
SESSION_WITH_CHAT_NAME_LIMIT = 255

DEFAULT_PARSER_CONFIG = {
    "layout_recognize": "DeepDOC",
    "chunk_token_num": 512,
    "delimiter": "\n",
    "auto_keywords": 0,
    "auto_questions": 0,
    "html4excel": False,
    "topn_tags": 3,
    "raptor": {
        "use_raptor": True,
        "prompt": "Please summarize the following paragraphs. Be careful with the numbers, do not make things up. Paragraphs as following:\n {cluster_content}\nThe above is the content you need to summarize.",
        "max_token": 256,
        "threshold": 0.1,
        "max_cluster": 64,
        "random_seed": 0,
    },
    "graphrag": {
        "use_graphrag": True,
        "entity_types": [
            "organization",
            "person",
            "geo",
            "event",
            "category",
        ],
        "method": "light",
    },
}
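An editor's note, not part of the commit: the same defaults now live both in this constant and in get_parser_config's "naive" entry, so a small consistency test could guard against drift. A hypothetical sketch, with module paths assumed from the imports shown elsewhere in this diff:

```python
from api.utils.api_utils import get_parser_config
from configs import DEFAULT_PARSER_CONFIG

def test_naive_defaults_match_constant():
    # get_parser_config falls back to its "naive" defaults when no
    # parser_config is supplied; those should equal the shared constant.
    assert get_parser_config("naive", None) == DEFAULT_PARSER_CONFIG
```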
@@ -23,7 +23,7 @@ from libs.auth import RAGFlowHttpApiAuth
from utils import encode_avatar
from utils.file_utils import create_image_file
from utils.hypothesis_utils import valid_names

from configs import DEFAULT_PARSER_CONFIG

@pytest.mark.usefixtures("clear_datasets")
class TestAuthorization:
@@ -637,42 +637,21 @@ class TestDatasetCreate:
        payload = {"name": "parser_config_empty", "parser_config": {}}
        res = create_dataset(HttpApiAuth, payload)
        assert res["code"] == 0, res
        assert res["data"]["parser_config"] == {
            "chunk_token_num": 512,
            "delimiter": r"\n",
            "html4excel": False,
            "layout_recognize": "DeepDOC",
            "raptor": {"use_raptor": False},
            "graphrag": {"use_graphrag": False},
        }, res
        assert res["data"]["parser_config"] == DEFAULT_PARSER_CONFIG, res

    @pytest.mark.p2
    def test_parser_config_unset(self, HttpApiAuth):
        payload = {"name": "parser_config_unset"}
        res = create_dataset(HttpApiAuth, payload)
        assert res["code"] == 0, res
        assert res["data"]["parser_config"] == {
            "chunk_token_num": 512,
            "delimiter": r"\n",
            "html4excel": False,
            "layout_recognize": "DeepDOC",
            "raptor": {"use_raptor": False},
            "graphrag": {"use_graphrag": False},
        }, res
        assert res["data"]["parser_config"] == DEFAULT_PARSER_CONFIG, res

    @pytest.mark.p3
    def test_parser_config_none(self, HttpApiAuth):
        payload = {"name": "parser_config_none", "parser_config": None}
        res = create_dataset(HttpApiAuth, payload)
        assert res["code"] == 0, res
        assert res["data"]["parser_config"] == {
            "chunk_token_num": 512,
            "delimiter": "\\n",
            "html4excel": False,
            "layout_recognize": "DeepDOC",
            "raptor": {"use_raptor": False},
            "graphrag": {"use_graphrag": False},
        }, res
        assert res["data"]["parser_config"] == DEFAULT_PARSER_CONFIG, res

    @pytest.mark.p2
    @pytest.mark.parametrize(
@@ -25,7 +25,7 @@ from libs.auth import RAGFlowHttpApiAuth
from utils import encode_avatar
from utils.file_utils import create_image_file
from utils.hypothesis_utils import valid_names

from configs import DEFAULT_PARSER_CONFIG
# TODO: Missing scenario for updating embedding_model with chunk_count != 0
@@ -748,14 +748,7 @@ class TestDatasetUpdate:

        res = list_datasets(HttpApiAuth)
        assert res["code"] == 0, res
        assert res["data"][0]["parser_config"] == {
            "chunk_token_num": 512,
            "delimiter": r"\n",
            "html4excel": False,
            "layout_recognize": "DeepDOC",
            "raptor": {"use_raptor": False},
            "graphrag": {"use_graphrag": False},
        }, res
        assert res["data"][0]["parser_config"] == DEFAULT_PARSER_CONFIG, res

    @pytest.mark.p3
    def test_parser_config_none(self, HttpApiAuth, add_dataset_func):
@@ -766,14 +759,7 @@ class TestDatasetUpdate:

        res = list_datasets(HttpApiAuth, {"id": dataset_id})
        assert res["code"] == 0, res
        assert res["data"][0]["parser_config"] == {
            "chunk_token_num": 512,
            "delimiter": r"\n",
            "html4excel": False,
            "layout_recognize": "DeepDOC",
            "raptor": {"use_raptor": False},
            "graphrag": {"use_graphrag": False},
        }, res
        assert res["data"][0]["parser_config"] == DEFAULT_PARSER_CONFIG, res

    @pytest.mark.p3
    def test_parser_config_empty_with_chunk_method_change(self, HttpApiAuth, add_dataset_func):
@@ -19,7 +19,7 @@ import pytest
from common import list_documents, update_document
from configs import DOCUMENT_NAME_LIMIT, INVALID_API_TOKEN
from libs.auth import RAGFlowHttpApiAuth

from configs import DEFAULT_PARSER_CONFIG

@pytest.mark.p1
class TestAuthorization:
@@ -308,14 +308,7 @@ class TestUpdateDocumentParserConfig:
            ("naive", {}, 0, ""),
            (
                "naive",
                {
                    "chunk_token_num": 512,
                    "layout_recognize": "DeepDOC",
                    "html4excel": False,
                    "delimiter": r"\n",
                    "task_page_size": 12,
                    "raptor": {"use_raptor": False},
                },
                DEFAULT_PARSER_CONFIG,
                0,
                "",
            ),
@@ -419,7 +412,14 @@ class TestUpdateDocumentParserConfig:
                "",
                marks=pytest.mark.skip(reason="issues/6098"),
            ),
            ("naive", {"raptor": {"use_raptor": True}}, 0, ""),
            ("naive", {"raptor": {
                "use_raptor": True,
                "prompt": "Please summarize the following paragraphs. Be careful with the numbers, do not make things up. Paragraphs as following:\n {cluster_content}\nThe above is the content you need to summarize.",
                "max_token": 256,
                "threshold": 0.1,
                "max_cluster": 64,
                "random_seed": 0,
            }}, 0, ""),
            ("naive", {"raptor": {"use_raptor": False}}, 0, ""),
            pytest.param(
                "naive",
@@ -534,14 +534,7 @@ class TestUpdateDocumentParserConfig:
        if expected_code == 0:
            res = list_documents(HttpApiAuth, dataset_id, {"id": document_ids[0]})
            if parser_config == {}:
                assert res["data"]["docs"][0]["parser_config"] == {
                    "chunk_token_num": 512,
                    "delimiter": r"\n",
                    "html4excel": False,
                    "layout_recognize": "DeepDOC",
                    "raptor": {"use_raptor": False},
                    "graphrag": {"use_graphrag": False},
                }
                assert res["data"]["docs"][0]["parser_config"] == DEFAULT_PARSER_CONFIG
            else:
                for k, v in parser_config.items():
                    assert res["data"]["docs"][0]["parser_config"][k] == v
@@ -23,7 +23,7 @@ from ragflow_sdk import DataSet, RAGFlow
from utils import encode_avatar
from utils.file_utils import create_image_file
from utils.hypothesis_utils import valid_names

from configs import DEFAULT_PARSER_CONFIG

@pytest.mark.usefixtures("clear_datasets")
class TestAuthorization:
@@ -586,14 +586,7 @@ class TestDatasetCreate:
    def test_parser_config_empty(self, client):
        excepted_value = DataSet.ParserConfig(
            client,
            {
                "chunk_token_num": 512,
                "delimiter": r"\n",
                "html4excel": False,
                "layout_recognize": "DeepDOC",
                "raptor": {"use_raptor": False},
                "graphrag": {"use_graphrag": False},
            },
            DEFAULT_PARSER_CONFIG,
        )
        parser_config_o = DataSet.ParserConfig(client, {})
        payload = {"name": "parser_config_empty", "parser_config": parser_config_o}
@@ -604,14 +597,7 @@ class TestDatasetCreate:
    def test_parser_config_unset(self, client):
        excepted_value = DataSet.ParserConfig(
            client,
            {
                "chunk_token_num": 512,
                "delimiter": r"\n",
                "html4excel": False,
                "layout_recognize": "DeepDOC",
                "raptor": {"use_raptor": False},
                "graphrag": {"use_graphrag": False},
            },
            DEFAULT_PARSER_CONFIG,
        )
        payload = {"name": "parser_config_unset"}
        dataset = client.create_dataset(**payload)
@@ -621,14 +607,7 @@ class TestDatasetCreate:
    def test_parser_config_none(self, client):
        excepted_value = DataSet.ParserConfig(
            client,
            {
                "chunk_token_num": 512,
                "delimiter": r"\n",
                "html4excel": False,
                "layout_recognize": "DeepDOC",
                "raptor": {"use_raptor": False},
                "graphrag": {"use_graphrag": False},
            },
            DEFAULT_PARSER_CONFIG,
        )
        payload = {"name": "parser_config_empty", "parser_config": None}
        dataset = client.create_dataset(**payload)
@@ -24,7 +24,7 @@ from ragflow_sdk import DataSet
from utils import encode_avatar
from utils.file_utils import create_image_file
from utils.hypothesis_utils import valid_names

from configs import DEFAULT_PARSER_CONFIG

class TestRquest:
    @pytest.mark.p2
@@ -634,14 +634,7 @@ class TestDatasetUpdate:
        dataset = add_dataset_func
        expected_config = DataSet.ParserConfig(
            client,
            {
                "chunk_token_num": 512,
                "delimiter": r"\n",
                "html4excel": False,
                "layout_recognize": "DeepDOC",
                "raptor": {"use_raptor": False},
                "graphrag": {"use_graphrag": False},
            },
            DEFAULT_PARSER_CONFIG,
        )
        dataset.update({"parser_config": {}})
        assert str(dataset.parser_config) == str(expected_config), str(dataset)
@@ -654,14 +647,7 @@ class TestDatasetUpdate:
        dataset = add_dataset_func
        expected_config = DataSet.ParserConfig(
            client,
            {
                "chunk_token_num": 512,
                "delimiter": r"\n",
                "html4excel": False,
                "layout_recognize": "DeepDOC",
                "raptor": {"use_raptor": False},
                "graphrag": {"use_graphrag": False},
            },
            DEFAULT_PARSER_CONFIG,
        )
        dataset.update({"parser_config": None})
        assert str(dataset.parser_config) == str(expected_config), str(dataset)
@@ -17,7 +17,7 @@
import pytest
from configs import DOCUMENT_NAME_LIMIT
from ragflow_sdk import DataSet

from configs import DEFAULT_PARSER_CONFIG

class TestDocumentsUpdated:
    @pytest.mark.p1
@@ -206,14 +206,7 @@ class TestUpdateDocumentParserConfig:
            ("naive", {}, ""),
            (
                "naive",
                {
                    "chunk_token_num": 512,
                    "layout_recognize": "DeepDOC",
                    "html4excel": False,
                    "delimiter": r"\n",
                    "task_page_size": 12,
                    "raptor": {"use_raptor": False},
                },
                DEFAULT_PARSER_CONFIG,
                "",
            ),
            pytest.param(
@@ -294,7 +287,12 @@ class TestUpdateDocumentParserConfig:
                "",
                marks=pytest.mark.skip(reason="issues/6098"),
            ),
            ("naive", {"raptor": {"use_raptor": True}}, ""),
            ("naive", {"raptor": {
                "use_raptor": True,
                "prompt": "Please summarize the following paragraphs. Be careful with the numbers, do not make things up. Paragraphs as following:\n {cluster_content}\nThe above is the content you need to summarize.",
                "max_token": 256,
                "threshold": 0.1,
                "max_cluster": 64,
                "random_seed": 0,
            }}, ""),
            ("naive", {"raptor": {"use_raptor": False}}, ""),
            pytest.param(
                "naive",
@@ -400,13 +398,6 @@ class TestUpdateDocumentParserConfig:
        else:
            expected_config = DataSet.ParserConfig(
                client,
                {
                    "chunk_token_num": 512,
                    "delimiter": r"\n",
                    "html4excel": False,
                    "layout_recognize": "DeepDOC",
                    "raptor": {"use_raptor": False},
                    "graphrag": {"use_graphrag": False},
                },
                DEFAULT_PARSER_CONFIG,
            )
            assert str(updated_doc.parser_config) == str(expected_config), str(updated_doc)