Mirror of https://github.com/infiniflow/ragflow.git (synced 2026-02-03 00:55:10 +08:00)

Compare commits: v0.22.0...e841b09d63 (29 commits)
| SHA1 |
|---|
| e841b09d63 |
| b1a1eedf53 |
| 68e3b33ae4 |
| cd55f6c1b8 |
| 996b5fe14e |
| db4fd19c82 |
| 12db62b9c7 |
| b5f2cf16bc |
| e27ff8d3d4 |
| 5f59418aba |
| 87e69868c0 |
| 72c20022f6 |
| 3f2472f1b9 |
| 1d4d67daf8 |
| 7538e218a5 |
| 6b52f7df5a |
| 63131ec9b2 |
| e8f1a245a6 |
| 908450509f |
| 70a0f081f6 |
| 93422fa8cc |
| bfc84ba95b |
| 871055b0fc |
| ba71160b14 |
| bd5dda6b10 |
| 774563970b |
| 83d84e90ed |
| 8ef2f79d0a |
| 296476ab89 |
.github/workflows/tests.yml (vendored, 32 changed lines)
@ -95,6 +95,38 @@ jobs:
          version: ">=0.11.x"
          args: "check"

      - name: Check comments of changed Python files
        if: ${{ false }}
        run: |
          if [[ ${{ github.event_name }} == 'pull_request_target' ]]; then
            CHANGED_FILES=$(git diff --name-only ${{ github.event.pull_request.base.sha }}...${{ github.event.pull_request.head.sha }} \
              | grep -E '\.(py)$' || true)

            if [ -n "$CHANGED_FILES" ]; then
              echo "Check comments of changed Python files with check_comment_ascii.py"

              readarray -t files <<< "$CHANGED_FILES"
              HAS_ERROR=0

              for file in "${files[@]}"; do
                if [ -f "$file" ]; then
                  if python3 check_comment_ascii.py "$file"; then
                    echo "✅ $file"
                  else
                    echo "❌ $file"
                    HAS_ERROR=1
                  fi
                fi
              done

              if [ $HAS_ERROR -ne 0 ]; then
                exit 1
              fi
            else
              echo "No Python files changed"
            fi
          fi

      - name: Build ragflow:nightly
        run: |
          RUNNER_WORKSPACE_PREFIX=${RUNNER_WORKSPACE_PREFIX:-${HOME}}
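The job above shells out to a repository script named check_comment_ascii.py, whose contents are not part of this diff. A minimal sketch of what such a checker could look like, assuming it simply rejects non-ASCII characters in `#` comments (the project's actual rules may differ):

```python
# Hypothetical sketch of check_comment_ascii.py; the real script is not shown in this diff.
# It tokenizes one Python file and fails if any '#' comment contains non-ASCII characters.
import sys
import tokenize


def check_file(path: str) -> int:
    bad = 0
    with open(path, "rb") as f:
        for tok in tokenize.tokenize(f.readline):
            if tok.type == tokenize.COMMENT and not tok.string.isascii():
                print(f"{path}:{tok.start[0]}: non-ASCII comment: {tok.string!r}")
                bad += 1
    return bad


if __name__ == "__main__":
    sys.exit(1 if check_file(sys.argv[1]) else 0)
```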
@ -51,7 +51,9 @@ RUN --mount=type=cache,id=ragflow_apt,target=/var/cache/apt,sharing=locked \
    apt install -y libpython3-dev libgtk-4-1 libnss3 xdg-utils libgbm-dev && \
    apt install -y libjemalloc-dev && \
    apt install -y python3-pip pipx nginx unzip curl wget git vim less && \
    apt install -y ghostscript
    apt install -y ghostscript && \
    apt install -y pandoc && \
    apt install -y texlive

RUN if [ "$NEED_MIRROR" == "1" ]; then \
    pip3 config set global.index-url https://pypi.tuna.tsinghua.edu.cn/simple && \
@ -192,9 +192,10 @@ releases! 🌟

```bash
$ cd ragflow/docker

# Optional: use a stable tag (see releases: https://github.com/infiniflow/ragflow/releases), e.g.: git checkout v0.22.0
# This step ensures the **entrypoint.sh** file in the code matches the Docker image version.

# Use CPU for DeepDoc tasks:
$ docker compose -f docker-compose.yml up -d

@ -192,6 +192,7 @@ Coba demo kami di [https://demo.ragflow.io](https://demo.ragflow.io).
$ cd ragflow/docker

# Opsional: gunakan tag stabil (lihat releases: https://github.com/infiniflow/ragflow/releases), contoh: git checkout v0.22.0
# This step ensures the **entrypoint.sh** file in the code matches the Docker image version.

# Use CPU for DeepDoc tasks:
$ docker compose -f docker-compose.yml up -d

@ -172,6 +172,7 @@
$ cd ragflow/docker

# 任意: 安定版タグを利用 (一覧: https://github.com/infiniflow/ragflow/releases) 例: git checkout v0.22.0
# この手順は、コード内の entrypoint.sh ファイルが Docker イメージのバージョンと一致していることを確認します。

# Use CPU for DeepDoc tasks:
$ docker compose -f docker-compose.yml up -d

@ -174,6 +174,7 @@
$ cd ragflow/docker

# Optional: use a stable tag (see releases: https://github.com/infiniflow/ragflow/releases), e.g.: git checkout v0.22.0
# 이 단계는 코드의 entrypoint.sh 파일이 Docker 이미지 버전과 일치하도록 보장합니다.

# Use CPU for DeepDoc tasks:
$ docker compose -f docker-compose.yml up -d

@ -192,6 +192,7 @@ Experimente nossa demo em [https://demo.ragflow.io](https://demo.ragflow.io).
$ cd ragflow/docker

# Opcional: use uma tag estável (veja releases: https://github.com/infiniflow/ragflow/releases), ex.: git checkout v0.22.0
# Esta etapa garante que o arquivo entrypoint.sh no código corresponda à versão da imagem do Docker.

# Use CPU for DeepDoc tasks:
$ docker compose -f docker-compose.yml up -d

@ -191,6 +191,7 @@
$ cd ragflow/docker

# 可選:使用穩定版標籤(查看發佈:https://github.com/infiniflow/ragflow/releases),例:git checkout v0.22.0
# 此步驟確保程式碼中的 entrypoint.sh 檔案與 Docker 映像版本一致。

# Use CPU for DeepDoc tasks:
$ docker compose -f docker-compose.yml up -d

@ -192,6 +192,7 @@
$ cd ragflow/docker

# 可选:使用稳定版本标签(查看发布:https://github.com/infiniflow/ragflow/releases),例如:git checkout v0.22.0
# 这一步确保代码中的 entrypoint.sh 文件与 Docker 镜像的版本保持一致。

# Use CPU for DeepDoc tasks:
$ docker compose -f docker-compose.yml up -d
@ -4,7 +4,7 @@

Admin Service is a dedicated management component designed to monitor, maintain, and administrate the RAGFlow system. It provides comprehensive tools for ensuring system stability, performing operational tasks, and managing users and permissions efficiently.

The service offers real-time monitoring of critical components, including the RAGFlow server, Task Executor processes, and dependent services such as MySQL, Elasticsearch, Redis, and MinIO. It automatically checks their health status, resource usage, and uptime, and performs restarts in case of failures to minimize downtime.
The service offers real-time monitoring of critical components, including the RAGFlow server, Task Executor processes, and dependent services such as MySQL, Infinity, Elasticsearch, Redis, and MinIO. It automatically checks their health status, resource usage, and uptime, and performs restarts in case of failures to minimize downtime.

For user and system management, it supports listing, creating, modifying, and deleting users and their associated resources like knowledge bases and Agents.
@ -393,7 +393,9 @@ class AdminCLI(Cmd):
            print(f"Can't access {self.host}, port: {self.port}")

    def _format_service_detail_table(self, data):
        if not any([isinstance(v, list) for v in data.values()]):
        if isinstance(data, list):
            return data
        if not all([isinstance(v, list) for v in data.values()]):
            # normal table
            return data
        # handle task_executor heartbeats map, for example {'name': [{'done': 2, 'now': timestamp1}, {'done': 3, 'now': timestamp2}]
@ -404,7 +406,7 @@ class AdminCLI(Cmd):
            task_executor_list.append({
                "task_executor_name": k,
                **heartbeats[0],
            })
            } if heartbeats else {"task_executor_name": k})
        return task_executor_list

    def _print_table_simple(self, data):
@ -415,7 +417,8 @@ class AdminCLI(Cmd):
            # handle single row data
            data = [data]

        columns = list(data[0].keys())
        columns = list(set().union(*(d.keys() for d in data)))
        columns.sort()
        col_widths = {}

        def get_string_width(text):
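The `_print_table_simple` change replaces `columns = list(data[0].keys())` with the sorted union of keys across all rows, so the header covers every key that appears in any row, not just the first one. A small standalone illustration of the difference:

```python
# Illustration only: why the column list is built from the union of keys across rows.
rows = [
    {"task_executor_name": "a", "done": 2, "now": 1700000000},
    {"task_executor_name": "b"},  # no heartbeat yet, so no 'done'/'now' keys
]

old_columns = list(rows[0].keys())  # would silently drop 'done'/'now' if the sparse row came first
new_columns = sorted(set().union(*(r.keys() for r in rows)))  # always ['done', 'now', 'task_executor_name']

print(old_columns)
print(new_columns)
```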
@ -169,7 +169,7 @@ def login_verify(f):
        username = auth.parameters['username']
        password = auth.parameters['password']
        try:
            if check_admin(username, password) is False:
            if not check_admin(username, password):
                return jsonify({
                    "code": 500,
                    "message": "Access denied",
@ -25,8 +25,21 @@ from common.config_utils import read_config
from urllib.parse import urlparse


class BaseConfig(BaseModel):
    id: int
    name: str
    host: str
    port: int
    service_type: str
    detail_func_name: str

    def to_dict(self) -> dict[str, Any]:
        return {'id': self.id, 'name': self.name, 'host': self.host, 'port': self.port,
                'service_type': self.service_type}


class ServiceConfigs:
    configs = dict
    configs = list[BaseConfig]

    def __init__(self):
        self.configs = []
@ -45,19 +58,6 @@ class ServiceType(Enum):
    FILE_STORE = "file_store"


class BaseConfig(BaseModel):
    id: int
    name: str
    host: str
    port: int
    service_type: str
    detail_func_name: str

    def to_dict(self) -> dict[str, Any]:
        return {'id': self.id, 'name': self.name, 'host': self.host, 'port': self.port,
                'service_type': self.service_type}


class MetaConfig(BaseConfig):
    meta_type: str

@ -227,7 +227,7 @@ def load_configurations(config_path: str) -> list[BaseConfig]:
    ragflow_count = 0
    id_count = 0
    for k, v in raw_configs.items():
        match (k):
        match k:
            case "ragflow":
                name: str = f'ragflow_{ragflow_count}'
                host: str = v['host']
@ -13,8 +13,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#

import logging
import re
from werkzeug.security import check_password_hash
from common.constants import ActiveEnum
@ -190,7 +189,8 @@ class ServiceMgr:
                    config_dict['status'] = service_detail['status']
                else:
                    config_dict['status'] = 'timeout'
            except Exception:
            except Exception as e:
                logging.warning(f"Can't get service details, error: {e}")
                config_dict['status'] = 'timeout'
            if not config_dict['host']:
                config_dict['host'] = '-'
@ -205,17 +205,13 @@ class ServiceMgr:

    @staticmethod
    def get_service_details(service_id: int):
        service_id = int(service_id)
        service_idx = int(service_id)
        configs = SERVICE_CONFIGS.configs
        service_config_mapping = {
            c.id: {
                'name': c.name,
                'detail_func_name': c.detail_func_name
            } for c in configs
        }
        service_info = service_config_mapping.get(service_id, {})
        if not service_info:
            raise AdminException(f"invalid service_id: {service_id}")
        if service_idx < 0 or service_idx >= len(configs):
            raise AdminException(f"invalid service_index: {service_idx}")

        service_config = configs[service_idx]
        service_info = {'name': service_config.name, 'detail_func_name': service_config.detail_func_name}

        detail_func = getattr(health_utils, service_info.get('detail_func_name'))
        res = detail_func()
@ -298,8 +298,6 @@ class Canvas(Graph):
                for kk, vv in kwargs["webhook_payload"].items():
                    self.components[k]["obj"].set_output(kk, vv)

            self.components[k]["obj"].reset(True)

        for k in kwargs.keys():
            if k in ["query", "user_id", "files"] and kwargs[k]:
                if k == "files":
@ -408,6 +406,10 @@ class Canvas(Graph):
                else:
                    yield decorate("message", {"content": cpn_obj.output("content")})
                cite = re.search(r"\[ID:[ 0-9]+\]", cpn_obj.output("content"))

                if isinstance(cpn_obj.output("attachment"), tuple):
                    yield decorate("message", {"attachment": cpn_obj.output("attachment")})

                yield decorate("message_end", {"reference": self.get_reference() if cite else None})

            while partials:
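The added `cite = re.search(r"\[ID:[ 0-9]+\]", ...)` check means the final `message_end` event only carries a reference payload when the generated content actually contains an `[ID: n]` citation marker. A quick illustration of what the pattern matches:

```python
import re

pattern = r"\[ID:[ 0-9]+\]"

print(bool(re.search(pattern, "See the figure [ID: 12] for details.")))  # True -> reference payload is sent
print(bool(re.search(pattern, "No citations in this answer.")))          # False -> reference is None
```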
@ -368,11 +368,19 @@ Respond immediately with your final comprehensive answer.

        return "Error occurred."

    def reset(self, temp=False):
    def reset(self, only_output=False):
        """
        Reset all tools if they have a reset method. This avoids errors for tools like MCPToolCallSession.
        """
        for k in self._param.outputs.keys():
            self._param.outputs[k]["value"] = None

        for k, cpn in self.tools.items():
            if hasattr(cpn, "reset") and callable(cpn.reset):
                cpn.reset()
        if only_output:
            return
        for k in self._param.inputs.keys():
            self._param.inputs[k]["value"] = None
        self._param.debug_inputs = {}


@ -463,12 +463,15 @@ class ComponentBase(ABC):
        return self._param.outputs.get("_ERROR", {}).get("value")

    def reset(self, only_output=False):
        for k in self._param.outputs.keys():
            self._param.outputs[k]["value"] = None
        outputs: dict = self._param.outputs  # for better performance
        for k in outputs.keys():
            outputs[k]["value"] = None
        if only_output:
            return
        for k in self._param.inputs.keys():
            self._param.inputs[k]["value"] = None

        inputs: dict = self._param.inputs  # for better performance
        for k in inputs.keys():
            inputs[k]["value"] = None
        self._param.debug_inputs = {}

    def get_input(self, key: str=None) -> Union[Any, dict[str, Any]]:
agent/component/list_operations.py (new file, 166 lines)
@ -0,0 +1,166 @@
from abc import ABC
import os
from agent.component.base import ComponentBase, ComponentParamBase
from api.utils.api_utils import timeout


class ListOperationsParam(ComponentParamBase):
    """
    Define the List Operations component parameters.
    """
    def __init__(self):
        super().__init__()
        self.query = ""
        self.operations = "topN"
        self.n=0
        self.sort_method = "asc"
        self.filter = {
            "operator": "=",
            "value": ""
        }
        self.outputs = {
            "result": {
                "value": [],
                "type": "Array of ?"
            },
            "first": {
                "value": "",
                "type": "?"
            },
            "last": {
                "value": "",
                "type": "?"
            }
        }

    def check(self):
        self.check_empty(self.query, "query")
        self.check_valid_value(self.operations, "Support operations", ["topN","head","tail","filter","sort","drop_duplicates"])

    def get_input_form(self) -> dict[str, dict]:
        return {}


class ListOperations(ComponentBase,ABC):
    component_name = "ListOperations"

    @timeout(int(os.environ.get("COMPONENT_EXEC_TIMEOUT", 10*60)))
    def _invoke(self, **kwargs):
        self.input_objects=[]
        inputs = getattr(self._param, "query", None)
        self.inputs=self._canvas.get_variable_value(inputs)
        self.set_input_value(inputs, self.inputs)
        if self._param.operations == "topN":
            self._topN()
        elif self._param.operations == "head":
            self._head()
        elif self._param.operations == "tail":
            self._tail()
        elif self._param.operations == "filter":
            self._filter()
        elif self._param.operations == "sort":
            self._sort()
        elif self._param.operations == "drop_duplicates":
            self._drop_duplicates()

    def _coerce_n(self):
        try:
            return int(getattr(self._param, "n", 0))
        except Exception:
            return 0

    def _set_outputs(self, outputs):
        self._param.outputs["result"]["value"] = outputs
        self._param.outputs["first"]["value"] = outputs[0] if outputs else None
        self._param.outputs["last"]["value"] = outputs[-1] if outputs else None

    def _topN(self):
        n = self._coerce_n()
        if n < 1:
            outputs = []
        else:
            n = min(n, len(self.inputs))
            outputs = self.inputs[:n]
        self._set_outputs(outputs)

    def _head(self):
        n = self._coerce_n()
        if 1 <= n <= len(self.inputs):
            outputs = [self.inputs[n - 1]]
        else:
            outputs = []
        self._set_outputs(outputs)

    def _tail(self):
        n = self._coerce_n()
        if 1 <= n <= len(self.inputs):
            outputs = [self.inputs[-n]]
        else:
            outputs = []
        self._set_outputs(outputs)

    def _filter(self):
        self._set_outputs([i for i in self.inputs if self._eval(self._norm(i),self._param.filter["operator"],self._param.filter["value"])])

    def _norm(self,v):
        s = "" if v is None else str(v)
        return s

    def _eval(self, v, operator, value):
        if operator == "=":
            return v == value
        elif operator == "≠":
            return v != value
        elif operator == "contains":
            return value in v
        elif operator == "start with":
            return v.startswith(value)
        elif operator == "end with":
            return v.endswith(value)
        else:
            return False

    def _sort(self):
        items = self.inputs or []
        method = getattr(self._param, "sort_method", "asc") or "asc"
        reverse = method == "desc"

        if not items:
            self._set_outputs([])
            return

        first = items[0]

        if isinstance(first, dict):
            outputs = sorted(
                items,
                key=lambda x: self._hashable(x),
                reverse=reverse,
            )
        else:
            outputs = sorted(items, reverse=reverse)

        self._set_outputs(outputs)

    def _drop_duplicates(self):
        seen = set()
        outs = []
        for item in self.inputs:
            k = self._hashable(item)
            if k in seen:
                continue
            seen.add(k)
            outs.append(item)
        self._set_outputs(outs)

    def _hashable(self,x):
        if isinstance(x, dict):
            return tuple(sorted((k, self._hashable(v)) for k, v in x.items()))
        if isinstance(x, (list, tuple)):
            return tuple(self._hashable(v) for v in x)
        if isinstance(x, set):
            return tuple(sorted(self._hashable(v) for v in x))
        return x

    def thoughts(self) -> str:
        return "ListOperation in progress"
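A short, standalone illustration of how the operations above behave on a plain Python list (assuming the component's `query` variable resolves to such a list). Note that `head` and `tail` return the n-th element from the front or the back as a single-element list, while `topN` returns the first n elements:

```python
# Standalone illustration of the ListOperations semantics; it does not import the component.
items = ["b", "a", "c", "a"]

top2 = items[:2]                      # topN with n=2 -> ['b', 'a']
head3 = [items[3 - 1]]                # head with n=3 -> ['c'] (1-based index from the front)
tail1 = [items[-1]]                   # tail with n=1 -> ['a'] (1-based index from the back)
filtered = [i for i in items if str(i) == "a"]   # filter with operator '=' and value 'a' -> ['a', 'a']
deduped = list(dict.fromkeys(items))  # drop_duplicates keeps first occurrences -> ['b', 'a', 'c']

print(top2, head3, tail1, filtered, deduped)
```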
@ -222,7 +222,7 @@ class LLM(ComponentBase):
            output_structure = self._param.outputs['structured']
        except Exception:
            pass
        if output_structure:
        if output_structure and isinstance(output_structure, dict) and output_structure.get("properties"):
            schema=json.dumps(output_structure, ensure_ascii=False, indent=2)
            prompt += structured_output_prompt(schema)
        for _ in range(self._param.max_retries+1):
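The tightened condition only adds the structured-output prompt when `outputs['structured']` looks like a JSON-schema dict with a non-empty `properties` key, instead of firing on any truthy value. For example:

```python
# Illustration of the new guard condition.
def wants_structured(output_structure) -> bool:
    return bool(output_structure and isinstance(output_structure, dict) and output_structure.get("properties"))

print(wants_structured({"type": "object"}))  # False: truthy, but no "properties", so no schema prompt is added
print(wants_structured({"type": "object", "properties": {"name": {"type": "string"}}}))  # True
```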
@ -17,6 +17,9 @@ import json
import os
import random
import re
import pypandoc
import logging
import tempfile
from functools import partial
from typing import Any

@ -24,7 +27,8 @@ from agent.component.base import ComponentBase, ComponentParamBase
from jinja2 import Template as Jinja2Template

from common.connection_utils import timeout

from common.misc_utils import get_uuid
from common import settings

class MessageParam(ComponentParamBase):
    """
@ -34,6 +38,7 @@ class MessageParam(ComponentParamBase):
        super().__init__()
        self.content = []
        self.stream = True
        self.output_format = None  # default output format
        self.outputs = {
            "content": {
                "type": "str"
@ -133,6 +138,7 @@ class Message(ComponentBase):
            yield rand_cnt[s: ]

        self.set_output("content", all_content)
        self._convert_content(all_content)

    def _is_jinjia2(self, content:str) -> bool:
        patt = [
@ -164,6 +170,68 @@ class Message(ComponentBase):
            content = re.sub(n, v, content)

        self.set_output("content", content)
        self._convert_content(content)

    def thoughts(self) -> str:
        return ""

    def _convert_content(self, content):
        doc_id = get_uuid()

        if self._param.output_format.lower() not in {"markdown", "html", "pdf", "docx"}:
            self._param.output_format = "markdown"

        try:
            if self._param.output_format in {"markdown", "html"}:
                if isinstance(content, str):
                    converted = pypandoc.convert_text(
                        content,
                        to=self._param.output_format,
                        format="markdown",
                    )
                else:
                    converted = pypandoc.convert_file(
                        content,
                        to=self._param.output_format,
                        format="markdown",
                    )

                binary_content = converted.encode("utf-8")

            else:  # pdf, docx
                with tempfile.NamedTemporaryFile(suffix=f".{self._param.output_format}", delete=False) as tmp:
                    tmp_name = tmp.name

                try:
                    if isinstance(content, str):
                        pypandoc.convert_text(
                            content,
                            to=self._param.output_format,
                            format="markdown",
                            outputfile=tmp_name,
                        )
                    else:
                        pypandoc.convert_file(
                            content,
                            to=self._param.output_format,
                            format="markdown",
                            outputfile=tmp_name,
                        )

                    with open(tmp_name, "rb") as f:
                        binary_content = f.read()

                finally:
                    if os.path.exists(tmp_name):
                        os.remove(tmp_name)

            settings.STORAGE_IMPL.put(self._canvas._tenant_id, doc_id, binary_content)
            self.set_output("attachment", {
                "doc_id":doc_id,
                "format":self._param.output_format,
                "file_name":f"{doc_id[:8]}.{self._param.output_format}"})

            logging.info(f"Converted content uploaded as {doc_id} (format={self._param.output_format})")

        except Exception as e:
            logging.error(f"Error converting content to {self._param.output_format}: {e}")
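`_convert_content` relies on pypandoc, which is why the Dockerfile hunk earlier adds pandoc (and texlive for PDF output). A minimal standalone sketch of the same conversion path, using a hypothetical output path; pandoc must be installed for either call to work:

```python
# Minimal sketch of the pypandoc calls used above; pandoc (and LaTeX for PDF) must be installed.
import pypandoc

markdown_source = "# Report\n\nHello **RAGFlow**."

# Markdown/HTML targets return the converted text directly.
html_text = pypandoc.convert_text(markdown_source, to="html", format="markdown")

# Binary targets such as docx/pdf must be written through outputfile (hypothetical path).
pypandoc.convert_text(markdown_source, to="docx", format="markdown", outputfile="/tmp/report.docx")

print(html_text)
```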
@ -83,10 +83,10 @@
                "value": []
            }
        },
        "password": "20010812Yy!",
        "password": "",
        "port": 3306,
        "sql": "{Agent:WickedGoatsDivide@content}",
        "username": "13637682833@163.com"
        "username": ""
    }
},
"upstream": [
@ -527,10 +527,10 @@
                "value": []
            }
        },
        "password": "20010812Yy!",
        "password": "",
        "port": 3306,
        "sql": "{Agent:WickedGoatsDivide@content}",
        "username": "13637682833@163.com"
        "username": ""
    },
    "label": "ExeSQL",
    "name": "ExeSQL"
@ -96,12 +96,12 @@ login_manager.init_app(app)
commands.register_commands(app)


def search_pages_path(pages_dir):
def search_pages_path(page_path):
    app_path_list = [
        path for path in pages_dir.glob("*_app.py") if not path.name.startswith(".")
        path for path in page_path.glob("*_app.py") if not path.name.startswith(".")
    ]
    api_path_list = [
        path for path in pages_dir.glob("*sdk/*.py") if not path.name.startswith(".")
        path for path in page_path.glob("*sdk/*.py") if not path.name.startswith(".")
    ]
    app_path_list.extend(api_path_list)
    return app_path_list
@ -138,7 +138,7 @@ pages_dir = [
]

client_urls_prefix = [
    register_page(path) for dir in pages_dir for path in search_pages_path(dir)
    register_page(path) for directory in pages_dir for path in search_pages_path(directory)
]


@ -177,5 +177,7 @@ def load_user(web_request):


@app.teardown_request
def _db_close(exc):
def _db_close(exception):
    if exception:
        logging.exception(f"Request failed: {exception}")
    close_connection()
@ -13,41 +13,16 @@
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
import json
|
||||
import os
|
||||
import re
|
||||
from datetime import datetime, timedelta
|
||||
from flask import request, Response
|
||||
from api.db.services.llm_service import LLMBundle
|
||||
from flask import request
|
||||
from flask_login import login_required, current_user
|
||||
|
||||
from api.db import VALID_FILE_TYPES, FileType
|
||||
from api.db.db_models import APIToken, Task, File
|
||||
from api.db.services import duplicate_name
|
||||
from api.db.db_models import APIToken
|
||||
from api.db.services.api_service import APITokenService, API4ConversationService
|
||||
from api.db.services.dialog_service import DialogService, chat
|
||||
from api.db.services.document_service import DocumentService, doc_upload_and_parse
|
||||
from api.db.services.file2document_service import File2DocumentService
|
||||
from api.db.services.file_service import FileService
|
||||
from api.db.services.knowledgebase_service import KnowledgebaseService
|
||||
from api.db.services.task_service import queue_tasks, TaskService
|
||||
from api.db.services.user_service import UserTenantService
|
||||
from common.misc_utils import get_uuid
|
||||
from common.constants import RetCode, VALID_TASK_STATUS, LLMType, ParserType, FileSource
|
||||
from api.utils.api_utils import server_error_response, get_data_error_result, get_json_result, validate_request, \
|
||||
generate_confirmation_token
|
||||
|
||||
from api.utils.file_utils import filename_type, thumbnail
|
||||
from rag.app.tag import label_question
|
||||
from rag.prompts.generator import keyword_extraction
|
||||
from common.time_utils import current_timestamp, datetime_format
|
||||
|
||||
from api.db.services.canvas_service import UserCanvasService
|
||||
from agent.canvas import Canvas
|
||||
from functools import partial
|
||||
from pathlib import Path
|
||||
from common import settings
|
||||
|
||||
|
||||
@manager.route('/new_token', methods=['POST']) # noqa: F821
|
||||
@login_required
|
||||
@ -138,758 +113,3 @@ def stats():
|
||||
except Exception as e:
|
||||
return server_error_response(e)
|
||||
|
||||
|
||||
@manager.route('/new_conversation', methods=['GET']) # noqa: F821
|
||||
def set_conversation():
|
||||
token = request.headers.get('Authorization').split()[1]
|
||||
objs = APIToken.query(token=token)
|
||||
if not objs:
|
||||
return get_json_result(
|
||||
data=False, message='Authentication error: API key is invalid!"', code=RetCode.AUTHENTICATION_ERROR)
|
||||
try:
|
||||
if objs[0].source == "agent":
|
||||
e, cvs = UserCanvasService.get_by_id(objs[0].dialog_id)
|
||||
if not e:
|
||||
return server_error_response("canvas not found.")
|
||||
if not isinstance(cvs.dsl, str):
|
||||
cvs.dsl = json.dumps(cvs.dsl, ensure_ascii=False)
|
||||
canvas = Canvas(cvs.dsl, objs[0].tenant_id)
|
||||
conv = {
|
||||
"id": get_uuid(),
|
||||
"dialog_id": cvs.id,
|
||||
"user_id": request.args.get("user_id", ""),
|
||||
"message": [{"role": "assistant", "content": canvas.get_prologue()}],
|
||||
"source": "agent"
|
||||
}
|
||||
API4ConversationService.save(**conv)
|
||||
return get_json_result(data=conv)
|
||||
else:
|
||||
e, dia = DialogService.get_by_id(objs[0].dialog_id)
|
||||
if not e:
|
||||
return get_data_error_result(message="Dialog not found")
|
||||
conv = {
|
||||
"id": get_uuid(),
|
||||
"dialog_id": dia.id,
|
||||
"user_id": request.args.get("user_id", ""),
|
||||
"message": [{"role": "assistant", "content": dia.prompt_config["prologue"]}]
|
||||
}
|
||||
API4ConversationService.save(**conv)
|
||||
return get_json_result(data=conv)
|
||||
except Exception as e:
|
||||
return server_error_response(e)
|
||||
|
||||
|
||||
@manager.route('/completion', methods=['POST']) # noqa: F821
|
||||
@validate_request("conversation_id", "messages")
|
||||
def completion():
|
||||
token = request.headers.get('Authorization').split()[1]
|
||||
objs = APIToken.query(token=token)
|
||||
if not objs:
|
||||
return get_json_result(
|
||||
data=False, message='Authentication error: API key is invalid!"', code=RetCode.AUTHENTICATION_ERROR)
|
||||
req = request.json
|
||||
e, conv = API4ConversationService.get_by_id(req["conversation_id"])
|
||||
if not e:
|
||||
return get_data_error_result(message="Conversation not found!")
|
||||
if "quote" not in req:
|
||||
req["quote"] = False
|
||||
|
||||
msg = []
|
||||
for m in req["messages"]:
|
||||
if m["role"] == "system":
|
||||
continue
|
||||
if m["role"] == "assistant" and not msg:
|
||||
continue
|
||||
msg.append(m)
|
||||
if not msg[-1].get("id"):
|
||||
msg[-1]["id"] = get_uuid()
|
||||
message_id = msg[-1]["id"]
|
||||
|
||||
def fillin_conv(ans):
|
||||
nonlocal conv, message_id
|
||||
if not conv.reference:
|
||||
conv.reference.append(ans["reference"])
|
||||
else:
|
||||
conv.reference[-1] = ans["reference"]
|
||||
conv.message[-1] = {"role": "assistant", "content": ans["answer"], "id": message_id}
|
||||
ans["id"] = message_id
|
||||
|
||||
def rename_field(ans):
|
||||
reference = ans['reference']
|
||||
if not isinstance(reference, dict):
|
||||
return
|
||||
for chunk_i in reference.get('chunks', []):
|
||||
if 'docnm_kwd' in chunk_i:
|
||||
chunk_i['doc_name'] = chunk_i['docnm_kwd']
|
||||
chunk_i.pop('docnm_kwd')
|
||||
|
||||
try:
|
||||
if conv.source == "agent":
|
||||
stream = req.get("stream", True)
|
||||
conv.message.append(msg[-1])
|
||||
e, cvs = UserCanvasService.get_by_id(conv.dialog_id)
|
||||
if not e:
|
||||
return server_error_response("canvas not found.")
|
||||
del req["conversation_id"]
|
||||
del req["messages"]
|
||||
|
||||
if not isinstance(cvs.dsl, str):
|
||||
cvs.dsl = json.dumps(cvs.dsl, ensure_ascii=False)
|
||||
|
||||
if not conv.reference:
|
||||
conv.reference = []
|
||||
conv.message.append({"role": "assistant", "content": "", "id": message_id})
|
||||
conv.reference.append({"chunks": [], "doc_aggs": []})
|
||||
|
||||
final_ans = {"reference": [], "content": ""}
|
||||
canvas = Canvas(cvs.dsl, objs[0].tenant_id)
|
||||
|
||||
canvas.messages.append(msg[-1])
|
||||
canvas.add_user_input(msg[-1]["content"])
|
||||
answer = canvas.run(stream=stream)
|
||||
|
||||
assert answer is not None, "Nothing. Is it over?"
|
||||
|
||||
if stream:
|
||||
assert isinstance(answer, partial), "Nothing. Is it over?"
|
||||
|
||||
def sse():
|
||||
nonlocal answer, cvs, conv
|
||||
try:
|
||||
for ans in answer():
|
||||
for k in ans.keys():
|
||||
final_ans[k] = ans[k]
|
||||
ans = {"answer": ans["content"], "reference": ans.get("reference", [])}
|
||||
fillin_conv(ans)
|
||||
rename_field(ans)
|
||||
yield "data:" + json.dumps({"code": 0, "message": "", "data": ans},
|
||||
ensure_ascii=False) + "\n\n"
|
||||
|
||||
canvas.messages.append({"role": "assistant", "content": final_ans["content"], "id": message_id})
|
||||
canvas.history.append(("assistant", final_ans["content"]))
|
||||
if final_ans.get("reference"):
|
||||
canvas.reference.append(final_ans["reference"])
|
||||
cvs.dsl = json.loads(str(canvas))
|
||||
API4ConversationService.append_message(conv.id, conv.to_dict())
|
||||
except Exception as e:
|
||||
yield "data:" + json.dumps({"code": 500, "message": str(e),
|
||||
"data": {"answer": "**ERROR**: " + str(e), "reference": []}},
|
||||
ensure_ascii=False) + "\n\n"
|
||||
yield "data:" + json.dumps({"code": 0, "message": "", "data": True}, ensure_ascii=False) + "\n\n"
|
||||
|
||||
resp = Response(sse(), mimetype="text/event-stream")
|
||||
resp.headers.add_header("Cache-control", "no-cache")
|
||||
resp.headers.add_header("Connection", "keep-alive")
|
||||
resp.headers.add_header("X-Accel-Buffering", "no")
|
||||
resp.headers.add_header("Content-Type", "text/event-stream; charset=utf-8")
|
||||
return resp
|
||||
|
||||
final_ans["content"] = "\n".join(answer["content"]) if "content" in answer else ""
|
||||
canvas.messages.append({"role": "assistant", "content": final_ans["content"], "id": message_id})
|
||||
if final_ans.get("reference"):
|
||||
canvas.reference.append(final_ans["reference"])
|
||||
cvs.dsl = json.loads(str(canvas))
|
||||
|
||||
result = {"answer": final_ans["content"], "reference": final_ans.get("reference", [])}
|
||||
fillin_conv(result)
|
||||
API4ConversationService.append_message(conv.id, conv.to_dict())
|
||||
rename_field(result)
|
||||
return get_json_result(data=result)
|
||||
|
||||
# ******************For dialog******************
|
||||
conv.message.append(msg[-1])
|
||||
e, dia = DialogService.get_by_id(conv.dialog_id)
|
||||
if not e:
|
||||
return get_data_error_result(message="Dialog not found!")
|
||||
del req["conversation_id"]
|
||||
del req["messages"]
|
||||
|
||||
if not conv.reference:
|
||||
conv.reference = []
|
||||
conv.message.append({"role": "assistant", "content": "", "id": message_id})
|
||||
conv.reference.append({"chunks": [], "doc_aggs": []})
|
||||
|
||||
def stream():
|
||||
nonlocal dia, msg, req, conv
|
||||
try:
|
||||
for ans in chat(dia, msg, True, **req):
|
||||
fillin_conv(ans)
|
||||
rename_field(ans)
|
||||
yield "data:" + json.dumps({"code": 0, "message": "", "data": ans},
|
||||
ensure_ascii=False) + "\n\n"
|
||||
API4ConversationService.append_message(conv.id, conv.to_dict())
|
||||
except Exception as e:
|
||||
yield "data:" + json.dumps({"code": 500, "message": str(e),
|
||||
"data": {"answer": "**ERROR**: " + str(e), "reference": []}},
|
||||
ensure_ascii=False) + "\n\n"
|
||||
yield "data:" + json.dumps({"code": 0, "message": "", "data": True}, ensure_ascii=False) + "\n\n"
|
||||
|
||||
if req.get("stream", True):
|
||||
resp = Response(stream(), mimetype="text/event-stream")
|
||||
resp.headers.add_header("Cache-control", "no-cache")
|
||||
resp.headers.add_header("Connection", "keep-alive")
|
||||
resp.headers.add_header("X-Accel-Buffering", "no")
|
||||
resp.headers.add_header("Content-Type", "text/event-stream; charset=utf-8")
|
||||
return resp
|
||||
|
||||
answer = None
|
||||
for ans in chat(dia, msg, **req):
|
||||
answer = ans
|
||||
fillin_conv(ans)
|
||||
API4ConversationService.append_message(conv.id, conv.to_dict())
|
||||
break
|
||||
rename_field(answer)
|
||||
return get_json_result(data=answer)
|
||||
|
||||
except Exception as e:
|
||||
return server_error_response(e)
|
||||
|
||||
|
||||
@manager.route('/conversation/<conversation_id>', methods=['GET']) # noqa: F821
|
||||
# @login_required
|
||||
def get_conversation(conversation_id):
|
||||
token = request.headers.get('Authorization').split()[1]
|
||||
objs = APIToken.query(token=token)
|
||||
if not objs:
|
||||
return get_json_result(
|
||||
data=False, message='Authentication error: API key is invalid!"', code=RetCode.AUTHENTICATION_ERROR)
|
||||
|
||||
try:
|
||||
e, conv = API4ConversationService.get_by_id(conversation_id)
|
||||
if not e:
|
||||
return get_data_error_result(message="Conversation not found!")
|
||||
|
||||
conv = conv.to_dict()
|
||||
if token != APIToken.query(dialog_id=conv['dialog_id'])[0].token:
|
||||
return get_json_result(data=False, message='Authentication error: API key is invalid for this conversation_id!"',
|
||||
code=RetCode.AUTHENTICATION_ERROR)
|
||||
|
||||
for referenct_i in conv['reference']:
|
||||
if referenct_i is None or len(referenct_i) == 0:
|
||||
continue
|
||||
for chunk_i in referenct_i['chunks']:
|
||||
if 'docnm_kwd' in chunk_i.keys():
|
||||
chunk_i['doc_name'] = chunk_i['docnm_kwd']
|
||||
chunk_i.pop('docnm_kwd')
|
||||
return get_json_result(data=conv)
|
||||
except Exception as e:
|
||||
return server_error_response(e)
|
||||
|
||||
|
||||
@manager.route('/document/upload', methods=['POST']) # noqa: F821
|
||||
@validate_request("kb_name")
|
||||
def upload():
|
||||
token = request.headers.get('Authorization').split()[1]
|
||||
objs = APIToken.query(token=token)
|
||||
if not objs:
|
||||
return get_json_result(
|
||||
data=False, message='Authentication error: API key is invalid!"', code=RetCode.AUTHENTICATION_ERROR)
|
||||
|
||||
kb_name = request.form.get("kb_name").strip()
|
||||
tenant_id = objs[0].tenant_id
|
||||
|
||||
try:
|
||||
e, kb = KnowledgebaseService.get_by_name(kb_name, tenant_id)
|
||||
if not e:
|
||||
return get_data_error_result(
|
||||
message="Can't find this knowledgebase!")
|
||||
kb_id = kb.id
|
||||
except Exception as e:
|
||||
return server_error_response(e)
|
||||
|
||||
if 'file' not in request.files:
|
||||
return get_json_result(
|
||||
data=False, message='No file part!', code=RetCode.ARGUMENT_ERROR)
|
||||
|
||||
file = request.files['file']
|
||||
if file.filename == '':
|
||||
return get_json_result(
|
||||
data=False, message='No file selected!', code=RetCode.ARGUMENT_ERROR)
|
||||
|
||||
root_folder = FileService.get_root_folder(tenant_id)
|
||||
pf_id = root_folder["id"]
|
||||
FileService.init_knowledgebase_docs(pf_id, tenant_id)
|
||||
kb_root_folder = FileService.get_kb_folder(tenant_id)
|
||||
kb_folder = FileService.new_a_file_from_kb(kb.tenant_id, kb.name, kb_root_folder["id"])
|
||||
|
||||
try:
|
||||
if DocumentService.get_doc_count(kb.tenant_id) >= int(os.environ.get('MAX_FILE_NUM_PER_USER', 8192)):
|
||||
return get_data_error_result(
|
||||
message="Exceed the maximum file number of a free user!")
|
||||
|
||||
filename = duplicate_name(
|
||||
DocumentService.query,
|
||||
name=file.filename,
|
||||
kb_id=kb_id)
|
||||
filetype = filename_type(filename)
|
||||
if not filetype:
|
||||
return get_data_error_result(
|
||||
message="This type of file has not been supported yet!")
|
||||
|
||||
location = filename
|
||||
while settings.STORAGE_IMPL.obj_exist(kb_id, location):
|
||||
location += "_"
|
||||
blob = request.files['file'].read()
|
||||
settings.STORAGE_IMPL.put(kb_id, location, blob)
|
||||
doc = {
|
||||
"id": get_uuid(),
|
||||
"kb_id": kb.id,
|
||||
"parser_id": kb.parser_id,
|
||||
"parser_config": kb.parser_config,
|
||||
"created_by": kb.tenant_id,
|
||||
"type": filetype,
|
||||
"name": filename,
|
||||
"location": location,
|
||||
"size": len(blob),
|
||||
"thumbnail": thumbnail(filename, blob),
|
||||
"suffix": Path(filename).suffix.lstrip("."),
|
||||
}
|
||||
|
||||
form_data = request.form
|
||||
if "parser_id" in form_data.keys():
|
||||
if request.form.get("parser_id").strip() in list(vars(ParserType).values())[1:-3]:
|
||||
doc["parser_id"] = request.form.get("parser_id").strip()
|
||||
if doc["type"] == FileType.VISUAL:
|
||||
doc["parser_id"] = ParserType.PICTURE.value
|
||||
if doc["type"] == FileType.AURAL:
|
||||
doc["parser_id"] = ParserType.AUDIO.value
|
||||
if re.search(r"\.(ppt|pptx|pages)$", filename):
|
||||
doc["parser_id"] = ParserType.PRESENTATION.value
|
||||
if re.search(r"\.(eml)$", filename):
|
||||
doc["parser_id"] = ParserType.EMAIL.value
|
||||
|
||||
doc_result = DocumentService.insert(doc)
|
||||
FileService.add_file_from_kb(doc, kb_folder["id"], kb.tenant_id)
|
||||
except Exception as e:
|
||||
return server_error_response(e)
|
||||
|
||||
if "run" in form_data.keys():
|
||||
if request.form.get("run").strip() == "1":
|
||||
try:
|
||||
info = {"run": 1, "progress": 0, "progress_msg": "", "chunk_num": 0, "token_num": 0}
|
||||
DocumentService.update_by_id(doc["id"], info)
|
||||
# if str(req["run"]) == TaskStatus.CANCEL.value:
|
||||
tenant_id = DocumentService.get_tenant_id(doc["id"])
|
||||
if not tenant_id:
|
||||
return get_data_error_result(message="Tenant not found!")
|
||||
|
||||
# e, doc = DocumentService.get_by_id(doc["id"])
|
||||
TaskService.filter_delete([Task.doc_id == doc["id"]])
|
||||
e, doc = DocumentService.get_by_id(doc["id"])
|
||||
doc = doc.to_dict()
|
||||
doc["tenant_id"] = tenant_id
|
||||
bucket, name = File2DocumentService.get_storage_address(doc_id=doc["id"])
|
||||
queue_tasks(doc, bucket, name, 0)
|
||||
except Exception as e:
|
||||
return server_error_response(e)
|
||||
|
||||
return get_json_result(data=doc_result.to_json())
|
||||
|
||||
|
||||
@manager.route('/document/upload_and_parse', methods=['POST']) # noqa: F821
|
||||
@validate_request("conversation_id")
|
||||
def upload_parse():
|
||||
token = request.headers.get('Authorization').split()[1]
|
||||
objs = APIToken.query(token=token)
|
||||
if not objs:
|
||||
return get_json_result(
|
||||
data=False, message='Authentication error: API key is invalid!"', code=RetCode.AUTHENTICATION_ERROR)
|
||||
|
||||
if 'file' not in request.files:
|
||||
return get_json_result(
|
||||
data=False, message='No file part!', code=RetCode.ARGUMENT_ERROR)
|
||||
|
||||
file_objs = request.files.getlist('file')
|
||||
for file_obj in file_objs:
|
||||
if file_obj.filename == '':
|
||||
return get_json_result(
|
||||
data=False, message='No file selected!', code=RetCode.ARGUMENT_ERROR)
|
||||
|
||||
doc_ids = doc_upload_and_parse(request.form.get("conversation_id"), file_objs, objs[0].tenant_id)
|
||||
return get_json_result(data=doc_ids)
|
||||
|
||||
|
||||
@manager.route('/list_chunks', methods=['POST']) # noqa: F821
|
||||
# @login_required
|
||||
def list_chunks():
|
||||
token = request.headers.get('Authorization').split()[1]
|
||||
objs = APIToken.query(token=token)
|
||||
if not objs:
|
||||
return get_json_result(
|
||||
data=False, message='Authentication error: API key is invalid!"', code=RetCode.AUTHENTICATION_ERROR)
|
||||
|
||||
req = request.json
|
||||
|
||||
try:
|
||||
if "doc_name" in req.keys():
|
||||
tenant_id = DocumentService.get_tenant_id_by_name(req['doc_name'])
|
||||
doc_id = DocumentService.get_doc_id_by_doc_name(req['doc_name'])
|
||||
|
||||
elif "doc_id" in req.keys():
|
||||
tenant_id = DocumentService.get_tenant_id(req['doc_id'])
|
||||
doc_id = req['doc_id']
|
||||
else:
|
||||
return get_json_result(
|
||||
data=False, message="Can't find doc_name or doc_id"
|
||||
)
|
||||
kb_ids = KnowledgebaseService.get_kb_ids(tenant_id)
|
||||
|
||||
res = settings.retriever.chunk_list(doc_id, tenant_id, kb_ids)
|
||||
res = [
|
||||
{
|
||||
"content": res_item["content_with_weight"],
|
||||
"doc_name": res_item["docnm_kwd"],
|
||||
"image_id": res_item["img_id"]
|
||||
} for res_item in res
|
||||
]
|
||||
|
||||
except Exception as e:
|
||||
return server_error_response(e)
|
||||
|
||||
return get_json_result(data=res)
|
||||
|
||||
@manager.route('/get_chunk/<chunk_id>', methods=['GET']) # noqa: F821
|
||||
# @login_required
|
||||
def get_chunk(chunk_id):
|
||||
from rag.nlp import search
|
||||
token = request.headers.get('Authorization').split()[1]
|
||||
objs = APIToken.query(token=token)
|
||||
if not objs:
|
||||
return get_json_result(
|
||||
data=False, message='Authentication error: API key is invalid!"', code=RetCode.AUTHENTICATION_ERROR)
|
||||
try:
|
||||
tenant_id = objs[0].tenant_id
|
||||
kb_ids = KnowledgebaseService.get_kb_ids(tenant_id)
|
||||
chunk = settings.docStoreConn.get(chunk_id, search.index_name(tenant_id), kb_ids)
|
||||
if chunk is None:
|
||||
return server_error_response(Exception("Chunk not found"))
|
||||
k = []
|
||||
for n in chunk.keys():
|
||||
if re.search(r"(_vec$|_sm_|_tks|_ltks)", n):
|
||||
k.append(n)
|
||||
for n in k:
|
||||
del chunk[n]
|
||||
|
||||
return get_json_result(data=chunk)
|
||||
except Exception as e:
|
||||
return server_error_response(e)
|
||||
|
||||
@manager.route('/list_kb_docs', methods=['POST']) # noqa: F821
|
||||
# @login_required
|
||||
def list_kb_docs():
|
||||
token = request.headers.get('Authorization').split()[1]
|
||||
objs = APIToken.query(token=token)
|
||||
if not objs:
|
||||
return get_json_result(
|
||||
data=False, message='Authentication error: API key is invalid!"', code=RetCode.AUTHENTICATION_ERROR)
|
||||
|
||||
req = request.json
|
||||
tenant_id = objs[0].tenant_id
|
||||
kb_name = req.get("kb_name", "").strip()
|
||||
|
||||
try:
|
||||
e, kb = KnowledgebaseService.get_by_name(kb_name, tenant_id)
|
||||
if not e:
|
||||
return get_data_error_result(
|
||||
message="Can't find this knowledgebase!")
|
||||
kb_id = kb.id
|
||||
|
||||
except Exception as e:
|
||||
return server_error_response(e)
|
||||
|
||||
page_number = int(req.get("page", 1))
|
||||
items_per_page = int(req.get("page_size", 15))
|
||||
orderby = req.get("orderby", "create_time")
|
||||
desc = req.get("desc", True)
|
||||
keywords = req.get("keywords", "")
|
||||
status = req.get("status", [])
|
||||
if status:
|
||||
invalid_status = {s for s in status if s not in VALID_TASK_STATUS}
|
||||
if invalid_status:
|
||||
return get_data_error_result(
|
||||
message=f"Invalid filter status conditions: {', '.join(invalid_status)}"
|
||||
)
|
||||
types = req.get("types", [])
|
||||
if types:
|
||||
invalid_types = {t for t in types if t not in VALID_FILE_TYPES}
|
||||
if invalid_types:
|
||||
return get_data_error_result(
|
||||
message=f"Invalid filter conditions: {', '.join(invalid_types)} type{'s' if len(invalid_types) > 1 else ''}"
|
||||
)
|
||||
try:
|
||||
docs, tol = DocumentService.get_by_kb_id(
|
||||
kb_id, page_number, items_per_page, orderby, desc, keywords, status, types)
|
||||
docs = [{"doc_id": doc['id'], "doc_name": doc['name']} for doc in docs]
|
||||
|
||||
return get_json_result(data={"total": tol, "docs": docs})
|
||||
|
||||
except Exception as e:
|
||||
return server_error_response(e)
|
||||
|
||||
|
||||
@manager.route('/document/infos', methods=['POST']) # noqa: F821
|
||||
@validate_request("doc_ids")
|
||||
def docinfos():
|
||||
token = request.headers.get('Authorization').split()[1]
|
||||
objs = APIToken.query(token=token)
|
||||
if not objs:
|
||||
return get_json_result(
|
||||
data=False, message='Authentication error: API key is invalid!"', code=RetCode.AUTHENTICATION_ERROR)
|
||||
req = request.json
|
||||
doc_ids = req["doc_ids"]
|
||||
docs = DocumentService.get_by_ids(doc_ids)
|
||||
return get_json_result(data=list(docs.dicts()))
|
||||
|
||||
|
||||
@manager.route('/document', methods=['DELETE']) # noqa: F821
|
||||
# @login_required
|
||||
def document_rm():
|
||||
token = request.headers.get('Authorization').split()[1]
|
||||
objs = APIToken.query(token=token)
|
||||
if not objs:
|
||||
return get_json_result(
|
||||
data=False, message='Authentication error: API key is invalid!"', code=RetCode.AUTHENTICATION_ERROR)
|
||||
|
||||
tenant_id = objs[0].tenant_id
|
||||
req = request.json
|
||||
try:
|
||||
doc_ids = DocumentService.get_doc_ids_by_doc_names(req.get("doc_names", []))
|
||||
for doc_id in req.get("doc_ids", []):
|
||||
if doc_id not in doc_ids:
|
||||
doc_ids.append(doc_id)
|
||||
|
||||
if not doc_ids:
|
||||
return get_json_result(
|
||||
data=False, message="Can't find doc_names or doc_ids"
|
||||
)
|
||||
|
||||
except Exception as e:
|
||||
return server_error_response(e)
|
||||
|
||||
root_folder = FileService.get_root_folder(tenant_id)
|
||||
pf_id = root_folder["id"]
|
||||
FileService.init_knowledgebase_docs(pf_id, tenant_id)
|
||||
|
||||
errors = ""
|
||||
docs = DocumentService.get_by_ids(doc_ids)
|
||||
doc_dic = {}
|
||||
for doc in docs:
|
||||
doc_dic[doc.id] = doc
|
||||
|
||||
for doc_id in doc_ids:
|
||||
try:
|
||||
if doc_id not in doc_dic:
|
||||
return get_data_error_result(message="Document not found!")
|
||||
doc = doc_dic[doc_id]
|
||||
tenant_id = DocumentService.get_tenant_id(doc_id)
|
||||
if not tenant_id:
|
||||
return get_data_error_result(message="Tenant not found!")
|
||||
|
||||
b, n = File2DocumentService.get_storage_address(doc_id=doc_id)
|
||||
|
||||
if not DocumentService.remove_document(doc, tenant_id):
|
||||
return get_data_error_result(
|
||||
message="Database error (Document removal)!")
|
||||
|
||||
f2d = File2DocumentService.get_by_document_id(doc_id)
|
||||
FileService.filter_delete([File.source_type == FileSource.KNOWLEDGEBASE, File.id == f2d[0].file_id])
|
||||
File2DocumentService.delete_by_document_id(doc_id)
|
||||
|
||||
settings.STORAGE_IMPL.rm(b, n)
|
||||
except Exception as e:
|
||||
errors += str(e)
|
||||
|
||||
if errors:
|
||||
return get_json_result(data=False, message=errors, code=RetCode.SERVER_ERROR)
|
||||
|
||||
return get_json_result(data=True)
|
||||
|
||||
|
||||
@manager.route('/completion_aibotk', methods=['POST']) # noqa: F821
|
||||
@validate_request("Authorization", "conversation_id", "word")
|
||||
def completion_faq():
|
||||
import base64
|
||||
req = request.json
|
||||
|
||||
token = req["Authorization"]
|
||||
objs = APIToken.query(token=token)
|
||||
if not objs:
|
||||
return get_json_result(
|
||||
data=False, message='Authentication error: API key is invalid!"', code=RetCode.AUTHENTICATION_ERROR)
|
||||
|
||||
e, conv = API4ConversationService.get_by_id(req["conversation_id"])
|
||||
if not e:
|
||||
return get_data_error_result(message="Conversation not found!")
|
||||
if "quote" not in req:
|
||||
req["quote"] = True
|
||||
|
||||
msg = [{"role": "user", "content": req["word"]}]
|
||||
if not msg[-1].get("id"):
|
||||
msg[-1]["id"] = get_uuid()
|
||||
message_id = msg[-1]["id"]
|
||||
|
||||
def fillin_conv(ans):
|
||||
nonlocal conv, message_id
|
||||
if not conv.reference:
|
||||
conv.reference.append(ans["reference"])
|
||||
else:
|
||||
conv.reference[-1] = ans["reference"]
|
||||
conv.message[-1] = {"role": "assistant", "content": ans["answer"], "id": message_id}
|
||||
ans["id"] = message_id
|
||||
|
||||
try:
|
||||
if conv.source == "agent":
|
||||
conv.message.append(msg[-1])
|
||||
e, cvs = UserCanvasService.get_by_id(conv.dialog_id)
|
||||
if not e:
|
||||
return server_error_response("canvas not found.")
|
||||
|
||||
if not isinstance(cvs.dsl, str):
|
||||
cvs.dsl = json.dumps(cvs.dsl, ensure_ascii=False)
|
||||
|
||||
if not conv.reference:
|
||||
conv.reference = []
|
||||
conv.message.append({"role": "assistant", "content": "", "id": message_id})
|
||||
conv.reference.append({"chunks": [], "doc_aggs": []})
|
||||
|
||||
final_ans = {"reference": [], "doc_aggs": []}
|
||||
canvas = Canvas(cvs.dsl, objs[0].tenant_id)
|
||||
|
||||
canvas.messages.append(msg[-1])
|
||||
canvas.add_user_input(msg[-1]["content"])
|
||||
answer = canvas.run(stream=False)
|
||||
|
||||
assert answer is not None, "Nothing. Is it over?"
|
||||
|
||||
data_type_picture = {
|
||||
"type": 3,
|
||||
"url": "base64 content"
|
||||
}
|
||||
data = [
|
||||
{
|
||||
"type": 1,
|
||||
"content": ""
|
||||
}
|
||||
]
|
||||
final_ans["content"] = "\n".join(answer["content"]) if "content" in answer else ""
|
||||
canvas.messages.append({"role": "assistant", "content": final_ans["content"], "id": message_id})
|
||||
if final_ans.get("reference"):
|
||||
canvas.reference.append(final_ans["reference"])
|
||||
cvs.dsl = json.loads(str(canvas))
|
||||
|
||||
ans = {"answer": final_ans["content"], "reference": final_ans.get("reference", [])}
|
||||
data[0]["content"] += re.sub(r'##\d\$\$', '', ans["answer"])
|
||||
fillin_conv(ans)
|
||||
API4ConversationService.append_message(conv.id, conv.to_dict())
|
||||
|
||||
chunk_idxs = [int(match[2]) for match in re.findall(r'##\d\$\$', ans["answer"])]
|
||||
for chunk_idx in chunk_idxs[:1]:
|
||||
if ans["reference"]["chunks"][chunk_idx]["img_id"]:
|
||||
try:
|
||||
bkt, nm = ans["reference"]["chunks"][chunk_idx]["img_id"].split("-")
|
||||
response = settings.STORAGE_IMPL.get(bkt, nm)
|
||||
data_type_picture["url"] = base64.b64encode(response).decode('utf-8')
|
||||
data.append(data_type_picture)
|
||||
break
|
||||
except Exception as e:
|
||||
return server_error_response(e)
|
||||
|
||||
response = {"code": 200, "msg": "success", "data": data}
|
||||
return response
|
||||
|
||||
# ******************For dialog******************
|
||||
conv.message.append(msg[-1])
|
||||
e, dia = DialogService.get_by_id(conv.dialog_id)
|
||||
if not e:
|
||||
return get_data_error_result(message="Dialog not found!")
|
||||
del req["conversation_id"]
|
||||
|
||||
if not conv.reference:
|
||||
conv.reference = []
|
||||
conv.message.append({"role": "assistant", "content": "", "id": message_id})
|
||||
conv.reference.append({"chunks": [], "doc_aggs": []})
|
||||
|
||||
data_type_picture = {
|
||||
"type": 3,
|
||||
"url": "base64 content"
|
||||
}
|
||||
data = [
|
||||
{
|
||||
"type": 1,
|
||||
"content": ""
|
||||
}
|
||||
]
|
||||
ans = ""
|
||||
for a in chat(dia, msg, stream=False, **req):
|
||||
ans = a
|
||||
break
|
||||
data[0]["content"] += re.sub(r'##\d\$\$', '', ans["answer"])
|
||||
fillin_conv(ans)
|
||||
API4ConversationService.append_message(conv.id, conv.to_dict())
|
||||
|
||||
chunk_idxs = [int(match[2]) for match in re.findall(r'##\d\$\$', ans["answer"])]
|
||||
for chunk_idx in chunk_idxs[:1]:
|
||||
if ans["reference"]["chunks"][chunk_idx]["img_id"]:
|
||||
try:
|
||||
bkt, nm = ans["reference"]["chunks"][chunk_idx]["img_id"].split("-")
|
||||
response = settings.STORAGE_IMPL.get(bkt, nm)
|
||||
data_type_picture["url"] = base64.b64encode(response).decode('utf-8')
|
||||
data.append(data_type_picture)
|
||||
break
|
||||
except Exception as e:
|
||||
return server_error_response(e)
|
||||
|
||||
response = {"code": 200, "msg": "success", "data": data}
|
||||
return response
|
||||
|
||||
except Exception as e:
|
||||
return server_error_response(e)
|
||||
|
||||
|
||||
@manager.route('/retrieval', methods=['POST']) # noqa: F821
|
||||
@validate_request("kb_id", "question")
|
||||
def retrieval():
|
||||
token = request.headers.get('Authorization').split()[1]
|
||||
objs = APIToken.query(token=token)
|
||||
if not objs:
|
||||
return get_json_result(
|
||||
data=False, message='Authentication error: API key is invalid!"', code=RetCode.AUTHENTICATION_ERROR)
|
||||
|
||||
req = request.json
|
||||
kb_ids = req.get("kb_id", [])
|
||||
doc_ids = req.get("doc_ids", [])
|
||||
question = req.get("question")
|
||||
page = int(req.get("page", 1))
|
||||
size = int(req.get("page_size", 30))
|
||||
similarity_threshold = float(req.get("similarity_threshold", 0.2))
|
||||
vector_similarity_weight = float(req.get("vector_similarity_weight", 0.3))
|
||||
top = int(req.get("top_k", 1024))
|
||||
highlight = bool(req.get("highlight", False))
|
||||
|
||||
try:
|
||||
kbs = KnowledgebaseService.get_by_ids(kb_ids)
|
||||
embd_nms = list(set([kb.embd_id for kb in kbs]))
|
||||
if len(embd_nms) != 1:
|
||||
return get_json_result(
|
||||
data=False, message='Knowledge bases use different embedding models or does not exist."',
|
||||
code=RetCode.AUTHENTICATION_ERROR)
|
||||
|
||||
embd_mdl = LLMBundle(kbs[0].tenant_id, LLMType.EMBEDDING, llm_name=kbs[0].embd_id)
|
||||
rerank_mdl = None
|
||||
if req.get("rerank_id"):
|
||||
rerank_mdl = LLMBundle(kbs[0].tenant_id, LLMType.RERANK, llm_name=req["rerank_id"])
|
||||
if req.get("keyword", False):
|
||||
chat_mdl = LLMBundle(kbs[0].tenant_id, LLMType.CHAT)
|
||||
question += keyword_extraction(chat_mdl, question)
|
||||
ranks = settings.retriever.retrieval(question, embd_mdl, kbs[0].tenant_id, kb_ids, page, size,
|
||||
similarity_threshold, vector_similarity_weight, top,
|
||||
doc_ids, rerank_mdl=rerank_mdl, highlight= highlight,
|
||||
rank_feature=label_question(question, kbs))
|
||||
for c in ranks["chunks"]:
|
||||
c.pop("vector", None)
|
||||
return get_json_result(data=ranks)
|
||||
except Exception as e:
|
||||
if str(e).find("not_found") > 0:
|
||||
return get_json_result(data=False, message='No chunk found! Check the chunk status please!',
|
||||
code=RetCode.DATA_ERROR)
|
||||
return server_error_response(e)
|
||||
|
||||
@ -426,7 +426,6 @@ def test_db_connect():
|
||||
try:
|
||||
import trino
|
||||
import os
|
||||
from trino.auth import BasicAuthentication
|
||||
except Exception as e:
|
||||
return server_error_response(f"Missing dependency 'trino'. Please install: pip install trino, detail: {e}")
|
||||
|
||||
@ -438,7 +437,7 @@ def test_db_connect():
|
||||
|
||||
auth = None
|
||||
if http_scheme == "https" and req.get("password"):
|
||||
auth = BasicAuthentication(req.get("username") or "ragflow", req["password"])
|
||||
auth = trino.BasicAuthentication(req.get("username") or "ragflow", req["password"])
|
||||
|
||||
conn = trino.dbapi.connect(
|
||||
host=req["host"],
|
||||
@ -471,8 +470,8 @@ def test_db_connect():
|
||||
@login_required
|
||||
def getlistversion(canvas_id):
|
||||
try:
|
||||
list =sorted([c.to_dict() for c in UserCanvasVersionService.list_by_canvas_id(canvas_id)], key=lambda x: x["update_time"]*-1)
|
||||
return get_json_result(data=list)
|
||||
versions =sorted([c.to_dict() for c in UserCanvasVersionService.list_by_canvas_id(canvas_id)], key=lambda x: x["update_time"]*-1)
|
||||
return get_json_result(data=versions)
|
||||
except Exception as e:
|
||||
return get_data_error_result(message=f"Error getting history files: {e}")
|
||||
|
||||
|
||||
@ -55,7 +55,6 @@ def set_connector():
|
||||
"timeout_secs": int(req.get("timeout_secs", 60 * 29)),
|
||||
"status": TaskStatus.SCHEDULE,
|
||||
}
|
||||
conn["status"] = TaskStatus.SCHEDULE
|
||||
ConnectorService.save(**conn)
|
||||
|
||||
time.sleep(1)
|
||||
|
||||
@ -85,7 +85,6 @@ def get():
|
||||
if not e:
|
||||
return get_data_error_result(message="Conversation not found!")
|
||||
tenants = UserTenantService.query(user_id=current_user.id)
|
||||
avatar = None
|
||||
for tenant in tenants:
|
||||
dialog = DialogService.query(tenant_id=tenant.tenant_id, id=conv.dialog_id)
|
||||
if dialog and len(dialog) > 0:
|
||||
|
||||
@ -154,15 +154,15 @@ def get_kb_names(kb_ids):
|
||||
@login_required
|
||||
def list_dialogs():
|
||||
try:
|
||||
diags = DialogService.query(
|
||||
conversations = DialogService.query(
|
||||
tenant_id=current_user.id,
|
||||
status=StatusEnum.VALID.value,
|
||||
reverse=True,
|
||||
order_by=DialogService.model.create_time)
|
||||
diags = [d.to_dict() for d in diags]
|
||||
for d in diags:
|
||||
d["kb_ids"], d["kb_names"] = get_kb_names(d["kb_ids"])
|
||||
return get_json_result(data=diags)
|
||||
conversations = [d.to_dict() for d in conversations]
|
||||
for conversation in conversations:
|
||||
conversation["kb_ids"], conversation["kb_names"] = get_kb_names(conversation["kb_ids"])
|
||||
return get_json_result(data=conversations)
|
||||
except Exception as e:
|
||||
return server_error_response(e)
|
||||
|
||||
|
||||
@ -308,7 +308,7 @@ def get_filter():
|
||||
|
||||
@manager.route("/infos", methods=["POST"]) # noqa: F821
|
||||
@login_required
|
||||
def docinfos():
|
||||
def doc_infos():
|
||||
req = request.json
|
||||
doc_ids = req["doc_ids"]
|
||||
for doc_id in doc_ids:
|
||||
@ -508,6 +508,7 @@ def get(doc_id):
|
||||
ext = ext.group(1) if ext else None
|
||||
if ext:
|
||||
if doc.type == FileType.VISUAL.value:
|
||||
|
||||
content_type = CONTENT_TYPE_MAP.get(ext, f"image/{ext}")
|
||||
else:
|
||||
content_type = CONTENT_TYPE_MAP.get(ext, f"application/{ext}")
|
||||
@ -517,6 +518,22 @@ def get(doc_id):
|
||||
return server_error_response(e)
|
||||
|
||||
|
||||
@manager.route("/download/<attachment_id>", methods=["GET"]) # noqa: F821
|
||||
@login_required
|
||||
def download_attachment(attachment_id):
|
||||
try:
|
||||
ext = request.args.get("ext", "markdown")
|
||||
data = settings.STORAGE_IMPL.get(current_user.id, attachment_id)
|
||||
# data = settings.STORAGE_IMPL.get("eb500d50bb0411f0907561d2782adda5", attachment_id)
|
||||
response = flask.make_response(data)
|
||||
response.headers.set("Content-Type", CONTENT_TYPE_MAP.get(ext, f"application/{ext}"))
|
||||
|
||||
return response
|
||||
|
||||
except Exception as e:
|
||||
return server_error_response(e)
|
||||
|
||||
|
||||
@manager.route("/change_parser", methods=["POST"]) # noqa: F821
|
||||
@login_required
|
||||
@validate_request("doc_id")
|
||||
@ -544,6 +561,7 @@ def change_parser():
|
||||
return get_data_error_result(message="Tenant not found!")
|
||||
if settings.docStoreConn.indexExist(search.index_name(tenant_id), doc.kb_id):
|
||||
settings.docStoreConn.delete({"doc_id": doc.id}, search.index_name(tenant_id), doc.kb_id)
|
||||
return None
|
||||
|
||||
try:
|
||||
if "pipeline_id" in req and req["pipeline_id"] != "":
|
||||
|
||||
@ -246,8 +246,8 @@ def rm():
|
||||
try:
|
||||
if file.location:
|
||||
settings.STORAGE_IMPL.rm(file.parent_id, file.location)
|
||||
except Exception:
|
||||
logging.exception(f"Fail to remove object: {file.parent_id}/{file.location}")
|
||||
except Exception as e:
|
||||
logging.exception(f"Fail to remove object: {file.parent_id}/{file.location}, error: {e}")
|
||||
|
||||
informs = File2DocumentService.get_by_file_id(file.id)
|
||||
for inform in informs:
|
||||
|
||||
@ -16,6 +16,7 @@
|
||||
import json
|
||||
import logging
|
||||
import random
|
||||
import re
|
||||
|
||||
from flask import request
|
||||
from flask_login import login_required, current_user
|
||||
@ -731,6 +732,8 @@ def delete_kb_task():
|
||||
def cancel_task(task_id):
|
||||
REDIS_CONN.set(f"{task_id}-cancel", "x")
|
||||
|
||||
kb_task_id_field: str = ""
|
||||
kb_task_finish_at: str = ""
|
||||
match pipeline_task_type:
|
||||
case PipelineTaskType.GRAPH_RAG:
|
||||
kb_task_id_field = "graphrag_task_id"
|
||||
@ -807,7 +810,7 @@ def check_embedding():
|
||||
offset=0, limit=1,
|
||||
indexNames=index_nm, knowledgebaseIds=[kb_id]
|
||||
)
|
||||
total = docStoreConn.getTotal(res0)
|
||||
total = docStoreConn.get_total(res0)
|
||||
if total <= 0:
|
||||
return []
|
||||
|
||||
@ -824,7 +827,7 @@ def check_embedding():
|
||||
offset=off, limit=1,
|
||||
indexNames=index_nm, knowledgebaseIds=[kb_id]
|
||||
)
|
||||
ids = docStoreConn.getChunkIds(res1)
|
||||
ids = docStoreConn.get_chunk_ids(res1)
|
||||
if not ids:
|
||||
continue
|
||||
|
||||
@ -845,8 +848,13 @@ def check_embedding():
|
||||
"position_int": full_doc.get("position_int"),
|
||||
"top_int": full_doc.get("top_int"),
|
||||
"content_with_weight": full_doc.get("content_with_weight") or "",
|
||||
"question_kwd": full_doc.get("question_kwd") or []
|
||||
})
|
||||
return out
|
||||
|
||||
def _clean(s: str) -> str:
|
||||
s = re.sub(r"</?(table|td|caption|tr|th)( [^<>]{0,12})?>", " ", s or "")
|
||||
return s if s else "None"
|
||||
req = request.json
|
||||
kb_id = req.get("kb_id", "")
|
||||
embd_id = req.get("embd_id", "")
|
||||
@ -859,8 +867,10 @@ def check_embedding():
|
||||
|
||||
results, eff_sims = [], []
|
||||
for ck in samples:
|
||||
txt = (ck.get("content_with_weight") or "").strip()
|
||||
if not txt:
|
||||
title = ck.get("doc_name") or "Title"
|
||||
txt_in = "\n".join(ck.get("question_kwd") or []) or ck.get("content_with_weight") or ""
|
||||
txt_in = _clean(txt_in)
|
||||
if not txt_in:
|
||||
results.append({"chunk_id": ck["chunk_id"], "reason": "no_text"})
|
||||
continue
|
||||
|
||||
@ -869,8 +879,16 @@ def check_embedding():
|
||||
continue
|
||||
|
||||
try:
|
||||
qv, _ = emb_mdl.encode_queries(txt)
|
||||
sim = _cos_sim(qv, ck["vector"])
|
||||
v, _ = emb_mdl.encode([title, txt_in])
|
||||
sim_content = _cos_sim(v[1], ck["vector"])
|
||||
title_w = 0.1
|
||||
qv_mix = title_w * v[0] + (1 - title_w) * v[1]
|
||||
sim_mix = _cos_sim(qv_mix, ck["vector"])
|
||||
sim = sim_content
|
||||
mode = "content_only"
|
||||
if sim_mix > sim:
|
||||
sim = sim_mix
|
||||
mode = "title+content"
|
||||
except Exception:
|
||||
return get_error_data_result(message="embedding failure")
|
||||
|
||||
@ -892,8 +910,9 @@ def check_embedding():
|
||||
"avg_cos_sim": round(float(np.mean(eff_sims)) if eff_sims else 0.0, 6),
|
||||
"min_cos_sim": round(float(np.min(eff_sims)) if eff_sims else 0.0, 6),
|
||||
"max_cos_sim": round(float(np.max(eff_sims)) if eff_sims else 0.0, 6),
|
||||
"match_mode": mode,
|
||||
}
|
||||
if summary["avg_cos_sim"] > 0.99:
|
||||
if summary["avg_cos_sim"] > 0.9:
|
||||
return get_json_result(data={"summary": summary, "results": results})
|
||||
return get_json_result(code=RetCode.NOT_EFFECTIVE, message="failed", data={"summary": summary, "results": results})
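For context, here is a minimal standalone sketch of the title-weighted similarity check that the hunk above introduces. It is illustrative only: `best_similarity` is a hypothetical helper name, and the real handler operates on stored chunk vectors and the tenant's embedding model.

```python
import numpy as np

def _cos_sim(a, b):
    # Cosine similarity between two 1-D vectors.
    a, b = np.asarray(a, dtype=float), np.asarray(b, dtype=float)
    denom = (np.linalg.norm(a) * np.linalg.norm(b)) or 1.0
    return float(np.dot(a, b) / denom)

def best_similarity(title_vec, content_vec, stored_vec, title_w=0.1):
    """Return (similarity, mode): the better of content-only vs a 10% title / 90% content mix."""
    sim_content = _cos_sim(content_vec, stored_vec)
    mixed = title_w * np.asarray(title_vec) + (1 - title_w) * np.asarray(content_vec)
    sim_mix = _cos_sim(mixed, stored_vec)
    if sim_mix > sim_content:
        return sim_mix, "title+content"
    return sim_content, "content_only"
```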
|
||||
|
||||
|
||||
@ -21,10 +21,11 @@ import json
|
||||
from flask import request
|
||||
from peewee import OperationalError
|
||||
from api.db.db_models import File
|
||||
from api.db.services.document_service import DocumentService
|
||||
from api.db.services.document_service import DocumentService, queue_raptor_o_graphrag_tasks
|
||||
from api.db.services.file2document_service import File2DocumentService
|
||||
from api.db.services.file_service import FileService
|
||||
from api.db.services.knowledgebase_service import KnowledgebaseService
|
||||
from api.db.services.task_service import GRAPH_RAPTOR_FAKE_DOC_ID, TaskService
|
||||
from api.db.services.user_service import TenantService
|
||||
from common.constants import RetCode, FileSource, StatusEnum
|
||||
from api.utils.api_utils import (
|
||||
@ -118,7 +119,6 @@ def create(tenant_id):
|
||||
req, err = validate_and_parse_json_request(request, CreateDatasetReq)
|
||||
if err is not None:
|
||||
return get_error_argument_result(err)
|
||||
|
||||
req = KnowledgebaseService.create_with_name(
|
||||
name = req.pop("name", None),
|
||||
tenant_id = tenant_id,
|
||||
@ -144,7 +144,6 @@ def create(tenant_id):
|
||||
ok, k = KnowledgebaseService.get_by_id(req["id"])
|
||||
if not ok:
|
||||
return get_error_data_result(message="Dataset created failed")
|
||||
|
||||
response_data = remap_dictionary_keys(k.to_dict())
|
||||
return get_result(data=response_data)
|
||||
except Exception as e:
|
||||
@ -532,3 +531,157 @@ def delete_knowledge_graph(tenant_id, dataset_id):
|
||||
search.index_name(kb.tenant_id), dataset_id)
|
||||
|
||||
return get_result(data=True)
|
||||
|
||||
|
||||
@manager.route("/datasets/<dataset_id>/run_graphrag", methods=["POST"]) # noqa: F821
|
||||
@token_required
|
||||
def run_graphrag(tenant_id,dataset_id):
|
||||
if not dataset_id:
|
||||
return get_error_data_result(message='Lack of "Dataset ID"')
|
||||
if not KnowledgebaseService.accessible(dataset_id, tenant_id):
|
||||
return get_result(
|
||||
data=False,
|
||||
message='No authorization.',
|
||||
code=RetCode.AUTHENTICATION_ERROR
|
||||
)
|
||||
|
||||
ok, kb = KnowledgebaseService.get_by_id(dataset_id)
|
||||
if not ok:
|
||||
return get_error_data_result(message="Invalid Dataset ID")
|
||||
|
||||
task_id = kb.graphrag_task_id
|
||||
if task_id:
|
||||
ok, task = TaskService.get_by_id(task_id)
|
||||
if not ok:
|
||||
logging.warning(f"A valid GraphRAG task id is expected for Dataset {dataset_id}")
|
||||
|
||||
if task and task.progress not in [-1, 1]:
|
||||
return get_error_data_result(message=f"Task {task_id} in progress with status {task.progress}. A Graph Task is already running.")
|
||||
|
||||
documents, _ = DocumentService.get_by_kb_id(
|
||||
kb_id=dataset_id,
|
||||
page_number=0,
|
||||
items_per_page=0,
|
||||
orderby="create_time",
|
||||
desc=False,
|
||||
keywords="",
|
||||
run_status=[],
|
||||
types=[],
|
||||
suffix=[],
|
||||
)
|
||||
if not documents:
|
||||
return get_error_data_result(message=f"No documents in Dataset {dataset_id}")
|
||||
|
||||
sample_document = documents[0]
|
||||
document_ids = [document["id"] for document in documents]
|
||||
|
||||
task_id = queue_raptor_o_graphrag_tasks(sample_doc_id=sample_document, ty="graphrag", priority=0, fake_doc_id=GRAPH_RAPTOR_FAKE_DOC_ID, doc_ids=list(document_ids))
|
||||
|
||||
if not KnowledgebaseService.update_by_id(kb.id, {"graphrag_task_id": task_id}):
|
||||
logging.warning(f"Cannot save graphrag_task_id for Dataset {dataset_id}")
|
||||
|
||||
return get_result(data={"graphrag_task_id": task_id})
|
||||
|
||||
|
||||
@manager.route("/datasets/<dataset_id>/trace_graphrag", methods=["GET"]) # noqa: F821
|
||||
@token_required
|
||||
def trace_graphrag(tenant_id,dataset_id):
|
||||
if not dataset_id:
|
||||
return get_error_data_result(message='Lack of "Dataset ID"')
|
||||
if not KnowledgebaseService.accessible(dataset_id, tenant_id):
|
||||
return get_result(
|
||||
data=False,
|
||||
message='No authorization.',
|
||||
code=RetCode.AUTHENTICATION_ERROR
|
||||
)
|
||||
|
||||
ok, kb = KnowledgebaseService.get_by_id(dataset_id)
|
||||
if not ok:
|
||||
return get_error_data_result(message="Invalid Dataset ID")
|
||||
|
||||
task_id = kb.graphrag_task_id
|
||||
if not task_id:
|
||||
return get_result(data={})
|
||||
|
||||
ok, task = TaskService.get_by_id(task_id)
|
||||
if not ok:
|
||||
return get_result(data={})
|
||||
|
||||
return get_result(data=task.to_dict())
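A hedged client-side sketch of driving the two new endpoints together; the base URL, port, and API key are placeholders, and the endpoint paths follow the HTTP API reference added later in this diff.

```python
import time
import requests

BASE = "http://localhost:9380/api/v1"                 # placeholder host/port
HEADERS = {"Authorization": "Bearer <YOUR_API_KEY>"}  # placeholder key
dataset_id = "<dataset_id>"

# Kick off knowledge-graph construction for the dataset.
run = requests.post(f"{BASE}/datasets/{dataset_id}/run_graphrag", headers=HEADERS).json()
task_id = run["data"]["graphrag_task_id"]

# Poll the trace endpoint; progress of 1 means done, -1 means failed.
while True:
    trace = requests.get(f"{BASE}/datasets/{dataset_id}/trace_graphrag", headers=HEADERS).json()
    progress = trace.get("data", {}).get("progress", 0.0)
    if progress >= 1 or progress == -1:
        break
    time.sleep(5)
print(trace["data"].get("progress_msg", ""))
```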
|
||||
|
||||
|
||||
@manager.route("/datasets/<dataset_id>/run_raptor", methods=["POST"]) # noqa: F821
|
||||
@token_required
|
||||
def run_raptor(tenant_id,dataset_id):
|
||||
if not dataset_id:
|
||||
return get_error_data_result(message='Lack of "Dataset ID"')
|
||||
if not KnowledgebaseService.accessible(dataset_id, tenant_id):
|
||||
return get_result(
|
||||
data=False,
|
||||
message='No authorization.',
|
||||
code=RetCode.AUTHENTICATION_ERROR
|
||||
)
|
||||
|
||||
ok, kb = KnowledgebaseService.get_by_id(dataset_id)
|
||||
if not ok:
|
||||
return get_error_data_result(message="Invalid Dataset ID")
|
||||
|
||||
task_id = kb.raptor_task_id
|
||||
if task_id:
|
||||
ok, task = TaskService.get_by_id(task_id)
|
||||
if not ok:
|
||||
logging.warning(f"A valid RAPTOR task id is expected for Dataset {dataset_id}")
|
||||
|
||||
if task and task.progress not in [-1, 1]:
|
||||
return get_error_data_result(message=f"Task {task_id} in progress with status {task.progress}. A RAPTOR Task is already running.")
|
||||
|
||||
documents, _ = DocumentService.get_by_kb_id(
|
||||
kb_id=dataset_id,
|
||||
page_number=0,
|
||||
items_per_page=0,
|
||||
orderby="create_time",
|
||||
desc=False,
|
||||
keywords="",
|
||||
run_status=[],
|
||||
types=[],
|
||||
suffix=[],
|
||||
)
|
||||
if not documents:
|
||||
return get_error_data_result(message=f"No documents in Dataset {dataset_id}")
|
||||
|
||||
sample_document = documents[0]
|
||||
document_ids = [document["id"] for document in documents]
|
||||
|
||||
task_id = queue_raptor_o_graphrag_tasks(sample_doc_id=sample_document, ty="raptor", priority=0, fake_doc_id=GRAPH_RAPTOR_FAKE_DOC_ID, doc_ids=list(document_ids))
|
||||
|
||||
if not KnowledgebaseService.update_by_id(kb.id, {"raptor_task_id": task_id}):
|
||||
logging.warning(f"Cannot save raptor_task_id for Dataset {dataset_id}")
|
||||
|
||||
return get_result(data={"raptor_task_id": task_id})
|
||||
|
||||
|
||||
@manager.route("/datasets/<dataset_id>/trace_raptor", methods=["GET"]) # noqa: F821
|
||||
@token_required
|
||||
def trace_raptor(tenant_id,dataset_id):
|
||||
if not dataset_id:
|
||||
return get_error_data_result(message='Lack of "Dataset ID"')
|
||||
|
||||
if not KnowledgebaseService.accessible(dataset_id, tenant_id):
|
||||
return get_result(
|
||||
data=False,
|
||||
message='No authorization.',
|
||||
code=RetCode.AUTHENTICATION_ERROR
|
||||
)
|
||||
ok, kb = KnowledgebaseService.get_by_id(dataset_id)
|
||||
if not ok:
|
||||
return get_error_data_result(message="Invalid Dataset ID")
|
||||
|
||||
task_id = kb.raptor_task_id
|
||||
if not task_id:
|
||||
return get_result(data={})
|
||||
|
||||
ok, task = TaskService.get_by_id(task_id)
|
||||
if not ok:
|
||||
return get_error_data_result(message="RAPTOR Task Not Found or Error Occurred")
|
||||
|
||||
return get_result(data=task.to_dict())
|
||||
@ -93,6 +93,10 @@ def upload(dataset_id, tenant_id):
|
||||
type: file
|
||||
required: true
|
||||
description: Document files to upload.
|
||||
- in: formData
|
||||
name: parent_path
|
||||
type: string
|
||||
description: Optional nested path under the parent folder. Uses '/' separators.
|
||||
responses:
|
||||
200:
|
||||
description: Successfully uploaded documents.
|
||||
@ -151,7 +155,7 @@ def upload(dataset_id, tenant_id):
|
||||
e, kb = KnowledgebaseService.get_by_id(dataset_id)
|
||||
if not e:
|
||||
raise LookupError(f"Can't find the dataset with ID {dataset_id}!")
|
||||
err, files = FileService.upload_document(kb, file_objs, tenant_id)
|
||||
err, files = FileService.upload_document(kb, file_objs, tenant_id, parent_path=request.form.get("parent_path"))
|
||||
if err:
|
||||
return get_result(message="\n".join(err), code=RetCode.SERVER_ERROR)
|
||||
# rename key's name
|
||||
|
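A hedged sketch of how a client might exercise the new `parent_path` form field; the endpoint path and host are assumptions based on the upload handler above, so adjust them to your deployment.

```python
import requests

# Assumed dataset document-upload endpoint (placeholder host and IDs).
url = "http://localhost:9380/api/v1/datasets/<dataset_id>/documents"
headers = {"Authorization": "Bearer <YOUR_API_KEY>"}

with open("report.pdf", "rb") as fp:
    resp = requests.post(
        url,
        headers=headers,
        files=[("file", ("report.pdf", fp, "application/pdf"))],
        data={"parent_path": "projects/2024/q1"},  # nested folder path, sanitized server-side
        timeout=60,
    )
print(resp.json())
```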
||||
@ -242,7 +242,7 @@ class Connector2KbService(CommonService):
|
||||
"id": get_uuid(),
|
||||
"connector_id": conn_id,
|
||||
"kb_id": kb_id,
|
||||
"auto_parse": conn.get("auto_parse", "1")
|
||||
"auto_parse": conn.get("auto_parse", "1")
|
||||
})
|
||||
SyncLogsService.schedule(conn_id, kb_id, reindex=True)
|
||||
|
||||
|
||||
@ -309,7 +309,7 @@ class DocumentService(CommonService):
|
||||
chunks = settings.docStoreConn.search(["img_id"], [], {"doc_id": doc.id}, [], OrderByExpr(),
|
||||
page * page_size, page_size, search.index_name(tenant_id),
|
||||
[doc.kb_id])
|
||||
chunk_ids = settings.docStoreConn.getChunkIds(chunks)
|
||||
chunk_ids = settings.docStoreConn.get_chunk_ids(chunks)
|
||||
if not chunk_ids:
|
||||
break
|
||||
all_chunk_ids.extend(chunk_ids)
|
||||
@ -322,7 +322,7 @@ class DocumentService(CommonService):
|
||||
settings.STORAGE_IMPL.rm(doc.kb_id, doc.thumbnail)
|
||||
settings.docStoreConn.delete({"doc_id": doc.id}, search.index_name(tenant_id), doc.kb_id)
|
||||
|
||||
graph_source = settings.docStoreConn.getFields(
|
||||
graph_source = settings.docStoreConn.get_fields(
|
||||
settings.docStoreConn.search(["source_id"], [], {"kb_id": doc.kb_id, "knowledge_graph_kwd": ["graph"]}, [], OrderByExpr(), 0, 1, search.index_name(tenant_id), [doc.kb_id]), ["source_id"]
|
||||
)
|
||||
if len(graph_source) > 0 and doc.id in list(graph_source.values())[0]["source_id"]:
|
||||
|
||||
@ -31,7 +31,7 @@ from common.misc_utils import get_uuid
|
||||
from common.constants import TaskStatus, FileSource, ParserType
|
||||
from api.db.services.knowledgebase_service import KnowledgebaseService
|
||||
from api.db.services.task_service import TaskService
|
||||
from api.utils.file_utils import filename_type, read_potential_broken_pdf, thumbnail_img
|
||||
from api.utils.file_utils import filename_type, read_potential_broken_pdf, thumbnail_img, sanitize_path
|
||||
from rag.llm.cv_model import GptV4
|
||||
from common import settings
|
||||
|
||||
@ -329,7 +329,7 @@ class FileService(CommonService):
|
||||
current_id = start_id
|
||||
while current_id:
|
||||
e, file = cls.get_by_id(current_id)
|
||||
if file.parent_id != file.id and e:
|
||||
if e and file.parent_id != file.id:
|
||||
parent_folders.append(file)
|
||||
current_id = file.parent_id
|
||||
else:
|
||||
@ -423,13 +423,15 @@ class FileService(CommonService):
|
||||
|
||||
@classmethod
|
||||
@DB.connection_context()
|
||||
def upload_document(self, kb, file_objs, user_id, src="local"):
|
||||
def upload_document(self, kb, file_objs, user_id, src="local", parent_path: str | None = None):
|
||||
root_folder = self.get_root_folder(user_id)
|
||||
pf_id = root_folder["id"]
|
||||
self.init_knowledgebase_docs(pf_id, user_id)
|
||||
kb_root_folder = self.get_kb_folder(user_id)
|
||||
kb_folder = self.new_a_file_from_kb(kb.tenant_id, kb.name, kb_root_folder["id"])
|
||||
|
||||
safe_parent_path = sanitize_path(parent_path)
|
||||
|
||||
err, files = [], []
|
||||
for file in file_objs:
|
||||
try:
|
||||
@ -439,7 +441,7 @@ class FileService(CommonService):
|
||||
if filetype == FileType.OTHER.value:
|
||||
raise RuntimeError("This type of file has not been supported yet!")
|
||||
|
||||
location = filename
|
||||
location = filename if not safe_parent_path else f"{safe_parent_path}/{filename}"
|
||||
while settings.STORAGE_IMPL.obj_exist(kb.id, location):
|
||||
location += "_"
|
||||
|
||||
|
||||
@ -164,3 +164,23 @@ def read_potential_broken_pdf(blob):
|
||||
return repaired
|
||||
|
||||
return blob
|
||||
|
||||
|
||||
def sanitize_path(raw_path: str | None) -> str:
|
||||
"""Normalize and sanitize a user-provided path segment.
|
||||
|
||||
- Converts backslashes to forward slashes
|
||||
- Strips leading/trailing slashes
|
||||
- Removes '.' and '..' segments
|
||||
- Restricts characters to A-Za-z0-9, underscore, dash, and '/'
|
||||
"""
|
||||
if not raw_path:
|
||||
return ""
|
||||
backslash_re = re.compile(r"[\\]+")
|
||||
unsafe_re = re.compile(r"[^A-Za-z0-9_\-/]")
|
||||
normalized = backslash_re.sub("/", raw_path)
|
||||
normalized = normalized.strip("/")
|
||||
parts = [seg for seg in normalized.split("/") if seg and seg not in (".", "..")]
|
||||
sanitized = "/".join(parts)
|
||||
sanitized = unsafe_re.sub("", sanitized)
|
||||
return sanitized
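A few illustrative calls that follow directly from the rules above (assuming `sanitize_path` is importable from `api.utils.file_utils`):

```python
from api.utils.file_utils import sanitize_path  # assumed import location

assert sanitize_path(None) == ""
assert sanitize_path("a\\b\\c") == "a/b/c"                  # backslashes become '/'
assert sanitize_path("/reports/2024/") == "reports/2024"    # leading/trailing '/' stripped
assert sanitize_path("../../etc/passwd") == "etc/passwd"    # '.' and '..' segments dropped
assert sanitize_path("dir name/file*?") == "dirname/file"   # unsafe characters removed
```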
|
||||
|
||||
@ -173,7 +173,8 @@ def check_task_executor_alive():
|
||||
heartbeats = [json.loads(heartbeat) for heartbeat in heartbeats]
|
||||
task_executor_heartbeats[task_executor_id] = heartbeats
|
||||
if task_executor_heartbeats:
|
||||
return {"status": "alive", "message": task_executor_heartbeats}
|
||||
status = "alive" if any(task_executor_heartbeats.values()) else "timeout"
|
||||
return {"status": status, "message": task_executor_heartbeats}
|
||||
else:
|
||||
return {"status": "timeout", "message": "Not found any task executor."}
|
||||
except Exception as e:
|
||||
|
||||
48
check_comment_ascii.py
Normal file
@ -0,0 +1,48 @@
|
||||
#!/usr/bin/env python3
|
||||
|
||||
"""
|
||||
Check whether the given Python files contain non-ASCII comments.
|
||||
|
||||
How to check the whole git repo:
|
||||
|
||||
```
|
||||
$ git ls-files -z -- '*.py' | xargs -0 python3 check_comment_ascii.py
|
||||
```
|
||||
"""
|
||||
|
||||
import sys
|
||||
import tokenize
|
||||
import ast
|
||||
import pathlib
|
||||
import re
|
||||
|
||||
ASCII = re.compile(r"^[\n -~]*\Z") # Printable ASCII + newline
|
||||
|
||||
|
||||
def check(src: str, name: str) -> int:
|
||||
"""
|
||||
docstring line 1
|
||||
docstring line 2
|
||||
"""
|
||||
ok = 1
|
||||
# A common comment begins with `#`
|
||||
with tokenize.open(src) as fp:
|
||||
for tk in tokenize.generate_tokens(fp.readline):
|
||||
if tk.type == tokenize.COMMENT and not ASCII.fullmatch(tk.string):
|
||||
print(f"{name}:{tk.start[0]}: non-ASCII comment: {tk.string}")
|
||||
ok = 0
|
||||
# A docstring begins and ends with `'''`
|
||||
for node in ast.walk(ast.parse(pathlib.Path(src).read_text(), filename=name)):
|
||||
if isinstance(node, (ast.FunctionDef, ast.ClassDef, ast.Module)):
|
||||
if (doc := ast.get_docstring(node)) and not ASCII.fullmatch(doc):
|
||||
print(f"{name}:{node.lineno}: non-ASCII docstring: {doc}")
|
||||
ok = 0
|
||||
return ok
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
status = 0
|
||||
for file in sys.argv[1:]:
|
||||
if not check(file, file):
|
||||
status = 1
|
||||
sys.exit(status)
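As a hedged illustration, a file like the following (made up for this example) would be flagged by the checker above, because the first comment contains a character outside printable ASCII:

```python
# TODO: update the café menu parser   <- flagged: 'é' is not printable ASCII
def add(a, b):
    """Return the sum of a and b."""  # ASCII-only docstring and comment: passes
    return a + b
```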
|
||||
@ -3,15 +3,9 @@ import os
|
||||
import threading
|
||||
from typing import Any, Callable
|
||||
|
||||
import requests
|
||||
|
||||
from common.data_source.config import DocumentSource
|
||||
from common.data_source.google_util.constant import GOOGLE_SCOPES
|
||||
|
||||
GOOGLE_DEVICE_CODE_URL = "https://oauth2.googleapis.com/device/code"
|
||||
GOOGLE_DEVICE_TOKEN_URL = "https://oauth2.googleapis.com/token"
|
||||
DEFAULT_DEVICE_INTERVAL = 5
|
||||
|
||||
|
||||
def _get_requested_scopes(source: DocumentSource) -> list[str]:
|
||||
"""Return the scopes to request, honoring an optional override env var."""
|
||||
@ -55,62 +49,6 @@ def _run_with_timeout(func: Callable[[], Any], timeout_secs: int, timeout_messag
|
||||
return result.get("value")
|
||||
|
||||
|
||||
def _extract_client_info(credentials: dict[str, Any]) -> tuple[str, str | None]:
|
||||
if "client_id" in credentials:
|
||||
return credentials["client_id"], credentials.get("client_secret")
|
||||
for key in ("installed", "web"):
|
||||
if key in credentials and isinstance(credentials[key], dict):
|
||||
nested = credentials[key]
|
||||
if "client_id" not in nested:
|
||||
break
|
||||
return nested["client_id"], nested.get("client_secret")
|
||||
raise ValueError("Provided Google OAuth credentials are missing client_id.")
|
||||
|
||||
|
||||
def start_device_authorization_flow(
|
||||
credentials: dict[str, Any],
|
||||
source: DocumentSource,
|
||||
) -> tuple[dict[str, Any], dict[str, Any]]:
|
||||
client_id, client_secret = _extract_client_info(credentials)
|
||||
data = {
|
||||
"client_id": client_id,
|
||||
"scope": " ".join(_get_requested_scopes(source)),
|
||||
}
|
||||
if client_secret:
|
||||
data["client_secret"] = client_secret
|
||||
resp = requests.post(GOOGLE_DEVICE_CODE_URL, data=data, timeout=15)
|
||||
resp.raise_for_status()
|
||||
payload = resp.json()
|
||||
state = {
|
||||
"client_id": client_id,
|
||||
"client_secret": client_secret,
|
||||
"device_code": payload.get("device_code"),
|
||||
"interval": payload.get("interval", DEFAULT_DEVICE_INTERVAL),
|
||||
}
|
||||
response_data = {
|
||||
"user_code": payload.get("user_code"),
|
||||
"verification_url": payload.get("verification_url") or payload.get("verification_uri"),
|
||||
"verification_url_complete": payload.get("verification_url_complete")
|
||||
or payload.get("verification_uri_complete"),
|
||||
"expires_in": payload.get("expires_in"),
|
||||
"interval": state["interval"],
|
||||
}
|
||||
return state, response_data
|
||||
|
||||
|
||||
def poll_device_authorization_flow(state: dict[str, Any]) -> dict[str, Any]:
|
||||
data = {
|
||||
"client_id": state["client_id"],
|
||||
"device_code": state["device_code"],
|
||||
"grant_type": "urn:ietf:params:oauth:grant-type:device_code",
|
||||
}
|
||||
if state.get("client_secret"):
|
||||
data["client_secret"] = state["client_secret"]
|
||||
resp = requests.post(GOOGLE_DEVICE_TOKEN_URL, data=data, timeout=20)
|
||||
resp.raise_for_status()
|
||||
return resp.json()
|
||||
|
||||
|
||||
def _run_local_server_flow(client_config: dict[str, Any], source: DocumentSource) -> dict[str, Any]:
|
||||
"""Launch the standard Google OAuth local-server flow to mint user tokens."""
|
||||
from google_auth_oauthlib.flow import InstalledAppFlow # type: ignore
|
||||
@ -125,10 +63,7 @@ def _run_local_server_flow(client_config: dict[str, Any], source: DocumentSource
|
||||
preferred_port = os.environ.get("GOOGLE_OAUTH_LOCAL_SERVER_PORT")
|
||||
port = int(preferred_port) if preferred_port else 0
|
||||
timeout_secs = _get_oauth_timeout_secs()
|
||||
timeout_message = (
|
||||
f"Google OAuth verification timed out after {timeout_secs} seconds. "
|
||||
"Close any pending consent windows and rerun the connector configuration to try again."
|
||||
)
|
||||
timeout_message = f"Google OAuth verification timed out after {timeout_secs} seconds. Close any pending consent windows and rerun the connector configuration to try again."
|
||||
|
||||
print("Launching Google OAuth flow. A browser window should open shortly.")
|
||||
print("If it does not, copy the URL shown in the console into your browser manually.")
|
||||
@ -153,11 +88,8 @@ def _run_local_server_flow(client_config: dict[str, Any], source: DocumentSource
|
||||
instructions = [
|
||||
"Google rejected one or more of the requested OAuth scopes.",
|
||||
"Fix options:",
|
||||
" 1. In Google Cloud Console, open APIs & Services > OAuth consent screen and add the missing scopes "
|
||||
" (Drive metadata + Admin Directory read scopes), then re-run the flow.",
|
||||
" 1. In Google Cloud Console, open APIs & Services > OAuth consent screen and add the missing scopes (Drive metadata + Admin Directory read scopes), then re-run the flow.",
|
||||
" 2. Set GOOGLE_OAUTH_SCOPE_OVERRIDE to a comma-separated list of scopes you are allowed to request.",
|
||||
" 3. For quick local testing only, export OAUTHLIB_RELAX_TOKEN_SCOPE=1 to accept the reduced scopes "
|
||||
" (be aware the connector may lose functionality).",
|
||||
]
|
||||
raise RuntimeError("\n".join(instructions)) from warning
|
||||
raise
|
||||
@ -184,8 +116,6 @@ def ensure_oauth_token_dict(credentials: dict[str, Any], source: DocumentSource)
|
||||
client_config = {"web": credentials["web"]}
|
||||
|
||||
if client_config is None:
|
||||
raise ValueError(
|
||||
"Provided Google OAuth credentials are missing both tokens and a client configuration."
|
||||
)
|
||||
raise ValueError("Provided Google OAuth credentials are missing both tokens and a client configuration.")
|
||||
|
||||
return _run_local_server_flow(client_config, source)
|
||||
|
||||
@ -186,9 +186,6 @@ class DoclingParser(RAGFlowPdfParser):
|
||||
yield (DoclingContentType.EQUATION.value, text, bbox)
|
||||
|
||||
def _transfer_to_sections(self, doc) -> list[tuple[str, str]]:
|
||||
"""
|
||||
Consistent with MinerUParser: returns [(section_text, line_tag), ...]
|
||||
"""
|
||||
sections: list[tuple[str, str]] = []
|
||||
for typ, payload, bbox in self._iter_doc_items(doc):
|
||||
if typ == DoclingContentType.TEXT.value:
|
||||
|
||||
@ -34,6 +34,7 @@ def vision_figure_parser_figure_data_wrapper(figures_data_without_positions):
|
||||
if isinstance(figure_data[1], Image.Image)
|
||||
]
|
||||
|
||||
|
||||
def vision_figure_parser_docx_wrapper(sections,tbls,callback=None,**kwargs):
|
||||
try:
|
||||
vision_model = LLMBundle(kwargs["tenant_id"], LLMType.IMAGE2TEXT)
|
||||
@ -50,7 +51,8 @@ def vision_figure_parser_docx_wrapper(sections,tbls,callback=None,**kwargs):
|
||||
callback(0.8, f"Visual model error: {e}. Skipping figure parsing enhancement.")
|
||||
return tbls
|
||||
|
||||
def vision_figure_parser_pdf_wrapper(tbls,callback=None,**kwargs):
|
||||
|
||||
def vision_figure_parser_pdf_wrapper(tbls, callback=None, **kwargs):
|
||||
try:
|
||||
vision_model = LLMBundle(kwargs["tenant_id"], LLMType.IMAGE2TEXT)
|
||||
callback(0.7, "Visual model detected. Attempting to enhance figure extraction...")
|
||||
@ -72,6 +74,7 @@ def vision_figure_parser_pdf_wrapper(tbls,callback=None,**kwargs):
|
||||
callback(0.8, f"Visual model error: {e}. Skipping figure parsing enhancement.")
|
||||
return tbls
|
||||
|
||||
|
||||
shared_executor = ThreadPoolExecutor(max_workers=10)
|
||||
|
||||
|
||||
|
||||
@ -434,7 +434,7 @@ class MinerUParser(RAGFlowPdfParser):
|
||||
if not section.strip():
|
||||
section = "FAILED TO PARSE TABLE"
|
||||
case MinerUContentType.IMAGE:
|
||||
section = "".join(output["image_caption"]) + "\n" + "".join(output["image_footnote"])
|
||||
section = "".join(output.get(["image_caption"],[])) + "\n" + "".join(output.get(["image_footnote"],[]))
|
||||
case MinerUContentType.EQUATION:
|
||||
section = output["text"]
|
||||
case MinerUContentType.CODE:
|
||||
|
||||
@ -117,7 +117,6 @@ def load_model(model_dir, nm, device_id: int | None = None):
|
||||
providers=['CUDAExecutionProvider'],
|
||||
provider_options=[cuda_provider_options]
|
||||
)
|
||||
run_options.add_run_config_entry("memory.enable_memory_arena_shrinkage", "gpu:" + str(provider_device_id))
|
||||
logging.info(f"load_model {model_file_path} uses GPU (device {provider_device_id}, gpu_mem_limit={cuda_provider_options['gpu_mem_limit']}, arena_strategy={arena_strategy})")
|
||||
else:
|
||||
sess = ort.InferenceSession(
|
||||
|
||||
@ -12,6 +12,10 @@ The RAGFlow Admin UI is a web-based interface that provides comprehensive system
|
||||
|
||||
To access the RAGFlow admin UI, append `/admin` to the web UI's address, e.g. `http://[RAGFLOW_WEB_UI_ADDR]/admin`, replacing `[RAGFLOW_WEB_UI_ADDR]` with the actual RAGFlow web UI address.
|
||||
|
||||
### Default Credentials
|
||||
| Username | Password |
|
||||
|----------|----------|
|
||||
| admin@ragflow.io | admin |
|
||||
|
||||
## Admin UI Overview
|
||||
|
||||
|
||||
8
docs/guides/dataset/add_data_source/_category_.json
Normal file
@ -0,0 +1,8 @@
|
||||
{
|
||||
"label": "Add data source",
|
||||
"position": 18,
|
||||
"link": {
|
||||
"type": "generated-index",
|
||||
"description": "Add various data sources"
|
||||
}
|
||||
}
|
||||
137
docs/guides/dataset/add_data_source/add_google_drive.md
Normal file
@ -0,0 +1,137 @@
|
||||
---
|
||||
sidebar_position: 3
|
||||
slug: /add_google_drive
|
||||
---
|
||||
|
||||
# Add Google Drive
|
||||
|
||||
## 1. Create a Google Cloud Project
|
||||
|
||||
You can either create a dedicated project for RAGFlow or use an existing
|
||||
Google Cloud external project.
|
||||
|
||||
**Steps:**
|
||||
1. Open the project creation page\
|
||||
`https://console.cloud.google.com/projectcreate`
|
||||

|
||||
2. Select **External** as the Audience
|
||||

|
||||
3. Click **Create**
|
||||

|
||||
|
||||
------------------------------------------------------------------------
|
||||
|
||||
## 2. Configure OAuth Consent Screen
|
||||
|
||||
1. Go to **APIs & Services → OAuth consent screen**
|
||||
2. Ensure **User Type = External**
|
||||

|
||||
3. Add your test users under **Test Users** by entering email addresses
|
||||

|
||||

|
||||
|
||||
------------------------------------------------------------------------
|
||||
|
||||
## 3. Create OAuth Client Credentials
|
||||
|
||||
1. Navigate to:\
|
||||
`https://console.cloud.google.com/auth/clients`
|
||||
2. Create a **Web Application**
|
||||

|
||||
3. Enter a name for the client
|
||||
4. Add the following **Authorized Redirect URIs**:
|
||||
|
||||
```
|
||||
http://localhost:9380/v1/connector/google-drive/oauth/web/callback
|
||||
```
|
||||
|
||||
### If using Docker deployment:
|
||||
|
||||
**Authorized JavaScript origin:**
|
||||
```
|
||||
http://localhost:80
|
||||
```
|
||||
|
||||

|
||||
### If running from source:
|
||||
**Authorized JavaScript origin:**
|
||||
```
|
||||
http://localhost:9222
|
||||
```
|
||||
|
||||

|
||||
5. After saving, click **Download JSON**. This file will later be
|
||||
uploaded into RAGFlow.
|
||||
|
||||

|
||||
|
||||
------------------------------------------------------------------------
|
||||
|
||||
## 4. Add Scopes
|
||||
|
||||
1. Open **Data Access → Add or remove scopes**
|
||||
|
||||
2. Paste and add the following entries:
|
||||
|
||||
```
|
||||
https://www.googleapis.com/auth/drive.readonly
|
||||
https://www.googleapis.com/auth/drive.metadata.readonly
|
||||
https://www.googleapis.com/auth/admin.directory.group.readonly
|
||||
https://www.googleapis.com/auth/admin.directory.user.readonly
|
||||
```
|
||||
|
||||

|
||||
3. Update and Save changes
|
||||
|
||||

|
||||

|
||||
|
||||
------------------------------------------------------------------------
|
||||
|
||||
## 5. Enable Required APIs
|
||||
Navigate to the Google API Library:\
|
||||
`https://console.cloud.google.com/apis/library`
|
||||

|
||||
|
||||
Enable the following APIs:
|
||||
|
||||
- Google Drive API
|
||||
- Admin SDK API
|
||||
- Google Sheets API
|
||||
- Google Docs API
|
||||
|
||||
|
||||

|
||||
|
||||

|
||||
|
||||

|
||||
|
||||

|
||||
|
||||

|
||||
|
||||

|
||||
|
||||
------------------------------------------------------------------------
|
||||
|
||||
## 6. Add Google Drive As a Data Source in RAGFlow
|
||||
|
||||
1. Go to **Data Sources** inside RAGFlow
|
||||
2. Select **Google Drive**
|
||||
3. Upload the previously downloaded JSON credentials
|
||||

|
||||
4. Enter the shared Google Drive folder link (https://drive.google.com/drive), such as:
|
||||

|
||||
|
||||
5. Click **Authorize with Google**
|
||||
A browser window will appear.
|
||||

|
||||
Click **Continue**, then **Select All → Continue**. Authorization should succeed. Select **OK** to add the data source.
|
||||

|
||||

|
||||

|
||||

|
||||
|
||||
|
||||
@ -1,6 +1,6 @@
|
||||
{
|
||||
"label": "Best practices",
|
||||
"position": 11,
|
||||
"position": 19,
|
||||
"link": {
|
||||
"type": "generated-index",
|
||||
"description": "Best practices on configuring a dataset."
|
||||
|
||||
@ -64,7 +64,10 @@ The Admin CLI and Admin Service form a client-server architectural suite for RAG
|
||||
|
||||
- -p: RAGFlow admin server port
|
||||
|
||||
## Default administrative account
|
||||
|
||||
- Username: admin@ragflow.io
|
||||
- Password: admin
|
||||
|
||||
## Supported Commands
|
||||
|
||||
|
||||
@ -974,6 +974,237 @@ Failure:
|
||||
|
||||
---
|
||||
|
||||
### Construct knowledge graph
|
||||
|
||||
**POST** `/api/v1/datasets/{dataset_id}/run_graphrag`
|
||||
|
||||
Constructs a knowledge graph from a specified dataset.
|
||||
|
||||
#### Request
|
||||
|
||||
- Method: POST
|
||||
- URL: `/api/v1/datasets/{dataset_id}/run_graphrag`
|
||||
- Headers:
|
||||
- `'Authorization: Bearer <YOUR_API_KEY>'`
|
||||
|
||||
##### Request example
|
||||
|
||||
```bash
|
||||
curl --request POST \
|
||||
--url http://{address}/api/v1/datasets/{dataset_id}/run_graphrag \
|
||||
--header 'Authorization: Bearer <YOUR_API_KEY>'
|
||||
```
|
||||
|
||||
##### Request parameters
|
||||
|
||||
- `dataset_id`: (*Path parameter*)
|
||||
The ID of the target dataset.
|
||||
|
||||
#### Response
|
||||
|
||||
Success:
|
||||
|
||||
```json
|
||||
{
|
||||
"code":0,
|
||||
"data":{
|
||||
"graphrag_task_id":"e498de54bfbb11f0ba028f704583b57b"
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
Failure:
|
||||
|
||||
```json
|
||||
{
|
||||
"code": 102,
|
||||
"message": "Invalid Dataset ID"
|
||||
}
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
### Get knowledge graph construction status
|
||||
|
||||
**GET** `/api/v1/datasets/{dataset_id}/trace_graphrag`
|
||||
|
||||
Retrieves the knowledge graph construction status for a specified dataset.
|
||||
|
||||
#### Request
|
||||
|
||||
- Method: GET
|
||||
- URL: `/api/v1/datasets/{dataset_id}/trace_graphrag`
|
||||
- Headers:
|
||||
- `'Authorization: Bearer <YOUR_API_KEY>'`
|
||||
|
||||
##### Request example
|
||||
|
||||
```bash
|
||||
curl --request GET \
|
||||
--url http://{address}/api/v1/datasets/{dataset_id}/trace_graphrag \
|
||||
--header 'Authorization: Bearer <YOUR_API_KEY>'
|
||||
```
|
||||
|
||||
##### Request parameters
|
||||
|
||||
- `dataset_id`: (*Path parameter*)
|
||||
The ID of the target dataset.
|
||||
|
||||
#### Response
|
||||
|
||||
Success:
|
||||
|
||||
```json
|
||||
{
|
||||
"code":0,
|
||||
"data":{
|
||||
"begin_at":"Wed, 12 Nov 2025 19:36:56 GMT",
|
||||
"chunk_ids":"",
|
||||
"create_date":"Wed, 12 Nov 2025 19:36:56 GMT",
|
||||
"create_time":1762947416350,
|
||||
"digest":"39e43572e3dcd84f",
|
||||
"doc_id":"44661c10bde211f0bc93c164a47ffc40",
|
||||
"from_page":100000000,
|
||||
"id":"e498de54bfbb11f0ba028f704583b57b",
|
||||
"priority":0,
|
||||
"process_duration":2.45419,
|
||||
"progress":1.0,
|
||||
"progress_msg":"19:36:56 created task graphrag\n19:36:57 Task has been received.\n19:36:58 [GraphRAG] doc:083661febe2411f0bc79456921e5745f has no available chunks, skip generation.\n19:36:58 [GraphRAG] build_subgraph doc:44661c10bde211f0bc93c164a47ffc40 start (chunks=1, timeout=10000000000s)\n19:36:58 Graph already contains 44661c10bde211f0bc93c164a47ffc40\n19:36:58 [GraphRAG] build_subgraph doc:44661c10bde211f0bc93c164a47ffc40 empty\n19:36:58 [GraphRAG] kb:33137ed0bde211f0bc93c164a47ffc40 no subgraphs generated successfully, end.\n19:36:58 Knowledge Graph done (0.72s)","retry_count":1,
|
||||
"task_type":"graphrag",
|
||||
"to_page":100000000,
|
||||
"update_date":"Wed, 12 Nov 2025 19:36:58 GMT",
|
||||
"update_time":1762947418454
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
Failure:
|
||||
|
||||
```json
|
||||
{
|
||||
"code": 102,
|
||||
"message": "Invalid Dataset ID"
|
||||
}
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
### Construct RAPTOR
|
||||
|
||||
**POST** `/api/v1/datasets/{dataset_id}/run_raptor`
|
||||
|
||||
Constructs a RAPTOR tree from a specified dataset.
|
||||
|
||||
#### Request
|
||||
|
||||
- Method: POST
|
||||
- URL: `/api/v1/datasets/{dataset_id}/run_raptor`
|
||||
- Headers:
|
||||
- `'Authorization: Bearer <YOUR_API_KEY>'`
|
||||
|
||||
##### Request example
|
||||
|
||||
```bash
|
||||
curl --request POST \
|
||||
--url http://{address}/api/v1/datasets/{dataset_id}/run_raptor \
|
||||
--header 'Authorization: Bearer <YOUR_API_KEY>'
|
||||
```
|
||||
|
||||
##### Request parameters
|
||||
|
||||
- `dataset_id`: (*Path parameter*)
|
||||
The ID of the target dataset.
|
||||
|
||||
#### Response
|
||||
|
||||
Success:
|
||||
|
||||
```json
|
||||
{
|
||||
"code":0,
|
||||
"data":{
|
||||
"raptor_task_id":"50d3c31cbfbd11f0ba028f704583b57b"
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
Failure:
|
||||
|
||||
```json
|
||||
{
|
||||
"code": 102,
|
||||
"message": "Invalid Dataset ID"
|
||||
}
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
### Get RAPTOR construction status
|
||||
|
||||
**GET** `/api/v1/datasets/{dataset_id}/trace_raptor`
|
||||
|
||||
Retrieves the RAPTOR construction status for a specified dataset.
|
||||
|
||||
#### Request
|
||||
|
||||
- Method: GET
|
||||
- URL: `/api/v1/datasets/{dataset_id}/trace_raptor`
|
||||
- Headers:
|
||||
- `'Authorization: Bearer <YOUR_API_KEY>'`
|
||||
|
||||
##### Request example
|
||||
|
||||
```bash
|
||||
curl --request GET \
|
||||
--url http://{address}/api/v1/datasets/{dataset_id}/trace_raptor \
|
||||
--header 'Authorization: Bearer <YOUR_API_KEY>'
|
||||
```
|
||||
|
||||
##### Request parameters
|
||||
|
||||
- `dataset_id`: (*Path parameter*)
|
||||
The ID of the target dataset.
|
||||
|
||||
#### Response
|
||||
|
||||
Success:
|
||||
|
||||
```json
|
||||
{
|
||||
"code":0,
|
||||
"data":{
|
||||
"begin_at":"Wed, 12 Nov 2025 19:47:07 GMT",
|
||||
"chunk_ids":"",
|
||||
"create_date":"Wed, 12 Nov 2025 19:47:07 GMT",
|
||||
"create_time":1762948027427,
|
||||
"digest":"8b279a6248cb8fc6",
|
||||
"doc_id":"44661c10bde211f0bc93c164a47ffc40",
|
||||
"from_page":100000000,
|
||||
"id":"50d3c31cbfbd11f0ba028f704583b57b",
|
||||
"priority":0,
|
||||
"process_duration":0.948244,
|
||||
"progress":1.0,
|
||||
"progress_msg":"19:47:07 created task raptor\n19:47:07 Task has been received.\n19:47:07 Processing...\n19:47:07 Processing...\n19:47:07 Indexing done (0.01s).\n19:47:07 Task done (0.29s)",
|
||||
"retry_count":1,
|
||||
"task_type":"raptor",
|
||||
"to_page":100000000,
|
||||
"update_date":"Wed, 12 Nov 2025 19:47:07 GMT",
|
||||
"update_time":1762948027948
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
Failure:
|
||||
|
||||
```json
|
||||
{
|
||||
"code": 102,
|
||||
"message": "Invalid Dataset ID"
|
||||
}
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## FILE MANAGEMENT WITHIN DATASET
|
||||
|
||||
---
|
||||
|
||||
@ -114,7 +114,7 @@ class Extractor:
|
||||
async def extract_all(doc_id, chunks, max_concurrency=MAX_CONCURRENT_PROCESS_AND_EXTRACT_CHUNK, task_id=""):
|
||||
out_results = []
|
||||
error_count = 0
|
||||
max_errors = 3
|
||||
max_errors = int(os.environ.get("GRAPHRAG_MAX_ERRORS", 3))
|
||||
|
||||
limiter = trio.Semaphore(max_concurrency)
|
||||
|
||||
|
||||
@ -69,7 +69,7 @@ class KGSearch(Dealer):
|
||||
def _ent_info_from_(self, es_res, sim_thr=0.3):
|
||||
res = {}
|
||||
flds = ["content_with_weight", "_score", "entity_kwd", "rank_flt", "n_hop_with_weight"]
|
||||
es_res = self.dataStore.getFields(es_res, flds)
|
||||
es_res = self.dataStore.get_fields(es_res, flds)
|
||||
for _, ent in es_res.items():
|
||||
for f in flds:
|
||||
if f in ent and ent[f] is None:
|
||||
@ -88,7 +88,7 @@ class KGSearch(Dealer):
|
||||
|
||||
def _relation_info_from_(self, es_res, sim_thr=0.3):
|
||||
res = {}
|
||||
es_res = self.dataStore.getFields(es_res, ["content_with_weight", "_score", "from_entity_kwd", "to_entity_kwd",
|
||||
es_res = self.dataStore.get_fields(es_res, ["content_with_weight", "_score", "from_entity_kwd", "to_entity_kwd",
|
||||
"weight_int"])
|
||||
for _, ent in es_res.items():
|
||||
if get_float(ent["_score"]) < sim_thr:
|
||||
@ -300,7 +300,7 @@ class KGSearch(Dealer):
|
||||
fltr["entities_kwd"] = entities
|
||||
comm_res = self.dataStore.search(fields, [], fltr, [],
|
||||
OrderByExpr(), 0, topn, idxnms, kb_ids)
|
||||
comm_res_fields = self.dataStore.getFields(comm_res, fields)
|
||||
comm_res_fields = self.dataStore.get_fields(comm_res, fields)
|
||||
txts = []
|
||||
for ii, (_, row) in enumerate(comm_res_fields.items()):
|
||||
obj = json.loads(row["content_with_weight"])
|
||||
|
||||
@ -382,7 +382,7 @@ async def does_graph_contains(tenant_id, kb_id, doc_id):
|
||||
"removed_kwd": "N",
|
||||
}
|
||||
res = await trio.to_thread.run_sync(lambda: settings.docStoreConn.search(fields, [], condition, [], OrderByExpr(), 0, 1, search.index_name(tenant_id), [kb_id]))
|
||||
fields2 = settings.docStoreConn.getFields(res, fields)
|
||||
fields2 = settings.docStoreConn.get_fields(res, fields)
|
||||
graph_doc_ids = set()
|
||||
for chunk_id in fields2.keys():
|
||||
graph_doc_ids = set(fields2[chunk_id]["source_id"])
|
||||
@ -591,8 +591,8 @@ async def rebuild_graph(tenant_id, kb_id, exclude_rebuild=None):
|
||||
es_res = await trio.to_thread.run_sync(
|
||||
lambda: settings.docStoreConn.search(flds, [], {"kb_id": kb_id, "knowledge_graph_kwd": ["subgraph"]}, [], OrderByExpr(), i, bs, search.index_name(tenant_id), [kb_id])
|
||||
)
|
||||
# tot = settings.docStoreConn.getTotal(es_res)
|
||||
es_res = settings.docStoreConn.getFields(es_res, flds)
|
||||
# tot = settings.docStoreConn.get_total(es_res)
|
||||
es_res = settings.docStoreConn.get_fields(es_res, flds)
|
||||
|
||||
if len(es_res) == 0:
|
||||
break
|
||||
|
||||
@ -145,6 +145,7 @@ dependencies = [
|
||||
"markdownify>=1.2.0",
|
||||
"captcha>=0.7.1",
|
||||
"pip>=25.2",
|
||||
"pypandoc>=1.16",
|
||||
]
|
||||
|
||||
[dependency-groups]
|
||||
|
||||
@ -482,7 +482,7 @@ def tree_merge(bull, sections, depth):
|
||||
root = Node(level=0, depth=target_level, texts=[])
|
||||
root.build_tree(lines)
|
||||
|
||||
return [("\n").join(element) for element in root.get_tree() if element]
|
||||
return [element for element in root.get_tree() if element]
|
||||
|
||||
def hierarchical_merge(bull, sections, depth):
|
||||
|
||||
|
||||
@ -38,11 +38,11 @@ class FulltextQueryer:
|
||||
]
|
||||
|
||||
@staticmethod
|
||||
def subSpecialChar(line):
|
||||
def sub_special_char(line):
|
||||
return re.sub(r"([:\{\}/\[\]\-\*\"\(\)\|\+~\^])", r"\\\1", line).strip()
|
||||
|
||||
@staticmethod
|
||||
def isChinese(line):
|
||||
def is_chinese(line):
|
||||
arr = re.split(r"[ \t]+", line)
|
||||
if len(arr) <= 3:
|
||||
return True
|
||||
@ -92,7 +92,7 @@ class FulltextQueryer:
|
||||
otxt = txt
|
||||
txt = FulltextQueryer.rmWWW(txt)
|
||||
|
||||
if not self.isChinese(txt):
|
||||
if not self.is_chinese(txt):
|
||||
txt = FulltextQueryer.rmWWW(txt)
|
||||
tks = rag_tokenizer.tokenize(txt).split()
|
||||
keywords = [t for t in tks if t]
|
||||
@ -163,7 +163,7 @@ class FulltextQueryer:
|
||||
)
|
||||
for m in sm
|
||||
]
|
||||
sm = [FulltextQueryer.subSpecialChar(m) for m in sm if len(m) > 1]
|
||||
sm = [FulltextQueryer.sub_special_char(m) for m in sm if len(m) > 1]
|
||||
sm = [m for m in sm if len(m) > 1]
|
||||
|
||||
if len(keywords) < 32:
|
||||
@ -171,7 +171,7 @@ class FulltextQueryer:
|
||||
keywords.extend(sm)
|
||||
|
||||
tk_syns = self.syn.lookup(tk)
|
||||
tk_syns = [FulltextQueryer.subSpecialChar(s) for s in tk_syns]
|
||||
tk_syns = [FulltextQueryer.sub_special_char(s) for s in tk_syns]
|
||||
if len(keywords) < 32:
|
||||
keywords.extend([s for s in tk_syns if s])
|
||||
tk_syns = [rag_tokenizer.fine_grained_tokenize(s) for s in tk_syns if s]
|
||||
@ -180,7 +180,7 @@ class FulltextQueryer:
|
||||
if len(keywords) >= 32:
|
||||
break
|
||||
|
||||
tk = FulltextQueryer.subSpecialChar(tk)
|
||||
tk = FulltextQueryer.sub_special_char(tk)
|
||||
if tk.find(" ") > 0:
|
||||
tk = '"%s"' % tk
|
||||
if tk_syns:
|
||||
@ -198,7 +198,7 @@ class FulltextQueryer:
|
||||
syns = " OR ".join(
|
||||
[
|
||||
'"%s"'
|
||||
% rag_tokenizer.tokenize(FulltextQueryer.subSpecialChar(s))
|
||||
% rag_tokenizer.tokenize(FulltextQueryer.sub_special_char(s))
|
||||
for s in syns
|
||||
]
|
||||
)
|
||||
@ -217,17 +217,17 @@ class FulltextQueryer:
|
||||
return None, keywords
|
||||
|
||||
def hybrid_similarity(self, avec, bvecs, atks, btkss, tkweight=0.3, vtweight=0.7):
|
||||
from sklearn.metrics.pairwise import cosine_similarity as CosineSimilarity
|
||||
from sklearn.metrics.pairwise import cosine_similarity
|
||||
import numpy as np
|
||||
|
||||
sims = CosineSimilarity([avec], bvecs)
|
||||
sims = cosine_similarity([avec], bvecs)
|
||||
tksim = self.token_similarity(atks, btkss)
|
||||
if np.sum(sims[0]) == 0:
|
||||
return np.array(tksim), tksim, sims[0]
|
||||
return np.array(sims[0]) * vtweight + np.array(tksim) * tkweight, tksim, sims[0]
|
||||
|
||||
def token_similarity(self, atks, btkss):
|
||||
def toDict(tks):
|
||||
def to_dict(tks):
|
||||
if isinstance(tks, str):
|
||||
tks = tks.split()
|
||||
d = defaultdict(int)
|
||||
@ -236,8 +236,8 @@ class FulltextQueryer:
|
||||
d[t] += c
|
||||
return d
|
||||
|
||||
atks = toDict(atks)
|
||||
btkss = [toDict(tks) for tks in btkss]
|
||||
atks = to_dict(atks)
|
||||
btkss = [to_dict(tks) for tks in btkss]
|
||||
return [self.similarity(atks, btks) for btks in btkss]
|
||||
|
||||
def similarity(self, qtwt, dtwt):
|
||||
@ -262,10 +262,10 @@ class FulltextQueryer:
|
||||
keywords = [f'"{k.strip()}"' for k in keywords]
|
||||
for tk, w in sorted(tks_w, key=lambda x: x[1] * -1)[:keywords_topn]:
|
||||
tk_syns = self.syn.lookup(tk)
|
||||
tk_syns = [FulltextQueryer.subSpecialChar(s) for s in tk_syns]
|
||||
tk_syns = [FulltextQueryer.sub_special_char(s) for s in tk_syns]
|
||||
tk_syns = [rag_tokenizer.fine_grained_tokenize(s) for s in tk_syns if s]
|
||||
tk_syns = [f"\"{s}\"" if s.find(" ") > 0 else s for s in tk_syns]
|
||||
tk = FulltextQueryer.subSpecialChar(tk)
|
||||
tk = FulltextQueryer.sub_special_char(tk)
|
||||
if tk.find(" ") > 0:
|
||||
tk = '"%s"' % tk
|
||||
if tk_syns:
|
||||
|
||||
@ -35,7 +35,7 @@ class RagTokenizer:
|
||||
def rkey_(self, line):
|
||||
return str(("DD" + (line[::-1].lower())).encode("utf-8"))[2:-1]
|
||||
|
||||
def loadDict_(self, fnm):
|
||||
def _load_dict(self, fnm):
|
||||
logging.info(f"[HUQIE]:Build trie from {fnm}")
|
||||
try:
|
||||
of = open(fnm, "r", encoding='utf-8')
|
||||
@ -85,18 +85,18 @@ class RagTokenizer:
|
||||
self.trie_ = datrie.Trie(string.printable)
|
||||
|
||||
# load data from dict file and save to trie file
|
||||
self.loadDict_(self.DIR_ + ".txt")
|
||||
self._load_dict(self.DIR_ + ".txt")
|
||||
|
||||
def loadUserDict(self, fnm):
|
||||
def load_user_dict(self, fnm):
|
||||
try:
|
||||
self.trie_ = datrie.Trie.load(fnm + ".trie")
|
||||
return
|
||||
except Exception:
|
||||
self.trie_ = datrie.Trie(string.printable)
|
||||
self.loadDict_(fnm)
|
||||
self._load_dict(fnm)
|
||||
|
||||
def addUserDict(self, fnm):
|
||||
self.loadDict_(fnm)
|
||||
def add_user_dict(self, fnm):
|
||||
self._load_dict(fnm)
|
||||
|
||||
def _strQ2B(self, ustring):
|
||||
"""Convert full-width characters to half-width characters"""
|
||||
@ -221,7 +221,7 @@ class RagTokenizer:
|
||||
logging.debug("[SC] {} {} {} {} {}".format(tks, len(tks), L, F, B / len(tks) + L + F))
|
||||
return tks, B / len(tks) + L + F
|
||||
|
||||
def sortTks_(self, tkslist):
|
||||
def _sort_tokens(self, tkslist):
|
||||
res = []
|
||||
for tfts in tkslist:
|
||||
tks, s = self.score_(tfts)
|
||||
@ -246,7 +246,7 @@ class RagTokenizer:
|
||||
|
||||
return " ".join(res)
|
||||
|
||||
def maxForward_(self, line):
|
||||
def _max_forward(self, line):
|
||||
res = []
|
||||
s = 0
|
||||
while s < len(line):
|
||||
@ -270,7 +270,7 @@ class RagTokenizer:
|
||||
|
||||
return self.score_(res)
|
||||
|
||||
def maxBackward_(self, line):
|
||||
def _max_backward(self, line):
|
||||
res = []
|
||||
s = len(line) - 1
|
||||
while s >= 0:
|
||||
@ -336,8 +336,8 @@ class RagTokenizer:
|
||||
continue
|
||||
|
||||
# use maxforward for the first time
|
||||
tks, s = self.maxForward_(L)
|
||||
tks1, s1 = self.maxBackward_(L)
|
||||
tks, s = self._max_forward(L)
|
||||
tks1, s1 = self._max_backward(L)
|
||||
if self.DEBUG:
|
||||
logging.debug("[FW] {} {}".format(tks, s))
|
||||
logging.debug("[BW] {} {}".format(tks1, s1))
|
||||
@ -369,7 +369,7 @@ class RagTokenizer:
|
||||
# backward tokens from_i to i are different from forward tokens from _j to j.
|
||||
tkslist = []
|
||||
self.dfs_("".join(tks[_j:j]), 0, [], tkslist)
|
||||
res.append(" ".join(self.sortTks_(tkslist)[0][0]))
|
||||
res.append(" ".join(self._sort_tokens(tkslist)[0][0]))
|
||||
|
||||
same = 1
|
||||
while i + same < len(tks1) and j + same < len(tks) and tks1[i + same] == tks[j + same]:
|
||||
@ -385,7 +385,7 @@ class RagTokenizer:
|
||||
assert "".join(tks1[_i:]) == "".join(tks[_j:])
|
||||
tkslist = []
|
||||
self.dfs_("".join(tks[_j:]), 0, [], tkslist)
|
||||
res.append(" ".join(self.sortTks_(tkslist)[0][0]))
|
||||
res.append(" ".join(self._sort_tokens(tkslist)[0][0]))
|
||||
|
||||
res = " ".join(res)
|
||||
logging.debug("[TKS] {}".format(self.merge_(res)))
|
||||
@ -413,7 +413,7 @@ class RagTokenizer:
|
||||
if len(tkslist) < 2:
|
||||
res.append(tk)
|
||||
continue
|
||||
stk = self.sortTks_(tkslist)[1][0]
|
||||
stk = self._sort_tokens(tkslist)[1][0]
|
||||
if len(stk) == len(tk):
|
||||
stk = tk
|
||||
else:
|
||||
@ -447,14 +447,13 @@ def is_number(s):
|
||||
|
||||
|
||||
def is_alphabet(s):
|
||||
if (s >= u'\u0041' and s <= u'\u005a') or (
|
||||
s >= u'\u0061' and s <= u'\u007a'):
|
||||
if (u'\u0041' <= s <= u'\u005a') or (u'\u0061' <= s <= u'\u007a'):
|
||||
return True
|
||||
else:
|
||||
return False
|
||||
|
||||
|
||||
def naiveQie(txt):
|
||||
def naive_qie(txt):
|
||||
tks = []
|
||||
for t in txt.split():
|
||||
if tks and re.match(r".*[a-zA-Z]$", tks[-1]
|
||||
@ -469,14 +468,14 @@ tokenize = tokenizer.tokenize
|
||||
fine_grained_tokenize = tokenizer.fine_grained_tokenize
|
||||
tag = tokenizer.tag
|
||||
freq = tokenizer.freq
|
||||
loadUserDict = tokenizer.loadUserDict
|
||||
addUserDict = tokenizer.addUserDict
|
||||
load_user_dict = tokenizer.load_user_dict
|
||||
add_user_dict = tokenizer.add_user_dict
|
||||
tradi2simp = tokenizer._tradi2simp
|
||||
strQ2B = tokenizer._strQ2B
|
||||
|
||||
if __name__ == '__main__':
|
||||
tknzr = RagTokenizer(debug=True)
|
||||
# huqie.addUserDict("/tmp/tmp.new.tks.dict")
|
||||
# huqie.add_user_dict("/tmp/tmp.new.tks.dict")
|
||||
tks = tknzr.tokenize(
|
||||
"哈哈哈哈哈哈哈哈哈哈哈哈哈哈哈哈哈哈哈哈哈哈哈哈哈哈哈哈哈哈哈哈哈哈哈哈哈哈哈哈哈哈哈哈哈哈哈哈哈哈哈哈哈哈哈哈哈哈哈哈哈哈哈哈哈哈哈哈哈哈哈哈哈哈哈哈")
|
||||
logging.info(tknzr.fine_grained_tokenize(tks))
|
||||
@ -506,7 +505,7 @@ if __name__ == '__main__':
|
||||
if len(sys.argv) < 2:
|
||||
sys.exit()
|
||||
tknzr.DEBUG = False
|
||||
tknzr.loadUserDict(sys.argv[1])
|
||||
tknzr.load_user_dict(sys.argv[1])
|
||||
of = open(sys.argv[2], "r")
|
||||
while True:
|
||||
line = of.readline()
|
||||
|
||||
@ -102,7 +102,7 @@ class Dealer:
|
||||
orderBy.asc("top_int")
|
||||
orderBy.desc("create_timestamp_flt")
|
||||
res = self.dataStore.search(src, [], filters, [], orderBy, offset, limit, idx_names, kb_ids)
|
||||
total = self.dataStore.getTotal(res)
|
||||
total = self.dataStore.get_total(res)
|
||||
logging.debug("Dealer.search TOTAL: {}".format(total))
|
||||
else:
|
||||
highlightFields = ["content_ltks", "title_tks"]
|
||||
@ -115,7 +115,7 @@ class Dealer:
|
||||
matchExprs = [matchText]
|
||||
res = self.dataStore.search(src, highlightFields, filters, matchExprs, orderBy, offset, limit,
|
||||
idx_names, kb_ids, rank_feature=rank_feature)
|
||||
total = self.dataStore.getTotal(res)
|
||||
total = self.dataStore.get_total(res)
|
||||
logging.debug("Dealer.search TOTAL: {}".format(total))
|
||||
else:
|
||||
matchDense = self.get_vector(qst, emb_mdl, topk, req.get("similarity", 0.1))
|
||||
@ -127,20 +127,20 @@ class Dealer:
|
||||
|
||||
res = self.dataStore.search(src, highlightFields, filters, matchExprs, orderBy, offset, limit,
|
||||
idx_names, kb_ids, rank_feature=rank_feature)
|
||||
total = self.dataStore.getTotal(res)
|
||||
total = self.dataStore.get_total(res)
|
||||
logging.debug("Dealer.search TOTAL: {}".format(total))
|
||||
|
||||
# If result is empty, try again with lower min_match
|
||||
if total == 0:
|
||||
if filters.get("doc_id"):
|
||||
res = self.dataStore.search(src, [], filters, [], orderBy, offset, limit, idx_names, kb_ids)
|
||||
total = self.dataStore.getTotal(res)
|
||||
total = self.dataStore.get_total(res)
|
||||
else:
|
||||
matchText, _ = self.qryr.question(qst, min_match=0.1)
|
||||
matchDense.extra_options["similarity"] = 0.17
|
||||
res = self.dataStore.search(src, highlightFields, filters, [matchText, matchDense, fusionExpr],
|
||||
orderBy, offset, limit, idx_names, kb_ids, rank_feature=rank_feature)
|
||||
total = self.dataStore.getTotal(res)
|
||||
total = self.dataStore.get_total(res)
|
||||
logging.debug("Dealer.search 2 TOTAL: {}".format(total))
|
||||
|
||||
for k in keywords:
|
||||
@ -153,17 +153,17 @@ class Dealer:
|
||||
kwds.add(kk)
|
||||
|
||||
logging.debug(f"TOTAL: {total}")
|
||||
ids = self.dataStore.getChunkIds(res)
|
||||
ids = self.dataStore.get_chunk_ids(res)
|
||||
keywords = list(kwds)
|
||||
highlight = self.dataStore.getHighlight(res, keywords, "content_with_weight")
|
||||
aggs = self.dataStore.getAggregation(res, "docnm_kwd")
|
||||
highlight = self.dataStore.get_highlight(res, keywords, "content_with_weight")
|
||||
aggs = self.dataStore.get_aggregation(res, "docnm_kwd")
|
||||
return self.SearchResult(
|
||||
total=total,
|
||||
ids=ids,
|
||||
query_vector=q_vec,
|
||||
aggregation=aggs,
|
||||
highlight=highlight,
|
||||
field=self.dataStore.getFields(res, src + ["_score"]),
|
||||
field=self.dataStore.get_fields(res, src + ["_score"]),
|
||||
keywords=keywords
|
||||
)
|
||||
|
||||
@ -347,7 +347,7 @@ class Dealer:
|
||||
## For rank feature(tag_fea) scores.
|
||||
rank_fea = self._rank_feature_scores(rank_feature, sres)
|
||||
|
||||
return tkweight * (np.array(tksim)+rank_fea) + vtweight * vtsim, tksim, vtsim
|
||||
return tkweight * np.array(tksim) + vtweight * vtsim + rank_fea, tksim, vtsim
|
||||
|
||||
def hybrid_similarity(self, ans_embd, ins_embd, ans, inst):
|
||||
return self.qryr.hybrid_similarity(ans_embd,
|
||||
@ -488,7 +488,7 @@ class Dealer:
|
||||
for p in range(offset, max_count, bs):
|
||||
es_res = self.dataStore.search(fields, [], condition, [], orderBy, p, bs, index_name(tenant_id),
|
||||
kb_ids)
|
||||
dict_chunks = self.dataStore.getFields(es_res, fields)
|
||||
dict_chunks = self.dataStore.get_fields(es_res, fields)
|
||||
for id, doc in dict_chunks.items():
|
||||
doc["id"] = id
|
||||
if dict_chunks:
|
||||
@ -501,11 +501,11 @@ class Dealer:
|
||||
if not self.dataStore.indexExist(index_name(tenant_id), kb_ids[0]):
|
||||
return []
|
||||
res = self.dataStore.search([], [], {}, [], OrderByExpr(), 0, 0, index_name(tenant_id), kb_ids, ["tag_kwd"])
|
||||
return self.dataStore.getAggregation(res, "tag_kwd")
|
||||
return self.dataStore.get_aggregation(res, "tag_kwd")
|
||||
|
||||
def all_tags_in_portion(self, tenant_id: str, kb_ids: list[str], S=1000):
|
||||
res = self.dataStore.search([], [], {}, [], OrderByExpr(), 0, 0, index_name(tenant_id), kb_ids, ["tag_kwd"])
|
||||
res = self.dataStore.getAggregation(res, "tag_kwd")
|
||||
res = self.dataStore.get_aggregation(res, "tag_kwd")
|
||||
total = np.sum([c for _, c in res])
|
||||
return {t: (c + 1) / (total + S) for t, c in res}
|
||||
|
||||
@ -513,7 +513,7 @@ class Dealer:
|
||||
idx_nm = index_name(tenant_id)
|
||||
match_txt = self.qryr.paragraph(doc["title_tks"] + " " + doc["content_ltks"], doc.get("important_kwd", []), keywords_topn)
|
||||
res = self.dataStore.search([], [], {}, [match_txt], OrderByExpr(), 0, 0, idx_nm, kb_ids, ["tag_kwd"])
|
||||
aggs = self.dataStore.getAggregation(res, "tag_kwd")
|
||||
aggs = self.dataStore.get_aggregation(res, "tag_kwd")
|
||||
if not aggs:
|
||||
return False
|
||||
cnt = np.sum([c for _, c in aggs])
|
||||
@ -529,7 +529,7 @@ class Dealer:
|
||||
idx_nms = [index_name(tid) for tid in tenant_ids]
|
||||
match_txt, _ = self.qryr.question(question, min_match=0.0)
|
||||
res = self.dataStore.search([], [], {}, [match_txt], OrderByExpr(), 0, 0, idx_nms, kb_ids, ["tag_kwd"])
|
||||
aggs = self.dataStore.getAggregation(res, "tag_kwd")
|
||||
aggs = self.dataStore.get_aggregation(res, "tag_kwd")
|
||||
if not aggs:
|
||||
return {}
|
||||
cnt = np.sum([c for _, c in aggs])
|
||||
@ -552,7 +552,7 @@ class Dealer:
|
||||
es_res = self.dataStore.search(["content_with_weight"], [], {"doc_id": doc_id, "toc_kwd": "toc"}, [], OrderByExpr(), 0, 128, idx_nms,
|
||||
kb_ids)
|
||||
toc = []
|
||||
dict_chunks = self.dataStore.getFields(es_res, ["content_with_weight"])
|
||||
dict_chunks = self.dataStore.get_fields(es_res, ["content_with_weight"])
|
||||
for _, doc in dict_chunks.items():
|
||||
try:
|
||||
toc.extend(json.loads(doc["content_with_weight"]))
|
||||
|
||||
@ -113,20 +113,20 @@ class Dealer:
|
||||
res.append(tk)
|
||||
return res
|
||||
|
||||
def tokenMerge(self, tks):
|
||||
def oneTerm(t): return len(t) == 1 or re.match(r"[0-9a-z]{1,2}$", t)
|
||||
def token_merge(self, tks):
|
||||
def one_term(t): return len(t) == 1 or re.match(r"[0-9a-z]{1,2}$", t)
|
||||
|
||||
res, i = [], 0
|
||||
while i < len(tks):
|
||||
j = i
|
||||
if i == 0 and oneTerm(tks[i]) and len(
|
||||
if i == 0 and one_term(tks[i]) and len(
|
||||
tks) > 1 and (len(tks[i + 1]) > 1 and not re.match(r"[0-9a-zA-Z]", tks[i + 1])): # 多 工位
|
||||
res.append(" ".join(tks[0:2]))
|
||||
i = 2
|
||||
continue
|
||||
|
||||
while j < len(
|
||||
tks) and tks[j] and tks[j] not in self.stop_words and oneTerm(tks[j]):
|
||||
tks) and tks[j] and tks[j] not in self.stop_words and one_term(tks[j]):
|
||||
j += 1
|
||||
if j - i > 1:
|
||||
if j - i < 5:
|
||||
@ -232,7 +232,7 @@ class Dealer:
|
||||
tw = list(zip(tks, wts))
|
||||
else:
|
||||
for tk in tks:
|
||||
tt = self.tokenMerge(self.pretoken(tk, True))
|
||||
tt = self.token_merge(self.pretoken(tk, True))
|
||||
idf1 = np.array([idf(freq(t), 10000000) for t in tt])
|
||||
idf2 = np.array([idf(df(t), 1000000000) for t in tt])
|
||||
wts = (0.3 * idf1 + 0.7 * idf2) * \
|
||||
|
||||
rag/raptor.py (154 changes)
@@ -15,27 +15,35 @@
#
|
||||
import logging
|
||||
import re
|
||||
import umap
|
||||
|
||||
import numpy as np
|
||||
from sklearn.mixture import GaussianMixture
|
||||
import trio
|
||||
import umap
|
||||
from sklearn.mixture import GaussianMixture
|
||||
|
||||
from api.db.services.task_service import has_canceled
|
||||
from common.connection_utils import timeout
|
||||
from common.exceptions import TaskCanceledException
|
||||
from common.token_utils import truncate
|
||||
from graphrag.utils import (
|
||||
get_llm_cache,
|
||||
chat_limiter,
|
||||
get_embed_cache,
|
||||
get_llm_cache,
|
||||
set_embed_cache,
|
||||
set_llm_cache,
|
||||
chat_limiter,
|
||||
)
|
||||
from common.token_utils import truncate
|
||||
|
||||
|
||||
class RecursiveAbstractiveProcessing4TreeOrganizedRetrieval:
|
||||
def __init__(
|
||||
self, max_cluster, llm_model, embd_model, prompt, max_token=512, threshold=0.1
|
||||
self,
|
||||
max_cluster,
|
||||
llm_model,
|
||||
embd_model,
|
||||
prompt,
|
||||
max_token=512,
|
||||
threshold=0.1,
|
||||
max_errors=3,
|
||||
):
|
||||
self._max_cluster = max_cluster
|
||||
self._llm_model = llm_model
|
||||
@ -43,31 +51,35 @@ class RecursiveAbstractiveProcessing4TreeOrganizedRetrieval:
|
||||
self._threshold = threshold
|
||||
self._prompt = prompt
|
||||
self._max_token = max_token
|
||||
self._max_errors = max(1, max_errors)
|
||||
self._error_count = 0
|
||||
|
||||
@timeout(60*20)
|
||||
@timeout(60 * 20)
|
||||
async def _chat(self, system, history, gen_conf):
|
||||
response = await trio.to_thread.run_sync(
|
||||
lambda: get_llm_cache(self._llm_model.llm_name, system, history, gen_conf)
|
||||
)
|
||||
cached = await trio.to_thread.run_sync(lambda: get_llm_cache(self._llm_model.llm_name, system, history, gen_conf))
|
||||
if cached:
|
||||
return cached
|
||||
|
||||
if response:
|
||||
return response
|
||||
response = await trio.to_thread.run_sync(
|
||||
lambda: self._llm_model.chat(system, history, gen_conf)
|
||||
)
|
||||
response = re.sub(r"^.*</think>", "", response, flags=re.DOTALL)
|
||||
if response.find("**ERROR**") >= 0:
|
||||
raise Exception(response)
|
||||
await trio.to_thread.run_sync(
|
||||
lambda: set_llm_cache(self._llm_model.llm_name, system, response, history, gen_conf)
|
||||
)
|
||||
return response
|
||||
last_exc = None
|
||||
for attempt in range(3):
|
||||
try:
|
||||
response = await trio.to_thread.run_sync(lambda: self._llm_model.chat(system, history, gen_conf))
|
||||
response = re.sub(r"^.*</think>", "", response, flags=re.DOTALL)
|
||||
if response.find("**ERROR**") >= 0:
|
||||
raise Exception(response)
|
||||
await trio.to_thread.run_sync(lambda: set_llm_cache(self._llm_model.llm_name, system, response, history, gen_conf))
|
||||
return response
|
||||
except Exception as exc:
|
||||
last_exc = exc
|
||||
logging.warning("RAPTOR LLM call failed on attempt %d/3: %s", attempt + 1, exc)
|
||||
if attempt < 2:
|
||||
await trio.sleep(1 + attempt)
|
||||
|
||||
raise last_exc if last_exc else Exception("LLM chat failed without exception")
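The rewritten `_chat` in rag/raptor.py now checks the LLM cache first and retries the model call up to three times, pausing a little longer before each retry and re-raising the last error. A minimal synchronous sketch of that retry shape (hypothetical helper name, not part of this diff):

```python
import logging
import time


def call_with_retries(fn, attempts: int = 3, base_delay: float = 1.0):
    """Call fn(); on failure wait a bit longer each time, then re-raise the last error."""
    last_exc = None
    for attempt in range(attempts):
        try:
            return fn()
        except Exception as exc:  # broad catch, mirroring the diff
            last_exc = exc
            logging.warning("LLM call failed on attempt %d/%d: %s", attempt + 1, attempts, exc)
            if attempt < attempts - 1:
                time.sleep(base_delay + attempt)
    raise last_exc if last_exc else RuntimeError("no attempts were made")


# Usage (illustrative): call_with_retries(lambda: llm.chat(system, history, gen_conf))
```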
|
||||
|
||||
@timeout(20)
|
||||
async def _embedding_encode(self, txt):
|
||||
response = await trio.to_thread.run_sync(
|
||||
lambda: get_embed_cache(self._embd_model.llm_name, txt)
|
||||
)
|
||||
response = await trio.to_thread.run_sync(lambda: get_embed_cache(self._embd_model.llm_name, txt))
|
||||
if response is not None:
|
||||
return response
|
||||
embds, _ = await trio.to_thread.run_sync(lambda: self._embd_model.encode([txt]))
|
||||
@ -82,7 +94,6 @@ class RecursiveAbstractiveProcessing4TreeOrganizedRetrieval:
|
||||
n_clusters = np.arange(1, max_clusters)
|
||||
bics = []
|
||||
for n in n_clusters:
|
||||
|
||||
if task_id:
|
||||
if has_canceled(task_id):
|
||||
logging.info(f"Task {task_id} cancelled during get optimal clusters.")
|
||||
@ -101,7 +112,7 @@ class RecursiveAbstractiveProcessing4TreeOrganizedRetrieval:
|
||||
layers = [(0, len(chunks))]
|
||||
start, end = 0, len(chunks)
|
||||
|
||||
@timeout(60*20)
|
||||
@timeout(60 * 20)
|
||||
async def summarize(ck_idx: list[int]):
|
||||
nonlocal chunks
|
||||
|
||||
@ -111,47 +122,50 @@ class RecursiveAbstractiveProcessing4TreeOrganizedRetrieval:
|
||||
raise TaskCanceledException(f"Task {task_id} was cancelled")
|
||||
|
||||
texts = [chunks[i][0] for i in ck_idx]
|
||||
len_per_chunk = int(
|
||||
(self._llm_model.max_length - self._max_token) / len(texts)
|
||||
)
|
||||
cluster_content = "\n".join(
|
||||
[truncate(t, max(1, len_per_chunk)) for t in texts]
|
||||
)
|
||||
async with chat_limiter:
|
||||
len_per_chunk = int((self._llm_model.max_length - self._max_token) / len(texts))
|
||||
cluster_content = "\n".join([truncate(t, max(1, len_per_chunk)) for t in texts])
|
||||
try:
|
||||
async with chat_limiter:
|
||||
if task_id and has_canceled(task_id):
|
||||
logging.info(f"Task {task_id} cancelled before RAPTOR LLM call.")
|
||||
raise TaskCanceledException(f"Task {task_id} was cancelled")
|
||||
|
||||
if task_id and has_canceled(task_id):
|
||||
logging.info(f"Task {task_id} cancelled before RAPTOR LLM call.")
|
||||
raise TaskCanceledException(f"Task {task_id} was cancelled")
|
||||
cnt = await self._chat(
|
||||
"You're a helpful assistant.",
|
||||
[
|
||||
{
|
||||
"role": "user",
|
||||
"content": self._prompt.format(cluster_content=cluster_content),
|
||||
}
|
||||
],
|
||||
{"max_tokens": max(self._max_token, 512)}, # fix issue: #10235
|
||||
)
|
||||
cnt = re.sub(
|
||||
"(······\n由于长度的原因,回答被截断了,要继续吗?|For the content length reason, it stopped, continue?)",
|
||||
"",
|
||||
cnt,
|
||||
)
|
||||
logging.debug(f"SUM: {cnt}")
|
||||
|
||||
cnt = await self._chat(
|
||||
"You're a helpful assistant.",
|
||||
[
|
||||
{
|
||||
"role": "user",
|
||||
"content": self._prompt.format(
|
||||
cluster_content=cluster_content
|
||||
),
|
||||
}
|
||||
],
|
||||
{"max_tokens": max(self._max_token, 512)}, # fix issue: #10235
|
||||
)
|
||||
cnt = re.sub(
|
||||
"(······\n由于长度的原因,回答被截断了,要继续吗?|For the content length reason, it stopped, continue?)",
|
||||
"",
|
||||
cnt,
|
||||
)
|
||||
logging.debug(f"SUM: {cnt}")
|
||||
if task_id and has_canceled(task_id):
|
||||
logging.info(f"Task {task_id} cancelled before RAPTOR embedding.")
|
||||
raise TaskCanceledException(f"Task {task_id} was cancelled")
|
||||
|
||||
if task_id and has_canceled(task_id):
|
||||
logging.info(f"Task {task_id} cancelled before RAPTOR embedding.")
|
||||
raise TaskCanceledException(f"Task {task_id} was cancelled")
|
||||
|
||||
embds = await self._embedding_encode(cnt)
|
||||
chunks.append((cnt, embds))
|
||||
embds = await self._embedding_encode(cnt)
|
||||
chunks.append((cnt, embds))
|
||||
except TaskCanceledException:
|
||||
raise
|
||||
except Exception as exc:
|
||||
self._error_count += 1
|
||||
warn_msg = f"[RAPTOR] Skip cluster ({len(ck_idx)} chunks) due to error: {exc}"
|
||||
logging.warning(warn_msg)
|
||||
if callback:
|
||||
callback(msg=warn_msg)
|
||||
if self._error_count >= self._max_errors:
|
||||
raise RuntimeError(f"RAPTOR aborted after {self._error_count} errors. Last error: {exc}") from exc
|
||||
|
||||
labels = []
|
||||
while end - start > 1:
|
||||
|
||||
if task_id:
|
||||
if has_canceled(task_id):
|
||||
logging.info(f"Task {task_id} cancelled during RAPTOR layer processing.")
|
||||
@ -161,11 +175,7 @@ class RecursiveAbstractiveProcessing4TreeOrganizedRetrieval:
|
||||
if len(embeddings) == 2:
|
||||
await summarize([start, start + 1])
|
||||
if callback:
|
||||
callback(
|
||||
msg="Cluster one layer: {} -> {}".format(
|
||||
end - start, len(chunks) - end
|
||||
)
|
||||
)
|
||||
callback(msg="Cluster one layer: {} -> {}".format(end - start, len(chunks) - end))
|
||||
labels.extend([0, 0])
|
||||
layers.append((end, len(chunks)))
|
||||
start = end
|
||||
@ -199,17 +209,11 @@ class RecursiveAbstractiveProcessing4TreeOrganizedRetrieval:
|
||||
|
||||
nursery.start_soon(summarize, ck_idx)
|
||||
|
||||
assert len(chunks) - end == n_clusters, "{} vs. {}".format(
|
||||
len(chunks) - end, n_clusters
|
||||
)
|
||||
assert len(chunks) - end == n_clusters, "{} vs. {}".format(len(chunks) - end, n_clusters)
|
||||
labels.extend(lbls)
|
||||
layers.append((end, len(chunks)))
|
||||
if callback:
|
||||
callback(
|
||||
msg="Cluster one layer: {} -> {}".format(
|
||||
end - start, len(chunks) - end
|
||||
)
|
||||
)
|
||||
callback(msg="Cluster one layer: {} -> {}".format(end - start, len(chunks) - end))
|
||||
start = end
|
||||
end = len(chunks)
|
||||
|
||||
|
||||
@ -28,7 +28,7 @@ def collect():
|
||||
logging.debug(doc_locations)
|
||||
if len(doc_locations) == 0:
|
||||
time.sleep(1)
|
||||
return
|
||||
return None
|
||||
return doc_locations
|
||||
|
||||
|
||||
|
||||
@ -359,7 +359,7 @@ async def build_chunks(task, progress_callback):
|
||||
task_canceled = has_canceled(task["id"])
|
||||
if task_canceled:
|
||||
progress_callback(-1, msg="Task has been canceled.")
|
||||
return
|
||||
return None
|
||||
if settings.retriever.tag_content(tenant_id, kb_ids, d, all_tags, topn_tags=topn_tags, S=S) and len(d[TAG_FLD]) > 0:
|
||||
examples.append({"content": d["content_with_weight"], TAG_FLD: d[TAG_FLD]})
|
||||
else:
|
||||
@ -417,6 +417,7 @@ def build_TOC(task, docs, progress_callback):
|
||||
d["page_num_int"] = [100000000]
|
||||
d["id"] = xxhash.xxh64((d["content_with_weight"] + str(d["doc_id"])).encode("utf-8", "surrogatepass")).hexdigest()
|
||||
return d
|
||||
return None
|
||||
|
||||
|
||||
def init_kb(row, vector_size: int):
|
||||
@ -441,7 +442,7 @@ async def embedding(docs, mdl, parser_config=None, callback=None):
|
||||
tk_count = 0
|
||||
if len(tts) == len(cnts):
|
||||
vts, c = await trio.to_thread.run_sync(lambda: mdl.encode(tts[0: 1]))
|
||||
tts = np.concatenate([vts[0] for _ in range(len(tts))], axis=0)
|
||||
tts = np.tile(vts[0], (len(cnts), 1))
|
||||
tk_count += c
|
||||
|
||||
@timeout(60)
|
||||
@ -464,8 +465,10 @@ async def embedding(docs, mdl, parser_config=None, callback=None):
|
||||
if not filename_embd_weight:
|
||||
filename_embd_weight = 0.1
|
||||
title_w = float(filename_embd_weight)
|
||||
vects = (title_w * tts + (1 - title_w) *
|
||||
cnts) if len(tts) == len(cnts) else cnts
|
||||
if tts.ndim == 2 and cnts.ndim == 2 and tts.shape == cnts.shape:
|
||||
vects = title_w * tts + (1 - title_w) * cnts
|
||||
else:
|
||||
vects = cnts
|
||||
|
||||
assert len(vects) == len(docs)
|
||||
vector_size = 0
|
||||
@ -648,6 +651,8 @@ async def run_raptor_for_kb(row, kb_parser_config, chat_mdl, embd_mdl, vector_si
|
||||
|
||||
res = []
|
||||
tk_count = 0
|
||||
max_errors = int(os.environ.get("RAPTOR_MAX_ERRORS", 3))
|
||||
|
||||
async def generate(chunks, did):
|
||||
nonlocal tk_count, res
|
||||
raptor = Raptor(
|
||||
@ -657,6 +662,7 @@ async def run_raptor_for_kb(row, kb_parser_config, chat_mdl, embd_mdl, vector_si
|
||||
raptor_config["prompt"],
|
||||
raptor_config["max_token"],
|
||||
raptor_config["threshold"],
|
||||
max_errors=max_errors,
|
||||
)
|
||||
original_length = len(chunks)
|
||||
chunks = await raptor(chunks, kb_parser_config["raptor"]["random_seed"], callback, row["id"])
|
||||
@ -719,7 +725,7 @@ async def insert_es(task_id, task_tenant_id, task_dataset_id, chunks, progress_c
|
||||
task_canceled = has_canceled(task_id)
|
||||
if task_canceled:
|
||||
progress_callback(-1, msg="Task has been canceled.")
|
||||
return
|
||||
return False
|
||||
if b % 128 == 0:
|
||||
progress_callback(prog=0.8 + 0.1 * (b + 1) / len(chunks), msg="")
|
||||
if doc_store_result:
|
||||
@ -737,7 +743,7 @@ async def insert_es(task_id, task_tenant_id, task_dataset_id, chunks, progress_c
|
||||
for chunk_id in chunk_ids:
|
||||
nursery.start_soon(delete_image, task_dataset_id, chunk_id)
|
||||
progress_callback(-1, msg=f"Chunk updates failed since task {task_id} is unknown.")
|
||||
return
|
||||
return False
|
||||
return True
|
||||
|
||||
|
||||
|
||||
@ -67,6 +67,8 @@ class RAGFlowAzureSpnBlob:
|
||||
logging.exception(f"Fail put {bucket}/{fnm}")
|
||||
self.__open__()
|
||||
time.sleep(1)
|
||||
return None
|
||||
return None
|
||||
|
||||
def rm(self, bucket, fnm):
|
||||
try:
|
||||
@ -84,7 +86,7 @@ class RAGFlowAzureSpnBlob:
|
||||
logging.exception(f"fail get {bucket}/{fnm}")
|
||||
self.__open__()
|
||||
time.sleep(1)
|
||||
return
|
||||
return None
|
||||
|
||||
def obj_exist(self, bucket, fnm):
|
||||
try:
|
||||
@ -102,4 +104,4 @@ class RAGFlowAzureSpnBlob:
|
||||
logging.exception(f"fail get {bucket}/{fnm}")
|
||||
self.__open__()
|
||||
time.sleep(1)
|
||||
return
|
||||
return None
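The storage connectors touched in this compare (Azure SPN blob here, MinIO, OSS and S3 below) share one pattern: on a failed read they log, rebuild the client, sleep for a second, and now return an explicit `None` rather than a bare `return`. A rough, self-contained sketch of that shape (hypothetical class, not the actual connector):

```python
import logging
import time


class BlobStoreSketch:
    """Illustrative only: the reopen-and-return-None shape used by the storage wrappers."""

    def __init__(self, backend: dict[str, bytes]):
        self._backend = backend
        self.__open__()

    def __open__(self):
        # Assumption: rebuilding the real SDK client would happen here.
        self._connected = True

    def get(self, bucket: str, fnm: str):
        try:
            return self._backend[f"{bucket}/{fnm}"]
        except Exception:
            logging.exception(f"fail get {bucket}/{fnm}")
            self.__open__()   # refresh the client so the next call starts clean
            time.sleep(1)
        return None           # explicit None instead of a bare `return`
```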
|
||||
@ -241,23 +241,23 @@ class DocStoreConnection(ABC):
|
||||
"""
|
||||
|
||||
@abstractmethod
|
||||
def getTotal(self, res):
|
||||
def get_total(self, res):
|
||||
raise NotImplementedError("Not implemented")
|
||||
|
||||
@abstractmethod
|
||||
def getChunkIds(self, res):
|
||||
def get_chunk_ids(self, res):
|
||||
raise NotImplementedError("Not implemented")
|
||||
|
||||
@abstractmethod
|
||||
def getFields(self, res, fields: list[str]) -> dict[str, dict]:
|
||||
def get_fields(self, res, fields: list[str]) -> dict[str, dict]:
|
||||
raise NotImplementedError("Not implemented")
|
||||
|
||||
@abstractmethod
|
||||
def getHighlight(self, res, keywords: list[str], fieldnm: str):
|
||||
def get_highlight(self, res, keywords: list[str], fieldnm: str):
|
||||
raise NotImplementedError("Not implemented")
|
||||
|
||||
@abstractmethod
|
||||
def getAggregation(self, res, fieldnm: str):
|
||||
def get_aggregation(self, res, fieldnm: str):
|
||||
raise NotImplementedError("Not implemented")
|
||||
|
||||
"""
|
||||
|
||||
@ -471,12 +471,12 @@ class ESConnection(DocStoreConnection):
|
||||
Helper functions for search result
|
||||
"""
|
||||
|
||||
def getTotal(self, res):
|
||||
def get_total(self, res):
|
||||
if isinstance(res["hits"]["total"], type({})):
|
||||
return res["hits"]["total"]["value"]
|
||||
return res["hits"]["total"]
|
||||
|
||||
def getChunkIds(self, res):
|
||||
def get_chunk_ids(self, res):
|
||||
return [d["_id"] for d in res["hits"]["hits"]]
|
||||
|
||||
def __getSource(self, res):
|
||||
@ -487,7 +487,7 @@ class ESConnection(DocStoreConnection):
|
||||
rr.append(d["_source"])
|
||||
return rr
|
||||
|
||||
def getFields(self, res, fields: list[str]) -> dict[str, dict]:
|
||||
def get_fields(self, res, fields: list[str]) -> dict[str, dict]:
|
||||
res_fields = {}
|
||||
if not fields:
|
||||
return {}
|
||||
@ -509,7 +509,7 @@ class ESConnection(DocStoreConnection):
|
||||
res_fields[d["id"]] = m
|
||||
return res_fields
|
||||
|
||||
def getHighlight(self, res, keywords: list[str], fieldnm: str):
|
||||
def get_highlight(self, res, keywords: list[str], fieldnm: str):
|
||||
ans = {}
|
||||
for d in res["hits"]["hits"]:
|
||||
hlts = d.get("highlight")
|
||||
@ -534,7 +534,7 @@ class ESConnection(DocStoreConnection):
|
||||
|
||||
return ans
|
||||
|
||||
def getAggregation(self, res, fieldnm: str):
|
||||
def get_aggregation(self, res, fieldnm: str):
|
||||
agg_field = "aggs_" + fieldnm
|
||||
if "aggregations" not in res or agg_field not in res["aggregations"]:
|
||||
return list()
|
||||
|
||||
@ -470,7 +470,7 @@ class InfinityConnection(DocStoreConnection):
|
||||
df_list.append(kb_res)
|
||||
self.connPool.release_conn(inf_conn)
|
||||
res = concat_dataframes(df_list, ["id"])
|
||||
res_fields = self.getFields(res, res.columns.tolist())
|
||||
res_fields = self.get_fields(res, res.columns.tolist())
|
||||
return res_fields.get(chunkId, None)
|
||||
|
||||
def insert(self, documents: list[dict], indexName: str, knowledgebaseId: str = None) -> list[str]:
|
||||
@ -599,7 +599,7 @@ class InfinityConnection(DocStoreConnection):
|
||||
col_to_remove = list(removeValue.keys())
|
||||
row_to_opt = table_instance.output(col_to_remove + ["id"]).filter(filter).to_df()
|
||||
logger.debug(f"INFINITY search table {str(table_name)}, filter {filter}, result: {str(row_to_opt[0])}")
|
||||
row_to_opt = self.getFields(row_to_opt, col_to_remove)
|
||||
row_to_opt = self.get_fields(row_to_opt, col_to_remove)
|
||||
for id, old_v in row_to_opt.items():
|
||||
for k, remove_v in removeValue.items():
|
||||
if remove_v in old_v[k]:
|
||||
@ -639,17 +639,17 @@ class InfinityConnection(DocStoreConnection):
|
||||
Helper functions for search result
|
||||
"""
|
||||
|
||||
def getTotal(self, res: tuple[pd.DataFrame, int] | pd.DataFrame) -> int:
|
||||
def get_total(self, res: tuple[pd.DataFrame, int] | pd.DataFrame) -> int:
|
||||
if isinstance(res, tuple):
|
||||
return res[1]
|
||||
return len(res)
|
||||
|
||||
def getChunkIds(self, res: tuple[pd.DataFrame, int] | pd.DataFrame) -> list[str]:
|
||||
def get_chunk_ids(self, res: tuple[pd.DataFrame, int] | pd.DataFrame) -> list[str]:
|
||||
if isinstance(res, tuple):
|
||||
res = res[0]
|
||||
return list(res["id"])
|
||||
|
||||
def getFields(self, res: tuple[pd.DataFrame, int] | pd.DataFrame, fields: list[str]) -> dict[str, dict]:
|
||||
def get_fields(self, res: tuple[pd.DataFrame, int] | pd.DataFrame, fields: list[str]) -> dict[str, dict]:
|
||||
if isinstance(res, tuple):
|
||||
res = res[0]
|
||||
if not fields:
|
||||
@ -690,7 +690,7 @@ class InfinityConnection(DocStoreConnection):
|
||||
|
||||
return res2.set_index("id").to_dict(orient="index")
|
||||
|
||||
def getHighlight(self, res: tuple[pd.DataFrame, int] | pd.DataFrame, keywords: list[str], fieldnm: str):
|
||||
def get_highlight(self, res: tuple[pd.DataFrame, int] | pd.DataFrame, keywords: list[str], fieldnm: str):
|
||||
if isinstance(res, tuple):
|
||||
res = res[0]
|
||||
ans = {}
|
||||
@ -732,7 +732,7 @@ class InfinityConnection(DocStoreConnection):
|
||||
ans[id] = txt
|
||||
return ans
|
||||
|
||||
def getAggregation(self, res: tuple[pd.DataFrame, int] | pd.DataFrame, fieldnm: str):
|
||||
def get_aggregation(self, res: tuple[pd.DataFrame, int] | pd.DataFrame, fieldnm: str):
|
||||
"""
|
||||
Manual aggregation for tag fields since Infinity doesn't provide native aggregation
|
||||
"""
|
||||
|
||||
@ -92,7 +92,7 @@ class RAGFlowMinio:
|
||||
logging.exception(f"Fail to get {bucket}/{filename}")
|
||||
self.__open__()
|
||||
time.sleep(1)
|
||||
return
|
||||
return None
|
||||
|
||||
def obj_exist(self, bucket, filename, tenant_id=None):
|
||||
try:
|
||||
@ -130,7 +130,7 @@ class RAGFlowMinio:
|
||||
logging.exception(f"Fail to get_presigned {bucket}/{fnm}:")
|
||||
self.__open__()
|
||||
time.sleep(1)
|
||||
return
|
||||
return None
|
||||
|
||||
def remove_bucket(self, bucket):
|
||||
try:
|
||||
|
||||
@ -62,8 +62,7 @@ class OpenDALStorage:
|
||||
|
||||
def health(self):
|
||||
bucket, fnm, binary = "txtxtxtxt1", "txtxtxtxt1", b"_t@@@1"
|
||||
r = self._operator.write(f"{bucket}/{fnm}", binary)
|
||||
return r
|
||||
return self._operator.write(f"{bucket}/{fnm}", binary)
|
||||
|
||||
def put(self, bucket, fnm, binary, tenant_id=None):
|
||||
self._operator.write(f"{bucket}/{fnm}", binary)
|
||||
|
||||
@ -455,12 +455,12 @@ class OSConnection(DocStoreConnection):
|
||||
Helper functions for search result
|
||||
"""
|
||||
|
||||
def getTotal(self, res):
|
||||
def get_total(self, res):
|
||||
if isinstance(res["hits"]["total"], type({})):
|
||||
return res["hits"]["total"]["value"]
|
||||
return res["hits"]["total"]
|
||||
|
||||
def getChunkIds(self, res):
|
||||
def get_chunk_ids(self, res):
|
||||
return [d["_id"] for d in res["hits"]["hits"]]
|
||||
|
||||
def __getSource(self, res):
|
||||
@ -471,7 +471,7 @@ class OSConnection(DocStoreConnection):
|
||||
rr.append(d["_source"])
|
||||
return rr
|
||||
|
||||
def getFields(self, res, fields: list[str]) -> dict[str, dict]:
|
||||
def get_fields(self, res, fields: list[str]) -> dict[str, dict]:
|
||||
res_fields = {}
|
||||
if not fields:
|
||||
return {}
|
||||
@ -490,7 +490,7 @@ class OSConnection(DocStoreConnection):
|
||||
res_fields[d["id"]] = m
|
||||
return res_fields
|
||||
|
||||
def getHighlight(self, res, keywords: list[str], fieldnm: str):
|
||||
def get_highlight(self, res, keywords: list[str], fieldnm: str):
|
||||
ans = {}
|
||||
for d in res["hits"]["hits"]:
|
||||
hlts = d.get("highlight")
|
||||
@ -515,7 +515,7 @@ class OSConnection(DocStoreConnection):
|
||||
|
||||
return ans
|
||||
|
||||
def getAggregation(self, res, fieldnm: str):
|
||||
def get_aggregation(self, res, fieldnm: str):
|
||||
agg_field = "aggs_" + fieldnm
|
||||
if "aggregations" not in res or agg_field not in res["aggregations"]:
|
||||
return list()
|
||||
|
||||
@ -141,7 +141,7 @@ class RAGFlowOSS:
|
||||
logging.exception(f"fail get {bucket}/{fnm}")
|
||||
self.__open__()
|
||||
time.sleep(1)
|
||||
return
|
||||
return None
|
||||
|
||||
@use_prefix_path
|
||||
@use_default_bucket
|
||||
@ -170,5 +170,5 @@ class RAGFlowOSS:
|
||||
logging.exception(f"fail get url {bucket}/{fnm}")
|
||||
self.__open__()
|
||||
time.sleep(1)
|
||||
return
|
||||
return None
|
||||
|
||||
|
||||
@ -104,12 +104,13 @@ class RedisDB:
|
||||
|
||||
if self.REDIS.get(a) == b:
|
||||
return True
|
||||
return False
|
||||
|
||||
def info(self):
|
||||
info = self.REDIS.info()
|
||||
return {
|
||||
'redis_version': info["redis_version"],
|
||||
'server_mode': info["server_mode"],
|
||||
'server_mode': info["server_mode"] if "server_mode" in info else info.get("redis_mode", ""),
|
||||
'used_memory': info["used_memory_human"],
|
||||
'total_system_memory': info["total_system_memory_human"],
|
||||
'mem_fragmentation_ratio': info["mem_fragmentation_ratio"],
|
||||
@ -124,7 +125,7 @@ class RedisDB:
|
||||
|
||||
def exist(self, k):
|
||||
if not self.REDIS:
|
||||
return
|
||||
return None
|
||||
try:
|
||||
return self.REDIS.exists(k)
|
||||
except Exception as e:
|
||||
@ -133,7 +134,7 @@ class RedisDB:
|
||||
|
||||
def get(self, k):
|
||||
if not self.REDIS:
|
||||
return
|
||||
return None
|
||||
try:
|
||||
return self.REDIS.get(k)
|
||||
except Exception as e:
|
||||
|
||||
@ -164,7 +164,7 @@ class RAGFlowS3:
|
||||
logging.exception(f"fail get {bucket}/{fnm}")
|
||||
self.__open__()
|
||||
time.sleep(1)
|
||||
return
|
||||
return None
|
||||
|
||||
@use_prefix_path
|
||||
@use_default_bucket
|
||||
@ -193,7 +193,7 @@ class RAGFlowS3:
|
||||
logging.exception(f"fail get url {bucket}/{fnm}")
|
||||
self.__open__()
|
||||
time.sleep(1)
|
||||
return
|
||||
return None
|
||||
|
||||
@use_default_bucket
|
||||
def rm_bucket(self, bucket, *args, **kwargs):
|
||||
|
||||
@ -16,14 +16,15 @@
|
||||
from concurrent.futures import ThreadPoolExecutor, as_completed
|
||||
|
||||
import pytest
|
||||
from common import create_dataset
|
||||
from configs import DATASET_NAME_LIMIT, INVALID_API_TOKEN
|
||||
from configs import DATASET_NAME_LIMIT, DEFAULT_PARSER_CONFIG, INVALID_API_TOKEN
|
||||
from hypothesis import example, given, settings
|
||||
from libs.auth import RAGFlowHttpApiAuth
|
||||
from utils import encode_avatar
|
||||
from utils.file_utils import create_image_file
|
||||
from utils.hypothesis_utils import valid_names
|
||||
from configs import DEFAULT_PARSER_CONFIG
|
||||
|
||||
from common import create_dataset
|
||||
|
||||
|
||||
@pytest.mark.usefixtures("clear_datasets")
|
||||
class TestAuthorization:
|
||||
@ -125,8 +126,8 @@ class TestDatasetCreate:
|
||||
assert res["code"] == 0, res
|
||||
|
||||
res = create_dataset(HttpApiAuth, payload)
|
||||
assert res["code"] == 103, res
|
||||
assert res["message"] == f"Dataset name '{name}' already exists", res
|
||||
assert res["code"] == 0, res
|
||||
assert res["data"]["name"] == name + "(1)", res
|
||||
|
||||
@pytest.mark.p3
|
||||
def test_name_case_insensitive(self, HttpApiAuth):
|
||||
@ -137,8 +138,8 @@ class TestDatasetCreate:
|
||||
|
||||
payload = {"name": name.lower()}
|
||||
res = create_dataset(HttpApiAuth, payload)
|
||||
assert res["code"] == 103, res
|
||||
assert res["message"] == f"Dataset name '{name.lower()}' already exists", res
|
||||
assert res["code"] == 0, res
|
||||
assert res["data"]["name"] == name.lower() + "(1)", res
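Both test suites (HTTP API here, SDK below) switch the duplicate-name expectation: creating a dataset whose name already exists, including a case-insensitive match, no longer returns error code 103 but succeeds with an auto-suffixed name such as `name(1)`. A hypothetical helper with the behaviour the assertions describe (the suffix format is inferred from the tests, not taken from the server code):

```python
def dedup_dataset_name(name: str, existing: list[str]) -> str:
    """Append "(1)", "(2)", ... until the name no longer collides, case-insensitively."""
    taken = {n.lower() for n in existing}
    if name.lower() not in taken:
        return name
    i = 1
    while f"{name}({i})".lower() in taken:
        i += 1
    return f"{name}({i})"


# dedup_dataset_name("Docs", ["docs"]) -> "Docs(1)"
```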
|
||||
|
||||
@pytest.mark.p2
|
||||
def test_avatar(self, HttpApiAuth, tmp_path):
|
||||
|
||||
@ -17,13 +17,13 @@ from concurrent.futures import ThreadPoolExecutor, as_completed
|
||||
from operator import attrgetter
|
||||
|
||||
import pytest
|
||||
from configs import DATASET_NAME_LIMIT, HOST_ADDRESS, INVALID_API_TOKEN
|
||||
from configs import DATASET_NAME_LIMIT, DEFAULT_PARSER_CONFIG, HOST_ADDRESS, INVALID_API_TOKEN
|
||||
from hypothesis import example, given, settings
|
||||
from ragflow_sdk import DataSet, RAGFlow
|
||||
from utils import encode_avatar
|
||||
from utils.file_utils import create_image_file
|
||||
from utils.hypothesis_utils import valid_names
|
||||
from configs import DEFAULT_PARSER_CONFIG
|
||||
|
||||
|
||||
@pytest.mark.usefixtures("clear_datasets")
|
||||
class TestAuthorization:
|
||||
@ -95,9 +95,8 @@ class TestDatasetCreate:
|
||||
payload = {"name": name}
|
||||
client.create_dataset(**payload)
|
||||
|
||||
with pytest.raises(Exception) as excinfo:
|
||||
client.create_dataset(**payload)
|
||||
assert str(excinfo.value) == f"Dataset name '{name}' already exists", str(excinfo.value)
|
||||
dataset = client.create_dataset(**payload)
|
||||
assert dataset.name == name + "(1)", str(dataset)
|
||||
|
||||
@pytest.mark.p3
|
||||
def test_name_case_insensitive(self, client):
|
||||
@ -106,9 +105,8 @@ class TestDatasetCreate:
|
||||
client.create_dataset(**payload)
|
||||
|
||||
payload = {"name": name.lower()}
|
||||
with pytest.raises(Exception) as excinfo:
|
||||
client.create_dataset(**payload)
|
||||
assert str(excinfo.value) == f"Dataset name '{name.lower()}' already exists", str(excinfo.value)
|
||||
dataset = client.create_dataset(**payload)
|
||||
assert dataset.name == name.lower() + "(1)", str(dataset)
|
||||
|
||||
@pytest.mark.p2
|
||||
def test_avatar(self, client, tmp_path):
|
||||
|
||||
uv.lock (10 changes, generated)
@@ -4892,6 +4892,14 @@ wheels = [
|
||||
{ url = "https://pypi.tuna.tsinghua.edu.cn/packages/80/28/2659c02301b9500751f8d42f9a6632e1508aa5120de5e43042b8b30f8d5d/pyopenssl-25.1.0-py3-none-any.whl", hash = "sha256:2b11f239acc47ac2e5aca04fd7fa829800aeee22a2eb30d744572a157bd8a1ab", size = 56771, upload-time = "2025-05-17T16:28:29.197Z" },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "pypandoc"
|
||||
version = "1.16"
|
||||
source = { registry = "https://pypi.tuna.tsinghua.edu.cn/simple" }
|
||||
wheels = [
|
||||
{ url = "https://pypi.tuna.tsinghua.edu.cn/packages/24/77/af1fc54740a0712988f9518e629d38edc7b8ffccd7549203f19c3d8a2db6/pypandoc-1.16-py3-none-any.whl", hash = "sha256:868f390d48388743e7a5885915cbbaa005dea36a825ecdfd571f8c523416c822", size = 19425, upload-time = "2025-11-08T15:44:38.429Z" },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "pyparsing"
|
||||
version = "3.2.3"
|
||||
@ -5292,6 +5300,7 @@ dependencies = [
|
||||
{ name = "pyicu" },
|
||||
{ name = "pymysql" },
|
||||
{ name = "pyodbc" },
|
||||
{ name = "pypandoc" },
|
||||
{ name = "pypdf" },
|
||||
{ name = "pypdf2" },
|
||||
{ name = "python-calamine" },
|
||||
@ -5447,6 +5456,7 @@ requires-dist = [
|
||||
{ name = "pyicu", specifier = ">=2.15.3,<3.0.0" },
|
||||
{ name = "pymysql", specifier = ">=1.1.1,<2.0.0" },
|
||||
{ name = "pyodbc", specifier = ">=5.2.0,<6.0.0" },
|
||||
{ name = "pypandoc", specifier = ">=1.16" },
|
||||
{ name = "pypdf", specifier = "==6.0.0" },
|
||||
{ name = "pypdf2", specifier = ">=3.0.1,<4.0.0" },
|
||||
{ name = "python-calamine", specifier = ">=0.4.0" },
|
||||
|
||||
@ -5,12 +5,14 @@ import {
|
||||
forwardRef,
|
||||
useCallback,
|
||||
useEffect,
|
||||
useRef,
|
||||
useState,
|
||||
} from 'react';
|
||||
import { useTranslation } from 'react-i18next';
|
||||
import { Avatar, AvatarFallback, AvatarImage } from './ui/avatar';
|
||||
import { Button } from './ui/button';
|
||||
import { Input } from './ui/input';
|
||||
import { Modal } from './ui/modal/modal';
|
||||
|
||||
type AvatarUploadProps = {
|
||||
value?: string;
|
||||
@ -22,14 +24,24 @@ export const AvatarUpload = forwardRef<HTMLInputElement, AvatarUploadProps>(
|
||||
function AvatarUpload({ value, onChange, tips }, ref) {
|
||||
const { t } = useTranslation();
|
||||
const [avatarBase64Str, setAvatarBase64Str] = useState(''); // Avatar Image base64
|
||||
const [isCropModalOpen, setIsCropModalOpen] = useState(false);
|
||||
const [imageToCrop, setImageToCrop] = useState<string | null>(null);
|
||||
const [cropArea, setCropArea] = useState({ x: 0, y: 0, size: 200 });
|
||||
const imageRef = useRef<HTMLImageElement>(null);
|
||||
const canvasRef = useRef<HTMLCanvasElement>(null);
|
||||
const containerRef = useRef<HTMLDivElement>(null);
|
||||
const isDraggingRef = useRef(false);
|
||||
const dragStartRef = useRef({ x: 0, y: 0 });
|
||||
const [imageScale, setImageScale] = useState(1);
|
||||
const [imageOffset, setImageOffset] = useState({ x: 0, y: 0 });
|
||||
|
||||
const handleChange: ChangeEventHandler<HTMLInputElement> = useCallback(
|
||||
async (ev) => {
|
||||
const file = ev.target?.files?.[0];
|
||||
if (/\.(jpg|jpeg|png|webp|bmp)$/i.test(file?.name ?? '')) {
|
||||
const str = await transformFile2Base64(file!);
|
||||
setAvatarBase64Str(str);
|
||||
onChange?.(str);
|
||||
const str = await transformFile2Base64(file!, 1000);
|
||||
setImageToCrop(str);
|
||||
setIsCropModalOpen(true);
|
||||
}
|
||||
ev.target.value = '';
|
||||
},
|
||||
@ -41,17 +53,209 @@ export const AvatarUpload = forwardRef<HTMLInputElement, AvatarUploadProps>(
|
||||
onChange?.('');
|
||||
}, [onChange]);
|
||||
|
||||
const handleCrop = useCallback(() => {
|
||||
if (!imageRef.current || !canvasRef.current) return;
|
||||
|
||||
const canvas = canvasRef.current;
|
||||
const ctx = canvas.getContext('2d');
|
||||
const image = imageRef.current;
|
||||
|
||||
if (!ctx) return;
|
||||
|
||||
// Set canvas size to 64x64 (avatar size)
|
||||
canvas.width = 64;
|
||||
canvas.height = 64;
|
||||
|
||||
// Draw cropped image on canvas
|
||||
ctx.drawImage(
|
||||
image,
|
||||
cropArea.x,
|
||||
cropArea.y,
|
||||
cropArea.size,
|
||||
cropArea.size,
|
||||
0,
|
||||
0,
|
||||
64,
|
||||
64,
|
||||
);
|
||||
|
||||
// Convert to base64
|
||||
const croppedImageBase64 = canvas.toDataURL('image/png');
|
||||
setAvatarBase64Str(croppedImageBase64);
|
||||
onChange?.(croppedImageBase64);
|
||||
setIsCropModalOpen(false);
|
||||
}, [cropArea, onChange]);
|
||||
|
||||
const handleCancelCrop = useCallback(() => {
|
||||
setIsCropModalOpen(false);
|
||||
setImageToCrop(null);
|
||||
}, []);
|
||||
|
||||
const initCropArea = useCallback(() => {
|
||||
if (!imageRef.current || !containerRef.current) return;
|
||||
|
||||
const image = imageRef.current;
|
||||
const container = containerRef.current;
|
||||
|
||||
// Calculate image scale to fit container
|
||||
const scale = Math.min(
|
||||
container.clientWidth / image.width,
|
||||
container.clientHeight / image.height,
|
||||
);
|
||||
setImageScale(scale);
|
||||
|
||||
// Calculate image offset to center it
|
||||
const scaledWidth = image.width * scale;
|
||||
const scaledHeight = image.height * scale;
|
||||
const offsetX = (container.clientWidth - scaledWidth) / 2;
|
||||
const offsetY = (container.clientHeight - scaledHeight) / 2;
|
||||
setImageOffset({ x: offsetX, y: offsetY });
|
||||
|
||||
// Initialize crop area to center of image
|
||||
const size = Math.min(scaledWidth, scaledHeight) * 0.8; // 80% of the smaller dimension
|
||||
const x = (image.width - size / scale) / 2;
|
||||
const y = (image.height - size / scale) / 2;
|
||||
|
||||
setCropArea({ x, y, size: size / scale });
|
||||
}, []);
|
||||
|
||||
const handleMouseMove = useCallback(
|
||||
(e: MouseEvent) => {
|
||||
if (
|
||||
!isDraggingRef.current ||
|
||||
!imageRef.current ||
|
||||
!containerRef.current
|
||||
)
|
||||
return;
|
||||
|
||||
const image = imageRef.current;
|
||||
const container = containerRef.current;
|
||||
const containerRect = container.getBoundingClientRect();
|
||||
|
||||
// Calculate mouse position relative to container
|
||||
const mouseX = e.clientX - containerRect.left;
|
||||
const mouseY = e.clientY - containerRect.top;
|
||||
|
||||
// Calculate mouse position relative to image
|
||||
const imageX = (mouseX - imageOffset.x) / imageScale;
|
||||
const imageY = (mouseY - imageOffset.y) / imageScale;
|
||||
|
||||
// Calculate new crop area position based on mouse movement
|
||||
let newX = imageX - dragStartRef.current.x;
|
||||
let newY = imageY - dragStartRef.current.y;
|
||||
|
||||
// Boundary checks
|
||||
newX = Math.max(0, Math.min(newX, image.width - cropArea.size));
|
||||
newY = Math.max(0, Math.min(newY, image.height - cropArea.size));
|
||||
|
||||
setCropArea((prev) => ({
|
||||
...prev,
|
||||
x: newX,
|
||||
y: newY,
|
||||
}));
|
||||
},
|
||||
[cropArea.size, imageScale, imageOffset],
|
||||
);
|
||||
|
||||
const handleMouseUp = useCallback(() => {
|
||||
isDraggingRef.current = false;
|
||||
document.removeEventListener('mousemove', handleMouseMove);
|
||||
document.removeEventListener('mouseup', handleMouseUp);
|
||||
}, [handleMouseMove]);
|
||||
|
||||
const handleMouseDown = useCallback(
|
||||
(e: React.MouseEvent) => {
|
||||
e.preventDefault();
|
||||
e.stopPropagation();
|
||||
isDraggingRef.current = true;
|
||||
if (imageRef.current && containerRef.current) {
|
||||
const container = containerRef.current;
|
||||
const containerRect = container.getBoundingClientRect();
|
||||
|
||||
// Calculate mouse position relative to container
|
||||
const mouseX = e.clientX - containerRect.left;
|
||||
const mouseY = e.clientY - containerRect.top;
|
||||
|
||||
// Calculate mouse position relative to image
|
||||
const imageX = (mouseX - imageOffset.x) / imageScale;
|
||||
const imageY = (mouseY - imageOffset.y) / imageScale;
|
||||
|
||||
// Store the offset between mouse position and crop area position
|
||||
dragStartRef.current = {
|
||||
x: imageX - cropArea.x,
|
||||
y: imageY - cropArea.y,
|
||||
};
|
||||
}
|
||||
document.addEventListener('mousemove', handleMouseMove);
|
||||
document.addEventListener('mouseup', handleMouseUp);
|
||||
},
|
||||
[cropArea, imageScale, imageOffset],
|
||||
);
|
||||
|
||||
const handleWheel = useCallback((e: React.WheelEvent) => {
|
||||
if (!imageRef.current) return;
|
||||
|
||||
e.preventDefault();
|
||||
const image = imageRef.current;
|
||||
const delta = e.deltaY > 0 ? 0.9 : 1.1; // Zoom factor
|
||||
|
||||
setCropArea((prev) => {
|
||||
const newSize = Math.max(
|
||||
20,
|
||||
Math.min(prev.size * delta, Math.min(image.width, image.height)),
|
||||
);
|
||||
|
||||
// Adjust position to keep crop area centered
|
||||
const centerRatioX = (prev.x + prev.size / 2) / image.width;
|
||||
const centerRatioY = (prev.y + prev.size / 2) / image.height;
|
||||
|
||||
const newX = centerRatioX * image.width - newSize / 2;
|
||||
const newY = centerRatioY * image.height - newSize / 2;
|
||||
|
||||
// Boundary checks
|
||||
const boundedX = Math.max(0, Math.min(newX, image.width - newSize));
|
||||
const boundedY = Math.max(0, Math.min(newY, image.height - newSize));
|
||||
|
||||
return {
|
||||
x: boundedX,
|
||||
y: boundedY,
|
||||
size: newSize,
|
||||
};
|
||||
});
|
||||
}, []);
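`handleWheel` rescales the square crop box around its current centre (deltaY > 0 shrinks by 0.9, otherwise grows by 1.1), enforces a 20 px floor, and clamps the result back inside the image. The same geometry written out as a small Python function for clarity; field names mirror the TypeScript state, and this is an illustration rather than shared code:

```python
def zoom_crop_area(x: float, y: float, size: float,
                   img_w: float, img_h: float, delta: float,
                   min_size: float = 20.0) -> tuple[float, float, float]:
    """Scale the square crop box by `delta`, keep its centre fixed, clamp it to the image."""
    new_size = max(min_size, min(size * delta, min(img_w, img_h)))

    # Keep the centre of the box where it was before scaling.
    cx = x + size / 2
    cy = y + size / 2
    new_x = cx - new_size / 2
    new_y = cy - new_size / 2

    # Clamp so the box stays fully inside the image.
    new_x = max(0.0, min(new_x, img_w - new_size))
    new_y = max(0.0, min(new_y, img_h - new_size))
    return new_x, new_y, new_size
```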
|
||||
|
||||
useEffect(() => {
|
||||
if (value) {
|
||||
setAvatarBase64Str(value);
|
||||
}
|
||||
}, [value]);
|
||||
|
||||
useEffect(() => {
|
||||
const container = containerRef.current;
|
||||
setTimeout(() => {
|
||||
console.log('container', container);
|
||||
// initCropArea();
|
||||
if (imageToCrop && container && isCropModalOpen) {
|
||||
container.addEventListener(
|
||||
'wheel',
|
||||
handleWheel as unknown as EventListener,
|
||||
{ passive: false },
|
||||
);
|
||||
return () => {
|
||||
container.removeEventListener(
|
||||
'wheel',
|
||||
handleWheel as unknown as EventListener,
|
||||
);
|
||||
};
|
||||
}
|
||||
}, 100);
|
||||
}, [handleWheel, containerRef.current]);
|
||||
|
||||
return (
|
||||
<div className="flex justify-start items-end space-x-2">
|
||||
<div className="relative group">
|
||||
{!avatarBase64Str ? (
|
||||
<div className="w-[64px] h-[64px] grid place-content-center border border-dashed bg-bg-input rounded-md">
|
||||
<div className="w-[64px] h-[64px] grid place-content-center border border-dashed bg-bg-input rounded-md">
|
||||
<div className="flex flex-col items-center">
|
||||
<Plus />
|
||||
<p>{t('common.upload')}</p>
|
||||
@ -60,7 +264,7 @@ export const AvatarUpload = forwardRef<HTMLInputElement, AvatarUploadProps>(
|
||||
) : (
|
||||
<div className="w-[64px] h-[64px] relative grid place-content-center">
|
||||
<Avatar className="w-[64px] h-[64px] rounded-md">
|
||||
<AvatarImage className=" block" src={avatarBase64Str} alt="" />
|
||||
<AvatarImage className="block" src={avatarBase64Str} alt="" />
|
||||
<AvatarFallback></AvatarFallback>
|
||||
</Avatar>
|
||||
<div className="absolute inset-0 bg-[#000]/20 group-hover:bg-[#000]/60">
|
||||
@ -93,6 +297,79 @@ export const AvatarUpload = forwardRef<HTMLInputElement, AvatarUploadProps>(
|
||||
<div className="margin-1 text-text-secondary">
|
||||
{tips ?? t('knowledgeConfiguration.photoTip')}
|
||||
</div>
|
||||
|
||||
{/* Crop Modal */}
|
||||
<Modal
|
||||
open={isCropModalOpen}
|
||||
onOpenChange={(open) => {
|
||||
setIsCropModalOpen(open);
|
||||
if (!open) {
|
||||
setImageToCrop(null);
|
||||
}
|
||||
}}
|
||||
title={t('setting.cropImage')}
|
||||
size="small"
|
||||
onCancel={handleCancelCrop}
|
||||
onOk={handleCrop}
|
||||
// footer={
|
||||
// <div className="flex justify-end space-x-2">
|
||||
// <Button variant="secondary" onClick={handleCancelCrop}>
|
||||
// {t('common.cancel')}
|
||||
// </Button>
|
||||
// <Button onClick={handleCrop}>{t('common.confirm')}</Button>
|
||||
// </div>
|
||||
// }
|
||||
>
|
||||
<div className="flex flex-col items-center p-4">
|
||||
{imageToCrop && (
|
||||
<div className="w-full">
|
||||
<div
|
||||
ref={containerRef}
|
||||
className="relative overflow-hidden border border-border rounded-md mx-auto bg-bg-card"
|
||||
style={{
|
||||
width: '300px',
|
||||
height: '300px',
|
||||
touchAction: 'none',
|
||||
}}
|
||||
// onWheel={handleWheel}
|
||||
>
|
||||
<img
|
||||
ref={imageRef}
|
||||
src={imageToCrop}
|
||||
alt="To crop"
|
||||
className="absolute block"
|
||||
style={{
|
||||
transform: `scale(${imageScale})`,
|
||||
transformOrigin: 'top left',
|
||||
left: `${imageOffset.x}px`,
|
||||
top: `${imageOffset.y}px`,
|
||||
}}
|
||||
onLoad={initCropArea}
|
||||
/>
|
||||
{imageRef.current && (
|
||||
<div
|
||||
className="absolute border-2 border-white border-dashed cursor-move"
|
||||
style={{
|
||||
left: `${imageOffset.x + cropArea.x * imageScale}px`,
|
||||
top: `${imageOffset.y + cropArea.y * imageScale}px`,
|
||||
width: `${cropArea.size * imageScale}px`,
|
||||
height: `${cropArea.size * imageScale}px`,
|
||||
boxShadow: '0 0 0 9999px rgba(0, 0, 0, 0.5)',
|
||||
}}
|
||||
onMouseDown={handleMouseDown}
|
||||
/>
|
||||
)}
|
||||
</div>
|
||||
<div className="flex justify-center mt-4">
|
||||
<p className="text-sm text-text-secondary">
|
||||
{t('setting.cropTip')}
|
||||
</p>
|
||||
</div>
|
||||
<canvas ref={canvasRef} className="hidden" />
|
||||
</div>
|
||||
)}
|
||||
</div>
|
||||
</Modal>
|
||||
</div>
|
||||
);
|
||||
},
|
||||
|
||||
@ -61,6 +61,12 @@ export interface FormFieldConfig {
|
||||
horizontal?: boolean;
|
||||
onChange?: (value: any) => void;
|
||||
tooltip?: React.ReactNode;
|
||||
customValidate?: (
|
||||
value: any,
|
||||
formValues: any,
|
||||
) => string | boolean | Promise<string | boolean>;
|
||||
dependencies?: string[];
|
||||
schema?: ZodSchema;
|
||||
}
|
||||
|
||||
// Component props interface
|
||||
@ -94,36 +100,40 @@ const generateSchema = (fields: FormFieldConfig[]): ZodSchema<any> => {
|
||||
let fieldSchema: ZodSchema;
|
||||
|
||||
// Create base validation schema based on field type
|
||||
switch (field.type) {
|
||||
case FormFieldType.Email:
|
||||
fieldSchema = z.string().email('Please enter a valid email address');
|
||||
break;
|
||||
case FormFieldType.Number:
|
||||
fieldSchema = z.coerce.number();
|
||||
if (field.validation?.min !== undefined) {
|
||||
fieldSchema = (fieldSchema as z.ZodNumber).min(
|
||||
field.validation.min,
|
||||
field.validation.message ||
|
||||
`Value cannot be less than ${field.validation.min}`,
|
||||
);
|
||||
}
|
||||
if (field.validation?.max !== undefined) {
|
||||
fieldSchema = (fieldSchema as z.ZodNumber).max(
|
||||
field.validation.max,
|
||||
field.validation.message ||
|
||||
`Value cannot be greater than ${field.validation.max}`,
|
||||
);
|
||||
}
|
||||
break;
|
||||
case FormFieldType.Checkbox:
|
||||
fieldSchema = z.boolean();
|
||||
break;
|
||||
case FormFieldType.Tag:
|
||||
fieldSchema = z.array(z.string());
|
||||
break;
|
||||
default:
|
||||
fieldSchema = z.string();
|
||||
break;
|
||||
if (field.schema) {
|
||||
fieldSchema = field.schema;
|
||||
} else {
|
||||
switch (field.type) {
|
||||
case FormFieldType.Email:
|
||||
fieldSchema = z.string().email('Please enter a valid email address');
|
||||
break;
|
||||
case FormFieldType.Number:
|
||||
fieldSchema = z.coerce.number();
|
||||
if (field.validation?.min !== undefined) {
|
||||
fieldSchema = (fieldSchema as z.ZodNumber).min(
|
||||
field.validation.min,
|
||||
field.validation.message ||
|
||||
`Value cannot be less than ${field.validation.min}`,
|
||||
);
|
||||
}
|
||||
if (field.validation?.max !== undefined) {
|
||||
fieldSchema = (fieldSchema as z.ZodNumber).max(
|
||||
field.validation.max,
|
||||
field.validation.message ||
|
||||
`Value cannot be greater than ${field.validation.max}`,
|
||||
);
|
||||
}
|
||||
break;
|
||||
case FormFieldType.Checkbox:
|
||||
fieldSchema = z.boolean();
|
||||
break;
|
||||
case FormFieldType.Tag:
|
||||
fieldSchema = z.array(z.string());
|
||||
break;
|
||||
default:
|
||||
fieldSchema = z.string();
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
// Handle required fields
|
||||
@ -300,10 +310,90 @@ const DynamicForm = {
|
||||
|
||||
// Initialize form
|
||||
const form = useForm<T>({
|
||||
resolver: zodResolver(schema),
|
||||
resolver: async (data, context, options) => {
|
||||
const zodResult = await zodResolver(schema)(data, context, options);
|
||||
|
||||
let combinedErrors = { ...zodResult.errors };
|
||||
|
||||
const fieldErrors: Record<string, { type: string; message: string }> =
|
||||
{};
|
||||
for (const field of fields) {
|
||||
if (field.customValidate && data[field.name] !== undefined) {
|
||||
try {
|
||||
const result = await field.customValidate(
|
||||
data[field.name],
|
||||
data,
|
||||
);
|
||||
if (typeof result === 'string') {
|
||||
fieldErrors[field.name] = {
|
||||
type: 'custom',
|
||||
message: result,
|
||||
};
|
||||
} else if (result === false) {
|
||||
fieldErrors[field.name] = {
|
||||
type: 'custom',
|
||||
message:
|
||||
field.validation?.message || `${field.label} is invalid`,
|
||||
};
|
||||
}
|
||||
} catch (error) {
|
||||
fieldErrors[field.name] = {
|
||||
type: 'custom',
|
||||
message:
|
||||
error instanceof Error
|
||||
? error.message
|
||||
: 'Validation failed',
|
||||
};
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
combinedErrors = {
|
||||
...combinedErrors,
|
||||
...fieldErrors,
|
||||
} as any;
|
||||
console.log('combinedErrors', combinedErrors);
|
||||
return {
|
||||
values: Object.keys(combinedErrors).length ? {} : data,
|
||||
errors: combinedErrors,
|
||||
} as any;
|
||||
},
|
||||
defaultValues,
|
||||
});
|
||||
|
||||
useEffect(() => {
|
||||
const dependencyMap: Record<string, string[]> = {};
|
||||
|
||||
fields.forEach((field) => {
|
||||
if (field.dependencies && field.dependencies.length > 0) {
|
||||
field.dependencies.forEach((dep) => {
|
||||
if (!dependencyMap[dep]) {
|
||||
dependencyMap[dep] = [];
|
||||
}
|
||||
dependencyMap[dep].push(field.name);
|
||||
});
|
||||
}
|
||||
});
|
||||
|
||||
const subscriptions = Object.keys(dependencyMap).map((depField) => {
|
||||
return form.watch((values: any, { name }) => {
|
||||
if (name === depField && dependencyMap[depField]) {
|
||||
dependencyMap[depField].forEach((dependentField) => {
|
||||
form.trigger(dependentField as any);
|
||||
});
|
||||
}
|
||||
});
|
||||
});
|
||||
|
||||
return () => {
|
||||
subscriptions.forEach((sub) => {
|
||||
if (sub.unsubscribe) {
|
||||
sub.unsubscribe();
|
||||
}
|
||||
});
|
||||
};
|
||||
}, [fields, form]);
|
||||
|
||||
// Expose form methods via ref
|
||||
useImperativeHandle(ref, () => ({
|
||||
submit: () => form.handleSubmit(onSubmit)(),
|
||||
|
||||
@ -18,8 +18,10 @@ import { cn } from '@/lib/utils';
|
||||
import { AgentChatContext } from '@/pages/agent/context';
|
||||
import { WorkFlowTimeline } from '@/pages/agent/log-sheet/workflow-timeline';
|
||||
import { IMessage } from '@/pages/chat/interface';
|
||||
import { downloadFile } from '@/services/file-manager-service';
|
||||
import { downloadFileFromBlob } from '@/utils/file-util';
|
||||
import { isEmpty } from 'lodash';
|
||||
import { Atom, ChevronDown, ChevronUp } from 'lucide-react';
|
||||
import { Atom, ChevronDown, ChevronUp, Download } from 'lucide-react';
|
||||
import MarkdownContent from '../next-markdown-content';
|
||||
import { RAGFlowAvatar } from '../ragflow-avatar';
|
||||
import { useTheme } from '../theme-provider';
|
||||
@ -245,6 +247,32 @@ function MessageItem({
|
||||
{isUser && (
|
||||
<UploadedMessageFiles files={item.files}></UploadedMessageFiles>
|
||||
)}
|
||||
{isAssistant && item.attachment && item.attachment.doc_id && (
|
||||
<div className="w-full flex items-center justify-end">
|
||||
<Button
|
||||
variant="link"
|
||||
className="p-1 m-0 h-auto text-text-sub-title-invert"
|
||||
onClick={async () => {
|
||||
if (item.attachment?.doc_id) {
|
||||
try {
|
||||
const response = await downloadFile({
|
||||
docId: item.attachment.doc_id,
|
||||
ext: item.attachment.format,
|
||||
});
|
||||
const blob = new Blob([response.data], {
|
||||
type: response.data.type,
|
||||
});
|
||||
downloadFileFromBlob(blob, item.attachment.file_name);
|
||||
} catch (error) {
|
||||
console.error('Download failed:', error);
|
||||
}
|
||||
}
|
||||
}}
|
||||
>
|
||||
<Download size={16} />
|
||||
</Button>
|
||||
</div>
|
||||
)}
|
||||
</section>
|
||||
</div>
|
||||
</section>
|
||||
|
||||
@ -51,6 +51,7 @@ export interface SegmentedProps
|
||||
direction?: 'ltr' | 'rtl';
|
||||
motionName?: string;
|
||||
activeClassName?: string;
|
||||
itemClassName?: string;
|
||||
rounded?: keyof typeof segmentedVariants.round;
|
||||
sizeType?: keyof typeof segmentedVariants.size;
|
||||
buttonSize?: keyof typeof segmentedVariants.buttonSize;
|
||||
@ -62,6 +63,7 @@ export function Segmented({
|
||||
onChange,
|
||||
className,
|
||||
activeClassName,
|
||||
itemClassName,
|
||||
rounded = 'default',
|
||||
sizeType = 'default',
|
||||
buttonSize = 'default',
|
||||
@ -92,12 +94,13 @@ export function Segmented({
|
||||
<div
|
||||
key={actualValue}
|
||||
className={cn(
|
||||
'inline-flex items-center text-base font-normal cursor-pointer',
|
||||
'inline-flex items-center text-base font-normal cursor-pointer',
|
||||
segmentedVariants.round[rounded],
|
||||
segmentedVariants.buttonSize[buttonSize],
|
||||
{
|
||||
'text-text-primary bg-bg-base': selectedValue === actualValue,
|
||||
},
|
||||
itemClassName,
|
||||
activeClassName && selectedValue === actualValue
|
||||
? activeClassName
|
||||
: '',
|
||||
|
||||
@ -109,6 +109,7 @@ export enum Operator {
|
||||
SearXNG = 'SearXNG',
|
||||
Placeholder = 'Placeholder',
|
||||
DataOperations = 'DataOperations',
|
||||
ListOperations = 'ListOperations',
|
||||
VariableAssigner = 'VariableAssigner',
|
||||
VariableAggregator = 'VariableAggregator',
|
||||
File = 'File', // pipeline
|
||||
|
||||
@ -44,9 +44,14 @@ export interface IInputData {
|
||||
inputs: Record<string, BeginQuery>;
|
||||
tips: string;
|
||||
}
|
||||
|
||||
export interface IAttachment {
|
||||
doc_id: string;
|
||||
format: string;
|
||||
file_name: string;
|
||||
}
|
||||
export interface IMessageData {
|
||||
content: string;
|
||||
outputs: any;
|
||||
start_to_think?: boolean;
|
||||
end_to_think?: boolean;
|
||||
}
|
||||
|
||||
@ -1,4 +1,5 @@
|
||||
import { MessageType } from '@/constants/chat';
|
||||
import { IAttachment } from '@/hooks/use-send-message';
|
||||
|
||||
export interface PromptConfig {
|
||||
empty_response: string;
|
||||
@ -97,6 +98,7 @@ export interface Message {
|
||||
data?: any;
|
||||
files?: File[];
|
||||
chatBoxId?: string;
|
||||
attachment?: IAttachment;
|
||||
}
|
||||
|
||||
export interface IReferenceChunk {
|
||||
@ -126,6 +128,7 @@ export interface IReferenceObject {
|
||||
|
||||
export interface IAnswer {
|
||||
answer: string;
|
||||
attachment?: IAttachment;
|
||||
reference?: IReference;
|
||||
conversationId?: string;
|
||||
prompt?: string;
|
||||
|
||||
@ -694,6 +694,9 @@ This auto-tagging feature enhances retrieval by adding another layer of domain-s
|
||||
tocEnhanceTip: ` During the parsing of the document, table of contents information was generated (see the 'Enable Table of Contents Extraction' option in the General method). This allows the large model to return table of contents items relevant to the user's query, thereby using these items to retrieve related chunks and apply weighting to these chunks during the sorting process. This approach is derived from mimicking the behavioral logic of how humans search for knowledge in books.`,
|
||||
},
|
||||
setting: {
|
||||
cropTip:
|
||||
'Drag the selection area to choose the cropping position of the image, and scroll to zoom in/out',
|
||||
cropImage: 'Crop image',
|
||||
selectModelPlaceholder: 'Select model',
|
||||
configureModelTitle: 'Configure model',
|
||||
confluenceIsCloudTip:
|
||||
@ -1006,6 +1009,9 @@ Example: general/v2/`,
|
||||
pleaseUploadAtLeastOneFile: 'Please upload at least one file',
|
||||
},
|
||||
flow: {
|
||||
downloadFileTypeTip: 'The file type to download',
|
||||
downloadFileType: 'Download file type',
|
||||
formatTypeError: 'Format or type error',
|
||||
variableNameMessage:
|
||||
'Variable name can only contain letters and underscores',
|
||||
variableDescription: 'Variable Description',
|
||||
@ -1587,6 +1593,8 @@ This delimiter is used to split the input text into several text pieces echo of
|
||||
codeDescription: 'It allows developers to write custom Python logic.',
|
||||
dataOperations: 'Data operations',
|
||||
dataOperationsDescription: 'Perform various operations on a Data object.',
|
||||
listOperations: 'List operations',
|
||||
listOperationsDescription: 'Perform operations on a list.',
|
||||
variableAssigner: 'Variable assigner',
|
||||
variableAssignerDescription:
|
||||
'This component performs operations on Data objects, including extracting, filtering, and editing keys and values in the Data.',
|
||||
@ -1802,6 +1810,19 @@ Important structured information may include: names, dates, locations, events, k
|
||||
removeKeys: 'Remove keys',
|
||||
renameKeys: 'Rename keys',
|
||||
},
|
||||
ListOperationsOptions: {
|
||||
topN: 'Top N',
|
||||
head: 'Head',
|
||||
tail: 'Tail',
|
||||
sort: 'Sort',
|
||||
filter: 'Filter',
|
||||
dropDuplicates: 'Drop duplicates',
|
||||
},
|
||||
sortMethod: 'Sort method',
|
||||
SortMethodOptions: {
|
||||
asc: 'Ascending',
|
||||
desc: 'Descending',
|
||||
},
|
||||
},
|
||||
llmTools: {
|
||||
bad_calculator: {
|
||||
|
||||
@ -684,6 +684,8 @@ General:实体和关系提取提示来自 GitHub - microsoft/graphrag:基于
|
||||
tocEnhanceTip: `解析文档时生成了目录信息(见General方法的‘启用目录抽取’),让大模型返回和用户问题相关的目录项,从而利用目录项拿到相关chunk,对这些chunk在排序中进行加权。这种方法来源于模仿人类查询书本中知识的行为逻辑`,
|
||||
},
|
||||
setting: {
|
||||
cropTip: '拖动选区可以选择要图片的裁剪位置,滚动可以放大/缩小选区',
|
||||
cropImage: '剪裁图片',
|
||||
selectModelPlaceholder: '请选择模型',
|
||||
configureModelTitle: '配置模型',
|
||||
confluenceIsCloudTip:
|
||||
@ -954,6 +956,9 @@ General:实体和关系提取提示来自 GitHub - microsoft/graphrag:基于
|
||||
pleaseUploadAtLeastOneFile: '请上传至少一个文件',
|
||||
},
|
||||
flow: {
|
||||
downloadFileTypeTip: '文件下载的类型',
|
||||
downloadFileType: '文件类型',
|
||||
formatTypeError: '格式或类型错误',
|
||||
variableNameMessage: '名称只能包含字母和下划线',
|
||||
variableDescription: '变量的描述',
|
||||
defaultValue: '默认值',
|
||||
@ -1505,6 +1510,8 @@ General:实体和关系提取提示来自 GitHub - microsoft/graphrag:基于
|
||||
codeDescription: '它允许开发人员编写自定义 Python 逻辑。',
|
||||
dataOperations: '数据操作',
|
||||
dataOperationsDescription: '对数据对象执行各种操作。',
|
||||
listOperations: '列表操作',
|
||||
listOperationsDescription: '对列表对象执行各种操作。',
|
||||
variableAssigner: '变量赋值器',
|
||||
variableAssignerDescription:
|
||||
'此组件对数据对象执行操作,包括提取、筛选和编辑数据中的键和值。',
|
||||
@ -1676,6 +1683,19 @@ Tokenizer 会根据所选方式将内容存储为对应的数据结构。`,
|
||||
removeKeys: '删除键',
|
||||
renameKeys: '重命名键',
|
||||
},
|
||||
ListOperationsOptions: {
|
||||
topN: '取前N项',
|
||||
head: '取前第N项',
|
||||
tail: '取后第N项',
|
||||
sort: '排序',
|
||||
filter: '筛选',
|
||||
dropDuplicates: '去重',
|
||||
},
|
||||
sortMethod: '排序方式',
|
||||
SortMethodOptions: {
|
||||
asc: '升序',
|
||||
desc: '降序',
|
||||
},
|
||||
},
|
||||
footer: {
|
||||
profile: 'All rights reserved @ React',
|
||||
|
||||
@ -61,6 +61,7 @@ import { FileNode } from './node/file-node';
|
||||
import { InvokeNode } from './node/invoke-node';
|
||||
import { IterationNode, IterationStartNode } from './node/iteration-node';
|
||||
import { KeywordNode } from './node/keyword-node';
|
||||
import { ListOperationsNode } from './node/list-operations-node';
|
||||
import { MessageNode } from './node/message-node';
|
||||
import NoteNode from './node/note-node';
|
||||
import ParserNode from './node/parser-node';
|
||||
@ -101,6 +102,7 @@ export const nodeTypes: NodeTypes = {
|
||||
splitterNode: SplitterNode,
|
||||
contextNode: ExtractorNode,
|
||||
dataOperationsNode: DataOperationsNode,
|
||||
listOperationsNode: ListOperationsNode,
|
||||
variableAssignerNode: VariableAssignerNode,
|
||||
variableAggregatorNode: VariableAggregatorNode,
|
||||
};
|
||||
|
||||
@ -79,6 +79,7 @@ export function AccordionOperators({
|
||||
Operator.Code,
|
||||
Operator.StringTransform,
|
||||
Operator.DataOperations,
|
||||
Operator.ListOperations,
|
||||
// Operator.VariableAssigner,
|
||||
Operator.VariableAggregator,
|
||||
]}
|
||||
|
||||
22
web/src/pages/agent/canvas/node/list-operations-node.tsx
Normal file
@ -0,0 +1,22 @@
import { BaseNode } from '@/interfaces/database/agent';
import { NodeProps } from '@xyflow/react';
import { camelCase } from 'lodash';
import { useTranslation } from 'react-i18next';
import { RagNode } from '.';
import { ListOperationsFormSchemaType } from '../../form/list-operations-form';
import { LabelCard } from './card';

export function ListOperationsNode({
  ...props
}: NodeProps<BaseNode<ListOperationsFormSchemaType>>) {
  const { data } = props;
  const { t } = useTranslation();

  return (
    <RagNode {...props}>
      <LabelCard>
        {t(`flow.ListOperationsOptions.${camelCase(data.form?.operations)}`)}
      </LabelCard>
    </RagNode>
  );
}
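The node label is looked up from the stored operation value rather than hard-coded. A minimal sketch of that lookup, assuming the form holds a raw ListOperations value such as 'drop_duplicates' (hypothetical sample data):

```ts
import { camelCase } from 'lodash';

// camelCase turns the stored enum value into the i18n key suffix, so the
// translation lookup becomes flow.ListOperationsOptions.dropDuplicates.
const operations = 'drop_duplicates'; // hypothetical form value
const labelKey = `flow.ListOperationsOptions.${camelCase(operations)}`;
console.log(labelKey); // -> "flow.ListOperationsOptions.dropDuplicates"
```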
@ -5,6 +5,7 @@ import {
|
||||
useSelectDerivedMessages,
|
||||
} from '@/hooks/logic-hooks';
|
||||
import {
|
||||
IAttachment,
|
||||
IEventList,
|
||||
IInputEvent,
|
||||
IMessageEndData,
|
||||
@ -75,9 +76,13 @@ export function findMessageFromList(eventList: IEventList) {
    nextContent += '</think>';
  }

  const workflowFinished = eventList.find(
    (x) => x.event === MessageEventType.WorkflowFinished,
  ) as IMessageEvent;
  return {
    id: eventList[0]?.message_id,
    content: nextContent,
    attachment: workflowFinished?.data?.outputs?.attachment || {},
  };
}
@ -388,12 +393,13 @@ export const useSendAgentMessage = ({
|
||||
}, [sendMessageInTaskMode]);
|
||||
|
||||
useEffect(() => {
|
||||
const { content, id } = findMessageFromList(answerList);
|
||||
const { content, id, attachment } = findMessageFromList(answerList);
|
||||
const inputAnswer = findInputFromList(answerList);
|
||||
const answer = content || getLatestError(answerList);
|
||||
if (answerList.length > 0) {
|
||||
addNewestOneAnswer({
|
||||
answer: answer ?? '',
|
||||
attachment: attachment as IAttachment,
|
||||
id: id,
|
||||
...inputAnswer,
|
||||
});
|
||||
|
||||
@ -417,6 +417,7 @@ export const initialIterationValues = {
|
||||
items_ref: '',
|
||||
outputs: {},
|
||||
};
|
||||
|
||||
export const initialIterationStartValues = {
|
||||
outputs: {
|
||||
item: {
|
||||
@ -595,6 +596,35 @@ export const initialDataOperationsValues = {
|
||||
},
|
||||
},
|
||||
};
export enum SortMethod {
  Asc = 'asc',
  Desc = 'desc',
}

export enum ListOperations {
  TopN = 'topN',
  Head = 'head',
  Tail = 'tail',
  Filter = 'filter',
  Sort = 'sort',
  DropDuplicates = 'drop_duplicates',
}

export const initialListOperationsValues = {
  query: '',
  operations: ListOperations.TopN,
  outputs: {
    result: {
      type: 'Array<?>',
    },
    first: {
      type: '?',
    },
    last: {
      type: '?',
    },
  },
};

export const initialVariableAssignerValues = {};
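These enum values only name the operations; the operator itself runs in the RAGFlow backend and its implementation is not part of this front-end diff. A purely illustrative sketch of what the most self-explanatory values suggest for a plain array:

```ts
// Illustrative only: not the actual server-side implementation.
function topN<T>(items: T[], n: number): T[] {
  return items.slice(0, n);
}

function sortNumbers(items: number[], method: 'asc' | 'desc'): number[] {
  const sorted = [...items].sort((a, b) => a - b);
  return method === 'asc' ? sorted : sorted.reverse();
}

function dropDuplicates<T>(items: T[]): T[] {
  return Array.from(new Set(items));
}

topN([5, 1, 4, 1], 2);             // [5, 1]
sortNumbers([5, 1, 4, 1], 'desc'); // [5, 4, 1, 1]
dropDuplicates([5, 1, 4, 1]);      // [5, 1, 4]
```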
@ -673,6 +703,7 @@ export const RestrictedUpstreamMap = {
|
||||
[Operator.Tool]: [Operator.Begin],
|
||||
[Operator.Placeholder]: [Operator.Begin],
|
||||
[Operator.DataOperations]: [Operator.Begin],
|
||||
[Operator.ListOperations]: [Operator.Begin],
|
||||
[Operator.Parser]: [Operator.Begin], // pipeline
|
||||
[Operator.Splitter]: [Operator.Begin],
|
||||
[Operator.HierarchicalMerger]: [Operator.Begin],
|
||||
@ -729,6 +760,7 @@ export const NodeMap = {
|
||||
[Operator.HierarchicalMerger]: 'splitterNode',
|
||||
[Operator.Extractor]: 'contextNode',
|
||||
[Operator.DataOperations]: 'dataOperationsNode',
|
||||
[Operator.ListOperations]: 'listOperationsNode',
|
||||
[Operator.VariableAssigner]: 'variableAssignerNode',
|
||||
[Operator.VariableAggregator]: 'variableAggregatorNode',
|
||||
};
|
||||
@ -814,3 +846,10 @@ export enum JsonSchemaDataType {
  Array = 'array',
  Object = 'object',
}

export enum ExportFileType {
  PDF = 'pdf',
  HTML = 'html',
  Markdown = 'md',
  DOCX = 'docx',
}
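MessageForm, further down in this diff, builds its download-type select options directly from this enum. The same value/label mapping, shown standalone so the resulting options are visible:

```ts
enum ExportFileType {
  PDF = 'pdf',
  HTML = 'html',
  Markdown = 'md',
  DOCX = 'docx',
}

// Enum member names become labels, enum values become option values.
const options = Object.keys(ExportFileType).map((key) => ({
  value: ExportFileType[key as keyof typeof ExportFileType],
  label: key,
}));
// [{ value: 'pdf', label: 'PDF' }, { value: 'html', label: 'HTML' },
//  { value: 'md', label: 'Markdown' }, { value: 'docx', label: 'DOCX' }]
```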
@ -21,6 +21,7 @@ import IterationForm from '../form/iteration-form';
|
||||
import IterationStartForm from '../form/iteration-start-from';
|
||||
import Jin10Form from '../form/jin10-form';
|
||||
import KeywordExtractForm from '../form/keyword-extract-form';
|
||||
import ListOperationsForm from '../form/list-operations-form';
|
||||
import MessageForm from '../form/message-form';
|
||||
import ParserForm from '../form/parser-form';
|
||||
import PubMedForm from '../form/pubmed-form';
|
||||
@ -184,6 +185,9 @@ export const FormConfigMap = {
|
||||
[Operator.DataOperations]: {
|
||||
component: DataOperationsForm,
|
||||
},
|
||||
[Operator.ListOperations]: {
|
||||
component: ListOperationsForm,
|
||||
},
|
||||
[Operator.VariableAssigner]: {
|
||||
component: VariableAssignerForm,
|
||||
},
|
||||
|
||||
140
web/src/pages/agent/form/list-operations-form/index.tsx
Normal file
@ -0,0 +1,140 @@
|
||||
import NumberInput from '@/components/originui/number-input';
|
||||
import { SelectWithSearch } from '@/components/originui/select-with-search';
|
||||
import { RAGFlowFormItem } from '@/components/ragflow-form';
|
||||
import {
|
||||
Form,
|
||||
FormControl,
|
||||
FormField,
|
||||
FormItem,
|
||||
FormLabel,
|
||||
FormMessage,
|
||||
} from '@/components/ui/form';
|
||||
import { Separator } from '@/components/ui/separator';
|
||||
import { useBuildSwitchOperatorOptions } from '@/hooks/logic-hooks/use-build-operator-options';
|
||||
import { buildOptions } from '@/utils/form';
|
||||
import { zodResolver } from '@hookform/resolvers/zod';
|
||||
import { memo } from 'react';
|
||||
import { useForm, useWatch } from 'react-hook-form';
|
||||
import { useTranslation } from 'react-i18next';
|
||||
import { z } from 'zod';
|
||||
import {
|
||||
DataOperationsOperatorOptions,
|
||||
JsonSchemaDataType,
|
||||
ListOperations,
|
||||
SortMethod,
|
||||
initialListOperationsValues,
|
||||
} from '../../constant';
|
||||
import { useFormValues } from '../../hooks/use-form-values';
|
||||
import { useWatchFormChange } from '../../hooks/use-watch-form-change';
|
||||
import { INextOperatorForm } from '../../interface';
|
||||
import { buildOutputList } from '../../utils/build-output-list';
|
||||
import { FormWrapper } from '../components/form-wrapper';
|
||||
import { Output, OutputSchema } from '../components/output';
|
||||
import { PromptEditor } from '../components/prompt-editor';
|
||||
import { QueryVariable } from '../components/query-variable';
|
||||
|
||||
export const RetrievalPartialSchema = {
|
||||
query: z.string(),
|
||||
operations: z.string(),
|
||||
n: z.number().int().min(0).optional(),
|
||||
sort_method: z.string().optional(),
|
||||
filter: z
|
||||
.object({
|
||||
value: z.string().optional(),
|
||||
operator: z.string().optional(),
|
||||
})
|
||||
.optional(),
|
||||
...OutputSchema,
|
||||
};
|
||||
|
||||
export const FormSchema = z.object(RetrievalPartialSchema);
|
||||
|
||||
export type ListOperationsFormSchemaType = z.infer<typeof FormSchema>;
|
||||
|
||||
const outputList = buildOutputList(initialListOperationsValues.outputs);
|
||||
|
||||
function ListOperationsForm({ node }: INextOperatorForm) {
|
||||
const { t } = useTranslation();
|
||||
|
||||
const defaultValues = useFormValues(initialListOperationsValues, node);
|
||||
|
||||
const form = useForm<ListOperationsFormSchemaType>({
|
||||
defaultValues: defaultValues,
|
||||
mode: 'onChange',
|
||||
resolver: zodResolver(FormSchema),
|
||||
shouldUnregister: true,
|
||||
});
|
||||
|
||||
const operations = useWatch({ control: form.control, name: 'operations' });
|
||||
|
||||
const ListOperationsOptions = buildOptions(
|
||||
ListOperations,
|
||||
t,
|
||||
`flow.ListOperationsOptions`,
|
||||
true,
|
||||
);
|
||||
const SortMethodOptions = buildOptions(
|
||||
SortMethod,
|
||||
t,
|
||||
`flow.SortMethodOptions`,
|
||||
true,
|
||||
);
|
||||
const operatorOptions = useBuildSwitchOperatorOptions(
|
||||
DataOperationsOperatorOptions,
|
||||
);
|
||||
useWatchFormChange(node?.id, form, true);
|
||||
|
||||
return (
|
||||
<Form {...form}>
|
||||
<FormWrapper>
|
||||
<QueryVariable
|
||||
name="query"
|
||||
className="flex-1"
|
||||
types={[JsonSchemaDataType.Array]}
|
||||
></QueryVariable>
|
||||
<Separator />
|
||||
<RAGFlowFormItem name="operations" label={t('flow.operations')}>
|
||||
<SelectWithSearch options={ListOperationsOptions} />
|
||||
</RAGFlowFormItem>
|
||||
{[
|
||||
ListOperations.TopN,
|
||||
ListOperations.Head,
|
||||
ListOperations.Tail,
|
||||
].includes(operations as ListOperations) && (
|
||||
<FormField
|
||||
control={form.control}
|
||||
name="n"
|
||||
render={({ field }) => (
|
||||
<FormItem>
|
||||
<FormLabel>{t('flowNum')}</FormLabel>
|
||||
<FormControl>
|
||||
<NumberInput {...field} className="w-full"></NumberInput>
|
||||
</FormControl>
|
||||
<FormMessage />
|
||||
</FormItem>
|
||||
)}
|
||||
/>
|
||||
)}
|
||||
{[ListOperations.Sort].includes(operations as ListOperations) && (
|
||||
<RAGFlowFormItem name="sort_method" label={t('flow.sortMethod')}>
|
||||
<SelectWithSearch options={SortMethodOptions} />
|
||||
</RAGFlowFormItem>
|
||||
)}
|
||||
{[ListOperations.Filter].includes(operations as ListOperations) && (
|
||||
<div className="flex items-center gap-2">
|
||||
<RAGFlowFormItem name="filter.operator" className="flex-1">
|
||||
<SelectWithSearch options={operatorOptions}></SelectWithSearch>
|
||||
</RAGFlowFormItem>
|
||||
<Separator className="w-2" />
|
||||
<RAGFlowFormItem name="filter.value" className="flex-1">
|
||||
<PromptEditor showToolbar={false} multiLine={false} />
|
||||
</RAGFlowFormItem>
|
||||
</div>
|
||||
)}
|
||||
<Output list={outputList} isFormRequired></Output>
|
||||
</FormWrapper>
|
||||
</Form>
|
||||
);
|
||||
}
|
||||
|
||||
export default memo(ListOperationsForm);
|
||||
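The zod schema above drives the form's validation. A trimmed-down sketch of RetrievalPartialSchema (OutputSchema is omitted because its definition is not part of this diff; the sample values are hypothetical) showing which payloads pass:

```ts
import { z } from 'zod';

const Sketch = z.object({
  query: z.string(),
  operations: z.string(),
  n: z.number().int().min(0).optional(),
  sort_method: z.string().optional(),
  filter: z
    .object({ value: z.string().optional(), operator: z.string().optional() })
    .optional(),
});

// A Top N configuration only needs query, operations, and a non-negative n.
Sketch.parse({ query: 'docs', operations: 'topN', n: 3 }); // ok

// n must be a non-negative integer, so a negative value throws a ZodError.
// Sketch.parse({ query: 'docs', operations: 'topN', n: -1 });
```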
@ -8,12 +8,14 @@ import {
|
||||
FormLabel,
|
||||
FormMessage,
|
||||
} from '@/components/ui/form';
|
||||
import { RAGFlowSelect } from '@/components/ui/select';
|
||||
import { zodResolver } from '@hookform/resolvers/zod';
|
||||
import { X } from 'lucide-react';
|
||||
import { memo } from 'react';
|
||||
import { useFieldArray, useForm } from 'react-hook-form';
|
||||
import { useTranslation } from 'react-i18next';
|
||||
import { z } from 'zod';
|
||||
import { ExportFileType } from '../../constant';
|
||||
import { INextOperatorForm } from '../../interface';
|
||||
import { FormWrapper } from '../components/form-wrapper';
|
||||
import { PromptEditor } from '../components/prompt-editor';
|
||||
@ -33,10 +35,14 @@ function MessageForm({ node }: INextOperatorForm) {
|
||||
}),
|
||||
)
|
||||
.optional(),
|
||||
output_format: z.string().optional(),
|
||||
});
|
||||
|
||||
const form = useForm({
|
||||
defaultValues: values,
|
||||
defaultValues: {
|
||||
...values,
|
||||
output_format: values.output_format,
|
||||
},
|
||||
resolver: zodResolver(FormSchema),
|
||||
});
|
||||
|
||||
@ -50,6 +56,39 @@ function MessageForm({ node }: INextOperatorForm) {
|
||||
return (
|
||||
<Form {...form}>
|
||||
<FormWrapper>
|
||||
<FormContainer>
|
||||
<FormItem>
|
||||
<FormLabel tooltip={t('flow.downloadFileTypeTip')}>
|
||||
{t('flow.downloadFileType')}
|
||||
</FormLabel>
|
||||
<FormField
|
||||
control={form.control}
|
||||
name={`output_format`}
|
||||
render={({ field }) => (
|
||||
<FormItem className="flex-1">
|
||||
<FormControl>
|
||||
<RAGFlowSelect
|
||||
options={Object.keys(ExportFileType).map(
|
||||
(key: string) => {
|
||||
return {
|
||||
value:
|
||||
ExportFileType[
|
||||
key as keyof typeof ExportFileType
|
||||
],
|
||||
label: key,
|
||||
};
|
||||
},
|
||||
)}
|
||||
{...field}
|
||||
onValueChange={field.onChange}
|
||||
placeholder={t('flow.messagePlaceholder')}
|
||||
></RAGFlowSelect>
|
||||
</FormControl>
|
||||
</FormItem>
|
||||
)}
|
||||
/>
|
||||
</FormItem>
|
||||
</FormContainer>
|
||||
<FormContainer>
|
||||
<FormItem>
|
||||
<FormLabel tooltip={t('flow.msgTip')}>{t('flow.msg')}</FormLabel>
|
||||
|
||||
@ -1,7 +1,7 @@
|
||||
import { RAGFlowNodeType } from '@/interfaces/database/flow';
|
||||
import { isEmpty } from 'lodash';
|
||||
import { useMemo } from 'react';
|
||||
import { initialMessageValues } from '../../constant';
|
||||
import { ExportFileType, initialMessageValues } from '../../constant';
|
||||
import { convertToObjectArray } from '../../utils';
|
||||
|
||||
export function useValues(node?: RAGFlowNodeType) {
|
||||
@ -15,6 +15,7 @@ export function useValues(node?: RAGFlowNodeType) {
|
||||
return {
|
||||
...formData,
|
||||
content: convertToObjectArray(formData.content),
|
||||
output_format: formData.output_format || ExportFileType.PDF,
|
||||
};
|
||||
}, [node]);
|
||||
|
||||
|
||||
@ -0,0 +1,134 @@
|
||||
import {
|
||||
DynamicForm,
|
||||
DynamicFormRef,
|
||||
FormFieldConfig,
|
||||
} from '@/components/dynamic-form';
|
||||
import { Modal } from '@/components/ui/modal/modal';
|
||||
import { t } from 'i18next';
|
||||
import { useEffect, useRef } from 'react';
|
||||
import { FieldValues } from 'react-hook-form';
|
||||
import { TypeMaps, TypesWithArray } from '../constant';
|
||||
import { useHandleForm } from '../hooks/use-form';
|
||||
import { useObjectFields } from '../hooks/use-object-fields';
|
||||
|
||||
export const AddVariableModal = (props: {
|
||||
fields?: FormFieldConfig[];
|
||||
setFields: (value: any) => void;
|
||||
visible?: boolean;
|
||||
hideModal: () => void;
|
||||
defaultValues?: FieldValues;
|
||||
setDefaultValues?: (value: FieldValues) => void;
|
||||
}) => {
|
||||
const {
|
||||
fields,
|
||||
setFields,
|
||||
visible,
|
||||
hideModal,
|
||||
defaultValues,
|
||||
setDefaultValues,
|
||||
} = props;
|
||||
|
||||
const { handleSubmit: submitForm, loading } = useHandleForm();
|
||||
|
||||
const { handleCustomValidate, handleCustomSchema, handleRender } =
|
||||
useObjectFields();
|
||||
|
||||
const formRef = useRef<DynamicFormRef>(null);
|
||||
|
||||
const handleFieldUpdate = (
|
||||
fieldName: string,
|
||||
updatedField: Partial<FormFieldConfig>,
|
||||
) => {
|
||||
setFields((prevFields: any) =>
|
||||
prevFields.map((field: any) =>
|
||||
field.name === fieldName ? { ...field, ...updatedField } : field,
|
||||
),
|
||||
);
|
||||
};
|
||||
|
||||
useEffect(() => {
|
||||
const typeField = fields?.find((item) => item.name === 'type');
|
||||
|
||||
if (typeField) {
|
||||
typeField.onChange = (value) => {
|
||||
handleFieldUpdate('value', {
|
||||
type: TypeMaps[value as keyof typeof TypeMaps],
|
||||
render: handleRender(value),
|
||||
customValidate: handleCustomValidate(value),
|
||||
schema: handleCustomSchema(value),
|
||||
});
|
||||
const values = formRef.current?.getValues();
|
||||
// setTimeout(() => {
|
||||
switch (value) {
|
||||
case TypesWithArray.Boolean:
|
||||
setDefaultValues?.({ ...values, value: false });
|
||||
break;
|
||||
case TypesWithArray.Number:
|
||||
setDefaultValues?.({ ...values, value: 0 });
|
||||
break;
|
||||
case TypesWithArray.Object:
|
||||
setDefaultValues?.({ ...values, value: {} });
|
||||
break;
|
||||
case TypesWithArray.ArrayString:
|
||||
setDefaultValues?.({ ...values, value: [''] });
|
||||
break;
|
||||
case TypesWithArray.ArrayNumber:
|
||||
setDefaultValues?.({ ...values, value: [''] });
|
||||
break;
|
||||
case TypesWithArray.ArrayBoolean:
|
||||
setDefaultValues?.({ ...values, value: [false] });
|
||||
break;
|
||||
case TypesWithArray.ArrayObject:
|
||||
setDefaultValues?.({ ...values, value: [] });
|
||||
break;
|
||||
default:
|
||||
setDefaultValues?.({ ...values, value: '' });
|
||||
break;
|
||||
}
|
||||
// }, 0);
|
||||
};
|
||||
}
|
||||
}, [fields]);
|
||||
|
||||
const handleSubmit = async (fieldValue: FieldValues) => {
|
||||
await submitForm(fieldValue);
|
||||
hideModal();
|
||||
};
|
||||
|
||||
return (
|
||||
<Modal
|
||||
title={t('flow.add') + t('flow.conversationVariable')}
|
||||
open={visible || false}
|
||||
onCancel={hideModal}
|
||||
showfooter={false}
|
||||
>
|
||||
<DynamicForm.Root
|
||||
ref={formRef}
|
||||
fields={fields || []}
|
||||
onSubmit={(data) => {
|
||||
console.log(data);
|
||||
}}
|
||||
defaultValues={defaultValues}
|
||||
onFieldUpdate={handleFieldUpdate}
|
||||
>
|
||||
<div className="flex items-center justify-end w-full gap-2">
|
||||
<DynamicForm.CancelButton
|
||||
handleCancel={() => {
|
||||
hideModal?.();
|
||||
}}
|
||||
/>
|
||||
<DynamicForm.SavingButton
|
||||
submitLoading={loading || false}
|
||||
buttonText={t('common.ok')}
|
||||
submitFunc={(values: FieldValues) => {
|
||||
handleSubmit(values);
|
||||
// console.log(values);
|
||||
// console.log(nodes, edges);
|
||||
// handleOk(values);
|
||||
}}
|
||||
/>
|
||||
</div>
|
||||
</DynamicForm.Root>
|
||||
</Modal>
|
||||
);
|
||||
};
|
||||
@ -13,14 +13,14 @@ export enum TypesWithArray {
|
||||
String = 'string',
|
||||
Number = 'number',
|
||||
Boolean = 'boolean',
|
||||
// Object = 'object',
|
||||
// ArrayString = 'array<string>',
|
||||
// ArrayNumber = 'array<number>',
|
||||
// ArrayBoolean = 'array<boolean>',
|
||||
// ArrayObject = 'array<object>',
|
||||
Object = 'object',
|
||||
ArrayString = 'array<string>',
|
||||
ArrayNumber = 'array<number>',
|
||||
ArrayBoolean = 'array<boolean>',
|
||||
ArrayObject = 'array<object>',
|
||||
}
|
||||
|
||||
export const GobalFormFields = [
|
||||
export const GlobalFormFields = [
|
||||
{
|
||||
label: t('flow.name'),
|
||||
name: 'name',
|
||||
@ -50,11 +50,11 @@ export const GobalFormFields = [
|
||||
label: t('flow.description'),
|
||||
name: 'description',
|
||||
placeholder: t('flow.variableDescription'),
|
||||
type: 'textarea',
|
||||
type: FormFieldType.Textarea,
|
||||
},
|
||||
] as FormFieldConfig[];
|
||||
|
||||
export const GobalVariableFormDefaultValues = {
|
||||
export const GlobalVariableFormDefaultValues = {
|
||||
name: '',
|
||||
type: TypesWithArray.String,
|
||||
value: '',
|
||||
@ -65,9 +65,9 @@ export const TypeMaps = {
|
||||
[TypesWithArray.String]: FormFieldType.Textarea,
|
||||
[TypesWithArray.Number]: FormFieldType.Number,
|
||||
[TypesWithArray.Boolean]: FormFieldType.Checkbox,
|
||||
// [TypesWithArray.Object]: FormFieldType.Textarea,
|
||||
// [TypesWithArray.ArrayString]: FormFieldType.Textarea,
|
||||
// [TypesWithArray.ArrayNumber]: FormFieldType.Textarea,
|
||||
// [TypesWithArray.ArrayBoolean]: FormFieldType.Textarea,
|
||||
// [TypesWithArray.ArrayObject]: FormFieldType.Textarea,
|
||||
[TypesWithArray.Object]: FormFieldType.Textarea,
|
||||
[TypesWithArray.ArrayString]: FormFieldType.Textarea,
|
||||
[TypesWithArray.ArrayNumber]: FormFieldType.Textarea,
|
||||
[TypesWithArray.ArrayBoolean]: FormFieldType.Textarea,
|
||||
[TypesWithArray.ArrayObject]: FormFieldType.Textarea,
|
||||
};
|
||||
41
web/src/pages/agent/gobal-variable-sheet/hooks/use-form.tsx
Normal file
@ -0,0 +1,41 @@
|
||||
import { useFetchAgent } from '@/hooks/use-agent-request';
|
||||
import { GlobalVariableType } from '@/interfaces/database/agent';
|
||||
import { useCallback } from 'react';
|
||||
import { FieldValues } from 'react-hook-form';
|
||||
import { useSaveGraph } from '../../hooks/use-save-graph';
|
||||
import { TypesWithArray } from '../constant';
|
||||
|
||||
export const useHandleForm = () => {
|
||||
const { data, refetch } = useFetchAgent();
|
||||
const { saveGraph, loading } = useSaveGraph();
|
||||
const handleObjectData = (value: any) => {
|
||||
try {
|
||||
return JSON.parse(value);
|
||||
} catch (error) {
|
||||
return value;
|
||||
}
|
||||
};
|
||||
const handleSubmit = useCallback(async (fieldValue: FieldValues) => {
|
||||
const param = {
|
||||
...(data.dsl?.variables || {}),
|
||||
[fieldValue.name]: {
|
||||
...fieldValue,
|
||||
value:
|
||||
fieldValue.type === TypesWithArray.Object ||
|
||||
fieldValue.type === TypesWithArray.ArrayObject
|
||||
? handleObjectData(fieldValue.value)
|
||||
: fieldValue.value,
|
||||
},
|
||||
} as Record<string, GlobalVariableType>;
|
||||
|
||||
const res = await saveGraph(undefined, {
|
||||
globalVariables: param,
|
||||
});
|
||||
|
||||
if (res.code === 0) {
|
||||
refetch();
|
||||
}
|
||||
}, []);
|
||||
|
||||
return { handleSubmit, loading };
|
||||
};
|
||||
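The hook stores object and array&lt;object&gt; variable values as parsed JSON rather than as the raw editor text. A standalone sketch of that merge (variable names and values are hypothetical):

```ts
function handleObjectData(value: unknown) {
  try {
    return JSON.parse(value as string);
  } catch {
    return value; // unparsable input is kept as-is
  }
}

// Existing dsl.variables plus the newly submitted field, keyed by name.
const existing = { region: { name: 'region', type: 'string', value: 'eu' } };
const fieldValue = { name: 'limits', type: 'object', value: '{"maxDocs": 10}' };

const globalVariables = {
  ...existing,
  [fieldValue.name]: { ...fieldValue, value: handleObjectData(fieldValue.value) },
};
// globalVariables.limits.value is { maxDocs: 10 }, not a JSON string.
```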
@ -0,0 +1,246 @@
|
||||
import { BlockButton, Button } from '@/components/ui/button';
|
||||
import { Input } from '@/components/ui/input';
|
||||
import { Segmented } from '@/components/ui/segmented';
|
||||
import { Editor } from '@monaco-editor/react';
|
||||
import { t } from 'i18next';
|
||||
import { Trash2, X } from 'lucide-react';
|
||||
import { useCallback } from 'react';
|
||||
import { FieldValues } from 'react-hook-form';
|
||||
import { z } from 'zod';
|
||||
import { TypesWithArray } from '../constant';
|
||||
|
||||
export const useObjectFields = () => {
|
||||
const booleanRender = useCallback(
|
||||
(field: FieldValues, className?: string) => {
|
||||
const fieldValue = field.value ? true : false;
|
||||
return (
|
||||
<Segmented
|
||||
options={
|
||||
[
|
||||
{ value: true, label: 'True' },
|
||||
{ value: false, label: 'False' },
|
||||
] as any
|
||||
}
|
||||
sizeType="sm"
|
||||
value={fieldValue}
|
||||
onChange={field.onChange}
|
||||
className={className}
|
||||
itemClassName="justify-center flex-1"
|
||||
></Segmented>
|
||||
);
|
||||
},
|
||||
[],
|
||||
);
|
||||
|
||||
const objectRender = useCallback((field: FieldValues) => {
|
||||
const fieldValue =
|
||||
typeof field.value === 'object'
|
||||
? JSON.stringify(field.value, null, 2)
|
||||
: JSON.stringify({}, null, 2);
|
||||
console.log('object-render-field', field, fieldValue);
|
||||
return (
|
||||
<Editor
|
||||
height={200}
|
||||
defaultLanguage="json"
|
||||
theme="vs-dark"
|
||||
value={fieldValue}
|
||||
onChange={field.onChange}
|
||||
/>
|
||||
);
|
||||
}, []);
|
||||
|
||||
const objectValidate = useCallback((value: any) => {
|
||||
try {
|
||||
if (!JSON.parse(value)) {
|
||||
throw new Error(t('knowledgeDetails.formatTypeError'));
|
||||
}
|
||||
return true;
|
||||
} catch (e) {
|
||||
throw new Error(t('knowledgeDetails.formatTypeError'));
|
||||
}
|
||||
}, []);
|
||||
|
||||
const arrayStringRender = useCallback((field: FieldValues, type = 'text') => {
|
||||
const values = Array.isArray(field.value)
|
||||
? field.value
|
||||
: [type === 'number' ? 0 : ''];
|
||||
return (
|
||||
<>
|
||||
{values?.map((item: any, index: number) => (
|
||||
<div key={index} className="flex gap-1 items-center">
|
||||
<Input
|
||||
type={type}
|
||||
value={item}
|
||||
onChange={(e) => {
|
||||
const newValues = [...values];
|
||||
newValues[index] = e.target.value;
|
||||
field.onChange(newValues);
|
||||
}}
|
||||
/>
|
||||
<Button
|
||||
variant={'secondary'}
|
||||
onClick={() => {
|
||||
const newValues = [...values];
|
||||
newValues.splice(index, 1);
|
||||
field.onChange(newValues);
|
||||
}}
|
||||
>
|
||||
<Trash2 />
|
||||
</Button>
|
||||
</div>
|
||||
))}
|
||||
<BlockButton
|
||||
type="button"
|
||||
onClick={() => {
|
||||
field.onChange([...field.value, '']);
|
||||
}}
|
||||
>
|
||||
{t('flow.add')}
|
||||
</BlockButton>
|
||||
</>
|
||||
);
|
||||
}, []);
|
||||
|
||||
const arrayBooleanRender = useCallback(
|
||||
(field: FieldValues) => {
|
||||
// const values = field.value || [false];
|
||||
const values = Array.isArray(field.value) ? field.value : [false];
|
||||
return (
|
||||
<div className="flex items-center gap-1 flex-wrap ">
|
||||
{values?.map((item: any, index: number) => (
|
||||
<div
|
||||
key={index}
|
||||
className="flex gap-1 items-center bg-bg-card rounded-lg border-[0.5px] border-border-button"
|
||||
>
|
||||
{booleanRender(
|
||||
{
|
||||
value: item,
|
||||
onChange: (value) => {
|
||||
values[index] = !!value;
|
||||
field.onChange(values);
|
||||
},
|
||||
},
|
||||
'bg-transparent',
|
||||
)}
|
||||
<Button
|
||||
variant={'transparent'}
|
||||
className="border-none py-0 px-1"
|
||||
onClick={() => {
|
||||
const newValues = [...values];
|
||||
newValues.splice(index, 1);
|
||||
field.onChange(newValues);
|
||||
}}
|
||||
>
|
||||
<X />
|
||||
</Button>
|
||||
</div>
|
||||
))}
|
||||
<BlockButton
|
||||
className="w-auto"
|
||||
type="button"
|
||||
onClick={() => {
|
||||
field.onChange([...field.value, false]);
|
||||
}}
|
||||
>
|
||||
{t('flow.add')}
|
||||
</BlockButton>
|
||||
</div>
|
||||
);
|
||||
},
|
||||
[booleanRender],
|
||||
);
|
||||
|
||||
const arrayNumberRender = useCallback(
|
||||
(field: FieldValues) => {
|
||||
return arrayStringRender(field, 'number');
|
||||
},
|
||||
[arrayStringRender],
|
||||
);
|
||||
|
||||
const arrayValidate = useCallback((value: any, type: string = 'string') => {
|
||||
if (!Array.isArray(value) || !value.every((item) => typeof item === type)) {
|
||||
throw new Error(t('flow.formatTypeError'));
|
||||
}
|
||||
return true;
|
||||
}, []);
|
||||
|
||||
const arrayStringValidate = useCallback(
|
||||
(value: any) => {
|
||||
return arrayValidate(value, 'string');
|
||||
},
|
||||
[arrayValidate],
|
||||
);
|
||||
|
||||
const arrayNumberValidate = useCallback(
|
||||
(value: any) => {
|
||||
return arrayValidate(value, 'number');
|
||||
},
|
||||
[arrayValidate],
|
||||
);
|
||||
|
||||
const arrayBooleanValidate = useCallback(
|
||||
(value: any) => {
|
||||
return arrayValidate(value, 'boolean');
|
||||
},
|
||||
[arrayValidate],
|
||||
);
|
||||
|
||||
const handleRender = (value: TypesWithArray) => {
|
||||
switch (value) {
|
||||
case TypesWithArray.Boolean:
|
||||
return booleanRender;
|
||||
case TypesWithArray.Object:
|
||||
case TypesWithArray.ArrayObject:
|
||||
return objectRender;
|
||||
case TypesWithArray.ArrayString:
|
||||
return arrayStringRender;
|
||||
case TypesWithArray.ArrayNumber:
|
||||
return arrayNumberRender;
|
||||
case TypesWithArray.ArrayBoolean:
|
||||
return arrayBooleanRender;
|
||||
default:
|
||||
return undefined;
|
||||
}
|
||||
};
|
||||
const handleCustomValidate = (value: TypesWithArray) => {
|
||||
switch (value) {
|
||||
case TypesWithArray.Object:
|
||||
case TypesWithArray.ArrayObject:
|
||||
return objectValidate;
|
||||
case TypesWithArray.ArrayString:
|
||||
return arrayStringValidate;
|
||||
case TypesWithArray.ArrayNumber:
|
||||
return arrayNumberValidate;
|
||||
case TypesWithArray.ArrayBoolean:
|
||||
return arrayBooleanValidate;
|
||||
default:
|
||||
return undefined;
|
||||
}
|
||||
};
|
||||
const handleCustomSchema = (value: TypesWithArray) => {
|
||||
switch (value) {
|
||||
case TypesWithArray.ArrayString:
|
||||
return z.array(z.string());
|
||||
case TypesWithArray.ArrayNumber:
|
||||
return z.array(z.number());
|
||||
case TypesWithArray.ArrayBoolean:
|
||||
return z.array(z.boolean());
|
||||
default:
|
||||
return undefined;
|
||||
}
|
||||
};
|
||||
return {
|
||||
objectRender,
|
||||
objectValidate,
|
||||
arrayStringRender,
|
||||
arrayStringValidate,
|
||||
arrayNumberRender,
|
||||
booleanRender,
|
||||
arrayBooleanRender,
|
||||
arrayNumberValidate,
|
||||
arrayBooleanValidate,
|
||||
handleRender,
|
||||
handleCustomValidate,
|
||||
handleCustomSchema,
|
||||
};
|
||||
};
|
||||
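The array validators above all funnel into the same typeof check. A minimal standalone copy of that check (error text simplified), with examples of what passes and what throws:

```ts
function arrayValidate(value: unknown, type: string = 'string'): true {
  if (!Array.isArray(value) || !value.every((item) => typeof item === type)) {
    throw new Error('Format or type error');
  }
  return true;
}

arrayValidate(['a', 'b']);           // true
arrayValidate([1, 2, 3], 'number');  // true
// arrayValidate([1, 'two'], 'number'); // throws: mixed element types
```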
@ -1,12 +1,6 @@
|
||||
import { ConfirmDeleteDialog } from '@/components/confirm-delete-dialog';
|
||||
import {
|
||||
DynamicForm,
|
||||
DynamicFormRef,
|
||||
FormFieldConfig,
|
||||
FormFieldType,
|
||||
} from '@/components/dynamic-form';
|
||||
import { FormFieldConfig } from '@/components/dynamic-form';
|
||||
import { BlockButton, Button } from '@/components/ui/button';
|
||||
import { Modal } from '@/components/ui/modal/modal';
|
||||
import {
|
||||
Sheet,
|
||||
SheetContent,
|
||||
@ -19,117 +13,65 @@ import { GlobalVariableType } from '@/interfaces/database/agent';
|
||||
import { cn } from '@/lib/utils';
|
||||
import { t } from 'i18next';
|
||||
import { Trash2 } from 'lucide-react';
|
||||
import { useEffect, useRef, useState } from 'react';
|
||||
import { useState } from 'react';
|
||||
import { FieldValues } from 'react-hook-form';
|
||||
import { useSaveGraph } from '../hooks/use-save-graph';
|
||||
import { AddVariableModal } from './component/add-variable-modal';
|
||||
import {
|
||||
GobalFormFields,
|
||||
GobalVariableFormDefaultValues,
|
||||
GlobalFormFields,
|
||||
GlobalVariableFormDefaultValues,
|
||||
TypeMaps,
|
||||
TypesWithArray,
|
||||
} from './contant';
|
||||
} from './constant';
|
||||
import { useObjectFields } from './hooks/use-object-fields';
|
||||
|
||||
export type IGobalParamModalProps = {
|
||||
export type IGlobalParamModalProps = {
|
||||
data: any;
|
||||
hideModal: (open: boolean) => void;
|
||||
};
|
||||
export const GobalParamSheet = (props: IGobalParamModalProps) => {
|
||||
export const GlobalParamSheet = (props: IGlobalParamModalProps) => {
|
||||
const { hideModal } = props;
|
||||
const { data, refetch } = useFetchAgent();
|
||||
const [fields, setFields] = useState<FormFieldConfig[]>(GobalFormFields);
|
||||
const { visible, showModal, hideModal: hideAddModal } = useSetModalState();
|
||||
const [fields, setFields] = useState<FormFieldConfig[]>(GlobalFormFields);
|
||||
const [defaultValues, setDefaultValues] = useState<FieldValues>(
|
||||
GobalVariableFormDefaultValues,
|
||||
GlobalVariableFormDefaultValues,
|
||||
);
|
||||
const formRef = useRef<DynamicFormRef>(null);
|
||||
const { handleCustomValidate, handleCustomSchema, handleRender } =
|
||||
useObjectFields();
|
||||
const { saveGraph } = useSaveGraph();
|
||||
|
||||
const handleFieldUpdate = (
|
||||
fieldName: string,
|
||||
updatedField: Partial<FormFieldConfig>,
|
||||
) => {
|
||||
setFields((prevFields) =>
|
||||
prevFields.map((field) =>
|
||||
field.name === fieldName ? { ...field, ...updatedField } : field,
|
||||
),
|
||||
);
|
||||
};
|
||||
|
||||
useEffect(() => {
|
||||
const typefileld = fields.find((item) => item.name === 'type');
|
||||
|
||||
if (typefileld) {
|
||||
typefileld.onChange = (value) => {
|
||||
// setWatchType(value);
|
||||
handleFieldUpdate('value', {
|
||||
type: TypeMaps[value as keyof typeof TypeMaps],
|
||||
});
|
||||
const values = formRef.current?.getValues();
|
||||
setTimeout(() => {
|
||||
switch (value) {
|
||||
case TypesWithArray.Boolean:
|
||||
setDefaultValues({ ...values, value: false });
|
||||
break;
|
||||
case TypesWithArray.Number:
|
||||
setDefaultValues({ ...values, value: 0 });
|
||||
break;
|
||||
default:
|
||||
setDefaultValues({ ...values, value: '' });
|
||||
}
|
||||
}, 0);
|
||||
};
|
||||
}
|
||||
}, [fields]);
|
||||
|
||||
const { saveGraph, loading } = useSaveGraph();
|
||||
|
||||
const handleSubmit = async (value: FieldValues) => {
|
||||
const param = {
|
||||
...(data.dsl?.variables || {}),
|
||||
[value.name]: value,
|
||||
} as Record<string, GlobalVariableType>;
|
||||
|
||||
const res = await saveGraph(undefined, {
|
||||
gobalVariables: param,
|
||||
});
|
||||
|
||||
if (res.code === 0) {
|
||||
refetch();
|
||||
}
|
||||
hideAddModal();
|
||||
};
|
||||
|
||||
const handleDeleteGobalVariable = async (key: string) => {
|
||||
const handleDeleteGlobalVariable = async (key: string) => {
|
||||
const param = {
|
||||
...(data.dsl?.variables || {}),
|
||||
} as Record<string, GlobalVariableType>;
|
||||
delete param[key];
|
||||
const res = await saveGraph(undefined, {
|
||||
gobalVariables: param,
|
||||
globalVariables: param,
|
||||
});
|
||||
console.log('delete gobal variable-->', res);
|
||||
if (res.code === 0) {
|
||||
refetch();
|
||||
}
|
||||
};
|
||||
|
||||
const handleEditGobalVariable = (item: FieldValues) => {
|
||||
fields.forEach((field) => {
|
||||
if (field.name === 'value') {
|
||||
switch (item.type) {
|
||||
// [TypesWithArray.String]: FormFieldType.Textarea,
|
||||
// [TypesWithArray.Number]: FormFieldType.Number,
|
||||
// [TypesWithArray.Boolean]: FormFieldType.Checkbox,
|
||||
case TypesWithArray.Boolean:
|
||||
field.type = FormFieldType.Checkbox;
|
||||
break;
|
||||
case TypesWithArray.Number:
|
||||
field.type = FormFieldType.Number;
|
||||
break;
|
||||
default:
|
||||
field.type = FormFieldType.Textarea;
|
||||
}
|
||||
const handleEditGlobalVariable = (item: FieldValues) => {
|
||||
const newFields = fields.map((field) => {
|
||||
let newField = field;
|
||||
newField.render = undefined;
|
||||
newField.schema = undefined;
|
||||
newField.customValidate = undefined;
|
||||
if (newField.name === 'value') {
|
||||
newField = {
|
||||
...newField,
|
||||
type: TypeMaps[item.type as keyof typeof TypeMaps],
|
||||
render: handleRender(item.type),
|
||||
customValidate: handleCustomValidate(item.type),
|
||||
schema: handleCustomSchema(item.type),
|
||||
};
|
||||
}
|
||||
return newField;
|
||||
});
|
||||
setFields(newFields);
|
||||
setDefaultValues(item);
|
||||
showModal();
|
||||
};
|
||||
@ -149,8 +91,8 @@ export const GobalParamSheet = (props: IGobalParamModalProps) => {
|
||||
<div className="px-5 pb-5">
|
||||
<BlockButton
|
||||
onClick={() => {
|
||||
setFields(GobalFormFields);
|
||||
setDefaultValues(GobalVariableFormDefaultValues);
|
||||
setFields(GlobalFormFields);
|
||||
setDefaultValues(GlobalVariableFormDefaultValues);
|
||||
showModal();
|
||||
}}
|
||||
>
|
||||
@ -167,7 +109,7 @@ export const GobalParamSheet = (props: IGobalParamModalProps) => {
|
||||
key={key}
|
||||
className="flex items-center gap-3 min-h-14 justify-between px-5 py-3 border border-border-default rounded-lg hover:bg-bg-card group"
|
||||
onClick={() => {
|
||||
handleEditGobalVariable(item);
|
||||
handleEditGlobalVariable(item);
|
||||
}}
|
||||
>
|
||||
<div className="flex flex-col">
|
||||
@ -177,13 +119,23 @@ export const GobalParamSheet = (props: IGobalParamModalProps) => {
|
||||
{item.type}
|
||||
</span>
|
||||
</div>
|
||||
<div>
|
||||
<span className="text-text-primary">{item.value}</span>
|
||||
</div>
|
||||
{![
|
||||
TypesWithArray.Object,
|
||||
TypesWithArray.ArrayObject,
|
||||
TypesWithArray.ArrayString,
|
||||
TypesWithArray.ArrayNumber,
|
||||
TypesWithArray.ArrayBoolean,
|
||||
].includes(item.type as TypesWithArray) && (
|
||||
<div>
|
||||
<span className="text-text-primary">
|
||||
{item.value}
|
||||
</span>
|
||||
</div>
|
||||
)}
|
||||
</div>
|
||||
<div>
|
||||
<ConfirmDeleteDialog
|
||||
onOk={() => handleDeleteGobalVariable(key)}
|
||||
onOk={() => handleDeleteGlobalVariable(key)}
|
||||
>
|
||||
<Button
|
||||
variant={'secondary'}
|
||||
@ -201,40 +153,14 @@ export const GobalParamSheet = (props: IGobalParamModalProps) => {
|
||||
})}
|
||||
</div>
|
||||
</SheetContent>
|
||||
<Modal
|
||||
title={t('flow.add') + t('flow.conversationVariable')}
|
||||
open={visible}
|
||||
onCancel={hideAddModal}
|
||||
showfooter={false}
|
||||
>
|
||||
<DynamicForm.Root
|
||||
ref={formRef}
|
||||
fields={fields}
|
||||
onSubmit={(data) => {
|
||||
console.log(data);
|
||||
}}
|
||||
defaultValues={defaultValues}
|
||||
onFieldUpdate={handleFieldUpdate}
|
||||
>
|
||||
<div className="flex items-center justify-end w-full gap-2">
|
||||
<DynamicForm.CancelButton
|
||||
handleCancel={() => {
|
||||
hideAddModal?.();
|
||||
}}
|
||||
/>
|
||||
<DynamicForm.SavingButton
|
||||
submitLoading={loading || false}
|
||||
buttonText={t('common.ok')}
|
||||
submitFunc={(values: FieldValues) => {
|
||||
handleSubmit(values);
|
||||
// console.log(values);
|
||||
// console.log(nodes, edges);
|
||||
// handleOk(values);
|
||||
}}
|
||||
/>
|
||||
</div>
|
||||
</DynamicForm.Root>
|
||||
</Modal>
|
||||
<AddVariableModal
|
||||
visible={visible}
|
||||
hideModal={hideAddModal}
|
||||
fields={fields}
|
||||
setFields={setFields}
|
||||
defaultValues={defaultValues}
|
||||
setDefaultValues={setDefaultValues}
|
||||
/>
|
||||
</Sheet>
|
||||
</>
|
||||
);
|
||||
|
||||
@ -31,6 +31,7 @@ import {
|
||||
initialIterationValues,
|
||||
initialJin10Values,
|
||||
initialKeywordExtractValues,
|
||||
initialListOperationsValues,
|
||||
initialMessageValues,
|
||||
initialNoteValues,
|
||||
initialParserValues,
|
||||
@ -129,6 +130,7 @@ export const useInitializeOperatorParams = () => {
|
||||
prompts: t('flow.prompts.user.summary'),
|
||||
},
|
||||
[Operator.DataOperations]: initialDataOperationsValues,
|
||||
[Operator.ListOperations]: initialListOperationsValues,
|
||||
[Operator.VariableAssigner]: initialVariableAssignerValues,
|
||||
[Operator.VariableAggregator]: initialVariableAggregatorValues,
|
||||
};
|
||||
|
||||
@ -4,7 +4,7 @@ import { RAGFlowNodeType } from '@/interfaces/database/flow';
|
||||
import { useCallback } from 'react';
|
||||
import { Operator } from '../constant';
|
||||
import useGraphStore from '../store';
|
||||
import { buildDslComponentsByGraph, buildDslGobalVariables } from '../utils';
|
||||
import { buildDslComponentsByGraph, buildDslGlobalVariables } from '../utils';
|
||||
|
||||
export const useBuildDslData = () => {
|
||||
const { data } = useFetchAgent();
|
||||
@ -13,7 +13,7 @@ export const useBuildDslData = () => {
|
||||
const buildDslData = useCallback(
|
||||
(
|
||||
currentNodes?: RAGFlowNodeType[],
|
||||
otherParam?: { gobalVariables: Record<string, GlobalVariableType> },
|
||||
otherParam?: { globalVariables: Record<string, GlobalVariableType> },
|
||||
) => {
|
||||
const nodesToProcess = currentNodes ?? nodes;
|
||||
|
||||
@ -41,13 +41,13 @@ export const useBuildDslData = () => {
|
||||
data.dsl.components,
|
||||
);
|
||||
|
||||
const gobalVariables = buildDslGobalVariables(
|
||||
const globalVariables = buildDslGlobalVariables(
|
||||
data.dsl,
|
||||
otherParam?.gobalVariables,
|
||||
otherParam?.globalVariables,
|
||||
);
|
||||
return {
|
||||
...data.dsl,
|
||||
...gobalVariables,
|
||||
...globalVariables,
|
||||
graph: { nodes: filteredNodes, edges: filteredEdges },
|
||||
components: dslComponents,
|
||||
};
|
||||
|
||||
@ -21,7 +21,7 @@ export const useSaveGraph = (showMessage: boolean = true) => {
|
||||
const saveGraph = useCallback(
|
||||
async (
|
||||
currentNodes?: RAGFlowNodeType[],
|
||||
otherParam?: { gobalVariables: Record<string, GlobalVariableType> },
|
||||
otherParam?: { globalVariables: Record<string, GlobalVariableType> },
|
||||
) => {
|
||||
return setAgent({
|
||||
id,
|
||||
|
||||
Some files were not shown because too many files have changed in this diff.