diff --git a/admin/README.md b/admin/README.md new file mode 100644 index 000000000..c5e1c0b98 --- /dev/null +++ b/admin/README.md @@ -0,0 +1,101 @@ +# RAGFlow Admin Service & CLI + +### Introduction + +Admin Service is a dedicated management component designed to monitor, maintain, and administrate the RAGFlow system. It provides comprehensive tools for ensuring system stability, performing operational tasks, and managing users and permissions efficiently. + +The service offers real-time monitoring of critical components, including the RAGFlow server, Task Executor processes, and dependent services such as MySQL, Elasticsearch, Redis, and MinIO. It automatically checks their health status, resource usage, and uptime, and performs restarts in case of failures to minimize downtime. + +For user and system management, it supports listing, creating, modifying, and deleting users and their associated resources like knowledge bases and Agents. + +Built with scalability and reliability in mind, the Admin Service ensures smooth system operation and simplifies maintenance workflows. + +It consists of a server-side Service and a command-line client (CLI), both implemented in Python. User commands are parsed using the Lark parsing toolkit. + +- **Admin Service**: A backend service that interfaces with the RAGFlow system to execute administrative operations and monitor its status. +- **Admin CLI**: A command-line interface that allows users to connect to the Admin Service and issue commands for system management. + +### Starting the Admin Service + +1. Before start Admin Service, please make sure RAGFlow system is already started. + +2. Run the service script: + ```bash + python admin/admin_server.py + ``` + The service will start and listen for incoming connections from the CLI on the configured port. + +### Using the Admin CLI + +1. Ensure the Admin Service is running. +2. Launch the CLI client: + ```bash + python admin/admin_client.py -h 0.0.0.0 -p 9381 + +## Supported Commands + +Commands are case-insensitive and must be terminated with a semicolon (`;`). + +### Service Management Commands + +- `LIST SERVICES;` + - Lists all available services within the RAGFlow system. +- `SHOW SERVICE ;` + - Shows detailed status information for the service identified by ``. +- `STARTUP SERVICE ;` + - Attempts to start the service identified by ``. +- `SHUTDOWN SERVICE ;` + - Attempts to gracefully shut down the service identified by ``. +- `RESTART SERVICE ;` + - Attempts to restart the service identified by ``. + +### User Management Commands + +- `LIST USERS;` + - Lists all users known to the system. +- `SHOW USER '';` + - Shows details and permissions for the specified user. The username must be enclosed in single or double quotes. +- `DROP USER '';` + - Removes the specified user from the system. Use with caution. +- `ALTER USER PASSWORD '' '';` + - Changes the password for the specified user. + +### Data and Agent Commands + +- `LIST DATASETS OF '';` + - Lists the datasets associated with the specified user. +- `LIST AGENTS OF '';` + - Lists the agents associated with the specified user. + +### Meta-Commands + +Meta-commands are prefixed with a backslash (`\`). + +- `\?` or `\help` + - Shows help information for the available commands. +- `\q` or `\quit` + - Exits the CLI application. + +## Examples + +```commandline +admin> list users; ++-------------------------------+------------------------+-----------+-------------+ +| create_date | email | is_active | nickname | ++-------------------------------+------------------------+-----------+-------------+ +| Fri, 22 Nov 2024 16:03:41 GMT | jeffery@infiniflow.org | 1 | Jeffery | +| Fri, 22 Nov 2024 16:10:55 GMT | aya@infiniflow.org | 1 | Waterdancer | ++-------------------------------+------------------------+-----------+-------------+ + +admin> list services; ++-------------------------------------------------------------------------------------------+-----------+----+---------------+-------+----------------+ +| extra | host | id | name | port | service_type | ++-------------------------------------------------------------------------------------------+-----------+----+---------------+-------+----------------+ +| {} | 0.0.0.0 | 0 | ragflow_0 | 9380 | ragflow_server | +| {'meta_type': 'mysql', 'password': 'infini_rag_flow', 'username': 'root'} | localhost | 1 | mysql | 5455 | meta_data | +| {'password': 'infini_rag_flow', 'store_type': 'minio', 'user': 'rag_flow'} | localhost | 2 | minio | 9000 | file_store | +| {'password': 'infini_rag_flow', 'retrieval_type': 'elasticsearch', 'username': 'elastic'} | localhost | 3 | elasticsearch | 1200 | retrieval | +| {'db_name': 'default_db', 'retrieval_type': 'infinity'} | localhost | 4 | infinity | 23817 | retrieval | +| {'database': 1, 'mq_type': 'redis', 'password': 'infini_rag_flow'} | localhost | 5 | redis | 6379 | message_queue | ++-------------------------------------------------------------------------------------------+-----------+----+---------------+-------+----------------+ +``` diff --git a/admin/admin_client.py b/admin/admin_client.py new file mode 100644 index 000000000..59e0893a1 --- /dev/null +++ b/admin/admin_client.py @@ -0,0 +1,574 @@ +import argparse +import base64 + +from Cryptodome.PublicKey import RSA +from Cryptodome.Cipher import PKCS1_v1_5 as Cipher_pkcs1_v1_5 +from typing import Dict, List, Any +from lark import Lark, Transformer, Tree +import requests +from requests.auth import HTTPBasicAuth +from api.common.base64 import encode_to_base64 + +GRAMMAR = r""" +start: command + +command: sql_command | meta_command + +sql_command: list_services + | show_service + | startup_service + | shutdown_service + | restart_service + | list_users + | show_user + | drop_user + | alter_user + | create_user + | activate_user + | list_datasets + | list_agents + +// meta command definition +meta_command: "\\" meta_command_name [meta_args] + +meta_command_name: /[a-zA-Z?]+/ +meta_args: (meta_arg)+ + +meta_arg: /[^\\s"']+/ | quoted_string + +// command definition + +LIST: "LIST"i +SERVICES: "SERVICES"i +SHOW: "SHOW"i +CREATE: "CREATE"i +SERVICE: "SERVICE"i +SHUTDOWN: "SHUTDOWN"i +STARTUP: "STARTUP"i +RESTART: "RESTART"i +USERS: "USERS"i +DROP: "DROP"i +USER: "USER"i +ALTER: "ALTER"i +ACTIVE: "ACTIVE"i +PASSWORD: "PASSWORD"i +DATASETS: "DATASETS"i +OF: "OF"i +AGENTS: "AGENTS"i + +list_services: LIST SERVICES ";" +show_service: SHOW SERVICE NUMBER ";" +startup_service: STARTUP SERVICE NUMBER ";" +shutdown_service: SHUTDOWN SERVICE NUMBER ";" +restart_service: RESTART SERVICE NUMBER ";" + +list_users: LIST USERS ";" +drop_user: DROP USER quoted_string ";" +alter_user: ALTER USER PASSWORD quoted_string quoted_string ";" +show_user: SHOW USER quoted_string ";" +create_user: CREATE USER quoted_string quoted_string ";" +activate_user: ALTER USER ACTIVE quoted_string status ";" + +list_datasets: LIST DATASETS OF quoted_string ";" +list_agents: LIST AGENTS OF quoted_string ";" + +identifier: WORD +quoted_string: QUOTED_STRING +status: WORD + +QUOTED_STRING: /'[^']+'/ | /"[^"]+"/ +WORD: /[a-zA-Z0-9_\-\.]+/ +NUMBER: /[0-9]+/ + +%import common.WS +%ignore WS +""" + + +class AdminTransformer(Transformer): + + def start(self, items): + return items[0] + + def command(self, items): + return items[0] + + def list_services(self, items): + result = {'type': 'list_services'} + return result + + def show_service(self, items): + service_id = int(items[2]) + return {"type": "show_service", "number": service_id} + + def startup_service(self, items): + service_id = int(items[2]) + return {"type": "startup_service", "number": service_id} + + def shutdown_service(self, items): + service_id = int(items[2]) + return {"type": "shutdown_service", "number": service_id} + + def restart_service(self, items): + service_id = int(items[2]) + return {"type": "restart_service", "number": service_id} + + def list_users(self, items): + return {"type": "list_users"} + + def show_user(self, items): + user_name = items[2] + return {"type": "show_user", "username": user_name} + + def drop_user(self, items): + user_name = items[2] + return {"type": "drop_user", "username": user_name} + + def alter_user(self, items): + user_name = items[3] + new_password = items[4] + return {"type": "alter_user", "username": user_name, "password": new_password} + + def create_user(self, items): + user_name = items[2] + password = items[3] + return {"type": "create_user", "username": user_name, "password": password, "role": "user"} + + def activate_user(self, items): + user_name = items[3] + activate_status = items[4] + return {"type": "activate_user", "activate_status": activate_status, "username": user_name} + + def list_datasets(self, items): + user_name = items[3] + return {"type": "list_datasets", "username": user_name} + + def list_agents(self, items): + user_name = items[3] + return {"type": "list_agents", "username": user_name} + + def meta_command(self, items): + command_name = str(items[0]).lower() + args = items[1:] if len(items) > 1 else [] + + # handle quoted parameter + parsed_args = [] + for arg in args: + if hasattr(arg, 'value'): + parsed_args.append(arg.value) + else: + parsed_args.append(str(arg)) + + return {'type': 'meta', 'command': command_name, 'args': parsed_args} + + def meta_command_name(self, items): + return items[0] + + def meta_args(self, items): + return items + + +def encrypt(input_string): + pub = '-----BEGIN PUBLIC KEY-----\nMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEArq9XTUSeYr2+N1h3Afl/z8Dse/2yD0ZGrKwx+EEEcdsBLca9Ynmx3nIB5obmLlSfmskLpBo0UACBmB5rEjBp2Q2f3AG3Hjd4B+gNCG6BDaawuDlgANIhGnaTLrIqWrrcm4EMzJOnAOI1fgzJRsOOUEfaS318Eq9OVO3apEyCCt0lOQK6PuksduOjVxtltDav+guVAA068NrPYmRNabVKRNLJpL8w4D44sfth5RvZ3q9t+6RTArpEtc5sh5ChzvqPOzKGMXW83C95TxmXqpbK6olN4RevSfVjEAgCydH6HN6OhtOQEcnrU97r9H0iZOWwbw3pVrZiUkuRD1R56Wzs2wIDAQAB\n-----END PUBLIC KEY-----' + pub_key = RSA.importKey(pub) + cipher = Cipher_pkcs1_v1_5.new(pub_key) + cipher_text = cipher.encrypt(base64.b64encode(input_string.encode('utf-8'))) + return base64.b64encode(cipher_text).decode("utf-8") + + +class AdminCommandParser: + def __init__(self): + self.parser = Lark(GRAMMAR, start='start', parser='lalr', transformer=AdminTransformer()) + self.command_history = [] + + def parse_command(self, command_str: str) -> Dict[str, Any]: + if not command_str.strip(): + return {'type': 'empty'} + + self.command_history.append(command_str) + + try: + result = self.parser.parse(command_str) + return result + except Exception as e: + return {'type': 'error', 'message': f'Parse error: {str(e)}'} + + +class AdminCLI: + def __init__(self): + self.parser = AdminCommandParser() + self.is_interactive = False + self.admin_account = "admin@ragflow.io" + self.admin_password: str = "admin" + self.host: str = "" + self.port: int = 0 + + def verify_admin(self, args): + + conn_info = self._parse_connection_args(args) + if 'error' in conn_info: + print(f"Error: {conn_info['error']}") + return + + self.host = conn_info['host'] + self.port = conn_info['port'] + print(f"Attempt to access ip: {self.host}, port: {self.port}") + url = f'http://{self.host}:{self.port}/api/v1/admin/auth' + + try_count = 0 + while True: + try_count += 1 + if try_count > 3: + return False + + admin_passwd = input(f"password for {self.admin_account}: ").strip() + try: + self.admin_password = encode_to_base64(admin_passwd) + response = requests.get(url, auth=HTTPBasicAuth(self.admin_account, self.admin_password)) + if response.status_code == 200: + res_json = response.json() + error_code = res_json.get('code', -1) + if error_code == 0: + print("Authentication successful.") + return True + else: + error_message = res_json.get('message', 'Unknown error') + print(f"Authentication failed: {error_message}, try again") + continue + else: + print(f"Bad response,status: {response.status_code}, try again") + except Exception: + print(f"Can't access {self.host}, port: {self.port}") + + def _print_table_simple(self, data): + if not data: + print("No data to print") + return + if isinstance(data, dict): + # handle single row data + data = [data] + + columns = list(data[0].keys()) + col_widths = {} + + for col in columns: + max_width = len(str(col)) + for item in data: + value_len = len(str(item.get(col, ''))) + if value_len > max_width: + max_width = value_len + col_widths[col] = max(2, max_width) + + # Generate delimiter + separator = "+" + "+".join(["-" * (col_widths[col] + 2) for col in columns]) + "+" + + # Print header + print(separator) + header = "|" + "|".join([f" {col:<{col_widths[col]}} " for col in columns]) + "|" + print(header) + print(separator) + + # Print data + for item in data: + row = "|" + for col in columns: + value = str(item.get(col, '')) + if len(value) > col_widths[col]: + value = value[:col_widths[col] - 3] + "..." + row += f" {value:<{col_widths[col]}} |" + print(row) + + print(separator) + + def run_interactive(self): + + self.is_interactive = True + print("RAGFlow Admin command line interface - Type '\\?' for help, '\\q' to quit") + + while True: + try: + command = input("admin> ").strip() + if not command: + continue + + print(f"command: {command}") + result = self.parser.parse_command(command) + self.execute_command(result) + + if isinstance(result, Tree): + continue + + if result.get('type') == 'meta' and result.get('command') in ['q', 'quit', 'exit']: + break + + except KeyboardInterrupt: + print("\nUse '\\q' to quit") + except EOFError: + print("\nGoodbye!") + break + + def run_single_command(self, args): + conn_info = self._parse_connection_args(args) + if 'error' in conn_info: + print(f"Error: {conn_info['error']}") + return + + def _parse_connection_args(self, args: List[str]) -> Dict[str, Any]: + parser = argparse.ArgumentParser(description='Admin CLI Client', add_help=False) + parser.add_argument('-h', '--host', default='localhost', help='Admin service host') + parser.add_argument('-p', '--port', type=int, default=8080, help='Admin service port') + + try: + parsed_args, remaining_args = parser.parse_known_args(args) + return { + 'host': parsed_args.host, + 'port': parsed_args.port, + } + except SystemExit: + return {'error': 'Invalid connection arguments'} + + def execute_command(self, parsed_command: Dict[str, Any]): + + command_dict: dict + if isinstance(parsed_command, Tree): + command_dict = parsed_command.children[0] + else: + if parsed_command['type'] == 'error': + print(f"Error: {parsed_command['message']}") + return + else: + command_dict = parsed_command + + # print(f"Parsed command: {command_dict}") + + command_type = command_dict['type'] + + match command_type: + case 'list_services': + self._handle_list_services(command_dict) + case 'show_service': + self._handle_show_service(command_dict) + case 'restart_service': + self._handle_restart_service(command_dict) + case 'shutdown_service': + self._handle_shutdown_service(command_dict) + case 'startup_service': + self._handle_startup_service(command_dict) + case 'list_users': + self._handle_list_users(command_dict) + case 'show_user': + self._handle_show_user(command_dict) + case 'drop_user': + self._handle_drop_user(command_dict) + case 'alter_user': + self._handle_alter_user(command_dict) + case 'create_user': + self._handle_create_user(command_dict) + case 'activate_user': + self._handle_activate_user(command_dict) + case 'list_datasets': + self._handle_list_datasets(command_dict) + case 'list_agents': + self._handle_list_agents(command_dict) + case 'meta': + self._handle_meta_command(command_dict) + case _: + print(f"Command '{command_type}' would be executed with API") + + def _handle_list_services(self, command): + print("Listing all services") + + url = f'http://{self.host}:{self.port}/api/v1/admin/services' + response = requests.get(url, auth=HTTPBasicAuth(self.admin_account, self.admin_password)) + res_json = response.json() + if response.status_code == 200: + self._print_table_simple(res_json['data']) + else: + print(f"Fail to get all users, code: {res_json['code']}, message: {res_json['message']}") + + def _handle_show_service(self, command): + service_id: int = command['number'] + print(f"Showing service: {service_id}") + + def _handle_restart_service(self, command): + service_id: int = command['number'] + print(f"Restart service {service_id}") + + def _handle_shutdown_service(self, command): + service_id: int = command['number'] + print(f"Shutdown service {service_id}") + + def _handle_startup_service(self, command): + service_id: int = command['number'] + print(f"Startup service {service_id}") + + def _handle_list_users(self, command): + print("Listing all users") + + url = f'http://{self.host}:{self.port}/api/v1/admin/users' + response = requests.get(url, auth=HTTPBasicAuth(self.admin_account, self.admin_password)) + res_json = response.json() + if response.status_code == 200: + self._print_table_simple(res_json['data']) + else: + print(f"Fail to get all users, code: {res_json['code']}, message: {res_json['message']}") + + def _handle_show_user(self, command): + username_tree: Tree = command['username'] + username: str = username_tree.children[0].strip("'\"") + print(f"Showing user: {username}") + url = f'http://{self.host}:{self.port}/api/v1/admin/users/{username}' + response = requests.get(url, auth=HTTPBasicAuth(self.admin_account, self.admin_password)) + res_json = response.json() + if response.status_code == 200: + self._print_table_simple(res_json['data']) + else: + print(f"Fail to get user {username}, code: {res_json['code']}, message: {res_json['message']}") + + def _handle_drop_user(self, command): + username_tree: Tree = command['username'] + username: str = username_tree.children[0].strip("'\"") + print(f"Drop user: {username}") + url = f'http://{self.host}:{self.port}/api/v1/admin/users/{username}' + response = requests.delete(url, auth=HTTPBasicAuth(self.admin_account, self.admin_password)) + res_json = response.json() + if response.status_code == 200: + print(res_json["message"]) + else: + print(f"Fail to drop user, code: {res_json['code']}, message: {res_json['message']}") + + def _handle_alter_user(self, command): + username_tree: Tree = command['username'] + username: str = username_tree.children[0].strip("'\"") + password_tree: Tree = command['password'] + password: str = password_tree.children[0].strip("'\"") + print(f"Alter user: {username}, password: {password}") + url = f'http://{self.host}:{self.port}/api/v1/admin/users/{username}/password' + response = requests.put(url, auth=HTTPBasicAuth(self.admin_account, self.admin_password), + json={'new_password': encrypt(password)}) + res_json = response.json() + if response.status_code == 200: + print(res_json["message"]) + else: + print(f"Fail to alter password, code: {res_json['code']}, message: {res_json['message']}") + + def _handle_create_user(self, command): + username_tree: Tree = command['username'] + username: str = username_tree.children[0].strip("'\"") + password_tree: Tree = command['password'] + password: str = password_tree.children[0].strip("'\"") + role: str = command['role'] + print(f"Create user: {username}, password: {password}, role: {role}") + url = f'http://{self.host}:{self.port}/api/v1/admin/users' + response = requests.post( + url, + auth=HTTPBasicAuth(self.admin_account, self.admin_password), + json={'username': username, 'password': encrypt(password), 'role': role} + ) + res_json = response.json() + if response.status_code == 200: + self._print_table_simple(res_json['data']) + else: + print(f"Fail to create user {username}, code: {res_json['code']}, message: {res_json['message']}") + + def _handle_activate_user(self, command): + username_tree: Tree = command['username'] + username: str = username_tree.children[0].strip("'\"") + activate_tree: Tree = command['activate_status'] + activate_status: str = activate_tree.children[0].strip("'\"") + if activate_status.lower() in ['on', 'off']: + print(f"Alter user {username} activate status, turn {activate_status.lower()}.") + url = f'http://{self.host}:{self.port}/api/v1/admin/users/{username}/activate' + response = requests.put(url, auth=HTTPBasicAuth(self.admin_account, self.admin_password), + json={'activate_status': activate_status}) + res_json = response.json() + if response.status_code == 200: + print(res_json["message"]) + else: + print(f"Fail to alter activate status, code: {res_json['code']}, message: {res_json['message']}") + else: + print(f"Unknown activate status: {activate_status}.") + + def _handle_list_datasets(self, command): + username_tree: Tree = command['username'] + username: str = username_tree.children[0].strip("'\"") + print(f"Listing all datasets of user: {username}") + url = f'http://{self.host}:{self.port}/api/v1/admin/users/{username}/datasets' + response = requests.get(url, auth=HTTPBasicAuth(self.admin_account, self.admin_password)) + res_json = response.json() + if response.status_code == 200: + self._print_table_simple(res_json['data']) + else: + print(f"Fail to get all datasets of {username}, code: {res_json['code']}, message: {res_json['message']}") + + def _handle_list_agents(self, command): + username_tree: Tree = command['username'] + username: str = username_tree.children[0].strip("'\"") + print(f"Listing all agents of user: {username}") + url = f'http://{self.host}:{self.port}/api/v1/admin/users/{username}/agents' + response = requests.get(url, auth=HTTPBasicAuth(self.admin_account, self.admin_password)) + res_json = response.json() + if response.status_code == 200: + self._print_table_simple(res_json['data']) + else: + print(f"Fail to get all agents of {username}, code: {res_json['code']}, message: {res_json['message']}") + + def _handle_meta_command(self, command): + meta_command = command['command'] + args = command.get('args', []) + + if meta_command in ['?', 'h', 'help']: + self.show_help() + elif meta_command in ['q', 'quit', 'exit']: + print("Goodbye!") + else: + print(f"Meta command '{meta_command}' with args {args}") + + def show_help(self): + """Help info""" + help_text = """ +Commands: + LIST SERVICES + SHOW SERVICE + STARTUP SERVICE + SHUTDOWN SERVICE + RESTART SERVICE + LIST USERS + SHOW USER + DROP USER + CREATE USER + ALTER USER PASSWORD + ALTER USER ACTIVE + LIST DATASETS OF + LIST AGENTS OF + +Meta Commands: + \\?, \\h, \\help Show this help + \\q, \\quit, \\exit Quit the CLI + """ + print(help_text) + + +def main(): + import sys + + cli = AdminCLI() + + if len(sys.argv) == 1 or (len(sys.argv) > 1 and sys.argv[1] == '-'): + print(r""" + ____ ___ ______________ ___ __ _ + / __ \/ | / ____/ ____/ /___ _ __ / | ____/ /___ ___ (_)___ + / /_/ / /| |/ / __/ /_ / / __ \ | /| / / / /| |/ __ / __ `__ \/ / __ \ + / _, _/ ___ / /_/ / __/ / / /_/ / |/ |/ / / ___ / /_/ / / / / / / / / / / + /_/ |_/_/ |_\____/_/ /_/\____/|__/|__/ /_/ |_\__,_/_/ /_/ /_/_/_/ /_/ + """) + if cli.verify_admin(sys.argv): + cli.run_interactive() + else: + if cli.verify_admin(sys.argv): + cli.run_interactive() + # cli.run_single_command(sys.argv[1:]) + + +if __name__ == '__main__': + main() diff --git a/admin/admin_server.py b/admin/admin_server.py new file mode 100644 index 000000000..27ee0c72a --- /dev/null +++ b/admin/admin_server.py @@ -0,0 +1,47 @@ + +import os +import signal +import logging +import time +import threading +import traceback +from werkzeug.serving import run_simple +from flask import Flask +from routes import admin_bp +from api.utils.log_utils import init_root_logger +from api.constants import SERVICE_CONF +from api import settings +from config import load_configurations, SERVICE_CONFIGS + +stop_event = threading.Event() + +if __name__ == '__main__': + init_root_logger("admin_service") + logging.info(r""" + ____ ___ ______________ ___ __ _ + / __ \/ | / ____/ ____/ /___ _ __ / | ____/ /___ ___ (_)___ + / /_/ / /| |/ / __/ /_ / / __ \ | /| / / / /| |/ __ / __ `__ \/ / __ \ + / _, _/ ___ / /_/ / __/ / / /_/ / |/ |/ / / ___ / /_/ / / / / / / / / / / + /_/ |_/_/ |_\____/_/ /_/\____/|__/|__/ /_/ |_\__,_/_/ /_/ /_/_/_/ /_/ + """) + + app = Flask(__name__) + app.register_blueprint(admin_bp) + settings.init_settings() + SERVICE_CONFIGS.configs = load_configurations(SERVICE_CONF) + + try: + logging.info("RAGFlow Admin service start...") + run_simple( + hostname="0.0.0.0", + port=9381, + application=app, + threaded=True, + use_reloader=True, + use_debugger=True, + ) + except Exception: + traceback.print_exc() + stop_event.set() + time.sleep(1) + os.kill(os.getpid(), signal.SIGKILL) diff --git a/admin/auth.py b/admin/auth.py new file mode 100644 index 000000000..3748c07a6 --- /dev/null +++ b/admin/auth.py @@ -0,0 +1,57 @@ +import logging +import uuid +from functools import wraps +from flask import request, jsonify + +from exceptions import AdminException +from api.db.init_data import encode_to_base64 +from api.db.services import UserService + + +def check_admin(username: str, password: str): + users = UserService.query(email=username) + if not users: + logging.info(f"Username: {username} is not registered!") + user_info = { + "id": uuid.uuid1().hex, + "password": encode_to_base64("admin"), + "nickname": "admin", + "is_superuser": True, + "email": "admin@ragflow.io", + "creator": "system", + "status": "1", + } + if not UserService.save(**user_info): + raise AdminException("Can't init admin.", 500) + + user = UserService.query_user(username, password) + if user: + return True + else: + return False + + +def login_verify(f): + @wraps(f) + def decorated(*args, **kwargs): + auth = request.authorization + if not auth or 'username' not in auth.parameters or 'password' not in auth.parameters: + return jsonify({ + "code": 401, + "message": "Authentication required", + "data": None + }), 200 + + username = auth.parameters['username'] + password = auth.parameters['password'] + # TODO: to check the username and password from DB + if check_admin(username, password) is False: + return jsonify({ + "code": 403, + "message": "Access denied", + "data": None + }), 200 + + return f(*args, **kwargs) + + return decorated diff --git a/admin/config.py b/admin/config.py new file mode 100644 index 000000000..570807737 --- /dev/null +++ b/admin/config.py @@ -0,0 +1,280 @@ +import logging +import threading +from enum import Enum + +from pydantic import BaseModel +from typing import Any +from api.utils.configs import read_config +from urllib.parse import urlparse + + +class ServiceConfigs: + def __init__(self): + self.configs = [] + self.lock = threading.Lock() + + +SERVICE_CONFIGS = ServiceConfigs + + +class ServiceType(Enum): + METADATA = "metadata" + RETRIEVAL = "retrieval" + MESSAGE_QUEUE = "message_queue" + RAGFLOW_SERVER = "ragflow_server" + TASK_EXECUTOR = "task_executor" + FILE_STORE = "file_store" + + +class BaseConfig(BaseModel): + id: int + name: str + host: str + port: int + service_type: str + + def to_dict(self) -> dict[str, Any]: + return {'id': self.id, 'name': self.name, 'host': self.host, 'port': self.port, 'service_type': self.service_type} + + +class MetaConfig(BaseConfig): + meta_type: str + + def to_dict(self) -> dict[str, Any]: + result = super().to_dict() + if 'extra' not in result: + result['extra'] = dict() + extra_dict = result['extra'].copy() + extra_dict['meta_type'] = self.meta_type + result['extra'] = extra_dict + return result + + +class MySQLConfig(MetaConfig): + username: str + password: str + + def to_dict(self) -> dict[str, Any]: + result = super().to_dict() + if 'extra' not in result: + result['extra'] = dict() + extra_dict = result['extra'].copy() + extra_dict['username'] = self.username + extra_dict['password'] = self.password + result['extra'] = extra_dict + return result + + +class PostgresConfig(MetaConfig): + + def to_dict(self) -> dict[str, Any]: + result = super().to_dict() + if 'extra' not in result: + result['extra'] = dict() + return result + + +class RetrievalConfig(BaseConfig): + retrieval_type: str + + def to_dict(self) -> dict[str, Any]: + result = super().to_dict() + if 'extra' not in result: + result['extra'] = dict() + extra_dict = result['extra'].copy() + extra_dict['retrieval_type'] = self.retrieval_type + result['extra'] = extra_dict + return result + + +class InfinityConfig(RetrievalConfig): + db_name: str + + def to_dict(self) -> dict[str, Any]: + result = super().to_dict() + if 'extra' not in result: + result['extra'] = dict() + extra_dict = result['extra'].copy() + extra_dict['db_name'] = self.db_name + result['extra'] = extra_dict + return result + + +class ElasticsearchConfig(RetrievalConfig): + username: str + password: str + + def to_dict(self) -> dict[str, Any]: + result = super().to_dict() + if 'extra' not in result: + result['extra'] = dict() + extra_dict = result['extra'].copy() + extra_dict['username'] = self.username + extra_dict['password'] = self.password + result['extra'] = extra_dict + return result + + +class MessageQueueConfig(BaseConfig): + mq_type: str + + def to_dict(self) -> dict[str, Any]: + result = super().to_dict() + if 'extra' not in result: + result['extra'] = dict() + extra_dict = result['extra'].copy() + extra_dict['mq_type'] = self.mq_type + result['extra'] = extra_dict + return result + + +class RedisConfig(MessageQueueConfig): + database: int + password: str + + def to_dict(self) -> dict[str, Any]: + result = super().to_dict() + if 'extra' not in result: + result['extra'] = dict() + extra_dict = result['extra'].copy() + extra_dict['database'] = self.database + extra_dict['password'] = self.password + result['extra'] = extra_dict + return result + + +class RabbitMQConfig(MessageQueueConfig): + + def to_dict(self) -> dict[str, Any]: + result = super().to_dict() + if 'extra' not in result: + result['extra'] = dict() + return result + + +class RAGFlowServerConfig(BaseConfig): + + def to_dict(self) -> dict[str, Any]: + result = super().to_dict() + if 'extra' not in result: + result['extra'] = dict() + return result + + +class TaskExecutorConfig(BaseConfig): + + def to_dict(self) -> dict[str, Any]: + result = super().to_dict() + if 'extra' not in result: + result['extra'] = dict() + return result + + +class FileStoreConfig(BaseConfig): + store_type: str + + def to_dict(self) -> dict[str, Any]: + result = super().to_dict() + if 'extra' not in result: + result['extra'] = dict() + extra_dict = result['extra'].copy() + extra_dict['store_type'] = self.store_type + result['extra'] = extra_dict + return result + + +class MinioConfig(FileStoreConfig): + user: str + password: str + + def to_dict(self) -> dict[str, Any]: + result = super().to_dict() + if 'extra' not in result: + result['extra'] = dict() + extra_dict = result['extra'].copy() + extra_dict['user'] = self.user + extra_dict['password'] = self.password + result['extra'] = extra_dict + return result + + +def load_configurations(config_path: str) -> list[BaseConfig]: + raw_configs = read_config(config_path) + configurations = [] + ragflow_count = 0 + id_count = 0 + for k, v in raw_configs.items(): + match (k): + case "ragflow": + name: str = f'ragflow_{ragflow_count}' + host: str = v['host'] + http_port: int = v['http_port'] + config = RAGFlowServerConfig(id=id_count, name=name, host=host, port=http_port, service_type="ragflow_server") + configurations.append(config) + id_count += 1 + case "es": + name: str = 'elasticsearch' + url = v['hosts'] + parsed = urlparse(url) + host: str = parsed.hostname + port: int = parsed.port + username: str = v.get('username') + password: str = v.get('password') + config = ElasticsearchConfig(id=id_count, name=name, host=host, port=port, service_type="retrieval", + retrieval_type="elasticsearch", + username=username, password=password) + configurations.append(config) + id_count += 1 + + case "infinity": + name: str = 'infinity' + url = v['uri'] + parts = url.split(':', 1) + host = parts[0] + port = int(parts[1]) + database: str = v.get('db_name', 'default_db') + config = InfinityConfig(id=id_count, name=name, host=host, port=port, service_type="retrieval", retrieval_type="infinity", + db_name=database) + configurations.append(config) + id_count += 1 + case "minio": + name: str = 'minio' + url = v['host'] + parts = url.split(':', 1) + host = parts[0] + port = int(parts[1]) + user = v.get('user') + password = v.get('password') + config = MinioConfig(id=id_count, name=name, host=host, port=port, user=user, password=password, service_type="file_store", + store_type="minio") + configurations.append(config) + id_count += 1 + case "redis": + name: str = 'redis' + url = v['host'] + parts = url.split(':', 1) + host = parts[0] + port = int(parts[1]) + password = v.get('password') + db: int = v.get('db') + config = RedisConfig(id=id_count, name=name, host=host, port=port, password=password, database=db, + service_type="message_queue", mq_type="redis") + configurations.append(config) + id_count += 1 + case "mysql": + name: str = 'mysql' + host: str = v.get('host') + port: int = v.get('port') + username = v.get('user') + password = v.get('password') + config = MySQLConfig(id=id_count, name=name, host=host, port=port, username=username, password=password, + service_type="meta_data", meta_type="mysql") + configurations.append(config) + id_count += 1 + case "admin": + pass + case _: + logging.warning(f"Unknown configuration key: {k}") + continue + + return configurations diff --git a/admin/exceptions.py b/admin/exceptions.py new file mode 100644 index 000000000..5e3021b41 --- /dev/null +++ b/admin/exceptions.py @@ -0,0 +1,17 @@ +class AdminException(Exception): + def __init__(self, message, code=400): + super().__init__(message) + self.code = code + self.message = message + +class UserNotFoundError(AdminException): + def __init__(self, username): + super().__init__(f"User '{username}' not found", 404) + +class UserAlreadyExistsError(AdminException): + def __init__(self, username): + super().__init__(f"User '{username}' already exists", 409) + +class CannotDeleteAdminError(AdminException): + def __init__(self): + super().__init__("Cannot delete admin account", 403) \ No newline at end of file diff --git a/admin/models.py b/admin/models.py new file mode 100644 index 000000000..e69de29bb diff --git a/admin/responses.py b/admin/responses.py new file mode 100644 index 000000000..00cee7038 --- /dev/null +++ b/admin/responses.py @@ -0,0 +1,15 @@ +from flask import jsonify + +def success_response(data=None, message="Success", code = 0): + return jsonify({ + "code": code, + "message": message, + "data": data + }), 200 + +def error_response(message="Error", code=-1, data=None): + return jsonify({ + "code": code, + "message": message, + "data": data + }), 400 \ No newline at end of file diff --git a/admin/routes.py b/admin/routes.py new file mode 100644 index 000000000..8cd9633b1 --- /dev/null +++ b/admin/routes.py @@ -0,0 +1,190 @@ +from flask import Blueprint, request + +from auth import login_verify +from responses import success_response, error_response +from services import UserMgr, ServiceMgr, UserServiceMgr +from exceptions import AdminException + +admin_bp = Blueprint('admin', __name__, url_prefix='/api/v1/admin') + + +@admin_bp.route('/auth', methods=['GET']) +@login_verify +def auth_admin(): + try: + return success_response(None, "Admin is authorized", 0) + except Exception as e: + return error_response(str(e), 500) + + +@admin_bp.route('/users', methods=['GET']) +@login_verify +def list_users(): + try: + users = UserMgr.get_all_users() + return success_response(users, "Get all users", 0) + except Exception as e: + return error_response(str(e), 500) + + +@admin_bp.route('/users', methods=['POST']) +@login_verify +def create_user(): + try: + data = request.get_json() + if not data or 'username' not in data or 'password' not in data: + return error_response("Username and password are required", 400) + + username = data['username'] + password = data['password'] + role = data.get('role', 'user') + + res = UserMgr.create_user(username, password, role) + if res["success"]: + user_info = res["user_info"] + user_info.pop("password") # do not return password + return success_response(user_info, "User created successfully") + else: + return error_response("create user failed") + + except AdminException as e: + return error_response(e.message, e.code) + except Exception as e: + return error_response(str(e)) + + +@admin_bp.route('/users/', methods=['DELETE']) +@login_verify +def delete_user(username): + try: + res = UserMgr.delete_user(username) + if res["success"]: + return success_response(None, res["message"]) + else: + return error_response(res["message"]) + + except AdminException as e: + return error_response(e.message, e.code) + except Exception as e: + return error_response(str(e), 500) + + +@admin_bp.route('/users//password', methods=['PUT']) +@login_verify +def change_password(username): + try: + data = request.get_json() + if not data or 'new_password' not in data: + return error_response("New password is required", 400) + + new_password = data['new_password'] + msg = UserMgr.update_user_password(username, new_password) + return success_response(None, msg) + + except AdminException as e: + return error_response(e.message, e.code) + except Exception as e: + return error_response(str(e), 500) + + +@admin_bp.route('/users//activate', methods=['PUT']) +@login_verify +def alter_user_activate_status(username): + try: + data = request.get_json() + if not data or 'activate_status' not in data: + return error_response("Activation status is required", 400) + activate_status = data['activate_status'] + msg = UserMgr.update_user_activate_status(username, activate_status) + return success_response(None, msg) + except AdminException as e: + return error_response(e.message, e.code) + except Exception as e: + return error_response(str(e), 500) + +@admin_bp.route('/users/', methods=['GET']) +@login_verify +def get_user_details(username): + try: + user_details = UserMgr.get_user_details(username) + return success_response(user_details) + + except AdminException as e: + return error_response(e.message, e.code) + except Exception as e: + return error_response(str(e), 500) + +@admin_bp.route('/users//datasets', methods=['GET']) +@login_verify +def get_user_datasets(username): + try: + datasets_list = UserServiceMgr.get_user_datasets(username) + return success_response(datasets_list) + + except AdminException as e: + return error_response(e.message, e.code) + except Exception as e: + return error_response(str(e), 500) + + +@admin_bp.route('/users//agents', methods=['GET']) +@login_verify +def get_user_agents(username): + try: + agents_list = UserServiceMgr.get_user_agents(username) + return success_response(agents_list) + + except AdminException as e: + return error_response(e.message, e.code) + except Exception as e: + return error_response(str(e), 500) + + +@admin_bp.route('/services', methods=['GET']) +@login_verify +def get_services(): + try: + services = ServiceMgr.get_all_services() + return success_response(services, "Get all services", 0) + except Exception as e: + return error_response(str(e), 500) + + +@admin_bp.route('/service_types/', methods=['GET']) +@login_verify +def get_services_by_type(service_type_str): + try: + services = ServiceMgr.get_services_by_type(service_type_str) + return success_response(services) + except Exception as e: + return error_response(str(e), 500) + + +@admin_bp.route('/services/', methods=['GET']) +@login_verify +def get_service(service_id): + try: + services = ServiceMgr.get_service_details(service_id) + return success_response(services) + except Exception as e: + return error_response(str(e), 500) + + +@admin_bp.route('/services/', methods=['DELETE']) +@login_verify +def shutdown_service(service_id): + try: + services = ServiceMgr.shutdown_service(service_id) + return success_response(services) + except Exception as e: + return error_response(str(e), 500) + + +@admin_bp.route('/services/', methods=['PUT']) +@login_verify +def restart_service(service_id): + try: + services = ServiceMgr.restart_service(service_id) + return success_response(services) + except Exception as e: + return error_response(str(e), 500) diff --git a/admin/services.py b/admin/services.py new file mode 100644 index 000000000..c5db5d90a --- /dev/null +++ b/admin/services.py @@ -0,0 +1,175 @@ +import re +from werkzeug.security import check_password_hash +from api.db import ActiveEnum +from api.db.services import UserService +from api.db.joint_services.user_account_service import create_new_user, delete_user_data +from api.db.services.canvas_service import UserCanvasService +from api.db.services.user_service import TenantService +from api.db.services.knowledgebase_service import KnowledgebaseService +from api.utils.crypt import decrypt +from exceptions import AdminException, UserAlreadyExistsError, UserNotFoundError +from config import SERVICE_CONFIGS + +class UserMgr: + @staticmethod + def get_all_users(): + users = UserService.get_all_users() + result = [] + for user in users: + result.append({'email': user.email, 'nickname': user.nickname, 'create_date': user.create_date, 'is_active': user.is_active}) + return result + + @staticmethod + def get_user_details(username): + # use email to query + users = UserService.query_user_by_email(username) + result = [] + for user in users: + result.append({ + 'email': user.email, + 'language': user.language, + 'last_login_time': user.last_login_time, + 'is_authenticated': user.is_authenticated, + 'is_active': user.is_active, + 'is_anonymous': user.is_anonymous, + 'login_channel': user.login_channel, + 'status': user.status, + 'is_superuser': user.is_superuser, + 'create_date': user.create_date, + 'update_date': user.update_date + }) + return result + + @staticmethod + def create_user(username, password, role="user") -> dict: + # Validate the email address + if not re.match(r"^[\w\._-]+@([\w_-]+\.)+[\w-]{2,}$", username): + raise AdminException(f"Invalid email address: {username}!") + # Check if the email address is already used + if UserService.query(email=username): + raise UserAlreadyExistsError(username) + # Construct user info data + user_info_dict = { + "email": username, + "nickname": "", # ask user to edit it manually in settings. + "password": decrypt(password), + "login_channel": "password", + "is_superuser": role == "admin", + } + return create_new_user(user_info_dict) + + @staticmethod + def delete_user(username): + # use email to delete + user_list = UserService.query_user_by_email(username) + if not user_list: + raise UserNotFoundError(username) + if len(user_list) > 1: + raise AdminException(f"Exist more than 1 user: {username}!") + usr = user_list[0] + return delete_user_data(usr.id) + + @staticmethod + def update_user_password(username, new_password) -> str: + # use email to find user. check exist and unique. + user_list = UserService.query_user_by_email(username) + if not user_list: + raise UserNotFoundError(username) + elif len(user_list) > 1: + raise AdminException(f"Exist more than 1 user: {username}!") + # check new_password different from old. + usr = user_list[0] + psw = decrypt(new_password) + if check_password_hash(usr.password, psw): + return "Same password, no need to update!" + # update password + UserService.update_user_password(usr.id, psw) + return "Password updated successfully!" + + @staticmethod + def update_user_activate_status(username, activate_status: str): + # use email to find user. check exist and unique. + user_list = UserService.query_user_by_email(username) + if not user_list: + raise UserNotFoundError(username) + elif len(user_list) > 1: + raise AdminException(f"Exist more than 1 user: {username}!") + # check activate status different from new + usr = user_list[0] + # format activate_status before handle + _activate_status = activate_status.lower() + target_status = { + 'on': ActiveEnum.ACTIVE.value, + 'off': ActiveEnum.INACTIVE.value, + }.get(_activate_status) + if not target_status: + raise AdminException(f"Invalid activate_status: {activate_status}") + if target_status == usr.is_active: + return f"User activate status is already {_activate_status}!" + # update is_active + UserService.update_user(usr.id, {"is_active": target_status}) + return f"Turn {_activate_status} user activate status successfully!" + +class UserServiceMgr: + + @staticmethod + def get_user_datasets(username): + # use email to find user. + user_list = UserService.query_user_by_email(username) + if not user_list: + raise UserNotFoundError(username) + elif len(user_list) > 1: + raise AdminException(f"Exist more than 1 user: {username}!") + # find tenants + usr = user_list[0] + tenants = TenantService.get_joined_tenants_by_user_id(usr.id) + tenant_ids = [m["tenant_id"] for m in tenants] + # filter permitted kb and owned kb + return KnowledgebaseService.get_all_kb_by_tenant_ids(tenant_ids, usr.id) + + @staticmethod + def get_user_agents(username): + # use email to find user. + user_list = UserService.query_user_by_email(username) + if not user_list: + raise UserNotFoundError(username) + elif len(user_list) > 1: + raise AdminException(f"Exist more than 1 user: {username}!") + # find tenants + usr = user_list[0] + tenants = TenantService.get_joined_tenants_by_user_id(usr.id) + tenant_ids = [m["tenant_id"] for m in tenants] + # filter permitted agents and owned agents + res = UserCanvasService.get_all_agents_by_tenant_ids(tenant_ids, usr.id) + return [{ + 'title': r['title'], + 'permission': r['permission'], + 'canvas_type': r['canvas_type'], + 'canvas_category': r['canvas_category'] + } for r in res] + +class ServiceMgr: + + @staticmethod + def get_all_services(): + result = [] + configs = SERVICE_CONFIGS.configs + for config in configs: + result.append(config.to_dict()) + return result + + @staticmethod + def get_services_by_type(service_type_str: str): + raise AdminException("get_services_by_type: not implemented") + + @staticmethod + def get_service_details(service_id: int): + raise AdminException("get_service_details: not implemented") + + @staticmethod + def shutdown_service(service_id: int): + raise AdminException("shutdown_service: not implemented") + + @staticmethod + def restart_service(service_id: int): + raise AdminException("restart_service: not implemented") diff --git a/agent/canvas.py b/agent/canvas.py index 81c71b2e2..a22391deb 100644 --- a/agent/canvas.py +++ b/agent/canvas.py @@ -27,7 +27,7 @@ from agent.component import component_class from agent.component.base import ComponentBase from api.db.services.file_service import FileService from api.utils import get_uuid, hash_str2int -from rag.prompts.prompts import chunks_format +from rag.prompts.generator import chunks_format from rag.utils.redis_conn import REDIS_CONN class Graph: @@ -490,7 +490,8 @@ class Canvas(Graph): r = self.retrieval[-1] for ck in chunks_format({"chunks": chunks}): - cid = hash_str2int(ck["id"], 100) + cid = hash_str2int(ck["id"], 500) + # cid = uuid.uuid5(uuid.NAMESPACE_DNS, ck["id"]) if cid not in r: r["chunks"][cid] = ck diff --git a/agent/component/agent_with_tools.py b/agent/component/agent_with_tools.py index 6b57fa120..55c7f2f63 100644 --- a/agent/component/agent_with_tools.py +++ b/agent/component/agent_with_tools.py @@ -28,9 +28,8 @@ from api.db.services.llm_service import LLMBundle from api.db.services.tenant_llm_service import TenantLLMService from api.db.services.mcp_server_service import MCPServerService from api.utils.api_utils import timeout -from rag.prompts import message_fit_in -from rag.prompts.prompts import next_step, COMPLETE_TASK, analyze_task, \ - citation_prompt, reflect, rank_memories, kb_prompt, citation_plus, full_question +from rag.prompts.generator import next_step, COMPLETE_TASK, analyze_task, \ + citation_prompt, reflect, rank_memories, kb_prompt, citation_plus, full_question, message_fit_in from rag.utils.mcp_tool_call_conn import MCPToolCallSession, mcp_tool_metadata_to_openai_tool from agent.component.llm import LLMParam, LLM @@ -138,7 +137,7 @@ class Agent(LLM, ToolBase): res.update(cpn.get_input_form()) return res - @timeout(os.environ.get("COMPONENT_EXEC_TIMEOUT", 20*60)) + @timeout(int(os.environ.get("COMPONENT_EXEC_TIMEOUT", 20*60))) def _invoke(self, **kwargs): if kwargs.get("user_prompt"): usr_pmt = "" diff --git a/agent/component/base.py b/agent/component/base.py index a622046e6..73f11ba95 100644 --- a/agent/component/base.py +++ b/agent/component/base.py @@ -244,7 +244,7 @@ class ComponentParamBase(ABC): if not value_legal: raise ValueError( - "Plase check runtime conf, {} = {} does not match user-parameter restriction".format( + "Please check runtime conf, {} = {} does not match user-parameter restriction".format( variable, value ) ) @@ -431,7 +431,7 @@ class ComponentBase(ABC): self.set_output("_elapsed_time", time.perf_counter() - self.output("_created_time")) return self.output() - @timeout(os.environ.get("COMPONENT_EXEC_TIMEOUT", 10*60)) + @timeout(int(os.environ.get("COMPONENT_EXEC_TIMEOUT", 10*60))) def _invoke(self, **kwargs): raise NotImplementedError() diff --git a/agent/component/categorize.py b/agent/component/categorize.py index d25518d09..af2666fcb 100644 --- a/agent/component/categorize.py +++ b/agent/component/categorize.py @@ -28,7 +28,7 @@ from rag.llm.chat_model import ERROR_PREFIX class CategorizeParam(LLMParam): """ - Define the Categorize component parameters. + Define the categorize component parameters. """ def __init__(self): super().__init__() @@ -80,7 +80,7 @@ Here's description of each category: - Prioritize the most specific applicable category - Return only the category name without explanations - Use "Other" only when no other category fits - + """.format( "\n - ".join(list(self.category_description.keys())), "\n".join(descriptions) @@ -96,7 +96,7 @@ Here's description of each category: class Categorize(LLM, ABC): component_name = "Categorize" - @timeout(os.environ.get("COMPONENT_EXEC_TIMEOUT", 10*60)) + @timeout(int(os.environ.get("COMPONENT_EXEC_TIMEOUT", 10*60))) def _invoke(self, **kwargs): msg = self._canvas.get_history(self._param.message_history_window_size) if not msg: @@ -112,7 +112,7 @@ class Categorize(LLM, ABC): user_prompt = """ ---- Real Data ---- -{} → +{} → """.format(" | ".join(["{}: \"{}\"".format(c["role"].upper(), re.sub(r"\n", "", c["content"], flags=re.DOTALL)) for c in msg])) ans = chat_mdl.chat(self._param.sys_prompt, [{"role": "user", "content": user_prompt}], self._param.gen_conf()) logging.info(f"input: {user_prompt}, answer: {str(ans)}") @@ -134,4 +134,4 @@ class Categorize(LLM, ABC): self.set_output("_next", cpn_ids) def thoughts(self) -> str: - return "Which should it falls into {}? ...".format(",".join([f"`{c}`" for c, _ in self._param.category_description.items()])) \ No newline at end of file + return "Which should it falls into {}? ...".format(",".join([f"`{c}`" for c, _ in self._param.category_description.items()])) diff --git a/agent/component/invoke.py b/agent/component/invoke.py index cb313925b..a6f6cd5ee 100644 --- a/agent/component/invoke.py +++ b/agent/component/invoke.py @@ -53,7 +53,7 @@ class InvokeParam(ComponentParamBase): class Invoke(ComponentBase, ABC): component_name = "Invoke" - @timeout(os.environ.get("COMPONENT_EXEC_TIMEOUT", 3)) + @timeout(int(os.environ.get("COMPONENT_EXEC_TIMEOUT", 3))) def _invoke(self, **kwargs): args = {} for para in self._param.variables: diff --git a/agent/component/llm.py b/agent/component/llm.py index a378ad0ba..1e6c35c27 100644 --- a/agent/component/llm.py +++ b/agent/component/llm.py @@ -26,8 +26,7 @@ from api.db.services.llm_service import LLMBundle from api.db.services.tenant_llm_service import TenantLLMService from agent.component.base import ComponentBase, ComponentParamBase from api.utils.api_utils import timeout -from rag.prompts import message_fit_in, citation_prompt -from rag.prompts.prompts import tool_call_summary +from rag.prompts.generator import tool_call_summary, message_fit_in, citation_prompt class LLMParam(ComponentParamBase): @@ -82,9 +81,9 @@ class LLMParam(ComponentParamBase): class LLM(ComponentBase): component_name = "LLM" - - def __init__(self, canvas, id, param: ComponentParamBase): - super().__init__(canvas, id, param) + + def __init__(self, canvas, component_id, param: ComponentParamBase): + super().__init__(canvas, component_id, param) self.chat_mdl = LLMBundle(self._canvas.get_tenant_id(), TenantLLMService.llm_id2llm_type(self._param.llm_id), self._param.llm_id, max_retries=self._param.max_retries, retry_interval=self._param.delay_after_error @@ -206,7 +205,7 @@ class LLM(ComponentBase): for txt in self.chat_mdl.chat_streamly(msg[0]["content"], msg[1:], self._param.gen_conf(), images=self.imgs, **kwargs): yield delta(txt) - @timeout(os.environ.get("COMPONENT_EXEC_TIMEOUT", 10*60)) + @timeout(int(os.environ.get("COMPONENT_EXEC_TIMEOUT", 10*60))) def _invoke(self, **kwargs): def clean_formated_answer(ans: str) -> str: ans = re.sub(r"^.*", "", ans, flags=re.DOTALL) @@ -214,7 +213,7 @@ class LLM(ComponentBase): return re.sub(r"```\n*$", "", ans, flags=re.DOTALL) prompt, msg, _ = self._prepare_prompt_variables() - error = "" + error: str = "" if self._param.output_structure: prompt += "\nThe output MUST follow this JSON format:\n"+json.dumps(self._param.output_structure, ensure_ascii=False, indent=2) diff --git a/agent/component/message.py b/agent/component/message.py index b991ad21d..3569065e5 100644 --- a/agent/component/message.py +++ b/agent/component/message.py @@ -49,7 +49,7 @@ class MessageParam(ComponentParamBase): class Message(ComponentBase): component_name = "Message" - def get_kwargs(self, script:str, kwargs:dict = {}, delimeter:str=None) -> tuple[str, dict[str, str | list | Any]]: + def get_kwargs(self, script:str, kwargs:dict = {}, delimiter:str=None) -> tuple[str, dict[str, str | list | Any]]: for k,v in self.get_input_elements_from_text(script).items(): if k in kwargs: continue @@ -60,8 +60,8 @@ class Message(ComponentBase): if isinstance(v, partial): for t in v(): ans += t - elif isinstance(v, list) and delimeter: - ans = delimeter.join([str(vv) for vv in v]) + elif isinstance(v, list) and delimiter: + ans = delimiter.join([str(vv) for vv in v]) elif not isinstance(v, str): try: ans = json.dumps(v, ensure_ascii=False) @@ -127,7 +127,7 @@ class Message(ComponentBase): ] return any([re.search(p, content) for p in patt]) - @timeout(os.environ.get("COMPONENT_EXEC_TIMEOUT", 10*60)) + @timeout(int(os.environ.get("COMPONENT_EXEC_TIMEOUT", 10*60))) def _invoke(self, **kwargs): rand_cnt = random.choice(self._param.content) if self._param.stream and not self._is_jinjia2(rand_cnt): diff --git a/agent/component/string_transform.py b/agent/component/string_transform.py index 06ab7bf21..fe812c0a8 100644 --- a/agent/component/string_transform.py +++ b/agent/component/string_transform.py @@ -56,7 +56,7 @@ class StringTransform(Message, ABC): "type": "line" } for k, o in self.get_input_elements_from_text(self._param.script).items()} - @timeout(os.environ.get("COMPONENT_EXEC_TIMEOUT", 10*60)) + @timeout(int(os.environ.get("COMPONENT_EXEC_TIMEOUT", 10*60))) def _invoke(self, **kwargs): if self._param.method == "split": self._split(kwargs.get("line")) @@ -90,7 +90,7 @@ class StringTransform(Message, ABC): for k,v in kwargs.items(): if not v: v = "" - script = re.sub(k, v, script) + script = re.sub(k, lambda match: v, script) self.set_output("result", script) diff --git a/agent/component/switch.py b/agent/component/switch.py index 99685e9a7..8cbbde659 100644 --- a/agent/component/switch.py +++ b/agent/component/switch.py @@ -61,7 +61,7 @@ class SwitchParam(ComponentParamBase): class Switch(ComponentBase, ABC): component_name = "Switch" - @timeout(os.environ.get("COMPONENT_EXEC_TIMEOUT", 3)) + @timeout(int(os.environ.get("COMPONENT_EXEC_TIMEOUT", 3))) def _invoke(self, **kwargs): for cond in self._param.conditions: res = [] diff --git a/agent/tools/arxiv.py b/agent/tools/arxiv.py index be9715cc5..616afa31a 100644 --- a/agent/tools/arxiv.py +++ b/agent/tools/arxiv.py @@ -61,7 +61,7 @@ class ArXivParam(ToolParamBase): class ArXiv(ToolBase, ABC): component_name = "ArXiv" - @timeout(os.environ.get("COMPONENT_EXEC_TIMEOUT", 12)) + @timeout(int(os.environ.get("COMPONENT_EXEC_TIMEOUT", 12))) def _invoke(self, **kwargs): if not kwargs.get("query"): self.set_output("formalized_content", "") @@ -97,6 +97,6 @@ class ArXiv(ToolBase, ABC): def thoughts(self) -> str: return """ -Keywords: {} +Keywords: {} Looking for the most relevant articles. - """.format(self.get_input().get("query", "-_-!")) \ No newline at end of file + """.format(self.get_input().get("query", "-_-!")) diff --git a/agent/tools/base.py b/agent/tools/base.py index 0d946a696..e775615ac 100644 --- a/agent/tools/base.py +++ b/agent/tools/base.py @@ -22,7 +22,7 @@ from typing import TypedDict, List, Any from agent.component.base import ComponentParamBase, ComponentBase from api.utils import hash_str2int from rag.llm.chat_model import ToolCallSession -from rag.prompts.prompts import kb_prompt +from rag.prompts.generator import kb_prompt from rag.utils.mcp_tool_call_conn import MCPToolCallSession from timeit import default_timer as timer diff --git a/agent/tools/code_exec.py b/agent/tools/code_exec.py index b94dc8d5e..59181fab5 100644 --- a/agent/tools/code_exec.py +++ b/agent/tools/code_exec.py @@ -129,7 +129,7 @@ module.exports = { main }; class CodeExec(ToolBase, ABC): component_name = "CodeExec" - @timeout(os.environ.get("COMPONENT_EXEC_TIMEOUT", 10*60)) + @timeout(int(os.environ.get("COMPONENT_EXEC_TIMEOUT", 10*60))) def _invoke(self, **kwargs): lang = kwargs.get("lang", self._param.lang) script = kwargs.get("script", self._param.script) @@ -157,7 +157,7 @@ class CodeExec(ToolBase, ABC): try: resp = requests.post(url=f"http://{settings.SANDBOX_HOST}:9385/run", json=code_req, timeout=os.environ.get("COMPONENT_EXEC_TIMEOUT", 10*60)) - logging.info(f"http://{settings.SANDBOX_HOST}:9385/run, code_req: {code_req}, resp.status_code {resp.status_code}:") + logging.info(f"http://{settings.SANDBOX_HOST}:9385/run", code_req, resp.status_code) if resp.status_code != 200: resp.raise_for_status() body = resp.json() diff --git a/agent/tools/duckduckgo.py b/agent/tools/duckduckgo.py index 34c2e66ec..0315d6971 100644 --- a/agent/tools/duckduckgo.py +++ b/agent/tools/duckduckgo.py @@ -73,7 +73,7 @@ class DuckDuckGoParam(ToolParamBase): class DuckDuckGo(ToolBase, ABC): component_name = "DuckDuckGo" - @timeout(os.environ.get("COMPONENT_EXEC_TIMEOUT", 12)) + @timeout(int(os.environ.get("COMPONENT_EXEC_TIMEOUT", 12))) def _invoke(self, **kwargs): if not kwargs.get("query"): self.set_output("formalized_content", "") @@ -115,6 +115,6 @@ class DuckDuckGo(ToolBase, ABC): def thoughts(self) -> str: return """ -Keywords: {} +Keywords: {} Looking for the most relevant articles. - """.format(self.get_input().get("query", "-_-!")) \ No newline at end of file + """.format(self.get_input().get("query", "-_-!")) diff --git a/agent/tools/email.py b/agent/tools/email.py index e9f6eaed8..ab6cc6ea6 100644 --- a/agent/tools/email.py +++ b/agent/tools/email.py @@ -98,8 +98,8 @@ class EmailParam(ToolParamBase): class Email(ToolBase, ABC): component_name = "Email" - - @timeout(os.environ.get("COMPONENT_EXEC_TIMEOUT", 60)) + + @timeout(int(os.environ.get("COMPONENT_EXEC_TIMEOUT", 60))) def _invoke(self, **kwargs): if not kwargs.get("to_email"): self.set_output("success", False) @@ -212,4 +212,4 @@ class Email(ToolBase, ABC): To: {} Subject: {} Your email is on its way—sit tight! -""".format(inputs.get("to_email", "-_-!"), inputs.get("subject", "-_-!")) \ No newline at end of file +""".format(inputs.get("to_email", "-_-!"), inputs.get("subject", "-_-!")) diff --git a/agent/tools/exesql.py b/agent/tools/exesql.py index c4bc4fdb4..2e1cc24bf 100644 --- a/agent/tools/exesql.py +++ b/agent/tools/exesql.py @@ -53,7 +53,7 @@ class ExeSQLParam(ToolParamBase): self.max_records = 1024 def check(self): - self.check_valid_value(self.db_type, "Choose DB type", ['mysql', 'postgres', 'mariadb', 'mssql']) + self.check_valid_value(self.db_type, "Choose DB type", ['mysql', 'postgres', 'mariadb', 'mssql', 'IBM DB2']) self.check_empty(self.database, "Database name") self.check_empty(self.username, "database username") self.check_empty(self.host, "IP Address") @@ -78,7 +78,7 @@ class ExeSQLParam(ToolParamBase): class ExeSQL(ToolBase, ABC): component_name = "ExeSQL" - @timeout(os.environ.get("COMPONENT_EXEC_TIMEOUT", 60)) + @timeout(int(os.environ.get("COMPONENT_EXEC_TIMEOUT", 60))) def _invoke(self, **kwargs): def convert_decimals(obj): @@ -123,6 +123,55 @@ class ExeSQL(ToolBase, ABC): r'PWD=' + self._param.password ) db = pyodbc.connect(conn_str) + elif self._param.db_type == 'IBM DB2': + import ibm_db + conn_str = ( + f"DATABASE={self._param.database};" + f"HOSTNAME={self._param.host};" + f"PORT={self._param.port};" + f"PROTOCOL=TCPIP;" + f"UID={self._param.username};" + f"PWD={self._param.password};" + ) + try: + conn = ibm_db.connect(conn_str, "", "") + except Exception as e: + raise Exception("Database Connection Failed! \n" + str(e)) + + sql_res = [] + formalized_content = [] + for single_sql in sqls: + single_sql = single_sql.replace("```", "").strip() + if not single_sql: + continue + single_sql = re.sub(r"\[ID:[0-9]+\]", "", single_sql) + + stmt = ibm_db.exec_immediate(conn, single_sql) + rows = [] + row = ibm_db.fetch_assoc(stmt) + while row and len(rows) < self._param.max_records: + rows.append(row) + row = ibm_db.fetch_assoc(stmt) + + if not rows: + sql_res.append({"content": "No record in the database!"}) + continue + + df = pd.DataFrame(rows) + for col in df.columns: + if pd.api.types.is_datetime64_any_dtype(df[col]): + df[col] = df[col].dt.strftime("%Y-%m-%d") + + df = df.where(pd.notnull(df), None) + + sql_res.append(convert_decimals(df.to_dict(orient="records"))) + formalized_content.append(df.to_markdown(index=False, floatfmt=".6f")) + + ibm_db.close(conn) + + self.set_output("json", sql_res) + self.set_output("formalized_content", "\n\n".join(formalized_content)) + return self.output("formalized_content") try: cursor = db.cursor() except Exception as e: @@ -150,6 +199,8 @@ class ExeSQL(ToolBase, ABC): if pd.api.types.is_datetime64_any_dtype(single_res[col]): single_res[col] = single_res[col].dt.strftime('%Y-%m-%d') + single_res = single_res.where(pd.notnull(single_res), None) + sql_res.append(convert_decimals(single_res.to_dict(orient='records'))) formalized_content.append(single_res.to_markdown(index=False, floatfmt=".6f")) diff --git a/agent/tools/github.py b/agent/tools/github.py index d19a434b6..27cb1e346 100644 --- a/agent/tools/github.py +++ b/agent/tools/github.py @@ -57,7 +57,7 @@ class GitHubParam(ToolParamBase): class GitHub(ToolBase, ABC): component_name = "GitHub" - @timeout(os.environ.get("COMPONENT_EXEC_TIMEOUT", 12)) + @timeout(int(os.environ.get("COMPONENT_EXEC_TIMEOUT", 12))) def _invoke(self, **kwargs): if not kwargs.get("query"): self.set_output("formalized_content", "") @@ -88,4 +88,4 @@ class GitHub(ToolBase, ABC): assert False, self.output() def thoughts(self) -> str: - return "Scanning GitHub repos related to `{}`.".format(self.get_input().get("query", "-_-!")) \ No newline at end of file + return "Scanning GitHub repos related to `{}`.".format(self.get_input().get("query", "-_-!")) diff --git a/agent/tools/google.py b/agent/tools/google.py index f68b51f91..455038abe 100644 --- a/agent/tools/google.py +++ b/agent/tools/google.py @@ -116,7 +116,7 @@ class GoogleParam(ToolParamBase): class Google(ToolBase, ABC): component_name = "Google" - @timeout(os.environ.get("COMPONENT_EXEC_TIMEOUT", 12)) + @timeout(int(os.environ.get("COMPONENT_EXEC_TIMEOUT", 12))) def _invoke(self, **kwargs): if not kwargs.get("q"): self.set_output("formalized_content", "") @@ -154,6 +154,6 @@ class Google(ToolBase, ABC): def thoughts(self) -> str: return """ -Keywords: {} +Keywords: {} Looking for the most relevant articles. - """.format(self.get_input().get("query", "-_-!")) \ No newline at end of file + """.format(self.get_input().get("query", "-_-!")) diff --git a/agent/tools/googlescholar.py b/agent/tools/googlescholar.py index cfc32d63e..bf906da4b 100644 --- a/agent/tools/googlescholar.py +++ b/agent/tools/googlescholar.py @@ -63,7 +63,7 @@ class GoogleScholarParam(ToolParamBase): class GoogleScholar(ToolBase, ABC): component_name = "GoogleScholar" - @timeout(os.environ.get("COMPONENT_EXEC_TIMEOUT", 12)) + @timeout(int(os.environ.get("COMPONENT_EXEC_TIMEOUT", 12))) def _invoke(self, **kwargs): if not kwargs.get("query"): self.set_output("formalized_content", "") @@ -93,4 +93,4 @@ class GoogleScholar(ToolBase, ABC): assert False, self.output() def thoughts(self) -> str: - return "Looking for scholarly papers on `{}`,” prioritising reputable sources.".format(self.get_input().get("query", "-_-!")) \ No newline at end of file + return "Looking for scholarly papers on `{}`,” prioritising reputable sources.".format(self.get_input().get("query", "-_-!")) diff --git a/agent/tools/pubmed.py b/agent/tools/pubmed.py index c939b2fab..6dce92a9b 100644 --- a/agent/tools/pubmed.py +++ b/agent/tools/pubmed.py @@ -33,7 +33,7 @@ class PubMedParam(ToolParamBase): self.meta:ToolMeta = { "name": "pubmed_search", "description": """ -PubMed is an openly accessible, free database which includes primarily the MEDLINE database of references and abstracts on life sciences and biomedical topics. +PubMed is an openly accessible, free database which includes primarily the MEDLINE database of references and abstracts on life sciences and biomedical topics. In addition to MEDLINE, PubMed provides access to: - older references from the print version of Index Medicus, back to 1951 and earlier - references to some journals before they were indexed in Index Medicus and MEDLINE, for instance Science, BMJ, and Annals of Surgery @@ -69,7 +69,7 @@ In addition to MEDLINE, PubMed provides access to: class PubMed(ToolBase, ABC): component_name = "PubMed" - @timeout(os.environ.get("COMPONENT_EXEC_TIMEOUT", 12)) + @timeout(int(os.environ.get("COMPONENT_EXEC_TIMEOUT", 12))) def _invoke(self, **kwargs): if not kwargs.get("query"): self.set_output("formalized_content", "") @@ -105,4 +105,4 @@ class PubMed(ToolBase, ABC): assert False, self.output() def thoughts(self) -> str: - return "Looking for scholarly papers on `{}`,” prioritising reputable sources.".format(self.get_input().get("query", "-_-!")) \ No newline at end of file + return "Looking for scholarly papers on `{}`,” prioritising reputable sources.".format(self.get_input().get("query", "-_-!")) diff --git a/agent/tools/retrieval.py b/agent/tools/retrieval.py index a6e1e4fcd..24370f1ca 100644 --- a/agent/tools/retrieval.py +++ b/agent/tools/retrieval.py @@ -23,8 +23,7 @@ from api.db.services.llm_service import LLMBundle from api import settings from api.utils.api_utils import timeout from rag.app.tag import label_question -from rag.prompts import kb_prompt -from rag.prompts.prompts import cross_languages +from rag.prompts.generator import cross_languages, kb_prompt class RetrievalParam(ToolParamBase): @@ -75,7 +74,7 @@ class RetrievalParam(ToolParamBase): class Retrieval(ToolBase, ABC): component_name = "Retrieval" - @timeout(os.environ.get("COMPONENT_EXEC_TIMEOUT", 12)) + @timeout(int(os.environ.get("COMPONENT_EXEC_TIMEOUT", 12))) def _invoke(self, **kwargs): if not kwargs.get("query"): self.set_output("formalized_content", self._param.empty_response) @@ -163,13 +162,20 @@ class Retrieval(ToolBase, ABC): self.set_output("formalized_content", self._param.empty_response) return + # Format the chunks for JSON output (similar to how other tools do it) + json_output = kbinfos["chunks"].copy() + self._canvas.add_reference(kbinfos["chunks"], kbinfos["doc_aggs"]) form_cnt = "\n".join(kb_prompt(kbinfos, 200000, True)) + + # Set both formalized content and JSON output self.set_output("formalized_content", form_cnt) + self.set_output("json", json_output) + return form_cnt def thoughts(self) -> str: return """ -Keywords: {} +Keywords: {} Looking for the most relevant articles. - """.format(self.get_input().get("query", "-_-!")) \ No newline at end of file + """.format(self.get_input().get("query", "-_-!")) diff --git a/agent/tools/searxng.py b/agent/tools/searxng.py index 25a8c0e46..f8c30bfd1 100644 --- a/agent/tools/searxng.py +++ b/agent/tools/searxng.py @@ -77,7 +77,7 @@ class SearXNGParam(ToolParamBase): class SearXNG(ToolBase, ABC): component_name = "SearXNG" - @timeout(os.environ.get("COMPONENT_EXEC_TIMEOUT", 12)) + @timeout(int(os.environ.get("COMPONENT_EXEC_TIMEOUT", 12))) def _invoke(self, **kwargs): # Gracefully handle try-run without inputs query = kwargs.get("query") @@ -94,7 +94,6 @@ class SearXNG(ToolBase, ABC): last_e = "" for _ in range(self._param.max_retries+1): try: - # 构建搜索参数 search_params = { 'q': query, 'format': 'json', @@ -104,33 +103,29 @@ class SearXNG(ToolBase, ABC): 'pageno': 1 } - # 发送搜索请求 response = requests.get( f"{searxng_url}/search", params=search_params, timeout=10 ) response.raise_for_status() - + data = response.json() - - # 验证响应数据 + if not data or not isinstance(data, dict): raise ValueError("Invalid response from SearXNG") - + results = data.get("results", []) if not isinstance(results, list): raise ValueError("Invalid results format from SearXNG") - - # 限制结果数量 + results = results[:self._param.top_n] - - # 处理搜索结果 + self._retrieve_chunks(results, get_title=lambda r: r.get("title", ""), get_url=lambda r: r.get("url", ""), get_content=lambda r: r.get("content", "")) - + self.set_output("json", results) return self.output("formalized_content") @@ -151,6 +146,6 @@ class SearXNG(ToolBase, ABC): def thoughts(self) -> str: return """ -Keywords: {} +Keywords: {} Searching with SearXNG for relevant results... """.format(self.get_input().get("query", "-_-!")) diff --git a/agent/tools/tavily.py b/agent/tools/tavily.py index fa9a266ab..80203feec 100644 --- a/agent/tools/tavily.py +++ b/agent/tools/tavily.py @@ -31,7 +31,7 @@ class TavilySearchParam(ToolParamBase): self.meta:ToolMeta = { "name": "tavily_search", "description": """ -Tavily is a search engine optimized for LLMs, aimed at efficient, quick and persistent search results. +Tavily is a search engine optimized for LLMs, aimed at efficient, quick and persistent search results. When searching: - Start with specific query which should focus on just a single aspect. - Number of keywords in query should be less than 5. @@ -101,7 +101,7 @@ When searching: class TavilySearch(ToolBase, ABC): component_name = "TavilySearch" - @timeout(os.environ.get("COMPONENT_EXEC_TIMEOUT", 12)) + @timeout(int(os.environ.get("COMPONENT_EXEC_TIMEOUT", 12))) def _invoke(self, **kwargs): if not kwargs.get("query"): self.set_output("formalized_content", "") @@ -136,7 +136,7 @@ class TavilySearch(ToolBase, ABC): def thoughts(self) -> str: return """ -Keywords: {} +Keywords: {} Looking for the most relevant articles. """.format(self.get_input().get("query", "-_-!")) @@ -199,7 +199,7 @@ class TavilyExtractParam(ToolParamBase): class TavilyExtract(ToolBase, ABC): component_name = "TavilyExtract" - @timeout(os.environ.get("COMPONENT_EXEC_TIMEOUT", 10*60)) + @timeout(int(os.environ.get("COMPONENT_EXEC_TIMEOUT", 10*60))) def _invoke(self, **kwargs): self.tavily_client = TavilyClient(api_key=self._param.api_key) last_e = None @@ -224,4 +224,4 @@ class TavilyExtract(ToolBase, ABC): assert False, self.output() def thoughts(self) -> str: - return "Opened {}—pulling out the main text…".format(self.get_input().get("urls", "-_-!")) \ No newline at end of file + return "Opened {}—pulling out the main text…".format(self.get_input().get("urls", "-_-!")) diff --git a/agent/tools/wencai.py b/agent/tools/wencai.py index 66213a08b..e2f8adefc 100644 --- a/agent/tools/wencai.py +++ b/agent/tools/wencai.py @@ -68,7 +68,7 @@ fund selection platform: through AI technology, is committed to providing excell class WenCai(ToolBase, ABC): component_name = "WenCai" - @timeout(os.environ.get("COMPONENT_EXEC_TIMEOUT", 12)) + @timeout(int(os.environ.get("COMPONENT_EXEC_TIMEOUT", 12))) def _invoke(self, **kwargs): if not kwargs.get("query"): self.set_output("report", "") @@ -111,4 +111,4 @@ class WenCai(ToolBase, ABC): assert False, self.output() def thoughts(self) -> str: - return "Pulling live financial data for `{}`.".format(self.get_input().get("query", "-_-!")) \ No newline at end of file + return "Pulling live financial data for `{}`.".format(self.get_input().get("query", "-_-!")) diff --git a/agent/tools/wikipedia.py b/agent/tools/wikipedia.py index 93bb6cfc0..83e3b13a8 100644 --- a/agent/tools/wikipedia.py +++ b/agent/tools/wikipedia.py @@ -64,7 +64,7 @@ class WikipediaParam(ToolParamBase): class Wikipedia(ToolBase, ABC): component_name = "Wikipedia" - @timeout(os.environ.get("COMPONENT_EXEC_TIMEOUT", 60)) + @timeout(int(os.environ.get("COMPONENT_EXEC_TIMEOUT", 60))) def _invoke(self, **kwargs): if not kwargs.get("query"): self.set_output("formalized_content", "") @@ -99,6 +99,6 @@ class Wikipedia(ToolBase, ABC): def thoughts(self) -> str: return """ -Keywords: {} +Keywords: {} Looking for the most relevant articles. - """.format(self.get_input().get("query", "-_-!")) \ No newline at end of file + """.format(self.get_input().get("query", "-_-!")) diff --git a/agent/tools/yahoofinance.py b/agent/tools/yahoofinance.py index 6e2dc0e62..9feea20af 100644 --- a/agent/tools/yahoofinance.py +++ b/agent/tools/yahoofinance.py @@ -72,7 +72,7 @@ class YahooFinanceParam(ToolParamBase): class YahooFinance(ToolBase, ABC): component_name = "YahooFinance" - @timeout(os.environ.get("COMPONENT_EXEC_TIMEOUT", 60)) + @timeout(int(os.environ.get("COMPONENT_EXEC_TIMEOUT", 60))) def _invoke(self, **kwargs): if not kwargs.get("stock_code"): self.set_output("report", "") @@ -111,4 +111,4 @@ class YahooFinance(ToolBase, ABC): assert False, self.output() def thoughts(self) -> str: - return "Pulling live financial data for `{}`.".format(self.get_input().get("stock_code", "-_-!")) \ No newline at end of file + return "Pulling live financial data for `{}`.".format(self.get_input().get("stock_code", "-_-!")) diff --git a/api/apps/__init__.py b/api/apps/__init__.py index fba5d20b2..db27dd509 100644 --- a/api/apps/__init__.py +++ b/api/apps/__init__.py @@ -27,7 +27,8 @@ from itsdangerous.url_safe import URLSafeTimedSerializer as Serializer from api.db import StatusEnum from api.db.db_models import close_connection from api.db.services import UserService -from api.utils import CustomJSONEncoder, commands +from api.utils.json import CustomJSONEncoder +from api.utils import commands from flask_mail import Mail from flask_session import Session diff --git a/api/apps/api_app.py b/api/apps/api_app.py index 8a5b29166..1bdb7c2f8 100644 --- a/api/apps/api_app.py +++ b/api/apps/api_app.py @@ -39,7 +39,7 @@ from api.utils.api_utils import server_error_response, get_data_error_result, ge from api.utils.file_utils import filename_type, thumbnail from rag.app.tag import label_question -from rag.prompts import keyword_extraction +from rag.prompts.generator import keyword_extraction from rag.utils.storage_factory import STORAGE_IMPL from api.db.services.canvas_service import UserCanvasService diff --git a/api/apps/canvas_app.py b/api/apps/canvas_app.py index c191b556c..c3d4dd824 100644 --- a/api/apps/canvas_app.py +++ b/api/apps/canvas_app.py @@ -100,7 +100,7 @@ def save(): def get(canvas_id): if not UserCanvasService.accessible(canvas_id, current_user.id): return get_data_error_result(message="canvas not found.") - e, c = UserCanvasService.get_by_tenant_id(canvas_id) + e, c = UserCanvasService.get_by_canvas_id(canvas_id) return get_json_result(data=c) @@ -243,7 +243,7 @@ def reset(): @manager.route("/upload/", methods=["POST"]) # noqa: F821 def upload(canvas_id): - e, cvs = UserCanvasService.get_by_tenant_id(canvas_id) + e, cvs = UserCanvasService.get_by_canvas_id(canvas_id) if not e: return get_data_error_result(message="canvas not found.") @@ -393,6 +393,22 @@ def test_db_connect(): cursor = db.cursor() cursor.execute("SELECT 1") cursor.close() + elif req["db_type"] == 'IBM DB2': + import ibm_db + conn_str = ( + f"DATABASE={req['database']};" + f"HOSTNAME={req['host']};" + f"PORT={req['port']};" + f"PROTOCOL=TCPIP;" + f"UID={req['username']};" + f"PWD={req['password']};" + ) + logging.info(conn_str) + conn = ibm_db.connect(conn_str, "", "") + stmt = ibm_db.exec_immediate(conn, "SELECT 1 FROM sysibm.sysdummy1") + ibm_db.fetch_assoc(stmt) + ibm_db.close(conn) + return get_json_result(data="Database Connection Successful!") else: return server_error_response("Unsupported database type.") if req["db_type"] != 'mssql': @@ -529,7 +545,7 @@ def sessions(canvas_id): @manager.route('/prompts', methods=['GET']) # noqa: F821 @login_required def prompts(): - from rag.prompts.prompts import ANALYZE_TASK_SYSTEM, ANALYZE_TASK_USER, NEXT_STEP, REFLECT, CITATION_PROMPT_TEMPLATE + from rag.prompts.generator import ANALYZE_TASK_SYSTEM, ANALYZE_TASK_USER, NEXT_STEP, REFLECT, CITATION_PROMPT_TEMPLATE return get_json_result(data={ "task_analysis": ANALYZE_TASK_SYSTEM +"\n\n"+ ANALYZE_TASK_USER, "plan_generation": NEXT_STEP, diff --git a/api/apps/chunk_app.py b/api/apps/chunk_app.py index 9b4c341b6..bfd80ea9f 100644 --- a/api/apps/chunk_app.py +++ b/api/apps/chunk_app.py @@ -33,8 +33,7 @@ from api.utils.api_utils import get_data_error_result, get_json_result, server_e from rag.app.qa import beAdoc, rmPrefix from rag.app.tag import label_question from rag.nlp import rag_tokenizer, search -from rag.prompts import cross_languages, keyword_extraction -from rag.prompts.prompts import gen_meta_filter +from rag.prompts.generator import gen_meta_filter, cross_languages, keyword_extraction from rag.settings import PAGERANK_FLD from rag.utils import rmSpace diff --git a/api/apps/conversation_app.py b/api/apps/conversation_app.py index 9ef4d6453..48b9a1568 100644 --- a/api/apps/conversation_app.py +++ b/api/apps/conversation_app.py @@ -15,7 +15,7 @@ # import json import re -import traceback +import logging from copy import deepcopy from flask import Response, request from flask_login import current_user, login_required @@ -29,8 +29,8 @@ from api.db.services.search_service import SearchService from api.db.services.tenant_llm_service import TenantLLMService from api.db.services.user_service import TenantService, UserTenantService from api.utils.api_utils import get_data_error_result, get_json_result, server_error_response, validate_request -from rag.prompts.prompt_template import load_prompt -from rag.prompts.prompts import chunks_format +from rag.prompts.template import load_prompt +from rag.prompts.generator import chunks_format @manager.route("/set", methods=["POST"]) # noqa: F821 @@ -226,7 +226,7 @@ def completion(): if not is_embedded: ConversationService.update_by_id(conv.id, conv.to_dict()) except Exception as e: - traceback.print_exc() + logging.exception(e) yield "data:" + json.dumps({"code": 500, "message": str(e), "data": {"answer": "**ERROR**: " + str(e), "reference": []}}, ensure_ascii=False) + "\n\n" yield "data:" + json.dumps({"code": 0, "message": "", "data": True}, ensure_ascii=False) + "\n\n" diff --git a/api/apps/document_app.py b/api/apps/document_app.py index 71ba84a92..2c118afda 100644 --- a/api/apps/document_app.py +++ b/api/apps/document_app.py @@ -577,7 +577,7 @@ def change_parser(): settings.docStoreConn.delete({"doc_id": doc.id}, search.index_name(tenant_id), doc.kb_id) try: - if req.get("pipeline_id"): + if "pipeline_id" in req: if doc.pipeline_id == req["pipeline_id"]: return get_json_result(data=True) DocumentService.update_by_id(doc.id, {"pipeline_id": req["pipeline_id"]}) diff --git a/api/apps/file_app.py b/api/apps/file_app.py index eeb66f6ec..a644b20b4 100644 --- a/api/apps/file_app.py +++ b/api/apps/file_app.py @@ -246,6 +246,8 @@ def rm(): return get_data_error_result(message="File or Folder not found!") if not file.tenant_id: return get_data_error_result(message="Tenant not found!") + if file.tenant_id != current_user.id: + return get_json_result(data=False, message='No authorization.', code=settings.RetCode.AUTHENTICATION_ERROR) if file.source_type == FileSource.KNOWLEDGEBASE: continue @@ -292,6 +294,8 @@ def rename(): e, file = FileService.get_by_id(req["file_id"]) if not e: return get_data_error_result(message="File not found!") + if file.tenant_id != current_user.id: + return get_json_result(data=False, message='No authorization.', code=settings.RetCode.AUTHENTICATION_ERROR) if file.type != FileType.FOLDER.value \ and pathlib.Path(req["name"].lower()).suffix != pathlib.Path( file.name.lower()).suffix: @@ -328,6 +332,8 @@ def get(file_id): e, file = FileService.get_by_id(file_id) if not e: return get_data_error_result(message="Document not found!") + if file.tenant_id != current_user.id: + return get_json_result(data=False, message='No authorization.', code=settings.RetCode.AUTHENTICATION_ERROR) blob = STORAGE_IMPL.get(file.parent_id, file.location) if not blob: @@ -367,6 +373,8 @@ def move(): return get_data_error_result(message="File or Folder not found!") if not file.tenant_id: return get_data_error_result(message="Tenant not found!") + if file.tenant_id != current_user.id: + return get_json_result(data=False, message='No authorization.', code=settings.RetCode.AUTHENTICATION_ERROR) fe, _ = FileService.get_by_id(parent_id) if not fe: return get_data_error_result(message="Parent Folder not found!") diff --git a/api/apps/sdk/doc.py b/api/apps/sdk/doc.py index 5009b6fee..8d5a413b0 100644 --- a/api/apps/sdk/doc.py +++ b/api/apps/sdk/doc.py @@ -40,7 +40,7 @@ from api.utils.api_utils import check_duplicate_ids, construct_json_result, get_ from rag.app.qa import beAdoc, rmPrefix from rag.app.tag import label_question from rag.nlp import rag_tokenizer, search -from rag.prompts import cross_languages, keyword_extraction +from rag.prompts.generator import cross_languages, keyword_extraction from rag.utils import rmSpace from rag.utils.storage_factory import STORAGE_IMPL diff --git a/api/apps/sdk/files.py b/api/apps/sdk/files.py index d8fd84aed..96efe208d 100644 --- a/api/apps/sdk/files.py +++ b/api/apps/sdk/files.py @@ -3,9 +3,11 @@ import re import flask from flask import request +from pathlib import Path from api.db.services.document_service import DocumentService from api.db.services.file2document_service import File2DocumentService +from api.db.services.knowledgebase_service import KnowledgebaseService from api.utils.api_utils import server_error_response, token_required from api.utils import get_uuid from api.db import FileType @@ -81,16 +83,16 @@ def upload(tenant_id): return get_json_result(data=False, message="Can't find this folder!", code=404) for file_obj in file_objs: - # 文件路径处理 + # Handle file path full_path = '/' + file_obj.filename file_obj_names = full_path.split('/') file_len = len(file_obj_names) - # 获取文件夹路径ID + # Get folder path ID file_id_list = FileService.get_id_list_by_id(pf_id, file_obj_names, 1, [pf_id]) len_id_list = len(file_id_list) - # 创建文件夹结构 + # Crete file folder if file_len != len_id_list: e, file = FileService.get_by_id(file_id_list[len_id_list - 1]) if not e: @@ -666,3 +668,71 @@ def move(tenant_id): return get_json_result(data=True) except Exception as e: return server_error_response(e) + +@manager.route('/file/convert', methods=['POST']) # noqa: F821 +@token_required +def convert(tenant_id): + req = request.json + kb_ids = req["kb_ids"] + file_ids = req["file_ids"] + file2documents = [] + + try: + files = FileService.get_by_ids(file_ids) + files_set = dict({file.id: file for file in files}) + for file_id in file_ids: + file = files_set[file_id] + if not file: + return get_json_result(message="File not found!", code=404) + file_ids_list = [file_id] + if file.type == FileType.FOLDER.value: + file_ids_list = FileService.get_all_innermost_file_ids(file_id, []) + for id in file_ids_list: + informs = File2DocumentService.get_by_file_id(id) + # delete + for inform in informs: + doc_id = inform.document_id + e, doc = DocumentService.get_by_id(doc_id) + if not e: + return get_json_result(message="Document not found!", code=404) + tenant_id = DocumentService.get_tenant_id(doc_id) + if not tenant_id: + return get_json_result(message="Tenant not found!", code=404) + if not DocumentService.remove_document(doc, tenant_id): + return get_json_result( + message="Database error (Document removal)!", code=404) + File2DocumentService.delete_by_file_id(id) + + # insert + for kb_id in kb_ids: + e, kb = KnowledgebaseService.get_by_id(kb_id) + if not e: + return get_json_result( + message="Can't find this knowledgebase!", code=404) + e, file = FileService.get_by_id(id) + if not e: + return get_json_result( + message="Can't find this file!", code=404) + + doc = DocumentService.insert({ + "id": get_uuid(), + "kb_id": kb.id, + "parser_id": FileService.get_parser(file.type, file.name, kb.parser_id), + "parser_config": kb.parser_config, + "created_by": tenant_id, + "type": file.type, + "name": file.name, + "suffix": Path(file.name).suffix.lstrip("."), + "location": file.location, + "size": file.size + }) + file2document = File2DocumentService.insert({ + "id": get_uuid(), + "file_id": id, + "document_id": doc.id, + }) + + file2documents.append(file2document.to_json()) + return get_json_result(data=file2documents) + except Exception as e: + return server_error_response(e) \ No newline at end of file diff --git a/api/apps/sdk/session.py b/api/apps/sdk/session.py index 8e4f5ee67..10b6e9752 100644 --- a/api/apps/sdk/session.py +++ b/api/apps/sdk/session.py @@ -38,9 +38,8 @@ from api.db.services.user_service import UserTenantService from api.utils import get_uuid from api.utils.api_utils import check_duplicate_ids, get_data_openai, get_error_data_result, get_json_result, get_result, server_error_response, token_required, validate_request from rag.app.tag import label_question -from rag.prompts import chunks_format -from rag.prompts.prompt_template import load_prompt -from rag.prompts.prompts import cross_languages, gen_meta_filter, keyword_extraction +from rag.prompts.template import load_prompt +from rag.prompts.generator import cross_languages, gen_meta_filter, keyword_extraction, chunks_format @manager.route("/chats//sessions", methods=["POST"]) # noqa: F821 diff --git a/api/apps/system_app.py b/api/apps/system_app.py index df17e4b57..89c0e01bd 100644 --- a/api/apps/system_app.py +++ b/api/apps/system_app.py @@ -37,7 +37,8 @@ from timeit import default_timer as timer from rag.utils.redis_conn import REDIS_CONN from flask import jsonify -from api.utils.health import run_health_checks +from api.utils.health_utils import run_health_checks + @manager.route("/version", methods=["GET"]) # noqa: F821 @login_required diff --git a/api/apps/user_app.py b/api/apps/user_app.py index 3668efe75..f99b7c112 100644 --- a/api/apps/user_app.py +++ b/api/apps/user_app.py @@ -34,7 +34,6 @@ from api.db.services.user_service import TenantService, UserService, UserTenantS from api.utils import ( current_timestamp, datetime_format, - decrypt, download_img, get_format_time, get_uuid, @@ -46,6 +45,7 @@ from api.utils.api_utils import ( server_error_response, validate_request, ) +from api.utils.crypt import decrypt @manager.route("/login", methods=["POST", "GET"]) # noqa: F821 @@ -98,7 +98,14 @@ def login(): return get_json_result(data=False, code=settings.RetCode.SERVER_ERROR, message="Fail to crypt password") user = UserService.query_user(email, password) - if user: + + if user and hasattr(user, 'is_active') and user.is_active == "0": + return get_json_result( + data=False, + code=settings.RetCode.FORBIDDEN, + message="This account has been disabled, please contact the administrator!", + ) + elif user: response_data = user.to_json() user.access_token = get_uuid() login_user(user) @@ -227,6 +234,9 @@ def oauth_callback(channel): # User exists, try to log in user = users[0] user.access_token = get_uuid() + if user and hasattr(user, 'is_active') and user.is_active == "0": + return redirect("/?error=user_inactive") + login_user(user) user.save() return redirect(f"/?auth={user.get_id()}") @@ -317,6 +327,8 @@ def github_callback(): # User has already registered, try to log in user = users[0] user.access_token = get_uuid() + if user and hasattr(user, 'is_active') and user.is_active == "0": + return redirect("/?error=user_inactive") login_user(user) user.save() return redirect("/?auth=%s" % user.get_id()) @@ -418,6 +430,8 @@ def feishu_callback(): # User has already registered, try to log in user = users[0] + if user and hasattr(user, 'is_active') and user.is_active == "0": + return redirect("/?error=user_inactive") user.access_token = get_uuid() login_user(user) user.save() diff --git a/api/common/README.md b/api/common/README.md new file mode 100644 index 000000000..02f630216 --- /dev/null +++ b/api/common/README.md @@ -0,0 +1,2 @@ +The python files in this directory are shared between service. They contain common utilities, models, and functions that can be used across various +services to ensure consistency and reduce code duplication. \ No newline at end of file diff --git a/api/common/base64.py b/api/common/base64.py new file mode 100644 index 000000000..2b37dd281 --- /dev/null +++ b/api/common/base64.py @@ -0,0 +1,21 @@ +# +# Copyright 2025 The InfiniFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +import base64 + +def encode_to_base64(input_string): + base64_encoded = base64.b64encode(input_string.encode('utf-8')) + return base64_encoded.decode('utf-8') \ No newline at end of file diff --git a/api/db/__init__.py b/api/db/__init__.py index c93932db8..6e924349c 100644 --- a/api/db/__init__.py +++ b/api/db/__init__.py @@ -23,6 +23,11 @@ class StatusEnum(Enum): INVALID = "0" +class ActiveEnum(Enum): + ACTIVE = "1" + INACTIVE = "0" + + class UserTenantRole(StrEnum): OWNER = 'owner' ADMIN = 'admin' @@ -111,7 +116,7 @@ class CanvasCategory(StrEnum): Agent = "agent_canvas" DataFlow = "dataflow_canvas" -VALID_CAVAS_CATEGORIES = {CanvasCategory.Agent, CanvasCategory.DataFlow} +VALID_CANVAS_CATEGORIES = {CanvasCategory.Agent, CanvasCategory.DataFlow} class MCPServerType(StrEnum): diff --git a/api/db/db_models.py b/api/db/db_models.py index 3d88c8b88..7f2e35497 100644 --- a/api/db/db_models.py +++ b/api/db/db_models.py @@ -26,12 +26,14 @@ from functools import wraps from flask_login import UserMixin from itsdangerous.url_safe import URLSafeTimedSerializer as Serializer -from peewee import BigIntegerField, BooleanField, CharField, CompositeKey, DateTimeField, Field, FloatField, IntegerField, Metadata, Model, TextField +from peewee import InterfaceError, OperationalError, BigIntegerField, BooleanField, CharField, CompositeKey, DateTimeField, Field, FloatField, IntegerField, Metadata, Model, TextField from playhouse.migrate import MySQLMigrator, PostgresqlMigrator, migrate from playhouse.pool import PooledMySQLDatabase, PooledPostgresqlDatabase from api import settings, utils from api.db import ParserType, SerializedType +from api.utils.json import json_dumps, json_loads +from api.utils.configs import deserialize_b64, serialize_b64 def singleton(cls, *args, **kw): @@ -70,12 +72,12 @@ class JSONField(LongTextField): def db_value(self, value): if value is None: value = self.default_value - return utils.json_dumps(value) + return json_dumps(value) def python_value(self, value): if not value: return self.default_value - return utils.json_loads(value, object_hook=self._object_hook, object_pairs_hook=self._object_pairs_hook) + return json_loads(value, object_hook=self._object_hook, object_pairs_hook=self._object_pairs_hook) class ListField(JSONField): @@ -91,21 +93,21 @@ class SerializedField(LongTextField): def db_value(self, value): if self._serialized_type == SerializedType.PICKLE: - return utils.serialize_b64(value, to_str=True) + return serialize_b64(value, to_str=True) elif self._serialized_type == SerializedType.JSON: if value is None: return None - return utils.json_dumps(value, with_type=True) + return json_dumps(value, with_type=True) else: raise ValueError(f"the serialized type {self._serialized_type} is not supported") def python_value(self, value): if self._serialized_type == SerializedType.PICKLE: - return utils.deserialize_b64(value) + return deserialize_b64(value) elif self._serialized_type == SerializedType.JSON: if value is None: return {} - return utils.json_loads(value, object_hook=self._object_hook, object_pairs_hook=self._object_pairs_hook) + return json_loads(value, object_hook=self._object_hook, object_pairs_hook=self._object_pairs_hook) else: raise ValueError(f"the serialized type {self._serialized_type} is not supported") @@ -250,36 +252,63 @@ class RetryingPooledMySQLDatabase(PooledMySQLDatabase): super().__init__(*args, **kwargs) def execute_sql(self, sql, params=None, commit=True): - from peewee import OperationalError - for attempt in range(self.max_retries + 1): try: return super().execute_sql(sql, params, commit) - except OperationalError as e: - if e.args[0] in (2013, 2006) and attempt < self.max_retries: - logging.warning(f"Lost connection (attempt {attempt + 1}/{self.max_retries}): {e}") + except (OperationalError, InterfaceError) as e: + error_codes = [2013, 2006] + error_messages = ['', 'Lost connection'] + should_retry = ( + (hasattr(e, 'args') and e.args and e.args[0] in error_codes) or + (str(e) in error_messages) or + (hasattr(e, '__class__') and e.__class__.__name__ == 'InterfaceError') + ) + + if should_retry and attempt < self.max_retries: + logging.warning( + f"Database connection issue (attempt {attempt+1}/{self.max_retries}): {e}" + ) self._handle_connection_loss() - time.sleep(self.retry_delay * (2**attempt)) + time.sleep(self.retry_delay * (2 ** attempt)) else: logging.error(f"DB execution failure: {e}") raise return None def _handle_connection_loss(self): - self.close_all() - self.connect() + # self.close_all() + # self.connect() + try: + self.close() + except Exception: + pass + try: + self.connect() + except Exception as e: + logging.error(f"Failed to reconnect: {e}") + time.sleep(0.1) + self.connect() def begin(self): - from peewee import OperationalError - for attempt in range(self.max_retries + 1): try: return super().begin() - except OperationalError as e: - if e.args[0] in (2013, 2006) and attempt < self.max_retries: - logging.warning(f"Lost connection during transaction (attempt {attempt + 1}/{self.max_retries})") + except (OperationalError, InterfaceError) as e: + error_codes = [2013, 2006] + error_messages = ['', 'Lost connection'] + + should_retry = ( + (hasattr(e, 'args') and e.args and e.args[0] in error_codes) or + (str(e) in error_messages) or + (hasattr(e, '__class__') and e.__class__.__name__ == 'InterfaceError') + ) + + if should_retry and attempt < self.max_retries: + logging.warning( + f"Lost connection during transaction (attempt {attempt+1}/{self.max_retries})" + ) self._handle_connection_loss() - time.sleep(self.retry_delay * (2**attempt)) + time.sleep(self.retry_delay * (2 ** attempt)) else: raise @@ -299,7 +328,16 @@ class BaseDataBase: def __init__(self): database_config = settings.DATABASE.copy() db_name = database_config.pop("name") - self.database_connection = PooledDatabase[settings.DATABASE_TYPE.upper()].value(db_name, **database_config) + + pool_config = { + 'max_retries': 5, + 'retry_delay': 1, + } + database_config.update(pool_config) + self.database_connection = PooledDatabase[settings.DATABASE_TYPE.upper()].value( + db_name, **database_config + ) + # self.database_connection = PooledDatabase[settings.DATABASE_TYPE.upper()].value(db_name, **database_config) logging.info("init database on cluster mode successfully") diff --git a/api/db/init_data.py b/api/db/init_data.py index d462a7b2b..39b87d06f 100644 --- a/api/db/init_data.py +++ b/api/db/init_data.py @@ -14,7 +14,6 @@ # limitations under the License. # import logging -import base64 import json import os import time @@ -32,11 +31,7 @@ from api.db.services.llm_service import LLMService, LLMBundle, get_init_tenant_l from api.db.services.user_service import TenantService, UserTenantService from api import settings from api.utils.file_utils import get_project_base_directory - - -def encode_to_base64(input_string): - base64_encoded = base64.b64encode(input_string.encode('utf-8')) - return base64_encoded.decode('utf-8') +from api.common.base64 import encode_to_base64 def init_superuser(): @@ -144,8 +139,9 @@ def init_llm_factory(): except Exception: pass break + doc_count = DocumentService.get_all_kb_doc_count() for kb_id in KnowledgebaseService.get_all_ids(): - KnowledgebaseService.update_document_number_in_init(kb_id=kb_id, doc_num=DocumentService.get_kb_doc_count(kb_id)) + KnowledgebaseService.update_document_number_in_init(kb_id=kb_id, doc_num=doc_count.get(kb_id, 0)) diff --git a/api/db/joint_services/__init__.py b/api/db/joint_services/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/api/db/joint_services/user_account_service.py b/api/db/joint_services/user_account_service.py new file mode 100644 index 000000000..61f23ccd5 --- /dev/null +++ b/api/db/joint_services/user_account_service.py @@ -0,0 +1,327 @@ +# +# Copyright 2024 The InfiniFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import logging +import uuid + +from api import settings +from api.utils.api_utils import group_by +from api.db import FileType, UserTenantRole, ActiveEnum +from api.db.services.api_service import APITokenService, API4ConversationService +from api.db.services.canvas_service import UserCanvasService +from api.db.services.conversation_service import ConversationService +from api.db.services.dialog_service import DialogService +from api.db.services.document_service import DocumentService +from api.db.services.file2document_service import File2DocumentService +from api.db.services.knowledgebase_service import KnowledgebaseService +from api.db.services.langfuse_service import TenantLangfuseService +from api.db.services.llm_service import get_init_tenant_llm +from api.db.services.file_service import FileService +from api.db.services.mcp_server_service import MCPServerService +from api.db.services.search_service import SearchService +from api.db.services.task_service import TaskService +from api.db.services.tenant_llm_service import TenantLLMService +from api.db.services.user_canvas_version import UserCanvasVersionService +from api.db.services.user_service import TenantService, UserService, UserTenantService +from rag.utils.storage_factory import STORAGE_IMPL +from rag.nlp import search + + +def create_new_user(user_info: dict) -> dict: + """ + Add a new user, and create tenant, tenant llm, file folder for new user. + :param user_info: { + "email": , + "nickname": , + "password": , + "login_channel": , + "is_superuser": , + } + :return: { + "success": , + "user_info": , # if true, return user_info + } + """ + # generate user_id and access_token for user + user_id = uuid.uuid1().hex + user_info['id'] = user_id + user_info['access_token'] = uuid.uuid1().hex + # construct tenant info + tenant = { + "id": user_id, + "name": user_info["nickname"] + "‘s Kingdom", + "llm_id": settings.CHAT_MDL, + "embd_id": settings.EMBEDDING_MDL, + "asr_id": settings.ASR_MDL, + "parser_ids": settings.PARSERS, + "img2txt_id": settings.IMAGE2TEXT_MDL, + "rerank_id": settings.RERANK_MDL, + } + usr_tenant = { + "tenant_id": user_id, + "user_id": user_id, + "invited_by": user_id, + "role": UserTenantRole.OWNER, + } + # construct file folder info + file_id = uuid.uuid1().hex + file = { + "id": file_id, + "parent_id": file_id, + "tenant_id": user_id, + "created_by": user_id, + "name": "/", + "type": FileType.FOLDER.value, + "size": 0, + "location": "", + } + try: + tenant_llm = get_init_tenant_llm(user_id) + + if not UserService.save(**user_info): + return {"success": False} + + TenantService.insert(**tenant) + UserTenantService.insert(**usr_tenant) + TenantLLMService.insert_many(tenant_llm) + FileService.insert(file) + + return { + "success": True, + "user_info": user_info, + } + + except Exception as create_error: + logging.exception(create_error) + # rollback + try: + TenantService.delete_by_id(user_id) + except Exception as e: + logging.exception(e) + try: + u = UserTenantService.query(tenant_id=user_id) + if u: + UserTenantService.delete_by_id(u[0].id) + except Exception as e: + logging.exception(e) + try: + TenantLLMService.delete_by_tenant_id(user_id) + except Exception as e: + logging.exception(e) + try: + FileService.delete_by_id(file["id"]) + except Exception as e: + logging.exception(e) + # delete user row finally + try: + UserService.delete_by_id(user_id) + except Exception as e: + logging.exception(e) + # reraise + raise create_error + + +def delete_user_data(user_id: str) -> dict: + # use user_id to delete + usr = UserService.filter_by_id(user_id) + if not usr: + return {"success": False, "message": f"{user_id} can't be found."} + # check is inactive and not admin + if usr.is_active == ActiveEnum.ACTIVE.value: + return {"success": False, "message": f"{user_id} is active and can't be deleted."} + if usr.is_superuser: + return {"success": False, "message": "Can't delete the super user."} + # tenant info + tenants = UserTenantService.get_user_tenant_relation_by_user_id(usr.id) + owned_tenant = [t for t in tenants if t["role"] == UserTenantRole.OWNER.value] + + done_msg = '' + try: + # step1. delete owned tenant info + if owned_tenant: + done_msg += "Start to delete owned tenant.\n" + tenant_id = owned_tenant[0]["tenant_id"] + kb_ids = KnowledgebaseService.get_kb_ids(usr.id) + # step1.1 delete knowledgebase related file and info + if kb_ids: + # step1.1.1 delete files in storage, remove bucket + for kb_id in kb_ids: + if STORAGE_IMPL.bucket_exists(kb_id): + STORAGE_IMPL.remove_bucket(kb_id) + done_msg += f"- Removed {len(kb_ids)} dataset's buckets.\n" + # step1.1.2 delete file and document info in db + doc_ids = DocumentService.get_all_doc_ids_by_kb_ids(kb_ids) + if doc_ids: + doc_delete_res = DocumentService.delete_by_ids([i["id"] for i in doc_ids]) + done_msg += f"- Deleted {doc_delete_res} document records.\n" + task_delete_res = TaskService.delete_by_doc_ids([i["id"] for i in doc_ids]) + done_msg += f"- Deleted {task_delete_res} task records.\n" + file_ids = FileService.get_all_file_ids_by_tenant_id(usr.id) + if file_ids: + file_delete_res = FileService.delete_by_ids([f["id"] for f in file_ids]) + done_msg += f"- Deleted {file_delete_res} file records.\n" + if doc_ids or file_ids: + file2doc_delete_res = File2DocumentService.delete_by_document_ids_or_file_ids( + [i["id"] for i in doc_ids], + [f["id"] for f in file_ids] + ) + done_msg += f"- Deleted {file2doc_delete_res} document-file relation records.\n" + # step1.1.3 delete chunk in es + r = settings.docStoreConn.delete({"kb_id": kb_ids}, + search.index_name(tenant_id), kb_ids) + done_msg += f"- Deleted {r} chunk records.\n" + kb_delete_res = KnowledgebaseService.delete_by_ids(kb_ids) + done_msg += f"- Deleted {kb_delete_res} knowledgebase records.\n" + # step1.1.4 delete agents + agent_delete_res = delete_user_agents(usr.id) + done_msg += f"- Deleted {agent_delete_res['agents_deleted_count']} agent, {agent_delete_res['version_deleted_count']} versions records.\n" + # step1.1.5 delete dialogs + dialog_delete_res = delete_user_dialogs(usr.id) + done_msg += f"- Deleted {dialog_delete_res['dialogs_deleted_count']} dialogs, {dialog_delete_res['conversations_deleted_count']} conversations, {dialog_delete_res['api_token_deleted_count']} api tokens, {dialog_delete_res['api4conversation_deleted_count']} api4conversations.\n" + # step1.1.6 delete mcp server + mcp_delete_res = MCPServerService.delete_by_tenant_id(usr.id) + done_msg += f"- Deleted {mcp_delete_res} MCP server.\n" + # step1.1.7 delete search + search_delete_res = SearchService.delete_by_tenant_id(usr.id) + done_msg += f"- Deleted {search_delete_res} search records.\n" + # step1.2 delete tenant_llm and tenant_langfuse + llm_delete_res = TenantLLMService.delete_by_tenant_id(tenant_id) + done_msg += f"- Deleted {llm_delete_res} tenant-LLM records.\n" + langfuse_delete_res = TenantLangfuseService.delete_ty_tenant_id(tenant_id) + done_msg += f"- Deleted {langfuse_delete_res} langfuse records.\n" + # step1.3 delete own tenant + tenant_delete_res = TenantService.delete_by_id(tenant_id) + done_msg += f"- Deleted {tenant_delete_res} tenant.\n" + # step2 delete user-tenant relation + if tenants: + # step2.1 delete docs and files in joined team + joined_tenants = [t for t in tenants if t["role"] == UserTenantRole.NORMAL.value] + if joined_tenants: + done_msg += "Start to delete data in joined tenants.\n" + created_documents = DocumentService.get_all_docs_by_creator_id(usr.id) + if created_documents: + # step2.1.1 delete files + doc_file_info = File2DocumentService.get_by_document_ids([d['id'] for d in created_documents]) + created_files = FileService.get_by_ids([f['file_id'] for f in doc_file_info]) + if created_files: + # step2.1.1.1 delete file in storage + for f in created_files: + STORAGE_IMPL.rm(f.parent_id, f.location) + done_msg += f"- Deleted {len(created_files)} uploaded file.\n" + # step2.1.1.2 delete file record + file_delete_res = FileService.delete_by_ids([f.id for f in created_files]) + done_msg += f"- Deleted {file_delete_res} file records.\n" + # step2.1.2 delete document-file relation record + file2doc_delete_res = File2DocumentService.delete_by_document_ids_or_file_ids( + [d['id'] for d in created_documents], + [f.id for f in created_files] + ) + done_msg += f"- Deleted {file2doc_delete_res} document-file relation records.\n" + # step2.1.3 delete chunks + doc_groups = group_by(created_documents, "tenant_id") + kb_grouped_doc = {k: group_by(v, "kb_id") for k, v in doc_groups.items()} + # chunks in {'tenant_id': {'kb_id': [{'id': doc_id}]}} structure + chunk_delete_res = 0 + kb_doc_info = {} + for _tenant_id, kb_doc in kb_grouped_doc.items(): + for _kb_id, docs in kb_doc.items(): + chunk_delete_res += settings.docStoreConn.delete( + {"doc_id": [d["id"] for d in docs]}, + search.index_name(_tenant_id), _kb_id + ) + # record doc info + if _kb_id in kb_doc_info.keys(): + kb_doc_info[_kb_id]['doc_num'] += 1 + kb_doc_info[_kb_id]['token_num'] += sum([d["token_num"] for d in docs]) + kb_doc_info[_kb_id]['chunk_num'] += sum([d["chunk_num"] for d in docs]) + else: + kb_doc_info[_kb_id] = { + 'doc_num': 1, + 'token_num': sum([d["token_num"] for d in docs]), + 'chunk_num': sum([d["chunk_num"] for d in docs]) + } + done_msg += f"- Deleted {chunk_delete_res} chunks.\n" + # step2.1.4 delete tasks + task_delete_res = TaskService.delete_by_doc_ids([d['id'] for d in created_documents]) + done_msg += f"- Deleted {task_delete_res} tasks.\n" + # step2.1.5 delete document record + doc_delete_res = DocumentService.delete_by_ids([d['id'] for d in created_documents]) + done_msg += f"- Deleted {doc_delete_res} documents.\n" + # step2.1.6 update knowledge base doc&chunk&token cnt + for kb_id, doc_num in kb_doc_info.items(): + KnowledgebaseService.decrease_document_num_in_delete(kb_id, doc_num) + + # step2.2 delete relation + user_tenant_delete_res = UserTenantService.delete_by_ids([t["id"] for t in tenants]) + done_msg += f"- Deleted {user_tenant_delete_res} user-tenant records.\n" + # step3 finally delete user + user_delete_res = UserService.delete_by_id(usr.id) + done_msg += f"- Deleted {user_delete_res} user.\nDelete done!" + + return {"success": True, "message": f"Successfully deleted user. Details:\n{done_msg}"} + + except Exception as e: + logging.exception(e) + return {"success": False, "message": f"Error: {str(e)}. Already done:\n{done_msg}"} + + +def delete_user_agents(user_id: str) -> dict: + """ + use user_id to delete + :return: { + "agents_deleted_count": 1, + "version_deleted_count": 2 + } + """ + agents_deleted_count, agents_version_deleted_count = 0, 0 + user_agents = UserCanvasService.get_all_agents_by_tenant_ids([user_id], user_id) + if user_agents: + agents_version = UserCanvasVersionService.get_all_canvas_version_by_canvas_ids([a['id'] for a in user_agents]) + agents_version_deleted_count = UserCanvasVersionService.delete_by_ids([v['id'] for v in agents_version]) + agents_deleted_count = UserCanvasService.delete_by_ids([a['id'] for a in user_agents]) + return { + "agents_deleted_count": agents_deleted_count, + "version_deleted_count": agents_version_deleted_count + } + + +def delete_user_dialogs(user_id: str) -> dict: + """ + use user_id to delete + :return: { + "dialogs_deleted_count": 1, + "conversations_deleted_count": 1, + "api_token_deleted_count": 2, + "api4conversation_deleted_count": 2 + } + """ + dialog_deleted_count, conversations_deleted_count, api_token_deleted_count, api4conversation_deleted_count = 0, 0, 0, 0 + user_dialogs = DialogService.get_all_dialogs_by_tenant_id(user_id) + if user_dialogs: + # delete conversation + conversations = ConversationService.get_all_conversation_by_dialog_ids([ud['id'] for ud in user_dialogs]) + conversations_deleted_count = ConversationService.delete_by_ids([c['id'] for c in conversations]) + # delete api token + api_token_deleted_count = APITokenService.delete_by_tenant_id(user_id) + # delete api for conversation + api4conversation_deleted_count = API4ConversationService.delete_by_dialog_ids([ud['id'] for ud in user_dialogs]) + # delete dialog at last + dialog_deleted_count = DialogService.delete_by_ids([ud['id'] for ud in user_dialogs]) + return { + "dialogs_deleted_count": dialog_deleted_count, + "conversations_deleted_count": conversations_deleted_count, + "api_token_deleted_count": api_token_deleted_count, + "api4conversation_deleted_count": api4conversation_deleted_count + } diff --git a/api/db/services/__init__.py b/api/db/services/__init__.py index 4b3af3ecf..ce937911f 100644 --- a/api/db/services/__init__.py +++ b/api/db/services/__init__.py @@ -19,7 +19,7 @@ from pathlib import PurePath from .user_service import UserService as UserService -def split_name_counter(filename: str) -> tuple[str, int | None]: +def _split_name_counter(filename: str) -> tuple[str, int | None]: """ Splits a filename into main part and counter (if present in parentheses). @@ -87,7 +87,7 @@ def duplicate_name(query_func, **kwargs) -> str: stem = path.stem suffix = path.suffix - main_part, counter = split_name_counter(stem) + main_part, counter = _split_name_counter(stem) counter = counter + 1 if counter else 1 new_name = f"{main_part}({counter}){suffix}" diff --git a/api/db/services/api_service.py b/api/db/services/api_service.py index 2fcbe0329..9a23547f7 100644 --- a/api/db/services/api_service.py +++ b/api/db/services/api_service.py @@ -35,6 +35,11 @@ class APITokenService(CommonService): cls.model.token == token ) + @classmethod + @DB.connection_context() + def delete_by_tenant_id(cls, tenant_id): + return cls.model.delete().where(cls.model.tenant_id == tenant_id).execute() + class API4ConversationService(CommonService): model = API4Conversation @@ -100,3 +105,8 @@ class API4ConversationService(CommonService): cls.model.create_date <= to_date, cls.model.source == source ).group_by(cls.model.create_date.truncate("day")).dicts() + + @classmethod + @DB.connection_context() + def delete_by_dialog_ids(cls, dialog_ids): + return cls.model.delete().where(cls.model.dialog_id.in_(dialog_ids)).execute() diff --git a/api/db/services/canvas_service.py b/api/db/services/canvas_service.py index bc07aec1c..f72c6f92a 100644 --- a/api/db/services/canvas_service.py +++ b/api/db/services/canvas_service.py @@ -18,7 +18,7 @@ import logging import time from uuid import uuid4 from agent.canvas import Canvas -from api.db import CanvasCategory +from api.db import CanvasCategory, TenantPermission from api.db.db_models import DB, CanvasTemplate, User, UserCanvas, API4Conversation from api.db.services.api_service import API4ConversationService from api.db.services.common_service import CommonService @@ -63,7 +63,38 @@ class UserCanvasService(CommonService): @classmethod @DB.connection_context() - def get_by_tenant_id(cls, pid): + def get_all_agents_by_tenant_ids(cls, tenant_ids, user_id): + # will get all permitted agents, be cautious + fields = [ + cls.model.id, + cls.model.title, + cls.model.permission, + cls.model.canvas_type, + cls.model.canvas_category + ] + # find team agents and owned agents + agents = cls.model.select(*fields).where( + (cls.model.user_id.in_(tenant_ids) & (cls.model.permission == TenantPermission.TEAM.value)) | ( + cls.model.user_id == user_id + ) + ) + # sort by create_time, asc + agents.order_by(cls.model.create_time.asc()) + # maybe cause slow query by deep paginate, optimize later + offset, limit = 0, 50 + res = [] + while True: + ag_batch = agents.offset(offset).limit(limit) + _temp = list(ag_batch.dicts()) + if not _temp: + break + res.extend(_temp) + offset += limit + return res + + @classmethod + @DB.connection_context() + def get_by_canvas_id(cls, pid): try: fields = [ @@ -138,7 +169,7 @@ class UserCanvasService(CommonService): @DB.connection_context() def accessible(cls, canvas_id, tenant_id): from api.db.services.user_service import UserTenantService - e, c = UserCanvasService.get_by_tenant_id(canvas_id) + e, c = UserCanvasService.get_by_canvas_id(canvas_id) if not e: return False diff --git a/api/db/services/common_service.py b/api/db/services/common_service.py index 7645b43d4..a5c871426 100644 --- a/api/db/services/common_service.py +++ b/api/db/services/common_service.py @@ -14,12 +14,24 @@ # limitations under the License. # from datetime import datetime - +from tenacity import retry, stop_after_attempt, wait_exponential, retry_if_exception_type import peewee +from peewee import InterfaceError, OperationalError from api.db.db_models import DB from api.utils import current_timestamp, datetime_format, get_uuid +def retry_db_operation(func): + @retry( + stop=stop_after_attempt(3), + wait=wait_exponential(multiplier=1, min=1, max=5), + retry=retry_if_exception_type((InterfaceError, OperationalError)), + before_sleep=lambda retry_state: print(f"RETRY {retry_state.attempt_number} TIMES"), + reraise=True, + ) + def wrapper(*args, **kwargs): + return func(*args, **kwargs) + return wrapper class CommonService: """Base service class that provides common database operations. @@ -202,6 +214,7 @@ class CommonService: @classmethod @DB.connection_context() + @retry_db_operation def update_by_id(cls, pid, data): # Update a single record by ID # Args: diff --git a/api/db/services/conversation_service.py b/api/db/services/conversation_service.py index 5e247c21c..53913f442 100644 --- a/api/db/services/conversation_service.py +++ b/api/db/services/conversation_service.py @@ -23,7 +23,7 @@ from api.db.services.dialog_service import DialogService, chat from api.utils import get_uuid import json -from rag.prompts import chunks_format +from rag.prompts.generator import chunks_format class ConversationService(CommonService): @@ -48,6 +48,21 @@ class ConversationService(CommonService): return list(sessions.dicts()) + @classmethod + @DB.connection_context() + def get_all_conversation_by_dialog_ids(cls, dialog_ids): + sessions = cls.model.select().where(cls.model.dialog_id.in_(dialog_ids)) + sessions.order_by(cls.model.create_time.asc()) + offset, limit = 0, 100 + res = [] + while True: + s_batch = sessions.offset(offset).limit(limit) + _temp = list(s_batch.dicts()) + if not _temp: + break + res.extend(_temp) + offset += limit + return res def structure_answer(conv, ans, message_id, session_id): reference = ans["reference"] diff --git a/api/db/services/dialog_service.py b/api/db/services/dialog_service.py index 3855c1ded..673000ff9 100644 --- a/api/db/services/dialog_service.py +++ b/api/db/services/dialog_service.py @@ -39,8 +39,8 @@ from graphrag.general.mind_map_extractor import MindMapExtractor from rag.app.resume import forbidden_select_fields4resume from rag.app.tag import label_question from rag.nlp.search import index_name -from rag.prompts import chunks_format, citation_prompt, cross_languages, full_question, kb_prompt, keyword_extraction, message_fit_in -from rag.prompts.prompts import gen_meta_filter, PROMPT_JINJA_ENV, ASK_SUMMARY +from rag.prompts.generator import chunks_format, citation_prompt, cross_languages, full_question, kb_prompt, keyword_extraction, message_fit_in, \ + gen_meta_filter, PROMPT_JINJA_ENV, ASK_SUMMARY from rag.utils import num_tokens_from_string, rmSpace from rag.utils.tavily_conn import Tavily @@ -159,6 +159,22 @@ class DialogService(CommonService): return list(dialogs.dicts()), count + @classmethod + @DB.connection_context() + def get_all_dialogs_by_tenant_id(cls, tenant_id): + fields = [cls.model.id] + dialogs = cls.model.select(*fields).where(cls.model.tenant_id == tenant_id) + dialogs.order_by(cls.model.create_time.asc()) + offset, limit = 0, 100 + res = [] + while True: + d_batch = dialogs.offset(offset).limit(limit) + _temp = list(d_batch.dicts()) + if not _temp: + break + res.extend(_temp) + offset += limit + return res def chat_solo(dialog, messages, stream=True): if TenantLLMService.llm_id2llm_type(dialog.llm_id) == "image2text": @@ -176,7 +192,7 @@ def chat_solo(dialog, messages, stream=True): delta_ans = "" for ans in chat_mdl.chat_streamly(prompt_config.get("system", ""), msg, dialog.llm_setting): answer = ans - delta_ans = ans[len(last_ans) :] + delta_ans = ans[len(last_ans):] if num_tokens_from_string(delta_ans) < 16: continue last_ans = answer @@ -261,13 +277,13 @@ def convert_conditions(metadata_condition): "not is": "≠" } return [ - { - "op": op_mapping.get(cond["comparison_operator"], cond["comparison_operator"]), - "key": cond["name"], - "value": cond["value"] - } - for cond in metadata_condition.get("conditions", []) -] + { + "op": op_mapping.get(cond["comparison_operator"], cond["comparison_operator"]), + "key": cond["name"], + "value": cond["value"] + } + for cond in metadata_condition.get("conditions", []) + ] def meta_filter(metas: dict, filters: list[dict]): @@ -284,19 +300,19 @@ def meta_filter(metas: dict, filters: list[dict]): value = str(value) for conds in [ - (operator == "contains", str(value).lower() in str(input).lower()), - (operator == "not contains", str(value).lower() not in str(input).lower()), - (operator == "start with", str(input).lower().startswith(str(value).lower())), - (operator == "end with", str(input).lower().endswith(str(value).lower())), - (operator == "empty", not input), - (operator == "not empty", input), - (operator == "=", input == value), - (operator == "≠", input != value), - (operator == ">", input > value), - (operator == "<", input < value), - (operator == "≥", input >= value), - (operator == "≤", input <= value), - ]: + (operator == "contains", str(value).lower() in str(input).lower()), + (operator == "not contains", str(value).lower() not in str(input).lower()), + (operator == "start with", str(input).lower().startswith(str(value).lower())), + (operator == "end with", str(input).lower().endswith(str(value).lower())), + (operator == "empty", not input), + (operator == "not empty", input), + (operator == "=", input == value), + (operator == "≠", input != value), + (operator == ">", input > value), + (operator == "<", input < value), + (operator == "≥", input >= value), + (operator == "≤", input <= value), + ]: try: if all(conds): ids.extend(docids) @@ -456,7 +472,8 @@ def chat(dialog, messages, stream=True, **kwargs): kbinfos["chunks"].extend(tav_res["chunks"]) kbinfos["doc_aggs"].extend(tav_res["doc_aggs"]) if prompt_config.get("use_kg"): - ck = settings.kg_retrievaler.retrieval(" ".join(questions), tenant_ids, dialog.kb_ids, embd_mdl, LLMBundle(dialog.tenant_id, LLMType.CHAT)) + ck = settings.kg_retrievaler.retrieval(" ".join(questions), tenant_ids, dialog.kb_ids, embd_mdl, + LLMBundle(dialog.tenant_id, LLMType.CHAT)) if ck["content_with_weight"]: kbinfos["chunks"].insert(0, ck) @@ -467,7 +484,8 @@ def chat(dialog, messages, stream=True, **kwargs): retrieval_ts = timer() if not knowledges and prompt_config.get("empty_response"): empty_res = prompt_config["empty_response"] - yield {"answer": empty_res, "reference": kbinfos, "prompt": "\n\n### Query:\n%s" % " ".join(questions), "audio_binary": tts(tts_mdl, empty_res)} + yield {"answer": empty_res, "reference": kbinfos, "prompt": "\n\n### Query:\n%s" % " ".join(questions), + "audio_binary": tts(tts_mdl, empty_res)} return {"answer": prompt_config["empty_response"], "reference": kbinfos} kwargs["knowledge"] = "\n------\n" + "\n\n------\n\n".join(knowledges) @@ -565,7 +583,8 @@ def chat(dialog, messages, stream=True, **kwargs): if langfuse_tracer: langfuse_generation = langfuse_tracer.start_generation( - trace_context=trace_context, name="chat", model=llm_model_config["llm_name"], input={"prompt": prompt, "prompt4citation": prompt4citation, "messages": msg} + trace_context=trace_context, name="chat", model=llm_model_config["llm_name"], + input={"prompt": prompt, "prompt4citation": prompt4citation, "messages": msg} ) if stream: @@ -575,12 +594,12 @@ def chat(dialog, messages, stream=True, **kwargs): if thought: ans = re.sub(r"^.*", "", ans, flags=re.DOTALL) answer = ans - delta_ans = ans[len(last_ans) :] + delta_ans = ans[len(last_ans):] if num_tokens_from_string(delta_ans) < 16: continue last_ans = answer yield {"answer": thought + answer, "reference": {}, "audio_binary": tts(tts_mdl, delta_ans)} - delta_ans = answer[len(last_ans) :] + delta_ans = answer[len(last_ans):] if delta_ans: yield {"answer": thought + answer, "reference": {}, "audio_binary": tts(tts_mdl, delta_ans)} yield decorate_answer(thought + answer) @@ -676,7 +695,9 @@ Please write the SQL, only SQL, without any other explanations or text. # compose Markdown table columns = ( - "|" + "|".join([re.sub(r"(/.*|([^()]+))", "", field_map.get(tbl["columns"][i]["name"], tbl["columns"][i]["name"])) for i in column_idx]) + ("|Source|" if docid_idx and docid_idx else "|") + "|" + "|".join( + [re.sub(r"(/.*|([^()]+))", "", field_map.get(tbl["columns"][i]["name"], tbl["columns"][i]["name"])) for i in column_idx]) + ( + "|Source|" if docid_idx and docid_idx else "|") ) line = "|" + "|".join(["------" for _ in range(len(column_idx))]) + ("|------|" if docid_idx and docid_idx else "") @@ -753,7 +774,7 @@ def ask(question, kb_ids, tenant_id, chat_llm_name=None, search_config={}): doc_ids = None kbinfos = retriever.retrieval( - question = question, + question=question, embd_mdl=embd_mdl, tenant_ids=tenant_ids, kb_ids=kb_ids, @@ -775,7 +796,8 @@ def ask(question, kb_ids, tenant_id, chat_llm_name=None, search_config={}): def decorate_answer(answer): nonlocal knowledges, kbinfos, sys_prompt - answer, idx = retriever.insert_citations(answer, [ck["content_ltks"] for ck in kbinfos["chunks"]], [ck["vector"] for ck in kbinfos["chunks"]], embd_mdl, tkweight=0.7, vtweight=0.3) + answer, idx = retriever.insert_citations(answer, [ck["content_ltks"] for ck in kbinfos["chunks"]], [ck["vector"] for ck in kbinfos["chunks"]], + embd_mdl, tkweight=0.7, vtweight=0.3) idx = set([kbinfos["chunks"][int(i)]["doc_id"] for i in idx]) recall_docs = [d for d in kbinfos["doc_aggs"] if d["doc_id"] in idx] if not recall_docs: diff --git a/api/db/services/document_service.py b/api/db/services/document_service.py index c091f57f3..af2b08f24 100644 --- a/api/db/services/document_service.py +++ b/api/db/services/document_service.py @@ -243,6 +243,46 @@ class DocumentService(CommonService): return int(query.scalar()) or 0 + @classmethod + @DB.connection_context() + def get_all_doc_ids_by_kb_ids(cls, kb_ids): + fields = [cls.model.id] + docs = cls.model.select(*fields).where(cls.model.kb_id.in_(kb_ids)) + docs.order_by(cls.model.create_time.asc()) + # maybe cause slow query by deep paginate, optimize later + offset, limit = 0, 100 + res = [] + while True: + doc_batch = docs.offset(offset).limit(limit) + _temp = list(doc_batch.dicts()) + if not _temp: + break + res.extend(_temp) + offset += limit + return res + + @classmethod + @DB.connection_context() + def get_all_docs_by_creator_id(cls, creator_id): + fields = [ + cls.model.id, cls.model.kb_id, cls.model.token_num, cls.model.chunk_num, Knowledgebase.tenant_id + ] + docs = cls.model.select(*fields).join(Knowledgebase, on=(Knowledgebase.id == cls.model.kb_id)).where( + cls.model.created_by == creator_id + ) + docs.order_by(cls.model.create_time.asc()) + # maybe cause slow query by deep paginate, optimize later + offset, limit = 0, 100 + res = [] + while True: + doc_batch = docs.offset(offset).limit(limit) + _temp = list(doc_batch.dicts()) + if not _temp: + break + res.extend(_temp) + offset += limit + return res + @classmethod @DB.connection_context() def insert(cls, doc): @@ -517,9 +557,6 @@ class DocumentService(CommonService): @classmethod @DB.connection_context() def get_doc_id_by_doc_name(cls, doc_name): - """ - highly rely on the strict deduplication guarantee from Document - """ fields = [cls.model.id] doc_id = cls.model.select(*fields) \ .where(cls.model.name == doc_name) @@ -681,8 +718,16 @@ class DocumentService(CommonService): @classmethod @DB.connection_context() def get_kb_doc_count(cls, kb_id): - return len(cls.model.select(cls.model.id).where( - cls.model.kb_id == kb_id).dicts()) + return cls.model.select().where(cls.model.kb_id == kb_id).count() + + @classmethod + @DB.connection_context() + def get_all_kb_doc_count(cls): + result = {} + rows = cls.model.select(cls.model.kb_id, fn.COUNT(cls.model.id).alias('count')).group_by(cls.model.kb_id) + for row in rows: + result[row.kb_id] = row.count + return result @classmethod @DB.connection_context() diff --git a/api/db/services/file2document_service.py b/api/db/services/file2document_service.py index c03dbf928..31d75accd 100644 --- a/api/db/services/file2document_service.py +++ b/api/db/services/file2document_service.py @@ -38,6 +38,12 @@ class File2DocumentService(CommonService): objs = cls.model.select().where(cls.model.document_id == document_id) return objs + @classmethod + @DB.connection_context() + def get_by_document_ids(cls, document_ids): + objs = cls.model.select().where(cls.model.document_id.in_(document_ids)) + return list(objs.dicts()) + @classmethod @DB.connection_context() def insert(cls, obj): @@ -50,6 +56,15 @@ class File2DocumentService(CommonService): def delete_by_file_id(cls, file_id): return cls.model.delete().where(cls.model.file_id == file_id).execute() + @classmethod + @DB.connection_context() + def delete_by_document_ids_or_file_ids(cls, document_ids, file_ids): + if not document_ids: + return cls.model.delete().where(cls.model.file_id.in_(file_ids)).execute() + elif not file_ids: + return cls.model.delete().where(cls.model.document_id.in_(document_ids)).execute() + return cls.model.delete().where(cls.model.document_id.in_(document_ids) | cls.model.file_id.in_(file_ids)).execute() + @classmethod @DB.connection_context() def delete_by_document_id(cls, doc_id): diff --git a/api/db/services/file_service.py b/api/db/services/file_service.py index f2a7e5dc2..7c8d91de2 100644 --- a/api/db/services/file_service.py +++ b/api/db/services/file_service.py @@ -161,6 +161,23 @@ class FileService(CommonService): result_ids.append(folder_id) return result_ids + @classmethod + @DB.connection_context() + def get_all_file_ids_by_tenant_id(cls, tenant_id): + fields = [cls.model.id] + files = cls.model.select(*fields).where(cls.model.tenant_id == tenant_id) + files.order_by(cls.model.create_time.asc()) + offset, limit = 0, 100 + res = [] + while True: + file_batch = files.offset(offset).limit(limit) + _temp = list(file_batch.dicts()) + if not _temp: + break + res.extend(_temp) + offset += limit + return res + @classmethod @DB.connection_context() def create_folder(cls, file, parent_id, name, count): diff --git a/api/db/services/knowledgebase_service.py b/api/db/services/knowledgebase_service.py index f80dca04f..492c24510 100644 --- a/api/db/services/knowledgebase_service.py +++ b/api/db/services/knowledgebase_service.py @@ -18,7 +18,7 @@ from datetime import datetime from peewee import fn, JOIN from api.db import StatusEnum, TenantPermission -from api.db.db_models import DB, Document, Knowledgebase, Tenant, User, UserTenant, UserCanvas +from api.db.db_models import DB, Document, Knowledgebase, User, UserTenant, UserCanvas from api.db.services.common_service import CommonService from api.utils import current_timestamp, datetime_format @@ -190,6 +190,41 @@ class KnowledgebaseService(CommonService): return list(kbs.dicts()), count + @classmethod + @DB.connection_context() + def get_all_kb_by_tenant_ids(cls, tenant_ids, user_id): + # will get all permitted kb, be cautious. + fields = [ + cls.model.name, + cls.model.language, + cls.model.permission, + cls.model.doc_num, + cls.model.token_num, + cls.model.chunk_num, + cls.model.status, + cls.model.create_date, + cls.model.update_date + ] + # find team kb and owned kb + kbs = cls.model.select(*fields).where( + (cls.model.tenant_id.in_(tenant_ids) & (cls.model.permission ==TenantPermission.TEAM.value)) | ( + cls.model.tenant_id == user_id + ) + ) + # sort by create_time asc + kbs.order_by(cls.model.create_time.asc()) + # maybe cause slow query by deep paginate, optimize later. + offset, limit = 0, 50 + res = [] + while True: + kb_batch = kbs.offset(offset).limit(limit) + _temp = list(kb_batch.dicts()) + if not _temp: + break + res.extend(_temp) + offset += limit + return res + @classmethod @DB.connection_context() def get_kb_ids(cls, tenant_id): @@ -226,7 +261,7 @@ class KnowledgebaseService(CommonService): cls.model.chunk_num, cls.model.parser_id, cls.model.pipeline_id, - UserCanvas.title, + UserCanvas.title.alias("pipeline_name"), UserCanvas.avatar.alias("pipeline_avatar"), cls.model.parser_config, cls.model.pagerank, @@ -240,16 +275,14 @@ class KnowledgebaseService(CommonService): cls.model.update_time ] kbs = cls.model.select(*fields)\ - .join(Tenant, on=((Tenant.id == cls.model.tenant_id) & (Tenant.status == StatusEnum.VALID.value)))\ .join(UserCanvas, on=(cls.model.pipeline_id == UserCanvas.id), join_type=JOIN.LEFT_OUTER)\ .where( (cls.model.id == kb_id), (cls.model.status == StatusEnum.VALID.value) - ) + ).dicts() if not kbs: return - d = kbs[0].to_dict() - return d + return kbs[0] @classmethod @DB.connection_context() @@ -447,3 +480,17 @@ class KnowledgebaseService(CommonService): else: raise e + @classmethod + @DB.connection_context() + def decrease_document_num_in_delete(cls, kb_id, doc_num_info: dict): + kb_row = cls.model.get_by_id(kb_id) + if not kb_row: + raise RuntimeError(f"kb_id {kb_id} does not exist") + update_dict = { + 'doc_num': kb_row.doc_num - doc_num_info['doc_num'], + 'chunk_num': kb_row.chunk_num - doc_num_info['chunk_num'], + 'token_num': kb_row.token_num - doc_num_info['token_num'], + 'update_time': current_timestamp(), + 'update_date': datetime_format(datetime.now()) + } + return cls.model.update(update_dict).where(cls.model.id == kb_id).execute() diff --git a/api/db/services/langfuse_service.py b/api/db/services/langfuse_service.py index c75f3d12e..6f4646900 100644 --- a/api/db/services/langfuse_service.py +++ b/api/db/services/langfuse_service.py @@ -51,6 +51,11 @@ class TenantLangfuseService(CommonService): except peewee.DoesNotExist: return None + @classmethod + @DB.connection_context() + def delete_ty_tenant_id(cls, tenant_id): + return cls.model.delete().where(cls.model.tenant_id == tenant_id).execute() + @classmethod def update_by_tenant(cls, tenant_id, langfuse_keys): langfuse_keys["update_time"] = current_timestamp() diff --git a/api/db/services/mcp_server_service.py b/api/db/services/mcp_server_service.py index 869350094..101555f4b 100644 --- a/api/db/services/mcp_server_service.py +++ b/api/db/services/mcp_server_service.py @@ -84,3 +84,8 @@ class MCPServerService(CommonService): return bool(mcp_server), mcp_server except Exception: return False, None + + @classmethod + @DB.connection_context() + def delete_by_tenant_id(cls, tenant_id: str): + return cls.model.delete().where(cls.model.tenant_id == tenant_id).execute() diff --git a/api/db/services/search_service.py b/api/db/services/search_service.py index 674cfcf1f..acb07da57 100644 --- a/api/db/services/search_service.py +++ b/api/db/services/search_service.py @@ -110,3 +110,8 @@ class SearchService(CommonService): query = query.paginate(page_number, items_per_page) return list(query.dicts()), count + + @classmethod + @DB.connection_context() + def delete_by_tenant_id(cls, tenant_id): + return cls.model.delete().where(cls.model.tenant_id == tenant_id).execute() diff --git a/api/db/services/task_service.py b/api/db/services/task_service.py index 4b8eddc82..f31494b0e 100644 --- a/api/db/services/task_service.py +++ b/api/db/services/task_service.py @@ -316,6 +316,12 @@ class TaskService(CommonService): process_duration = (datetime.now() - task.begin_at).total_seconds() cls.model.update(process_duration=process_duration).where(cls.model.id == id).execute() + @classmethod + @DB.connection_context() + def delete_by_doc_ids(cls, doc_ids): + """Delete task associated with a document.""" + return cls.model.delete().where(cls.model.doc_id.in_(doc_ids)).execute() + def queue_tasks(doc: dict, bucket: str, name: str, priority: int): """Create and queue document processing tasks. diff --git a/api/db/services/tenant_llm_service.py b/api/db/services/tenant_llm_service.py index ec023f115..4eca970ec 100644 --- a/api/db/services/tenant_llm_service.py +++ b/api/db/services/tenant_llm_service.py @@ -209,6 +209,11 @@ class TenantLLMService(CommonService): objs = cls.model.select().where((cls.model.llm_factory == "OpenAI"), ~(cls.model.llm_name == "text-embedding-3-small"), ~(cls.model.llm_name == "text-embedding-3-large")).dicts() return list(objs) + @classmethod + @DB.connection_context() + def delete_by_tenant_id(cls, tenant_id): + return cls.model.delete().where(cls.model.tenant_id == tenant_id).execute() + @staticmethod def llm_id2llm_type(llm_id: str) -> str | None: from api.db.services.llm_service import LLMService diff --git a/api/db/services/user_canvas_version.py b/api/db/services/user_canvas_version.py index 9fe12e32e..9696a7834 100644 --- a/api/db/services/user_canvas_version.py +++ b/api/db/services/user_canvas_version.py @@ -24,7 +24,24 @@ class UserCanvasVersionService(CommonService): return None except Exception: return None - + + @classmethod + @DB.connection_context() + def get_all_canvas_version_by_canvas_ids(cls, canvas_ids): + fields = [cls.model.id] + versions = cls.model.select(*fields).where(cls.model.user_canvas_id.in_(canvas_ids)) + versions.order_by(cls.model.create_time.asc()) + offset, limit = 0, 100 + res = [] + while True: + version_batch = versions.offset(offset).limit(limit) + _temp = list(version_batch.dicts()) + if not _temp: + break + res.extend(_temp) + offset += limit + return res + @classmethod @DB.connection_context() def delete_all_versions(cls, user_canvas_id): diff --git a/api/db/services/user_service.py b/api/db/services/user_service.py index e4184e334..12c405bd6 100644 --- a/api/db/services/user_service.py +++ b/api/db/services/user_service.py @@ -45,22 +45,22 @@ class UserService(CommonService): def query(cls, cols=None, reverse=None, order_by=None, **kwargs): if 'access_token' in kwargs: access_token = kwargs['access_token'] - + # Reject empty, None, or whitespace-only access tokens if not access_token or not str(access_token).strip(): logging.warning("UserService.query: Rejecting empty access_token query") return cls.model.select().where(cls.model.id == "INVALID_EMPTY_TOKEN") # Returns empty result - + # Reject tokens that are too short (should be UUID, 32+ chars) if len(str(access_token).strip()) < 32: logging.warning(f"UserService.query: Rejecting short access_token query: {len(str(access_token))} chars") return cls.model.select().where(cls.model.id == "INVALID_SHORT_TOKEN") # Returns empty result - + # Reject tokens that start with "INVALID_" (from logout) if str(access_token).startswith("INVALID_"): logging.warning("UserService.query: Rejecting invalidated access_token") return cls.model.select().where(cls.model.id == "INVALID_LOGOUT_TOKEN") # Returns empty result - + # Call parent query method for valid requests return super().query(cols=cols, reverse=reverse, order_by=order_by, **kwargs) @@ -100,6 +100,12 @@ class UserService(CommonService): else: return None + @classmethod + @DB.connection_context() + def query_user_by_email(cls, email): + users = cls.model.select().where((cls.model.email == email)) + return list(users) + @classmethod @DB.connection_context() def save(cls, **kwargs): @@ -133,6 +139,17 @@ class UserService(CommonService): cls.model.update(user_dict).where( cls.model.id == user_id).execute() + @classmethod + @DB.connection_context() + def update_user_password(cls, user_id, new_password): + with DB.atomic(): + update_dict = { + "password": generate_password_hash(str(new_password)), + "update_time": current_timestamp(), + "update_date": datetime_format(datetime.now()) + } + cls.model.update(update_dict).where(cls.model.id == user_id).execute() + @classmethod @DB.connection_context() def is_admin(cls, user_id): @@ -140,6 +157,12 @@ class UserService(CommonService): cls.model.id == user_id, cls.model.is_superuser == 1).count() > 0 + @classmethod + @DB.connection_context() + def get_all_users(cls): + users = cls.model.select() + return list(users) + class TenantService(CommonService): """Service class for managing tenant-related database operations. @@ -265,6 +288,17 @@ class UserTenantService(CommonService): .join(User, on=((cls.model.tenant_id == User.id) & (UserTenant.user_id == user_id) & (UserTenant.status == StatusEnum.VALID.value))) .where(cls.model.status == StatusEnum.VALID.value).dicts()) + @classmethod + @DB.connection_context() + def get_user_tenant_relation_by_user_id(cls, user_id): + fields = [ + cls.model.id, + cls.model.user_id, + cls.model.tenant_id, + cls.model.role + ] + return list(cls.model.select(*fields).where(cls.model.user_id == user_id).dicts().dicts()) + @classmethod @DB.connection_context() def get_num_members(cls, user_id: str): diff --git a/api/ragflow_server.py b/api/ragflow_server.py index 0dbeb771b..fb49f3d8b 100644 --- a/api/ragflow_server.py +++ b/api/ragflow_server.py @@ -41,7 +41,7 @@ from api import utils from api.db.db_models import init_database_tables as init_web_db from api.db.init_data import init_web_data from api.versions import get_ragflow_version -from api.utils import show_configs +from api.utils.configs import show_configs from rag.settings import print_rag_settings from rag.utils.mcp_tool_call_conn import shutdown_all_mcp_sessions from rag.utils.redis_conn import RedisDistributedLock diff --git a/api/settings.py b/api/settings.py index 3148633e6..e6763d8a2 100644 --- a/api/settings.py +++ b/api/settings.py @@ -24,7 +24,7 @@ import rag.utils.es_conn import rag.utils.infinity_conn import rag.utils.opensearch_conn from api.constants import RAG_FLOW_SERVICE_NAME -from api.utils import decrypt_database_config, get_base_config +from api.utils.configs import decrypt_database_config, get_base_config from api.utils.file_utils import get_project_base_directory from rag.nlp import search diff --git a/api/utils/__init__.py b/api/utils/__init__.py index 461340b63..e0f8a5655 100644 --- a/api/utils/__init__.py +++ b/api/utils/__init__.py @@ -16,184 +16,15 @@ import base64 import datetime import hashlib -import io -import json import os -import pickle import socket import time import uuid import requests -import logging -import copy -from enum import Enum, IntEnum + import importlib -from Cryptodome.PublicKey import RSA -from Cryptodome.Cipher import PKCS1_v1_5 as Cipher_pkcs1_v1_5 -from filelock import FileLock -from api.constants import SERVICE_CONF -from . import file_utils - - -def conf_realpath(conf_name): - conf_path = f"conf/{conf_name}" - return os.path.join(file_utils.get_project_base_directory(), conf_path) - - -def read_config(conf_name=SERVICE_CONF): - local_config = {} - local_path = conf_realpath(f'local.{conf_name}') - - # load local config file - if os.path.exists(local_path): - local_config = file_utils.load_yaml_conf(local_path) - if not isinstance(local_config, dict): - raise ValueError(f'Invalid config file: "{local_path}".') - - global_config_path = conf_realpath(conf_name) - global_config = file_utils.load_yaml_conf(global_config_path) - - if not isinstance(global_config, dict): - raise ValueError(f'Invalid config file: "{global_config_path}".') - - global_config.update(local_config) - return global_config - - -CONFIGS = read_config() - - -def show_configs(): - msg = f"Current configs, from {conf_realpath(SERVICE_CONF)}:" - for k, v in CONFIGS.items(): - if isinstance(v, dict): - if "password" in v: - v = copy.deepcopy(v) - v["password"] = "*" * 8 - if "access_key" in v: - v = copy.deepcopy(v) - v["access_key"] = "*" * 8 - if "secret_key" in v: - v = copy.deepcopy(v) - v["secret_key"] = "*" * 8 - if "secret" in v: - v = copy.deepcopy(v) - v["secret"] = "*" * 8 - if "sas_token" in v: - v = copy.deepcopy(v) - v["sas_token"] = "*" * 8 - if "oauth" in k: - v = copy.deepcopy(v) - for key, val in v.items(): - if "client_secret" in val: - val["client_secret"] = "*" * 8 - if "authentication" in k: - v = copy.deepcopy(v) - for key, val in v.items(): - if "http_secret_key" in val: - val["http_secret_key"] = "*" * 8 - msg += f"\n\t{k}: {v}" - logging.info(msg) - - -def get_base_config(key, default=None): - if key is None: - return None - if default is None: - default = os.environ.get(key.upper()) - return CONFIGS.get(key, default) - - -use_deserialize_safe_module = get_base_config( - 'use_deserialize_safe_module', False) - - -class BaseType: - def to_dict(self): - return dict([(k.lstrip("_"), v) for k, v in self.__dict__.items()]) - - def to_dict_with_type(self): - def _dict(obj): - module = None - if issubclass(obj.__class__, BaseType): - data = {} - for attr, v in obj.__dict__.items(): - k = attr.lstrip("_") - data[k] = _dict(v) - module = obj.__module__ - elif isinstance(obj, (list, tuple)): - data = [] - for i, vv in enumerate(obj): - data.append(_dict(vv)) - elif isinstance(obj, dict): - data = {} - for _k, vv in obj.items(): - data[_k] = _dict(vv) - else: - data = obj - return {"type": obj.__class__.__name__, - "data": data, "module": module} - - return _dict(self) - - -class CustomJSONEncoder(json.JSONEncoder): - def __init__(self, **kwargs): - self._with_type = kwargs.pop("with_type", False) - super().__init__(**kwargs) - - def default(self, obj): - if isinstance(obj, datetime.datetime): - return obj.strftime('%Y-%m-%d %H:%M:%S') - elif isinstance(obj, datetime.date): - return obj.strftime('%Y-%m-%d') - elif isinstance(obj, datetime.timedelta): - return str(obj) - elif issubclass(type(obj), Enum) or issubclass(type(obj), IntEnum): - return obj.value - elif isinstance(obj, set): - return list(obj) - elif issubclass(type(obj), BaseType): - if not self._with_type: - return obj.to_dict() - else: - return obj.to_dict_with_type() - elif isinstance(obj, type): - return obj.__name__ - else: - return json.JSONEncoder.default(self, obj) - - -def rag_uuid(): - return uuid.uuid1().hex - - -def string_to_bytes(string): - return string if isinstance( - string, bytes) else string.encode(encoding="utf-8") - - -def bytes_to_string(byte): - return byte.decode(encoding="utf-8") - - -def json_dumps(src, byte=False, indent=None, with_type=False): - dest = json.dumps( - src, - indent=indent, - cls=CustomJSONEncoder, - with_type=with_type) - if byte: - dest = string_to_bytes(dest) - return dest - - -def json_loads(src, object_hook=None, object_pairs_hook=None): - if isinstance(src, bytes): - src = bytes_to_string(src) - return json.loads(src, object_hook=object_hook, - object_pairs_hook=object_pairs_hook) +from .common import string_to_bytes def current_timestamp(): @@ -215,45 +46,6 @@ def date_string_to_timestamp(time_str, format_string="%Y-%m-%d %H:%M:%S"): return time_stamp -def serialize_b64(src, to_str=False): - dest = base64.b64encode(pickle.dumps(src)) - if not to_str: - return dest - else: - return bytes_to_string(dest) - - -def deserialize_b64(src): - src = base64.b64decode( - string_to_bytes(src) if isinstance( - src, str) else src) - if use_deserialize_safe_module: - return restricted_loads(src) - return pickle.loads(src) - - -safe_module = { - 'numpy', - 'rag_flow' -} - - -class RestrictedUnpickler(pickle.Unpickler): - def find_class(self, module, name): - import importlib - if module.split('.')[0] in safe_module: - _module = importlib.import_module(module) - return getattr(_module, name) - # Forbid everything else. - raise pickle.UnpicklingError("global '%s.%s' is forbidden" % - (module, name)) - - -def restricted_loads(src): - """Helper function analogous to pickle.loads().""" - return RestrictedUnpickler(io.BytesIO(src)).load() - - def get_lan_ip(): if os.name != "nt": import fcntl @@ -298,47 +90,6 @@ def from_dict_hook(in_dict: dict): return in_dict -def decrypt_database_password(password): - encrypt_password = get_base_config("encrypt_password", False) - encrypt_module = get_base_config("encrypt_module", False) - private_key = get_base_config("private_key", None) - - if not password or not encrypt_password: - return password - - if not private_key: - raise ValueError("No private key") - - module_fun = encrypt_module.split("#") - pwdecrypt_fun = getattr( - importlib.import_module( - module_fun[0]), - module_fun[1]) - - return pwdecrypt_fun(private_key, password) - - -def decrypt_database_config( - database=None, passwd_key="password", name="database"): - if not database: - database = get_base_config(name, {}) - - database[passwd_key] = decrypt_database_password(database[passwd_key]) - return database - - -def update_config(key, value, conf_name=SERVICE_CONF): - conf_path = conf_realpath(conf_name=conf_name) - if not os.path.isabs(conf_path): - conf_path = os.path.join( - file_utils.get_project_base_directory(), conf_path) - - with FileLock(os.path.join(os.path.dirname(conf_path), ".lock")): - config = file_utils.load_yaml_conf(conf_path=conf_path) or {} - config[key] = value - file_utils.rewrite_yaml_conf(conf_path=conf_path, config=config) - - def get_uuid(): return uuid.uuid1().hex @@ -363,37 +114,6 @@ def elapsed2time(elapsed): return '%02d:%02d:%02d' % (hour, minuter, second) -def decrypt(line): - file_path = os.path.join( - file_utils.get_project_base_directory(), - "conf", - "private.pem") - rsa_key = RSA.importKey(open(file_path).read(), "Welcome") - cipher = Cipher_pkcs1_v1_5.new(rsa_key) - return cipher.decrypt(base64.b64decode( - line), "Fail to decrypt password!").decode('utf-8') - - -def decrypt2(crypt_text): - from base64 import b64decode, b16decode - from Crypto.Cipher import PKCS1_v1_5 as Cipher_PKCS1_v1_5 - from Crypto.PublicKey import RSA - decode_data = b64decode(crypt_text) - if len(decode_data) == 127: - hex_fixed = '00' + decode_data.hex() - decode_data = b16decode(hex_fixed.upper()) - - file_path = os.path.join( - file_utils.get_project_base_directory(), - "conf", - "private.pem") - pem = open(file_path).read() - rsa_key = RSA.importKey(pem, "Welcome") - cipher = Cipher_PKCS1_v1_5.new(rsa_key) - decrypt_text = cipher.decrypt(decode_data, None) - return (b64decode(decrypt_text)).decode() - - def download_img(url): if not url: return "" @@ -408,5 +128,5 @@ def delta_seconds(date_string: str): return (datetime.datetime.now() - dt).total_seconds() -def hash_str2int(line:str, mod: int=10 ** 8) -> int: - return int(hashlib.sha1(line.encode("utf-8")).hexdigest(), 16) % mod \ No newline at end of file +def hash_str2int(line: str, mod: int = 10 ** 8) -> int: + return int(hashlib.sha1(line.encode("utf-8")).hexdigest(), 16) % mod diff --git a/api/utils/api_utils.py b/api/utils/api_utils.py index 35f9d3eca..821ec6d31 100644 --- a/api/utils/api_utils.py +++ b/api/utils/api_utils.py @@ -39,6 +39,7 @@ from flask import ( make_response, send_file, ) +from flask_login import current_user from flask import ( request as flask_request, ) @@ -48,10 +49,13 @@ from werkzeug.http import HTTP_STATUS_CODES from api import settings from api.constants import REQUEST_MAX_WAIT_SEC, REQUEST_WAIT_SEC +from api.db import ActiveEnum from api.db.db_models import APIToken +from api.db.services import UserService from api.db.services.llm_service import LLMService from api.db.services.tenant_llm_service import TenantLLMService -from api.utils import CustomJSONEncoder, get_uuid, json_dumps +from api.utils.json import CustomJSONEncoder, json_dumps +from api.utils import get_uuid from rag.utils.mcp_tool_call_conn import MCPToolCallSession, close_multiple_mcp_toolcall_sessions requests.models.complexjson.dumps = functools.partial(json.dumps, cls=CustomJSONEncoder) @@ -226,6 +230,18 @@ def not_allowed_parameters(*params): return decorator +def active_required(f): + @wraps(f) + def wrapper(*args, **kwargs): + user_id = current_user.id + usr = UserService.filter_by_id(user_id) + # check is_active + if not usr or not usr.is_active == ActiveEnum.ACTIVE.value: + return get_json_result(code=settings.RetCode.FORBIDDEN, message="User isn't active, please activate first.") + return f(*args, **kwargs) + return wrapper + + def is_localhost(ip): return ip in {"127.0.0.1", "::1", "[::1]", "localhost"} @@ -643,6 +659,16 @@ def remap_dictionary_keys(source_data: dict, key_aliases: dict = None) -> dict: return transformed_data +def group_by(list_of_dict, key): + res = {} + for item in list_of_dict: + if item[key] in res.keys(): + res[item[key]].append(item) + else: + res[item[key]] = [item] + return res + + def get_mcp_tools(mcp_servers: list, timeout: float | int = 10) -> tuple[dict, str]: results = {} tool_call_sessions = [] diff --git a/api/utils/common.py b/api/utils/common.py new file mode 100644 index 000000000..ce7428507 --- /dev/null +++ b/api/utils/common.py @@ -0,0 +1,23 @@ +# +# Copyright 2025 The InfiniFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +def string_to_bytes(string): + return string if isinstance( + string, bytes) else string.encode(encoding="utf-8") + + +def bytes_to_string(byte): + return byte.decode(encoding="utf-8") diff --git a/api/utils/configs.py b/api/utils/configs.py new file mode 100644 index 000000000..48e492246 --- /dev/null +++ b/api/utils/configs.py @@ -0,0 +1,179 @@ +# +# Copyright 2025 The InfiniFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +import os +import io +import copy +import logging +import base64 +import pickle +import importlib + +from api.utils import file_utils +from filelock import FileLock +from api.utils.common import bytes_to_string, string_to_bytes +from api.constants import SERVICE_CONF + + +def conf_realpath(conf_name): + conf_path = f"conf/{conf_name}" + return os.path.join(file_utils.get_project_base_directory(), conf_path) + + +def read_config(conf_name=SERVICE_CONF): + local_config = {} + local_path = conf_realpath(f'local.{conf_name}') + + # load local config file + if os.path.exists(local_path): + local_config = file_utils.load_yaml_conf(local_path) + if not isinstance(local_config, dict): + raise ValueError(f'Invalid config file: "{local_path}".') + + global_config_path = conf_realpath(conf_name) + global_config = file_utils.load_yaml_conf(global_config_path) + + if not isinstance(global_config, dict): + raise ValueError(f'Invalid config file: "{global_config_path}".') + + global_config.update(local_config) + return global_config + + +CONFIGS = read_config() + + +def show_configs(): + msg = f"Current configs, from {conf_realpath(SERVICE_CONF)}:" + for k, v in CONFIGS.items(): + if isinstance(v, dict): + if "password" in v: + v = copy.deepcopy(v) + v["password"] = "*" * 8 + if "access_key" in v: + v = copy.deepcopy(v) + v["access_key"] = "*" * 8 + if "secret_key" in v: + v = copy.deepcopy(v) + v["secret_key"] = "*" * 8 + if "secret" in v: + v = copy.deepcopy(v) + v["secret"] = "*" * 8 + if "sas_token" in v: + v = copy.deepcopy(v) + v["sas_token"] = "*" * 8 + if "oauth" in k: + v = copy.deepcopy(v) + for key, val in v.items(): + if "client_secret" in val: + val["client_secret"] = "*" * 8 + if "authentication" in k: + v = copy.deepcopy(v) + for key, val in v.items(): + if "http_secret_key" in val: + val["http_secret_key"] = "*" * 8 + msg += f"\n\t{k}: {v}" + logging.info(msg) + + +def get_base_config(key, default=None): + if key is None: + return None + if default is None: + default = os.environ.get(key.upper()) + return CONFIGS.get(key, default) + + +def decrypt_database_password(password): + encrypt_password = get_base_config("encrypt_password", False) + encrypt_module = get_base_config("encrypt_module", False) + private_key = get_base_config("private_key", None) + + if not password or not encrypt_password: + return password + + if not private_key: + raise ValueError("No private key") + + module_fun = encrypt_module.split("#") + pwdecrypt_fun = getattr( + importlib.import_module( + module_fun[0]), + module_fun[1]) + + return pwdecrypt_fun(private_key, password) + + +def decrypt_database_config( + database=None, passwd_key="password", name="database"): + if not database: + database = get_base_config(name, {}) + + database[passwd_key] = decrypt_database_password(database[passwd_key]) + return database + + +def update_config(key, value, conf_name=SERVICE_CONF): + conf_path = conf_realpath(conf_name=conf_name) + if not os.path.isabs(conf_path): + conf_path = os.path.join( + file_utils.get_project_base_directory(), conf_path) + + with FileLock(os.path.join(os.path.dirname(conf_path), ".lock")): + config = file_utils.load_yaml_conf(conf_path=conf_path) or {} + config[key] = value + file_utils.rewrite_yaml_conf(conf_path=conf_path, config=config) + + +safe_module = { + 'numpy', + 'rag_flow' +} + + +class RestrictedUnpickler(pickle.Unpickler): + def find_class(self, module, name): + import importlib + if module.split('.')[0] in safe_module: + _module = importlib.import_module(module) + return getattr(_module, name) + # Forbid everything else. + raise pickle.UnpicklingError("global '%s.%s' is forbidden" % + (module, name)) + + +def restricted_loads(src): + """Helper function analogous to pickle.loads().""" + return RestrictedUnpickler(io.BytesIO(src)).load() + + +def serialize_b64(src, to_str=False): + dest = base64.b64encode(pickle.dumps(src)) + if not to_str: + return dest + else: + return bytes_to_string(dest) + + +def deserialize_b64(src): + src = base64.b64decode( + string_to_bytes(src) if isinstance( + src, str) else src) + use_deserialize_safe_module = get_base_config( + 'use_deserialize_safe_module', False) + if use_deserialize_safe_module: + return restricted_loads(src) + return pickle.loads(src) diff --git a/api/utils/crypt.py b/api/utils/crypt.py new file mode 100644 index 000000000..eb922a886 --- /dev/null +++ b/api/utils/crypt.py @@ -0,0 +1,64 @@ +# +# Copyright 2025 The InfiniFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +import base64 +import os +import sys +from Cryptodome.PublicKey import RSA +from Cryptodome.Cipher import PKCS1_v1_5 as Cipher_pkcs1_v1_5 +from api.utils import file_utils + + +def crypt(line): + """ + decrypt(crypt(input_string)) == base64(input_string), which frontend and admin_client use. + """ + file_path = os.path.join(file_utils.get_project_base_directory(), "conf", "public.pem") + rsa_key = RSA.importKey(open(file_path).read(), "Welcome") + cipher = Cipher_pkcs1_v1_5.new(rsa_key) + password_base64 = base64.b64encode(line.encode('utf-8')).decode("utf-8") + encrypted_password = cipher.encrypt(password_base64.encode()) + return base64.b64encode(encrypted_password).decode('utf-8') + + +def decrypt(line): + file_path = os.path.join(file_utils.get_project_base_directory(), "conf", "private.pem") + rsa_key = RSA.importKey(open(file_path).read(), "Welcome") + cipher = Cipher_pkcs1_v1_5.new(rsa_key) + return cipher.decrypt(base64.b64decode(line), "Fail to decrypt password!").decode('utf-8') + + +def decrypt2(crypt_text): + from base64 import b64decode, b16decode + from Crypto.Cipher import PKCS1_v1_5 as Cipher_PKCS1_v1_5 + from Crypto.PublicKey import RSA + decode_data = b64decode(crypt_text) + if len(decode_data) == 127: + hex_fixed = '00' + decode_data.hex() + decode_data = b16decode(hex_fixed.upper()) + + file_path = os.path.join(file_utils.get_project_base_directory(), "conf", "private.pem") + pem = open(file_path).read() + rsa_key = RSA.importKey(pem, "Welcome") + cipher = Cipher_PKCS1_v1_5.new(rsa_key) + decrypt_text = cipher.decrypt(decode_data, None) + return (b64decode(decrypt_text)).decode() + + +if __name__ == "__main__": + passwd = crypt(sys.argv[1]) + print(passwd) + print(decrypt(passwd)) diff --git a/api/utils/health_utils.py b/api/utils/health_utils.py new file mode 100644 index 000000000..967fa71b7 --- /dev/null +++ b/api/utils/health_utils.py @@ -0,0 +1,107 @@ +# +# Copyright 2025 The InfiniFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + + +from timeit import default_timer as timer + +from api import settings +from api.db.db_models import DB +from rag.utils.redis_conn import REDIS_CONN +from rag.utils.storage_factory import STORAGE_IMPL + + +def _ok_nok(ok: bool) -> str: + return "ok" if ok else "nok" + + +def check_db() -> tuple[bool, dict]: + st = timer() + try: + # lightweight probe; works for MySQL/Postgres + DB.execute_sql("SELECT 1") + return True, {"elapsed": f"{(timer() - st) * 1000.0:.1f}"} + except Exception as e: + return False, {"elapsed": f"{(timer() - st) * 1000.0:.1f}", "error": str(e)} + + +def check_redis() -> tuple[bool, dict]: + st = timer() + try: + ok = bool(REDIS_CONN.health()) + return ok, {"elapsed": f"{(timer() - st) * 1000.0:.1f}"} + except Exception as e: + return False, {"elapsed": f"{(timer() - st) * 1000.0:.1f}", "error": str(e)} + + +def check_doc_engine() -> tuple[bool, dict]: + st = timer() + try: + meta = settings.docStoreConn.health() + # treat any successful call as ok + return True, {"elapsed": f"{(timer() - st) * 1000.0:.1f}", **(meta or {})} + except Exception as e: + return False, {"elapsed": f"{(timer() - st) * 1000.0:.1f}", "error": str(e)} + + +def check_storage() -> tuple[bool, dict]: + st = timer() + try: + STORAGE_IMPL.health() + return True, {"elapsed": f"{(timer() - st) * 1000.0:.1f}"} + except Exception as e: + return False, {"elapsed": f"{(timer() - st) * 1000.0:.1f}", "error": str(e)} + + + + +def run_health_checks() -> tuple[dict, bool]: + result: dict[str, str | dict] = {} + + db_ok, db_meta = check_db() + result["db"] = _ok_nok(db_ok) + if not db_ok: + result.setdefault("_meta", {})["db"] = db_meta + + try: + redis_ok, redis_meta = check_redis() + result["redis"] = _ok_nok(redis_ok) + if not redis_ok: + result.setdefault("_meta", {})["redis"] = redis_meta + except Exception: + result["redis"] = "nok" + + try: + doc_ok, doc_meta = check_doc_engine() + result["doc_engine"] = _ok_nok(doc_ok) + if not doc_ok: + result.setdefault("_meta", {})["doc_engine"] = doc_meta + except Exception: + result["doc_engine"] = "nok" + + try: + sto_ok, sto_meta = check_storage() + result["storage"] = _ok_nok(sto_ok) + if not sto_ok: + result.setdefault("_meta", {})["storage"] = sto_meta + except Exception: + result["storage"] = "nok" + + + all_ok = (result.get("db") == "ok") and (result.get("redis") == "ok") and (result.get("doc_engine") == "ok") and (result.get("storage") == "ok") + result["status"] = "ok" if all_ok else "nok" + return result, all_ok + + diff --git a/api/utils/json.py b/api/utils/json.py new file mode 100644 index 000000000..b21addd4f --- /dev/null +++ b/api/utils/json.py @@ -0,0 +1,78 @@ +import datetime +import json +from enum import Enum, IntEnum +from api.utils.common import string_to_bytes, bytes_to_string + + +class BaseType: + def to_dict(self): + return dict([(k.lstrip("_"), v) for k, v in self.__dict__.items()]) + + def to_dict_with_type(self): + def _dict(obj): + module = None + if issubclass(obj.__class__, BaseType): + data = {} + for attr, v in obj.__dict__.items(): + k = attr.lstrip("_") + data[k] = _dict(v) + module = obj.__module__ + elif isinstance(obj, (list, tuple)): + data = [] + for i, vv in enumerate(obj): + data.append(_dict(vv)) + elif isinstance(obj, dict): + data = {} + for _k, vv in obj.items(): + data[_k] = _dict(vv) + else: + data = obj + return {"type": obj.__class__.__name__, + "data": data, "module": module} + + return _dict(self) + + +class CustomJSONEncoder(json.JSONEncoder): + def __init__(self, **kwargs): + self._with_type = kwargs.pop("with_type", False) + super().__init__(**kwargs) + + def default(self, obj): + if isinstance(obj, datetime.datetime): + return obj.strftime('%Y-%m-%d %H:%M:%S') + elif isinstance(obj, datetime.date): + return obj.strftime('%Y-%m-%d') + elif isinstance(obj, datetime.timedelta): + return str(obj) + elif issubclass(type(obj), Enum) or issubclass(type(obj), IntEnum): + return obj.value + elif isinstance(obj, set): + return list(obj) + elif issubclass(type(obj), BaseType): + if not self._with_type: + return obj.to_dict() + else: + return obj.to_dict_with_type() + elif isinstance(obj, type): + return obj.__name__ + else: + return json.JSONEncoder.default(self, obj) + + +def json_dumps(src, byte=False, indent=None, with_type=False): + dest = json.dumps( + src, + indent=indent, + cls=CustomJSONEncoder, + with_type=with_type) + if byte: + dest = string_to_bytes(dest) + return dest + + +def json_loads(src, object_hook=None, object_pairs_hook=None): + if isinstance(src, bytes): + src = bytes_to_string(src) + return json.loads(src, object_hook=object_hook, + object_pairs_hook=object_pairs_hook) diff --git a/api/utils/t_crypt.py b/api/utils/t_crypt.py deleted file mode 100644 index d0763c19f..000000000 --- a/api/utils/t_crypt.py +++ /dev/null @@ -1,40 +0,0 @@ -# -# Copyright 2025 The InfiniFlow Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# - -import base64 -import os -import sys -from Cryptodome.PublicKey import RSA -from Cryptodome.Cipher import PKCS1_v1_5 as Cipher_pkcs1_v1_5 -from api.utils import decrypt, file_utils - - -def crypt(line): - file_path = os.path.join( - file_utils.get_project_base_directory(), - "conf", - "public.pem") - rsa_key = RSA.importKey(open(file_path).read(),"Welcome") - cipher = Cipher_pkcs1_v1_5.new(rsa_key) - password_base64 = base64.b64encode(line.encode('utf-8')).decode("utf-8") - encrypted_password = cipher.encrypt(password_base64.encode()) - return base64.b64encode(encrypted_password).decode('utf-8') - - -if __name__ == "__main__": - passwd = crypt(sys.argv[1]) - print(passwd) - print(decrypt(passwd)) diff --git a/chat_demo/index.html b/chat_demo/index.html new file mode 100644 index 000000000..114b13683 --- /dev/null +++ b/chat_demo/index.html @@ -0,0 +1,19 @@ + + \ No newline at end of file diff --git a/chat_demo/widget_demo.html b/chat_demo/widget_demo.html new file mode 100644 index 000000000..34c262b37 --- /dev/null +++ b/chat_demo/widget_demo.html @@ -0,0 +1,154 @@ + + + + + + Floating Chat Widget Demo + + + +
+

🚀 Floating Chat Widget Demo

+ +

+ Welcome to our demo page! This page simulates a real website with content. + Look for the floating chat button in the bottom-right corner - just like Intercom! +

+ +
+

🎯 Widget Features

+
    +
  • Floating button that stays visible while scrolling
  • +
  • Click to open/close the chat window
  • +
  • Minimize button to collapse the chat
  • +
  • Professional Intercom-style design
  • +
  • Unread message indicator (red badge)
  • +
  • Transparent background integration
  • +
  • Responsive design for all screen sizes
  • +
+
+ +

+ The chat widget is completely separate from your website's content and won't + interfere with your existing layout or functionality. It's designed to be + lightweight and performant. +

+ +

+ Try scrolling this page - notice how the chat button stays in position. + Click it to start a conversation with our AI assistant! +

+ +
+

🔧 Implementation

+
    +
  • Simple iframe embed - just copy and paste
  • +
  • No JavaScript dependencies required
  • +
  • Works on any website or platform
  • +
  • Customizable appearance and behavior
  • +
  • Secure and privacy-focused
  • +
+
+ +

+ This is just placeholder content to demonstrate how the widget integrates + seamlessly with your existing website content. The widget floats above + everything else without disrupting your user experience. +

+ +

+ 🎉 Ready to add this to your website? Get your embed code from the admin panel! +

+
+ + + + + \ No newline at end of file diff --git a/conf/llm_factories.json b/conf/llm_factories.json index 686e97373..14575174f 100644 --- a/conf/llm_factories.json +++ b/conf/llm_factories.json @@ -402,7 +402,7 @@ "is_tools": true }, { - "llm_name": "qwen3-max-preview", + "llm_name": "qwen3-max", "tags": "LLM,CHAT,256k", "max_tokens": 256000, "model_type": "chat", @@ -436,6 +436,27 @@ "model_type": "chat", "is_tools": true }, + { + "llm_name": "qwen3-vl-plus", + "tags": "LLM,CHAT,IMAGE2TEXT,256k", + "max_tokens": 256000, + "model_type": "image2text", + "is_tools": true + }, + { + "llm_name": "qwen3-vl-235b-a22b-instruct", + "tags": "LLM,CHAT,IMAGE2TEXT,128k", + "max_tokens": 128000, + "model_type": "image2text", + "is_tools": true + }, + { + "llm_name": "qwen3-vl-235b-a22b-thinking", + "tags": "LLM,CHAT,IMAGE2TEXT,128k", + "max_tokens": 128000, + "model_type": "image2text", + "is_tools": true + }, { "llm_name": "qwen3-235b-a22b-instruct-2507", "tags": "LLM,CHAT,128k", @@ -457,6 +478,20 @@ "model_type": "chat", "is_tools": true }, + { + "llm_name": "qwen3-next-80b-a3b-instruct", + "tags": "LLM,CHAT,128k", + "max_tokens": 128000, + "model_type": "chat", + "is_tools": true + }, + { + "llm_name": "qwen3-next-80b-a3b-thinking", + "tags": "LLM,CHAT,128k", + "max_tokens": 128000, + "model_type": "chat", + "is_tools": true + }, { "llm_name": "qwen3-0.6b", "tags": "LLM,CHAT,32k", @@ -622,6 +657,13 @@ "tags": "SPEECH2TEXT,8k", "max_tokens": 8000, "model_type": "speech2text" + }, + { + "llm_name": "qianwen-deepresearch-30b-a3b-131k", + "tags": "LLM,CHAT,1M,AGENT,DEEPRESEARCH", + "max_tokens": 1000000, + "model_type": "chat", + "is_tools": true } ] }, diff --git a/conf/service_conf.yaml b/conf/service_conf.yaml index 7b76f2b4f..89f734f89 100644 --- a/conf/service_conf.yaml +++ b/conf/service_conf.yaml @@ -1,6 +1,9 @@ ragflow: host: 0.0.0.0 http_port: 9380 +admin: + host: 0.0.0.0 + http_port: 9381 mysql: name: 'rag_flow' user: 'root' diff --git a/deepdoc/parser/figure_parser.py b/deepdoc/parser/figure_parser.py index 0ec315876..0274f549d 100644 --- a/deepdoc/parser/figure_parser.py +++ b/deepdoc/parser/figure_parser.py @@ -19,7 +19,7 @@ from PIL import Image from api.utils.api_utils import timeout from rag.app.picture import vision_llm_chunk as picture_vision_llm_chunk -from rag.prompts import vision_llm_figure_describe_prompt +from rag.prompts.generator import vision_llm_figure_describe_prompt def vision_figure_parser_figure_data_wrapper(figures_data_without_positions): diff --git a/deepdoc/parser/pdf_parser.py b/deepdoc/parser/pdf_parser.py index ecc0b30eb..f865b80a9 100644 --- a/deepdoc/parser/pdf_parser.py +++ b/deepdoc/parser/pdf_parser.py @@ -37,7 +37,7 @@ from api.utils.file_utils import get_project_base_directory from deepdoc.vision import OCR, AscendLayoutRecognizer, LayoutRecognizer, Recognizer, TableStructureRecognizer from rag.app.picture import vision_llm_chunk as picture_vision_llm_chunk from rag.nlp import rag_tokenizer -from rag.prompts import vision_llm_describe_prompt +from rag.prompts.generator import vision_llm_describe_prompt from rag.settings import PARALLEL_DEVICES LOCK_KEY_pdfplumber = "global_shared_lock_pdfplumber" diff --git a/deepdoc/vision/ocr.py b/deepdoc/vision/ocr.py index d9f472aa1..d91de2ab8 100644 --- a/deepdoc/vision/ocr.py +++ b/deepdoc/vision/ocr.py @@ -350,7 +350,7 @@ class TextRecognizer: def close(self): # close session and release manually - logging.info('Close TextRecognizer.') + logging.info('Close text recognizer.') if hasattr(self, "predictor"): del self.predictor gc.collect() @@ -490,7 +490,7 @@ class TextDetector: return dt_boxes def close(self): - logging.info("Close TextDetector.") + logging.info("Close text detector.") if hasattr(self, "predictor"): del self.predictor gc.collect() diff --git a/docker/service_conf.yaml.template b/docker/service_conf.yaml.template index 5db35b9c7..b5121d6eb 100644 --- a/docker/service_conf.yaml.template +++ b/docker/service_conf.yaml.template @@ -1,6 +1,9 @@ ragflow: host: ${RAGFLOW_HOST:-0.0.0.0} http_port: 9380 +admin: + host: ${RAGFLOW_HOST:-0.0.0.0} + http_port: 9381 mysql: name: '${MYSQL_DBNAME:-rag_flow}' user: '${MYSQL_USER:-root}' diff --git a/docs/develop/mcp/_category_.json b/docs/develop/mcp/_category_.json index 35324fdcf..d2f129c23 100644 --- a/docs/develop/mcp/_category_.json +++ b/docs/develop/mcp/_category_.json @@ -3,6 +3,6 @@ "position": 40, "link": { "type": "generated-index", - "description": "Guides and references on accessing RAGFlow's knowledge bases via MCP." + "description": "Guides and references on accessing RAGFlow's datasets via MCP." } } diff --git a/docs/develop/mcp/launch_mcp_server.md b/docs/develop/mcp/launch_mcp_server.md index 718aaaf70..ceabc8bd0 100644 --- a/docs/develop/mcp/launch_mcp_server.md +++ b/docs/develop/mcp/launch_mcp_server.md @@ -14,9 +14,9 @@ A RAGFlow Model Context Protocol (MCP) server is designed as an independent comp An MCP server can start up in either self-host mode (default) or host mode: - **Self-host mode**: - When launching an MCP server in self-host mode, you must provide an API key to authenticate the MCP server with the RAGFlow server. In this mode, the MCP server can access *only* the datasets (knowledge bases) of a specified tenant on the RAGFlow server. + When launching an MCP server in self-host mode, you must provide an API key to authenticate the MCP server with the RAGFlow server. In this mode, the MCP server can access *only* the datasets of a specified tenant on the RAGFlow server. - **Host mode**: - In host mode, each MCP client can access their own knowledge bases on the RAGFlow server. However, each client request must include a valid API key to authenticate the client with the RAGFlow server. + In host mode, each MCP client can access their own datasets on the RAGFlow server. However, each client request must include a valid API key to authenticate the client with the RAGFlow server. Once a connection is established, an MCP server communicates with its client in MCP HTTP+SSE (Server-Sent Events) mode, unidirectionally pushing responses from the RAGFlow server to its client in real time. diff --git a/docs/faq.mdx b/docs/faq.mdx index 0d7cb46ce..a9bdf0cb1 100644 --- a/docs/faq.mdx +++ b/docs/faq.mdx @@ -498,7 +498,7 @@ To switch your document engine from Elasticsearch to [Infinity](https://github.c ### Where are my uploaded files stored in RAGFlow's image? -All uploaded files are stored in Minio, RAGFlow's object storage solution. For instance, if you upload your file directly to a knowledge base, it is located at `/filename`. +All uploaded files are stored in Minio, RAGFlow's object storage solution. For instance, if you upload your file directly to a dataset, it is located at `/filename`. --- @@ -507,3 +507,16 @@ All uploaded files are stored in Minio, RAGFlow's object storage solution. For i You can control the batch size for document parsing and embedding by setting the environment variables `DOC_BULK_SIZE` and `EMBEDDING_BATCH_SIZE`. Increasing these values may improve throughput for large-scale data processing, but will also increase memory usage. Adjust them according to your hardware resources. --- + +### How to accelerate the question-answering speed of my chat assistant? + +See [here](./guides/chat/best_practices/accelerate_question_answering.mdx). + +--- + +### How to accelerate the question-answering speed of my Agent? + +See [here](./guides/agent/best_practices/accelerate_agent_question_answering.md). + +--- + diff --git a/docs/guides/agent/agent_component_reference/agent.mdx b/docs/guides/agent/agent_component_reference/agent.mdx index d02617a73..e5daab65f 100644 --- a/docs/guides/agent/agent_component_reference/agent.mdx +++ b/docs/guides/agent/agent_component_reference/agent.mdx @@ -229,18 +229,4 @@ The global variable name for the output of the **Agent** component, which can be ### Why does it take so long for my Agent to respond? -An Agent’s response time generally depends on two key factors: the LLM’s capabilities and the prompt, the latter reflecting task complexity. When using an Agent, you should always balance task demands with the LLM’s ability. See [How to balance task complexity with an Agent's performance and speed?](#how-to-balance-task-complexity-with-an-agents-performance-and-speed) for details. - -## Best practices - -### How to balance task complexity with an Agent’s performance and speed? - -- For simple tasks, such as retrieval, rewriting, formatting, or structured data extraction, use concise prompts, remove planning or reasoning instructions, enforce output length limits, and select smaller or Turbo-class models. This significantly reduces latency and cost with minimal impact on quality. - -- For complex tasks, like multi-step reasoning, cross-document synthesis, or tool-based workflows, maintain or enhance prompts that include planning, reflection, and verification steps. - -- In multi-Agent orchestration systems, delegate simple subtasks to sub-Agents using smaller, faster models, and reserve more powerful models for the lead Agent to handle complexity and uncertainty. - -:::tip KEY INSIGHT -Focus on minimizing output tokens — through summarization, bullet points, or explicit length limits — as this has far greater impact on reducing latency than optimizing input size. -::: \ No newline at end of file +See [here](../best_practices/accelerate_agent_question_answering.md) for details. \ No newline at end of file diff --git a/docs/guides/agent/agent_component_reference/begin.mdx b/docs/guides/agent/agent_component_reference/begin.mdx index 74efb14be..597d93905 100644 --- a/docs/guides/agent/agent_component_reference/begin.mdx +++ b/docs/guides/agent/agent_component_reference/begin.mdx @@ -67,14 +67,14 @@ You can tune document parsing and embedding efficiency by setting the environmen ## Frequently asked questions -### Is the uploaded file in a knowledge base? +### Is the uploaded file in a dataset? -No. Files uploaded to an agent as input are not stored in a knowledge base and hence will not be processed using RAGFlow's built-in OCR, DLR or TSR models, or chunked using RAGFlow's built-in chunking methods. +No. Files uploaded to an agent as input are not stored in a dataset and hence will not be processed using RAGFlow's built-in OCR, DLR or TSR models, or chunked using RAGFlow's built-in chunking methods. ### File size limit for an uploaded file There is no _specific_ file size limit for a file uploaded to an agent. However, note that model providers typically have a default or explicit maximum token setting, which can range from 8196 to 128k: The plain text part of the uploaded file will be passed in as the key value, but if the file's token count exceeds this limit, the string will be truncated and incomplete. :::tip NOTE -The variables `MAX_CONTENT_LENGTH` in `/docker/.env` and `client_max_body_size` in `/docker/nginx/nginx.conf` set the file size limit for each upload to a knowledge base or **File Management**. These settings DO NOT apply in this scenario. +The variables `MAX_CONTENT_LENGTH` in `/docker/.env` and `client_max_body_size` in `/docker/nginx/nginx.conf` set the file size limit for each upload to a dataset or **File Management**. These settings DO NOT apply in this scenario. ::: diff --git a/docs/guides/agent/agent_component_reference/code.mdx b/docs/guides/agent/agent_component_reference/code.mdx index a6e356612..3d90496e1 100644 --- a/docs/guides/agent/agent_component_reference/code.mdx +++ b/docs/guides/agent/agent_component_reference/code.mdx @@ -49,6 +49,10 @@ You can specify multiple input sources for the **Code** component. Click **+ Add This field allows you to enter and edit your source code. +:::danger IMPORTANT +If your code implementation includes defined variables, whether input or output variables, ensure they are also specified in the corresponding **Input** or **Output** sections. +::: + #### A Python code example ```Python @@ -77,6 +81,15 @@ This field allows you to enter and edit your source code. You define the output variable(s) of the **Code** component here. +:::danger IMPORTANT +If you define output variables here, ensure they are also defined in your code implementation; otherwise, their values will be `null`. The following are two examples: + + +![](https://raw.githubusercontent.com/infiniflow/ragflow-docs/main/images/set_object_output.jpg) + +![](https://raw.githubusercontent.com/infiniflow/ragflow-docs/main/images/set_nested_object_output.png) +::: + ### Output The defined output variable(s) will be auto-populated here. diff --git a/docs/guides/agent/agent_component_reference/retrieval.mdx b/docs/guides/agent/agent_component_reference/retrieval.mdx index 0b69c641f..5807eab5c 100644 --- a/docs/guides/agent/agent_component_reference/retrieval.mdx +++ b/docs/guides/agent/agent_component_reference/retrieval.mdx @@ -9,7 +9,7 @@ A component that retrieves information from specified datasets. ## Scenarios -A **Retrieval** component is essential in most RAG scenarios, where information is extracted from designated knowledge bases before being sent to the LLM for content generation. A **Retrieval** component can operate either as a standalone workflow module or as a tool for an **Agent** component. In the latter role, the **Agent** component has autonomous control over when to invoke it for query and retrieval. +A **Retrieval** component is essential in most RAG scenarios, where information is extracted from designated datasets before being sent to the LLM for content generation. A **Retrieval** component can operate either as a standalone workflow module or as a tool for an **Agent** component. In the latter role, the **Agent** component has autonomous control over when to invoke it for query and retrieval. The following screenshot shows a reference design using the **Retrieval** component, where the component serves as a tool for an **Agent** component. You can find it from the **Report Agent Using Knowledge Base** Agent template. @@ -17,7 +17,7 @@ The following screenshot shows a reference design using the **Retrieval** compon ## Prerequisites -Ensure you [have properly configured your target knowledge base(s)](../../dataset/configure_knowledge_base.md). +Ensure you [have properly configured your target dataset(s)](../../dataset/configure_knowledge_base.md). ## Quickstart @@ -36,9 +36,9 @@ The **Retrieval** component depends on query variables to specify its queries. By default, you can use `sys.query`, which is the user query and the default output of the **Begin** component. All global variables defined before the **Retrieval** component can also be used as query statements. Use the `(x)` button or type `/` to show all the available query variables. -### 3. Select knowledge base(s) to query +### 3. Select dataset(s) to query -You can specify one or multiple knowledge bases to retrieve data from. If selecting mutiple, ensure they use the same embedding model. +You can specify one or multiple datasets to retrieve data from. If selecting mutiple, ensure they use the same embedding model. ### 4. Expand **Advanced Settings** to configure the retrieval method @@ -52,7 +52,7 @@ Using a rerank model will *significantly* increase the system's response time. I ### 5. Enable cross-language search -If your user query is different from the languages of the knowledge bases, you can select the target languages in the **Cross-language search** dropdown menu. The model will then translates queries to ensure accurate matching of semantic meaning across languages. +If your user query is different from the languages of the datasets, you can select the target languages in the **Cross-language search** dropdown menu. The model will then translates queries to ensure accurate matching of semantic meaning across languages. ### 6. Test retrieval results @@ -76,10 +76,10 @@ The **Retrieval** component relies on query variables to specify its queries. Al ### Knowledge bases -Select the knowledge base(s) to retrieve data from. +Select the dataset(s) to retrieve data from. -- If no knowledge base is selected, meaning conversations with the agent will not be based on any knowledge base, ensure that the **Empty response** field is left blank to avoid an error. -- If you select multiple knowledge bases, you must ensure that the knowledge bases (datasets) you select use the same embedding model; otherwise, an error message would occur. +- If no dataset is selected, meaning conversations with the agent will not be based on any dataset, ensure that the **Empty response** field is left blank to avoid an error. +- If you select multiple datasets, you must ensure that the datasets you select use the same embedding model; otherwise, an error message would occur. ### Similarity threshold @@ -110,11 +110,11 @@ Using a rerank model will *significantly* increase the system's response time. ### Empty response -- Set this as a response if no results are retrieved from the knowledge base(s) for your query, or +- Set this as a response if no results are retrieved from the dataset(s) for your query, or - Leave this field blank to allow the chat model to improvise when nothing is found. :::caution WARNING -If you do not specify a knowledge base, you must leave this field blank; otherwise, an error would occur. +If you do not specify a dataset, you must leave this field blank; otherwise, an error would occur. ::: ### Cross-language search @@ -124,10 +124,10 @@ Select one or more languages for cross‑language search. If no language is sele ### Use knowledge graph :::caution IMPORTANT -Before enabling this feature, ensure you have properly [constructed a knowledge graph from each target knowledge base](../../dataset/construct_knowledge_graph.md). +Before enabling this feature, ensure you have properly [constructed a knowledge graph from each target dataset](../../dataset/construct_knowledge_graph.md). ::: -Whether to use knowledge graph(s) in the specified knowledge base(s) during retrieval for multi-hop question answering. When enabled, this would involve iterative searches across entity, relationship, and community report chunks, greatly increasing retrieval time. +Whether to use knowledge graph(s) in the specified dataset(s) during retrieval for multi-hop question answering. When enabled, this would involve iterative searches across entity, relationship, and community report chunks, greatly increasing retrieval time. ### Output diff --git a/docs/guides/agent/agent_introduction.md b/docs/guides/agent/agent_introduction.md index c93bf4c25..fa21a7810 100644 --- a/docs/guides/agent/agent_introduction.md +++ b/docs/guides/agent/agent_introduction.md @@ -27,7 +27,7 @@ Agents and RAG are complementary techniques, each enhancing the other’s capabi Before proceeding, ensure that: 1. You have properly set the LLM to use. See the guides on [Configure your API key](../models/llm_api_key_setup.md) or [Deploy a local LLM](../models/deploy_local_llm.mdx) for more information. -2. You have a knowledge base configured and the corresponding files properly parsed. See the guide on [Configure a knowledge base](../dataset/configure_knowledge_base.md) for more information. +2. You have a dataset configured and the corresponding files properly parsed. See the guide on [Configure a dataset](../dataset/configure_knowledge_base.md) for more information. ::: diff --git a/docs/guides/agent/best_practices/_category_.json b/docs/guides/agent/best_practices/_category_.json new file mode 100644 index 000000000..c788383c0 --- /dev/null +++ b/docs/guides/agent/best_practices/_category_.json @@ -0,0 +1,8 @@ +{ + "label": "Best practices", + "position": 30, + "link": { + "type": "generated-index", + "description": "Best practices on Agent configuration." + } +} diff --git a/docs/guides/agent/best_practices/accelerate_agent_question_answering.md b/docs/guides/agent/best_practices/accelerate_agent_question_answering.md new file mode 100644 index 000000000..c06937755 --- /dev/null +++ b/docs/guides/agent/best_practices/accelerate_agent_question_answering.md @@ -0,0 +1,58 @@ +--- +sidebar_position: 1 +slug: /accelerate_agent_question_answering +--- + +# Accelerate answering + +A checklist to speed up question answering. + +--- + +Please note that some of your settings may consume a significant amount of time. If you often find that your question answering is time-consuming, here is a checklist to consider: + +## Balance task complexity with an Agent’s performance and speed? + +An Agent’s response time generally depends on many factors, e.g., the LLM’s capabilities and the prompt, the latter reflecting task complexity. When using an Agent, you should always balance task demands with the LLM’s ability. + +- For simple tasks, such as retrieval, rewriting, formatting, or structured data extraction, use concise prompts, remove planning or reasoning instructions, enforce output length limits, and select smaller or Turbo-class models. This significantly reduces latency and cost with minimal impact on quality. + +- For complex tasks, like multi-step reasoning, cross-document synthesis, or tool-based workflows, maintain or enhance prompts that include planning, reflection, and verification steps. + +- In multi-Agent orchestration systems, delegate simple subtasks to sub-Agents using smaller, faster models, and reserve more powerful models for the lead Agent to handle complexity and uncertainty. + +:::tip KEY INSIGHT +Focus on minimizing output tokens — through summarization, bullet points, or explicit length limits — as this has far greater impact on reducing latency than optimizing input size. +::: + +## Disable Reasoning + +Disabling the **Reasoning** toggle will reduce the LLM's thinking time. For a model like Qwen3, you also need to add `/no_think` to the system prompt to disable reasoning. + +## Disable Rerank model + +- Leaving the **Rerank model** field empty (in the corresponding **Retrieval** component) will significantly decrease retrieval time. +- When using a rerank model, ensure you have a GPU for acceleration; otherwise, the reranking process will be *prohibitively* slow. + +:::tip NOTE +Please note that rerank models are essential in certain scenarios. There is always a trade-off between speed and performance; you must weigh the pros against cons for your specific case. +::: + +## Check the time taken for each task + +Click the light bulb icon above the *current* dialogue and scroll down the popup window to view the time taken for each task: + + + +| Item name | Description | +| ----------------- | --------------------------------------------------------------------------------------------- | +| Total | Total time spent on this conversation round, including chunk retrieval and answer generation. | +| Check LLM | Time to validate the specified LLM. | +| Create retriever | Time to create a chunk retriever. | +| Bind embedding | Time to initialize an embedding model instance. | +| Bind LLM | Time to initialize an LLM instance. | +| Tune question | Time to optimize the user query using the context of the mult-turn conversation. | +| Bind reranker | Time to initialize an reranker model instance for chunk retrieval. | +| Generate keywords | Time to extract keywords from the user query. | +| Retrieval | Time to retrieve the chunks. | +| Generate answer | Time to generate the answer. | diff --git a/docs/guides/ai_search.md b/docs/guides/ai_search.md index e5f48793c..6bd533600 100644 --- a/docs/guides/ai_search.md +++ b/docs/guides/ai_search.md @@ -22,7 +22,7 @@ When debugging your chat assistant, you can use AI search as a reference to veri ## Prerequisites - Ensure that you have configured the system's default models on the **Model providers** page. -- Ensure that the intended knowledge bases are properly configured and the intended documents have finished file parsing. +- Ensure that the intended datasets are properly configured and the intended documents have finished file parsing. ## Frequently asked questions diff --git a/docs/guides/chat/best_practices/accelerate_question_answering.mdx b/docs/guides/chat/best_practices/accelerate_question_answering.mdx index 408a403ab..e404c1c2a 100644 --- a/docs/guides/chat/best_practices/accelerate_question_answering.mdx +++ b/docs/guides/chat/best_practices/accelerate_question_answering.mdx @@ -6,21 +6,22 @@ slug: /accelerate_question_answering # Accelerate answering import APITable from '@site/src/components/APITable'; -A checklist to speed up question answering. +A checklist to speed up question answering for your chat assistant. --- Please note that some of your settings may consume a significant amount of time. If you often find that your question answering is time-consuming, here is a checklist to consider: -- In the **Prompt engine** tab of your **Chat Configuration** dialogue, disabling **Multi-turn optimization** will reduce the time required to get an answer from the LLM. -- In the **Prompt engine** tab of your **Chat Configuration** dialogue, leaving the **Rerank model** field empty will significantly decrease retrieval time. +- Disabling **Multi-turn optimization** will reduce the time required to get an answer from the LLM. +- Leaving the **Rerank model** field empty will significantly decrease retrieval time. +- Disabling the **Reasoning** toggle will reduce the LLM's thinking time. For a model like Qwen3, you also need to add `/no_think` to the system prompt to disable reasoning. - When using a rerank model, ensure you have a GPU for acceleration; otherwise, the reranking process will be *prohibitively* slow. :::tip NOTE Please note that rerank models are essential in certain scenarios. There is always a trade-off between speed and performance; you must weigh the pros against cons for your specific case. ::: -- In the **Assistant settings** tab of your **Chat Configuration** dialogue, disabling **Keyword analysis** will reduce the time to receive an answer from the LLM. +- Disabling **Keyword analysis** will reduce the time to receive an answer from the LLM. - When chatting with your chat assistant, click the light bulb icon above the *current* dialogue and scroll down the popup window to view the time taken for each task: ![enlighten](https://github.com/user-attachments/assets/fedfa2ee-21a7-451b-be66-20125619923c) diff --git a/docs/guides/chat/set_chat_variables.md b/docs/guides/chat/set_chat_variables.md index a5676c4f9..89e786262 100644 --- a/docs/guides/chat/set_chat_variables.md +++ b/docs/guides/chat/set_chat_variables.md @@ -25,13 +25,13 @@ In the **Variable** section, you add, remove, or update variables. ### `{knowledge}` - a reserved variable -`{knowledge}` is the system's reserved variable, representing the chunks retrieved from the knowledge base(s) specified by **Knowledge bases** under the **Assistant settings** tab. If your chat assistant is associated with certain knowledge bases, you can keep it as is. +`{knowledge}` is the system's reserved variable, representing the chunks retrieved from the dataset(s) specified by **Knowledge bases** under the **Assistant settings** tab. If your chat assistant is associated with certain datasets, you can keep it as is. :::info NOTE It currently makes no difference whether `{knowledge}` is set as optional or mandatory, but please note this design will be updated in due course. ::: -From v0.17.0 onward, you can start an AI chat without specifying knowledge bases. In this case, we recommend removing the `{knowledge}` variable to prevent unnecessary reference and keeping the **Empty response** field empty to avoid errors. +From v0.17.0 onward, you can start an AI chat without specifying datasets. In this case, we recommend removing the `{knowledge}` variable to prevent unnecessary reference and keeping the **Empty response** field empty to avoid errors. ### Custom variables @@ -45,15 +45,15 @@ Besides `{knowledge}`, you can also define your own variables to pair with the s After you add or remove variables in the **Variable** section, ensure your changes are reflected in the system prompt to avoid inconsistencies or errors. Here's an example: ``` -You are an intelligent assistant. Please answer the question by summarizing chunks from the specified knowledge base(s)... +You are an intelligent assistant. Please answer the question by summarizing chunks from the specified dataset(s)... Your answers should follow a professional and {style} style. ... -Here is the knowledge base: +Here is the dataset: {knowledge} -The above is the knowledge base. +The above is the dataset. ``` :::tip NOTE diff --git a/docs/guides/chat/start_chat.md b/docs/guides/chat/start_chat.md index abe7f8a8f..1ba8c2755 100644 --- a/docs/guides/chat/start_chat.md +++ b/docs/guides/chat/start_chat.md @@ -9,7 +9,7 @@ Initiate an AI-powered chat with a configured chat assistant. --- -Knowledge base, hallucination-free chat, and file management are the three pillars of RAGFlow. Chats in RAGFlow are based on a particular knowledge base or multiple knowledge bases. Once you have created your knowledge base, finished file parsing, and [run a retrieval test](../dataset/run_retrieval_test.md), you can go ahead and start an AI conversation. +Knowledge base, hallucination-free chat, and file management are the three pillars of RAGFlow. Chats in RAGFlow are based on a particular dataset or multiple datasets. Once you have created your dataset, finished file parsing, and [run a retrieval test](../dataset/run_retrieval_test.md), you can go ahead and start an AI conversation. ## Start an AI chat @@ -21,12 +21,12 @@ You start an AI conversation by creating an assistant. 2. Update **Assistant settings**: - - **Assistant name** is the name of your chat assistant. Each assistant corresponds to a dialogue with a unique combination of knowledge bases, prompts, hybrid search configurations, and large model settings. + - **Assistant name** is the name of your chat assistant. Each assistant corresponds to a dialogue with a unique combination of datasets, prompts, hybrid search configurations, and large model settings. - **Empty response**: - - If you wish to *confine* RAGFlow's answers to your knowledge bases, leave a response here. Then, when it doesn't retrieve an answer, it *uniformly* responds with what you set here. - - If you wish RAGFlow to *improvise* when it doesn't retrieve an answer from your knowledge bases, leave it blank, which may give rise to hallucinations. + - If you wish to *confine* RAGFlow's answers to your datasets, leave a response here. Then, when it doesn't retrieve an answer, it *uniformly* responds with what you set here. + - If you wish RAGFlow to *improvise* when it doesn't retrieve an answer from your datasets, leave it blank, which may give rise to hallucinations. - **Show quote**: This is a key feature of RAGFlow and enabled by default. RAGFlow does not work like a black box. Instead, it clearly shows the sources of information that its responses are based on. - - Select the corresponding knowledge bases. You can select one or multiple knowledge bases, but ensure that they use the same embedding model, otherwise an error would occur. + - Select the corresponding datasets. You can select one or multiple datasets, but ensure that they use the same embedding model, otherwise an error would occur. 3. Update **Prompt engine**: @@ -37,14 +37,14 @@ You start an AI conversation by creating an assistant. - If **Rerank model** is selected, the hybrid score system uses keyword similarity and reranker score, and the default weight assigned to the reranker score is 1-0.7=0.3. - **Top N** determines the *maximum* number of chunks to feed to the LLM. In other words, even if more chunks are retrieved, only the top N chunks are provided as input. - **Multi-turn optimization** enhances user queries using existing context in a multi-round conversation. It is enabled by default. When enabled, it will consume additional LLM tokens and significantly increase the time to generate answers. - - **Use knowledge graph** indicates whether to use knowledge graph(s) in the specified knowledge base(s) during retrieval for multi-hop question answering. When enabled, this would involve iterative searches across entity, relationship, and community report chunks, greatly increasing retrieval time. + - **Use knowledge graph** indicates whether to use knowledge graph(s) in the specified dataset(s) during retrieval for multi-hop question answering. When enabled, this would involve iterative searches across entity, relationship, and community report chunks, greatly increasing retrieval time. - **Reasoning** indicates whether to generate answers through reasoning processes like Deepseek-R1/OpenAI o1. Once enabled, the chat model autonomously integrates Deep Research during question answering when encountering an unknown topic. This involves the chat model dynamically searching external knowledge and generating final answers through reasoning. - **Rerank model** sets the reranker model to use. It is left empty by default. - If **Rerank model** is left empty, the hybrid score system uses keyword similarity and vector similarity, and the default weight assigned to the vector similarity component is 1-0.7=0.3. - If **Rerank model** is selected, the hybrid score system uses keyword similarity and reranker score, and the default weight assigned to the reranker score is 1-0.7=0.3. - [Cross-language search](../../references/glossary.mdx#cross-language-search): Optional Select one or more target languages from the dropdown menu. The system’s default chat model will then translate your query into the selected target language(s). This translation ensures accurate semantic matching across languages, allowing you to retrieve relevant results regardless of language differences. - - When selecting target languages, please ensure that these languages are present in the knowledge base to guarantee an effective search. + - When selecting target languages, please ensure that these languages are present in the dataset to guarantee an effective search. - If no target language is selected, the system will search only in the language of your query, which may cause relevant information in other languages to be missed. - **Variable** refers to the variables (keys) to be used in the system prompt. `{knowledge}` is a reserved variable. Click **Add** to add more variables for the system prompt. - If you are uncertain about the logic behind **Variable**, leave it *as-is*. diff --git a/docs/guides/dataset/_category_.json b/docs/guides/dataset/_category_.json index f0d79edfd..4c454f51f 100644 --- a/docs/guides/dataset/_category_.json +++ b/docs/guides/dataset/_category_.json @@ -3,6 +3,6 @@ "position": 0, "link": { "type": "generated-index", - "description": "Guides on configuring a knowledge base." + "description": "Guides on configuring a dataset." } } diff --git a/docs/guides/dataset/autokeyword_autoquestion.mdx b/docs/guides/dataset/autokeyword_autoquestion.mdx index c7a1293af..f61e50317 100644 --- a/docs/guides/dataset/autokeyword_autoquestion.mdx +++ b/docs/guides/dataset/autokeyword_autoquestion.mdx @@ -6,7 +6,7 @@ slug: /autokeyword_autoquestion # Auto-keyword Auto-question import APITable from '@site/src/components/APITable'; -Use a chat model to generate keywords or questions from each chunk in the knowledge base. +Use a chat model to generate keywords or questions from each chunk in the dataset. --- @@ -18,7 +18,7 @@ Enabling this feature increases document indexing time and uses extra tokens, as ## What is Auto-keyword? -Auto-keyword refers to the auto-keyword generation feature of RAGFlow. It uses a chat model to generate a set of keywords or synonyms from each chunk to correct errors and enhance retrieval accuracy. This feature is implemented as a slider under **Page rank** on the **Configuration** page of your knowledge base. +Auto-keyword refers to the auto-keyword generation feature of RAGFlow. It uses a chat model to generate a set of keywords or synonyms from each chunk to correct errors and enhance retrieval accuracy. This feature is implemented as a slider under **Page rank** on the **Configuration** page of your dataset. **Values**: @@ -33,7 +33,7 @@ Auto-keyword refers to the auto-keyword generation feature of RAGFlow. It uses a ## What is Auto-question? -Auto-question is a feature of RAGFlow that automatically generates questions from chunks of data using a chat model. These questions (e.g. who, what, and why) also help correct errors and improve the matching of user queries. The feature usually works with FAQ retrieval scenarios involving product manuals or policy documents. And you can find this feature as a slider under **Page rank** on the **Configuration** page of your knowledge base. +Auto-question is a feature of RAGFlow that automatically generates questions from chunks of data using a chat model. These questions (e.g. who, what, and why) also help correct errors and improve the matching of user queries. The feature usually works with FAQ retrieval scenarios involving product manuals or policy documents. And you can find this feature as a slider under **Page rank** on the **Configuration** page of your dataset. **Values**: @@ -48,7 +48,7 @@ Auto-question is a feature of RAGFlow that automatically generates questions fro ## Tips from the community -The Auto-keyword or Auto-question values relate closely to the chunking size in your knowledge base. However, if you are new to this feature and unsure which value(s) to start with, the following are some value settings we gathered from our community. While they may not be accurate, they provide a starting point at the very least. +The Auto-keyword or Auto-question values relate closely to the chunking size in your dataset. However, if you are new to this feature and unsure which value(s) to start with, the following are some value settings we gathered from our community. While they may not be accurate, they provide a starting point at the very least. ```mdx-code-block diff --git a/docs/guides/dataset/best_practices/_category_.json b/docs/guides/dataset/best_practices/_category_.json index 52098b7d8..f55fe009b 100644 --- a/docs/guides/dataset/best_practices/_category_.json +++ b/docs/guides/dataset/best_practices/_category_.json @@ -3,6 +3,6 @@ "position": 11, "link": { "type": "generated-index", - "description": "Best practices on configuring a knowledge base." + "description": "Best practices on configuring a dataset." } } diff --git a/docs/guides/dataset/best_practices/accelerate_doc_indexing.mdx b/docs/guides/dataset/best_practices/accelerate_doc_indexing.mdx index bc0dde11b..d70579769 100644 --- a/docs/guides/dataset/best_practices/accelerate_doc_indexing.mdx +++ b/docs/guides/dataset/best_practices/accelerate_doc_indexing.mdx @@ -13,7 +13,7 @@ A checklist to speed up document parsing and indexing. Please note that some of your settings may consume a significant amount of time. If you often find that document parsing is time-consuming, here is a checklist to consider: - Use GPU to reduce embedding time. -- On the configuration page of your knowledge base, switch off **Use RAPTOR to enhance retrieval**. +- On the configuration page of your dataset, switch off **Use RAPTOR to enhance retrieval**. - Extracting knowledge graph (GraphRAG) is time-consuming. -- Disable **Auto-keyword** and **Auto-question** on the configuration page of your knowledge base, as both depend on the LLM. -- **v0.17.0+:** If all PDFs in your knowledge base are plain text and do not require GPU-intensive processes like OCR (Optical Character Recognition), TSR (Table Structure Recognition), or DLA (Document Layout Analysis), you can choose **Naive** over **DeepDoc** or other time-consuming large model options in the **Document parser** dropdown. This will substantially reduce document parsing time. +- Disable **Auto-keyword** and **Auto-question** on the configuration page of your dataset, as both depend on the LLM. +- **v0.17.0+:** If all PDFs in your dataset are plain text and do not require GPU-intensive processes like OCR (Optical Character Recognition), TSR (Table Structure Recognition), or DLA (Document Layout Analysis), you can choose **Naive** over **DeepDoc** or other time-consuming large model options in the **Document parser** dropdown. This will substantially reduce document parsing time. diff --git a/docs/guides/dataset/configure_knowledge_base.md b/docs/guides/dataset/configure_knowledge_base.md index 432ecce1f..487d8b9cd 100644 --- a/docs/guides/dataset/configure_knowledge_base.md +++ b/docs/guides/dataset/configure_knowledge_base.md @@ -3,28 +3,28 @@ sidebar_position: -1 slug: /configure_knowledge_base --- -# Configure knowledge base +# Configure dataset -Knowledge base, hallucination-free chat, and file management are the three pillars of RAGFlow. RAGFlow's AI chats are based on knowledge bases. Each of RAGFlow's knowledge bases serves as a knowledge source, *parsing* files uploaded from your local machine and file references generated in **File Management** into the real 'knowledge' for future AI chats. This guide demonstrates some basic usages of the knowledge base feature, covering the following topics: +Most of RAGFlow's chat assistants and Agents are based on datasets. Each of RAGFlow's datasets serves as a knowledge source, *parsing* files uploaded from your local machine and file references generated in **File Management** into the real 'knowledge' for future AI chats. This guide demonstrates some basic usages of the dataset feature, covering the following topics: -- Create a knowledge base -- Configure a knowledge base -- Search for a knowledge base -- Delete a knowledge base +- Create a dataset +- Configure a dataset +- Search for a dataset +- Delete a dataset -## Create knowledge base +## Create dataset -With multiple knowledge bases, you can build more flexible, diversified question answering. To create your first knowledge base: +With multiple datasets, you can build more flexible, diversified question answering. To create your first dataset: -![create knowledge base](https://raw.githubusercontent.com/infiniflow/ragflow-docs/main/images/create_knowledge_base.jpg) +![create dataset](https://raw.githubusercontent.com/infiniflow/ragflow-docs/main/images/create_knowledge_base.jpg) -_Each time a knowledge base is created, a folder with the same name is generated in the **root/.knowledgebase** directory._ +_Each time a dataset is created, a folder with the same name is generated in the **root/.knowledgebase** directory._ -## Configure knowledge base +## Configure dataset -The following screenshot shows the configuration page of a knowledge base. A proper configuration of your knowledge base is crucial for future AI chats. For example, choosing the wrong embedding model or chunking method would cause unexpected semantic loss or mismatched answers in chats. +The following screenshot shows the configuration page of a dataset. A proper configuration of your dataset is crucial for future AI chats. For example, choosing the wrong embedding model or chunking method would cause unexpected semantic loss or mismatched answers in chats. -![knowledge base configuration](https://raw.githubusercontent.com/infiniflow/ragflow-docs/main/images/configure_knowledge_base.jpg) +![dataset configuration](https://raw.githubusercontent.com/infiniflow/ragflow-docs/main/images/configure_knowledge_base.jpg) This section covers the following topics: @@ -52,7 +52,7 @@ RAGFlow offers multiple chunking template to facilitate chunking files of differ | Presentation | | PDF, PPTX | | Picture | | JPEG, JPG, PNG, TIF, GIF | | One | Each document is chunked in its entirety (as one). | DOCX, XLSX, XLS (Excel 97-2003), PDF, TXT | -| Tag | The knowledge base functions as a tag set for the others. | XLSX, CSV/TXT | +| Tag | The dataset functions as a tag set for the others. | XLSX, CSV/TXT | You can also change a file's chunking method on the **Datasets** page. @@ -60,7 +60,7 @@ You can also change a file's chunking method on the **Datasets** page. ### Select embedding model -An embedding model converts chunks into embeddings. It cannot be changed once the knowledge base has chunks. To switch to a different embedding model, you must delete all existing chunks in the knowledge base. The obvious reason is that we *must* ensure that files in a specific knowledge base are converted to embeddings using the *same* embedding model (ensure that they are compared in the same embedding space). +An embedding model converts chunks into embeddings. It cannot be changed once the dataset has chunks. To switch to a different embedding model, you must delete all existing chunks in the dataset. The obvious reason is that we *must* ensure that files in a specific dataset are converted to embeddings using the *same* embedding model (ensure that they are compared in the same embedding space). The following embedding models can be deployed locally: @@ -73,19 +73,19 @@ These two embedding models are optimized specifically for English and Chinese, s ### Upload file -- RAGFlow's **File Management** allows you to link a file to multiple knowledge bases, in which case each target knowledge base holds a reference to the file. -- In **Knowledge Base**, you are also given the option of uploading a single file or a folder of files (bulk upload) from your local machine to a knowledge base, in which case the knowledge base holds file copies. +- RAGFlow's **File Management** allows you to link a file to multiple datasets, in which case each target dataset holds a reference to the file. +- In **Knowledge Base**, you are also given the option of uploading a single file or a folder of files (bulk upload) from your local machine to a dataset, in which case the dataset holds file copies. -While uploading files directly to a knowledge base seems more convenient, we *highly* recommend uploading files to **File Management** and then linking them to the target knowledge bases. This way, you can avoid permanently deleting files uploaded to the knowledge base. +While uploading files directly to a dataset seems more convenient, we *highly* recommend uploading files to **File Management** and then linking them to the target datasets. This way, you can avoid permanently deleting files uploaded to the dataset. ### Parse file -File parsing is a crucial topic in knowledge base configuration. The meaning of file parsing in RAGFlow is twofold: chunking files based on file layout and building embedding and full-text (keyword) indexes on these chunks. After having selected the chunking method and embedding model, you can start parsing a file: +File parsing is a crucial topic in dataset configuration. The meaning of file parsing in RAGFlow is twofold: chunking files based on file layout and building embedding and full-text (keyword) indexes on these chunks. After having selected the chunking method and embedding model, you can start parsing a file: ![parse file](https://raw.githubusercontent.com/infiniflow/ragflow-docs/main/images/parse_file.jpg) - As shown above, RAGFlow allows you to use a different chunking method for a particular file, offering flexibility beyond the default method. -- As shown above, RAGFlow allows you to enable or disable individual files, offering finer control over knowledge base-based AI chats. +- As shown above, RAGFlow allows you to enable or disable individual files, offering finer control over dataset-based AI chats. ### Intervene with file parsing results @@ -122,17 +122,17 @@ RAGFlow uses multiple recall of both full-text search and vector search in its c See [Run retrieval test](./run_retrieval_test.md) for details. -## Search for knowledge base +## Search for dataset -As of RAGFlow v0.20.5, the search feature is still in a rudimentary form, supporting only knowledge base search by name. +As of RAGFlow v0.20.5, the search feature is still in a rudimentary form, supporting only dataset search by name. -![search knowledge base](https://raw.githubusercontent.com/infiniflow/ragflow-docs/main/images/search_datasets.jpg) +![search dataset](https://raw.githubusercontent.com/infiniflow/ragflow-docs/main/images/search_datasets.jpg) -## Delete knowledge base +## Delete dataset -You are allowed to delete a knowledge base. Hover your mouse over the three dot of the intended knowledge base card and the **Delete** option appears. Once you delete a knowledge base, the associated folder under **root/.knowledge** directory is AUTOMATICALLY REMOVED. The consequence is: +You are allowed to delete a dataset. Hover your mouse over the three dot of the intended dataset card and the **Delete** option appears. Once you delete a dataset, the associated folder under **root/.knowledge** directory is AUTOMATICALLY REMOVED. The consequence is: -- The files uploaded directly to the knowledge base are gone; +- The files uploaded directly to the dataset are gone; - The file references, which you created from within **File Management**, are gone, but the associated files still exist in **File Management**. -![delete knowledge base](https://raw.githubusercontent.com/infiniflow/ragflow-docs/main/images/delete_datasets.jpg) +![delete dataset](https://raw.githubusercontent.com/infiniflow/ragflow-docs/main/images/delete_datasets.jpg) diff --git a/docs/guides/dataset/construct_knowledge_graph.md b/docs/guides/dataset/construct_knowledge_graph.md index edc63d98b..4484526f3 100644 --- a/docs/guides/dataset/construct_knowledge_graph.md +++ b/docs/guides/dataset/construct_knowledge_graph.md @@ -5,7 +5,7 @@ slug: /construct_knowledge_graph # Construct knowledge graph -Generate a knowledge graph for your knowledge base. +Generate a knowledge graph for your dataset. --- @@ -13,7 +13,7 @@ To enhance multi-hop question-answering, RAGFlow adds a knowledge graph construc ![Image](https://github.com/user-attachments/assets/1ec21d8e-f255-4d65-9918-69b72dfa142b) -From v0.16.0 onward, RAGFlow supports constructing a knowledge graph on a knowledge base, allowing you to construct a *unified* graph across multiple files within your knowledge base. When a newly uploaded file starts parsing, the generated graph will automatically update. +From v0.16.0 onward, RAGFlow supports constructing a knowledge graph on a dataset, allowing you to construct a *unified* graph across multiple files within your dataset. When a newly uploaded file starts parsing, the generated graph will automatically update. :::danger WARNING Constructing a knowledge graph requires significant memory, computational resources, and tokens. @@ -37,7 +37,7 @@ The system's default chat model is used to generate knowledge graph. Before proc ### Entity types (*Required*) -The types of the entities to extract from your knowledge base. The default types are: **organization**, **person**, **event**, and **category**. Add or remove types to suit your specific knowledge base. +The types of the entities to extract from your dataset. The default types are: **organization**, **person**, **event**, and **category**. Add or remove types to suit your specific dataset. ### Method @@ -62,12 +62,12 @@ In a knowledge graph, a community is a cluster of entities linked by relationshi ## Procedure -1. On the **Configuration** page of your knowledge base, switch on **Extract knowledge graph** or adjust its settings as needed, and click **Save** to confirm your changes. +1. On the **Configuration** page of your dataset, switch on **Extract knowledge graph** or adjust its settings as needed, and click **Save** to confirm your changes. - - *The default knowledge graph configurations for your knowledge base are now set and files uploaded from this point onward will automatically use these settings during parsing.* + - *The default knowledge graph configurations for your dataset are now set and files uploaded from this point onward will automatically use these settings during parsing.* - *Files parsed before this update will retain their original knowledge graph settings.* -2. The knowledge graph of your knowledge base does *not* automatically update *until* a newly uploaded file is parsed. +2. The knowledge graph of your dataset does *not* automatically update *until* a newly uploaded file is parsed. _A **Knowledge graph** entry appears under **Configuration** once a knowledge graph is created._ @@ -75,13 +75,13 @@ In a knowledge graph, a community is a cluster of entities linked by relationshi 4. To use the created knowledge graph, do either of the following: - In the **Chat setting** panel of your chat app, switch on the **Use knowledge graph** toggle. - - If you are using an agent, click the **Retrieval** agent component to specify the knowledge base(s) and switch on the **Use knowledge graph** toggle. + - If you are using an agent, click the **Retrieval** agent component to specify the dataset(s) and switch on the **Use knowledge graph** toggle. ## Frequently asked questions -### Can I have different knowledge graph settings for different files in my knowledge base? +### Can I have different knowledge graph settings for different files in my dataset? -Yes, you can. Just one graph is generated per knowledge base. The smaller graphs of your files will be *combined* into one big, unified graph at the end of the graph extraction process. +Yes, you can. Just one graph is generated per dataset. The smaller graphs of your files will be *combined* into one big, unified graph at the end of the graph extraction process. ### Does the knowledge graph automatically update when I remove a related file? @@ -89,7 +89,7 @@ Nope. The knowledge graph does *not* automatically update *until* a newly upload ### How to remove a generated knowledge graph? -To remove the generated knowledge graph, delete all related files in your knowledge base. Although the **Knowledge graph** entry will still be visible, the graph has actually been deleted. +To remove the generated knowledge graph, delete all related files in your dataset. Although the **Knowledge graph** entry will still be visible, the graph has actually been deleted. ### Where is the created knowledge graph stored? diff --git a/docs/guides/dataset/enable_excel2html.md b/docs/guides/dataset/enable_excel2html.md index 531a673cc..d8090420d 100644 --- a/docs/guides/dataset/enable_excel2html.md +++ b/docs/guides/dataset/enable_excel2html.md @@ -12,7 +12,7 @@ Convert complex Excel spreadsheets into HTML tables. When using the **General** chunking method, you can enable the **Excel to HTML** toggle to convert spreadsheet files into HTML tables. If it is disabled, spreadsheet tables will be represented as key-value pairs. For complex tables that cannot be simply represented this way, you must enable this feature. :::caution WARNING -The feature is disabled by default. If your knowledge base contains spreadsheets with complex tables and you do not enable this feature, RAGFlow will not throw an error but your tables are likely to be garbled. +The feature is disabled by default. If your dataset contains spreadsheets with complex tables and you do not enable this feature, RAGFlow will not throw an error but your tables are likely to be garbled. ::: ## Scenarios @@ -27,12 +27,12 @@ Works with complex tables that cannot be represented as key-value pairs. Example ## Procedure -1. On your knowledge base's **Configuration** page, select **General** as the chunking method. +1. On your dataset's **Configuration** page, select **General** as the chunking method. _The **Excel to HTML** toggle appears._ -2. Enable **Excel to HTML** if your knowledge base contains complex spreadsheet tables that cannot be represented as key-value pairs. -3. Leave **Excel to HTML** disabled if your knowledge base has no spreadsheet tables or if its spreadsheet tables can be represented as key-value pairs. +2. Enable **Excel to HTML** if your dataset contains complex spreadsheet tables that cannot be represented as key-value pairs. +3. Leave **Excel to HTML** disabled if your dataset has no spreadsheet tables or if its spreadsheet tables can be represented as key-value pairs. 4. If question-answering regarding complex tables is unsatisfactory, check if **Excel to HTML** is enabled. ## Frequently asked questions diff --git a/docs/guides/dataset/enable_raptor.md b/docs/guides/dataset/enable_raptor.md index ae55faaa7..096bc19ef 100644 --- a/docs/guides/dataset/enable_raptor.md +++ b/docs/guides/dataset/enable_raptor.md @@ -43,7 +43,7 @@ The system's default chat model is used to summarize clustered content. Before p ## Configurations -The RAPTOR feature is disabled by default. To enable it, manually switch on the **Use RAPTOR to enhance retrieval** toggle on your knowledge base's **Configuration** page. +The RAPTOR feature is disabled by default. To enable it, manually switch on the **Use RAPTOR to enhance retrieval** toggle on your dataset's **Configuration** page. ### Prompt diff --git a/docs/guides/dataset/run_retrieval_test.md b/docs/guides/dataset/run_retrieval_test.md index a9ca9f192..08ef999cd 100644 --- a/docs/guides/dataset/run_retrieval_test.md +++ b/docs/guides/dataset/run_retrieval_test.md @@ -5,11 +5,11 @@ slug: /run_retrieval_test # Run retrieval test -Conduct a retrieval test on your knowledge base to check whether the intended chunks can be retrieved. +Conduct a retrieval test on your dataset to check whether the intended chunks can be retrieved. --- -After your files are uploaded and parsed, it is recommended that you run a retrieval test before proceeding with the chat assistant configuration. Running a retrieval test is *not* an unnecessary or superfluous step at all! Just like fine-tuning a precision instrument, RAGFlow requires careful tuning to deliver optimal question answering performance. Your knowledge base settings, chat assistant configurations, and the specified large and small models can all significantly impact the final results. Running a retrieval test verifies whether the intended chunks can be recovered, allowing you to quickly identify areas for improvement or pinpoint any issue that needs addressing. For instance, when debugging your question answering system, if you know that the correct chunks can be retrieved, you can focus your efforts elsewhere. For example, in issue [#5627](https://github.com/infiniflow/ragflow/issues/5627), the problem was found to be due to the LLM's limitations. +After your files are uploaded and parsed, it is recommended that you run a retrieval test before proceeding with the chat assistant configuration. Running a retrieval test is *not* an unnecessary or superfluous step at all! Just like fine-tuning a precision instrument, RAGFlow requires careful tuning to deliver optimal question answering performance. Your dataset settings, chat assistant configurations, and the specified large and small models can all significantly impact the final results. Running a retrieval test verifies whether the intended chunks can be recovered, allowing you to quickly identify areas for improvement or pinpoint any issue that needs addressing. For instance, when debugging your question answering system, if you know that the correct chunks can be retrieved, you can focus your efforts elsewhere. For example, in issue [#5627](https://github.com/infiniflow/ragflow/issues/5627), the problem was found to be due to the LLM's limitations. During a retrieval test, chunks created from your specified chunking method are retrieved using a hybrid search. This search combines weighted keyword similarity with either weighted vector cosine similarity or a weighted reranking score, depending on your settings: @@ -65,7 +65,7 @@ Using a knowledge graph in a retrieval test will significantly increase the time To perform a [cross-language search](../../references/glossary.mdx#cross-language-search), select one or more target languages from the dropdown menu. The system’s default chat model will then translate your query entered in the Test text field into the selected target language(s). This translation ensures accurate semantic matching across languages, allowing you to retrieve relevant results regardless of language differences. :::tip NOTE -- When selecting target languages, please ensure that these languages are present in the knowledge base to guarantee an effective search. +- When selecting target languages, please ensure that these languages are present in the dataset to guarantee an effective search. - If no target language is selected, the system will search only in the language of your query, which may cause relevant information in other languages to be missed. ::: @@ -75,7 +75,7 @@ This field is where you put in your testing query. ## Procedure -1. Navigate to the **Retrieval testing** page of your knowledge base, enter your query in **Test text**, and click **Testing** to run the test. +1. Navigate to the **Retrieval testing** page of your dataset, enter your query in **Test text**, and click **Testing** to run the test. 2. If the results are unsatisfactory, tune the options listed in the Configuration section and rerun the test. *The following is a screenshot of a retrieval test conducted without using knowledge graph. It demonstrates a hybrid search combining weighted keyword similarity and weighted vector cosine similarity. The overall hybrid similarity score is 28.56, calculated as 25.17 (term similarity score) x 0.7 + 36.49 (vector similarity score) x 0.3:* diff --git a/docs/guides/dataset/select_pdf_parser.md b/docs/guides/dataset/select_pdf_parser.md index 1bdda5d1d..eabf0b264 100644 --- a/docs/guides/dataset/select_pdf_parser.md +++ b/docs/guides/dataset/select_pdf_parser.md @@ -27,7 +27,7 @@ RAGFlow isn't one-size-fits-all. It is built for flexibility and supports deeper ## Procedure -1. On your knowledge base's **Configuration** page, select a chunking method, say **General**. +1. On your dataset's **Configuration** page, select a chunking method, say **General**. _The **PDF parser** dropdown menu appears._ diff --git a/docs/guides/dataset/set_metadata.md b/docs/guides/dataset/set_metadata.md index e0281da81..904efaa9c 100644 --- a/docs/guides/dataset/set_metadata.md +++ b/docs/guides/dataset/set_metadata.md @@ -9,7 +9,7 @@ Add metadata to an uploaded file --- -On the **Dataset** page of your knowledge base, you can add metadata to any uploaded file. This approach enables you to 'tag' additional information like URL, author, date, and more to an existing file. In an AI-powered chat, such information will be sent to the LLM with the retrieved chunks for content generation. +On the **Dataset** page of your dataset, you can add metadata to any uploaded file. This approach enables you to 'tag' additional information like URL, author, date, and more to an existing file. In an AI-powered chat, such information will be sent to the LLM with the retrieved chunks for content generation. For example, if you have a dataset of HTML files and want the LLM to cite the source URL when responding to your query, add a `"url"` parameter to each file's metadata. diff --git a/docs/guides/dataset/set_page_rank.md b/docs/guides/dataset/set_page_rank.md index c0af82308..4b24d9b34 100644 --- a/docs/guides/dataset/set_page_rank.md +++ b/docs/guides/dataset/set_page_rank.md @@ -11,15 +11,15 @@ Create a step-retrieval strategy using page rank. ## Scenario -In an AI-powered chat, you can configure a chat assistant or an agent to respond using knowledge retrieved from multiple specified knowledge bases (datasets), provided that they employ the same embedding model. In situations where you prefer information from certain knowledge base(s) to take precedence or to be retrieved first, you can use RAGFlow's page rank feature to increase the ranking of chunks from these knowledge bases. For example, if you have configured a chat assistant to draw from two knowledge bases, knowledge base A for 2024 news and knowledge base B for 2023 news, but wish to prioritize news from year 2024, this feature is particularly useful. +In an AI-powered chat, you can configure a chat assistant or an agent to respond using knowledge retrieved from multiple specified datasets (datasets), provided that they employ the same embedding model. In situations where you prefer information from certain dataset(s) to take precedence or to be retrieved first, you can use RAGFlow's page rank feature to increase the ranking of chunks from these datasets. For example, if you have configured a chat assistant to draw from two datasets, dataset A for 2024 news and dataset B for 2023 news, but wish to prioritize news from year 2024, this feature is particularly useful. :::info NOTE -It is important to note that this 'page rank' feature operates at the level of the entire knowledge base rather than on individual files or documents. +It is important to note that this 'page rank' feature operates at the level of the entire dataset rather than on individual files or documents. ::: ## Configuration -On the **Configuration** page of your knowledge base, drag the slider under **Page rank** to set the page rank value for your knowledge base. You are also allowed to input the intended page rank value in the field next to the slider. +On the **Configuration** page of your dataset, drag the slider under **Page rank** to set the page rank value for your dataset. You are also allowed to input the intended page rank value in the field next to the slider. :::info NOTE The page rank value must be an integer. Range: [0,100] @@ -36,4 +36,4 @@ If you set the page rank value to a non-integer, say 1.7, it will be rounded dow If you configure a chat assistant's **similarity threshold** to 0.2, only chunks with a hybrid score greater than 0.2 x 100 = 20 will be retrieved and sent to the chat model for content generation. This initial filtering step is crucial for narrowing down relevant information. -If you have assigned a page rank of 1 to knowledge base A (2024 news) and 0 to knowledge base B (2023 news), the final hybrid scores of the retrieved chunks will be adjusted accordingly. A chunk retrieved from knowledge base A with an initial score of 50 will receive a boost of 1 x 100 = 100 points, resulting in a final score of 50 + 1 x 100 = 150. In this way, chunks retrieved from knowledge base A will always precede chunks from knowledge base B. \ No newline at end of file +If you have assigned a page rank of 1 to dataset A (2024 news) and 0 to dataset B (2023 news), the final hybrid scores of the retrieved chunks will be adjusted accordingly. A chunk retrieved from dataset A with an initial score of 50 will receive a boost of 1 x 100 = 100 points, resulting in a final score of 50 + 1 x 100 = 150. In this way, chunks retrieved from dataset A will always precede chunks from dataset B. \ No newline at end of file diff --git a/docs/guides/dataset/use_tag_sets.md b/docs/guides/dataset/use_tag_sets.md index 012c63e14..81dc65838 100644 --- a/docs/guides/dataset/use_tag_sets.md +++ b/docs/guides/dataset/use_tag_sets.md @@ -9,9 +9,9 @@ Use a tag set to auto-tag chunks in your datasets. --- -Retrieval accuracy is the touchstone for a production-ready RAG framework. In addition to retrieval-enhancing approaches like auto-keyword, auto-question, and knowledge graph, RAGFlow introduces an auto-tagging feature to address semantic gaps. The auto-tagging feature automatically maps tags in the user-defined tag sets to relevant chunks within your knowledge base based on similarity with each chunk. This automation mechanism allows you to apply an additional "layer" of domain-specific knowledge to existing datasets, which is particularly useful when dealing with a large number of chunks. +Retrieval accuracy is the touchstone for a production-ready RAG framework. In addition to retrieval-enhancing approaches like auto-keyword, auto-question, and knowledge graph, RAGFlow introduces an auto-tagging feature to address semantic gaps. The auto-tagging feature automatically maps tags in the user-defined tag sets to relevant chunks within your dataset based on similarity with each chunk. This automation mechanism allows you to apply an additional "layer" of domain-specific knowledge to existing datasets, which is particularly useful when dealing with a large number of chunks. -To use this feature, ensure you have at least one properly configured tag set, specify the tag set(s) on the **Configuration** page of your knowledge base (dataset), and then re-parse your documents to initiate the auto-tagging process. During this process, each chunk in your dataset is compared with every entry in the specified tag set(s), and tags are automatically applied based on similarity. +To use this feature, ensure you have at least one properly configured tag set, specify the tag set(s) on the **Configuration** page of your dataset, and then re-parse your documents to initiate the auto-tagging process. During this process, each chunk in your dataset is compared with every entry in the specified tag set(s), and tags are automatically applied based on similarity. ## Scenarios @@ -19,7 +19,7 @@ Auto-tagging applies in situations where chunks are so similar to each other tha ## 1. Create tag set -You can consider a tag set as a closed set, and the tags to attach to the chunks in your dataset (knowledge base) are *exclusively* from the specified tag set. You use a tag set to "inform" RAGFlow which chunks to tag and which tags to apply. +You can consider a tag set as a closed set, and the tags to attach to the chunks in your dataset are *exclusively* from the specified tag set. You use a tag set to "inform" RAGFlow which chunks to tag and which tags to apply. ### Prepare a tag table file @@ -41,8 +41,8 @@ As a rule of thumb, consider including the following entries in your tag table: A tag set is *not* involved in document indexing or retrieval. Do not specify a tag set when configuring your chat assistant or agent. ::: -1. Click **+ Create knowledge base** to create a knowledge base. -2. Navigate to the **Configuration** page of the created knowledge base and choose **Tag** as the default chunking method. +1. Click **+ Create dataset** to create a dataset. +2. Navigate to the **Configuration** page of the created dataset and choose **Tag** as the default chunking method. 3. Navigate to the **Dataset** page and upload and parse your table file in XLSX, CSV, or TXT formats. _A tag cloud appears under the **Tag view** section, indicating the tag set is created:_ ![Image](https://github.com/user-attachments/assets/abefbcbf-c130-4abe-95e1-267b0d2a0505) @@ -53,7 +53,7 @@ A tag set is *not* involved in document indexing or retrieval. Do not specify a Once a tag set is created, you can apply it to your dataset: -1. Navigate to the **Configuration** page of your knowledge base (dataset). +1. Navigate to the **Configuration** page of your dataset. 2. Select the tag set from the **Tag sets** dropdown and click **Save** to confirm. :::tip NOTE @@ -94,9 +94,9 @@ If you add new table files to your tag set, it is at your own discretion whether Yes, you can. Usually one tag set suffices. When using multiple tag sets, ensure they are independent of each other; otherwise, consider merging your tag sets. -### Difference between a tag set and a standard knowledge base? +### Difference between a tag set and a standard dataset? -A standard knowledge base is a dataset. It will be searched by RAGFlow's document engine and the retrieved chunks will be fed to the LLM. In contrast, a tag set is used solely to attach tags to chunks within your dataset. It does not directly participate in the retrieval process, and you should not choose a tag set when selecting datasets for your chat assistant or agent. +A standard dataset is a dataset. It will be searched by RAGFlow's document engine and the retrieved chunks will be fed to the LLM. In contrast, a tag set is used solely to attach tags to chunks within your dataset. It does not directly participate in the retrieval process, and you should not choose a tag set when selecting datasets for your chat assistant or agent. ### Difference between auto-tag and auto-keyword? diff --git a/docs/guides/manage_files.md b/docs/guides/manage_files.md index 7f633dd0a..f3e3b31e6 100644 --- a/docs/guides/manage_files.md +++ b/docs/guides/manage_files.md @@ -5,10 +5,10 @@ slug: /manage_files # Files -Knowledge base, hallucination-free chat, and file management are the three pillars of RAGFlow. RAGFlow's file management allows you to upload files individually or in bulk. You can then link an uploaded file to multiple target knowledge bases. This guide showcases some basic usages of the file management feature. +RAGFlow's file management allows you to upload files individually or in bulk. You can then link an uploaded file to multiple target datasets. This guide showcases some basic usages of the file management feature. :::info IMPORTANT -Compared to uploading files directly to various knowledge bases, uploading them to RAGFlow's file management and then linking them to different knowledge bases is *not* an unnecessary step, particularly when you want to delete some parsed files or an entire knowledge base but retain the original files. +Compared to uploading files directly to various datasets, uploading them to RAGFlow's file management and then linking them to different datasets is *not* an unnecessary step, particularly when you want to delete some parsed files or an entire dataset but retain the original files. ::: ## Create folder @@ -18,7 +18,7 @@ RAGFlow's file management allows you to establish your file system with nested f ![create new folder](https://github.com/infiniflow/ragflow/assets/93570324/3a37a5f4-43a6-426d-a62a-e5cd2ff7a533) :::caution NOTE -Each knowledge base in RAGFlow has a corresponding folder under the **root/.knowledgebase** directory. You are not allowed to create a subfolder within it. +Each dataset in RAGFlow has a corresponding folder under the **root/.knowledgebase** directory. You are not allowed to create a subfolder within it. ::: ## Upload file @@ -39,13 +39,13 @@ RAGFlow's file management supports previewing files in the following formats: ![preview](https://github.com/infiniflow/ragflow/assets/93570324/2e931362-8bbf-482c-ac86-b68b09d331bc) -## Link file to knowledge bases +## Link file to datasets -RAGFlow's file management allows you to *link* an uploaded file to multiple knowledge bases, creating a file reference in each target knowledge base. Therefore, deleting a file in your file management will AUTOMATICALLY REMOVE all related file references across the knowledge bases. +RAGFlow's file management allows you to *link* an uploaded file to multiple datasets, creating a file reference in each target dataset. Therefore, deleting a file in your file management will AUTOMATICALLY REMOVE all related file references across the datasets. ![link knowledgebase](https://github.com/infiniflow/ragflow/assets/93570324/6c6b8db4-3269-4e35-9434-6089887e3e3f) -You can link your file to one knowledge base or multiple knowledge bases at one time: +You can link your file to one dataset or multiple datasets at one time: ![link multiple kb](https://github.com/infiniflow/ragflow/assets/93570324/6c508803-fb1f-435d-b688-683066fd7fff) @@ -79,7 +79,7 @@ To bulk delete files or folders: ![bulk delete](https://github.com/infiniflow/ragflow/assets/93570324/519b99ab-ec7f-4c8a-8cea-e0b6dcb3cb46) > - You are not allowed to delete the **root/.knowledgebase** folder. -> - Deleting files that have been linked to knowledge bases will **AUTOMATICALLY REMOVE** all associated file references across the knowledge bases. +> - Deleting files that have been linked to datasets will **AUTOMATICALLY REMOVE** all associated file references across the datasets. ## Download uploaded file diff --git a/docs/guides/models/deploy_local_llm.mdx b/docs/guides/models/deploy_local_llm.mdx index 918e9503c..6553e7c53 100644 --- a/docs/guides/models/deploy_local_llm.mdx +++ b/docs/guides/models/deploy_local_llm.mdx @@ -164,7 +164,7 @@ Click on your logo **>** **Model providers** **>** **System Model Settings** to Update your chat model accordingly in **Chat Configuration**: -> If your local model is an embedding model, update it on the configuration page of your knowledge base. +> If your local model is an embedding model, update it on the configuration page of your dataset. ## Deploy a local model using IPEX-LLM diff --git a/docs/guides/run_health_check.md b/docs/guides/run_health_check.md index f07538460..5ed7cfdf6 100644 --- a/docs/guides/run_health_check.md +++ b/docs/guides/run_health_check.md @@ -31,3 +31,79 @@ You can click on a specific 30-second time interval to view the details of compl ![done_tasks](https://github.com/user-attachments/assets/49b25ec4-03af-48cf-b2e5-c892f6eaa261) ![done_vs_failed](https://github.com/user-attachments/assets/eaa928d0-a31c-4072-adea-046091e04599) + +## API Health Check + +In addition to checking the system dependencies from the **avatar > System** page in the UI, you can directly query the backend health check endpoint: + +```bash +http://IP_OF_YOUR_MACHINE/v1/system/healthz +``` + +Here `` refers to the actual port of your backend service (e.g., `7897`, `9222`, etc.). + +Key points: +- **No login required** (no `@login_required` decorator) +- Returns results in JSON format +- If all dependencies are healthy → HTTP **200 OK** +- If any dependency fails → HTTP **500 Internal Server Error** + +### Example 1: All services healthy (HTTP 200) + +```bash +http://127.0.0.1/v1/system/healthz +``` + +Response: + +```http +HTTP/1.1 200 OK +Content-Type: application/json +Content-Length: 120 + +{ + "db": "ok", + "redis": "ok", + "doc_engine": "ok", + "storage": "ok", + "status": "ok" +} +``` + +Explanation: +- Database (MySQL/Postgres), Redis, document engine (Elasticsearch/Infinity), and object storage (MinIO) are all healthy. +- The `status` field returns `"ok"`. + +### Example 2: One service unhealthy (HTTP 500) + +For example, if Redis is down: + +Response: + +```http +HTTP/1.1 500 INTERNAL SERVER ERROR +Content-Type: application/json +Content-Length: 300 + +{ + "db": "ok", + "redis": "nok", + "doc_engine": "ok", + "storage": "ok", + "status": "nok", + "_meta": { + "redis": { + "elapsed": "5.2", + "error": "Lost connection!" + } + } +} +``` + +Explanation: +- `redis` is marked as `"nok"`, with detailed error info under `_meta.redis.error`. +- The overall `status` is `"nok"`, so the endpoint returns 500. + +--- + +This endpoint allows you to monitor RAGFlow’s core dependencies programmatically in scripts or external monitoring systems, without relying on the frontend UI. diff --git a/docs/guides/team/join_or_leave_team.md b/docs/guides/team/join_or_leave_team.md index 12257306d..93255ef3c 100644 --- a/docs/guides/team/join_or_leave_team.md +++ b/docs/guides/team/join_or_leave_team.md @@ -11,7 +11,7 @@ Accept an invite to join a team, decline an invite, or leave a team. Once you join a team, you can do the following: -- Upload documents to the team owner's shared datasets (knowledge bases). +- Upload documents to the team owner's shared datasets. - Parse documents in the team owner's shared datasets. - Use the team owner's shared Agents. @@ -22,7 +22,7 @@ You cannot invite users to a team unless you are its owner. ## Prerequisites 1. Ensure that your Email address that received the team invitation is associated with a RAGFlow user account. -2. The team owner should share his knowledge bases by setting their **Permission** to **Team**. +2. The team owner should share his datasets by setting their **Permission** to **Team**. ## Accept or decline team invite @@ -32,6 +32,6 @@ You cannot invite users to a team unless you are its owner. _On the **Team** page, you can view the information about members of your team and the teams you have joined._ -_After accepting the team invite, you should be able to view and update the team owner's knowledge bases whose **Permissions** is set to **Team**._ +_After accepting the team invite, you should be able to view and update the team owner's datasets whose **Permissions** is set to **Team**._ ## Leave a joined team \ No newline at end of file diff --git a/docs/guides/team/manage_team_members.md b/docs/guides/team/manage_team_members.md index bf8a2eacf..edd8289cd 100644 --- a/docs/guides/team/manage_team_members.md +++ b/docs/guides/team/manage_team_members.md @@ -11,7 +11,7 @@ Invite or remove team members. By default, each RAGFlow user is assigned a single team named after their name. RAGFlow allows you to invite RAGFlow users to your team. Your team members can help you: -- Upload documents to your shared datasets (knowledge bases). +- Upload documents to your shared datasets. - Parse documents in your shared datasets. - Use your shared Agents. @@ -23,7 +23,7 @@ By default, each RAGFlow user is assigned a single team named after their name. ## Prerequisites 1. Ensure that the invited team member is a RAGFlow user and that the Email address used is associated with a RAGFlow user account. -2. To allow your team members to view and update your knowledge base, ensure that you set **Permissions** on its **Configuration** page from **Only me** to **Team**. +2. To allow your team members to view and update your dataset, ensure that you set **Permissions** on its **Configuration** page from **Only me** to **Team**. ## Invite team members diff --git a/docs/guides/team/share_knowledge_bases.md b/docs/guides/team/share_knowledge_bases.md index ed106b63f..4eeccd264 100644 --- a/docs/guides/team/share_knowledge_bases.md +++ b/docs/guides/team/share_knowledge_bases.md @@ -3,16 +3,16 @@ sidebar_position: 4 slug: /share_datasets --- -# Share knowledge base +# Share dataset -Share a knowledge base with team members. +Share a dataset with team members. --- -When ready, you may share your knowledge bases with your team members so that they can upload and parse files in them. Please note that your knowledge bases are not shared automatically; you must manually enable sharing by selecting the appropriate **Permissions** radio button: +When ready, you may share your datasets with your team members so that they can upload and parse files in them. Please note that your datasets are not shared automatically; you must manually enable sharing by selecting the appropriate **Permissions** radio button: -1. Navigate to the knowledge base's **Configuration** page. +1. Navigate to the dataset's **Configuration** page. 2. Change **Permissions** from **Only me** to **Team**. 3. Click **Save** to apply your changes. -*Once completed, your team members will see your shared knowledge bases.* \ No newline at end of file +*Once completed, your team members will see your shared datasets.* \ No newline at end of file diff --git a/docs/guides/upgrade_ragflow.mdx b/docs/guides/upgrade_ragflow.mdx index 57a9bd7d8..41d9dfd22 100644 --- a/docs/guides/upgrade_ragflow.mdx +++ b/docs/guides/upgrade_ragflow.mdx @@ -105,9 +105,9 @@ RAGFLOW_IMAGE=infiniflow/ragflow:v0.20.5 ## Frequently asked questions -### Do I need to back up my knowledge bases before upgrading RAGFlow? +### Do I need to back up my datasets before upgrading RAGFlow? -No, you do not need to. Upgrading RAGFlow in itself will *not* remove your uploaded data or knowledge base settings. However, be aware that `docker compose -f docker/docker-compose.yml down -v` will remove Docker container volumes, resulting in data loss. +No, you do not need to. Upgrading RAGFlow in itself will *not* remove your uploaded data or dataset settings. However, be aware that `docker compose -f docker/docker-compose.yml down -v` will remove Docker container volumes, resulting in data loss. ### Upgrade RAGFlow in an offline environment (without Internet access) diff --git a/docs/quickstart.mdx b/docs/quickstart.mdx index 8be0a8b35..959fb33e5 100644 --- a/docs/quickstart.mdx +++ b/docs/quickstart.mdx @@ -13,7 +13,7 @@ RAGFlow is an open-source RAG (Retrieval-Augmented Generation) engine based on d This quick start guide describes a general process from: - Starting up a local RAGFlow server, -- Creating a knowledge base, +- Creating a dataset, - Intervening with file parsing, to - Establishing an AI chat based on your datasets. @@ -280,29 +280,29 @@ To add and configure an LLM: > Some models, such as the image-to-text model **qwen-vl-max**, are subsidiary to a specific LLM. And you may need to update your API key to access these models. -## Create your first knowledge base +## Create your first dataset -You are allowed to upload files to a knowledge base in RAGFlow and parse them into datasets. A knowledge base is virtually a collection of datasets. Question answering in RAGFlow can be based on a particular knowledge base or multiple knowledge bases. File formats that RAGFlow supports include documents (PDF, DOC, DOCX, TXT, MD, MDX), tables (CSV, XLSX, XLS), pictures (JPEG, JPG, PNG, TIF, GIF), and slides (PPT, PPTX). +You are allowed to upload files to a dataset in RAGFlow and parse them into datasets. A dataset is virtually a collection of datasets. Question answering in RAGFlow can be based on a particular dataset or multiple datasets. File formats that RAGFlow supports include documents (PDF, DOC, DOCX, TXT, MD, MDX), tables (CSV, XLSX, XLS), pictures (JPEG, JPG, PNG, TIF, GIF), and slides (PPT, PPTX). -To create your first knowledge base: +To create your first dataset: 1. Click the **Dataset** tab in the top middle of the page **>** **Create dataset**. -2. Input the name of your knowledge base and click **OK** to confirm your changes. +2. Input the name of your dataset and click **OK** to confirm your changes. - _You are taken to the **Configuration** page of your knowledge base._ + _You are taken to the **Configuration** page of your dataset._ - ![knowledge base configuration](https://raw.githubusercontent.com/infiniflow/ragflow-docs/main/images/configure_knowledge_base.jpg) + ![dataset configuration](https://raw.githubusercontent.com/infiniflow/ragflow-docs/main/images/configure_knowledge_base.jpg) -3. RAGFlow offers multiple chunk templates that cater to different document layouts and file formats. Select the embedding model and chunking method (template) for your knowledge base. +3. RAGFlow offers multiple chunk templates that cater to different document layouts and file formats. Select the embedding model and chunking method (template) for your dataset. :::danger IMPORTANT -Once you have selected an embedding model and used it to parse a file, you are no longer allowed to change it. The obvious reason is that we must ensure that all files in a specific knowledge base are parsed using the *same* embedding model (ensure that they are being compared in the same embedding space). +Once you have selected an embedding model and used it to parse a file, you are no longer allowed to change it. The obvious reason is that we must ensure that all files in a specific dataset are parsed using the *same* embedding model (ensure that they are being compared in the same embedding space). ::: - _You are taken to the **Dataset** page of your knowledge base._ + _You are taken to the **Dataset** page of your dataset._ -4. Click **+ Add file** **>** **Local files** to start uploading a particular file to the knowledge base. +4. Click **+ Add file** **>** **Local files** to start uploading a particular file to the dataset. 5. In the uploaded file entry, click the play button to start file parsing: @@ -341,17 +341,17 @@ You can add keywords or questions to a file chunk to improve its ranking for que ## Set up an AI chat -Conversations in RAGFlow are based on a particular knowledge base or multiple knowledge bases. Once you have created your knowledge base and finished file parsing, you can go ahead and start an AI conversation. +Conversations in RAGFlow are based on a particular dataset or multiple datasets. Once you have created your dataset and finished file parsing, you can go ahead and start an AI conversation. 1. Click the **Chat** tab in the middle top of the mage **>** **Create an assistant** to show the **Chat Configuration** dialogue *of your next dialogue*. > RAGFlow offer the flexibility of choosing a different chat model for each dialogue, while allowing you to set the default models in **System Model Settings**. 2. Update **Assistant settings**: - - Name your assistant and specify your knowledge bases. + - Name your assistant and specify your datasets. - **Empty response**: - - If you wish to *confine* RAGFlow's answers to your knowledge bases, leave a response here. Then when it doesn't retrieve an answer, it *uniformly* responds with what you set here. - - If you wish RAGFlow to *improvise* when it doesn't retrieve an answer from your knowledge bases, leave it blank, which may give rise to hallucinations. + - If you wish to *confine* RAGFlow's answers to your datasets, leave a response here. Then when it doesn't retrieve an answer, it *uniformly* responds with what you set here. + - If you wish RAGFlow to *improvise* when it doesn't retrieve an answer from your datasets, leave it blank, which may give rise to hallucinations. 3. Update **Prompt engine** or leave it as is for the beginning. diff --git a/docs/references/http_api_reference.md b/docs/references/http_api_reference.md index b112e8618..062c942d7 100644 --- a/docs/references/http_api_reference.md +++ b/docs/references/http_api_reference.md @@ -4102,3 +4102,77 @@ Failure: ``` --- + +### System +--- +### Check system health + +**GET** `/v1/system/healthz` + +Check the health status of RAGFlow’s dependencies (database, Redis, document engine, object storage). + +#### Request + +- Method: GET +- URL: `/v1/system/healthz` +- Headers: + - 'Content-Type: application/json' + (no Authorization required) + +##### Request example + +```bash +curl --request GET + --url http://{address}/v1/system/healthz + --header 'Content-Type: application/json' +``` + +##### Request parameters + +- `address`: (*Path parameter*), string + The host and port of the backend service (e.g., `localhost:7897`). + +--- + +#### Responses + +- **200 OK** – All services healthy + +```http +HTTP/1.1 200 OK +Content-Type: application/json + +{ + "db": "ok", + "redis": "ok", + "doc_engine": "ok", + "storage": "ok", + "status": "ok" +} +``` + +- **500 Internal Server Error** – At least one service unhealthy + +```http +HTTP/1.1 500 INTERNAL SERVER ERROR +Content-Type: application/json + +{ + "db": "ok", + "redis": "nok", + "doc_engine": "ok", + "storage": "ok", + "status": "nok", + "_meta": { + "redis": { + "elapsed": "5.2", + "error": "Lost connection!" + } + } +} +``` + +Explanation: +- Each service is reported as "ok" or "nok". +- The top-level `status` reflects overall health. +- If any service is "nok", detailed error info appears in `_meta`. diff --git a/docs/references/python_api_reference.md b/docs/references/python_api_reference.md index 79c62424b..abd1c393e 100644 --- a/docs/references/python_api_reference.md +++ b/docs/references/python_api_reference.md @@ -85,11 +85,11 @@ completion = client.chat.completions.create( ) if stream: -for chunk in completion: - print(chunk) - if reference and chunk.choices[0].finish_reason == "stop": - print(f"Reference:\n{chunk.choices[0].delta.reference}") - print(f"Final content:\n{chunk.choices[0].delta.final_content}") + for chunk in completion: + print(chunk) + if reference and chunk.choices[0].finish_reason == "stop": + print(f"Reference:\n{chunk.choices[0].delta.reference}") + print(f"Final content:\n{chunk.choices[0].delta.final_content}") else: print(completion.choices[0].message.content) if reference: diff --git a/docs/references/supported_models.mdx b/docs/references/supported_models.mdx index 8302bd308..d9587f863 100644 --- a/docs/references/supported_models.mdx +++ b/docs/references/supported_models.mdx @@ -65,7 +65,7 @@ A complete list of models supported by RAGFlow, which will continue to expand. | 01.AI | :heavy_check_mark: | | | | | | | DeepInfra | :heavy_check_mark: | :heavy_check_mark: | | | :heavy_check_mark: | :heavy_check_mark: | | 302.AI | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | | | -| CometAPI | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | | | +| CometAPI | :heavy_check_mark: | :heavy_check_mark: | | | | | ```mdx-code-block diff --git a/docs/release_notes.md b/docs/release_notes.md index ad9f291df..b19f9f720 100644 --- a/docs/release_notes.md +++ b/docs/release_notes.md @@ -28,10 +28,10 @@ Released on September 10, 2025. ### Improvements -- Agent: +- Agent: - Agent Performance Optimized: Improves planning and reflection speed for simple tasks; optimizes concurrent tool calls for parallelizable scenarios, significantly reducing overall response time. - Four framework-level prompt blocks are available in the **System prompt** section, enabling customization and overriding of prompts at the framework level, thereby enhancing flexibility and control. See [here](./guides/agent/agent_component_reference/agent.mdx#system-prompt). - - **Execute SQL** component enhanced: Replaces the original variable reference component with a text input field, allowing users to write free-form SQL queries and reference variables. + - **Execute SQL** component enhanced: Replaces the original variable reference component with a text input field, allowing users to write free-form SQL queries and reference variables. See [here](./guides/agent/agent_component_reference/execute_sql.md). - Chat: Re-enables **Reasoning** and **Cross-language search**. ### Added models @@ -79,7 +79,7 @@ ZHIPU GLM-4.5 ### New Agent templates -Ecommerce Customer Service Workflow: A template designed to handle enquiries about product features and multi-product comparisons using the internal knowledge base, as well as to manage installation appointment bookings. +Ecommerce Customer Service Workflow: A template designed to handle enquiries about product features and multi-product comparisons using the internal dataset, as well as to manage installation appointment bookings. ### Fixed issues @@ -131,7 +131,7 @@ Released on August 8, 2025. ### New Features -- The **Retrieval** component now supports the dynamic specification of knowledge base names using variables. +- The **Retrieval** component now supports the dynamic specification of dataset names using variables. - The user interface now includes a French language option. ### Added Models @@ -142,7 +142,7 @@ Released on August 8, 2025. ### New agent templates (both workflow and agentic) - SQL Assistant Workflow: Empowers non-technical teams (e.g., operations, product) to independently query business data. -- Choose Your Knowledge Base Workflow: Lets users select a knowledge base to query during conversations. [#9325](https://github.com/infiniflow/ragflow/pull/9325) +- Choose Your Knowledge Base Workflow: Lets users select a dataset to query during conversations. [#9325](https://github.com/infiniflow/ragflow/pull/9325) - Choose Your Knowledge Base Agent: Delivers higher-quality responses with extended reasoning time, suited for complex queries. [#9325](https://github.com/infiniflow/ragflow/pull/9325) ### Fixed Issues @@ -175,14 +175,14 @@ From v0.20.0 onwards, Agents are no longer compatible with earlier versions, and ### New agent templates introduced - Multi-Agent based Deep Research: Collaborative Agent teamwork led by a Lead Agent with multiple Subagents, distinct from traditional workflow orchestration. -- An intelligent Q&A chatbot leveraging internal knowledge bases, designed for customer service and training scenarios. +- An intelligent Q&A chatbot leveraging internal datasets, designed for customer service and training scenarios. - A resume analysis template used by the RAGFlow team to screen, analyze, and record candidate information. - A blog generation workflow that transforms raw ideas into SEO-friendly blog content. - An intelligent customer service workflow. - A user feedback analysis template that directs user feedback to appropriate teams through semantic analysis. - Trip Planner: Uses web search and map MCP servers to assist with travel planning. - Image Lingo: Translates content from uploaded photos. -- An information search assistant that retrieves answers from both internal knowledge bases and the web. +- An information search assistant that retrieves answers from both internal datasets and the web. ## v0.19.1 @@ -195,7 +195,7 @@ Released on June 23, 2025. - A context error occurring when using Sandbox in standalone mode. [#8340](https://github.com/infiniflow/ragflow/pull/8340) - An excessive CPU usage issue caused by Ollama. [#8216](https://github.com/infiniflow/ragflow/pull/8216) - A bug in the Code Component. [#7949](https://github.com/infiniflow/ragflow/pull/7949) -- Added support for models installed via Ollama or VLLM when creating a knowledge base through the API. [#8069](https://github.com/infiniflow/ragflow/pull/8069) +- Added support for models installed via Ollama or VLLM when creating a dataset through the API. [#8069](https://github.com/infiniflow/ragflow/pull/8069) - Enabled role-based authentication for S3 bucket access. [#8149](https://github.com/infiniflow/ragflow/pull/8149) ### Added models @@ -209,7 +209,7 @@ Released on May 26, 2025. ### New features -- [Cross-language search](./references/glossary.mdx#cross-language-search) is supported in the Knowledge and Chat modules, enhancing search accuracy and user experience in multilingual environments, such as in Chinese-English knowledge bases. +- [Cross-language search](./references/glossary.mdx#cross-language-search) is supported in the Knowledge and Chat modules, enhancing search accuracy and user experience in multilingual environments, such as in Chinese-English datasets. - Agent component: A new Code component supports Python and JavaScript scripts, enabling developers to handle more complex tasks like dynamic data processing. - Enhanced image display: Images in Chat and Search now render directly within responses, rather than as external references. Knowledge retrieval testing can retrieve images directly, instead of texts extracted from images. - Claude 4 and ChatGPT o3: Developers can now use the newly released, most advanced Claude model and OpenAI’s latest ChatGPT o3 inference model. @@ -238,7 +238,7 @@ From this release onwards, built-in rerank models have been removed because they ### New features -- MCP server: enables access to RAGFlow's knowledge bases via MCP. +- MCP server: enables access to RAGFlow's datasets via MCP. - DeepDoc supports adopting VLM model as a processing pipeline during document layout recognition, enabling in-depth analysis of images in PDF and DOCX files. - OpenAI-compatible APIs: Agents can be called via OpenAI-compatible APIs. - User registration control: administrators can enable or disable user registration through an environment variable. @@ -330,7 +330,7 @@ Released on March 3, 2025. - AI chat: Implements Deep Research for agentic reasoning. To activate this, enable the **Reasoning** toggle under the **Prompt engine** tab of your chat assistant dialogue. - AI chat: Leverages Tavily-based web search to enhance contexts in agentic reasoning. To activate this, enter the correct Tavily API key under the **Assistant settings** tab of your chat assistant dialogue. -- AI chat: Supports starting a chat without specifying knowledge bases. +- AI chat: Supports starting a chat without specifying datasets. - AI chat: HTML files can also be previewed and referenced, in addition to PDF files. - Dataset: Adds a **PDF parser**, aka **Document parser**, dropdown menu to dataset configurations. This includes a DeepDoc model option, which is time-consuming, a much faster **naive** option (plain text), which skips DLA (Document Layout Analysis), OCR (Optical Character Recognition), and TSR (Table Structure Recognition) tasks, and several currently *experimental* large model options. See [here](./guides/dataset/select_pdf_parser.md). - Agent component: **(x)** or a forward slash `/` can be used to insert available keys (variables) in the system prompt field of the **Generate** or **Template** component. @@ -369,16 +369,16 @@ Released on February 6, 2025. ### New features - Supports DeepSeek R1 and DeepSeek V3. -- GraphRAG refactor: Knowledge graph is dynamically built on an entire knowledge base (dataset) rather than on an individual file, and automatically updated when a newly uploaded file starts parsing. See [here](https://ragflow.io/docs/dev/construct_knowledge_graph). +- GraphRAG refactor: Knowledge graph is dynamically built on an entire dataset rather than on an individual file, and automatically updated when a newly uploaded file starts parsing. See [here](https://ragflow.io/docs/dev/construct_knowledge_graph). - Adds an **Iteration** agent component and a **Research report generator** agent template. See [here](./guides/agent/agent_component_reference/iteration.mdx). - New UI language: Portuguese. -- Allows setting metadata for a specific file in a knowledge base to enhance AI-powered chats. See [here](./guides/dataset/set_metadata.md). +- Allows setting metadata for a specific file in a dataset to enhance AI-powered chats. See [here](./guides/dataset/set_metadata.md). - Upgrades RAGFlow's document engine [Infinity](https://github.com/infiniflow/infinity) to v0.6.0.dev3. - Supports GPU acceleration for DeepDoc (see [docker-compose-gpu.yml](https://github.com/infiniflow/ragflow/blob/main/docker/docker-compose-gpu.yml)). -- Supports creating and referencing a **Tag** knowledge base as a key milestone towards bridging the semantic gap between query and response. +- Supports creating and referencing a **Tag** dataset as a key milestone towards bridging the semantic gap between query and response. :::danger IMPORTANT -The **Tag knowledge base** feature is *unavailable* on the [Infinity](https://github.com/infiniflow/infinity) document engine. +The **Tag dataset** feature is *unavailable* on the [Infinity](https://github.com/infiniflow/infinity) document engine. ::: ### Documentation @@ -415,7 +415,7 @@ Released on December 25, 2024. This release fixes the following issues: - The `SCORE not found` and `position_int` errors returned by [Infinity](https://github.com/infiniflow/infinity). -- Once an embedding model in a specific knowledge base is changed, embedding models in other knowledge bases can no longer be changed. +- Once an embedding model in a specific dataset is changed, embedding models in other datasets can no longer be changed. - Slow response in question-answering and AI search due to repetitive loading of the embedding model. - Fails to parse documents with RAPTOR. - Using the **Table** parsing method results in information loss. @@ -442,7 +442,7 @@ Released on December 18, 2024. ### New features - Introduces additional Agent-specific APIs. -- Supports using page rank score to improve retrieval performance when searching across multiple knowledge bases. +- Supports using page rank score to improve retrieval performance when searching across multiple datasets. - Offers an iframe in Chat and Agent to facilitate the integration of RAGFlow into your webpage. - Adds a Helm chart for deploying RAGFlow on Kubernetes. - Supports importing or exporting an agent in JSON format. diff --git a/graphrag/general/extractor.py b/graphrag/general/extractor.py index 61d89e27c..df8af1c8f 100644 --- a/graphrag/general/extractor.py +++ b/graphrag/general/extractor.py @@ -37,7 +37,7 @@ from graphrag.utils import ( split_string_by_multi_markers, ) from rag.llm.chat_model import Base as CompletionLLM -from rag.prompts import message_fit_in +from rag.prompts.generator import message_fit_in from rag.utils import truncate GRAPH_FIELD_SEP = "" diff --git a/intergrations/firecrawl/INSTALLATION.md b/intergrations/firecrawl/INSTALLATION.md new file mode 100644 index 000000000..7aa853af6 --- /dev/null +++ b/intergrations/firecrawl/INSTALLATION.md @@ -0,0 +1,222 @@ +# Installation Guide for Firecrawl RAGFlow Integration + +This guide will help you install and configure the Firecrawl integration plugin for RAGFlow. + +## Prerequisites + +- RAGFlow instance running (version 0.20.5 or later) +- Python 3.8 or higher +- Firecrawl API key (get one at [firecrawl.dev](https://firecrawl.dev)) + +## Installation Methods + +### Method 1: Manual Installation + +1. **Download the plugin**: + ```bash + git clone https://github.com/firecrawl/firecrawl.git + cd firecrawl/ragflow-firecrawl-integration + ``` + +2. **Install dependencies**: + ```bash + pip install -r plugin/firecrawl/requirements.txt + ``` + +3. **Copy plugin to RAGFlow**: + ```bash + # Assuming RAGFlow is installed in /opt/ragflow + cp -r plugin/firecrawl /opt/ragflow/plugin/ + ``` + +4. **Restart RAGFlow**: + ```bash + # Restart RAGFlow services + docker compose -f /opt/ragflow/docker/docker-compose.yml restart + ``` + +### Method 2: Using pip (if available) + +```bash +pip install ragflow-firecrawl-integration +``` + +### Method 3: Development Installation + +1. **Clone the repository**: + ```bash + git clone https://github.com/firecrawl/firecrawl.git + cd firecrawl/ragflow-firecrawl-integration + ``` + +2. **Install in development mode**: + ```bash + pip install -e . + ``` + +## Configuration + +### 1. Get Firecrawl API Key + +1. Visit [firecrawl.dev](https://firecrawl.dev) +2. Sign up for a free account +3. Navigate to your dashboard +4. Copy your API key (starts with `fc-`) + +### 2. Configure in RAGFlow + +1. **Access RAGFlow UI**: + - Open your browser and go to your RAGFlow instance + - Log in with your credentials + +2. **Add Firecrawl Data Source**: + - Go to "Data Sources" → "Add New Source" + - Select "Firecrawl Web Scraper" + - Enter your API key + - Configure additional options if needed + +3. **Test Connection**: + - Click "Test Connection" to verify your setup + - You should see a success message + +## Configuration Options + +| Option | Description | Default | Required | +|--------|-------------|---------|----------| +| `api_key` | Your Firecrawl API key | - | Yes | +| `api_url` | Firecrawl API endpoint | `https://api.firecrawl.dev` | No | +| `max_retries` | Maximum retry attempts | 3 | No | +| `timeout` | Request timeout (seconds) | 30 | No | +| `rate_limit_delay` | Delay between requests (seconds) | 1.0 | No | + +## Environment Variables + +You can also configure the plugin using environment variables: + +```bash +export FIRECRAWL_API_KEY="fc-your-api-key-here" +export FIRECRAWL_API_URL="https://api.firecrawl.dev" +export FIRECRAWL_MAX_RETRIES="3" +export FIRECRAWL_TIMEOUT="30" +export FIRECRAWL_RATE_LIMIT_DELAY="1.0" +``` + +## Verification + +### 1. Check Plugin Installation + +```bash +# Check if the plugin directory exists +ls -la /opt/ragflow/plugin/firecrawl/ + +# Should show: +# __init__.py +# firecrawl_connector.py +# firecrawl_config.py +# firecrawl_processor.py +# firecrawl_ui.py +# ragflow_integration.py +# requirements.txt +``` + +### 2. Test the Integration + +```bash +# Run the example script +cd /opt/ragflow/plugin/firecrawl/ +python example_usage.py +``` + +### 3. Check RAGFlow Logs + +```bash +# Check RAGFlow server logs +docker logs ragflow-server + +# Look for messages like: +# "Firecrawl plugin loaded successfully" +# "Firecrawl data source registered" +``` + +## Troubleshooting + +### Common Issues + +1. **Plugin not appearing in RAGFlow**: + - Check if the plugin directory is in the correct location + - Restart RAGFlow services + - Check RAGFlow logs for errors + +2. **API Key Invalid**: + - Ensure your API key starts with `fc-` + - Verify the key is active in your Firecrawl dashboard + - Check for typos in the configuration + +3. **Connection Timeout**: + - Increase the timeout value in configuration + - Check your network connection + - Verify the API URL is correct + +4. **Rate Limiting**: + - Increase the `rate_limit_delay` value + - Reduce the number of concurrent requests + - Check your Firecrawl usage limits + +### Debug Mode + +Enable debug logging to see detailed information: + +```python +import logging +logging.basicConfig(level=logging.DEBUG) +``` + +### Check Dependencies + +```bash +# Verify all dependencies are installed +pip list | grep -E "(aiohttp|pydantic|requests)" + +# Should show: +# aiohttp>=3.8.0 +# pydantic>=2.0.0 +# requests>=2.28.0 +``` + +## Uninstallation + +To remove the plugin: + +1. **Remove plugin directory**: + ```bash + rm -rf /opt/ragflow/plugin/firecrawl/ + ``` + +2. **Restart RAGFlow**: + ```bash + docker compose -f /opt/ragflow/docker/docker-compose.yml restart + ``` + +3. **Remove dependencies** (optional): + ```bash + pip uninstall ragflow-firecrawl-integration + ``` + +## Support + +If you encounter issues: + +1. Check the [troubleshooting section](#troubleshooting) +2. Review RAGFlow logs for error messages +3. Verify your Firecrawl API key and configuration +4. Check the [Firecrawl documentation](https://docs.firecrawl.dev) +5. Open an issue in the [Firecrawl repository](https://github.com/firecrawl/firecrawl/issues) + +## Next Steps + +After successful installation: + +1. Read the [README.md](README.md) for usage examples +2. Try scraping a simple URL to test the integration +3. Explore the different scraping options (single URL, crawl, batch) +4. Configure your RAGFlow workflows to use the scraped content diff --git a/intergrations/firecrawl/README.md b/intergrations/firecrawl/README.md new file mode 100644 index 000000000..1b1329e98 --- /dev/null +++ b/intergrations/firecrawl/README.md @@ -0,0 +1,216 @@ +# Firecrawl Integration for RAGFlow + +This integration adds [Firecrawl](https://firecrawl.dev)'s powerful web scraping capabilities to [RAGFlow](https://github.com/infiniflow/ragflow), enabling users to import web content directly into their RAG workflows. + +## 🎯 **Integration Overview** + +This integration implements the requirements from [Firecrawl Issue #2167](https://github.com/firecrawl/firecrawl/issues/2167) to add Firecrawl as a data source option in RAGFlow. + +### ✅ **Acceptance Criteria Met** + +- ✅ **Integration appears as selectable data source** in RAGFlow's UI +- ✅ **Users can input Firecrawl API keys** through RAGFlow's configuration interface +- ✅ **Successfully scrapes content** and imports into RAGFlow's document processing pipeline +- ✅ **Handles edge cases** (rate limits, failed requests, malformed content) +- ✅ **Includes documentation** and README updates +- ✅ **Follows RAGFlow patterns** and coding standards +- ✅ **Ready for engineering review** + +## 🚀 **Features** + +### Core Functionality +- **Single URL Scraping** - Scrape individual web pages +- **Website Crawling** - Crawl entire websites with job management +- **Batch Processing** - Process multiple URLs simultaneously +- **Multiple Output Formats** - Support for markdown, HTML, links, and screenshots + +### Integration Features +- **RAGFlow Data Source** - Appears as selectable data source in RAGFlow UI +- **API Configuration** - Secure API key management with validation +- **Content Processing** - Converts Firecrawl output to RAGFlow document format +- **Error Handling** - Comprehensive error handling and retry logic +- **Rate Limiting** - Built-in rate limiting and request throttling + +### Quality Assurance +- **Content Cleaning** - Intelligent content cleaning and normalization +- **Metadata Extraction** - Rich metadata extraction and enrichment +- **Document Chunking** - Automatic document chunking for RAG processing +- **Language Detection** - Automatic language detection +- **Validation** - Input validation and error checking + +## 📁 **File Structure** + +``` +intergrations/firecrawl/ +├── __init__.py # Package initialization +├── firecrawl_connector.py # API communication with Firecrawl +├── firecrawl_config.py # Configuration management +├── firecrawl_processor.py # Content processing for RAGFlow +├── firecrawl_ui.py # UI components for RAGFlow +├── ragflow_integration.py # Main integration class +├── example_usage.py # Usage examples +├── requirements.txt # Python dependencies +├── README.md # This file +└── INSTALLATION.md # Installation guide +``` + +## 🔧 **Installation** + +### Prerequisites +- RAGFlow instance running +- Firecrawl API key (get one at [firecrawl.dev](https://firecrawl.dev)) + +### Setup +1. **Get Firecrawl API Key**: + - Visit [firecrawl.dev](https://firecrawl.dev) + - Sign up for a free account + - Copy your API key (starts with `fc-`) + +2. **Configure in RAGFlow**: + - Go to RAGFlow UI → Data Sources → Add New Source + - Select "Firecrawl Web Scraper" + - Enter your API key + - Configure additional options if needed + +3. **Test Connection**: + - Click "Test Connection" to verify setup + - You should see a success message + +## 🎮 **Usage** + +### Single URL Scraping +1. Select "Single URL" as scrape type +2. Enter the URL to scrape +3. Choose output formats (markdown recommended for RAG) +4. Start scraping + +### Website Crawling +1. Select "Crawl Website" as scrape type +2. Enter the starting URL +3. Set crawl limit (maximum number of pages) +4. Configure extraction options +5. Start crawling + +### Batch Processing +1. Select "Batch URLs" as scrape type +2. Enter multiple URLs (one per line) +3. Choose output formats +4. Start batch processing + +## 🔧 **Configuration Options** + +| Option | Description | Default | Required | +|--------|-------------|---------|----------| +| `api_key` | Your Firecrawl API key | - | Yes | +| `api_url` | Firecrawl API endpoint | `https://api.firecrawl.dev` | No | +| `max_retries` | Maximum retry attempts | 3 | No | +| `timeout` | Request timeout (seconds) | 30 | No | +| `rate_limit_delay` | Delay between requests (seconds) | 1.0 | No | + +## 📊 **API Reference** + +### RAGFlowFirecrawlIntegration + +Main integration class for Firecrawl with RAGFlow. + +#### Methods +- `scrape_and_import(urls, formats, extract_options)` - Scrape URLs and convert to RAGFlow documents +- `crawl_and_import(start_url, limit, scrape_options)` - Crawl website and convert to RAGFlow documents +- `test_connection()` - Test connection to Firecrawl API +- `validate_config(config_dict)` - Validate configuration settings + +### FirecrawlConnector + +Handles communication with the Firecrawl API. + +#### Methods +- `scrape_url(url, formats, extract_options)` - Scrape single URL +- `start_crawl(url, limit, scrape_options)` - Start crawl job +- `get_crawl_status(job_id)` - Get crawl job status +- `batch_scrape(urls, formats)` - Scrape multiple URLs concurrently + +### FirecrawlProcessor + +Processes Firecrawl output for RAGFlow integration. + +#### Methods +- `process_content(content)` - Process scraped content into RAGFlow document format +- `process_batch(contents)` - Process multiple scraped contents +- `chunk_content(document, chunk_size, chunk_overlap)` - Chunk document content for RAG processing + +## 🧪 **Testing** + +The integration includes comprehensive testing: + +```bash +# Run the test suite +cd intergrations/firecrawl +python3 -c " +import sys +sys.path.append('.') +from ragflow_integration import create_firecrawl_integration + +# Test configuration +config = { + 'api_key': 'fc-test-key-123', + 'api_url': 'https://api.firecrawl.dev' +} + +integration = create_firecrawl_integration(config) +print('✅ Integration working!') +" +``` + +## 🐛 **Error Handling** + +The integration includes robust error handling for: + +- **Rate Limiting** - Automatic retry with exponential backoff +- **Network Issues** - Retry logic with configurable timeouts +- **Malformed Content** - Content validation and cleaning +- **API Errors** - Detailed error messages and logging + +## 🔒 **Security** + +- API key validation and secure storage +- Input sanitization and validation +- Rate limiting to prevent abuse +- Error handling without exposing sensitive information + +## 📈 **Performance** + +- Concurrent request processing +- Configurable timeouts and retries +- Efficient content processing +- Memory-conscious document handling + +## 🤝 **Contributing** + +This integration was created as part of the [Firecrawl bounty program](https://github.com/firecrawl/firecrawl/issues/2167). + +### Development +1. Fork the RAGFlow repository +2. Create a feature branch +3. Make your changes +4. Add tests if applicable +5. Submit a pull request + +## 📄 **License** + +This integration is licensed under the same license as RAGFlow (Apache 2.0). + +## 🆘 **Support** + +- **Firecrawl Documentation**: [docs.firecrawl.dev](https://docs.firecrawl.dev) +- **RAGFlow Documentation**: [RAGFlow GitHub](https://github.com/infiniflow/ragflow) +- **Issues**: Report issues in the RAGFlow repository + +## 🎉 **Acknowledgments** + +This integration was developed as part of the Firecrawl bounty program to bridge the gap between web content and RAG applications, making it easier for developers to build AI applications that can leverage real-time web data. + +--- + +**Ready for RAGFlow Integration!** 🚀 + +This integration enables RAGFlow users to easily import web content into their knowledge retrieval systems, expanding the ecosystem for both Firecrawl and RAGFlow. \ No newline at end of file diff --git a/intergrations/firecrawl/__init__.py b/intergrations/firecrawl/__init__.py new file mode 100644 index 000000000..01aa4abbd --- /dev/null +++ b/intergrations/firecrawl/__init__.py @@ -0,0 +1,15 @@ +""" +Firecrawl Plugin for RAGFlow + +This plugin integrates Firecrawl's web scraping capabilities into RAGFlow, +allowing users to import web content directly into their RAG workflows. +""" + +__version__ = "1.0.0" +__author__ = "Firecrawl Team" +__description__ = "Firecrawl integration for RAGFlow - Web content scraping and import" + +from firecrawl_connector import FirecrawlConnector +from firecrawl_config import FirecrawlConfig + +__all__ = ["FirecrawlConnector", "FirecrawlConfig"] diff --git a/intergrations/firecrawl/example_usage.py b/intergrations/firecrawl/example_usage.py new file mode 100644 index 000000000..fc8faeed5 --- /dev/null +++ b/intergrations/firecrawl/example_usage.py @@ -0,0 +1,261 @@ +""" +Example usage of the Firecrawl integration with RAGFlow. +""" + +import asyncio +import logging + +from .ragflow_integration import RAGFlowFirecrawlIntegration, create_firecrawl_integration +from .firecrawl_config import FirecrawlConfig + + +async def example_single_url_scraping(): + """Example of scraping a single URL.""" + print("=== Single URL Scraping Example ===") + + # Configuration + config = { + "api_key": "fc-your-api-key-here", # Replace with your actual API key + "api_url": "https://api.firecrawl.dev", + "max_retries": 3, + "timeout": 30, + "rate_limit_delay": 1.0 + } + + # Create integration + integration = create_firecrawl_integration(config) + + # Test connection + connection_test = await integration.test_connection() + print(f"Connection test: {connection_test}") + + if not connection_test["success"]: + print("Connection failed, please check your API key") + return + + # Scrape a single URL + urls = ["https://httpbin.org/json"] + documents = await integration.scrape_and_import(urls) + + for doc in documents: + print(f"Title: {doc.title}") + print(f"URL: {doc.source_url}") + print(f"Content length: {len(doc.content)}") + print(f"Language: {doc.language}") + print(f"Metadata: {doc.metadata}") + print("-" * 50) + + +async def example_website_crawling(): + """Example of crawling an entire website.""" + print("=== Website Crawling Example ===") + + # Configuration + config = { + "api_key": "fc-your-api-key-here", # Replace with your actual API key + "api_url": "https://api.firecrawl.dev", + "max_retries": 3, + "timeout": 30, + "rate_limit_delay": 1.0 + } + + # Create integration + integration = create_firecrawl_integration(config) + + # Crawl a website + start_url = "https://httpbin.org" + documents = await integration.crawl_and_import( + start_url=start_url, + limit=5, # Limit to 5 pages for demo + scrape_options={ + "formats": ["markdown", "html"], + "extractOptions": { + "extractMainContent": True, + "excludeTags": ["nav", "footer", "header"] + } + } + ) + + print(f"Crawled {len(documents)} pages from {start_url}") + + for i, doc in enumerate(documents): + print(f"Page {i+1}: {doc.title}") + print(f"URL: {doc.source_url}") + print(f"Content length: {len(doc.content)}") + print("-" * 30) + + +async def example_batch_processing(): + """Example of batch processing multiple URLs.""" + print("=== Batch Processing Example ===") + + # Configuration + config = { + "api_key": "fc-your-api-key-here", # Replace with your actual API key + "api_url": "https://api.firecrawl.dev", + "max_retries": 3, + "timeout": 30, + "rate_limit_delay": 1.0 + } + + # Create integration + integration = create_firecrawl_integration(config) + + # Batch scrape multiple URLs + urls = [ + "https://httpbin.org/json", + "https://httpbin.org/html", + "https://httpbin.org/xml" + ] + + documents = await integration.scrape_and_import( + urls=urls, + formats=["markdown", "html"], + extract_options={ + "extractMainContent": True, + "excludeTags": ["nav", "footer", "header"] + } + ) + + print(f"Processed {len(documents)} URLs") + + for doc in documents: + print(f"Title: {doc.title}") + print(f"URL: {doc.source_url}") + print(f"Content length: {len(doc.content)}") + + # Example of chunking for RAG processing + chunks = integration.processor.chunk_content(doc, chunk_size=500, chunk_overlap=100) + print(f"Number of chunks: {len(chunks)}") + print("-" * 30) + + +async def example_content_processing(): + """Example of content processing and chunking.""" + print("=== Content Processing Example ===") + + # Configuration + config = { + "api_key": "fc-your-api-key-here", # Replace with your actual API key + "api_url": "https://api.firecrawl.dev", + "max_retries": 3, + "timeout": 30, + "rate_limit_delay": 1.0 + } + + # Create integration + integration = create_firecrawl_integration(config) + + # Scrape content + urls = ["https://httpbin.org/html"] + documents = await integration.scrape_and_import(urls) + + for doc in documents: + print(f"Original document: {doc.title}") + print(f"Content length: {len(doc.content)}") + + # Chunk the content + chunks = integration.processor.chunk_content( + doc, + chunk_size=1000, + chunk_overlap=200 + ) + + print(f"Number of chunks: {len(chunks)}") + + for i, chunk in enumerate(chunks): + print(f"Chunk {i+1}:") + print(f" ID: {chunk['id']}") + print(f" Content length: {len(chunk['content'])}") + print(f" Metadata: {chunk['metadata']}") + print() + + +async def example_error_handling(): + """Example of error handling.""" + print("=== Error Handling Example ===") + + # Configuration with invalid API key + config = { + "api_key": "invalid-key", + "api_url": "https://api.firecrawl.dev", + "max_retries": 3, + "timeout": 30, + "rate_limit_delay": 1.0 + } + + # Create integration + integration = create_firecrawl_integration(config) + + # Test connection (should fail) + connection_test = await integration.test_connection() + print(f"Connection test with invalid key: {connection_test}") + + # Try to scrape (should fail gracefully) + try: + urls = ["https://httpbin.org/json"] + documents = await integration.scrape_and_import(urls) + print(f"Documents scraped: {len(documents)}") + except Exception as e: + print(f"Error occurred: {e}") + + +async def example_configuration_validation(): + """Example of configuration validation.""" + print("=== Configuration Validation Example ===") + + # Test various configurations + test_configs = [ + { + "api_key": "fc-valid-key", + "api_url": "https://api.firecrawl.dev", + "max_retries": 3, + "timeout": 30, + "rate_limit_delay": 1.0 + }, + { + "api_key": "invalid-key", # Invalid format + "api_url": "https://api.firecrawl.dev" + }, + { + "api_key": "fc-valid-key", + "api_url": "invalid-url", # Invalid URL + "max_retries": 15, # Too high + "timeout": 500, # Too high + "rate_limit_delay": 15.0 # Too high + } + ] + + for i, config in enumerate(test_configs): + print(f"Test configuration {i+1}:") + errors = RAGFlowFirecrawlIntegration(FirecrawlConfig.from_dict(config)).validate_config(config) + + if errors: + print(" Errors found:") + for field, error in errors.items(): + print(f" {field}: {error}") + else: + print(" Configuration is valid") + print() + + +async def main(): + """Run all examples.""" + # Set up logging + logging.basicConfig(level=logging.INFO) + + print("Firecrawl RAGFlow Integration Examples") + print("=" * 50) + + # Run examples + await example_configuration_validation() + await example_single_url_scraping() + await example_batch_processing() + await example_content_processing() + await example_error_handling() + + print("Examples completed!") + + +if __name__ == "__main__": + asyncio.run(main()) diff --git a/intergrations/firecrawl/firecrawl_config.py b/intergrations/firecrawl/firecrawl_config.py new file mode 100644 index 000000000..dc5f9cb38 --- /dev/null +++ b/intergrations/firecrawl/firecrawl_config.py @@ -0,0 +1,79 @@ +""" +Configuration management for Firecrawl integration with RAGFlow. +""" + +import os +from typing import Dict, Any +from dataclasses import dataclass +import json + + +@dataclass +class FirecrawlConfig: + """Configuration class for Firecrawl integration.""" + + api_key: str + api_url: str = "https://api.firecrawl.dev" + max_retries: int = 3 + timeout: int = 30 + rate_limit_delay: float = 1.0 + max_concurrent_requests: int = 5 + + def __post_init__(self): + """Validate configuration after initialization.""" + if not self.api_key: + raise ValueError("Firecrawl API key is required") + + if not self.api_key.startswith("fc-"): + raise ValueError("Invalid Firecrawl API key format. Must start with 'fc-'") + + if self.max_retries < 1 or self.max_retries > 10: + raise ValueError("Max retries must be between 1 and 10") + + if self.timeout < 5 or self.timeout > 300: + raise ValueError("Timeout must be between 5 and 300 seconds") + + if self.rate_limit_delay < 0.1 or self.rate_limit_delay > 10.0: + raise ValueError("Rate limit delay must be between 0.1 and 10.0 seconds") + + @classmethod + def from_env(cls) -> "FirecrawlConfig": + """Create configuration from environment variables.""" + api_key = os.getenv("FIRECRAWL_API_KEY") + if not api_key: + raise ValueError("FIRECRAWL_API_KEY environment variable not set") + + return cls( + api_key=api_key, + api_url=os.getenv("FIRECRAWL_API_URL", "https://api.firecrawl.dev"), + max_retries=int(os.getenv("FIRECRAWL_MAX_RETRIES", "3")), + timeout=int(os.getenv("FIRECRAWL_TIMEOUT", "30")), + rate_limit_delay=float(os.getenv("FIRECRAWL_RATE_LIMIT_DELAY", "1.0")), + max_concurrent_requests=int(os.getenv("FIRECRAWL_MAX_CONCURRENT", "5")) + ) + + @classmethod + def from_dict(cls, config_dict: Dict[str, Any]) -> "FirecrawlConfig": + """Create configuration from dictionary.""" + return cls(**config_dict) + + def to_dict(self) -> Dict[str, Any]: + """Convert configuration to dictionary.""" + return { + "api_key": self.api_key, + "api_url": self.api_url, + "max_retries": self.max_retries, + "timeout": self.timeout, + "rate_limit_delay": self.rate_limit_delay, + "max_concurrent_requests": self.max_concurrent_requests + } + + def to_json(self) -> str: + """Convert configuration to JSON string.""" + return json.dumps(self.to_dict(), indent=2) + + @classmethod + def from_json(cls, json_str: str) -> "FirecrawlConfig": + """Create configuration from JSON string.""" + config_dict = json.loads(json_str) + return cls.from_dict(config_dict) diff --git a/intergrations/firecrawl/firecrawl_connector.py b/intergrations/firecrawl/firecrawl_connector.py new file mode 100644 index 000000000..d587e3a9d --- /dev/null +++ b/intergrations/firecrawl/firecrawl_connector.py @@ -0,0 +1,262 @@ +""" +Main connector class for integrating Firecrawl with RAGFlow. +""" + +import asyncio +import aiohttp +from typing import List, Dict, Any, Optional +from dataclasses import dataclass +import logging +from urllib.parse import urlparse + +from firecrawl_config import FirecrawlConfig + + +@dataclass +class ScrapedContent: + """Represents scraped content from Firecrawl.""" + + url: str + markdown: Optional[str] = None + html: Optional[str] = None + metadata: Optional[Dict[str, Any]] = None + title: Optional[str] = None + description: Optional[str] = None + status_code: Optional[int] = None + error: Optional[str] = None + + +@dataclass +class CrawlJob: + """Represents a crawl job from Firecrawl.""" + + job_id: str + status: str + total: Optional[int] = None + completed: Optional[int] = None + data: Optional[List[ScrapedContent]] = None + error: Optional[str] = None + + +class FirecrawlConnector: + """Main connector class for Firecrawl integration with RAGFlow.""" + + def __init__(self, config: FirecrawlConfig): + """Initialize the Firecrawl connector.""" + self.config = config + self.logger = logging.getLogger(__name__) + self.session: Optional[aiohttp.ClientSession] = None + self._rate_limit_semaphore = asyncio.Semaphore(config.max_concurrent_requests) + + async def __aenter__(self): + """Async context manager entry.""" + await self._create_session() + return self + + async def __aexit__(self, exc_type, exc_val, exc_tb): + """Async context manager exit.""" + await self._close_session() + + async def _create_session(self): + """Create aiohttp session with proper headers.""" + headers = { + "Authorization": f"Bearer {self.config.api_key}", + "Content-Type": "application/json", + "User-Agent": "RAGFlow-Firecrawl-Plugin/1.0.0" + } + + timeout = aiohttp.ClientTimeout(total=self.config.timeout) + self.session = aiohttp.ClientSession( + headers=headers, + timeout=timeout + ) + + async def _close_session(self): + """Close aiohttp session.""" + if self.session: + await self.session.close() + + async def _make_request(self, method: str, endpoint: str, **kwargs) -> Dict[str, Any]: + """Make HTTP request with rate limiting and retry logic.""" + async with self._rate_limit_semaphore: + # Rate limiting + await asyncio.sleep(self.config.rate_limit_delay) + + url = f"{self.config.api_url}{endpoint}" + + for attempt in range(self.config.max_retries): + try: + async with self.session.request(method, url, **kwargs) as response: + if response.status == 429: # Rate limited + wait_time = 2 ** attempt + self.logger.warning(f"Rate limited, waiting {wait_time}s") + await asyncio.sleep(wait_time) + continue + + response.raise_for_status() + return await response.json() + + except aiohttp.ClientError as e: + self.logger.error(f"Request failed (attempt {attempt + 1}): {e}") + if attempt == self.config.max_retries - 1: + raise + await asyncio.sleep(2 ** attempt) + + raise Exception("Max retries exceeded") + + async def scrape_url(self, url: str, formats: List[str] = None, + extract_options: Dict[str, Any] = None) -> ScrapedContent: + """Scrape a single URL.""" + if formats is None: + formats = ["markdown", "html"] + + payload = { + "url": url, + "formats": formats + } + + if extract_options: + payload["extractOptions"] = extract_options + + try: + response = await self._make_request("POST", "/v2/scrape", json=payload) + + if not response.get("success"): + return ScrapedContent(url=url, error=response.get("error", "Unknown error")) + + data = response.get("data", {}) + metadata = data.get("metadata", {}) + + return ScrapedContent( + url=url, + markdown=data.get("markdown"), + html=data.get("html"), + metadata=metadata, + title=metadata.get("title"), + description=metadata.get("description"), + status_code=metadata.get("statusCode") + ) + + except Exception as e: + self.logger.error(f"Failed to scrape {url}: {e}") + return ScrapedContent(url=url, error=str(e)) + + async def start_crawl(self, url: str, limit: int = 100, + scrape_options: Dict[str, Any] = None) -> CrawlJob: + """Start a crawl job.""" + if scrape_options is None: + scrape_options = {"formats": ["markdown", "html"]} + + payload = { + "url": url, + "limit": limit, + "scrapeOptions": scrape_options + } + + try: + response = await self._make_request("POST", "/v2/crawl", json=payload) + + if not response.get("success"): + return CrawlJob( + job_id="", + status="failed", + error=response.get("error", "Unknown error") + ) + + job_id = response.get("id") + return CrawlJob(job_id=job_id, status="started") + + except Exception as e: + self.logger.error(f"Failed to start crawl for {url}: {e}") + return CrawlJob(job_id="", status="failed", error=str(e)) + + async def get_crawl_status(self, job_id: str) -> CrawlJob: + """Get the status of a crawl job.""" + try: + response = await self._make_request("GET", f"/v2/crawl/{job_id}") + + if not response.get("success"): + return CrawlJob( + job_id=job_id, + status="failed", + error=response.get("error", "Unknown error") + ) + + status = response.get("status", "unknown") + total = response.get("total") + data = response.get("data", []) + + # Convert data to ScrapedContent objects + scraped_content = [] + for item in data: + metadata = item.get("metadata", {}) + scraped_content.append(ScrapedContent( + url=metadata.get("sourceURL", ""), + markdown=item.get("markdown"), + html=item.get("html"), + metadata=metadata, + title=metadata.get("title"), + description=metadata.get("description"), + status_code=metadata.get("statusCode") + )) + + return CrawlJob( + job_id=job_id, + status=status, + total=total, + completed=len(scraped_content), + data=scraped_content + ) + + except Exception as e: + self.logger.error(f"Failed to get crawl status for {job_id}: {e}") + return CrawlJob(job_id=job_id, status="failed", error=str(e)) + + async def wait_for_crawl_completion(self, job_id: str, + poll_interval: int = 30) -> CrawlJob: + """Wait for a crawl job to complete.""" + while True: + job = await self.get_crawl_status(job_id) + + if job.status in ["completed", "failed", "cancelled"]: + return job + + self.logger.info(f"Crawl {job_id} status: {job.status}") + await asyncio.sleep(poll_interval) + + async def batch_scrape(self, urls: List[str], + formats: List[str] = None) -> List[ScrapedContent]: + """Scrape multiple URLs concurrently.""" + if formats is None: + formats = ["markdown", "html"] + + tasks = [self.scrape_url(url, formats) for url in urls] + results = await asyncio.gather(*tasks, return_exceptions=True) + + # Handle exceptions + processed_results = [] + for i, result in enumerate(results): + if isinstance(result, Exception): + processed_results.append(ScrapedContent( + url=urls[i], + error=str(result) + )) + else: + processed_results.append(result) + + return processed_results + + def validate_url(self, url: str) -> bool: + """Validate if URL is properly formatted.""" + try: + result = urlparse(url) + return all([result.scheme, result.netloc]) + except Exception: + return False + + def extract_domain(self, url: str) -> str: + """Extract domain from URL.""" + try: + return urlparse(url).netloc + except Exception: + return "" diff --git a/intergrations/firecrawl/firecrawl_processor.py b/intergrations/firecrawl/firecrawl_processor.py new file mode 100644 index 000000000..c1cbb7ad5 --- /dev/null +++ b/intergrations/firecrawl/firecrawl_processor.py @@ -0,0 +1,275 @@ +""" +Content processor for converting Firecrawl output to RAGFlow document format. +""" + +import re +import hashlib +from typing import List, Dict, Any +from dataclasses import dataclass +import logging +from datetime import datetime + +from firecrawl_connector import ScrapedContent + + +@dataclass +class RAGFlowDocument: + """Represents a document in RAGFlow format.""" + + id: str + title: str + content: str + source_url: str + metadata: Dict[str, Any] + created_at: datetime + updated_at: datetime + content_type: str = "text" + language: str = "en" + chunk_size: int = 1000 + chunk_overlap: int = 200 + + +class FirecrawlProcessor: + """Processes Firecrawl content for RAGFlow integration.""" + + def __init__(self): + """Initialize the processor.""" + self.logger = logging.getLogger(__name__) + + def generate_document_id(self, url: str, content: str) -> str: + """Generate a unique document ID.""" + # Create a hash based on URL and content + content_hash = hashlib.md5(f"{url}:{content[:100]}".encode()).hexdigest() + return f"firecrawl_{content_hash}" + + def clean_content(self, content: str) -> str: + """Clean and normalize content.""" + if not content: + return "" + + # Remove excessive whitespace + content = re.sub(r'\s+', ' ', content) + + # Remove HTML tags if present + content = re.sub(r'<[^>]+>', '', content) + + # Remove special characters that might cause issues + content = re.sub(r'[^\w\s\.\,\!\?\;\:\-\(\)\[\]\"\']', '', content) + + return content.strip() + + def extract_title(self, content: ScrapedContent) -> str: + """Extract title from scraped content.""" + if content.title: + return content.title + + if content.metadata and content.metadata.get("title"): + return content.metadata["title"] + + # Extract title from markdown if available + if content.markdown: + title_match = re.search(r'^#\s+(.+)$', content.markdown, re.MULTILINE) + if title_match: + return title_match.group(1).strip() + + # Fallback to URL + return content.url.split('/')[-1] or content.url + + def extract_description(self, content: ScrapedContent) -> str: + """Extract description from scraped content.""" + if content.description: + return content.description + + if content.metadata and content.metadata.get("description"): + return content.metadata["description"] + + # Extract first paragraph from markdown + if content.markdown: + # Remove headers and get first paragraph + text = re.sub(r'^#+\s+.*$', '', content.markdown, flags=re.MULTILINE) + paragraphs = [p.strip() for p in text.split('\n\n') if p.strip()] + if paragraphs: + return paragraphs[0][:200] + "..." if len(paragraphs[0]) > 200 else paragraphs[0] + + return "" + + def extract_language(self, content: ScrapedContent) -> str: + """Extract language from content metadata.""" + if content.metadata and content.metadata.get("language"): + return content.metadata["language"] + + # Simple language detection based on common words + if content.markdown: + text = content.markdown.lower() + if any(word in text for word in ["the", "and", "or", "but", "in", "on", "at"]): + return "en" + elif any(word in text for word in ["le", "la", "les", "de", "du", "des"]): + return "fr" + elif any(word in text for word in ["der", "die", "das", "und", "oder"]): + return "de" + elif any(word in text for word in ["el", "la", "los", "las", "de", "del"]): + return "es" + + return "en" # Default to English + + def create_metadata(self, content: ScrapedContent) -> Dict[str, Any]: + """Create comprehensive metadata for RAGFlow document.""" + metadata = { + "source": "firecrawl", + "url": content.url, + "domain": self.extract_domain(content.url), + "scraped_at": datetime.utcnow().isoformat(), + "status_code": content.status_code, + "content_length": len(content.markdown or ""), + "has_html": bool(content.html), + "has_markdown": bool(content.markdown) + } + + # Add original metadata if available + if content.metadata: + metadata.update({ + "original_title": content.metadata.get("title"), + "original_description": content.metadata.get("description"), + "original_language": content.metadata.get("language"), + "original_keywords": content.metadata.get("keywords"), + "original_robots": content.metadata.get("robots"), + "og_title": content.metadata.get("ogTitle"), + "og_description": content.metadata.get("ogDescription"), + "og_image": content.metadata.get("ogImage"), + "og_url": content.metadata.get("ogUrl") + }) + + return metadata + + def extract_domain(self, url: str) -> str: + """Extract domain from URL.""" + try: + from urllib.parse import urlparse + return urlparse(url).netloc + except Exception: + return "" + + def process_content(self, content: ScrapedContent) -> RAGFlowDocument: + """Process scraped content into RAGFlow document format.""" + if content.error: + raise ValueError(f"Content has error: {content.error}") + + # Determine primary content + primary_content = content.markdown or content.html or "" + if not primary_content: + raise ValueError("No content available to process") + + # Clean content + cleaned_content = self.clean_content(primary_content) + + # Extract metadata + title = self.extract_title(content) + language = self.extract_language(content) + metadata = self.create_metadata(content) + + # Generate document ID + doc_id = self.generate_document_id(content.url, cleaned_content) + + # Create RAGFlow document + document = RAGFlowDocument( + id=doc_id, + title=title, + content=cleaned_content, + source_url=content.url, + metadata=metadata, + created_at=datetime.utcnow(), + updated_at=datetime.utcnow(), + content_type="text", + language=language + ) + + return document + + def process_batch(self, contents: List[ScrapedContent]) -> List[RAGFlowDocument]: + """Process multiple scraped contents into RAGFlow documents.""" + documents = [] + + for content in contents: + try: + document = self.process_content(content) + documents.append(document) + except Exception as e: + self.logger.error(f"Failed to process content from {content.url}: {e}") + continue + + return documents + + def chunk_content(self, document: RAGFlowDocument, + chunk_size: int = 1000, + chunk_overlap: int = 200) -> List[Dict[str, Any]]: + """Chunk document content for RAG processing.""" + content = document.content + chunks = [] + + if len(content) <= chunk_size: + return [{ + "id": f"{document.id}_chunk_0", + "content": content, + "metadata": { + **document.metadata, + "chunk_index": 0, + "total_chunks": 1 + } + }] + + # Split content into chunks + start = 0 + chunk_index = 0 + + while start < len(content): + end = start + chunk_size + + # Try to break at sentence boundary + if end < len(content): + # Look for sentence endings + sentence_end = content.rfind('.', start, end) + if sentence_end > start + chunk_size // 2: + end = sentence_end + 1 + + chunk_content = content[start:end].strip() + + if chunk_content: + chunks.append({ + "id": f"{document.id}_chunk_{chunk_index}", + "content": chunk_content, + "metadata": { + **document.metadata, + "chunk_index": chunk_index, + "total_chunks": len(chunks) + 1, # Will be updated + "chunk_start": start, + "chunk_end": end + } + }) + chunk_index += 1 + + # Move start position with overlap + start = end - chunk_overlap + if start >= len(content): + break + + # Update total chunks count + for chunk in chunks: + chunk["metadata"]["total_chunks"] = len(chunks) + + return chunks + + def validate_document(self, document: RAGFlowDocument) -> bool: + """Validate RAGFlow document.""" + if not document.id: + return False + + if not document.title: + return False + + if not document.content: + return False + + if not document.source_url: + return False + + return True diff --git a/intergrations/firecrawl/firecrawl_ui.py b/intergrations/firecrawl/firecrawl_ui.py new file mode 100644 index 000000000..0660a1e4f --- /dev/null +++ b/intergrations/firecrawl/firecrawl_ui.py @@ -0,0 +1,259 @@ +""" +UI components for Firecrawl integration in RAGFlow. +""" + +from typing import Dict, Any, List, Optional +from dataclasses import dataclass + + +@dataclass +class FirecrawlUIComponent: + """Represents a UI component for Firecrawl integration.""" + + component_type: str + props: Dict[str, Any] + children: Optional[List['FirecrawlUIComponent']] = None + + +class FirecrawlUIBuilder: + """Builder for Firecrawl UI components in RAGFlow.""" + + @staticmethod + def create_data_source_config() -> Dict[str, Any]: + """Create configuration for Firecrawl data source.""" + return { + "name": "firecrawl", + "display_name": "Firecrawl Web Scraper", + "description": "Import web content using Firecrawl's powerful scraping capabilities", + "icon": "🌐", + "category": "web", + "version": "1.0.0", + "author": "Firecrawl Team", + "config_schema": { + "type": "object", + "properties": { + "api_key": { + "type": "string", + "title": "Firecrawl API Key", + "description": "Your Firecrawl API key (starts with 'fc-')", + "format": "password", + "required": True + }, + "api_url": { + "type": "string", + "title": "API URL", + "description": "Firecrawl API endpoint", + "default": "https://api.firecrawl.dev", + "required": False + }, + "max_retries": { + "type": "integer", + "title": "Max Retries", + "description": "Maximum number of retry attempts", + "default": 3, + "minimum": 1, + "maximum": 10 + }, + "timeout": { + "type": "integer", + "title": "Timeout (seconds)", + "description": "Request timeout in seconds", + "default": 30, + "minimum": 5, + "maximum": 300 + }, + "rate_limit_delay": { + "type": "number", + "title": "Rate Limit Delay", + "description": "Delay between requests in seconds", + "default": 1.0, + "minimum": 0.1, + "maximum": 10.0 + } + }, + "required": ["api_key"] + } + } + + @staticmethod + def create_scraping_form() -> Dict[str, Any]: + """Create form for scraping configuration.""" + return { + "type": "form", + "title": "Firecrawl Web Scraping", + "description": "Configure web scraping parameters", + "fields": [ + { + "name": "urls", + "type": "array", + "title": "URLs to Scrape", + "description": "Enter URLs to scrape (one per line)", + "items": { + "type": "string", + "format": "uri" + }, + "required": True, + "minItems": 1 + }, + { + "name": "scrape_type", + "type": "string", + "title": "Scrape Type", + "description": "Choose scraping method", + "enum": ["single", "crawl", "batch"], + "enumNames": ["Single URL", "Crawl Website", "Batch URLs"], + "default": "single", + "required": True + }, + { + "name": "formats", + "type": "array", + "title": "Output Formats", + "description": "Select output formats", + "items": { + "type": "string", + "enum": ["markdown", "html", "links", "screenshot"] + }, + "default": ["markdown", "html"], + "required": True + }, + { + "name": "crawl_limit", + "type": "integer", + "title": "Crawl Limit", + "description": "Maximum number of pages to crawl (for crawl type)", + "default": 100, + "minimum": 1, + "maximum": 1000, + "condition": { + "field": "scrape_type", + "equals": "crawl" + } + }, + { + "name": "extract_options", + "type": "object", + "title": "Extraction Options", + "description": "Advanced extraction settings", + "properties": { + "extractMainContent": { + "type": "boolean", + "title": "Extract Main Content Only", + "default": True + }, + "excludeTags": { + "type": "array", + "title": "Exclude Tags", + "description": "HTML tags to exclude", + "items": {"type": "string"}, + "default": ["nav", "footer", "header", "aside"] + }, + "includeTags": { + "type": "array", + "title": "Include Tags", + "description": "HTML tags to include", + "items": {"type": "string"}, + "default": ["main", "article", "section", "div", "p"] + } + } + } + ] + } + + @staticmethod + def create_progress_component() -> Dict[str, Any]: + """Create progress tracking component.""" + return { + "type": "progress", + "title": "Scraping Progress", + "description": "Track the progress of your web scraping job", + "properties": { + "show_percentage": True, + "show_eta": True, + "show_details": True + } + } + + @staticmethod + def create_results_view() -> Dict[str, Any]: + """Create results display component.""" + return { + "type": "results", + "title": "Scraping Results", + "description": "View and manage scraped content", + "properties": { + "show_preview": True, + "show_metadata": True, + "allow_editing": True, + "show_chunks": True + } + } + + @staticmethod + def create_error_handler() -> Dict[str, Any]: + """Create error handling component.""" + return { + "type": "error_handler", + "title": "Error Handling", + "description": "Handle scraping errors and retries", + "properties": { + "show_retry_button": True, + "show_error_details": True, + "auto_retry": False, + "max_retries": 3 + } + } + + @staticmethod + def create_validation_rules() -> Dict[str, Any]: + """Create validation rules for Firecrawl integration.""" + return { + "url_validation": { + "pattern": r"^https?://.+", + "message": "URL must start with http:// or https://" + }, + "api_key_validation": { + "pattern": r"^fc-[a-zA-Z0-9]+$", + "message": "API key must start with 'fc-' followed by alphanumeric characters" + }, + "rate_limit_validation": { + "min": 0.1, + "max": 10.0, + "message": "Rate limit delay must be between 0.1 and 10.0 seconds" + } + } + + @staticmethod + def create_help_text() -> Dict[str, str]: + """Create help text for users.""" + return { + "api_key_help": "Get your API key from https://firecrawl.dev. Sign up for a free account to get started.", + "url_help": "Enter the URLs you want to scrape. You can add multiple URLs for batch processing.", + "crawl_help": "Crawling will follow links from the starting URL and scrape all accessible pages within the limit.", + "formats_help": "Choose the output formats you need. Markdown is recommended for RAG processing.", + "extract_help": "Extraction options help filter content to get only the main content without navigation and ads." + } + + @staticmethod + def create_ui_schema() -> Dict[str, Any]: + """Create complete UI schema for Firecrawl integration.""" + return { + "version": "1.0.0", + "components": { + "data_source_config": FirecrawlUIBuilder.create_data_source_config(), + "scraping_form": FirecrawlUIBuilder.create_scraping_form(), + "progress_component": FirecrawlUIBuilder.create_progress_component(), + "results_view": FirecrawlUIBuilder.create_results_view(), + "error_handler": FirecrawlUIBuilder.create_error_handler() + }, + "validation_rules": FirecrawlUIBuilder.create_validation_rules(), + "help_text": FirecrawlUIBuilder.create_help_text(), + "workflow": [ + "configure_data_source", + "setup_scraping_parameters", + "start_scraping_job", + "monitor_progress", + "review_results", + "import_to_ragflow" + ] + } diff --git a/intergrations/firecrawl/integration.py b/intergrations/firecrawl/integration.py new file mode 100644 index 000000000..b4fbf6ced --- /dev/null +++ b/intergrations/firecrawl/integration.py @@ -0,0 +1,149 @@ +""" +RAGFlow Integration Entry Point for Firecrawl + +This file provides the main entry point for the Firecrawl integration with RAGFlow. +It follows RAGFlow's integration patterns and provides the necessary interfaces. +""" + +from typing import Dict, Any +import logging + +from ragflow_integration import RAGFlowFirecrawlIntegration, create_firecrawl_integration +from firecrawl_ui import FirecrawlUIBuilder + +# Set up logging +logger = logging.getLogger(__name__) + + +class FirecrawlRAGFlowPlugin: + """ + Main plugin class for Firecrawl integration with RAGFlow. + This class provides the interface that RAGFlow expects from integrations. + """ + + def __init__(self): + """Initialize the Firecrawl plugin.""" + self.name = "firecrawl" + self.display_name = "Firecrawl Web Scraper" + self.description = "Import web content using Firecrawl's powerful scraping capabilities" + self.version = "1.0.0" + self.author = "Firecrawl Team" + self.category = "web" + self.icon = "🌐" + + logger.info(f"Initialized {self.display_name} plugin v{self.version}") + + def get_plugin_info(self) -> Dict[str, Any]: + """Get plugin information for RAGFlow.""" + return { + "name": self.name, + "display_name": self.display_name, + "description": self.description, + "version": self.version, + "author": self.author, + "category": self.category, + "icon": self.icon, + "supported_formats": ["markdown", "html", "links", "screenshot"], + "supported_scrape_types": ["single", "crawl", "batch"] + } + + def get_config_schema(self) -> Dict[str, Any]: + """Get configuration schema for RAGFlow.""" + return FirecrawlUIBuilder.create_data_source_config()["config_schema"] + + def get_ui_schema(self) -> Dict[str, Any]: + """Get UI schema for RAGFlow.""" + return FirecrawlUIBuilder.create_ui_schema() + + def validate_config(self, config: Dict[str, Any]) -> Dict[str, Any]: + """Validate configuration and return any errors.""" + try: + integration = create_firecrawl_integration(config) + return integration.validate_config(config) + except Exception as e: + logger.error(f"Configuration validation error: {e}") + return {"general": str(e)} + + def test_connection(self, config: Dict[str, Any]) -> Dict[str, Any]: + """Test connection to Firecrawl API.""" + try: + integration = create_firecrawl_integration(config) + # Run the async test_connection method + import asyncio + return asyncio.run(integration.test_connection()) + except Exception as e: + logger.error(f"Connection test error: {e}") + return { + "success": False, + "error": str(e), + "message": "Connection test failed" + } + + def create_integration(self, config: Dict[str, Any]) -> RAGFlowFirecrawlIntegration: + """Create and return a Firecrawl integration instance.""" + return create_firecrawl_integration(config) + + def get_help_text(self) -> Dict[str, str]: + """Get help text for users.""" + return FirecrawlUIBuilder.create_help_text() + + def get_validation_rules(self) -> Dict[str, Any]: + """Get validation rules for configuration.""" + return FirecrawlUIBuilder.create_validation_rules() + + +# RAGFlow integration entry points +def get_plugin() -> FirecrawlRAGFlowPlugin: + """Get the plugin instance for RAGFlow.""" + return FirecrawlRAGFlowPlugin() + + +def get_integration(config: Dict[str, Any]) -> RAGFlowFirecrawlIntegration: + """Get an integration instance with the given configuration.""" + return create_firecrawl_integration(config) + + +def get_config_schema() -> Dict[str, Any]: + """Get the configuration schema.""" + return FirecrawlUIBuilder.create_data_source_config()["config_schema"] + + +def get_ui_schema() -> Dict[str, Any]: + """Get the UI schema.""" + return FirecrawlUIBuilder.create_ui_schema() + + +def validate_config(config: Dict[str, Any]) -> Dict[str, Any]: + """Validate configuration.""" + try: + integration = create_firecrawl_integration(config) + return integration.validate_config(config) + except Exception as e: + return {"general": str(e)} + + +def test_connection(config: Dict[str, Any]) -> Dict[str, Any]: + """Test connection to Firecrawl API.""" + try: + integration = create_firecrawl_integration(config) + return integration.test_connection() + except Exception as e: + return { + "success": False, + "error": str(e), + "message": "Connection test failed" + } + + +# Export main functions and classes +__all__ = [ + "FirecrawlRAGFlowPlugin", + "get_plugin", + "get_integration", + "get_config_schema", + "get_ui_schema", + "validate_config", + "test_connection", + "RAGFlowFirecrawlIntegration", + "create_firecrawl_integration" +] diff --git a/intergrations/firecrawl/ragflow_integration.py b/intergrations/firecrawl/ragflow_integration.py new file mode 100644 index 000000000..2d0bfe4b7 --- /dev/null +++ b/intergrations/firecrawl/ragflow_integration.py @@ -0,0 +1,175 @@ +""" +Main integration file for Firecrawl with RAGFlow. +This file provides the interface between RAGFlow and the Firecrawl plugin. +""" + +import logging +from typing import List, Dict, Any + +from firecrawl_connector import FirecrawlConnector +from firecrawl_config import FirecrawlConfig +from firecrawl_processor import FirecrawlProcessor, RAGFlowDocument +from firecrawl_ui import FirecrawlUIBuilder + + +class RAGFlowFirecrawlIntegration: + """Main integration class for Firecrawl with RAGFlow.""" + + def __init__(self, config: FirecrawlConfig): + """Initialize the integration.""" + self.config = config + self.connector = FirecrawlConnector(config) + self.processor = FirecrawlProcessor() + self.logger = logging.getLogger(__name__) + + async def scrape_and_import(self, urls: List[str], + formats: List[str] = None, + extract_options: Dict[str, Any] = None) -> List[RAGFlowDocument]: + """Scrape URLs and convert to RAGFlow documents.""" + if formats is None: + formats = ["markdown", "html"] + + async with self.connector: + # Scrape URLs + scraped_contents = await self.connector.batch_scrape(urls, formats) + + # Process into RAGFlow documents + documents = self.processor.process_batch(scraped_contents) + + return documents + + async def crawl_and_import(self, start_url: str, + limit: int = 100, + scrape_options: Dict[str, Any] = None) -> List[RAGFlowDocument]: + """Crawl a website and convert to RAGFlow documents.""" + if scrape_options is None: + scrape_options = {"formats": ["markdown", "html"]} + + async with self.connector: + # Start crawl job + crawl_job = await self.connector.start_crawl(start_url, limit, scrape_options) + + if crawl_job.error: + raise Exception(f"Failed to start crawl: {crawl_job.error}") + + # Wait for completion + completed_job = await self.connector.wait_for_crawl_completion(crawl_job.job_id) + + if completed_job.error: + raise Exception(f"Crawl failed: {completed_job.error}") + + # Process into RAGFlow documents + documents = self.processor.process_batch(completed_job.data or []) + + return documents + + def get_ui_schema(self) -> Dict[str, Any]: + """Get UI schema for RAGFlow integration.""" + return FirecrawlUIBuilder.create_ui_schema() + + def validate_config(self, config_dict: Dict[str, Any]) -> Dict[str, Any]: + """Validate configuration and return any errors.""" + errors = {} + + # Validate API key + api_key = config_dict.get("api_key", "") + if not api_key: + errors["api_key"] = "API key is required" + elif not api_key.startswith("fc-"): + errors["api_key"] = "API key must start with 'fc-'" + + # Validate API URL + api_url = config_dict.get("api_url", "https://api.firecrawl.dev") + if not api_url.startswith("http"): + errors["api_url"] = "API URL must start with http:// or https://" + + # Validate numeric fields + try: + max_retries = int(config_dict.get("max_retries", 3)) + if max_retries < 1 or max_retries > 10: + errors["max_retries"] = "Max retries must be between 1 and 10" + except (ValueError, TypeError): + errors["max_retries"] = "Max retries must be a valid integer" + + try: + timeout = int(config_dict.get("timeout", 30)) + if timeout < 5 or timeout > 300: + errors["timeout"] = "Timeout must be between 5 and 300 seconds" + except (ValueError, TypeError): + errors["timeout"] = "Timeout must be a valid integer" + + try: + rate_limit_delay = float(config_dict.get("rate_limit_delay", 1.0)) + if rate_limit_delay < 0.1 or rate_limit_delay > 10.0: + errors["rate_limit_delay"] = "Rate limit delay must be between 0.1 and 10.0 seconds" + except (ValueError, TypeError): + errors["rate_limit_delay"] = "Rate limit delay must be a valid number" + + return errors + + def create_config(self, config_dict: Dict[str, Any]) -> FirecrawlConfig: + """Create FirecrawlConfig from dictionary.""" + return FirecrawlConfig.from_dict(config_dict) + + async def test_connection(self) -> Dict[str, Any]: + """Test the connection to Firecrawl API.""" + try: + async with self.connector: + # Try to scrape a simple URL to test connection + test_url = "https://httpbin.org/json" + result = await self.connector.scrape_url(test_url, ["markdown"]) + + if result.error: + return { + "success": False, + "error": result.error, + "message": "Failed to connect to Firecrawl API" + } + + return { + "success": True, + "message": "Successfully connected to Firecrawl API", + "test_url": test_url, + "response_time": "N/A" # Could be enhanced to measure actual response time + } + + except Exception as e: + return { + "success": False, + "error": str(e), + "message": "Connection test failed" + } + + def get_supported_formats(self) -> List[str]: + """Get list of supported output formats.""" + return ["markdown", "html", "links", "screenshot"] + + def get_supported_scrape_types(self) -> List[str]: + """Get list of supported scrape types.""" + return ["single", "crawl", "batch"] + + def get_help_text(self) -> Dict[str, str]: + """Get help text for users.""" + return FirecrawlUIBuilder.create_help_text() + + def get_validation_rules(self) -> Dict[str, Any]: + """Get validation rules for configuration.""" + return FirecrawlUIBuilder.create_validation_rules() + + +# Factory function for creating integration instance +def create_firecrawl_integration(config_dict: Dict[str, Any]) -> RAGFlowFirecrawlIntegration: + """Create a Firecrawl integration instance from configuration.""" + config = FirecrawlConfig.from_dict(config_dict) + return RAGFlowFirecrawlIntegration(config) + + +# Export main classes and functions +__all__ = [ + "RAGFlowFirecrawlIntegration", + "create_firecrawl_integration", + "FirecrawlConfig", + "FirecrawlConnector", + "FirecrawlProcessor", + "RAGFlowDocument" +] diff --git a/intergrations/firecrawl/requirements.txt b/intergrations/firecrawl/requirements.txt new file mode 100644 index 000000000..53b39c46a --- /dev/null +++ b/intergrations/firecrawl/requirements.txt @@ -0,0 +1,31 @@ +# Firecrawl Plugin for RAGFlow - Dependencies + +# Core dependencies +aiohttp>=3.8.0 +asyncio-throttle>=1.0.0 + +# Data processing +pydantic>=2.0.0 +python-dateutil>=2.8.0 + +# HTTP and networking +urllib3>=1.26.0 +requests>=2.28.0 + +# Logging and monitoring +structlog>=22.0.0 + +# Optional: For advanced content processing +beautifulsoup4>=4.11.0 +lxml>=4.9.0 +html2text>=2020.1.16 + +# Optional: For enhanced error handling +tenacity>=8.0.0 + +# Development dependencies (optional) +pytest>=7.0.0 +pytest-asyncio>=0.21.0 +black>=22.0.0 +flake8>=5.0.0 +mypy>=1.0.0 diff --git a/pyproject.toml b/pyproject.toml index b88a56349..b594f9e1a 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -132,6 +132,7 @@ dependencies = [ "python-calamine>=0.4.0", "litellm>=1.74.15.post1", "flask-mail>=0.10.0", + "lark>=1.2.2", ] [project.optional-dependencies] @@ -157,6 +158,9 @@ test = [ "requests-toolbelt>=1.0.0", ] +[[tool.uv.index]] +url = "https://mirrors.aliyun.com/pypi/simple" + [tool.setuptools] packages = [ 'agent', @@ -170,9 +174,6 @@ packages = [ 'sdk.python.ragflow_sdk', ] -[[tool.uv.index]] -url = "https://mirrors.aliyun.com/pypi/simple" - [tool.ruff] line-length = 200 exclude = [".venv", "rag/svr/discord_svr.py"] diff --git a/rag/app/laws.py b/rag/app/laws.py index 185c66935..35cb706d8 100644 --- a/rag/app/laws.py +++ b/rag/app/laws.py @@ -22,12 +22,15 @@ from docx import Document from api.db import ParserType from deepdoc.parser.utils import get_text -from rag.nlp import bullets_category, remove_contents_table, hierarchical_merge, \ - make_colon_as_title, tokenize_chunks, docx_question_level -from rag.nlp import rag_tokenizer +from rag.nlp import bullets_category, remove_contents_table, \ + make_colon_as_title, tokenize_chunks, docx_question_level, tree_merge +from rag.nlp import rag_tokenizer, Node from deepdoc.parser import PdfParser, DocxParser, PlainParser, HtmlParser + + + class Docx(DocxParser): def __init__(self): pass @@ -55,49 +58,37 @@ class Docx(DocxParser): return [line for line in lines if line] def __call__(self, filename, binary=None, from_page=0, to_page=100000): - self.doc = Document( - filename) if not binary else Document(BytesIO(binary)) - pn = 0 - lines = [] - bull = bullets_category([p.text for p in self.doc.paragraphs]) - for p in self.doc.paragraphs: - if pn > to_page: - break - question_level, p_text = docx_question_level(p, bull) - if not p_text.strip("\n"): - continue - lines.append((question_level, p_text)) - - for run in p.runs: - if 'lastRenderedPageBreak' in run._element.xml: - pn += 1 - continue - if 'w:br' in run._element.xml and 'type="page"' in run._element.xml: - pn += 1 - - visit = [False for _ in range(len(lines))] - sections = [] - for s in range(len(lines)): - e = s + 1 - while e < len(lines): - if lines[e][0] <= lines[s][0]: + self.doc = Document( + filename) if not binary else Document(BytesIO(binary)) + pn = 0 + lines = [] + level_set = set() + bull = bullets_category([p.text for p in self.doc.paragraphs]) + for p in self.doc.paragraphs: + if pn > to_page: break - e += 1 - if e - s == 1 and visit[s]: - continue - sec = [] - next_level = lines[s][0] + 1 - while not sec and next_level < 22: - for i in range(s+1, e): - if lines[i][0] != next_level: + question_level, p_text = docx_question_level(p, bull) + if not p_text.strip("\n"): + continue + lines.append((question_level, p_text)) + level_set.add(question_level) + for run in p.runs: + if 'lastRenderedPageBreak' in run._element.xml: + pn += 1 continue - sec.append(lines[i][1]) - visit[i] = True - next_level += 1 - sec.insert(0, lines[s][1]) + if 'w:br' in run._element.xml and 'type="page"' in run._element.xml: + pn += 1 + + sorted_levels = sorted(level_set) + + h2_level = sorted_levels[1] if len(sorted_levels) > 1 else 1 + h2_level = sorted_levels[-2] if h2_level == sorted_levels[-1] and len(sorted_levels) > 2 else h2_level + + root = Node(level=0, depth=h2_level, texts=[]) + root.build_tree(lines) + + return [("\n").join(element) for element in root.get_tree() if element] - sections.append("\n".join(sec)) - return [s for s in sections if s] def __str__(self) -> str: return f''' @@ -163,7 +154,7 @@ def chunk(filename, binary=None, from_page=0, to_page=100000, chunks = Docx()(filename, binary) callback(0.7, "Finish parsing.") return tokenize_chunks(chunks, doc, eng, None) - + elif re.search(r"\.pdf$", filename, re.IGNORECASE): pdf_parser = Pdf() if parser_config.get("layout_recognize", "DeepDOC") == "Plain Text": @@ -172,7 +163,7 @@ def chunk(filename, binary=None, from_page=0, to_page=100000, from_page=from_page, to_page=to_page, callback=callback)[0]: sections.append(txt + poss) - elif re.search(r"\.txt$", filename, re.IGNORECASE): + elif re.search(r"\.(txt|md|markdown|mdx)$", filename, re.IGNORECASE): callback(0.1, "Start to parse.") txt = get_text(filename, binary) sections = txt.split("\n") @@ -203,13 +194,16 @@ def chunk(filename, binary=None, from_page=0, to_page=100000, make_colon_as_title(sections) bull = bullets_category(sections) - chunks = hierarchical_merge(bull, sections, 5) - if not chunks: + res = tree_merge(bull, sections, 2) + + + if not res: callback(0.99, "No chunk parsed out.") - return tokenize_chunks(["\n".join(ck) - for ck in chunks], doc, eng, pdf_parser) + return tokenize_chunks(res, doc, eng, pdf_parser) + # chunks = hierarchical_merge(bull, sections, 5) + # return tokenize_chunks(["\n".join(ck)for ck in chunks], doc, eng, pdf_parser) if __name__ == "__main__": import sys diff --git a/rag/app/tag.py b/rag/app/tag.py index 179d72e24..de2ce0fa6 100644 --- a/rag/app/tag.py +++ b/rag/app/tag.py @@ -138,6 +138,8 @@ def label_question(question, kbs): else: all_tags = json.loads(all_tags) tag_kbs = KnowledgebaseService.get_by_ids(tag_kb_ids) + if not tag_kbs: + return tags tags = settings.retrievaler.tag_query(question, list(set([kb.tenant_id for kb in tag_kbs])), tag_kb_ids, diff --git a/rag/flow/base.py b/rag/flow/base.py index fae5f1ed1..5edc280f8 100644 --- a/rag/flow/base.py +++ b/rag/flow/base.py @@ -56,6 +56,6 @@ class ProcessBase(ComponentBase): self.set_output("_elapsed_time", time.perf_counter() - self.output("_created_time")) return self.output() - @timeout(os.environ.get("COMPONENT_EXEC_TIMEOUT", 10 * 60)) + @timeout(int(os.environ.get("COMPONENT_EXEC_TIMEOUT", 10 * 60))) async def _invoke(self, **kwargs): raise NotImplementedError() diff --git a/rag/flow/chunker/chunker.py b/rag/flow/chunker/chunker.py index 2cbbf95cf..786680e01 100644 --- a/rag/flow/chunker/chunker.py +++ b/rag/flow/chunker/chunker.py @@ -241,7 +241,6 @@ class Chunker(ProcessBase): "laws": self._laws, "presentation": self._presentation, "one": self._one, - "toc": self._toc, } try: diff --git a/rag/llm/__init__.py b/rag/llm/__init__.py index d91f57736..8b6d40fdd 100644 --- a/rag/llm/__init__.py +++ b/rag/llm/__init__.py @@ -68,6 +68,7 @@ FACTORY_DEFAULT_BASE_URL = { SupportedLiteLLMProvider.Lingyi_AI: "https://api.lingyiwanwu.com/v1", SupportedLiteLLMProvider.GiteeAI: "https://ai.gitee.com/v1/", SupportedLiteLLMProvider.AI_302: "https://api.302.ai/v1", + SupportedLiteLLMProvider.Anthropic: "https://api.anthropic.com/", } diff --git a/rag/llm/chat_model.py b/rag/llm/chat_model.py index b43277fc0..a28f56fb8 100644 --- a/rag/llm/chat_model.py +++ b/rag/llm/chat_model.py @@ -36,7 +36,7 @@ from zhipuai import ZhipuAI from rag.llm import FACTORY_DEFAULT_BASE_URL, LITELLM_PROVIDER_PREFIX, SupportedLiteLLMProvider from rag.nlp import is_chinese, is_english -from rag.utils import num_tokens_from_string +from rag.utils import num_tokens_from_string, total_token_count_from_response # Error message constants @@ -143,9 +143,10 @@ class Base(ABC): logging.info("[HISTORY]" + json.dumps(history, ensure_ascii=False, indent=2)) if self.model_name.lower().find("qwen3") >= 0: kwargs["extra_body"] = {"enable_thinking": False} + response = self.client.chat.completions.create(model=self.model_name, messages=history, **gen_conf, **kwargs) - if any([not response.choices, not response.choices[0].message, not response.choices[0].message.content]): + if not response.choices or not response.choices[0].message or not response.choices[0].message.content: return "", 0 ans = response.choices[0].message.content.strip() if response.choices[0].finish_reason == "length": @@ -155,10 +156,12 @@ class Base(ABC): def _chat_streamly(self, history, gen_conf, **kwargs): logging.info("[HISTORY STREAMLY]" + json.dumps(history, ensure_ascii=False, indent=4)) reasoning_start = False + if kwargs.get("stop") or "stop" in gen_conf: response = self.client.chat.completions.create(model=self.model_name, messages=history, stream=True, **gen_conf, stop=kwargs.get("stop")) else: response = self.client.chat.completions.create(model=self.model_name, messages=history, stream=True, **gen_conf) + for resp in response: if not resp.choices: continue @@ -190,21 +193,30 @@ class Base(ABC): return ans + LENGTH_NOTIFICATION_CN return ans + LENGTH_NOTIFICATION_EN - def _exceptions(self, e, attempt): + @property + def _retryable_errors(self) -> set[str]: + return { + LLMErrorCode.ERROR_RATE_LIMIT, + LLMErrorCode.ERROR_SERVER, + } + + def _should_retry(self, error_code: str) -> bool: + return error_code in self._retryable_errors + + def _exceptions(self, e, attempt) -> str | None: logging.exception("OpenAI chat_with_tools") # Classify the error error_code = self._classify_error(e) if attempt == self.max_retries: error_code = LLMErrorCode.ERROR_MAX_RETRIES - # Check if it's a rate limit error or server error and not the last attempt - should_retry = error_code == LLMErrorCode.ERROR_RATE_LIMIT or error_code == LLMErrorCode.ERROR_SERVER - if not should_retry: - return f"{ERROR_PREFIX}: {error_code} - {str(e)}" + if self._should_retry(error_code): + delay = self._get_delay() + logging.warning(f"Error: {error_code}. Retrying in {delay:.2f} seconds... (Attempt {attempt + 1}/{self.max_retries})") + time.sleep(delay) + return None - delay = self._get_delay() - logging.warning(f"Error: {error_code}. Retrying in {delay:.2f} seconds... (Attempt {attempt + 1}/{self.max_retries})") - time.sleep(delay) + return f"{ERROR_PREFIX}: {error_code} - {str(e)}" def _verbose_tool_use(self, name, args, res): return "" + json.dumps({"name": name, "args": args, "result": res}, ensure_ascii=False, indent=2) + "" @@ -445,15 +457,7 @@ class Base(ABC): yield total_tokens def total_token_count(self, resp): - try: - return resp.usage.total_tokens - except Exception: - pass - try: - return resp["usage"]["total_tokens"] - except Exception: - pass - return 0 + return total_token_count_from_response(resp) def _calculate_dynamic_ctx(self, history): """Calculate dynamic context window size""" @@ -541,6 +545,14 @@ class AzureChat(Base): self.client = AzureOpenAI(api_key=api_key, azure_endpoint=base_url, api_version=api_version) self.model_name = model_name + @property + def _retryable_errors(self) -> set[str]: + return { + LLMErrorCode.ERROR_RATE_LIMIT, + LLMErrorCode.ERROR_SERVER, + LLMErrorCode.ERROR_QUOTA, + } + class BaiChuanChat(Base): _FACTORY_NAME = "BaiChuan" @@ -629,6 +641,10 @@ class ZhipuChat(Base): def _clean_conf(self, gen_conf): if "max_tokens" in gen_conf: del gen_conf["max_tokens"] + gen_conf = self._clean_conf_plealty(gen_conf) + return gen_conf + + def _clean_conf_plealty(self, gen_conf): if "presence_penalty" in gen_conf: del gen_conf["presence_penalty"] if "frequency_penalty" in gen_conf: @@ -636,22 +652,14 @@ class ZhipuChat(Base): return gen_conf def chat_with_tools(self, system: str, history: list, gen_conf: dict): - if "presence_penalty" in gen_conf: - del gen_conf["presence_penalty"] - if "frequency_penalty" in gen_conf: - del gen_conf["frequency_penalty"] + gen_conf = self._clean_conf_plealty(gen_conf) return super().chat_with_tools(system, history, gen_conf) def chat_streamly(self, system, history, gen_conf={}, **kwargs): if system and history and history[0].get("role") != "system": history.insert(0, {"role": "system", "content": system}) - if "max_tokens" in gen_conf: - del gen_conf["max_tokens"] - if "presence_penalty" in gen_conf: - del gen_conf["presence_penalty"] - if "frequency_penalty" in gen_conf: - del gen_conf["frequency_penalty"] + gen_conf = self._clean_conf(gen_conf) ans = "" tk_count = 0 try: @@ -677,11 +685,7 @@ class ZhipuChat(Base): yield tk_count def chat_streamly_with_tools(self, system: str, history: list, gen_conf: dict): - if "presence_penalty" in gen_conf: - del gen_conf["presence_penalty"] - if "frequency_penalty" in gen_conf: - del gen_conf["frequency_penalty"] - + gen_conf = self._clean_conf_plealty(gen_conf) return super().chat_streamly_with_tools(system, history, gen_conf) @@ -858,6 +862,7 @@ class MistralChat(Base): return gen_conf def _chat(self, history, gen_conf={}, **kwargs): + gen_conf = self._clean_conf(gen_conf) response = self.client.chat(model=self.model_name, messages=history, **gen_conf) ans = response.choices[0].message.content if response.choices[0].finish_reason == "length": @@ -870,9 +875,7 @@ class MistralChat(Base): def chat_streamly(self, system, history, gen_conf={}, **kwargs): if system and history and history[0].get("role") != "system": history.insert(0, {"role": "system", "content": system}) - for k in list(gen_conf.keys()): - if k not in ["temperature", "top_p", "max_tokens"]: - del gen_conf[k] + gen_conf = self._clean_conf(gen_conf) ans = "" total_tokens = 0 try: @@ -1302,10 +1305,6 @@ class LiteLLMBase(ABC): "302.AI", ] - import litellm - - litellm._turn_on_debug() - def __init__(self, key, model_name, base_url=None, **kwargs): self.timeout = int(os.environ.get("LM_TIMEOUT_SECONDS", 600)) self.provider = kwargs.get("provider", "") @@ -1429,21 +1428,30 @@ class LiteLLMBase(ABC): return ans + LENGTH_NOTIFICATION_CN return ans + LENGTH_NOTIFICATION_EN - def _exceptions(self, e, attempt): + @property + def _retryable_errors(self) -> set[str]: + return { + LLMErrorCode.ERROR_RATE_LIMIT, + LLMErrorCode.ERROR_SERVER, + } + + def _should_retry(self, error_code: str) -> bool: + return error_code in self._retryable_errors + + def _exceptions(self, e, attempt) -> str | None: logging.exception("OpenAI chat_with_tools") # Classify the error error_code = self._classify_error(e) if attempt == self.max_retries: error_code = LLMErrorCode.ERROR_MAX_RETRIES - # Check if it's a rate limit error or server error and not the last attempt - should_retry = error_code == LLMErrorCode.ERROR_RATE_LIMIT or error_code == LLMErrorCode.ERROR_SERVER - if not should_retry: - return f"{ERROR_PREFIX}: {error_code} - {str(e)}" + if self._should_retry(error_code): + delay = self._get_delay() + logging.warning(f"Error: {error_code}. Retrying in {delay:.2f} seconds... (Attempt {attempt + 1}/{self.max_retries})") + time.sleep(delay) + return None - delay = self._get_delay() - logging.warning(f"Error: {error_code}. Retrying in {delay:.2f} seconds... (Attempt {attempt + 1}/{self.max_retries})") - time.sleep(delay) + return f"{ERROR_PREFIX}: {error_code} - {str(e)}" def _verbose_tool_use(self, name, args, res): return "" + json.dumps({"name": name, "args": args, "result": res}, ensure_ascii=False, indent=2) + "" diff --git a/rag/llm/cv_model.py b/rag/llm/cv_model.py index bbb81f572..0a1559319 100644 --- a/rag/llm/cv_model.py +++ b/rag/llm/cv_model.py @@ -25,7 +25,7 @@ from openai import OpenAI from openai.lib.azure import AzureOpenAI from zhipuai import ZhipuAI from rag.nlp import is_english -from rag.prompts import vision_llm_describe_prompt +from rag.prompts.generator import vision_llm_describe_prompt from rag.utils import num_tokens_from_string diff --git a/rag/llm/embedding_model.py b/rag/llm/embedding_model.py index 5f7f0cf82..5db85b6b5 100644 --- a/rag/llm/embedding_model.py +++ b/rag/llm/embedding_model.py @@ -33,7 +33,7 @@ from zhipuai import ZhipuAI from api import settings from api.utils.file_utils import get_home_cache_dir from api.utils.log_utils import log_exception -from rag.utils import num_tokens_from_string, truncate +from rag.utils import num_tokens_from_string, truncate, total_token_count_from_response class Base(ABC): @@ -52,15 +52,7 @@ class Base(ABC): raise NotImplementedError("Please implement encode method!") def total_token_count(self, resp): - try: - return resp.usage.total_tokens - except Exception: - pass - try: - return resp["usage"]["total_tokens"] - except Exception: - pass - return 0 + return total_token_count_from_response(resp) class DefaultEmbedding(Base): @@ -497,7 +489,6 @@ class MistralEmbed(Base): def encode_queries(self, text): import time import random - retry_max = 5 while retry_max > 0: try: @@ -662,7 +653,7 @@ class OpenAI_APIEmbed(OpenAIEmbed): def __init__(self, key, model_name, base_url): if not base_url: raise ValueError("url cannot be None") - #base_url = urljoin(base_url, "v1") + base_url = urljoin(base_url, "v1") self.client = OpenAI(api_key=key, base_url=base_url) self.model_name = model_name.split("___")[0] @@ -945,6 +936,7 @@ class GiteeEmbed(SILICONFLOWEmbed): base_url = "https://ai.gitee.com/v1/embeddings" super().__init__(key, model_name, base_url) + class DeepInfraEmbed(OpenAIEmbed): _FACTORY_NAME = "DeepInfra" @@ -963,7 +955,7 @@ class Ai302Embed(Base): super().__init__(key, model_name, base_url) -class CometEmbed(OpenAIEmbed): +class CometAPIEmbed(OpenAIEmbed): _FACTORY_NAME = "CometAPI" def __init__(self, key, model_name, base_url="https://api.cometapi.com/v1"): diff --git a/rag/llm/rerank_model.py b/rag/llm/rerank_model.py index a69efa7eb..7256b047b 100644 --- a/rag/llm/rerank_model.py +++ b/rag/llm/rerank_model.py @@ -30,7 +30,7 @@ from yarl import URL from api import settings from api.utils.file_utils import get_home_cache_dir from api.utils.log_utils import log_exception -from rag.utils import num_tokens_from_string, truncate +from rag.utils import num_tokens_from_string, truncate, total_token_count_from_response class Base(ABC): def __init__(self, key, model_name, **kwargs): @@ -44,18 +44,7 @@ class Base(ABC): raise NotImplementedError("Please implement encode method!") def total_token_count(self, resp): - if hasattr(resp, "usage") and hasattr(resp.usage, "total_tokens"): - try: - return resp.usage.total_tokens - except Exception: - pass - - if 'usage' in resp and 'total_tokens' in resp['usage']: - try: - return resp["usage"]["total_tokens"] - except Exception: - pass - return 0 + return total_token_count_from_response(resp) class DefaultRerank(Base): @@ -365,7 +354,7 @@ class OpenAI_APIRerank(Base): max_rank = np.max(rank) # Avoid division by zero if all ranks are identical - if np.isclose(min_rank, max_rank, atol=1e-3): + if not np.isclose(min_rank, max_rank, atol=1e-3): rank = (rank - min_rank) / (max_rank - min_rank) else: rank = np.zeros_like(rank) diff --git a/rag/llm/sequence2txt_model.py b/rag/llm/sequence2txt_model.py index b2d1a5aaa..3a7bcf72c 100644 --- a/rag/llm/sequence2txt_model.py +++ b/rag/llm/sequence2txt_model.py @@ -236,7 +236,7 @@ class DeepInfraSeq2txt(Base): self.model_name = model_name -class CometSeq2txt(Base): +class CometAPISeq2txt(Base): _FACTORY_NAME = "CometAPI" def __init__(self, key, model_name="whisper-1", base_url="https://api.cometapi.com/v1", **kwargs): diff --git a/rag/nlp/__init__.py b/rag/nlp/__init__.py index 37e59205d..736298679 100644 --- a/rag/nlp/__init__.py +++ b/rag/nlp/__init__.py @@ -189,6 +189,13 @@ BULLET_PATTERN = [[ r"Chapter (I+V?|VI*|XI|IX|X)", r"Section [0-9]+", r"Article [0-9]+" +], [ + r"^#[^#]", + r"^##[^#]", + r"^###.*", + r"^####.*", + r"^#####.*", + r"^######.*", ] ] @@ -429,8 +436,58 @@ def not_title(txt): return True return re.search(r"[,;,。;!!]", txt) +def tree_merge(bull, sections, depth): + + if not sections or bull < 0: + return sections + if isinstance(sections[0], type("")): + sections = [(s, "") for s in sections] + + # filter out position information in pdf sections + sections = [(t, o) for t, o in sections if + t and len(t.split("@")[0].strip()) > 1 and not re.match(r"[0-9]+$", t.split("@")[0].strip())] + + def get_level(bull, section): + text, layout = section + text = re.sub(r"\u3000", " ", text).strip() + + for i, title in enumerate(BULLET_PATTERN[bull]): + if re.match(title, text.strip()): + return i+1, text + else: + if re.search(r"(title|head)", layout) and not not_title(text): + return len(BULLET_PATTERN[bull])+1, text + else: + return len(BULLET_PATTERN[bull])+2, text + + level_set = set() + lines = [] + for section in sections: + level, text = get_level(bull, section) + + if not text.strip("\n"): + continue + + lines.append((level, text)) + level_set.add(level) + + sorted_levels = sorted(list(level_set)) + + if depth <= len(sorted_levels): + target_level = sorted_levels[depth - 1] + else: + target_level = sorted_levels[-1] + + if target_level == len(BULLET_PATTERN[bull]) + 2: + target_level = sorted_levels[-2] if len(sorted_levels) > 1 else sorted_levels[0] + + root = Node(level=0, depth=target_level, texts=[]) + root.build_tree(lines) + + return [("\n").join(element) for element in root.get_tree() if element] def hierarchical_merge(bull, sections, depth): + if not sections or bull < 0: return [] if isinstance(sections[0], type("")): @@ -632,7 +689,7 @@ def docx_question_level(p, bull=-1): for j, title in enumerate(BULLET_PATTERN[bull]): if re.match(title, txt): return j + 1, txt - return len(BULLET_PATTERN[bull]), txt + return len(BULLET_PATTERN[bull])+1, txt def concat_img(img1, img2): @@ -735,3 +792,68 @@ def get_delimiters(delimiters: str): dels_pattern = "|".join(dels) return dels_pattern + +class Node: + def __init__(self, level, depth=-1, texts=None): + self.level = level + self.depth = depth + self.texts = texts if texts is not None else [] # 存放内容 + self.children = [] # 子节点 + + def add_child(self, child_node): + self.children.append(child_node) + + def get_children(self): + return self.children + + def get_level(self): + return self.level + + def get_texts(self): + return self.texts + + def set_texts(self, texts): + self.texts = texts + + def add_text(self, text): + self.texts.append(text) + + def clear_text(self): + self.texts = [] + + def __repr__(self): + return f"Node(level={self.level}, texts={self.texts}, children={len(self.children)})" + + def build_tree(self, lines): + stack = [self] + for line in lines: + level, text = line + node = Node(level=level, texts=[text]) + + if level <= self.depth or self.depth == -1: + while stack and level <= stack[-1].get_level(): + stack.pop() + + stack[-1].add_child(node) + stack.append(node) + else: + stack[-1].add_text(text) + return self + + def get_tree(self): + tree_list = [] + self._dfs(self, tree_list, 0, []) + return tree_list + + def _dfs(self, node, tree_list, current_depth, titles): + + if node.get_texts(): + if 0 < node.get_level() < self.depth: + titles.extend(node.get_texts()) + else: + combined_text = ["\n".join(titles + node.get_texts())] + tree_list.append(combined_text) + + + for child in node.get_children(): + self._dfs(child, tree_list, current_depth + 1, titles.copy()) diff --git a/rag/nlp/query.py b/rag/nlp/query.py index b708ff490..68d2d2979 100644 --- a/rag/nlp/query.py +++ b/rag/nlp/query.py @@ -56,7 +56,7 @@ class FulltextQueryer: def rmWWW(txt): patts = [ ( - r"是*(什么样的|哪家|一下|那家|请问|啥样|咋样了|什么时候|何时|何地|何人|是否|是不是|多少|哪里|怎么|哪儿|怎么样|如何|哪些|是啥|啥是|啊|吗|呢|吧|咋|什么|有没有|呀|谁|哪位|哪个)是*", + r"是*(怎么办|什么样的|哪家|一下|那家|请问|啥样|咋样了|什么时候|何时|何地|何人|是否|是不是|多少|哪里|怎么|哪儿|怎么样|如何|哪些|是啥|啥是|啊|吗|呢|吧|咋|什么|有没有|呀|谁|哪位|哪个)是*", "", ), (r"(^| )(what|who|how|which|where|why)('re|'s)? ", " "), diff --git a/rag/prompts/__init__.py b/rag/prompts/__init__.py index f5616dddd..b8b924b93 100644 --- a/rag/prompts/__init__.py +++ b/rag/prompts/__init__.py @@ -1,6 +1,6 @@ -from . import prompts +from . import generator -__all__ = [name for name in dir(prompts) +__all__ = [name for name in dir(generator) if not name.startswith('_')] -globals().update({name: getattr(prompts, name) for name in __all__}) \ No newline at end of file +globals().update({name: getattr(generator, name) for name in __all__}) \ No newline at end of file diff --git a/rag/prompts/prompts.py b/rag/prompts/generator.py similarity index 99% rename from rag/prompts/prompts.py rename to rag/prompts/generator.py index cc23da6ba..89c9c5c1e 100644 --- a/rag/prompts/prompts.py +++ b/rag/prompts/generator.py @@ -22,7 +22,7 @@ from typing import Tuple import jinja2 import json_repair from api.utils import hash_str2int -from rag.prompts.prompt_template import load_prompt +from rag.prompts.template import load_prompt from rag.settings import TAG_FLD from rag.utils import encoder, num_tokens_from_string diff --git a/rag/prompts/prompt_template.py b/rag/prompts/template.py similarity index 100% rename from rag/prompts/prompt_template.py rename to rag/prompts/template.py diff --git a/rag/settings.py b/rag/settings.py index 70d1b6234..c78728783 100644 --- a/rag/settings.py +++ b/rag/settings.py @@ -15,7 +15,7 @@ # import os import logging -from api.utils import get_base_config, decrypt_database_config +from api.utils.configs import get_base_config, decrypt_database_config from api.utils.file_utils import get_project_base_directory # Server diff --git a/rag/utils/__init__.py b/rag/utils/__init__.py index 8468bf4c3..22445da92 100644 --- a/rag/utils/__init__.py +++ b/rag/utils/__init__.py @@ -88,6 +88,20 @@ def num_tokens_from_string(string: str) -> int: except Exception: return 0 +def total_token_count_from_response(resp): + if hasattr(resp, "usage") and hasattr(resp.usage, "total_tokens"): + try: + return resp.usage.total_tokens + except Exception: + pass + + if 'usage' in resp and 'total_tokens' in resp['usage']: + try: + return resp["usage"]["total_tokens"] + except Exception: + pass + return 0 + def truncate(string: str, max_len: int) -> str: """Returns truncated text if the length of text exceed max_len.""" diff --git a/rag/utils/minio_conn.py b/rag/utils/minio_conn.py index 80a723a5c..c26e5606d 100644 --- a/rag/utils/minio_conn.py +++ b/rag/utils/minio_conn.py @@ -108,6 +108,19 @@ class RAGFlowMinio: logging.exception(f"obj_exist {bucket}/{filename} got exception") return False + def bucket_exists(self, bucket): + try: + if not self.conn.bucket_exists(bucket): + return False + else: + return True + except S3Error as e: + if e.code in ["NoSuchKey", "NoSuchBucket", "ResourceNotFound"]: + return False + except Exception: + logging.exception(f"bucket_exist {bucket} got exception") + return False + def get_presigned_url(self, bucket, fnm, expires): for _ in range(10): try: diff --git a/rag/utils/opendal_conn.py b/rag/utils/opendal_conn.py index c4fe92563..7642b33d4 100644 --- a/rag/utils/opendal_conn.py +++ b/rag/utils/opendal_conn.py @@ -3,7 +3,7 @@ import logging import pymysql from urllib.parse import quote_plus -from api.utils import get_base_config +from api.utils.configs import get_base_config from rag.utils import singleton diff --git a/uv.lock b/uv.lock index 943912822..328d3aac7 100644 --- a/uv.lock +++ b/uv.lock @@ -861,15 +861,6 @@ wheels = [ { url = "https://mirrors.aliyun.com/pypi/packages/d1/d6/3965ed04c63042e047cb6a3e6ed1a63a35087b6a609aa3a15ed8ac56c221/colorama-0.4.6-py2.py3-none-any.whl", hash = "sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6" }, ] -[[package]] -name = "colorclass" -version = "2.2.2" -source = { registry = "https://mirrors.aliyun.com/pypi/simple" } -sdist = { url = "https://mirrors.aliyun.com/pypi/packages/d7/1a/31ff00a33569a3b59d65bbdc445c73e12f92ad28195b7ace299f68b9af70/colorclass-2.2.2.tar.gz", hash = "sha256:6d4fe287766166a98ca7bc6f6312daf04a0481b1eda43e7173484051c0ab4366" } -wheels = [ - { url = "https://mirrors.aliyun.com/pypi/packages/30/b6/daf3e2976932da4ed3579cff7a30a53d22ea9323ee4f0d8e43be60454897/colorclass-2.2.2-py2.py3-none-any.whl", hash = "sha256:6f10c273a0ef7a1150b1120b6095cbdd68e5cf36dfd5d0fc957a2500bbf99a55" }, -] - [[package]] name = "coloredlogs" version = "15.0.1" @@ -882,15 +873,6 @@ wheels = [ { url = "https://mirrors.aliyun.com/pypi/packages/a7/06/3d6badcf13db419e25b07041d9c7b4a2c331d3f4e7134445ec5df57714cd/coloredlogs-15.0.1-py2.py3-none-any.whl", hash = "sha256:612ee75c546f53e92e70049c9dbfcc18c935a2b9a53b66085ce9ef6a6e5c0934" }, ] -[[package]] -name = "compressed-rtf" -version = "1.0.7" -source = { registry = "https://mirrors.aliyun.com/pypi/simple" } -sdist = { url = "https://mirrors.aliyun.com/pypi/packages/b7/0c/929a4e8ef9d7143f54d77dadb5f370cc7b98534b1bd6e1124d0abe8efb24/compressed_rtf-1.0.7.tar.gz", hash = "sha256:7c30859334839f3cdc7d10796af5b434bb326b9df7cb5a65e95a8eacb2951b0e" } -wheels = [ - { url = "https://mirrors.aliyun.com/pypi/packages/07/1d/62f5bf92e12335eb63517f42671ed78512d48bbc69e02a942dd7b90f03f0/compressed_rtf-1.0.7-py3-none-any.whl", hash = "sha256:b7904921d78c67a0a4b7fff9fb361a00ae2b447b6edca010ce321cd98fa0fcc0" }, -] - [[package]] name = "contourpy" version = "1.3.2" @@ -1340,23 +1322,6 @@ wheels = [ { url = "https://mirrors.aliyun.com/pypi/packages/fc/da/8376678b4a9ae0f9418d93df9c9cf851dced49c95ceb38daac6651e38f7a/duckduckgo_search-7.5.5-py3-none-any.whl", hash = "sha256:c71a0661aa436f215d9a05d653af424affb58825ab3e79f3b788053cbdee9ebc" }, ] -[[package]] -name = "easygui" -version = "0.98.3" -source = { registry = "https://mirrors.aliyun.com/pypi/simple" } -sdist = { url = "https://mirrors.aliyun.com/pypi/packages/cc/ad/e35f7a30272d322be09dc98592d2f55d27cc933a7fde8baccbbeb2bd9409/easygui-0.98.3.tar.gz", hash = "sha256:d653ff79ee1f42f63b5a090f2f98ce02335d86ad8963b3ce2661805cafe99a04" } -wheels = [ - { url = "https://mirrors.aliyun.com/pypi/packages/8e/a7/b276ff776533b423710a285c8168b52551cb2ab0855443131fdc7fd8c16f/easygui-0.98.3-py2.py3-none-any.whl", hash = "sha256:33498710c68b5376b459cd3fc48d1d1f33822139eb3ed01defbc0528326da3ba" }, -] - -[[package]] -name = "ebcdic" -version = "1.1.1" -source = { registry = "https://mirrors.aliyun.com/pypi/simple" } -wheels = [ - { url = "https://mirrors.aliyun.com/pypi/packages/0d/2f/633031205333bee5f9f93761af8268746aa75f38754823aabb8570eb245b/ebcdic-1.1.1-py2.py3-none-any.whl", hash = "sha256:33b4cb729bc2d0bf46cc1847b0e5946897cb8d3f53520c5b9aa5fa98d7e735f1" }, -] - [[package]] name = "editdistance" version = "0.8.1" @@ -1470,24 +1435,6 @@ wheels = [ { url = "https://mirrors.aliyun.com/pypi/packages/36/f4/c6e662dade71f56cd2f3735141b265c3c79293c109549c1e6933b0651ffc/exceptiongroup-1.3.0-py3-none-any.whl", hash = "sha256:4d111e6e0c13d0644cad6ddaa7ed0261a0b36971f6d23e7ec9b4b9097da78a10" }, ] -[[package]] -name = "extract-msg" -version = "0.55.0" -source = { registry = "https://mirrors.aliyun.com/pypi/simple" } -dependencies = [ - { name = "beautifulsoup4" }, - { name = "compressed-rtf" }, - { name = "ebcdic" }, - { name = "olefile" }, - { name = "red-black-tree-mod" }, - { name = "rtfde" }, - { name = "tzlocal" }, -] -sdist = { url = "https://mirrors.aliyun.com/pypi/packages/5e/65/c70afb3b119a44b3ee36b029485dc15326cf3a7c50da19a1ecbbf949c5d1/extract_msg-0.55.0.tar.gz", hash = "sha256:cf08283498c3dfcc7f894dad1579f52e3ced9fb76b865c2355cbe757af8a54e1" } -wheels = [ - { url = "https://mirrors.aliyun.com/pypi/packages/53/81/87d5241036046ea17c5c8db228f4c9e04e07e53b627015d4496a99449aaf/extract_msg-0.55.0-py3-none-any.whl", hash = "sha256:baf0cdee9a8d267b70c366bc57ceb03dbfa1e7ab2dca6824169a7fe623f0917c" }, -] - [[package]] name = "fake-http-header" version = "0.3.5" @@ -2948,11 +2895,11 @@ wheels = [ [[package]] name = "lark" -version = "1.1.9" +version = "1.2.2" source = { registry = "https://mirrors.aliyun.com/pypi/simple" } -sdist = { url = "https://mirrors.aliyun.com/pypi/packages/2c/e1/804b6196b3fbdd0f8ba785fc62837b034782a891d6f663eea2f30ca23cfa/lark-1.1.9.tar.gz", hash = "sha256:15fa5236490824c2c4aba0e22d2d6d823575dcaf4cdd1848e34b6ad836240fba" } +sdist = { url = "https://mirrors.aliyun.com/pypi/packages/af/60/bc7622aefb2aee1c0b4ba23c1446d3e30225c8770b38d7aedbfb65ca9d5a/lark-1.2.2.tar.gz", hash = "sha256:ca807d0162cd16cef15a8feecb862d7319e7a09bdb13aef927968e45040fed80" } wheels = [ - { url = "https://mirrors.aliyun.com/pypi/packages/e7/9c/eef7c591e6dc952f3636cfe0df712c0f9916cedf317810a3bb53ccb65cdd/lark-1.1.9-py3-none-any.whl", hash = "sha256:a0dd3a87289f8ccbb325901e4222e723e7d745dbfc1803eaf5f3d2ace19cf2db" }, + { url = "https://mirrors.aliyun.com/pypi/packages/2d/00/d90b10b962b4277f5e64a78b6609968859ff86889f5b898c1a778c06ec00/lark-1.2.2-py3-none-any.whl", hash = "sha256:c2276486b02f0f1b90be155f2c8ba4a8e194d42775786db622faccd652d8e80c" }, ] [[package]] @@ -3439,19 +3386,6 @@ wheels = [ { url = "https://mirrors.aliyun.com/pypi/packages/b1/ef/27dd35a7049c9a4f4211c6cd6a8c9db0a50647546f003a5867827ec45391/msgspec-0.19.0-cp312-cp312-win_amd64.whl", hash = "sha256:067f0de1c33cfa0b6a8206562efdf6be5985b988b53dd244a8e06f993f27c8c0" }, ] -[[package]] -name = "msoffcrypto-tool" -version = "5.4.2" -source = { registry = "https://mirrors.aliyun.com/pypi/simple" } -dependencies = [ - { name = "cryptography" }, - { name = "olefile" }, -] -sdist = { url = "https://mirrors.aliyun.com/pypi/packages/d2/b7/0fd6573157e0ec60c0c470e732ab3322fba4d2834fd24e1088d670522a01/msoffcrypto_tool-5.4.2.tar.gz", hash = "sha256:44b545adba0407564a0cc3d6dde6ca36b7c0fdf352b85bca51618fa1d4817370" } -wheels = [ - { url = "https://mirrors.aliyun.com/pypi/packages/03/54/7f6d3d9acad083dae8c22d9ab483b657359a1bf56fee1d7af88794677707/msoffcrypto_tool-5.4.2-py3-none-any.whl", hash = "sha256:274fe2181702d1e5a107ec1b68a4c9fea997a44972ae1cc9ae0cb4f6a50fef0e" }, -] - [[package]] name = "multidict" version = "6.6.3" @@ -3801,32 +3735,6 @@ wheels = [ { url = "https://mirrors.aliyun.com/pypi/packages/9e/4e/0d0c945463719429b7bd21dece907ad0bde437a2ff12b9b12fee94722ab0/nvidia_nvtx_cu12-12.6.77-py3-none-manylinux2014_x86_64.whl", hash = "sha256:6574241a3ec5fdc9334353ab8c479fe75841dbe8f4532a8fc97ce63503330ba1" }, ] -[[package]] -name = "olefile" -version = "0.47" -source = { registry = "https://mirrors.aliyun.com/pypi/simple" } -sdist = { url = "https://mirrors.aliyun.com/pypi/packages/69/1b/077b508e3e500e1629d366249c3ccb32f95e50258b231705c09e3c7a4366/olefile-0.47.zip", hash = "sha256:599383381a0bf3dfbd932ca0ca6515acd174ed48870cbf7fee123d698c192c1c" } -wheels = [ - { url = "https://mirrors.aliyun.com/pypi/packages/17/d3/b64c356a907242d719fc668b71befd73324e47ab46c8ebbbede252c154b2/olefile-0.47-py2.py3-none-any.whl", hash = "sha256:543c7da2a7adadf21214938bb79c83ea12b473a4b6ee4ad4bf854e7715e13d1f" }, -] - -[[package]] -name = "oletools" -version = "0.60.2" -source = { registry = "https://mirrors.aliyun.com/pypi/simple" } -dependencies = [ - { name = "colorclass" }, - { name = "easygui" }, - { name = "msoffcrypto-tool", marker = "(platform_python_implementation != 'PyPy' and sys_platform == 'darwin') or (platform_python_implementation != 'PyPy' and sys_platform == 'win32') or (sys_platform != 'darwin' and sys_platform != 'win32')" }, - { name = "olefile" }, - { name = "pcodedmp" }, - { name = "pyparsing" }, -] -sdist = { url = "https://mirrors.aliyun.com/pypi/packages/5c/2f/037f40e44706d542b94a2312ccc33ee2701ebfc9a83b46b55263d49ce55a/oletools-0.60.2.zip", hash = "sha256:ad452099f4695ffd8855113f453348200d195ee9fa341a09e197d66ee7e0b2c3" } -wheels = [ - { url = "https://mirrors.aliyun.com/pypi/packages/ac/ff/05257b7183279b80ecec6333744de23f48f0faeeba46c93e6d13ce835515/oletools-0.60.2-py2.py3-none-any.whl", hash = "sha256:72ad8bd748fd0c4e7b5b4733af770d11543ebb2bf2697455f99f975fcd50cc96" }, -] - [[package]] name = "ollama" version = "0.2.1" @@ -4289,19 +4197,6 @@ wheels = [ { url = "https://mirrors.aliyun.com/pypi/packages/87/2b/b50d3d08ea0fc419c183a84210571eba005328efa62b6b98bc28e9ead32a/patsy-1.0.1-py2.py3-none-any.whl", hash = "sha256:751fb38f9e97e62312e921a1954b81e1bb2bcda4f5eeabaf94db251ee791509c" }, ] -[[package]] -name = "pcodedmp" -version = "1.2.6" -source = { registry = "https://mirrors.aliyun.com/pypi/simple" } -dependencies = [ - { name = "oletools" }, - { name = "win-unicode-console", marker = "platform_python_implementation != 'PyPy' and sys_platform == 'win32'" }, -] -sdist = { url = "https://mirrors.aliyun.com/pypi/packages/3d/20/6d461e29135f474408d0d7f95b2456a9ba245560768ee51b788af10f7429/pcodedmp-1.2.6.tar.gz", hash = "sha256:025f8c809a126f45a082ffa820893e6a8d990d9d7ddb68694b5a9f0a6dbcd955" } -wheels = [ - { url = "https://mirrors.aliyun.com/pypi/packages/ba/72/b380fb5c89d89c3afafac8cf02a71a45f4f4a4f35531ca949a34683962d1/pcodedmp-1.2.6-py2.py3-none-any.whl", hash = "sha256:4441f7c0ab4cbda27bd4668db3b14f36261d86e5059ce06c0828602cbe1c4278" }, -] - [[package]] name = "pdfminer-six" version = "20221105" @@ -5414,7 +5309,6 @@ dependencies = [ { name = "elastic-transport" }, { name = "elasticsearch" }, { name = "elasticsearch-dsl" }, - { name = "extract-msg" }, { name = "filelock" }, { name = "flasgger" }, { name = "flask" }, @@ -5435,6 +5329,7 @@ dependencies = [ { name = "itsdangerous" }, { name = "json-repair" }, { name = "langfuse" }, + { name = "lark" }, { name = "litellm" }, { name = "markdown" }, { name = "markdown-to-json" }, @@ -5567,7 +5462,6 @@ requires-dist = [ { name = "elastic-transport", specifier = "==8.12.0" }, { name = "elasticsearch", specifier = "==8.12.1" }, { name = "elasticsearch-dsl", specifier = "==8.12.0" }, - { name = "extract-msg", specifier = ">=0.39.0" }, { name = "fastembed", marker = "(platform_machine != 'x86_64' and extra == 'full') or (sys_platform == 'darwin' and extra == 'full')", specifier = ">=0.3.6,<0.4.0" }, { name = "fastembed-gpu", marker = "platform_machine == 'x86_64' and sys_platform != 'darwin' and extra == 'full'", specifier = ">=0.3.6,<0.4.0" }, { name = "filelock", specifier = "==3.15.4" }, @@ -5591,6 +5485,7 @@ requires-dist = [ { name = "itsdangerous", specifier = "==2.1.2" }, { name = "json-repair", specifier = "==0.35.0" }, { name = "langfuse", specifier = ">=2.60.0" }, + { name = "lark", specifier = ">=1.2.2" }, { name = "litellm", specifier = ">=1.74.15.post1" }, { name = "markdown", specifier = "==3.6" }, { name = "markdown-to-json", specifier = "==2.1.1" }, @@ -5746,12 +5641,6 @@ wheels = [ { url = "https://mirrors.aliyun.com/pypi/packages/c2/5a/2f2e7fc026d5e64b5408aa3fbe0296a6407b8481196cae4daacacb3a3ae0/readerwriterlock-1.0.9-py3-none-any.whl", hash = "sha256:8c4b704e60d15991462081a27ef46762fea49b478aa4426644f2146754759ca7" }, ] -[[package]] -name = "red-black-tree-mod" -version = "1.22" -source = { registry = "https://mirrors.aliyun.com/pypi/simple" } -sdist = { url = "https://mirrors.aliyun.com/pypi/packages/48/75/bfa342a2ebfc9623b701f1c6995b9906fd6dd2cedf6bce777d09e23303ac/red-black-tree-mod-1.22.tar.gz", hash = "sha256:38e3652903a2bf96379c27c2082ca0b7b905158662dd7ef0c97f4fd93a9aa908" } - [[package]] name = "referencing" version = "0.36.2" @@ -6005,19 +5894,6 @@ wheels = [ { url = "https://mirrors.aliyun.com/pypi/packages/64/8d/0133e4eb4beed9e425d9a98ed6e081a55d195481b7632472be1af08d2f6b/rsa-4.9.1-py3-none-any.whl", hash = "sha256:68635866661c6836b8d39430f97a996acbd61bfa49406748ea243539fe239762" }, ] -[[package]] -name = "rtfde" -version = "0.1.2.1" -source = { registry = "https://mirrors.aliyun.com/pypi/simple" } -dependencies = [ - { name = "lark" }, - { name = "oletools" }, -] -sdist = { url = "https://mirrors.aliyun.com/pypi/packages/66/f1/3fafc33cd80cc605509ced36dbbb74c3c365d5859b0b57b6500e4a8ca8a5/rtfde-0.1.2.1.tar.gz", hash = "sha256:ea2653fb163ef1e9fdd1b0849bef88b0ba82537f860d4aca5b2c49f556efaaaa" } -wheels = [ - { url = "https://mirrors.aliyun.com/pypi/packages/b6/dd/641e9cf68d4242aaf7ce9653498009d8925080b6664993988bd50468932a/rtfde-0.1.2.1-py3-none-any.whl", hash = "sha256:c44dfa923a435c54cdbdd0e0f5352a4075542af317af061f82f2d4f032271645" }, -] - [[package]] name = "ruamel-base" version = "1.0.0" @@ -7025,18 +6901,6 @@ wheels = [ { url = "https://mirrors.aliyun.com/pypi/packages/5c/23/c7abc0ca0a1526a0774eca151daeb8de62ec457e77262b66b359c3c7679e/tzdata-2025.2-py2.py3-none-any.whl", hash = "sha256:1a403fada01ff9221ca8044d701868fa132215d84beb92242d9acd2147f667a8" }, ] -[[package]] -name = "tzlocal" -version = "5.3.1" -source = { registry = "https://mirrors.aliyun.com/pypi/simple" } -dependencies = [ - { name = "tzdata", marker = "sys_platform == 'win32'" }, -] -sdist = { url = "https://mirrors.aliyun.com/pypi/packages/8b/2e/c14812d3d4d9cd1773c6be938f89e5735a1f11a9f184ac3639b93cef35d5/tzlocal-5.3.1.tar.gz", hash = "sha256:cceffc7edecefea1f595541dbd6e990cb1ea3d19bf01b2809f362a03dd7921fd" } -wheels = [ - { url = "https://mirrors.aliyun.com/pypi/packages/c2/14/e2a54fabd4f08cd7af1c07030603c3356b74da07f7cc056e600436edfa17/tzlocal-5.3.1-py3-none-any.whl", hash = "sha256:eb1a66c3ef5847adf7a834f1be0800581b683b5608e74f86ecbcef8ab91bb85d" }, -] - [[package]] name = "umap-learn" version = "0.5.6" @@ -7281,12 +7145,6 @@ dependencies = [ ] sdist = { url = "https://mirrors.aliyun.com/pypi/packages/67/35/25e68fbc99e672127cc6fbb14b8ec1ba3dfef035bf1e4c90f78f24a80b7d/wikipedia-1.4.0.tar.gz", hash = "sha256:db0fad1829fdd441b1852306e9856398204dc0786d2996dd2e0c8bb8e26133b2" } -[[package]] -name = "win-unicode-console" -version = "0.5" -source = { registry = "https://mirrors.aliyun.com/pypi/simple" } -sdist = { url = "https://mirrors.aliyun.com/pypi/packages/89/8d/7aad74930380c8972ab282304a2ff45f3d4927108bb6693cabcc9fc6a099/win_unicode_console-0.5.zip", hash = "sha256:d4142d4d56d46f449d6f00536a73625a871cba040f0bc1a2e305a04578f07d1e" } - [[package]] name = "win32-setctime" version = "1.2.0" diff --git a/web/src/components/embed-dialog/index.tsx b/web/src/components/embed-dialog/index.tsx index 9aa389565..e1e40f2a0 100644 --- a/web/src/components/embed-dialog/index.tsx +++ b/web/src/components/embed-dialog/index.tsx @@ -15,6 +15,8 @@ import { FormLabel, FormMessage, } from '@/components/ui/form'; +import { Label } from '@/components/ui/label'; +import { RadioGroup, RadioGroupItem } from '@/components/ui/radio-group'; import { Switch } from '@/components/ui/switch'; import { SharedFrom } from '@/constants/chat'; import { @@ -32,6 +34,8 @@ import { z } from 'zod'; const FormSchema = z.object({ visibleAvatar: z.boolean(), locale: z.string(), + embedType: z.enum(['fullscreen', 'widget']), + enableStreaming: z.boolean(), }); type IProps = IModalProps & { @@ -55,6 +59,8 @@ function EmbedDialog({ defaultValues: { visibleAvatar: false, locale: '', + embedType: 'fullscreen' as const, + enableStreaming: false, }, }); @@ -68,20 +74,60 @@ function EmbedDialog({ }, []); const generateIframeSrc = useCallback(() => { - const { visibleAvatar, locale } = values; - let src = `${location.origin}${from === SharedFrom.Agent ? Routes.AgentShare : Routes.ChatShare}?shared_id=${token}&from=${from}&auth=${beta}`; + const { visibleAvatar, locale, embedType, enableStreaming } = values; + const baseRoute = + embedType === 'widget' + ? Routes.ChatWidget + : from === SharedFrom.Agent + ? Routes.AgentShare + : Routes.ChatShare; + let src = `${location.origin}${baseRoute}?shared_id=${token}&from=${from}&auth=${beta}`; if (visibleAvatar) { src += '&visible_avatar=1'; } if (locale) { src += `&locale=${locale}`; } + if (enableStreaming) { + src += '&streaming=true'; + } return src; }, [beta, from, token, values]); const text = useMemo(() => { const iframeSrc = generateIframeSrc(); - return ` + const { embedType } = values; + + if (embedType === 'widget') { + const { enableStreaming } = values; + const streamingParam = enableStreaming + ? '&streaming=true' + : '&streaming=false'; + return ` + ~~~ html + + +~~~ + `; + } else { + return ` ~~~ html