Mirror of https://github.com/infiniflow/ragflow.git (synced 2025-12-08 20:42:30 +08:00)
Compare commits: 182 commits
.github/ISSUE_TEMPLATE/bug_report.yml (vendored, 2 changes)

@@ -1,5 +1,5 @@
name: Bug Report
description: Create a bug issue for infinity
description: Create a bug issue for RAGFlow
title: "[Bug]: "
labels: [bug]
body:
.github/ISSUE_TEMPLATE/feature_request.md (vendored, 2 changes)

@@ -1,7 +1,7 @@
---
name: Feature request
title: '[Feature Request]: '
about: Suggest an idea for Infinity
about: Suggest an idea for RAGFlow
labels: ''
---
.github/ISSUE_TEMPLATE/feature_request.yml (vendored, 2 changes)

@@ -1,5 +1,5 @@
name: Feature request
description: Propose a feature request for infinity.
description: Propose a feature request for RAGFlow.
title: "[Feature Request]: "
labels: [feature request]
body:
.github/ISSUE_TEMPLATE/question.yml (vendored, 2 changes)

@@ -1,5 +1,5 @@
name: Question
description: Ask questions on infinity
description: Ask questions on RAGFlow
title: "[Question]: "
labels: [question]
body:
.github/ISSUE_TEMPLATE/subtask.yml (vendored, 2 changes)

@@ -1,5 +1,5 @@
name: Subtask
description: "Propose a subtask for infinity"
description: "Propose a subtask for RAGFlow"
title: "[Subtask]: "
labels: [subtask]
.gitignore (vendored, 3 changes)

@@ -27,3 +27,6 @@ Cargo.lock
# Exclude the log folder
docker/ragflow-logs/
/flask_session
/logs
rag/res/deepdoc
@@ -1,10 +1,10 @@
FROM swr.cn-north-4.myhuaweicloud.com/infiniflow/ragflow-base:v1.0
FROM infiniflow/ragflow-base:v2.0
USER root

WORKDIR /ragflow

ADD ./web ./web
RUN cd ./web && npm i && npm run build
RUN cd ./web && npm i --force && npm run build

ADD ./api ./api
ADD ./conf ./conf
@@ -15,6 +15,7 @@ ENV PYTHONPATH=/ragflow/
ENV HF_ENDPOINT=https://hf-mirror.com

ADD docker/entrypoint.sh ./entrypoint.sh
ADD docker/.env ./
RUN chmod +x ./entrypoint.sh

ENTRYPOINT ["./entrypoint.sh"]
Dockerfile.arm (new file, 32 lines)

@@ -0,0 +1,32 @@
FROM python:3.11
USER root

WORKDIR /ragflow

COPY requirements_arm.txt /ragflow/requirements.txt
RUN pip install -i https://mirrors.aliyun.com/pypi/simple/ --default-timeout=1000 -r requirements.txt &&\
    python -c "import nltk;nltk.download('punkt');nltk.download('wordnet')"

RUN apt-get update && \
    apt-get install -y curl gnupg && \
    rm -rf /var/lib/apt/lists/*

RUN curl -sL https://deb.nodesource.com/setup_20.x | bash - && \
    apt-get install -y nodejs nginx ffmpeg libsm6 libxext6 libgl1

ADD ./web ./web
RUN cd ./web && npm i --force && npm run build

ADD ./api ./api
ADD ./conf ./conf
ADD ./deepdoc ./deepdoc
ADD ./rag ./rag

ENV PYTHONPATH=/ragflow/
ENV HF_ENDPOINT=https://hf-mirror.com

ADD docker/entrypoint.sh ./entrypoint.sh
ADD docker/.env ./
RUN chmod +x ./entrypoint.sh

ENTRYPOINT ["./entrypoint.sh"]
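Since there is no prebuilt ARM base image, the ARM variant is built straight from this new Dockerfile. A minimal build sketch, assuming an arm64 host; the image tag is illustrative, not one published by the project:

```bash
# Build the ARM image from the repository root (the tag below is illustrative).
docker build -f Dockerfile.arm -t infiniflow/ragflow:arm-dev .
```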
@@ -1,4 +1,4 @@
FROM swr.cn-north-4.myhuaweicloud.com/infiniflow/ragflow-base:v1.0
FROM infiniflow/ragflow-base:v2.0
USER root

WORKDIR /ragflow
@@ -9,7 +9,7 @@ RUN /root/miniconda3/envs/py11/bin/pip install onnxruntime-gpu --extra-index-url

ADD ./web ./web
RUN cd ./web && npm i && npm run build
RUN cd ./web && npm i --force && npm run build

ADD ./api ./api
ADD ./conf ./conf

@@ -34,7 +34,7 @@ ADD ./requirements.txt ./requirements.txt
RUN apt install openmpi-bin openmpi-common libopenmpi-dev
ENV LD_LIBRARY_PATH /usr/lib/x86_64-linux-gnu/openmpi/lib:$LD_LIBRARY_PATH
RUN rm /root/miniconda3/envs/py11/compiler_compat/ld
RUN cd ./web && npm i && npm run build
RUN cd ./web && npm i --force && npm run build
RUN conda run -n py11 pip install -i https://mirrors.aliyun.com/pypi/simple/ -r ./requirements.txt

RUN apt-get update && \

@@ -35,7 +35,7 @@ RUN dnf install -y openmpi openmpi-devel python3-openmpi
ENV C_INCLUDE_PATH /usr/include/openmpi-x86_64:$C_INCLUDE_PATH
ENV LD_LIBRARY_PATH /usr/lib64/openmpi/lib:$LD_LIBRARY_PATH
RUN rm /root/miniconda3/envs/py11/compiler_compat/ld
RUN cd ./web && npm i && npm run build
RUN cd ./web && npm i --force && npm run build
RUN conda run -n py11 pip install $(grep -ivE "mpi4py" ./requirements.txt) # without mpi4py==3.1.5
RUN conda run -n py11 pip install redis
README.md (170 changes)
@@ -11,22 +11,60 @@
</p>

<p align="center">
<a href="https://github.com/infiniflow/infinity/releases/latest">
<a href="https://github.com/infiniflow/ragflow/releases/latest">
<img src="https://img.shields.io/github/v/release/infiniflow/ragflow?color=blue&label=Latest%20Release" alt="Latest Release">
</a>
<a href="https://demo.ragflow.io" target="_blank">
<img alt="Static Badge" src="https://img.shields.io/badge/RAGFLOW-LLM-white?&labelColor=dd0af7"></a>
<img alt="Static Badge" src="https://img.shields.io/badge/Online-Demo-4e6b99"></a>
<a href="https://hub.docker.com/r/infiniflow/ragflow" target="_blank">
<img src="https://img.shields.io/badge/docker_pull-ragflow:v0.3.1-brightgreen"
alt="docker pull infiniflow/ragflow:v0.3.1"></a>
<a href="https://github.com/infiniflow/ragflow/blob/main/LICENSE">
<img height="21" src="https://img.shields.io/badge/License-Apache--2.0-ffffff?style=flat-square&labelColor=d4eaf7&color=7d09f1" alt="license">
<img src="https://img.shields.io/badge/docker_pull-ragflow:v0.7.0-brightgreen" alt="docker pull infiniflow/ragflow:v0.7.0"></a>
<a href="https://github.com/infiniflow/ragflow/blob/main/LICENSE">
<img height="21" src="https://img.shields.io/badge/License-Apache--2.0-ffffff?style=flat-square&labelColor=d4eaf7&color=2e6cc4" alt="license">
</a>
</p>

<details open>
<summary></b>📕 Table of Contents</b></summary>

- 💡 [What is RAGFlow?](#-what-is-ragflow)
- 🎮 [Demo](#-demo)
- 📌 [Latest Updates](#-latest-updates)
- 🌟 [Key Features](#-key-features)
- 🔎 [System Architecture](#-system-architecture)
- 🎬 [Get Started](#-get-started)
- 🔧 [Configurations](#-configurations)
- 🛠️ [Build from source](#-build-from-source)
- 🛠️ [Launch service from source](#-launch-service-from-source)
- 📚 [Documentation](#-documentation)
- 📜 [Roadmap](#-roadmap)
- 🏄 [Community](#-community)
- 🙌 [Contributing](#-contributing)

</details>

## 💡 What is RAGFlow?

[RAGFlow](https://demo.ragflow.io) is an open-source RAG (Retrieval-Augmented Generation) engine based on deep document understanding. It offers a streamlined RAG workflow for businesses of any scale, combining LLM (Large Language Models) to provide truthful question-answering capabilities, backed by well-founded citations from various complex formatted data.
[RAGFlow](https://ragflow.io/) is an open-source RAG (Retrieval-Augmented Generation) engine based on deep document understanding. It offers a streamlined RAG workflow for businesses of any scale, combining LLM (Large Language Models) to provide truthful question-answering capabilities, backed by well-founded citations from various complex formatted data.

## 🎮 Demo

Try our demo at [https://demo.ragflow.io](https://demo.ragflow.io).

## 📌 Latest Updates

- 2024-05-30 Integrates [BCE](https://github.com/netease-youdao/BCEmbedding), [BGE](https://github.com/FlagOpen/FlagEmbedding), and [Colbert](https://github.com/stanford-futuredata/ColBERT) reranker models.
- 2024-05-28 Supports LLM Baichuan and VolcanoArk.
- 2024-05-23 Supports [RAPTOR](https://arxiv.org/html/2401.18059v1) for better text retrieval.
- 2024-05-21 Supports streaming output and text chunk retrieval API.
- 2024-05-15 Integrates OpenAI GPT-4o.
- 2024-05-08 Integrates LLM DeepSeek-V2.
- 2024-04-26 Adds file management.
- 2024-04-19 Supports conversation API ([detail](./docs/references/api.md)).
- 2024-04-16 Integrates an embedding model 'bce-embedding-base_v1' from [BCEmbedding](https://github.com/netease-youdao/BCEmbedding), and [FastEmbed](https://github.com/qdrant/fastembed), which is designed specifically for light and speedy embedding.
- 2024-04-11 Supports [Xinference](./docs/guides/deploy_local_llm.md) for local LLM deployment.
- 2024-04-10 Adds a new layout recognition model for analyzing legal documents.
- 2024-04-08 Supports [Ollama](./docs/guides/deploy_local_llm.md) for local LLM deployment.
- 2024-04-07 Supports Chinese UI.

## 🌟 Key Features
@@ -56,16 +94,6 @@
- Multiple recall paired with fused re-ranking.
- Intuitive APIs for seamless integration with business.

## 📌 Latest Features

- 2024-04-19 Support conversation API ([detail](./docs/conversation_api.md)).
- 2024-04-16 Add an embedding model 'bce-embedding-base_v1' from [BCEmbedding](https://github.com/netease-youdao/BCEmbedding).
- 2024-04-16 Add [FastEmbed](https://github.com/qdrant/fastembed), which is designed specifically for light and speedy embedding.
- 2024-04-11 Support [Xinference](./docs/xinference.md) for local LLM deployment.
- 2024-04-10 Add a new layout recognization model for analyzing Laws documentation.
- 2024-04-08 Support [Ollama](./docs/ollama.md) for local LLM deployment.
- 2024-04-07 Support Chinese UI.

## 🔎 System Architecture

<div align="center" style="margin-top:20px;margin-bottom:20px;">
@@ -84,7 +112,7 @@

### 🚀 Start up the server

1. Ensure `vm.max_map_count` >= 262144 ([more](./docs/max_map_count.md)):
1. Ensure `vm.max_map_count` >= 262144 ([more](./docs/guides/max_map_count.md)):

> To check the value of `vm.max_map_count`:
>
@@ -113,11 +141,14 @@

3. Build the pre-built Docker images and start up the server:

> Running the following commands automatically downloads the *dev* version RAGFlow Docker image. To download and run a specified Docker version, update `RAGFLOW_VERSION` in **docker/.env** to the intended version, for example `RAGFLOW_VERSION=v0.7.0`, before running the following commands.

```bash
$ cd ragflow/docker
$ chmod +x ./entrypoint.sh
$ docker compose up -d
```

> The core image is about 9 GB in size and may take a while to load.
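As a concrete example of the version pinning described in the note above, one way to switch from the *dev* image to a released tag is to edit **docker/.env** before bringing the stack up (a sketch that assumes the variable sits on its own line; adjust the version as needed):

```bash
# Pin the RAGFlow image version, then start the containers.
sed -i 's/^RAGFLOW_VERSION=.*/RAGFLOW_VERSION=v0.7.0/' docker/.env
docker compose up -d
```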
@@ -148,7 +179,7 @@
> With default settings, you only need to enter `http://IP_OF_YOUR_MACHINE` (**sans** port number) as the default HTTP serving port `80` can be omitted when using the default configurations.
6. In [service_conf.yaml](./docker/service_conf.yaml), select the desired LLM factory in `user_default_llm` and update the `API_KEY` field with the corresponding API key.

> See [./docs/llm_api_key_setup.md](./docs/llm_api_key_setup.md) for more information.
> See [./docs/guides/llm_api_key_setup.md](./docs/guides/llm_api_key_setup.md) for more information.

_The show is now on!_
@@ -179,15 +210,110 @@ To build the Docker images from source:

```bash
$ git clone https://github.com/infiniflow/ragflow.git
$ cd ragflow/
$ docker build -t infiniflow/ragflow:v0.3.1 .
$ docker build -t infiniflow/ragflow:dev .
$ cd ragflow/docker
$ chmod +x ./entrypoint.sh
$ docker compose up -d
```

## 🛠️ Launch service from source

To launch the service from source:

1. Clone the repository:

```bash
$ git clone https://github.com/infiniflow/ragflow.git
$ cd ragflow/
```

2. Create a virtual environment, ensuring that Anaconda or Miniconda is installed:

```bash
$ conda create -n ragflow python=3.11.0
$ conda activate ragflow
$ pip install -r requirements.txt
```

```bash
# If your CUDA version is higher than 12.0, run the following additional commands:
$ pip uninstall -y onnxruntime-gpu
$ pip install onnxruntime-gpu --extra-index-url https://aiinfra.pkgs.visualstudio.com/PublicPackages/_packaging/onnxruntime-cuda-12/pypi/simple/
```

3. Copy the entry script and configure environment variables:

```bash
# Get the Python path:
$ which python
# Get the ragflow project path:
$ pwd
```

```bash
$ cp docker/entrypoint.sh .
$ vi entrypoint.sh
```

```bash
# Adjust configurations according to your actual situation (the following two export commands are newly added):
# - Assign the result of `which python` to `PY`.
# - Assign the result of `pwd` to `PYTHONPATH`.
# - Comment out `LD_LIBRARY_PATH`, if it is configured.
# - Optional: Add Hugging Face mirror.
PY=${PY}
export PYTHONPATH=${PYTHONPATH}
export HF_ENDPOINT=https://hf-mirror.com
```
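For illustration, the two new exports might end up looking like the following once the outputs of `which python` and `pwd` have been substituted; the paths are examples only, not values from the repository:

```bash
# Example values for the two new exports (paths are illustrative).
PY=/root/miniconda3/envs/ragflow/bin/python
export PYTHONPATH=/root/ragflow
# Optional Hugging Face mirror:
export HF_ENDPOINT=https://hf-mirror.com
```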
4. Launch the third-party services (MinIO, Elasticsearch, Redis, and MySQL):

```bash
$ cd docker
$ docker compose -f docker-compose-base.yml up -d
```

5. Check the configuration files, ensuring that:

- The settings in **docker/.env** match those in **conf/service_conf.yaml**.
- The IP addresses and ports for related services in **service_conf.yaml** match the local machine IP and ports exposed by the container.

6. Launch the RAGFlow backend service:

```bash
$ chmod +x ./entrypoint.sh
$ bash ./entrypoint.sh
```

7. Launch the frontend service:

```bash
$ cd web
$ npm install --registry=https://registry.npmmirror.com --force
$ vim .umirc.ts
# Update proxy.target to 127.0.0.1:9380
$ npm run dev
```

8. Deploy the frontend service:

```bash
$ cd web
$ npm install --registry=https://registry.npmmirror.com --force
$ umi build
$ mkdir -p /ragflow/web
$ cp -r dist /ragflow/web
$ apt install nginx -y
$ cp ../docker/nginx/proxy.conf /etc/nginx
$ cp ../docker/nginx/nginx.conf /etc/nginx
$ cp ../docker/nginx/ragflow.conf /etc/nginx/conf.d
$ systemctl start nginx
```

## 📚 Documentation

- [FAQ](./docs/faq.md)
- [Quickstart](./docs/quickstart.md)
- [FAQ](./docs/references/faq.md)

## 📜 Roadmap
@@ -200,4 +326,4 @@ See the [RAGFlow Roadmap 2024](https://github.com/infiniflow/ragflow/issues/162)

## 🙌 Contributing

RAGFlow flourishes via open-source collaboration. In this spirit, we embrace diverse contributions from the community. If you would like to be a part, review our [Contribution Guidelines](https://github.com/infiniflow/ragflow/blob/main/docs/CONTRIBUTING.md) first.
RAGFlow flourishes via open-source collaboration. In this spirit, we embrace diverse contributions from the community. If you would like to be a part, review our [Contribution Guidelines](./docs/references/CONTRIBUTING.md) first.
README_ja.md (119 changes)
@@ -11,22 +11,44 @@
</p>

<p align="center">
<a href="https://github.com/infiniflow/infinity/releases/latest">
<a href="https://github.com/infiniflow/ragflow/releases/latest">
<img src="https://img.shields.io/github/v/release/infiniflow/ragflow?color=blue&label=Latest%20Release" alt="Latest Release">
</a>
<a href="https://demo.ragflow.io" target="_blank">
<img alt="Static Badge" src="https://img.shields.io/badge/RAGFLOW-LLM-white?&labelColor=dd0af7"></a>
<img alt="Static Badge" src="https://img.shields.io/badge/Online-Demo-4e6b99"></a>
<a href="https://hub.docker.com/r/infiniflow/ragflow" target="_blank">
<img src="https://img.shields.io/badge/docker_pull-ragflow:v0.3.1-brightgreen"
alt="docker pull infiniflow/ragflow:v0.3.1"></a>
<img src="https://img.shields.io/badge/docker_pull-ragflow:v0.7.0-brightgreen"
alt="docker pull infiniflow/ragflow:v0.7.0"></a>
<a href="https://github.com/infiniflow/ragflow/blob/main/LICENSE">
<img height="21" src="https://img.shields.io/badge/License-Apache--2.0-ffffff?style=flat-square&labelColor=d4eaf7&color=7d09f1" alt="license">
<img height="21" src="https://img.shields.io/badge/License-Apache--2.0-ffffff?style=flat-square&labelColor=d4eaf7&color=2e6cc4" alt="license">
</a>
</p>

## 💡 RAGFlow とは?

[RAGFlow](https://demo.ragflow.io) は、深い文書理解に基づいたオープンソースの RAG (Retrieval-Augmented Generation) エンジンである。LLM(大規模言語モデル)を組み合わせることで、様々な複雑なフォーマットのデータから根拠のある引用に裏打ちされた、信頼できる質問応答機能を実現し、あらゆる規模のビジネスに適した RAG ワークフローを提供します。
[RAGFlow](https://ragflow.io/) は、深い文書理解に基づいたオープンソースの RAG (Retrieval-Augmented Generation) エンジンである。LLM(大規模言語モデル)を組み合わせることで、様々な複雑なフォーマットのデータから根拠のある引用に裏打ちされた、信頼できる質問応答機能を実現し、あらゆる規模のビジネスに適した RAG ワークフローを提供します。

## 🎮 Demo

デモをお試しください:[https://demo.ragflow.io](https://demo.ragflow.io)。

## 📌 最新情報

- 2024-05-30 [BCE](https://github.com/netease-youdao/BCEmbedding)、[BGE](https://github.com/FlagOpen/FlagEmbedding)、[Colbert](https://github.com/stanford-futuredata/ColBERT) reranker を統合。
- 2024-05-28 LLM BaichuanとVolcanoArkを統合しました。
- 2024-05-23 より良いテキスト検索のために[RAPTOR](https://arxiv.org/html/2401.18059v1)をサポート。
- 2024-05-21 ストリーミング出力とテキストチャンク取得APIをサポート。
- 2024-05-15 OpenAI GPT-4oを統合しました。
- 2024-05-08 LLM DeepSeek-V2を統合しました。
- 2024-04-26 「ファイル管理」機能を追加しました。
- 2024-04-19 会話 API をサポートします ([詳細](./docs/references/api.md))。
- 2024-04-16 [BCEmbedding](https://github.com/netease-youdao/BCEmbedding) から埋め込みモデル「bce-embedding-base_v1」を追加します。
- 2024-04-16 [FastEmbed](https://github.com/qdrant/fastembed) は、軽量かつ高速な埋め込み用に設計されています。
- 2024-04-11 ローカル LLM デプロイメント用に [Xinference](./docs/guides/deploy_local_llm.md) をサポートします。
- 2024-04-10 メソッド「Laws」に新しいレイアウト認識モデルを追加します。
- 2024-04-08 [Ollama](./docs/guides/deploy_local_llm.md) を使用した大規模モデルのローカライズされたデプロイメントをサポートします。
- 2024-04-07 中国語インターフェースをサポートします。

## 🌟 主な特徴
@@ -56,16 +78,6 @@
- 複数の想起と融合された再ランク付け。
- 直感的な API によってビジネスとの統合がシームレスに。

## 📌 最新の機能

- 2024-04-19 会話 API をサポートします ([詳細](./docs/conversation_api.md))。
- 2024-04-16 [BCEmbedding](https://github.com/netease-youdao/BCEmbedding) から埋め込みモデル「bce-embedding-base_v1」を追加します。
- 2024-04-16 [FastEmbed](https://github.com/qdrant/fastembed) は、軽量かつ高速な埋め込み用に設計されています。
- 2024-04-11 ローカル LLM デプロイメント用に [Xinference](./docs/xinference.md) をサポートします。
- 2024-04-10 メソッド「Laws」に新しいレイアウト認識モデルを追加します。
- 2024-04-08 [Ollama](./docs/ollama.md) を使用した大規模モデルのローカライズされたデプロイメントをサポートします。
- 2024-04-07 中国語インターフェースをサポートします。

## 🔎 システム構成

<div align="center" style="margin-top:20px;margin-bottom:20px;">
@@ -84,7 +96,7 @@

### 🚀 サーバーを起動

1. `vm.max_map_count` >= 262144 であることを確認する【[もっと](./docs/max_map_count.md)】:
1. `vm.max_map_count` >= 262144 であることを確認する【[もっと](./docs/guides/max_map_count.md)】:

> `vm.max_map_count` の値をチェックするには:
>
@@ -119,7 +131,9 @@
$ docker compose up -d
```

> コアイメージのサイズは約 15 GB で、ロードに時間がかかる場合があります。
> 上記のコマンドを実行すると、RAGFlowの開発版dockerイメージが自動的にダウンロードされます。 特定のバージョンのDockerイメージをダウンロードして実行したい場合は、docker/.envファイルのRAGFLOW_VERSION変数を見つけて、対応するバージョンに変更してください。 例えば、RAGFLOW_VERSION=v0.7.0として、上記のコマンドを実行してください。

> コアイメージのサイズは約 9 GB で、ロードに時間がかかる場合があります。

4. サーバーを立ち上げた後、サーバーの状態を確認する:
@@ -148,7 +162,7 @@
> デフォルトの設定を使用する場合、デフォルトの HTTP サービングポート `80` は省略できるので、与えられたシナリオでは、`http://IP_OF_YOUR_MACHINE`(ポート番号は省略)だけを入力すればよい。
6. [service_conf.yaml](./docker/service_conf.yaml) で、`user_default_llm` で希望の LLM ファクトリを選択し、`API_KEY` フィールドを対応する API キーで更新する。

> 詳しくは [./docs/llm_api_key_setup.md](./docs/llm_api_key_setup.md) を参照してください。
> 詳しくは [./docs/guides/llm_api_key_setup.md](./docs/guides/llm_api_key_setup.md) を参照してください。

_これで初期設定完了!ショーの開幕です!_
@@ -179,15 +193,76 @@

```bash
$ git clone https://github.com/infiniflow/ragflow.git
$ cd ragflow/
$ docker build -t infiniflow/ragflow:v0.3.1 .
$ docker build -t infiniflow/ragflow:v0.7.0 .
$ cd ragflow/docker
$ chmod +x ./entrypoint.sh
$ docker compose up -d
```

## 🛠️ ソースコードからサービスを起動する方法

ソースコードからサービスを起動する場合は、以下の手順に従ってください:

1. リポジトリをクローンします
```bash
$ git clone https://github.com/infiniflow/ragflow.git
$ cd ragflow/
```

2. 仮想環境を作成します(AnacondaまたはMinicondaがインストールされていることを確認してください)
```bash
$ conda create -n ragflow python=3.11.0
$ conda activate ragflow
$ pip install -r requirements.txt
```
CUDAのバージョンが12.0以上の場合、以下の追加コマンドを実行してください:
```bash
$ pip uninstall -y onnxruntime-gpu
$ pip install onnxruntime-gpu --extra-index-url https://aiinfra.pkgs.visualstudio.com/PublicPackages/_packaging/onnxruntime-cuda-12/pypi/simple/
```

3. エントリースクリプトをコピーし、環境変数を設定します
```bash
$ cp docker/entrypoint.sh .
$ vi entrypoint.sh
```
以下のコマンドでPythonのパスとragflowプロジェクトのパスを取得します:
```bash
$ which python
$ pwd
```

`which python`の出力を`PY`の値として、`pwd`の出力を`PYTHONPATH`の値として設定します。

`LD_LIBRARY_PATH`が既に設定されている場合は、コメントアウトできます。

```bash
# 実際の状況に応じて設定を調整してください。以下の二つのexportは新たに追加された設定です
PY=${PY}
export PYTHONPATH=${PYTHONPATH}
# オプション:Hugging Faceミラーを追加
export HF_ENDPOINT=https://hf-mirror.com
```

4. 基本サービスを起動します
```bash
$ cd docker
$ docker compose -f docker-compose-base.yml up -d
```

5. 設定ファイルを確認します
**docker/.env**内の設定が**conf/service_conf.yaml**内の設定と一致していることを確認してください。**service_conf.yaml**内の関連サービスのIPアドレスとポートは、ローカルマシンのIPアドレスとコンテナが公開するポートに変更する必要があります。

6. サービスを起動します
```bash
$ chmod +x ./entrypoint.sh
$ bash ./entrypoint.sh
```

## 📚 ドキュメンテーション

- [FAQ](./docs/faq.md)
- [Quickstart](./docs/quickstart.md)
- [FAQ](./docs/references/faq.md)

## 📜 ロードマップ
@@ -200,4 +275,4 @@ $ docker compose up -d

## 🙌 コントリビュート

RAGFlow はオープンソースのコラボレーションによって発展してきました。この精神に基づき、私たちはコミュニティからの多様なコントリビュートを受け入れています。 参加を希望される方は、まず[コントリビューションガイド](https://github.com/infiniflow/ragflow/blob/main/docs/CONTRIBUTING.md)をご覧ください。
RAGFlow はオープンソースのコラボレーションによって発展してきました。この精神に基づき、私たちはコミュニティからの多様なコントリビュートを受け入れています。 参加を希望される方は、まず[コントリビューションガイド](./docs/references/CONTRIBUTING.md)をご覧ください。
README_zh.md (139 changes)
@@ -11,22 +11,41 @@
</p>

<p align="center">
<a href="https://github.com/infiniflow/infinity/releases/latest">
<a href="https://github.com/infiniflow/ragflow/releases/latest">
<img src="https://img.shields.io/github/v/release/infiniflow/ragflow?color=blue&label=Latest%20Release" alt="Latest Release">
</a>
<a href="https://demo.ragflow.io" target="_blank">
<img alt="Static Badge" src="https://img.shields.io/badge/RAGFLOW-LLM-white?&labelColor=dd0af7"></a>
<img alt="Static Badge" src="https://img.shields.io/badge/Online-Demo-4e6b99"></a>
<a href="https://hub.docker.com/r/infiniflow/ragflow" target="_blank">
<img src="https://img.shields.io/badge/docker_pull-ragflow:v0.3.1-brightgreen"
alt="docker pull infiniflow/ragflow:v0.3.1"></a>
<a href="https://github.com/infiniflow/ragflow/blob/main/LICENSE">
<img height="21" src="https://img.shields.io/badge/License-Apache--2.0-ffffff?style=flat-square&labelColor=d4eaf7&color=7d09f1" alt="license">
<img src="https://img.shields.io/badge/docker_pull-ragflow:v0.7.0-brightgreen" alt="docker pull infiniflow/ragflow:v0.7.0"></a>
<a href="https://github.com/infiniflow/ragflow/blob/main/LICENSE">
<img height="21" src="https://img.shields.io/badge/License-Apache--2.0-ffffff?style=flat-square&labelColor=d4eaf7&color=2e6cc4" alt="license">
</a>
</p>

## 💡 RAGFlow 是什么?

[RAGFlow](https://demo.ragflow.io) 是一款基于深度文档理解构建的开源 RAG(Retrieval-Augmented Generation)引擎。RAGFlow 可以为各种规模的企业及个人提供一套精简的 RAG 工作流程,结合大语言模型(LLM)针对用户各类不同的复杂格式数据提供可靠的问答以及有理有据的引用。
[RAGFlow](https://ragflow.io/) 是一款基于深度文档理解构建的开源 RAG(Retrieval-Augmented Generation)引擎。RAGFlow 可以为各种规模的企业及个人提供一套精简的 RAG 工作流程,结合大语言模型(LLM)针对用户各类不同的复杂格式数据提供可靠的问答以及有理有据的引用。

## 🎮 Demo 试用

请登录网址 [https://demo.ragflow.io](https://demo.ragflow.io) 试用 demo。

## 📌 近期更新

- 2024-05-30 集成 [BCE](https://github.com/netease-youdao/BCEmbedding), [BGE](https://github.com/FlagOpen/FlagEmbedding) 和 [Colbert](https://github.com/stanford-futuredata/ColBERT) 重排序模型。
- 2024-05-28 集成大模型 Baichuan 和火山方舟。
- 2024-05-23 实现 [RAPTOR](https://arxiv.org/html/2401.18059v1) 提供更好的文本检索。
- 2024-05-21 支持流式结果输出和文本块获取API。
- 2024-05-15 集成大模型 OpenAI GPT-4o。
- 2024-05-08 集成大模型 DeepSeek。
- 2024-04-26 增添了'文件管理'功能。
- 2024-04-19 支持对话 API ([更多](./docs/references/api.md))。
- 2024-04-16 集成嵌入模型 [BCEmbedding](https://github.com/netease-youdao/BCEmbedding) 和 专为轻型和高速嵌入而设计的 [FastEmbed](https://github.com/qdrant/fastembed)。
- 2024-04-11 支持用 [Xinference](./docs/guides/deploy_local_llm.md) 本地化部署大模型。
- 2024-04-10 为'Laws'版面分析增加了底层模型。
- 2024-04-08 支持用 [Ollama](./docs/guides/deploy_local_llm.md) 本地化部署大模型。
- 2024-04-07 支持中文界面。

## 🌟 主要功能
@@ -56,16 +75,6 @@
- 基于多路召回、融合重排序。
- 提供易用的 API,可以轻松集成到各类企业系统。

## 📌 新增功能

- 2024-04-19 支持对话 API ([更多](./docs/conversation_api.md)).
- 2024-04-16 添加嵌入模型 [BCEmbedding](https://github.com/netease-youdao/BCEmbedding) 。
- 2024-04-16 添加 [FastEmbed](https://github.com/qdrant/fastembed) 专为轻型和高速嵌入而设计。
- 2024-04-11 支持用 [Xinference](./docs/xinference.md) 本地化部署大模型。
- 2024-04-10 为'Laws'版面分析增加了底层模型。
- 2024-04-08 支持用 [Ollama](./docs/ollama.md) 本地化部署大模型。
- 2024-04-07 支持中文界面。

## 🔎 系统架构

<div align="center" style="margin-top:20px;margin-bottom:20px;">
@@ -84,7 +93,7 @@

### 🚀 启动服务器

1. 确保 `vm.max_map_count` 不小于 262144 【[更多](./docs/max_map_count.md)】:
1. 确保 `vm.max_map_count` 不小于 262144 【[更多](./docs/guides/max_map_count.md)】:

> 如需确认 `vm.max_map_count` 的大小:
>
@@ -119,7 +128,9 @@
$ docker compose -f docker-compose-CN.yml up -d
```

> 核心镜像文件大约 15 GB,可能需要一定时间拉取。请耐心等待。
> 请注意,运行上述命令会自动下载 RAGFlow 的开发版本 docker 镜像。如果你想下载并运行特定版本的 docker 镜像,请在 docker/.env 文件中找到 RAGFLOW_VERSION 变量,将其改为对应版本。例如 RAGFLOW_VERSION=v0.7.0,然后运行上述命令。

> 核心镜像文件大约 9 GB,可能需要一定时间拉取。请耐心等待。

4. 服务器启动成功后再次确认服务器状态:
@@ -148,7 +159,7 @@
> 上面这个例子中,您只需输入 http://IP_OF_YOUR_MACHINE 即可:未改动过配置则无需输入端口(默认的 HTTP 服务端口 80)。
6. 在 [service_conf.yaml](./docker/service_conf.yaml) 文件的 `user_default_llm` 栏配置 LLM factory,并在 `API_KEY` 栏填写和你选择的大模型相对应的 API key。

> 详见 [./docs/llm_api_key_setup.md](./docs/llm_api_key_setup.md)。
> 详见 [./docs/guides/llm_api_key_setup.md](./docs/guides/llm_api_key_setup.md)。

_好戏开始,接着奏乐接着舞!_
@@ -179,15 +190,97 @@

```bash
$ git clone https://github.com/infiniflow/ragflow.git
$ cd ragflow/
$ docker build -t infiniflow/ragflow:v0.3.1 .
$ docker build -t infiniflow/ragflow:v0.7.0 .
$ cd ragflow/docker
$ chmod +x ./entrypoint.sh
$ docker compose up -d
```

## 🛠️ 源码启动服务

如需从源码启动服务,请参考以下步骤:

1. 克隆仓库
```bash
$ git clone https://github.com/infiniflow/ragflow.git
$ cd ragflow/
```

2. 创建虚拟环境(确保已安装 Anaconda 或 Miniconda)
```bash
$ conda create -n ragflow python=3.11.0
$ conda activate ragflow
$ pip install -r requirements.txt
```
如果cuda > 12.0,需额外执行以下命令:
```bash
$ pip uninstall -y onnxruntime-gpu
$ pip install onnxruntime-gpu --extra-index-url https://aiinfra.pkgs.visualstudio.com/PublicPackages/_packaging/onnxruntime-cuda-12/pypi/simple/
```

3. 拷贝入口脚本并配置环境变量
```bash
$ cp docker/entrypoint.sh .
$ vi entrypoint.sh
```
使用以下命令获取python路径及ragflow项目路径:
```bash
$ which python
$ pwd
```

将上述`which python`的输出作为`PY`的值,将`pwd`的输出作为`PYTHONPATH`的值。

`LD_LIBRARY_PATH`如果环境已经配置好,可以注释掉。

```bash
# 此处配置需要按照实际情况调整,两个export为新增配置
PY=${PY}
export PYTHONPATH=${PYTHONPATH}
# 可选:添加Hugging Face镜像
export HF_ENDPOINT=https://hf-mirror.com
```

4. 启动基础服务
```bash
$ cd docker
$ docker compose -f docker-compose-base.yml up -d
```

5. 检查配置文件
确保**docker/.env**中的配置与**conf/service_conf.yaml**中配置一致, **service_conf.yaml**中相关服务的IP地址与端口应该改成本机IP地址及容器映射出来的端口。

6. 启动服务
```bash
$ chmod +x ./entrypoint.sh
$ bash ./entrypoint.sh
```

7. 启动WebUI服务
```bash
$ cd web
$ npm install --registry=https://registry.npmmirror.com --force
$ vim .umirc.ts
# 修改proxy.target为127.0.0.1:9380
$ npm run dev
```

8. 部署WebUI服务
```bash
$ cd web
$ npm install --registry=https://registry.npmmirror.com --force
$ umi build
$ mkdir -p /ragflow/web
$ cp -r dist /ragflow/web
$ apt install nginx -y
$ cp ../docker/nginx/proxy.conf /etc/nginx
$ cp ../docker/nginx/nginx.conf /etc/nginx
$ cp ../docker/nginx/ragflow.conf /etc/nginx/conf.d
$ systemctl start nginx
```

## 📚 技术文档

- [FAQ](./docs/faq.md)
- [Quickstart](./docs/quickstart.md)
- [FAQ](./docs/references/faq.md)

## 📜 路线图
@@ -200,7 +293,7 @@ $ docker compose up -d

## 🙌 贡献指南

RAGFlow 只有通过开源协作才能蓬勃发展。秉持这一精神,我们欢迎来自社区的各种贡献。如果您有意参与其中,请查阅我们的[贡献者指南](https://github.com/infiniflow/ragflow/blob/main/docs/CONTRIBUTING.md) 。
RAGFlow 只有通过开源协作才能蓬勃发展。秉持这一精神,我们欢迎来自社区的各种贡献。如果您有意参与其中,请查阅我们的[贡献者指南](./docs/references/CONTRIBUTING.md) 。

## 👥 加入社区
@@ -13,27 +13,31 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#
import json
import os
import re
from datetime import datetime, timedelta
from flask import request
from flask import request, Response
from flask_login import login_required, current_user

from api.db import FileType, ParserType
from api.db.db_models import APIToken, API4Conversation
from api.db import FileType, ParserType, FileSource
from api.db.db_models import APIToken, API4Conversation, Task, File
from api.db.services import duplicate_name
from api.db.services.api_service import APITokenService, API4ConversationService
from api.db.services.dialog_service import DialogService, chat
from api.db.services.document_service import DocumentService
from api.db.services.file2document_service import File2DocumentService
from api.db.services.file_service import FileService
from api.db.services.knowledgebase_service import KnowledgebaseService
from api.db.services.task_service import queue_tasks, TaskService
from api.db.services.user_service import UserTenantService
from api.settings import RetCode
from api.settings import RetCode, retrievaler
from api.utils import get_uuid, current_timestamp, datetime_format
from api.utils.api_utils import server_error_response, get_data_error_result, get_json_result, validate_request
from itsdangerous import URLSafeTimedSerializer

from api.utils.file_utils import filename_type, thumbnail
from rag.utils import MINIO
from rag.utils.minio_conn import MINIO


def generate_confirmation_token(tenent_id):

@@ -164,6 +168,7 @@ def completion():
e, conv = API4ConversationService.get_by_id(req["conversation_id"])
if not e:
return get_data_error_result(retmsg="Conversation not found!")
if "quote" not in req: req["quote"] = False

msg = []
for m in req["messages"]:

@@ -180,13 +185,56 @@
return get_data_error_result(retmsg="Dialog not found!")
del req["conversation_id"]
del req["messages"]
ans = chat(dia, msg, **req)

if not conv.reference:
conv.reference = []
conv.reference.append(ans["reference"])
conv.message.append({"role": "assistant", "content": ans["answer"]})
API4ConversationService.append_message(conv.id, conv.to_dict())
return get_json_result(data=ans)
conv.message.append({"role": "assistant", "content": ""})
conv.reference.append({"chunks": [], "doc_aggs": []})

def fillin_conv(ans):
nonlocal conv
if not conv.reference:
conv.reference.append(ans["reference"])
else: conv.reference[-1] = ans["reference"]
conv.message[-1] = {"role": "assistant", "content": ans["answer"]}

def stream():
nonlocal dia, msg, req, conv
try:
for ans in chat(dia, msg, True, **req):
fillin_conv(ans)
for chunk_i in ans['reference'].get('chunks', []):
chunk_i['doc_name'] = chunk_i['docnm_kwd']
chunk_i.pop('docnm_kwd')
yield "data:"+json.dumps({"retcode": 0, "retmsg": "", "data": ans}, ensure_ascii=False) + "\n\n"
API4ConversationService.append_message(conv.id, conv.to_dict())
except Exception as e:
yield "data:" + json.dumps({"retcode": 500, "retmsg": str(e),
"data": {"answer": "**ERROR**: "+str(e), "reference": []}},
ensure_ascii=False) + "\n\n"
yield "data:"+json.dumps({"retcode": 0, "retmsg": "", "data": True}, ensure_ascii=False) + "\n\n"

if req.get("stream", True):
resp = Response(stream(), mimetype="text/event-stream")
resp.headers.add_header("Cache-control", "no-cache")
resp.headers.add_header("Connection", "keep-alive")
resp.headers.add_header("X-Accel-Buffering", "no")
resp.headers.add_header("Content-Type", "text/event-stream; charset=utf-8")
return resp
else:
answer = None
for ans in chat(dia, msg, **req):
answer = ans
fillin_conv(ans)
API4ConversationService.append_message(conv.id, conv.to_dict())
break

for chunk_i in answer['reference'].get('chunks',[]):
chunk_i['doc_name'] = chunk_i['docnm_kwd']
chunk_i.pop('docnm_kwd')

return get_json_result(data=answer)

except Exception as e:
return server_error_response(e)
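The reworked completion handler streams its answer as server-sent events: one `data:` line per partial answer and a final `{"data": true}` record. A minimal client sketch, assuming an API token and that the handler is mounted under a `/v1/api` prefix on the backend port 9380 mentioned elsewhere in this changeset (both are assumptions, not lines from this diff):

```bash
# Stream a completion; -N turns off curl's buffering so SSE chunks appear as they arrive.
curl -N -X POST http://127.0.0.1:9380/v1/api/completion \
  -H 'Authorization: Bearer <API_TOKEN>' \
  -H 'Content-Type: application/json' \
  -d '{"conversation_id": "<CONVERSATION_ID>",
       "messages": [{"role": "user", "content": "What is RAGFlow?"}],
       "stream": true}'
```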
@@ -199,7 +247,13 @@ def get(conversation_id):
if not e:
return get_data_error_result(retmsg="Conversation not found!")

return get_json_result(data=conv.to_dict())
conv = conv.to_dict()
for referenct_i in conv['reference']:
for chunk_i in referenct_i['chunks']:
if 'docnm_kwd' in chunk_i.keys():
chunk_i['doc_name'] = chunk_i['docnm_kwd']
chunk_i.pop('docnm_kwd')
return get_json_result(data=conv)
except Exception as e:
return server_error_response(e)
@@ -233,6 +287,13 @@ def upload():
if file.filename == '':
return get_json_result(
data=False, retmsg='No file selected!', retcode=RetCode.ARGUMENT_ERROR)

root_folder = FileService.get_root_folder(tenant_id)
pf_id = root_folder["id"]
FileService.init_knowledgebase_docs(pf_id, tenant_id)
kb_root_folder = FileService.get_kb_folder(tenant_id)
kb_folder = FileService.new_a_file_from_kb(kb.tenant_id, kb.name, kb_root_folder["id"])

try:
if DocumentService.get_doc_count(kb.tenant_id) >= int(os.environ.get('MAX_FILE_NUM_PER_USER', 8192)):
return get_data_error_result(

@@ -264,11 +325,254 @@ def upload():
"size": len(blob),
"thumbnail": thumbnail(filename, blob)
}

form_data=request.form
if "parser_id" in form_data.keys():
if request.form.get("parser_id").strip() in list(vars(ParserType).values())[1:-3]:
doc["parser_id"] = request.form.get("parser_id").strip()
if doc["type"] == FileType.VISUAL:
doc["parser_id"] = ParserType.PICTURE.value
if re.search(r"\.(ppt|pptx|pages)$", filename):
doc["parser_id"] = ParserType.PRESENTATION.value
doc = DocumentService.insert(doc)
return get_json_result(data=doc.to_json())

doc_result = DocumentService.insert(doc)
FileService.add_file_from_kb(doc, kb_folder["id"], kb.tenant_id)
except Exception as e:
return server_error_response(e)

if "run" in form_data.keys():
if request.form.get("run").strip() == "1":
try:
info = {"run": 1, "progress": 0}
info["progress_msg"] = ""
info["chunk_num"] = 0
info["token_num"] = 0
DocumentService.update_by_id(doc["id"], info)
# if str(req["run"]) == TaskStatus.CANCEL.value:
tenant_id = DocumentService.get_tenant_id(doc["id"])
if not tenant_id:
return get_data_error_result(retmsg="Tenant not found!")

#e, doc = DocumentService.get_by_id(doc["id"])
TaskService.filter_delete([Task.doc_id == doc["id"]])
e, doc = DocumentService.get_by_id(doc["id"])
doc = doc.to_dict()
doc["tenant_id"] = tenant_id
bucket, name = File2DocumentService.get_minio_address(doc_id=doc["id"])
queue_tasks(doc, bucket, name)
except Exception as e:
return server_error_response(e)

return get_json_result(data=doc_result.to_json())
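The extended upload handler lets one request pick a parser and kick off parsing via the new `parser_id` and `run` form fields. A hedged curl sketch; the `/v1/api/document/upload` path and the `kb_name` field are assumptions inferred from the surrounding handler, not lines shown in this hunk:

```bash
# Upload a presentation, select the "presentation" parser, and start parsing immediately (run=1).
curl -X POST http://127.0.0.1:9380/v1/api/document/upload \
  -H 'Authorization: Bearer <API_TOKEN>' \
  -F 'kb_name=<KB_NAME>' \
  -F 'file=@slides.pptx' \
  -F 'parser_id=presentation' \
  -F 'run=1'
```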
@manager.route('/list_chunks', methods=['POST'])
# @login_required
def list_chunks():
token = request.headers.get('Authorization').split()[1]
objs = APIToken.query(token=token)
if not objs:
return get_json_result(
data=False, retmsg='Token is not valid!"', retcode=RetCode.AUTHENTICATION_ERROR)

form_data = request.form

try:
if "doc_name" in form_data.keys():
tenant_id = DocumentService.get_tenant_id_by_name(form_data['doc_name'])
doc_id = DocumentService.get_doc_id_by_doc_name(form_data['doc_name'])

elif "doc_id" in form_data.keys():
tenant_id = DocumentService.get_tenant_id(form_data['doc_id'])
doc_id = form_data['doc_id']
else:
return get_json_result(
data=False,retmsg="Can't find doc_name or doc_id"
)

res = retrievaler.chunk_list(doc_id=doc_id, tenant_id=tenant_id)
res = [
{
"content": res_item["content_with_weight"],
"doc_name": res_item["docnm_kwd"],
"img_id": res_item["img_id"]
} for res_item in res
]

except Exception as e:
return server_error_response(e)

return get_json_result(data=res)


@manager.route('/list_kb_docs', methods=['POST'])
# @login_required
def list_kb_docs():
token = request.headers.get('Authorization').split()[1]
objs = APIToken.query(token=token)
if not objs:
return get_json_result(
data=False, retmsg='Token is not valid!"', retcode=RetCode.AUTHENTICATION_ERROR)

tenant_id = objs[0].tenant_id
kb_name = request.form.get("kb_name").strip()

try:
e, kb = KnowledgebaseService.get_by_name(kb_name, tenant_id)
if not e:
return get_data_error_result(
retmsg="Can't find this knowledgebase!")
kb_id = kb.id

except Exception as e:
return server_error_response(e)

page_number = int(request.form.get("page", 1))
items_per_page = int(request.form.get("page_size", 15))
orderby = request.form.get("orderby", "create_time")
desc = request.form.get("desc", True)
keywords = request.form.get("keywords", "")

try:
docs, tol = DocumentService.get_by_kb_id(
kb_id, page_number, items_per_page, orderby, desc, keywords)
docs = [{"doc_id": doc['id'], "doc_name": doc['name']} for doc in docs]

return get_json_result(data={"total": tol, "docs": docs})

except Exception as e:
return server_error_response(e)
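Both new listing endpoints authenticate with the same bearer token and read plain form fields. A minimal sketch (the `/v1/api` prefix is an assumption; IDs and names are placeholders):

```bash
# List the chunks of one parsed document, addressed by doc_id or doc_name.
curl -X POST http://127.0.0.1:9380/v1/api/list_chunks \
  -H 'Authorization: Bearer <API_TOKEN>' \
  -F 'doc_id=<DOC_ID>'

# Page through the documents of a knowledge base.
curl -X POST http://127.0.0.1:9380/v1/api/list_kb_docs \
  -H 'Authorization: Bearer <API_TOKEN>' \
  -F 'kb_name=<KB_NAME>' -F 'page=1' -F 'page_size=15'
```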
@manager.route('/document', methods=['DELETE'])
# @login_required
def document_rm():
token = request.headers.get('Authorization').split()[1]
objs = APIToken.query(token=token)
if not objs:
return get_json_result(
data=False, retmsg='Token is not valid!"', retcode=RetCode.AUTHENTICATION_ERROR)

tenant_id = objs[0].tenant_id
req = request.json
doc_ids = []
try:
doc_ids = [DocumentService.get_doc_id_by_doc_name(doc_name) for doc_name in req.get("doc_names", [])]
for doc_id in req.get("doc_ids", []):
if doc_id not in doc_ids:
doc_ids.append(doc_id)

if not doc_ids:
return get_json_result(
data=False, retmsg="Can't find doc_names or doc_ids"
)

except Exception as e:
return server_error_response(e)

root_folder = FileService.get_root_folder(tenant_id)
pf_id = root_folder["id"]
FileService.init_knowledgebase_docs(pf_id, tenant_id)

errors = ""
for doc_id in doc_ids:
try:
e, doc = DocumentService.get_by_id(doc_id)
if not e:
return get_data_error_result(retmsg="Document not found!")
tenant_id = DocumentService.get_tenant_id(doc_id)
if not tenant_id:
return get_data_error_result(retmsg="Tenant not found!")

b, n = File2DocumentService.get_minio_address(doc_id=doc_id)

if not DocumentService.remove_document(doc, tenant_id):
return get_data_error_result(
retmsg="Database error (Document removal)!")

f2d = File2DocumentService.get_by_document_id(doc_id)
FileService.filter_delete([File.source_type == FileSource.KNOWLEDGEBASE, File.id == f2d[0].file_id])
File2DocumentService.delete_by_document_id(doc_id)

MINIO.rm(b, n)
except Exception as e:
errors += str(e)

if errors:
return get_json_result(data=False, retmsg=errors, retcode=RetCode.SERVER_ERROR)

return get_json_result(data=True)
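Document removal accepts a JSON body that may name documents by id, by name, or both; the handler merges the two lists before deleting. A sketch under the same prefix assumption as above:

```bash
# Remove documents by name and/or id in one call.
curl -X DELETE http://127.0.0.1:9380/v1/api/document \
  -H 'Authorization: Bearer <API_TOKEN>' \
  -H 'Content-Type: application/json' \
  -d '{"doc_names": ["slides.pptx"], "doc_ids": ["<DOC_ID>"]}'
```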
@manager.route('/completion_aibotk', methods=['POST'])
@validate_request("Authorization", "conversation_id", "word")
def completion_faq():
import base64
req = request.json

token = req["Authorization"]
objs = APIToken.query(token=token)
if not objs:
return get_json_result(
data=False, retmsg='Token is not valid!"', retcode=RetCode.AUTHENTICATION_ERROR)

e, conv = API4ConversationService.get_by_id(req["conversation_id"])
if not e:
return get_data_error_result(retmsg="Conversation not found!")
if "quote" not in req: req["quote"] = True

msg = []
msg.append({"role": "user", "content": req["word"]})

try:
conv.message.append(msg[-1])
e, dia = DialogService.get_by_id(conv.dialog_id)
if not e:
return get_data_error_result(retmsg="Dialog not found!")
del req["conversation_id"]

if not conv.reference:
conv.reference = []
conv.message.append({"role": "assistant", "content": ""})
conv.reference.append({"chunks": [], "doc_aggs": []})

def fillin_conv(ans):
nonlocal conv
if not conv.reference:
conv.reference.append(ans["reference"])
else: conv.reference[-1] = ans["reference"]
conv.message[-1] = {"role": "assistant", "content": ans["answer"]}

data_type_picture = {
"type": 3,
"url": "base64 content"
}
data = [
{
"type": 1,
"content": ""
}
]
for ans in chat(dia, msg, stream=False, **req):
# answer = ans
data[0]["content"] += re.sub(r'##\d\$\$', '', ans["answer"])
fillin_conv(ans)
API4ConversationService.append_message(conv.id, conv.to_dict())

chunk_idxs = [int(match[2]) for match in re.findall(r'##\d\$\$', ans["answer"])]
for chunk_idx in chunk_idxs[:1]:
if ans["reference"]["chunks"][chunk_idx]["img_id"]:
try:
bkt, nm = ans["reference"]["chunks"][chunk_idx]["img_id"].split("-")
response = MINIO.get(bkt, nm)
data_type_picture["url"] = base64.b64encode(response).decode('utf-8')
data.append(data_type_picture)
except Exception as e:
return server_error_response(e)
break

response = {"code": 200, "msg": "success", "data": data}
return response

except Exception as e:
return server_error_response(e)
@@ -20,8 +20,9 @@ from flask_login import login_required, current_user
from elasticsearch_dsl import Q

from rag.app.qa import rmPrefix, beAdoc
from rag.nlp import search, huqie
from rag.utils import ELASTICSEARCH, rmSpace
from rag.nlp import search, rag_tokenizer
from rag.utils.es_conn import ELASTICSEARCH
from rag.utils import rmSpace
from api.db import LLMType, ParserType
from api.db.services.knowledgebase_service import KnowledgebaseService
from api.db.services.llm_service import TenantLLMService

@@ -37,7 +38,7 @@ import re
@manager.route('/list', methods=['POST'])
@login_required
@validate_request("doc_id")
def list():
def list_chunk():
req = request.json
doc_id = req["doc_id"]
page = int(req.get("page", 1))

@@ -124,10 +125,10 @@ def set():
d = {
"id": req["chunk_id"],
"content_with_weight": req["content_with_weight"]}
d["content_ltks"] = huqie.qie(req["content_with_weight"])
d["content_sm_ltks"] = huqie.qieqie(d["content_ltks"])
d["content_ltks"] = rag_tokenizer.tokenize(req["content_with_weight"])
d["content_sm_ltks"] = rag_tokenizer.fine_grained_tokenize(d["content_ltks"])
d["important_kwd"] = req["important_kwd"]
d["important_tks"] = huqie.qie(" ".join(req["important_kwd"]))
d["important_tks"] = rag_tokenizer.tokenize(" ".join(req["important_kwd"]))
if "available_int" in req:
d["available_int"] = req["available_int"]

@@ -149,9 +150,9 @@ def set():
if len(arr) != 2:
return get_data_error_result(
retmsg="Q&A must be separated by TAB/ENTER key.")
q, a = rmPrefix(arr[0]), rmPrefix[arr[1]]
q, a = rmPrefix(arr[0]), rmPrefix(arr[1])
d = beAdoc(d, arr[0], arr[1], not any(
[huqie.is_chinese(t) for t in q + a]))
[rag_tokenizer.is_chinese(t) for t in q + a]))

v, c = embd_mdl.encode([doc.name, req["content_with_weight"]])
v = 0.1 * v[0] + 0.9 * v[1] if doc.parser_id != ParserType.QA else v[1]

@@ -201,11 +202,11 @@ def create():
md5 = hashlib.md5()
md5.update((req["content_with_weight"] + req["doc_id"]).encode("utf-8"))
chunck_id = md5.hexdigest()
d = {"id": chunck_id, "content_ltks": huqie.qie(req["content_with_weight"]),
d = {"id": chunck_id, "content_ltks": rag_tokenizer.tokenize(req["content_with_weight"]),
"content_with_weight": req["content_with_weight"]}
d["content_sm_ltks"] = huqie.qieqie(d["content_ltks"])
d["content_sm_ltks"] = rag_tokenizer.fine_grained_tokenize(d["content_ltks"])
d["important_kwd"] = req.get("important_kwd", [])
d["important_tks"] = huqie.qie(" ".join(req.get("important_kwd", [])))
d["important_tks"] = rag_tokenizer.tokenize(" ".join(req.get("important_kwd", [])))
d["create_time"] = str(datetime.datetime.now()).replace("T", " ")[:19]
d["create_timestamp_flt"] = datetime.datetime.now().timestamp()

@@ -228,6 +229,9 @@ def create():
v = 0.1 * v[0] + 0.9 * v[1]
d["q_%d_vec" % len(v)] = v.tolist()
ELASTICSEARCH.upsert([d], search.index_name(tenant_id))

DocumentService.increment_chunk_num(
doc.id, doc.kb_id, c, 1, 0)
return get_json_result(data={"chunk_id": chunck_id})
except Exception as e:
return server_error_response(e)

@@ -253,8 +257,15 @@ def retrieval_test():

embd_mdl = TenantLLMService.model_instance(
kb.tenant_id, LLMType.EMBEDDING.value, llm_name=kb.embd_id)
ranks = retrievaler.retrieval(question, embd_mdl, kb.tenant_id, [kb_id], page, size, similarity_threshold,
vector_similarity_weight, top, doc_ids)

rerank_mdl = None
if req.get("rerank_id"):
rerank_mdl = TenantLLMService.model_instance(
kb.tenant_id, LLMType.RERANK.value, llm_name=req["rerank_id"])

ranks = retrievaler.retrieval(question, embd_mdl, kb.tenant_id, [kb_id], page, size,
similarity_threshold, vector_similarity_weight, top,
doc_ids, rerank_mdl=rerank_mdl)
for c in ranks["chunks"]:
if "vector" in c:
del c["vector"]
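The retrieval test now loads a reranker on demand: when the request carries a `rerank_id`, that model is resolved through `TenantLLMService` and passed to `retrievaler.retrieval`. An illustrative request sketch; the endpoint requires an authenticated web session, and the `/v1/chunk/retrieval_test` path is an assumption rather than a line from this hunk:

```bash
# Retrieval test with an optional reranker; the session cookie stands in for the login_required check.
curl -X POST http://127.0.0.1:9380/v1/chunk/retrieval_test \
  -b 'session=<SESSION_COOKIE>' \
  -H 'Content-Type: application/json' \
  -d '{"kb_id": "<KB_ID>", "question": "What is RAGFlow?", "rerank_id": "<RERANK_MODEL_ID>"}'
```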
@@ -13,12 +13,13 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#
from flask import request
from flask import request, Response, jsonify
from flask_login import login_required
from api.db.services.dialog_service import DialogService, ConversationService, chat
from api.utils.api_utils import server_error_response, get_data_error_result, validate_request
from api.utils import get_uuid
from api.utils.api_utils import get_json_result
import json


@manager.route('/set', methods=['POST'])

@@ -103,9 +104,12 @@ def list_convsersation():

@manager.route('/completion', methods=['POST'])
@login_required
@validate_request("conversation_id", "messages")
#@validate_request("conversation_id", "messages")
def completion():
req = request.json
#req = {"conversation_id": "9aaaca4c11d311efa461fa163e197198", "messages": [
# {"role": "user", "content": "上海有吗?"}
#]}
msg = []
for m in req["messages"]:
if m["role"] == "system":

@@ -123,13 +127,48 @@
return get_data_error_result(retmsg="Dialog not found!")
del req["conversation_id"]
del req["messages"]
ans = chat(dia, msg, **req)

if not conv.reference:
conv.reference = []
conv.reference.append(ans["reference"])
conv.message.append({"role": "assistant", "content": ans["answer"]})
ConversationService.update_by_id(conv.id, conv.to_dict())
return get_json_result(data=ans)
conv.message.append({"role": "assistant", "content": ""})
conv.reference.append({"chunks": [], "doc_aggs": []})

def fillin_conv(ans):
nonlocal conv
if not conv.reference:
conv.reference.append(ans["reference"])
else: conv.reference[-1] = ans["reference"]
conv.message[-1] = {"role": "assistant", "content": ans["answer"]}

def stream():
nonlocal dia, msg, req, conv
try:
for ans in chat(dia, msg, True, **req):
fillin_conv(ans)
yield "data:"+json.dumps({"retcode": 0, "retmsg": "", "data": ans}, ensure_ascii=False) + "\n\n"
ConversationService.update_by_id(conv.id, conv.to_dict())
except Exception as e:
yield "data:" + json.dumps({"retcode": 500, "retmsg": str(e),
"data": {"answer": "**ERROR**: "+str(e), "reference": []}},
ensure_ascii=False) + "\n\n"
yield "data:"+json.dumps({"retcode": 0, "retmsg": "", "data": True}, ensure_ascii=False) + "\n\n"

if req.get("stream", True):
resp = Response(stream(), mimetype="text/event-stream")
resp.headers.add_header("Cache-control", "no-cache")
resp.headers.add_header("Connection", "keep-alive")
resp.headers.add_header("X-Accel-Buffering", "no")
resp.headers.add_header("Content-Type", "text/event-stream; charset=utf-8")
return resp

else:
answer = None
for ans in chat(dia, msg, **req):
answer = ans
fillin_conv(ans)
ConversationService.update_by_id(conv.id, conv.to_dict())
break
return get_json_result(data=answer)
except Exception as e:
return server_error_response(e)
@ -33,15 +33,13 @@ def set_dialog():
|
||||
name = req.get("name", "New Dialog")
|
||||
description = req.get("description", "A helpful Dialog")
|
||||
top_n = req.get("top_n", 6)
|
||||
top_k = req.get("top_k", 1024)
|
||||
rerank_id = req.get("rerank_id", "")
|
||||
if not rerank_id: req["rerank_id"] = ""
|
||||
similarity_threshold = req.get("similarity_threshold", 0.1)
|
||||
vector_similarity_weight = req.get("vector_similarity_weight", 0.3)
|
||||
llm_setting = req.get("llm_setting", {
|
||||
"temperature": 0.1,
|
||||
"top_p": 0.3,
|
||||
"frequency_penalty": 0.7,
|
||||
"presence_penalty": 0.4,
|
||||
"max_tokens": 215
|
||||
})
|
||||
if vector_similarity_weight is None: vector_similarity_weight = 0.3
|
||||
llm_setting = req.get("llm_setting", {})
|
||||
default_prompt = {
|
||||
"system": """你是一个智能助手,请总结知识库的内容来回答问题,请列举知识库中的数据详细回答。当所有知识库内容都与问题无关时,你的回答必须包括“知识库中未找到您要的答案!”这句话。回答需要考虑聊天历史。
|
||||
以下是知识库:
|
||||
@ -89,6 +87,8 @@ def set_dialog():
|
||||
"llm_setting": llm_setting,
|
||||
"prompt_config": prompt_config,
|
||||
"top_n": top_n,
|
||||
"top_k": top_k,
|
||||
"rerank_id": rerank_id,
|
||||
"similarity_threshold": similarity_threshold,
|
||||
"vector_similarity_weight": vector_similarity_weight
|
||||
}
|
||||
@ -142,7 +142,7 @@ def get_kb_names(kb_ids):
|
||||
|
||||
@manager.route('/list', methods=['GET'])
|
||||
@login_required
|
||||
def list():
|
||||
def list_dialogs():
|
||||
try:
|
||||
diags = DialogService.query(
|
||||
tenant_id=current_user.id,
|
||||
|
||||
@ -14,7 +14,6 @@
|
||||
# limitations under the License
|
||||
#
|
||||
|
||||
import base64
|
||||
import os
|
||||
import pathlib
|
||||
import re
|
||||
@ -23,13 +22,18 @@ import flask
|
||||
from elasticsearch_dsl import Q
|
||||
from flask import request
|
||||
from flask_login import login_required, current_user
|
||||
|
||||
from api.db.db_models import Task, File
|
||||
from api.db.services.file2document_service import File2DocumentService
|
||||
from api.db.services.file_service import FileService
|
||||
from api.db.services.task_service import TaskService, queue_tasks
|
||||
from rag.nlp import search
|
||||
from rag.utils import ELASTICSEARCH
|
||||
from rag.utils.es_conn import ELASTICSEARCH
|
||||
from api.db.services import duplicate_name
|
||||
from api.db.services.knowledgebase_service import KnowledgebaseService
|
||||
from api.utils.api_utils import server_error_response, get_data_error_result, validate_request
|
||||
from api.utils import get_uuid
|
||||
from api.db import FileType, TaskStatus, ParserType
|
||||
from api.db import FileType, TaskStatus, ParserType, FileSource
|
||||
from api.db.services.document_service import DocumentService
|
||||
from api.settings import RetCode
|
||||
from api.utils.api_utils import get_json_result
|
||||
@ -48,55 +52,68 @@ def upload():
|
||||
if 'file' not in request.files:
|
||||
return get_json_result(
|
||||
data=False, retmsg='No file part!', retcode=RetCode.ARGUMENT_ERROR)
|
||||
file = request.files['file']
|
||||
if file.filename == '':
|
||||
|
||||
file_objs = request.files.getlist('file')
|
||||
for file_obj in file_objs:
|
||||
if file_obj.filename == '':
|
||||
return get_json_result(
|
||||
data=False, retmsg='No file selected!', retcode=RetCode.ARGUMENT_ERROR)
|
||||
|
||||
e, kb = KnowledgebaseService.get_by_id(kb_id)
|
||||
if not e:
|
||||
raise LookupError("Can't find this knowledgebase!")
|
||||
|
||||
root_folder = FileService.get_root_folder(current_user.id)
|
||||
pf_id = root_folder["id"]
|
||||
FileService.init_knowledgebase_docs(pf_id, current_user.id)
|
||||
kb_root_folder = FileService.get_kb_folder(current_user.id)
|
||||
kb_folder = FileService.new_a_file_from_kb(kb.tenant_id, kb.name, kb_root_folder["id"])
|
||||
|
||||
err = []
|
||||
for file in file_objs:
|
||||
try:
|
||||
MAX_FILE_NUM_PER_USER = int(os.environ.get('MAX_FILE_NUM_PER_USER', 0))
|
||||
if MAX_FILE_NUM_PER_USER > 0 and DocumentService.get_doc_count(kb.tenant_id) >= MAX_FILE_NUM_PER_USER:
|
||||
raise RuntimeError("Exceed the maximum file number of a free user!")
|
||||
|
||||
filename = duplicate_name(
|
||||
DocumentService.query,
|
||||
name=file.filename,
|
||||
kb_id=kb.id)
|
||||
filetype = filename_type(filename)
|
||||
if filetype == FileType.OTHER.value:
|
||||
raise RuntimeError("This type of file has not been supported yet!")
|
||||
|
||||
location = filename
|
||||
while MINIO.obj_exist(kb_id, location):
|
||||
location += "_"
|
||||
blob = file.read()
|
||||
MINIO.put(kb_id, location, blob)
|
||||
doc = {
|
||||
"id": get_uuid(),
|
||||
"kb_id": kb.id,
|
||||
"parser_id": kb.parser_id,
|
||||
"parser_config": kb.parser_config,
|
||||
"created_by": current_user.id,
|
||||
"type": filetype,
|
||||
"name": filename,
|
||||
"location": location,
|
||||
"size": len(blob),
|
||||
"thumbnail": thumbnail(filename, blob)
|
||||
}
|
||||
if doc["type"] == FileType.VISUAL:
|
||||
doc["parser_id"] = ParserType.PICTURE.value
|
||||
if re.search(r"\.(ppt|pptx|pages)$", filename):
|
||||
doc["parser_id"] = ParserType.PRESENTATION.value
|
||||
DocumentService.insert(doc)
|
||||
|
||||
FileService.add_file_from_kb(doc, kb_folder["id"], kb.tenant_id)
|
||||
except Exception as e:
|
||||
err.append(file.filename + ": " + str(e))
|
||||
if err:
|
||||
return get_json_result(
|
||||
data=False, retmsg='No file selected!', retcode=RetCode.ARGUMENT_ERROR)
|
||||
|
||||
try:
|
||||
e, kb = KnowledgebaseService.get_by_id(kb_id)
|
||||
if not e:
|
||||
return get_data_error_result(
|
||||
retmsg="Can't find this knowledgebase!")
|
||||
MAX_FILE_NUM_PER_USER = int(os.environ.get('MAX_FILE_NUM_PER_USER', 0))
|
||||
if MAX_FILE_NUM_PER_USER > 0 and DocumentService.get_doc_count(kb.tenant_id) >= MAX_FILE_NUM_PER_USER:
|
||||
return get_data_error_result(
|
||||
retmsg="Exceed the maximum file number of a free user!")
|
||||
|
||||
filename = duplicate_name(
|
||||
DocumentService.query,
|
||||
name=file.filename,
|
||||
kb_id=kb.id)
|
||||
filetype = filename_type(filename)
|
||||
if not filetype:
|
||||
return get_data_error_result(
|
||||
retmsg="This type of file has not been supported yet!")
|
||||
|
||||
location = filename
|
||||
while MINIO.obj_exist(kb_id, location):
|
||||
location += "_"
|
||||
blob = request.files['file'].read()
|
||||
MINIO.put(kb_id, location, blob)
|
||||
doc = {
|
||||
"id": get_uuid(),
|
||||
"kb_id": kb.id,
|
||||
"parser_id": kb.parser_id,
|
||||
"parser_config": kb.parser_config,
|
||||
"created_by": current_user.id,
|
||||
"type": filetype,
|
||||
"name": filename,
|
||||
"location": location,
|
||||
"size": len(blob),
|
||||
"thumbnail": thumbnail(filename, blob)
|
||||
}
|
||||
if doc["type"] == FileType.VISUAL:
|
||||
doc["parser_id"] = ParserType.PICTURE.value
|
||||
if re.search(r"\.(ppt|pptx|pages)$", filename):
|
||||
doc["parser_id"] = ParserType.PRESENTATION.value
|
||||
doc = DocumentService.insert(doc)
|
||||
return get_json_result(data=doc.to_json())
|
||||
except Exception as e:
|
||||
return server_error_response(e)
|
||||
data=False, retmsg="\n".join(err), retcode=RetCode.SERVER_ERROR)
|
||||
return get_json_result(data=True)
|
||||
|
||||
|
||||
@manager.route('/create', methods=['POST'])
|
||||
@ -137,7 +154,7 @@ def create():
|
||||
|
||||
@manager.route('/list', methods=['GET'])
|
||||
@login_required
|
||||
def list():
|
||||
def list_docs():
|
||||
kb_id = request.args.get("kb_id")
|
||||
if not kb_id:
|
||||
return get_json_result(
|
||||
@ -218,26 +235,39 @@ def change_status():
|
||||
@validate_request("doc_id")
|
||||
def rm():
|
||||
req = request.json
|
||||
try:
|
||||
e, doc = DocumentService.get_by_id(req["doc_id"])
|
||||
if not e:
|
||||
return get_data_error_result(retmsg="Document not found!")
|
||||
tenant_id = DocumentService.get_tenant_id(req["doc_id"])
|
||||
if not tenant_id:
|
||||
return get_data_error_result(retmsg="Tenant not found!")
|
||||
ELASTICSEARCH.deleteByQuery(
|
||||
Q("match", doc_id=doc.id), idxnm=search.index_name(tenant_id))
|
||||
doc_ids = req["doc_id"]
|
||||
if isinstance(doc_ids, str): doc_ids = [doc_ids]
|
||||
root_folder = FileService.get_root_folder(current_user.id)
|
||||
pf_id = root_folder["id"]
|
||||
FileService.init_knowledgebase_docs(pf_id, current_user.id)
|
||||
errors = ""
|
||||
for doc_id in doc_ids:
|
||||
try:
|
||||
e, doc = DocumentService.get_by_id(doc_id)
|
||||
if not e:
|
||||
return get_data_error_result(retmsg="Document not found!")
|
||||
tenant_id = DocumentService.get_tenant_id(doc_id)
|
||||
if not tenant_id:
|
||||
return get_data_error_result(retmsg="Tenant not found!")
|
||||
|
||||
DocumentService.increment_chunk_num(
|
||||
doc.id, doc.kb_id, doc.token_num * -1, doc.chunk_num * -1, 0)
|
||||
if not DocumentService.delete(doc):
|
||||
return get_data_error_result(
|
||||
retmsg="Database error (Document removal)!")
|
||||
b, n = File2DocumentService.get_minio_address(doc_id=doc_id)
|
||||
|
||||
MINIO.rm(doc.kb_id, doc.location)
|
||||
return get_json_result(data=True)
|
||||
except Exception as e:
|
||||
return server_error_response(e)
|
||||
if not DocumentService.remove_document(doc, tenant_id):
|
||||
return get_data_error_result(
|
||||
retmsg="Database error (Document removal)!")
|
||||
|
||||
f2d = File2DocumentService.get_by_document_id(doc_id)
|
||||
FileService.filter_delete([File.source_type == FileSource.KNOWLEDGEBASE, File.id == f2d[0].file_id])
|
||||
File2DocumentService.delete_by_document_id(doc_id)
|
||||
|
||||
MINIO.rm(b, n)
|
||||
except Exception as e:
|
||||
errors += str(e)
|
||||
|
||||
if errors:
|
||||
return get_json_result(data=False, retmsg=errors, retcode=RetCode.SERVER_ERROR)
|
||||
|
||||
return get_json_result(data=True)
|
||||
|
||||
|
||||
@manager.route('/run', methods=['POST'])
|
||||
@ -259,6 +289,14 @@ def run():
|
||||
return get_data_error_result(retmsg="Tenant not found!")
|
||||
ELASTICSEARCH.deleteByQuery(
|
||||
Q("match", doc_id=id), idxnm=search.index_name(tenant_id))
|
||||
|
||||
if str(req["run"]) == TaskStatus.RUNNING.value:
|
||||
TaskService.filter_delete([Task.doc_id == id])
|
||||
e, doc = DocumentService.get_by_id(id)
|
||||
doc = doc.to_dict()
|
||||
doc["tenant_id"] = tenant_id
|
||||
bucket, name = File2DocumentService.get_minio_address(doc_id=doc["id"])
|
||||
queue_tasks(doc, bucket, name)
|
||||
|
||||
return get_json_result(data=True)
|
||||
except Exception as e:
|
||||
@ -280,15 +318,21 @@ def rename():
|
||||
data=False,
|
||||
retmsg="The extension of file can't be changed",
|
||||
retcode=RetCode.ARGUMENT_ERROR)
|
||||
if DocumentService.query(name=req["name"], kb_id=doc.kb_id):
|
||||
return get_data_error_result(
|
||||
retmsg="Duplicated document name in the same knowledgebase.")
|
||||
for d in DocumentService.query(name=req["name"], kb_id=doc.kb_id):
|
||||
if d.name == req["name"]:
|
||||
return get_data_error_result(
|
||||
retmsg="Duplicated document name in the same knowledgebase.")
|
||||
|
||||
if not DocumentService.update_by_id(
|
||||
req["doc_id"], {"name": req["name"]}):
|
||||
return get_data_error_result(
|
||||
retmsg="Database error (Document rename)!")
|
||||
|
||||
informs = File2DocumentService.get_by_document_id(req["doc_id"])
|
||||
if informs:
|
||||
e, file = FileService.get_by_id(informs[0].file_id)
|
||||
FileService.update_by_id(file.id, {"name": req["name"]})
|
||||
|
||||
return get_json_result(data=True)
|
||||
except Exception as e:
|
||||
return server_error_response(e)
|
||||
@ -302,7 +346,9 @@ def get(doc_id):
|
||||
if not e:
|
||||
return get_data_error_result(retmsg="Document not found!")
|
||||
|
||||
response = flask.make_response(MINIO.get(doc.kb_id, doc.location))
|
||||
b,n = File2DocumentService.get_minio_address(doc_id=doc_id)
|
||||
response = flask.make_response(MINIO.get(b, n))
|
||||
|
||||
ext = re.search(r"\.([^.]+)$", doc.name)
|
||||
if ext:
|
||||
if doc.type == FileType.VISUAL.value:
|
||||
@ -338,7 +384,8 @@ def change_parser():
|
||||
return get_data_error_result(retmsg="Not supported yet!")
|
||||
|
||||
e = DocumentService.update_by_id(doc.id,
|
||||
{"parser_id": req["parser_id"], "progress": 0, "progress_msg": "", "run": "0"})
|
||||
{"parser_id": req["parser_id"], "progress": 0, "progress_msg": "",
|
||||
"run": TaskStatus.UNSTART.value})
|
||||
if not e:
|
||||
return get_data_error_result(retmsg="Document not found!")
|
||||
if "parser_config" in req:
|
||||
|
||||
api/apps/file2document_app.py (new file, 129 lines)
@ -0,0 +1,129 @@
|
||||
#
|
||||
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License
|
||||
#
|
||||
from elasticsearch_dsl import Q
|
||||
|
||||
from api.db.db_models import File2Document
|
||||
from api.db.services.file2document_service import File2DocumentService
|
||||
from api.db.services.file_service import FileService
|
||||
|
||||
from flask import request
|
||||
from flask_login import login_required, current_user
|
||||
from api.db.services.knowledgebase_service import KnowledgebaseService
|
||||
from api.utils.api_utils import server_error_response, get_data_error_result, validate_request
|
||||
from api.utils import get_uuid
|
||||
from api.db import FileType
|
||||
from api.db.services.document_service import DocumentService
|
||||
from api.settings import RetCode
|
||||
from api.utils.api_utils import get_json_result
|
||||
from rag.nlp import search
|
||||
from rag.utils.es_conn import ELASTICSEARCH
|
||||
|
||||
|
||||
@manager.route('/convert', methods=['POST'])
|
||||
@login_required
|
||||
@validate_request("file_ids", "kb_ids")
|
||||
def convert():
|
||||
req = request.json
|
||||
kb_ids = req["kb_ids"]
|
||||
file_ids = req["file_ids"]
|
||||
file2documents = []
|
||||
|
||||
try:
|
||||
for file_id in file_ids:
|
||||
e, file = FileService.get_by_id(file_id)
|
||||
file_ids_list = [file_id]
|
||||
if file.type == FileType.FOLDER.value:
|
||||
file_ids_list = FileService.get_all_innermost_file_ids(file_id, [])
|
||||
for id in file_ids_list:
|
||||
informs = File2DocumentService.get_by_file_id(id)
|
||||
# delete
|
||||
for inform in informs:
|
||||
doc_id = inform.document_id
|
||||
e, doc = DocumentService.get_by_id(doc_id)
|
||||
if not e:
|
||||
return get_data_error_result(retmsg="Document not found!")
|
||||
tenant_id = DocumentService.get_tenant_id(doc_id)
|
||||
if not tenant_id:
|
||||
return get_data_error_result(retmsg="Tenant not found!")
|
||||
if not DocumentService.remove_document(doc, tenant_id):
|
||||
return get_data_error_result(
|
||||
retmsg="Database error (Document removal)!")
|
||||
File2DocumentService.delete_by_file_id(id)
|
||||
|
||||
# insert
|
||||
for kb_id in kb_ids:
|
||||
e, kb = KnowledgebaseService.get_by_id(kb_id)
|
||||
if not e:
|
||||
return get_data_error_result(
|
||||
retmsg="Can't find this knowledgebase!")
|
||||
e, file = FileService.get_by_id(id)
|
||||
if not e:
|
||||
return get_data_error_result(
|
||||
retmsg="Can't find this file!")
|
||||
|
||||
doc = DocumentService.insert({
|
||||
"id": get_uuid(),
|
||||
"kb_id": kb.id,
|
||||
"parser_id": kb.parser_id,
|
||||
"parser_config": kb.parser_config,
|
||||
"created_by": current_user.id,
|
||||
"type": file.type,
|
||||
"name": file.name,
|
||||
"location": file.location,
|
||||
"size": file.size
|
||||
})
|
||||
file2document = File2DocumentService.insert({
|
||||
"id": get_uuid(),
|
||||
"file_id": id,
|
||||
"document_id": doc.id,
|
||||
})
|
||||
file2documents.append(file2document.to_json())
|
||||
return get_json_result(data=file2documents)
|
||||
except Exception as e:
|
||||
return server_error_response(e)
|
||||
|
||||
|
||||
@manager.route('/rm', methods=['POST'])
|
||||
@login_required
|
||||
@validate_request("file_ids")
|
||||
def rm():
|
||||
req = request.json
|
||||
file_ids = req["file_ids"]
|
||||
if not file_ids:
|
||||
return get_json_result(
|
||||
data=False, retmsg='Lack of "Files ID"', retcode=RetCode.ARGUMENT_ERROR)
|
||||
try:
|
||||
for file_id in file_ids:
|
||||
informs = File2DocumentService.get_by_file_id(file_id)
|
||||
if not informs:
|
||||
return get_data_error_result(retmsg="Inform not found!")
|
||||
for inform in informs:
|
||||
if not inform:
|
||||
return get_data_error_result(retmsg="Inform not found!")
|
||||
File2DocumentService.delete_by_file_id(file_id)
|
||||
doc_id = inform.document_id
|
||||
e, doc = DocumentService.get_by_id(doc_id)
|
||||
if not e:
|
||||
return get_data_error_result(retmsg="Document not found!")
|
||||
tenant_id = DocumentService.get_tenant_id(doc_id)
|
||||
if not tenant_id:
|
||||
return get_data_error_result(retmsg="Tenant not found!")
|
||||
if not DocumentService.remove_document(doc, tenant_id):
|
||||
return get_data_error_result(
|
||||
retmsg="Database error (Document removal)!")
|
||||
return get_json_result(data=True)
|
||||
except Exception as e:
|
||||
return server_error_response(e)
|
||||
api/apps/file_app.py (new file, 347 lines)
@ -0,0 +1,347 @@
|
||||
#
|
||||
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License
|
||||
#
|
||||
import os
|
||||
import pathlib
|
||||
import re
|
||||
|
||||
import flask
|
||||
from elasticsearch_dsl import Q
|
||||
from flask import request
|
||||
from flask_login import login_required, current_user
|
||||
|
||||
from api.db.services.document_service import DocumentService
|
||||
from api.db.services.file2document_service import File2DocumentService
|
||||
from api.utils.api_utils import server_error_response, get_data_error_result, validate_request
|
||||
from api.utils import get_uuid
|
||||
from api.db import FileType, FileSource
|
||||
from api.db.services import duplicate_name
|
||||
from api.db.services.file_service import FileService
|
||||
from api.settings import RetCode
|
||||
from api.utils.api_utils import get_json_result
|
||||
from api.utils.file_utils import filename_type
|
||||
from rag.nlp import search
|
||||
from rag.utils.es_conn import ELASTICSEARCH
|
||||
from rag.utils.minio_conn import MINIO
|
||||
|
||||
|
||||
@manager.route('/upload', methods=['POST'])
|
||||
@login_required
|
||||
# @validate_request("parent_id")
|
||||
def upload():
|
||||
pf_id = request.form.get("parent_id")
|
||||
|
||||
if not pf_id:
|
||||
root_folder = FileService.get_root_folder(current_user.id)
|
||||
pf_id = root_folder["id"]
|
||||
|
||||
if 'file' not in request.files:
|
||||
return get_json_result(
|
||||
data=False, retmsg='No file part!', retcode=RetCode.ARGUMENT_ERROR)
|
||||
file_objs = request.files.getlist('file')
|
||||
|
||||
for file_obj in file_objs:
|
||||
if file_obj.filename == '':
|
||||
return get_json_result(
|
||||
data=False, retmsg='No file selected!', retcode=RetCode.ARGUMENT_ERROR)
|
||||
file_res = []
|
||||
try:
|
||||
for file_obj in file_objs:
|
||||
e, file = FileService.get_by_id(pf_id)
|
||||
if not e:
|
||||
return get_data_error_result(
|
||||
retmsg="Can't find this folder!")
|
||||
MAX_FILE_NUM_PER_USER = int(os.environ.get('MAX_FILE_NUM_PER_USER', 0))
|
||||
if MAX_FILE_NUM_PER_USER > 0 and DocumentService.get_doc_count(current_user.id) >= MAX_FILE_NUM_PER_USER:
|
||||
return get_data_error_result(
|
||||
retmsg="Exceed the maximum file number of a free user!")
|
||||
|
||||
# split file name path
|
||||
if not file_obj.filename:
|
||||
e, file = FileService.get_by_id(pf_id)
|
||||
file_obj_names = [file.name, file_obj.filename]
|
||||
else:
|
||||
full_path = '/' + file_obj.filename
|
||||
file_obj_names = full_path.split('/')
|
||||
file_len = len(file_obj_names)
|
||||
|
||||
# get folder
|
||||
file_id_list = FileService.get_id_list_by_id(pf_id, file_obj_names, 1, [pf_id])
|
||||
len_id_list = len(file_id_list)
|
||||
|
||||
# create folder
|
||||
if file_len != len_id_list:
|
||||
e, file = FileService.get_by_id(file_id_list[len_id_list - 1])
|
||||
if not e:
|
||||
return get_data_error_result(retmsg="Folder not found!")
|
||||
last_folder = FileService.create_folder(file, file_id_list[len_id_list - 1], file_obj_names,
|
||||
len_id_list)
|
||||
else:
|
||||
e, file = FileService.get_by_id(file_id_list[len_id_list - 2])
|
||||
if not e:
|
||||
return get_data_error_result(retmsg="Folder not found!")
|
||||
last_folder = FileService.create_folder(file, file_id_list[len_id_list - 2], file_obj_names,
|
||||
len_id_list)
|
||||
|
||||
# file type
|
||||
filetype = filename_type(file_obj_names[file_len - 1])
|
||||
location = file_obj_names[file_len - 1]
|
||||
while MINIO.obj_exist(last_folder.id, location):
|
||||
location += "_"
|
||||
blob = file_obj.read()
|
||||
filename = duplicate_name(
|
||||
FileService.query,
|
||||
name=file_obj_names[file_len - 1],
|
||||
parent_id=last_folder.id)
|
||||
file = {
|
||||
"id": get_uuid(),
|
||||
"parent_id": last_folder.id,
|
||||
"tenant_id": current_user.id,
|
||||
"created_by": current_user.id,
|
||||
"type": filetype,
|
||||
"name": filename,
|
||||
"location": location,
|
||||
"size": len(blob),
|
||||
}
|
||||
file = FileService.insert(file)
|
||||
MINIO.put(last_folder.id, location, blob)
|
||||
file_res.append(file.to_json())
|
||||
return get_json_result(data=file_res)
|
||||
except Exception as e:
|
||||
return server_error_response(e)
|
||||
|
||||
|
||||
@manager.route('/create', methods=['POST'])
|
||||
@login_required
|
||||
@validate_request("name")
|
||||
def create():
|
||||
req = request.json
|
||||
pf_id = request.json.get("parent_id")
|
||||
input_file_type = request.json.get("type")
|
||||
if not pf_id:
|
||||
root_folder = FileService.get_root_folder(current_user.id)
|
||||
pf_id = root_folder["id"]
|
||||
|
||||
try:
|
||||
if not FileService.is_parent_folder_exist(pf_id):
|
||||
return get_json_result(
|
||||
data=False, retmsg="Parent Folder Doesn't Exist!", retcode=RetCode.OPERATING_ERROR)
|
||||
if FileService.query(name=req["name"], parent_id=pf_id):
|
||||
return get_data_error_result(
|
||||
retmsg="Duplicated folder name in the same folder.")
|
||||
|
||||
if input_file_type == FileType.FOLDER.value:
|
||||
file_type = FileType.FOLDER.value
|
||||
else:
|
||||
file_type = FileType.VIRTUAL.value
|
||||
|
||||
file = FileService.insert({
|
||||
"id": get_uuid(),
|
||||
"parent_id": pf_id,
|
||||
"tenant_id": current_user.id,
|
||||
"created_by": current_user.id,
|
||||
"name": req["name"],
|
||||
"location": "",
|
||||
"size": 0,
|
||||
"type": file_type
|
||||
})
|
||||
|
||||
return get_json_result(data=file.to_json())
|
||||
except Exception as e:
|
||||
return server_error_response(e)
|
||||
|
||||
|
||||
@manager.route('/list', methods=['GET'])
|
||||
@login_required
|
||||
def list_files():
|
||||
pf_id = request.args.get("parent_id")
|
||||
|
||||
keywords = request.args.get("keywords", "")
|
||||
|
||||
page_number = int(request.args.get("page", 1))
|
||||
items_per_page = int(request.args.get("page_size", 15))
|
||||
orderby = request.args.get("orderby", "create_time")
|
||||
desc = request.args.get("desc", True)
|
||||
if not pf_id:
|
||||
root_folder = FileService.get_root_folder(current_user.id)
|
||||
pf_id = root_folder["id"]
|
||||
FileService.init_knowledgebase_docs(pf_id, current_user.id)
|
||||
try:
|
||||
e, file = FileService.get_by_id(pf_id)
|
||||
if not e:
|
||||
return get_data_error_result(retmsg="Folder not found!")
|
||||
|
||||
files, total = FileService.get_by_pf_id(
|
||||
current_user.id, pf_id, page_number, items_per_page, orderby, desc, keywords)
|
||||
|
||||
parent_folder = FileService.get_parent_folder(pf_id)
|
||||
if not FileService.get_parent_folder(pf_id):
|
||||
return get_json_result(retmsg="File not found!")
|
||||
|
||||
return get_json_result(data={"total": total, "files": files, "parent_folder": parent_folder.to_json()})
|
||||
except Exception as e:
|
||||
return server_error_response(e)
|
||||
|
||||
|
||||
@manager.route('/root_folder', methods=['GET'])
|
||||
@login_required
|
||||
def get_root_folder():
|
||||
try:
|
||||
root_folder = FileService.get_root_folder(current_user.id)
|
||||
return get_json_result(data={"root_folder": root_folder})
|
||||
except Exception as e:
|
||||
return server_error_response(e)
|
||||
|
||||
|
||||
@manager.route('/parent_folder', methods=['GET'])
|
||||
@login_required
|
||||
def get_parent_folder():
|
||||
file_id = request.args.get("file_id")
|
||||
try:
|
||||
e, file = FileService.get_by_id(file_id)
|
||||
if not e:
|
||||
return get_data_error_result(retmsg="Folder not found!")
|
||||
|
||||
parent_folder = FileService.get_parent_folder(file_id)
|
||||
return get_json_result(data={"parent_folder": parent_folder.to_json()})
|
||||
except Exception as e:
|
||||
return server_error_response(e)
|
||||
|
||||
|
||||
@manager.route('/all_parent_folder', methods=['GET'])
|
||||
@login_required
|
||||
def get_all_parent_folders():
|
||||
file_id = request.args.get("file_id")
|
||||
try:
|
||||
e, file = FileService.get_by_id(file_id)
|
||||
if not e:
|
||||
return get_data_error_result(retmsg="Folder not found!")
|
||||
|
||||
parent_folders = FileService.get_all_parent_folders(file_id)
|
||||
parent_folders_res = []
|
||||
for parent_folder in parent_folders:
|
||||
parent_folders_res.append(parent_folder.to_json())
|
||||
return get_json_result(data={"parent_folders": parent_folders_res})
|
||||
except Exception as e:
|
||||
return server_error_response(e)
|
||||
|
||||
|
||||
@manager.route('/rm', methods=['POST'])
|
||||
@login_required
|
||||
@validate_request("file_ids")
|
||||
def rm():
|
||||
req = request.json
|
||||
file_ids = req["file_ids"]
|
||||
try:
|
||||
for file_id in file_ids:
|
||||
e, file = FileService.get_by_id(file_id)
|
||||
if not e:
|
||||
return get_data_error_result(retmsg="File or Folder not found!")
|
||||
if not file.tenant_id:
|
||||
return get_data_error_result(retmsg="Tenant not found!")
|
||||
if file.source_type == FileSource.KNOWLEDGEBASE:
|
||||
continue
|
||||
|
||||
if file.type == FileType.FOLDER.value:
|
||||
file_id_list = FileService.get_all_innermost_file_ids(file_id, [])
|
||||
for inner_file_id in file_id_list:
|
||||
e, file = FileService.get_by_id(inner_file_id)
|
||||
if not e:
|
||||
return get_data_error_result(retmsg="File not found!")
|
||||
MINIO.rm(file.parent_id, file.location)
|
||||
FileService.delete_folder_by_pf_id(current_user.id, file_id)
|
||||
else:
|
||||
if not FileService.delete(file):
|
||||
return get_data_error_result(
|
||||
retmsg="Database error (File removal)!")
|
||||
|
||||
# delete file2document
|
||||
informs = File2DocumentService.get_by_file_id(file_id)
|
||||
for inform in informs:
|
||||
doc_id = inform.document_id
|
||||
e, doc = DocumentService.get_by_id(doc_id)
|
||||
if not e:
|
||||
return get_data_error_result(retmsg="Document not found!")
|
||||
tenant_id = DocumentService.get_tenant_id(doc_id)
|
||||
if not tenant_id:
|
||||
return get_data_error_result(retmsg="Tenant not found!")
|
||||
if not DocumentService.remove_document(doc, tenant_id):
|
||||
return get_data_error_result(
|
||||
retmsg="Database error (Document removal)!")
|
||||
File2DocumentService.delete_by_file_id(file_id)
|
||||
|
||||
return get_json_result(data=True)
|
||||
except Exception as e:
|
||||
return server_error_response(e)
|
||||
|
||||
|
||||
@manager.route('/rename', methods=['POST'])
|
||||
@login_required
|
||||
@validate_request("file_id", "name")
|
||||
def rename():
|
||||
req = request.json
|
||||
try:
|
||||
e, file = FileService.get_by_id(req["file_id"])
|
||||
if not e:
|
||||
return get_data_error_result(retmsg="File not found!")
|
||||
if pathlib.Path(req["name"].lower()).suffix != pathlib.Path(
|
||||
file.name.lower()).suffix:
|
||||
return get_json_result(
|
||||
data=False,
|
||||
retmsg="The extension of file can't be changed",
|
||||
retcode=RetCode.ARGUMENT_ERROR)
|
||||
for file in FileService.query(name=req["name"], pf_id=file.parent_id):
|
||||
if file.name == req["name"]:
|
||||
return get_data_error_result(
|
||||
retmsg="Duplicated file name in the same folder.")
|
||||
|
||||
if not FileService.update_by_id(
|
||||
req["file_id"], {"name": req["name"]}):
|
||||
return get_data_error_result(
|
||||
retmsg="Database error (File rename)!")
|
||||
|
||||
informs = File2DocumentService.get_by_file_id(req["file_id"])
|
||||
if informs:
|
||||
if not DocumentService.update_by_id(
|
||||
informs[0].document_id, {"name": req["name"]}):
|
||||
return get_data_error_result(
|
||||
retmsg="Database error (Document rename)!")
|
||||
|
||||
return get_json_result(data=True)
|
||||
except Exception as e:
|
||||
return server_error_response(e)
|
||||
|
||||
|
||||
@manager.route('/get/<file_id>', methods=['GET'])
|
||||
# @login_required
|
||||
def get(file_id):
|
||||
try:
|
||||
e, file = FileService.get_by_id(file_id)
|
||||
if not e:
|
||||
return get_data_error_result(retmsg="Document not found!")
|
||||
|
||||
response = flask.make_response(MINIO.get(file.parent_id, file.location))
|
||||
ext = re.search(r"\.([^.]+)$", file.name)
|
||||
if ext:
|
||||
if file.type == FileType.VISUAL.value:
|
||||
response.headers.set('Content-Type', 'image/%s' % ext.group(1))
|
||||
else:
|
||||
response.headers.set(
|
||||
'Content-Type',
|
||||
'application/%s' %
|
||||
ext.group(1))
|
||||
return response
|
||||
except Exception as e:
|
||||
return server_error_response(e)
|
||||
@ -19,16 +19,18 @@ from flask_login import login_required, current_user
|
||||
|
||||
from api.db.services import duplicate_name
|
||||
from api.db.services.document_service import DocumentService
|
||||
from api.db.services.file2document_service import File2DocumentService
|
||||
from api.db.services.file_service import FileService
|
||||
from api.db.services.user_service import TenantService, UserTenantService
|
||||
from api.utils.api_utils import server_error_response, get_data_error_result, validate_request
|
||||
from api.utils import get_uuid, get_format_time
|
||||
from api.db import StatusEnum, UserTenantRole
|
||||
from api.db import StatusEnum, UserTenantRole, FileSource
|
||||
from api.db.services.knowledgebase_service import KnowledgebaseService
|
||||
from api.db.db_models import Knowledgebase
|
||||
from api.db.db_models import Knowledgebase, File
|
||||
from api.settings import stat_logger, RetCode
|
||||
from api.utils.api_utils import get_json_result
|
||||
from rag.nlp import search
|
||||
from rag.utils import ELASTICSEARCH
|
||||
from rag.utils.es_conn import ELASTICSEARCH
|
||||
|
||||
|
||||
@manager.route('/create', methods=['post'])
|
||||
@ -109,9 +111,9 @@ def detail():
|
||||
|
||||
@manager.route('/list', methods=['GET'])
|
||||
@login_required
|
||||
def list():
|
||||
def list_kbs():
|
||||
page_number = request.args.get("page", 1)
|
||||
items_per_page = request.args.get("page_size", 15)
|
||||
items_per_page = request.args.get("page_size", 150)
|
||||
orderby = request.args.get("orderby", "create_time")
|
||||
desc = request.args.get("desc", True)
|
||||
try:
|
||||
@ -136,17 +138,14 @@ def rm():
|
||||
data=False, retmsg=f'Only owner of knowledgebase authorized for this operation.', retcode=RetCode.OPERATING_ERROR)
|
||||
|
||||
for doc in DocumentService.query(kb_id=req["kb_id"]):
|
||||
ELASTICSEARCH.deleteByQuery(
|
||||
Q("match", doc_id=doc.id), idxnm=search.index_name(kbs[0].tenant_id))
|
||||
|
||||
DocumentService.increment_chunk_num(
|
||||
doc.id, doc.kb_id, doc.token_num * -1, doc.chunk_num * -1, 0)
|
||||
if not DocumentService.delete(doc):
|
||||
if not DocumentService.remove_document(doc, kbs[0].tenant_id):
|
||||
return get_data_error_result(
|
||||
retmsg="Database error (Document removal)!")
|
||||
f2d = File2DocumentService.get_by_document_id(doc.id)
|
||||
FileService.filter_delete([File.source_type == FileSource.KNOWLEDGEBASE, File.id == f2d[0].file_id])
|
||||
File2DocumentService.delete_by_document_id(doc.id)
|
||||
|
||||
if not KnowledgebaseService.update_by_id(
|
||||
req["kb_id"], {"status": StatusEnum.INVALID.value}):
|
||||
if not KnowledgebaseService.delete_by_id(req["kb_id"]):
|
||||
return get_data_error_result(
|
||||
retmsg="Database error (Knowledgebase removal)!")
|
||||
return get_json_result(data=True)
|
||||
|
||||
@ -20,7 +20,7 @@ from api.utils.api_utils import server_error_response, get_data_error_result, va
|
||||
from api.db import StatusEnum, LLMType
|
||||
from api.db.db_models import TenantLLM
|
||||
from api.utils.api_utils import get_json_result
|
||||
from rag.llm import EmbeddingModel, ChatModel
|
||||
from rag.llm import EmbeddingModel, ChatModel, RerankModel
|
||||
|
||||
|
||||
@manager.route('/factories', methods=['GET'])
|
||||
@ -28,7 +28,7 @@ from rag.llm import EmbeddingModel, ChatModel
|
||||
def factories():
|
||||
try:
|
||||
fac = LLMFactoriesService.get_all()
|
||||
return get_json_result(data=[f.to_dict() for f in fac if f.name not in ["Youdao", "FastEmbed"]])
|
||||
return get_json_result(data=[f.to_dict() for f in fac if f.name not in ["Youdao", "FastEmbed", "BAAI"]])
|
||||
except Exception as e:
|
||||
return server_error_response(e)
|
||||
|
||||
@ -39,17 +39,18 @@ def factories():
|
||||
def set_api_key():
|
||||
req = request.json
|
||||
# test if api key works
|
||||
chat_passed = False
|
||||
chat_passed, embd_passed, rerank_passed = False, False, False
|
||||
factory = req["llm_factory"]
|
||||
msg = ""
|
||||
for llm in LLMService.query(fid=factory):
|
||||
if llm.model_type == LLMType.EMBEDDING.value:
|
||||
if not embd_passed and llm.model_type == LLMType.EMBEDDING.value:
|
||||
mdl = EmbeddingModel[factory](
|
||||
req["api_key"], llm.llm_name, base_url=req.get("base_url"))
|
||||
try:
|
||||
arr, tc = mdl.encode(["Test if the api key is available"])
|
||||
if len(arr[0]) == 0 or tc == 0:
|
||||
raise Exception("Fail")
|
||||
embd_passed = True
|
||||
except Exception as e:
|
||||
msg += f"\nFail to access embedding model({llm.llm_name}) using this api key." + str(e)
|
||||
elif not chat_passed and llm.model_type == LLMType.CHAT.value:
|
||||
@ -60,10 +61,21 @@ def set_api_key():
|
||||
"temperature": 0.9})
|
||||
if not tc:
|
||||
raise Exception(m)
|
||||
chat_passed = True
|
||||
except Exception as e:
|
||||
msg += f"\nFail to access model({llm.llm_name}) using this api key." + str(
|
||||
e)
|
||||
chat_passed = True
|
||||
elif not rerank_passed and llm.model_type == LLMType.RERANK:
|
||||
mdl = RerankModel[factory](
|
||||
req["api_key"], llm.llm_name, base_url=req.get("base_url"))
|
||||
try:
|
||||
arr, tc = mdl.similarity("What's the weather?", ["Is it sunny today?"])
|
||||
if len(arr) == 0 or tc == 0:
|
||||
raise Exception("Fail")
|
||||
except Exception as e:
|
||||
msg += f"\nFail to access model({llm.llm_name}) using this api key." + str(
|
||||
e)
|
||||
rerank_passed = True
|
||||
|
||||
if msg:
|
||||
return get_data_error_result(retmsg=msg)
|
||||
@ -96,16 +108,29 @@ def set_api_key():
|
||||
@validate_request("llm_factory", "llm_name", "model_type")
|
||||
def add_llm():
|
||||
req = request.json
|
||||
factory = req["llm_factory"]
|
||||
# For VolcEngine, due to its special authentication method
|
||||
# Assemble volc_ak, volc_sk, endpoint_id into api_key
|
||||
if factory == "VolcEngine":
|
||||
temp = list(eval(req["llm_name"]).items())[0]
|
||||
llm_name = temp[0]
|
||||
endpoint_id = temp[1]
|
||||
api_key = '{' + f'"volc_ak": "{req.get("volc_ak", "")}", ' \
|
||||
f'"volc_sk": "{req.get("volc_sk", "")}", ' \
|
||||
f'"ep_id": "{endpoint_id}", ' + '}'
|
||||
else:
|
||||
llm_name = req["llm_name"]
|
||||
api_key = "xxxxxxxxxxxxxxx"
|
||||
|
||||
llm = {
|
||||
"tenant_id": current_user.id,
|
||||
"llm_factory": req["llm_factory"],
|
||||
"llm_factory": factory,
|
||||
"model_type": req["model_type"],
|
||||
"llm_name": req["llm_name"],
|
||||
"llm_name": llm_name,
|
||||
"api_base": req.get("api_base", ""),
|
||||
"api_key": "xxxxxxxxxxxxxxx"
|
||||
"api_key": api_key
|
||||
}
|
||||
|
||||
factory = req["llm_factory"]
|
||||
msg = ""
|
||||
if llm["model_type"] == LLMType.EMBEDDING.value:
|
||||
mdl = EmbeddingModel[factory](
|
||||
@ -118,7 +143,10 @@ def add_llm():
|
||||
msg += f"\nFail to access embedding model({llm['llm_name']})." + str(e)
|
||||
elif llm["model_type"] == LLMType.CHAT.value:
|
||||
mdl = ChatModel[factory](
|
||||
key=None, model_name=llm["llm_name"], base_url=llm["api_base"])
|
||||
key=llm['api_key'] if factory == "VolcEngine" else None,
|
||||
model_name=llm["llm_name"],
|
||||
base_url=llm["api_base"]
|
||||
)
|
||||
try:
|
||||
m, tc = mdl.chat(None, [{"role": "user", "content": "Hello! How are you doing!"}], {
|
||||
"temperature": 0.9})
|
||||
@ -134,7 +162,6 @@ def add_llm():
|
||||
if msg:
|
||||
return get_data_error_result(retmsg=msg)
|
||||
|
||||
|
||||
if not TenantLLMService.filter_update(
|
||||
[TenantLLM.tenant_id == current_user.id, TenantLLM.llm_factory == factory, TenantLLM.llm_name == llm["llm_name"]], llm):
|
||||
TenantLLMService.save(**llm)
|
||||
@ -142,6 +169,16 @@ def add_llm():
|
||||
return get_json_result(data=True)
|
||||
|
||||
|
||||
@manager.route('/delete_llm', methods=['POST'])
|
||||
@login_required
|
||||
@validate_request("llm_factory", "llm_name")
|
||||
def delete_llm():
|
||||
req = request.json
|
||||
TenantLLMService.filter_delete(
|
||||
[TenantLLM.tenant_id == current_user.id, TenantLLM.llm_factory == req["llm_factory"], TenantLLM.llm_name == req["llm_name"]])
|
||||
return get_json_result(data=True)
|
||||
|
||||
|
||||
@manager.route('/my_llms', methods=['GET'])
|
||||
@login_required
|
||||
def my_llms():
|
||||
@ -165,7 +202,7 @@ def my_llms():
|
||||
|
||||
@manager.route('/list', methods=['GET'])
|
||||
@login_required
|
||||
def list():
|
||||
def list_app():
|
||||
model_type = request.args.get("model_type")
|
||||
try:
|
||||
objs = TenantLLMService.query(tenant_id=current_user.id)
|
||||
@ -174,7 +211,7 @@ def list():
|
||||
llms = [m.to_dict()
|
||||
for m in llms if m.status == StatusEnum.VALID.value]
|
||||
for m in llms:
|
||||
m["available"] = m["fid"] in facts or m["llm_name"].lower() == "flag-embedding" or m["fid"] in ["Youdao","FastEmbed"]
|
||||
m["available"] = m["fid"] in facts or m["llm_name"].lower() == "flag-embedding" or m["fid"] in ["Youdao","FastEmbed", "BAAI"]
|
||||
|
||||
llm_set = set([m["llm_name"] for m in llms])
|
||||
for o in objs:
|
||||
@ -184,7 +221,7 @@ def list():
|
||||
|
||||
res = {}
|
||||
for m in llms:
|
||||
if model_type and m["model_type"] != model_type:
|
||||
if model_type and m["model_type"].find(model_type)<0:
|
||||
continue
|
||||
if m["fid"] not in res:
|
||||
res[m["fid"]] = []
|
||||
|
||||
api/apps/system_app.py (new file, 68 lines)
@ -0,0 +1,68 @@
#
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License
#
from flask_login import login_required

from api.db.services.knowledgebase_service import KnowledgebaseService
from api.utils.api_utils import get_json_result
from api.versions import get_rag_version
from rag.settings import SVR_QUEUE_NAME
from rag.utils.es_conn import ELASTICSEARCH
from rag.utils.minio_conn import MINIO
from timeit import default_timer as timer

from rag.utils.redis_conn import REDIS_CONN


@manager.route('/version', methods=['GET'])
@login_required
def version():
    return get_json_result(data=get_rag_version())


@manager.route('/status', methods=['GET'])
@login_required
def status():
    res = {}
    st = timer()
    try:
        res["es"] = ELASTICSEARCH.health()
        res["es"]["elapsed"] = "{:.1f}".format((timer() - st)*1000.)
    except Exception as e:
        res["es"] = {"status": "red", "elapsed": "{:.1f}".format((timer() - st)*1000.), "error": str(e)}

    st = timer()
    try:
        MINIO.health()
        res["minio"] = {"status": "green", "elapsed": "{:.1f}".format((timer() - st)*1000.)}
    except Exception as e:
        res["minio"] = {"status": "red", "elapsed": "{:.1f}".format((timer() - st)*1000.), "error": str(e)}

    st = timer()
    try:
        KnowledgebaseService.get_by_id("x")
        res["mysql"] = {"status": "green", "elapsed": "{:.1f}".format((timer() - st)*1000.)}
    except Exception as e:
        res["mysql"] = {"status": "red", "elapsed": "{:.1f}".format((timer() - st)*1000.), "error": str(e)}

    st = timer()
    try:
        qinfo = REDIS_CONN.health(SVR_QUEUE_NAME)
        res["redis"] = {"status": "green", "elapsed": "{:.1f}".format((timer() - st)*1000.),
                        "pending": qinfo.get("pending", 0)}
    except Exception as e:
        res["redis"] = {"status": "red", "elapsed": "{:.1f}".format((timer() - st)*1000.), "error": str(e)}

    return get_json_result(data=res)
@ -13,6 +13,7 @@
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
import json
|
||||
import re
|
||||
from datetime import datetime
|
||||
|
||||
@ -24,10 +25,12 @@ from api.db.db_models import TenantLLM
|
||||
from api.db.services.llm_service import TenantLLMService, LLMService
|
||||
from api.utils.api_utils import server_error_response, validate_request
|
||||
from api.utils import get_uuid, get_format_time, decrypt, download_img, current_timestamp, datetime_format
|
||||
from api.db import UserTenantRole, LLMType
|
||||
from api.settings import RetCode, GITHUB_OAUTH, CHAT_MDL, EMBEDDING_MDL, ASR_MDL, IMAGE2TEXT_MDL, PARSERS, API_KEY, \
|
||||
LLM_FACTORY, LLM_BASE_URL
|
||||
from api.db import UserTenantRole, LLMType, FileType
|
||||
from api.settings import RetCode, GITHUB_OAUTH, FEISHU_OAUTH, CHAT_MDL, EMBEDDING_MDL, ASR_MDL, IMAGE2TEXT_MDL, PARSERS, \
|
||||
API_KEY, \
|
||||
LLM_FACTORY, LLM_BASE_URL, RERANK_MDL
|
||||
from api.db.services.user_service import UserService, TenantService, UserTenantService
|
||||
from api.db.services.file_service import FileService
|
||||
from api.settings import stat_logger
|
||||
from api.utils.api_utils import get_json_result, cors_reponse
|
||||
|
||||
@ -121,6 +124,79 @@ def github_callback():
|
||||
return redirect("/?auth=%s" % user.get_id())
|
||||
|
||||
|
||||
@manager.route('/feishu_callback', methods=['GET'])
|
||||
def feishu_callback():
|
||||
import requests
|
||||
app_access_token_res = requests.post(FEISHU_OAUTH.get("app_access_token_url"), data=json.dumps({
|
||||
"app_id": FEISHU_OAUTH.get("app_id"),
|
||||
"app_secret": FEISHU_OAUTH.get("app_secret")
|
||||
}), headers={"Content-Type": "application/json; charset=utf-8"})
|
||||
app_access_token_res = app_access_token_res.json()
|
||||
if app_access_token_res['code'] != 0:
|
||||
return redirect("/?error=%s" % app_access_token_res)
|
||||
|
||||
res = requests.post(FEISHU_OAUTH.get("user_access_token_url"), data=json.dumps({
|
||||
"grant_type": FEISHU_OAUTH.get("grant_type"),
|
||||
"code": request.args.get('code')
|
||||
}), headers={"Content-Type": "application/json; charset=utf-8",
|
||||
'Authorization': f"Bearer {app_access_token_res['app_access_token']}"})
|
||||
res = res.json()
|
||||
if res['code'] != 0:
|
||||
return redirect("/?error=%s" % res["message"])
|
||||
|
||||
if "contact:user.email:readonly" not in res["data"]["scope"].split(" "):
|
||||
return redirect("/?error=contact:user.email:readonly not in scope")
|
||||
session["access_token"] = res["data"]["access_token"]
|
||||
session["access_token_from"] = "feishu"
|
||||
userinfo = user_info_from_feishu(session["access_token"])
|
||||
users = UserService.query(email=userinfo["email"])
|
||||
user_id = get_uuid()
|
||||
if not users:
|
||||
try:
|
||||
try:
|
||||
avatar = download_img(userinfo["avatar_url"])
|
||||
except Exception as e:
|
||||
stat_logger.exception(e)
|
||||
avatar = ""
|
||||
users = user_register(user_id, {
|
||||
"access_token": session["access_token"],
|
||||
"email": userinfo["email"],
|
||||
"avatar": avatar,
|
||||
"nickname": userinfo["en_name"],
|
||||
"login_channel": "feishu",
|
||||
"last_login_time": get_format_time(),
|
||||
"is_superuser": False,
|
||||
})
|
||||
if not users:
|
||||
raise Exception('Register user failure.')
|
||||
if len(users) > 1:
|
||||
raise Exception('Same E-mail exist!')
|
||||
user = users[0]
|
||||
login_user(user)
|
||||
return redirect("/?auth=%s" % user.get_id())
|
||||
except Exception as e:
|
||||
rollback_user_registration(user_id)
|
||||
stat_logger.exception(e)
|
||||
return redirect("/?error=%s" % str(e))
|
||||
user = users[0]
|
||||
user.access_token = get_uuid()
|
||||
login_user(user)
|
||||
user.save()
|
||||
return redirect("/?auth=%s" % user.get_id())
|
||||
|
||||
|
||||
def user_info_from_feishu(access_token):
|
||||
import requests
|
||||
headers = {"Content-Type": "application/json; charset=utf-8",
|
||||
'Authorization': f"Bearer {access_token}"}
|
||||
res = requests.get(
|
||||
f"https://open.feishu.cn/open-apis/authen/v1/user_info",
|
||||
headers=headers)
|
||||
user_info = res.json()["data"]
|
||||
user_info["email"] = None if user_info.get("email") == "" else user_info["email"]
|
||||
return user_info
|
||||
|
||||
|
||||
def user_info_from_github(access_token):
|
||||
import requests
|
||||
headers = {"Accept": "application/json",
|
||||
@ -199,7 +275,7 @@ def rollback_user_registration(user_id):
|
||||
except Exception as e:
|
||||
pass
|
||||
try:
|
||||
TenantLLM.delete().where(TenantLLM.tenant_id == user_id).excute()
|
||||
TenantLLM.delete().where(TenantLLM.tenant_id == user_id).execute()
|
||||
except Exception as e:
|
||||
pass
|
||||
|
||||
@ -213,7 +289,8 @@ def user_register(user_id, user):
|
||||
"embd_id": EMBEDDING_MDL,
|
||||
"asr_id": ASR_MDL,
|
||||
"parser_ids": PARSERS,
|
||||
"img2txt_id": IMAGE2TEXT_MDL
|
||||
"img2txt_id": IMAGE2TEXT_MDL,
|
||||
"rerank_id": RERANK_MDL
|
||||
}
|
||||
usr_tenant = {
|
||||
"tenant_id": user_id,
|
||||
@ -221,6 +298,17 @@ def user_register(user_id, user):
|
||||
"invited_by": user_id,
|
||||
"role": UserTenantRole.OWNER
|
||||
}
|
||||
file_id = get_uuid()
|
||||
file = {
|
||||
"id": file_id,
|
||||
"parent_id": file_id,
|
||||
"tenant_id": user_id,
|
||||
"created_by": user_id,
|
||||
"name": "/",
|
||||
"type": FileType.FOLDER.value,
|
||||
"size": 0,
|
||||
"location": "",
|
||||
}
|
||||
tenant_llm = []
|
||||
for llm in LLMService.query(fid=LLM_FACTORY):
|
||||
tenant_llm.append({"tenant_id": user_id,
|
||||
@ -236,6 +324,7 @@ def user_register(user_id, user):
|
||||
TenantService.insert(**tenant)
|
||||
UserTenantService.insert(**usr_tenant)
|
||||
TenantLLMService.insert_many(tenant_llm)
|
||||
FileService.insert(file)
|
||||
return UserService.query(email=user["email"])
|
||||
|
||||
|
||||
|
||||
@ -45,6 +45,8 @@ class FileType(StrEnum):
    VISUAL = 'visual'
    AURAL = 'aural'
    VIRTUAL = 'virtual'
    FOLDER = 'folder'
    OTHER = "other"


class LLMType(StrEnum):
@ -52,6 +54,7 @@ class LLMType(StrEnum):
    EMBEDDING = 'embedding'
    SPEECH2TEXT = 'speech2text'
    IMAGE2TEXT = 'image2text'
    RERANK = 'rerank'


class ChatStyle(StrEnum):
@ -62,6 +65,7 @@ class ChatStyle(StrEnum):


class TaskStatus(StrEnum):
    UNSTART = "0"
    RUNNING = "1"
    CANCEL = "2"
    DONE = "3"
@ -80,3 +84,11 @@ class ParserType(StrEnum):
    NAIVE = "naive"
    PICTURE = "picture"
    ONE = "one"


class FileSource(StrEnum):
    LOCAL = ""
    KNOWLEDGEBASE = "knowledgebase"
    S3 = "s3"

KNOWLEDGEBASE_FOLDER_NAME=".knowledgebase"
@ -21,14 +21,13 @@ import operator
|
||||
from functools import wraps
|
||||
from itsdangerous.url_safe import URLSafeTimedSerializer as Serializer
|
||||
from flask_login import UserMixin
|
||||
|
||||
from playhouse.migrate import MySQLMigrator, migrate
|
||||
from peewee import (
|
||||
BigAutoField, BigIntegerField, BooleanField, CharField,
|
||||
CompositeKey, Insert, IntegerField, TextField, FloatField, DateTimeField,
|
||||
BigIntegerField, BooleanField, CharField,
|
||||
CompositeKey, IntegerField, TextField, FloatField, DateTimeField,
|
||||
Field, Model, Metadata
|
||||
)
|
||||
from playhouse.pool import PooledMySQLDatabase
|
||||
|
||||
from api.db import SerializedType, ParserType
|
||||
from api.settings import DATABASE, stat_logger, SECRET_KEY
|
||||
from api.utils.log_utils import getLogger
|
||||
@ -344,7 +343,7 @@ class DataBaseModel(BaseModel):
|
||||
|
||||
|
||||
@DB.connection_context()
|
||||
def init_database_tables():
|
||||
def init_database_tables(alter_fields=[]):
|
||||
members = inspect.getmembers(sys.modules[__name__], inspect.isclass)
|
||||
table_objs = []
|
||||
create_failed_list = []
|
||||
@ -361,6 +360,7 @@ def init_database_tables():
|
||||
if create_failed_list:
|
||||
LOGGER.info(f"create tables failed: {create_failed_list}")
|
||||
raise Exception(f"create tables failed: {create_failed_list}")
|
||||
migrate_db()
|
||||
|
||||
|
||||
def fill_db_model_object(model_object, human_model_dict):
|
||||
@ -386,7 +386,7 @@ class User(DataBaseModel, UserMixin):
|
||||
max_length=32,
|
||||
null=True,
|
||||
help_text="English|Chinese",
|
||||
default="English")
|
||||
default="Chinese" if "zh_CN" in os.getenv("LANG", "") else "English")
|
||||
color_schema = CharField(
|
||||
max_length=32,
|
||||
null=True,
|
||||
@ -437,6 +437,10 @@ class Tenant(DataBaseModel):
        max_length=128,
        null=False,
        help_text="default image to text model ID")
    rerank_id = CharField(
        max_length=128,
        null=False,
        help_text="default rerank model ID")
    parser_ids = CharField(
        max_length=256,
        null=False,
@ -578,7 +582,7 @@ class Knowledgebase(DataBaseModel):
|
||||
language = CharField(
|
||||
max_length=32,
|
||||
null=True,
|
||||
default="English",
|
||||
default="Chinese" if "zh_CN" in os.getenv("LANG", "") else "English",
|
||||
help_text="English|Chinese")
|
||||
description = TextField(null=True, help_text="KB description")
|
||||
embd_id = CharField(
|
||||
@ -669,6 +673,66 @@ class Document(DataBaseModel):
        db_table = "document"


class File(DataBaseModel):
    id = CharField(
        max_length=32,
        primary_key=True,
    )
    parent_id = CharField(
        max_length=32,
        null=False,
        help_text="parent folder id",
        index=True)
    tenant_id = CharField(
        max_length=32,
        null=False,
        help_text="tenant id",
        index=True)
    created_by = CharField(
        max_length=32,
        null=False,
        help_text="who created it")
    name = CharField(
        max_length=255,
        null=False,
        help_text="file name or folder name",
        index=True)
    location = CharField(
        max_length=255,
        null=True,
        help_text="where dose it store")
    size = IntegerField(default=0)
    type = CharField(max_length=32, null=False, help_text="file extension")
    source_type = CharField(
        max_length=128,
        null=False,
        default="",
        help_text="where dose this document come from")

    class Meta:
        db_table = "file"


class File2Document(DataBaseModel):
    id = CharField(
        max_length=32,
        primary_key=True,
    )
    file_id = CharField(
        max_length=32,
        null=True,
        help_text="file id",
        index=True)
    document_id = CharField(
        max_length=32,
        null=True,
        help_text="document id",
        index=True)

    class Meta:
        db_table = "file2document"


class Task(DataBaseModel):
    id = CharField(max_length=32, primary_key=True)
    doc_id = CharField(max_length=32, null=False, index=True)
@@ -695,11 +759,11 @@ class Dialog(DataBaseModel):
    language = CharField(
        max_length=32,
        null=True,
        default="Chinese",
        default="Chinese" if "zh_CN" in os.getenv("LANG", "") else "English",
        help_text="English|Chinese")
    llm_id = CharField(max_length=128, null=False, help_text="default llm ID")
    llm_setting = JSONField(null=False, default={"temperature": 0.1, "top_p": 0.3, "frequency_penalty": 0.7,
                                                 "presence_penalty": 0.4, "max_tokens": 215})
                                                 "presence_penalty": 0.4, "max_tokens": 512})
    prompt_type = CharField(
        max_length=16,
        null=False,
@@ -711,11 +775,16 @@ class Dialog(DataBaseModel):
    similarity_threshold = FloatField(default=0.2)
    vector_similarity_weight = FloatField(default=0.3)
    top_n = IntegerField(default=6)
    top_k = IntegerField(default=1024)
    do_refer = CharField(
        max_length=1,
        null=False,
        help_text="it needs to insert reference index into answer or not",
        default="1")
    rerank_id = CharField(
        max_length=128,
        null=False,
        help_text="default rerank model ID")

    kb_ids = JSONField(null=False, default=[])
    status = CharField(
@@ -762,3 +831,32 @@ class API4Conversation(DataBaseModel):

    class Meta:
        db_table = "api_4_conversation"


def migrate_db():
    with DB.transaction():
        migrator = MySQLMigrator(DB)
        try:
            migrate(
                migrator.add_column('file', 'source_type', CharField(max_length=128, null=False, default="", help_text="where does this document come from"))
            )
        except Exception as e:
            pass
        try:
            migrate(
                migrator.add_column('tenant', 'rerank_id', CharField(max_length=128, null=False, default="BAAI/bge-reranker-v2-m3", help_text="default rerank model ID"))
            )
        except Exception as e:
            pass
        try:
            migrate(
                migrator.add_column('dialog', 'rerank_id', CharField(max_length=128, null=False, default="", help_text="default rerank model ID"))
            )
        except Exception as e:
            pass
        try:
            migrate(
                migrator.add_column('dialog', 'top_k', IntegerField(default=1024))
            )
        except Exception as e:
            pass
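Each repeated try/except block in migrate_db() applies an additive schema change and treats "column already exists" as a no-op. A minimal sketch of the same pattern factored into a helper; the helper name is hypothetical, but the playhouse.migrate calls are the ones used above:

from playhouse.migrate import MySQLMigrator, migrate

def add_column_if_missing(db, table, column, field):
    # Hypothetical helper, not part of this diff: add a column and ignore the
    # failure that occurs when the column is already present.
    migrator = MySQLMigrator(db)
    try:
        migrate(migrator.add_column(table, column, field))
    except Exception:
        pass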
@@ -16,10 +16,13 @@
import os
import time
import uuid
from copy import deepcopy

from api.db import LLMType, UserTenantRole
from api.db.db_models import init_database_tables as init_web_db, LLMFactories, LLM, TenantLLM
from api.db.services import UserService
from api.db.services.document_service import DocumentService
from api.db.services.knowledgebase_service import KnowledgebaseService
from api.db.services.llm_service import LLMFactoriesService, LLMService, TenantLLMService, LLMBundle
from api.db.services.user_service import TenantService, UserTenantService
from api.settings import CHAT_MDL, EMBEDDING_MDL, ASR_MDL, IMAGE2TEXT_MDL, PARSERS, LLM_FACTORY, API_KEY, LLM_BASE_URL
@@ -123,8 +126,33 @@ factory_infos = [{
    "name": "Youdao",
    "logo": "",
    "tags": "LLM,TEXT EMBEDDING,SPEECH2TEXT,MODERATION",
    "status": "1",
},
    "status": "1",
},{
    "name": "DeepSeek",
    "logo": "",
    "tags": "LLM",
    "status": "1",
},{
    "name": "VolcEngine",
    "logo": "",
    "tags": "LLM, TEXT EMBEDDING",
    "status": "1",
},{
    "name": "BaiChuan",
    "logo": "",
    "tags": "LLM,TEXT EMBEDDING",
    "status": "1",
},{
    "name": "Jina",
    "logo": "",
    "tags": "TEXT EMBEDDING, TEXT RE-RANK",
    "status": "1",
},{
    "name": "BAAI",
    "logo": "",
    "tags": "TEXT EMBEDDING, TEXT RE-RANK",
    "status": "1",
}
# {
#     "name": "文心一言",
#     "logo": "",
@@ -138,6 +166,12 @@ def init_llm_factory():
    llm_infos = [
        # ---------------------- OpenAI ------------------------
        {
            "fid": factory_infos[0]["name"],
            "llm_name": "gpt-4o",
            "tags": "LLM,CHAT,128K",
            "max_tokens": 128000,
            "model_type": LLMType.CHAT.value + "," + LLMType.IMAGE2TEXT.value
        }, {
            "fid": factory_infos[0]["name"],
            "llm_name": "gpt-3.5-turbo",
            "tags": "LLM,CHAT,4K",
@@ -155,6 +189,18 @@ def init_llm_factory():
            "tags": "TEXT EMBEDDING,8K",
            "max_tokens": 8191,
            "model_type": LLMType.EMBEDDING.value
        }, {
            "fid": factory_infos[0]["name"],
            "llm_name": "text-embedding-3-small",
            "tags": "TEXT EMBEDDING,8K",
            "max_tokens": 8191,
            "model_type": LLMType.EMBEDDING.value
        }, {
            "fid": factory_infos[0]["name"],
            "llm_name": "text-embedding-3-large",
            "tags": "TEXT EMBEDDING,8K",
            "max_tokens": 8191,
            "model_type": LLMType.EMBEDDING.value
        }, {
            "fid": factory_infos[0]["name"],
            "llm_name": "whisper-1",
@@ -331,6 +377,165 @@ def init_llm_factory():
            "max_tokens": 512,
            "model_type": LLMType.EMBEDDING.value
        },
        {"fid": factory_infos[7]["name"], "llm_name": "maidalun1020/bce-reranker-base_v1", "tags": "RE-RANK, 8K", "max_tokens": 8196, "model_type": LLMType.RERANK.value},
        # ------------------------ DeepSeek -----------------------
        {"fid": factory_infos[8]["name"], "llm_name": "deepseek-chat", "tags": "LLM,CHAT,", "max_tokens": 32768, "model_type": LLMType.CHAT.value},
        {"fid": factory_infos[8]["name"], "llm_name": "deepseek-coder", "tags": "LLM,CHAT,", "max_tokens": 16385, "model_type": LLMType.CHAT.value},
        # ------------------------ VolcEngine -----------------------
        {"fid": factory_infos[9]["name"], "llm_name": "Skylark2-pro-32k", "tags": "LLM,CHAT,32k", "max_tokens": 32768, "model_type": LLMType.CHAT.value},
        {"fid": factory_infos[9]["name"], "llm_name": "Skylark2-pro-4k", "tags": "LLM,CHAT,4k", "max_tokens": 4096, "model_type": LLMType.CHAT.value},
        # ------------------------ BaiChuan -----------------------
        {"fid": factory_infos[10]["name"], "llm_name": "Baichuan2-Turbo", "tags": "LLM,CHAT,32K", "max_tokens": 32768, "model_type": LLMType.CHAT.value},
        {"fid": factory_infos[10]["name"], "llm_name": "Baichuan2-Turbo-192k", "tags": "LLM,CHAT,192K", "max_tokens": 196608, "model_type": LLMType.CHAT.value},
        {"fid": factory_infos[10]["name"], "llm_name": "Baichuan3-Turbo", "tags": "LLM,CHAT,32K", "max_tokens": 32768, "model_type": LLMType.CHAT.value},
        {"fid": factory_infos[10]["name"], "llm_name": "Baichuan3-Turbo-128k", "tags": "LLM,CHAT,128K", "max_tokens": 131072, "model_type": LLMType.CHAT.value},
        {"fid": factory_infos[10]["name"], "llm_name": "Baichuan4", "tags": "LLM,CHAT,128K", "max_tokens": 131072, "model_type": LLMType.CHAT.value},
        {"fid": factory_infos[10]["name"], "llm_name": "Baichuan-Text-Embedding", "tags": "TEXT EMBEDDING", "max_tokens": 512, "model_type": LLMType.EMBEDDING.value},
        # ------------------------ Jina -----------------------
        {"fid": factory_infos[11]["name"], "llm_name": "jina-reranker-v1-base-en", "tags": "RE-RANK,8k", "max_tokens": 8196, "model_type": LLMType.RERANK.value},
        {"fid": factory_infos[11]["name"], "llm_name": "jina-reranker-v1-turbo-en", "tags": "RE-RANK,8k", "max_tokens": 8196, "model_type": LLMType.RERANK.value},
        {"fid": factory_infos[11]["name"], "llm_name": "jina-reranker-v1-tiny-en", "tags": "RE-RANK,8k", "max_tokens": 8196, "model_type": LLMType.RERANK.value},
        {"fid": factory_infos[11]["name"], "llm_name": "jina-colbert-v1-en", "tags": "RE-RANK,8k", "max_tokens": 8196, "model_type": LLMType.RERANK.value},
        {"fid": factory_infos[11]["name"], "llm_name": "jina-embeddings-v2-base-en", "tags": "TEXT EMBEDDING", "max_tokens": 8196, "model_type": LLMType.EMBEDDING.value},
        {"fid": factory_infos[11]["name"], "llm_name": "jina-embeddings-v2-base-de", "tags": "TEXT EMBEDDING", "max_tokens": 8196, "model_type": LLMType.EMBEDDING.value},
        {"fid": factory_infos[11]["name"], "llm_name": "jina-embeddings-v2-base-es", "tags": "TEXT EMBEDDING", "max_tokens": 8196, "model_type": LLMType.EMBEDDING.value},
        {"fid": factory_infos[11]["name"], "llm_name": "jina-embeddings-v2-base-code", "tags": "TEXT EMBEDDING", "max_tokens": 8196, "model_type": LLMType.EMBEDDING.value},
        {"fid": factory_infos[11]["name"], "llm_name": "jina-embeddings-v2-base-zh", "tags": "TEXT EMBEDDING", "max_tokens": 8196, "model_type": LLMType.EMBEDDING.value},
        # ------------------------ BAAI -----------------------
        {"fid": factory_infos[12]["name"], "llm_name": "BAAI/bge-large-zh-v1.5", "tags": "TEXT EMBEDDING,", "max_tokens": 1024, "model_type": LLMType.EMBEDDING.value},
        {"fid": factory_infos[12]["name"], "llm_name": "BAAI/bge-reranker-v2-m3", "tags": "RE-RANK,2k", "max_tokens": 2048, "model_type": LLMType.RERANK.value},
    ]
    for info in factory_infos:
        try:
@@ -347,9 +552,28 @@ def init_llm_factory():
    LLMService.filter_delete([LLM.fid == "Local"])
    LLMService.filter_delete([LLM.fid == "Moonshot", LLM.llm_name == "flag-embedding"])
    TenantLLMService.filter_delete([TenantLLM.llm_factory == "Moonshot", TenantLLM.llm_name == "flag-embedding"])
    LLMFactoriesService.filter_update([LLMFactoriesService.model.name == "QAnything"], {"name": "Youdao"})
    LLMService.filter_update([LLMService.model.fid == "QAnything"], {"fid": "Youdao"})
    LLMFactoriesService.filter_delete([LLMFactoriesService.model.name == "QAnything"])
    LLMService.filter_delete([LLMService.model.fid == "QAnything"])
    TenantLLMService.filter_update([TenantLLMService.model.llm_factory == "QAnything"], {"llm_factory": "Youdao"})
    ## insert the two OpenAI embedding models for existing OpenAI tenants.
    print("Start to insert 2 OpenAI embedding models...")
    tenant_ids = set([row["tenant_id"] for row in TenantLLMService.get_openai_models()])
    for tid in tenant_ids:
        for row in TenantLLMService.query(llm_factory="OpenAI", tenant_id=tid):
            row = row.to_dict()
            row["model_type"] = LLMType.EMBEDDING.value
            row["llm_name"] = "text-embedding-3-small"
            row["used_tokens"] = 0
            try:
                TenantLLMService.save(**row)
                row = deepcopy(row)
                row["llm_name"] = "text-embedding-3-large"
                TenantLLMService.save(**row)
            except Exception as e:
                pass
            break
    for kb_id in KnowledgebaseService.get_all_ids():
        KnowledgebaseService.update_by_id(kb_id, {"doc_num": DocumentService.get_kb_doc_count(kb_id)})
    """
    drop table llm;
    drop table llm_factories;
@@ -14,6 +14,7 @@
# limitations under the License.
#
import re
from copy import deepcopy

from api.db import LLMType
from api.db.db_models import Dialog, Conversation
@@ -57,21 +58,21 @@ def message_fit_in(msg, max_length=4000):
    if c < max_length:
        return c, msg

    ll = num_tokens_from_string(msg_[0].content)
    l = num_tokens_from_string(msg_[-1].content)
    ll = num_tokens_from_string(msg_[0]["content"])
    l = num_tokens_from_string(msg_[-1]["content"])
    if ll / (ll + l) > 0.8:
        m = msg_[0].content
        m = msg_[0]["content"]
        m = encoder.decode(encoder.encode(m)[:max_length - l])
        msg[0].content = m
        msg[0]["content"] = m
        return max_length, msg

    m = msg_[1].content
    m = msg_[1]["content"]
    m = encoder.decode(encoder.encode(m)[:max_length - l])
    msg[1].content = m
    msg[1]["content"] = m
    return max_length, msg


def chat(dialog, messages, **kwargs):
def chat(dialog, messages, stream=True, **kwargs):
    assert messages[-1]["role"] == "user", "The last content of this conversation is not from user."
    llm = LLMService.query(llm_name=dialog.llm_id)
    if not llm:
@@ -82,7 +83,9 @@ def chat(dialog, messages, **kwargs):
    else: max_tokens = llm[0].max_tokens
    kbs = KnowledgebaseService.get_by_ids(dialog.kb_ids)
    embd_nms = list(set([kb.embd_id for kb in kbs]))
    assert len(embd_nms) == 1, "Knowledge bases use different embedding models."
    if len(embd_nms) != 1:
        yield {"answer": "**ERROR**: Knowledge bases use different embedding models.", "reference": []}
        return {"answer": "**ERROR**: Knowledge bases use different embedding models.", "reference": []}

    questions = [m["content"] for m in messages if m["role"] == "user"]
    embd_mdl = LLMBundle(dialog.tenant_id, LLMType.EMBEDDING, embd_nms[0])
@@ -94,7 +97,9 @@ def chat(dialog, messages, **kwargs):
    if field_map:
        chat_logger.info("Use SQL for retrieval: {}".format(questions[-1]))
        ans = use_sql(questions[-1], field_map, dialog.tenant_id, chat_mdl, prompt_config.get("quote", True))
        if ans: return ans
        if ans:
            yield ans
            return

    for p in prompt_config["parameters"]:
        if p["key"] == "knowledge":
@@ -110,53 +115,73 @@ def chat(dialog, messages, **kwargs):
    if "knowledge" not in [p["key"] for p in prompt_config["parameters"]]:
        kbinfos = {"total": 0, "chunks": [], "doc_aggs": []}
    else:
        rerank_mdl = None
        if dialog.rerank_id:
            rerank_mdl = LLMBundle(dialog.tenant_id, LLMType.RERANK, dialog.rerank_id)
        kbinfos = retrievaler.retrieval(" ".join(questions), embd_mdl, dialog.tenant_id, dialog.kb_ids, 1, dialog.top_n,
                                        dialog.similarity_threshold,
                                        dialog.vector_similarity_weight, top=1024, aggs=False)
                                        dialog.vector_similarity_weight,
                                        doc_ids=kwargs["doc_ids"].split(",") if "doc_ids" in kwargs else None,
                                        top=1024, aggs=False, rerank_mdl=rerank_mdl)
        knowledges = [ck["content_with_weight"] for ck in kbinfos["chunks"]]
        chat_logger.info("{}->{}".format(" ".join(questions), "\n->".join(knowledges)))

    if not knowledges and prompt_config.get("empty_response"):
        return {"answer": prompt_config["empty_response"], "reference": kbinfos}
        yield {"answer": prompt_config["empty_response"], "reference": kbinfos}
        return {"answer": prompt_config["empty_response"], "reference": kbinfos}

    kwargs["knowledge"] = "\n".join(knowledges)
    gen_conf = dialog.llm_setting
    msg = [{"role": m["role"], "content": m["content"]}
           for m in messages if m["role"] != "system"]

    msg = [{"role": "system", "content": prompt_config["system"].format(**kwargs)}]
    msg.extend([{"role": m["role"], "content": m["content"]}
                for m in messages if m["role"] != "system"])
    used_token_count, msg = message_fit_in(msg, int(max_tokens * 0.97))
    assert len(msg) >= 2, f"message_fit_in has bug: {msg}"

    if "max_tokens" in gen_conf:
        gen_conf["max_tokens"] = min(gen_conf["max_tokens"], max_tokens - used_token_count)
    answer = chat_mdl.chat(prompt_config["system"].format(**kwargs), msg, gen_conf)
    chat_logger.info("User: {}|Assistant: {}".format(msg[-1]["content"], answer))

    if knowledges and prompt_config.get("quote", True):
        answer, idx = retrievaler.insert_citations(answer,
                                                   [ck["content_ltks"] for ck in kbinfos["chunks"]],
                                                   [ck["vector"] for ck in kbinfos["chunks"]],
                                                   embd_mdl,
                                                   tkweight=1 - dialog.vector_similarity_weight,
                                                   vtweight=dialog.vector_similarity_weight)
        idx = set([kbinfos["chunks"][int(i)]["doc_id"] for i in idx])
        recall_docs = [d for d in kbinfos["doc_aggs"] if d["doc_id"] in idx]
        if not recall_docs: recall_docs = kbinfos["doc_aggs"]
        kbinfos["doc_aggs"] = recall_docs
    def decorate_answer(answer):
        nonlocal prompt_config, knowledges, kwargs, kbinfos
        if knowledges and (prompt_config.get("quote", True) and kwargs.get("quote", True)):
            answer, idx = retrievaler.insert_citations(answer,
                                                       [ck["content_ltks"] for ck in kbinfos["chunks"]],
                                                       [ck["vector"] for ck in kbinfos["chunks"]],
                                                       embd_mdl,
                                                       tkweight=1 - dialog.vector_similarity_weight,
                                                       vtweight=dialog.vector_similarity_weight)
            idx = set([kbinfos["chunks"][int(i)]["doc_id"] for i in idx])
            recall_docs = [d for d in kbinfos["doc_aggs"] if d["doc_id"] in idx]
            if not recall_docs: recall_docs = kbinfos["doc_aggs"]
            kbinfos["doc_aggs"] = recall_docs

    for c in kbinfos["chunks"]:
        if c.get("vector"):
            del c["vector"]
    if answer.lower().find("invalid key") >= 0 or answer.lower().find("invalid api") >= 0:
        answer += " Please set LLM API-Key in 'User Setting -> Model Providers -> API-Key'"
    return {"answer": answer, "reference": kbinfos}
        refs = deepcopy(kbinfos)
        for c in refs["chunks"]:
            if c.get("vector"):
                del c["vector"]
        if answer.lower().find("invalid key") >= 0 or answer.lower().find("invalid api") >= 0:
            answer += " Please set LLM API-Key in 'User Setting -> Model Providers -> API-Key'"
        return {"answer": answer, "reference": refs}

    if stream:
        answer = ""
        for ans in chat_mdl.chat_streamly(msg[0]["content"], msg[1:], gen_conf):
            answer = ans
            yield {"answer": answer, "reference": {}}
        yield decorate_answer(answer)
    else:
        answer = chat_mdl.chat(msg[0]["content"], msg[1:], gen_conf)
        chat_logger.info("User: {}|Assistant: {}".format(msg[-1]["content"], answer))
        yield decorate_answer(answer)


def use_sql(question, field_map, tenant_id, chat_mdl, quota=True):
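Since chat() is now a generator in both modes, callers consume it the same way whether or not streaming is enabled. A minimal sketch of the calling pattern; the dialog and messages objects are assumed to come from the surrounding services, as in the API handlers:

# Sketch only: iterate the generator; each yielded item is {"answer": ..., "reference": ...}.
final = None
for delta in chat(dialog, messages, stream=True):
    final = delta           # the last item carries the decorated answer plus references
    print(delta["answer"])  # earlier items carry the partial answer produced so far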
@@ -13,14 +13,26 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#
from peewee import Expression
import random
from datetime import datetime
from elasticsearch_dsl import Q
from peewee import fn

from api.db.db_utils import bulk_insert_into_db
from api.settings import stat_logger
from api.utils import current_timestamp, get_format_time, get_uuid
from rag.settings import SVR_QUEUE_NAME
from rag.utils.es_conn import ELASTICSEARCH
from rag.utils.minio_conn import MINIO
from rag.nlp import search

from api.db import FileType, TaskStatus
from api.db.db_models import DB, Knowledgebase, Tenant
from api.db.db_models import DB, Knowledgebase, Tenant, Task
from api.db.db_models import Document
from api.db.services.common_service import CommonService
from api.db.services.knowledgebase_service import KnowledgebaseService
from api.db import StatusEnum
from rag.utils.redis_conn import REDIS_CONN


class DocumentService(CommonService):
@@ -32,8 +44,9 @@ class DocumentService(CommonService):
                     orderby, desc, keywords):
        if keywords:
            docs = cls.model.select().where(
                cls.model.kb_id == kb_id,
                cls.model.name.like(f"%%{keywords}%%"))
                (cls.model.kb_id == kb_id),
                (fn.LOWER(cls.model.name).contains(keywords.lower()))
            )
        else:
            docs = cls.model.select().where(cls.model.kb_id == kb_id)
        count = docs.count()
@@ -62,16 +75,15 @@ class DocumentService(CommonService):

    @classmethod
    @DB.connection_context()
    def delete(cls, doc):
        e, kb = KnowledgebaseService.get_by_id(doc.kb_id)
        if not KnowledgebaseService.update_by_id(
                kb.id, {"doc_num": kb.doc_num - 1}):
            raise RuntimeError("Database error (Knowledgebase)!")
    def remove_document(cls, doc, tenant_id):
        ELASTICSEARCH.deleteByQuery(
            Q("match", doc_id=doc.id), idxnm=search.index_name(tenant_id))
        cls.clear_chunk_num(doc.id)
        return cls.delete_by_id(doc.id)

    @classmethod
    @DB.connection_context()
    def get_newly_uploaded(cls, tm, mod=0, comm=1, items_per_page=64):
    def get_newly_uploaded(cls):
        fields = [
            cls.model.id,
            cls.model.kb_id,
@@ -93,17 +105,15 @@ class DocumentService(CommonService):
                cls.model.status == StatusEnum.VALID.value,
                ~(cls.model.type == FileType.VIRTUAL.value),
                cls.model.progress == 0,
                cls.model.update_time >= tm,
                cls.model.run == TaskStatus.RUNNING.value,
                (Expression(cls.model.create_time, "%%", comm) == mod))\
            .order_by(cls.model.update_time.asc())\
            .paginate(1, items_per_page)
                cls.model.update_time >= current_timestamp() - 1000 * 600,
                cls.model.run == TaskStatus.RUNNING.value)\
            .order_by(cls.model.update_time.asc())
        return list(docs.dicts())

    @classmethod
    @DB.connection_context()
    def get_unfinished_docs(cls):
        fields = [cls.model.id, cls.model.process_begin_at]
        fields = [cls.model.id, cls.model.process_begin_at, cls.model.parser_config, cls.model.progress_msg]
        docs = cls.model.select(*fields) \
            .where(
                cls.model.status == StatusEnum.VALID.value,
@@ -130,6 +140,22 @@ class DocumentService(CommonService):
            Knowledgebase.id == kb_id).execute()
        return num

    @classmethod
    @DB.connection_context()
    def clear_chunk_num(cls, doc_id):
        doc = cls.model.get_by_id(doc_id)
        assert doc, "Can't find document in database."

        num = Knowledgebase.update(
            token_num=Knowledgebase.token_num - doc.token_num,
            chunk_num=Knowledgebase.chunk_num - doc.chunk_num,
            doc_num=Knowledgebase.doc_num - 1
        ).where(
            Knowledgebase.id == doc.kb_id).execute()
        return num

    @classmethod
    @DB.connection_context()
    def get_tenant_id(cls, doc_id):
@@ -143,6 +169,30 @@ class DocumentService(CommonService):
            return
        return docs[0]["tenant_id"]

    @classmethod
    @DB.connection_context()
    def get_tenant_id_by_name(cls, name):
        docs = cls.model.select(
            Knowledgebase.tenant_id).join(
            Knowledgebase, on=(
                Knowledgebase.id == cls.model.kb_id)).where(
            cls.model.name == name, Knowledgebase.status == StatusEnum.VALID.value)
        docs = docs.dicts()
        if not docs:
            return
        return docs[0]["tenant_id"]

    @classmethod
    @DB.connection_context()
    def get_doc_id_by_doc_name(cls, doc_name):
        fields = [cls.model.id]
        doc_id = cls.model.select(*fields) \
            .where(cls.model.name == doc_name)
        doc_id = doc_id.dicts()
        if not doc_id:
            return
        return doc_id[0]["id"]

    @classmethod
    @DB.connection_context()
    def get_thumbnails(cls, docids):
@@ -177,3 +227,82 @@ class DocumentService(CommonService):
            on=(Knowledgebase.id == cls.model.kb_id)).where(
            Knowledgebase.tenant_id == tenant_id)
        return len(docs)

    @classmethod
    @DB.connection_context()
    def begin2parse(cls, docid):
        cls.update_by_id(
            docid, {"progress": random.random() * 1 / 100.,
                    "progress_msg": "Task dispatched...",
                    "process_begin_at": get_format_time()
                    })

    @classmethod
    @DB.connection_context()
    def update_progress(cls):
        docs = cls.get_unfinished_docs()
        for d in docs:
            try:
                tsks = Task.query(doc_id=d["id"], order_by=Task.create_time)
                if not tsks:
                    continue
                msg = []
                prg = 0
                finished = True
                bad = 0
                status = TaskStatus.RUNNING.value
                for t in tsks:
                    if 0 <= t.progress < 1:
                        finished = False
                    prg += t.progress if t.progress >= 0 else 0
                    msg.append(t.progress_msg)
                    if t.progress == -1:
                        bad += 1
                prg /= len(tsks)
                if finished and bad:
                    prg = -1
                    status = TaskStatus.FAIL.value
                elif finished:
                    if d["parser_config"].get("raptor", {}).get("use_raptor") and d["progress_msg"].lower().find(" raptor") < 0:
                        queue_raptor_tasks(d)
                        prg *= 0.98
                        msg.append("------ RAPTOR -------")
                    else:
                        status = TaskStatus.DONE.value

                msg = "\n".join(msg)
                info = {
                    "process_duation": datetime.timestamp(datetime.now()) - d["process_begin_at"].timestamp(),
                    "run": status}
                if prg != 0:
                    info["progress"] = prg
                if msg:
                    info["progress_msg"] = msg
                cls.update_by_id(d["id"], info)
            except Exception as e:
                stat_logger.error("fetch task exception:" + str(e))

    @classmethod
    @DB.connection_context()
    def get_kb_doc_count(cls, kb_id):
        return len(cls.model.select(cls.model.id).where(
            cls.model.kb_id == kb_id).dicts())


def queue_raptor_tasks(doc):
    def new_task():
        nonlocal doc
        return {
            "id": get_uuid(),
            "doc_id": doc["id"],
            "from_page": 0,
            "to_page": -1,
            "progress_msg": "Start to do RAPTOR (Recursive Abstractive Processing For Tree-Organized Retrieval)."
        }

    task = new_task()
    bulk_insert_into_db(Task, [task], True)
    task["type"] = "raptor"
    assert REDIS_CONN.queue_product(SVR_QUEUE_NAME, message=task), "Can't access Redis. Please check the Redis' status."
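For intuition on the aggregation in update_progress(): a document's progress is the mean of its task progresses, a failed task (progress -1) marks the whole document failed once every task has stopped, and a finished document with RAPTOR enabled is held back while the extra RAPTOR task is queued. A tiny illustrative calculation with hypothetical numbers, not taken from the diff:

# Hypothetical per-task progresses for one document: two tasks done, one halfway.
task_progress = [1.0, 1.0, 0.5]
prg = sum(p for p in task_progress if p >= 0) / len(task_progress)  # ~0.83, document stays RUNNING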
api/db/services/file2document_service.py  (new file, 85 lines)
@@ -0,0 +1,85 @@
#
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from datetime import datetime

from api.db import FileSource
from api.db.db_models import DB
from api.db.db_models import File, File2Document
from api.db.services.common_service import CommonService
from api.db.services.document_service import DocumentService
from api.utils import current_timestamp, datetime_format, get_uuid


class File2DocumentService(CommonService):
    model = File2Document

    @classmethod
    @DB.connection_context()
    def get_by_file_id(cls, file_id):
        objs = cls.model.select().where(cls.model.file_id == file_id)
        return objs

    @classmethod
    @DB.connection_context()
    def get_by_document_id(cls, document_id):
        objs = cls.model.select().where(cls.model.document_id == document_id)
        return objs

    @classmethod
    @DB.connection_context()
    def insert(cls, obj):
        if not cls.save(**obj):
            raise RuntimeError("Database error (File)!")
        e, obj = cls.get_by_id(obj["id"])
        if not e:
            raise RuntimeError("Database error (File retrieval)!")
        return obj

    @classmethod
    @DB.connection_context()
    def delete_by_file_id(cls, file_id):
        return cls.model.delete().where(cls.model.file_id == file_id).execute()

    @classmethod
    @DB.connection_context()
    def delete_by_document_id(cls, doc_id):
        return cls.model.delete().where(cls.model.document_id == doc_id).execute()

    @classmethod
    @DB.connection_context()
    def update_by_file_id(cls, file_id, obj):
        obj["update_time"] = current_timestamp()
        obj["update_date"] = datetime_format(datetime.now())
        num = cls.model.update(obj).where(cls.model.id == file_id).execute()
        e, obj = cls.get_by_id(file_id)
        return obj

    @classmethod
    @DB.connection_context()
    def get_minio_address(cls, doc_id=None, file_id=None):
        if doc_id:
            f2d = cls.get_by_document_id(doc_id)
        else:
            f2d = cls.get_by_file_id(file_id)
        if f2d:
            file = File.get_by_id(f2d[0].file_id)
            if file.source_type == FileSource.LOCAL:
                return file.parent_id, file.location
            doc_id = f2d[0].document_id

        assert doc_id, "please specify doc_id"
        e, doc = DocumentService.get_by_id(doc_id)
        return doc.kb_id, doc.location
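get_minio_address resolves which MinIO bucket and object a document or file lives in: locally uploaded files are addressed by their parent folder id, knowledge-base documents by their kb id. A minimal usage sketch; the document id is a placeholder:

# Sketch: fetch the raw bytes behind a document, given some doc_id from the document table.
bucket, name = File2DocumentService.get_minio_address(doc_id="<some-doc-id>")
blob = MINIO.get(bucket, name)  # the same call queue_tasks() makes further below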
api/db/services/file_service.py  (new file, 307 lines)
@@ -0,0 +1,307 @@
#
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
# (Apache-2.0 license header, identical to the one in file2document_service.py above)
#
from flask_login import current_user
from peewee import fn

from api.db import FileType, KNOWLEDGEBASE_FOLDER_NAME, FileSource
from api.db.db_models import DB, File2Document, Knowledgebase
from api.db.db_models import File, Document
from api.db.services.common_service import CommonService
from api.db.services.document_service import DocumentService
from api.db.services.file2document_service import File2DocumentService
from api.utils import get_uuid


class FileService(CommonService):
    model = File

    @classmethod
    @DB.connection_context()
    def get_by_pf_id(cls, tenant_id, pf_id, page_number, items_per_page, orderby, desc, keywords):
        if keywords:
            files = cls.model.select().where(
                (cls.model.tenant_id == tenant_id),
                (cls.model.parent_id == pf_id),
                (fn.LOWER(cls.model.name).contains(keywords.lower())),
                ~(cls.model.id == pf_id)
            )
        else:
            files = cls.model.select().where((cls.model.tenant_id == tenant_id),
                                             (cls.model.parent_id == pf_id),
                                             ~(cls.model.id == pf_id)
                                             )
        count = files.count()
        if desc:
            files = files.order_by(cls.model.getter_by(orderby).desc())
        else:
            files = files.order_by(cls.model.getter_by(orderby).asc())

        files = files.paginate(page_number, items_per_page)

        res_files = list(files.dicts())
        for file in res_files:
            if file["type"] == FileType.FOLDER.value:
                file["size"] = cls.get_folder_size(file["id"])
                file['kbs_info'] = []
                continue
            kbs_info = cls.get_kb_id_by_file_id(file['id'])
            file['kbs_info'] = kbs_info

        return res_files, count

    @classmethod
    @DB.connection_context()
    def get_kb_id_by_file_id(cls, file_id):
        kbs = (cls.model.select(*[Knowledgebase.id, Knowledgebase.name])
               .join(File2Document, on=(File2Document.file_id == file_id))
               .join(Document, on=(File2Document.document_id == Document.id))
               .join(Knowledgebase, on=(Knowledgebase.id == Document.kb_id))
               .where(cls.model.id == file_id))
        if not kbs: return []
        kbs_info_list = []
        for kb in list(kbs.dicts()):
            kbs_info_list.append({"kb_id": kb['id'], "kb_name": kb['name']})
        return kbs_info_list

    @classmethod
    @DB.connection_context()
    def get_by_pf_id_name(cls, id, name):
        file = cls.model.select().where((cls.model.parent_id == id) & (cls.model.name == name))
        if file.count():
            e, file = cls.get_by_id(file[0].id)
            if not e:
                raise RuntimeError("Database error (File retrieval)!")
            return file
        return None

    @classmethod
    @DB.connection_context()
    def get_id_list_by_id(cls, id, name, count, res):
        if count < len(name):
            file = cls.get_by_pf_id_name(id, name[count])
            if file:
                res.append(file.id)
                return cls.get_id_list_by_id(file.id, name, count + 1, res)
            else:
                return res
        else:
            return res

    @classmethod
    @DB.connection_context()
    def get_all_innermost_file_ids(cls, folder_id, result_ids):
        subfolders = cls.model.select().where(cls.model.parent_id == folder_id)
        if subfolders.exists():
            for subfolder in subfolders:
                cls.get_all_innermost_file_ids(subfolder.id, result_ids)
        else:
            result_ids.append(folder_id)
        return result_ids

    @classmethod
    @DB.connection_context()
    def create_folder(cls, file, parent_id, name, count):
        if count > len(name) - 2:
            return file
        else:
            file = cls.insert({
                "id": get_uuid(), "parent_id": parent_id,
                "tenant_id": current_user.id, "created_by": current_user.id,
                "name": name[count], "location": "", "size": 0,
                "type": FileType.FOLDER.value
            })
            return cls.create_folder(file, file.id, name, count + 1)

    @classmethod
    @DB.connection_context()
    def is_parent_folder_exist(cls, parent_id):
        parent_files = cls.model.select().where(cls.model.id == parent_id)
        if parent_files.count():
            return True
        cls.delete_folder_by_pf_id(parent_id)
        return False

    @classmethod
    @DB.connection_context()
    def get_root_folder(cls, tenant_id):
        for file in cls.model.select().where((cls.model.tenant_id == tenant_id),
                                             (cls.model.parent_id == cls.model.id)
                                             ):
            return file.to_dict()

        file_id = get_uuid()
        file = {
            "id": file_id, "parent_id": file_id,
            "tenant_id": tenant_id, "created_by": tenant_id,
            "name": "/", "type": FileType.FOLDER.value,
            "size": 0, "location": "",
        }
        cls.save(**file)
        return file

    @classmethod
    @DB.connection_context()
    def get_kb_folder(cls, tenant_id):
        for root in cls.model.select().where(cls.model.tenant_id == tenant_id and
                                             cls.model.parent_id == cls.model.id):
            for folder in cls.model.select().where(cls.model.tenant_id == tenant_id and
                                                   cls.model.parent_id == root.id and
                                                   cls.model.name == KNOWLEDGEBASE_FOLDER_NAME
                                                   ):
                return folder.to_dict()
        assert False, "Can't find the KB folder. Database init error."

    @classmethod
    @DB.connection_context()
    def new_a_file_from_kb(cls, tenant_id, name, parent_id, ty=FileType.FOLDER.value, size=0, location=""):
        for file in cls.query(tenant_id=tenant_id, parent_id=parent_id, name=name):
            return file.to_dict()
        file = {
            "id": get_uuid(), "parent_id": parent_id,
            "tenant_id": tenant_id, "created_by": tenant_id,
            "name": name, "type": ty, "size": size, "location": location,
            "source_type": FileSource.KNOWLEDGEBASE
        }
        cls.save(**file)
        return file

    @classmethod
    @DB.connection_context()
    def init_knowledgebase_docs(cls, root_id, tenant_id):
        for _ in cls.model.select().where((cls.model.name == KNOWLEDGEBASE_FOLDER_NAME)\
                                          & (cls.model.parent_id == root_id)):
            return
        folder = cls.new_a_file_from_kb(tenant_id, KNOWLEDGEBASE_FOLDER_NAME, root_id)

        for kb in Knowledgebase.select(*[Knowledgebase.id, Knowledgebase.name]).where(Knowledgebase.tenant_id==tenant_id):
            kb_folder = cls.new_a_file_from_kb(tenant_id, kb.name, folder["id"])
            for doc in DocumentService.query(kb_id=kb.id):
                FileService.add_file_from_kb(doc.to_dict(), kb_folder["id"], tenant_id)

    @classmethod
    @DB.connection_context()
    def get_parent_folder(cls, file_id):
        file = cls.model.select().where(cls.model.id == file_id)
        if file.count():
            e, file = cls.get_by_id(file[0].parent_id)
            if not e:
                raise RuntimeError("Database error (File retrieval)!")
        else:
            raise RuntimeError("Database error (File doesn't exist)!")
        return file

    @classmethod
    @DB.connection_context()
    def get_all_parent_folders(cls, start_id):
        parent_folders = []
        current_id = start_id
        while current_id:
            e, file = cls.get_by_id(current_id)
            if file.parent_id != file.id and e:
                parent_folders.append(file)
                current_id = file.parent_id
            else:
                parent_folders.append(file)
                break
        return parent_folders

    @classmethod
    @DB.connection_context()
    def insert(cls, file):
        if not cls.save(**file):
            raise RuntimeError("Database error (File)!")
        e, file = cls.get_by_id(file["id"])
        if not e:
            raise RuntimeError("Database error (File retrieval)!")
        return file

    @classmethod
    @DB.connection_context()
    def delete(cls, file):
        return cls.delete_by_id(file.id)

    @classmethod
    @DB.connection_context()
    def delete_by_pf_id(cls, folder_id):
        return cls.model.delete().where(cls.model.parent_id == folder_id).execute()

    @classmethod
    @DB.connection_context()
    def delete_folder_by_pf_id(cls, user_id, folder_id):
        try:
            files = cls.model.select().where((cls.model.tenant_id == user_id)
                                             & (cls.model.parent_id == folder_id))
            for file in files:
                cls.delete_folder_by_pf_id(user_id, file.id)
            return cls.model.delete().where((cls.model.tenant_id == user_id)
                                            & (cls.model.id == folder_id)).execute(),
        except Exception as e:
            print(e)
            raise RuntimeError("Database error (File retrieval)!")

    @classmethod
    @DB.connection_context()
    def get_file_count(cls, tenant_id):
        files = cls.model.select(cls.model.id).where(cls.model.tenant_id == tenant_id)
        return len(files)

    @classmethod
    @DB.connection_context()
    def get_folder_size(cls, folder_id):
        size = 0

        def dfs(parent_id):
            nonlocal size
            for f in cls.model.select(*[cls.model.id, cls.model.size, cls.model.type]).where(
                    cls.model.parent_id == parent_id, cls.model.id != parent_id):
                size += f.size
                if f.type == FileType.FOLDER.value:
                    dfs(f.id)

        dfs(folder_id)
        return size

    @classmethod
    @DB.connection_context()
    def add_file_from_kb(cls, doc, kb_folder_id, tenant_id):
        for _ in File2DocumentService.get_by_document_id(doc["id"]): return
        file = {
            "id": get_uuid(), "parent_id": kb_folder_id,
            "tenant_id": tenant_id, "created_by": tenant_id,
            "name": doc["name"], "type": doc["type"],
            "size": doc["size"], "location": doc["location"],
            "source_type": FileSource.KNOWLEDGEBASE
        }
        cls.save(**file)
        File2DocumentService.save(**{"id": get_uuid(), "file_id": file["id"], "document_id": doc["id"]})
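The folder helpers above walk the file tree recursively (depth-first for sizes, leaf collection for innermost folders). A small usage sketch; the folder id is a placeholder:

# Sketch: total bytes under a folder, and the leaf folder ids beneath it.
total = FileService.get_folder_size("<folder-id>")
leaves = FileService.get_all_innermost_file_ids("<folder-id>", [])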
@@ -1,67 +0,0 @@
#
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
# (Apache-2.0 license header, identical to the one in file2document_service.py above)
#

from api.db import TenantPermission
from api.db.db_models import DB, Tenant
from api.db.db_models import Knowledgebase
from api.db.services.common_service import CommonService
from api.db import StatusEnum


class KnowledgebaseService(CommonService):
    model = Knowledgebase

    @classmethod
    @DB.connection_context()
    def get_by_tenant_ids(cls, joined_tenant_ids, user_id,
                          page_number, items_per_page, orderby, desc):
        kbs = cls.model.select().where(
            ((cls.model.tenant_id.in_(joined_tenant_ids) & (cls.model.permission ==
                TenantPermission.TEAM.value)) | (cls.model.tenant_id == user_id))
            & (cls.model.status == StatusEnum.VALID.value)
        )
        if desc:
            kbs = kbs.order_by(cls.model.getter_by(orderby).desc())
        else:
            kbs = kbs.order_by(cls.model.getter_by(orderby).asc())

        kbs = kbs.paginate(page_number, items_per_page)

        return list(kbs.dicts())

    @classmethod
    @DB.connection_context()
    def get_detail(cls, kb_id):
        fields = [cls.model.id, Tenant.embd_id, cls.model.avatar, cls.model.name,
                  cls.model.description, cls.model.permission, cls.model.doc_num,
                  cls.model.token_num, cls.model.chunk_num, cls.model.parser_id]
        kbs = cls.model.select(*fields).join(Tenant, on=((Tenant.id == cls.model.tenant_id) & (Tenant.status == StatusEnum.VALID.value))).where(
            (cls.model.id == kb_id),
            (cls.model.status == StatusEnum.VALID.value)
        )
        if not kbs:
            return
        d = kbs[0].to_dict()
        d["embd_id"] = kbs[0].tenant.embd_id
        return d
@@ -112,3 +112,8 @@ class KnowledgebaseService(CommonService):
        if kb:
            return True, kb[0]
        return False, None

    @classmethod
    @DB.connection_context()
    def get_all_ids(cls):
        return [m["id"] for m in cls.model.select(cls.model.id).dicts()]
@@ -15,7 +15,7 @@
#
from api.db.services.user_service import TenantService
from api.settings import database_logger
from rag.llm import EmbeddingModel, CvModel, ChatModel
from rag.llm import EmbeddingModel, CvModel, ChatModel, RerankModel
from api.db import LLMType
from api.db.db_models import DB, UserTenant
from api.db.db_models import LLMFactories, LLM, TenantLLM
@@ -73,21 +73,25 @@ class TenantLLMService(CommonService):
            mdlnm = tenant.img2txt_id
        elif llm_type == LLMType.CHAT.value:
            mdlnm = tenant.llm_id if not llm_name else llm_name
        elif llm_type == LLMType.RERANK:
            mdlnm = tenant.rerank_id if not llm_name else llm_name
        else:
            assert False, "LLM type error"

        model_config = cls.get_api_key(tenant_id, mdlnm)
        if model_config: model_config = model_config.to_dict()
        if not model_config:
            if llm_type == LLMType.EMBEDDING.value:
            if llm_type in [LLMType.EMBEDDING, LLMType.RERANK]:
                llm = LLMService.query(llm_name=llm_name)
                if llm and llm[0].fid in ["Youdao", "FastEmbed"]:
                if llm and llm[0].fid in ["Youdao", "FastEmbed", "BAAI"]:
                    model_config = {"llm_factory": llm[0].fid, "api_key":"", "llm_name": llm_name, "api_base": ""}
            if not model_config:
                if llm_name == "flag-embedding":
                    model_config = {"llm_factory": "Tongyi-Qianwen", "api_key": "",
                                    "llm_name": llm_name, "api_base": ""}
                else:
                    if not mdlnm:
                        raise LookupError(f"Type of {llm_type} model is not set.")
                    raise LookupError("Model({}) not authorized".format(mdlnm))

        if llm_type == LLMType.EMBEDDING.value:
@@ -96,6 +100,12 @@ class TenantLLMService(CommonService):
            return EmbeddingModel[model_config["llm_factory"]](
                model_config["api_key"], model_config["llm_name"], base_url=model_config["api_base"])

        if llm_type == LLMType.RERANK:
            if model_config["llm_factory"] not in RerankModel:
                return
            return RerankModel[model_config["llm_factory"]](
                model_config["api_key"], model_config["llm_name"], base_url=model_config["api_base"])

        if llm_type == LLMType.IMAGE2TEXT.value:
            if model_config["llm_factory"] not in CvModel:
                return
@@ -125,14 +135,31 @@ class TenantLLMService(CommonService):
            mdlnm = tenant.img2txt_id
        elif llm_type == LLMType.CHAT.value:
            mdlnm = tenant.llm_id if not llm_name else llm_name
        elif llm_type == LLMType.RERANK:
            mdlnm = tenant.llm_id if not llm_name else llm_name
        else:
            assert False, "LLM type error"

        num = cls.model.update(used_tokens=cls.model.used_tokens + used_tokens)\
            .where(cls.model.tenant_id == tenant_id, cls.model.llm_name == mdlnm)\
            .execute()
        num = 0
        try:
            for u in cls.query(tenant_id=tenant_id, llm_name=mdlnm):
                num += cls.model.update(used_tokens=u.used_tokens + used_tokens)\
                    .where(cls.model.tenant_id == tenant_id, cls.model.llm_name == mdlnm)\
                    .execute()
        except Exception as e:
            pass
        return num

    @classmethod
    @DB.connection_context()
    def get_openai_models(cls):
        objs = cls.model.select().where(
            (cls.model.llm_factory == "OpenAI"),
            ~(cls.model.llm_name == "text-embedding-3-small"),
            ~(cls.model.llm_name == "text-embedding-3-large")
        ).dicts()
        return list(objs)


class LLMBundle(object):
    def __init__(self, tenant_id, llm_type, llm_name=None, lang="Chinese"):
@@ -143,6 +170,10 @@ class LLMBundle(object):
            tenant_id, llm_type, llm_name, lang=lang)
        assert self.mdl, "Can't find model for {}/{}/{}".format(
            tenant_id, llm_type, llm_name)
        self.max_length = 512
        for lm in LLMService.query(llm_name=llm_name):
            self.max_length = lm.max_tokens
            break

    def encode(self, texts: list, batch_size=32):
        emd, used_tokens = self.mdl.encode(texts, batch_size)
@@ -160,6 +191,14 @@ class LLMBundle(object):
                "Can't update token usage for {}/EMBEDDING".format(self.tenant_id))
        return emd, used_tokens

    def similarity(self, query: str, texts: list):
        sim, used_tokens = self.mdl.similarity(query, texts)
        if not TenantLLMService.increase_usage(
                self.tenant_id, self.llm_type, used_tokens):
            database_logger.error(
                "Can't update token usage for {}/RERANK".format(self.tenant_id))
        return sim, used_tokens

    def describe(self, image, max_tokens=300):
        txt, used_tokens = self.mdl.describe(image, max_tokens)
        if not TenantLLMService.increase_usage(
@@ -170,8 +209,18 @@ class LLMBundle(object):

    def chat(self, system, history, gen_conf):
        txt, used_tokens = self.mdl.chat(system, history, gen_conf)
        if TenantLLMService.increase_usage(
        if not TenantLLMService.increase_usage(
                self.tenant_id, self.llm_type, used_tokens, self.llm_name):
            database_logger.error(
                "Can't update token usage for {}/CHAT".format(self.tenant_id))
        return txt

    def chat_streamly(self, system, history, gen_conf):
        for txt in self.mdl.chat_streamly(system, history, gen_conf):
            if isinstance(txt, int):
                if not TenantLLMService.increase_usage(
                        self.tenant_id, self.llm_type, txt, self.llm_name):
                    database_logger.error(
                        "Can't update token usage for {}/CHAT".format(self.tenant_id))
                return
            yield txt
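A minimal usage sketch of the new rerank path through LLMBundle; the tenant id is a placeholder, and similarity() returns the scores together with the token count it charged:

# Sketch: build a rerank bundle for a tenant and score candidate chunks against a query.
rerank_mdl = LLMBundle("<tenant-id>", LLMType.RERANK, "BAAI/bge-reranker-v2-m3")
scores, used_tokens = rerank_mdl.similarity("what is RAPTOR?", ["chunk one ...", "chunk two ..."])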
@@ -15,13 +15,19 @@
#
import random

from peewee import Expression
from api.db.db_models import DB
from api.db.db_utils import bulk_insert_into_db
from deepdoc.parser import PdfParser
from peewee import JOIN
from api.db.db_models import DB, File2Document, File
from api.db import StatusEnum, FileType, TaskStatus
from api.db.db_models import Task, Document, Knowledgebase, Tenant
from api.db.services.common_service import CommonService
from api.db.services.document_service import DocumentService
from api.utils import current_timestamp
from api.utils import current_timestamp, get_uuid
from deepdoc.parser.excel_parser import RAGFlowExcelParser
from rag.settings import SVR_QUEUE_NAME
from rag.utils.minio_conn import MINIO
from rag.utils.redis_conn import REDIS_CONN


class TaskService(CommonService):
@@ -29,7 +35,7 @@ class TaskService(CommonService):

    @classmethod
    @DB.connection_context()
    def get_tasks(cls, tm, mod=0, comm=1, items_per_page=1, takeit=True):
    def get_tasks(cls, task_id):
        fields = [
            cls.model.id,
            cls.model.doc_id,
@@ -47,48 +53,40 @@ class TaskService(CommonService):
            Knowledgebase.embd_id,
            Tenant.img2txt_id,
            Tenant.asr_id,
            Tenant.llm_id,
            cls.model.update_time]
        with DB.lock("get_task", -1):
            docs = cls.model.select(*fields) \
                .join(Document, on=(cls.model.doc_id == Document.id)) \
                .join(Knowledgebase, on=(Document.kb_id == Knowledgebase.id)) \
                .join(Tenant, on=(Knowledgebase.tenant_id == Tenant.id))\
                .where(
                    Document.status == StatusEnum.VALID.value,
                    Document.run == TaskStatus.RUNNING.value,
                    ~(Document.type == FileType.VIRTUAL.value),
                    cls.model.progress == 0,
                    #cls.model.update_time >= tm,
                    #(Expression(cls.model.create_time, "%%", comm) == mod)
                )\
                .order_by(cls.model.update_time.asc())\
                .paginate(0, items_per_page)
            docs = list(docs.dicts())
            if not docs: return []
            if not takeit: return docs
        docs = cls.model.select(*fields) \
            .join(Document, on=(cls.model.doc_id == Document.id)) \
            .join(Knowledgebase, on=(Document.kb_id == Knowledgebase.id)) \
            .join(Tenant, on=(Knowledgebase.tenant_id == Tenant.id)) \
            .where(cls.model.id == task_id)
        docs = list(docs.dicts())
        if not docs: return []

            cls.model.update(progress_msg=cls.model.progress_msg + "\n" + "Task has been received.", progress=random.random()/10.).where(
                cls.model.id == docs[0]["id"]).execute()
            return docs
        cls.model.update(progress_msg=cls.model.progress_msg + "\n" + "Task has been received.",
                         progress=random.random() / 10.).where(
            cls.model.id == docs[0]["id"]).execute()
        return docs

    @classmethod
    @DB.connection_context()
    def get_ongoing_doc_name(cls):
        with DB.lock("get_task", -1):
            docs = cls.model.select(*[Document.kb_id, Document.location]) \
            docs = cls.model.select(*[Document.id, Document.kb_id, Document.location, File.parent_id]) \
                .join(Document, on=(cls.model.doc_id == Document.id)) \
                .join(File2Document, on=(File2Document.document_id == Document.id), join_type=JOIN.LEFT_OUTER) \
                .join(File, on=(File2Document.file_id == File.id), join_type=JOIN.LEFT_OUTER) \
                .where(
                    Document.status == StatusEnum.VALID.value,
                    Document.run == TaskStatus.RUNNING.value,
                    ~(Document.type == FileType.VIRTUAL.value),
                    cls.model.progress >= 0,
                    cls.model.progress < 1,
                    cls.model.create_time >= current_timestamp() - 180000
                    cls.model.create_time >= current_timestamp() - 1000 * 600
                )
            docs = list(docs.dicts())
            if not docs: return []

            return list(set([(d["kb_id"], d["location"]) for d in docs]))
            return list(set([(d["parent_id"] if d["parent_id"] else d["kb_id"], d["location"]) for d in docs]))

    @classmethod
    @DB.connection_context()
@@ -99,7 +97,7 @@ class TaskService(CommonService):
            return doc.run == TaskStatus.CANCEL.value or doc.progress < 0
        except Exception as e:
            pass
        return True
        return False

    @classmethod
    @DB.connection_context()
@@ -111,3 +109,55 @@ class TaskService(CommonService):
        if "progress" in info:
            cls.model.update(progress=info["progress"]).where(
                cls.model.id == id).execute()


def queue_tasks(doc, bucket, name):
    def new_task():
        nonlocal doc
        return {
            "id": get_uuid(),
            "doc_id": doc["id"]
        }
    tsks = []

    if doc["type"] == FileType.PDF.value:
        file_bin = MINIO.get(bucket, name)
        do_layout = doc["parser_config"].get("layout_recognize", True)
        pages = PdfParser.total_page_number(doc["name"], file_bin)
        page_size = doc["parser_config"].get("task_page_size", 12)
        if doc["parser_id"] == "paper":
            page_size = doc["parser_config"].get("task_page_size", 22)
        if doc["parser_id"] == "one":
            page_size = 1000000000
        if not do_layout:
            page_size = 1000000000
        page_ranges = doc["parser_config"].get("pages")
        if not page_ranges:
            page_ranges = [(1, 100000)]
        for s, e in page_ranges:
            s -= 1
            s = max(0, s)
            e = min(e - 1, pages)
            for p in range(s, e, page_size):
                task = new_task()
                task["from_page"] = p
                task["to_page"] = min(p + page_size, e)
                tsks.append(task)

    elif doc["parser_id"] == "table":
        file_bin = MINIO.get(bucket, name)
        rn = RAGFlowExcelParser.row_number(
            doc["name"], file_bin)
        for i in range(0, rn, 3000):
            task = new_task()
            task["from_page"] = i
            task["to_page"] = min(i + 3000, rn)
            tsks.append(task)
    else:
        tsks.append(new_task())

    bulk_insert_into_db(Task, tsks, True)
    DocumentService.begin2parse(doc["id"])

    for t in tsks:
        assert REDIS_CONN.queue_product(SVR_QUEUE_NAME, message=t), "Can't access Redis. Please check the Redis' status."
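For intuition on how queue_tasks splits a PDF: with the default task_page_size of 12 and layout recognition on, a 30-page document yields tasks covering pages [0,12), [12,24) and [24,30). A tiny sketch of the splitting arithmetic only, with illustrative numbers and no DB or Redis access:

# Sketch: reproduce the page-span computation used above for pages=30, page_size=12.
pages, page_size = 30, 12
s, e = 0, min(100000 - 1, pages)  # default page range (1, 100000), shifted to 0-based
spans = [(p, min(p + page_size, e)) for p in range(s, e, page_size)]
print(spans)  # [(0, 12), (12, 24), (24, 30)] -> one Task row per span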
@@ -93,6 +93,7 @@ class TenantService(CommonService):
            cls.model.name,
            cls.model.llm_id,
            cls.model.embd_id,
            cls.model.rerank_id,
            cls.model.asr_id,
            cls.model.img2txt_id,
            cls.model.parser_ids,
@@ -18,10 +18,14 @@ import logging
import os
import signal
import sys
import time
import traceback
from concurrent.futures import ThreadPoolExecutor

from werkzeug.serving import run_simple
from api.apps import app
from api.db.runtime_config import RuntimeConfig
from api.db.services.document_service import DocumentService
from api.settings import (
    HOST, HTTP_PORT, access_logger, database_logger, stat_logger,
)
@@ -31,6 +35,16 @@ from api.db.db_models import init_database_tables as init_web_db
from api.db.init_data import init_web_data
from api.versions import get_versions


def update_progress():
    while True:
        time.sleep(1)
        try:
            DocumentService.update_progress()
        except Exception as e:
            stat_logger.error("update_progress exception:" + str(e))


if __name__ == '__main__':
    print("""
    ____ ______ __
@@ -71,6 +85,9 @@ if __name__ == '__main__':
    peewee_logger.addHandler(database_logger.handlers[0])
    peewee_logger.setLevel(database_logger.level)

    thr = ThreadPoolExecutor(max_workers=1)
    thr.submit(update_progress)

    # start http server
    try:
        stat_logger.info("RAG Flow http server start...")
@@ -32,7 +32,7 @@ access_logger = getLogger("access")
database_logger = getLogger("database")
chat_logger = getLogger("chat")

from rag.utils import ELASTICSEARCH
from rag.utils.es_conn import ELASTICSEARCH
from rag.nlp import search
from api.utils import get_base_config, decrypt_database_config

@@ -86,6 +86,25 @@ default_llm = {
        "embedding_model": "",
        "image2text_model": "",
        "asr_model": "",
    },
    "DeepSeek": {
        "chat_model": "deepseek-chat",
        "embedding_model": "", "image2text_model": "", "asr_model": "",
    },
    "VolcEngine": {
        "chat_model": "",
        "embedding_model": "", "image2text_model": "", "asr_model": "",
    },
    "BAAI": {
        "chat_model": "",
        "embedding_model": "BAAI/bge-large-zh-v1.5",
        "image2text_model": "", "asr_model": "",
        "rerank_model": "BAAI/bge-reranker-v2-m3",
    }
}
LLM = get_base_config("user_default_llm", {})
@@ -98,7 +117,8 @@ if LLM_FACTORY not in default_llm:
        f"LLM factory {LLM_FACTORY} is not supported yet; switching to 'Tongyi-Qianwen/QWen' automatically. Please check the API_KEY in service_conf.yaml.")
    LLM_FACTORY = "Tongyi-Qianwen"
CHAT_MDL = default_llm[LLM_FACTORY]["chat_model"]
EMBEDDING_MDL = default_llm[LLM_FACTORY]["embedding_model"]
EMBEDDING_MDL = default_llm["BAAI"]["embedding_model"]
RERANK_MDL = default_llm["BAAI"]["rerank_model"]
ASR_MDL = default_llm[LLM_FACTORY]["asr_model"]
IMAGE2TEXT_MDL = default_llm[LLM_FACTORY]["image2text_model"]

@@ -152,6 +172,7 @@ CLIENT_AUTHENTICATION = AUTHENTICATION_CONF.get(
    "switch", False)
HTTP_APP_KEY = AUTHENTICATION_CONF.get("client", {}).get("http_app_key")
GITHUB_OAUTH = get_base_config("oauth", {}).get("github")
FEISHU_OAUTH = get_base_config("oauth", {}).get("feishu")
WECHAT_OAUTH = get_base_config("oauth", {}).get("wechat")

# site
@ -25,7 +25,6 @@ from flask import (
|
||||
from werkzeug.http import HTTP_STATUS_CODES
|
||||
|
||||
from api.utils import json_dumps
|
||||
from api.versions import get_rag_version
|
||||
from api.settings import RetCode
|
||||
from api.settings import (
|
||||
REQUEST_MAX_WAIT_SEC, REQUEST_WAIT_SEC,
|
||||
@ -84,9 +83,6 @@ def request(**kwargs):
|
||||
return sess.send(prepped, stream=stream, timeout=timeout)
|
||||
|
||||
|
||||
rag_version = get_rag_version() or ''
|
||||
|
||||
|
||||
def get_exponential_backoff_interval(retries, full_jitter=False):
|
||||
"""Calculate the exponential backoff wait time."""
|
||||
# Will be zero if factor equals 0
|
||||
|
||||
@ -19,7 +19,7 @@ import os
|
||||
import re
|
||||
from io import BytesIO
|
||||
|
||||
import fitz
|
||||
import pdfplumber
|
||||
from PIL import Image
|
||||
from cachetools import LRUCache, cached
|
||||
from ruamel.yaml import YAML
|
||||
@ -66,6 +66,15 @@ def get_rag_python_directory(*args):
|
||||
return get_rag_directory("python", *args)
|
||||
|
||||
|
||||
def get_home_cache_dir():
|
||||
dir = os.path.join(os.path.expanduser('~'), ".ragflow")
|
||||
try:
|
||||
os.mkdir(dir)
|
||||
except OSError as error:
|
||||
pass
|
||||
return dir
|
||||
|
||||
|
||||
@cached(cache=LRUCache(maxsize=10))
|
||||
def load_json_conf(conf_path):
|
||||
if os.path.isabs(conf_path):
|
||||
@ -147,7 +156,7 @@ def filename_type(filename):
|
||||
return FileType.PDF.value
|
||||
|
||||
if re.match(
|
||||
r".*\.(doc|docx|ppt|pptx|yml|xml|htm|json|csv|txt|ini|xls|xlsx|wps|rtf|hlp|pages|numbers|key|md)$", filename):
|
||||
r".*\.(doc|docx|ppt|pptx|yml|xml|htm|json|csv|txt|ini|xls|xlsx|wps|rtf|hlp|pages|numbers|key|md|py|js|java|c|cpp|h|php|go|ts|sh|cs|kt|html)$", filename):
|
||||
return FileType.DOC.value
|
||||
|
||||
if re.match(
|
||||
@ -155,17 +164,17 @@ def filename_type(filename):
|
||||
return FileType.AURAL.value
|
||||
|
||||
if re.match(r".*\.(jpg|jpeg|png|tif|gif|pcx|tga|exif|fpx|svg|psd|cdr|pcd|dxf|ufo|eps|ai|raw|WMF|webp|avif|apng|icon|ico|mpg|mpeg|avi|rm|rmvb|mov|wmv|asf|dat|asx|wvx|mpe|mpa|mp4)$", filename):
|
||||
return FileType.VISUAL
|
||||
return FileType.VISUAL.value
|
||||
|
||||
return FileType.OTHER.value
|
||||
|
||||
|
||||
def thumbnail(filename, blob):
|
||||
filename = filename.lower()
|
||||
if re.match(r".*\.pdf$", filename):
|
||||
pdf = fitz.open(stream=blob, filetype="pdf")
|
||||
pix = pdf[0].get_pixmap(matrix=fitz.Matrix(0.03, 0.03))
|
||||
pdf = pdfplumber.open(BytesIO(blob))
|
||||
buffered = BytesIO()
|
||||
Image.frombytes("RGB", [pix.width, pix.height],
|
||||
pix.samples).save(buffered, format="png")
|
||||
pdf.pages[0].to_image(resolution=32).annotated.save(buffered, format="png")
|
||||
return "data:image/png;base64," + \
|
||||
base64.b64encode(buffered.getvalue()).decode("utf-8")
|
||||
|
||||
|
||||
@ -14,17 +14,15 @@
|
||||
# limitations under the License.
|
||||
#
|
||||
import os
|
||||
|
||||
import dotenv
|
||||
import typing
|
||||
|
||||
from api.utils.file_utils import get_project_base_directory
|
||||
|
||||
|
||||
def get_versions() -> typing.Mapping[str, typing.Any]:
|
||||
return dotenv.dotenv_values(
|
||||
dotenv_path=os.path.join(get_project_base_directory(), "rag.env")
|
||||
)
|
||||
dotenv.load_dotenv(dotenv.find_dotenv())
|
||||
return dotenv.dotenv_values()
|
||||
|
||||
|
||||
def get_rag_version() -> typing.Optional[str]:
|
||||
return get_versions().get("RAG")
|
||||
return get_versions().get("RAGFLOW_VERSION", "dev")
|
||||
@ -13,12 +13,12 @@ minio:
|
||||
user: 'rag_flow'
|
||||
password: 'infini_rag_flow'
|
||||
host: 'minio:9000'
|
||||
es:
|
||||
hosts: 'http://es01:9200'
|
||||
redis:
|
||||
db: 1
|
||||
password: 'infini_rag_flow'
|
||||
host: 'redis:6379'
|
||||
es:
|
||||
hosts: 'http://es01:9200'
|
||||
user_default_llm:
|
||||
factory: 'Tongyi-Qianwen'
|
||||
api_key: 'sk-xxxxxxxxxxxxx'
|
||||
@ -28,6 +28,12 @@ oauth:
|
||||
client_id: xxxxxxxxxxxxxxxxxxxxxxxxx
|
||||
secret_key: xxxxxxxxxxxxxxxxxxxxxxxxxxxx
|
||||
url: https://github.com/login/oauth/access_token
|
||||
feishu:
|
||||
app_id: cli_xxxxxxxxxxxxxxxxxxx
|
||||
app_secret: xxxxxxxxxxxxxxxxxxxxxxxxxxxx
|
||||
app_access_token_url: https://open.feishu.cn/open-apis/auth/v3/app_access_token/internal
|
||||
user_access_token_url: https://open.feishu.cn/open-apis/authen/v1/oidc/access_token
|
||||
grant_type: 'authorization_code'
|
||||
authentication:
|
||||
client:
|
||||
switch: false
|
||||
@ -38,4 +44,4 @@ authentication:
|
||||
permission:
|
||||
switch: false
|
||||
component: false
|
||||
dataset: false
|
||||
dataset: false
|
||||
|
||||
@ -1,6 +1,7 @@
|
||||
|
||||
|
||||
from .pdf_parser import HuParser as PdfParser, PlainParser
|
||||
from .docx_parser import HuDocxParser as DocxParser
|
||||
from .excel_parser import HuExcelParser as ExcelParser
|
||||
from .ppt_parser import HuPptParser as PptParser
|
||||
from .pdf_parser import RAGFlowPdfParser as PdfParser, PlainParser
|
||||
from .docx_parser import RAGFlowDocxParser as DocxParser
|
||||
from .excel_parser import RAGFlowExcelParser as ExcelParser
|
||||
from .ppt_parser import RAGFlowPptParser as PptParser
|
||||
from .html_parser import RAGFlowHtmlParser as HtmlParser
|
||||
|
||||
@ -3,11 +3,11 @@ from docx import Document
|
||||
import re
|
||||
import pandas as pd
|
||||
from collections import Counter
|
||||
from rag.nlp import huqie
|
||||
from rag.nlp import rag_tokenizer
|
||||
from io import BytesIO
|
||||
|
||||
|
||||
class HuDocxParser:
|
||||
class RAGFlowDocxParser:
|
||||
|
||||
def __extract_table_content(self, tb):
|
||||
df = []
|
||||
@ -35,14 +35,14 @@ class HuDocxParser:
|
||||
for p, n in patt:
|
||||
if re.search(p, b):
|
||||
return n
|
||||
tks = [t for t in huqie.qie(b).split(" ") if len(t) > 1]
|
||||
tks = [t for t in rag_tokenizer.tokenize(b).split(" ") if len(t) > 1]
|
||||
if len(tks) > 3:
|
||||
if len(tks) < 12:
|
||||
return "Tx"
|
||||
else:
|
||||
return "Lx"
|
||||
|
||||
if len(tks) == 1 and huqie.tag(tks[0]) == "nr":
|
||||
if len(tks) == 1 and rag_tokenizer.tag(tks[0]) == "nr":
|
||||
return "Nr"
|
||||
|
||||
return "Ot"
|
||||
|
||||
@ -6,31 +6,40 @@ from io import BytesIO
|
||||
from rag.nlp import find_codec
|
||||
|
||||
|
||||
class HuExcelParser:
|
||||
def html(self, fnm):
|
||||
class RAGFlowExcelParser:
|
||||
def html(self, fnm, chunk_rows=256):
|
||||
if isinstance(fnm, str):
|
||||
wb = load_workbook(fnm)
|
||||
else:
|
||||
wb = load_workbook(BytesIO(fnm))
|
||||
tb = ""
|
||||
|
||||
tb_chunks = []
|
||||
for sheetname in wb.sheetnames:
|
||||
ws = wb[sheetname]
|
||||
rows = list(ws.rows)
|
||||
if not rows:continue
|
||||
tb += f"<table><caption>{sheetname}</caption><tr>"
|
||||
if not rows: continue
|
||||
|
||||
tb_rows_0 = "<tr>"
|
||||
for t in list(rows[0]):
|
||||
tb += f"<th>{t.value}</th>"
|
||||
tb += "</tr>"
|
||||
for r in list(rows[1:]):
|
||||
tb += "<tr>"
|
||||
for i, c in enumerate(r):
|
||||
if c.value is None:
|
||||
tb += "<td></td>"
|
||||
else:
|
||||
tb += f"<td>{c.value}</td>"
|
||||
tb += "</tr>"
|
||||
tb += "</table>\n"
|
||||
return tb
|
||||
tb_rows_0 += f"<th>{t.value}</th>"
|
||||
tb_rows_0 += "</tr>"
|
||||
|
||||
for chunk_i in range((len(rows) - 1) // chunk_rows + 1):
|
||||
tb = ""
|
||||
tb += f"<table><caption>{sheetname}</caption>"
|
||||
tb += tb_rows_0
|
||||
for r in list(rows[1 + chunk_i * chunk_rows:1 + (chunk_i + 1) * chunk_rows]):
|
||||
tb += "<tr>"
|
||||
for i, c in enumerate(r):
|
||||
if c.value is None:
|
||||
tb += "<td></td>"
|
||||
else:
|
||||
tb += f"<td>{c.value}</td>"
|
||||
tb += "</tr>"
|
||||
tb += "</table>\n"
|
||||
tb_chunks.append(tb)
|
||||
|
||||
return tb_chunks
|
||||
|
||||
def __call__(self, fnm):
|
||||
if isinstance(fnm, str):
|
||||
@ -69,10 +78,10 @@ class HuExcelParser:
|
||||
|
||||
if fnm.split(".")[-1].lower() in ["csv", "txt"]:
|
||||
encoding = find_codec(binary)
|
||||
txt = binary.decode(encoding)
|
||||
txt = binary.decode(encoding, errors="ignore")
|
||||
return len(txt.split("\n"))
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
psr = HuExcelParser()
|
||||
psr = RAGFlowExcelParser()
|
||||
psr(sys.argv[1])
|
||||
|
||||
39
deepdoc/parser/html_parser.py
Normal file
@ -0,0 +1,39 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
from rag.nlp import find_codec
|
||||
import readability
|
||||
import html_text
|
||||
import chardet
|
||||
|
||||
def get_encoding(file):
|
||||
with open(file,'rb') as f:
|
||||
tmp = chardet.detect(f.read())
|
||||
return tmp['encoding']
|
||||
|
||||
class RAGFlowHtmlParser:
|
||||
def __call__(self, fnm, binary=None):
|
||||
txt = ""
|
||||
if binary:
|
||||
encoding = find_codec(binary)
|
||||
txt = binary.decode(encoding, errors="ignore")
|
||||
else:
|
||||
with open(fnm, "r",encoding=get_encoding(fnm)) as f:
|
||||
txt = f.read()
|
||||
|
||||
html_doc = readability.Document(txt)
|
||||
title = html_doc.title()
|
||||
content = html_text.extract_text(html_doc.summary(html_partial=True))
|
||||
txt = f'{title}\n{content}'
|
||||
sections = txt.split("\n")
|
||||
return sections
|
||||
@ -2,7 +2,6 @@
|
||||
import os
|
||||
import random
|
||||
|
||||
import fitz
|
||||
import xgboost as xgb
|
||||
from io import BytesIO
|
||||
import torch
|
||||
@ -16,14 +15,14 @@ from PyPDF2 import PdfReader as pdf2_read
|
||||
|
||||
from api.utils.file_utils import get_project_base_directory
|
||||
from deepdoc.vision import OCR, Recognizer, LayoutRecognizer, TableStructureRecognizer
|
||||
from rag.nlp import huqie
|
||||
from rag.nlp import rag_tokenizer
|
||||
from copy import deepcopy
|
||||
from huggingface_hub import snapshot_download
|
||||
|
||||
logging.getLogger("pdfminer").setLevel(logging.WARNING)
|
||||
|
||||
|
||||
class HuParser:
|
||||
class RAGFlowPdfParser:
|
||||
def __init__(self):
|
||||
self.ocr = OCR()
|
||||
if hasattr(self, "model_speciess"):
|
||||
@ -95,13 +94,13 @@ class HuParser:
|
||||
h = max(self.__height(up), self.__height(down))
|
||||
y_dis = self._y_dis(up, down)
|
||||
LEN = 6
|
||||
tks_down = huqie.qie(down["text"][:LEN]).split(" ")
|
||||
tks_up = huqie.qie(up["text"][-LEN:]).split(" ")
|
||||
tks_down = rag_tokenizer.tokenize(down["text"][:LEN]).split(" ")
|
||||
tks_up = rag_tokenizer.tokenize(up["text"][-LEN:]).split(" ")
|
||||
tks_all = up["text"][-LEN:].strip() \
|
||||
+ (" " if re.match(r"[a-zA-Z0-9]+",
|
||||
up["text"][-1] + down["text"][0]) else "") \
|
||||
+ down["text"][:LEN].strip()
|
||||
tks_all = huqie.qie(tks_all).split(" ")
|
||||
tks_all = rag_tokenizer.tokenize(tks_all).split(" ")
|
||||
fea = [
|
||||
up.get("R", -1) == down.get("R", -1),
|
||||
y_dis / h,
|
||||
@ -142,8 +141,8 @@ class HuParser:
|
||||
tks_down[-1] == tks_up[-1],
|
||||
max(down["in_row"], up["in_row"]),
|
||||
abs(down["in_row"] - up["in_row"]),
|
||||
len(tks_down) == 1 and huqie.tag(tks_down[0]).find("n") >= 0,
|
||||
len(tks_up) == 1 and huqie.tag(tks_up[0]).find("n") >= 0
|
||||
len(tks_down) == 1 and rag_tokenizer.tag(tks_down[0]).find("n") >= 0,
|
||||
len(tks_up) == 1 and rag_tokenizer.tag(tks_up[0]).find("n") >= 0
|
||||
]
|
||||
return fea
|
||||
|
||||
@ -393,11 +392,11 @@ class HuParser:
|
||||
b["text"].strip()[-1] in ",;:'\",、‘“;:-",
|
||||
len(b["text"].strip()) > 1 and b["text"].strip(
|
||||
)[-2] in ",;:'\",‘“、;:",
|
||||
b["text"].strip()[0] in "。;?!?”)),,、:",
|
||||
b_["text"].strip() and b_["text"].strip()[0] in "。;?!?”)),,、:",
|
||||
]
|
||||
# features for not concating
|
||||
feats = [
|
||||
b.get("layoutno", 0) != b.get("layoutno", 0),
|
||||
b.get("layoutno", 0) != b_.get("layoutno", 0),
|
||||
b["text"].strip()[-1] in "。?!?",
|
||||
self.is_english and b["text"].strip()[-1] in ".!?",
|
||||
b["page_number"] == b_["page_number"] and b_["top"] -
|
||||
@ -470,7 +469,8 @@ class HuParser:
|
||||
continue
|
||||
|
||||
if re.match(r"[0-9]{2,3}/[0-9]{3}$", up["text"]) \
|
||||
or re.match(r"[0-9]{2,3}/[0-9]{3}$", down["text"]):
|
||||
or re.match(r"[0-9]{2,3}/[0-9]{3}$", down["text"]) \
|
||||
or not down["text"].strip():
|
||||
i += 1
|
||||
continue
|
||||
|
||||
@ -598,7 +598,7 @@ class HuParser:
|
||||
|
||||
if b["text"].strip()[0] != b_["text"].strip()[0] \
|
||||
or b["text"].strip()[0].lower() in set("qwertyuopasdfghjklzxcvbnm") \
|
||||
or huqie.is_chinese(b["text"].strip()[0]) \
|
||||
or rag_tokenizer.is_chinese(b["text"].strip()[0]) \
|
||||
or b["top"] > b_["bottom"]:
|
||||
i += 1
|
||||
continue
|
||||
@ -749,6 +749,7 @@ class HuParser:
|
||||
"layoutno", "")))
|
||||
|
||||
left, top, right, bott = b["x0"], b["top"], b["x1"], b["bottom"]
|
||||
if right < left: right = left + 1
|
||||
poss.append((pn + self.page_from, left, right, top, bott))
|
||||
return self.page_images[pn] \
|
||||
.crop((left * ZM, top * ZM,
|
||||
@ -921,9 +922,7 @@ class HuParser:
|
||||
fnm) if not binary else pdfplumber.open(BytesIO(binary))
|
||||
return len(pdf.pages)
|
||||
except Exception as e:
|
||||
pdf = fitz.open(fnm) if not binary else fitz.open(
|
||||
stream=fnm, filetype="pdf")
|
||||
return len(pdf)
|
||||
logging.error(str(e))
|
||||
|
||||
def __images__(self, fnm, zoomin=3, page_from=0,
|
||||
page_to=299, callback=None):
|
||||
@ -945,23 +944,7 @@ class HuParser:
|
||||
self.pdf.pages[page_from:page_to]]
|
||||
self.total_page = len(self.pdf.pages)
|
||||
except Exception as e:
|
||||
self.pdf = fitz.open(fnm) if isinstance(
|
||||
fnm, str) else fitz.open(
|
||||
stream=fnm, filetype="pdf")
|
||||
self.page_images = []
|
||||
self.page_chars = []
|
||||
mat = fitz.Matrix(zoomin, zoomin)
|
||||
self.total_page = len(self.pdf)
|
||||
for i, page in enumerate(self.pdf):
|
||||
if i < page_from:
|
||||
continue
|
||||
if i >= page_to:
|
||||
break
|
||||
pix = page.get_pixmap(matrix=mat)
|
||||
img = Image.frombytes("RGB", [pix.width, pix.height],
|
||||
pix.samples)
|
||||
self.page_images.append(img)
|
||||
self.page_chars.append([])
|
||||
logging.error(str(e))
|
||||
|
||||
self.outlines = []
|
||||
try:
|
||||
|
||||
@ -14,7 +14,7 @@ from io import BytesIO
|
||||
from pptx import Presentation
|
||||
|
||||
|
||||
class HuPptParser(object):
|
||||
class RAGFlowPptParser(object):
|
||||
def __init__(self):
|
||||
super().__init__()
|
||||
|
||||
|
||||
@ -1,6 +1,6 @@
|
||||
import re,json,os
|
||||
import pandas as pd
|
||||
from rag.nlp import huqie
|
||||
from rag.nlp import rag_tokenizer
|
||||
from . import regions
|
||||
current_file_path = os.path.dirname(os.path.abspath(__file__))
|
||||
GOODS = pd.read_csv(os.path.join(current_file_path, "res/corp_baike_len.csv"), sep="\t", header=0).fillna(0)
|
||||
@ -22,14 +22,14 @@ def baike(cid, default_v=0):
|
||||
def corpNorm(nm, add_region=True):
|
||||
global CORP_TKS
|
||||
if not nm or type(nm)!=type(""):return ""
|
||||
nm = huqie.tradi2simp(huqie.strQ2B(nm)).lower()
|
||||
nm = rag_tokenizer.tradi2simp(rag_tokenizer.strQ2B(nm)).lower()
|
||||
nm = re.sub(r"&", "&", nm)
|
||||
nm = re.sub(r"[\(\)()\+'\"\t \*\\【】-]+", " ", nm)
|
||||
nm = re.sub(r"([—-]+.*| +co\..*|corp\..*| +inc\..*| +ltd.*)", "", nm, 10000, re.IGNORECASE)
|
||||
nm = re.sub(r"(计算机|技术|(技术|科技|网络)*有限公司|公司|有限|研发中心|中国|总部)$", "", nm, 10000, re.IGNORECASE)
|
||||
if not nm or (len(nm)<5 and not regions.isName(nm[0:2])):return nm
|
||||
|
||||
tks = huqie.qie(nm).split(" ")
|
||||
tks = rag_tokenizer.tokenize(nm).split(" ")
|
||||
reg = [t for i,t in enumerate(tks) if regions.isName(t) and (t != "中国" or i > 0)]
|
||||
nm = ""
|
||||
for t in tks:
|
||||
|
||||
@ -3,7 +3,7 @@ import re, copy, time, datetime, demjson3, \
|
||||
traceback, signal
|
||||
import numpy as np
|
||||
from deepdoc.parser.resume.entities import degrees, schools, corporations
|
||||
from rag.nlp import huqie, surname
|
||||
from rag.nlp import rag_tokenizer, surname
|
||||
from xpinyin import Pinyin
|
||||
from contextlib import contextmanager
|
||||
|
||||
@ -83,7 +83,7 @@ def forEdu(cv):
|
||||
if n.get("school_name") and isinstance(n["school_name"], str):
|
||||
sch.append(re.sub(r"(211|985|重点大学|[,&;;-])", "", n["school_name"]))
|
||||
e["sch_nm_kwd"] = sch[-1]
|
||||
fea.append(huqie.qieqie(huqie.qie(n.get("school_name", ""))).split(" ")[-1])
|
||||
fea.append(rag_tokenizer.fine_grained_tokenize(rag_tokenizer.tokenize(n.get("school_name", ""))).split(" ")[-1])
|
||||
|
||||
if n.get("discipline_name") and isinstance(n["discipline_name"], str):
|
||||
maj.append(n["discipline_name"])
|
||||
@ -166,10 +166,10 @@ def forEdu(cv):
|
||||
if "tag_kwd" not in cv: cv["tag_kwd"] = []
|
||||
if "好学历" not in cv["tag_kwd"]: cv["tag_kwd"].append("好学历")
|
||||
|
||||
if cv.get("major_kwd"): cv["major_tks"] = huqie.qie(" ".join(maj))
|
||||
if cv.get("school_name_kwd"): cv["school_name_tks"] = huqie.qie(" ".join(sch))
|
||||
if cv.get("first_school_name_kwd"): cv["first_school_name_tks"] = huqie.qie(" ".join(fsch))
|
||||
if cv.get("first_major_kwd"): cv["first_major_tks"] = huqie.qie(" ".join(fmaj))
|
||||
if cv.get("major_kwd"): cv["major_tks"] = rag_tokenizer.tokenize(" ".join(maj))
|
||||
if cv.get("school_name_kwd"): cv["school_name_tks"] = rag_tokenizer.tokenize(" ".join(sch))
|
||||
if cv.get("first_school_name_kwd"): cv["first_school_name_tks"] = rag_tokenizer.tokenize(" ".join(fsch))
|
||||
if cv.get("first_major_kwd"): cv["first_major_tks"] = rag_tokenizer.tokenize(" ".join(fmaj))
|
||||
|
||||
return cv
|
||||
|
||||
@ -187,11 +187,11 @@ def forProj(cv):
|
||||
if n.get("achivement"): desc.append(str(n["achivement"]))
|
||||
|
||||
if pro_nms:
|
||||
# cv["pro_nms_tks"] = huqie.qie(" ".join(pro_nms))
|
||||
cv["project_name_tks"] = huqie.qie(pro_nms[0])
|
||||
# cv["pro_nms_tks"] = rag_tokenizer.tokenize(" ".join(pro_nms))
|
||||
cv["project_name_tks"] = rag_tokenizer.tokenize(pro_nms[0])
|
||||
if desc:
|
||||
cv["pro_desc_ltks"] = huqie.qie(rmHtmlTag(" ".join(desc)))
|
||||
cv["project_desc_ltks"] = huqie.qie(rmHtmlTag(desc[0]))
|
||||
cv["pro_desc_ltks"] = rag_tokenizer.tokenize(rmHtmlTag(" ".join(desc)))
|
||||
cv["project_desc_ltks"] = rag_tokenizer.tokenize(rmHtmlTag(desc[0]))
|
||||
|
||||
return cv
|
||||
|
||||
@ -280,25 +280,25 @@ def forWork(cv):
|
||||
if fea["corporation_id"]: cv["corporation_id"] = fea["corporation_id"]
|
||||
|
||||
if fea["position_name"]:
|
||||
cv["position_name_tks"] = huqie.qie(fea["position_name"][0])
|
||||
cv["position_name_sm_tks"] = huqie.qieqie(cv["position_name_tks"])
|
||||
cv["pos_nm_tks"] = huqie.qie(" ".join(fea["position_name"][1:]))
|
||||
cv["position_name_tks"] = rag_tokenizer.tokenize(fea["position_name"][0])
|
||||
cv["position_name_sm_tks"] = rag_tokenizer.fine_grained_tokenize(cv["position_name_tks"])
|
||||
cv["pos_nm_tks"] = rag_tokenizer.tokenize(" ".join(fea["position_name"][1:]))
|
||||
|
||||
if fea["industry_name"]:
|
||||
cv["industry_name_tks"] = huqie.qie(fea["industry_name"][0])
|
||||
cv["industry_name_sm_tks"] = huqie.qieqie(cv["industry_name_tks"])
|
||||
cv["indu_nm_tks"] = huqie.qie(" ".join(fea["industry_name"][1:]))
|
||||
cv["industry_name_tks"] = rag_tokenizer.tokenize(fea["industry_name"][0])
|
||||
cv["industry_name_sm_tks"] = rag_tokenizer.fine_grained_tokenize(cv["industry_name_tks"])
|
||||
cv["indu_nm_tks"] = rag_tokenizer.tokenize(" ".join(fea["industry_name"][1:]))
|
||||
|
||||
if fea["corporation_name"]:
|
||||
cv["corporation_name_kwd"] = fea["corporation_name"][0]
|
||||
cv["corp_nm_kwd"] = fea["corporation_name"]
|
||||
cv["corporation_name_tks"] = huqie.qie(fea["corporation_name"][0])
|
||||
cv["corporation_name_sm_tks"] = huqie.qieqie(cv["corporation_name_tks"])
|
||||
cv["corp_nm_tks"] = huqie.qie(" ".join(fea["corporation_name"][1:]))
|
||||
cv["corporation_name_tks"] = rag_tokenizer.tokenize(fea["corporation_name"][0])
|
||||
cv["corporation_name_sm_tks"] = rag_tokenizer.fine_grained_tokenize(cv["corporation_name_tks"])
|
||||
cv["corp_nm_tks"] = rag_tokenizer.tokenize(" ".join(fea["corporation_name"][1:]))
|
||||
|
||||
if fea["responsibilities"]:
|
||||
cv["responsibilities_ltks"] = huqie.qie(fea["responsibilities"][0])
|
||||
cv["resp_ltks"] = huqie.qie(" ".join(fea["responsibilities"][1:]))
|
||||
cv["responsibilities_ltks"] = rag_tokenizer.tokenize(fea["responsibilities"][0])
|
||||
cv["resp_ltks"] = rag_tokenizer.tokenize(" ".join(fea["responsibilities"][1:]))
|
||||
|
||||
if fea["subordinates_count"]: fea["subordinates_count"] = [int(i) for i in fea["subordinates_count"] if
|
||||
re.match(r"[^0-9]+$", str(i))]
|
||||
@ -444,15 +444,15 @@ def parse(cv):
|
||||
if nms:
|
||||
t = k[:-4]
|
||||
cv[f"{t}_kwd"] = nms
|
||||
cv[f"{t}_tks"] = huqie.qie(" ".join(nms))
|
||||
cv[f"{t}_tks"] = rag_tokenizer.tokenize(" ".join(nms))
|
||||
except Exception as e:
|
||||
print("【EXCEPTION】:", str(traceback.format_exc()), cv[k])
|
||||
cv[k] = []
|
||||
|
||||
# tokenize fields
|
||||
if k in tks_fld:
|
||||
cv[f"{k}_tks"] = huqie.qie(cv[k])
|
||||
if k in small_tks_fld: cv[f"{k}_sm_tks"] = huqie.qie(cv[f"{k}_tks"])
|
||||
cv[f"{k}_tks"] = rag_tokenizer.tokenize(cv[k])
|
||||
if k in small_tks_fld: cv[f"{k}_sm_tks"] = rag_tokenizer.tokenize(cv[f"{k}_tks"])
|
||||
|
||||
# keyword fields
|
||||
if k in kwd_fld: cv[f"{k}_kwd"] = [n.lower()
|
||||
@ -492,7 +492,7 @@ def parse(cv):
|
||||
cv["name_kwd"] = name
|
||||
cv["name_pinyin_kwd"] = PY.get_pinyins(nm[:20], ' ')[:3]
|
||||
cv["name_tks"] = (
|
||||
huqie.qie(name) + " " + (" ".join(list(name)) if not re.match(r"[a-zA-Z ]+$", name) else "")
|
||||
rag_tokenizer.tokenize(name) + " " + (" ".join(list(name)) if not re.match(r"[a-zA-Z ]+$", name) else "")
|
||||
) if name else ""
|
||||
else:
|
||||
cv["integerity_flt"] /= 2.
|
||||
@ -515,7 +515,7 @@ def parse(cv):
|
||||
cv["updated_at_dt"] = f"%s-%02d-%02d 00:00:00" % (y, int(m), int(d))
|
||||
# long text tokenize
|
||||
|
||||
if cv.get("responsibilities"): cv["responsibilities_ltks"] = huqie.qie(rmHtmlTag(cv["responsibilities"]))
|
||||
if cv.get("responsibilities"): cv["responsibilities_ltks"] = rag_tokenizer.tokenize(rmHtmlTag(cv["responsibilities"]))
|
||||
|
||||
# for yes or no field
|
||||
fea = []
|
||||
|
||||
@ -1,12 +1,13 @@
|
||||
import pdfplumber
|
||||
|
||||
from .ocr import OCR
|
||||
from .recognizer import Recognizer
|
||||
from .layout_recognizer import LayoutRecognizer
|
||||
from .table_structure_recognizer import TableStructureRecognizer
|
||||
|
||||
|
||||
def init_in_out(args):
|
||||
from PIL import Image
|
||||
import fitz
|
||||
import os
|
||||
import traceback
|
||||
from api.utils.file_utils import traversal_files
|
||||
@ -18,13 +19,11 @@ def init_in_out(args):
|
||||
|
||||
def pdf_pages(fnm, zoomin=3):
|
||||
nonlocal outputs, images
|
||||
pdf = fitz.open(fnm)
|
||||
mat = fitz.Matrix(zoomin, zoomin)
|
||||
for i, page in enumerate(pdf):
|
||||
pix = page.get_pixmap(matrix=mat)
|
||||
img = Image.frombytes("RGB", [pix.width, pix.height],
|
||||
pix.samples)
|
||||
images.append(img)
|
||||
pdf = pdfplumber.open(fnm)
|
||||
images = [p.to_image(resolution=72 * zoomin).annotated for i, p in
|
||||
enumerate(pdf.pages)]
|
||||
|
||||
for i, page in enumerate(images):
|
||||
outputs.append(os.path.split(fnm)[-1] + f"_{i}.jpg")
|
||||
|
||||
def images_and_outputs(fnm):
|
||||
|
||||
@ -1,5 +1,5 @@
|
||||
import copy
|
||||
|
||||
import re
|
||||
import numpy as np
|
||||
import cv2
|
||||
from shapely.geometry import Polygon
|
||||
|
||||
@ -11,10 +11,6 @@
|
||||
# limitations under the License.
|
||||
#
|
||||
|
||||
from deepdoc.vision.seeit import draw_box
|
||||
from deepdoc.vision import OCR, init_in_out
|
||||
import argparse
|
||||
import numpy as np
|
||||
import os
|
||||
import sys
|
||||
sys.path.insert(
|
||||
@ -25,6 +21,11 @@ sys.path.insert(
|
||||
os.path.abspath(__file__)),
|
||||
'../../')))
|
||||
|
||||
from deepdoc.vision.seeit import draw_box
|
||||
from deepdoc.vision import OCR, init_in_out
|
||||
import argparse
|
||||
import numpy as np
|
||||
|
||||
|
||||
def main(args):
|
||||
ocr = OCR()
|
||||
|
||||
@ -10,17 +10,7 @@
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
|
||||
from deepdoc.vision.seeit import draw_box
|
||||
from deepdoc.vision import Recognizer, LayoutRecognizer, TableStructureRecognizer, OCR, init_in_out
|
||||
from api.utils.file_utils import get_project_base_directory
|
||||
import argparse
|
||||
import os
|
||||
import sys
|
||||
import re
|
||||
|
||||
import numpy as np
|
||||
|
||||
import os, sys
|
||||
sys.path.insert(
|
||||
0,
|
||||
os.path.abspath(
|
||||
@ -29,6 +19,13 @@ sys.path.insert(
|
||||
os.path.abspath(__file__)),
|
||||
'../../')))
|
||||
|
||||
from deepdoc.vision.seeit import draw_box
|
||||
from deepdoc.vision import Recognizer, LayoutRecognizer, TableStructureRecognizer, OCR, init_in_out
|
||||
from api.utils.file_utils import get_project_base_directory
|
||||
import argparse
|
||||
import re
|
||||
import numpy as np
|
||||
|
||||
|
||||
def main(args):
|
||||
images, outputs = init_in_out(args)
|
||||
|
||||
@ -19,7 +19,7 @@ import numpy as np
|
||||
from huggingface_hub import snapshot_download
|
||||
|
||||
from api.utils.file_utils import get_project_base_directory
|
||||
from rag.nlp import huqie
|
||||
from rag.nlp import rag_tokenizer
|
||||
from .recognizer import Recognizer
|
||||
|
||||
|
||||
@ -117,14 +117,14 @@ class TableStructureRecognizer(Recognizer):
|
||||
for p, n in patt:
|
||||
if re.search(p, b["text"].strip()):
|
||||
return n
|
||||
tks = [t for t in huqie.qie(b["text"]).split(" ") if len(t) > 1]
|
||||
tks = [t for t in rag_tokenizer.tokenize(b["text"]).split(" ") if len(t) > 1]
|
||||
if len(tks) > 3:
|
||||
if len(tks) < 12:
|
||||
return "Tx"
|
||||
else:
|
||||
return "Lx"
|
||||
|
||||
if len(tks) == 1 and huqie.tag(tks[0]) == "nr":
|
||||
if len(tks) == 1 and rag_tokenizer.tag(tks[0]) == "nr":
|
||||
return "Nr"
|
||||
|
||||
return "Ot"
|
||||
|
||||
@ -25,9 +25,11 @@ MINIO_PORT=9000
|
||||
MINIO_USER=rag_flow
|
||||
MINIO_PASSWORD=infini_rag_flow
|
||||
|
||||
REDIS_PASSWORD=infini_rag_flow
|
||||
|
||||
SVR_HTTP_PORT=9380
|
||||
|
||||
RAGFLOW_VERSION=v0.3.1
|
||||
RAGFLOW_VERSION=v0.7.0
|
||||
|
||||
TIMEZONE='Asia/Shanghai'
|
||||
|
||||
|
||||
@ -50,7 +50,7 @@ The serving port of mysql inside the container. The modification should be synch
|
||||
The maximum number of database connections.
|
||||
|
||||
### stale_timeout
|
||||
The timeout duation in seconds.
|
||||
The timeout duration in seconds.
|
||||
|
||||
## minio
|
||||
|
||||
@ -67,7 +67,7 @@ The serving IP and port inside the docker container. This is not updating until
|
||||
Newly signed-up users use the LLM configured by this part. Otherwise, users need to configure their own LLM in *Settings*.
|
||||
|
||||
### factory
|
||||
The LLM suppliers. 'Tongyi-Qianwen', "OpenAI", "Moonshot" and "ZHIPU-AI" are supported.
|
||||
The LLM suppliers. "OpenAI", "Tongyi-Qianwen", "ZHIPU-AI", "Moonshot", "DeepSeek", "Baichuan", and "VolcEngine" are supported.
|
||||
|
||||
### api_key
|
||||
The corresponding API key of your assigned LLM vendor.
|
||||
|
||||
@ -29,24 +29,6 @@ services:
|
||||
- ragflow
|
||||
restart: always
|
||||
|
||||
#kibana:
|
||||
# depends_on:
|
||||
# es01:
|
||||
# condition: service_healthy
|
||||
# image: docker.elastic.co/kibana/kibana:${STACK_VERSION}
|
||||
# container_name: ragflow-kibana
|
||||
# volumes:
|
||||
# - kibanadata:/usr/share/kibana/data
|
||||
# ports:
|
||||
# - ${KIBANA_PORT}:5601
|
||||
# environment:
|
||||
# - SERVERNAME=kibana
|
||||
# - ELASTICSEARCH_HOSTS=http://es01:9200
|
||||
# - TZ=${TIMEZONE}
|
||||
# mem_limit: ${MEM_LIMIT}
|
||||
# networks:
|
||||
# - ragflow
|
||||
|
||||
mysql:
|
||||
image: mysql:5.7.18
|
||||
container_name: ragflow-mysql
|
||||
@ -74,7 +56,6 @@ services:
|
||||
retries: 3
|
||||
restart: always
|
||||
|
||||
|
||||
minio:
|
||||
image: quay.io/minio/minio:RELEASE.2023-12-20T01-00-02Z
|
||||
container_name: ragflow-minio
|
||||
@ -92,16 +73,27 @@ services:
|
||||
- ragflow
|
||||
restart: always
|
||||
|
||||
redis:
|
||||
image: redis:7.2.4
|
||||
container_name: ragflow-redis
|
||||
command: redis-server --requirepass ${REDIS_PASSWORD} --maxmemory 128mb --maxmemory-policy allkeys-lru
|
||||
volumes:
|
||||
- redis_data:/data
|
||||
networks:
|
||||
- ragflow
|
||||
restart: always
|
||||
|
||||
|
||||
|
||||
volumes:
|
||||
esdata01:
|
||||
driver: local
|
||||
kibanadata:
|
||||
driver: local
|
||||
mysql_data:
|
||||
driver: local
|
||||
minio_data:
|
||||
driver: local
|
||||
redis_data:
|
||||
driver: local
|
||||
|
||||
networks:
|
||||
ragflow:
|
||||
|
||||
@ -4,36 +4,24 @@
|
||||
|
||||
export LD_LIBRARY_PATH=/usr/lib/x86_64-linux-gnu/
|
||||
|
||||
PY=/root/miniconda3/envs/py11/bin/python
|
||||
PY=python3
|
||||
if [[ -z "$WS" || $WS -lt 1 ]]; then
|
||||
WS=1
|
||||
fi
|
||||
|
||||
function task_exe(){
|
||||
while [ 1 -eq 1 ];do
|
||||
$PY rag/svr/task_executor.py $1 $2;
|
||||
$PY rag/svr/task_executor.py ;
|
||||
done
|
||||
}
|
||||
|
||||
function watch_broker(){
|
||||
while [ 1 -eq 1 ];do
|
||||
C=`ps aux|grep "task_broker.py"|grep -v grep|wc -l`;
|
||||
if [ $C -lt 1 ];then
|
||||
$PY rag/svr/task_broker.py &
|
||||
fi
|
||||
sleep 5;
|
||||
done
|
||||
}
|
||||
|
||||
function task_bro(){
|
||||
watch_broker;
|
||||
}
|
||||
|
||||
task_bro &
|
||||
|
||||
WS=1
|
||||
for ((i=0;i<WS;i++))
|
||||
do
|
||||
task_exe $i $WS &
|
||||
task_exe &
|
||||
done
|
||||
|
||||
$PY api/ragflow_server.py
|
||||
while [ 1 -eq 1 ];do
|
||||
$PY api/ragflow_server.py
|
||||
done
|
||||
|
||||
wait;
|
||||
wait;
|
||||
|
||||
@ -13,12 +13,12 @@ minio:
|
||||
user: 'rag_flow'
|
||||
password: 'infini_rag_flow'
|
||||
host: 'minio:9000'
|
||||
es:
|
||||
hosts: 'http://es01:9200'
|
||||
redis:
|
||||
db: 1
|
||||
password: 'infini_rag_flow'
|
||||
host: 'redis:6379'
|
||||
es:
|
||||
hosts: 'http://es01:9200'
|
||||
user_default_llm:
|
||||
factory: 'Tongyi-Qianwen'
|
||||
api_key: 'sk-xxxxxxxxxxxxx'
|
||||
@ -38,4 +38,4 @@ authentication:
|
||||
permission:
|
||||
switch: false
|
||||
component: false
|
||||
dataset: false
|
||||
dataset: false
|
||||
|
||||
8
docs/_category_.json
Normal file
@ -0,0 +1,8 @@
|
||||
{
|
||||
"label": "Get Started",
|
||||
"position": 1,
|
||||
"link": {
|
||||
"type": "generated-index",
|
||||
"description": "RAGFlow Quick Start"
|
||||
}
|
||||
}
|
||||
8
docs/guides/_category_.json
Normal file
@ -0,0 +1,8 @@
|
||||
{
|
||||
"label": "User Guides",
|
||||
"position": 2,
|
||||
"link": {
|
||||
"type": "generated-index",
|
||||
"description": "RAGFlow User Guides"
|
||||
}
|
||||
}
|
||||
138
docs/guides/configure_knowledge_base.md
Normal file
@ -0,0 +1,138 @@
|
||||
---
|
||||
sidebar_position: 1
|
||||
slug: /configure_knowledge_base
|
||||
---
|
||||
|
||||
# Configure a knowledge base
|
||||
|
||||
Knowledge base, hallucination-free chat, and file management are three pillars of RAGFlow. RAGFlow's AI chats are based on knowledge bases. Each of RAGFlow's knowledge bases serves as a knowledge source, *parsing* files uploaded from your local machine and file references generated in **File Management** into the real 'knowledge' for future AI chats. This guide demonstrates some basic usages of the knowledge base feature, covering the following topics:
|
||||
|
||||
- Create a knowledge base
|
||||
- Configure a knowledge base
|
||||
- Search for a knowledge base
|
||||
- Delete a knowledge base
|
||||
|
||||
## Create knowledge base
|
||||
|
||||
With multiple knowledge bases, you can build more flexible, diversified question answering. To create your first knowledge base:
|
||||
|
||||

|
||||
|
||||
_Each time a knowledge base is created, a folder with the same name is generated in the **root/.knowledgebase** directory._
|
||||
|
||||
## Configure knowledge base
|
||||
|
||||
The following screen shot shows the configuration page of a knowledge base. A proper configuration of your knowledge base is crucial for future AI chats. For example, choosing the wrong embedding model or chunk method would cause unexpected semantic loss or mismatched answers in chats.
|
||||
|
||||

|
||||
|
||||
This section covers the following topics:
|
||||
|
||||
- Select chunk method
|
||||
- Select embedding model
|
||||
- Upload file
|
||||
- Parse file
|
||||
- Intervene with file parsing results
|
||||
- Run retrieval testing
|
||||
|
||||
### Select chunk method
|
||||
|
||||
RAGFlow offers multiple chunking templates to facilitate chunking files of different layouts and ensure semantic integrity. In **Chunk method**, you can choose the default template that suits the layouts and formats of your files. The following table shows the descriptions and the compatible file formats of each supported chunk template:
|
||||
|
||||
| **Template** | Description | File format |
|
||||
| ------------ | ------------------------------------------------------------ | ---------------------------------------------------- |
|
||||
| General | Files are consecutively chunked based on a preset chunk token number. | DOCX, EXCEL, PPT, PDF, TXT, JPEG, JPG, PNG, TIF, GIF |
|
||||
| Q&A | | EXCEL, CSV/TXT |
|
||||
| Manual | | PDF |
|
||||
| Table | | EXCEL, CSV/TXT |
|
||||
| Paper | | PDF |
|
||||
| Book | | DOCX, PDF, TXT |
|
||||
| Laws | | DOCX, PDF, TXT |
|
||||
| Presentation | | PDF, PPTX |
|
||||
| Picture | | JPEG, JPG, PNG, TIF, GIF |
|
||||
| One | The entire document is chunked as one. | DOCX, EXCEL, PDF, TXT |
|
||||
|
||||
You can also change the chunk template for a particular file on the **Datasets** page.
|
||||
|
||||

|
||||
|
||||
### Select embedding model
|
||||
|
||||
An embedding model builds a vector index on file chunks. Once you have chosen an embedding model and used it to parse a file, you are no longer allowed to change it. To switch to a different embedding model, you *must* delete all completed file chunks in the knowledge base. The obvious reason is that all files in a specific knowledge base *must* be parsed using the *same* embedding model, so that they are compared in the same embedding space (a minimal sketch follows the model list below).
|
||||
|
||||
The following embedding models can be deployed locally:
|
||||
|
||||
- BAAI/bge-large-zh-v1.5
|
||||
- BAAI/bge-base-en-v1.5
|
||||
- BAAI/bge-large-en-v1.5
|
||||
- BAAI/bge-small-en-v1.5
|
||||
- BAAI/bge-small-zh-v1.5
|
||||
- jinaai/jina-embeddings-v2-base-en
|
||||
- jinaai/jina-embeddings-v2-small-en
|
||||
- nomic-ai/nomic-embed-text-v1.5
|
||||
- sentence-transformers/all-MiniLM-L6-v2
|
||||
- maidalun1020/bce-embedding-base_v1
|
||||
|
||||
### Upload file
|
||||
|
||||
- RAGFlow's **File Management** allows you to link a file to multiple knowledge bases, in which case each target knowledge base holds a reference to the file.
|
||||
- In **Knowledge Base**, you are also given the option of uploading a single file or a folder of files (bulk upload) from your local machine to a knowledge base, in which case the knowledge base holds file copies.
|
||||
|
||||
While uploading files directly to a knowledge base seems more convenient, we *highly* recommend uploading files to **File Management** and then linking them to the target knowledge bases. This way, you can avoid permanently deleting files uploaded to the knowledge base.
|
||||
|
||||
### Parse file
|
||||
|
||||
File parsing is a crucial topic in knowledge base configuration. The meaning of file parsing in RAGFlow is twofold: chunking files based on file layout and building embedding and full-text (keyword) indexes on these chunks (an illustrative sketch follows the list below). After selecting the chunk method and embedding model, you can start parsing a file:
|
||||
|
||||

|
||||
|
||||
- Click the play button next to **UNSTART** to start file parsing.
|
||||
- Click the red-cross icon and then refresh, if your file parsing stalls for a long time.
|
||||
- As shown above, RAGFlow allows you to use a different chunk method for a particular file, offering flexibility beyond the default method.
|
||||
- As shown above, RAGFlow allows you to enable or disable individual files, offering finer control over knowledge base-based AI chats.
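For readers who prefer code to prose, the sketch below mimics the twofold parsing step described above: layout-based chunking, then a vector index and a keyword index built over the same chunks. Every name in it (`chunk_by_layout`, `VectorIndex`, `KeywordIndex`, `fake_embed`) is a hypothetical stand-in, not a RAGFlow API.

```python
# Illustrative stand-ins only -- not RAGFlow's APIs.
from collections import defaultdict

def chunk_by_layout(text, max_chars=1024):
    # naive placeholder chunker: split on blank lines and cap the chunk length
    return [p[:max_chars] for p in text.split("\n\n") if p.strip()]

class VectorIndex:
    def __init__(self, embed):
        self.embed, self.rows = embed, []
    def add(self, chunk_id, text):
        self.rows.append((chunk_id, self.embed(text)))   # embedding (vector) index

class KeywordIndex:
    def __init__(self):
        self.postings = defaultdict(set)
    def add(self, chunk_id, text):
        for token in text.lower().split():
            self.postings[token].add(chunk_id)           # full-text (keyword) index

def fake_embed(text):
    # placeholder for the embedding model selected for the knowledge base
    return [text.count(c) for c in "aeiournst"]

vec_idx, kw_idx = VectorIndex(fake_embed), KeywordIndex()
for i, chunk in enumerate(chunk_by_layout("First paragraph.\n\nSecond paragraph.")):
    vec_idx.add(i, chunk)
    kw_idx.add(i, chunk)
print(len(vec_idx.rows), dict(kw_idx.postings))
```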
|
||||
|
||||
### Intervene with file parsing results
|
||||
|
||||
RAGFlow features visibility and explainability, allowing you to view the chunking results and intervene where necessary. To do so:
|
||||
|
||||
1. Click on the file that completes file parsing to view the chunking results:
|
||||
|
||||
_You are taken to the **Chunk** page:_
|
||||
|
||||

|
||||
|
||||
2. Hover over each snapshot for a quick view of each chunk.
|
||||
|
||||
3. Double click the chunked texts to add keywords or make *manual* changes where necessary:
|
||||
|
||||

|
||||
|
||||
4. In Retrieval testing, ask a quick question in **Test text** to double check if your configurations work:
|
||||
|
||||
_As you can tell from the following, RAGFlow responds with truthful citations._
|
||||
|
||||

|
||||
|
||||
### Run retrieval testing
|
||||
|
||||
RAGFlow uses multiple recall, combining full-text search and vector search, in its chats. Prior to setting up an AI chat, consider adjusting the following parameters to ensure that the intended information always turns up in answers:
|
||||
|
||||
- Similarity threshold: Chunks with similarities below the threshold will be filtered out. It defaults to 0.2.
|
||||
- Vector similarity weight: The percentage by which vector similarity contributes to the overall score. It defaults to 0.3 (see the sketch below).
|
||||
|
||||

|
||||
|
||||
## Search for knowledge base
|
||||
|
||||
As of RAGFlow v0.7.0, the search feature is still in a rudimentary form, supporting only knowledge base search by name.
|
||||
|
||||

|
||||
|
||||
## Delete knowledge base
|
||||
|
||||
You are allowed to delete a knowledge base. Hover your mouse over the three-dot icon of the intended knowledge base card and the **Delete** option appears. Once you delete a knowledge base, the associated folder under the **root/.knowledgebase** directory is AUTOMATICALLY REMOVED. The consequences are:
|
||||
|
||||
- The files uploaded directly to the knowledge base are gone;
|
||||
- The file references, which you created from within **File Management**, are gone, but the associated files still exist in **File Management**.
|
||||
|
||||

|
||||
75
docs/guides/deploy_local_llm.md
Normal file
@ -0,0 +1,75 @@
|
||||
---
|
||||
sidebar_position: 5
|
||||
slug: /deploy_local_llm
|
||||
---
|
||||
|
||||
# Deploy a local LLM
|
||||
|
||||
RAGFlow supports deploying LLMs locally using Ollama or Xinference.
|
||||
|
||||
## Ollama
|
||||
|
||||
[Ollama](https://github.com/ollama/ollama) provides one-click deployment of local LLMs.
|
||||
|
||||
### Install
|
||||
|
||||
- [Ollama on Linux](https://github.com/ollama/ollama/blob/main/docs/linux.md)
|
||||
- [Ollama Windows Preview](https://github.com/ollama/ollama/blob/main/docs/windows.md)
|
||||
- [Docker](https://hub.docker.com/r/ollama/ollama)
|
||||
|
||||
### Launch Ollama
|
||||
|
||||
Decide which LLM you want to deploy ([here's a list of supported LLMs](https://ollama.com/library)), say, **mistral**:
|
||||
```bash
|
||||
$ ollama run mistral
|
||||
```
|
||||
Or,
|
||||
```bash
|
||||
$ docker exec -it ollama ollama run mistral
|
||||
```
|
||||
|
||||
### Use Ollama in RAGFlow
|
||||
|
||||
- Go to 'Settings > Model Providers > Models to be added > Ollama'.
|
||||
|
||||

|
||||
|
||||
> Base URL: Enter the base URL where the Ollama service is accessible, like, `http://<your-ollama-endpoint-domain>:11434`.
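If you are unsure whether the base URL you entered here is reachable from the machine running RAGFlow, a quick check like the one below can help. It assumes the `requests` package and Ollama's standard REST endpoint for listing locally available models; replace the placeholder URL with your own.

```python
# Quick reachability check for the Ollama base URL configured above (assumes `pip install requests`).
import requests

base_url = "http://<your-ollama-endpoint-domain>:11434"    # same value as entered in the dialog
resp = requests.get(f"{base_url}/api/tags", timeout=5)     # lists models the Ollama server is serving
resp.raise_for_status()
print([m["name"] for m in resp.json().get("models", [])])  # e.g. ['mistral:latest']
```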
|
||||
|
||||
- Use Ollama Models.
|
||||
|
||||

|
||||
|
||||
## Xinference
|
||||
|
||||
Xorbits Inference ([Xinference](https://github.com/xorbitsai/inference)) empowers you to unleash the full potential of cutting-edge AI models.
|
||||
|
||||
### Install
|
||||
|
||||
- [pip install "xinference[all]"](https://inference.readthedocs.io/en/latest/getting_started/installation.html)
|
||||
- [Docker](https://inference.readthedocs.io/en/latest/getting_started/using_docker_image.html)
|
||||
|
||||
To start a local instance of Xinference, run the following command:
|
||||
```bash
|
||||
$ xinference-local --host 0.0.0.0 --port 9997
|
||||
```
|
||||
### Launch Xinference
|
||||
|
||||
Decide which LLM you want to deploy ([here's a list of supported LLMs](https://inference.readthedocs.io/en/latest/models/builtin/)), say, **mistral**.
|
||||
Execute the following command to launch the model; remember to replace `${quantization}` with your chosen quantization method:
|
||||
```bash
|
||||
$ xinference launch -u mistral --model-name mistral-v0.1 --size-in-billions 7 --model-format pytorch --quantization ${quantization}
|
||||
```
|
||||
|
||||
### Use Xinference in RAGFlow
|
||||
|
||||
- Go to 'Settings > Model Providers > Models to be added > Xinference'.
|
||||
|
||||

|
||||
|
||||
> Base URL: Enter the base URL where the Xinference service is accessible, like, `http://<your-xinference-endpoint-domain>:9997/v1`.
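Since Xinference exposes an OpenAI-compatible API under the `/v1` base URL above, you can also verify the launched model outside of RAGFlow with the `openai` Python client. The model name `mistral` matches the `-u` value from the launch command, and the dummy API key reflects that Xinference does not require one by default; both are assumptions to adapt to your setup.

```python
# Sanity check against Xinference's OpenAI-compatible endpoint (assumes `pip install openai`).
from openai import OpenAI

client = OpenAI(
    base_url="http://<your-xinference-endpoint-domain>:9997/v1",  # the base URL configured above
    api_key="not-needed",                                         # Xinference ignores it by default
)
reply = client.chat.completions.create(
    model="mistral",  # the `-u` name used with `xinference launch`
    messages=[{"role": "user", "content": "Say hello in one word."}],
)
print(reply.choices[0].message.content)
```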
|
||||
|
||||
- Use Xinference Models.
|
||||
|
||||

|
||||

|
||||
30
docs/guides/llm_api_key_setup.md
Normal file
@ -0,0 +1,30 @@
|
||||
---
|
||||
sidebar_position: 4
|
||||
slug: /llm_api_key_setup
|
||||
---
|
||||
|
||||
# Set your LLM API key
|
||||
|
||||
You have two ways to input your LLM API key.
|
||||
|
||||
## Before Starting The System
|
||||
|
||||
In **user_default_llm** of [service_conf.yaml](https://github.com/infiniflow/ragflow/blob/main/docker/service_conf.yaml), you need to specify the LLM factory and your own _API_KEY_.
|
||||
RAGFlow supports the following LLMs, with more in the pipeline:
|
||||
|
||||
- [OpenAI](https://platform.openai.com/login?launch)
|
||||
- [Tongyi-Qianwen](https://dashscope.console.aliyun.com/model),
|
||||
- [ZHIPU-AI](https://open.bigmodel.cn/),
|
||||
- [Moonshot](https://platform.moonshot.cn/docs)
|
||||
- [DeepSeek](https://platform.deepseek.com/api-docs/)
|
||||
- [Baichuan](https://www.baichuan-ai.com/home)
|
||||
- [VolcEngine](https://www.volcengine.com/docs/82379)
|
||||
|
||||
After signing up with these LLM suppliers, create your own API key; they all offer a certain amount of free quota.
|
||||
|
||||
## After Starting The System
|
||||
|
||||
You can also set your API key in **User Setting** as follows:
|
||||
|
||||

|
||||
|
||||
84
docs/guides/manage_files.md
Normal file
@ -0,0 +1,84 @@
|
||||
---
|
||||
sidebar_position: 3
|
||||
slug: /manage_files
|
||||
---
|
||||
|
||||
# Manage files
|
||||
|
||||
Knowledge base, hallucination-free chat, and file management are three pillars of RAGFlow. RAGFlow's file management allows you to upload files individually or in bulk. You can then link an uploaded file to multiple target knowledge bases. This guide showcases some basic usages of the file management feature.
|
||||
|
||||
## Create folder
|
||||
|
||||
RAGFlow's file management allows you to establish your file system with nested folder structures. To create a folder in the root directory of RAGFlow:
|
||||
|
||||

|
||||
|
||||
> Each knowledge base in RAGFlow has a corresponding folder under the **root/.knowledgebase** directory. You are not allowed to create a subfolder within it.
|
||||
|
||||
## Upload file
|
||||
|
||||
RAGFlow's file management supports file uploads from your local machine, allowing both individual and bulk uploads:
|
||||
|
||||

|
||||
|
||||

|
||||
|
||||
## Preview file
|
||||
|
||||
RAGFlow's file management supports previewing files in the following formats:
|
||||
|
||||
- Documents (PDF, DOCX)
|
||||
- Tables (XLSX)
|
||||
- Pictures (JPEG, JPG, PNG, TIF, GIF)
|
||||
|
||||

|
||||
|
||||
## Link file to knowledge bases
|
||||
|
||||
RAGFlow's file management allows you to *link* an uploaded file to multiple knowledge bases, creating a file reference in each target knowledge base. Therefore, deleting a file in your file management will AUTOMATICALLY REMOVE all related file references across the knowledge bases.
|
||||
|
||||

|
||||
|
||||
You can link your file to one knowledge base or multiple knowledge bases at one time:
|
||||
|
||||

|
||||
|
||||
## Move file to specified folder
|
||||
|
||||
As of RAGFlow v0.7.0, this feature is *not* available.
|
||||
|
||||
## Search files or folders
|
||||
|
||||
As of RAGFlow v0.7.0, the search feature is still in a rudimentary form, supporting only file and folder search in the current directory by name (files or folders in child directories will not be retrieved).
|
||||
|
||||

|
||||
|
||||
## Rename file or folder
|
||||
|
||||
RAGFlow's file management allows you to rename a file or folder:
|
||||
|
||||

|
||||
|
||||
|
||||
## Delete files or folders
|
||||
|
||||
RAGFlow's file management allows you to delete files or folders individually or in bulk.
|
||||
|
||||
To delete a file or folder:
|
||||
|
||||

|
||||
|
||||
To bulk delete files or folders:
|
||||
|
||||

|
||||
|
||||
> - You are not allowed to delete the **root/.knowledgebase** folder.
|
||||
> - Deleting files that have been linked to knowledge bases will AUTOMATICALLY REMOVE all associated file references across the knowledge bases.
|
||||
|
||||
## Download uploaded file
|
||||
|
||||
RAGFlow's file management allows you to download an uploaded file:
|
||||
|
||||

|
||||
|
||||
> As of RAGFlow v0.7.0, bulk download is not supported, nor can you download an entire folder.
|
||||
@ -1,66 +1,71 @@
|
||||
# Set vm.max_map_count to at least 262144
|
||||
|
||||
## Linux
|
||||
|
||||
To check the value of `vm.max_map_count`:
|
||||
|
||||
```bash
|
||||
$ sysctl vm.max_map_count
|
||||
```
|
||||
|
||||
Reset `vm.max_map_count` to a value at least 262144 if it is not.
|
||||
|
||||
```bash
|
||||
# In this case, we set it to 262144:
|
||||
$ sudo sysctl -w vm.max_map_count=262144
|
||||
```
|
||||
|
||||
This change will be reset after a system reboot. To ensure your change remains permanent, add or update the `vm.max_map_count` value in **/etc/sysctl.conf** accordingly:
|
||||
|
||||
```bash
|
||||
vm.max_map_count=262144
|
||||
```
|
||||
|
||||
## Mac
|
||||
|
||||
```bash
|
||||
$ screen ~/Library/Containers/com.docker.docker/Data/vms/0/tty
|
||||
$ sysctl -w vm.max_map_count=262144
|
||||
```
|
||||
To exit the screen session, type Ctrl a d.
|
||||
|
||||
## Windows and macOS with Docker Desktop
|
||||
|
||||
The vm.max_map_count setting must be set via docker-machine:
|
||||
|
||||
```bash
|
||||
$ docker-machine ssh
|
||||
$ sudo sysctl -w vm.max_map_count=262144
|
||||
```
|
||||
|
||||
## Windows with Docker Desktop WSL 2 backend
|
||||
|
||||
To manually set it every time you reboot, you must run the following commands in a command prompt or PowerShell window every time you restart Docker:
|
||||
|
||||
```bash
|
||||
$ wsl -d docker-desktop -u root
|
||||
$ sysctl -w vm.max_map_count=262144
|
||||
```
|
||||
If you are on these versions of WSL and you do not want to have to run those commands every time you restart Docker, you can globally change every WSL distribution with this setting by modifying your %USERPROFILE%\.wslconfig as follows:
|
||||
|
||||
```bash
|
||||
[wsl2]
|
||||
kernelCommandLine = "sysctl.vm.max_map_count=262144"
|
||||
```
|
||||
This will cause all WSL2 VMs to have that setting assigned when they start.
|
||||
|
||||
If you are on Windows 11, or Windows 10 version 22H2 and have installed the Microsoft Store version of WSL, you can modify the /etc/sysctl.conf within the "docker-desktop" WSL distribution, perhaps with commands like this:
|
||||
|
||||
```bash
|
||||
$ wsl -d docker-desktop -u root
|
||||
$ vi /etc/sysctl.conf
|
||||
```
|
||||
and appending a line which reads:
|
||||
```bash
|
||||
vm.max_map_count = 262144
|
||||
---
|
||||
sidebar_position: 7
|
||||
slug: /max_map_count
|
||||
---
|
||||
|
||||
# Update vm.max_map_count
|
||||
|
||||
## Linux
|
||||
|
||||
To check the value of `vm.max_map_count`:
|
||||
|
||||
```bash
|
||||
$ sysctl vm.max_map_count
|
||||
```
|
||||
|
||||
Reset `vm.max_map_count` to a value at least 262144 if it is not.
|
||||
|
||||
```bash
|
||||
# In this case, we set it to 262144:
|
||||
$ sudo sysctl -w vm.max_map_count=262144
|
||||
```
|
||||
|
||||
This change will be reset after a system reboot. To ensure your change remains permanent, add or update the `vm.max_map_count` value in **/etc/sysctl.conf** accordingly:
|
||||
|
||||
```bash
|
||||
vm.max_map_count=262144
|
||||
```
|
||||
|
||||
## Mac
|
||||
|
||||
```bash
|
||||
$ screen ~/Library/Containers/com.docker.docker/Data/vms/0/tty
|
||||
$ sysctl -w vm.max_map_count=262144
|
||||
```
|
||||
To exit the screen session, type Ctrl a d.
|
||||
|
||||
## Windows and macOS with Docker Desktop
|
||||
|
||||
The vm.max_map_count setting must be set via docker-machine:
|
||||
|
||||
```bash
|
||||
$ docker-machine ssh
|
||||
$ sudo sysctl -w vm.max_map_count=262144
|
||||
```
|
||||
|
||||
## Windows with Docker Desktop WSL 2 backend
|
||||
|
||||
To manually set it every time you reboot, you must run the following commands in a command prompt or PowerShell window every time you restart Docker:
|
||||
|
||||
```bash
|
||||
$ wsl -d docker-desktop -u root
|
||||
$ sysctl -w vm.max_map_count=262144
|
||||
```
|
||||
If you are on these versions of WSL and you do not want to have to run those commands every time you restart Docker, you can globally change every WSL distribution with this setting by modifying your %USERPROFILE%\.wslconfig as follows:
|
||||
|
||||
```bash
|
||||
[wsl2]
|
||||
kernelCommandLine = "sysctl.vm.max_map_count=262144"
|
||||
```
|
||||
This will cause all WSL2 VMs to have that setting assigned when they start.
|
||||
|
||||
If you are on Windows 11, or Windows 10 version 22H2 and have installed the Microsoft Store version of WSL, you can modify the /etc/sysctl.conf within the "docker-desktop" WSL distribution, perhaps with commands like this:
|
||||
|
||||
```bash
|
||||
$ wsl -d docker-desktop -u root
|
||||
$ vi /etc/sysctl.conf
|
||||
```
|
||||
and appending a line which reads:
|
||||
```bash
|
||||
vm.max_map_count = 262144
|
||||
```
|
||||
59
docs/guides/start_chat.md
Normal file
@ -0,0 +1,59 @@
|
||||
---
|
||||
sidebar_position: 2
|
||||
slug: /start_chat
|
||||
---
|
||||
|
||||
# Start an AI chat
|
||||
|
||||
Knowledge base, hallucination-free chat, and file management are three pillars of RAGFlow. Chats in RAGFlow are based on a particular knowledge base or multiple knowledge bases. Once you have created your knowledge base and finished file parsing, you can go ahead and start an AI conversation.
|
||||
|
||||
## Start an AI chat
|
||||
|
||||
You start an AI conversation by creating an assistant.
|
||||
|
||||
1. Click the **Chat** tab in the middle top of the page **>** **Create an assistant** to show the **Chat Configuration** dialogue *for your next dialogue*.
|
||||
|
||||
> RAGFlow offers you the flexibility of choosing a different chat model for each dialogue, while allowing you to set the default models in **System Model Settings**.
|
||||
|
||||
2. Update **Assistant Setting**:
|
||||
|
||||
- **Assistant name** is the name of your chat assistant. Each assistant corresponds to a dialogue with a unique combination of knowledge bases, prompts, hybrid search configurations, and large model settings.
|
||||
- **Empty response**:
|
||||
- If you wish to *confine* RAGFlow's answers to your knowledge bases, leave a response here. Then when it doesn't retrieve an answer, it *uniformly* responds with what you set here.
|
||||
- If you wish RAGFlow to *improvise* when it doesn't retrieve an answer from your knowledge bases, leave it blank, which may give rise to hallucinations.
|
||||
- **Show Quote**: This is a key feature of RAGFlow and is enabled by default. RAGFlow does not work like a black box; instead, it clearly shows the sources of information that its responses are based on.
|
||||
- Select the corresponding knowledge bases. You can select one or multiple knowledge bases, but ensure that they use the same embedding model; otherwise, an error will occur.
|
||||
|
||||
3. Update **Prompt Engine**:
|
||||
|
||||
- In **System**, you fill in the prompts for your LLM; you can also leave the default prompt as-is to begin with.
|
||||
- **Similarity threshold** sets the similarity "bar" for each chunk of text. The default is 0.2. Text chunks with lower similarity scores are filtered out of the final response.
|
||||
- **Vector similarity weight** is set to 0.3 by default. RAGFlow uses a hybrid score system, combining keyword similarity and vector similarity, for evaluating the relevance of different text chunks. This value sets the weight assigned to the vector similarity component in the hybrid score.
|
||||
- **Top N** determines the *maximum* number of chunks to feed to the LLM. In other words, even if more chunks are retrieved, only the top N chunks are provided as input.
|
||||
- **Variable**:
|
||||
|
||||
4. Update **Model Setting**:
|
||||
|
||||
- In **Model**: you select the chat model. Though you have selected the default chat model in **System Model Settings**, RAGFlow allows you to choose an alternative chat model for your dialogue.
|
||||
- **Freedom** refers to how much the LLM improvises. From **Improvise**, **Precise**, to **Balance**, each freedom level corresponds to a unique combination of **Temperature**, **Top P**, **Presence Penalty**, and **Frequency Penalty**.
|
||||
- **Temperature**: Level of the prediction randomness of the LLM. The higher the value, the more creative the LLM is.
|
||||
- **Top P** is also known as "nucleus sampling". See [here](https://en.wikipedia.org/wiki/Top-p_sampling) for more information.
|
||||
- **Max Tokens**: The maximum length of the LLM's responses. Note that the responses may be curtailed if this value is set too low.
|
||||
|
||||
5. Now, let's start the show:
|
||||
|
||||

|
||||
|
||||

|
||||
|
||||
## Update settings of an existing dialogue
|
||||
|
||||
Hover over an intended dialogue **>** **Edit** to show the chat configuration dialogue:
|
||||
|
||||

|
||||
|
||||
## Integrate chat capabilities into your application
|
||||
|
||||
RAGFlow also offers conversation APIs. Hover over your dialogue **>** **Chat Bot API** to integrate RAGFlow's chat capabilities into your application:
|
||||
|
||||

|
||||
@ -1,19 +0,0 @@
|
||||
|
||||
## Set Before Starting The System
|
||||
|
||||
In **user_default_llm** of [service_conf.yaml](./docker/service_conf.yaml), you need to specify LLM factory and your own _API_KEY_.
|
||||
RagFlow supports the flowing LLM factory, and with more coming in the pipeline:
|
||||
|
||||
> [OpenAI](https://platform.openai.com/login?launch), [Tongyi-Qianwen](https://dashscope.console.aliyun.com/model),
|
||||
> [ZHIPU-AI](https://open.bigmodel.cn/), [Moonshot](https://platform.moonshot.cn/docs)
|
||||
|
||||
After sign in these LLM suppliers, create your own API-Key, they all have a certain amount of free quota.
|
||||
|
||||
## After Starting The System
|
||||
|
||||
You can also set API-Key in **User Setting** as following:
|
||||
|
||||
<div align="center" style="margin-top:20px;margin-bottom:20px;">
|
||||
<img src="https://github.com/infiniflow/ragflow/assets/12318111/e4e4066c-e964-45ff-bd56-c3fc7fb18bd3" width="1000"/>
|
||||
</div>
|
||||
|
||||
@ -1,40 +0,0 @@
|
||||
# Ollama
|
||||
|
||||
<div align="center" style="margin-top:20px;margin-bottom:20px;">
|
||||
<img src="https://github.com/infiniflow/ragflow/assets/12318111/2019e7ee-1e8a-412e-9349-11bbf702e549" width="130"/>
|
||||
</div>
|
||||
|
||||
[Ollama](https://github.com/ollama/ollama) enables one-click deployment of local LLMs.
|
||||
|
||||
## Install
|
||||
|
||||
- [Ollama on Linux](https://github.com/ollama/ollama/blob/main/docs/linux.md)
|
||||
- [Ollama Windows Preview](https://github.com/ollama/ollama/blob/main/docs/windows.md)
|
||||
- [Docker](https://hub.docker.com/r/ollama/ollama)
|
||||
|
||||
## Launch Ollama
|
||||
|
||||
Decide which LLM you want to deploy ([here's a list of supported LLMs](https://ollama.com/library)), say, **mistral**:
|
||||
```bash
|
||||
$ ollama run mistral
|
||||
```
|
||||
Or,
|
||||
```bash
|
||||
$ docker exec -it ollama ollama run mistral
|
||||
```
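Before wiring Ollama into RAGFlow, you may want to confirm that the Ollama server is reachable. A minimal check, assuming Ollama listens on its default port `11434` (adjust the host if Ollama runs on another machine or inside Docker):

```bash
# List the models currently available to the local Ollama server.
# A connection error suggests the service is not up yet.
curl http://localhost:11434/api/tags
```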
|
||||
|
||||
## Use Ollama in RAGFlow
|
||||
|
||||
- Go to 'Settings > Model Providers > Models to be added > Ollama'.
|
||||
|
||||
<div align="center" style="margin-top:20px;margin-bottom:20px;">
|
||||
<img src="https://github.com/infiniflow/ragflow/assets/12318111/a9df198a-226d-4f30-b8d7-829f00256d46" width="1300"/>
|
||||
</div>
|
||||
|
||||
> Base URL: Enter the base URL where the Ollama service is accessible, for example, `http://<your-ollama-endpoint-domain>:11434`.
|
||||
|
||||
- Use Ollama Models.
|
||||
|
||||
<div align="center" style="margin-top:20px;margin-bottom:20px;">
|
||||
<img src="https://github.com/infiniflow/ragflow/assets/12318111/60ff384e-5013-41ff-a573-9a543d237fd3" width="530"/>
|
||||
</div>
|
||||
|
||||
---
|
||||
sidebar_position: 1
|
||||
slug: /
|
||||
---
|
||||
|
||||
# Quick start
|
||||
|
||||
RAGFlow is an open-source RAG (Retrieval-Augmented Generation) engine based on deep document understanding. When integrated with LLMs, it is capable of providing truthful question-answering capabilities, backed by well-founded citations from various complex formatted data.
|
||||
|
||||
This quick start guide describes a general process from:
|
||||
|
||||
- Starting up a local RAGFlow server,
|
||||
- Creating a knowledge base,
|
||||
- Intervening with file parsing, to
|
||||
- Establishing an AI chat based on your datasets.
|
||||
|
||||
## Prerequisites
|
||||
|
||||
- CPU >= 4 cores
|
||||
- RAM >= 16 GB
|
||||
- Disk >= 50 GB
|
||||
- Docker >= 24.0.0 & Docker Compose >= v2.26.1
|
||||
|
||||
> If you have not installed Docker on your local machine (Windows, Mac, or Linux), see [Install Docker Engine](https://docs.docker.com/engine/install/).
|
||||
|
||||
## Start up the server
|
||||
|
||||
This section provides instructions on setting up the RAGFlow server on Linux. If you are on a different operating system, no worries. Most steps are alike.
|
||||
|
||||
1. Ensure `vm.max_map_count` >= 262144:
|
||||
|
||||
> To check the value of `vm.max_map_count`:
|
||||
>
|
||||
> ```bash
|
||||
> $ sysctl vm.max_map_count
|
||||
> ```
|
||||
>
|
||||
> Reset `vm.max_map_count` to a value at least 262144 if it is not.
|
||||
>
|
||||
> ```bash
|
||||
> # In this case, we set it to 262144:
|
||||
> $ sudo sysctl -w vm.max_map_count=262144
|
||||
> ```
|
||||
>
|
||||
> This change will be reset after a system reboot. To ensure your change remains permanent, add or update the `vm.max_map_count` value in **/etc/sysctl.conf** accordingly:
|
||||
>
|
||||
> ```bash
|
||||
> vm.max_map_count=262144
|
||||
> ```
|
||||
> See [this guide](./guides/max_map_count.md) for instructions on permanently setting `vm.max_map_count` on an operating system other than Linux.
|
||||
|
||||
2. Clone the repo:
|
||||
|
||||
```bash
|
||||
$ git clone https://github.com/infiniflow/ragflow.git
|
||||
```
|
||||
|
||||
3. Start up the server using the pre-built Docker images:
|
||||
|
||||
> Running the following commands automatically downloads the *dev* version RAGFlow Docker image. To download and run a specified Docker version, update `RAGFLOW_VERSION` in **docker/.env** to the intended version, for example `RAGFLOW_VERSION=v0.7.0`, before running the following commands.
|
||||
|
||||
```bash
|
||||
$ cd ragflow/docker
|
||||
$ chmod +x ./entrypoint.sh
|
||||
$ docker compose up -d
|
||||
```
|
||||
|
||||
> The core image is about 9 GB in size and may take a while to load.
|
||||
|
||||
4. Check the server status after having the server up and running:
|
||||
|
||||
```bash
|
||||
$ docker logs -f ragflow-server
|
||||
```
|
||||
|
||||
_The following output confirms a successful launch of the system:_
|
||||
|
||||
```bash
|
||||
____ ______ __
|
||||
/ __ \ ____ _ ____ _ / ____// /____ _ __
|
||||
/ /_/ // __ `// __ `// /_ / // __ \| | /| / /
|
||||
/ _, _// /_/ // /_/ // __/ / // /_/ /| |/ |/ /
|
||||
/_/ |_| \__,_/ \__, //_/ /_/ \____/ |__/|__/
|
||||
/____/
|
||||
|
||||
* Running on all addresses (0.0.0.0)
|
||||
* Running on http://127.0.0.1:9380
|
||||
* Running on http://x.x.x.x:9380
|
||||
INFO:werkzeug:Press CTRL+C to quit
|
||||
```
|
||||
|
||||
> If you skip this confirmation step and log in to RAGFlow directly, your browser may report a `network anomaly` error because, at that moment, your RAGFlow may not be fully initialized.
|
||||
|
||||
5. In your web browser, enter the IP address of your server and log in to RAGFlow.
|
||||
|
||||
> - With default settings, you only need to enter `http://IP_OF_YOUR_MACHINE` (**sans** port number), because the default HTTP serving port `80` can be omitted when using the default configurations.
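A quick command-line check that the web server is answering before you open the browser (assuming the default HTTP port `80`; replace `IP_OF_YOUR_MACHINE` with your server's address):

```bash
# Print only the HTTP status line; a 200 or a redirect indicates the UI is being served.
curl -sI http://IP_OF_YOUR_MACHINE | head -n 1
```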
|
||||
|
||||
## Configure LLMs
|
||||
|
||||
RAGFlow is a RAG engine, and it needs to work with an LLM to offer grounded, hallucination-free question-answering capabilities. For now, RAGFlow supports the following LLMs, and the list is expanding:
|
||||
|
||||
- OpenAI
|
||||
- Tongyi-Qianwen
|
||||
- ZHIPU-AI
|
||||
- Moonshot
|
||||
- DeepSeek-V2
|
||||
- Baichuan
|
||||
- VolcEngine
|
||||
|
||||
> RAGFlow also supports deploying LLMs locally using Ollama or Xinference, but this part is not covered in this quick start guide.
|
||||
|
||||
To add and configure an LLM:
|
||||
|
||||
1. Click on your logo on the top right of the page **>** **Model Providers**:
|
||||
|
||||

|
||||
|
||||
> Each RAGFlow account is able to use **text-embedding-v2**, an embedding model of Tongyi-Qianwen, for free. This is why you can see Tongyi-Qianwen in the **Added models** list. You may need to update your Tongyi-Qianwen API key at a later point.
|
||||
|
||||
2. Click on the desired LLM and update the API key accordingly (DeepSeek-V2 in this case):
|
||||
|
||||

|
||||
|
||||
*Your added models appear as follows:*
|
||||
|
||||

|
||||
|
||||
3. Click **System Model Settings** to select the default models:
|
||||
|
||||
- Chat model,
|
||||
- Embedding model,
|
||||
- Image-to-text model.
|
||||
|
||||

|
||||
|
||||
> Some models, such as the image-to-text model **qwen-vl-max**, are subsidiary to a specific LLM. You may need to update your API key to access these models.
|
||||
|
||||
## Create your first knowledge base
|
||||
|
||||
You are allowed to upload files to a knowledge base in RAGFlow and parse them into datasets. A knowledge base is virtually a collection of datasets. Question answering in RAGFlow can be based on a particular knowledge base or multiple knowledge bases. File formats that RAGFlow supports include documents (PDF, DOC, DOCX, TXT, MD), tables (CSV, XLSX, XLS), pictures (JPEG, JPG, PNG, TIF, GIF), and slides (PPT, PPTX).
|
||||
|
||||
To create your first knowledge base:
|
||||
|
||||
1. Click the **Knowledge Base** tab in the top middle of the page **>** **Create knowledge base**.
|
||||
|
||||
2. Input the name of your knowledge base and click **OK** to confirm your changes.
|
||||
|
||||
_You are taken to the **Configuration** page of your knowledge base._
|
||||
|
||||

|
||||
|
||||
3. RAGFlow offers multiple chunk templates that cater to different document layouts and file formats. Select the embedding model and chunk method (template) for your knowledge base.
|
||||
|
||||
> IMPORTANT: Once you have selected an embedding model and used it to parse a file, you are no longer allowed to change it. The obvious reason is that we must ensure that all files in a specific knowledge base are parsed using the *same* embedding model (ensure that they are being compared in the same embedding space).
|
||||
|
||||
_You are taken to the **Dataset** page of your knowledge base._
|
||||
|
||||
4. Click **+ Add file** **>** **Local files** to start uploading a particular file to the knowledge base.
|
||||
|
||||
5. In the uploaded file entry, click the play button to start file parsing:
|
||||
|
||||

|
||||
|
||||
_When the file parsing completes, its parsing status changes to **SUCCESS**._
|
||||
|
||||
## Intervene with file parsing
|
||||
|
||||
RAGFlow features visibility and explainability, allowing you to view the chunking results and intervene where necessary. To do so:
|
||||
|
||||
1. Click on the file that completes file parsing to view the chunking results:
|
||||
|
||||
_You are taken to the **Chunk** page:_
|
||||
|
||||

|
||||
|
||||
2. Hover over each snapshot for a quick view of each chunk.
|
||||
|
||||
3. Double click the chunked texts to add keywords or make *manual* changes where necessary:
|
||||
|
||||

|
||||
|
||||
4. In Retrieval testing, ask a quick question in **Test text** to double check if your configurations work:
|
||||
|
||||
_As you can tell from the following, RAGFlow responds with truthful citations._
|
||||
|
||||

|
||||
|
||||
## Set up an AI chat
|
||||
|
||||
Conversations in RAGFlow are based on a particular knowledge base or multiple knowledge bases. Once you have created your knowledge base and finished file parsing, you can go ahead and start an AI conversation.
|
||||
|
||||
1. Click the **Chat** tab in the middle top of the page **>** **Create an assistant** to show the **Chat Configuration** dialogue *of your next dialogue*.
|
||||
> RAGFlow offers the flexibility of choosing a different chat model for each dialogue, while allowing you to set the default models in **System Model Settings**.
|
||||
|
||||
2. Update **Assistant Setting**:
|
||||
|
||||
- Name your assistant and specify your knowledge bases.
|
||||
- **Empty response**:
|
||||
- If you wish to *confine* RAGFlow's answers to your knowledge bases, leave a response here. Then when it doesn't retrieve an answer, it *uniformly* responds with what you set here.
|
||||
- If you wish RAGFlow to *improvise* when it doesn't retrieve an answer from your knowledge bases, leave it blank, which may give rise to hallucinations.
|
||||
|
||||
3. Update **Prompt Engine** or leave it as is for the beginning.
|
||||
|
||||
4. Update **Model Setting**.
|
||||
|
||||
5. RAGFlow also offers conversation APIs. Hover over your dialogue **>** **Chat Bot API** to integrate RAGFlow's chat capabilities into your applications:
|
||||
|
||||

|
||||
|
||||
6. Now, let's start the show:
|
||||
|
||||

|
||||
|
||||

|
||||
---
|
||||
sidebar_position: 0
|
||||
slug: /contribution_guidelines
|
||||
---
|
||||
|
||||
# Contribution guidelines
|
||||
|
||||
Thanks for wanting to contribute to RAGFlow. This document offers guidelines and major considerations for submitting your contributions.
|
||||
|
||||
- To report a bug, file a [GitHub issue](https://github.com/infiniflow/ragflow/issues/new/choose) with us.
|
||||
- For further questions, you can explore existing discussions or initiate a new one in [Discussions](https://github.com/orgs/infiniflow/discussions).
|
||||
|
||||
|
||||
## What you can contribute
|
||||
|
||||
The list below mentions some contributions you can make, but it is not a complete list.
|
||||
|
||||
- Proposing or implementing new features
|
||||
- Fixing a bug
|
||||
- Adding test cases or demos
|
||||
- Posting a blog or tutorial
|
||||
- Updating existing documents, code, or annotations
|
||||
- Suggesting more user-friendly error codes
|
||||
|
||||
## File a pull request (PR)
|
||||
|
||||
### General workflow
|
||||
|
||||
1. Fork our GitHub repository.
|
||||
2. Clone your fork to your local machine:
|
||||
`git clone git@github.com:<yourname>/ragflow.git`
|
||||
3. Create a local branch:
|
||||
`git checkout -b my-branch`
|
||||
4. Provide sufficient information in your commit message:
`git commit -m 'Provide sufficient info in your commit message'`
5. Commit changes to your local branch, and push them to GitHub:
`git push origin my-branch`
|
||||
6. Submit a pull request for review.
|
||||
|
||||
### Before filing a PR
|
||||
|
||||
- Consider splitting a large PR into multiple smaller, standalone PRs to keep a traceable development history.
|
||||
- Ensure that your PR addresses just one issue, or keep any unrelated changes small.
|
||||
- Add test cases when contributing new features. They demonstrate that your code functions correctly and protect against potential issues from future changes.
|
||||
### Describing your PR
|
||||
|
||||
- Ensure that your PR title is concise and clear, providing all the required information.
|
||||
- Refer to a corresponding GitHub issue in your PR description if applicable.
|
||||
- Include sufficient design details for *breaking changes* or *API changes* in your description.
|
||||
|
||||
### Reviewing & merging a PR
|
||||
- Ensure that your PR passes all Continuous Integration (CI) tests before merging it.
|
||||
|
||||
{
|
||||
"label": "References",
|
||||
"position": 3,
|
||||
"link": {
|
||||
"type": "generated-index",
|
||||
"description": "RAGFlow References"
|
||||
}
|
||||
}
|
||||
|
||||
# Conversation API Instruction
|
||||
|
||||
<div align="center" style="margin-top:20px;margin-bottom:20px;">
|
||||
<img src="https://github.com/infiniflow/ragflow/assets/12318111/df0dcc3d-789a-44f7-89f1-7a5f044ab729" width="830"/>
|
||||
</div>
|
||||
|
||||
## Base URL
|
||||
```buildoutcfg
|
||||
https://demo.ragflow.io/v1/
|
||||
```
|
||||
|
||||
## Authorization
|
||||
|
||||
All the APIs are authorized with an API key. Please keep it safe and private. Don't reveal it in any way from the front end.
The API key should be put in the request header:
|
||||
```buildoutcfg
|
||||
Authorization: Bearer {API_KEY}
|
||||
```
|
||||
|
||||
## Start a conversation
|
||||
|
||||
This should be called whenever a new user comes to chat.
|
||||
### Path: /api/new_conversation
|
||||
### Method: GET
|
||||
### Parameter:
|
||||
|
||||
| name | type | optional | description|
|
||||
|------|-------|----|----|
|
||||
| user_id| string | No | It identifies the user in order to search and calculate statistics.|
|
||||
|
||||
### Response
|
||||
```json
|
||||
{
|
||||
"data": {
|
||||
"create_date": "Fri, 12 Apr 2024 17:26:21 GMT",
|
||||
"create_time": 1712913981857,
|
||||
"dialog_id": "4f0a2e4cb9af11ee9ba20aef05f5e94f",
|
||||
"duration": 0.0,
|
||||
"id": "b9b2e098f8ae11ee9f45fa163e197198",
|
||||
"message": [
|
||||
{
|
||||
"content": "Hi, I'm your assistant, can I help you?",
|
||||
"role": "assistant"
|
||||
}
|
||||
],
|
||||
"reference": [],
|
||||
"tokens": 0,
|
||||
"update_date": "Fri, 12 Apr 2024 17:26:21 GMT",
|
||||
"update_time": 1712913981857,
|
||||
"user_id": "kevinhu"
|
||||
},
|
||||
"retcode": 0,
|
||||
"retmsg": "success"
|
||||
}
|
||||
```
|
||||
> `data['id']` in the response should be stored and will be used in every following round of the conversation.
|
||||
|
||||
## Get history of a conversation
|
||||
|
||||
### Path: /api/conversation/\<id\>
|
||||
### Method: GET
|
||||
### Response
|
||||
```json
|
||||
{
|
||||
"data": {
|
||||
"create_date": "Mon, 01 Apr 2024 09:28:42 GMT",
|
||||
"create_time": 1711934922220,
|
||||
"dialog_id": "df4a4916d7bd11eeaa650242ac180006",
|
||||
"id": "2cae30fcefc711ee94140242ac180006",
|
||||
"message": [
|
||||
{
|
||||
"content": "Hi! I'm your assistant, what can I do for you?",
|
||||
"role": "assistant"
|
||||
},
|
||||
{
|
||||
"content": "What's the vit score for GPT-4?",
|
||||
"role": "user"
|
||||
},
|
||||
{
|
||||
"content": "The ViT Score for GPT-4 in the zero-shot scenario is 0.5058, and in the few-shot scenario, it is 0.6480. ##0$$",
|
||||
"role": "assistant"
|
||||
},
|
||||
{
|
||||
"content": "How is the nvlink topology like?",
|
||||
"role": "user"
|
||||
},
|
||||
{
|
||||
"content": "NVLink topology refers to the arrangement of connections between GPUs using NVIDIA's NVLink technology. Correct NVLink topology for NVIDIA A100 cards involves connecting one GPU to another through a series of NVLink bridges ##0$$. Each of the three attached bridges spans two PCIe slots, and for optimal performance and balanced bridge topology, all three NVLink bridges should be used when connecting two adjacent A100 cards.\n\nHere's a summary of the correct and incorrect topologies:\n\n- **Correct**: Both GPUs are connected via all three NVLink bridges, ensuring full bandwidth and proper communication.\n- **Incorrect**: Not using all three bridges or having an uneven connection configuration would result in suboptimal performance.\n\nIt's also important to note that for multi-CPU systems, both A100 cards in a bridged pair should be within the same CPU domain, unless each CPU has a single A100 PCIe card, in which case they can be bridged together.",
|
||||
"role": "assistant"
|
||||
}
|
||||
],
|
||||
"user_id": "user name",
|
||||
"reference": [
|
||||
{
|
||||
"chunks": [
|
||||
{
|
||||
"chunk_id": "d0bc7892c3ec4aeac071544fd56730a8",
|
||||
"content_ltks": "tabl 1:openagi task-solv perform under differ set for three closed-sourc llm . boldfac denot the highest score under each learn schema . metric gpt-3.5-turbo claude-2 gpt-4 zero few zero few zero few clip score 0.0 0.0 0.0 0.2543 0.0 0.3055 bert score 0.1914 0.3820 0.2111 0.5038 0.2076 0.6307 vit score 0.2437 0.7497 0.4082 0.5416 0.5058 0.6480 overal 0.1450 0.3772 0.2064 0.4332 0.2378 0.5281",
|
||||
"content_with_weight": "<table><caption>Table 1: OpenAGI task-solving performances under different settings for three closed-source LLMs. Boldface denotes the highest score under each learning schema.</caption>\n<tr><th rowspan=2 >Metrics</th><th >GPT-3.5-turbo</th><th></th><th >Claude-2</th><th >GPT-4</th></tr>\n<tr><th >Zero</th><th >Few</th><th >Zero Few</th><th >Zero Few</th></tr>\n<tr><td >CLIP Score</td><td >0.0</td><td >0.0</td><td >0.0 0.2543</td><td >0.0 0.3055</td></tr>\n<tr><td >BERT Score</td><td >0.1914</td><td >0.3820</td><td >0.2111 0.5038</td><td >0.2076 0.6307</td></tr>\n<tr><td >ViT Score</td><td >0.2437</td><td >0.7497</td><td >0.4082 0.5416</td><td >0.5058 0.6480</td></tr>\n<tr><td >Overall</td><td >0.1450</td><td >0.3772</td><td >0.2064 0.4332</td><td >0.2378 0.5281</td></tr>\n</table>",
|
||||
"doc_id": "c790da40ea8911ee928e0242ac180005",
|
||||
"docnm_kwd": "OpenAGI When LLM Meets Domain Experts.pdf",
|
||||
"img_id": "afab9fdad6e511eebdb20242ac180006-d0bc7892c3ec4aeac071544fd56730a8",
|
||||
"important_kwd": [],
|
||||
"kb_id": "afab9fdad6e511eebdb20242ac180006",
|
||||
"positions": [
|
||||
[
|
||||
9.0,
|
||||
159.9383341471354,
|
||||
472.1773274739583,
|
||||
223.58013916015625,
|
||||
307.86692301432294
|
||||
]
|
||||
],
|
||||
"similarity": 0.7310340654129031,
|
||||
"term_similarity": 0.7671974387781668,
|
||||
"vector_similarity": 0.40556370512552886
|
||||
},
|
||||
{
|
||||
"chunk_id": "7e2345d440383b756670e1b0f43a7007",
|
||||
"content_ltks": "5.5 experiment analysi the main experiment result are tabul in tab . 1 and 2 , showcas the result for closed-sourc and open-sourc llm , respect . the overal perform is calcul a the averag of cllp 8 bert and vit score . here , onli the task descript of the benchmark task are fed into llm(addit inform , such a the input prompt and llm\u2019output , is provid in fig . a.4 and a.5 in supplementari). broadli speak , closed-sourc llm demonstr superior perform on openagi task , with gpt-4 lead the pack under both zero-and few-shot scenario . in the open-sourc categori , llama-2-13b take the lead , consist post top result across variou learn schema--the perform possibl influenc by it larger model size . notabl , open-sourc llm significantli benefit from the tune method , particularli fine-tun and\u2019rltf . these method mark notic enhanc for flan-t5-larg , vicuna-7b , and llama-2-13b when compar with zero-shot and few-shot learn schema . in fact , each of these open-sourc model hit it pinnacl under the rltf approach . conclus , with rltf tune , the perform of llama-2-13b approach that of gpt-3.5 , illustr it potenti .",
|
||||
"content_with_weight": "5.5 Experimental Analysis\nThe main experimental results are tabulated in Tab. 1 and 2, showcasing the results for closed-source and open-source LLMs, respectively. The overall performance is calculated as the average of CLlP\n8\nBERT and ViT scores. Here, only the task descriptions of the benchmark tasks are fed into LLMs (additional information, such as the input prompt and LLMs\u2019 outputs, is provided in Fig. A.4 and A.5 in supplementary). Broadly speaking, closed-source LLMs demonstrate superior performance on OpenAGI tasks, with GPT-4 leading the pack under both zero- and few-shot scenarios. In the open-source category, LLaMA-2-13B takes the lead, consistently posting top results across various learning schema--the performance possibly influenced by its larger model size. Notably, open-source LLMs significantly benefit from the tuning methods, particularly Fine-tuning and\u2019 RLTF. These methods mark noticeable enhancements for Flan-T5-Large, Vicuna-7B, and LLaMA-2-13B when compared with zero-shot and few-shot learning schema. In fact, each of these open-source models hits its pinnacle under the RLTF approach. Conclusively, with RLTF tuning, the performance of LLaMA-2-13B approaches that of GPT-3.5, illustrating its potential.",
|
||||
"doc_id": "c790da40ea8911ee928e0242ac180005",
|
||||
"docnm_kwd": "OpenAGI When LLM Meets Domain Experts.pdf",
|
||||
"img_id": "afab9fdad6e511eebdb20242ac180006-7e2345d440383b756670e1b0f43a7007",
|
||||
"important_kwd": [],
|
||||
"kb_id": "afab9fdad6e511eebdb20242ac180006",
|
||||
"positions": [
|
||||
[
|
||||
8.0,
|
||||
107.3,
|
||||
508.90000000000003,
|
||||
686.3,
|
||||
697.0
|
||||
],
|
||||
],
|
||||
"similarity": 0.6691508616357027,
|
||||
"term_similarity": 0.6999011754270821,
|
||||
"vector_similarity": 0.39239803751328806
|
||||
},
|
||||
],
|
||||
"doc_aggs": [
|
||||
{
|
||||
"count": 8,
|
||||
"doc_id": "c790da40ea8911ee928e0242ac180005",
|
||||
"doc_name": "OpenAGI When LLM Meets Domain Experts.pdf"
|
||||
}
|
||||
],
|
||||
"total": 8
|
||||
},
|
||||
{
|
||||
"chunks": [
|
||||
{
|
||||
"chunk_id": "8c11a1edddb21ad2ae0c43b4a5dcfa62",
|
||||
"content_ltks": "nvlink bridg support nvidia\u00aenvlink\u00aei a high-spe point-to-point peer transfer connect , where one gpu can transfer data to and receiv data from one other gpu . the nvidia a100 card support nvlink bridg connect with a singl adjac a100 card . each of the three attach bridg span two pcie slot . to function correctli a well a to provid peak bridg bandwidth , bridg connect with an adjac a100 card must incorpor all three nvlink bridg . wherev an adjac pair of a100 card exist in the server , for best bridg perform and balanc bridg topolog , the a100 pair should be bridg . figur 4 illustr correct and incorrect a100 nvlink connect topolog . nvlink topolog\u2013top view figur 4. correct incorrect correct incorrect for system that featur multipl cpu , both a100 card of a bridg card pair should be within the same cpu domain\u2014that is , under the same cpu\u2019s topolog . ensur thi benefit workload applic perform . the onli except is for dual cpu system wherein each cpu ha a singl a100 pcie card under it;in that case , the two a100 pcie card in the system may be bridg togeth . a100 nvlink speed and bandwidth are given in the follow tabl . tabl 5. a100 nvlink speed and bandwidth paramet valu total nvlink bridg support by nvidia a100 3 total nvlink rx and tx lane support 96 data rate per nvidia a100 nvlink lane(each direct)50 gbp total maximum nvlink bandwidth 600 gbyte per second pb-10137-001_v03|8 nvidia a100 40gb pcie gpu acceler",
|
||||
"content_with_weight": "NVLink Bridge Support\nNVIDIA\u00aeNVLink\u00aeis a high-speed point-to-point peer transfer connection, where one GPU can transfer data to and receive data from one other GPU. The NVIDIA A100 card supports NVLink bridge connection with a single adjacent A100 card.\nEach of the three attached bridges spans two PCIe slots. To function correctly as well as to provide peak bridge bandwidth, bridge connection with an adjacent A100 card must incorporate all three NVLink bridges. Wherever an adjacent pair of A100 cards exists in the server, for best bridging performance and balanced bridge topology, the A100 pair should be bridged. Figure 4 illustrates correct and incorrect A100 NVLink connection topologies.\nNVLink Topology \u2013Top Views \nFigure 4. \nCORRECT \nINCORRECT \nCORRECT \nINCORRECT \nFor systems that feature multiple CPUs, both A100 cards of a bridged card pair should be within the same CPU domain\u2014that is, under the same CPU\u2019s topology. Ensuring this benefits workload application performance. The only exception is for dual CPU systems wherein each CPU has a single A100 PCIe card under it; in that case, the two A100 PCIe cards in the system may be bridged together.\nA100 NVLink speed and bandwidth are given in the following table.\n<table><caption>Table 5. A100 NVLink Speed and Bandwidth </caption>\n<tr><th >Parameter </th><th >Value </th></tr>\n<tr><td >Total NVLink bridges supported by NVIDIA A100 </td><td >3 </td></tr>\n<tr><td >Total NVLink Rx and Tx lanes supported </td><td >96 </td></tr>\n<tr><td >Data rate per NVIDIA A100 NVLink lane (each direction)</td><td >50 Gbps </td></tr>\n<tr><td >Total maximum NVLink bandwidth</td><td >600 Gbytes per second </td></tr>\n</table>\nPB-10137-001_v03 |8\nNVIDIA A100 40GB PCIe GPU Accelerator",
|
||||
"doc_id": "806d1ed0ea9311ee860a0242ac180005",
|
||||
"docnm_kwd": "A100-PCIE-Prduct-Brief.pdf",
|
||||
"img_id": "afab9fdad6e511eebdb20242ac180006-8c11a1edddb21ad2ae0c43b4a5dcfa62",
|
||||
"important_kwd": [],
|
||||
"kb_id": "afab9fdad6e511eebdb20242ac180006",
|
||||
"positions": [
|
||||
[
|
||||
12.0,
|
||||
84.0,
|
||||
541.3,
|
||||
76.7,
|
||||
96.7
|
||||
],
|
||||
],
|
||||
"similarity": 0.3200748779905588,
|
||||
"term_similarity": 0.3082244010114718,
|
||||
"vector_similarity": 0.42672917080234146
|
||||
},
|
||||
],
|
||||
"doc_aggs": [
|
||||
{
|
||||
"count": 1,
|
||||
"doc_id": "806d1ed0ea9311ee860a0242ac180005",
|
||||
"doc_name": "A100-PCIE-Prduct-Brief.pdf"
|
||||
}
|
||||
],
|
||||
"total": 3
|
||||
}
|
||||
],
|
||||
"update_date": "Tue, 02 Apr 2024 09:07:49 GMT",
|
||||
"update_time": 1712020069421
|
||||
},
|
||||
"retcode": 0,
|
||||
"retmsg": "success"
|
||||
}
|
||||
```
|
||||
|
||||
- **message**: All the chat history in it.
|
||||
- role: user or assistant
|
||||
- content: the text content of user or assistant. The citations are in a format like: ##0$$. The number in the middle indicates which part in data.reference.chunks it refers to.
|
||||
|
||||
- **user_id**: This is set by the caller.
|
||||
- **reference**: Every item in it refers to the corresponding message in data.message whose role is assistant.
|
||||
- chunks
|
||||
- content_with_weight: The content of chunk.
|
||||
- docnm_kwd: the document name.
|
||||
- img_id: the image id of the chunk. It is an optional field only for PDF/pptx/picture, and is accessed by 'GET' /document/get/\<id\>.
|
||||
- positions: [page_number, [upleft corner(x, y)], [right bottom(x, y)]], the chunk position, only for PDF.
|
||||
- similarity: the hybrid similarity.
|
||||
- term_similarity: keyword similarity
|
||||
- vector_similarity: embedding similarity
|
||||
- doc_aggs:
|
||||
- doc_id: the document can be accessed by 'GET' /document/get/\<id\>
|
||||
- doc_name: the file name
|
||||
- count: the chunk number hit in this document.
|
||||
|
||||
## Chat
|
||||
|
||||
This will be called to get the answer to users' questions.
|
||||
|
||||
### Path: /api/completion
|
||||
### Method: POST
|
||||
### Parameter:
|
||||
|
||||
| name | type | optional | description|
|
||||
|------|-------|----|----|
|
||||
| conversation_id| string | No | This is from calling /new_conversation.|
|
||||
| messages| json | No | All the conversation history stored here including the latest user's question.|
|
||||
|
||||
### Response
|
||||
```json
|
||||
{
|
||||
"data": {
|
||||
"answer": "The ViT Score for GPT-4 in the zero-shot scenario is 0.5058, and in the few-shot scenario, it is 0.6480. ##0$$",
|
||||
"reference": {
|
||||
"chunks": [
|
||||
{
|
||||
"chunk_id": "d0bc7892c3ec4aeac071544fd56730a8",
|
||||
"content_ltks": "tabl 1:openagi task-solv perform under differ set for three closed-sourc llm . boldfac denot the highest score under each learn schema . metric gpt-3.5-turbo claude-2 gpt-4 zero few zero few zero few clip score 0.0 0.0 0.0 0.2543 0.0 0.3055 bert score 0.1914 0.3820 0.2111 0.5038 0.2076 0.6307 vit score 0.2437 0.7497 0.4082 0.5416 0.5058 0.6480 overal 0.1450 0.3772 0.2064 0.4332 0.2378 0.5281",
|
||||
"content_with_weight": "<table><caption>Table 1: OpenAGI task-solving performances under different settings for three closed-source LLMs. Boldface denotes the highest score under each learning schema.</caption>\n<tr><th rowspan=2 >Metrics</th><th >GPT-3.5-turbo</th><th></th><th >Claude-2</th><th >GPT-4</th></tr>\n<tr><th >Zero</th><th >Few</th><th >Zero Few</th><th >Zero Few</th></tr>\n<tr><td >CLIP Score</td><td >0.0</td><td >0.0</td><td >0.0 0.2543</td><td >0.0 0.3055</td></tr>\n<tr><td >BERT Score</td><td >0.1914</td><td >0.3820</td><td >0.2111 0.5038</td><td >0.2076 0.6307</td></tr>\n<tr><td >ViT Score</td><td >0.2437</td><td >0.7497</td><td >0.4082 0.5416</td><td >0.5058 0.6480</td></tr>\n<tr><td >Overall</td><td >0.1450</td><td >0.3772</td><td >0.2064 0.4332</td><td >0.2378 0.5281</td></tr>\n</table>",
|
||||
"doc_id": "c790da40ea8911ee928e0242ac180005",
|
||||
"docnm_kwd": "OpenAGI When LLM Meets Domain Experts.pdf",
|
||||
"img_id": "afab9fdad6e511eebdb20242ac180006-d0bc7892c3ec4aeac071544fd56730a8",
|
||||
"important_kwd": [],
|
||||
"kb_id": "afab9fdad6e511eebdb20242ac180006",
|
||||
"positions": [
|
||||
[
|
||||
9.0,
|
||||
159.9383341471354,
|
||||
472.1773274739583,
|
||||
223.58013916015625,
|
||||
307.86692301432294
|
||||
]
|
||||
],
|
||||
"similarity": 0.7310340654129031,
|
||||
"term_similarity": 0.7671974387781668,
|
||||
"vector_similarity": 0.40556370512552886
|
||||
},
|
||||
{
|
||||
"chunk_id": "7e2345d440383b756670e1b0f43a7007",
|
||||
"content_ltks": "5.5 experiment analysi the main experiment result are tabul in tab . 1 and 2 , showcas the result for closed-sourc and open-sourc llm , respect . the overal perform is calcul a the averag of cllp 8 bert and vit score . here , onli the task descript of the benchmark task are fed into llm(addit inform , such a the input prompt and llm\u2019output , is provid in fig . a.4 and a.5 in supplementari). broadli speak , closed-sourc llm demonstr superior perform on openagi task , with gpt-4 lead the pack under both zero-and few-shot scenario . in the open-sourc categori , llama-2-13b take the lead , consist post top result across variou learn schema--the perform possibl influenc by it larger model size . notabl , open-sourc llm significantli benefit from the tune method , particularli fine-tun and\u2019rltf . these method mark notic enhanc for flan-t5-larg , vicuna-7b , and llama-2-13b when compar with zero-shot and few-shot learn schema . in fact , each of these open-sourc model hit it pinnacl under the rltf approach . conclus , with rltf tune , the perform of llama-2-13b approach that of gpt-3.5 , illustr it potenti .",
|
||||
"content_with_weight": "5.5 Experimental Analysis\nThe main experimental results are tabulated in Tab. 1 and 2, showcasing the results for closed-source and open-source LLMs, respectively. The overall performance is calculated as the average of CLlP\n8\nBERT and ViT scores. Here, only the task descriptions of the benchmark tasks are fed into LLMs (additional information, such as the input prompt and LLMs\u2019 outputs, is provided in Fig. A.4 and A.5 in supplementary). Broadly speaking, closed-source LLMs demonstrate superior performance on OpenAGI tasks, with GPT-4 leading the pack under both zero- and few-shot scenarios. In the open-source category, LLaMA-2-13B takes the lead, consistently posting top results across various learning schema--the performance possibly influenced by its larger model size. Notably, open-source LLMs significantly benefit from the tuning methods, particularly Fine-tuning and\u2019 RLTF. These methods mark noticeable enhancements for Flan-T5-Large, Vicuna-7B, and LLaMA-2-13B when compared with zero-shot and few-shot learning schema. In fact, each of these open-source models hits its pinnacle under the RLTF approach. Conclusively, with RLTF tuning, the performance of LLaMA-2-13B approaches that of GPT-3.5, illustrating its potential.",
|
||||
"doc_id": "c790da40ea8911ee928e0242ac180005",
|
||||
"docnm_kwd": "OpenAGI When LLM Meets Domain Experts.pdf",
|
||||
"img_id": "afab9fdad6e511eebdb20242ac180006-7e2345d440383b756670e1b0f43a7007",
|
||||
"important_kwd": [],
|
||||
"kb_id": "afab9fdad6e511eebdb20242ac180006",
|
||||
"positions": [
|
||||
[
|
||||
8.0,
|
||||
107.3,
|
||||
508.90000000000003,
|
||||
686.3,
|
||||
697.0
|
||||
]
|
||||
],
|
||||
"similarity": 0.6691508616357027,
|
||||
"term_similarity": 0.6999011754270821,
|
||||
"vector_similarity": 0.39239803751328806
|
||||
}
|
||||
],
|
||||
"doc_aggs": {
|
||||
"OpenAGI When LLM Meets Domain Experts.pdf": 4
|
||||
},
|
||||
"total": 8
|
||||
}
|
||||
},
|
||||
"retcode": 0,
|
||||
"retmsg": "success"
|
||||
}
|
||||
```
|
||||
|
||||
- **answer**: The reply of the chat bot.
|
||||
- **reference**:
|
||||
- chunks: Every item in it refers to the corresponding message in the answer.
|
||||
- content_with_weight: The content of chunk.
|
||||
- docnm_kwd: the document name.
|
||||
- img_id: the image id of the chunk. It is an optional field only for PDF/pptx/picture, and is accessed by 'GET' /document/get/\<id\>.
|
||||
- positions: [page_number, [upleft corner(x, y)], [right bottom(x, y)]], the chunk position, only for PDF.
|
||||
- similarity: the hybrid similarity.
|
||||
- term_similarity: keyword similarity
|
||||
- vector_similarity: embedding similarity
|
||||
- doc_aggs:
|
||||
- doc_id: the document can be accessed by 'GET' /document/get/\<id\>
|
||||
- doc_name: the file name
|
||||
- count: the chunk number hit in this document.
|
||||
|
||||
## Get document content or image
|
||||
|
||||
This is usually used when displaying the content of a citation.
|
||||
### Path: /api/document/get/\<id\>
|
||||
### Method: GET
|
||||
|
||||
## Upload file
|
||||
|
||||
This is usually used to upload a file to a knowledge base.
|
||||
### Path: /api/document/upload/
|
||||
### Method: POST
|
||||
|
||||
### Parameter:
|
||||
|
||||
| name | type | optional | description |
|
||||
|---------|--------|----------|----------------------------------------|
|
||||
| file | file | No | Upload file. |
|
||||
| kb_name | string | No | Choose the upload knowledge base name. |
|
||||
|
||||
### Response
|
||||
```json
|
||||
{
|
||||
"data": {
|
||||
"chunk_num": 0,
|
||||
"create_date": "Thu, 25 Apr 2024 14:30:06 GMT",
|
||||
"create_time": 1714026606921,
|
||||
"created_by": "553ec818fd5711ee8ea63043d7ed348e",
|
||||
"id": "41e9324602cd11ef9f5f3043d7ed348e",
|
||||
"kb_id": "06802686c0a311ee85d6246e9694c130",
|
||||
"location": "readme.txt",
|
||||
"name": "readme.txt",
|
||||
"parser_config": {
|
||||
"field_map": {
|
||||
},
|
||||
"pages": [
|
||||
[
|
||||
0,
|
||||
1000000
|
||||
]
|
||||
]
|
||||
},
|
||||
"parser_id": "general",
|
||||
"process_begin_at": null,
|
||||
"process_duation": 0.0,
|
||||
"progress": 0.0,
|
||||
"progress_msg": "",
|
||||
"run": "0",
|
||||
"size": 929,
|
||||
"source_type": "local",
|
||||
"status": "1",
|
||||
"thumbnail": null,
|
||||
"token_num": 0,
|
||||
"type": "doc",
|
||||
"update_date": "Thu, 25 Apr 2024 14:30:06 GMT",
|
||||
"update_time": 1714026606921
|
||||
},
|
||||
"retcode": 0,
|
||||
"retmsg": "success"
|
||||
}
|
||||
|
||||
```
|
||||
---
|
||||
sidebar_position: 1
|
||||
slug: /api
|
||||
---
|
||||
|
||||
# API reference
|
||||
|
||||
RAGFlow offers RESTful APIs for you to integrate its capabilities into third-party applications.
|
||||
|
||||
## Base URL
|
||||
```
|
||||
https://demo.ragflow.io/v1/
|
||||
```
|
||||
|
||||
## Authorization
|
||||
|
||||
All of RAGFlow's RESTful APIs use an API key for authorization, so keep it safe and do not expose it to the front end.
|
||||
Put your API key in the request header.
|
||||
|
||||
```buildoutcfg
|
||||
Authorization: Bearer {API_KEY}
|
||||
```
|
||||
|
||||
To get your API key:
|
||||
|
||||
1. In RAGFlow, click **Chat** tab in the middle top of the page.
|
||||
2. Hover over the corresponding dialogue **>** **Chat Bot API** to show the chatbot API configuration page.
|
||||
3. Click **Api Key** **>** **Create new key** to create your API key.
|
||||
4. Copy and keep your API key safe.
|
||||
|
||||
## Create conversation
|
||||
|
||||
This method creates (initiates) a new conversation for a specific user.
|
||||
|
||||
### Request
|
||||
|
||||
#### Request URI
|
||||
|
||||
| Method | Request URI |
|
||||
|----------|-------------------------------------------------------------|
|
||||
| GET | `/api/new_conversation` |
|
||||
|
||||
:::note
|
||||
You are *required* to save the `data.id` value returned in the response data, which is the session ID for all upcoming conversations.
|
||||
:::
|
||||
|
||||
#### Request parameter
|
||||
|
||||
| Name | Type | Required | Description |
|
||||
|----------|--------|----------|-------------------------------------------------------------|
|
||||
| `user_id`| string | Yes | The unique identifier assigned to each user. `user_id` must be less than 32 characters and cannot be empty. The following character sets are supported: <br />- 26 lowercase English letters (a-z)<br />- 26 uppercase English letters (A-Z)<br />- 10 digits (0-9)<br />- "_", "-", "." |
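A sketch of this call with `curl`, assuming the request URI is appended to the base URL listed above (substitute your own server's base URL and `<API_KEY>` with the key you created):

```bash
# Create a conversation for user "demo_user" and remember the returned session ID (data.id).
curl -s -H 'Authorization: Bearer <API_KEY>' \
     'https://demo.ragflow.io/v1/api/new_conversation?user_id=demo_user'
```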
|
||||
|
||||
### Response
|
||||
|
||||
```json
|
||||
{
|
||||
"data": {
|
||||
"create_date": "Fri, 12 Apr 2024 17:26:21 GMT",
|
||||
"create_time": 1712913981857,
|
||||
"dialog_id": "4f0a2e4cb9af11ee9ba20aef05f5e94f",
|
||||
"duration": 0.0,
|
||||
"id": "b9b2e098f8ae11ee9f45fa163e197198",
|
||||
"message": [
|
||||
{
|
||||
"content": "Hi, I'm your assistant, what can I do for you?",
|
||||
"role": "assistant"
|
||||
}
|
||||
],
|
||||
"reference": [],
|
||||
"tokens": 0,
|
||||
"update_date": "Fri, 12 Apr 2024 17:26:21 GMT",
|
||||
"update_time": 1712913981857,
|
||||
"user_id": "<USER_ID_SET_BY_THE_CALLER>"
|
||||
},
|
||||
"retcode": 0,
|
||||
"retmsg": "success"
|
||||
}
|
||||
```
|
||||
|
||||
## Get conversation history
|
||||
|
||||
This method retrieves the history of a specified conversation session.
|
||||
|
||||
### Request
|
||||
|
||||
#### Request URI
|
||||
|
||||
| Method | Request URI |
|
||||
|----------|-------------------------------------------------------------|
|
||||
| GET | `/api/conversation/<id>` |
|
||||
|
||||
#### Request parameter
|
||||
|
||||
| Name | Type | Required | Description |
|
||||
|----------|--------|----------|-------------------------------------------------------------|
|
||||
| `id` | string | Yes | The unique identifier assigned to a conversation session. `id` must be less than 32 characters and cannot be empty. The following character sets are supported: <br />- 26 lowercase English letters (a-z)<br />- 26 uppercase English letters (A-Z)<br />- 10 digits (0-9)<br />- "_", "-", "." |
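A sketch with `curl`, assuming `<CONVERSATION_ID>` is the `data.id` value returned by the create-conversation call (substitute your own base URL and API key):

```bash
# Retrieve the full history of an existing conversation session.
curl -s -H 'Authorization: Bearer <API_KEY>' \
     'https://demo.ragflow.io/v1/api/conversation/<CONVERSATION_ID>'
```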
|
||||
|
||||
### Response
|
||||
|
||||
#### Response parameter
|
||||
|
||||
- `message`: All conversations in the specified conversation session.
|
||||
- `role`: `"user"` or `"assistant"`.
|
||||
- `content`: The text content of user or assistant. The citations are in a format like `##0$$`. The number in the middle, 0 in this case, indicates which part in data.reference.chunks it refers to.
|
||||
|
||||
- `user_id`: This is set by the caller.
|
||||
- `reference`: Each reference corresponds to one of the assistant's answers in `data.message`.
|
||||
- `chunks`
|
||||
- `content_with_weight`: Content of the chunk.
|
||||
- `doc_name`: Name of the *hit* document.
|
||||
- `img_id`: The image ID of the chunk. It is an optional field only for PDF, PPTX, and images. Call ['GET' /document/get/\<id\>](#get-document-content) to retrieve the image.
|
||||
- `positions`: [page_number, [upper-left corner (x, y)], [bottom-right corner (x, y)]], the chunk position, PDF only.
- `similarity`: The hybrid similarity.
- `term_similarity`: The keyword similarity.
- `vector_similarity`: The embedding similarity.
|
||||
- `doc_aggs`:
|
||||
- `doc_id`: ID of the *hit* document. Call ['GET' /document/get/\<id\>](#get-document-content) to retrieve the document.
|
||||
- `doc_name`: Name of the *hit* document.
|
||||
- `count`: The number of *hit* chunks in this document.
|
||||
|
||||
```json
|
||||
{
|
||||
"data": {
|
||||
"create_date": "Mon, 01 Apr 2024 09:28:42 GMT",
|
||||
"create_time": 1711934922220,
|
||||
"dialog_id": "df4a4916d7bd11eeaa650242ac180006",
|
||||
"id": "2cae30fcefc711ee94140242ac180006",
|
||||
"message": [
|
||||
{
|
||||
"content": "Hi! I'm your assistant, what can I do for you?",
|
||||
"role": "assistant"
|
||||
},
|
||||
{
|
||||
"content": "What's the vit score for GPT-4?",
|
||||
"role": "user"
|
||||
},
|
||||
{
|
||||
"content": "The ViT Score for GPT-4 in the zero-shot scenario is 0.5058, and in the few-shot scenario, it is 0.6480. ##0$$",
|
||||
"role": "assistant"
|
||||
}
|
||||
],
|
||||
"user_id": "<USER_ID_SET_BY_THE_CALLER>",
|
||||
"reference": [
|
||||
{
|
||||
"chunks": [
|
||||
{
|
||||
"chunk_id": "d0bc7892c3ec4aeac071544fd56730a8",
|
||||
"content_ltks": "tabl 1:openagi task-solv perform under differ set for three closed-sourc llm . boldfac denot the highest score under each learn schema . metric gpt-3.5-turbo claude-2 gpt-4 zero few zero few zero few clip score 0.0 0.0 0.0 0.2543 0.0 0.3055 bert score 0.1914 0.3820 0.2111 0.5038 0.2076 0.6307 vit score 0.2437 0.7497 0.4082 0.5416 0.5058 0.6480 overal 0.1450 0.3772 0.2064 0.4332 0.2378 0.5281",
|
||||
"content_with_weight": "<table><caption>Table 1: OpenAGI task-solving performances under different settings for three closed-source LLMs. Boldface denotes the highest score under each learning schema.</caption>\n<tr><th rowspan=2 >Metrics</th><th >GPT-3.5-turbo</th><th></th><th >Claude-2</th><th >GPT-4</th></tr>\n<tr><th >Zero</th><th >Few</th><th >Zero Few</th><th >Zero Few</th></tr>\n<tr><td >CLIP Score</td><td >0.0</td><td >0.0</td><td >0.0 0.2543</td><td >0.0 0.3055</td></tr>\n<tr><td >BERT Score</td><td >0.1914</td><td >0.3820</td><td >0.2111 0.5038</td><td >0.2076 0.6307</td></tr>\n<tr><td >ViT Score</td><td >0.2437</td><td >0.7497</td><td >0.4082 0.5416</td><td >0.5058 0.6480</td></tr>\n<tr><td >Overall</td><td >0.1450</td><td >0.3772</td><td >0.2064 0.4332</td><td >0.2378 0.5281</td></tr>\n</table>",
|
||||
"doc_id": "c790da40ea8911ee928e0242ac180005",
|
||||
"doc_name": "OpenAGI When LLM Meets Domain Experts.pdf",
|
||||
"img_id": "afab9fdad6e511eebdb20242ac180006-d0bc7892c3ec4aeac071544fd56730a8",
|
||||
"important_kwd": [],
|
||||
"kb_id": "afab9fdad6e511eebdb20242ac180006",
|
||||
"positions": [
|
||||
[
|
||||
9.0,
|
||||
159.9383341471354,
|
||||
472.1773274739583,
|
||||
223.58013916015625,
|
||||
307.86692301432294
|
||||
]
|
||||
],
|
||||
"similarity": 0.7310340654129031,
|
||||
"term_similarity": 0.7671974387781668,
|
||||
"vector_similarity": 0.40556370512552886
|
||||
},
|
||||
{
|
||||
"chunk_id": "7e2345d440383b756670e1b0f43a7007",
|
||||
"content_ltks": "5.5 experiment analysi the main experiment result are tabul in tab . 1 and 2 , showcas the result for closed-sourc and open-sourc llm , respect . the overal perform is calcul a the averag of cllp 8 bert and vit score . ",
|
||||
"content_with_weight": "5.5 Experimental Analysis\nThe main experimental results are tabulated in Tab. 1 and 2, showcasing the results for closed-source and open-source LLMs, respectively. The overall performance is calculated as the average of CLlP\n8\nBERT and ViT scores.",
|
||||
"doc_id": "c790da40ea8911ee928e0242ac180005",
|
||||
"doc_name": "OpenAGI When LLM Meets Domain Experts.pdf",
|
||||
"img_id": "afab9fdad6e511eebdb20242ac180006-7e2345d440383b756670e1b0f43a7007",
|
||||
"important_kwd": [],
|
||||
"kb_id": "afab9fdad6e511eebdb20242ac180006",
|
||||
"positions": [
|
||||
[
|
||||
8.0,
|
||||
107.3,
|
||||
508.90000000000003,
|
||||
686.3,
|
||||
697.0
|
||||
],
|
||||
],
|
||||
"similarity": 0.6691508616357027,
|
||||
"term_similarity": 0.6999011754270821,
|
||||
"vector_similarity": 0.39239803751328806
|
||||
},
|
||||
],
|
||||
"doc_aggs": [
|
||||
{
|
||||
"count": 8,
|
||||
"doc_id": "c790da40ea8911ee928e0242ac180005",
|
||||
"doc_name": "OpenAGI When LLM Meets Domain Experts.pdf"
|
||||
}
|
||||
],
|
||||
"total": 8
|
||||
},
|
||||
],
|
||||
"update_date": "Tue, 02 Apr 2024 09:07:49 GMT",
|
||||
"update_time": 1712020069421
|
||||
},
|
||||
"retcode": 0,
|
||||
"retmsg": "success"
|
||||
}
|
||||
```
|
||||
|
||||
## Get answer
|
||||
|
||||
This method retrieves from RAGFlow the answer to the user's latest question.
|
||||
|
||||
### Request
|
||||
|
||||
#### Request URI
|
||||
|
||||
| Method | Request URI |
|
||||
|----------|-------------------------------------------------------------|
|
||||
| POST | `/api/completion` |
|
||||
|
||||
#### Request parameter
|
||||
|
||||
| Name | Type | Required | Description |
|
||||
|------------------|--------|----------|---------------|
|
||||
| `conversation_id`| string | Yes | The ID of the conversation session. Call ['GET' /new_conversation](#create-conversation) to retrieve the ID.|
|
||||
| `messages` | json | Yes | The latest question in a JSON form, such as `[{"role": "user", "content": "How are you doing!"}]`|
|
||||
| `quote` | bool | No | Default: true |
|
||||
| `stream` | bool | No | Default: true |
|
||||
| `doc_ids` | string | No | Document IDs delimited by comma, like `c790da40ea8911ee928e0242ac180005,23dsf34ree928e0242ac180005`. The retrieved contents will be confined to these documents. |
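A sketch with `curl`, assuming `<CONVERSATION_ID>` comes from the create-conversation call; `stream` is set to `false` here so that, assuming the server honors it, the answer comes back as a single JSON object (substitute your own base URL and API key):

```bash
# Ask a question in an existing conversation session.
curl -s -X POST \
     -H 'Authorization: Bearer <API_KEY>' \
     -H 'Content-Type: application/json' \
     -d '{
           "conversation_id": "<CONVERSATION_ID>",
           "messages": [{"role": "user", "content": "What is the ViT score for GPT-4?"}],
           "stream": false
         }' \
     'https://demo.ragflow.io/v1/api/completion'
```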
|
||||
|
||||
### Response
|
||||
|
||||
- `answer`: The answer to the user's latest question.
|
||||
- `reference`:
|
||||
- `chunks`: The retrieved chunks that contribute to the answer.
|
||||
- `content_with_weight`: Content of the chunk.
|
||||
- `doc_name`: Name of the *hit* document.
|
||||
- `img_id`: The image ID of the chunk. It is an optional field only for PDF, PPTX, and images. Call ['GET' /document/get/\<id\>](#get-document-content) to retrieve the image.
|
||||
- `positions`: [page_number, [upleft corner(x, y)], [right bottom(x, y)]], the chunk position, only for PDF.
|
||||
- `similarity`: The hybrid similarity.
|
||||
- `term_similarity`: The keyword similarity.
|
||||
- `vector_similarity`: The embedding similarity.
|
||||
- `doc_aggs`:
|
||||
- `doc_id`: ID of the *hit* document. Call ['GET' /document/get/\<id\>](#get-document-content) to retrieve the document.
|
||||
- `doc_name`: Name of the *hit* document.
|
||||
- `count`: The number of *hit* chunks in this document.
|
||||
|
||||
```json
|
||||
{
|
||||
"data": {
|
||||
"answer": "The ViT Score for GPT-4 in the zero-shot scenario is 0.5058, and in the few-shot scenario, it is 0.6480. ##0$$",
|
||||
"reference": {
|
||||
"chunks": [
|
||||
{
|
||||
"chunk_id": "d0bc7892c3ec4aeac071544fd56730a8",
|
||||
"content_ltks": "tabl 1:openagi task-solv perform under differ set for three closed-sourc llm . boldfac denot the highest score under each learn schema . metric gpt-3.5-turbo claude-2 gpt-4 zero few zero few zero few clip score 0.0 0.0 0.0 0.2543 0.0 0.3055 bert score 0.1914 0.3820 0.2111 0.5038 0.2076 0.6307 vit score 0.2437 0.7497 0.4082 0.5416 0.5058 0.6480 overal 0.1450 0.3772 0.2064 0.4332 0.2378 0.5281",
|
||||
"content_with_weight": "<table><caption>Table 1: OpenAGI task-solving performances under different settings for three closed-source LLMs. Boldface denotes the highest score under each learning schema.</caption>\n<tr><th rowspan=2 >Metrics</th><th >GPT-3.5-turbo</th><th></th><th >Claude-2</th><th >GPT-4</th></tr>\n<tr><th >Zero</th><th >Few</th><th >Zero Few</th><th >Zero Few</th></tr>\n<tr><td >CLIP Score</td><td >0.0</td><td >0.0</td><td >0.0 0.2543</td><td >0.0 0.3055</td></tr>\n<tr><td >BERT Score</td><td >0.1914</td><td >0.3820</td><td >0.2111 0.5038</td><td >0.2076 0.6307</td></tr>\n<tr><td >ViT Score</td><td >0.2437</td><td >0.7497</td><td >0.4082 0.5416</td><td >0.5058 0.6480</td></tr>\n<tr><td >Overall</td><td >0.1450</td><td >0.3772</td><td >0.2064 0.4332</td><td >0.2378 0.5281</td></tr>\n</table>",
|
||||
"doc_id": "c790da40ea8911ee928e0242ac180005",
|
||||
"doc_name": "OpenAGI When LLM Meets Domain Experts.pdf",
|
||||
"img_id": "afab9fdad6e511eebdb20242ac180006-d0bc7892c3ec4aeac071544fd56730a8",
|
||||
"important_kwd": [],
|
||||
"kb_id": "afab9fdad6e511eebdb20242ac180006",
|
||||
"positions": [
|
||||
[
|
||||
9.0,
|
||||
159.9383341471354,
|
||||
472.1773274739583,
|
||||
223.58013916015625,
|
||||
307.86692301432294
|
||||
]
|
||||
],
|
||||
"similarity": 0.7310340654129031,
|
||||
"term_similarity": 0.7671974387781668,
|
||||
"vector_similarity": 0.40556370512552886
|
||||
},
|
||||
{
|
||||
"chunk_id": "7e2345d440383b756670e1b0f43a7007",
|
||||
"content_ltks": "5.5 experiment analysi the main experiment result are tabul in tab . 1 and 2 , showcas the result for closed-sourc and open-sourc llm , respect . the overal perform is calcul a the averag of cllp 8 bert and vit score . here , onli the task descript of the benchmark task are fed into llm(addit inform , such a the input prompt and llm\u2019output , is provid in fig . a.4 and a.5 in supplementari). broadli speak , closed-sourc llm demonstr superior perform on openagi task , with gpt-4 lead the pack under both zero-and few-shot scenario . in the open-sourc categori , llama-2-13b take the lead , consist post top result across variou learn schema--the perform possibl influenc by it larger model size . notabl , open-sourc llm significantli benefit from the tune method , particularli fine-tun and\u2019rltf . these method mark notic enhanc for flan-t5-larg , vicuna-7b , and llama-2-13b when compar with zero-shot and few-shot learn schema . in fact , each of these open-sourc model hit it pinnacl under the rltf approach . conclus , with rltf tune , the perform of llama-2-13b approach that of gpt-3.5 , illustr it potenti .",
|
||||
"content_with_weight": "5.5 Experimental Analysis\nThe main experimental results are tabulated in Tab. 1 and 2, showcasing the results for closed-source and open-source LLMs, respectively. The overall performance is calculated as the average of CLlP\n8\nBERT and ViT scores. Here, only the task descriptions of the benchmark tasks are fed into LLMs (additional information, such as the input prompt and LLMs\u2019 outputs, is provided in Fig. A.4 and A.5 in supplementary). Broadly speaking, closed-source LLMs demonstrate superior performance on OpenAGI tasks, with GPT-4 leading the pack under both zero- and few-shot scenarios. In the open-source category, LLaMA-2-13B takes the lead, consistently posting top results across various learning schema--the performance possibly influenced by its larger model size. Notably, open-source LLMs significantly benefit from the tuning methods, particularly Fine-tuning and\u2019 RLTF. These methods mark noticeable enhancements for Flan-T5-Large, Vicuna-7B, and LLaMA-2-13B when compared with zero-shot and few-shot learning schema. In fact, each of these open-source models hits its pinnacle under the RLTF approach. Conclusively, with RLTF tuning, the performance of LLaMA-2-13B approaches that of GPT-3.5, illustrating its potential.",
|
||||
"doc_id": "c790da40ea8911ee928e0242ac180005",
|
||||
"doc_name": "OpenAGI When LLM Meets Domain Experts.pdf",
|
||||
"img_id": "afab9fdad6e511eebdb20242ac180006-7e2345d440383b756670e1b0f43a7007",
|
||||
"important_kwd": [],
|
||||
"kb_id": "afab9fdad6e511eebdb20242ac180006",
|
||||
"positions": [
|
||||
[
|
||||
8.0,
|
||||
107.3,
|
||||
508.90000000000003,
|
||||
686.3,
|
||||
697.0
|
||||
]
|
||||
],
|
||||
"similarity": 0.6691508616357027,
|
||||
"term_similarity": 0.6999011754270821,
|
||||
"vector_similarity": 0.39239803751328806
|
||||
}
|
||||
],
|
||||
"doc_aggs": {
|
||||
"OpenAGI When LLM Meets Domain Experts.pdf": 4
|
||||
},
|
||||
"total": 8
|
||||
}
|
||||
},
|
||||
"retcode": 0,
|
||||
"retmsg": "success"
|
||||
}
|
||||
```
|
||||
|
||||
## Get document content
|
||||
|
||||
This method retrieves the content of a document.
|
||||
|
||||
### Request
|
||||
|
||||
#### Request URI
|
||||
|
||||
| Method | Request URI |
|
||||
|----------|-------------------------------------------------------------|
|
||||
| GET | `/document/get/<id>` |
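A sketch with `curl`, saving the returned binary to a local file; the `<DOC_ID>` comes from the chunk data or `doc_aggs` of an earlier response, and the actual file type depends on the document (substitute your own base URL and API key):

```bash
# Download the document (or chunk image) behind a citation.
curl -s -H 'Authorization: Bearer <API_KEY>' \
     -o cited_document.pdf \
     'https://demo.ragflow.io/v1/document/get/<DOC_ID>'
```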
|
||||
|
||||
### Response
|
||||
|
||||
A binary file.
|
||||
|
||||
## Upload file
|
||||
|
||||
This method uploads a specific file to a specified knowledge base.
|
||||
|
||||
### Request
|
||||
|
||||
#### Request URI
|
||||
|
||||
| Method | Request URI |
|
||||
|----------|-------------------------------------------------------------|
|
||||
| POST | `/api/document/upload` |
|
||||
|
||||
#### Request parameter
|
||||
|
||||
| Name | Type | Required | Description |
|
||||
|-------------|--------|----------|---------------------------------------------------------|
|
||||
| `file` | file | Yes | The file to upload. |
|
||||
| `kb_name` | string | Yes | The name of the knowledge base to upload the file to. |
|
||||
| `parser_id` | string | No | The parsing method (chunk template) to use. <br />- "naive": General;<br />- "qa": Q&A;<br />- "manual": Manual;<br />- "table": Table;<br />- "paper": Paper;<br />- "laws": Laws;<br />- "presentation": Presentation;<br />- "picture": Picture;<br />- "one": One. |
|
||||
| `run` | string | No | 1: Automatically start file parsing. If `parser_id` is not set, RAGFlow uses the general template by default. |
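A sketch with `curl`, sending the file as multipart form data; the knowledge base name `my_kb` is a placeholder (substitute your own base URL, API key, and knowledge base name):

```bash
# Upload readme.txt to the knowledge base "my_kb", use the General ("naive") template,
# and start parsing immediately (run=1).
curl -s -X POST \
     -H 'Authorization: Bearer <API_KEY>' \
     -F 'file=@./readme.txt' \
     -F 'kb_name=my_kb' \
     -F 'parser_id=naive' \
     -F 'run=1' \
     'https://demo.ragflow.io/v1/api/document/upload'
```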
|
||||
|
||||
|
||||
### Response
|
||||
|
||||
```json
|
||||
{
|
||||
"data": {
|
||||
"chunk_num": 0,
|
||||
"create_date": "Thu, 25 Apr 2024 14:30:06 GMT",
|
||||
"create_time": 1714026606921,
|
||||
"created_by": "553ec818fd5711ee8ea63043d7ed348e",
|
||||
"id": "41e9324602cd11ef9f5f3043d7ed348e",
|
||||
"kb_id": "06802686c0a311ee85d6246e9694c130",
|
||||
"location": "readme.txt",
|
||||
"name": "readme.txt",
|
||||
"parser_config": {
|
||||
"field_map": {
|
||||
},
|
||||
"pages": [
|
||||
[
|
||||
0,
|
||||
1000000
|
||||
]
|
||||
]
|
||||
},
|
||||
"parser_id": "general",
|
||||
"process_begin_at": null,
|
||||
"process_duation": 0.0,
|
||||
"progress": 0.0,
|
||||
"progress_msg": "",
|
||||
"run": "0",
|
||||
"size": 929,
|
||||
"source_type": "local",
|
||||
"status": "1",
|
||||
"thumbnail": null,
|
||||
"token_num": 0,
|
||||
"type": "doc",
|
||||
"update_date": "Thu, 25 Apr 2024 14:30:06 GMT",
|
||||
"update_time": 1714026606921
|
||||
},
|
||||
"retcode": 0,
|
||||
"retmsg": "success"
|
||||
}
|
||||
```
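
For illustration, a minimal Python sketch of uploading a file with `requests`. The base URL, API key, `Authorization` header, and knowledge base name are placeholders, not part of the reference above:

```python
import requests

BASE_URL = "http://<your-ragflow-host>"  # placeholder
API_KEY = "<YOUR_API_KEY>"               # placeholder: adapt to your authentication setup

with open("readme.txt", "rb") as f:
    resp = requests.post(
        f"{BASE_URL}/api/document/upload",
        headers={"Authorization": f"Bearer {API_KEY}"},  # assumed auth scheme
        files={"file": f},                               # the file to upload
        data={
            "kb_name": "my_knowledge_base",  # hypothetical knowledge base name
            "parser_id": "naive",            # use the General chunk template
            "run": "1",                      # start parsing right after upload
        },
    )
resp.raise_for_status()
print(resp.json()["data"]["id"])  # ID of the newly created document
```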
|
||||
|
||||
## Get document chunks
|
||||
|
||||
This method retrieves the chunks of a specific document by `doc_name` or `doc_id`.
|
||||
|
||||
### Request
|
||||
|
||||
#### Request URI
|
||||
|
||||
| Method | Request URI |
|
||||
|----------|-------------------------------------------------------------|
|
||||
| GET | `/api/list_chunks` |
|
||||
|
||||
#### Request parameter
|
||||
|
||||
| Name | Type | Required | Description |
|
||||
|------------|--------|----------|---------------------------------------------------------------------------------------------|
|
||||
| `doc_name` | string | No | The name of the document in the knowledge base. It must not be empty if `doc_id` is not set.|
|
||||
| `doc_id` | string | No | The ID of the document in the knowledge base. It must not be empty if `doc_name` is not set.|
|
||||
|
||||
|
||||
### Response
|
||||
|
||||
```json
|
||||
{
|
||||
"data": [
|
||||
{
|
||||
"content": "Figure 14: Per-request neural-net processingof RL-Cache.\n103\n(sn)\nCPU\n 102\nGPU\n8101\n100\n8\n16 64 256 1K\n4K",
|
||||
"doc_name": "RL-Cache.pdf",
|
||||
"img_id": "0335167613f011ef91240242ac120006-b46c3524952f82dbe061ce9b123f2211"
|
||||
},
|
||||
{
|
||||
"content": "4.3 ProcessingOverheadof RL-CacheACKNOWLEDGMENTSThis section evaluates how effectively our RL-Cache implemen-tation leverages modern multi-core CPUs and GPUs to keep the per-request neural-net processing overhead low. Figure 14 depictsThis researchwas supported inpart by the Regional Government of Madrid (grant P2018/TCS-4499, EdgeData-CM)andU.S. National Science Foundation (grants CNS-1763617 andCNS-1717179).REFERENCES",
|
||||
"doc_name": "RL-Cache.pdf",
|
||||
"img_id": "0335167613f011ef91240242ac120006-d4c12c43938eb55d2d8278eea0d7e6d7"
|
||||
}
|
||||
],
|
||||
"retcode": 0,
|
||||
"retmsg": "success"
|
||||
}
|
||||
```
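
As an illustration, a minimal Python sketch of listing the chunks of a document by name. The base URL, API key, and `Authorization` header are placeholders to adapt to your deployment:

```python
import requests

BASE_URL = "http://<your-ragflow-host>"  # placeholder
API_KEY = "<YOUR_API_KEY>"               # placeholder: adapt to your authentication setup

# Either doc_name or doc_id must be provided.
resp = requests.get(
    f"{BASE_URL}/api/list_chunks",
    headers={"Authorization": f"Bearer {API_KEY}"},  # assumed auth scheme
    params={"doc_name": "RL-Cache.pdf"},
)
resp.raise_for_status()
for chunk in resp.json()["data"]:
    print(chunk["content"][:80])  # preview the first 80 characters of each chunk
```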
|
||||
|
||||
## Get document list
|
||||
|
||||
This method retrieves a list of documents from a specified knowledge base.
|
||||
|
||||
### Request
|
||||
|
||||
#### Request URI
|
||||
|
||||
| Method | Request URI |
|
||||
|----------|-------------------------------------------------------------|
|
||||
| POST | `/api/list_kb_docs` |
|
||||
|
||||
#### Request parameter
|
||||
|
||||
| Name | Type | Required | Description |
|
||||
|-------------|--------|----------|-----------------------------------------------------------------------|
|
||||
| `kb_name` | string | Yes | The name of the knowledge base from which to retrieve the document list. |
|
||||
| `page` | int | No | The page number to retrieve. Defaults to `1`. |
|
||||
| `page_size` | int | No | The number of documents returned per page. Defaults to `15`. |
|
||||
| `orderby` | string | No | The field to sort by: `chunk_num`, `create_time`, or `size`. Defaults to `create_time`. |
|
||||
| `desc` | bool | No | Whether to sort in descending order. Defaults to `True`. |
|
||||
| `keywords` | string | No | A keyword to filter document names. |
|
||||
|
||||
|
||||
### Response
|
||||
|
||||
```json
|
||||
{
|
||||
"data": {
|
||||
"docs": [
|
||||
{
|
||||
"doc_id": "bad89a84168c11ef9ce40242ac120006",
|
||||
"doc_name": "test.xlsx"
|
||||
},
|
||||
{
|
||||
"doc_id": "641a9b4013f111efb53f0242ac120006",
|
||||
"doc_name": "1111.pdf"
|
||||
}
|
||||
],
|
||||
"total": 2
|
||||
},
|
||||
"retcode": 0,
|
||||
"retmsg": "success"
|
||||
}
|
||||
```
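
A minimal Python sketch of paging through the documents of a knowledge base. The base URL, API key, `Authorization` header, and knowledge base name are placeholders:

```python
import requests

BASE_URL = "http://<your-ragflow-host>"  # placeholder
API_KEY = "<YOUR_API_KEY>"               # placeholder: adapt to your authentication setup

resp = requests.post(
    f"{BASE_URL}/api/list_kb_docs",
    headers={"Authorization": f"Bearer {API_KEY}"},  # assumed auth scheme
    json={
        "kb_name": "my_knowledge_base",  # hypothetical knowledge base name
        "page": 1,
        "page_size": 15,
        "orderby": "create_time",
        "desc": True,
    },
)
resp.raise_for_status()
for doc in resp.json()["data"]["docs"]:
    print(doc["doc_id"], doc["doc_name"])
```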
|
||||
|
||||
## Delete documents
|
||||
|
||||
This method deletes documents by document ID or name.
|
||||
|
||||
### Request
|
||||
|
||||
#### Request URI
|
||||
|
||||
| Method | Request URI |
|
||||
|----------|-------------------------------------------------------------|
|
||||
| DELETE | `/api/document` |
|
||||
|
||||
#### Request parameter
|
||||
|
||||
| Name | Type | Required | Description |
|
||||
|-------------|--------|----------|----------------------------|
|
||||
| `doc_names` | List | No | A list of document names. It must not be empty if `doc_ids` is not set. |
|
||||
| `doc_ids` | List | No | A list of document IDs. It must not be empty if `doc_names` is not set. |
|
||||
|
||||
|
||||
### Response
|
||||
|
||||
```json
|
||||
{
|
||||
"data": true,
|
||||
"retcode": 0,
|
||||
"retmsg": "success"
|
||||
}
|
||||
```
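
And a minimal Python sketch of deleting documents by ID; `doc_names` works the same way with a list of names. The base URL, API key, and `Authorization` header are placeholders:

```python
import requests

BASE_URL = "http://<your-ragflow-host>"  # placeholder
API_KEY = "<YOUR_API_KEY>"               # placeholder: adapt to your authentication setup

resp = requests.delete(
    f"{BASE_URL}/api/document",
    headers={"Authorization": f"Bearer {API_KEY}"},  # assumed auth scheme
    json={"doc_ids": ["bad89a84168c11ef9ce40242ac120006"]},  # example document ID
)
resp.raise_for_status()
print(resp.json())  # expected: {"data": true, "retcode": 0, "retmsg": "success"}
```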
|
||||
@ -1,4 +1,9 @@
|
||||
# Frequently Asked Questions
|
||||
---
|
||||
sidebar_position: 3
|
||||
slug: /faq
|
||||
---
|
||||
|
||||
# Frequently asked questions
|
||||
|
||||
## General
|
||||
|
||||
@ -13,6 +18,19 @@ The "garbage in garbage out" status quo remains unchanged despite the fact that
|
||||
|
||||
English, simplified Chinese, traditional Chinese for now.
|
||||
|
||||
### 3. Which embedding models can be deployed locally?
|
||||
|
||||
- BAAI/bge-large-zh-v1.5
|
||||
- BAAI/bge-base-en-v1.5
|
||||
- BAAI/bge-large-en-v1.5
|
||||
- BAAI/bge-small-en-v1.5
|
||||
- BAAI/bge-small-zh-v1.5
|
||||
- jinaai/jina-embeddings-v2-base-en
|
||||
- jinaai/jina-embeddings-v2-small-en
|
||||
- nomic-ai/nomic-embed-text-v1.5
|
||||
- sentence-transformers/all-MiniLM-L6-v2
|
||||
- maidalun1020/bce-embedding-base_v1
|
||||
|
||||
## Performance
|
||||
|
||||
### 1. Why does it take longer for RAGFlow to parse a document than LangChain?
|
||||
@ -31,7 +49,7 @@ Currently, we only support x86 CPU and Nvidia GPU.
|
||||
|
||||
### 2. Do you offer an API for integration with third-party applications?
|
||||
|
||||
The corresponding APIs are now available. See the [Conversation API](./conversation_api.md) for more information.
|
||||
The corresponding APIs are now available. See the [RAGFlow API Reference](./api.md) for more information.
|
||||
|
||||
### 3. Do you support stream output?
|
||||
|
||||
@ -55,7 +73,7 @@ This feature and the related APIs are still in development. Contributions are we
|
||||
```
|
||||
$ git clone https://github.com/infiniflow/ragflow.git
|
||||
$ cd ragflow
|
||||
$ docker build -t infiniflow/ragflow:v0.3.1 .
|
||||
$ docker build -t infiniflow/ragflow:latest .
|
||||
$ cd ragflow/docker
|
||||
$ chmod +x ./entrypoint.sh
|
||||
$ docker compose up -d
|
||||
@ -193,18 +211,31 @@ docker logs -f ragflow-server
|
||||
2. Check if the **task_executor.py** process exists.
|
||||
3. Check if your RAGFlow server can access hf-mirror.com or huggingface.com.
|
||||
|
||||
#### 4.5 Why does my PDF parsing stall near completion, while the log does not show any error?
|
||||
|
||||
#### 4.5 `Index failure`
|
||||
If your RAGFlow is deployed *locally*, the parsing process was likely killed due to insufficient RAM. Try increasing the memory allocation by raising the `MEM_LIMIT` value in **docker/.env**.
|
||||
|
||||
> Ensure that you restart your RAGFlow server for your changes to take effect!
|
||||
> ```bash
|
||||
> docker compose stop
|
||||
> ```
|
||||
> ```bash
|
||||
> docker compose up -d
|
||||
> ```
|
||||
|
||||

|
||||
|
||||
#### 4.6 `Index failure`
|
||||
|
||||
An index failure usually indicates an unavailable Elasticsearch service.
|
||||
|
||||
#### 4.6 How to check the log of RAGFlow?
|
||||
#### 4.7 How to check the log of RAGFlow?
|
||||
|
||||
```bash
|
||||
tail -f path_to_ragflow/docker/ragflow-logs/rag/*.log
|
||||
```
|
||||
|
||||
#### 4.7 How to check the status of each component in RAGFlow?
|
||||
#### 4.8 How to check the status of each component in RAGFlow?
|
||||
|
||||
```bash
|
||||
$ docker ps
|
||||
@ -212,13 +243,13 @@ $ docker ps
|
||||
*The system displays the following if all your RAGFlow components are running properly:*
|
||||
|
||||
```
|
||||
5bc45806b680 infiniflow/ragflow:v0.3.1 "./entrypoint.sh" 11 hours ago Up 11 hours 0.0.0.0:80->80/tcp, :::80->80/tcp, 0.0.0.0:443->443/tcp, :::443->443/tcp, 0.0.0.0:9380->9380/tcp, :::9380->9380/tcp ragflow-server
|
||||
5bc45806b680 infiniflow/ragflow:latest "./entrypoint.sh" 11 hours ago Up 11 hours 0.0.0.0:80->80/tcp, :::80->80/tcp, 0.0.0.0:443->443/tcp, :::443->443/tcp, 0.0.0.0:9380->9380/tcp, :::9380->9380/tcp ragflow-server
|
||||
91220e3285dd docker.elastic.co/elasticsearch/elasticsearch:8.11.3 "/bin/tini -- /usr/l…" 11 hours ago Up 11 hours (healthy) 9300/tcp, 0.0.0.0:9200->9200/tcp, :::9200->9200/tcp ragflow-es-01
|
||||
d8c86f06c56b mysql:5.7.18 "docker-entrypoint.s…" 7 days ago Up 16 seconds (healthy) 0.0.0.0:3306->3306/tcp, :::3306->3306/tcp ragflow-mysql
|
||||
cd29bcb254bc quay.io/minio/minio:RELEASE.2023-12-20T01-00-02Z "/usr/bin/docker-ent…" 2 weeks ago Up 11 hours 0.0.0.0:9001->9001/tcp, :::9001->9001/tcp, 0.0.0.0:9000->9000/tcp, :::9000->9000/tcp ragflow-minio
|
||||
```
|
||||
|
||||
#### 4.8 `Exception: Can't connect to ES cluster`
|
||||
#### 4.9 `Exception: Can't connect to ES cluster`
|
||||
|
||||
1. Check the status of your Elasticsearch component:
|
||||
|
||||
@ -245,23 +276,26 @@ $ docker ps
|
||||
curl http://<IP_OF_ES>:<PORT_OF_ES>
|
||||
```
|
||||
|
||||
#### 4.10 Can't start ES container and get `Elasticsearch did not exit normally`
|
||||
|
||||
#### 4.9 `{"data":null,"retcode":100,"retmsg":"<NotFound '404: Not Found'>"}`
|
||||
This is because you forgot to update the `vm.max_map_count` value in **/etc/sysctl.conf** and your change to this value was reset after a system reboot.
|
||||
|
||||
Your IP address or port number may be incorrect. If you are using the default configurations, enter http://<IP_OF_YOUR_MACHINE> (**NOT 9380, AND NO PORT NUMBER REQUIRED!**) in your browser. This should work.
|
||||
#### 4.11 `{"data":null,"retcode":100,"retmsg":"<NotFound '404: Not Found'>"}`
|
||||
|
||||
#### 4.10 `Ollama - Mistral instance running at 127.0.0.1:11434 but cannot add Ollama as model in RagFlow`
|
||||
Your IP address or port number may be incorrect. If you are using the default configurations, enter `http://<IP_OF_YOUR_MACHINE>` (**NOT 9380, AND NO PORT NUMBER REQUIRED!**) in your browser. This should work.
|
||||
|
||||
#### 4.12 `Ollama - Mistral instance running at 127.0.0.1:11434 but cannot add Ollama as model in RagFlow`
|
||||
|
||||
A correct Ollama IP address and port is crucial to adding models to Ollama:
|
||||
|
||||
- If you are on demo.ragflow.io, ensure that the server hosting Ollama has a publicly accessible IP address. Note that 127.0.0.1 is not a publicly accessible IP address.
|
||||
- If you deploy RAGFlow locally, ensure that Ollama and RAGFlow are on the same LAN and can communicate with each other.
|
||||
|
||||
#### 4.11 Do you offer examples of using deepdoc to parse PDF or other files?
|
||||
#### 4.13 Do you offer examples of using deepdoc to parse PDF or other files?
|
||||
|
||||
Yes, we do. See the Python files under the **rag/app** folder.
|
||||
|
||||
#### 4.12 Why did I fail to upload a 10MB+ file to my locally deployed RAGFlow?
|
||||
#### 4.14 Why did I fail to upload a 10MB+ file to my locally deployed RAGFlow?
|
||||
|
||||
You probably forgot to update the **MAX_CONTENT_LENGTH** environment variable:
|
||||
|
||||
@ -280,7 +314,7 @@ docker compose up ragflow -d
|
||||
```
|
||||
*Now you should be able to upload files of sizes less than 100MB.*
|
||||
|
||||
#### 4.13 `Table 'rag_flow.document' doesn't exist`
|
||||
#### 4.15 `Table 'rag_flow.document' doesn't exist`
|
||||
|
||||
This exception occurs when starting up the RAGFlow server. Try the following:
|
||||
|
||||
@ -303,7 +337,7 @@ This exception occurs when starting up the RAGFlow server. Try the following:
|
||||
docker compose up
|
||||
```
|
||||
|
||||
#### 4.14 `hint : 102 Fail to access model Connection error`
|
||||
#### 4.16 `hint : 102 Fail to access model Connection error`
|
||||
|
||||

|
||||
|
||||
@ -311,6 +345,13 @@ This exception occurs when starting up the RAGFlow server. Try the following:
|
||||
2. Do not forget to append **/v1/** to **http://IP:port**:
|
||||
**http://IP:port/v1/**
|
||||
|
||||
#### 4.17 `FileNotFoundError: [Errno 2] No such file or directory`
|
||||
|
||||
1. Check if the status of your minio container is healthy:
|
||||
```bash
|
||||
docker ps
|
||||
```
|
||||
2. Ensure that the username and password settings of MySQL and MinIO in **docker/.env** are in line with those in **docker/service_conf.yml**.
|
||||
|
||||
## Usage
|
||||
|
||||
@ -331,7 +372,7 @@ You limit what the system responds to what you specify in **Empty response** if
|
||||
|
||||
### 4. How to run RAGFlow with a locally deployed LLM?
|
||||
|
||||
You can use Ollama to deploy local LLM. See [here](https://github.com/infiniflow/ragflow/blob/main/docs/ollama.md) for more information.
|
||||
You can use Ollama to deploy local LLM. See [here](https://github.com/infiniflow/ragflow/blob/main/docs/guides/deploy_local_llm.md) for more information.
|
||||
|
||||
### 5. How to link up ragflow and ollama servers?
|
||||
|
||||
@ -340,10 +381,78 @@ You can use Ollama to deploy local LLM. See [here](https://github.com/infiniflow
|
||||
|
||||
### 6. How to configure RAGFlow to respond with 100% matched results, rather than utilizing LLM?
|
||||
|
||||
1. Click the **Knowledge Base** tab in the middle top of the page.
|
||||
1. Click **Knowledge Base** in the middle top of the page.
|
||||
2. Right click the desired knowledge base to display the **Configuration** dialogue.
|
||||
3. Choose **Q&A** as the chunk method and click **Save** to confirm your change.
|
||||
|
||||
### Do I need to connect to Redis?
|
||||
### 7. Do I need to connect to Redis?
|
||||
|
||||
No, connecting to Redis is not required to use RAGFlow.
|
||||
No, connecting to Redis is not required.
|
||||
|
||||
### 8. `Error: Range of input length should be [1, 30000]`
|
||||
|
||||
This error occurs because there are too many chunks matching your search criteria. Try reducing the **TopN** and increasing **Similarity threshold** to fix this issue:
|
||||
|
||||
1. Click **Chat** in the middle top of the page.
|
||||
2. Right click the desired conversation > **Edit** > **Prompt Engine**
|
||||
3. Reduce the **TopN** and/or raise the **Similarity threshold**.
|
||||
4. Click **OK** to confirm your changes.
|
||||
|
||||

|
||||
|
||||
### 9. How to upgrade RAGFlow?
|
||||
|
||||
You can upgrade RAGFlow to either the dev version or the latest version:
|
||||
|
||||
- Dev versions are for developers and contributors. They are published nightly and may crash because they are not fully tested. We cannot guarantee their validity, and you try out the latest, untested features at your own risk.
|
||||
- The latest version refers to the most recent, officially published release. It is stable and works best for regular users.
|
||||
|
||||
|
||||
To upgrade RAGFlow to the dev version:
|
||||
|
||||
1. Pull the latest source code
|
||||
```bash
|
||||
cd ragflow
|
||||
git pull
|
||||
```
|
||||
2. If you used `docker compose up -d` to start up RAGFlow server:
|
||||
```bash
|
||||
docker pull infiniflow/ragflow:dev
|
||||
```
|
||||
```bash
|
||||
docker compose up ragflow -d
|
||||
```
|
||||
3. If you used `docker compose -f docker-compose-CN.yml up -d` to start up RAGFlow server:
|
||||
```bash
|
||||
docker pull swr.cn-north-4.myhuaweicloud.com/infiniflow/ragflow:dev
|
||||
```
|
||||
```bash
|
||||
docker compose -f docker-compose-CN.yml up -d
|
||||
```
|
||||
|
||||
To upgrade RAGFlow to the latest version:
|
||||
|
||||
1. Update **ragflow/docker/.env** as follows:
|
||||
```bash
|
||||
RAGFLOW_VERSION=latest
|
||||
```
|
||||
2. Pull the latest source code:
|
||||
```bash
|
||||
cd ragflow
|
||||
git pull
|
||||
```
|
||||
|
||||
3. If you used `docker compose up -d` to start up RAGFlow server:
|
||||
```bash
|
||||
docker pull infiniflow/ragflow:latest
|
||||
```
|
||||
```bash
|
||||
docker compose up ragflow -d
|
||||
```
|
||||
4. If you used `docker compose -f docker-compose-CN.yml up -d` to start up RAGFlow server:
|
||||
```bash
|
||||
docker pull swr.cn-north-4.myhuaweicloud.com/infiniflow/ragflow:latest
|
||||
```
|
||||
```bash
|
||||
docker compose -f docker-compose-CN.yml up -d
|
||||
```
|
||||
@ -1,43 +0,0 @@
|
||||
# Xinference
|
||||
|
||||
<div align="center" style="margin-top:20px;margin-bottom:20px;">
|
||||
<img src="https://github.com/infiniflow/ragflow/assets/12318111/2c5e86a7-807b-4d29-bd2b-f73fb1018866" width="130"/>
|
||||
</div>
|
||||
|
||||
Xorbits Inference ([Xinference](https://github.com/xorbitsai/inference)) empowers you to unleash the full potential of cutting-edge AI models.
|
||||
|
||||
## Install
|
||||
|
||||
- [pip install "xinference[all]"](https://inference.readthedocs.io/en/latest/getting_started/installation.html)
|
||||
- [Docker](https://inference.readthedocs.io/en/latest/getting_started/using_docker_image.html)
|
||||
|
||||
To start a local instance of Xinference, run the following command:
|
||||
```bash
|
||||
$ xinference-local --host 0.0.0.0 --port 9997
|
||||
```
|
||||
## Launch Xinference
|
||||
|
||||
Decide which LLM you want to deploy ([here's a list of supported LLMs](https://inference.readthedocs.io/en/latest/models/builtin/)), say, **mistral**.
|
||||
Execute the following command to launch the model, remembering to replace `${quantization}` with your chosen quantization method:
|
||||
```bash
|
||||
$ xinference launch -u mistral --model-name mistral-v0.1 --size-in-billions 7 --model-format pytorch --quantization ${quantization}
|
||||
```
|
||||
|
||||
## Use Xinference in RAGFlow
|
||||
|
||||
- Go to 'Settings > Model Providers > Models to be added > Xinference'.
|
||||
|
||||
<div align="center" style="margin-top:20px;margin-bottom:20px;">
|
||||
<img src="https://github.com/infiniflow/ragflow/assets/12318111/bcbf4d7a-ade6-44c7-ad5f-0a92c8a73789" width="1300"/>
|
||||
</div>
|
||||
|
||||
> Base URL: Enter the base URL where the Xinference service is accessible, for example, `http://<your-xinference-endpoint-domain>:9997/v1`.
|
||||
|
||||
- Use Xinference Models.
|
||||
|
||||
<div align="center" style="margin-top:20px;margin-bottom:20px;">
|
||||
<img src="https://github.com/infiniflow/ragflow/assets/12318111/b01fcb6f-47c9-4777-82e0-f1e947ed615a" width="530"/>
|
||||
</div>
|
||||
<div align="center" style="margin-top:20px;margin-bottom:20px;">
|
||||
<img src="https://github.com/infiniflow/ragflow/assets/12318111/1763dcd1-044f-438d-badd-9729f5b3a144" width="530"/>
|
||||
</div>
|
||||
@ -18,14 +18,14 @@ from io import BytesIO
|
||||
from rag.nlp import bullets_category, is_english, tokenize, remove_contents_table, \
|
||||
hierarchical_merge, make_colon_as_title, naive_merge, random_choices, tokenize_table, add_positions, \
|
||||
tokenize_chunks, find_codec
|
||||
from rag.nlp import huqie
|
||||
from deepdoc.parser import PdfParser, DocxParser, PlainParser
|
||||
from rag.nlp import rag_tokenizer
|
||||
from deepdoc.parser import PdfParser, DocxParser, PlainParser, HtmlParser
|
||||
|
||||
|
||||
class Pdf(PdfParser):
|
||||
def __call__(self, filename, binary=None, from_page=0,
|
||||
to_page=100000, zoomin=3, callback=None):
|
||||
callback(msg="OCR is running...")
|
||||
callback(msg="OCR is running...")
|
||||
self.__images__(
|
||||
filename if not binary else binary,
|
||||
zoomin,
|
||||
@ -63,9 +63,9 @@ def chunk(filename, binary=None, from_page=0, to_page=100000,
|
||||
"""
|
||||
doc = {
|
||||
"docnm_kwd": filename,
|
||||
"title_tks": huqie.qie(re.sub(r"\.[a-zA-Z]+$", "", filename))
|
||||
"title_tks": rag_tokenizer.tokenize(re.sub(r"\.[a-zA-Z]+$", "", filename))
|
||||
}
|
||||
doc["title_sm_tks"] = huqie.qieqie(doc["title_tks"])
|
||||
doc["title_sm_tks"] = rag_tokenizer.fine_grained_tokenize(doc["title_tks"])
|
||||
pdf_parser = None
|
||||
sections, tbls = [], []
|
||||
if re.search(r"\.docx$", filename, re.IGNORECASE):
|
||||
@ -91,7 +91,7 @@ def chunk(filename, binary=None, from_page=0, to_page=100000,
|
||||
txt = ""
|
||||
if binary:
|
||||
encoding = find_codec(binary)
|
||||
txt = binary.decode(encoding)
|
||||
txt = binary.decode(encoding, errors="ignore")
|
||||
else:
|
||||
with open(filename, "r") as f:
|
||||
while True:
|
||||
@ -105,6 +105,14 @@ def chunk(filename, binary=None, from_page=0, to_page=100000,
|
||||
random_choices([t for t, _ in sections], k=200)))
|
||||
callback(0.8, "Finish parsing.")
|
||||
|
||||
elif re.search(r"\.(htm|html)$", filename, re.IGNORECASE):
|
||||
callback(0.1, "Start to parse.")
|
||||
sections = HtmlParser()(filename, binary)
|
||||
sections = [(l, "") for l in sections if l]
|
||||
remove_contents_table(sections, eng=is_english(
|
||||
random_choices([t for t, _ in sections], k=200)))
|
||||
callback(0.8, "Finish parsing.")
|
||||
|
||||
elif re.search(r"\.doc$", filename, re.IGNORECASE):
|
||||
callback(0.1, "Start to parse.")
|
||||
binary = BytesIO(binary)
|
||||
@ -127,7 +135,7 @@ def chunk(filename, binary=None, from_page=0, to_page=100000,
|
||||
for ck in hierarchical_merge(bull, sections, 5)]
|
||||
else:
|
||||
sections = [s.split("@") for s, _ in sections]
|
||||
sections = [(pr[0], "@" + pr[1]) for pr in sections if len(pr) == 2]
|
||||
sections = [(pr[0], "@" + pr[1]) if len(pr) == 2 else (pr[0], '') for pr in sections ]
|
||||
chunks = naive_merge(
|
||||
sections, kwargs.get(
|
||||
"chunk_token_num", 256), kwargs.get(
|
||||
|
||||
@ -19,8 +19,8 @@ from docx import Document
|
||||
from api.db import ParserType
|
||||
from rag.nlp import bullets_category, is_english, tokenize, remove_contents_table, hierarchical_merge, \
|
||||
make_colon_as_title, add_positions, tokenize_chunks, find_codec
|
||||
from rag.nlp import huqie
|
||||
from deepdoc.parser import PdfParser, DocxParser, PlainParser
|
||||
from rag.nlp import rag_tokenizer
|
||||
from deepdoc.parser import PdfParser, DocxParser, PlainParser, HtmlParser
|
||||
from rag.settings import cron_logger
|
||||
|
||||
|
||||
@ -58,7 +58,7 @@ class Pdf(PdfParser):
|
||||
|
||||
def __call__(self, filename, binary=None, from_page=0,
|
||||
to_page=100000, zoomin=3, callback=None):
|
||||
callback(msg="OCR is running...")
|
||||
callback(msg="OCR is running...")
|
||||
self.__images__(
|
||||
filename if not binary else binary,
|
||||
zoomin,
|
||||
@ -89,9 +89,9 @@ def chunk(filename, binary=None, from_page=0, to_page=100000,
|
||||
"""
|
||||
doc = {
|
||||
"docnm_kwd": filename,
|
||||
"title_tks": huqie.qie(re.sub(r"\.[a-zA-Z]+$", "", filename))
|
||||
"title_tks": rag_tokenizer.tokenize(re.sub(r"\.[a-zA-Z]+$", "", filename))
|
||||
}
|
||||
doc["title_sm_tks"] = huqie.qieqie(doc["title_tks"])
|
||||
doc["title_sm_tks"] = rag_tokenizer.fine_grained_tokenize(doc["title_tks"])
|
||||
pdf_parser = None
|
||||
sections = []
|
||||
if re.search(r"\.docx$", filename, re.IGNORECASE):
|
||||
@ -113,7 +113,7 @@ def chunk(filename, binary=None, from_page=0, to_page=100000,
|
||||
txt = ""
|
||||
if binary:
|
||||
encoding = find_codec(binary)
|
||||
txt = binary.decode(encoding)
|
||||
txt = binary.decode(encoding, errors="ignore")
|
||||
else:
|
||||
with open(filename, "r") as f:
|
||||
while True:
|
||||
@ -125,6 +125,12 @@ def chunk(filename, binary=None, from_page=0, to_page=100000,
|
||||
sections = [l for l in sections if l]
|
||||
callback(0.8, "Finish parsing.")
|
||||
|
||||
elif re.search(r"\.(htm|html)$", filename, re.IGNORECASE):
|
||||
callback(0.1, "Start to parse.")
|
||||
sections = HtmlParser()(filename, binary)
|
||||
sections = [l for l in sections if l]
|
||||
callback(0.8, "Finish parsing.")
|
||||
|
||||
elif re.search(r"\.doc$", filename, re.IGNORECASE):
|
||||
callback(0.1, "Start to parse.")
|
||||
binary = BytesIO(binary)
|
||||
|
||||
@ -2,7 +2,7 @@ import copy
|
||||
import re
|
||||
|
||||
from api.db import ParserType
|
||||
from rag.nlp import huqie, tokenize, tokenize_table, add_positions, bullets_category, title_frequency, tokenize_chunks
|
||||
from rag.nlp import rag_tokenizer, tokenize, tokenize_table, add_positions, bullets_category, title_frequency, tokenize_chunks
|
||||
from deepdoc.parser import PdfParser, PlainParser
|
||||
from rag.utils import num_tokens_from_string
|
||||
|
||||
@ -16,7 +16,7 @@ class Pdf(PdfParser):
|
||||
to_page=100000, zoomin=3, callback=None):
|
||||
from timeit import default_timer as timer
|
||||
start = timer()
|
||||
callback(msg="OCR is running...")
|
||||
callback(msg="OCR is running...")
|
||||
self.__images__(
|
||||
filename if not binary else binary,
|
||||
zoomin,
|
||||
@ -70,8 +70,8 @@ def chunk(filename, binary=None, from_page=0, to_page=100000,
|
||||
doc = {
|
||||
"docnm_kwd": filename
|
||||
}
|
||||
doc["title_tks"] = huqie.qie(re.sub(r"\.[a-zA-Z]+$", "", doc["docnm_kwd"]))
|
||||
doc["title_sm_tks"] = huqie.qieqie(doc["title_tks"])
|
||||
doc["title_tks"] = rag_tokenizer.tokenize(re.sub(r"\.[a-zA-Z]+$", "", doc["docnm_kwd"]))
|
||||
doc["title_sm_tks"] = rag_tokenizer.fine_grained_tokenize(doc["title_tks"])
|
||||
# is it English
|
||||
eng = lang.lower() == "english" # pdf_parser.is_english
|
||||
|
||||
|
||||
@ -16,9 +16,11 @@ from docx import Document
|
||||
from timeit import default_timer as timer
|
||||
import re
|
||||
from deepdoc.parser.pdf_parser import PlainParser
|
||||
from rag.nlp import huqie, naive_merge, tokenize_table, tokenize_chunks, find_codec
|
||||
from deepdoc.parser import PdfParser, ExcelParser, DocxParser
|
||||
from rag.nlp import rag_tokenizer, naive_merge, tokenize_table, tokenize_chunks, find_codec
|
||||
from deepdoc.parser import PdfParser, ExcelParser, DocxParser, HtmlParser
|
||||
from rag.settings import cron_logger
|
||||
from rag.utils import num_tokens_from_string
|
||||
|
||||
|
||||
class Docx(DocxParser):
|
||||
def __init__(self):
|
||||
@ -69,7 +71,7 @@ class Pdf(PdfParser):
|
||||
def __call__(self, filename, binary=None, from_page=0,
|
||||
to_page=100000, zoomin=3, callback=None):
|
||||
start = timer()
|
||||
callback(msg="OCR is running...")
|
||||
callback(msg="OCR is running...")
|
||||
self.__images__(
|
||||
filename if not binary else binary,
|
||||
zoomin,
|
||||
@ -112,9 +114,9 @@ def chunk(filename, binary=None, from_page=0, to_page=100000,
|
||||
"chunk_token_num": 128, "delimiter": "\n!?。;!?", "layout_recognize": True})
|
||||
doc = {
|
||||
"docnm_kwd": filename,
|
||||
"title_tks": huqie.qie(re.sub(r"\.[a-zA-Z]+$", "", filename))
|
||||
"title_tks": rag_tokenizer.tokenize(re.sub(r"\.[a-zA-Z]+$", "", filename))
|
||||
}
|
||||
doc["title_sm_tks"] = huqie.qieqie(doc["title_tks"])
|
||||
doc["title_sm_tks"] = rag_tokenizer.fine_grained_tokenize(doc["title_tks"])
|
||||
res = []
|
||||
pdf_parser = None
|
||||
sections = []
|
||||
@ -134,14 +136,14 @@ def chunk(filename, binary=None, from_page=0, to_page=100000,
|
||||
elif re.search(r"\.xlsx?$", filename, re.IGNORECASE):
|
||||
callback(0.1, "Start to parse.")
|
||||
excel_parser = ExcelParser()
|
||||
sections = [(excel_parser.html(binary), "")]
|
||||
sections = [(l, "") for l in excel_parser.html(binary) if l]
|
||||
|
||||
elif re.search(r"\.(txt|md)$", filename, re.IGNORECASE):
|
||||
elif re.search(r"\.(txt|md|py|js|java|c|cpp|h|php|go|ts|sh|cs|kt)$", filename, re.IGNORECASE):
|
||||
callback(0.1, "Start to parse.")
|
||||
txt = ""
|
||||
if binary:
|
||||
encoding = find_codec(binary)
|
||||
txt = binary.decode(encoding)
|
||||
txt = binary.decode(encoding, errors="ignore")
|
||||
else:
|
||||
with open(filename, "r") as f:
|
||||
while True:
|
||||
@ -149,7 +151,19 @@ def chunk(filename, binary=None, from_page=0, to_page=100000,
|
||||
if not l:
|
||||
break
|
||||
txt += l
|
||||
sections = txt.split("\n")
|
||||
sections = []
|
||||
for sec in txt.split("\n"):
|
||||
if num_tokens_from_string(sec) > 10 * parser_config.get("chunk_token_num", 128):
|
||||
sections.append((sec[:int(len(sec)/2)], ""))
|
||||
sections.append((sec[int(len(sec)/2):], ""))
|
||||
else:
|
||||
sections.append((sec, ""))
|
||||
|
||||
callback(0.8, "Finish parsing.")
|
||||
|
||||
elif re.search(r"\.(htm|html)$", filename, re.IGNORECASE):
|
||||
callback(0.1, "Start to parse.")
|
||||
sections = HtmlParser()(filename, binary)
|
||||
sections = [(l, "") for l in sections if l]
|
||||
callback(0.8, "Finish parsing.")
|
||||
|
||||
@ -163,7 +177,7 @@ def chunk(filename, binary=None, from_page=0, to_page=100000,
|
||||
|
||||
else:
|
||||
raise NotImplementedError(
|
||||
"file type not supported yet(doc, docx, pdf, txt supported)")
|
||||
"file type not supported yet(pdf, xlsx, doc, docx, txt supported)")
|
||||
|
||||
st = timer()
|
||||
chunks = naive_merge(
|
||||
|
||||
@ -14,14 +14,14 @@ from tika import parser
|
||||
from io import BytesIO
|
||||
import re
|
||||
from rag.app import laws
|
||||
from rag.nlp import huqie, tokenize, find_codec
|
||||
from deepdoc.parser import PdfParser, ExcelParser, PlainParser
|
||||
from rag.nlp import rag_tokenizer, tokenize, find_codec
|
||||
from deepdoc.parser import PdfParser, ExcelParser, PlainParser, HtmlParser
|
||||
|
||||
|
||||
class Pdf(PdfParser):
|
||||
def __call__(self, filename, binary=None, from_page=0,
|
||||
to_page=100000, zoomin=3, callback=None):
|
||||
callback(msg="OCR is running...")
|
||||
callback(msg="OCR is running...")
|
||||
self.__images__(
|
||||
filename if not binary else binary,
|
||||
zoomin,
|
||||
@ -78,14 +78,14 @@ def chunk(filename, binary=None, from_page=0, to_page=100000,
|
||||
elif re.search(r"\.xlsx?$", filename, re.IGNORECASE):
|
||||
callback(0.1, "Start to parse.")
|
||||
excel_parser = ExcelParser()
|
||||
sections = [excel_parser.html(binary)]
|
||||
sections = excel_parser.html(binary, 1000000000)
|
||||
|
||||
elif re.search(r"\.txt$", filename, re.IGNORECASE):
|
||||
callback(0.1, "Start to parse.")
|
||||
txt = ""
|
||||
if binary:
|
||||
encoding = find_codec(binary)
|
||||
txt = binary.decode(encoding)
|
||||
txt = binary.decode(encoding, errors="ignore")
|
||||
else:
|
||||
with open(filename, "r") as f:
|
||||
while True:
|
||||
@ -97,6 +97,12 @@ def chunk(filename, binary=None, from_page=0, to_page=100000,
|
||||
sections = [s for s in sections if s]
|
||||
callback(0.8, "Finish parsing.")
|
||||
|
||||
elif re.search(r"\.(htm|html)$", filename, re.IGNORECASE):
|
||||
callback(0.1, "Start to parse.")
|
||||
sections = HtmlParser()(filename, binary)
|
||||
sections = [s for s in sections if s]
|
||||
callback(0.8, "Finish parsing.")
|
||||
|
||||
elif re.search(r"\.doc$", filename, re.IGNORECASE):
|
||||
callback(0.1, "Start to parse.")
|
||||
binary = BytesIO(binary)
|
||||
@ -111,9 +117,9 @@ def chunk(filename, binary=None, from_page=0, to_page=100000,
|
||||
|
||||
doc = {
|
||||
"docnm_kwd": filename,
|
||||
"title_tks": huqie.qie(re.sub(r"\.[a-zA-Z]+$", "", filename))
|
||||
"title_tks": rag_tokenizer.tokenize(re.sub(r"\.[a-zA-Z]+$", "", filename))
|
||||
}
|
||||
doc["title_sm_tks"] = huqie.qieqie(doc["title_tks"])
|
||||
doc["title_sm_tks"] = rag_tokenizer.fine_grained_tokenize(doc["title_tks"])
|
||||
tokenize(doc, "\n".join(sections), eng)
|
||||
return [doc]
|
||||
|
||||
|
||||
@ -15,7 +15,7 @@ import re
|
||||
from collections import Counter
|
||||
|
||||
from api.db import ParserType
|
||||
from rag.nlp import huqie, tokenize, tokenize_table, add_positions, bullets_category, title_frequency, tokenize_chunks
|
||||
from rag.nlp import rag_tokenizer, tokenize, tokenize_table, add_positions, bullets_category, title_frequency, tokenize_chunks
|
||||
from deepdoc.parser import PdfParser, PlainParser
|
||||
import numpy as np
|
||||
from rag.utils import num_tokens_from_string
|
||||
@ -28,7 +28,7 @@ class Pdf(PdfParser):
|
||||
|
||||
def __call__(self, filename, binary=None, from_page=0,
|
||||
to_page=100000, zoomin=3, callback=None):
|
||||
callback(msg="OCR is running...")
|
||||
callback(msg="OCR is running...")
|
||||
self.__images__(
|
||||
filename if not binary else binary,
|
||||
zoomin,
|
||||
@ -153,10 +153,10 @@ def chunk(filename, binary=None, from_page=0, to_page=100000,
|
||||
else:
|
||||
raise NotImplementedError("file type not supported yet(pdf supported)")
|
||||
|
||||
doc = {"docnm_kwd": filename, "authors_tks": huqie.qie(paper["authors"]),
|
||||
"title_tks": huqie.qie(paper["title"] if paper["title"] else filename)}
|
||||
doc["title_sm_tks"] = huqie.qieqie(doc["title_tks"])
|
||||
doc["authors_sm_tks"] = huqie.qieqie(doc["authors_tks"])
|
||||
doc = {"docnm_kwd": filename, "authors_tks": rag_tokenizer.tokenize(paper["authors"]),
|
||||
"title_tks": rag_tokenizer.tokenize(paper["title"] if paper["title"] else filename)}
|
||||
doc["title_sm_tks"] = rag_tokenizer.fine_grained_tokenize(doc["title_tks"])
|
||||
doc["authors_sm_tks"] = rag_tokenizer.fine_grained_tokenize(doc["authors_tks"])
|
||||
# is it English
|
||||
eng = lang.lower() == "english" # pdf_parser.is_english
|
||||
print("It's English.....", eng)
|
||||
|
||||
@ -17,7 +17,7 @@ from io import BytesIO
|
||||
from PIL import Image
|
||||
|
||||
from rag.nlp import tokenize, is_english
|
||||
from rag.nlp import huqie
|
||||
from rag.nlp import rag_tokenizer
|
||||
from deepdoc.parser import PdfParser, PptParser, PlainParser
|
||||
from PyPDF2 import PdfReader as pdf2_read
|
||||
|
||||
@ -58,7 +58,7 @@ class Pdf(PdfParser):
|
||||
|
||||
def __call__(self, filename, binary=None, from_page=0,
|
||||
to_page=100000, zoomin=3, callback=None):
|
||||
callback(msg="OCR is running...")
|
||||
callback(msg="OCR is running...")
|
||||
self.__images__(filename if not binary else binary,
|
||||
zoomin, from_page, to_page, callback)
|
||||
callback(0.8, "Page {}~{}: OCR finished".format(
|
||||
@ -96,9 +96,9 @@ def chunk(filename, binary=None, from_page=0, to_page=100000,
|
||||
eng = lang.lower() == "english"
|
||||
doc = {
|
||||
"docnm_kwd": filename,
|
||||
"title_tks": huqie.qie(re.sub(r"\.[a-zA-Z]+$", "", filename))
|
||||
"title_tks": rag_tokenizer.tokenize(re.sub(r"\.[a-zA-Z]+$", "", filename))
|
||||
}
|
||||
doc["title_sm_tks"] = huqie.qieqie(doc["title_tks"])
|
||||
doc["title_sm_tks"] = rag_tokenizer.fine_grained_tokenize(doc["title_tks"])
|
||||
res = []
|
||||
if re.search(r"\.pptx?$", filename, re.IGNORECASE):
|
||||
ppt_parser = Ppt()
|
||||
|
||||
@ -16,7 +16,7 @@ from io import BytesIO
|
||||
from nltk import word_tokenize
|
||||
from openpyxl import load_workbook
|
||||
from rag.nlp import is_english, random_choices, find_codec
|
||||
from rag.nlp import huqie
|
||||
from rag.nlp import rag_tokenizer
|
||||
from deepdoc.parser import ExcelParser
|
||||
|
||||
|
||||
@ -73,8 +73,8 @@ def beAdoc(d, q, a, eng):
|
||||
aprefix = "Answer: " if eng else "回答:"
|
||||
d["content_with_weight"] = "\t".join(
|
||||
[qprefix + rmPrefix(q), aprefix + rmPrefix(a)])
|
||||
d["content_ltks"] = huqie.qie(q)
|
||||
d["content_sm_ltks"] = huqie.qieqie(d["content_ltks"])
|
||||
d["content_ltks"] = rag_tokenizer.tokenize(q)
|
||||
d["content_sm_ltks"] = rag_tokenizer.fine_grained_tokenize(d["content_ltks"])
|
||||
return d
|
||||
|
||||
|
||||
@ -94,7 +94,7 @@ def chunk(filename, binary=None, lang="Chinese", callback=None, **kwargs):
|
||||
res = []
|
||||
doc = {
|
||||
"docnm_kwd": filename,
|
||||
"title_tks": huqie.qie(re.sub(r"\.[a-zA-Z]+$", "", filename))
|
||||
"title_tks": rag_tokenizer.tokenize(re.sub(r"\.[a-zA-Z]+$", "", filename))
|
||||
}
|
||||
if re.search(r"\.xlsx?$", filename, re.IGNORECASE):
|
||||
callback(0.1, "Start to parse.")
|
||||
@ -107,7 +107,7 @@ def chunk(filename, binary=None, lang="Chinese", callback=None, **kwargs):
|
||||
txt = ""
|
||||
if binary:
|
||||
encoding = find_codec(binary)
|
||||
txt = binary.decode(encoding)
|
||||
txt = binary.decode(encoding, errors="ignore")
|
||||
else:
|
||||
with open(filename, "r") as f:
|
||||
while True:
|
||||
@ -116,18 +116,31 @@ def chunk(filename, binary=None, lang="Chinese", callback=None, **kwargs):
|
||||
break
|
||||
txt += l
|
||||
lines = txt.split("\n")
|
||||
#is_english([rmPrefix(l) for l in lines[:100]])
|
||||
comma, tab = 0, 0
|
||||
for l in lines:
|
||||
if len(l.split(",")) == 2: comma += 1
|
||||
if len(l.split("\t")) == 2: tab += 1
|
||||
delimiter = "\t" if tab >= comma else ","
|
||||
|
||||
fails = []
|
||||
for i, line in enumerate(lines):
|
||||
arr = [l for l in line.split("\t") if len(l) > 1]
|
||||
question, answer = "", ""
|
||||
i = 0
|
||||
while i < len(lines):
|
||||
arr = lines[i].split(delimiter)
|
||||
if len(arr) != 2:
|
||||
fails.append(str(i))
|
||||
continue
|
||||
res.append(beAdoc(deepcopy(doc), arr[0], arr[1], eng))
|
||||
if question: answer += "\n" + lines[i]
|
||||
else:
|
||||
fails.append(str(i+1))
|
||||
elif len(arr) == 2:
|
||||
if question and answer: res.append(beAdoc(deepcopy(doc), question, answer, eng))
|
||||
question, answer = arr
|
||||
i += 1
|
||||
if len(res) % 999 == 0:
|
||||
callback(len(res) * 0.6 / len(lines), ("Extract Q&A: {}".format(len(res)) + (
|
||||
f"{len(fails)} failure, line: %s..." % (",".join(fails[:3])) if fails else "")))
|
||||
|
||||
if question: res.append(beAdoc(deepcopy(doc), question, answer, eng))
|
||||
|
||||
callback(0.6, ("Extract Q&A: {}".format(len(res)) + (
|
||||
f"{len(fails)} failure, line: %s..." % (",".join(fails[:3])) if fails else "")))
|
||||
|
||||
|
||||
@ -18,7 +18,7 @@ import re
|
||||
import pandas as pd
|
||||
import requests
|
||||
from api.db.services.knowledgebase_service import KnowledgebaseService
|
||||
from rag.nlp import huqie
|
||||
from rag.nlp import rag_tokenizer
|
||||
from deepdoc.parser.resume import refactor
|
||||
from deepdoc.parser.resume import step_one, step_two
|
||||
from rag.settings import cron_logger
|
||||
@ -131,9 +131,9 @@ def chunk(filename, binary=None, callback=None, **kwargs):
|
||||
titles.append(str(v))
|
||||
doc = {
|
||||
"docnm_kwd": filename,
|
||||
"title_tks": huqie.qie("-".join(titles) + "-简历")
|
||||
"title_tks": rag_tokenizer.tokenize("-".join(titles) + "-简历")
|
||||
}
|
||||
doc["title_sm_tks"] = huqie.qieqie(doc["title_tks"])
|
||||
doc["title_sm_tks"] = rag_tokenizer.fine_grained_tokenize(doc["title_tks"])
|
||||
pairs = []
|
||||
for n, m in field_map.items():
|
||||
if not resume.get(n):
|
||||
@ -147,8 +147,8 @@ def chunk(filename, binary=None, callback=None, **kwargs):
|
||||
|
||||
doc["content_with_weight"] = "\n".join(
|
||||
["{}: {}".format(re.sub(r"([^()]+)", "", k), v) for k, v in pairs])
|
||||
doc["content_ltks"] = huqie.qie(doc["content_with_weight"])
|
||||
doc["content_sm_ltks"] = huqie.qieqie(doc["content_ltks"])
|
||||
doc["content_ltks"] = rag_tokenizer.tokenize(doc["content_with_weight"])
|
||||
doc["content_sm_ltks"] = rag_tokenizer.fine_grained_tokenize(doc["content_ltks"])
|
||||
for n, _ in field_map.items():
|
||||
if n not in resume:
|
||||
continue
|
||||
@ -156,7 +156,7 @@ def chunk(filename, binary=None, callback=None, **kwargs):
|
||||
len(resume[n]) == 1 or n not in forbidden_select_fields4resume):
|
||||
resume[n] = resume[n][0]
|
||||
if n.find("_tks") > 0:
|
||||
resume[n] = huqie.qieqie(resume[n])
|
||||
resume[n] = rag_tokenizer.fine_grained_tokenize(resume[n])
|
||||
doc[n] = resume[n]
|
||||
|
||||
print(doc)
|
||||
|
||||
@ -20,7 +20,7 @@ from openpyxl import load_workbook
|
||||
from dateutil.parser import parse as datetime_parse
|
||||
|
||||
from api.db.services.knowledgebase_service import KnowledgebaseService
|
||||
from rag.nlp import huqie, is_english, tokenize, find_codec
|
||||
from rag.nlp import rag_tokenizer, is_english, tokenize, find_codec
|
||||
from deepdoc.parser import ExcelParser
|
||||
|
||||
|
||||
@ -47,6 +47,7 @@ class Excel(ExcelParser):
|
||||
cell.value for i,
|
||||
cell in enumerate(
|
||||
rows[0]) if i not in missed]
|
||||
if not headers:continue
|
||||
data = []
|
||||
for i, r in enumerate(rows[1:]):
|
||||
rn += 1
|
||||
@ -148,7 +149,7 @@ def chunk(filename, binary=None, from_page=0, to_page=10000000000,
|
||||
txt = ""
|
||||
if binary:
|
||||
encoding = find_codec(binary)
|
||||
txt = binary.decode(encoding)
|
||||
txt = binary.decode(encoding, errors="ignore")
|
||||
else:
|
||||
with open(filename, "r") as f:
|
||||
while True:
|
||||
@ -216,7 +217,7 @@ def chunk(filename, binary=None, from_page=0, to_page=10000000000,
|
||||
for ii, row in df.iterrows():
|
||||
d = {
|
||||
"docnm_kwd": filename,
|
||||
"title_tks": huqie.qie(re.sub(r"\.[a-zA-Z]+$", "", filename))
|
||||
"title_tks": rag_tokenizer.tokenize(re.sub(r"\.[a-zA-Z]+$", "", filename))
|
||||
}
|
||||
row_txt = []
|
||||
for j in range(len(clmns)):
|
||||
@ -227,7 +228,7 @@ def chunk(filename, binary=None, from_page=0, to_page=10000000000,
|
||||
if pd.isna(row[clmns[j]]):
|
||||
continue
|
||||
fld = clmns_map[j][0]
|
||||
d[fld] = row[clmns[j]] if clmn_tys[j] != "text" else huqie.qie(
|
||||
d[fld] = row[clmns[j]] if clmn_tys[j] != "text" else rag_tokenizer.tokenize(
|
||||
row[clmns[j]])
|
||||
row_txt.append("{}:{}".format(clmns[j], row[clmns[j]]))
|
||||
if not row_txt:
|
||||
|
||||
@ -16,16 +16,20 @@
|
||||
from .embedding_model import *
|
||||
from .chat_model import *
|
||||
from .cv_model import *
|
||||
from .rerank_model import *
|
||||
|
||||
|
||||
EmbeddingModel = {
|
||||
"Ollama": OllamaEmbed,
|
||||
"OpenAI": OpenAIEmbed,
|
||||
"Xinference": XinferenceEmbed,
|
||||
"Tongyi-Qianwen": HuEmbedding, #QWenEmbed,
|
||||
"Tongyi-Qianwen": DefaultEmbedding,#QWenEmbed,
|
||||
"ZHIPU-AI": ZhipuEmbed,
|
||||
"FastEmbed": FastEmbed,
|
||||
"Youdao": YoudaoEmbed
|
||||
"Youdao": YoudaoEmbed,
|
||||
"BaiChuan": BaiChuanEmbed,
|
||||
"Jina": JinaEmbed,
|
||||
"BAAI": DefaultEmbedding
|
||||
}
|
||||
|
||||
|
||||
@ -45,6 +49,14 @@ ChatModel = {
|
||||
"Tongyi-Qianwen": QWenChat,
|
||||
"Ollama": OllamaChat,
|
||||
"Xinference": XinferenceChat,
|
||||
"Moonshot": MoonshotChat
|
||||
"Moonshot": MoonshotChat,
|
||||
"DeepSeek": DeepSeekChat,
|
||||
"BaiChuan": BaiChuanChat
|
||||
}
|
||||
|
||||
|
||||
RerankModel = {
|
||||
"BAAI": DefaultRerank,
|
||||
"Jina": JinaRerank,
|
||||
"Youdao": YoudaoRerank,
|
||||
}
|
||||
|
||||
@ -19,21 +19,13 @@ from abc import ABC
|
||||
from openai import OpenAI
|
||||
import openai
|
||||
from ollama import Client
|
||||
from volcengine.maas.v2 import MaasService
|
||||
from rag.nlp import is_english
|
||||
from rag.utils import num_tokens_from_string
|
||||
|
||||
|
||||
class Base(ABC):
|
||||
def __init__(self, key, model_name):
|
||||
pass
|
||||
|
||||
def chat(self, system, history, gen_conf):
|
||||
raise NotImplementedError("Please implement encode method!")
|
||||
|
||||
|
||||
class GptTurbo(Base):
|
||||
def __init__(self, key, model_name="gpt-3.5-turbo", base_url="https://api.openai.com/v1"):
|
||||
if not base_url: base_url="https://api.openai.com/v1"
|
||||
def __init__(self, key, model_name, base_url):
|
||||
self.client = OpenAI(api_key=key, base_url=base_url)
|
||||
self.model_name = model_name
|
||||
|
||||
@ -53,13 +45,69 @@ class GptTurbo(Base):
|
||||
except openai.APIError as e:
|
||||
return "**ERROR**: " + str(e), 0
|
||||
|
||||
def chat_streamly(self, system, history, gen_conf):
|
||||
if system:
|
||||
history.insert(0, {"role": "system", "content": system})
|
||||
ans = ""
|
||||
total_tokens = 0
|
||||
try:
|
||||
response = self.client.chat.completions.create(
|
||||
model=self.model_name,
|
||||
messages=history,
|
||||
stream=True,
|
||||
**gen_conf)
|
||||
for resp in response:
|
||||
if not resp.choices or not resp.choices[0].delta.content:continue
|
||||
ans += resp.choices[0].delta.content
|
||||
total_tokens += 1
|
||||
if resp.choices[0].finish_reason == "length":
|
||||
ans += "...\nFor the content length reason, it stopped, continue?" if is_english(
|
||||
[ans]) else "······\n由于长度的原因,回答被截断了,要继续吗?"
|
||||
yield ans
|
||||
|
||||
class MoonshotChat(GptTurbo):
|
||||
except openai.APIError as e:
|
||||
yield ans + "\n**ERROR**: " + str(e)
|
||||
|
||||
yield total_tokens
|
||||
|
||||
|
||||
class GptTurbo(Base):
|
||||
def __init__(self, key, model_name="gpt-3.5-turbo", base_url="https://api.openai.com/v1"):
|
||||
if not base_url: base_url="https://api.openai.com/v1"
|
||||
super().__init__(key, model_name, base_url)
|
||||
|
||||
|
||||
class MoonshotChat(Base):
|
||||
def __init__(self, key, model_name="moonshot-v1-8k", base_url="https://api.moonshot.cn/v1"):
|
||||
if not base_url: base_url="https://api.moonshot.cn/v1"
|
||||
self.client = OpenAI(
|
||||
api_key=key, base_url=base_url)
|
||||
self.model_name = model_name
|
||||
super().__init__(key, model_name, base_url)
|
||||
|
||||
|
||||
class XinferenceChat(Base):
|
||||
def __init__(self, key=None, model_name="", base_url=""):
|
||||
key = "xxx"
|
||||
super().__init__(key, model_name, base_url)
|
||||
|
||||
|
||||
class DeepSeekChat(Base):
|
||||
def __init__(self, key, model_name="deepseek-chat", base_url="https://api.deepseek.com/v1"):
|
||||
if not base_url: base_url="https://api.deepseek.com/v1"
|
||||
super().__init__(key, model_name, base_url)
|
||||
|
||||
|
||||
class BaiChuanChat(Base):
|
||||
def __init__(self, key, model_name="Baichuan3-Turbo", base_url="https://api.baichuan-ai.com/v1"):
|
||||
if not base_url:
|
||||
base_url = "https://api.baichuan-ai.com/v1"
|
||||
super().__init__(key, model_name, base_url)
|
||||
|
||||
@staticmethod
|
||||
def _format_params(params):
|
||||
return {
|
||||
"temperature": params.get("temperature", 0.3),
|
||||
"max_tokens": params.get("max_tokens", 2048),
|
||||
"top_p": params.get("top_p", 0.85),
|
||||
}
|
||||
|
||||
def chat(self, system, history, gen_conf):
|
||||
if system:
|
||||
@ -68,7 +116,16 @@ class MoonshotChat(GptTurbo):
|
||||
response = self.client.chat.completions.create(
|
||||
model=self.model_name,
|
||||
messages=history,
|
||||
**gen_conf)
|
||||
extra_body={
|
||||
"tools": [{
|
||||
"type": "web_search",
|
||||
"web_search": {
|
||||
"enable": True,
|
||||
"search_mode": "performance_first"
|
||||
}
|
||||
}]
|
||||
},
|
||||
**self._format_params(gen_conf))
|
||||
ans = response.choices[0].message.content.strip()
|
||||
if response.choices[0].finish_reason == "length":
|
||||
ans += "...\nFor the content length reason, it stopped, continue?" if is_english(
|
||||
@ -77,6 +134,44 @@ class MoonshotChat(GptTurbo):
|
||||
except openai.APIError as e:
|
||||
return "**ERROR**: " + str(e), 0
|
||||
|
||||
def chat_streamly(self, system, history, gen_conf):
|
||||
if system:
|
||||
history.insert(0, {"role": "system", "content": system})
|
||||
ans = ""
|
||||
total_tokens = 0
|
||||
try:
|
||||
response = self.client.chat.completions.create(
|
||||
model=self.model_name,
|
||||
messages=history,
|
||||
extra_body={
|
||||
"tools": [{
|
||||
"type": "web_search",
|
||||
"web_search": {
|
||||
"enable": True,
|
||||
"search_mode": "performance_first"
|
||||
}
|
||||
}]
|
||||
},
|
||||
stream=True,
|
||||
**self._format_params(gen_conf))
|
||||
for resp in response:
|
||||
if resp.choices[0].finish_reason == "stop":
|
||||
if not resp.choices[0].delta.content:
|
||||
continue
|
||||
total_tokens = resp.usage.get('total_tokens', 0)
|
||||
if not resp.choices[0].delta.content:
|
||||
continue
|
||||
ans += resp.choices[0].delta.content
|
||||
if resp.choices[0].finish_reason == "length":
|
||||
ans += "...\nFor the content length reason, it stopped, continue?" if is_english(
|
||||
[ans]) else "······\n由于长度的原因,回答被截断了,要继续吗?"
|
||||
yield ans
|
||||
|
||||
except Exception as e:
|
||||
yield ans + "\n**ERROR**: " + str(e)
|
||||
|
||||
yield total_tokens
|
||||
|
||||
|
||||
class QWenChat(Base):
|
||||
def __init__(self, key, model_name=Generation.Models.qwen_turbo, **kwargs):
|
||||
@ -106,6 +201,35 @@ class QWenChat(Base):
|
||||
|
||||
return "**ERROR**: " + response.message, tk_count
|
||||
|
||||
def chat_streamly(self, system, history, gen_conf):
|
||||
from http import HTTPStatus
|
||||
if system:
|
||||
history.insert(0, {"role": "system", "content": system})
|
||||
ans = ""
|
||||
tk_count = 0
|
||||
try:
|
||||
response = Generation.call(
|
||||
self.model_name,
|
||||
messages=history,
|
||||
result_format='message',
|
||||
stream=True,
|
||||
**gen_conf
|
||||
)
|
||||
for resp in response:
|
||||
if resp.status_code == HTTPStatus.OK:
|
||||
ans = resp.output.choices[0]['message']['content']
|
||||
tk_count = resp.usage.total_tokens
|
||||
if resp.output.choices[0].get("finish_reason", "") == "length":
|
||||
ans += "...\nFor the content length reason, it stopped, continue?" if is_english(
|
||||
[ans]) else "······\n由于长度的原因,回答被截断了,要继续吗?"
|
||||
yield ans
|
||||
else:
|
||||
yield ans + "\n**ERROR**: " + resp.message if str(resp.message).find("Access")<0 else "Out of credit. Please set the API key in **settings > Model providers.**"
|
||||
except Exception as e:
|
||||
yield ans + "\n**ERROR**: " + str(e)
|
||||
|
||||
yield tk_count
|
||||
|
||||
|
||||
class ZhipuChat(Base):
|
||||
def __init__(self, key, model_name="glm-3-turbo", **kwargs):
|
||||
@ -131,6 +255,35 @@ class ZhipuChat(Base):
|
||||
except Exception as e:
|
||||
return "**ERROR**: " + str(e), 0
|
||||
|
||||
def chat_streamly(self, system, history, gen_conf):
|
||||
if system:
|
||||
history.insert(0, {"role": "system", "content": system})
|
||||
if "presence_penalty" in gen_conf: del gen_conf["presence_penalty"]
|
||||
if "frequency_penalty" in gen_conf: del gen_conf["frequency_penalty"]
|
||||
ans = ""
|
||||
tk_count = 0
|
||||
try:
|
||||
response = self.client.chat.completions.create(
|
||||
model=self.model_name,
|
||||
messages=history,
|
||||
stream=True,
|
||||
**gen_conf
|
||||
)
|
||||
for resp in response:
|
||||
if not resp.choices[0].delta.content:continue
|
||||
delta = resp.choices[0].delta.content
|
||||
ans += delta
|
||||
if resp.choices[0].finish_reason == "length":
|
||||
ans += "...\nFor the content length reason, it stopped, continue?" if is_english(
|
||||
[ans]) else "······\n由于长度的原因,回答被截断了,要继续吗?"
|
||||
tk_count = resp.usage.total_tokens
|
||||
if resp.choices[0].finish_reason == "stop": tk_count = resp.usage.total_tokens
|
||||
yield ans
|
||||
except Exception as e:
|
||||
yield ans + "\n**ERROR**: " + str(e)
|
||||
|
||||
yield tk_count
|
||||
|
||||
|
||||
class OllamaChat(Base):
|
||||
def __init__(self, key, model_name, **kwargs):
|
||||
@ -141,41 +294,173 @@ class OllamaChat(Base):
|
||||
if system:
|
||||
history.insert(0, {"role": "system", "content": system})
|
||||
try:
|
||||
options = {"temperature": gen_conf.get("temperature", 0.1),
|
||||
"num_predict": gen_conf.get("max_tokens", 128),
|
||||
"top_k": gen_conf.get("top_p", 0.3),
|
||||
"presence_penalty": gen_conf.get("presence_penalty", 0.4),
|
||||
"frequency_penalty": gen_conf.get("frequency_penalty", 0.7),
|
||||
}
|
||||
options = {}
|
||||
if "temperature" in gen_conf: options["temperature"] = gen_conf["temperature"]
|
||||
if "max_tokens" in gen_conf: options["num_predict"] = gen_conf["max_tokens"]
|
||||
if "top_p" in gen_conf: options["top_k"] = gen_conf["top_p"]
|
||||
if "presence_penalty" in gen_conf: options["presence_penalty"] = gen_conf["presence_penalty"]
|
||||
if "frequency_penalty" in gen_conf: options["frequency_penalty"] = gen_conf["frequency_penalty"]
|
||||
response = self.client.chat(
|
||||
model=self.model_name,
|
||||
messages=history,
|
||||
options=options
|
||||
options=options,
|
||||
keep_alive=-1
|
||||
)
|
||||
ans = response["message"]["content"].strip()
|
||||
return ans, response["eval_count"] + response.get("prompt_eval_count", 0)
|
||||
except Exception as e:
|
||||
return "**ERROR**: " + str(e), 0
|
||||
|
||||
def chat_streamly(self, system, history, gen_conf):
|
||||
if system:
|
||||
history.insert(0, {"role": "system", "content": system})
|
||||
options = {}
|
||||
if "temperature" in gen_conf: options["temperature"] = gen_conf["temperature"]
|
||||
if "max_tokens" in gen_conf: options["num_predict"] = gen_conf["max_tokens"]
|
||||
if "top_p" in gen_conf: options["top_k"] = gen_conf["top_p"]
|
||||
if "presence_penalty" in gen_conf: options["presence_penalty"] = gen_conf["presence_penalty"]
|
||||
if "frequency_penalty" in gen_conf: options["frequency_penalty"] = gen_conf["frequency_penalty"]
|
||||
ans = ""
|
||||
try:
|
||||
response = self.client.chat(
|
||||
model=self.model_name,
|
||||
messages=history,
|
||||
stream=True,
|
||||
options=options,
|
||||
keep_alive=-1
|
||||
)
|
||||
for resp in response:
|
||||
if resp["done"]:
|
||||
yield resp.get("prompt_eval_count", 0) + resp.get("eval_count", 0)
|
||||
ans += resp["message"]["content"]
|
||||
yield ans
|
||||
except Exception as e:
|
||||
yield ans + "\n**ERROR**: " + str(e)
|
||||
yield 0
|
||||
|
||||
class XinferenceChat(Base):
|
||||
def __init__(self, key=None, model_name="", base_url=""):
|
||||
self.client = OpenAI(api_key="xxx", base_url=base_url)
|
||||
self.model_name = model_name
|
||||
|
||||
class LocalLLM(Base):
|
||||
class RPCProxy:
|
||||
def __init__(self, host, port):
|
||||
self.host = host
|
||||
self.port = int(port)
|
||||
self.__conn()
|
||||
|
||||
def __conn(self):
|
||||
from multiprocessing.connection import Client
|
||||
self._connection = Client(
|
||||
(self.host, self.port), authkey=b'infiniflow-token4kevinhu')
|
||||
|
||||
def __getattr__(self, name):
|
||||
import pickle
|
||||
|
||||
def do_rpc(*args, **kwargs):
|
||||
for _ in range(3):
|
||||
try:
|
||||
self._connection.send(
|
||||
pickle.dumps((name, args, kwargs)))
|
||||
return pickle.loads(self._connection.recv())
|
||||
except Exception as e:
|
||||
self.__conn()
|
||||
raise Exception("RPC connection lost!")
|
||||
|
||||
return do_rpc
|
||||
|
||||
def __init__(self, key, model_name="glm-3-turbo"):
|
||||
self.client = LocalLLM.RPCProxy("127.0.0.1", 7860)
|
||||
|
||||
def chat(self, system, history, gen_conf):
|
||||
if system:
|
||||
history.insert(0, {"role": "system", "content": system})
|
||||
try:
|
||||
response = self.client.chat.completions.create(
|
||||
model=self.model_name,
|
||||
messages=history,
|
||||
**gen_conf)
|
||||
ans = self.client.chat(
|
||||
history,
|
||||
gen_conf
|
||||
)
|
||||
return ans, num_tokens_from_string(ans)
|
||||
except Exception as e:
|
||||
return "**ERROR**: " + str(e), 0
|
||||
|
||||
def chat_streamly(self, system, history, gen_conf):
|
||||
if system:
|
||||
history.insert(0, {"role": "system", "content": system})
|
||||
token_count = 0
|
||||
answer = ""
|
||||
try:
|
||||
for ans in self.client.chat_streamly(history, gen_conf):
|
||||
answer += ans
|
||||
token_count += 1
|
||||
yield answer
|
||||
except Exception as e:
|
||||
yield answer + "\n**ERROR**: " + str(e)
|
||||
|
||||
yield token_count
|
||||
|
||||
|
||||
class VolcEngineChat(Base):
|
||||
def __init__(self, key, model_name, base_url):
|
||||
"""
|
||||
Since we do not want to modify the original database fields, and the VolcEngine authentication method is quite special,
|
||||
we assemble ak, sk, and ep_id into api_key, store it as a dictionary, and parse it for use.
|
||||
model_name is for display only
|
||||
"""
|
||||
self.client = MaasService('maas-api.ml-platform-cn-beijing.volces.com', 'cn-beijing')
|
||||
self.volc_ak = eval(key).get('volc_ak', '')
|
||||
self.volc_sk = eval(key).get('volc_sk', '')
|
||||
self.client.set_ak(self.volc_ak)
|
||||
self.client.set_sk(self.volc_sk)
|
||||
self.model_name = eval(key).get('ep_id', '')
|
||||
|
||||
def chat(self, system, history, gen_conf):
|
||||
if system:
|
||||
history.insert(0, {"role": "system", "content": system})
|
||||
try:
|
||||
req = {
|
||||
"parameters": {
|
||||
"min_new_tokens": gen_conf.get("min_new_tokens", 1),
|
||||
"top_k": gen_conf.get("top_k", 0),
|
||||
"max_prompt_tokens": gen_conf.get("max_prompt_tokens", 30000),
|
||||
"temperature": gen_conf.get("temperature", 0.1),
|
||||
"max_new_tokens": gen_conf.get("max_tokens", 1000),
|
||||
"top_p": gen_conf.get("top_p", 0.3),
|
||||
},
|
||||
"messages": history
|
||||
}
|
||||
response = self.client.chat(self.model_name, req)
|
||||
ans = response.choices[0].message.content.strip()
|
||||
if response.choices[0].finish_reason == "length":
|
||||
ans += "...\nFor the content length reason, it stopped, continue?" if is_english(
|
||||
[ans]) else "······\n由于长度的原因,回答被截断了,要继续吗?"
|
||||
return ans, response.usage.total_tokens
|
||||
except openai.APIError as e:
|
||||
except Exception as e:
|
||||
return "**ERROR**: " + str(e), 0

    def chat_streamly(self, system, history, gen_conf):
        if system:
            history.insert(0, {"role": "system", "content": system})
        ans = ""
        tk_count = 0
        try:
            req = {
                "parameters": {
                    "min_new_tokens": gen_conf.get("min_new_tokens", 1),
                    "top_k": gen_conf.get("top_k", 0),
                    "max_prompt_tokens": gen_conf.get("max_prompt_tokens", 30000),
                    "temperature": gen_conf.get("temperature", 0.1),
                    "max_new_tokens": gen_conf.get("max_tokens", 1000),
                    "top_p": gen_conf.get("top_p", 0.3),
                },
                "messages": history
            }
            stream = self.client.stream_chat(self.model_name, req)
            for resp in stream:
                if not resp.choices[0].message.content:
                    continue
                ans += resp.choices[0].message.content
                if resp.choices[0].finish_reason == "stop":
                    tk_count = resp.usage.total_tokens
                yield ans

        except Exception as e:
            yield ans + "\n**ERROR**: " + str(e)
        yield tk_count

@ -13,8 +13,10 @@
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
import re
|
||||
from typing import Optional
|
||||
|
||||
import requests
|
||||
from huggingface_hub import snapshot_download
|
||||
from zhipuai import ZhipuAI
|
||||
import os
|
||||
@ -26,23 +28,8 @@ from FlagEmbedding import FlagModel
|
||||
import torch
|
||||
import numpy as np
|
||||
|
||||
from api.utils.file_utils import get_project_base_directory
|
||||
from rag.utils import num_tokens_from_string
|
||||
|
||||
|
||||
try:
|
||||
flag_model = FlagModel(os.path.join(
|
||||
get_project_base_directory(),
|
||||
"rag/res/bge-large-zh-v1.5"),
|
||||
query_instruction_for_retrieval="为这个句子生成表示以用于检索相关文章:",
|
||||
use_fp16=torch.cuda.is_available())
|
||||
except Exception as e:
|
||||
model_dir = snapshot_download(repo_id="BAAI/bge-large-zh-v1.5",
|
||||
local_dir=os.path.join(get_project_base_directory(), "rag/res/bge-large-zh-v1.5"),
|
||||
local_dir_use_symlinks=False)
|
||||
flag_model = FlagModel(model_dir,
|
||||
query_instruction_for_retrieval="为这个句子生成表示以用于检索相关文章:",
|
||||
use_fp16=torch.cuda.is_available())
|
||||
from api.utils.file_utils import get_home_cache_dir
|
||||
from rag.utils import num_tokens_from_string, truncate
|
||||
|
||||
|
||||
class Base(ABC):
|
||||
@ -56,8 +43,10 @@ class Base(ABC):
|
||||
raise NotImplementedError("Please implement encode method!")
|
||||
|
||||
|
||||
class HuEmbedding(Base):
|
||||
def __init__(self, *args, **kwargs):
|
||||
class DefaultEmbedding(Base):
|
||||
_model = None
|
||||
|
||||
def __init__(self, key, model_name, **kwargs):
|
||||
"""
|
||||
If you have trouble downloading HuggingFace models, -_^ this might help!!
|
||||
|
||||
@ -69,21 +58,32 @@ class HuEmbedding(Base):
|
||||
^_-
|
||||
|
||||
"""
|
||||
self.model = flag_model
|
||||
if not DefaultEmbedding._model:
|
||||
try:
|
||||
self._model = FlagModel(os.path.join(get_home_cache_dir(), re.sub(r"^[a-zA-Z]+/", "", model_name)),
|
||||
query_instruction_for_retrieval="为这个句子生成表示以用于检索相关文章:",
|
||||
use_fp16=torch.cuda.is_available())
|
||||
except Exception as e:
|
||||
model_dir = snapshot_download(repo_id="BAAI/bge-large-zh-v1.5",
|
||||
local_dir=os.path.join(get_home_cache_dir(), re.sub(r"^[a-zA-Z]+/", "", model_name)),
|
||||
local_dir_use_symlinks=False)
|
||||
self._model = FlagModel(model_dir,
|
||||
query_instruction_for_retrieval="为这个句子生成表示以用于检索相关文章:",
|
||||
use_fp16=torch.cuda.is_available())
|
||||
|
||||
def encode(self, texts: list, batch_size=32):
|
||||
texts = [t[:2000] for t in texts]
|
||||
texts = [truncate(t, 2048) for t in texts]
|
||||
token_count = 0
|
||||
for t in texts:
|
||||
token_count += num_tokens_from_string(t)
|
||||
res = []
|
||||
for i in range(0, len(texts), batch_size):
|
||||
res.extend(self.model.encode(texts[i:i + batch_size]).tolist())
|
||||
res.extend(self._model.encode(texts[i:i + batch_size]).tolist())
|
||||
return np.array(res), token_count
|
||||
|
||||
def encode_queries(self, text: str):
|
||||
token_count = num_tokens_from_string(text)
|
||||
return self.model.encode_queries([text]).tolist()[0], token_count
|
||||
return self._model.encode_queries([text]).tolist()[0], token_count
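    # Usage sketch (illustrative only; the model name is an assumption): texts are
    # truncated to 2048 tokens and embedded in batches of 32.
    #   embd = DefaultEmbedding(key=None, model_name="BAAI/bge-large-zh-v1.5")
    #   vecs, n_tokens = embd.encode(["hello world", "你好"])
    #   qvec, n = embd.encode_queries("what is ragflow?")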
|
||||
|
||||
|
||||
class OpenAIEmbed(Base):
|
||||
@ -95,17 +95,27 @@ class OpenAIEmbed(Base):
|
||||
self.model_name = model_name
|
||||
|
||||
def encode(self, texts: list, batch_size=32):
|
||||
texts = [truncate(t, 8196) for t in texts]
|
||||
res = self.client.embeddings.create(input=texts,
|
||||
model=self.model_name)
|
||||
return np.array([d.embedding for d in res.data]
|
||||
), res.usage.total_tokens
|
||||
|
||||
def encode_queries(self, text):
|
||||
res = self.client.embeddings.create(input=[text],
|
||||
res = self.client.embeddings.create(input=[truncate(text, 8196)],
|
||||
model=self.model_name)
|
||||
return np.array(res.data[0].embedding), res.usage.total_tokens
|
||||
|
||||
|
||||
class BaiChuanEmbed(OpenAIEmbed):
|
||||
def __init__(self, key,
|
||||
model_name='Baichuan-Text-Embedding',
|
||||
base_url='https://api.baichuan-ai.com/v1'):
|
||||
if not base_url:
|
||||
base_url = "https://api.baichuan-ai.com/v1"
|
||||
super().__init__(key, model_name, base_url)
|
||||
|
||||
|
||||
class QWenEmbed(Base):
|
||||
def __init__(self, key, model_name="text_embedding_v2", **kwargs):
|
||||
dashscope.api_key = key
|
||||
@ -115,7 +125,7 @@ class QWenEmbed(Base):
|
||||
import dashscope
|
||||
res = []
|
||||
token_count = 0
|
||||
texts = [txt[:2048] for txt in texts]
|
||||
texts = [truncate(t, 2048) for t in texts]
|
||||
for i in range(0, len(texts), batch_size):
|
||||
resp = dashscope.TextEmbedding.call(
|
||||
model=self.model_name,
|
||||
@ -182,16 +192,19 @@ class OllamaEmbed(Base):
|
||||
|
||||
|
||||
class FastEmbed(Base):
|
||||
_model = None
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
key: Optional[str] = None,
|
||||
model_name: str = "BAAI/bge-small-en-v1.5",
|
||||
cache_dir: Optional[str] = None,
|
||||
threads: Optional[int] = None,
|
||||
**kwargs,
|
||||
self,
|
||||
key: Optional[str] = None,
|
||||
model_name: str = "BAAI/bge-small-en-v1.5",
|
||||
cache_dir: Optional[str] = None,
|
||||
threads: Optional[int] = None,
|
||||
**kwargs,
|
||||
):
|
||||
from fastembed import TextEmbedding
|
||||
self._model = TextEmbedding(model_name, cache_dir, threads, **kwargs)
|
||||
if not FastEmbed._model:
|
||||
self._model = TextEmbedding(model_name, cache_dir, threads, **kwargs)
|
||||
|
||||
def encode(self, texts: list, batch_size=32):
|
||||
# Using the internal tokenizer to encode the texts and get the total
|
||||
@ -238,8 +251,8 @@ class YoudaoEmbed(Base):
|
||||
try:
|
||||
print("LOADING BCE...")
|
||||
YoudaoEmbed._client = qanthing(model_name_or_path=os.path.join(
|
||||
get_project_base_directory(),
|
||||
"rag/res/bce-embedding-base_v1"))
|
||||
get_home_cache_dir(),
|
||||
"bce-embedding-base_v1"))
|
||||
except Exception as e:
|
||||
YoudaoEmbed._client = qanthing(
|
||||
model_name_or_path=model_name.replace(
|
||||
@ -258,3 +271,29 @@ class YoudaoEmbed(Base):
|
||||
def encode_queries(self, text):
|
||||
embds = YoudaoEmbed._client.encode([text])
|
||||
return np.array(embds[0]), num_tokens_from_string(text)
|
||||
|
||||
|
||||
class JinaEmbed(Base):
|
||||
def __init__(self, key, model_name="jina-embeddings-v2-base-zh",
|
||||
base_url="https://api.jina.ai/v1/embeddings"):
|
||||
|
||||
self.base_url = "https://api.jina.ai/v1/embeddings"
|
||||
self.headers = {
|
||||
"Content-Type": "application/json",
|
||||
"Authorization": f"Bearer {key}"
|
||||
}
|
||||
self.model_name = model_name
|
||||
|
||||
def encode(self, texts: list, batch_size=None):
|
||||
texts = [truncate(t, 8196) for t in texts]
|
||||
data = {
|
||||
"model": self.model_name,
|
||||
"input": texts,
|
||||
'encoding_type': 'float'
|
||||
}
|
||||
res = requests.post(self.base_url, headers=self.headers, json=data).json()
|
||||
return np.array([d["embedding"] for d in res["data"]]), res["usage"]["total_tokens"]
|
||||
|
||||
def encode_queries(self, text):
|
||||
embds, cnt = self.encode([text])
|
||||
return np.array(embds[0]), cnt
|
||||
rag/llm/rerank_model.py · 116 lines · new file
@@ -0,0 +1,116 @@
|
||||
#
|
||||
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
import re
|
||||
import requests
|
||||
import torch
|
||||
from FlagEmbedding import FlagReranker
|
||||
from huggingface_hub import snapshot_download
|
||||
import os
|
||||
from abc import ABC
|
||||
import numpy as np
|
||||
from api.utils.file_utils import get_home_cache_dir
|
||||
from rag.utils import num_tokens_from_string, truncate
|
||||
|
||||
def sigmoid(x):
|
||||
return 1 / (1 + np.exp(-x))
|
||||
|
||||
class Base(ABC):
|
||||
def __init__(self, key, model_name):
|
||||
pass
|
||||
|
||||
def similarity(self, query: str, texts: list):
|
||||
raise NotImplementedError("Please implement similarity method!")
|
||||
|
||||
|
||||
class DefaultRerank(Base):
    _model = None

    def __init__(self, key, model_name, **kwargs):
        """
        If you have trouble downloading HuggingFace models, -_^ this might help!!

        For Linux:
        export HF_ENDPOINT=https://hf-mirror.com

        For Windows:
        Good luck
        ^_-

        """
        if not DefaultRerank._model:
            try:
                self._model = FlagReranker(os.path.join(get_home_cache_dir(), re.sub(r"^[a-zA-Z]+/", "", model_name)),
                                           use_fp16=torch.cuda.is_available())
            except Exception as e:
                self._model = snapshot_download(repo_id=model_name,
                                                local_dir=os.path.join(get_home_cache_dir(),
                                                                       re.sub(r"^[a-zA-Z]+/", "", model_name)),
                                                local_dir_use_symlinks=False)
                self._model = FlagReranker(os.path.join(get_home_cache_dir(), model_name),
                                           use_fp16=torch.cuda.is_available())

    def similarity(self, query: str, texts: list):
        pairs = [(query, truncate(t, 2048)) for t in texts]
        token_count = 0
        for _, t in pairs:
            token_count += num_tokens_from_string(t)
        batch_size = 32
        res = []
        for i in range(0, len(pairs), batch_size):
            scores = self._model.compute_score(pairs[i:i + batch_size], max_length=2048)
            scores = sigmoid(np.array(scores))
            res.extend(scores)
        return np.array(res), token_count
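
    # Usage sketch (illustrative only; the model name is an assumption):
    #   reranker = DefaultRerank(key=None, model_name="BAAI/bge-reranker-v2-m3")
    #   scores, n_tokens = reranker.similarity("what is RAG?", ["chunk one", "chunk two"])
    # The returned scores are sigmoid-squashed into [0, 1], one per input text.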
|
||||
|
||||
|
||||
class JinaRerank(Base):
|
||||
def __init__(self, key, model_name="jina-reranker-v1-base-en",
|
||||
base_url="https://api.jina.ai/v1/rerank"):
|
||||
self.base_url = "https://api.jina.ai/v1/rerank"
|
||||
self.headers = {
|
||||
"Content-Type": "application/json",
|
||||
"Authorization": f"Bearer {key}"
|
||||
}
|
||||
self.model_name = model_name
|
||||
|
||||
def similarity(self, query: str, texts: list):
|
||||
texts = [truncate(t, 8196) for t in texts]
|
||||
data = {
|
||||
"model": self.model_name,
|
||||
"query": query,
|
||||
"documents": texts,
|
||||
"top_n": len(texts)
|
||||
}
|
||||
res = requests.post(self.base_url, headers=self.headers, json=data).json()
|
||||
return np.array([d["relevance_score"] for d in res["results"]]), res["usage"]["total_tokens"]
|
||||
|
||||
|
||||
class YoudaoRerank(DefaultRerank):
|
||||
_model = None
|
||||
|
||||
def __init__(self, key=None, model_name="maidalun1020/bce-reranker-base_v1", **kwargs):
|
||||
from BCEmbedding import RerankerModel
|
||||
if not YoudaoRerank._model:
|
||||
try:
|
||||
print("LOADING BCE...")
|
||||
YoudaoRerank._model = RerankerModel(model_name_or_path=os.path.join(
|
||||
get_home_cache_dir(),
|
||||
re.sub(r"^[a-zA-Z]+/", "", model_name)))
|
||||
except Exception as e:
|
||||
YoudaoRerank._model = RerankerModel(
|
||||
model_name_or_path=model_name.replace(
|
||||
"maidalun1020", "InfiniFlow"))
|
||||
|
||||
@ -2,9 +2,10 @@ import argparse
|
||||
import pickle
|
||||
import random
|
||||
import time
|
||||
from copy import deepcopy
|
||||
from multiprocessing.connection import Listener
|
||||
from threading import Thread
|
||||
from transformers import AutoModelForCausalLM, AutoTokenizer
|
||||
from transformers import AutoModelForCausalLM, AutoTokenizer, TextStreamer
|
||||
|
||||
|
||||
def torch_gc():
|
||||
@ -95,6 +96,32 @@ def chat(messages, gen_conf):
|
||||
return str(e)
|
||||
|
||||
|
||||
def chat_streamly(messages, gen_conf):
|
||||
global tokenizer
|
||||
model = Model()
|
||||
try:
|
||||
torch_gc()
|
||||
conf = deepcopy(gen_conf)
|
||||
print(messages, conf)
|
||||
text = tokenizer.apply_chat_template(
|
||||
messages,
|
||||
tokenize=False,
|
||||
add_generation_prompt=True
|
||||
)
|
||||
model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
|
||||
streamer = TextStreamer(tokenizer)
|
||||
conf["inputs"] = model_inputs.input_ids
|
||||
conf["streamer"] = streamer
|
||||
conf["max_new_tokens"] = conf["max_tokens"]
|
||||
del conf["max_tokens"]
|
||||
thread = Thread(target=model.generate, kwargs=conf)
|
||||
thread.start()
|
||||
for _, new_text in enumerate(streamer):
|
||||
yield new_text
|
||||
except Exception as e:
|
||||
yield "**ERROR**: " + str(e)
|
||||
|
||||
|
||||
def Model():
|
||||
global models
|
||||
random.seed(time.time())
|
||||
@ -113,6 +140,7 @@ if __name__ == "__main__":
|
||||
|
||||
handler = RPCHandler()
|
||||
handler.register_function(chat)
|
||||
handler.register_function(chat_streamly)
|
||||
|
||||
models = []
|
||||
for _ in range(1):
|
||||
|
||||
@ -2,7 +2,7 @@ import random
|
||||
from collections import Counter
|
||||
|
||||
from rag.utils import num_tokens_from_string
|
||||
from . import huqie
|
||||
from . import rag_tokenizer
|
||||
import re
|
||||
import copy
|
||||
|
||||
@ -28,11 +28,17 @@ all_codecs = [
|
||||
def find_codec(blob):
|
||||
global all_codecs
|
||||
for c in all_codecs:
|
||||
try:
|
||||
blob[:1024].decode(c)
|
||||
return c
|
||||
except Exception as e:
|
||||
pass
|
||||
try:
|
||||
blob.decode(c)
|
||||
return c
|
||||
except Exception as e:
|
||||
pass
|
||||
|
||||
return "utf-8"
|
||||
|
||||
|
||||
@ -109,8 +115,8 @@ def is_english(texts):
|
||||
def tokenize(d, t, eng):
|
||||
d["content_with_weight"] = t
|
||||
t = re.sub(r"</?(table|td|caption|tr|th)( [^<>]{0,12})?>", " ", t)
|
||||
d["content_ltks"] = huqie.qie(t)
|
||||
d["content_sm_ltks"] = huqie.qieqie(d["content_ltks"])
|
||||
d["content_ltks"] = rag_tokenizer.tokenize(t)
|
||||
d["content_sm_ltks"] = rag_tokenizer.fine_grained_tokenize(d["content_ltks"])
|
||||
|
||||
|
||||
def tokenize_chunks(chunks, doc, eng, pdf_parser):
|
||||
|
||||
@ -1,475 +0,0 @@
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
import re
|
||||
import os
|
||||
import copy
|
||||
import base64
|
||||
import magic
|
||||
from dataclasses import dataclass
|
||||
from typing import List
|
||||
import numpy as np
|
||||
from io import BytesIO
|
||||
|
||||
|
||||
class HuChunker:
|
||||
|
||||
@dataclass
|
||||
class Fields:
|
||||
text_chunks: List = None
|
||||
table_chunks: List = None
|
||||
|
||||
def __init__(self):
|
||||
self.MAX_LVL = 12
|
||||
self.proj_patt = [
|
||||
(r"第[零一二三四五六七八九十百]+章", 1),
|
||||
(r"第[零一二三四五六七八九十百]+[条节]", 2),
|
||||
(r"[零一二三四五六七八九十百]+[、 ]", 3),
|
||||
(r"[\((][零一二三四五六七八九十百]+[)\)]", 4),
|
||||
(r"[0-9]+(、|\.[ ]|\.[^0-9])", 5),
|
||||
(r"[0-9]+\.[0-9]+(、|[ ]|[^0-9])", 6),
|
||||
(r"[0-9]+\.[0-9]+\.[0-9]+(、|[ ]|[^0-9])", 7),
|
||||
(r"[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+(、|[ ]|[^0-9])", 8),
|
||||
(r".{,48}[::??]@", 9),
|
||||
(r"[0-9]+)", 10),
|
||||
(r"[\((][0-9]+[)\)]", 11),
|
||||
(r"[零一二三四五六七八九十百]+是", 12),
|
||||
(r"[⚫•➢✓ ]", 12)
|
||||
]
|
||||
self.lines = []
|
||||
|
||||
def _garbage(self, txt):
|
||||
patt = [
|
||||
r"(在此保证|不得以任何形式翻版|请勿传阅|仅供内部使用|未经事先书面授权)",
|
||||
r"(版权(归本公司)*所有|免责声明|保留一切权力|承担全部责任|特别声明|报告中涉及)",
|
||||
r"(不承担任何责任|投资者的通知事项:|任何机构和个人|本报告仅为|不构成投资)",
|
||||
r"(不构成对任何个人或机构投资建议|联系其所在国家|本报告由从事证券交易)",
|
||||
r"(本研究报告由|「认可投资者」|所有研究报告均以|请发邮件至)",
|
||||
r"(本报告仅供|市场有风险,投资需谨慎|本报告中提及的)",
|
||||
r"(本报告反映|此信息仅供|证券分析师承诺|具备证券投资咨询业务资格)",
|
||||
r"^(时间|签字|签章)[::]",
|
||||
r"(参考文献|目录索引|图表索引)",
|
||||
r"[ ]*年[ ]+月[ ]+日",
|
||||
r"^(中国证券业协会|[0-9]+年[0-9]+月[0-9]+日)$",
|
||||
r"\.{10,}",
|
||||
r"(———————END|帮我转发|欢迎收藏|快来关注我吧)"
|
||||
]
|
||||
return any([re.search(p, txt) for p in patt])
|
||||
|
||||
def _proj_match(self, line):
|
||||
for p, j in self.proj_patt:
|
||||
if re.match(p, line):
|
||||
return j
|
||||
return
|
||||
|
||||
def _does_proj_match(self):
|
||||
mat = [None for _ in range(len(self.lines))]
|
||||
for i in range(len(self.lines)):
|
||||
mat[i] = self._proj_match(self.lines[i])
|
||||
return mat
|
||||
|
||||
def naive_text_chunk(self, text, ti="", MAX_LEN=612):
|
||||
if text:
|
||||
self.lines = [l.strip().replace(u'\u3000', u' ')
|
||||
.replace(u'\xa0', u'')
|
||||
for l in text.split("\n\n")]
|
||||
self.lines = [l for l in self.lines if not self._garbage(l)]
|
||||
self.lines = [re.sub(r"([ ]+| )", " ", l)
|
||||
for l in self.lines if l]
|
||||
if not self.lines:
|
||||
return []
|
||||
arr = self.lines
|
||||
|
||||
res = [""]
|
||||
i = 0
|
||||
while i < len(arr):
|
||||
a = arr[i]
|
||||
if not a:
|
||||
i += 1
|
||||
continue
|
||||
if len(a) > MAX_LEN:
|
||||
a_ = a.split("\n")
|
||||
if len(a_) >= 2:
|
||||
arr.pop(i)
|
||||
for j in range(2, len(a_) + 1):
|
||||
if len("\n".join(a_[:j])) >= MAX_LEN:
|
||||
arr.insert(i, "\n".join(a_[:j - 1]))
|
||||
arr.insert(i + 1, "\n".join(a_[j - 1:]))
|
||||
break
|
||||
else:
|
||||
assert False, f"Can't split: {a}"
|
||||
continue
|
||||
|
||||
if len(res[-1]) < MAX_LEN / 3:
|
||||
res[-1] += "\n" + a
|
||||
else:
|
||||
res.append(a)
|
||||
i += 1
|
||||
|
||||
if ti:
|
||||
for i in range(len(res)):
|
||||
if res[i].find("——来自") >= 0:
|
||||
continue
|
||||
res[i] += f"\t——来自“{ti}”"
|
||||
|
||||
return res
|
||||
|
||||
def _merge(self):
|
||||
# merge continuous same level text
|
||||
lines = [self.lines[0]] if self.lines else []
|
||||
for i in range(1, len(self.lines)):
|
||||
if self.mat[i] == self.mat[i - 1] \
|
||||
and len(lines[-1]) < 256 \
|
||||
and len(self.lines[i]) < 256:
|
||||
lines[-1] += "\n" + self.lines[i]
|
||||
continue
|
||||
lines.append(self.lines[i])
|
||||
self.lines = lines
|
||||
self.mat = self._does_proj_match()
|
||||
return self.mat
|
||||
|
||||
def text_chunks(self, text):
|
||||
if text:
|
||||
self.lines = [l.strip().replace(u'\u3000', u' ')
|
||||
.replace(u'\xa0', u'')
|
||||
for l in re.split(r"[\r\n]", text)]
|
||||
self.lines = [l for l in self.lines if not self._garbage(l)]
|
||||
self.lines = [l for l in self.lines if l]
|
||||
self.mat = self._does_proj_match()
|
||||
mat = self._merge()
|
||||
|
||||
tree = []
|
||||
for i in range(len(self.lines)):
|
||||
tree.append({"proj": mat[i],
|
||||
"children": [],
|
||||
"read": False})
|
||||
# find all children
|
||||
for i in range(len(self.lines) - 1):
|
||||
if tree[i]["proj"] is None:
|
||||
continue
|
||||
ed = i + 1
|
||||
while ed < len(tree) and (tree[ed]["proj"] is None or
|
||||
tree[ed]["proj"] > tree[i]["proj"]):
|
||||
ed += 1
|
||||
|
||||
nxt = tree[i]["proj"] + 1
|
||||
st = set([p["proj"] for p in tree[i + 1: ed] if p["proj"]])
|
||||
while nxt not in st:
|
||||
nxt += 1
|
||||
if nxt > self.MAX_LVL:
|
||||
break
|
||||
if nxt <= self.MAX_LVL:
|
||||
for j in range(i + 1, ed):
|
||||
if tree[j]["proj"] is not None:
|
||||
break
|
||||
tree[i]["children"].append(j)
|
||||
for j in range(i + 1, ed):
|
||||
if tree[j]["proj"] != nxt:
|
||||
continue
|
||||
tree[i]["children"].append(j)
|
||||
else:
|
||||
for j in range(i + 1, ed):
|
||||
tree[i]["children"].append(j)
|
||||
|
||||
# get DFS combinations, find all the paths to leaf
|
||||
paths = []
|
||||
|
||||
def dfs(i, path):
|
||||
nonlocal tree, paths
|
||||
path.append(i)
|
||||
tree[i]["read"] = True
|
||||
if len(self.lines[i]) > 256:
|
||||
paths.append(path)
|
||||
return
|
||||
if not tree[i]["children"]:
|
||||
if len(path) > 1 or len(self.lines[i]) >= 32:
|
||||
paths.append(path)
|
||||
return
|
||||
for j in tree[i]["children"]:
|
||||
dfs(j, copy.deepcopy(path))
|
||||
|
||||
for i, t in enumerate(tree):
|
||||
if t["read"]:
|
||||
continue
|
||||
dfs(i, [])
|
||||
|
||||
# concat txt on the path for all paths
|
||||
res = []
|
||||
lines = np.array(self.lines)
|
||||
for p in paths:
|
||||
if len(p) < 2:
|
||||
tree[p[0]]["read"] = False
|
||||
continue
|
||||
txt = "\n".join(lines[p[:-1]]) + "\n" + lines[p[-1]]
|
||||
res.append(txt)
|
||||
# concat continuous orphans
|
||||
assert len(tree) == len(lines)
|
||||
ii = 0
|
||||
while ii < len(tree):
|
||||
if tree[ii]["read"]:
|
||||
ii += 1
|
||||
continue
|
||||
txt = lines[ii]
|
||||
e = ii + 1
|
||||
while e < len(tree) and not tree[e]["read"] and len(txt) < 256:
|
||||
txt += "\n" + lines[e]
|
||||
e += 1
|
||||
res.append(txt)
|
||||
ii = e
|
||||
|
||||
# if the node has not been read, find its daddy
|
||||
def find_daddy(st):
|
||||
nonlocal lines, tree
|
||||
proj = tree[st]["proj"]
|
||||
if len(self.lines[st]) > 512:
|
||||
return [st]
|
||||
if proj is None:
|
||||
proj = self.MAX_LVL + 1
|
||||
for i in range(st - 1, -1, -1):
|
||||
if tree[i]["proj"] and tree[i]["proj"] < proj:
|
||||
a = [st] + find_daddy(i)
|
||||
return a
|
||||
return []
|
||||
|
||||
return res
|
||||
|
||||
|
||||
class PdfChunker(HuChunker):
|
||||
|
||||
def __init__(self, pdf_parser):
|
||||
self.pdf = pdf_parser
|
||||
super().__init__()
|
||||
|
||||
def tableHtmls(self, pdfnm):
|
||||
_, tbls = self.pdf(pdfnm, return_html=True)
|
||||
res = []
|
||||
for img, arr in tbls:
|
||||
if arr[0].find("<table>") < 0:
|
||||
continue
|
||||
buffered = BytesIO()
|
||||
if img:
|
||||
img.save(buffered, format="JPEG")
|
||||
img_str = base64.b64encode(
|
||||
buffered.getvalue()).decode('utf-8') if img else ""
|
||||
res.append({"table": arr[0], "image": img_str})
|
||||
return res
|
||||
|
||||
def html(self, pdfnm):
|
||||
txts, tbls = self.pdf(pdfnm, return_html=True)
|
||||
res = []
|
||||
txt_cks = self.text_chunks(txts)
|
||||
for txt, img in [(self.pdf.remove_tag(c), self.pdf.crop(c))
|
||||
for c in txt_cks]:
|
||||
buffered = BytesIO()
|
||||
if img:
|
||||
img.save(buffered, format="JPEG")
|
||||
img_str = base64.b64encode(
|
||||
buffered.getvalue()).decode('utf-8') if img else ""
|
||||
res.append({"table": "<p>%s</p>" % txt.replace("\n", "<br/>"),
|
||||
"image": img_str})
|
||||
|
||||
for img, arr in tbls:
|
||||
if not arr:
|
||||
continue
|
||||
buffered = BytesIO()
|
||||
if img:
|
||||
img.save(buffered, format="JPEG")
|
||||
img_str = base64.b64encode(
|
||||
buffered.getvalue()).decode('utf-8') if img else ""
|
||||
res.append({"table": arr[0], "image": img_str})
|
||||
|
||||
return res
|
||||
|
||||
def __call__(self, pdfnm, return_image=True, naive_chunk=False):
|
||||
flds = self.Fields()
|
||||
text, tbls = self.pdf(pdfnm)
|
||||
fnm = pdfnm
|
||||
txt_cks = self.text_chunks(text) if not naive_chunk else \
|
||||
self.naive_text_chunk(text, ti=fnm if isinstance(fnm, str) else "")
|
||||
flds.text_chunks = [(self.pdf.remove_tag(c),
|
||||
self.pdf.crop(c) if return_image else None) for c in txt_cks]
|
||||
|
||||
flds.table_chunks = [(arr, img if return_image else None)
|
||||
for img, arr in tbls]
|
||||
return flds
|
||||
|
||||
|
||||
class DocxChunker(HuChunker):
|
||||
|
||||
def __init__(self, doc_parser):
|
||||
self.doc = doc_parser
|
||||
super().__init__()
|
||||
|
||||
def _does_proj_match(self):
|
||||
mat = []
|
||||
for s in self.styles:
|
||||
s = s.split(" ")[-1]
|
||||
try:
|
||||
mat.append(int(s))
|
||||
except Exception as e:
|
||||
mat.append(None)
|
||||
return mat
|
||||
|
||||
def _merge(self):
|
||||
i = 1
|
||||
while i < len(self.lines):
|
||||
if self.mat[i] == self.mat[i - 1] \
|
||||
and len(self.lines[i - 1]) < 256 \
|
||||
and len(self.lines[i]) < 256:
|
||||
self.lines[i - 1] += "\n" + self.lines[i]
|
||||
self.styles.pop(i)
|
||||
self.lines.pop(i)
|
||||
self.mat.pop(i)
|
||||
continue
|
||||
i += 1
|
||||
self.mat = self._does_proj_match()
|
||||
return self.mat
|
||||
|
||||
def __call__(self, fnm):
|
||||
flds = self.Fields()
|
||||
flds.title = os.path.splitext(
|
||||
os.path.basename(fnm))[0] if isinstance(
|
||||
fnm, type("")) else ""
|
||||
secs, tbls = self.doc(fnm)
|
||||
self.lines = [l for l, s in secs]
|
||||
self.styles = [s for l, s in secs]
|
||||
|
||||
txt_cks = self.text_chunks("")
|
||||
flds.text_chunks = [(t, None) for t in txt_cks if not self._garbage(t)]
|
||||
flds.table_chunks = [(tb, None) for tb in tbls for t in tb if t]
|
||||
return flds
|
||||
|
||||
|
||||
class ExcelChunker(HuChunker):
|
||||
|
||||
def __init__(self, excel_parser):
|
||||
self.excel = excel_parser
|
||||
super().__init__()
|
||||
|
||||
def __call__(self, fnm):
|
||||
flds = self.Fields()
|
||||
flds.text_chunks = [(t, None) for t in self.excel(fnm)]
|
||||
flds.table_chunks = []
|
||||
return flds
|
||||
|
||||
|
||||
class PptChunker(HuChunker):
|
||||
|
||||
def __init__(self):
|
||||
super().__init__()
|
||||
|
||||
def __extract(self, shape):
|
||||
if shape.shape_type == 19:
|
||||
tb = shape.table
|
||||
rows = []
|
||||
for i in range(1, len(tb.rows)):
|
||||
rows.append("; ".join([tb.cell(
|
||||
0, j).text + ": " + tb.cell(i, j).text for j in range(len(tb.columns)) if tb.cell(i, j)]))
|
||||
return "\n".join(rows)
|
||||
|
||||
if shape.has_text_frame:
|
||||
return shape.text_frame.text
|
||||
|
||||
if shape.shape_type == 6:
|
||||
texts = []
|
||||
for p in shape.shapes:
|
||||
t = self.__extract(p)
|
||||
if t:
|
||||
texts.append(t)
|
||||
return "\n".join(texts)
|
||||
|
||||
def __call__(self, fnm):
|
||||
from pptx import Presentation
|
||||
ppt = Presentation(fnm) if isinstance(
|
||||
fnm, str) else Presentation(
|
||||
BytesIO(fnm))
|
||||
txts = []
|
||||
for slide in ppt.slides:
|
||||
texts = []
|
||||
for shape in slide.shapes:
|
||||
txt = self.__extract(shape)
|
||||
if txt:
|
||||
texts.append(txt)
|
||||
txts.append("\n".join(texts))
|
||||
|
||||
import aspose.slides as slides
|
||||
import aspose.pydrawing as drawing
|
||||
imgs = []
|
||||
with slides.Presentation(BytesIO(fnm)) as presentation:
|
||||
for slide in presentation.slides:
|
||||
buffered = BytesIO()
|
||||
slide.get_thumbnail(
|
||||
0.5, 0.5).save(
|
||||
buffered, drawing.imaging.ImageFormat.jpeg)
|
||||
imgs.append(buffered.getvalue())
|
||||
assert len(imgs) == len(
|
||||
txts), "Slides text and image do not match: {} vs. {}".format(len(imgs), len(txts))
|
||||
|
||||
flds = self.Fields()
|
||||
flds.text_chunks = [(txts[i], imgs[i]) for i in range(len(txts))]
|
||||
flds.table_chunks = []
|
||||
|
||||
return flds
|
||||
|
||||
|
||||
class TextChunker(HuChunker):
|
||||
|
||||
@dataclass
|
||||
class Fields:
|
||||
text_chunks: List = None
|
||||
table_chunks: List = None
|
||||
|
||||
def __init__(self):
|
||||
super().__init__()
|
||||
|
||||
@staticmethod
|
||||
def is_binary_file(file_path):
|
||||
mime = magic.Magic(mime=True)
|
||||
if isinstance(file_path, str):
|
||||
file_type = mime.from_file(file_path)
|
||||
else:
|
||||
file_type = mime.from_buffer(file_path)
|
||||
if 'text' in file_type:
|
||||
return False
|
||||
else:
|
||||
return True
|
||||
|
||||
def __call__(self, fnm):
|
||||
flds = self.Fields()
|
||||
if self.is_binary_file(fnm):
|
||||
return flds
|
||||
txt = ""
|
||||
if isinstance(fnm, str):
|
||||
with open(fnm, "r") as f:
|
||||
txt = f.read()
|
||||
else:
|
||||
txt = fnm.decode("utf-8")
|
||||
flds.text_chunks = [(c, None) for c in self.naive_text_chunk(txt)]
|
||||
flds.table_chunks = []
|
||||
return flds
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
import sys
|
||||
sys.path.append(os.path.dirname(__file__) + "/../")
|
||||
if sys.argv[1].split(".")[-1].lower() == "pdf":
|
||||
from deepdoc.parser import PdfParser
|
||||
ckr = PdfChunker(PdfParser())
|
||||
if sys.argv[1].split(".")[-1].lower().find("doc") >= 0:
|
||||
from deepdoc.parser import DocxParser
|
||||
ckr = DocxChunker(DocxParser())
|
||||
if sys.argv[1].split(".")[-1].lower().find("xlsx") >= 0:
|
||||
from deepdoc.parser import ExcelParser
|
||||
ckr = ExcelChunker(ExcelParser())
|
||||
|
||||
# ckr.html(sys.argv[1])
|
||||
print(ckr(sys.argv[1]))
|
||||
@ -7,14 +7,13 @@ import logging
|
||||
import copy
|
||||
from elasticsearch_dsl import Q
|
||||
|
||||
from rag.nlp import huqie, term_weight, synonym
|
||||
|
||||
from rag.nlp import rag_tokenizer, term_weight, synonym
|
||||
|
||||
class EsQueryer:
|
||||
def __init__(self, es):
|
||||
self.tw = term_weight.Dealer()
|
||||
self.es = es
|
||||
self.syn = synonym.Dealer(None)
|
||||
self.syn = synonym.Dealer()
|
||||
self.flds = ["ask_tks^10", "ask_small_tks"]
|
||||
|
||||
@staticmethod
|
||||
@ -37,7 +36,7 @@ class EsQueryer:
|
||||
patts = [
|
||||
(r"是*(什么样的|哪家|一下|那家|啥样|咋样了|什么时候|何时|何地|何人|是否|是不是|多少|哪里|怎么|哪儿|怎么样|如何|哪些|是啥|啥是|啊|吗|呢|吧|咋|什么|有没有|呀)是*", ""),
|
||||
(r"(^| )(what|who|how|which|where|why)('re|'s)? ", " "),
|
||||
(r"(^| )('s|'re|is|are|were|was|do|does|did|don't|doesn't|didn't|has|have|be|there|you|me|your|my|mine|just|please|may|i|should|would|wouldn't|will|won't|done|go|for|with|so|the|a|an|by|i'm|it's|he's|she's|they|they're|you're|as|by|on|in|at|up|out|down)", " ")
|
||||
(r"(^| )('s|'re|is|are|were|was|do|does|did|don't|doesn't|didn't|has|have|be|there|you|me|your|my|mine|just|please|may|i|should|would|wouldn't|will|won't|done|go|for|with|so|the|a|an|by|i'm|it's|he's|she's|they|they're|you're|as|by|on|in|at|up|out|down) ", " ")
|
||||
]
|
||||
for r, p in patts:
|
||||
txt = re.sub(r, p, txt, flags=re.IGNORECASE)
|
||||
@ -45,18 +44,20 @@ class EsQueryer:
|
||||
|
||||
def question(self, txt, tbl="qa", min_match="60%"):
|
||||
txt = re.sub(
|
||||
r"[ \r\n\t,,。??/`!!&]+",
|
||||
r"[ :\r\n\t,,。??/`!!&\^%%]+",
|
||||
" ",
|
||||
huqie.tradi2simp(
|
||||
huqie.strQ2B(
|
||||
rag_tokenizer.tradi2simp(
|
||||
rag_tokenizer.strQ2B(
|
||||
txt.lower()))).strip()
|
||||
txt = EsQueryer.rmWWW(txt)
|
||||
|
||||
if not self.isChinese(txt):
|
||||
tks = huqie.qie(txt).split(" ")
|
||||
q = copy.deepcopy(tks)
|
||||
for i in range(1, len(tks)):
|
||||
q.append("\"%s %s\"^2" % (tks[i - 1], tks[i]))
|
||||
tks = rag_tokenizer.tokenize(txt).split(" ")
|
||||
tks_w = self.tw.weights(tks)
|
||||
tks_w = [(re.sub(r"[ \\\"']+", "", tk), w) for tk, w in tks_w]
|
||||
q = ["{}^{:.4f}".format(tk, w) for tk, w in tks_w if tk]
|
||||
for i in range(1, len(tks_w)):
|
||||
q.append("\"%s %s\"^%.4f" % (tks_w[i - 1][0], tks_w[i][0], max(tks_w[i - 1][1], tks_w[i][1])*2))
|
||||
if not q:
|
||||
q.append(txt)
|
||||
return Q("bool",
|
||||
@ -65,7 +66,7 @@ class EsQueryer:
|
||||
boost=1)#, minimum_should_match=min_match)
|
||||
), tks
|
||||
|
||||
def needQieqie(tk):
|
||||
def need_fine_grained_tokenize(tk):
|
||||
if len(tk) < 4:
|
||||
return False
|
||||
if re.match(r"[0-9a-z\.\+#_\*-]+$", tk):
|
||||
@ -81,7 +82,7 @@ class EsQueryer:
|
||||
logging.info(json.dumps(twts, ensure_ascii=False))
|
||||
tms = []
|
||||
for tk, w in sorted(twts, key=lambda x: x[1] * -1):
|
||||
sm = huqie.qieqie(tk).split(" ") if needQieqie(tk) else []
|
||||
sm = rag_tokenizer.fine_grained_tokenize(tk).split(" ") if need_fine_grained_tokenize(tk) else []
|
||||
sm = [
|
||||
re.sub(
|
||||
r"[ ,\./;'\[\]\\`~!@#$%\^&\*\(\)=\+_<>\?:\"\{\}\|,。;‘’【】、!¥……()——《》?:“”-]+",
|
||||
@ -110,10 +111,10 @@ class EsQueryer:
|
||||
if len(twts) > 1:
|
||||
tms += f" (\"%s\"~4)^1.5" % (" ".join([t for t, _ in twts]))
|
||||
if re.match(r"[0-9a-z ]+$", tt):
|
||||
tms = f"(\"{tt}\" OR \"%s\")" % huqie.qie(tt)
|
||||
tms = f"(\"{tt}\" OR \"%s\")" % rag_tokenizer.tokenize(tt)
|
||||
|
||||
syns = " OR ".join(
|
||||
["\"%s\"^0.7" % EsQueryer.subSpecialChar(huqie.qie(s)) for s in syns])
|
||||
["\"%s\"^0.7" % EsQueryer.subSpecialChar(rag_tokenizer.tokenize(s)) for s in syns])
|
||||
if syns:
|
||||
tms = f"({tms})^5 OR ({syns})^0.7"
|
||||
|
||||
@ -136,7 +137,11 @@ class EsQueryer:
|
||||
from sklearn.metrics.pairwise import cosine_similarity as CosineSimilarity
|
||||
import numpy as np
|
||||
sims = CosineSimilarity([avec], bvecs)
|
||||
tksim = self.token_similarity(atks, btkss)
|
||||
return np.array(sims[0]) * vtweight + \
|
||||
np.array(tksim) * tkweight, tksim, sims[0]
|
||||
|
||||
def token_similarity(self, atks, btkss):
|
||||
def toDict(tks):
|
||||
d = {}
|
||||
if isinstance(tks, str):
|
||||
@ -149,9 +154,7 @@ class EsQueryer:
|
||||
|
||||
atks = toDict(atks)
|
||||
btkss = [toDict(tks) for tks in btkss]
|
||||
tksim = [self.similarity(atks, btks) for btks in btkss]
|
||||
return np.array(sims[0]) * vtweight + \
|
||||
np.array(tksim) * tkweight, tksim, sims[0]
|
||||
return [self.similarity(atks, btks) for btks in btkss]
|
||||
|
||||
def similarity(self, qtwt, dtwt):
|
||||
if isinstance(dtwt, type("")):
|
||||
|
||||
@ -14,7 +14,7 @@ from nltk.stem import PorterStemmer, WordNetLemmatizer
|
||||
from api.utils.file_utils import get_project_base_directory
|
||||
|
||||
|
||||
class Huqie:
|
||||
class RagTokenizer:
|
||||
def key_(self, line):
|
||||
return str(line.lower().encode("utf-8"))[2:-1]
|
||||
|
||||
@ -24,7 +24,7 @@ class Huqie:
|
||||
def loadDict_(self, fnm):
|
||||
print("[HUQIE]:Build trie", fnm, file=sys.stderr)
|
||||
try:
|
||||
of = open(fnm, "r")
|
||||
of = open(fnm, "r", encoding='utf-8')
|
||||
while True:
|
||||
line = of.readline()
|
||||
if not line:
|
||||
@ -241,11 +241,14 @@ class Huqie:
|
||||
|
||||
return self.score_(res[::-1])
|
||||
|
||||
def qie(self, line):
|
||||
def english_normalize_(self, tks):
|
||||
return [self.stemmer.stem(self.lemmatizer.lemmatize(t)) if re.match(r"[a-zA-Z_-]+$", t) else t for t in tks]
|
||||
|
||||
def tokenize(self, line):
|
||||
line = self._strQ2B(line).lower()
|
||||
line = self._tradi2simp(line)
|
||||
zh_num = len([1 for c in line if is_chinese(c)])
|
||||
if zh_num < len(line) * 0.2:
|
||||
if zh_num == 0:
|
||||
return " ".join([self.stemmer.stem(self.lemmatizer.lemmatize(t)) for t in word_tokenize(line)])
|
||||
|
||||
arr = re.split(self.SPLIT_CHAR, line)
|
||||
@ -293,12 +296,12 @@ class Huqie:
|
||||
|
||||
i = e + 1
|
||||
|
||||
res = " ".join(res)
|
||||
res = " ".join(self.english_normalize_(res))
|
||||
if self.DEBUG:
|
||||
print("[TKS]", self.merge_(res))
|
||||
return self.merge_(res)
|
||||
|
||||
def qieqie(self, tks):
|
||||
def fine_grained_tokenize(self, tks):
|
||||
tks = tks.split(" ")
|
||||
zh_num = len([1 for c in tks if c and is_chinese(c[0])])
|
||||
if zh_num < len(tks) * 0.2:
|
||||
@ -336,7 +339,7 @@ class Huqie:
|
||||
|
||||
res.append(stk)
|
||||
|
||||
return " ".join(res)
|
||||
return " ".join(self.english_normalize_(res))
|
||||
|
||||
|
||||
def is_chinese(s):
|
||||
@ -371,53 +374,53 @@ def naiveQie(txt):
|
||||
return tks
|
||||
|
||||
|
||||
hq = Huqie()
|
||||
qie = hq.qie
|
||||
qieqie = hq.qieqie
|
||||
tag = hq.tag
|
||||
freq = hq.freq
|
||||
loadUserDict = hq.loadUserDict
|
||||
addUserDict = hq.addUserDict
|
||||
tradi2simp = hq._tradi2simp
|
||||
strQ2B = hq._strQ2B
|
||||
tokenizer = RagTokenizer()
|
||||
tokenize = tokenizer.tokenize
|
||||
fine_grained_tokenize = tokenizer.fine_grained_tokenize
|
||||
tag = tokenizer.tag
|
||||
freq = tokenizer.freq
|
||||
loadUserDict = tokenizer.loadUserDict
|
||||
addUserDict = tokenizer.addUserDict
|
||||
tradi2simp = tokenizer._tradi2simp
|
||||
strQ2B = tokenizer._strQ2B
|
||||
|
||||
if __name__ == '__main__':
|
||||
huqie = Huqie(debug=True)
|
||||
tknzr = RagTokenizer(debug=True)
|
||||
# huqie.addUserDict("/tmp/tmp.new.tks.dict")
|
||||
tks = huqie.qie(
|
||||
tks = tknzr.tokenize(
|
||||
"哈哈哈哈哈哈哈哈哈哈哈哈哈哈哈哈哈哈哈哈哈哈哈哈哈哈哈哈哈哈哈哈哈哈哈哈哈哈哈哈哈哈哈哈哈哈哈哈哈哈哈哈哈哈哈哈哈哈哈哈哈哈哈哈哈哈哈哈哈哈哈哈哈哈哈哈")
|
||||
print(huqie.qieqie(tks))
|
||||
tks = huqie.qie(
|
||||
print(tknzr.fine_grained_tokenize(tks))
|
||||
tks = tknzr.tokenize(
|
||||
"公开征求意见稿提出,境外投资者可使用自有人民币或外汇投资。使用外汇投资的,可通过债券持有人在香港人民币业务清算行及香港地区经批准可进入境内银行间外汇市场进行交易的境外人民币业务参加行(以下统称香港结算行)办理外汇资金兑换。香港结算行由此所产生的头寸可到境内银行间外汇市场平盘。使用外汇投资的,在其投资的债券到期或卖出后,原则上应兑换回外汇。")
|
||||
print(huqie.qieqie(tks))
|
||||
tks = huqie.qie(
|
||||
print(tknzr.fine_grained_tokenize(tks))
|
||||
tks = tknzr.tokenize(
|
||||
"多校划片就是一个小区对应多个小学初中,让买了学区房的家庭也不确定到底能上哪个学校。目的是通过这种方式为学区房降温,把就近入学落到实处。南京市长江大桥")
|
||||
print(huqie.qieqie(tks))
|
||||
tks = huqie.qie(
|
||||
print(tknzr.fine_grained_tokenize(tks))
|
||||
tks = tknzr.tokenize(
|
||||
"实际上当时他们已经将业务中心偏移到安全部门和针对政府企业的部门 Scripts are compiled and cached aaaaaaaaa")
|
||||
print(huqie.qieqie(tks))
|
||||
tks = huqie.qie("虽然我不怎么玩")
|
||||
print(huqie.qieqie(tks))
|
||||
tks = huqie.qie("蓝月亮如何在外资夹击中生存,那是全宇宙最有意思的")
|
||||
print(huqie.qieqie(tks))
|
||||
tks = huqie.qie(
|
||||
print(tknzr.fine_grained_tokenize(tks))
|
||||
tks = tknzr.tokenize("虽然我不怎么玩")
|
||||
print(tknzr.fine_grained_tokenize(tks))
|
||||
tks = tknzr.tokenize("蓝月亮如何在外资夹击中生存,那是全宇宙最有意思的")
|
||||
print(tknzr.fine_grained_tokenize(tks))
|
||||
tks = tknzr.tokenize(
|
||||
"涡轮增压发动机num最大功率,不像别的共享买车锁电子化的手段,我们接过来是否有意义,黄黄爱美食,不过,今天阿奇要讲到的这家农贸市场,说实话,还真蛮有特色的!不仅环境好,还打出了")
|
||||
print(huqie.qieqie(tks))
|
||||
tks = huqie.qie("这周日你去吗?这周日你有空吗?")
|
||||
print(huqie.qieqie(tks))
|
||||
tks = huqie.qie("Unity3D开发经验 测试开发工程师 c++双11双11 985 211 ")
|
||||
print(huqie.qieqie(tks))
|
||||
tks = huqie.qie(
|
||||
print(tknzr.fine_grained_tokenize(tks))
|
||||
tks = tknzr.tokenize("这周日你去吗?这周日你有空吗?")
|
||||
print(tknzr.fine_grained_tokenize(tks))
|
||||
tks = tknzr.tokenize("Unity3D开发经验 测试开发工程师 c++双11双11 985 211 ")
|
||||
print(tknzr.fine_grained_tokenize(tks))
|
||||
tks = tknzr.tokenize(
|
||||
"数据分析项目经理|数据分析挖掘|数据分析方向|商品数据分析|搜索数据分析 sql python hive tableau Cocos2d-")
|
||||
print(huqie.qieqie(tks))
|
||||
print(tknzr.fine_grained_tokenize(tks))
|
||||
if len(sys.argv) < 2:
|
||||
sys.exit()
|
||||
huqie.DEBUG = False
|
||||
huqie.loadUserDict(sys.argv[1])
|
||||
tknzr.DEBUG = False
|
||||
tknzr.loadUserDict(sys.argv[1])
|
||||
of = open(sys.argv[2], "r")
|
||||
while True:
|
||||
line = of.readline()
|
||||
if not line:
|
||||
break
|
||||
print(huqie.qie(line))
|
||||
print(tknzr.tokenize(line))
|
||||
of.close()
|
||||
@ -9,7 +9,7 @@ from dataclasses import dataclass
|
||||
|
||||
from rag.settings import es_logger
|
||||
from rag.utils import rmSpace
|
||||
from rag.nlp import huqie, query
|
||||
from rag.nlp import rag_tokenizer, query
|
||||
import numpy as np
|
||||
|
||||
|
||||
@ -52,22 +52,27 @@ class Dealer:
|
||||
def search(self, req, idxnm, emb_mdl=None):
|
||||
qst = req.get("question", "")
|
||||
bqry, keywords = self.qryr.question(qst)
|
||||
if req.get("kb_ids"):
|
||||
bqry.filter.append(Q("terms", kb_id=req["kb_ids"]))
|
||||
if req.get("doc_ids"):
|
||||
bqry.filter.append(Q("terms", doc_id=req["doc_ids"]))
|
||||
if "available_int" in req:
|
||||
if req["available_int"] == 0:
|
||||
bqry.filter.append(Q("range", available_int={"lt": 1}))
|
||||
else:
|
||||
bqry.filter.append(
|
||||
Q("bool", must_not=Q("range", available_int={"lt": 1})))
|
||||
def add_filters(bqry):
|
||||
nonlocal req
|
||||
if req.get("kb_ids"):
|
||||
bqry.filter.append(Q("terms", kb_id=req["kb_ids"]))
|
||||
if req.get("doc_ids"):
|
||||
bqry.filter.append(Q("terms", doc_id=req["doc_ids"]))
|
||||
if "available_int" in req:
|
||||
if req["available_int"] == 0:
|
||||
bqry.filter.append(Q("range", available_int={"lt": 1}))
|
||||
else:
|
||||
bqry.filter.append(
|
||||
Q("bool", must_not=Q("range", available_int={"lt": 1})))
|
||||
return bqry
|
||||
|
||||
bqry = add_filters(bqry)
|
||||
bqry.boost = 0.05
|
||||
|
||||
s = Search()
|
||||
pg = int(req.get("page", 1)) - 1
|
||||
ps = int(req.get("size", 1000))
|
||||
topk = int(req.get("topk", 1024))
|
||||
ps = int(req.get("size", topk))
|
||||
src = req.get("fields", ["docnm_kwd", "content_ltks", "kb_id", "img_id", "title_tks", "important_kwd",
|
||||
"image_id", "doc_id", "q_512_vec", "q_768_vec", "position_int",
|
||||
"q_1024_vec", "q_1536_vec", "available_int", "content_with_weight"])
|
||||
@ -117,8 +122,7 @@ class Dealer:
|
||||
es_logger.info("TOTAL: {}".format(self.es.getTotal(res)))
|
||||
if self.es.getTotal(res) == 0 and "knn" in s:
|
||||
bqry, _ = self.qryr.question(qst, min_match="10%")
|
||||
if req.get("kb_ids"):
|
||||
bqry.filter.append(Q("terms", kb_id=req["kb_ids"]))
|
||||
bqry = add_filters(bqry)
|
||||
s["query"] = bqry.to_dict()
|
||||
s["knn"]["filter"] = bqry.to_dict()
|
||||
s["knn"]["similarity"] = 0.17
|
||||
@ -128,7 +132,7 @@ class Dealer:
|
||||
kwds = set([])
|
||||
for k in keywords:
|
||||
kwds.add(k)
|
||||
for kk in huqie.qieqie(k).split(" "):
|
||||
for kk in rag_tokenizer.fine_grained_tokenize(k).split(" "):
|
||||
if len(kk) < 2:
|
||||
continue
|
||||
if kk in kwds:
|
||||
@ -243,7 +247,7 @@ class Dealer:
|
||||
assert len(ans_v[0]) == len(chunk_v[0]), "The dimension of query and chunk do not match: {} vs. {}".format(
|
||||
len(ans_v[0]), len(chunk_v[0]))
|
||||
|
||||
chunks_tks = [huqie.qie(self.qryr.rmWWW(ck)).split(" ")
|
||||
chunks_tks = [rag_tokenizer.tokenize(self.qryr.rmWWW(ck)).split(" ")
|
||||
for ck in chunks]
|
||||
cites = {}
|
||||
thr = 0.63
|
||||
@ -251,7 +255,7 @@ class Dealer:
|
||||
for i, a in enumerate(pieces_):
|
||||
sim, tksim, vtsim = self.qryr.hybrid_similarity(ans_v[i],
|
||||
chunk_v,
|
||||
huqie.qie(
|
||||
rag_tokenizer.tokenize(
|
||||
self.qryr.rmWWW(pieces_[i])).split(" "),
|
||||
chunks_tks,
|
||||
tkweight, vtweight)
|
||||
@ -307,24 +311,49 @@ class Dealer:
|
||||
ins_tw, tkweight, vtweight)
|
||||
return sim, tksim, vtsim
|
||||
|
||||
def rerank_by_model(self, rerank_mdl, sres, query, tkweight=0.3,
|
||||
vtweight=0.7, cfield="content_ltks"):
|
||||
_, keywords = self.qryr.question(query)
|
||||
|
||||
for i in sres.ids:
|
||||
if isinstance(sres.field[i].get("important_kwd", []), str):
|
||||
sres.field[i]["important_kwd"] = [sres.field[i]["important_kwd"]]
|
||||
ins_tw = []
|
||||
for i in sres.ids:
|
||||
content_ltks = sres.field[i][cfield].split(" ")
|
||||
title_tks = [t for t in sres.field[i].get("title_tks", "").split(" ") if t]
|
||||
important_kwd = sres.field[i].get("important_kwd", [])
|
||||
tks = content_ltks + title_tks + important_kwd
|
||||
ins_tw.append(tks)
|
||||
|
||||
tksim = self.qryr.token_similarity(keywords, ins_tw)
|
||||
vtsim,_ = rerank_mdl.similarity(" ".join(keywords), [rmSpace(" ".join(tks)) for tks in ins_tw])
|
||||
|
||||
return tkweight*np.array(tksim) + vtweight*vtsim, tksim, vtsim
|
||||
|
||||
def hybrid_similarity(self, ans_embd, ins_embd, ans, inst):
|
||||
return self.qryr.hybrid_similarity(ans_embd,
|
||||
ins_embd,
|
||||
huqie.qie(ans).split(" "),
|
||||
huqie.qie(inst).split(" "))
|
||||
rag_tokenizer.tokenize(ans).split(" "),
|
||||
rag_tokenizer.tokenize(inst).split(" "))
|
||||
|
||||
def retrieval(self, question, embd_mdl, tenant_id, kb_ids, page, page_size, similarity_threshold=0.2,
|
||||
vector_similarity_weight=0.3, top=1024, doc_ids=None, aggs=True):
|
||||
vector_similarity_weight=0.3, top=1024, doc_ids=None, aggs=True, rerank_mdl=None):
|
||||
ranks = {"total": 0, "chunks": [], "doc_aggs": {}}
|
||||
if not question:
|
||||
return ranks
|
||||
req = {"kb_ids": kb_ids, "doc_ids": doc_ids, "size": page_size,
|
||||
"question": question, "vector": True, "topk": top,
|
||||
"similarity": similarity_threshold}
|
||||
"similarity": similarity_threshold,
|
||||
"available_int": 1}
|
||||
sres = self.search(req, index_name(tenant_id), embd_mdl)
|
||||
|
||||
sim, tsim, vsim = self.rerank(
|
||||
sres, question, 1 - vector_similarity_weight, vector_similarity_weight)
|
||||
if rerank_mdl:
|
||||
sim, tsim, vsim = self.rerank_by_model(rerank_mdl,
|
||||
sres, question, 1 - vector_similarity_weight, vector_similarity_weight)
|
||||
else:
|
||||
sim, tsim, vsim = self.rerank(
|
||||
sres, question, 1 - vector_similarity_weight, vector_similarity_weight)
|
||||
idx = np.argsort(sim * -1)
|
||||
|
||||
dim = len(sres.query_vector)
|
||||
@ -385,7 +414,7 @@ class Dealer:
|
||||
for r in re.finditer(r" ([a-z_]+_l?tks)( like | ?= ?)'([^']+)'", sql):
|
||||
fld, v = r.group(1), r.group(3)
|
||||
match = " MATCH({}, '{}', 'operator=OR;minimum_should_match=30%') ".format(
|
||||
fld, huqie.qieqie(huqie.qie(v)))
|
||||
fld, rag_tokenizer.fine_grained_tokenize(rag_tokenizer.tokenize(v)))
|
||||
replaces.append(
|
||||
("{}{}'{}'".format(
|
||||
r.group(1),
|
||||
@ -403,3 +432,13 @@ class Dealer:
|
||||
except Exception as e:
|
||||
chat_logger.error(f"SQL failure: {sql} =>" + str(e))
|
||||
return {"error": str(e)}
|
||||
|
||||
def chunk_list(self, doc_id, tenant_id, max_count=1024, fields=["docnm_kwd", "content_with_weight", "img_id"]):
|
||||
s = Search()
|
||||
s = s.query(Q("match", doc_id=doc_id))[0:max_count]
|
||||
s = s.to_dict()
|
||||
es_res = self.es.search(s, idxnm=index_name(tenant_id), timeout="600s", src=fields)
|
||||
res = []
|
||||
for index, chunk in enumerate(es_res['hits']['hits']):
|
||||
res.append({fld: chunk['_source'].get(fld) for fld in fields})
|
||||
return res
|
||||
|
||||
@ -17,7 +17,7 @@ class Dealer:
|
||||
try:
|
||||
self.dictionary = json.load(open(path, 'r'))
|
||||
except Exception as e:
|
||||
logging.warn("Miss synonym.json")
|
||||
logging.warn("Missing synonym.json")
|
||||
self.dictionary = {}
|
||||
|
||||
if not redis:
|
||||
|
||||
@ -4,7 +4,7 @@ import json
|
||||
import re
|
||||
import os
|
||||
import numpy as np
|
||||
from rag.nlp import huqie
|
||||
from rag.nlp import rag_tokenizer
|
||||
from api.utils.file_utils import get_project_base_directory
|
||||
|
||||
|
||||
@ -83,7 +83,7 @@ class Dealer:
|
||||
txt = re.sub(p, r, txt)
|
||||
|
||||
res = []
|
||||
for t in huqie.qie(txt).split(" "):
|
||||
for t in rag_tokenizer.tokenize(txt).split(" "):
|
||||
tk = t
|
||||
if (stpwd and tk in self.stop_words) or (
|
||||
re.match(r"[0-9]$", tk) and not num):
|
||||
@ -104,7 +104,7 @@ class Dealer:
|
||||
while i < len(tks):
|
||||
j = i
|
||||
if i == 0 and oneTerm(tks[i]) and len(
|
||||
tks) > 1 and len(tks[i + 1]) > 1: # 多 工位
|
||||
tks) > 1 and (len(tks[i + 1]) > 1 and not re.match(r"[0-9a-zA-Z]", tks[i + 1])): # 多 工位
|
||||
res.append(" ".join(tks[0:2]))
|
||||
i = 2
|
||||
continue
|
||||
@ -161,7 +161,7 @@ class Dealer:
|
||||
return m[self.ne[t]]
|
||||
|
||||
def postag(t):
|
||||
t = huqie.tag(t)
|
||||
t = rag_tokenizer.tag(t)
|
||||
if t in set(["r", "c", "d"]):
|
||||
return 0.3
|
||||
if t in set(["ns", "nt"]):
|
||||
@ -175,14 +175,14 @@ class Dealer:
|
||||
def freq(t):
|
||||
if re.match(r"[0-9. -]{2,}$", t):
|
||||
return 3
|
||||
s = huqie.freq(t)
|
||||
s = rag_tokenizer.freq(t)
|
||||
if not s and re.match(r"[a-z. -]+$", t):
|
||||
return 300
|
||||
if not s:
|
||||
s = 0
|
||||
|
||||
if not s and len(t) >= 4:
|
||||
s = [tt for tt in huqie.qieqie(t).split(" ") if len(tt) > 1]
|
||||
s = [tt for tt in rag_tokenizer.fine_grained_tokenize(t).split(" ") if len(tt) > 1]
|
||||
if len(s) > 1:
|
||||
s = np.min([freq(tt) for tt in s]) / 6.
|
||||
else:
|
||||
@ -198,7 +198,7 @@ class Dealer:
|
||||
elif re.match(r"[a-z. -]+$", t):
|
||||
return 300
|
||||
elif len(t) >= 4:
|
||||
s = [tt for tt in huqie.qieqie(t).split(" ") if len(tt) > 1]
|
||||
s = [tt for tt in rag_tokenizer.fine_grained_tokenize(t).split(" ") if len(tt) > 1]
|
||||
if len(s) > 1:
|
||||
return max(3, np.min([df(tt) for tt in s]) / 6.)
|
||||
|
||||
|
||||
rag/raptor.py · 115 lines · new file
@@ -0,0 +1,115 @@
|
||||
#
|
||||
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
import re
|
||||
import traceback
|
||||
from concurrent.futures import ThreadPoolExecutor, ALL_COMPLETED, wait
|
||||
from threading import Lock
|
||||
from typing import Tuple
|
||||
import umap
|
||||
import numpy as np
|
||||
from sklearn.mixture import GaussianMixture
|
||||
|
||||
from rag.utils import num_tokens_from_string, truncate
|
||||
|
||||
|
||||
class RecursiveAbstractiveProcessing4TreeOrganizedRetrieval:
|
||||
def __init__(self, max_cluster, llm_model, embd_model, prompt, max_token=256, threshold=0.1):
|
||||
self._max_cluster = max_cluster
|
||||
self._llm_model = llm_model
|
||||
self._embd_model = embd_model
|
||||
self._threshold = threshold
|
||||
self._prompt = prompt
|
||||
self._max_token = max_token
|
||||
|
||||
def _get_optimal_clusters(self, embeddings: np.ndarray, random_state:int):
|
||||
max_clusters = min(self._max_cluster, len(embeddings))
|
||||
n_clusters = np.arange(1, max_clusters)
|
||||
bics = []
|
||||
for n in n_clusters:
|
||||
gm = GaussianMixture(n_components=n, random_state=random_state)
|
||||
gm.fit(embeddings)
|
||||
bics.append(gm.bic(embeddings))
|
||||
optimal_clusters = n_clusters[np.argmin(bics)]
|
||||
return optimal_clusters
|
||||
|
||||
def __call__(self, chunks: Tuple[str, np.ndarray], random_state, callback=None):
|
||||
layers = [(0, len(chunks))]
|
||||
start, end = 0, len(chunks)
|
||||
if len(chunks) <= 1: return
|
||||
|
||||
def summarize(ck_idx, lock):
|
||||
nonlocal chunks
|
||||
try:
|
||||
texts = [chunks[i][0] for i in ck_idx]
|
||||
len_per_chunk = int((self._llm_model.max_length - self._max_token)/len(texts))
|
||||
cluster_content = "\n".join([truncate(t, max(1, len_per_chunk)) for t in texts])
|
||||
cnt = self._llm_model.chat("You're a helpful assistant.",
|
||||
[{"role": "user", "content": self._prompt.format(cluster_content=cluster_content)}],
|
||||
{"temperature": 0.3, "max_tokens": self._max_token}
|
||||
)
|
||||
cnt = re.sub("(······\n由于长度的原因,回答被截断了,要继续吗?|For the content length reason, it stopped, continue?)", "", cnt)
|
||||
print("SUM:", cnt)
|
||||
embds, _ = self._embd_model.encode([cnt])
|
||||
with lock:
|
||||
chunks.append((cnt, embds[0]))
|
||||
except Exception as e:
|
||||
print(e, flush=True)
|
||||
traceback.print_stack(e)
|
||||
return e
|
||||
|
||||
labels = []
|
||||
while end - start > 1:
|
||||
embeddings = [embd for _, embd in chunks[start: end]]
|
||||
if len(embeddings) == 2:
|
||||
summarize([start, start+1], Lock())
|
||||
if callback:
|
||||
callback(msg="Cluster one layer: {} -> {}".format(end-start, len(chunks)-end))
|
||||
labels.extend([0,0])
|
||||
layers.append((end, len(chunks)))
|
||||
start = end
|
||||
end = len(chunks)
|
||||
continue
|
||||
|
||||
n_neighbors = int((len(embeddings) - 1) ** 0.8)
|
||||
reduced_embeddings = umap.UMAP(
|
||||
n_neighbors=max(2, n_neighbors), n_components=min(12, len(embeddings)-2), metric="cosine"
|
||||
).fit_transform(embeddings)
|
||||
n_clusters = self._get_optimal_clusters(reduced_embeddings, random_state)
|
||||
if n_clusters == 1:
|
||||
lbls = [0 for _ in range(len(reduced_embeddings))]
|
||||
else:
|
||||
gm = GaussianMixture(n_components=n_clusters, random_state=random_state)
|
||||
gm.fit(reduced_embeddings)
|
||||
probs = gm.predict_proba(reduced_embeddings)
|
||||
lbls = [np.where(prob > self._threshold)[0] for prob in probs]
|
||||
lbls = [lbl[0] if isinstance(lbl, np.ndarray) else lbl for lbl in lbls]
|
||||
lock = Lock()
|
||||
with ThreadPoolExecutor(max_workers=12) as executor:
|
||||
threads = []
|
||||
for c in range(n_clusters):
|
||||
ck_idx = [i+start for i in range(len(lbls)) if lbls[i] == c]
|
||||
threads.append(executor.submit(summarize, ck_idx, lock))
|
||||
wait(threads, return_when=ALL_COMPLETED)
|
||||
print([t.result() for t in threads])
|
||||
|
||||
assert len(chunks) - end == n_clusters, "{} vs. {}".format(len(chunks) - end, n_clusters)
|
||||
labels.extend(lbls)
|
||||
layers.append((end, len(chunks)))
|
||||
if callback:
|
||||
callback(msg="Cluster one layer: {} -> {}".format(end-start, len(chunks)-end))
|
||||
start = end
|
||||
end = len(chunks)
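
# Usage sketch (illustrative only; object names and the prompt are assumptions, not
# part of this diff). The RAPTOR pass takes (text, embedding) pairs, clusters them
# with UMAP + GaussianMixture, summarizes each cluster with the chat model, and
# appends the summaries as new chunks, layer by layer:
#   raptor = RecursiveAbstractiveProcessing4TreeOrganizedRetrieval(
#       max_cluster=64,
#       llm_model=chat_mdl,      # any chat model exposing .chat() and .max_length
#       embd_model=embd_mdl,     # any embedding model exposing .encode()
#       prompt="Summarize the following passages:\n{cluster_content}")
#   raptor(chunks, random_state=0, callback=lambda msg="": print(msg))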
Some files were not shown because too many files have changed in this diff.