Mirror of https://github.com/infiniflow/ragflow.git, synced 2025-12-08 20:42:30 +08:00.

Compare commits (197 commits)
| SHA1 |
|---|
| d6836444c9 |
| 3b30799b7e |
| e61da33672 |
| 6a71314d70 |
| 06e0c7d1a9 |
| 7600ebd263 |
| 21943ce0e2 |
| aa313e112a |
| 2c7428e2ee |
| 014f2ef900 |
| b418ce5643 |
| fe1c48178e |
| 35f13e882e |
| 85924e898e |
| 622b72db4b |
| a0a7b46cff |
| 37aacb3960 |
| 79bc9d97c9 |
| f150687dbc |
| b2a5482d2c |
| 5fdfb8d465 |
| 8b2c04abc4 |
| 83d0949498 |
| 244cf49ba4 |
| 651422127c |
| 11de7599e5 |
| 7a6e70d6b3 |
| 230865c4f7 |
| 4c9a3e918f |
| 5beb022ee1 |
| 170abf9b7f |
| afaa7144a5 |
| eaa1adb3b2 |
| fa76974e24 |
| f372bd8809 |
| 0284248c93 |
| d9dd1171a3 |
| fefea3a2a5 |
| 0e920a91dd |
| 63e3398f49 |
| cdcaae17c6 |
| 96e9d50060 |
| 5cab6c4ccb |
| b3b341173f |
| a9e4695b74 |
| 4f40f685d9 |
| ffb4cda475 |
| 5859a3df72 |
| 5c6a7cb4b8 |
| 4e2afcd3b8 |
| 11e6d84d46 |
| 53b9e7b52f |
| e5e9ca0015 |
| 150ab9c6a4 |
| f789463982 |
| 955801db2e |
| 93b2e80eb8 |
| 1a41b92f77 |
| 58a8f1f1b0 |
| daddfc9e1b |
| ecf5f6976f |
| e2448fb6dd |
| 9c9f2dbe3f |
| b3d579e2c1 |
| eb72d598b1 |
| 033a4cf21e |
| fda9b58ab7 |
| ca865df87f |
| f9f75aa119 |
| db42d0e0ae |
| df3d0f61bd |
| c6bc69cbc5 |
| 8c9df482ab |
| 1137b04154 |
| ec96426c00 |
| 4d22daefa7 |
| bcc92e04c9 |
| 9aa222f738 |
| 605cfdb8dc |
| 041d72b755 |
| 569e40544d |
| 3d605a23fe |
| 4f2816c01c |
| a0b461a18e |
| 7ce675030b |
| 217caecfda |
| ef8847eda7 |
| d78010c376 |
| 3444cb15e3 |
| 0151d42156 |
| 392f28882f |
| cdb3e6434a |
| bf5f6ec262 |
| 1a755e75c5 |
| 46ff897107 |
| f5d63bb7df |
| c54ec09519 |
| 7b3d700d5f |
| 744ff55c62 |
| c326f14fed |
| 07ddb8fcff |
| 84bcd8b3bc |
| f52970b038 |
| 39b96849a9 |
| f298e55ded |
| ed943b1b5b |
| 0c6d787f92 |
| a4f9aa2172 |
| c432ce6be5 |
| c5b32b2211 |
| 24efa86f26 |
| 38e551cc3d |
| ef95f08c48 |
| 3ced290eb5 |
| fab0f07379 |
| 8525f55ad0 |
| e6c024f8bf |
| c28bc41a96 |
| 29a59ed7e2 |
| f8b80f3f93 |
| 189007e44d |
| 3cffadc7a2 |
| 18e43831bc |
| 3356de55ed |
| 375e727f9a |
| a2b8ba472f |
| 00c7ddbc9b |
| 3e0bc9e36b |
| d6ba4bd255 |
| 84b4b38cbb |
| 4694604836 |
| 224c5472c8 |
| 409310aae9 |
| 9ff825f39d |
| 7b5d831296 |
| 42ee209084 |
| e4096fbc33 |
| 3aa5c2a699 |
| 2ddf278e2d |
| f46448d04c |
| ab17606e79 |
| 7c90b87715 |
| d2929e432e |
| 88daa349f9 |
| f29da49893 |
| 194e8ea696 |
| 810f997276 |
| 6daae7f226 |
| f9fe6ac642 |
| b4ad565df6 |
| 754d5ea364 |
| 26add87c3d |
| 986062a604 |
| 29ceeba95f |
| 849d9eb463 |
| dce7053c24 |
| 042f4c90c6 |
| c1583a3e1d |
| 17fa2e9e8e |
| ff237f2dbc |
| 50c99599f2 |
| 891ee85fa6 |
| a03f5dd9f6 |
| 415c4b7ed5 |
| d599707154 |
| 7f06712a30 |
| b08bb56f6c |
| 9bcccadebd |
| 1287558f24 |
| 6b389e01b5 |
| 8fcca1b958 |
| a1cf792245 |
| 978b580dcf |
| d197f33646 |
| 521d25d4e6 |
| ca1648052a |
| f34b913bd8 |
| 0d3ed37b48 |
| bc68f18c48 |
| 6e42687e65 |
| e4bd879686 |
| 78982d88e0 |
| fa5c7edab4 |
| 6fa34d5532 |
| 9e5427dc6e |
| a357190eff |
| bfcc2abe47 |
| f64ae9dc33 |
| 5a51bdd824 |
| b48c85dcf9 |
| f374dd38b6 |
| ccb72e6787 |
| 55823dbdf6 |
| 588207d7c1 |
| 2aa0cdde8f |
| 44d798d8f0 |
| 4150805073 |
.github/workflows/release.yml (vendored, 6 changed lines)

````diff
@@ -75,12 +75,6 @@ jobs:
-          # The body field does not support environment variable substitution directly.
-          body_path: release_body.md

       - name: Set up QEMU
         uses: docker/setup-qemu-action@v3

       - name: Set up Docker Buildx
         uses: docker/setup-buildx-action@v3

       # https://github.com/marketplace/actions/docker-login
       - name: Login to Docker Hub
         uses: docker/login-action@v3
````
.github/workflows/tests.yml (vendored, 9 changed lines)

````diff
@@ -32,12 +32,9 @@ jobs:
       # https://github.com/hmarr/debug-action
       #- uses: hmarr/debug-action@v2

-      - name: Show PR labels
+      - name: Show who triggered this workflow
         run: |
           echo "Workflow triggered by ${{ github.event_name }}"
           if [[ ${{ github.event_name }} == 'pull_request' ]]; then
             echo "PR labels: ${{ join(github.event.pull_request.labels.*.name, ', ') }}"
           fi

       - name: Ensure workspace ownership
         run: echo "chown -R $USER $GITHUB_WORKSPACE" && sudo chown -R $USER $GITHUB_WORKSPACE
@@ -68,7 +65,7 @@ jobs:

       - name: Start ragflow:nightly-slim
         run: |
-          echo "RAGFLOW_IMAGE=infiniflow/ragflow:nightly-slim" >> docker/.env
+          echo -e "\nRAGFLOW_IMAGE=infiniflow/ragflow:nightly-slim" >> docker/.env
           sudo docker compose -f docker/docker-compose.yml up -d

       - name: Stop ragflow:nightly-slim
@@ -78,7 +75,7 @@ jobs:

       - name: Start ragflow:nightly
         run: |
-          echo "RAGFLOW_IMAGE=infiniflow/ragflow:nightly" >> docker/.env
+          echo -e "\nRAGFLOW_IMAGE=infiniflow/ragflow:nightly" >> docker/.env
           sudo docker compose -f docker/docker-compose.yml up -d

       - name: Run sdk tests against Elasticsearch
````
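The `echo -e "\n..."` change guards against a docker/.env that ends without a trailing newline: a plain `>>` append would then glue `RAGFLOW_IMAGE=...` onto the last existing line. A minimal Python sketch of the failure mode (the file name is illustrative):

```python
from pathlib import Path

env = Path("dot_env_demo")                  # stand-in for docker/.env
env.write_text("DOC_ENGINE=elasticsearch")  # note: no trailing newline

# A plain append corrupts the final line ...
with env.open("a") as f:
    f.write("RAGFLOW_IMAGE=infiniflow/ragflow:nightly-slim")
print(env.read_text())  # DOC_ENGINE=elasticsearchRAGFLOW_IMAGE=...

# ... while a "\n" prefix (what `echo -e "\n..."` emits) keeps it intact.
env.write_text("DOC_ENGINE=elasticsearch")
with env.open("a") as f:
    f.write("\nRAGFLOW_IMAGE=infiniflow/ragflow:nightly-slim")
print(env.read_text())
```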
.gitignore (vendored, 3 changed lines)

````diff
@@ -38,3 +38,6 @@ sdk/python/dist/
 sdk/python/ragflow_sdk.egg-info/
 huggingface.co/
 nltk_data/
+
+# Exclude hash-like temporary files like 9b5ad71b2ce5302211f9c61530b329a4922fc6a4
+*[0-9a-f][0-9a-f][0-9a-f][0-9a-f][0-9a-f][0-9a-f][0-9a-f][0-9a-f][0-9a-f][0-9a-f]*
````
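The new pattern ignores any file whose name contains a run of ten consecutive hex digits, which catches SHA1-named temporary files. A quick sanity check of the glob, using Python's fnmatch (which understands the same character classes):

```python
from fnmatch import fnmatch

# The same glob as the new .gitignore entry: ten hex digits anywhere in the name.
pattern = "*" + "[0-9a-f]" * 10 + "*"

print(fnmatch("9b5ad71b2ce5302211f9c61530b329a4922fc6a4", pattern))  # True
print(fnmatch("README.md", pattern))                                 # False
```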
Dockerfile (11 changed lines)

````diff
@@ -62,11 +62,11 @@ RUN --mount=type=cache,id=ragflow_apt,target=/var/cache/apt,sharing=locked \
     apt install -y python3-pip pipx nginx unzip curl wget git vim less

 RUN if [ "$NEED_MIRROR" == "1" ]; then \
-        pip3 config set global.index-url https://pypi.tuna.tsinghua.edu.cn/simple && \
-        pip3 config set global.trusted-host pypi.tuna.tsinghua.edu.cn; \
+        pip3 config set global.index-url https://mirrors.aliyun.com/pypi/simple && \
+        pip3 config set global.trusted-host mirrors.aliyun.com; \
         mkdir -p /etc/uv && \
         echo "[[index]]" > /etc/uv/uv.toml && \
-        echo 'url = "https://pypi.tuna.tsinghua.edu.cn/simple"' >> /etc/uv/uv.toml && \
+        echo 'url = "https://mirrors.aliyun.com/pypi/simple"' >> /etc/uv/uv.toml && \
         echo "default = true" >> /etc/uv/uv.toml; \
     fi; \
     pipx install uv
@@ -150,9 +150,9 @@ COPY pyproject.toml uv.lock ./
 # uv records index url into uv.lock but doesn't failover among multiple indexes
 RUN --mount=type=cache,id=ragflow_uv,target=/root/.cache/uv,sharing=locked \
     if [ "$NEED_MIRROR" == "1" ]; then \
-        sed -i 's|pypi.org|pypi.tuna.tsinghua.edu.cn|g' uv.lock; \
+        sed -i 's|pypi.org|mirrors.aliyun.com/pypi|g' uv.lock; \
     else \
-        sed -i 's|pypi.tuna.tsinghua.edu.cn|pypi.org|g' uv.lock; \
+        sed -i 's|mirrors.aliyun.com/pypi|pypi.org|g' uv.lock; \
     fi; \
     if [ "$LIGHTEN" == "1" ]; then \
         uv sync --python 3.10 --frozen; \
@@ -196,6 +196,7 @@ COPY deepdoc deepdoc
 COPY rag rag
 COPY agent agent
 COPY graphrag graphrag
+COPY agentic_reasoning agentic_reasoning
 COPY pyproject.toml uv.lock ./

 COPY docker/service_conf.yaml.template ./conf/service_conf.yaml.template
````
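The sed step exists because, as the comment above it says, uv pins the index URL inside uv.lock and does not fail over between indexes, so the build rewrites the lock file to whichever index NEED_MIRROR selects. A rough Python equivalent of that step (the helper itself is illustrative, not part of the repo):

```python
from pathlib import Path

def swap_uv_index(lock_path: str, need_mirror: bool) -> None:
    # Mirrors the Dockerfile's sed: point uv.lock at the Aliyun PyPI
    # mirror when NEED_MIRROR=1, otherwise back at pypi.org.
    text = Path(lock_path).read_text()
    if need_mirror:
        text = text.replace("pypi.org", "mirrors.aliyun.com/pypi")
    else:
        text = text.replace("mirrors.aliyun.com/pypi", "pypi.org")
    Path(lock_path).write_text(text)
```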
README.md (24 changed lines)

````diff
@@ -22,7 +22,7 @@
         <img alt="Static Badge" src="https://img.shields.io/badge/Online-Demo-4e6b99">
     </a>
     <a href="https://hub.docker.com/r/infiniflow/ragflow" target="_blank">
-        <img src="https://img.shields.io/badge/docker_pull-ragflow:v0.16.0-brightgreen" alt="docker pull infiniflow/ragflow:v0.16.0">
+        <img src="https://img.shields.io/badge/docker_pull-ragflow:v0.17.0-brightgreen" alt="docker pull infiniflow/ragflow:v0.17.0">
     </a>
     <a href="https://github.com/infiniflow/ragflow/releases/latest">
         <img src="https://img.shields.io/github/v/release/infiniflow/ragflow?color=blue&label=Latest%20Release" alt="Latest Release">
@@ -80,7 +80,7 @@ Try our demo at [https://demo.ragflow.io](https://demo.ragflow.io).

 - 2025-02-05 Updates the model list of 'SILICONFLOW' and adds support for Deepseek-R1/DeepSeek-V3.
 - 2025-01-26 Optimizes knowledge graph extraction and application, offering various configuration options.
-- 2024-12-18 Upgrades Document Layout Analysis model in Deepdoc.
+- 2024-12-18 Upgrades Document Layout Analysis model in DeepDoc.
 - 2024-12-04 Adds support for pagerank score in knowledge base.
 - 2024-11-22 Adds more variables to Agent.
 - 2024-11-01 Adds keyword extraction and related question generation to the parsed chunks to improve the accuracy of retrieval.
@@ -173,17 +173,17 @@ releases! 🌟

 3. Start up the server using the pre-built Docker images:

-   > The command below downloads the `v0.16.0-slim` edition of the RAGFlow Docker image. Refer to the following table for descriptions of different RAGFlow editions. To download an RAGFlow edition different from `v0.16.0-slim`, update the `RAGFLOW_IMAGE` variable accordingly in **docker/.env** before using `docker compose` to start the server. For example: set `RAGFLOW_IMAGE=infiniflow/ragflow:v0.16.0` for the full edition `v0.16.0`.
+   > The command below downloads the `v0.17.0-slim` edition of the RAGFlow Docker image. Refer to the following table for descriptions of different RAGFlow editions. To download a RAGFlow edition different from `v0.17.0-slim`, update the `RAGFLOW_IMAGE` variable accordingly in **docker/.env** before using `docker compose` to start the server. For example: set `RAGFLOW_IMAGE=infiniflow/ragflow:v0.17.0` for the full edition `v0.17.0`.

    ```bash
-   $ cd ragflow
-   $ docker compose -f docker/docker-compose.yml up -d
+   $ cd ragflow/docker
+   $ docker compose -f docker-compose.yml up -d
    ```

    | RAGFlow image tag | Image size (GB) | Has embedding models? | Stable?                  |
    |-------------------|-----------------|-----------------------|--------------------------|
-   | v0.16.0           | ≈9              | :heavy_check_mark:    | Stable release           |
-   | v0.16.0-slim      | ≈2              | ❌                    | Stable release           |
+   | v0.17.0           | ≈9              | :heavy_check_mark:    | Stable release           |
+   | v0.17.0-slim      | ≈2              | ❌                    | Stable release           |
    | nightly           | ≈9              | :heavy_check_mark:    | _Unstable_ nightly build |
    | nightly-slim      | ≈2              | ❌                    | _Unstable_ nightly build |
@@ -204,9 +204,6 @@ releases! 🌟
         /_/ |_|/_/ |_|\____//_/ /_/ \____/ |__/|__/

     * Running on all addresses (0.0.0.0)
     * Running on http://127.0.0.1:9380
     * Running on http://x.x.x.x:9380
     INFO:werkzeug:Press CTRL+C to quit
    ```

    > If you skip this confirmation step and directly log in to RAGFlow, your browser may prompt a `network anormal`
@@ -240,7 +237,7 @@ to `<YOUR_SERVING_PORT>:80`.
 Updates to the above configurations require a reboot of all containers to take effect:

 > ```bash
-> $ docker compose -f docker/docker-compose.yml up -d
+> $ docker compose -f docker-compose.yml up -d
 > ```

 ### Switch doc engine from Elasticsearch to Infinity
@@ -253,12 +250,15 @@ RAGFlow uses Elasticsearch by default for storing full text and vectors. To swit
    $ docker compose -f docker/docker-compose.yml down -v
    ```

 > [!WARNING]
 > `-v` will delete the docker container volumes, and the existing data will be cleared.

 2. Set `DOC_ENGINE` in **docker/.env** to `infinity`.

 3. Start the containers:

    ```bash
-   $ docker compose -f docker/docker-compose.yml up -d
+   $ docker compose -f docker-compose.yml up -d
    ```

 > [!WARNING]
````
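Several of these hunks come down to editing one variable in docker/.env before running `docker compose up`. A small helper for that edit (the function is illustrative, not part of the repo):

```python
from pathlib import Path

def set_env_var(env_path: str, key: str, value: str) -> None:
    # Rewrite `key=...` in docker/.env (e.g. RAGFLOW_IMAGE or DOC_ENGINE),
    # appending the line if the key is absent.
    path = Path(env_path)
    lines = path.read_text().splitlines()
    for i, line in enumerate(lines):
        if line.startswith(f"{key}="):
            lines[i] = f"{key}={value}"
            break
    else:
        lines.append(f"{key}={value}")
    path.write_text("\n".join(lines) + "\n")

# For example, to pull the full edition instead of the slim one:
# set_env_var("docker/.env", "RAGFLOW_IMAGE", "infiniflow/ragflow:v0.17.0")
```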
README_id.md (21 changed lines)

````diff
@@ -22,7 +22,7 @@
         <img alt="Lencana Daring" src="https://img.shields.io/badge/Online-Demo-4e6b99">
     </a>
     <a href="https://hub.docker.com/r/infiniflow/ragflow" target="_blank">
-        <img src="https://img.shields.io/badge/docker_pull-ragflow:v0.16.0-brightgreen" alt="docker pull infiniflow/ragflow:v0.16.0">
+        <img src="https://img.shields.io/badge/docker_pull-ragflow:v0.17.0-brightgreen" alt="docker pull infiniflow/ragflow:v0.17.0">
     </a>
     <a href="https://github.com/infiniflow/ragflow/releases/latest">
         <img src="https://img.shields.io/github/v/release/infiniflow/ragflow?color=blue&label=Rilis%20Terbaru" alt="Rilis Terbaru">
@@ -41,7 +41,7 @@
 </h4>

 <details open>
-<summary></b>📕 Daftar Isi</b></summary>
+<summary><b>📕 Daftar Isi </b> </summary>

 - 💡 [Apa Itu RAGFlow?](#-apa-itu-ragflow)
 - 🎮 [Demo](#-demo)
@@ -77,7 +77,7 @@ Coba demo kami di [https://demo.ragflow.io](https://demo.ragflow.io).

 - 2025-02-05 Memperbarui daftar model 'SILICONFLOW' dan menambahkan dukungan untuk Deepseek-R1/DeepSeek-V3.
 - 2025-01-26 Optimalkan ekstraksi dan penerapan grafik pengetahuan dan sediakan berbagai opsi konfigurasi.
-- 2024-12-18 Meningkatkan model Analisis Tata Letak Dokumen di Deepdoc.
+- 2024-12-18 Meningkatkan model Analisis Tata Letak Dokumen di DeepDoc.
 - 2024-12-04 Mendukung skor pagerank ke basis pengetahuan.
 - 2024-11-22 Peningkatan definisi dan penggunaan variabel di Agen.
 - 2024-11-01 Penambahan ekstraksi kata kunci dan pembuatan pertanyaan terkait untuk meningkatkan akurasi pengambilan.
@@ -166,17 +166,17 @@ Coba demo kami di [https://demo.ragflow.io](https://demo.ragflow.io).

 3. Bangun image Docker pre-built dan jalankan server:

-   > Perintah di bawah ini mengunduh edisi v0.16.0-slim dari gambar Docker RAGFlow. Silakan merujuk ke tabel berikut untuk deskripsi berbagai edisi RAGFlow. Untuk mengunduh edisi RAGFlow yang berbeda dari v0.16.0-slim, perbarui variabel RAGFLOW_IMAGE di docker/.env sebelum menggunakan docker compose untuk memulai server. Misalnya, atur RAGFLOW_IMAGE=infiniflow/ragflow:v0.16.0 untuk edisi lengkap v0.16.0.
+   > Perintah di bawah ini mengunduh edisi v0.17.0-slim dari gambar Docker RAGFlow. Silakan merujuk ke tabel berikut untuk deskripsi berbagai edisi RAGFlow. Untuk mengunduh edisi RAGFlow yang berbeda dari v0.17.0-slim, perbarui variabel RAGFLOW_IMAGE di docker/.env sebelum menggunakan docker compose untuk memulai server. Misalnya, atur RAGFLOW_IMAGE=infiniflow/ragflow:v0.17.0 untuk edisi lengkap v0.17.0.

    ```bash
-   $ cd ragflow
-   $ docker compose -f docker/docker-compose.yml up -d
+   $ cd ragflow/docker
+   $ docker compose -f docker-compose.yml up -d
    ```

    | RAGFlow image tag | Image size (GB) | Has embedding models? | Stable?                  |
    | ----------------- | --------------- | --------------------- | ------------------------ |
-   | v0.16.0           | ≈9              | :heavy_check_mark:    | Stable release           |
-   | v0.16.0-slim      | ≈2              | ❌                    | Stable release           |
+   | v0.17.0           | ≈9              | :heavy_check_mark:    | Stable release           |
+   | v0.17.0-slim      | ≈2              | ❌                    | Stable release           |
    | nightly           | ≈9              | :heavy_check_mark:    | _Unstable_ nightly build |
    | nightly-slim      | ≈2              | ❌                    | _Unstable_ nightly build |
@@ -197,9 +197,6 @@ Coba demo kami di [https://demo.ragflow.io](https://demo.ragflow.io).
         /_/ |_|/_/ |_|\____//_/ /_/ \____/ |__/|__/

     * Running on all addresses (0.0.0.0)
     * Running on http://127.0.0.1:9380
     * Running on http://x.x.x.x:9380
     INFO:werkzeug:Press CTRL+C to quit
    ```

    > Jika Anda melewatkan langkah ini dan langsung login ke RAGFlow, browser Anda mungkin menampilkan error `network anormal`
@@ -230,7 +227,7 @@ menjadi `<YOUR_SERVING_PORT>:80`.
 Pembaruan konfigurasi ini memerlukan reboot semua kontainer agar efektif:

 > ```bash
-> $ docker compose -f docker/docker-compose.yml up -d
+> $ docker compose -f docker-compose.yml up -d
 > ```

 ## 🔧 Membangun Docker Image tanpa Model Embedding
````
README_ja.md (22 changed lines)

````diff
@@ -22,7 +22,7 @@
         <img alt="Static Badge" src="https://img.shields.io/badge/Online-Demo-4e6b99">
     </a>
     <a href="https://hub.docker.com/r/infiniflow/ragflow" target="_blank">
-        <img src="https://img.shields.io/badge/docker_pull-ragflow:v0.16.0-brightgreen" alt="docker pull infiniflow/ragflow:v0.16.0">
+        <img src="https://img.shields.io/badge/docker_pull-ragflow:v0.17.0-brightgreen" alt="docker pull infiniflow/ragflow:v0.17.0">
     </a>
     <a href="https://github.com/infiniflow/ragflow/releases/latest">
         <img src="https://img.shields.io/github/v/release/infiniflow/ragflow?color=blue&label=Latest%20Release" alt="Latest Release">
@@ -57,7 +57,7 @@

 - 2025-02-05 シリコン フローの St およびモデル リストを更新し、Deep Seek-R1/Deep Seek-V3 のサポートを追加しました。
 - 2025-01-26 ナレッジ グラフの抽出と適用を最適化し、さまざまな構成オプションを提供します。
-- 2024-12-18 Deepdoc のドキュメント レイアウト分析モデルをアップグレードします。
+- 2024-12-18 DeepDoc のドキュメント レイアウト分析モデルをアップグレードします。
 - 2024-12-04 ナレッジ ベースへのページランク スコアをサポートしました。
 - 2024-11-22 エージェントでの変数の定義と使用法を改善しました。
 - 2024-11-01 再現の精度を向上させるために、解析されたチャンクにキーワード抽出と関連質問の生成を追加しました。
@@ -146,17 +146,17 @@

 3. ビルド済みの Docker イメージをビルドし、サーバーを起動する:

-   > 以下のコマンドは、RAGFlow Docker イメージの v0.16.0-slim エディションをダウンロードします。異なる RAGFlow エディションの説明については、以下の表を参照してください。v0.16.0-slim とは異なるエディションをダウンロードするには、docker/.env ファイルの RAGFLOW_IMAGE 変数を適宜更新し、docker compose を使用してサーバーを起動してください。例えば、完全版 v0.16.0 をダウンロードするには、RAGFLOW_IMAGE=infiniflow/ragflow:v0.16.0 と設定します。
+   > 以下のコマンドは、RAGFlow Docker イメージの v0.17.0-slim エディションをダウンロードします。異なる RAGFlow エディションの説明については、以下の表を参照してください。v0.17.0-slim とは異なるエディションをダウンロードするには、docker/.env ファイルの RAGFLOW_IMAGE 変数を適宜更新し、docker compose を使用してサーバーを起動してください。例えば、完全版 v0.17.0 をダウンロードするには、RAGFLOW_IMAGE=infiniflow/ragflow:v0.17.0 と設定します。

    ```bash
-   $ cd ragflow
-   $ docker compose -f docker/docker-compose.yml up -d
+   $ cd ragflow/docker
+   $ docker compose -f docker-compose.yml up -d
    ```

    | RAGFlow image tag | Image size (GB) | Has embedding models? | Stable?                  |
    | ----------------- | --------------- | --------------------- | ------------------------ |
-   | v0.16.0           | ≈9              | :heavy_check_mark:    | Stable release           |
-   | v0.16.0-slim      | ≈2              | ❌                    | Stable release           |
+   | v0.17.0           | ≈9              | :heavy_check_mark:    | Stable release           |
+   | v0.17.0-slim      | ≈2              | ❌                    | Stable release           |
    | nightly           | ≈9              | :heavy_check_mark:    | _Unstable_ nightly build |
    | nightly-slim      | ≈2              | ❌                    | _Unstable_ nightly build |
@@ -176,9 +176,6 @@
         /_/ |_|/_/ |_|\____//_/ /_/ \____/ |__/|__/

     * Running on all addresses (0.0.0.0)
     * Running on http://127.0.0.1:9380
     * Running on http://x.x.x.x:9380
     INFO:werkzeug:Press CTRL+C to quit
    ```

    > もし確認ステップをスキップして直接 RAGFlow にログインした場合、その時点で RAGFlow が完全に初期化されていない可能性があるため、ブラウザーがネットワーク異常エラーを表示するかもしれません。
@@ -208,7 +205,7 @@
 > すべてのシステム設定のアップデートを有効にするには、システムの再起動が必要です:
 >
 > ```bash
-> $ docker compose -f docker/docker-compose.yml up -d
+> $ docker compose -f docker-compose.yml up -d
 > ```

 ### Elasticsearch から Infinity にドキュメントエンジンを切り替えます
@@ -219,11 +216,12 @@ RAGFlow はデフォルトで Elasticsearch を使用して全文とベクトル
    ```bash
    $ docker compose -f docker/docker-compose.yml down -v
    ```
    Note: `-v` は docker コンテナのボリュームを削除し、既存のデータをクリアします。
 2. **docker/.env** の「DOC \_ ENGINE」を「infinity」に設定します。

 3. 起動コンテナ:
    ```bash
-   $ docker compose -f docker/docker-compose.yml up -d
+   $ docker compose -f docker-compose.yml up -d
    ```
 > [!WARNING]
 > Linux/arm64 マシンでの Infinity への切り替えは正式にサポートされていません。
````
README_ko.md (20 changed lines)

````diff
@@ -22,7 +22,7 @@
         <img alt="Static Badge" src="https://img.shields.io/badge/Online-Demo-4e6b99">
     </a>
     <a href="https://hub.docker.com/r/infiniflow/ragflow" target="_blank">
-        <img src="https://img.shields.io/badge/docker_pull-ragflow:v0.16.0-brightgreen" alt="docker pull infiniflow/ragflow:v0.16.0">
+        <img src="https://img.shields.io/badge/docker_pull-ragflow:v0.17.0-brightgreen" alt="docker pull infiniflow/ragflow:v0.17.0">
     </a>
     <a href="https://github.com/infiniflow/ragflow/releases/latest">
         <img src="https://img.shields.io/github/v/release/infiniflow/ragflow?color=blue&label=Latest%20Release" alt="Latest Release">
@@ -57,7 +57,7 @@

 - 2025-02-05 'SILICONFLOW' 모델 목록을 업데이트하고 Deepseek-R1/DeepSeek-V3에 대한 지원을 추가합니다.
 - 2025-01-26 지식 그래프 추출 및 적용을 최적화하고 다양한 구성 옵션을 제공합니다.
-- 2024-12-18 Deepdoc의 문서 레이아웃 분석 모델 업그레이드.
+- 2024-12-18 DeepDoc의 문서 레이아웃 분석 모델 업그레이드.
 - 2024-12-04 지식베이스에 대한 페이지랭크 점수를 지원합니다.

 - 2024-11-22 에이전트의 변수 정의 및 사용을 개선했습니다.
@@ -147,17 +147,17 @@

 3. 미리 빌드된 Docker 이미지를 생성하고 서버를 시작하세요:

-   > 아래 명령어는 RAGFlow Docker 이미지의 v0.16.0-slim 버전을 다운로드합니다. 다양한 RAGFlow 버전에 대한 설명은 다음 표를 참조하십시오. v0.16.0-slim과 다른 RAGFlow 버전을 다운로드하려면, docker/.env 파일에서 RAGFLOW_IMAGE 변수를 적절히 업데이트한 후 docker compose를 사용하여 서버를 시작하십시오. 예를 들어, 전체 버전인 v0.16.0을 다운로드하려면 RAGFLOW_IMAGE=infiniflow/ragflow:v0.16.0로 설정합니다.
+   > 아래 명령어는 RAGFlow Docker 이미지의 v0.17.0-slim 버전을 다운로드합니다. 다양한 RAGFlow 버전에 대한 설명은 다음 표를 참조하십시오. v0.17.0-slim과 다른 RAGFlow 버전을 다운로드하려면, docker/.env 파일에서 RAGFLOW_IMAGE 변수를 적절히 업데이트한 후 docker compose를 사용하여 서버를 시작하십시오. 예를 들어, 전체 버전인 v0.17.0을 다운로드하려면 RAGFLOW_IMAGE=infiniflow/ragflow:v0.17.0로 설정합니다.

    ```bash
-   $ cd ragflow
-   $ docker compose -f docker/docker-compose.yml up -d
+   $ cd ragflow/docker
+   $ docker compose -f docker-compose.yml up -d
    ```

    | RAGFlow image tag | Image size (GB) | Has embedding models? | Stable?                  |
    | ----------------- | --------------- | --------------------- | ------------------------ |
-   | v0.16.0           | ≈9              | :heavy_check_mark:    | Stable release           |
-   | v0.16.0-slim      | ≈2              | ❌                    | Stable release           |
+   | v0.17.0           | ≈9              | :heavy_check_mark:    | Stable release           |
+   | v0.17.0-slim      | ≈2              | ❌                    | Stable release           |
    | nightly           | ≈9              | :heavy_check_mark:    | _Unstable_ nightly build |
    | nightly-slim      | ≈2              | ❌                    | _Unstable_ nightly build |
@@ -177,9 +177,6 @@
         /_/ |_|/_/ |_|\____//_/ /_/ \____/ |__/|__/

     * Running on all addresses (0.0.0.0)
     * Running on http://127.0.0.1:9380
     * Running on http://x.x.x.x:9380
     INFO:werkzeug:Press CTRL+C to quit
    ```

    > 만약 확인 단계를 건너뛰고 바로 RAGFlow에 로그인하면, RAGFlow가 완전히 초기화되지 않았기 때문에 브라우저에서 `network anormal` 오류가 발생할 수 있습니다.
@@ -209,7 +206,7 @@
 > 모든 시스템 구성 업데이트는 적용되기 위해 시스템 재부팅이 필요합니다.
 >
 > ```bash
-> $ docker compose -f docker/docker-compose.yml up -d
+> $ docker compose -f docker-compose.yml up -d
 > ```

 ### Elasticsearch 에서 Infinity 로 문서 엔진 전환
@@ -220,6 +217,7 @@ RAGFlow 는 기본적으로 Elasticsearch 를 사용하여 전체 텍스트 및
    ```bash
    $docker compose-f docker/docker-compose.yml down -v
    ```
    Note: `-v` 는 docker 컨테이너의 볼륨을 삭제하고 기존 데이터를 지우며, 이 작업은 컨테이너를 중지하는 것과 동일합니다.
 2. **docker/.env**의 "DOC_ENGINE" 을 "infinity" 로 설정합니다.
 3. 컨테이너 부팅:
    ```bash
````
README_pt_br.md (filename inferred from the Portuguese content; the mirror dropped this file header)

````diff
@@ -22,7 +22,7 @@
         <img alt="Badge Estático" src="https://img.shields.io/badge/Online-Demo-4e6b99">
     </a>
     <a href="https://hub.docker.com/r/infiniflow/ragflow" target="_blank">
-        <img src="https://img.shields.io/badge/docker_pull-ragflow:v0.16.0-brightgreen" alt="docker pull infiniflow/ragflow:v0.16.0">
+        <img src="https://img.shields.io/badge/docker_pull-ragflow:v0.17.0-brightgreen" alt="docker pull infiniflow/ragflow:v0.17.0">
     </a>
     <a href="https://github.com/infiniflow/ragflow/releases/latest">
         <img src="https://img.shields.io/github/v/release/infiniflow/ragflow?color=blue&label=Última%20Relese" alt="Última Versão">
@@ -41,7 +41,7 @@
 </h4>

 <details open>
-<summary></b>📕 Índice</b></summary>
+<summary><b>📕 Índice</b></summary>

 - 💡 [O que é o RAGFlow?](#-o-que-é-o-ragflow)
 - 🎮 [Demo](#-demo)
@@ -77,7 +77,7 @@ Experimente nossa demo em [https://demo.ragflow.io](https://demo.ragflow.io).

 - 05-02-2025 Atualiza a lista de modelos de 'SILICONFLOW' e adiciona suporte para Deepseek-R1/DeepSeek-V3.
 - 26-01-2025 Otimize a extração e aplicação de gráficos de conhecimento e forneça uma variedade de opções de configuração.
-- 18-12-2024 Atualiza o modelo de Análise de Layout de Documentos no Deepdoc.
+- 18-12-2024 Atualiza o modelo de Análise de Layout de Documentos no DeepDoc.
 - 04-12-2024 Adiciona suporte para pontuação de pagerank na base de conhecimento.
 - 22-11-2024 Adiciona mais variáveis para o Agente.
 - 01-11-2024 Adiciona extração de palavras-chave e geração de perguntas relacionadas aos blocos analisados para melhorar a precisão da recuperação.
@@ -166,17 +166,17 @@ Experimente nossa demo em [https://demo.ragflow.io](https://demo.ragflow.io).

 3. Inicie o servidor usando as imagens Docker pré-compiladas:

-   > O comando abaixo baixa a edição `v0.16.0-slim` da imagem Docker do RAGFlow. Consulte a tabela a seguir para descrições de diferentes edições do RAGFlow. Para baixar uma edição do RAGFlow diferente da `v0.16.0-slim`, atualize a variável `RAGFLOW_IMAGE` conforme necessário no **docker/.env** antes de usar `docker compose` para iniciar o servidor. Por exemplo: defina `RAGFLOW_IMAGE=infiniflow/ragflow:v0.16.0` para a edição completa `v0.16.0`.
+   > O comando abaixo baixa a edição `v0.17.0-slim` da imagem Docker do RAGFlow. Consulte a tabela a seguir para descrições de diferentes edições do RAGFlow. Para baixar uma edição do RAGFlow diferente da `v0.17.0-slim`, atualize a variável `RAGFLOW_IMAGE` conforme necessário no **docker/.env** antes de usar `docker compose` para iniciar o servidor. Por exemplo: defina `RAGFLOW_IMAGE=infiniflow/ragflow:v0.17.0` para a edição completa `v0.17.0`.

    ```bash
-   $ cd ragflow
-   $ docker compose -f docker/docker-compose.yml up -d
+   $ cd ragflow/docker
+   $ docker compose -f docker-compose.yml up -d
    ```

    | Tag da imagem RAGFlow | Tamanho da imagem (GB) | Possui modelos de incorporação? | Estável?                 |
    | --------------------- | ---------------------- | ------------------------------- | ------------------------ |
-   | v0.16.0               | ~9                     | :heavy_check_mark:              | Lançamento estável       |
-   | v0.16.0-slim          | ~2                     | ❌                              | Lançamento estável       |
+   | v0.17.0               | ~9                     | :heavy_check_mark:              | Lançamento estável       |
+   | v0.17.0-slim          | ~2                     | ❌                              | Lançamento estável       |
    | nightly               | ~9                     | :heavy_check_mark:              | _Instável_ build noturno |
    | nightly-slim          | ~2                     | ❌                              | _Instável_ build noturno |
@@ -196,9 +196,6 @@ Experimente nossa demo em [https://demo.ragflow.io](https://demo.ragflow.io).
         /_/ |_|/_/ |_|\____//_/ /_/ \____/ |__/|__/

     * Rodando em todos os endereços (0.0.0.0)
     * Rodando em http://127.0.0.1:9380
     * Rodando em http://x.x.x.x:9380
     INFO:werkzeug:Pressione CTRL+C para sair
    ```

    > Se você pular essa etapa de confirmação e acessar diretamente o RAGFlow, seu navegador pode exibir um erro `network anormal`, pois, nesse momento, seu RAGFlow pode não estar totalmente inicializado.
@@ -228,7 +225,7 @@ Para atualizar a porta HTTP de serviço padrão (80), vá até [docker-compose.y
 Atualizações nas configurações acima exigem um reinício de todos os contêineres para que tenham efeito:

 > ```bash
-> $ docker compose -f docker/docker-compose.yml up -d
+> $ docker compose -f docker-compose.yml up -d
 > ```

 ### Mudar o mecanismo de documentos de Elasticsearch para Infinity
@@ -240,13 +237,13 @@ O RAGFlow usa o Elasticsearch por padrão para armazenar texto completo e vetore
    ```bash
    $ docker compose -f docker/docker-compose.yml down -v
    ```

    Note: `-v` irá deletar os volumes do contêiner, e os dados existentes serão apagados.
 2. Defina `DOC_ENGINE` no **docker/.env** para `infinity`.

 3. Inicie os contêineres:

    ```bash
-   $ docker compose -f docker/docker-compose.yml up -d
+   $ docker compose -f docker-compose.yml up -d
    ```

 > [!ATENÇÃO]
````
README_tzh.md (filename inferred from the Traditional Chinese content; the mirror dropped this file header)

````diff
@@ -21,7 +21,7 @@
         <img alt="Static Badge" src="https://img.shields.io/badge/Online-Demo-4e6b99">
     </a>
     <a href="https://hub.docker.com/r/infiniflow/ragflow" target="_blank">
-        <img src="https://img.shields.io/badge/docker_pull-ragflow:v0.16.0-brightgreen" alt="docker pull infiniflow/ragflow:v0.16.0">
+        <img src="https://img.shields.io/badge/docker_pull-ragflow:v0.17.0-brightgreen" alt="docker pull infiniflow/ragflow:v0.17.0">
     </a>
     <a href="https://github.com/infiniflow/ragflow/releases/latest">
         <img src="https://img.shields.io/github/v/release/infiniflow/ragflow?color=blue&label=Latest%20Release" alt="Latest Release">
@@ -56,7 +56,7 @@

 - 2025-02-05 更新「SILICONFLOW」的型號清單並新增 Deepseek-R1/DeepSeek-V3 的支援。
 - 2025-01-26 最佳化知識圖譜的擷取與應用,提供了多種配置選擇。
-- 2024-12-18 升級了 Deepdoc 的文檔佈局分析模型。
+- 2024-12-18 升級了 DeepDoc 的文檔佈局分析模型。
 - 2024-12-04 支援知識庫的 Pagerank 分數。
 - 2024-11-22 完善了 Agent 中的變數定義和使用。
 - 2024-11-01 對解析後的 chunk 加入關鍵字抽取和相關問題產生以提高回想的準確度。
@@ -145,17 +145,17 @@

 3. 進入 **docker** 資料夾,利用事先編譯好的 Docker 映像啟動伺服器:

-   > 執行以下指令會自動下載 RAGFlow slim Docker 映像 `v0.16.0-slim`。請參考下表查看不同 Docker 發行版的說明。如需下載不同於 `v0.16.0-slim` 的 Docker 映像,請在執行 `docker compose` 啟動服務之前先更新 **docker/.env** 檔案內的 `RAGFLOW_IMAGE` 變數。例如,你可以透過設定 `RAGFLOW_IMAGE=infiniflow/ragflow:v0.16.0` 來下載 RAGFlow 鏡像的 `v0.16.0` 完整發行版。
+   > 執行以下指令會自動下載 RAGFlow slim Docker 映像 `v0.17.0-slim`。請參考下表查看不同 Docker 發行版的說明。如需下載不同於 `v0.17.0-slim` 的 Docker 映像,請在執行 `docker compose` 啟動服務之前先更新 **docker/.env** 檔案內的 `RAGFLOW_IMAGE` 變數。例如,你可以透過設定 `RAGFLOW_IMAGE=infiniflow/ragflow:v0.17.0` 來下載 RAGFlow 鏡像的 `v0.17.0` 完整發行版。

    ```bash
-   $ cd ragflow
-   $ docker compose -f docker/docker-compose.yml up -d
+   $ cd ragflow/docker
+   $ docker compose -f docker-compose.yml up -d
    ```

    | RAGFlow image tag | Image size (GB) | Has embedding models? | Stable?                  |
    | ----------------- | --------------- | --------------------- | ------------------------ |
-   | v0.16.0           | ≈9              | :heavy_check_mark:    | Stable release           |
-   | v0.16.0-slim      | ≈2              | ❌                    | Stable release           |
+   | v0.17.0           | ≈9              | :heavy_check_mark:    | Stable release           |
+   | v0.17.0-slim      | ≈2              | ❌                    | Stable release           |
    | nightly           | ≈9              | :heavy_check_mark:    | _Unstable_ nightly build |
    | nightly-slim      | ≈2              | ❌                    | _Unstable_ nightly build |
@@ -181,9 +181,6 @@
         /_/ |_|/_/ |_|\____//_/ /_/ \____/ |__/|__/

     * Running on all addresses (0.0.0.0)
     * Running on http://127.0.0.1:9380
     * Running on http://x.x.x.x:9380
     INFO:werkzeug:Press CTRL+C to quit
    ```

    > 如果您跳過這一步驟系統確認步驟就登入 RAGFlow,你的瀏覽器有可能會提示 `network anormal` 或 `網路異常`,因為 RAGFlow 可能並未完全啟動成功。
@@ -200,7 +197,7 @@

 系統配置涉及以下三份文件:

- - [.env](./docker/.env):存放一些基本的系統環境變量,例如 `SVR_HTTP_PORT`、`MYSQL_PASSWORD`、`MINIO_PASSWORD` 等。
+ - [.env](./docker/.env):存放一些系統環境變量,例如 `SVR_HTTP_PORT`、`MYSQL_PASSWORD`、`MINIO_PASSWORD` 等。
 - [service_conf.yaml.template](./docker/service_conf.yaml.template):設定各類別後台服務。
 - [docker-compose.yml](./docker/docker-compose.yml): 系統依賴該檔案完成啟動。

@@ -215,7 +212,7 @@
 > 所有系統配置都需要透過系統重新啟動生效:
 >
 > ```bash
-> $ docker compose -f docker/docker-compose.yml up -d
+> $ docker compose -f docker-compose.yml up -d
 > ```

 ###把文檔引擎從 Elasticsearch 切換成為 Infinity
@@ -227,13 +224,14 @@ RAGFlow 預設使用 Elasticsearch 儲存文字和向量資料. 如果要切換
    ```bash
    $ docker compose -f docker/docker-compose.yml down -v
    ```
    Note: `-v` 將會刪除 docker 容器的 volumes,已有的資料會被清空。

 2. 設定 **docker/.env** 目錄中的 `DOC_ENGINE` 為 `infinity`.

 3. 啟動容器:

    ```bash
-   $ docker compose -f docker/docker-compose.yml up -d
+   $ docker compose -f docker-compose.yml up -d
    ```

 > [!WARNING]
@@ -265,7 +263,7 @@ docker build --build-arg NEED_MIRROR=1 -f Dockerfile -t infiniflow/ragflow:night

    ```bash
    pipx install uv
-   export UV_INDEX=https://pypi.tuna.tsinghua.edu.cn/simple
+   export UV_INDEX=https://mirrors.aliyun.com/pypi/simple
    ```

 2. 下載原始碼並安裝 Python 依賴:
````
README_zh.md (26 changed lines)

````diff
@@ -22,7 +22,7 @@
         <img alt="Static Badge" src="https://img.shields.io/badge/Online-Demo-4e6b99">
     </a>
     <a href="https://hub.docker.com/r/infiniflow/ragflow" target="_blank">
-        <img src="https://img.shields.io/badge/docker_pull-ragflow:v0.16.0-brightgreen" alt="docker pull infiniflow/ragflow:v0.16.0">
+        <img src="https://img.shields.io/badge/docker_pull-ragflow:v0.17.0-brightgreen" alt="docker pull infiniflow/ragflow:v0.17.0">
     </a>
     <a href="https://github.com/infiniflow/ragflow/releases/latest">
         <img src="https://img.shields.io/github/v/release/infiniflow/ragflow?color=blue&label=Latest%20Release" alt="Latest Release">
@@ -57,7 +57,7 @@

 - 2025-02-05 更新硅基流动的模型列表,增加了对 Deepseek-R1/DeepSeek-V3 的支持。
 - 2025-01-26 优化知识图谱的提取和应用,提供了多种配置选择。
-- 2024-12-18 升级了 Deepdoc 的文档布局分析模型。
+- 2024-12-18 升级了 DeepDoc 的文档布局分析模型。
 - 2024-12-04 支持知识库的 Pagerank 分数。
 - 2024-11-22 完善了 Agent 中的变量定义和使用。
 - 2024-11-01 对解析后的 chunk 加入关键词抽取和相关问题生成以提高召回的准确度。
@@ -146,17 +146,17 @@

 3. 进入 **docker** 文件夹,利用提前编译好的 Docker 镜像启动服务器:

-   > 运行以下命令会自动下载 RAGFlow slim Docker 镜像 `v0.16.0-slim`。请参考下表查看不同 Docker 发行版的描述。如需下载不同于 `v0.16.0-slim` 的 Docker 镜像,请在运行 `docker compose` 启动服务之前先更新 **docker/.env** 文件内的 `RAGFLOW_IMAGE` 变量。比如,你可以通过设置 `RAGFLOW_IMAGE=infiniflow/ragflow:v0.16.0` 来下载 RAGFlow 镜像的 `v0.16.0` 完整发行版。
+   > 运行以下命令会自动下载 RAGFlow slim Docker 镜像 `v0.17.0-slim`。请参考下表查看不同 Docker 发行版的描述。如需下载不同于 `v0.17.0-slim` 的 Docker 镜像,请在运行 `docker compose` 启动服务之前先更新 **docker/.env** 文件内的 `RAGFLOW_IMAGE` 变量。比如,你可以通过设置 `RAGFLOW_IMAGE=infiniflow/ragflow:v0.17.0` 来下载 RAGFlow 镜像的 `v0.17.0` 完整发行版。

    ```bash
-   $ cd ragflow
-   $ docker compose -f docker/docker-compose.yml up -d
+   $ cd ragflow/docker
+   $ docker compose -f docker-compose.yml up -d
    ```

    | RAGFlow image tag | Image size (GB) | Has embedding models? | Stable?                  |
    | ----------------- | --------------- | --------------------- | ------------------------ |
-   | v0.16.0           | ≈9              | :heavy_check_mark:    | Stable release           |
-   | v0.16.0-slim      | ≈2              | ❌                    | Stable release           |
+   | v0.17.0           | ≈9              | :heavy_check_mark:    | Stable release           |
+   | v0.17.0-slim      | ≈2              | ❌                    | Stable release           |
    | nightly           | ≈9              | :heavy_check_mark:    | _Unstable_ nightly build |
    | nightly-slim      | ≈2              | ❌                    | _Unstable_ nightly build |
@@ -182,12 +182,9 @@
         /_/ |_|/_/ |_|\____//_/ /_/ \____/ |__/|__/

     * Running on all addresses (0.0.0.0)
     * Running on http://127.0.0.1:9380
     * Running on http://x.x.x.x:9380
     INFO:werkzeug:Press CTRL+C to quit
    ```

-   > 如果您跳过这一步系统确认步骤就登录 RAGFlow,你的浏览器有可能会提示 `network anormal` 或 `网络异常`,因为 RAGFlow 可能并未完全启动成功。
+   > 如果您在没有看到上面的提示信息出来之前,就尝试登录 RAGFlow,你的浏览器有可能会提示 `network anormal` 或 `网络异常`。

 5. 在你的浏览器中输入你的服务器对应的 IP 地址并登录 RAGFlow。
    > 上面这个例子中,您只需输入 http://IP_OF_YOUR_MACHINE 即可:未改动过配置则无需输入端口(默认的 HTTP 服务端口 80)。
@@ -216,7 +213,7 @@
 > 所有系统配置都需要通过系统重启生效:
 >
 > ```bash
-> $ docker compose -f docker/docker-compose.yml up -d
+> $ docker compose -f docker-compose.yml up -d
 > ```

 ### 把文档引擎从 Elasticsearch 切换成为 Infinity
@@ -228,13 +225,14 @@ RAGFlow 默认使用 Elasticsearch 存储文本和向量数据. 如果要切换
    ```bash
    $ docker compose -f docker/docker-compose.yml down -v
    ```
    Note: `-v` 将会删除 docker 容器的 volumes,已有的数据会被清空。

 2. 设置 **docker/.env** 目录中的 `DOC_ENGINE` 为 `infinity`.

 3. 启动容器:

    ```bash
-   $ docker compose -f docker/docker-compose.yml up -d
+   $ docker compose -f docker-compose.yml up -d
    ```

 > [!WARNING]
@@ -266,7 +264,7 @@ docker build --build-arg NEED_MIRROR=1 -f Dockerfile -t infiniflow/ragflow:night

    ```bash
    pipx install uv
-   export UV_INDEX=https://pypi.tuna.tsinghua.edu.cn/simple
+   export UV_INDEX=https://mirrors.aliyun.com/pypi/simple
    ```

 2. 下载源代码并安装 Python 依赖:
````
agent/canvas.py (filename inferred from the Canvas class and imports; the mirror dropped this file header)

````diff
@@ -15,7 +15,6 @@
 #
 import logging
 import json
-from abc import ABC
 from copy import deepcopy
 from functools import partial
@@ -25,7 +24,7 @@ from agent.component import component_class
 from agent.component.base import ComponentBase


-class Canvas(ABC):
+class Canvas:
     """
     dsl = {
         "components": {
@@ -162,7 +161,7 @@ class Canvas(ABC):
             self.components[k]["obj"].reset()
         self._embed_id = ""

-    def get_compnent_name(self, cid):
+    def get_component_name(self, cid):
         for n in self.dsl["graph"]["nodes"]:
             if cid == n["id"]:
                 return n["data"]["name"]
@@ -210,7 +209,7 @@ class Canvas(ABC):
                 if c not in waiting:
                     waiting.append(c)
                 continue
-            yield "*'{}'* is running...🕞".format(self.get_compnent_name(c))
+            yield "*'{}'* is running...🕞".format(self.get_component_name(c))

             if cpn.component_name.lower() == "iteration":
                 st_cpn = cpn.get_start()
````
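What Canvas.get_component_name (the corrected spelling of the old get_compnent_name) does, in isolation: resolve a node id to its display name in the DSL graph. The loop body below is taken from the diff; the `dsl` literal is a made-up example:

```python
# Hypothetical DSL fragment, shaped like the one in the Canvas docstring.
dsl = {"graph": {"nodes": [{"id": "generate:0", "data": {"name": "Generate Answer"}}]}}

def get_component_name(cid):
    # Same lookup as the renamed method: scan the graph nodes for the id.
    for n in dsl["graph"]["nodes"]:
        if cid == n["id"]:
            return n["data"]["name"]

print(get_component_name("generate:0"))  # Generate Answer
```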
agent/component/base.py (filename inferred from the ComponentBase class; the mirror dropped this file header)

````diff
@@ -555,7 +555,7 @@ class ComponentBase(ABC):
                     eles.extend(self._canvas.get_component(cpn_id)["obj"]._param.query)
                     continue

-                eles.append({"name": self._canvas.get_compnent_name(cpn_id), "key": cpn_id})
+                eles.append({"name": self._canvas.get_component_name(cpn_id), "key": cpn_id})
             else:
                 eles.append({"key": q["value"], "name": q["value"], "value": q["value"]})
         return eles
````
agent/component/categorize.py (filename inferred from the Categorize class; the mirror dropped this file header)

````diff
@@ -80,6 +80,7 @@ class Categorize(Generate, ABC):

     def _run(self, history, **kwargs):
         input = self.get_input()
+        input = " - ".join(input["content"]) if "content" in input else ""
         chat_mdl = LLMBundle(self._canvas.get_tenant_id(), LLMType.CHAT, self._param.llm_id)
         ans = chat_mdl.chat(self._param.get_prompt(input), [{"role": "user", "content": "\nCategory: "}],
                             self._param.gen_conf())
@@ -93,5 +94,5 @@ class Categorize(Generate, ABC):
     def debug(self, **kwargs):
         df = self._run([], **kwargs)
         cpn_id = df.iloc[0, 0]
-        return Categorize.be_output(self._canvas.get_compnent_name(cpn_id))
+        return Categorize.be_output(self._canvas.get_component_name(cpn_id))
````
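The added line flattens the upstream component's output into one prompt-ready string, with a fallback when no "content" column arrives. The same expression in isolation (the input dict is a made-up example):

```python
# Stand-in for what self.get_input() returns: a dict-like row that may or
# may not carry a "content" column.
input_ = {"content": ["refund request", "order #42 missing"]}

text = " - ".join(input_["content"]) if "content" in input_ else ""
print(text)  # refund request - order #42 missing
```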
agent/component/exesql.py (filename inferred from the ExeSQL class; the mirror dropped this file header)

````diff
@@ -52,15 +52,16 @@ class ExeSQLParam(GenerateParam):
         self.check_positive_integer(self.top_n, "Number of records")
         if self.database == "rag_flow":
-            raise ValueError("For the security reason, it dose not support database named rag_flow.")
-        if self.password == "infini_rag_flow":
-            raise ValueError("For the security reason, it dose not support database named rag_flow.")
+            if self.host == "ragflow-mysql":
+                raise ValueError("The host is not accessible.")
+            if self.password == "infini_rag_flow":
+                raise ValueError("The host is not accessible.")


 class ExeSQL(Generate, ABC):
     component_name = "ExeSQL"

-    def _refactor(self,ans):
+    def _refactor(self, ans):
         ans = re.sub(r"<think>.*</think>", "", ans, flags=re.DOTALL)
         match = re.search(r"```sql\s*(.*?)\s*```", ans, re.DOTALL)
         if match:
             ans = match.group(1)  # Query content
@@ -78,7 +79,6 @@ class ExeSQL(Generate, ABC):
         ans = self.get_input()
         ans = "".join([str(a) for a in ans["content"]]) if "content" in ans else ""
         ans = self._refactor(ans)
-        logging.info("db_type: ",self._param.db_type)
         if self._param.db_type in ["mysql", "mariadb"]:
             db = pymysql.connect(db=self._param.database, user=self._param.username, host=self._param.host,
                                  port=self._param.port, password=self._param.password)
@@ -101,51 +101,50 @@ class ExeSQL(Generate, ABC):
         if not hasattr(self, "_loop"):
             setattr(self, "_loop", 0)
         self._loop += 1
-        input_list=re.split(r';', ans.replace(r"\n", " "))
+        input_list = re.split(r';', ans.replace(r"\n", " "))
         sql_res = []
         for i in range(len(input_list)):
-            single_sql=input_list[i]
+            single_sql = input_list[i]
             while self._loop <= self._param.loop:
-                self._loop+=1
+                self._loop += 1
                 if not single_sql:
                     break
                 try:
-                    logging.info("single_sql: ", single_sql)
                     cursor.execute(single_sql)
                     if cursor.rowcount == 0:
                         sql_res.append({"content": "No record in the database!"})
                         break
                     if self._param.db_type == 'mssql':
-                        single_res = pd.DataFrame.from_records(cursor.fetchmany(self._param.top_n),columns = [desc[0] for desc in cursor.description])
+                        single_res = pd.DataFrame.from_records(cursor.fetchmany(self._param.top_n),
+                                                               columns=[desc[0] for desc in cursor.description])
                     else:
                         single_res = pd.DataFrame([i for i in cursor.fetchmany(self._param.top_n)])
                         single_res.columns = [i[0] for i in cursor.description]
-                    sql_res.append({"content": single_res.to_markdown()})
+                    sql_res.append({"content": single_res.to_markdown(index=False, floatfmt=".6f")})
                     break
                 except Exception as e:
                     single_sql = self._regenerate_sql(single_sql, str(e), **kwargs)
                     single_sql = self._refactor(single_sql)
             if self._loop > self._param.loop:
                 sql_res.append({"content": "Can't query the correct data via SQL statement."})
                 # raise Exception("Maximum loop time exceeds. Can't query the correct data via SQL statement.")
         db.close()
         if not sql_res:
             return ExeSQL.be_output("")
         return pd.DataFrame(sql_res)

-    def _regenerate_sql(self, failed_sql, error_message,**kwargs):
+    def _regenerate_sql(self, failed_sql, error_message, **kwargs):
         prompt = f'''
 ## You are the Repair SQL Statement Helper, please modify the original SQL statement based on the SQL query error report.
 ## The original SQL statement is as follows:{failed_sql}.
 ## The contents of the SQL query error report is as follows:{error_message}.
 ## Answer only the modified SQL statement. Please do not give any explanation, just answer the code.
 '''
-        self._param.prompt=prompt
+        self._param.prompt = prompt
         kwargs_ = deepcopy(kwargs)
         kwargs_["stream"] = False
         response = Generate._run(self, [], **kwargs_)
         try:
-            regenerated_sql = response.loc[0,"content"]
+            regenerated_sql = response.loc[0, "content"]
             return regenerated_sql
         except Exception as e:
             logging.error(f"Failed to regenerate SQL: {e}")
````
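The `to_markdown(index=False, floatfmt=".6f")` change drops the integer index column and pins float formatting, so the SQL result table fed back to the LLM is cleaner and stable across runs. The difference in isolation (pandas delegates to the tabulate package here, so tabulate must be installed):

```python
import pandas as pd  # DataFrame.to_markdown() also requires `tabulate`

df = pd.DataFrame({"price": [12.5, 3.14159]})

print(df.to_markdown())                             # old call: extra index column
print(df.to_markdown(index=False, floatfmt=".6f"))  # new call: no index, fixed floats
```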
agent/component/generate.py (filename inferred from the Generate class; the mirror dropped this file header)

````diff
@@ -18,10 +18,10 @@ from functools import partial
 import pandas as pd
 from api.db import LLMType
 from api.db.services.conversation_service import structure_answer
-from api.db.services.dialog_service import message_fit_in
 from api.db.services.llm_service import LLMBundle
 from api import settings
 from agent.component.base import ComponentBase, ComponentParamBase
+from rag.prompts import message_fit_in


 class GenerateParam(ComponentParamBase):
@@ -69,10 +69,8 @@ class Generate(ComponentBase):
     component_name = "Generate"

     def get_dependent_components(self):
-        cpnts = set([para["component_id"].split("@")[0] for para in self._param.parameters \
-                     if para.get("component_id") \
-                     and para["component_id"].lower().find("answer") < 0 \
-                     and para["component_id"].lower().find("begin") < 0])
+        inputs = self.get_input_elements()
+        cpnts = set([i["key"] for i in inputs[1:] if i["key"].lower().find("answer") < 0 and i["key"].lower().find("begin") < 0])
         return list(cpnts)

     def set_cite(self, retrieval_res, answer):
@@ -110,10 +108,26 @@ class Generate(ComponentBase):
         return res

     def get_input_elements(self):
-        if self._param.parameters:
-            return [{"key": "user", "name": "Input your question here:"}, *self._param.parameters]
-
-        return [{"key": "user", "name": "Input your question here:"}]
+        key_set = set([])
+        res = [{"key": "user", "name": "Input your question here:"}]
+        for r in re.finditer(r"\{([a-z]+[:@][a-z0-9_-]+)\}", self._param.prompt, flags=re.IGNORECASE):
+            cpn_id = r.group(1)
+            if cpn_id in key_set:
+                continue
+            if cpn_id.lower().find("begin@") == 0:
+                cpn_id, key = cpn_id.split("@")
+                for p in self._canvas.get_component(cpn_id)["obj"]._param.query:
+                    if p["key"] != key:
+                        continue
+                    res.append({"key": r.group(1), "name": p["name"]})
+                    key_set.add(r.group(1))
+                continue
+            cpn_nm = self._canvas.get_component_name(cpn_id)
+            if not cpn_nm:
+                continue
+            res.append({"key": cpn_id, "name": cpn_nm})
+            key_set.add(cpn_id)
+        return res

     def _run(self, history, **kwargs):
         chat_mdl = LLMBundle(self._canvas.get_tenant_id(), LLMType.CHAT, self._param.llm_id)
@@ -121,22 +135,20 @@ class Generate(ComponentBase):

         retrieval_res = []
         self._param.inputs = []
-        for para in self._param.parameters:
-            if not para.get("component_id"):
-                continue
-            component_id = para["component_id"].split("@")[0]
-            if para["component_id"].lower().find("@") >= 0:
-                cpn_id, key = para["component_id"].split("@")
+        for para in self.get_input_elements()[1:]:
+            if para["key"].lower().find("begin@") == 0:
+                cpn_id, key = para["key"].split("@")
                 for p in self._canvas.get_component(cpn_id)["obj"]._param.query:
                     if p["key"] == key:
                         kwargs[para["key"]] = p.get("value", "")
                         self._param.inputs.append(
-                            {"component_id": para["component_id"], "content": kwargs[para["key"]]})
+                            {"component_id": para["key"], "content": kwargs[para["key"]]})
                         break
                 else:
                     assert False, f"Can't find parameter '{key}' for {cpn_id}"
                 continue

+            component_id = para["key"]
             cpn = self._canvas.get_component(component_id)["obj"]
             if cpn.component_name.lower() == "answer":
                 hist = self._canvas.get_history(1)
@@ -152,8 +164,8 @@ class Generate(ComponentBase):
             else:
                 if cpn.component_name.lower() == "retrieval":
                     retrieval_res.append(out)
-                kwargs[para["key"]] = " - "+"\n - ".join([o if isinstance(o, str) else str(o) for o in out["content"]])
-                self._param.inputs.append({"component_id": para["component_id"], "content": kwargs[para["key"]]})
+                kwargs[para["key"]] = " - " + "\n - ".join([o if isinstance(o, str) else str(o) for o in out["content"]])
+                self._param.inputs.append({"component_id": para["key"], "content": kwargs[para["key"]]})

         if retrieval_res:
             retrieval_res = pd.concat(retrieval_res, ignore_index=True)
@@ -175,17 +187,18 @@ class Generate(ComponentBase):
             return partial(self.stream_output, chat_mdl, prompt, retrieval_res)

         if "empty_response" in retrieval_res.columns and not "".join(retrieval_res["content"]):
-            res = {"content": "\n- ".join(retrieval_res["empty_response"]) if "\n- ".join(
-                retrieval_res["empty_response"]) else "Nothing found in knowledgebase!", "reference": []}
+            empty_res = "\n- ".join([str(t) for t in retrieval_res["empty_response"] if str(t)])
+            res = {"content": empty_res if empty_res else "Nothing found in knowledgebase!", "reference": []}
             return pd.DataFrame([res])

         msg = self._canvas.get_history(self._param.message_history_window_size)
         if len(msg) < 1:
-            msg.append({"role": "user", "content": ""})
+            msg.append({"role": "user", "content": "Output: "})
         _, msg = message_fit_in([{"role": "system", "content": prompt}, *msg], int(chat_mdl.max_length * 0.97))
         if len(msg) < 2:
-            msg.append({"role": "user", "content": ""})
+            msg.append({"role": "user", "content": "Output: "})
         ans = chat_mdl.chat(msg[0]["content"], msg[1:], self._param.gen_conf())
+        ans = re.sub(r"<think>.*</think>", "", ans, flags=re.DOTALL)

         if self._param.cite and "content_ltks" in retrieval_res.columns and "vector" in retrieval_res.columns:
             res = self.set_cite(retrieval_res, ans)
@@ -196,18 +209,18 @@ class Generate(ComponentBase):
     def stream_output(self, chat_mdl, prompt, retrieval_res):
         res = None
         if "empty_response" in retrieval_res.columns and not "".join(retrieval_res["content"]):
-            res = {"content": "\n- ".join(retrieval_res["empty_response"]) if "\n- ".join(
-                retrieval_res["empty_response"]) else "Nothing found in knowledgebase!", "reference": []}
+            empty_res = "\n- ".join([str(t) for t in retrieval_res["empty_response"] if str(t)])
+            res = {"content": empty_res if empty_res else "Nothing found in knowledgebase!", "reference": []}
             yield res
             self.set_output(res)
             return

         msg = self._canvas.get_history(self._param.message_history_window_size)
         if len(msg) < 1:
-            msg.append({"role": "user", "content": ""})
+            msg.append({"role": "user", "content": "Output: "})
         _, msg = message_fit_in([{"role": "system", "content": prompt}, *msg], int(chat_mdl.max_length * 0.97))
         if len(msg) < 2:
-            msg.append({"role": "user", "content": ""})
+            msg.append({"role": "user", "content": "Output: "})
         answer = ""
         for ans in chat_mdl.chat_streamly(msg[0]["content"], msg[1:], self._param.gen_conf()):
             res = {"content": ans, "reference": []}
@@ -230,5 +243,6 @@ class Generate(ComponentBase):
         for n, v in kwargs.items():
             prompt = re.sub(r"\{%s\}" % re.escape(n), str(v).replace("\\", " "), prompt)

-        ans = chat_mdl.chat(prompt, [{"role": "user", "content": kwargs.get("user", "")}], self._param.gen_conf())
+        u = kwargs.get("user")
+        ans = chat_mdl.chat(prompt, [{"role": "user", "content": u if u else "Output: "}], self._param.gen_conf())
         return pd.DataFrame([ans])
````
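The rewritten get_input_elements derives the component's inputs from placeholders in the prompt instead of from a stored parameter list. The extraction regex in isolation, on made-up placeholder names:

```python
import re

# Hypothetical prompt with two component placeholders.
PROMPT = "Summarize {begin@file} using {retrieval:kb_0}."

# Same pattern the new get_input_elements() uses: a lowercase word, then
# ':' or '@', then an identifier, all wrapped in braces.
for m in re.finditer(r"\{([a-z]+[:@][a-z0-9_-]+)\}", PROMPT, flags=re.IGNORECASE):
    print(m.group(1))
# begin@file
# retrieval:kb_0
```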
agent/component/invoke.py (filename inferred from the Invoke class; the mirror dropped this file header)

````diff
@@ -35,12 +35,14 @@ class InvokeParam(ComponentParamBase):
         self.url = ""
         self.timeout = 60
         self.clean_html = False
+        self.datatype = "json"  # New parameter to determine data posting type

     def check(self):
         self.check_valid_value(self.method.lower(), "Type of content from the crawler", ['get', 'post', 'put'])
         self.check_empty(self.url, "End point URL")
         self.check_positive_integer(self.timeout, "Timeout time in second")
         self.check_boolean(self.clean_html, "Clean HTML")
+        self.check_valid_value(self.datatype.lower(), "Data post type", ['json', 'formdata'])  # Check for valid datapost value


 class Invoke(ComponentBase, ABC):
@@ -94,6 +96,13 @@ class Invoke(ComponentBase, ABC):
             return Invoke.be_output(response.text)

         if method == 'put':
+            if self._param.datatype.lower() == 'json':
+                response = requests.put(url=url,
+                                        json=args,
+                                        headers=headers,
+                                        proxies=proxies,
+                                        timeout=self._param.timeout)
+            else:
                 response = requests.put(url=url,
                                         data=args,
                                         headers=headers,
@@ -105,11 +114,18 @@ class Invoke(ComponentBase, ABC):
             return Invoke.be_output(response.text)

         if method == 'post':
+            if self._param.datatype.lower() == 'json':
+                response = requests.post(url=url,
+                                         json=args,
+                                         headers=headers,
+                                         proxies=proxies,
+                                         timeout=self._param.timeout)
+            else:
                 response = requests.post(url=url,
                                          data=args,
                                          headers=headers,
                                          proxies=proxies,
                                          timeout=self._param.timeout)
             if self._param.clean_html:
                 sections = HtmlParser()(None, response.content)
                 return Invoke.be_output("\n".join(sections))
````
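The new `datatype` switch selects between requests' `json=` (an application/json body) and `data=` (form-encoded fields). The difference, shown with prepared requests so nothing goes over the network (the URL is a placeholder):

```python
import requests

args = {"q": "ragflow"}

json_req = requests.Request("POST", "https://example.com/api", json=args).prepare()
form_req = requests.Request("POST", "https://example.com/api", data=args).prepare()

print(json_req.headers["Content-Type"], json_req.body)  # application/json  b'{"q": "ragflow"}'
print(form_req.headers["Content-Type"], form_req.body)  # application/x-www-form-urlencoded  q=ragflow
```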
agent/component/retrieval.py (filename inferred from the Retrieval classes; the mirror dropped this file header)

````diff
@@ -19,11 +19,11 @@ from abc import ABC
 import pandas as pd

 from api.db import LLMType
-from api.db.services.dialog_service import label_question
 from api.db.services.knowledgebase_service import KnowledgebaseService
 from api.db.services.llm_service import LLMBundle
 from api import settings
 from agent.component.base import ComponentBase, ComponentParamBase
+from rag.app.tag import label_question


 class RetrievalParam(ComponentParamBase):
@@ -43,7 +43,7 @@ class RetrievalParam(ComponentParamBase):

     def check(self):
         self.check_decimal_float(self.similarity_threshold, "[Retrieval] Similarity threshold")
-        self.check_decimal_float(self.keywords_similarity_weight, "[Retrieval] Keywords similarity weight")
+        self.check_decimal_float(self.keywords_similarity_weight, "[Retrieval] Keyword similarity weight")
         self.check_positive_number(self.top_n, "[Retrieval] Top N")
````
@@ -13,7 +13,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#
import logging
from abc import ABC
from api.db import LLMType
from api.db.services.llm_service import LLMBundle
@@ -21,36 +20,33 @@ from agent.component import GenerateParam, Generate


class RewriteQuestionParam(GenerateParam):

    """
    Define the QuestionRewrite component parameters.
    """

    def __init__(self):
        super().__init__()
        self.temperature = 0.9
        self.prompt = ""
        self.language = ""

    def check(self):
        super().check()

    def get_prompt(self, conv):
        self.prompt = """
You are an expert at query expansion to generate a paraphrasing of a question.
I can't retrieval relevant information from the knowledge base by using user's question directly.
You need to expand or paraphrase user's question by multiple ways such as using synonyms words/phrase,
writing the abbreviation in its entirety, adding some extra descriptions or explanations,
changing the way of expression, translating the original question into another language (English/Chinese), etc.
And return 5 versions of question and one is from translation.
Just list the question. No other words are needed.
"""
        return f"""
    def get_prompt(self, conv, language, query):
        prompt = """
Role: A helpful assistant
Task: Generate a full user question that would follow the conversation.
Requirements & Restrictions:
- Text generated MUST be in the same language of the original user's question.
- If the user's latest question is completely, don't do anything, just return the original question.
- DON'T generate anything except a refined question.
- DON'T generate anything except a refined question."""

        if language:
            prompt += f"""
- Text generated MUST be in {language}"""

        prompt += f"""
######################
-Examples-
######################
@@ -68,7 +64,7 @@ USER: What is the name of Donald Trump's father?
ASSISTANT: Fred Trump.
USER: And his mother?
ASSISTANT: Mary Trump.
User: What's her full name?
USER: What's her full name?
###############
Output: What's the full name of Donald Trump's mother Mary Trump?
######################
@@ -76,8 +72,8 @@ Output: What's the full name of Donald Trump's mother Mary Trump?
## Conversation
{conv}
###############
"""
        return self.prompt
"""
        return prompt


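The rewritten get_prompt(conv, language, query) assembles the instruction block first, appends the "- Text generated MUST be in {language}" constraint only when a language is configured, and then attaches the few-shot examples and the running conversation. A quick way to inspect the assembled text (a minimal sketch, assuming the param class can be instantiated on its own outside a canvas):

param = RewriteQuestionParam()
conv = "USER: What is the name of Donald Trump's father?\nASSISTANT: Fred Trump.\nUSER: And his mother?"
print(param.get_prompt(conv, "English", "And his mother?"))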
class RewriteQuestion(Generate, ABC):
@@ -85,21 +81,62 @@ class RewriteQuestion(Generate, ABC):

    def _run(self, history, **kwargs):
        hist = self._canvas.get_history(self._param.message_history_window_size)
        query = self.get_input()
        query = str(query["content"][0]) if "content" in query else ""
        conv = []
        for m in hist:
            if m["role"] not in ["user", "assistant"]:
                continue
            conv.append("{}: {}".format(m["role"].upper(), m["content"]))
        conv = "\n".join(conv)

        chat_mdl = LLMBundle(self._canvas.get_tenant_id(), LLMType.CHAT, self._param.llm_id)
        ans = chat_mdl.chat(self._param.get_prompt(conv), [{"role": "user", "content": "Output: "}],
                            self._param.gen_conf())
        ans = chat_mdl.chat(self._param.get_prompt(conv, self.gen_lang(self._param.language), query),
                            [{"role": "user", "content": "Output: "}], self._param.gen_conf())
        self._canvas.history.pop()
        self._canvas.history.append(("user", ans))

        logging.debug(ans)
        return RewriteQuestion.be_output(ans)

    @staticmethod
    def gen_lang(language):
        # convert code lang to language word for the prompt
        language_dict = {'af': 'Afrikaans', 'ak': 'Akan', 'sq': 'Albanian', 'ws': 'Samoan', 'am': 'Amharic',
                         'ar': 'Arabic', 'hy': 'Armenian', 'az': 'Azerbaijani', 'eu': 'Basque', 'be': 'Belarusian',
                         'bem': 'Bemba', 'bn': 'Bengali', 'bh': 'Bihari',
                         'xx-bork': 'Bork', 'bs': 'Bosnian', 'br': 'Breton', 'bg': 'Bulgarian', 'bt': 'Bhutani',
                         'km': 'Cambodian', 'ca': 'Catalan', 'chr': 'Cherokee', 'ny': 'Chichewa', 'zh-cn': 'Chinese',
                         'zh-tw': 'Chinese', 'co': 'Corsican',
                         'hr': 'Croatian', 'cs': 'Czech', 'da': 'Danish', 'nl': 'Dutch', 'xx-elmer': 'Elmer',
                         'en': 'English', 'eo': 'Esperanto', 'et': 'Estonian', 'ee': 'Ewe', 'fo': 'Faroese',
                         'tl': 'Filipino', 'fi': 'Finnish', 'fr': 'French',
                         'fy': 'Frisian', 'gaa': 'Ga', 'gl': 'Galician', 'ka': 'Georgian', 'de': 'German',
                         'el': 'Greek', 'kl': 'Greenlandic', 'gn': 'Guarani', 'gu': 'Gujarati', 'xx-hacker': 'Hacker',
                         'ht': 'Haitian Creole', 'ha': 'Hausa', 'haw': 'Hawaiian',
                         'iw': 'Hebrew', 'hi': 'Hindi', 'hu': 'Hungarian', 'is': 'Icelandic', 'ig': 'Igbo',
                         'id': 'Indonesian', 'ia': 'Interlingua', 'ga': 'Irish', 'it': 'Italian', 'ja': 'Japanese',
                         'jw': 'Javanese', 'kn': 'Kannada', 'kk': 'Kazakh', 'rw': 'Kinyarwanda',
                         'rn': 'Kirundi', 'xx-klingon': 'Klingon', 'kg': 'Kongo', 'ko': 'Korean', 'kri': 'Krio',
                         'ku': 'Kurdish', 'ckb': 'Kurdish (Sorani)', 'ky': 'Kyrgyz', 'lo': 'Laothian', 'la': 'Latin',
                         'lv': 'Latvian', 'ln': 'Lingala', 'lt': 'Lithuanian',
                         'loz': 'Lozi', 'lg': 'Luganda', 'ach': 'Luo', 'mk': 'Macedonian', 'mg': 'Malagasy',
                         'ms': 'Malay', 'ml': 'Malayalam', 'mt': 'Maltese', 'mv': 'Maldivian', 'mi': 'Maori',
                         'mr': 'Marathi', 'mfe': 'Mauritian Creole', 'mo': 'Moldavian', 'mn': 'Mongolian',
                         'sr-me': 'Montenegrin', 'my': 'Burmese', 'ne': 'Nepali', 'pcm': 'Nigerian Pidgin',
                         'nso': 'Northern Sotho', 'no': 'Norwegian', 'nn': 'Norwegian Nynorsk', 'oc': 'Occitan',
                         'or': 'Oriya', 'om': 'Oromo', 'ps': 'Pashto', 'fa': 'Persian',
                         'xx-pirate': 'Pirate', 'pl': 'Polish', 'pt': 'Portuguese', 'pt-br': 'Portuguese (Brazilian)',
                         'pt-pt': 'Portuguese (Portugal)', 'pa': 'Punjabi', 'qu': 'Quechua', 'ro': 'Romanian',
                         'rm': 'Romansh', 'nyn': 'Runyankole', 'ru': 'Russian', 'gd': 'Scots Gaelic',
                         'sr': 'Serbian', 'sh': 'Serbo-Croatian', 'st': 'Sesotho', 'tn': 'Setswana',
                         'crs': 'Seychellois Creole', 'sn': 'Shona', 'sd': 'Sindhi', 'si': 'Sinhalese', 'sk': 'Slovak',
                         'sl': 'Slovenian', 'so': 'Somali', 'es': 'Spanish', 'es-419': 'Spanish (Latin America)',
                         'su': 'Sundanese',
                         'sw': 'Swahili', 'sv': 'Swedish', 'tg': 'Tajik', 'ta': 'Tamil', 'tt': 'Tatar', 'te': 'Telugu',
                         'th': 'Thai', 'ti': 'Tigrinya', 'to': 'Tongan', 'lua': 'Tshiluba', 'tum': 'Tumbuka',
                         'tr': 'Turkish', 'tk': 'Turkmen', 'tw': 'Twi',
                         'ug': 'Uyghur', 'uk': 'Ukrainian', 'ur': 'Urdu', 'uz': 'Uzbek', 'vu': 'Vanuatu',
                         'vi': 'Vietnamese', 'cy': 'Welsh', 'wo': 'Wolof', 'xh': 'Xhosa', 'yi': 'Yiddish',
                         'yo': 'Yoruba', 'zu': 'Zulu'}
        if language in language_dict:
            return language_dict[language]
        else:
            return ""

@@ -38,27 +38,39 @@ class Template(ComponentBase):
    component_name = "Template"

    def get_dependent_components(self):
        cpnts = set(
            [
                para["component_id"].split("@")[0]
                for para in self._param.parameters
                if para.get("component_id")
                and para["component_id"].lower().find("answer") < 0
                and para["component_id"].lower().find("begin") < 0
            ]
        )
        inputs = self.get_input_elements()
        cpnts = set([i["key"] for i in inputs if i["key"].lower().find("answer") < 0 and i["key"].lower().find("begin") < 0])
        return list(cpnts)

    def get_input_elements(self):
        key_set = set([])
        res = []
        for r in re.finditer(r"\{([a-z]+[:@][a-z0-9_-]+)\}", self._param.content, flags=re.IGNORECASE):
            cpn_id = r.group(1)
            if cpn_id in key_set:
                continue
            if cpn_id.lower().find("begin@") == 0:
                cpn_id, key = cpn_id.split("@")
                for p in self._canvas.get_component(cpn_id)["obj"]._param.query:
                    if p["key"] != key:
                        continue
                    res.append({"key": r.group(1), "name": p["name"]})
                    key_set.add(r.group(1))
                continue
            cpn_nm = self._canvas.get_component_name(cpn_id)
            if not cpn_nm:
                continue
            res.append({"key": cpn_id, "name": cpn_nm})
            key_set.add(cpn_id)
        return res

    def _run(self, history, **kwargs):
        content = self._param.content

        self._param.inputs = []
        for para in self._param.parameters:
            if not para.get("component_id"):
                continue
            component_id = para["component_id"].split("@")[0]
            if para["component_id"].lower().find("@") >= 0:
                cpn_id, key = para["component_id"].split("@")
        for para in self.get_input_elements():
            if para["key"].lower().find("begin@") == 0:
                cpn_id, key = para["key"].split("@")
                for p in self._canvas.get_component(cpn_id)["obj"]._param.query:
                    if p["key"] == key:
                        value = p.get("value", "")
@@ -68,6 +80,7 @@ class Template(ComponentBase):
                assert False, f"Can't find parameter '{key}' for {cpn_id}"
                continue

            component_id = para["key"]
            cpn = self._canvas.get_component(component_id)["obj"]
            if cpn.component_name.lower() == "answer":
                hist = self._canvas.get_history(1)
@@ -114,7 +127,7 @@

    def make_kwargs(self, para, kwargs, value):
        self._param.inputs.append(
            {"component_id": para["component_id"], "content": value}
            {"component_id": para["key"], "content": value}
        )
        try:
            value = json.loads(value)

@@ -8,9 +8,7 @@
"components": {
"Answer:SocialAdsWonder": {
"downstream": [
"Retrieval:SillyPartsCheer",
"Retrieval:BrownStreetsRhyme",
"Retrieval:OddSingersRefuse"
"RewriteQuestion:WildIdeasTell"
],
"obj": {
"component_name": "Answer",
@@ -19,8 +17,8 @@
"params": {}
},
"upstream": [
"begin",
"ExeSQL:QuietRosesRun"
"ExeSQL:QuietRosesRun",
"begin"
]
},
"ExeSQL:QuietRosesRun": {
@@ -55,42 +53,23 @@
}
},
"upstream": [
"Generate:CuteSidesBuy"
"Generate:BlueShirtsLaugh"
]
},
"Generate:CuteSidesBuy": {
"Generate:BlueShirtsLaugh": {
"downstream": [
"ExeSQL:QuietRosesRun"
],
"obj": {
"component_name": "Generate",
"inputs": [],
"output": {},
"params": {
"cite": false,
"frequency_penalty": 0.7,
"llm_id": "deepseek-chat@DeepSeek",
"max_tokens": 512,
"message_history_window_size": 1,
"parameters": [
{
"component_id": "Retrieval:SillyPartsCheer",
"id": "2a77e574-a0a6-4a1a-af39-cb192f1d21f5",
"key": "ddl_input"
},
{
"component_id": "Retrieval:OddSingersRefuse",
"id": "83941a85-0b59-408e-97e5-504964b0e090",
"key": "db_input"
},
{
"component_id": "Retrieval:BrownStreetsRhyme",
"id": "c63d0ae6-7ee2-44a2-8a95-69d03c90cb44",
"key": "sql_input"
}
],
"parameters": [],
"presence_penalty": 0.4,
"prompt": "\n##The user provides a question and you provide SQL. You will only respond with SQL code and not with any explanations.\n\n##You may use the following DDL statements as a reference for what tables might be available. Use responses to past questions also to guide you: {ddl_input}.\n\n##You may use the following documentation as a reference for what tables might be available. Use responses to past questions also to guide you: {db_input}.\n\n##You may use the following SQL statements as a reference for what tables might be available. Use responses to past questions also to guide you: {sql_input}.\n\n##Respond with only SQL code. Do not answer with any explanations -- just the code.",
"prompt": "\n##The user provides a question and you provide SQL. You will only respond with SQL code and not with any explanations.\n\n##You may use the following DDL statements as a reference for what tables might be available. Use responses to past questions also to guide you: {Retrieval:SillyPartsCheer}.\n\n##You may use the following documentation as a reference for what tables might be available. Use responses to past questions also to guide you: {Retrieval:OddSingersRefuse}.\n\n##You may use the following SQL statements as a reference for what tables might be available. Use responses to past questions also to guide you: {Retrieval:BrownStreetsRhyme}.\n\n##Respond with only SQL code. Do not answer with any explanations -- just the code.",
"temperature": 0.1,
"top_p": 0.3
}
@@ -103,7 +82,7 @@
},
"Retrieval:BrownStreetsRhyme": {
"downstream": [
"Generate:CuteSidesBuy"
"Generate:BlueShirtsLaugh"
],
"obj": {
"component_name": "Retrieval",
@@ -124,12 +103,12 @@
}
},
"upstream": [
"Answer:SocialAdsWonder"
"RewriteQuestion:WildIdeasTell"
]
},
"Retrieval:OddSingersRefuse": {
"downstream": [
"Generate:CuteSidesBuy"
"Generate:BlueShirtsLaugh"
],
"obj": {
"component_name": "Retrieval",
@@ -150,12 +129,12 @@
}
},
"upstream": [
"Answer:SocialAdsWonder"
"RewriteQuestion:WildIdeasTell"
]
},
"Retrieval:SillyPartsCheer": {
"downstream": [
"Generate:CuteSidesBuy"
"Generate:BlueShirtsLaugh"
],
"obj": {
"component_name": "Retrieval",
@@ -175,6 +154,34 @@
"top_n": 18
}
},
"upstream": [
"RewriteQuestion:WildIdeasTell"
]
},
"RewriteQuestion:WildIdeasTell": {
"downstream": [
"Retrieval:OddSingersRefuse",
"Retrieval:BrownStreetsRhyme",
"Retrieval:SillyPartsCheer"
],
"obj": {
"component_name": "RewriteQuestion",
"params": {
"frequencyPenaltyEnabled": true,
"frequency_penalty": 0.7,
"llm_id": "deepseek-chat@DeepSeek",
"maxTokensEnabled": true,
"max_tokens": 256,
"message_history_window_size": 6,
"parameter": "Precise",
"presencePenaltyEnabled": true,
"presence_penalty": 0.4,
"temperature": 0.1,
"temperatureEnabled": true,
"topPEnabled": true,
"top_p": 0.3
}
},
"upstream": [
"Answer:SocialAdsWonder"
]
@@ -202,20 +209,34 @@
"graph": {
"edges": [
{
"id": "reactflow__edge-begin-Answer:SocialAdsWonderc",
"id": "xy-edge__ExeSQL:QuietRosesRunc-Answer:SocialAdsWonderc",
"markerEnd": "logo",
"source": "begin",
"sourceHandle": null,
"source": "ExeSQL:QuietRosesRun",
"sourceHandle": "c",
"style": {
"stroke": "rgb(202 197 245)",
"strokeWidth": 2
},
"target": "Answer:SocialAdsWonder",
"targetHandle": "c",
"type": "buttonEdge"
"type": "buttonEdge",
"zIndex": 1001
},
{
"id": "reactflow__edge-Answer:SocialAdsWonderb-Retrieval:SillyPartsCheerc",
"id": "xy-edge__begin-Answer:SocialAdsWonderc",
"markerEnd": "logo",
"source": "begin",
"style": {
"stroke": "rgb(202 197 245)",
"strokeWidth": 2
},
"target": "Answer:SocialAdsWonder",
"targetHandle": "c",
"type": "buttonEdge",
"zIndex": 1001
},
{
"id": "xy-edge__Answer:SocialAdsWonderb-RewriteQuestion:WildIdeasTellc",
"markerEnd": "logo",
"source": "Answer:SocialAdsWonder",
"sourceHandle": "b",
@@ -223,27 +244,15 @@
"stroke": "rgb(202 197 245)",
"strokeWidth": 2
},
"target": "Retrieval:SillyPartsCheer",
"target": "RewriteQuestion:WildIdeasTell",
"targetHandle": "c",
"type": "buttonEdge"
"type": "buttonEdge",
"zIndex": 1001
},
{
"id": "reactflow__edge-Answer:SocialAdsWonderb-Retrieval:BrownStreetsRhymec",
"id": "xy-edge__RewriteQuestion:WildIdeasTellb-Retrieval:OddSingersRefusec",
"markerEnd": "logo",
"source": "Answer:SocialAdsWonder",
"sourceHandle": "b",
"style": {
"stroke": "rgb(202 197 245)",
"strokeWidth": 2
},
"target": "Retrieval:BrownStreetsRhyme",
"targetHandle": "c",
"type": "buttonEdge"
},
{
"id": "reactflow__edge-Answer:SocialAdsWonderb-Retrieval:OddSingersRefusec",
"markerEnd": "logo",
"source": "Answer:SocialAdsWonder",
"source": "RewriteQuestion:WildIdeasTell",
"sourceHandle": "b",
"style": {
"stroke": "rgb(202 197 245)",
@@ -251,51 +260,41 @@
},
"target": "Retrieval:OddSingersRefuse",
"targetHandle": "c",
"type": "buttonEdge"
"type": "buttonEdge",
"zIndex": 1001
},
{
"id": "reactflow__edge-Retrieval:SillyPartsCheerb-Generate:CuteSidesBuyb",
"id": "xy-edge__RewriteQuestion:WildIdeasTellb-Retrieval:BrownStreetsRhymec",
"markerEnd": "logo",
"source": "Retrieval:SillyPartsCheer",
"source": "RewriteQuestion:WildIdeasTell",
"sourceHandle": "b",
"style": {
"stroke": "rgb(202 197 245)",
"strokeWidth": 2
},
"target": "Generate:CuteSidesBuy",
"targetHandle": "b",
"type": "buttonEdge"
"target": "Retrieval:BrownStreetsRhyme",
"targetHandle": "c",
"type": "buttonEdge",
"zIndex": 1001
},
{
"id": "reactflow__edge-Retrieval:BrownStreetsRhymeb-Generate:CuteSidesBuyb",
"id": "xy-edge__RewriteQuestion:WildIdeasTellb-Retrieval:SillyPartsCheerc",
"markerEnd": "logo",
"source": "Retrieval:BrownStreetsRhyme",
"source": "RewriteQuestion:WildIdeasTell",
"sourceHandle": "b",
"style": {
"stroke": "rgb(202 197 245)",
"strokeWidth": 2
},
"target": "Generate:CuteSidesBuy",
"targetHandle": "b",
"type": "buttonEdge"
"target": "Retrieval:SillyPartsCheer",
"targetHandle": "c",
"type": "buttonEdge",
"zIndex": 1001
},
{
"id": "reactflow__edge-Retrieval:OddSingersRefuseb-Generate:CuteSidesBuyb",
"id": "xy-edge__Generate:BlueShirtsLaughc-ExeSQL:QuietRosesRunb",
"markerEnd": "logo",
"source": "Retrieval:OddSingersRefuse",
"sourceHandle": "b",
"style": {
"stroke": "rgb(202 197 245)",
"strokeWidth": 2
},
"target": "Generate:CuteSidesBuy",
"targetHandle": "b",
"type": "buttonEdge"
},
{
"id": "xy-edge__Generate:CuteSidesBuyc-ExeSQL:QuietRosesRunb",
"markerEnd": "logo",
"source": "Generate:CuteSidesBuy",
"source": "Generate:BlueShirtsLaugh",
"sourceHandle": "c",
"style": {
"stroke": "rgb(202 197 245)",
@@ -307,16 +306,44 @@
"zIndex": 1001
},
{
"id": "xy-edge__ExeSQL:QuietRosesRunc-Answer:SocialAdsWonderc",
"id": "xy-edge__Retrieval:SillyPartsCheerb-Generate:BlueShirtsLaughb",
"markerEnd": "logo",
"source": "ExeSQL:QuietRosesRun",
"sourceHandle": "c",
"source": "Retrieval:SillyPartsCheer",
"sourceHandle": "b",
"style": {
"stroke": "rgb(202 197 245)",
"strokeWidth": 2
},
"target": "Answer:SocialAdsWonder",
"targetHandle": "c",
"target": "Generate:BlueShirtsLaugh",
"targetHandle": "b",
"type": "buttonEdge",
"zIndex": 1001
},
{
"id": "xy-edge__Retrieval:BrownStreetsRhymeb-Generate:BlueShirtsLaughb",
"markerEnd": "logo",
"source": "Retrieval:BrownStreetsRhyme",
"sourceHandle": "b",
"style": {
"stroke": "rgb(202 197 245)",
"strokeWidth": 2
},
"target": "Generate:BlueShirtsLaugh",
"targetHandle": "b",
"type": "buttonEdge",
"zIndex": 1001
},
{
"id": "xy-edge__Retrieval:OddSingersRefuseb-Generate:BlueShirtsLaughb",
"markerEnd": "logo",
"source": "Retrieval:OddSingersRefuse",
"sourceHandle": "b",
"style": {
"stroke": "rgb(202 197 245)",
"strokeWidth": 2
},
"target": "Generate:BlueShirtsLaugh",
"targetHandle": "b",
"type": "buttonEdge",
"zIndex": 1001
}
@@ -362,8 +389,8 @@
"width": 200
},
"position": {
"x": -58.36886074370702,
"y": 272.1213623212045
"x": -265.59460323639587,
"y": 271.1879130306969
},
"positionAbsolute": {
"x": -58.36886074370702,
@@ -375,100 +402,6 @@
"type": "logicNode",
"width": 200
},
{
"data": {
"form": {
"text": "The large model modifies the original SQL statement based on the error message and returns the modified SQL statement."
},
"label": "Note",
"name": "N: Fix SQL Statement"
},
"dragging": false,
"height": 172,
"id": "Note:SevenDancersMarry",
"measured": {
"height": 172,
"width": 228
},
"position": {
"x": -62.91736862436424,
"y": 93.08952291375991
},
"positionAbsolute": {
"x": -62.91736862436424,
"y": 93.08952291375991
},
"resizing": false,
"selected": false,
"sourcePosition": "right",
"style": {
"height": 172,
"width": 228
},
"targetPosition": "left",
"type": "noteNode",
"width": 228
},
{
"data": {
"form": {
"cite": false,
"frequencyPenaltyEnabled": true,
"frequency_penalty": 0.7,
"llm_id": "deepseek-chat@DeepSeek",
"maxTokensEnabled": true,
"max_tokens": 512,
"message_history_window_size": 1,
"parameter": "Precise",
"parameters": [
{
"component_id": "Retrieval:SillyPartsCheer",
"id": "2a77e574-a0a6-4a1a-af39-cb192f1d21f5",
"key": "ddl_input"
},
{
"component_id": "Retrieval:OddSingersRefuse",
"id": "83941a85-0b59-408e-97e5-504964b0e090",
"key": "db_input"
},
{
"component_id": "Retrieval:BrownStreetsRhyme",
"id": "c63d0ae6-7ee2-44a2-8a95-69d03c90cb44",
"key": "sql_input"
}
],
"presencePenaltyEnabled": true,
"presence_penalty": 0.4,
"prompt": "\n##The user provides a question and you provide SQL. You will only respond with SQL code and not with any explanations.\n\n##You may use the following DDL statements as a reference for what tables might be available. Use responses to past questions also to guide you: {ddl_input}.\n\n##You may use the following documentation as a reference for what tables might be available. Use responses to past questions also to guide you: {db_input}.\n\n##You may use the following SQL statements as a reference for what tables might be available. Use responses to past questions also to guide you: {sql_input}.\n\n##Respond with only SQL code. Do not answer with any explanations -- just the code.",
"temperature": 0.1,
"temperatureEnabled": true,
"topPEnabled": true,
"top_p": 0.3
},
"label": "Generate",
"name": "Generate SQL Statement LLM"
},
"dragging": false,
"height": 232,
"id": "Generate:CuteSidesBuy",
"measured": {
"height": 232,
"width": 200
},
"position": {
"x": 191.98081287844155,
"y": -255.36496490928363
},
"positionAbsolute": {
"x": 191.98081287844155,
"y": -255.36496490928363
},
"selected": false,
"sourcePosition": "right",
"targetPosition": "left",
"type": "generateNode",
"width": 200
},
{
"data": {
"form": {
@@ -495,8 +428,8 @@
"width": 200
},
"position": {
"x": 198.3020069445181,
"y": -0.9595420072386389
"x": 194.69889765569846,
"y": 61.49435233230193
},
"positionAbsolute": {
"x": 198.3020069445181,
@@ -534,8 +467,8 @@
"width": 200
},
"position": {
"x": 231.17453176754782,
"y": 123.02661106951555
"x": 240.78282320440022,
"y": 162.66081324653166
},
"positionAbsolute": {
"x": 231.17453176754782,
@@ -573,8 +506,8 @@
"width": 200
},
"position": {
"x": 267.7575479510707,
"y": 249.15603226400776
"x": 284.5720579655624,
"y": 246.75395940479467
},
"positionAbsolute": {
"x": 267.7575479510707,
@@ -596,15 +529,15 @@
},
"dragHandle": ".note-drag-handle",
"dragging": false,
"height": 176,
"height": 165,
"id": "Note:HeavyIconsFollow",
"measured": {
"height": 176,
"width": 266
"height": 165,
"width": 347
},
"position": {
"x": -626.6563777191027,
"y": -48.82220889683933
"x": -709.8631299685773,
"y": 96.50319908555313
},
"positionAbsolute": {
"x": -626.6563777191027,
@@ -619,7 +552,7 @@
},
"targetPosition": "left",
"type": "noteNode",
"width": 266
"width": 347
},
{
"data": {
@@ -631,15 +564,15 @@
},
"dragHandle": ".note-drag-handle",
"dragging": false,
"height": 162,
"height": 159,
"id": "Note:PinkTaxesClean",
"measured": {
"height": 162,
"width": 210
"height": 159,
"width": 259
},
"position": {
"x": -52.004609812312424,
"y": 336.95180237635077
"x": -253.39933811515345,
"y": 353.7538896054877
},
"positionAbsolute": {
"x": -52.004609812312424,
@@ -654,7 +587,7 @@
},
"targetPosition": "left",
"type": "noteNode",
"width": 210
"width": 259
},
{
"data": {
@@ -701,15 +634,15 @@
},
"dragHandle": ".note-drag-handle",
"dragging": false,
"height": 131,
"height": 143,
"id": "Note:HugeGroupsScream",
"measured": {
"height": 131,
"width": 387
"height": 143,
"width": 390
},
"position": {
"x": 606.1206536213404,
"y": 113.09441734894426
"x": 612.8793199038756,
"y": 169.1868576959871
},
"positionAbsolute": {
"x": 606.1206536213404,
@@ -724,7 +657,7 @@
},
"targetPosition": "left",
"type": "noteNode",
"width": 387
"width": 390
},
{
"data": {
@@ -736,15 +669,15 @@
},
"dragHandle": ".note-drag-handle",
"dragging": false,
"height": 266,
"height": 208,
"id": "Note:GreenCrewsArrive",
"measured": {
"height": 266,
"width": 266
"height": 208,
"width": 467
},
"position": {
"x": 545.3423934788841,
"y": -166.58872868890683
"x": 649.3481710005742,
"y": -87.70873445087781
},
"positionAbsolute": {
"x": 545.3423934788841,
@@ -759,7 +692,7 @@
},
"targetPosition": "left",
"type": "noteNode",
"width": 266
"width": 467
},
{
"data": {
@@ -771,15 +704,15 @@
},
"dragHandle": ".note-drag-handle",
"dragging": false,
"height": 175,
"height": 196,
"id": "Note:EightTurtlesLike",
"measured": {
"height": 175,
"width": 265
"height": 196,
"width": 341
},
"position": {
"x": 222.2150747084395,
"y": -445.32694170868734
"x": 134.0070839275931,
"y": -345.41228234051727
},
"positionAbsolute": {
"x": 222.2150747084395,
@@ -794,34 +727,34 @@
},
"targetPosition": "left",
"type": "noteNode",
"width": 265
"width": 341
},
{
"data": {
"form": {
"text": "Executes the SQL statement in the database and returns the result.\n\nAfter configuring an accessible database, press 'Test' to ensure the accessibility."
"text": "Executes the SQL statement in the database and returns the result.\n\nAfter configuring an accessible database, press 'Test' to ensure the accessibility.\n\nThe large model modifies the original SQL statement based on the error message and returns the modified SQL statement."
},
"label": "Note",
"name": "N: Execute SQL"
},
"dragHandle": ".note-drag-handle",
"dragging": false,
"height": 178,
"height": 276,
"id": "Note:FreshKidsTalk",
"measured": {
"height": 178,
"width": 346
"height": 276,
"width": 336
},
"position": {
"x": -293.35258272850365,
"y": -206.3839921107096
"x": -304.3577648765364,
"y": -288.054469323955
},
"positionAbsolute": {
"x": -251.5866574377311,
"y": -372.2192837064241
},
"resizing": false,
"selected": true,
"selected": false,
"sourcePosition": "right",
"style": {
"height": 178,
@@ -829,7 +762,7 @@
},
"targetPosition": "left",
"type": "noteNode",
"width": 346
"width": 336
},
{
"data": {
@@ -856,7 +789,7 @@
"username": "root"
},
"label": "ExeSQL",
"name": "ExeSQL_0"
"name": "ExeSQL"
},
"dragging": false,
"id": "ExeSQL:QuietRosesRun",
@@ -872,6 +805,79 @@
"sourcePosition": "right",
"targetPosition": "left",
"type": "ragNode"
},
{
"data": {
"form": {
"frequencyPenaltyEnabled": true,
"frequency_penalty": 0.7,
"llm_id": "deepseek-chat@DeepSeek",
"maxTokensEnabled": true,
"max_tokens": 256,
"message_history_window_size": 6,
"parameter": "Precise",
"presencePenaltyEnabled": true,
"presence_penalty": 0.4,
"temperature": 0.1,
"temperatureEnabled": true,
"topPEnabled": true,
"top_p": 0.3
},
"label": "RewriteQuestion",
"name": "RefineQuestion"
},
"dragging": false,
"id": "RewriteQuestion:WildIdeasTell",
"measured": {
"height": 106,
"width": 200
},
"position": {
"x": -7.734116293705583,
"y": 236.92372325779243
},
"selected": false,
"sourcePosition": "right",
"targetPosition": "left",
"type": "rewriteNode"
},
{
"data": {
"form": {
"cite": false,
"frequencyPenaltyEnabled": true,
"frequency_penalty": 0.7,
"llm_id": "deepseek-chat@DeepSeek",
"maxTokensEnabled": false,
"max_tokens": 256,
"message_history_window_size": 1,
"parameter": "Precise",
"parameters": [],
"presencePenaltyEnabled": true,
"presence_penalty": 0.4,
"prompt": "\n##The user provides a question and you provide SQL. You will only respond with SQL code and not with any explanations.\n\n##You may use the following DDL statements as a reference for what tables might be available. Use responses to past questions also to guide you: {Retrieval:SillyPartsCheer}.\n\n##You may use the following documentation as a reference for what tables might be available. Use responses to past questions also to guide you: {Retrieval:OddSingersRefuse}.\n\n##You may use the following SQL statements as a reference for what tables might be available. Use responses to past questions also to guide you: {Retrieval:BrownStreetsRhyme}.\n\n##Respond with only SQL code. Do not answer with any explanations -- just the code.",
"temperature": 0.1,
"temperatureEnabled": true,
"topPEnabled": true,
"top_p": 0.3
},
"label": "Generate",
"name": "Generate SQL Statement LLM"
},
"dragging": false,
"id": "Generate:BlueShirtsLaugh",
"measured": {
"height": 106,
"width": 200
},
"position": {
"x": 147.62383788095065,
"y": -116.47462293167156
},
"selected": false,
"sourcePosition": "right",
"targetPosition": "left",
"type": "generateNode"
}
]
},

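In the updated canvas the Generate prompt references its upstream components directly as {Retrieval:SillyPartsCheer}-style placeholders, which is why the explicit ddl_input/db_input/sql_input parameter bindings disappear. A minimal sketch of the substitution this convention implies (illustrative only; the canvas engine's real resolution logic lives in the Template/Generate components):

# hypothetical component outputs keyed by component id
outputs = {
    "Retrieval:SillyPartsCheer": "CREATE TABLE orders (...);",
    "Retrieval:OddSingersRefuse": "orders stores one row per purchase.",
    "Retrieval:BrownStreetsRhyme": "SELECT * FROM orders LIMIT 10;",
}
prompt = "##DDL: {Retrieval:SillyPartsCheer}\n##Docs: {Retrieval:OddSingersRefuse}\n##SQL: {Retrieval:BrownStreetsRhyme}"
for cpn_id, text in outputs.items():
    prompt = prompt.replace("{%s}" % cpn_id, text)
print(prompt)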
File diff suppressed because it is too large
File diff suppressed because it is too large
File diff suppressed because it is too large
@@ -10,93 +10,103 @@
"downstream": [],
"obj": {
"component_name": "Answer",
"inputs": [],
"output": null,
"params": {
"debug_inputs": [],
"inputs": [],
"message_history_window_size": 22,
"output": null,
"output_var_name": "output",
"post_answers": [],
"query": []
}
},
"upstream": [
"Generate:ChubbyCougarsRush"
"Generate:FuzzyEmusWork"
]
},
"Generate:ChubbyCougarsRush": {
"Generate:FuzzyEmusWork": {
"downstream": [
"Answer:TinyGamesGuess"
],
"obj": {
"component_name": "Generate",
"inputs": [],
"output": null,
"params": {
"cite": false,
"debug_inputs": [],
"frequency_penalty": 0.7,
"inputs": [],
"llm_id": "deepseek-chat@DeepSeek",
"max_tokens": 0,
"message_history_window_size": 12,
"message_history_window_size": 1,
"output": null,
"output_var_name": "output",
"parameters": [
{
"component_id": "begin@lang",
"id": "73f48a67-b78f-4bcd-8326-a83c31073ab9",
"key": "target_lang"
},
{
"component_id": "begin@file",
"id": "c9142975-25b3-4199-8fce-aa0bc29a31f2",
"key": "source_text"
},
{
"component_id": "Generate:RichWordsDeny",
"id": "6c824b2a-fe3b-4336-95b5-e85f676bef39",
"key": "translation_1"
},
{
"component_id": "Generate:SlimyFrogsArgue",
"id": "f3bd4569-4852-43fa-b80a-e0dd27dd9e1c",
"key": "reflection"
}
],
"parameters": [],
"presence_penalty": 0.4,
"prompt": "Your task is to carefully read, then edit, a translation to {target_lang}, taking into\naccount a list of expert suggestions and constructive criticisms.\n\nThe source text, the initial translation, and the expert linguist suggestions are delimited by XML tags <SOURCE_TEXT></SOURCE_TEXT>, <TRANSLATION></TRANSLATION> and <EXPERT_SUGGESTIONS></EXPERT_SUGGESTIONS> \\\nas follows:\n\n<SOURCE_TEXT>\n{source_text}\n</SOURCE_TEXT>\n\n<TRANSLATION>\n{translation_1}\n</TRANSLATION>\n\n<EXPERT_SUGGESTIONS>\n{reflection}\n</EXPERT_SUGGESTIONS>\n\nPlease take into account the expert suggestions when editing the translation. Edit the translation by ensuring:\n\n(i) accuracy (by correcting errors of addition, mistranslation, omission, or untranslated text),\n(ii) fluency (by applying {target_lang} grammar, spelling and punctuation rules and ensuring there are no unnecessary repetitions), \n(iii) style (by ensuring the translations reflect the style of the source text)\n(iv) terminology (inappropriate for context, inconsistent use), or\n(v) other errors.\n\nOutput only the new translation and nothing else.",
"prompt": "Your task is to carefully read, then edit, a translation to {begin@lang}, taking into\naccount a list of expert suggestions and constructive criticisms.\n\nThe source text, the initial translation, and the expert linguist suggestions are delimited by XML tags <SOURCE_TEXT></SOURCE_TEXT>, <TRANSLATION></TRANSLATION> and <EXPERT_SUGGESTIONS></EXPERT_SUGGESTIONS>\nas follows:\n\n<SOURCE_TEXT>\n{begin@file}\n</SOURCE_TEXT>\n\n<TRANSLATION>\n{Generate:VastKeysKick}\n</TRANSLATION>\n\n<EXPERT_SUGGESTIONS>\n{Generate:ShinySquidsSneeze}\n</EXPERT_SUGGESTIONS>\n\nPlease take into account the expert suggestions when editing the translation. Edit the translation by ensuring:\n\n(i) accuracy (by correcting errors of addition, mistranslation, omission, or untranslated text),\n(ii) fluency (by applying {begin@lang} grammar, spelling and punctuation rules and ensuring there are no unnecessary repetitions), \n(iii) style (by ensuring the translations reflect the style of the source text)\n(iv) terminology (inappropriate for context, inconsistent use), or\n(v) other errors.\n\nOutput only the new translation and nothing else.",
"query": [],
"temperature": 0.1,
"top_p": 0.3
}
},
"upstream": [
"Generate:SlimyFrogsArgue"
"Generate:ShinySquidsSneeze"
]
},
"Generate:RichWordsDeny": {
"Generate:ShinySquidsSneeze": {
"downstream": [
"Generate:SlimyFrogsArgue"
"Generate:FuzzyEmusWork"
],
"obj": {
"component_name": "Generate",
"inputs": [],
"output": null,
"params": {
"cite": false,
"debug_inputs": [],
"frequency_penalty": 0.7,
"inputs": [],
"llm_id": "deepseek-chat@DeepSeek",
"max_tokens": 0,
"message_history_window_size": 12,
"message_history_window_size": 1,
"output": null,
"output_var_name": "output",
"parameters": [
{
"component_id": "begin@lang",
"id": "a36e78fb-b431-4ae6-afa8-77839587fcf8",
"key": "lang"
},
{
"component_id": "begin@file",
"id": "f8a704b7-693b-4480-aa9a-da4a83250059",
"key": "file"
}
],
"parameters": [],
"presence_penalty": 0.4,
"prompt": "Role: You are a professional translator proficient in {lang}, with an exceptional ability to convert specialized academic papers into accessible popular science articles. Please assist me in translating the following paragraph into {lang}, ensuring that its style resembles that of popular science articles in {lang}.\n\nRequirements & Restrictions:\n - Use Markdown format to output.\n - DO NOT overlook any details.\n\n\n<ORIGINAL_TEXT>\n{file}\n\n<TRANSLATED_TEXT>",
"prompt": "Your task is to carefully read a source text and a translation to {begin@lang}, and then give constructive criticisms and helpful suggestions to improve the translation. \n\nThe source text and initial translation, delimited by XML tags <SOURCE_TEXT></SOURCE_TEXT> and <TRANSLATION></TRANSLATION>, are as follows:\n\n<SOURCE_TEXT>\n{begin@file}\n</SOURCE_TEXT>\n\n<TRANSLATION>\n{Generate:VastKeysKick}\n</TRANSLATION>\n\nWhen writing suggestions, pay attention to whether there are ways to improve the translation's \n(i) accuracy (by correcting errors of addition, mistranslation, omission, or untranslated text),\n(ii) fluency (by applying {begin@lang} grammar, spelling and punctuation rules, and ensuring there are no unnecessary repetitions),\n(iii) style (by ensuring the translations reflect the style of the source text and take into account any cultural context),\n(iv) terminology (by ensuring terminology use is consistent and reflects the source text domain; and by only ensuring you use equivalent idioms {begin@lang}).\n\nWrite a list of specific, helpful and constructive suggestions for improving the translation.\nEach suggestion should address one specific part of the translation.\nOutput only the suggestions and nothing else.",
"query": [],
"temperature": 0.1,
"top_p": 0.3
}
},
"upstream": [
"Generate:VastKeysKick"
]
},
"Generate:VastKeysKick": {
"downstream": [
"Generate:ShinySquidsSneeze"
],
"obj": {
"component_name": "Generate",
"inputs": [],
"output": null,
"params": {
"cite": false,
"debug_inputs": [],
"frequency_penalty": 0.7,
"inputs": [],
"llm_id": "deepseek-chat@DeepSeek",
"max_tokens": 0,
"message_history_window_size": 1,
"output": null,
"output_var_name": "output",
"parameters": [],
"presence_penalty": 0.4,
"prompt": "Role: You are a professional translator proficient in {begin@lang}, with an exceptional ability to convert specialized academic papers into accessible popular science articles. Please assist me in translating the following paragraph into {begin@lang}, ensuring that its style resembles that of popular science articles in {begin@lang}.\n\nRequirements & Restrictions:\n - Use Markdown format to output.\n - DO NOT overlook any details.\n\n\n<ORIGINAL_TEXT>\n{begin@file}\n\n<TRANSLATED_TEXT>",
"query": [],
"temperature": 0.1,
"top_p": 0.3
@@ -106,58 +116,19 @@
"begin"
]
},
"Generate:SlimyFrogsArgue": {
"downstream": [
"Generate:ChubbyCougarsRush"
],
"obj": {
"component_name": "Generate",
"params": {
"cite": false,
"frequency_penalty": 0.7,
"inputs": [],
"llm_id": "deepseek-chat@DeepSeek",
"max_tokens": 0,
"message_history_window_size": 12,
"output_var_name": "output",
"parameters": [
{
"component_id": "begin@lang",
"id": "b2f5e7ec-7f77-485f-af15-461d0f1ca913",
"key": "target_lang"
},
{
"component_id": "begin@file",
"id": "fbc44092-9f9e-4e85-b5b1-dbd808239d3d",
"key": "source_text"
},
{
"component_id": "Generate:RichWordsDeny",
"id": "c253af54-61d4-40f3-9990-604e2212506f",
"key": "translation_1"
}
],
"presence_penalty": 0.4,
"prompt": "Your task is to carefully read a source text and a translation to {target_lang}, and then give constructive criticisms and helpful suggestions to improve the translation. \n\nThe source text and initial translation, delimited by XML tags <SOURCE_TEXT></SOURCE_TEXT> and <TRANSLATION></TRANSLATION>, are as follows:\n\n<SOURCE_TEXT>\n{source_text}\n</SOURCE_TEXT>\n\n<TRANSLATION>\n{translation_1}\n</TRANSLATION>\n\nWhen writing suggestions, pay attention to whether there are ways to improve the translation's \n(i) accuracy (by correcting errors of addition, mistranslation, omission, or untranslated text),\n(ii) fluency (by applying {target_lang} grammar, spelling and punctuation rules, and ensuring there are no unnecessary repetitions),\n(iii) style (by ensuring the translations reflect the style of the source text and take into account any cultural context),\n(iv) terminology (by ensuring terminology use is consistent and reflects the source text domain; and by only ensuring you use equivalent idioms {target_lang}).\n\nWrite a list of specific, helpful and constructive suggestions for improving the translation.\nEach suggestion should address one specific part of the translation.\nOutput only the suggestions and nothing else.",
"query": [],
"temperature": 0.1,
"top_p": 0.3
}
},
"upstream": [
"Generate:RichWordsDeny"
]
},
"begin": {
"downstream": [
"Generate:RichWordsDeny"
"Generate:VastKeysKick"
],
"obj": {
"component_name": "Begin",
"inputs": [],
"output": null,
"params": {
"debug_inputs": [],
"inputs": [],
"message_history_window_size": 22,
"output": {},
"output": null,
"output_var_name": "output",
"prologue": "",
"query": [
@@ -165,15 +136,13 @@
"key": "lang",
"name": "Target Language",
"optional": false,
"type": "line",
"value": ""
"type": "line"
},
{
"key": "file",
"name": "Files",
"optional": false,
"type": "file",
"value": ""
"type": "file"
}
]
}
@@ -185,48 +154,36 @@
"graph": {
"edges": [
{
"id": "reactflow__edge-begin-Generate:RichWordsDenyc",
"id": "xy-edge__begin-Generate:VastKeysKickc",
"markerEnd": "logo",
"source": "begin",
"sourceHandle": null,
"style": {
"stroke": "rgb(202 197 245)",
"strokeWidth": 2
},
"target": "Generate:RichWordsDeny",
"target": "Generate:VastKeysKick",
"targetHandle": "c",
"type": "buttonEdge"
"type": "buttonEdge",
"zIndex": 1001
},
{
"id": "reactflow__edge-Generate:RichWordsDenyb-Generate:SlimyFrogsArguec",
"id": "xy-edge__Generate:VastKeysKickb-Generate:ShinySquidsSneezec",
"markerEnd": "logo",
"source": "Generate:RichWordsDeny",
"source": "Generate:VastKeysKick",
"sourceHandle": "b",
"style": {
"stroke": "rgb(202 197 245)",
"strokeWidth": 2
},
"target": "Generate:SlimyFrogsArgue",
"target": "Generate:ShinySquidsSneeze",
"targetHandle": "c",
"type": "buttonEdge"
"type": "buttonEdge",
"zIndex": 1001
},
{
"id": "reactflow__edge-Generate:SlimyFrogsArgueb-Generate:ChubbyCougarsRushc",
"id": "xy-edge__Generate:FuzzyEmusWorkb-Answer:TinyGamesGuessc",
"markerEnd": "logo",
"source": "Generate:SlimyFrogsArgue",
"sourceHandle": "b",
"style": {
"stroke": "rgb(202 197 245)",
"strokeWidth": 2
},
"target": "Generate:ChubbyCougarsRush",
"targetHandle": "c",
"type": "buttonEdge"
},
{
"id": "reactflow__edge-Generate:ChubbyCougarsRushb-Answer:TinyGamesGuessc",
"markerEnd": "logo",
"source": "Generate:ChubbyCougarsRush",
"source": "Generate:FuzzyEmusWork",
"sourceHandle": "b",
"style": {
"stroke": "rgb(202 197 245)",
@@ -234,7 +191,22 @@
},
"target": "Answer:TinyGamesGuess",
"targetHandle": "c",
"type": "buttonEdge"
"type": "buttonEdge",
"zIndex": 1001
},
{
"id": "xy-edge__Generate:ShinySquidsSneezeb-Generate:FuzzyEmusWorkc",
"markerEnd": "logo",
"source": "Generate:ShinySquidsSneeze",
"sourceHandle": "b",
"style": {
"stroke": "rgb(202 197 245)",
"strokeWidth": 2
},
"target": "Generate:FuzzyEmusWork",
"targetHandle": "c",
"type": "buttonEdge",
"zIndex": 1001
}
],
"nodes": [
@@ -247,15 +219,13 @@
"key": "lang",
"name": "Target Language",
"optional": false,
"type": "line",
"value": ""
"type": "line"
},
{
"key": "file",
"name": "Files",
"optional": false,
"type": "file",
"value": ""
"type": "file"
}
]
},
@@ -265,188 +235,24 @@
"dragging": false,
"height": 128,
"id": "begin",
"measured": {
"height": 128,
"width": 200
},
"position": {
"x": -383.5,
"y": 143.5
"y": 142.62256327439624
},
"positionAbsolute": {
"x": -383.5,
"y": 143.5
},
"selected": false,
"selected": true,
"sourcePosition": "left",
"targetPosition": "right",
"type": "beginNode",
"width": 200
},
{
"data": {
"form": {
"cite": false,
"frequencyPenaltyEnabled": true,
"frequency_penalty": 0.7,
"llm_id": "deepseek-chat@DeepSeek",
"maxTokensEnabled": false,
"max_tokens": 256,
"message_history_window_size": 12,
"parameter": "Precise",
"parameters": [
{
"component_id": "begin@lang",
"id": "a36e78fb-b431-4ae6-afa8-77839587fcf8",
"key": "lang"
},
{
"component_id": "begin@file",
"id": "f8a704b7-693b-4480-aa9a-da4a83250059",
"key": "file"
}
],
"presencePenaltyEnabled": true,
"presence_penalty": 0.4,
"prompt": "Role: You are a professional translator proficient in {lang}, with an exceptional ability to convert specialized academic papers into accessible popular science articles. Please assist me in translating the following paragraph into {lang}, ensuring that its style resembles that of popular science articles in {lang}.\n\nRequirements & Restrictions:\n - Use Markdown format to output.\n - DO NOT overlook any details.\n\n\n<ORIGINAL_TEXT>\n{file}\n\n<TRANSLATED_TEXT>",
"temperature": 0.1,
"temperatureEnabled": true,
"topPEnabled": true,
"top_p": 0.3
},
"label": "Generate",
"name": "Translate directly"
},
"dragging": false,
"height": 190,
"id": "Generate:RichWordsDeny",
"position": {
"x": -98,
"y": 113.359375
},
"positionAbsolute": {
"x": -98,
"y": 113.359375
},
"selected": false,
"sourcePosition": "right",
"targetPosition": "left",
"type": "generateNode",
"width": 200
},
{
"data": {
"form": {
"cite": false,
"frequencyPenaltyEnabled": true,
"frequency_penalty": 0.7,
"llm_id": "deepseek-chat@DeepSeek",
"maxTokensEnabled": false,
"max_tokens": 512,
"message_history_window_size": 12,
"parameter": "Precise",
"parameters": [
{
"component_id": "begin@lang",
"id": "b2f5e7ec-7f77-485f-af15-461d0f1ca913",
"key": "target_lang"
},
{
"component_id": "begin@file",
"id": "fbc44092-9f9e-4e85-b5b1-dbd808239d3d",
"key": "source_text"
},
{
"component_id": "Generate:RichWordsDeny",
"id": "c253af54-61d4-40f3-9990-604e2212506f",
"key": "translation_1"
}
],
"presencePenaltyEnabled": true,
"presence_penalty": 0.4,
"prompt": "Your task is to carefully read a source text and a translation to {target_lang}, and then give constructive criticisms and helpful suggestions to improve the translation. \n\nThe source text and initial translation, delimited by XML tags <SOURCE_TEXT></SOURCE_TEXT> and <TRANSLATION></TRANSLATION>, are as follows:\n\n<SOURCE_TEXT>\n{source_text}\n</SOURCE_TEXT>\n\n<TRANSLATION>\n{translation_1}\n</TRANSLATION>\n\nWhen writing suggestions, pay attention to whether there are ways to improve the translation's \n(i) accuracy (by correcting errors of addition, mistranslation, omission, or untranslated text),\n(ii) fluency (by applying {target_lang} grammar, spelling and punctuation rules, and ensuring there are no unnecessary repetitions),\n(iii) style (by ensuring the translations reflect the style of the source text and take into account any cultural context),\n(iv) terminology (by ensuring terminology use is consistent and reflects the source text domain; and by only ensuring you use equivalent idioms {target_lang}).\n\nWrite a list of specific, helpful and constructive suggestions for improving the translation.\nEach suggestion should address one specific part of the translation.\nOutput only the suggestions and nothing else.",
"temperature": 0.1,
"temperatureEnabled": true,
"topPEnabled": true,
"top_p": 0.3
},
"label": "Generate",
"name": "Reflect"
},
"dragging": false,
"height": 232,
"id": "Generate:SlimyFrogsArgue",
"position": {
"x": 178.5,
"y": 91.859375
},
"positionAbsolute": {
"x": 178.5,
"y": 91.859375
},
"selected": false,
"sourcePosition": "right",
"targetPosition": "left",
"type": "generateNode",
"width": 200
},
{
"data": {
"form": {
"cite": false,
"frequencyPenaltyEnabled": true,
"frequency_penalty": 0.7,
"llm_id": "deepseek-chat@DeepSeek",
"maxTokensEnabled": false,
"max_tokens": 512,
"message_history_window_size": 12,
"parameter": "Precise",
"parameters": [
{
"component_id": "begin@lang",
"id": "73f48a67-b78f-4bcd-8326-a83c31073ab9",
"key": "target_lang"
},
{
"component_id": "begin@file",
"id": "c9142975-25b3-4199-8fce-aa0bc29a31f2",
"key": "source_text"
},
{
"component_id": "Generate:RichWordsDeny",
"id": "6c824b2a-fe3b-4336-95b5-e85f676bef39",
"key": "translation_1"
},
{
"component_id": "Generate:SlimyFrogsArgue",
"id": "f3bd4569-4852-43fa-b80a-e0dd27dd9e1c",
"key": "reflection"
}
],
"presencePenaltyEnabled": true,
"presence_penalty": 0.4,
"prompt": "Your task is to carefully read, then edit, a translation to {target_lang}, taking into\naccount a list of expert suggestions and constructive criticisms.\n\nThe source text, the initial translation, and the expert linguist suggestions are delimited by XML tags <SOURCE_TEXT></SOURCE_TEXT>, <TRANSLATION></TRANSLATION> and <EXPERT_SUGGESTIONS></EXPERT_SUGGESTIONS> \\\nas follows:\n\n<SOURCE_TEXT>\n{source_text}\n</SOURCE_TEXT>\n\n<TRANSLATION>\n{translation_1}\n</TRANSLATION>\n\n<EXPERT_SUGGESTIONS>\n{reflection}\n</EXPERT_SUGGESTIONS>\n\nPlease take into account the expert suggestions when editing the translation. Edit the translation by ensuring:\n\n(i) accuracy (by correcting errors of addition, mistranslation, omission, or untranslated text),\n(ii) fluency (by applying {target_lang} grammar, spelling and punctuation rules and ensuring there are no unnecessary repetitions), \n(iii) style (by ensuring the translations reflect the style of the source text)\n(iv) terminology (inappropriate for context, inconsistent use), or\n(v) other errors.\n\nOutput only the new translation and nothing else.",
"temperature": 0.1,
"temperatureEnabled": true,
"topPEnabled": true,
"top_p": 0.3
},
"label": "Generate",
"name": "Improve"
},
"dragging": false,
"height": 274,
"id": "Generate:ChubbyCougarsRush",
"position": {
"x": 437,
"y": 70.859375
},
"positionAbsolute": {
"x": 437,
"y": 70.859375
},
"selected": false,
"sourcePosition": "right",
"targetPosition": "left",
"type": "generateNode",
"width": 200
},
{
"data": {
"form": {},
@@ -456,9 +262,13 @@
"dragging": false,
"height": 44,
"id": "Answer:TinyGamesGuess",
"measured": {
"height": 44,
"width": 200
},
"position": {
"x": 688.5,
"y": 183.859375
"x": 645.5056004454161,
"y": 182.98193827439627
},
"positionAbsolute": {
"x": 688.5,
@@ -482,6 +292,10 @@
"dragging": false,
"height": 227,
"id": "Note:MoodyKnivesCheat",
"measured": {
"height": 227,
"width": 703
},
"position": {
"x": 46.02198421645994,
"y": -267.69527832581736
@@ -504,7 +318,7 @@
{
"data": {
"form": {
"text": "Many businesses use specialized terms that are not widely used on the internet and that LLMs thus don\u2019t know about, and there are also many terms that can be translated in multiple ways. For example, \u201dopen source\u201d in Spanish can be \u201cC\u00f3digo abierto\u201d or \u201cFuente abierta\u201d; both are fine, but it\u2019d better to pick one and stick with it for a single document.\n\nYou can add those glossary translation into prompt to any of `Translate directly` or 'Reflect'."
"text": "Many businesses use specialized terms that are not widely used on the internet and that LLMs thus don’t know about, and there are also many terms that can be translated in multiple ways. For example, ”open source” in Spanish can be “Código abierto” or “Fuente abierta”; both are fine, but it’d better to pick one and stick with it for a single document.\n\nYou can add those glossary translation into prompt to any of `Translate directly` or 'Reflect'."
},
"label": "Note",
"name": "Tip: Add glossary "
@@ -513,6 +327,10 @@
"dragging": false,
"height": 181,
"id": "Note:SourCarrotsAct",
"measured": {
"height": 181,
"width": 832
},
"position": {
"x": 65.0676250238289,
"y": 397.6323270065299
@@ -531,6 +349,120 @@
"targetPosition": "left",
"type": "noteNode",
"width": 832
},
{
"data": {
"form": {
"cite": false,
"frequencyPenaltyEnabled": true,
"frequency_penalty": 0.7,
"llm_id": "deepseek-chat@DeepSeek",
"maxTokensEnabled": false,
"max_tokens": 256,
"message_history_window_size": 1,
"parameter": "Precise",
"parameters": [],
"presencePenaltyEnabled": true,
"presence_penalty": 0.4,
"prompt": "Role: You are a professional translator proficient in {begin@lang}, with an exceptional ability to convert specialized academic papers into accessible popular science articles. Please assist me in translating the following paragraph into {begin@lang}, ensuring that its style resembles that of popular science articles in {begin@lang}.\n\nRequirements & Restrictions:\n - Use Markdown format to output.\n - DO NOT overlook any details.\n\n\n<ORIGINAL_TEXT>\n{begin@file}\n\n<TRANSLATED_TEXT>",
"temperature": 0.1,
"temperatureEnabled": true,
"topPEnabled": true,
"top_p": 0.3
},
"label": "Generate",
"name": "Translate directly"
},
"dragging": false,
"id": "Generate:VastKeysKick",
"measured": {
"height": 106,
"width": 200
},
"position": {
"x": -132.6338674989604,
"y": 153.70663786774483
},
"selected": false,
"sourcePosition": "right",
"targetPosition": "left",
"type": "generateNode"
},
{
"data": {
"form": {
"cite": false,
"frequencyPenaltyEnabled": true,
"frequency_penalty": 0.7,
"llm_id": "deepseek-chat@DeepSeek",
"maxTokensEnabled": false,
"max_tokens": 256,
"message_history_window_size": 1,
"parameter": "Precise",
"parameters": [],
"presencePenaltyEnabled": true,
"presence_penalty": 0.4,
"prompt": "Your task is to carefully read a source text and a translation to {begin@lang}, and then give constructive criticisms and helpful suggestions to improve the translation. \n\nThe source text and initial translation, delimited by XML tags <SOURCE_TEXT></SOURCE_TEXT> and <TRANSLATION></TRANSLATION>, are as follows:\n\n<SOURCE_TEXT>\n{begin@file}\n</SOURCE_TEXT>\n\n<TRANSLATION>\n{Generate:VastKeysKick}\n</TRANSLATION>\n\nWhen writing suggestions, pay attention to whether there are ways to improve the translation's \n(i) accuracy (by correcting errors of addition, mistranslation, omission, or untranslated text),\n(ii) fluency (by applying {begin@lang} grammar, spelling and punctuation rules, and ensuring there are no unnecessary repetitions),\n(iii) style (by ensuring the translations reflect the style of the source text and take into account any cultural context),\n(iv) terminology (by ensuring terminology use is consistent and reflects the source text domain; and by only ensuring you use equivalent idioms {begin@lang}).\n\nWrite a list of specific, helpful and constructive suggestions for improving the translation.\nEach suggestion should address one specific part of the translation.\nOutput only the suggestions and nothing else.",
"temperature": 0.1,
"temperatureEnabled": true,
"topPEnabled": true,
"top_p": 0.3
},
"label": "Generate",
"name": "Reflect"
},
"dragging": false,
"id": "Generate:ShinySquidsSneeze",
"measured": {
"height": 106,
"width": 200
},
"position": {
"x": 121.1675336631696,
"y": 152.92865408917177
},
"selected": false,
"sourcePosition": "right",
"targetPosition": "left",
"type": "generateNode"
},
{
"data": {
"form": {
"cite": false,
"frequencyPenaltyEnabled": true,
"frequency_penalty": 0.7,
"llm_id": "deepseek-chat@DeepSeek",
"maxTokensEnabled": false,
"max_tokens": 256,
"message_history_window_size": 1,
"parameter": "Precise",
"parameters": [],
"presencePenaltyEnabled": true,
"presence_penalty": 0.4,
"prompt": "Your task is to carefully read, then edit, a translation to {begin@lang}, taking into\naccount a list of expert suggestions and constructive criticisms.\n\nThe source text, the initial translation, and the expert linguist suggestions are delimited by XML tags <SOURCE_TEXT></SOURCE_TEXT>, <TRANSLATION></TRANSLATION> and <EXPERT_SUGGESTIONS></EXPERT_SUGGESTIONS>\nas follows:\n\n<SOURCE_TEXT>\n{begin@file}\n</SOURCE_TEXT>\n\n<TRANSLATION>\n{Generate:VastKeysKick}\n</TRANSLATION>\n\n<EXPERT_SUGGESTIONS>\n{Generate:ShinySquidsSneeze}\n</EXPERT_SUGGESTIONS>\n\nPlease take into account the expert suggestions when editing the translation. Edit the translation by ensuring:\n\n(i) accuracy (by correcting errors of addition, mistranslation, omission, or untranslated text),\n(ii) fluency (by applying {begin@lang} grammar, spelling and punctuation rules and ensuring there are no unnecessary repetitions), \n(iii) style (by ensuring the translations reflect the style of the source text)\n(iv) terminology (inappropriate for context, inconsistent use), or\n(v) other errors.\n\nOutput only the new translation and nothing else.",
|
||||
"temperature": 0.1,
|
||||
"temperatureEnabled": true,
|
||||
"topPEnabled": true,
|
||||
"top_p": 0.3
|
||||
},
|
||||
"label": "Generate",
|
||||
"name": "Improve"
|
||||
},
|
||||
"dragging": false,
|
||||
"id": "Generate:FuzzyEmusWork",
|
||||
"measured": {
|
||||
"height": 106,
|
||||
"width": 200
|
||||
},
|
||||
"position": {
|
||||
"x": 383.1474420163898,
|
||||
"y": 152.0472805236579
|
||||
},
|
||||
"selected": false,
|
||||
"sourcePosition": "right",
|
||||
"targetPosition": "left",
|
||||
"type": "generateNode"
|
||||
}
|
||||
]
|
||||
},
|
||||
|
||||
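The three `Generate` nodes above chain a translate-reflect-improve workflow: a first draft, an expert critique of that draft, and a rewrite that folds the critique back in. A minimal standalone sketch of the same pattern, outside RAGFlow's component runtime; `chat` is a hypothetical helper that sends one prompt to any chat-completion model and returns its reply, and the prompts are condensed from the node configs above:

def translate_with_reflection(chat, source_text: str, lang: str) -> str:
    # Step 1: direct translation (the "Translate directly" node).
    draft = chat(
        f"You are a professional translator proficient in {lang}. "
        f"Translate the following text into {lang} using Markdown, omitting no details.\n\n"
        f"<ORIGINAL_TEXT>\n{source_text}\n\n<TRANSLATED_TEXT>"
    )
    # Step 2: critique (the "Reflect" node).
    critique = chat(
        "Read a source text and its translation, then list specific suggestions "
        "covering accuracy, fluency, style, and terminology.\n\n"
        f"<SOURCE_TEXT>\n{source_text}\n</SOURCE_TEXT>\n\n<TRANSLATION>\n{draft}\n</TRANSLATION>"
    )
    # Step 3: revision (the "Improve" node).
    return chat(
        "Edit the translation so it addresses every expert suggestion. "
        "Output only the new translation.\n\n"
        f"<SOURCE_TEXT>\n{source_text}\n</SOURCE_TEXT>\n\n"
        f"<TRANSLATION>\n{draft}\n</TRANSLATION>\n\n"
        f"<EXPERT_SUGGESTIONS>\n{critique}\n</EXPERT_SUGGESTIONS>"
    )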
Diffs for six more changed files suppressed (too large, or lines too long).
agentic_reasoning/__init__.py (new file, 1 line)
@@ -0,0 +1 @@
from .deep_research import DeepResearcher as DeepResearcher

agentic_reasoning/deep_research.py (new file, 167 lines)
@@ -0,0 +1,167 @@
#
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import logging
import re
from functools import partial

from agentic_reasoning.prompts import BEGIN_SEARCH_QUERY, BEGIN_SEARCH_RESULT, END_SEARCH_RESULT, MAX_SEARCH_LIMIT, \
    END_SEARCH_QUERY, REASON_PROMPT, RELEVANT_EXTRACTION_PROMPT
from api.db.services.llm_service import LLMBundle
from rag.nlp import extract_between
from rag.prompts import kb_prompt
from rag.utils.tavily_conn import Tavily


class DeepResearcher:
    def __init__(self,
                 chat_mdl: LLMBundle,
                 prompt_config: dict,
                 kb_retrieve: partial = None,
                 kg_retrieve: partial = None
                 ):
        self.chat_mdl = chat_mdl
        self.prompt_config = prompt_config
        self._kb_retrieve = kb_retrieve
        self._kg_retrieve = kg_retrieve

    def thinking(self, chunk_info: dict, question: str):
        def rm_query_tags(line):
            pattern = re.escape(BEGIN_SEARCH_QUERY) + r"(.*?)" + re.escape(END_SEARCH_QUERY)
            return re.sub(pattern, "", line)

        def rm_result_tags(line):
            pattern = re.escape(BEGIN_SEARCH_RESULT) + r"(.*?)" + re.escape(END_SEARCH_RESULT)
            return re.sub(pattern, "", line)

        executed_search_queries = []
        msg_history = [{"role": "user", "content": f'Question:\"{question}\"\n'}]
        all_reasoning_steps = []
        think = "<think>"
        for ii in range(MAX_SEARCH_LIMIT + 1):
            if ii == MAX_SEARCH_LIMIT - 1:
                summary_think = f"\n{BEGIN_SEARCH_RESULT}\nThe maximum search limit is exceeded. You are not allowed to search.\n{END_SEARCH_RESULT}\n"
                yield {"answer": think + summary_think + "</think>", "reference": {}, "audio_binary": None}
                all_reasoning_steps.append(summary_think)
                msg_history.append({"role": "assistant", "content": summary_think})
                break

            # Ask the reasoning model for its next step; search requests arrive
            # embedded between the BEGIN/END_SEARCH_QUERY tags.
            query_think = ""
            if msg_history[-1]["role"] != "user":
                msg_history.append({"role": "user", "content": "Continues reasoning with the new information.\n"})
            else:
                msg_history[-1]["content"] += "\n\nContinues reasoning with the new information.\n"
            for ans in self.chat_mdl.chat_streamly(REASON_PROMPT, msg_history, {"temperature": 0.7}):
                ans = re.sub(r"<think>.*</think>", "", ans, flags=re.DOTALL)
                if not ans:
                    continue
                query_think = ans
                yield {"answer": think + rm_query_tags(query_think) + "</think>", "reference": {}, "audio_binary": None}

            think += rm_query_tags(query_think)
            all_reasoning_steps.append(query_think)
            queries = extract_between(query_think, BEGIN_SEARCH_QUERY, END_SEARCH_QUERY)
            if not queries:
                if ii > 0:
                    break
                # First round without an explicit query: fall back to the question itself.
                queries = [question]

            for search_query in queries:
                logging.info(f"[THINK]Query: {ii}. {search_query}")
                msg_history.append({"role": "assistant", "content": search_query})
                think += f"\n\n> {ii + 1}. {search_query}\n\n"
                yield {"answer": think + "</think>", "reference": {}, "audio_binary": None}

                summary_think = ""
                # The search query has been searched in previous steps.
                if search_query in executed_search_queries:
                    summary_think = f"\n{BEGIN_SEARCH_RESULT}\nYou have searched this query. Please refer to previous results.\n{END_SEARCH_RESULT}\n"
                    yield {"answer": think + summary_think + "</think>", "reference": {}, "audio_binary": None}
                    all_reasoning_steps.append(summary_think)
                    msg_history.append({"role": "user", "content": summary_think})
                    think += summary_think
                    continue
                # Record the query so that later repeats hit the branch above.
                executed_search_queries.append(search_query)

                truncated_prev_reasoning = ""
                for i, step in enumerate(all_reasoning_steps):
                    truncated_prev_reasoning += f"Step {i + 1}: {step}\n\n"

                # Keep the first step, the last four steps and every step carrying
                # a search query or result; elide the rest with "...".
                prev_steps = truncated_prev_reasoning.split('\n\n')
                if len(prev_steps) <= 5:
                    truncated_prev_reasoning = '\n\n'.join(prev_steps)
                else:
                    truncated_prev_reasoning = ''
                    for i, step in enumerate(prev_steps):
                        if i == 0 or i >= len(prev_steps) - 4 or BEGIN_SEARCH_QUERY in step or BEGIN_SEARCH_RESULT in step:
                            truncated_prev_reasoning += step + '\n\n'
                        else:
                            if truncated_prev_reasoning[-len('\n\n...\n\n'):] != '\n\n...\n\n':
                                truncated_prev_reasoning += '...\n\n'
                truncated_prev_reasoning = truncated_prev_reasoning.strip('\n')

                # Retrieval procedure:
                # 1. Knowledge base search
                # 2. Web search (optional)
                # 3. Knowledge graph search (optional)
                kbinfos = self._kb_retrieve(question=search_query) if self._kb_retrieve else {"chunks": [], "doc_aggs": []}

                if self.prompt_config.get("tavily_api_key"):
                    tav = Tavily(self.prompt_config["tavily_api_key"])
                    tav_res = tav.retrieve_chunks(search_query)
                    kbinfos["chunks"].extend(tav_res["chunks"])
                    kbinfos["doc_aggs"].extend(tav_res["doc_aggs"])
                if self.prompt_config.get("use_kg") and self._kg_retrieve:
                    ck = self._kg_retrieve(question=search_query)
                    if ck["content_with_weight"]:
                        kbinfos["chunks"].insert(0, ck)

                # Merge chunk info for citations
                if not chunk_info["chunks"]:
                    for k in chunk_info.keys():
                        chunk_info[k] = kbinfos[k]
                else:
                    cids = [c["chunk_id"] for c in chunk_info["chunks"]]
                    for c in kbinfos["chunks"]:
                        if c["chunk_id"] in cids:
                            continue
                        chunk_info["chunks"].append(c)
                    dids = [d["doc_id"] for d in chunk_info["doc_aggs"]]
                    for d in kbinfos["doc_aggs"]:
                        if d["doc_id"] in dids:
                            continue
                        chunk_info["doc_aggs"].append(d)

                think += "\n\n"
                # Distill the retrieved chunks down to what is relevant to this query.
                for ans in self.chat_mdl.chat_streamly(
                        RELEVANT_EXTRACTION_PROMPT.format(
                            prev_reasoning=truncated_prev_reasoning,
                            search_query=search_query,
                            document="\n".join(kb_prompt(kbinfos, 4096))
                        ),
                        [{"role": "user",
                          "content": f'Now you should analyze each web page and find helpful information based on the current search query "{search_query}" and previous reasoning steps.'}],
                        {"temperature": 0.7}):
                    ans = re.sub(r"<think>.*</think>", "", ans, flags=re.DOTALL)
                    if not ans:
                        continue
                    summary_think = ans
                    yield {"answer": think + rm_result_tags(summary_think) + "</think>", "reference": {}, "audio_binary": None}

                all_reasoning_steps.append(summary_think)
                msg_history.append(
                    {"role": "user", "content": f"\n\n{BEGIN_SEARCH_RESULT}{summary_think}{END_SEARCH_RESULT}\n\n"})
                think += rm_result_tags(summary_think)
                logging.info(f"[THINK]Summary: {ii}. {summary_think}")

        yield think + "</think>"
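For orientation, this is roughly how the class might be driven. A hedged sketch only: the retrieval keyword arguments mirror the call in dialog_service.chat() further down this diff, but the page size and thresholds here are placeholders, not a tested configuration.

from functools import partial

from agentic_reasoning import DeepResearcher


def run_deep_research(chat_mdl, retriever, embd_mdl, tenant_ids, kb_ids, question):
    # Bind the knowledge-base retriever the same way dialog_service does.
    kb_retrieve = partial(retriever.retrieval, embd_mdl=embd_mdl,
                          tenant_ids=tenant_ids, kb_ids=kb_ids,
                          page=1, page_size=12,
                          similarity_threshold=0.2, vector_similarity_weight=0.3)
    chunk_info = {"total": 0, "chunks": [], "doc_aggs": []}  # filled in for citations
    reasoner = DeepResearcher(chat_mdl, {}, kb_retrieve)
    final = None
    for step in reasoner.thinking(chunk_info, question):
        # Dict items are streaming snapshots; the last yield is the plain
        # "<think>...</think>" transcript string.
        final = step if isinstance(step, str) else step["answer"]
    return final, chunk_info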
agentic_reasoning/prompts.py (new file, 112 lines)
@@ -0,0 +1,112 @@
#
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

BEGIN_SEARCH_QUERY = "<|begin_search_query|>"
END_SEARCH_QUERY = "<|end_search_query|>"
BEGIN_SEARCH_RESULT = "<|begin_search_result|>"
END_SEARCH_RESULT = "<|end_search_result|>"
MAX_SEARCH_LIMIT = 6

REASON_PROMPT = (
    "You are a reasoning assistant with the ability to perform dataset searches to help "
    "you answer the user's question accurately. You have special tools:\n\n"
    f"- To perform a search: write {BEGIN_SEARCH_QUERY} your query here {END_SEARCH_QUERY}.\n"
    f"Then, the system will search and analyze relevant content, then provide you with helpful information in the format {BEGIN_SEARCH_RESULT} ...search results... {END_SEARCH_RESULT}.\n\n"
    f"You can repeat the search process multiple times if necessary. The maximum number of search attempts is limited to {MAX_SEARCH_LIMIT}.\n\n"
    "Once you have all the information you need, continue your reasoning.\n\n"
    "-- Example 1 --\n"  ########################################
    "Question: \"Are both the directors of Jaws and Casino Royale from the same country?\"\n"
    "Assistant:\n"
    f" {BEGIN_SEARCH_QUERY}Who is the director of Jaws?{END_SEARCH_QUERY}\n\n"
    "User:\n"
    f" {BEGIN_SEARCH_RESULT}\nThe director of Jaws is Steven Spielberg...\n{END_SEARCH_RESULT}\n\n"
    "Continues reasoning with the new information.\n"
    "Assistant:\n"
    f" {BEGIN_SEARCH_QUERY}Where is Steven Spielberg from?{END_SEARCH_QUERY}\n\n"
    "User:\n"
    f" {BEGIN_SEARCH_RESULT}\nSteven Allan Spielberg is an American filmmaker...\n{END_SEARCH_RESULT}\n\n"
    "Continues reasoning with the new information...\n\n"
    "Assistant:\n"
    f" {BEGIN_SEARCH_QUERY}Who is the director of Casino Royale?{END_SEARCH_QUERY}\n\n"
    "User:\n"
    f" {BEGIN_SEARCH_RESULT}\nCasino Royale is a 2006 spy film directed by Martin Campbell...\n{END_SEARCH_RESULT}\n\n"
    "Continues reasoning with the new information...\n\n"
    "Assistant:\n"
    f" {BEGIN_SEARCH_QUERY}Where is Martin Campbell from?{END_SEARCH_QUERY}\n\n"
    "User:\n"
    f" {BEGIN_SEARCH_RESULT}\nMartin Campbell (born 24 October 1943) is a New Zealand film and television director...\n{END_SEARCH_RESULT}\n\n"
    "Continues reasoning with the new information...\n\n"
    "Assistant:\nIt's enough to answer the question\n"

    "-- Example 2 --\n"  #########################################
    "Question: \"When was the founder of craigslist born?\"\n"
    "Assistant:\n"
    f" {BEGIN_SEARCH_QUERY}Who was the founder of craigslist?{END_SEARCH_QUERY}\n\n"
    "User:\n"
    f" {BEGIN_SEARCH_RESULT}\nCraigslist was founded by Craig Newmark...\n{END_SEARCH_RESULT}\n\n"
    "Continues reasoning with the new information.\n"
    "Assistant:\n"
    f" {BEGIN_SEARCH_QUERY} When was Craig Newmark born?{END_SEARCH_QUERY}\n\n"
    "User:\n"
    f" {BEGIN_SEARCH_RESULT}\nCraig Newmark was born on December 6, 1952...\n{END_SEARCH_RESULT}\n\n"
    "Continues reasoning with the new information...\n\n"
    "Assistant:\nIt's enough to answer the question\n"
    "**Remember**:\n"
    f"- You have a dataset to search, so you just provide a proper search query.\n"
    f"- Use {BEGIN_SEARCH_QUERY} to request a dataset search and end with {END_SEARCH_QUERY}.\n"
    "- The language of the query MUST be the same as that of the 'Question' or the 'search result'.\n"
    "- When done searching, continue your reasoning.\n\n"
    'Please answer the following question. You should think step by step to solve it.\n\n'
)

RELEVANT_EXTRACTION_PROMPT = """**Task Instruction:**

You are tasked with reading and analyzing web pages based on the following inputs: **Previous Reasoning Steps**, **Current Search Query**, and **Searched Web Pages**. Your objective is to extract relevant and helpful information for **Current Search Query** from the **Searched Web Pages** and seamlessly integrate this information into the **Previous Reasoning Steps** to continue reasoning for the original question.

**Guidelines:**

1. **Analyze the Searched Web Pages:**
- Carefully review the content of each searched web page.
- Identify factual information that is relevant to the **Current Search Query** and can aid in the reasoning process for the original question.

2. **Extract Relevant Information:**
- Select the information from the Searched Web Pages that directly contributes to advancing the **Previous Reasoning Steps**.
- Ensure that the extracted information is accurate and relevant.

3. **Output Format:**
- **If the web pages provide helpful information for the current search query:** Present the information beginning with `**Final Information**` as shown below.
- The language of the output **MUST BE** the same as that of the 'Search Query' or the 'Web Pages'.
**Final Information**

[Helpful information]

- **If the web pages do not provide any helpful information for the current search query:** Output the following text.

**Final Information**

No helpful information found.

**Inputs:**
- **Previous Reasoning Steps:**
{prev_reasoning}

- **Current Search Query:**
{search_query}

- **Searched Web Pages:**
{document}

"""
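The reasoning loop in deep_research.py relies on rag.nlp.extract_between to pull queries out of these tag pairs. The following is only a minimal regex-based sketch of the expected contract; the real helper lives in rag/nlp and may differ in detail:

import re

from agentic_reasoning.prompts import BEGIN_SEARCH_QUERY, END_SEARCH_QUERY


def extract_between_sketch(text: str, start: str, end: str) -> list:
    # Non-greedy match of everything between each start/end tag pair.
    pattern = re.escape(start) + r"(.*?)" + re.escape(end)
    return [m.strip() for m in re.findall(pattern, text, flags=re.DOTALL)]


sample = f"Let me check. {BEGIN_SEARCH_QUERY}director of Jaws{END_SEARCH_QUERY}"
assert extract_between_sketch(sample, BEGIN_SEARCH_QUERY, END_SEARCH_QUERY) == ["director of Jaws"]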
@@ -119,8 +119,9 @@ def register_page(page_path):
    sys.modules[module_name] = page
    spec.loader.exec_module(page)
    page_name = getattr(page, "page_name", page_name)
    sdk_path = "\\sdk\\" if sys.platform.startswith("win") else "/sdk/"
    url_prefix = (
        f"/api/{API_VERSION}" if "/sdk/" in path else f"/{API_VERSION}/{page_name}"
        f"/api/{API_VERSION}" if sdk_path in path else f"/{API_VERSION}/{page_name}"
    )

    app.register_blueprint(page.manager, url_prefix=url_prefix)
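This hunk fixes route-prefix detection on Windows, where module paths use backslashes, so the literal "/sdk/" substring test never matched and SDK pages were registered under the wrong prefix. A small, runnable illustration of the failure and the fix:

import sys

win_path = "C:\\ragflow\\api\\apps\\sdk\\doc.py"
posix_path = "/ragflow/api/apps/sdk/doc.py"

# Old check: a POSIX-only substring test, always False for Windows paths.
assert "/sdk/" not in win_path

# New check: pick the separator that matches the running platform.
sdk_path = "\\sdk\\" if sys.platform.startswith("win") else "/sdk/"
path = win_path if sys.platform.startswith("win") else posix_path
assert sdk_path in path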
@@ -25,7 +25,7 @@ from api.db import FileType, LLMType, ParserType, FileSource
from api.db.db_models import APIToken, Task, File
from api.db.services import duplicate_name
from api.db.services.api_service import APITokenService, API4ConversationService
from api.db.services.dialog_service import DialogService, chat, keyword_extraction, label_question
from api.db.services.dialog_service import DialogService, chat
from api.db.services.document_service import DocumentService, doc_upload_and_parse
from api.db.services.file2document_service import File2DocumentService
from api.db.services.file_service import FileService
@@ -38,6 +38,8 @@ from api.utils.api_utils import server_error_response, get_data_error_result, ge
    generate_confirmation_token

from api.utils.file_utils import filename_type, thumbnail
from rag.app.tag import label_question
from rag.prompts import keyword_extraction
from rag.utils.storage_factory import STORAGE_IMPL

from api.db.services.canvas_service import UserCanvasService
@@ -19,9 +19,10 @@ import json
from flask import request
from flask_login import login_required, current_user

from api.db.services.dialog_service import keyword_extraction, label_question
from rag.app.qa import rmPrefix, beAdoc
from rag.app.tag import label_question
from rag.nlp import search, rag_tokenizer
from rag.prompts import keyword_extraction
from rag.settings import PAGERANK_FLD
from rag.utils import rmSpace
from api.db import LLMType, ParserType
@@ -93,12 +94,14 @@ def get():
    tenants = UserTenantService.query(user_id=current_user.id)
    if not tenants:
        return get_data_error_result(message="Tenant not found!")
    tenant_id = tenants[0].tenant_id

    kb_ids = KnowledgebaseService.get_kb_ids(tenant_id)
    chunk = settings.docStoreConn.get(chunk_id, search.index_name(tenant_id), kb_ids)
    for tenant in tenants:
        kb_ids = KnowledgebaseService.get_kb_ids(tenant.tenant_id)
        chunk = settings.docStoreConn.get(chunk_id, search.index_name(tenant.tenant_id), kb_ids)
        if chunk:
            break
    if chunk is None:
        return server_error_response(Exception("Chunk not found"))

    k = []
    for n in chunk.keys():
        if re.search(r"(_vec$|_sm_|_tks|_ltks)", n):
@@ -25,13 +25,14 @@ from flask import request, Response
from flask_login import login_required, current_user

from api.db import LLMType
from api.db.services.dialog_service import DialogService, chat, ask, label_question
from api.db.services.dialog_service import DialogService, chat, ask
from api.db.services.knowledgebase_service import KnowledgebaseService
from api.db.services.llm_service import LLMBundle, TenantService
from api import settings
from api.utils.api_utils import get_json_result
from api.utils.api_utils import server_error_response, get_data_error_result, validate_request
from graphrag.general.mind_map_extractor import MindMapExtractor
from rag.app.tag import label_question


@manager.route('/set', methods=['POST'])  # noqa: F821
@@ -18,6 +18,7 @@ from flask import request
from flask_login import login_required, current_user
from api.db.services.dialog_service import DialogService
from api.db import StatusEnum
from api.db.services.llm_service import TenantLLMService
from api.db.services.knowledgebase_service import KnowledgebaseService
from api.db.services.user_service import TenantService, UserTenantService
from api import settings
@@ -57,11 +58,6 @@ def set_dialog():

    if not prompt_config["system"]:
        prompt_config["system"] = default_prompt["system"]
    # if len(prompt_config["parameters"]) < 1:
    #     prompt_config["parameters"] = default_prompt["parameters"]
    # for p in prompt_config["parameters"]:
    #     if p["key"] == "knowledge": break
    # else: prompt_config["parameters"].append(default_prompt["parameters"][0])

    for p in prompt_config["parameters"]:
        if p["optional"]:
@@ -74,22 +70,19 @@ def set_dialog():
    e, tenant = TenantService.get_by_id(current_user.id)
    if not e:
        return get_data_error_result(message="Tenant not found!")
    kbs = KnowledgebaseService.get_by_ids(req.get("kb_ids"))
    embd_count = len(set([kb.embd_id for kb in kbs]))
    if embd_count != 1:
    kbs = KnowledgebaseService.get_by_ids(req.get("kb_ids", []))
    embd_ids = [TenantLLMService.split_model_name_and_factory(kb.embd_id)[0] for kb in kbs]  # remove vendor suffix for comparison
    embd_count = len(set(embd_ids))
    if embd_count > 1:
        return get_data_error_result(message=f'Datasets use different embedding models: {[kb.embd_id for kb in kbs]}')

    llm_id = req.get("llm_id", tenant.llm_id)
    if not dialog_id:
        if not req.get("kb_ids"):
            return get_data_error_result(
                message="Fail! Please select knowledgebase!")

        dia = {
            "id": get_uuid(),
            "tenant_id": current_user.id,
            "name": name,
            "kb_ids": req["kb_ids"],
            "kb_ids": req.get("kb_ids", []),
            "description": description,
            "llm_id": llm_id,
            "llm_setting": llm_setting,
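The consistency check above now compares embedding models after stripping the "@Factory" vendor suffix, so the same model added under two different providers no longer trips the error. A sketch of that normalization, assuming the "name@factory" id convention; the real logic is TenantLLMService.split_model_name_and_factory and may handle more cases:

def split_model_name_and_factory_sketch(model_id: str):
    # "model@Factory" -> ("model", "Factory"); plain names pass through.
    name, sep, factory = model_id.rpartition("@")
    return (name, factory) if sep else (factory, None)


ids = ["BAAI/bge-large-zh-v1.5@BAAI", "BAAI/bge-large-zh-v1.5"]
# Both ids normalize to the same bare model name, so embd_count == 1.
assert len({split_model_name_and_factory_sketch(i)[0] for i in ids}) == 1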
@@ -14,7 +14,6 @@
#  limitations under the License.
#
import json
import logging
import os

from flask import request
@@ -300,11 +299,12 @@ def knowledge_graph(kb_id):
        "kb_id": [kb_id],
        "knowledge_graph_kwd": ["graph"]
    }

    obj = {"graph": {}, "mind_map": {}}
    try:
        if not settings.docStoreConn.indexExist(search.index_name(kb.tenant_id), kb_id):
            return get_json_result(data=obj)
        sres = settings.retrievaler.search(req, search.index_name(kb.tenant_id), [kb_id])
    except Exception as e:
        logging.exception(e)
    if not len(sres.ids):
        return get_json_result(data=obj)

    for id in sres.ids[:1]:
@@ -319,5 +319,7 @@ def knowledge_graph(kb_id):
    if "nodes" in obj["graph"]:
        obj["graph"]["nodes"] = sorted(obj["graph"]["nodes"], key=lambda x: x.get("pagerank", 0), reverse=True)[:256]
        if "edges" in obj["graph"]:
            obj["graph"]["edges"] = sorted(obj["graph"]["edges"], key=lambda x: x.get("weight", 0), reverse=True)[:128]
            node_id_set = {o["id"] for o in obj["graph"]["nodes"]}
            filtered_edges = [o for o in obj["graph"]["edges"] if o["source"] != o["target"] and o["source"] in node_id_set and o["target"] in node_id_set]
            obj["graph"]["edges"] = sorted(filtered_edges, key=lambda x: x.get("weight", 0), reverse=True)[:128]
    return get_json_result(data=obj)
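The added lines keep only edges whose endpoints survive the top-256 node cut and drop self-loops, so the client never receives dangling edges. The same filter in isolation, on toy data rather than real RAGFlow output:

nodes = [{"id": "a", "pagerank": 0.9}, {"id": "b", "pagerank": 0.5}]
edges = [
    {"source": "a", "target": "b", "weight": 3},   # kept
    {"source": "a", "target": "a", "weight": 9},   # self-loop: dropped
    {"source": "a", "target": "zz", "weight": 7},  # dangling endpoint: dropped
]

node_id_set = {n["id"] for n in nodes}
filtered = [e for e in edges
            if e["source"] != e["target"]
            and e["source"] in node_id_set and e["target"] in node_id_set]
top_edges = sorted(filtered, key=lambda e: e.get("weight", 0), reverse=True)[:128]
assert top_edges == [{"source": "a", "target": "b", "weight": 3}]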
@@ -152,6 +152,7 @@ def add_llm():

    elif factory == "Tencent Cloud":
        req["api_key"] = apikey_json(["tencent_cloud_sid", "tencent_cloud_sk"])
        return set_api_key()

    elif factory == "Bedrock":
        # For Bedrock, due to its special authentication method
@@ -171,6 +172,10 @@ def add_llm():
        llm_name = req["llm_name"] + "___OpenAI-API"
        api_key = req.get("api_key", "xxxxxxxxxxxxxxx")

    elif factory == "VLLM":
        llm_name = req["llm_name"] + "___VLLM"
        api_key = req.get("api_key", "xxxxxxxxxxxxxxx")

    elif factory == "XunFei Spark":
        llm_name = req["llm_name"]
        if req["model_type"] == "chat":
@@ -209,66 +214,69 @@ def add_llm():
    }

    msg = ""
    mdl_nm = llm["llm_name"].split("___")[0]
    if llm["model_type"] == LLMType.EMBEDDING.value:
        mdl = EmbeddingModel[factory](
            key=llm['api_key'],
            model_name=llm["llm_name"],
            model_name=mdl_nm,
            base_url=llm["api_base"])
        try:
            arr, tc = mdl.encode(["Test if the api key is available"])
            if len(arr[0]) == 0:
                raise Exception("Fail")
        except Exception as e:
            msg += f"\nFail to access embedding model({llm['llm_name']})." + str(e)
            msg += f"\nFail to access embedding model({mdl_nm})." + str(e)
    elif llm["model_type"] == LLMType.CHAT.value:
        mdl = ChatModel[factory](
            key=llm['api_key'],
            model_name=llm["llm_name"],
            model_name=mdl_nm,
            base_url=llm["api_base"]
        )
        try:
            m, tc = mdl.chat(None, [{"role": "user", "content": "Hello! How are you doing!"}], {
                "temperature": 0.9})
            if not tc:
            if not tc and m.find("**ERROR**:") >= 0:
                raise Exception(m)
        except Exception as e:
            msg += f"\nFail to access model({llm['llm_name']})." + str(
            msg += f"\nFail to access model({mdl_nm})." + str(
                e)
    elif llm["model_type"] == LLMType.RERANK:
        try:
            mdl = RerankModel[factory](
                key=llm["api_key"],
                model_name=llm["llm_name"],
                model_name=mdl_nm,
                base_url=llm["api_base"]
            )
            try:
                arr, tc = mdl.similarity("Hello~ Ragflower!", ["Hi, there!", "Ohh, my friend!"])
                if len(arr) == 0:
                    raise Exception("Not known.")
        except KeyError:
            msg += f"{factory} does not support this model({mdl_nm})"
        except Exception as e:
            msg += f"\nFail to access model({llm['llm_name']})." + str(
            msg += f"\nFail to access model({mdl_nm})." + str(
                e)
    elif llm["model_type"] == LLMType.IMAGE2TEXT.value:
        mdl = CvModel[factory](
            key=llm["api_key"],
            model_name=llm["llm_name"],
            model_name=mdl_nm,
            base_url=llm["api_base"]
        )
        try:
            with open(os.path.join(get_project_base_directory(), "web/src/assets/yay.jpg"), "rb") as f:
                m, tc = mdl.describe(f.read())
                if not tc:
                if not m and not tc:
                    raise Exception(m)
        except Exception as e:
            msg += f"\nFail to access model({llm['llm_name']})." + str(e)
            msg += f"\nFail to access model({mdl_nm})." + str(e)
    elif llm["model_type"] == LLMType.TTS:
        mdl = TTSModel[factory](
            key=llm["api_key"], model_name=llm["llm_name"], base_url=llm["api_base"]
            key=llm["api_key"], model_name=mdl_nm, base_url=llm["api_base"]
        )
        try:
            for resp in mdl.tts("Hello~ Ragflower!"):
                pass
        except RuntimeError as e:
            msg += f"\nFail to access model({llm['llm_name']})." + str(e)
            msg += f"\nFail to access model({mdl_nm})." + str(e)
    else:
        # TODO: check other type of models
        pass
@@ -13,6 +13,8 @@
#  See the License for the specific language governing permissions and
#  limitations under the License.
#
import logging

from flask import request
from api import settings
from api.db import StatusEnum
@@ -41,7 +43,8 @@ def create(tenant_id):
        if kb.chunk_num == 0:
            return get_error_data_result(f"The dataset {kb_id} doesn't own parsed file")
    kbs = KnowledgebaseService.get_by_ids(ids)
    embd_count = list(set([kb.embd_id for kb in kbs]))
    embd_ids = [TenantLLMService.split_model_name_and_factory(kb.embd_id)[0] for kb in kbs]  # remove vendor suffix for comparison
    embd_count = list(set(embd_ids))
    if len(embd_count) != 1:
        return get_result(message='Datasets use different embedding models.',
                          code=settings.RetCode.AUTHENTICATION_ERROR)
@@ -176,7 +179,8 @@ def update(tenant_id, chat_id):
        if kb.chunk_num == 0:
            return get_error_data_result(f"The dataset {kb_id} doesn't own parsed file")
    kbs = KnowledgebaseService.get_by_ids(ids)
    embd_count = list(set([kb.embd_id for kb in kbs]))
    embd_ids = [TenantLLMService.split_model_name_and_factory(kb.embd_id)[0] for kb in kbs]  # remove vendor suffix for comparison
    embd_count = list(set(embd_ids))
    if len(embd_count) != 1:
        return get_result(
            message='Datasets use different embedding models.',
@@ -316,7 +320,8 @@ def list_chat(tenant_id):
        for kb_id in res["kb_ids"]:
            kb = KnowledgebaseService.query(id=kb_id)
            if not kb:
                return get_error_data_result(message=f"Don't exist the kb {kb_id}")
                logging.warning(f"The knowledge base {kb_id} doesn't exist.")
                continue
            kb_list.append(kb[0].to_json())
        del res["kb_ids"]
        res["datasets"] = kb_list
@@ -16,11 +16,11 @@
from flask import request, jsonify

from api.db import LLMType
from api.db.services.dialog_service import label_question
from api.db.services.knowledgebase_service import KnowledgebaseService
from api.db.services.llm_service import LLMBundle
from api import settings
from api.utils.api_utils import validate_request, build_error_result, apikey_required
from rag.app.tag import label_question


@manager.route('/dify/retrieval', methods=['POST'])  # noqa: F821
@@ -16,7 +16,6 @@
import pathlib
import datetime

from api.db.services.dialog_service import keyword_extraction, label_question
from rag.app.qa import rmPrefix, beAdoc
from rag.nlp import rag_tokenizer
from api.db import LLMType, ParserType
@@ -39,6 +38,8 @@ from api.db.services.file_service import FileService
from api.db.services.knowledgebase_service import KnowledgebaseService
from api.utils.api_utils import construct_json_result, get_parser_config
from rag.nlp import search
from rag.prompts import keyword_extraction
from rag.app.tag import label_question
from rag.utils import rmSpace
from rag.utils.storage_factory import STORAGE_IMPL

@@ -255,6 +256,10 @@ def update_doc(tenant_id, dataset_id, document_id):
            )
        if not DocumentService.update_by_id(document_id, {"name": req["name"]}):
            return get_error_data_result(message="Database error (Document rename)!")
    if "meta_fields" in req:
        if not isinstance(req["meta_fields"], dict):
            return get_error_data_result(message="meta_fields must be a dictionary")
        DocumentService.update_meta_fields(document_id, req["meta_fields"])

    informs = File2DocumentService.get_by_document_id(document_id)
    if informs:
@@ -472,10 +477,12 @@ def list_docs(dataset_id, tenant_id):
        return get_error_data_result(message=f"You don't own the dataset {dataset_id}. ")
    id = request.args.get("id")
    name = request.args.get("name")
    if not DocumentService.query(id=id, kb_id=dataset_id):

    if id and not DocumentService.query(id=id, kb_id=dataset_id):
        return get_error_data_result(message=f"You don't own the document {id}.")
    if not DocumentService.query(name=name, kb_id=dataset_id):
    if name and not DocumentService.query(name=name, kb_id=dataset_id):
        return get_error_data_result(message=f"You don't own the document {name}.")

    page = int(request.args.get("page", 1))
    keywords = request.args.get("keywords", "")
    page_size = int(request.args.get("page_size", 30))
@@ -729,7 +736,7 @@ def stop_parsing(tenant_id, dataset_id):
            )
        info = {"run": "2", "progress": 0, "chunk_num": 0}
        DocumentService.update_by_id(id, info)
        settings.docStoreConn.delete({"doc_id": doc.id}, search.index_name(tenant_id), dataset_id)
        settings.docStoreConn.delete({"doc_id": doc[0].id}, search.index_name(tenant_id), dataset_id)
    return get_result()


@@ -1301,7 +1308,7 @@ def retrieval_test(tenant_id):
        if not KnowledgebaseService.accessible(kb_id=id, user_id=tenant_id):
            return get_error_data_result(f"You don't own the dataset {id}.")
    kbs = KnowledgebaseService.get_by_ids(kb_ids)
    embd_nms = list(set([kb.embd_id for kb in kbs]))
    embd_nms = list(set([TenantLLMService.split_model_name_and_factory(kb.embd_id)[0] for kb in kbs]))  # remove vendor suffix for comparison
    if len(embd_nms) != 1:
        return get_result(
            message='Datasets use different embedding models.',
@@ -15,13 +15,13 @@
#
import re
import json
from api.db import LLMType
from flask import request, Response
import time

from api.db import LLMType
from api.db.services.conversation_service import ConversationService, iframe_completion
from api.db.services.conversation_service import completion as rag_completion
from api.db.services.canvas_service import completion as agent_completion
from api.db.services.dialog_service import ask
from api.db.services.dialog_service import ask, chat
from agent.canvas import Canvas
from api.db import StatusEnum
from api.db.db_models import APIToken
@@ -30,11 +30,12 @@ from api.db.services.canvas_service import UserCanvasService
from api.db.services.dialog_service import DialogService
from api.db.services.knowledgebase_service import KnowledgebaseService
from api.utils import get_uuid
from api.utils.api_utils import get_error_data_result
from api.utils.api_utils import get_error_data_result, validate_request
from api.utils.api_utils import get_result, token_required
from api.db.services.llm_service import LLMBundle
from api.db.services.file_service import FileService

from flask import jsonify, request, Response

@manager.route('/chats/<chat_id>/sessions', methods=['POST'])  # noqa: F821
@token_required
@@ -68,6 +69,11 @@ def create(tenant_id, chat_id):
@token_required
def create_agent_session(tenant_id, agent_id):
    req = request.json
    if not request.is_json:
        req = request.form
    files = request.files
    user_id = request.args.get('user_id', '')

    e, cvs = UserCanvasService.get_by_id(agent_id)
    if not e:
        return get_error_data_result("Agent not found.")
@@ -84,11 +90,29 @@ def create_agent_session(tenant_id, agent_id):
    if query:
        for ele in query:
            if not ele["optional"]:
                if not req.get(ele["key"]):
                    return get_error_data_result(f"`{ele['key']}` is required")
                if ele["type"] == "file":
                    if files is None or not files.get(ele["key"]):
                        return get_error_data_result(f"`{ele['key']}` with type `{ele['type']}` is required")
                    upload_file = files.get(ele["key"])
                    file_content = FileService.parse_docs([upload_file], user_id)
                    file_name = upload_file.filename
                    ele["value"] = file_name + "\n" + file_content
                else:
                    if req is None or not req.get(ele["key"]):
                        return get_error_data_result(f"`{ele['key']}` with type `{ele['type']}` is required")
                    ele["value"] = req[ele["key"]]
            if ele["optional"]:
                if req.get(ele["key"]):
            else:
                if ele["type"] == "file":
                    if files is not None and files.get(ele["key"]):
                        upload_file = files.get(ele["key"])
                        file_content = FileService.parse_docs([upload_file], user_id)
                        file_name = upload_file.filename
                        ele["value"] = file_name + "\n" + file_content
                    else:
                        if "value" in ele:
                            ele.pop("value")
                else:
                    if req is not None and req.get(ele["key"]):
                        ele["value"] = req[ele['key']]
                    else:
                        if "value" in ele:
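The rewritten branch above means file-typed begin inputs now arrive as multipart form data while plain inputs stay in the form body. A hypothetical client call for that path; the session route, key names, server address and token are placeholders taken to follow RAGFlow's documented /agents/<agent_id>/sessions endpoint, not verified here:

import requests

resp = requests.post(
    "http://ragflow_address/api/v1/agents/<agent_id>/sessions?user_id=u1",
    headers={"Authorization": "Bearer <RAGFLOW_API_KEY>"},
    data={"lang": "Spanish"},                  # non-file begin inputs
    files={"file": open("paper.pdf", "rb")},   # file-typed begin inputs
)
print(resp.json())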
@@ -100,7 +124,7 @@ def create_agent_session(tenant_id, agent_id):
    conv = {
        "id": get_uuid(),
        "dialog_id": cvs.id,
        "user_id": req.get("user_id", "") if isinstance(req, dict) else "",
        "user_id": user_id,
        "message": [{"role": "assistant", "content": canvas.get_prologue()}],
        "source": "agent",
        "dsl": cvs.dsl
@@ -136,8 +160,10 @@ def update(tenant_id, chat_id, session_id):
@token_required
def chat_completion(tenant_id, chat_id):
    req = request.json
    if not req or not req.get("session_id"):
    if not req:
        req = {"question": ""}
    if not req.get("session_id"):
        req["question"] = ""
    if not DialogService.query(tenant_id=tenant_id, id=chat_id, status=StatusEnum.VALID.value):
        return get_error_data_result(f"You don't own the chat {chat_id}")
    if req.get("session_id"):
@@ -159,6 +185,169 @@ def chat_completion(tenant_id, chat_id):
    return get_result(data=answer)

@manager.route('/chats_openai/<chat_id>/chat/completions', methods=['POST'])  # noqa: F821
@validate_request("model", "messages")  # noqa: F821
@token_required
def chat_completion_openai_like(tenant_id, chat_id):
    """
    OpenAI-like chat completion API that simulates the behavior of OpenAI's completions endpoint.

    This function allows users to interact with a model and receive responses based on a series of historical messages.
    If `stream` is set to True (the default), the response is streamed in chunks, mimicking the OpenAI-style API.
    Set `stream` to False explicitly and the response is returned as a single complete answer.

    Example usage:

    curl -X POST https://ragflow_address.com/api/v1/chats_openai/<chat_id>/chat/completions \
        -H "Content-Type: application/json" \
        -H "Authorization: Bearer $RAGFLOW_API_KEY" \
        -d '{
            "model": "model",
            "messages": [{"role": "user", "content": "Say this is a test!"}],
            "stream": true
        }'

    Alternatively, you can use Python's `OpenAI` client:

    from openai import OpenAI

    model = "model"
    client = OpenAI(api_key="ragflow-api-key", base_url=f"http://ragflow_address/api/v1/chats_openai/<chat_id>")

    completion = client.chat.completions.create(
        model=model,
        messages=[
            {"role": "system", "content": "You are a helpful assistant."},
            {"role": "user", "content": "Who are you?"},
            {"role": "assistant", "content": "I am an AI assistant named..."},
            {"role": "user", "content": "Can you tell me how to install neovim"},
        ],
        stream=True
    )

    stream = True
    if stream:
        for chunk in completion:
            print(chunk)
    else:
        print(completion.choices[0].message.content)
    """
    req = request.json

    messages = req.get("messages", [])
    # To prevent empty [] input
    if len(messages) < 1:
        return get_error_data_result("You have to provide messages.")
    if messages[-1]["role"] != "user":
        return get_error_data_result("The last content of this conversation is not from user.")

    prompt = messages[-1]["content"]
    # Treat context tokens as reasoning tokens (character counts stand in for real token counts here)
    context_token_used = sum(len(message["content"]) for message in messages)

    dia = DialogService.query(tenant_id=tenant_id, id=chat_id, status=StatusEnum.VALID.value)
    if not dia:
        return get_error_data_result(f"You don't own the chat {chat_id}")
    dia = dia[0]

    # Filter out system messages, and assistant messages that precede the
    # first user turn (e.g. the opening greeting), keeping the rest.
    msg = []
    for m in messages:
        if m["role"] == "system":
            continue
        if m["role"] == "assistant" and not msg:
            continue
        msg.append(m)

    if req.get("stream", True):
        # The value for the usage field on all chunks except for the last one will be null.
        # The usage field on the last chunk contains token usage statistics for the entire request.
        # The choices field on the last chunk will always be an empty array [].
        def streamed_response_generator(chat_id, dia, msg):
            token_used = 0
            response = {
                "id": f"chatcmpl-{chat_id}",
                "choices": [
                    {
                        "delta": {
                            "content": "",
                            "role": "assistant",
                            "function_call": None,
                            "tool_calls": None
                        },
                        "finish_reason": None,
                        "index": 0,
                        "logprobs": None
                    }
                ],
                "created": int(time.time()),
                "model": "model",
                "object": "chat.completion.chunk",
                "system_fingerprint": "",
                "usage": None
            }

            try:
                for ans in chat(dia, msg, True):
                    answer = ans["answer"]
                    incremental = answer[token_used:]
                    token_used += len(incremental)
                    response["choices"][0]["delta"]["content"] = incremental
                    yield f"data:{json.dumps(response, ensure_ascii=False)}\n\n"
            except Exception as e:
                response["choices"][0]["delta"]["content"] = "**ERROR**: " + str(e)
                yield f"data:{json.dumps(response, ensure_ascii=False)}\n\n"

            # The last chunk
            response["choices"][0]["delta"]["content"] = None
            response["choices"][0]["finish_reason"] = "stop"
            response["usage"] = {
                "prompt_tokens": len(prompt),
                "completion_tokens": token_used,
                "total_tokens": len(prompt) + token_used
            }
            yield f"data:{json.dumps(response, ensure_ascii=False)}\n\n"
            yield "data:[DONE]\n\n"

        resp = Response(streamed_response_generator(chat_id, dia, msg), mimetype="text/event-stream")
        resp.headers.add_header("Cache-control", "no-cache")
        resp.headers.add_header("Connection", "keep-alive")
        resp.headers.add_header("X-Accel-Buffering", "no")
        resp.headers.add_header("Content-Type", "text/event-stream; charset=utf-8")
        return resp
    else:
        answer = None
        for ans in chat(dia, msg, False):
            # focus answer content only
            answer = ans
            break
        content = answer["answer"]

        response = {
            "id": f"chatcmpl-{chat_id}",
            "object": "chat.completion",
            "created": int(time.time()),
            "model": req.get("model", ""),
            "usage": {
                "prompt_tokens": len(prompt),
                "completion_tokens": len(content),
                "total_tokens": len(prompt) + len(content),
                "completion_tokens_details": {
                    "reasoning_tokens": context_token_used,
                    "accepted_prediction_tokens": len(content),
                    "rejected_prediction_tokens": 0  # 0 for simplicity
                }
            },
            "choices": [
                {
                    "message": {
                        "role": "assistant",
                        "content": content
                    },
                    "logprobs": None,
                    "finish_reason": "stop",
                    "index": 0
                }
            ]
        }
        return jsonify(response)

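On the wire, the streaming branch above emits standard "data:" server-sent-event lines terminated by "data:[DONE]". A hedged client-side sketch using requests, for readers not going through the OpenAI SDK; the address, chat id and key are placeholders:

import json

import requests

resp = requests.post(
    "http://ragflow_address/api/v1/chats_openai/<chat_id>/chat/completions",
    headers={"Authorization": "Bearer <RAGFLOW_API_KEY>"},
    json={"model": "model", "messages": [{"role": "user", "content": "Hi"}], "stream": True},
    stream=True,
)
for line in resp.iter_lines(decode_unicode=True):
    if not line or not line.startswith("data:"):
        continue
    payload = line[len("data:"):]
    if payload == "[DONE]":
        break
    chunk = json.loads(payload)
    # The final chunk carries usage stats and a None content delta; skip it.
    delta = chunk["choices"][0]["delta"]["content"] if chunk["choices"] else None
    if delta:
        print(delta, end="", flush=True)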
@manager.route('/agents/<agent_id>/completions', methods=['POST'])  # noqa: F821
@token_required
def agent_completions(tenant_id, agent_id):

@@ -23,6 +23,8 @@ from api.db.services.dialog_service import DialogService, chat
from api.utils import get_uuid
import json

from rag.prompts import chunks_format


class ConversationService(CommonService):
    model = Conversation
@@ -53,18 +55,7 @@ def structure_answer(conv, ans, message_id, session_id):
        reference = {}
        ans["reference"] = {}

    def get_value(d, k1, k2):
        return d.get(k1, d.get(k2))

    chunk_list = [{
        "id": get_value(chunk, "chunk_id", "id"),
        "content": get_value(chunk, "content", "content_with_weight"),
        "document_id": get_value(chunk, "doc_id", "document_id"),
        "document_name": get_value(chunk, "docnm_kwd", "document_name"),
        "dataset_id": get_value(chunk, "kb_id", "dataset_id"),
        "image_id": get_value(chunk, "image_id", "img_id"),
        "positions": get_value(chunk, "positions", "position_int"),
    } for chunk in reference.get("chunks", [])]
    chunk_list = chunks_format(reference)

    reference["chunks"] = chunk_list
    ans["id"] = message_id

@@ -15,28 +15,24 @@
#
import logging
import binascii
import os
import json
import json_repair
import time
from functools import partial
import re
from collections import defaultdict
from copy import deepcopy
from timeit import default_timer as timer
import datetime
from datetime import timedelta
from agentic_reasoning import DeepResearcher
from api.db import LLMType, ParserType, StatusEnum
from api.db.db_models import Dialog, DB
from api.db.services.common_service import CommonService
from api.db.services.document_service import DocumentService
from api.db.services.knowledgebase_service import KnowledgebaseService
from api.db.services.llm_service import LLMService, TenantLLMService, LLMBundle
from api.db.services.llm_service import TenantLLMService, LLMBundle
from api import settings
from graphrag.utils import get_tags_from_cache, set_tags_to_cache
from rag.app.resume import forbidden_select_fields4resume
from rag.app.tag import label_question
from rag.nlp.search import index_name
from rag.settings import TAG_FLD
from rag.utils import rmSpace, num_tokens_from_string, encoder
from api.utils.file_utils import get_project_base_directory
from rag.prompts import kb_prompt, message_fit_in, llm_id2llm_type, keyword_extraction, full_question, chunks_format
from rag.utils import rmSpace, num_tokens_from_string
from rag.utils.tavily_conn import Tavily


class DialogService(CommonService):
@@ -65,128 +61,49 @@ class DialogService(CommonService):
    return list(chats.dicts())

def message_fit_in(msg, max_length=4000):
|
||||
def count():
|
||||
nonlocal msg
|
||||
tks_cnts = []
|
||||
for m in msg:
|
||||
tks_cnts.append(
|
||||
{"role": m["role"], "count": num_tokens_from_string(m["content"])})
|
||||
total = 0
|
||||
for m in tks_cnts:
|
||||
total += m["count"]
|
||||
return total
|
||||
|
||||
c = count()
|
||||
if c < max_length:
|
||||
return c, msg
|
||||
|
||||
msg_ = [m for m in msg[:-1] if m["role"] == "system"]
|
||||
if len(msg) > 1:
|
||||
msg_.append(msg[-1])
|
||||
msg = msg_
|
||||
c = count()
|
||||
if c < max_length:
|
||||
return c, msg
|
||||
|
||||
ll = num_tokens_from_string(msg_[0]["content"])
|
||||
ll2 = num_tokens_from_string(msg_[-1]["content"])
|
||||
if ll / (ll + ll2) > 0.8:
|
||||
m = msg_[0]["content"]
|
||||
m = encoder.decode(encoder.encode(m)[:max_length - ll2])
|
||||
msg[0]["content"] = m
|
||||
return max_length, msg
|
||||
|
||||
m = msg_[1]["content"]
|
||||
m = encoder.decode(encoder.encode(m)[:max_length - ll2])
|
||||
msg[1]["content"] = m
|
||||
return max_length, msg
|
||||
|
||||
|
||||
def llm_id2llm_type(llm_id):
|
||||
llm_id, _ = TenantLLMService.split_model_name_and_factory(llm_id)
|
||||
fnm = os.path.join(get_project_base_directory(), "conf")
|
||||
llm_factories = json.load(open(os.path.join(fnm, "llm_factories.json"), "r"))
|
||||
for llm_factory in llm_factories["factory_llm_infos"]:
|
||||
for llm in llm_factory["llm"]:
|
||||
if llm_id == llm["llm_name"]:
|
||||
return llm["model_type"].strip(",")[-1]
|
||||
|
||||
|
||||
def kb_prompt(kbinfos, max_tokens):
|
||||
knowledges = [ck["content_with_weight"] for ck in kbinfos["chunks"]]
|
||||
used_token_count = 0
|
||||
chunks_num = 0
|
||||
for i, c in enumerate(knowledges):
|
||||
used_token_count += num_tokens_from_string(c)
|
||||
chunks_num += 1
|
||||
if max_tokens * 0.97 < used_token_count:
|
||||
knowledges = knowledges[:i]
|
||||
break
|
||||
|
||||
docs = DocumentService.get_by_ids([ck["doc_id"] for ck in kbinfos["chunks"][:chunks_num]])
|
||||
docs = {d.id: d.meta_fields for d in docs}
|
||||
|
||||
doc2chunks = defaultdict(lambda: {"chunks": [], "meta": []})
|
||||
for ck in kbinfos["chunks"][:chunks_num]:
|
||||
doc2chunks[ck["docnm_kwd"]]["chunks"].append(ck["content_with_weight"])
|
||||
doc2chunks[ck["docnm_kwd"]]["meta"] = docs.get(ck["doc_id"], {})
|
||||
|
||||
knowledges = []
|
||||
for nm, cks_meta in doc2chunks.items():
|
||||
txt = f"Document: {nm} \n"
|
||||
for k,v in cks_meta["meta"].items():
|
||||
txt += f"{k}: {v}\n"
|
||||
txt += "Relevant fragments as following:\n"
|
||||
for i, chunk in enumerate(cks_meta["chunks"], 1):
|
||||
txt += f"{i}. {chunk}\n"
|
||||
knowledges.append(txt)
|
||||
return knowledges
|
||||
|
||||
|
||||
def label_question(question, kbs):
|
||||
tags = None
|
||||
tag_kb_ids = []
|
||||
for kb in kbs:
|
||||
if kb.parser_config.get("tag_kb_ids"):
|
||||
tag_kb_ids.extend(kb.parser_config["tag_kb_ids"])
|
||||
if tag_kb_ids:
|
||||
all_tags = get_tags_from_cache(tag_kb_ids)
|
||||
if not all_tags:
|
||||
all_tags = settings.retrievaler.all_tags_in_portion(kb.tenant_id, tag_kb_ids)
|
||||
set_tags_to_cache(all_tags, tag_kb_ids)
|
||||
def chat_solo(dialog, messages, stream=True):
|
||||
if llm_id2llm_type(dialog.llm_id) == "image2text":
|
||||
chat_mdl = LLMBundle(dialog.tenant_id, LLMType.IMAGE2TEXT, dialog.llm_id)
|
||||
else:
|
||||
all_tags = json.loads(all_tags)
|
||||
tag_kbs = KnowledgebaseService.get_by_ids(tag_kb_ids)
|
||||
tags = settings.retrievaler.tag_query(question,
|
||||
list(set([kb.tenant_id for kb in tag_kbs])),
|
||||
tag_kb_ids,
|
||||
all_tags,
|
||||
kb.parser_config.get("topn_tags", 3)
|
||||
)
|
||||
return tags
|
||||
chat_mdl = LLMBundle(dialog.tenant_id, LLMType.CHAT, dialog.llm_id)
|
||||
|
||||
prompt_config = dialog.prompt_config
|
||||
tts_mdl = None
|
||||
if prompt_config.get("tts"):
|
||||
tts_mdl = LLMBundle(dialog.tenant_id, LLMType.TTS)
|
||||
msg = [{"role": m["role"], "content": re.sub(r"##\d+\$\$", "", m["content"])}
|
||||
for m in messages if m["role"] != "system"]
|
||||
if stream:
|
||||
last_ans = ""
|
||||
for ans in chat_mdl.chat_streamly(prompt_config.get("system", ""), msg, dialog.llm_setting):
|
||||
answer = ans
|
||||
delta_ans = ans[len(last_ans):]
|
||||
if num_tokens_from_string(delta_ans) < 16:
|
||||
continue
|
||||
last_ans = answer
|
||||
yield {"answer": answer, "reference": {}, "audio_binary": tts(tts_mdl, delta_ans), "prompt":"", "created_at": time.time()}
|
||||
else:
|
||||
answer = chat_mdl.chat(prompt_config.get("system", ""), msg, dialog.llm_setting)
|
||||
user_content = msg[-1].get("content", "[content not available]")
|
||||
logging.debug("User: {}|Assistant: {}".format(user_content, answer))
|
||||
yield {"answer": answer, "reference": {}, "audio_binary": tts(tts_mdl, answer), "prompt": "", "created_at": time.time()}
|
||||
|
||||
|
||||
def chat(dialog, messages, stream=True, **kwargs):
|
||||
assert messages[-1]["role"] == "user", "The last content of this conversation is not from user."
|
||||
if not dialog.kb_ids:
|
||||
for ans in chat_solo(dialog, messages, stream):
|
||||
yield ans
|
||||
return
|
||||
|
||||
chat_start_ts = timer()
|
||||
|
||||
# Get llm model name and model provider name
|
||||
llm_id, model_provider = TenantLLMService.split_model_name_and_factory(dialog.llm_id)
|
||||
|
||||
# Get llm model instance by model and provide name
|
||||
llm = LLMService.query(llm_name=llm_id) if not model_provider else LLMService.query(llm_name=llm_id, fid=model_provider)
|
||||
|
||||
if not llm:
|
||||
# Model name is provided by tenant, but not system built-in
|
||||
llm = TenantLLMService.query(tenant_id=dialog.tenant_id, llm_name=llm_id) if not model_provider else \
|
||||
TenantLLMService.query(tenant_id=dialog.tenant_id, llm_name=llm_id, llm_factory=model_provider)
|
||||
if not llm:
|
||||
raise LookupError("LLM(%s) not found" % dialog.llm_id)
|
||||
max_tokens = 8192
|
||||
if llm_id2llm_type(dialog.llm_id) == "image2text":
|
||||
llm_model_config = TenantLLMService.get_model_config(dialog.tenant_id, LLMType.IMAGE2TEXT, dialog.llm_id)
|
||||
else:
|
||||
max_tokens = llm[0].max_tokens
|
||||
llm_model_config = TenantLLMService.get_model_config(dialog.tenant_id, LLMType.CHAT, dialog.llm_id)
|
||||
|
||||
max_tokens = llm_model_config.get("max_tokens", 8192)
|
||||
|
||||
check_llm_ts = timer()
|
||||
|
||||
@ -204,9 +121,6 @@ def chat(dialog, messages, stream=True, **kwargs):
|
||||
attachments = kwargs["doc_ids"].split(",") if "doc_ids" in kwargs else None
|
||||
if "doc_ids" in messages[-1]:
|
||||
attachments = messages[-1]["doc_ids"]
|
||||
for m in messages[:-1]:
|
||||
if "doc_ids" in m:
|
||||
attachments.extend(m["doc_ids"])
|
||||
|
||||
create_retriever_ts = timer()
|
||||
|
||||
@ -258,9 +172,11 @@ def chat(dialog, messages, stream=True, **kwargs):
|
||||
|
||||
bind_reranker_ts = timer()
|
||||
generate_keyword_ts = bind_reranker_ts
|
||||
thought = ""
|
||||
kbinfos = {"total": 0, "chunks": [], "doc_aggs": []}
|
||||
|
||||
if "knowledge" not in [p["key"] for p in prompt_config["parameters"]]:
|
||||
kbinfos = {"total": 0, "chunks": [], "doc_aggs": []}
|
||||
knowledges = []
|
||||
else:
|
||||
if prompt_config.get("keyword", False):
|
||||
questions[-1] += keyword_extraction(chat_mdl, questions[-1])
|
||||
@@ -268,6 +184,19 @@ def chat(dialog, messages, stream=True, **kwargs):

        tenant_ids = list(set([kb.tenant_id for kb in kbs]))

        knowledges = []
        if prompt_config.get("reasoning", False):
            reasoner = DeepResearcher(chat_mdl,
                                      prompt_config,
                                      partial(retriever.retrieval, embd_mdl=embd_mdl, tenant_ids=tenant_ids, kb_ids=dialog.kb_ids, page=1, page_size=dialog.top_n, similarity_threshold=0.2, vector_similarity_weight=0.3))

            for think in reasoner.thinking(kbinfos, " ".join(questions)):
                if isinstance(think, str):
                    thought = think
                    knowledges = [t for t in think.split("\n") if t]
                elif stream:
                    yield think
        else:
            kbinfos = retriever.retrieval(" ".join(questions), embd_mdl, tenant_ids, dialog.kb_ids, 1, dialog.top_n,
                                          dialog.similarity_threshold,
                                          dialog.vector_similarity_weight,
@@ -275,6 +204,11 @@ def chat(dialog, messages, stream=True, **kwargs):
                                          top=dialog.top_k, aggs=False, rerank_mdl=rerank_mdl,
                                          rank_feature=label_question(" ".join(questions), kbs)
                                          )
if prompt_config.get("tavily_api_key"):
|
||||
tav = Tavily(prompt_config["tavily_api_key"])
|
||||
tav_res = tav.retrieve_chunks(" ".join(questions))
|
||||
kbinfos["chunks"].extend(tav_res["chunks"])
|
||||
kbinfos["doc_aggs"].extend(tav_res["doc_aggs"])
|
||||
if prompt_config.get("use_kg"):
|
||||
ck = settings.kg_retrievaler.retrieval(" ".join(questions),
|
||||
tenant_ids,
|
||||
@ -284,12 +218,12 @@ def chat(dialog, messages, stream=True, **kwargs):
|
||||
if ck["content_with_weight"]:
|
||||
kbinfos["chunks"].insert(0, ck)
|
||||
|
||||
retrieval_ts = timer()
|
||||
|
||||
knowledges = kb_prompt(kbinfos, max_tokens)
|
||||
|
||||
logging.debug(
|
||||
"{}->{}".format(" ".join(questions), "\n->".join(knowledges)))
|
||||
|
||||
retrieval_ts = timer()
|
||||
if not knowledges and prompt_config.get("empty_response"):
|
||||
empty_res = prompt_config["empty_response"]
|
||||
yield {"answer": empty_res, "reference": kbinfos, "audio_binary": tts(tts_mdl, empty_res)}
|
||||
@@ -314,9 +248,12 @@ def chat(dialog, messages, stream=True, **kwargs):
    def decorate_answer(answer):
        nonlocal prompt_config, knowledges, kwargs, kbinfos, prompt, retrieval_ts

        finish_chat_ts = timer()

        refs = []
        ans = answer.split("</think>")
        think = ""
        if len(ans) == 2:
            think = ans[0] + "</think>"
            answer = ans[1]
        if knowledges and (prompt_config.get("quote", True) and kwargs.get("quote", True)):
            answer, idx = retriever.insert_citations(answer,
                                                     [ck["content_ltks"]
@@ -354,26 +291,28 @@ def chat(dialog, messages, stream=True, **kwargs):
        generate_result_time_cost = (finish_chat_ts - retrieval_ts) * 1000

        prompt = f"{prompt}\n\n - Total: {total_time_cost:.1f}ms\n - Check LLM: {check_llm_time_cost:.1f}ms\n - Create retriever: {create_retriever_time_cost:.1f}ms\n - Bind embedding: {bind_embedding_time_cost:.1f}ms\n - Bind LLM: {bind_llm_time_cost:.1f}ms\n - Tune question: {refine_question_time_cost:.1f}ms\n - Bind reranker: {bind_reranker_time_cost:.1f}ms\n - Generate keyword: {generate_keyword_time_cost:.1f}ms\n - Retrieval: {retrieval_time_cost:.1f}ms\n - Generate answer: {generate_result_time_cost:.1f}ms"
        return {"answer": answer, "reference": refs, "prompt": re.sub(r"\n", " \n", prompt)}
        return {"answer": think + answer, "reference": refs, "prompt": re.sub(r"\n", " \n", prompt), "created_at": time.time()}

    if stream:
        last_ans = ""
        answer = ""
        for ans in chat_mdl.chat_streamly(prompt, msg[1:], gen_conf):
            if thought:
                ans = re.sub(r"<think>.*</think>", "", ans, flags=re.DOTALL)
            answer = ans
            delta_ans = ans[len(last_ans):]
            if num_tokens_from_string(delta_ans) < 16:
                continue
            last_ans = answer
            yield {"answer": answer, "reference": {}, "audio_binary": tts(tts_mdl, delta_ans)}
            yield {"answer": thought + answer, "reference": {}, "audio_binary": tts(tts_mdl, delta_ans)}
        delta_ans = answer[len(last_ans):]
        if delta_ans:
            yield {"answer": answer, "reference": {}, "audio_binary": tts(tts_mdl, delta_ans)}
            yield {"answer": thought + answer, "reference": {}, "audio_binary": tts(tts_mdl, delta_ans)}
        yield decorate_answer(answer)
        yield decorate_answer(thought + answer)
    else:
        answer = chat_mdl.chat(prompt, msg[1:], gen_conf)
        logging.debug("User: {}|Assistant: {}".format(
            msg[-1]["content"], answer))
        user_content = msg[-1].get("content", "[content not available]")
        logging.debug("User: {}|Assistant: {}".format(user_content, answer))
        res = decorate_answer(answer)
        res["audio_binary"] = tts(tts_mdl, answer)
        yield res
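
Since `chat` yields dicts in both branches, a caller drains it the same way whether or not streaming is on. A minimal, hypothetical driver (the `dialog` record and model setup are assumed to exist already):

```python
# Hypothetical usage sketch: `dialog` is assumed to be a configured Dialog record.
history = [{"role": "user", "content": "What is RAGFlow?"}]
final = None
for chunk in chat(dialog, history, stream=True):
    final = chunk  # each streamed chunk carries the full answer so far
    print(chunk["answer"])
print(final["reference"])  # the last chunk, from decorate_answer(), carries references
```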
@@ -506,172 +445,6 @@ Please write the SQL, only SQL, without any other explanations or text.
}


def relevant(tenant_id, llm_id, question, contents: list):
    if llm_id2llm_type(llm_id) == "image2text":
        chat_mdl = LLMBundle(tenant_id, LLMType.IMAGE2TEXT, llm_id)
    else:
        chat_mdl = LLMBundle(tenant_id, LLMType.CHAT, llm_id)
    prompt = """
        You are a grader assessing the relevance of a retrieved document to a user question.
        It does not need to be a stringent test. The goal is to filter out erroneous retrievals.
        If the document contains keyword(s) or semantic meaning related to the user question, grade it as relevant.
        Give a binary 'yes' or 'no' score to indicate whether the document is relevant to the question.
        No other words are needed except 'yes' or 'no'.
    """
    if not contents:
        return False
    contents = "Documents: \n" + " - ".join(contents)
    contents = f"Question: {question}\n" + contents
    if num_tokens_from_string(contents) >= chat_mdl.max_length - 4:
        contents = encoder.decode(encoder.encode(contents)[:chat_mdl.max_length - 4])
    ans = chat_mdl.chat(prompt, [{"role": "user", "content": contents}], {"temperature": 0.01})
    if ans.lower().find("yes") >= 0:
        return True
    return False

def rewrite(tenant_id, llm_id, question):
    if llm_id2llm_type(llm_id) == "image2text":
        chat_mdl = LLMBundle(tenant_id, LLMType.IMAGE2TEXT, llm_id)
    else:
        chat_mdl = LLMBundle(tenant_id, LLMType.CHAT, llm_id)
    prompt = """
        You are an expert at query expansion, generating paraphrases of a question.
        I can't retrieve relevant information from the knowledge base by using the user's question directly.
        You need to expand or paraphrase the user's question in multiple ways, such as using synonymous words/phrases,
        writing an abbreviation out in full, adding some extra descriptions or explanations,
        changing the way of expression, translating the original question into another language (English/Chinese), etc.
        Return 5 versions of the question, one of which is a translation.
        Just list the questions. No other words are needed.
    """
    ans = chat_mdl.chat(prompt, [{"role": "user", "content": question}], {"temperature": 0.8})
    return ans

def keyword_extraction(chat_mdl, content, topn=3):
    prompt = f"""
Role: You are a text analyzer.
Task: Extract the most important keywords/phrases from a given piece of text content.
Requirements:
  - Summarize the text content, and give the top {topn} important keywords/phrases.
  - The keywords MUST be in the language of the given piece of text content.
  - The keywords are delimited by ENGLISH COMMA.
  - Output the keywords ONLY.

### Text Content
{content}

"""
    msg = [
        {"role": "system", "content": prompt},
        {"role": "user", "content": "Output: "}
    ]
    _, msg = message_fit_in(msg, chat_mdl.max_length)
    kwd = chat_mdl.chat(prompt, msg[1:], {"temperature": 0.2})
    if isinstance(kwd, tuple):
        kwd = kwd[0]
    if kwd.find("**ERROR**") >= 0:
        return ""
    return kwd

def question_proposal(chat_mdl, content, topn=3):
    prompt = f"""
Role: You are a text analyzer.
Task: Propose {topn} questions about a given piece of text content.
Requirements:
  - Understand and summarize the text content, and propose the top {topn} important questions.
  - The questions SHOULD NOT have overlapping meanings.
  - The questions SHOULD cover the main content of the text as much as possible.
  - The questions MUST be in the language of the given piece of text content.
  - One question per line.
  - Output the questions ONLY.

### Text Content
{content}

"""
    msg = [
        {"role": "system", "content": prompt},
        {"role": "user", "content": "Output: "}
    ]
    _, msg = message_fit_in(msg, chat_mdl.max_length)
    kwd = chat_mdl.chat(prompt, msg[1:], {"temperature": 0.2})
    if isinstance(kwd, tuple):
        kwd = kwd[0]
    if kwd.find("**ERROR**") >= 0:
        return ""
    return kwd

def full_question(tenant_id, llm_id, messages):
    if llm_id2llm_type(llm_id) == "image2text":
        chat_mdl = LLMBundle(tenant_id, LLMType.IMAGE2TEXT, llm_id)
    else:
        chat_mdl = LLMBundle(tenant_id, LLMType.CHAT, llm_id)
    conv = []
    for m in messages:
        if m["role"] not in ["user", "assistant"]:
            continue
        conv.append("{}: {}".format(m["role"].upper(), m["content"]))
    conv = "\n".join(conv)
    today = datetime.date.today().isoformat()
    yesterday = (datetime.date.today() - timedelta(days=1)).isoformat()
    tomorrow = (datetime.date.today() + timedelta(days=1)).isoformat()
    prompt = f"""
Role: A helpful assistant

Task and steps:
    1. Generate a full user question that would follow the conversation.
    2. If the user's question involves a relative date, convert it into an absolute date based on the current date, which is {today}. For example, 'yesterday' would be converted to {yesterday}.

Requirements & Restrictions:
  - The text generated MUST be in the same language as the original user's question.
  - If the user's latest question is already complete, don't do anything; just return the original question.
  - DON'T generate anything except a refined question.

######################
-Examples-
######################

# Example 1
## Conversation
USER: What is the name of Donald Trump's father?
ASSISTANT: Fred Trump.
USER: And his mother?
###############
Output: What's the name of Donald Trump's mother?

------------
# Example 2
## Conversation
USER: What is the name of Donald Trump's father?
ASSISTANT: Fred Trump.
USER: And his mother?
ASSISTANT: Mary Trump.
USER: What's her full name?
###############
Output: What's the full name of Donald Trump's mother Mary Trump?

------------
# Example 3
## Conversation
USER: What's the weather today in London?
ASSISTANT: Cloudy.
USER: What about tomorrow in Rochester?
###############
Output: What's the weather in Rochester on {tomorrow}?
######################

# Real Data
## Conversation
{conv}
###############
    """
    ans = chat_mdl.chat(prompt, [{"role": "user", "content": "Output: "}], {"temperature": 0.2})
    return ans if ans.find("**ERROR**") < 0 else messages[-1]["content"]
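
A hedged usage sketch of this condensation step; the tenant and model IDs below are placeholders, not values from the source:

```python
# Hypothetical IDs; full_question condenses the turns into one standalone query.
history = [
    {"role": "user", "content": "What is the name of Donald Trump's father?"},
    {"role": "assistant", "content": "Fred Trump."},
    {"role": "user", "content": "And his mother?"},
]
standalone = full_question("tenant-0001", "qwen-plus@Tongyi-Qianwen", history)
# Expected shape of the result: "What's the name of Donald Trump's mother?"
```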


def tts(tts_mdl, text):
    if not tts_mdl or not text:
        return
@@ -738,7 +511,7 @@ def ask(question, kb_ids, tenant_id):

        if answer.lower().find("invalid key") >= 0 or answer.lower().find("invalid api") >= 0:
            answer += " Please set LLM API-Key in 'User Setting -> Model Providers -> API-Key'"
        return {"answer": answer, "reference": refs}
        return {"answer": answer, "reference": chunks_format(refs)}

    answer = ""
    for ans in chat_mdl.chat_streamly(prompt, msg, {"temperature": 0.1}):
@@ -747,62 +520,3 @@ def ask(question, kb_ids, tenant_id):
    yield decorate_answer(answer)


def content_tagging(chat_mdl, content, all_tags, examples, topn=3):
    prompt = f"""
Role: You are a text analyzer.

Task: Add tags (labels) to a given piece of text content based on the examples and the entire tag set.

Steps:
  - Comprehend the tag/label set.
  - Comprehend the examples, each of which consists of a text content and its assigned tags with relevance scores, in JSON format.
  - Summarize the text content, and tag it with the top {topn} most relevant tags from the tag/label set, along with the corresponding relevance scores.

Requirements:
  - The tags MUST come from the tag set.
  - The output MUST be in JSON format only: the key is the tag and the value is its relevance score.
  - The relevance score must range from 1 to 10.
  - Output the tags ONLY.

# TAG SET
{", ".join(all_tags)}

"""
    for i, ex in enumerate(examples):
        prompt += """
# Examples {}
### Text Content
{}

Output:
{}

""".format(i, ex["content"], json.dumps(ex[TAG_FLD], indent=2, ensure_ascii=False))

    prompt += f"""
# Real Data
### Text Content
{content}

"""
    msg = [
        {"role": "system", "content": prompt},
        {"role": "user", "content": "Output: "}
    ]
    _, msg = message_fit_in(msg, chat_mdl.max_length)
    kwd = chat_mdl.chat(prompt, msg[1:], {"temperature": 0.5})
    if isinstance(kwd, tuple):
        kwd = kwd[0]
    if kwd.find("**ERROR**") >= 0:
        raise Exception(kwd)

    try:
        return json_repair.loads(kwd)
    except json_repair.JSONDecodeError:
        try:
            result = kwd.replace(prompt[:-1], '').replace('user', '').replace('model', '').strip()
            result = '{' + result.split('{')[1].split('}')[0] + '}'
            return json_repair.loads(result)
        except Exception as e:
            logging.exception(f"JSON parsing error: {result} -> {e}")
            raise e
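
The fallback path above leans on `json_repair`, which tolerates the almost-JSON that LLMs tend to emit. A small illustration (the input string is made up):

```python
import json_repair

# A trailing comma would make json.loads fail; json_repair recovers the object.
broken = '{"finance": 8, "tax": 6,}'
print(json_repair.loads(broken))  # {'finance': 8, 'tax': 6}
```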

@@ -372,13 +372,17 @@ class DocumentService(CommonService):
            "progress_msg": "Task is queued...",
            "process_begin_at": get_format_time()
        })

    @classmethod
    @DB.connection_context()
    def update_meta_fields(cls, doc_id, meta_fields):
        return cls.update_by_id(doc_id, {"meta_fields": meta_fields})

    @classmethod
    @DB.connection_context()
    def update_progress(cls):
        MSG = {
            "raptor": "Start RAPTOR (Recursive Abstractive Processing for Tree-Organized Retrieval).",
            "graphrag": "Start Graph Extraction",
            "graphrag": "Entities extraction progress",
            "graph_resolution": "Start Graph Resolution",
            "graph_community": "Start Graph Community Reports Generation"
        }
@@ -500,6 +504,9 @@ def doc_upload_and_parse(conversation_id, file_objs, user_id):
    assert e, "Conversation not found!"

    e, dia = DialogService.get_by_id(conv.dialog_id)
    if not dia.kb_ids:
        raise LookupError("No knowledge base associated with this conversation. "
                          "Please add a knowledge base before uploading documents.")
    kb_id = dia.kb_ids[0]
    e, kb = KnowledgebaseService.get_by_id(kb_id)
    if not e:
@@ -86,8 +86,7 @@ class TenantLLMService(CommonService):

    @classmethod
    @DB.connection_context()
    def model_instance(cls, tenant_id, llm_type,
                       llm_name=None, lang="Chinese"):
    def get_model_config(cls, tenant_id, llm_type, llm_name=None):
        e, tenant = TenantService.get_by_id(tenant_id)
        if not e:
            raise LookupError("Tenant not found")
@@ -124,7 +123,13 @@ class TenantLLMService(CommonService):
        if not mdlnm:
            raise LookupError(f"Type of {llm_type} model is not set.")
            raise LookupError("Model({}) not authorized".format(mdlnm))
        return model_config

    @classmethod
    @DB.connection_context()
    def model_instance(cls, tenant_id, llm_type,
                       llm_name=None, lang="Chinese"):
        model_config = TenantLLMService.get_model_config(tenant_id, llm_type, llm_name)
        if llm_type == LLMType.EMBEDDING.value:
            if model_config["llm_factory"] not in EmbeddingModel:
                return
@@ -173,40 +178,39 @@ class TenantLLMService(CommonService):
    def increase_usage(cls, tenant_id, llm_type, used_tokens, llm_name=None):
        e, tenant = TenantService.get_by_id(tenant_id)
        if not e:
            raise LookupError("Tenant not found")
            logging.error(f"Tenant not found: {tenant_id}")
            return 0

        if llm_type == LLMType.EMBEDDING.value:
            mdlnm = tenant.embd_id
        elif llm_type == LLMType.SPEECH2TEXT.value:
            mdlnm = tenant.asr_id
        elif llm_type == LLMType.IMAGE2TEXT.value:
            mdlnm = tenant.img2txt_id
        elif llm_type == LLMType.CHAT.value:
            mdlnm = tenant.llm_id if not llm_name else llm_name
        elif llm_type == LLMType.RERANK:
            mdlnm = tenant.rerank_id if not llm_name else llm_name
        elif llm_type == LLMType.TTS:
            mdlnm = tenant.tts_id if not llm_name else llm_name
        else:
            assert False, "LLM type error"
        llm_map = {
            LLMType.EMBEDDING.value: tenant.embd_id,
            LLMType.SPEECH2TEXT.value: tenant.asr_id,
            LLMType.IMAGE2TEXT.value: tenant.img2txt_id,
            LLMType.CHAT.value: tenant.llm_id if not llm_name else llm_name,
            LLMType.RERANK.value: tenant.rerank_id if not llm_name else llm_name,
            LLMType.TTS.value: tenant.tts_id if not llm_name else llm_name
        }

        mdlnm = llm_map.get(llm_type)
        if mdlnm is None:
            logging.error(f"LLM type error: {llm_type}")
            return 0

        llm_name, llm_factory = TenantLLMService.split_model_name_and_factory(mdlnm)

        num = 0
        try:
            if llm_factory:
                tenant_llms = cls.query(tenant_id=tenant_id, llm_name=llm_name, llm_factory=llm_factory)
            else:
                tenant_llms = cls.query(tenant_id=tenant_id, llm_name=llm_name)
            if not tenant_llms:
                return num
            else:
                tenant_llm = tenant_llms[0]
                num = cls.model.update(used_tokens=tenant_llm.used_tokens + used_tokens) \
                    .where(cls.model.tenant_id == tenant_id, cls.model.llm_factory == tenant_llm.llm_factory, cls.model.llm_name == llm_name) \
                    .execute()
            num = cls.model.update(
                used_tokens=cls.model.used_tokens + used_tokens
            ).where(
                cls.model.tenant_id == tenant_id,
                cls.model.llm_name == llm_name,
                cls.model.llm_factory == llm_factory if llm_factory else True
            ).execute()
        except Exception:
            logging.exception("TenantLLMService.increase_usage got exception")
            logging.exception(
                "TenantLLMService.increase_usage got exception. Failed to update used_tokens for tenant_id=%s, llm_name=%s",
                tenant_id, llm_name)
            return 0

        return num
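
The refactor replaces the if/elif chain with a mapping plus a `.get()` fallback, so an unknown LLM type degrades to a logged error instead of an `assert`. A minimal sketch of the pattern, with placeholder values:

```python
import logging

llm_map = {"embedding": "bge-large", "chat": "qwen-plus"}  # placeholder values

mdlnm = llm_map.get("rerank")  # unknown type -> None, not an AssertionError
if mdlnm is None:
    logging.error("LLM type error: rerank")
    # the real method returns 0 here, so callers simply see "no tokens counted"
```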

    @classmethod
@@ -229,10 +233,8 @@ class LLMBundle(object):
            tenant_id, llm_type, llm_name, lang=lang)
        assert self.mdl, "Can't find model for {}/{}/{}".format(
            tenant_id, llm_type, llm_name)
        self.max_length = 8192
        for lm in LLMService.query(llm_name=llm_name):
            self.max_length = lm.max_tokens
            break
        model_config = TenantLLMService.get_model_config(tenant_id, llm_type, llm_name)
        self.max_length = model_config.get("max_tokens", 8192)

    def encode(self, texts: list):
        embeddings, used_tokens = self.mdl.encode(texts)

@@ -28,6 +28,7 @@ import sys
import time
import traceback
from concurrent.futures import ThreadPoolExecutor
import threading

from werkzeug.serving import run_simple
from api import settings
@@ -42,15 +43,21 @@ from api.versions import get_ragflow_version
from api.utils import show_configs
from rag.settings import print_rag_settings

stop_event = threading.Event()

def update_progress():
    while True:
        time.sleep(6)
    while not stop_event.is_set():
        try:
            DocumentService.update_progress()
            stop_event.wait(6)
        except Exception:
            logging.exception("update_progress exception")

def signal_handler(sig, frame):
    logging.info("Received interrupt signal, shutting down...")
    stop_event.set()
    time.sleep(1)
    sys.exit(0)

if __name__ == '__main__':
    logging.info(r"""
@@ -96,6 +103,9 @@ if __name__ == '__main__':
    RuntimeConfig.init_env()
    RuntimeConfig.init_config(JOB_SERVER_HOST=settings.HOST_IP, HTTP_PORT=settings.HOST_PORT)

    signal.signal(signal.SIGINT, signal_handler)
    signal.signal(signal.SIGTERM, signal_handler)

    thread = ThreadPoolExecutor(max_workers=1)
    thread.submit(update_progress)

@@ -112,4 +122,6 @@ if __name__ == '__main__':
        )
    except Exception:
        traceback.print_exc()
    stop_event.set()
    time.sleep(1)
    os.kill(os.getpid(), signal.SIGKILL)
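
The combination of a `threading.Event`, `stop_event.wait(6)` in place of `time.sleep(6)`, and a signal handler gives the background loop a prompt, clean exit: `wait()` returns the moment the event is set, while `sleep()` would block for the full interval. A standalone sketch of the same pattern:

```python
import signal
import sys
import threading

stop_event = threading.Event()

def update_progress_loop():
    # wait() returns early the moment stop_event is set, unlike time.sleep().
    while not stop_event.is_set():
        print("updating progress...")
        stop_event.wait(6)

def signal_handler(sig, frame):
    stop_event.set()
    sys.exit(0)

signal.signal(signal.SIGINT, signal_handler)
signal.signal(signal.SIGTERM, signal_handler)
threading.Thread(target=update_progress_loop, daemon=True).start()
```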

@@ -66,75 +66,28 @@ def init_settings():
    DATABASE_TYPE = os.getenv("DB_TYPE", 'mysql')
    DATABASE = decrypt_database_config(name=DATABASE_TYPE)
    LLM = get_base_config("user_default_llm", {})
    LLM_DEFAULT_MODELS = LLM.get("default_models", {})
    LLM_FACTORY = LLM.get("factory", "Tongyi-Qianwen")
    LLM_BASE_URL = LLM.get("base_url")

    global CHAT_MDL, EMBEDDING_MDL, RERANK_MDL, ASR_MDL, IMAGE2TEXT_MDL
    if not LIGHTEN:
        default_llm = {
            "Tongyi-Qianwen": {
                "chat_model": "qwen-plus",
                "embedding_model": "text-embedding-v2",
                "image2text_model": "qwen-vl-max",
                "asr_model": "paraformer-realtime-8k-v1",
            },
            "OpenAI": {
                "chat_model": "gpt-3.5-turbo",
                "embedding_model": "text-embedding-ada-002",
                "image2text_model": "gpt-4-vision-preview",
                "asr_model": "whisper-1",
            },
            "Azure-OpenAI": {
                "chat_model": "gpt-35-turbo",
                "embedding_model": "text-embedding-ada-002",
                "image2text_model": "gpt-4-vision-preview",
                "asr_model": "whisper-1",
            },
            "ZHIPU-AI": {
                "chat_model": "glm-3-turbo",
                "embedding_model": "embedding-2",
                "image2text_model": "glm-4v",
                "asr_model": "",
            },
            "Ollama": {
                "chat_model": "qwen-14B-chat",
                "embedding_model": "flag-embedding",
                "image2text_model": "",
                "asr_model": "",
            },
            "Moonshot": {
                "chat_model": "moonshot-v1-8k",
                "embedding_model": "",
                "image2text_model": "",
                "asr_model": "",
            },
            "DeepSeek": {
                "chat_model": "deepseek-chat",
                "embedding_model": "",
                "image2text_model": "",
                "asr_model": "",
            },
            "VolcEngine": {
                "chat_model": "",
                "embedding_model": "",
                "image2text_model": "",
                "asr_model": "",
            },
            "BAAI": {
                "chat_model": "",
                "embedding_model": "BAAI/bge-large-zh-v1.5",
                "image2text_model": "",
                "asr_model": "",
                "rerank_model": "BAAI/bge-reranker-v2-m3",
            }
        }
        EMBEDDING_MDL = "BAAI/bge-large-zh-v1.5@BAAI"

        if LLM_FACTORY:
            CHAT_MDL = default_llm[LLM_FACTORY]["chat_model"] + f"@{LLM_FACTORY}"
            ASR_MDL = default_llm[LLM_FACTORY]["asr_model"] + f"@{LLM_FACTORY}"
            IMAGE2TEXT_MDL = default_llm[LLM_FACTORY]["image2text_model"] + f"@{LLM_FACTORY}"
        EMBEDDING_MDL = default_llm["BAAI"]["embedding_model"] + "@BAAI"
        RERANK_MDL = default_llm["BAAI"]["rerank_model"] + "@BAAI"
        if LLM_DEFAULT_MODELS:
            CHAT_MDL = LLM_DEFAULT_MODELS.get("chat_model", CHAT_MDL)
            EMBEDDING_MDL = LLM_DEFAULT_MODELS.get("embedding_model", EMBEDDING_MDL)
            RERANK_MDL = LLM_DEFAULT_MODELS.get("rerank_model", RERANK_MDL)
            ASR_MDL = LLM_DEFAULT_MODELS.get("asr_model", ASR_MDL)
            IMAGE2TEXT_MDL = LLM_DEFAULT_MODELS.get("image2text_model", IMAGE2TEXT_MDL)

            # The factory can be specified in the config name with "@". LLM_FACTORY is used if it is not specified.
            CHAT_MDL = CHAT_MDL + (f"@{LLM_FACTORY}" if "@" not in CHAT_MDL and CHAT_MDL != "" else "")
            EMBEDDING_MDL = EMBEDDING_MDL + (f"@{LLM_FACTORY}" if "@" not in EMBEDDING_MDL and EMBEDDING_MDL != "" else "")
            RERANK_MDL = RERANK_MDL + (f"@{LLM_FACTORY}" if "@" not in RERANK_MDL and RERANK_MDL != "" else "")
            ASR_MDL = ASR_MDL + (f"@{LLM_FACTORY}" if "@" not in ASR_MDL and ASR_MDL != "" else "")
            IMAGE2TEXT_MDL = IMAGE2TEXT_MDL + (
                f"@{LLM_FACTORY}" if "@" not in IMAGE2TEXT_MDL and IMAGE2TEXT_MDL != "" else "")

    global API_KEY, PARSERS, HOST_IP, HOST_PORT, SECRET_KEY
    API_KEY = LLM.get("api_key", "")
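
The `"model@factory"` convention above can be parsed with a single `rsplit`. This is an illustrative stand-in, not the exact `TenantLLMService.split_model_name_and_factory` implementation:

```python
def split_model_and_factory(name: str):
    # "qwen-plus@Tongyi-Qianwen" -> ("qwen-plus", "Tongyi-Qianwen")
    if "@" in name:
        model, factory = name.rsplit("@", 1)
        return model, factory
    return name, None

assert split_model_and_factory("BAAI/bge-large-zh-v1.5@BAAI") == ("BAAI/bge-large-zh-v1.5", "BAAI")
assert split_model_and_factory("gpt-3.5-turbo") == ("gpt-3.5-turbo", None)
```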

@@ -70,6 +70,12 @@ def show_configs():
        if "password" in v:
            v = copy.deepcopy(v)
            v["password"] = "*" * 8
        if "access_key" in v:
            v = copy.deepcopy(v)
            v["access_key"] = "*" * 8
        if "secret_key" in v:
            v = copy.deepcopy(v)
            v["secret_key"] = "*" * 8
        msg += f"\n\t{k}: {v}"
    logging.info(msg)

@@ -351,6 +357,26 @@ def decrypt(line):
        line), "Fail to decrypt password!").decode('utf-8')


def decrypt2(crypt_text):
    from base64 import b64decode, b16decode
    from Crypto.Cipher import PKCS1_v1_5 as Cipher_PKCS1_v1_5
    from Crypto.PublicKey import RSA
    decode_data = b64decode(crypt_text)
    if len(decode_data) == 127:
        hex_fixed = '00' + decode_data.hex()
        decode_data = b16decode(hex_fixed.upper())

    file_path = os.path.join(
        file_utils.get_project_base_directory(),
        "conf",
        "private.pem")
    pem = open(file_path).read()
    rsa_key = RSA.importKey(pem, "Welcome")
    cipher = Cipher_PKCS1_v1_5.new(rsa_key)
    decrypt_text = cipher.decrypt(decode_data, None)
    return (b64decode(decrypt_text)).decode()


def download_img(url):
    if not url:
        return ""

File diff suppressed because it is too large.
@@ -5,25 +5,25 @@ mysql:
  name: 'rag_flow'
  user: 'root'
  password: 'infini_rag_flow'
  host: 'mysql'
  host: 'localhost'
  port: 5455
  max_connections: 100
  stale_timeout: 30
minio:
  user: 'rag_flow'
  password: 'infini_rag_flow'
  host: 'minio:9000'
  host: 'localhost:9000'
es:
  hosts: 'http://es01:1200'
  hosts: 'http://localhost:1200'
  username: 'elastic'
  password: 'infini_rag_flow'
infinity:
  uri: 'infinity:23817'
  uri: 'localhost:23817'
  db_name: 'default_db'
redis:
  db: 1
  password: 'infini_rag_flow'
  host: 'redis:6379'
  host: 'localhost:6379'

# postgres:
#   name: 'rag_flow'
@@ -37,6 +37,12 @@ redis:
#   access_key: 'access_key'
#   secret_key: 'secret_key'
#   region: 'region'
# oss:
#   access_key: 'access_key'
#   secret_key: 'secret_key'
#   endpoint_url: 'http://oss-cn-hangzhou.aliyuncs.com'
#   region: 'cn-hangzhou'
#   bucket: 'bucket_name'
# azure:
#   auth_type: 'sas'
#   container_url: 'container_url'

@@ -1,6 +1,3 @@
#
#  Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
#  Licensed under the Apache License, Version 2.0 (the "License");
#  you may not use this file except in compliance with the License.
#  You may obtain a copy of the License at
@@ -14,19 +11,51 @@
#  limitations under the License.
#

from openpyxl import load_workbook
from openpyxl import load_workbook, Workbook
import sys
from io import BytesIO

from rag.nlp import find_codec

import pandas as pd


class RAGFlowExcelParser:
    def html(self, fnm, chunk_rows=256):
        if isinstance(fnm, str):
            wb = load_workbook(fnm)

        # if isinstance(fnm, str):
        #     wb = load_workbook(fnm)
        # else:
        #     wb = load_workbook(BytesIO(fnm))

        s_fnm = fnm
        if not isinstance(fnm, str):
            s_fnm = BytesIO(fnm)
        else:
            wb = load_workbook(BytesIO(fnm))

        try:
            wb = load_workbook(s_fnm)
        except Exception as e:
            print(f'****wxy: file parser error: {e}, s_fnm={s_fnm}, trying to convert the file')
            df = pd.read_excel(s_fnm)
            wb = Workbook()
            # if len(wb.worksheets) > 0:
            #     del wb.worksheets[0]
            ws = wb.active
            ws.title = "Data"
            for col_num, column_name in enumerate(df.columns, 1):
                ws.cell(row=1, column=col_num, value=column_name)
            for row_num, row in enumerate(df.values, 2):
                for col_num, value in enumerate(row, 1):
                    ws.cell(row=row_num, column=col_num, value=value)

        tb_chunks = []
        for sheetname in wb.sheetnames:
@@ -45,7 +74,7 @@ class RAGFlowExcelParser:
            tb += f"<table><caption>{sheetname}</caption>"
            tb += tb_rows_0
            for r in list(
                rows[1 + chunk_i * chunk_rows : 1 + (chunk_i + 1) * chunk_rows]
                rows[1 + chunk_i * chunk_rows: 1 + (chunk_i + 1) * chunk_rows]
            ):
                tb += "<tr>"
                for i, c in enumerate(r):
@@ -60,10 +89,41 @@ class RAGFlowExcelParser:
        return tb_chunks

    def __call__(self, fnm):
        if isinstance(fnm, str):
            wb = load_workbook(fnm)

        # if isinstance(fnm, str):
        #     wb = load_workbook(fnm)
        # else:
        #     wb = load_workbook(BytesIO(fnm))

        s_fnm = fnm
        if not isinstance(fnm, str):
            s_fnm = BytesIO(fnm)
        else:
            wb = load_workbook(BytesIO(fnm))

        try:
            wb = load_workbook(s_fnm)
        except Exception as e:
            print(f'****wxy: file parser error: {e}, s_fnm={s_fnm}, trying to convert the file')
            df = pd.read_excel(s_fnm)
            wb = Workbook()
            if len(wb.worksheets) > 0:
                del wb.worksheets[0]
            ws = wb.active
            ws.title = "Data"
            for col_num, column_name in enumerate(df.columns, 1):
                ws.cell(row=1, column=col_num, value=column_name)
            for row_num, row in enumerate(df.values, 2):
                for col_num, value in enumerate(row, 1):
                    ws.cell(row=row_num, column=col_num, value=value)

        res = []
        for sheetname in wb.sheetnames:
            ws = wb[sheetname]
@@ -104,3 +164,4 @@ class RAGFlowExcelParser:
if __name__ == "__main__":
    psr = RAGFlowExcelParser()
    psr(sys.argv[1])

@@ -17,6 +17,7 @@
import logging
import os
import random
from timeit import default_timer as timer

import xgboost as xgb
from io import BytesIO
@@ -277,7 +278,11 @@ class RAGFlowPdfParser:
                b["SP"] = ii

    def __ocr(self, pagenum, img, chars, ZM=3):
        start = timer()
        bxs = self.ocr.detect(np.array(img))
        logging.info(f"__ocr detecting boxes of an image cost ({timer() - start}s)")

        start = timer()
        if not bxs:
            self.boxes.append([])
            return
@@ -308,14 +313,22 @@ class RAGFlowPdfParser:
            else:
                bxs[ii]["text"] += c["text"]

        logging.info(f"__ocr sorting {len(chars)} chars cost {timer() - start}s")
        start = timer()
        boxes_to_reg = []
        img_np = np.array(img)
        for b in bxs:
            if not b["text"]:
                left, right, top, bott = b["x0"] * ZM, b["x1"] * \
                    ZM, b["top"] * ZM, b["bottom"] * ZM
                b["text"] = self.ocr.recognize(np.array(img),
                                               np.array([[left, top], [right, top], [right, bott], [left, bott]],
                                                        dtype=np.float32))
                b["box_image"] = self.ocr.get_rotate_crop_image(img_np, np.array([[left, top], [right, top], [right, bott], [left, bott]], dtype=np.float32))
                boxes_to_reg.append(b)
            del b["txt"]
        texts = self.ocr.recognize_batch([b["box_image"] for b in boxes_to_reg])
        for i in range(len(boxes_to_reg)):
            boxes_to_reg[i]["text"] = texts[i]
            del boxes_to_reg[i]["box_image"]
        logging.info(f"__ocr recognize {len(bxs)} boxes cost {timer() - start}s")
        bxs = [b for b in bxs if b["text"]]
        if self.mean_height[-1] == 0:
            self.mean_height[-1] = np.median([b["bottom"] - b["top"]
@@ -951,13 +964,14 @@ class RAGFlowPdfParser:
        self.page_cum_height = [0]
        self.page_layout = []
        self.page_from = page_from
        start = timer()
        try:
            self.pdf = pdfplumber.open(fnm) if isinstance(
                fnm, str) else pdfplumber.open(BytesIO(fnm))
            self.page_images = [p.to_image(resolution=72 * zoomin).annotated for i, p in
                                enumerate(self.pdf.pages[page_from:page_to])]
            try:
                self.page_chars = [[{**c, 'top': c['top'], 'bottom': c['bottom']} for c in page.dedupe_chars().chars if self._has_color(c)] for page in self.pdf.pages[page_from:page_to]]
                self.page_chars = [[c for c in page.dedupe_chars().chars if self._has_color(c)] for page in self.pdf.pages[page_from:page_to]]
            except Exception as e:
                logging.warning(f"Failed to extract characters for pages {page_from}-{page_to}: {str(e)}")
                self.page_chars = [[] for _ in range(page_to - page_from)]  # If extraction fails, use an empty list instead.
@@ -965,6 +979,7 @@ class RAGFlowPdfParser:
            self.total_page = len(self.pdf.pages)
        except Exception:
            logging.exception("RAGFlowPdfParser __images__")
        logging.info(f"__images__ dedupe_chars cost {timer() - start}s")

        self.outlines = []
        try:
@@ -994,7 +1009,7 @@ class RAGFlowPdfParser:
            else:
                self.is_english = False

        # st = timer()
        start = timer()
        for i, img in enumerate(self.page_images):
            chars = self.page_chars[i] if not self.is_english else []
            self.mean_height.append(
@@ -1016,7 +1031,7 @@ class RAGFlowPdfParser:
                self.__ocr(i + 1, img, chars, zoomin)
            if callback and i % 6 == 5:
                callback(prog=(i + 1) * 0.6 / len(self.page_images), msg="")
        # print("OCR:", timer()-st)
        logging.info(f"__images__ {len(self.page_images)} pages cost {timer() - start}s")

        if not self.is_english and not any(
                [c for c in self.page_chars]) and self.boxes:

@@ -51,11 +51,13 @@ class RAGFlowTxtParser:
                s = t
        if s < len(delimiter):
            dels.extend(list(delimiter[s:]))
        dels = [re.escape(d) for d in delimiter if d]
        dels = [re.escape(d) for d in dels if d]
        dels = [d for d in dels if d]
        dels = "|".join(dels)
        secs = re.split(r"(%s)" % dels, txt)
        for sec in secs:
            if re.match(f"^{dels}$", sec):
                continue
            add_chunk(sec)

        return [[c, ""] for c in cks]

@@ -31,6 +31,7 @@ import onnxruntime as ort

from .postprocess import build_post_process

loaded_models = {}

def transform(data, ops=None):
    """ transform """
@@ -67,6 +68,12 @@ def create_operators(op_param_list, global_config=None):

def load_model(model_dir, nm):
    model_file_path = os.path.join(model_dir, nm + ".onnx")
    global loaded_models
    loaded_model = loaded_models.get(model_file_path)
    if loaded_model:
        logging.info(f"load_model {model_file_path} reuses cached model")
        return loaded_model

    if not os.path.exists(model_file_path):
        raise ValueError("not find model file path {}".format(
            model_file_path))
@@ -102,15 +109,17 @@ def load_model(model_dir, nm):
            provider_options=[cuda_provider_options]
        )
        run_options.add_run_config_entry("memory.enable_memory_arena_shrinkage", "gpu:0")
        logging.info(f"TextRecognizer {nm} uses GPU")
        logging.info(f"load_model {model_file_path} uses GPU")
    else:
        sess = ort.InferenceSession(
            model_file_path,
            options=options,
            providers=['CPUExecutionProvider'])
        run_options.add_run_config_entry("memory.enable_memory_arena_shrinkage", "cpu")
        logging.info(f"TextRecognizer {nm} uses CPU")
        return sess, sess.get_inputs()[0], run_options
        logging.info(f"load_model {model_file_path} uses CPU")
    loaded_model = (sess, run_options)
    loaded_models[model_file_path] = loaded_model
    return loaded_model
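
Because the cache is keyed by file path, every recognizer instance backed by the same `.onnx` file now shares one `InferenceSession` instead of loading its own copy. The pattern in isolation (path and provider are placeholders):

```python
import onnxruntime as ort

_session_cache = {}

def get_session(model_file_path):
    # Create each ONNX session once; later callers reuse the cached one.
    sess = _session_cache.get(model_file_path)
    if sess is None:
        sess = ort.InferenceSession(model_file_path, providers=["CPUExecutionProvider"])
        _session_cache[model_file_path] = sess
    return sess
```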


class TextRecognizer(object):
@@ -123,7 +132,8 @@ class TextRecognizer(object):
            "use_space_char": True
        }
        self.postprocess_op = build_post_process(postprocess_params)
        self.predictor, self.input_tensor, self.run_options = load_model(model_dir, 'rec')
        self.predictor, self.run_options = load_model(model_dir, 'rec')
        self.input_tensor = self.predictor.get_inputs()[0]

    def resize_norm_img(self, img, max_wh_ratio):
        imgC, imgH, imgW = self.rec_image_shape
@@ -408,7 +418,8 @@ class TextDetector(object):
            "unclip_ratio": 1.5, "use_dilation": False, "score_mode": "fast", "box_type": "quad"}

        self.postprocess_op = build_post_process(postprocess_params)
        self.predictor, self.input_tensor, self.run_options = load_model(model_dir, 'det')
        self.predictor, self.run_options = load_model(model_dir, 'det')
        self.input_tensor = self.predictor.get_inputs()[0]

        img_h, img_w = self.input_tensor.shape[2:]
        if isinstance(img_h, str) or isinstance(img_w, str):
@@ -609,6 +620,16 @@ class OCR(object):
            return ""
        return text

    def recognize_batch(self, img_list):
        rec_res, elapse = self.text_recognizer(img_list)
        texts = []
        for i in range(len(rec_res)):
            text, score = rec_res[i]
            if score < self.drop_score:
                text = ""
            texts.append(text)
        return texts

    def __call__(self, img, cls=True):
        time_dict = {'det': 0, 'rec': 0, 'cls': 0, 'all': 0}


@@ -145,18 +145,6 @@ class ToCHWImage(object):
        return data


class Fasttext(object):
    def __init__(self, path="None", **kwargs):
        import fasttext
        self.fast_model = fasttext.load_model(path)

    def __call__(self, data):
        label = data['label']
        fast_label = self.fast_model[label]
        data['fast_label'] = fast_label
        return data


class KeepKeys(object):
    def __init__(self, keep_keys, **kwargs):
        self.keep_keys = keep_keys

@@ -19,16 +19,14 @@ import os
import math
import numpy as np
import cv2
from copy import deepcopy
from functools import cmp_to_key

import onnxruntime as ort
from huggingface_hub import snapshot_download

from api.utils.file_utils import get_project_base_directory
from .operators import *  # noqa: F403
from .operators import preprocess
from . import operators

from .ocr import load_model

class Recognizer(object):
    def __init__(self, label_list, task_name, model_dir=None):
@@ -47,51 +45,7 @@ class Recognizer(object):
            model_dir = os.path.join(
                get_project_base_directory(),
                "rag/res/deepdoc")
            model_file_path = os.path.join(model_dir, task_name + ".onnx")
            if not os.path.exists(model_file_path):
                model_dir = snapshot_download(repo_id="InfiniFlow/deepdoc",
                                              local_dir=os.path.join(get_project_base_directory(), "rag/res/deepdoc"),
                                              local_dir_use_symlinks=False)
                model_file_path = os.path.join(model_dir, task_name + ".onnx")
        else:
            model_file_path = os.path.join(model_dir, task_name + ".onnx")

        if not os.path.exists(model_file_path):
            raise ValueError("not find model file path {}".format(
                model_file_path))

        def cuda_is_available():
            try:
                import torch
                if torch.cuda.is_available():
                    return True
            except Exception:
                return False
            return False

        # https://github.com/microsoft/onnxruntime/issues/9509#issuecomment-951546580
        # Shrink GPU memory after execution
        self.run_options = ort.RunOptions()

        if cuda_is_available():
            options = ort.SessionOptions()
            options.enable_cpu_mem_arena = False
            cuda_provider_options = {
                "device_id": 0,  # Use a specific GPU
                "gpu_mem_limit": 512 * 1024 * 1024,  # Limit GPU memory
                "arena_extend_strategy": "kNextPowerOfTwo",  # GPU memory allocation strategy
            }
            self.ort_sess = ort.InferenceSession(
                model_file_path, options=options,
                providers=['CUDAExecutionProvider'],
                provider_options=[cuda_provider_options]
            )
            self.run_options.add_run_config_entry("memory.enable_memory_arena_shrinkage", "gpu:0")
            logging.info(f"Recognizer {task_name} uses GPU")
        else:
            self.ort_sess = ort.InferenceSession(model_file_path, providers=['CPUExecutionProvider'])
            self.run_options.add_run_config_entry("memory.enable_memory_arena_shrinkage", "cpu")
            logging.info(f"Recognizer {task_name} uses CPU")
        self.ort_sess, self.run_options = load_model(model_dir, task_name)
        self.input_names = [node.name for node in self.ort_sess.get_inputs()]
        self.output_names = [node.name for node in self.ort_sess.get_outputs()]
        self.input_shape = self.ort_sess.get_inputs()[0].shape[2:4]

@@ -99,30 +53,22 @@ class Recognizer(object):

    @staticmethod
    def sort_Y_firstly(arr, threashold):
        # Sort by y1 first, then by x1
        arr = sorted(arr, key=lambda r: (r["top"], r["x0"]))
        for i in range(len(arr) - 1):
            for j in range(i, -1, -1):
                # Restore the order using the threshold
                if abs(arr[j + 1]["top"] - arr[j]["top"]) < threashold \
                        and arr[j + 1]["x0"] < arr[j]["x0"]:
                    tmp = deepcopy(arr[j])
                    arr[j] = deepcopy(arr[j + 1])
                    arr[j + 1] = deepcopy(tmp)
        def cmp(c1, c2):
            diff = c1["top"] - c2["top"]
            if abs(diff) < threashold:
                diff = c1["x0"] - c2["x0"]
            return diff
        arr = sorted(arr, key=cmp_to_key(cmp))
        return arr

    @staticmethod
    def sort_X_firstly(arr, threashold, copy=True):
        # Sort by x1 first, then by y1
        arr = sorted(arr, key=lambda r: (r["x0"], r["top"]))
        for i in range(len(arr) - 1):
            for j in range(i, -1, -1):
                # Restore the order using the threshold
                if abs(arr[j + 1]["x0"] - arr[j]["x0"]) < threashold \
                        and arr[j + 1]["top"] < arr[j]["top"]:
                    tmp = deepcopy(arr[j]) if copy else arr[j]
                    arr[j] = deepcopy(arr[j + 1]) if copy else arr[j + 1]
                    arr[j + 1] = deepcopy(tmp) if copy else tmp
    def sort_X_firstly(arr, threashold):
        def cmp(c1, c2):
            diff = c1["x0"] - c2["x0"]
            if abs(diff) < threashold:
                diff = c1["top"] - c2["top"]
            return diff
        arr = sorted(arr, key=cmp_to_key(cmp))
        return arr
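
The bubble-sort passes are replaced by a comparator: boxes whose `top` values differ by less than the threshold are treated as the same visual row and ordered by `x0`. A self-contained illustration (data made up; note that threshold comparators like this are not strictly transitive, a property the original bubble-sort variant tolerated as well):

```python
from functools import cmp_to_key

boxes = [{"top": 10, "x0": 50}, {"top": 12, "x0": 5}, {"top": 40, "x0": 0}]
THRESHOLD = 5

def cmp(c1, c2):
    diff = c1["top"] - c2["top"]
    if abs(diff) < THRESHOLD:      # same visual line: fall back to left-to-right
        diff = c1["x0"] - c2["x0"]
    return diff

print(sorted(boxes, key=cmp_to_key(cmp)))
# [{'top': 12, 'x0': 5}, {'top': 10, 'x0': 50}, {'top': 40, 'x0': 0}]
```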

    @staticmethod
@@ -145,8 +91,6 @@ class Recognizer(object):
                    arr[j + 1] = tmp
        return arr

        return sorted(arr, key=lambda r: (r.get("C", r["x0"]), r["top"]))

    @staticmethod
    def sort_R_firstly(arr, thr=0):
        # Sort by y1 first, then by x1

@@ -177,7 +177,7 @@ class TableStructureRecognizer(Recognizer):
        colwm = np.min(colwm) if colwm else 0
        crosspage = len(set([b["page_number"] for b in boxes])) > 1
        if crosspage:
            boxes = Recognizer.sort_X_firstly(boxes, colwm / 2, False)
            boxes = Recognizer.sort_X_firstly(boxes, colwm / 2)
        else:
            boxes = Recognizer.sort_C_firstly(boxes, colwm / 2)
        boxes[0]["cn"] = 0

docker/.env (16 lines changed)
@@ -80,13 +80,13 @@ REDIS_PASSWORD=infini_rag_flow
SVR_HTTP_PORT=9380

# The RAGFlow Docker image to download.
# Defaults to the v0.16.0-slim edition, which is the RAGFlow Docker image without embedding models.
RAGFLOW_IMAGE=infiniflow/ragflow:v0.16.0-slim
# Defaults to the v0.17.0-slim edition, which is the RAGFlow Docker image without embedding models.
RAGFLOW_IMAGE=infiniflow/ragflow:v0.17.0-slim
#
# To download the RAGFlow Docker image with embedding models, uncomment the following line instead:
# RAGFLOW_IMAGE=infiniflow/ragflow:v0.16.0
# RAGFLOW_IMAGE=infiniflow/ragflow:v0.17.0
#
# The Docker image of the v0.16.0 edition includes:
# The Docker image of the v0.17.0 edition includes:
# - Built-in embedding models:
#   - BAAI/bge-large-zh-v1.5
#   - BAAI/bge-reranker-v2-m3
@@ -138,3 +138,11 @@ TIMEZONE='Asia/Shanghai'
# - `ERROR`
# For example, the following line changes the log level of `ragflow.es_conn` to `DEBUG`:
# LOG_LEVELS=ragflow.es_conn=DEBUG

# Aliyun OSS configuration
# STORAGE_IMPL=OSS
# ACCESS_KEY=xxx
# SECRET_KEY=eee
# ENDPOINT=http://oss-cn-hangzhou.aliyuncs.com
# REGION=cn-hangzhou
# BUCKET=ragflow65536

@@ -78,8 +78,8 @@ The [.env](./.env) file contains important environment variables for Docker.
- `RAGFLOW-IMAGE`
  The Docker image edition. Available editions:

  - `infiniflow/ragflow:v0.16.0-slim` (default): The RAGFlow Docker image without embedding models.
  - `infiniflow/ragflow:v0.16.0`: The RAGFlow Docker image with embedding models including:
  - `infiniflow/ragflow:v0.17.0-slim` (default): The RAGFlow Docker image without embedding models.
  - `infiniflow/ragflow:v0.17.0`: The RAGFlow Docker image with embedding models including:
    - Built-in embedding models:
      - `BAAI/bge-large-zh-v1.5`
      - `BAAI/bge-reranker-v2-m3`

@@ -3,7 +3,7 @@ services:
    container_name: ragflow-es-01
    profiles:
      - elasticsearch
    image: docker.elastic.co/elasticsearch/elasticsearch:${STACK_VERSION}
    image: elasticsearch:${STACK_VERSION}
    volumes:
      - esdata01:/usr/share/elasticsearch/data
    ports:
@@ -114,6 +114,7 @@ services:
    restart: on-failure

  redis:
    # swr.cn-north-4.myhuaweicloud.com/ddn-k8s/docker.io/valkey/valkey:8
    image: valkey/valkey:8
    container_name: ragflow-redis
    command: redis-server --requirepass ${REDIS_PASSWORD} --maxmemory 128mb --maxmemory-policy allkeys-lru

@@ -1,5 +1,5 @@
# The RAGFlow team does not actively maintain docker-compose-gpu.yml, so use it at your own risk.
# However, you are welcome to file a pull request to improve it.
# Pull requests to improve it are welcome.
include:
  - ./docker-compose-base.yml


@@ -37,6 +37,13 @@ redis:
#   access_key: 'access_key'
#   secret_key: 'secret_key'
#   region: 'region'
# oss:
#   access_key: '${ACCESS_KEY}'
#   secret_key: '${SECRET_KEY}'
#   endpoint_url: '${ENDPOINT}'
#   region: '${REGION}'
#   bucket: '${BUCKET}'
#   prefix_path: '${OSS_PREFIX_PATH}'
# azure:
#   auth_type: 'sas'
#   container_url: 'container_url'
@@ -52,6 +59,12 @@ redis:
#   factory: 'Tongyi-Qianwen'
#   api_key: 'sk-xxxxxxxxxxxxx'
#   base_url: ''
#   default_models:
#     chat_model: 'qwen-plus'
#     embedding_model: 'BAAI/bge-large-zh-v1.5@BAAI'
#     rerank_model: ''
#     asr_model: ''
#     image2text_model: ''
# oauth:
#   github:
#     client_id: xxxxxxxxxxxxxxxxxxxxxxxxx

@@ -97,8 +97,8 @@ The [.env](https://github.com/infiniflow/ragflow/blob/main/docker/.env) file contains important environment variables for Docker.
- `RAGFLOW-IMAGE`
  The Docker image edition. Available editions:

  - `infiniflow/ragflow:v0.16.0-slim` (default): The RAGFlow Docker image without embedding models.
  - `infiniflow/ragflow:v0.16.0`: The RAGFlow Docker image with embedding models including:
  - `infiniflow/ragflow:v0.17.0-slim` (default): The RAGFlow Docker image without embedding models.
  - `infiniflow/ragflow:v0.17.0`: The RAGFlow Docker image with embedding models including:
    - Built-in embedding models:
      - `BAAI/bge-large-zh-v1.5`
      - `BAAI/bge-reranker-v2-m3`

@@ -15,8 +15,8 @@ Please note that some of your settings may consume a significant amount of time.
## 1. Accelerate document indexing

- Use GPU to reduce embedding time.
- On the configuration page of your knowledge base, toggle off **Use RAPTOR to enhance retrieval**.
- The **Knowledge Graph** chunk method (GraphRAG) is time-consuming.
- On the configuration page of your knowledge base, switch off **Use RAPTOR to enhance retrieval**.
- Extracting a knowledge graph (GraphRAG) is time-consuming.
- Disable **Auto-keyword** and **Auto-question** on the configuration page of your knowledge base, as both depend on the LLM.

## 2. Accelerate question answering

@@ -1,6 +1,6 @@
{
  "label": "Agent Components",
  "position": 3,
  "position": 20,
  "link": {
    "type": "generated-index",
    "description": "A complete reference for RAGFlow's agent components."
@@ -33,7 +33,7 @@ Click the dropdown menu of **Model** to show the model configuration window.
- **Model**: The chat model to use.
  - Ensure you set the chat model correctly on the **Model providers** page.
  - You can use different models for different components to increase flexibility or improve overall performance.
- **Freedom**: A shortcut to **Temperature**, **Top P**, **Presence penalty**, and **Frequency penalty** settings, indicating the freedom level of the model.
- **Preset configurations**: A shortcut to **Temperature**, **Top P**, **Presence penalty**, and **Frequency penalty** settings, indicating the freedom level of the model. From **Improvise**, **Precise**, to **Balance**, each preset configuration corresponds to a unique combination of **Temperature**, **Top P**, **Presence penalty**, and **Frequency penalty**.
  This parameter has three options:
  - **Improvise**: Produces more creative responses.
  - **Precise**: (Default) Produces more conservative responses.
@@ -58,7 +58,7 @@ Click the dropdown menu of **Model** to show the model configuration window.

:::tip NOTE
- It is not necessary to stick with the same model for all components. If a specific model is not performing well for a particular task, consider using a different one.
- If you are uncertain about the mechanism behind **Temperature**, **Top P**, **Presence penalty**, and **Frequency penalty**, you can simply choose one of the three options of **Freedom**.
- If you are uncertain about the mechanism behind **Temperature**, **Top P**, **Presence penalty**, and **Frequency penalty**, simply choose one of the three options of **Preset configurations**.
:::

### Message window size

@@ -24,7 +24,7 @@ Click the dropdown menu of **Model** to show the model configuration window.
- **Model**: The chat model to use.
  - Ensure you set the chat model correctly on the **Model providers** page.
  - You can use different models for different components to increase flexibility or improve overall performance.
- **Freedom**: A shortcut to **Temperature**, **Top P**, **Presence penalty**, and **Frequency penalty** settings, indicating the freedom level of the model.
- **Preset configurations**: A shortcut to **Temperature**, **Top P**, **Presence penalty**, and **Frequency penalty** settings, indicating the freedom level of the model. From **Improvise**, **Precise**, to **Balance**, each preset configuration corresponds to a unique combination of **Temperature**, **Top P**, **Presence penalty**, and **Frequency penalty**.
  This parameter has three options:
  - **Improvise**: Produces more creative responses.
  - **Precise**: (Default) Produces more conservative responses.
@@ -49,7 +49,7 @@ Click the dropdown menu of **Model** to show the model configuration window.

:::tip NOTE
- It is not necessary to stick with the same model for all components. If a specific model is not performing well for a particular task, consider using a different one.
- If you are uncertain about the mechanism behind **Temperature**, **Top P**, **Presence penalty**, and **Frequency penalty**, you can simply choose one of the three options of **Freedom**.
- If you are uncertain about the mechanism behind **Temperature**, **Top P**, **Presence penalty**, and **Frequency penalty**, simply choose one of the three options of **Preset configurations**.
:::

### System prompt
@@ -76,6 +76,11 @@ When writing suggestions, pay attention to whether there are ways to improve the

Where `{source_text}` and `{target_lang}` are global variables defined by the **Begin** component, while `{translation_1}` is the output of another **Generate** component with the component ID **Translate directly**.

:::danger IMPORTANT
A **Generate** component relies on keys (variables) to specify its data inputs. Its immediate upstream component is *not* necessarily its data input, and the arrows in the workflow indicate *only* the processing sequence. Keys in a **Generate** component are used in conjunction with the system prompt to specify data inputs for the LLM. Use a forward slash `/` to show the keys to use.
:::

### Cite

This toggle sets whether to cite the original text as reference.
@@ -95,19 +100,6 @@ This feature is used for multi-turn dialogue *only*.
:::

### Key (Variable)

:::danger IMPORTANT
A **Generate** component relies on keys (variables) to specify its data inputs. Its immediate upstream component is *not* necessarily its data input, and the arrows in the workflow indicate *only* the processing sequence.
:::



Keys in a **Generate** component are used in conjunction with the system prompt to specify data inputs for the LLM. As shown in the above screenshot, the values are categorized into two groups:

- **Component Output**: The value of the key should be a component ID.
- **Begin Input**: The value of the key should be the name of a global variable defined in the **Begin** component.

## Examples

You can explore our three-step interpreter agent template, where a **Generate** component (component ID: **Reflect**) takes three global variables:

@@ -34,7 +34,7 @@ Click the dropdown menu of **Model** to show the model configuration window.
- **Model**: The chat model to use.
  - Ensure you set the chat model correctly on the **Model providers** page.
  - You can use different models for different components to increase flexibility or improve overall performance.
- **Freedom**: A shortcut to **Temperature**, **Top P**, **Presence penalty**, and **Frequency penalty** settings, indicating the freedom level of the model.
- **Preset configurations**: A shortcut to **Temperature**, **Top P**, **Presence penalty**, and **Frequency penalty** settings, indicating the freedom level of the model. From **Improvise**, **Precise**, to **Balance**, each preset configuration corresponds to a unique combination of **Temperature**, **Top P**, **Presence penalty**, and **Frequency penalty**.
  This parameter has three options:
  - **Improvise**: Produces more creative responses.
  - **Precise**: (Default) Produces more conservative responses.
@@ -59,7 +59,7 @@ Click the dropdown menu of **Model** to show the model configuration window.

:::tip NOTE
- It is not necessary to stick with the same model for all components. If a specific model is not performing well for a particular task, consider using a different one.
- If you are uncertain about the mechanism behind **Temperature**, **Top P**, **Presence penalty**, and **Frequency penalty**, you can simply choose one of the three options of **Freedom**.
- If you are uncertain about the mechanism behind **Temperature**, **Top P**, **Presence penalty**, and **Frequency penalty**, simply choose one of the three options of **Preset configurations**.
:::

@ -30,7 +30,7 @@ RAGFlow employs a combination of weighted keyword similarity and weighted vector

Defaults to 0.2.

### Keywords similarity weight
### Keyword similarity weight

This parameter sets the weight of keyword similarity in the combined similarity score. The total of the two weights must equal 1.0. Its default value is 0.7, which means the weight of vector similarity in the combined search is 1 - 0.7 = 0.3.
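
To make the weighting concrete, here is a minimal Python sketch (plain floats stand in for the per-chunk scores; this is an illustration, not RAGFlow's internal code):

```python
def combined_similarity(keyword_sim: float, vector_sim: float,
                        keyword_weight: float = 0.7) -> float:
    """Blend the two scores; the two weights always sum to 1.0."""
    return keyword_weight * keyword_sim + (1 - keyword_weight) * vector_sim

# With the default weight of 0.7:
print(combined_similarity(0.50, 0.30))  # 0.7 * 0.50 + 0.3 * 0.30 = 0.44
```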
@ -16,7 +16,7 @@ A **Rewrite** component uses a specified LLM to rewrite a user query from the **

A **Rewrite** component is essential when you need to optimize a user query based on the context of previous conversations. It is usually the upstream component of a **Retrieval** component.

:::tip NOTE
See also the [Keyword](https://ragflow.io/docs/dev/keyword_component) component, a similar component used for multi-turn optimization.
See also the [Keyword](./keyword.mdx) component, a similar component used for multi-turn optimization.
:::

## Configurations
@ -32,7 +32,7 @@ Click the dropdown menu of **Model** to show the model configuration window.

- **Model**: The chat model to use.
  - Ensure you set the chat model correctly on the **Model providers** page.
  - You can use different models for different components to increase flexibility or improve overall performance.
- **Freedom**: A shortcut to **Temperature**, **Top P**, **Presence penalty**, and **Frequency penalty** settings, indicating the freedom level of the model.
- **Preset configurations**: A shortcut to **Temperature**, **Top P**, **Presence penalty**, and **Frequency penalty** settings, indicating the freedom level of the model. From **Improvise**, **Precise**, to **Balance**, each preset configuration corresponds to a unique combination of **Temperature**, **Top P**, **Presence penalty**, and **Frequency penalty**.
  This parameter has three options:
  - **Improvise**: Produces more creative responses.
  - **Precise**: (Default) Produces more conservative responses.

@ -57,7 +57,7 @@ Click the dropdown menu of **Model** to show the model configuration window.

:::tip NOTE
- It is not necessary to stick with the same model for all components. If a specific model is not performing well for a particular task, consider using a different one.
- If you are uncertain about the mechanism behind **Temperature**, **Top P**, **Presence penalty**, and **Frequency penalty**, you can simply choose one of the three options of **Freedom**.
- If you are uncertain about the mechanism behind **Temperature**, **Top P**, **Presence penalty**, and **Frequency penalty**, simply choose one of the three options of **Preset configurations**.
:::
@ -13,7 +13,7 @@ A **Switch** component evaluates conditions based on the output of specific comp

## Scenarios

A **Switch** component is essential for condition-based direction of execution flow. While it shares similarities with the [Categorize](https://ragflow.io/docs/dev/categorize_component) component, which is also used in multi-pronged strategies, the key distinction lies in their approach: the evaluation of the **Switch** component is rule-based, whereas the **Categorize** component involves AI and uses an LLM for decision-making.
A **Switch** component is essential for condition-based direction of execution flow. While it shares similarities with the [Categorize](./categorize.mdx) component, which is also used in multi-pronged strategies, the key distinction lies in their approach: the evaluation of the **Switch** component is rule-based, whereas the **Categorize** component involves AI and uses an LLM for decision-making.

## Configurations

@ -5,7 +5,7 @@ slug: /text2sql_agent

# Create a Text2SQL agent

Build a Text2SQL agent leverging RAGFlow's RAG capabilities. Contributed by @TeslaZY.
Build a Text2SQL agent leveraging RAGFlow's RAG capabilities. Contributed by @TeslaZY.

## Scenario
@ -343,7 +343,7 @@ Synonyms: laptop computer,laptop pc

3. Create a Retrieval node and name it Thesaurus; create an ExeSQL node.
4. Configure the Q->SQL, DDL, DB_Description, and TextSQL_Thesaurus knowledge bases. Please refer to the following:
   ![]()
5. Configure the Generate node, named LLM‘s prompt:
5. Configure the Generate node, named LLM's prompt:
   - Add this content to the prompt provided by the template to provide the thesaurus content to the LLM:
   ```plaintext
   ## You may use the following Thesaurus statements. For example, what I ask is from Synonyms, you must use Standard noun to generate SQL. Use responses to past questions also to guide you: {sql_thesaurus}.

@ -383,7 +383,7 @@ Since version 0.15.0, ragflow has introduced step-by-step execution for Agent co

Find all customers who has bought a mobile phone
```
![]()
3. As the image shows, no matching information was retrieved from the Q->SQL knowledge base, yet a similar question exists within the database. Adjust the Rerank model, "Similarity threshold," or "Keywords similarity weight" accordingly to return relevant content.
3. As the image shows, no matching information was retrieved from the Q->SQL knowledge base, yet a similar question exists within the database. Adjust the Rerank model, "Similarity threshold," or "Keyword similarity weight" accordingly to return relevant content.
![]()
![]()
8 docs/guides/configure_knowledge_base/_category_.json Normal file
@ -0,0 +1,8 @@

{
  "label": "Configure a knowledge base",
  "position": 0,
  "link": {
    "type": "generated-index",
    "description": "Guides on configuring a knowledge base."
  }
}
@ -42,9 +42,9 @@ RAGFlow offers multiple chunking template to facilitate chunking files of differ

| **Template** | Description                                                           | File format                                          |
|--------------|-----------------------------------------------------------------------|------------------------------------------------------|
| General      | Files are consecutively chunked based on a preset chunk token number. | DOCX, EXCEL, PPT, PDF, TXT, JPEG, JPG, PNG, TIF, GIF |
| Q&A          |                                                                       | EXCEL, CSV/TXT                                       |
| Q&A          |                                                                       | XLSX, CSV/TXT                                        |
| Manual       |                                                                       | PDF                                                  |
| Table        |                                                                       | EXCEL, CSV/TXT                                       |
| Table        |                                                                       | XLSX, CSV/TXT                                        |
| Paper        |                                                                       | PDF                                                  |
| Book         |                                                                       | DOCX, PDF, TXT                                       |
| Laws         |                                                                       | DOCX, PDF, TXT                                       |

@ -52,7 +52,7 @@ RAGFlow offers multiple chunking template to facilitate chunking files of differ

| Picture      |                                                                       | JPEG, JPG, PNG, TIF, GIF                             |
| One          | The entire document is chunked as one.                                | DOCX, EXCEL, PDF, TXT                                |

You can also change the chunk template for a particular file on the **Datasets** page.
You can also change a file's chunk method on the **Datasets** page.

![]()

@ -128,7 +128,7 @@ RAGFlow uses multiple recall of both full-text search and vector search in its c

## Search for knowledge base

As of RAGFlow v0.16.0, the search feature is still in a rudimentary form, supporting only knowledge base search by name.
As of RAGFlow v0.17.0, the search feature is still in a rudimentary form, supporting only knowledge base search by name.

![]()
@ -0,0 +1,84 @@

---
sidebar_position: 2
slug: /construct_knowledge_graph
---

# Construct knowledge graph

Generate a knowledge graph for your knowledge base.

---

To enhance multi-hop question-answering, RAGFlow adds a knowledge graph construction step between data extraction and indexing, as illustrated below. This step creates additional chunks from existing ones generated by your specified chunk method.

![]()

As of v0.17.0, RAGFlow supports constructing a knowledge graph on a knowledge base, allowing you to construct a *unified* graph across multiple files within your knowledge base. When a newly uploaded file starts parsing, the generated graph will automatically update.

:::danger WARNING
Constructing a knowledge graph requires significant memory, computational resources, and tokens.
:::

## Scenarios

Knowledge graphs are especially useful for multi-hop question-answering involving *nested* logic. They outperform traditional extraction approaches when you are performing question answering on books or works with complex entities and relationships.

## Prerequisites

The system's default chat model is used to generate the knowledge graph. Before proceeding, ensure that you have a chat model properly configured:

![]()
## Configurations

### Entity types (*Required*)

The types of the entities to extract from your knowledge base. The default types are: **organization**, **person**, **event**, and **category**. Add or remove types to suit your specific knowledge base.

### Method

The method to use to construct the knowledge graph:

- **General**: Use prompts provided by [GraphRAG](https://github.com/microsoft/graphrag) to extract entities and relationships.
- **Light**: (Default) Use prompts provided by [LightRAG](https://github.com/HKUDS/LightRAG) to extract entities and relationships. This option consumes fewer tokens, less memory, and fewer computational resources.

### Entity resolution

Whether to enable entity resolution. You can think of this as an entity deduplication switch. When enabled, the LLM will combine similar entities - e.g., '2025' and 'the year of 2025', or 'IT' and 'Information Technology' - to construct a more effective graph.

- (Default) Disable entity resolution.
- Enable entity resolution. This option consumes more tokens.

### Community report generation

In a knowledge graph, a community is a cluster of entities linked by relationships. You can have the LLM generate an abstract for each community, known as a community report. See [here](https://www.microsoft.com/en-us/research/blog/graphrag-improving-global-search-via-dynamic-community-selection/) for more information. This indicates whether to generate community reports:

- Generate community reports. This option consumes more tokens.
- (Default) Do not generate community reports.

## Procedure

1. On the **Configuration** page of your knowledge base, switch on **Extract knowledge graph** or adjust its settings as needed, and click **Save** to confirm your changes.

   - *The default knowledge graph configurations for your knowledge base are now set and files uploaded from this point onward will automatically use these settings during parsing.*
   - *Files parsed before this update will retain their original knowledge graph settings.*

2. The knowledge graph of your knowledge base does *not* automatically update *until* a newly uploaded file is parsed.

   _A **Knowledge graph** entry appears under **Configuration** once a knowledge graph is created._

3. Click **Knowledge graph** to view the details of the generated graph.

## Frequently asked questions

### Can I have different knowledge graph settings for different files in my knowledge base?

Yes, you can. Just one graph is generated per knowledge base. The smaller graphs of your files will be *combined* into one big, unified graph at the end of the graph extraction process.

### Does the knowledge graph automatically update when I remove a related file?

Nope. The knowledge graph does *not* automatically update *until* a newly uploaded file is parsed.

### How to remove a generated knowledge graph?

To remove the generated knowledge graph, delete all related files in your knowledge base. Although the **Knowledge graph** entry will still be visible, the graph has actually been deleted.
82 docs/guides/configure_knowledge_base/run_retrieval_test.md Normal file
@ -0,0 +1,82 @@

---
sidebar_position: 10
slug: /run_retrieval_test
---

# Run retrieval test

Conduct a retrieval test on your knowledge base to check whether the intended chunks can be retrieved.

---

After your files are uploaded and parsed, it is recommended that you run a retrieval test before proceeding with the chat assistant configuration. Just like fine-tuning a precision instrument, RAGFlow requires careful tuning to deliver optimal question answering performance. Your knowledge base settings, chat assistant configurations, and the specified large and small models can all significantly impact the final results. Running a retrieval test verifies whether the intended chunks can be recovered, allowing you to quickly identify areas for improvement or pinpoint any issue that needs addressing. For instance, when debugging your question answering system, if you know that the correct chunks can be retrieved, you can focus your efforts elsewhere.

During a retrieval test, chunks created from your specified chunk method are retrieved using a hybrid search. This search combines weighted keyword similarity with either weighted vector cosine similarity or a weighted reranking score, depending on your settings:

- If no rerank model is selected, weighted keyword similarity will be combined with weighted vector cosine similarity.
- If a rerank model is selected, weighted keyword similarity will be combined with weighted vector reranking score.

In contrast, chunks created from [knowledge graph construction](./construct_knowledge_graph.md) are retrieved solely using vector cosine similarity.

## Prerequisites

- Your files are uploaded and successfully parsed before running a retrieval test.
- A knowledge graph must be successfully built before enabling **Use knowledge graph**.

## Configurations

### Similarity threshold

This sets the bar for retrieving chunks: chunks with similarities below the threshold will be filtered out. By default, the threshold is set to 0.2.

### Keyword similarity weight

This sets the weight of keyword similarity in the combined similarity score, whether used with vector cosine similarity or a reranking score. By default, it is set to 0.7, making the weight of the other component 0.3 (1 - 0.7).
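
A minimal Python sketch of how these two settings interact during a test (illustrative only; it assumes the threshold applies to the blended score, and `rerank_score` is `None` when no rerank model is selected):

```python
def test_score(keyword_sim: float, vector_sim: float,
               rerank_score: float | None = None,
               keyword_weight: float = 0.7,
               similarity_threshold: float = 0.2) -> float | None:
    """Blend the scores; return None for chunks that fail the threshold."""
    other = rerank_score if rerank_score is not None else vector_sim
    score = keyword_weight * keyword_sim + (1 - keyword_weight) * other
    # Assumption: the threshold is applied to the blended score.
    return score if score >= similarity_threshold else None

print(test_score(0.2517, 0.3649))  # 0.7 * 0.2517 + 0.3 * 0.3649 ≈ 0.2857
```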

### Rerank model

- If left empty, RAGFlow will use a combination of weighted keyword similarity and weighted vector cosine similarity.
- If a rerank model is selected, weighted keyword similarity will be combined with weighted vector reranking score.

:::danger IMPORTANT
Using a rerank model will significantly increase the time to receive a response.
:::

### Use knowledge graph

In a knowledge graph, an entity description, a relationship description, or a community report each exists as an independent chunk. This switch indicates whether to add these chunks to the retrieval.

The switch is disabled by default. When enabled, RAGFlow performs the following during a retrieval test:

1. Extract entities and entity types from your query using the LLM.
2. Retrieve top N entities from the graph based on their PageRank values, using the extracted entity types.
3. Find similar entities and their N-hop relationships from the graph using the embeddings of the extracted query entities.
4. Retrieve similar relationships from the graph using the query embedding.
5. Rank these retrieved entities and relationships by multiplying each one's PageRank value with its similarity score to the query, returning the top N as the final retrieval (see the sketch below).
6. Retrieve the report for the community involving the most entities in the final retrieval.
*The retrieved entity descriptions, relationship descriptions, and the top 1 community report are sent to the LLM for content generation.*
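
The ranking rule in step 5 can be sketched as follows (illustrative Python; the field names are hypothetical, not RAGFlow's internal structures):

```python
def rank_graph_candidates(candidates: list[dict], top_n: int) -> list[dict]:
    # Step 5: weight each entity/relationship by PageRank x query similarity.
    for c in candidates:
        c["score"] = c["pagerank"] * c["similarity"]
    return sorted(candidates, key=lambda c: c["score"], reverse=True)[:top_n]

top = rank_graph_candidates(
    [{"name": "IT", "pagerank": 0.8, "similarity": 0.6},
     {"name": "2025", "pagerank": 0.4, "similarity": 0.9}],
    top_n=1,
)  # -> the "IT" entry, since 0.8 * 0.6 = 0.48 > 0.4 * 0.9 = 0.36
```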

:::danger IMPORTANT
Using a knowledge graph in a retrieval test will significantly increase the time to receive a response.
:::

### Test text

This field is where you enter your test query.

## Procedure

1. Navigate to the **Retrieval testing** page of your knowledge base, enter your query in **Test text**, and click **Testing** to run the test.
2. If the results are unsatisfactory, tune the options listed in the Configuration section and rerun the test.

*The following is a screenshot of a retrieval test conducted without using a knowledge graph. It demonstrates a hybrid search combining weighted keyword similarity and weighted vector cosine similarity. The overall hybrid similarity score is 28.56, calculated as 25.17 (term similarity score) x 0.7 + 36.49 (vector similarity score) x 0.3:*

![]()

*The following is a screenshot of a retrieval test conducted using a knowledge graph. It shows that only vector similarity is used for knowledge graph-generated chunks:*

![]()

## Frequently asked questions

### Is an LLM used when the Use Knowledge Graph switch is enabled?

Yes, your LLM will be used to analyze your query and extract the related entities and relationships from the knowledge graph. This also explains why additional tokens and time are consumed.
22 docs/guides/configure_knowledge_base/set_metadata.md Normal file
@ -0,0 +1,22 @@

---
sidebar_position: 1
slug: /set_metada
---

# Set metadata

Add metadata to an uploaded file.

---

On the **Dataset** page of your knowledge base, you can add metadata to any uploaded file. This approach enables you to 'tag' additional information like URL, author, date, and more to an existing file or dataset. In an AI-powered chat, such information will be sent to the LLM with the retrieved chunks for content generation.

For example, if you have a dataset of HTML files and want the LLM to cite the source URL when responding to your query, add a `"url"` parameter to each file's metadata.
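
For instance, a file's metadata object might look like this (every field besides `url` is merely illustrative):

```json
{
  "url": "https://example.com/source-page.html",
  "author": "Jane Doe",
  "date": "2024-11-02"
}
```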

![]()

:::tip NOTE
Ensure that your metadata is in JSON format; otherwise, your updates will not be applied.
:::

![]()
@ -59,20 +59,20 @@ success

### 2. Ensure Ollama is accessible

If RAGFlow runs in Docker and Ollama runs on the same host machine, check if ollama is accessiable from inside the RAGFlow container:
If RAGFlow runs in Docker and Ollama runs on the same host machine, check if ollama is accessible from inside the RAGFlow container:

```bash
sudo docker exec -it ragflow-server bash
root@8136b8c3e914:/ragflow# curl http://host.docker.internal:11434/
Ollama is running
```

If RAGFlow runs from source code and Ollama runs on the same host machine, check if ollama is accessiable from RAGFlow host machine:
If RAGFlow runs from source code and Ollama runs on the same host machine, check if ollama is accessible from the RAGFlow host machine:

```bash
curl http://localhost:11434/
Ollama is running
```

If RAGFlow and Ollama run on different machines, check if ollama is accessiable from RAGFlow host machine:
If RAGFlow and Ollama run on different machines, check if ollama is accessible from the RAGFlow host machine:

```bash
curl http://${IP_OF_OLLAMA_MACHINE}:11434/
Ollama is running
@ -12,8 +12,8 @@ A guide explaining how to build a RAGFlow Docker image from its source code. By

## Target Audience

- Developers who have added new features or modified the existing code and require a Docker image to view and debug their changes.
- Developers looking to build a RAGFlow Docker image for an ARM64 platform.
- Testers looking to explore the latest features of RAGFlow in a Docker image.
- Developers seeking to build a RAGFlow Docker image for an ARM64 platform.
- Testers aiming to explore the latest features of RAGFlow in a Docker image.

## Prerequisites

@ -21,6 +21,7 @@ A guide explaining how to build a RAGFlow Docker image from its source code. By

- RAM ≥ 16 GB
- Disk ≥ 50 GB
- Docker ≥ 24.0.0 & Docker Compose ≥ v2.26.1
- For ARM64 platforms, please upgrade the `xgboost` version in **pyproject.toml** to `1.6.0` and ensure **unixODBC** is properly installed.

## Build a Docker image
@ -41,6 +42,8 @@ While we also test RAGFlow on ARM64 platforms, we do not maintain RAGFlow Docker

```bash
git clone https://github.com/infiniflow/ragflow.git
cd ragflow/
uv run download_deps.py
docker build -f Dockerfile.deps -t infiniflow/ragflow_deps .
docker build --build-arg LIGHTEN=1 -f Dockerfile -t infiniflow/ragflow:nightly-slim .
```

@ -57,12 +60,41 @@ While we also test RAGFlow on ARM64 platforms, we do not maintain RAGFlow Docker

```bash
git clone https://github.com/infiniflow/ragflow.git
cd ragflow/
pip3 install huggingface_hub nltk
python3 download_deps.py
uv run download_deps.py
docker build -f Dockerfile.deps -t infiniflow/ragflow_deps .
docker build -f Dockerfile -t infiniflow/ragflow:nightly .
docker build --build-arg LIGHTEN=1 -f Dockerfile -t infiniflow/ragflow:nightly-slim .
```

</TabItem>
</Tabs>
## Launch a RAGFlow Service from Docker for macOS

After building the infiniflow/ragflow:nightly-slim image, you are ready to launch a fully-functional RAGFlow service with all the required components, such as Elasticsearch, MySQL, MinIO, Redis, and more.

## Example: Apple M2 Pro (Sequoia)

1. Edit Docker Compose Configuration

   Open the `docker/docker-compose-base.yml` file. Find the `infinity.image` setting and change the image reference from `infiniflow/infinity:v0.6.0-dev3` to `infiniflow/ragflow:nightly-slim` to use the pre-built image.

   ```yaml
   infinity:
     container_name: ragflow-infinity
     image: infiniflow/ragflow:nightly-slim # here
     volumes:
       - ...
       - ...
     ...
   ```

2. Launch the Service

   ```bash
   cd docker
   docker compose -f docker-compose-macos.yml up -d
   ```

3. Access the RAGFlow Service

   Once the setup is complete, open your web browser and navigate to http://127.0.0.1 or your server's `<IP_ADDRESS>` (the default port is 80). You will be directed to the RAGFlow welcome page. Enjoy!🍻
@ -3,7 +3,7 @@ sidebar_position: 2

slug: /launch_ragflow_from_source
---

# Launch the RAGFlow Service from Source
# Launch a RAGFlow Service from Source

A guide explaining how to set up a RAGFlow service from its source code. By following this guide, you'll be able to debug using the source code.

@ -81,4 +81,4 @@ RAGFlow's file management allows you to download an uploaded file:

![]()

> As of RAGFlow v0.16.0, bulk download is not supported, nor can you download an entire folder.
> As of RAGFlow v0.17.0, bulk download is not supported, nor can you download an entire folder.
@ -46,7 +46,7 @@ You start an AI conversation by creating an assistant.

4. Update **Model Setting**:

   - In **Model**: you select the chat model. Though you have selected the default chat model in **System Model Settings**, RAGFlow allows you to choose an alternative chat model for your dialogue.
   - **Freedom** refers to the level that the LLM improvises. From **Improvise**, **Precise**, to **Balance**, each freedom level corresponds to a unique combination of **Temperature**, **Top P**, **Presence penalty**, and **Frequency penalty**.
   - **Preset configurations** refers to the level that the LLM improvises. From **Improvise**, **Precise**, to **Balance**, each preset configuration corresponds to a unique combination of **Temperature**, **Top P**, **Presence penalty**, and **Frequency penalty**.
   - **Temperature**: Level of the prediction randomness of the LLM. The higher the value, the more creative the LLM is.
   - **Top P** is also known as "nucleus sampling". See [here](https://en.wikipedia.org/wiki/Top-p_sampling) for more information.
   - **Max Tokens**: The maximum length of the LLM's responses. Note that the responses may be curtailed if this value is set too low.
@ -62,16 +62,16 @@ To upgrade RAGFlow, you must upgrade **both** your code **and** your Docker imag

   git clone https://github.com/infiniflow/ragflow.git
   ```

2. Switch to the latest, officially published release, e.g., `v0.16.0`:
2. Switch to the latest, officially published release, e.g., `v0.17.0`:

   ```bash
   git checkout -f v0.16.0
   git checkout -f v0.17.0
   ```

3. Update **ragflow/docker/.env** as follows:

   ```bash
   RAGFLOW_IMAGE=infiniflow/ragflow:v0.16.0
   RAGFLOW_IMAGE=infiniflow/ragflow:v0.17.0
   ```

4. Update the RAGFlow image and restart RAGFlow:
@ -39,7 +39,7 @@ This section provides instructions on setting up the RAGFlow server on Linux. If

`vm.max_map_count`. This value sets the maximum number of memory map areas a process may have. Its default value is 65530. While most applications require fewer than a thousand maps, reducing this value can result in abnormal behaviors, and the system will throw out-of-memory errors when a process reaches the limitation.

RAGFlow v0.16.0 uses Elasticsearch or [Infinity](https://github.com/infiniflow/infinity) for multiple recall. Setting the value of `vm.max_map_count` correctly is crucial to the proper functioning of the Elasticsearch component.
RAGFlow v0.17.0 uses Elasticsearch or [Infinity](https://github.com/infiniflow/infinity) for multiple recall. Setting the value of `vm.max_map_count` correctly is crucial to the proper functioning of the Elasticsearch component.

<Tabs
  defaultValue="linux"
@ -178,18 +178,18 @@ This section provides instructions on setting up the RAGFlow server on Linux. If

   ```bash
   $ git clone https://github.com/infiniflow/ragflow.git
   $ cd ragflow
   $ git checkout -f v0.16.0
   $ cd ragflow/docker
   $ git checkout -f v0.17.0
   ```

3. Use the pre-built Docker images and start up the server:

   :::tip NOTE
   The command below downloads the `v0.16.0-slim` edition of the RAGFlow Docker image. Refer to the following table for descriptions of different RAGFlow editions. To download an RAGFlow edition different from `v0.15.1-slim`, update the `RAGFLOW_IMAGE` variable accordingly in **docker/.env** before using `docker compose` to start the server. For example: set `RAGFLOW_IMAGE=infiniflow/ragflow:v0.15.1` for the full edition `v0.15.1`.
   The command below downloads the `v0.17.0-slim` edition of the RAGFlow Docker image. Refer to the following table for descriptions of different RAGFlow editions. To download a RAGFlow edition different from `v0.17.0-slim`, update the `RAGFLOW_IMAGE` variable accordingly in **docker/.env** before using `docker compose` to start the server. For example: set `RAGFLOW_IMAGE=infiniflow/ragflow:v0.17.0` for the full edition `v0.17.0`.
   :::

   ```bash
   $ docker compose -f docker/docker-compose.yml up -d
   $ docker compose -f docker-compose.yml up -d
   ```

   ```mdx-code-block
@ -198,8 +198,8 @@ This section provides instructions on setting up the RAGFlow server on Linux. If

| RAGFlow image tag | Image size (GB) | Has embedding models and Python packages? | Stable?                  |
| ----------------- | --------------- | ----------------------------------------- | ------------------------ |
| `v0.16.0`         | ≈9              | :heavy_check_mark:                         | Stable release           |
| `v0.16.0-slim`    | ≈2              | ❌                                         | Stable release           |
| `v0.17.0`         | ≈9              | :heavy_check_mark:                         | Stable release           |
| `v0.17.0-slim`    | ≈2              | ❌                                         | Stable release           |
| `nightly`         | ≈9              | :heavy_check_mark:                         | *Unstable* nightly build |
| `nightly-slim`    | ≈2              | ❌                                         | *Unstable* nightly build |
@ -223,9 +223,6 @@ This section provides instructions on setting up the RAGFlow server on Linux. If

    /_/ |_|/_/  |_|\____//_/    /_/ \____/ |__/|__/

    * Running on all addresses (0.0.0.0)
    * Running on http://127.0.0.1:9380
    * Running on http://x.x.x.x:9380
    INFO:werkzeug:Press CTRL+C to quit
   ```

   > If you skip this confirmation step and directly log in to RAGFlow, your browser may prompt a `network anomaly` error because, at that moment, your RAGFlow may not be fully initialized.
@ -22,6 +22,35 @@ The "garbage in garbage out" status quo remains unchanged despite the fact that

---

### Where to find the version of RAGFlow? How to interpret it?

You can find the RAGFlow version number on the **System** page of the UI:

![]()

If you build RAGFlow from source, the version number is also in the system log:

```
    ____   ___    ______ ______ __
   / __ \ / |  / ____// ____// /____  _      __
  / /_/ // /| | / / __ / /_   / // __ \| | /| / /
 / _, _// ___ |/ /_/ // __/  / // /_/ /| |/ |/ /
/_/ |_|/_/  |_|\____//_/    /_/ \____/ |__/|__/

2025-02-18 10:10:43,835 INFO 1445658 RAGFlow version: v0.17.0-50-g6daae7f2 full
```

Where:

- `v0.17.0`: The officially published release.
- `50`: The number of git commits since the official release.
- `g6daae7f2`: `g` is a prefix, and `6daae7f2` is the abbreviated (short) form of the current commit ID.
- `full`/`slim`: The RAGFlow edition.
  - `full`: The full RAGFlow edition.
  - `slim`: The RAGFlow edition without embedding models and Python packages.
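
If you need to pick a version string apart programmatically, a small sketch like the following handles strings of the documented shape (the helper is ours, not part of RAGFlow):

```python
import re

def parse_ragflow_version(s: str) -> dict:
    # Matches e.g. "v0.17.0-50-g6daae7f2 full" as described above.
    m = re.fullmatch(r"(v[\d.]+)-(\d+)-g([0-9a-f]+)\s+(full|slim)", s)
    if not m:
        raise ValueError(f"unrecognized version string: {s!r}")
    release, commits, commit_id, edition = m.groups()
    return {"release": release,
            "commits_since_release": int(commits),
            "commit_id": commit_id,
            "edition": edition}

print(parse_ragflow_version("v0.17.0-50-g6daae7f2 full"))
```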
---

### Why does it take longer for RAGFlow to parse a document than LangChain?

We put painstaking effort into document pre-processing tasks like layout analysis, table structure recognition, and OCR (Optical Character Recognition) using our vision models. This contributes to the additional time required.

@ -42,10 +71,10 @@ We officially support x86 CPU and nvidia GPU. While we also test RAGFlow on ARM6

### Which embedding models can be deployed locally?

RAGFlow offers two Docker image editions, `v0.16.0-slim` and `v0.16.0`:
RAGFlow offers two Docker image editions, `v0.17.0-slim` and `v0.17.0`:

- `infiniflow/ragflow:v0.16.0-slim` (default): The RAGFlow Docker image without embedding models.
- `infiniflow/ragflow:v0.16.0`: The RAGFlow Docker image with embedding models including:
- `infiniflow/ragflow:v0.17.0-slim` (default): The RAGFlow Docker image without embedding models.
- `infiniflow/ragflow:v0.17.0`: The RAGFlow Docker image with embedding models including:
  - Built-in embedding models:
    - `BAAI/bge-large-zh-v1.5`
    - `BAAI/bge-reranker-v2-m3`
@ -87,7 +116,7 @@ Yes, we support enhancing user queries based on existing context of an ongoing c

1. On the **Chat** page, hover over the desired assistant and select **Edit**.
2. In the **Chat Configuration** popup, click the **Prompt Engine** tab.
3. Toggle on **Multi-turn optimization** to enable this feature.
3. Switch on **Multi-turn optimization** to enable this feature.

---

@ -316,13 +345,13 @@ Your IP address or port number may be incorrect. If you are using the default co

A correct Ollama IP address and port is crucial to adding models to Ollama:

- If you are on demo.ragflow.io, ensure that the server hosting Ollama has a publicly accessible IP address. Note that 127.0.0.1 is not a publicly accessible IP address.
- If you deploy RAGFlow locally, ensure that Ollama and RAGFlow are in the same LAN and can comunicate with each other.
- If you deploy RAGFlow locally, ensure that Ollama and RAGFlow are in the same LAN and can communicate with each other.

See [Deploy a local LLM](../guides/deploy_local_llm.mdx) for more information.

---

#### Do you offer examples of using deepdoc to parse PDF or other files?
#### Do you offer examples of using DeepDoc to parse PDF or other files?

Yes, we do. See the Python files under the **rag/app** folder.
@ -9,6 +9,154 @@ A complete reference for RAGFlow's RESTful API. Before proceeding, please ensure

---

## OpenAI-Compatible API

---

### Create chat completion

**POST** `/api/v1/chats_openai/{chat_id}/chat/completions`

Creates a model response for a given chat conversation.

This API follows the same request and response format as OpenAI's API. It allows you to interact with the model in a manner similar to how you would with [OpenAI's API](https://platform.openai.com/docs/api-reference/chat/create).

#### Request

- Method: POST
- URL: `/api/v1/chats_openai/{chat_id}/chat/completions`
- Headers:
  - `'content-Type: application/json'`
  - `'Authorization: Bearer <YOUR_API_KEY>'`
- Body:
  - `"model"`: `string`
  - `"messages"`: `object list`
  - `"stream"`: `boolean`

##### Request example

```bash
curl --request POST \
     --url http://{address}/api/v1/chats_openai/{chat_id}/chat/completions \
     --header 'Content-Type: application/json' \
     --header 'Authorization: Bearer <YOUR_API_KEY>' \
     --data '{
        "model": "model",
        "messages": [{"role": "user", "content": "Say this is a test!"}],
        "stream": true
     }'
```

##### Request Parameters

- `model` (*Body parameter*) `string`, *Required*
  The model used to generate the response. The server will parse this automatically, so you can set it to any value for now.
- `messages` (*Body parameter*) `list[object]`, *Required*
  A list of historical chat messages used to generate the response. This must contain at least one message with the `user` role.
- `stream` (*Body parameter*) `boolean`
  Whether to receive the response as a stream. Set this to `false` explicitly if you prefer to receive the entire response in one go instead of as a stream.

#### Response

Stream:

```json
{
  "id": "chatcmpl-3a9c3572f29311efa69751e139332ced",
  "choices": [
    {
      "delta": {
        "content": "This is a test. If you have any specific questions or need information, feel",
        "role": "assistant",
        "function_call": null,
        "tool_calls": null
      },
      "finish_reason": null,
      "index": 0,
      "logprobs": null
    }
  ],
  "created": 1740543996,
  "model": "model",
  "object": "chat.completion.chunk",
  "system_fingerprint": "",
  "usage": null
}
// omit duplicated information
{"choices":[{"delta":{"content":" free to ask, and I will do my best to provide an answer based on","role":"assistant"}}]}
{"choices":[{"delta":{"content":" the knowledge I have. If your question is unrelated to the provided knowledge base,","role":"assistant"}}]}
{"choices":[{"delta":{"content":" I will let you know.","role":"assistant"}}]}
// the last chunk
{
  "id": "chatcmpl-3a9c3572f29311efa69751e139332ced",
  "choices": [
    {
      "delta": {
        "content": null,
        "role": "assistant",
        "function_call": null,
        "tool_calls": null
      },
      "finish_reason": "stop",
      "index": 0,
      "logprobs": null
    }
  ],
  "created": 1740543996,
  "model": "model",
  "object": "chat.completion.chunk",
  "system_fingerprint": "",
  "usage": {
    "prompt_tokens": 18,
    "completion_tokens": 225,
    "total_tokens": 243
  }
}
```

Non-stream:

```json
{
  "choices": [
    {
      "finish_reason": "stop",
      "index": 0,
      "logprobs": null,
      "message": {
        "content": "This is a test. If you have any specific questions or need information, feel free to ask, and I will do my best to provide an answer based on the knowledge I have. If your question is unrelated to the provided knowledge base, I will let you know.",
        "role": "assistant"
      }
    }
  ],
  "created": 1740543499,
  "id": "chatcmpl-3a9c3572f29311efa69751e139332ced",
  "model": "model",
  "object": "chat.completion",
  "usage": {
    "completion_tokens": 246,
    "completion_tokens_details": {
      "accepted_prediction_tokens": 246,
      "reasoning_tokens": 18,
      "rejected_prediction_tokens": 0
    },
    "prompt_tokens": 18,
    "total_tokens": 264
  }
}
```

Failure:

```json
{
  "code": 102,
  "message": "The last content of this conversation is not from user."
}
```

## DATASET MANAGEMENT

---
@ -2171,18 +2319,19 @@ Creates a session with an agent.

#### Request

- Method: POST
- URL: `/api/v1/agents/{agent_id}/sessions`
- URL: `/api/v1/agents/{agent_id}/sessions?user_id={user_id}`
- Headers:
  - `'content-Type: application/json'`
  - `'content-Type: application/json' or 'multipart/form-data'`
  - `'Authorization: Bearer <YOUR_API_KEY>'`
- Body:
  - the required parameters:`str`
  - the optional parameters:`str`
  - `"user_id"`: `string`
    The optional user-defined ID.
  - other parameters:
    The parameters specified in the **Begin** component.

##### Request example

If `begin` component in the agent doesn't have required parameters:
If the **Begin** component in your agent does not take required parameters:

```bash
curl --request POST \
     --url http://{address}/api/v1/agents/{agent_id}/sessions \

@ -2191,7 +2340,9 @@ curl --request POST \

     --data '{
     }'
```

If `begin` component in the agent has required parameters:
If the **Begin** component in your agent takes required parameters:

```bash
curl --request POST \
     --url http://{address}/api/v1/agents/{agent_id}/sessions \

@ -2203,10 +2354,22 @@ curl --request POST \

     }'
```

If the **Begin** component in your agent takes required file parameters:

```bash
curl --request POST \
     --url http://{address}/api/v1/agents/{agent_id}/sessions?user_id={user_id} \
     --header 'Content-Type: multipart/form-data' \
     --header 'Authorization: Bearer <YOUR_API_KEY>' \
     --form '<FILE_KEY>=@./test1.png'
```

##### Request parameters

- `agent_id`: (*Path parameter*)
  The ID of the associated agent.
- `user_id`: (*Filter parameter*)
  The optional user-defined ID for parsing docs (especially images) when creating a session while uploading files.

#### Response
@ -2358,7 +2521,7 @@ Asks a specified agent a question to start an AI-powered conversation.

- `"user_id"`: `string`(optional)
- other parameters: `string`

##### Request example

If the `begin` component doesn't have parameters, the following code will create a session.
If the **Begin** component does not take parameters, the following code will create a session.

```bash
curl --request POST \
     --url http://{address}/api/v1/agents/{agent_id}/completions \

@ -2368,7 +2531,7 @@ curl --request POST \

     {
     }'
```

If the `begin` component have parameters, the following code will create a session.
If the **Begin** component takes parameters, the following code will create a session.

```bash
curl --request POST \
     --url http://{address}/api/v1/agents/{agent_id}/completions \

@ -2394,7 +2557,6 @@ curl --request POST \

     }'
```

##### Request Parameters

- `agent_id`: (*Path parameter*), `string`

@ -2410,9 +2572,10 @@ curl --request POST \

- `"user_id"`: (*Body parameter*), `string`
  The optional user-defined ID. Valid *only* when no `session_id` is provided.
- Other parameters: (*Body Parameter*)
  The parameters in the begin component.
  Parameters specified in the **Begin** component.

#### Response

success without `session_id` provided and with no parameters in the `begin` component:
success without `session_id` provided and with no parameters specified in the **Begin** component:

```json
data:{
  "code": 0,

@ -2430,7 +2593,8 @@ data:{

  "data": true
}
```

Success without `session_id` provided and with parameters in the `begin` component:
Success without `session_id` provided and with parameters specified in the **Begin** component:

```json
data:{

@ -2466,7 +2630,7 @@ data:{

}
data:
```

Success with parameters in the `begin` component:
Success with parameters specified in the **Begin** component:

```json
data:{
  "code": 0,

@ -2545,7 +2709,6 @@ data:{

}
```

Failure:

```json
@ -13,10 +13,63 @@ Run the following command to download the Python SDK:

```bash
pip install ragflow-sdk
```

:::

---

## OpenAI-Compatible API

---

### Create chat completion

Creates a model response for the given historical chat conversation via OpenAI's API.

#### Parameters

##### model: `str`, *Required*

The model used to generate the response. The server will parse this automatically, so you can set it to any value for now.

##### messages: `list[object]`, *Required*

A list of historical chat messages used to generate the response. This must contain at least one message with the `user` role.

##### stream: `boolean`

Whether to receive the response as a stream. Set this to `false` explicitly if you prefer to receive the entire response in one go instead of as a stream.

#### Returns

- Success: A response [message](https://platform.openai.com/docs/api-reference/chat/create) in OpenAI's format
- Failure: `Exception`

#### Examples

```python
from openai import OpenAI

model = "model"
client = OpenAI(api_key="ragflow-api-key", base_url=f"http://ragflow_address/api/v1/chats_openai/<chat_id>")

completion = client.chat.completions.create(
    model=model,
    messages=[
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": "Who are you?"},
    ],
    stream=True,
)

stream = True
if stream:
    for chunk in completion:
        print(chunk)
else:
    print(completion.choices[0].message.content)
```

## DATASET MANAGEMENT

---

@ -262,6 +315,7 @@ A dictionary representing the attributes to update, with the following keys:

- `"email"`: Email
- `"knowledge_graph"`: Knowledge Graph
  Ensure your LLM is properly configured on the **Settings** page before selecting this. Please also note that knowledge graph construction consumes a large number of tokens!
- `"meta_fields"`: `dict[str, Any]` The meta fields of the dataset.

#### Returns

@ -1461,7 +1515,7 @@ In streaming mode, not all responses include a reference, as this depends on the

##### question: `str`

The question to start an AI-powered conversation. If the `begin` component takes parameters, a question is not required.
The question to start an AI-powered conversation. If the **Begin** component takes parameters, a question is not required.

##### stream: `bool`
@ -12,7 +12,7 @@ A complete list of models supported by RAGFlow, which will continue to expand.

<APITable>
```

| Provider              | Chat               | Embedding          | Rerank             | Img2txt            | Sequence2txt       | TTS                |
| Provider              | Chat               | Embedding          | Rerank             | Img2txt            | Speech2txt         | TTS                |
| --------------------- | ------------------ | ------------------ | ------------------ | ------------------ | ------------------ | ------------------ |
| Anthropic             | :heavy_check_mark: |                    |                    |                    |                    |                    |
| Azure-OpenAI          | :heavy_check_mark: | :heavy_check_mark: |                    | :heavy_check_mark: | :heavy_check_mark: |                    |

@ -35,15 +35,18 @@ A complete list of models supported by RAGFlow, which will continue to expand.

| LM-Studio             | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: |                    |                    |
| MiniMax               | :heavy_check_mark: |                    |                    |                    |                    |                    |
| Mistral               | :heavy_check_mark: | :heavy_check_mark: |                    |                    |                    |                    |
| ModelScope            | :heavy_check_mark: |                    |                    |                    |                    |                    |
| Moonshot              | :heavy_check_mark: |                    |                    | :heavy_check_mark: |                    |                    |
| novita.ai             | :heavy_check_mark: |                    |                    |                    |                    |                    |
| NVIDIA                | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: |                    |                    |
| Ollama                | :heavy_check_mark: | :heavy_check_mark: |                    | :heavy_check_mark: |                    |                    |
| OpenAI                | :heavy_check_mark: | :heavy_check_mark: |                    | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: |
| OpenAI-API-Compatible | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: |                    |                    |
| VLLM                  | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: |                    |                    |
| OpenRouter            | :heavy_check_mark: |                    |                    | :heavy_check_mark: |                    |                    |
| PerfXCloud            | :heavy_check_mark: | :heavy_check_mark: |                    |                    |                    |                    |
| Replicate             | :heavy_check_mark: | :heavy_check_mark: |                    |                    |                    |                    |
| PPIO                  | :heavy_check_mark: |                    |                    |                    |                    |                    |
| SILICONFLOW           | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: |                    |                    |
| StepFun               | :heavy_check_mark: |                    |                    |                    |                    |                    |
| Tencent Hunyuan       | :heavy_check_mark: |                    |                    |                    |                    |                    |
@ -7,6 +7,45 @@ slug: /release_notes

Key features, improvements and bug fixes in the latest releases.

## v0.16.0

Released on February 6, 2025.

### New features

- Supports DeepSeek R1 and DeepSeek V3.
- GraphRAG refactor: Knowledge graph is dynamically built on an entire knowledge base (dataset) rather than on an individual file, and automatically updated when a newly uploaded file starts parsing. See [here](https://ragflow.io/docs/dev/construct_knowledge_graph).
- Adds an **Iteration** agent component and a **Research report generator** agent template. See [here](./guides/agent/agent_component_reference/iteration.mdx).
- New UI language: Portuguese.
- Allows setting metadata for a specific file in a knowledge base to enhance AI-powered chats. See [here](./guides/configure_knowledge_base/set_metadata.md).
- Upgrades RAGFlow's document engine [Infinity](https://github.com/infiniflow/infinity) to v0.6.0.dev3.
- Supports GPU acceleration for DeepDoc (see [docker-compose-gpu.yml](https://github.com/infiniflow/ragflow/blob/main/docker/docker-compose-gpu.yml)).
- Supports creating and referencing a **Tag** knowledge base as a key milestone towards bridging the semantic gap between query and response.

:::danger IMPORTANT
The **Tag knowledge base** feature is *unavailable* on the [Infinity](https://github.com/infiniflow/infinity) document engine.
:::

### Documentation

#### Added documents

- [Construct knowledge graph](./guides/configure_knowledge_base/construct_knowledge_graph.md)
- [Set metadata](./guides/configure_knowledge_base/set_metadata.md)
- [Begin component](./guides/agent/agent_component_reference/begin.mdx)
- [Generate component](./guides/agent/agent_component_reference/generate.mdx)
- [Interact component](./guides/agent/agent_component_reference/interact.mdx)
- [Retrieval component](./guides/agent/agent_component_reference/retrieval.mdx)
- [Categorize component](./guides/agent/agent_component_reference/categorize.mdx)
- [Keyword component](./guides/agent/agent_component_reference/keyword.mdx)
- [Message component](./guides/agent/agent_component_reference/message.mdx)
- [Rewrite component](./guides/agent/agent_component_reference/rewrite.mdx)
- [Switch component](./guides/agent/agent_component_reference/switch.mdx)
- [Concentrator component](./guides/agent/agent_component_reference/concentrator.mdx)
- [Template component](./guides/agent/agent_component_reference/template.mdx)
- [Iteration component](./guides/agent/agent_component_reference/iteration.mdx)
- [Note component](./guides/agent/agent_component_reference/note.mdx)

## v0.15.1

Released on December 25, 2024.

@ -60,7 +99,7 @@ Released on December 18, 2024.

### Improvements

- Upgrades the Document Layout Analysis model in Deepdoc.
- Upgrades the Document Layout Analysis model in DeepDoc.
- Significantly enhances the retrieval performance when using [Infinity](https://github.com/infiniflow/infinity) as document engine.

### Related APIs

@ -233,7 +272,7 @@ Released on August 26, 2024.

- Incorporates monitoring for the task executor.
- Introduces Agent tools **GitHub**, **DeepL**, **BaiduFanyi**, **QWeather**, and **GoogleScholar**.
- Supports chunking of EML files.
- Supports more LLMs or model services: **GPT-4o-mini**, **PerfXCloud**, **TogetherAI**, **Upstage**, **Novita.AI**, **01.AI**, **SiliconFlow**, **XunFei Spark**, **Baidu Yiyan**, and **Tencent Hunyuan**.
- Supports more LLMs or model services: **GPT-4o-mini**, **PerfXCloud**, **TogetherAI**, **Upstage**, **Novita.AI**, **01.AI**, **SiliconFlow**, **PPIO**, **XunFei Spark**, **Baidu Yiyan**, and **Tencent Hunyuan**.

## v0.9.0
Some files were not shown because too many files have changed in this diff.