Compare commits


12 Commits

Author SHA1 Message Date
6ca1aef52e Fix: catch non-begin component output (#7827)
### What problem does this PR solve?

Catch non-begin component output

### Type of change

- [x] Bug Fix (non-breaking change which fixes an issue)
2025-05-23 20:07:39 +08:00
d285a12b85 Fix: Fixed the issue that the script text of the code operator is not displayed after refreshing the page after saving the script text of the code operator #4977 (#7826)
### What problem does this PR solve?

Fix: Fixed an issue where the script text of the Code operator was not
displayed when the page was refreshed after saving it. #4977

### Type of change

- [x] Bug Fix (non-breaking change which fixes an issue)
2025-05-23 19:02:31 +08:00
cbb90e171d Refa: update gemini2.5 (#7822)
### What problem does this PR solve?

Update gemini2.5

### Type of change

- [x] Refactoring
2025-05-23 18:57:59 +08:00
59934b63aa Docs: Added code component reference (#7821)
### What problem does this PR solve?

### Type of change

- [x] Documentation Update
2025-05-23 18:30:02 +08:00
31e229ff78 Docs: update for v0.19.0 (#7823)
### What problem does this PR solve?

update for v0.19.0

### Type of change

- [x] Documentation Update
2025-05-23 18:18:58 +08:00
f21d023260 Docs: Added v0.19.0 release notes (#7818)
### What problem does this PR solve?

### Type of change

- [x] Documentation Update
2025-05-23 16:02:00 +08:00
40423878eb Docs: Added instructions on cross-language search (#7812) (#7813)
### What problem does this PR solve?


### Type of change


- [x] Documentation Update
2025-05-23 14:32:13 +08:00
db8a3f3480 Feat: add claude4 models (#7809)
### What problem does this PR solve?

Add claude4 models.

### Type of change

- [x] New Feature (non-breaking change which adds functionality)
2025-05-23 13:35:59 +08:00
0cf8c5bedb Feat: more robust fallbacks for citations (#7801)
### What problem does this PR solve?

Add more robust fallbacks for citations

### Type of change

- [x] Bug Fix (non-breaking change which fixes an issue)
- [x] New Feature (non-breaking change which adds functionality)
2025-05-23 09:46:38 +08:00
47c5cdccf6 Feat: change default models (#7777)
### What problem does this PR solve?

Change the default models to built-in models.
https://github.com/infiniflow/ragflow/issues/7774

### Type of change

- [x] New Feature (non-breaking change which adds functionality)
2025-05-22 11:59:12 +08:00
0c2b8182e4 Fix: wrong type hint (#7738)
### What problem does this PR solve?

Wrong type hint. #7729.

### Type of change

- [x] Bug Fix (non-breaking change which fixes an issue)
2025-05-20 17:21:42 +08:00
4a7ed9afef Feat: sandbox enhancement (#7739)
### What problem does this PR solve?

1. Add sandbox options for max memory and timeout.
2. Malicious code detection for Python only (see the sketch below this commit list).

### Type of change

- [x] New Feature (non-breaking change which adds functionality)
2025-05-20 17:21:28 +08:00
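
For context on the sandbox enhancement in #7739 above: the description mentions a Python-only malicious-code check. Below is a minimal, hedged sketch of what such a static check could look like using the standard `ast` module; the deny-lists and function name are assumptions for illustration, not RAGFlow's actual implementation.

```python
import ast

# Hypothetical deny-lists a sandbox might refuse to run (illustrative only).
FORBIDDEN_IMPORTS = {"os", "subprocess", "socket", "shutil"}
FORBIDDEN_CALLS = {"eval", "exec", "__import__", "open"}

def looks_malicious(source: str) -> bool:
    """Return True if the snippet imports or calls anything on the deny-lists."""
    try:
        tree = ast.parse(source)
    except SyntaxError:
        return True  # refuse code that does not even parse
    for node in ast.walk(tree):
        if isinstance(node, ast.Import):
            if any(alias.name.split(".")[0] in FORBIDDEN_IMPORTS for alias in node.names):
                return True
        elif isinstance(node, ast.ImportFrom):
            if (node.module or "").split(".")[0] in FORBIDDEN_IMPORTS:
                return True
        elif isinstance(node, ast.Call) and isinstance(node.func, ast.Name) and node.func.id in FORBIDDEN_CALLS:
            return True
    return False

print(looks_malicious("import subprocess; subprocess.run(['rm', '-rf', '/'])"))  # True
print(looks_malicious("def main(x): return {'result': x * 2}"))                  # False
```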
42 changed files with 477 additions and 307 deletions

View File

@ -22,7 +22,7 @@
<img alt="Static Badge" src="https://img.shields.io/badge/Online-Demo-4e6b99">
</a>
<a href="https://hub.docker.com/r/infiniflow/ragflow" target="_blank">
<img src="https://img.shields.io/badge/docker_pull-ragflow:v0.18.0-brightgreen" alt="docker pull infiniflow/ragflow:v0.18.0">
<img src="https://img.shields.io/badge/docker_pull-ragflow:v0.19.0-brightgreen" alt="docker pull infiniflow/ragflow:v0.19.0">
</a>
<a href="https://github.com/infiniflow/ragflow/releases/latest">
<img src="https://img.shields.io/github/v/release/infiniflow/ragflow?color=blue&label=Latest%20Release" alt="Latest Release">
@ -178,7 +178,7 @@ releases! 🌟
> All Docker images are built for x86 platforms. We don't currently offer Docker images for ARM64.
> If you are on an ARM64 platform, follow [this guide](https://ragflow.io/docs/dev/build_docker_image) to build a Docker image compatible with your system.
> The command below downloads the `v0.18.0-slim` edition of the RAGFlow Docker image. See the following table for descriptions of different RAGFlow editions. To download a RAGFlow edition different from `v0.18.0-slim`, update the `RAGFLOW_IMAGE` variable accordingly in **docker/.env** before using `docker compose` to start the server. For example: set `RAGFLOW_IMAGE=infiniflow/ragflow:v0.18.0` for the full edition `v0.18.0`.
> The command below downloads the `v0.19.0-slim` edition of the RAGFlow Docker image. See the following table for descriptions of different RAGFlow editions. To download a RAGFlow edition different from `v0.19.0-slim`, update the `RAGFLOW_IMAGE` variable accordingly in **docker/.env** before using `docker compose` to start the server. For example: set `RAGFLOW_IMAGE=infiniflow/ragflow:v0.19.0` for the full edition `v0.19.0`.
```bash
$ cd ragflow/docker
@ -191,8 +191,8 @@ releases! 🌟
| RAGFlow image tag | Image size (GB) | Has embedding models? | Stable? |
|-------------------|-----------------|-----------------------|--------------------------|
| v0.18.0 | &approx;9 | :heavy_check_mark: | Stable release |
| v0.18.0-slim | &approx;2 | ❌ | Stable release |
| v0.19.0 | &approx;9 | :heavy_check_mark: | Stable release |
| v0.19.0-slim | &approx;2 | ❌ | Stable release |
| nightly | &approx;9 | :heavy_check_mark: | _Unstable_ nightly build |
| nightly-slim | &approx;2 | ❌ | _Unstable_ nightly build |

View File

@ -22,7 +22,7 @@
<img alt="Lencana Daring" src="https://img.shields.io/badge/Online-Demo-4e6b99">
</a>
<a href="https://hub.docker.com/r/infiniflow/ragflow" target="_blank">
<img src="https://img.shields.io/badge/docker_pull-ragflow:v0.18.0-brightgreen" alt="docker pull infiniflow/ragflow:v0.18.0">
<img src="https://img.shields.io/badge/docker_pull-ragflow:v0.19.0-brightgreen" alt="docker pull infiniflow/ragflow:v0.19.0">
</a>
<a href="https://github.com/infiniflow/ragflow/releases/latest">
<img src="https://img.shields.io/github/v/release/infiniflow/ragflow?color=blue&label=Rilis%20Terbaru" alt="Rilis Terbaru">
@ -173,7 +173,7 @@ Coba demo kami di [https://demo.ragflow.io](https://demo.ragflow.io).
> Semua gambar Docker dibangun untuk platform x86. Saat ini, kami tidak menawarkan gambar Docker untuk ARM64.
> Jika Anda menggunakan platform ARM64, [silakan gunakan panduan ini untuk membangun gambar Docker yang kompatibel dengan sistem Anda](https://ragflow.io/docs/dev/build_docker_image).
> Perintah di bawah ini mengunduh edisi v0.18.0-slim dari gambar Docker RAGFlow. Silakan merujuk ke tabel berikut untuk deskripsi berbagai edisi RAGFlow. Untuk mengunduh edisi RAGFlow yang berbeda dari v0.18.0-slim, perbarui variabel RAGFLOW_IMAGE di docker/.env sebelum menggunakan docker compose untuk memulai server. Misalnya, atur RAGFLOW_IMAGE=infiniflow/ragflow:v0.18.0 untuk edisi lengkap v0.18.0.
> Perintah di bawah ini mengunduh edisi v0.19.0-slim dari gambar Docker RAGFlow. Silakan merujuk ke tabel berikut untuk deskripsi berbagai edisi RAGFlow. Untuk mengunduh edisi RAGFlow yang berbeda dari v0.19.0-slim, perbarui variabel RAGFLOW_IMAGE di docker/.env sebelum menggunakan docker compose untuk memulai server. Misalnya, atur RAGFLOW_IMAGE=infiniflow/ragflow:v0.19.0 untuk edisi lengkap v0.19.0.
```bash
$ cd ragflow/docker
@ -186,8 +186,8 @@ $ docker compose -f docker-compose.yml up -d
| RAGFlow image tag | Image size (GB) | Has embedding models? | Stable? |
| ----------------- | --------------- | --------------------- | ------------------------ |
| v0.18.0 | &approx;9 | :heavy_check_mark: | Stable release |
| v0.18.0-slim | &approx;2 | ❌ | Stable release |
| v0.19.0 | &approx;9 | :heavy_check_mark: | Stable release |
| v0.19.0-slim | &approx;2 | ❌ | Stable release |
| nightly | &approx;9 | :heavy_check_mark: | _Unstable_ nightly build |
| nightly-slim | &approx;2 | ❌ | _Unstable_ nightly build |

View File

@ -22,7 +22,7 @@
<img alt="Static Badge" src="https://img.shields.io/badge/Online-Demo-4e6b99">
</a>
<a href="https://hub.docker.com/r/infiniflow/ragflow" target="_blank">
<img src="https://img.shields.io/badge/docker_pull-ragflow:v0.18.0-brightgreen" alt="docker pull infiniflow/ragflow:v0.18.0">
<img src="https://img.shields.io/badge/docker_pull-ragflow:v0.19.0-brightgreen" alt="docker pull infiniflow/ragflow:v0.19.0">
</a>
<a href="https://github.com/infiniflow/ragflow/releases/latest">
<img src="https://img.shields.io/github/v/release/infiniflow/ragflow?color=blue&label=Latest%20Release" alt="Latest Release">
@ -152,7 +152,7 @@
> 現在、公式に提供されているすべての Docker イメージは x86 アーキテクチャ向けにビルドされており、ARM64 用の Docker イメージは提供されていません。
> ARM64 アーキテクチャのオペレーティングシステムを使用している場合は、[このドキュメント](https://ragflow.io/docs/dev/build_docker_image)を参照して Docker イメージを自分でビルドしてください。
> 以下のコマンドは、RAGFlow Docker イメージの v0.18.0-slim エディションをダウンロードします。異なる RAGFlow エディションの説明については、以下の表を参照してください。v0.18.0-slim とは異なるエディションをダウンロードするには、docker/.env ファイルの RAGFLOW_IMAGE 変数を適宜更新し、docker compose を使用してサーバーを起動してください。例えば、完全版 v0.18.0 をダウンロードするには、RAGFLOW_IMAGE=infiniflow/ragflow:v0.18.0 と設定します。
> 以下のコマンドは、RAGFlow Docker イメージの v0.19.0-slim エディションをダウンロードします。異なる RAGFlow エディションの説明については、以下の表を参照してください。v0.19.0-slim とは異なるエディションをダウンロードするには、docker/.env ファイルの RAGFLOW_IMAGE 変数を適宜更新し、docker compose を使用してサーバーを起動してください。例えば、完全版 v0.19.0 をダウンロードするには、RAGFLOW_IMAGE=infiniflow/ragflow:v0.19.0 と設定します。
```bash
$ cd ragflow/docker
@ -165,8 +165,8 @@
| RAGFlow image tag | Image size (GB) | Has embedding models? | Stable? |
| ----------------- | --------------- | --------------------- | ------------------------ |
| v0.18.0 | &approx;9 | :heavy_check_mark: | Stable release |
| v0.18.0-slim | &approx;2 | ❌ | Stable release |
| v0.19.0 | &approx;9 | :heavy_check_mark: | Stable release |
| v0.19.0-slim | &approx;2 | ❌ | Stable release |
| nightly | &approx;9 | :heavy_check_mark: | _Unstable_ nightly build |
| nightly-slim | &approx;2 | ❌ | _Unstable_ nightly build |

View File

@ -22,7 +22,7 @@
<img alt="Static Badge" src="https://img.shields.io/badge/Online-Demo-4e6b99">
</a>
<a href="https://hub.docker.com/r/infiniflow/ragflow" target="_blank">
<img src="https://img.shields.io/badge/docker_pull-ragflow:v0.18.0-brightgreen" alt="docker pull infiniflow/ragflow:v0.18.0">
<img src="https://img.shields.io/badge/docker_pull-ragflow:v0.19.0-brightgreen" alt="docker pull infiniflow/ragflow:v0.19.0">
</a>
<a href="https://github.com/infiniflow/ragflow/releases/latest">
<img src="https://img.shields.io/github/v/release/infiniflow/ragflow?color=blue&label=Latest%20Release" alt="Latest Release">
@ -152,7 +152,7 @@
> 모든 Docker 이미지는 x86 플랫폼을 위해 빌드되었습니다. 우리는 현재 ARM64 플랫폼을 위한 Docker 이미지를 제공하지 않습니다.
> ARM64 플랫폼을 사용 중이라면, [시스템과 호환되는 Docker 이미지를 빌드하려면 이 가이드를 사용해 주세요](https://ragflow.io/docs/dev/build_docker_image).
> 아래 명령어는 RAGFlow Docker 이미지의 v0.18.0-slim 버전을 다운로드합니다. 다양한 RAGFlow 버전에 대한 설명은 다음 표를 참조하십시오. v0.18.0-slim과 다른 RAGFlow 버전을 다운로드하려면, docker/.env 파일에서 RAGFLOW_IMAGE 변수를 적절히 업데이트한 후 docker compose를 사용하여 서버를 시작하십시오. 예를 들어, 전체 버전인 v0.18.0을 다운로드하려면 RAGFLOW_IMAGE=infiniflow/ragflow:v0.18.0로 설정합니다.
> 아래 명령어는 RAGFlow Docker 이미지의 v0.19.0-slim 버전을 다운로드합니다. 다양한 RAGFlow 버전에 대한 설명은 다음 표를 참조하십시오. v0.19.0-slim과 다른 RAGFlow 버전을 다운로드하려면, docker/.env 파일에서 RAGFLOW_IMAGE 변수를 적절히 업데이트한 후 docker compose를 사용하여 서버를 시작하십시오. 예를 들어, 전체 버전인 v0.19.0을 다운로드하려면 RAGFLOW_IMAGE=infiniflow/ragflow:v0.19.0로 설정합니다.
```bash
$ cd ragflow/docker
@ -165,8 +165,8 @@
| RAGFlow image tag | Image size (GB) | Has embedding models? | Stable? |
| ----------------- | --------------- | --------------------- | ------------------------ |
| v0.18.0 | &approx;9 | :heavy_check_mark: | Stable release |
| v0.18.0-slim | &approx;2 | ❌ | Stable release |
| v0.19.0 | &approx;9 | :heavy_check_mark: | Stable release |
| v0.19.0-slim | &approx;2 | ❌ | Stable release |
| nightly | &approx;9 | :heavy_check_mark: | _Unstable_ nightly build |
| nightly-slim | &approx;2 | ❌ | _Unstable_ nightly build |

View File

@ -22,7 +22,7 @@
<img alt="Badge Estático" src="https://img.shields.io/badge/Online-Demo-4e6b99">
</a>
<a href="https://hub.docker.com/r/infiniflow/ragflow" target="_blank">
<img src="https://img.shields.io/badge/docker_pull-ragflow:v0.18.0-brightgreen" alt="docker pull infiniflow/ragflow:v0.18.0">
<img src="https://img.shields.io/badge/docker_pull-ragflow:v0.19.0-brightgreen" alt="docker pull infiniflow/ragflow:v0.19.0">
</a>
<a href="https://github.com/infiniflow/ragflow/releases/latest">
<img src="https://img.shields.io/github/v/release/infiniflow/ragflow?color=blue&label=Última%20Relese" alt="Última Versão">
@ -172,7 +172,7 @@ Experimente nossa demo em [https://demo.ragflow.io](https://demo.ragflow.io).
> Todas as imagens Docker são construídas para plataformas x86. Atualmente, não oferecemos imagens Docker para ARM64.
> Se você estiver usando uma plataforma ARM64, por favor, utilize [este guia](https://ragflow.io/docs/dev/build_docker_image) para construir uma imagem Docker compatível com o seu sistema.
> O comando abaixo baixa a edição `v0.18.0-slim` da imagem Docker do RAGFlow. Consulte a tabela a seguir para descrições de diferentes edições do RAGFlow. Para baixar uma edição do RAGFlow diferente da `v0.18.0-slim`, atualize a variável `RAGFLOW_IMAGE` conforme necessário no **docker/.env** antes de usar `docker compose` para iniciar o servidor. Por exemplo: defina `RAGFLOW_IMAGE=infiniflow/ragflow:v0.18.0` para a edição completa `v0.18.0`.
> O comando abaixo baixa a edição `v0.19.0-slim` da imagem Docker do RAGFlow. Consulte a tabela a seguir para descrições de diferentes edições do RAGFlow. Para baixar uma edição do RAGFlow diferente da `v0.19.0-slim`, atualize a variável `RAGFLOW_IMAGE` conforme necessário no **docker/.env** antes de usar `docker compose` para iniciar o servidor. Por exemplo: defina `RAGFLOW_IMAGE=infiniflow/ragflow:v0.19.0` para a edição completa `v0.19.0`.
```bash
$ cd ragflow/docker
@ -185,8 +185,8 @@ Experimente nossa demo em [https://demo.ragflow.io](https://demo.ragflow.io).
| Tag da imagem RAGFlow | Tamanho da imagem (GB) | Possui modelos de incorporação? | Estável? |
| --------------------- | ---------------------- | ------------------------------- | ------------------------ |
| v0.18.0 | ~9 | :heavy_check_mark: | Lançamento estável |
| v0.18.0-slim | ~2 | ❌ | Lançamento estável |
| v0.19.0 | ~9 | :heavy_check_mark: | Lançamento estável |
| v0.19.0-slim | ~2 | ❌ | Lançamento estável |
| nightly | ~9 | :heavy_check_mark: | _Instável_ build noturno |
| nightly-slim | ~2 | ❌ | _Instável_ build noturno |

View File

@ -21,7 +21,7 @@
<img alt="Static Badge" src="https://img.shields.io/badge/Online-Demo-4e6b99">
</a>
<a href="https://hub.docker.com/r/infiniflow/ragflow" target="_blank">
<img src="https://img.shields.io/badge/docker_pull-ragflow:v0.18.0-brightgreen" alt="docker pull infiniflow/ragflow:v0.18.0">
<img src="https://img.shields.io/badge/docker_pull-ragflow:v0.19.0-brightgreen" alt="docker pull infiniflow/ragflow:v0.19.0">
</a>
<a href="https://github.com/infiniflow/ragflow/releases/latest">
<img src="https://img.shields.io/github/v/release/infiniflow/ragflow?color=blue&label=Latest%20Release" alt="Latest Release">
@ -151,7 +151,7 @@
> 所有 Docker 映像檔都是為 x86 平台建置的。目前,我們不提供 ARM64 平台的 Docker 映像檔。
> 如果您使用的是 ARM64 平台,請使用 [這份指南](https://ragflow.io/docs/dev/build_docker_image) 來建置適合您系統的 Docker 映像檔。
> 執行以下指令會自動下載 RAGFlow slim Docker 映像 `v0.18.0-slim`。請參考下表查看不同 Docker 發行版的說明。如需下載不同於 `v0.18.0-slim` 的 Docker 映像,請在執行 `docker compose` 啟動服務之前先更新 **docker/.env** 檔案內的 `RAGFLOW_IMAGE` 變數。例如,你可以透過設定 `RAGFLOW_IMAGE=infiniflow/ragflow:v0.18.0` 來下載 RAGFlow 鏡像的 `v0.18.0` 完整發行版。
> 執行以下指令會自動下載 RAGFlow slim Docker 映像 `v0.19.0-slim`。請參考下表查看不同 Docker 發行版的說明。如需下載不同於 `v0.19.0-slim` 的 Docker 映像,請在執行 `docker compose` 啟動服務之前先更新 **docker/.env** 檔案內的 `RAGFLOW_IMAGE` 變數。例如,你可以透過設定 `RAGFLOW_IMAGE=infiniflow/ragflow:v0.19.0` 來下載 RAGFlow 鏡像的 `v0.19.0` 完整發行版。
```bash
$ cd ragflow/docker
@ -164,8 +164,8 @@
| RAGFlow image tag | Image size (GB) | Has embedding models? | Stable? |
| ----------------- | --------------- | --------------------- | ------------------------ |
| v0.18.0 | &approx;9 | :heavy_check_mark: | Stable release |
| v0.18.0-slim | &approx;2 | ❌ | Stable release |
| v0.19.0 | &approx;9 | :heavy_check_mark: | Stable release |
| v0.19.0-slim | &approx;2 | ❌ | Stable release |
| nightly | &approx;9 | :heavy_check_mark: | _Unstable_ nightly build |
| nightly-slim | &approx;2 | ❌ | _Unstable_ nightly build |

View File

@ -22,7 +22,7 @@
<img alt="Static Badge" src="https://img.shields.io/badge/Online-Demo-4e6b99">
</a>
<a href="https://hub.docker.com/r/infiniflow/ragflow" target="_blank">
<img src="https://img.shields.io/badge/docker_pull-ragflow:v0.18.0-brightgreen" alt="docker pull infiniflow/ragflow:v0.18.0">
<img src="https://img.shields.io/badge/docker_pull-ragflow:v0.19.0-brightgreen" alt="docker pull infiniflow/ragflow:v0.19.0">
</a>
<a href="https://github.com/infiniflow/ragflow/releases/latest">
<img src="https://img.shields.io/github/v/release/infiniflow/ragflow?color=blue&label=Latest%20Release" alt="Latest Release">
@ -152,7 +152,7 @@
> 请注意,目前官方提供的所有 Docker 镜像均基于 x86 架构构建,并不提供基于 ARM64 的 Docker 镜像。
> 如果你的操作系统是 ARM64 架构,请参考[这篇文档](https://ragflow.io/docs/dev/build_docker_image)自行构建 Docker 镜像。
> 运行以下命令会自动下载 RAGFlow slim Docker 镜像 `v0.18.0-slim`。请参考下表查看不同 Docker 发行版的描述。如需下载不同于 `v0.18.0-slim` 的 Docker 镜像,请在运行 `docker compose` 启动服务之前先更新 **docker/.env** 文件内的 `RAGFLOW_IMAGE` 变量。比如,你可以通过设置 `RAGFLOW_IMAGE=infiniflow/ragflow:v0.18.0` 来下载 RAGFlow 镜像的 `v0.18.0` 完整发行版。
> 运行以下命令会自动下载 RAGFlow slim Docker 镜像 `v0.19.0-slim`。请参考下表查看不同 Docker 发行版的描述。如需下载不同于 `v0.19.0-slim` 的 Docker 镜像,请在运行 `docker compose` 启动服务之前先更新 **docker/.env** 文件内的 `RAGFLOW_IMAGE` 变量。比如,你可以通过设置 `RAGFLOW_IMAGE=infiniflow/ragflow:v0.19.0` 来下载 RAGFlow 镜像的 `v0.19.0` 完整发行版。
```bash
$ cd ragflow/docker
@ -165,8 +165,8 @@
| RAGFlow image tag | Image size (GB) | Has embedding models? | Stable? |
| ----------------- | --------------- | --------------------- | ------------------------ |
| v0.18.0 | &approx;9 | :heavy_check_mark: | Stable release |
| v0.18.0-slim | &approx;2 | ❌ | Stable release |
| v0.19.0 | &approx;9 | :heavy_check_mark: | Stable release |
| v0.19.0-slim | &approx;2 | ❌ | Stable release |
| nightly | &approx;9 | :heavy_check_mark: | _Unstable_ nightly build |
| nightly-slim | &approx;2 | ❌ | _Unstable_ nightly build |

View File

@ -79,7 +79,7 @@ class Code(ComponentBase, ABC):
def _run(self, history, **kwargs):
arguments = {}
for input in self._param.arguments:
assert "@" in input["component_id"], "Each code argument should bind to a specific compontent"
if "@" in input["component_id"]:
component_id = input["component_id"].split("@")[0]
refered_component_key = input["component_id"].split("@")[1]
refered_component = self._canvas.get_component(component_id)["obj"]
@ -88,6 +88,14 @@ class Code(ComponentBase, ABC):
if param["key"] == refered_component_key:
if "value" in param:
arguments[input["name"]] = param["value"]
else:
cpn = self._canvas.get_component(input["component_id"])["obj"]
if cpn.component_name.lower() == "answer":
arguments[input["name"]] = self._canvas.get_history(1)[0]["content"]
continue
_, out = cpn.output(allow_partial=False)
if not out.empty:
arguments[input["name"]] = "\n".join(out["content"])
return self._execute_code(
language=self._param.lang,
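
To illustrate the fallback added above (#7827): when a Code argument's `component_id` carries no `@`, the new branch reads the referenced component's own output, or the last chat turn if the component is an Answer node. A standalone sketch of that decision follows, with the canvas reduced to plain dictionaries; the names and data shapes here are assumptions for illustration, not the actual Canvas API.

```python
def resolve_argument(canvas: dict, binding: str, history: list[dict]) -> str | None:
    """Resolve one Code-component input, mirroring the branch shown above."""
    if "@" in binding:
        # "component_id@key": look the key up in that component's parameters.
        component_id, key = binding.split("@", 1)
        for param in canvas[component_id].get("params", []):
            if param["key"] == key and "value" in param:
                return param["value"]
        return None
    # No "@": fall back to the component's output, or the most recent chat turn.
    component = canvas[binding]
    if component.get("type") == "answer":
        return history[-1]["content"] if history else None
    return "\n".join(component.get("output", []))

canvas = {"begin": {"params": [{"key": "lang", "value": "en"}]},
          "retrieval:0": {"type": "retrieval", "output": ["chunk A", "chunk B"]}}
print(resolve_argument(canvas, "begin@lang", []))   # "en"
print(resolve_argument(canvas, "retrieval:0", []))  # "chunk A\nchunk B"
```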

View File

@ -16,6 +16,7 @@
import logging
from flask import request
from api import settings
from api.db import StatusEnum
from api.db.services.dialog_service import DialogService
@ -23,11 +24,10 @@ from api.db.services.knowledgebase_service import KnowledgebaseService
from api.db.services.llm_service import TenantLLMService
from api.db.services.user_service import TenantService
from api.utils import get_uuid
from api.utils.api_utils import get_error_data_result, token_required, get_result, check_duplicate_ids
from api.utils.api_utils import check_duplicate_ids, get_error_data_result, get_result, token_required
@manager.route('/chats', methods=['POST']) # noqa: F821
@manager.route("/chats", methods=["POST"]) # noqa: F821
@token_required
def create(tenant_id):
req = request.json
@ -45,15 +45,16 @@ def create(tenant_id):
embd_ids = [TenantLLMService.split_model_name_and_factory(kb.embd_id)[0] for kb in kbs] # remove vendor suffix for comparison
embd_count = list(set(embd_ids))
if len(embd_count) > 1:
return get_result(message='Datasets use different embedding models."',
code=settings.RetCode.AUTHENTICATION_ERROR)
return get_result(message='Datasets use different embedding models."', code=settings.RetCode.AUTHENTICATION_ERROR)
req["kb_ids"] = ids
# llm
llm = req.get("llm")
if llm:
if "model_name" in llm:
req["llm_id"] = llm.pop("model_name")
if not TenantLLMService.query(tenant_id=tenant_id, llm_name=req["llm_id"], model_type="chat"):
if req.get("llm_id") is not None:
llm_name, llm_factory = TenantLLMService.split_model_name_and_factory(req["llm_id"])
if not TenantLLMService.query(tenant_id=tenant_id, llm_name=llm_name, llm_factory=llm_factory, model_type="chat"):
return get_error_data_result(f"`model_name` {req.get('llm_id')} doesn't exist")
req["llm_setting"] = req.pop("llm")
e, tenant = TenantService.get_by_id(tenant_id)
@ -61,12 +62,7 @@ def create(tenant_id):
return get_error_data_result(message="Tenant not found!")
# prompt
prompt = req.get("prompt")
key_mapping = {"parameters": "variables",
"prologue": "opener",
"quote": "show_quote",
"system": "prompt",
"rerank_id": "rerank_model",
"vector_similarity_weight": "keywords_similarity_weight"}
key_mapping = {"parameters": "variables", "prologue": "opener", "quote": "show_quote", "system": "prompt", "rerank_id": "rerank_model", "vector_similarity_weight": "keywords_similarity_weight"}
key_list = ["similarity_threshold", "vector_similarity_weight", "top_n", "rerank_id", "top_k"]
if prompt:
for new_key, old_key in key_mapping.items():
@ -85,9 +81,7 @@ def create(tenant_id):
req["rerank_id"] = req.get("rerank_id", "")
if req.get("rerank_id"):
value_rerank_model = ["BAAI/bge-reranker-v2-m3", "maidalun1020/bce-reranker-base_v1"]
if req["rerank_id"] not in value_rerank_model and not TenantLLMService.query(tenant_id=tenant_id,
llm_name=req.get("rerank_id"),
model_type="rerank"):
if req["rerank_id"] not in value_rerank_model and not TenantLLMService.query(tenant_id=tenant_id, llm_name=req.get("rerank_id"), model_type="rerank"):
return get_error_data_result(f"`rerank_model` {req.get('rerank_id')} doesn't exist")
if not req.get("llm_id"):
req["llm_id"] = tenant.llm_id
@ -106,27 +100,24 @@ def create(tenant_id):
{knowledge}
The above is the knowledge base.""",
"prologue": "Hi! I'm your assistant, what can I do for you?",
"parameters": [
{"key": "knowledge", "optional": False}
],
"parameters": [{"key": "knowledge", "optional": False}],
"empty_response": "Sorry! No relevant content was found in the knowledge base!",
"quote": True,
"tts": False,
"refine_multiturn": True
"refine_multiturn": True,
}
key_list_2 = ["system", "prologue", "parameters", "empty_response", "quote", "tts", "refine_multiturn"]
if "prompt_config" not in req:
req['prompt_config'] = {}
req["prompt_config"] = {}
for key in key_list_2:
temp = req['prompt_config'].get(key)
if (not temp and key == 'system') or (key not in req["prompt_config"]):
req['prompt_config'][key] = default_prompt[key]
for p in req['prompt_config']["parameters"]:
temp = req["prompt_config"].get(key)
if (not temp and key == "system") or (key not in req["prompt_config"]):
req["prompt_config"][key] = default_prompt[key]
for p in req["prompt_config"]["parameters"]:
if p["optional"]:
continue
if req['prompt_config']["system"].find("{%s}" % p["key"]) < 0:
return get_error_data_result(
message="Parameter '{}' is not used".format(p["key"]))
if req["prompt_config"]["system"].find("{%s}" % p["key"]) < 0:
return get_error_data_result(message="Parameter '{}' is not used".format(p["key"]))
# save
if not DialogService.save(**req):
return get_error_data_result(message="Fail to new a chat!")
@ -141,10 +132,7 @@ def create(tenant_id):
renamed_dict[new_key] = value
res["prompt"] = renamed_dict
del res["prompt_config"]
new_dict = {"similarity_threshold": res["similarity_threshold"],
"keywords_similarity_weight": 1-res["vector_similarity_weight"],
"top_n": res["top_n"],
"rerank_model": res['rerank_id']}
new_dict = {"similarity_threshold": res["similarity_threshold"], "keywords_similarity_weight": 1 - res["vector_similarity_weight"], "top_n": res["top_n"], "rerank_model": res["rerank_id"]}
res["prompt"].update(new_dict)
for key in key_list:
del res[key]
@ -156,11 +144,11 @@ def create(tenant_id):
return get_result(data=res)
@manager.route('/chats/<chat_id>', methods=['PUT']) # noqa: F821
@manager.route("/chats/<chat_id>", methods=["PUT"]) # noqa: F821
@token_required
def update(tenant_id, chat_id):
if not DialogService.query(tenant_id=tenant_id, id=chat_id, status=StatusEnum.VALID.value):
return get_error_data_result(message='You do not own the chat')
return get_error_data_result(message="You do not own the chat")
req = request.json
ids = req.get("dataset_ids")
if "show_quotation" in req:
@ -179,9 +167,7 @@ def update(tenant_id, chat_id):
embd_ids = [TenantLLMService.split_model_name_and_factory(kb.embd_id)[0] for kb in kbs] # remove vendor suffix for comparison
embd_count = list(set(embd_ids))
if len(embd_count) != 1:
return get_result(
message='Datasets use different embedding models."',
code=settings.RetCode.AUTHENTICATION_ERROR)
return get_result(message='Datasets use different embedding models."', code=settings.RetCode.AUTHENTICATION_ERROR)
req["kb_ids"] = ids
llm = req.get("llm")
if llm:
@ -195,12 +181,7 @@ def update(tenant_id, chat_id):
return get_error_data_result(message="Tenant not found!")
# prompt
prompt = req.get("prompt")
key_mapping = {"parameters": "variables",
"prologue": "opener",
"quote": "show_quote",
"system": "prompt",
"rerank_id": "rerank_model",
"vector_similarity_weight": "keywords_similarity_weight"}
key_mapping = {"parameters": "variables", "prologue": "opener", "quote": "show_quote", "system": "prompt", "rerank_id": "rerank_model", "vector_similarity_weight": "keywords_similarity_weight"}
key_list = ["similarity_threshold", "vector_similarity_weight", "top_n", "rerank_id", "top_k"]
if prompt:
for new_key, old_key in key_mapping.items():
@ -214,16 +195,12 @@ def update(tenant_id, chat_id):
res = res.to_json()
if req.get("rerank_id"):
value_rerank_model = ["BAAI/bge-reranker-v2-m3", "maidalun1020/bce-reranker-base_v1"]
if req["rerank_id"] not in value_rerank_model and not TenantLLMService.query(tenant_id=tenant_id,
llm_name=req.get("rerank_id"),
model_type="rerank"):
if req["rerank_id"] not in value_rerank_model and not TenantLLMService.query(tenant_id=tenant_id, llm_name=req.get("rerank_id"), model_type="rerank"):
return get_error_data_result(f"`rerank_model` {req.get('rerank_id')} doesn't exist")
if "name" in req:
if not req.get("name"):
return get_error_data_result(message="`name` cannot be empty.")
if req["name"].lower() != res["name"].lower() \
and len(
DialogService.query(name=req["name"], tenant_id=tenant_id, status=StatusEnum.VALID.value)) > 0:
if req["name"].lower() != res["name"].lower() and len(DialogService.query(name=req["name"], tenant_id=tenant_id, status=StatusEnum.VALID.value)) > 0:
return get_error_data_result(message="Duplicated chat name in updating chat.")
if "prompt_config" in req:
res["prompt_config"].update(req["prompt_config"])
@ -246,7 +223,7 @@ def update(tenant_id, chat_id):
return get_result()
@manager.route('/chats', methods=['DELETE']) # noqa: F821
@manager.route("/chats", methods=["DELETE"]) # noqa: F821
@token_required
def delete(tenant_id):
errors = []
@ -276,27 +253,20 @@ def delete(tenant_id):
if errors:
if success_count > 0:
return get_result(
data={"success_count": success_count, "errors": errors},
message=f"Partially deleted {success_count} chats with {len(errors)} errors"
)
return get_result(data={"success_count": success_count, "errors": errors}, message=f"Partially deleted {success_count} chats with {len(errors)} errors")
else:
return get_error_data_result(message="; ".join(errors))
if duplicate_messages:
if success_count > 0:
return get_result(
message=f"Partially deleted {success_count} chats with {len(duplicate_messages)} errors",
data={"success_count": success_count, "errors": duplicate_messages}
)
return get_result(message=f"Partially deleted {success_count} chats with {len(duplicate_messages)} errors", data={"success_count": success_count, "errors": duplicate_messages})
else:
return get_error_data_result(message=";".join(duplicate_messages))
return get_result()
@manager.route('/chats', methods=['GET']) # noqa: F821
@manager.route("/chats", methods=["GET"]) # noqa: F821
@token_required
def list_chat(tenant_id):
id = request.args.get("id")
@ -316,13 +286,15 @@ def list_chat(tenant_id):
if not chats:
return get_result(data=[])
list_assts = []
key_mapping = {"parameters": "variables",
key_mapping = {
"parameters": "variables",
"prologue": "opener",
"quote": "show_quote",
"system": "prompt",
"rerank_id": "rerank_model",
"vector_similarity_weight": "keywords_similarity_weight",
"do_refer": "show_quotation"}
"do_refer": "show_quotation",
}
key_list = ["similarity_threshold", "vector_similarity_weight", "top_n", "rerank_id"]
for res in chats:
renamed_dict = {}
@ -331,10 +303,7 @@ def list_chat(tenant_id):
renamed_dict[new_key] = value
res["prompt"] = renamed_dict
del res["prompt_config"]
new_dict = {"similarity_threshold": res["similarity_threshold"],
"keywords_similarity_weight": 1-res["vector_similarity_weight"],
"top_n": res["top_n"],
"rerank_model": res['rerank_id']}
new_dict = {"similarity_threshold": res["similarity_threshold"], "keywords_similarity_weight": 1 - res["vector_similarity_weight"], "top_n": res["top_n"], "rerank_model": res["rerank_id"]}
res["prompt"].update(new_dict)
for key in key_list:
del res[key]
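
Most hunks in this file are pure reformatting (multi-line dicts and calls collapsed onto single lines), so behaviour is unchanged. For readers unfamiliar with the `key_mapping` idiom used throughout, here is a small hedged sketch of what the rename does when the internal `prompt_config` is exposed through the API; the field names come from the diff, while the helper itself is a simplification.

```python
# Internal -> external field names, as used in the chat app above.
key_mapping = {
    "parameters": "variables",
    "prologue": "opener",
    "quote": "show_quote",
    "system": "prompt",
    "rerank_id": "rerank_model",
    "vector_similarity_weight": "keywords_similarity_weight",
}

def rename_prompt_config(prompt_config: dict) -> dict:
    """Return prompt_config with internal keys renamed for the public API response."""
    return {key_mapping.get(k, k): v for k, v in prompt_config.items()}

print(rename_prompt_config({"system": "You are ...", "prologue": "Hi!", "quote": True}))
# {'prompt': 'You are ...', 'opener': 'Hi!', 'show_quote': True}
```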

View File

@ -13,36 +13,37 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#
import logging
import json
import logging
import re
from datetime import datetime
from flask import request, session, redirect
from werkzeug.security import generate_password_hash, check_password_hash
from flask_login import login_required, current_user, login_user, logout_user
from flask import redirect, request, session
from flask_login import current_user, login_required, login_user, logout_user
from werkzeug.security import check_password_hash, generate_password_hash
from api import settings
from api.apps.auth import get_auth_client
from api.db import FileType, UserTenantRole
from api.db.db_models import TenantLLM
from api.db.services.llm_service import TenantLLMService, LLMService
from api.utils.api_utils import (
server_error_response,
validate_request,
get_data_error_result,
)
from api.db.services.file_service import FileService
from api.db.services.llm_service import LLMService, TenantLLMService
from api.db.services.user_service import TenantService, UserService, UserTenantService
from api.utils import (
get_uuid,
get_format_time,
decrypt,
download_img,
current_timestamp,
datetime_format,
decrypt,
download_img,
get_format_time,
get_uuid,
)
from api.utils.api_utils import (
construct_response,
get_data_error_result,
get_json_result,
server_error_response,
validate_request,
)
from api.db import UserTenantRole, FileType
from api import settings
from api.db.services.user_service import UserService, TenantService, UserTenantService
from api.db.services.file_service import FileService
from api.utils.api_utils import get_json_result, construct_response
from api.apps.auth import get_auth_client
@manager.route("/login", methods=["POST", "GET"]) # noqa: F821
@ -77,9 +78,7 @@ def login():
type: object
"""
if not request.json:
return get_json_result(
data=False, code=settings.RetCode.AUTHENTICATION_ERROR, message="Unauthorized!"
)
return get_json_result(data=False, code=settings.RetCode.AUTHENTICATION_ERROR, message="Unauthorized!")
email = request.json.get("email", "")
users = UserService.query(email=email)
@ -94,9 +93,7 @@ def login():
try:
password = decrypt(password)
except BaseException:
return get_json_result(
data=False, code=settings.RetCode.SERVER_ERROR, message="Fail to crypt password"
)
return get_json_result(data=False, code=settings.RetCode.SERVER_ERROR, message="Fail to crypt password")
user = UserService.query_user(email, password)
if user:
@ -124,19 +121,17 @@ def get_login_channels():
try:
channels = []
for channel, config in settings.OAUTH_CONFIG.items():
channels.append({
channels.append(
{
"channel": channel,
"display_name": config.get("display_name", channel.title()),
"icon": config.get("icon", "sso"),
})
}
)
return get_json_result(data=channels)
except Exception as e:
logging.exception(e)
return get_json_result(
data=[],
message=f"Load channels failure, error: {str(e)}",
code=settings.RetCode.EXCEPTION_ERROR
)
return get_json_result(data=[], message=f"Load channels failure, error: {str(e)}", code=settings.RetCode.EXCEPTION_ERROR)
@manager.route("/login/<channel>", methods=["GET"]) # noqa: F821
@ -434,9 +429,7 @@ def user_info_from_feishu(access_token):
"Content-Type": "application/json; charset=utf-8",
"Authorization": f"Bearer {access_token}",
}
res = requests.get(
"https://open.feishu.cn/open-apis/authen/v1/user_info", headers=headers
)
res = requests.get("https://open.feishu.cn/open-apis/authen/v1/user_info", headers=headers)
user_info = res.json()["data"]
user_info["email"] = None if user_info.get("email") == "" else user_info["email"]
return user_info
@ -446,17 +439,13 @@ def user_info_from_github(access_token):
import requests
headers = {"Accept": "application/json", "Authorization": f"token {access_token}"}
res = requests.get(
f"https://api.github.com/user?access_token={access_token}", headers=headers
)
res = requests.get(f"https://api.github.com/user?access_token={access_token}", headers=headers)
user_info = res.json()
email_info = requests.get(
f"https://api.github.com/user/emails?access_token={access_token}",
headers=headers,
).json()
user_info["email"] = next(
(email for email in email_info if email["primary"]), None
)["email"]
user_info["email"] = next((email for email in email_info if email["primary"]), None)["email"]
return user_info
@ -516,9 +505,7 @@ def setting_user():
request_data = request.json
if request_data.get("password"):
new_password = request_data.get("new_password")
if not check_password_hash(
current_user.password, decrypt(request_data["password"])
):
if not check_password_hash(current_user.password, decrypt(request_data["password"])):
return get_json_result(
data=False,
code=settings.RetCode.AUTHENTICATION_ERROR,
@ -549,9 +536,7 @@ def setting_user():
return get_json_result(data=True)
except Exception as e:
logging.exception(e)
return get_json_result(
data=False, message="Update failure!", code=settings.RetCode.EXCEPTION_ERROR
)
return get_json_result(data=False, message="Update failure!", code=settings.RetCode.EXCEPTION_ERROR)
@manager.route("/info", methods=["GET"]) # noqa: F821
@ -643,7 +628,21 @@ def user_register(user_id, user):
"model_type": llm.model_type,
"api_key": settings.API_KEY,
"api_base": settings.LLM_BASE_URL,
"max_tokens": llm.max_tokens if llm.max_tokens else 8192
"max_tokens": llm.max_tokens if llm.max_tokens else 8192,
}
)
if settings.LIGHTEN != 1:
for buildin_embedding_model in settings.BUILTIN_EMBEDDING_MODELS:
mdlnm, fid = TenantLLMService.split_model_name_and_factory(buildin_embedding_model)
tenant_llm.append(
{
"tenant_id": user_id,
"llm_factory": fid,
"llm_name": mdlnm,
"model_type": "embedding",
"api_key": "",
"api_base": "",
"max_tokens": 1024 if buildin_embedding_model == "BAAI/bge-large-zh-v1.5@BAAI" else 512,
}
)
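
The new block above registers the built-in embedding models per tenant by splitting identifiers such as `BAAI/bge-large-zh-v1.5@BAAI` into a model name and a factory. A standalone approximation of that split is sketched below; the real `TenantLLMService.split_model_name_and_factory` may handle more cases.

```python
def split_model_name_and_factory(model_id: str) -> tuple[str, str | None]:
    """Split 'model@Factory' into (model, factory); factory is None if absent."""
    name, sep, factory = model_id.rpartition("@")
    if not sep:  # no '@' in the identifier
        return model_id, None
    return name, factory

print(split_model_name_and_factory("BAAI/bge-large-zh-v1.5@BAAI"))
# ('BAAI/bge-large-zh-v1.5', 'BAAI')
print(split_model_name_and_factory("maidalun1020/bce-embedding-base_v1"))
# ('maidalun1020/bce-embedding-base_v1', None)
```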

View File

@ -302,7 +302,7 @@ def chat(dialog, messages, stream=True, **kwargs):
if "max_tokens" in gen_conf:
gen_conf["max_tokens"] = min(gen_conf["max_tokens"], max_tokens - used_token_count)
def repair_bad_citation_formats(answer: str, kbinfos: dict, idx: dict):
def repair_bad_citation_formats(answer: str, kbinfos: dict, idx: set):
max_index = len(kbinfos["chunks"])
def safe_add(i):
@ -327,8 +327,8 @@ def chat(dialog, messages, stream=True, **kwargs):
find_and_replace(r"\$\[(\d+)\]\$") # $[12]$
find_and_replace(r"\$\$(\d+)\${2,}") # $$12$$$$
find_and_replace(r"\$(\d+)\$") # $12$
find_and_replace(r"#(\d+)\$\$") # #12$$
find_and_replace(r"##(\d+)\$") # ##12$
find_and_replace(r"(#{2,})(\d+)(\${2,})", group_index=2) # 2+ # and 2+ $
find_and_replace(r"(#{2,})(\d+)(#{1,})", group_index=2) # 2+ # and 1+ #
find_and_replace(r"##(\d+)#{2,}") # ##12###
find_and_replace(r"【(\d+)】") # 【12】
find_and_replace(r"ref\s*(\d+)", flags=re.IGNORECASE) # ref12, ref 12, REF 12
@ -623,4 +623,3 @@ def ask(question, kb_ids, tenant_id):
answer = ans
yield {"answer": answer, "reference": {}}
yield decorate_answer(answer)
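
The widened patterns above (#7801) catch more malformed citation markers that a model may emit, such as `###12$$$` or `##12###`. Below is a small hedged demo of how such patterns can be normalised; only the regexes come from the diff, while the canonical `##n$$` target and the simplified helper are assumptions.

```python
import re

def repair_citations(answer: str, max_index: int) -> str:
    """Rewrite malformed citation markers into a canonical ##n$$ form."""
    def find_and_replace(pattern: str, group_index: int = 1, flags: int = 0) -> None:
        nonlocal answer
        def substitute(match: re.Match) -> str:
            i = int(match.group(group_index))
            # Drop citations that point past the retrieved chunks.
            return f" ##{i}$$ " if i < max_index else ""
        answer = re.sub(pattern, substitute, answer, flags=flags)

    find_and_replace(r"\$\[(\d+)\]\$")                         # $[12]$
    find_and_replace(r"(#{2,})(\d+)(\${2,})", group_index=2)   # ###12$$$
    find_and_replace(r"(#{2,})(\d+)(#{1,})", group_index=2)    # ##12###
    find_and_replace(r"【(\d+)】")                              # 【12】
    find_and_replace(r"ref\s*(\d+)", flags=re.IGNORECASE)      # ref12, REF 12
    return answer

print(repair_citations("see ###3$$$ and 【1】 and REF 2", max_index=5))
```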

View File

@ -81,7 +81,7 @@ def init_settings():
DATABASE = decrypt_database_config(name=DATABASE_TYPE)
LLM = get_base_config("user_default_llm", {})
LLM_DEFAULT_MODELS = LLM.get("default_models", {})
LLM_FACTORY = LLM.get("factory", "Tongyi-Qianwen")
LLM_FACTORY = LLM.get("factory")
LLM_BASE_URL = LLM.get("base_url")
try:
REGISTER_ENABLED = int(os.environ.get("REGISTER_ENABLED", "1"))
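
The change above (#7777) drops the hard-coded `"Tongyi-Qianwen"` fallback, so `LLM_FACTORY` is now `None` unless the `user_default_llm` section sets it explicitly. A minimal sketch of the effect, using a plain dict in place of the real `get_base_config` result; the config values shown are hypothetical.

```python
# Hypothetical user_default_llm section; real values come from service_conf.
llm_conf = {"default_models": {"chat_model": "deepseek-chat"}, "base_url": None}

LLM_DEFAULT_MODELS = llm_conf.get("default_models", {})
LLM_FACTORY = llm_conf.get("factory")   # None unless explicitly configured
LLM_BASE_URL = llm_conf.get("base_url")

print(LLM_FACTORY)  # None -> downstream code can fall back to built-in models (see #7774)
```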

View File

@ -567,7 +567,7 @@
{
"name": "Youdao",
"logo": "",
"tags": "LLM,TEXT EMBEDDING,SPEECH2TEXT,MODERATION",
"tags": "TEXT EMBEDDING",
"status": "1",
"llm": [
{
@ -755,7 +755,7 @@
{
"name": "BAAI",
"logo": "",
"tags": "TEXT EMBEDDING, TEXT RE-RANK",
"tags": "TEXT EMBEDDING",
"status": "1",
"llm": [
{
@ -996,7 +996,7 @@
"status": "1",
"llm": [
{
"llm_name": "gemini-2.5-flash-preview-04-17",
"llm_name": "gemini-2.5-flash-preview-05-20",
"tags": "LLM,CHAT,1024K,IMAGE2TEXT",
"max_tokens": 1048576,
"model_type": "image2text",
@ -1023,7 +1023,7 @@
"model_type": "image2text"
},
{
"llm_name": "gemini-2.5-pro-exp-03-25",
"llm_name": "gemini-2.5-pro-preview-05-06",
"tags": "LLM,IMAGE2TEXT,1024K",
"max_tokens": 1048576,
"model_type": "image2text"
@ -3133,6 +3133,20 @@
"tags": "LLM",
"status": "1",
"llm": [
{
"llm_name": "claude-opus-4-20250514",
"tags": "LLM,IMAGE2TEXT,200k",
"max_tokens": 204800,
"model_type": "image2text",
"is_tools": true
},
{
"llm_name": "claude-sonnet-4-20250514",
"tags": "LLM,IMAGE2TEXT,200k",
"max_tokens": 204800,
"model_type": "image2text",
"is_tools": true
},
{
"llm_name": "claude-3-7-sonnet-20250219",
"tags": "LLM,IMAGE2TEXT,200k",

View File

@ -91,13 +91,13 @@ REDIS_PASSWORD=infini_rag_flow
SVR_HTTP_PORT=9380
# The RAGFlow Docker image to download.
# Defaults to the v0.18.0-slim edition, which is the RAGFlow Docker image without embedding models.
RAGFLOW_IMAGE=infiniflow/ragflow:v0.18.0-slim
# Defaults to the v0.19.0-slim edition, which is the RAGFlow Docker image without embedding models.
RAGFLOW_IMAGE=infiniflow/ragflow:v0.19.0-slim
#
# To download the RAGFlow Docker image with embedding models, uncomment the following line instead:
# RAGFLOW_IMAGE=infiniflow/ragflow:v0.18.0
# RAGFLOW_IMAGE=infiniflow/ragflow:v0.19.0
#
# The Docker image of the v0.18.0 edition includes built-in embedding models:
# The Docker image of the v0.19.0 edition includes built-in embedding models:
# - BAAI/bge-large-zh-v1.5
# - maidalun1020/bce-embedding-base_v1
#
@ -169,6 +169,8 @@ REGISTER_ENABLED=1
# SANDBOX_BASE_NODEJS_IMAGE=infiniflow/sandbox-base-nodejs:latest
# SANDBOX_EXECUTOR_MANAGER_PORT=9385
# SANDBOX_ENABLE_SECCOMP=false
# SANDBOX_MAX_MEMORY=256m # b, k, m, g
# SANDBOX_TIMEOUT=10s # s, m, 1m30s
# Important: To enable sandbox, you must re-declare the compose profiles.
# 1. Comment out the COMPOSE_PROFILES line above.
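
The two new options above take human-readable size and duration strings (`256m`, `10s`, `1m30s`). A hedged sketch of how such values could be parsed on the consuming side is shown below; the helper names are assumptions, not part of the executor manager.

```python
import re

_SIZE = {"b": 1, "k": 1024, "m": 1024**2, "g": 1024**3}

def parse_memory(value: str) -> int:
    """'256m' -> 268435456 bytes."""
    match = re.fullmatch(r"(\d+)([bkmg])", value.strip().lower())
    if not match:
        raise ValueError(f"bad memory limit: {value!r}")
    return int(match.group(1)) * _SIZE[match.group(2)]

def parse_timeout(value: str) -> int:
    """'10s' -> 10 seconds, '1m30s' -> 90 seconds."""
    total = 0
    for amount, unit in re.findall(r"(\d+)([ms])", value.strip().lower()):
        total += int(amount) * (60 if unit == "m" else 1)
    return total

print(parse_memory("256m"), parse_timeout("1m30s"))  # 268435456 90
```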

View File

@ -78,8 +78,8 @@ The [.env](./.env) file contains important environment variables for Docker.
- `RAGFLOW-IMAGE`
The Docker image edition. Available editions:
- `infiniflow/ragflow:v0.18.0-slim` (default): The RAGFlow Docker image without embedding models.
- `infiniflow/ragflow:v0.18.0`: The RAGFlow Docker image with embedding models including:
- `infiniflow/ragflow:v0.19.0-slim` (default): The RAGFlow Docker image without embedding models.
- `infiniflow/ragflow:v0.19.0`: The RAGFlow Docker image with embedding models including:
- Built-in embedding models:
- `BAAI/bge-large-zh-v1.5`
- `maidalun1020/bce-embedding-base_v1`

View File

@ -124,6 +124,8 @@ services:
- SANDBOX_BASE_PYTHON_IMAGE=${SANDBOX_BASE_PYTHON_IMAGE:-infiniflow/sandbox-base-python:latest}
- SANDBOX_BASE_NODEJS_IMAGE=${SANDBOX_BASE_NODEJS_IMAGE:-infiniflow/sandbox-base-nodejs:latest}
- SANDBOX_ENABLE_SECCOMP=${SANDBOX_ENABLE_SECCOMP:-false}
- SANDBOX_MAX_MEMORY=${SANDBOX_MAX_MEMORY:-256m}
- SANDBOX_TIMEOUT=${SANDBOX_TIMEOUT:-10s}
healthcheck:
test: ["CMD", "curl", "http://localhost:9385/healthz"]
interval: 10s

View File

@ -99,8 +99,8 @@ RAGFlow utilizes MinIO as its object storage solution, leveraging its scalabilit
- `RAGFLOW-IMAGE`
The Docker image edition. Available editions:
- `infiniflow/ragflow:v0.18.0-slim` (default): The RAGFlow Docker image without embedding models.
- `infiniflow/ragflow:v0.18.0`: The RAGFlow Docker image with embedding models including:
- `infiniflow/ragflow:v0.19.0-slim` (default): The RAGFlow Docker image without embedding models.
- `infiniflow/ragflow:v0.19.0`: The RAGFlow Docker image with embedding models including:
- Built-in embedding models:
- `BAAI/bge-large-zh-v1.5`
- `maidalun1020/bce-embedding-base_v1`

View File

@ -77,7 +77,7 @@ After building the infiniflow/ragflow:nightly-slim image, you are ready to launc
1. Edit Docker Compose Configuration
Open the `docker/.env` file. Find the `RAGFLOW_IMAGE` setting and change the image reference from `infiniflow/ragflow:v0.18.0-slim` to `infiniflow/ragflow:nightly-slim` to use the pre-built image.
Open the `docker/.env` file. Find the `RAGFLOW_IMAGE` setting and change the image reference from `infiniflow/ragflow:v0.19.0-slim` to `infiniflow/ragflow:nightly-slim` to use the pre-built image.
2. Launch the Service

View File

@ -30,17 +30,17 @@ The "garbage in garbage out" status quo remains unchanged despite the fact that
Each RAGFlow release is available in two editions:
- **Slim edition**: excludes built-in embedding models and is identified by a **-slim** suffix added to the version name. Example: `infiniflow/ragflow:v0.18.0-slim`
- **Full edition**: includes built-in embedding models and has no suffix added to the version name. Example: `infiniflow/ragflow:v0.18.0`
- **Slim edition**: excludes built-in embedding models and is identified by a **-slim** suffix added to the version name. Example: `infiniflow/ragflow:v0.19.0-slim`
- **Full edition**: includes built-in embedding models and has no suffix added to the version name. Example: `infiniflow/ragflow:v0.19.0`
---
### Which embedding models can be deployed locally?
RAGFlow offers two Docker image editions, `v0.18.0-slim` and `v0.18.0`:
RAGFlow offers two Docker image editions, `v0.19.0-slim` and `v0.19.0`:
- `infiniflow/ragflow:v0.18.0-slim` (default): The RAGFlow Docker image without embedding models.
- `infiniflow/ragflow:v0.18.0`: The RAGFlow Docker image with embedding models including:
- `infiniflow/ragflow:v0.19.0-slim` (default): The RAGFlow Docker image without embedding models.
- `infiniflow/ragflow:v0.19.0`: The RAGFlow Docker image with embedding models including:
- Built-in embedding models:
- `BAAI/bge-large-zh-v1.5`
- `maidalun1020/bce-embedding-base_v1`

View File

@ -0,0 +1,50 @@
---
sidebar_position: 13
slug: /code_component
---
# Code component
A component that enables users to integrate Python or JavaScript code into their Agent for dynamic data processing.
---
## Scenarios
A **Code** component is essential when you need to integrate complex code logic (Python or JavaScript) into your Agent for dynamic data processing.
## Input variables
You can specify multiple input sources for the **Code** component. Click **+ Add variable** in the **Input variables** section to include the desired input variables.
After defining an input variable, you are required to select from the dropdown menu:
- A component ID under **Component Output**, or
- A global variable under **Begin input**, which is defined in the **Begin** component.
## Coding field
### A Python code example
```Python
def main(arg1: str, arg2: str) -> dict:
return {
"result": arg1 + arg2,
}
```
### A JavaScript code example
```JavaScript
const axios = require('axios');
async function main(args) {
try {
const response = await axios.get('https://github.com/infiniflow/ragflow');
console.log('Body:', response.data);
} catch (error) {
console.error('Error:', error.message);
}
}
```

View File

@ -1,5 +1,5 @@
---
sidebar_position: 13
sidebar_position: 18
slug: /note_component
---

View File

@ -42,9 +42,13 @@ You start an AI conversation by creating an assistant.
- **Rerank model** sets the reranker model to use. It is left empty by default.
- If **Rerank model** is left empty, the hybrid score system uses keyword similarity and vector similarity, and the default weight assigned to the vector similarity component is 1-0.7=0.3.
- If **Rerank model** is selected, the hybrid score system uses keyword similarity and reranker score, and the default weight assigned to the reranker score is 1-0.7=0.3.
- **Cross-language search**: Optional
Select one or more target languages from the dropdown menu. The system's default chat model will then translate your query into the selected target language(s). This translation ensures accurate semantic matching across languages, allowing you to retrieve relevant results regardless of language differences.
- When selecting target languages, please ensure that these languages are present in the knowledge base to guarantee an effective search.
- If no target language is selected, the system will search only in the language of your query, which may cause relevant information in other languages to be missed.
- **Variable** refers to the variables (keys) to be used in the system prompt. `{knowledge}` is a reserved variable. Click **Add** to add more variables for the system prompt.
- If you are uncertain about the logic behind **Variable**, leave it *as-is*.
- As of v0.18.0, if you add custom variables here, the only way you can pass in their values is to call:
- As of v0.19.0, if you add custom variables here, the only way you can pass in their values is to call:
- HTTP method [Converse with chat assistant](../../references/http_api_reference.md#converse-with-chat-assistant), or
- Python method [Converse with chat assistant](../../references/python_api_reference.md#converse-with-chat-assistant).

View File

@ -67,6 +67,10 @@ The following embedding models can be deployed locally:
- BAAI/bge-large-zh-v1.5
- maidalun1020/bce-embedding-base_v1
:::danger IMPORTANT
Please note these two embedding models support both English and Chinese. If your knowledge base contains other languages, the performance may be COMPROMISED.
:::
### Upload file
- RAGFlow's **File Management** allows you to link a file to multiple knowledge bases, in which case each target knowledge base holds a reference to the file.
@ -124,7 +128,7 @@ See [Run retrieval test](./run_retrieval_test.md) for details.
## Search for knowledge base
As of RAGFlow v0.18.0, the search feature is still in a rudimentary form, supporting only knowledge base search by name.
As of RAGFlow v0.19.0, the search feature is still in a rudimentary form, supporting only knowledge base search by name.
![search knowledge base](https://github.com/infiniflow/ragflow/assets/93570324/836ae94c-2438-42be-879e-c7ad2a59693e)

View File

@ -60,6 +60,15 @@ The switch is disabled by default. When enabled, RAGFlow performs the following
Using a knowledge graph in a retrieval test will significantly increase the time to receive a response.
:::
### Cross-language search
To perform a cross-language search, select one or more target languages from the dropdown menu. The system's default chat model will then translate your query entered in the Test text field into the selected target language(s). This translation ensures accurate semantic matching across languages, allowing you to retrieve relevant results regardless of language differences.
:::tip NOTE
- When selecting target languages, please ensure that these languages are present in the knowledge base to guarantee an effective search.
- If no target language is selected, the system will search only in the language of your query, which may cause relevant information in other languages to be missed.
:::
### Test text
This field is where you put in your testing query.

View File

@ -87,4 +87,4 @@ RAGFlow's file management allows you to download an uploaded file:
![download_file](https://github.com/infiniflow/ragflow/assets/93570324/cf3b297f-7d9b-4522-bf5f-4f45743e4ed5)
> As of RAGFlow v0.18.0, bulk download is not supported, nor can you download an entire folder.
> As of RAGFlow v0.19.0, bulk download is not supported, nor can you download an entire folder.

View File

@ -18,7 +18,7 @@ RAGFlow ships with a built-in [Langfuse](https://langfuse.com) integration so th
Langfuse stores traces, spans and prompt payloads in a purpose-built observability backend and offers filtering and visualisations on top.
:::info NOTE
• RAGFlow **≥ 0.18.0** (contains the Langfuse connector)
• RAGFlow **≥ 0.19.0** (contains the Langfuse connector)
• A Langfuse workspace (cloud or self-hosted) with a _Project Public Key_ and _Secret Key_
:::

View File

@ -66,16 +66,16 @@ To upgrade RAGFlow, you must upgrade **both** your code **and** your Docker imag
git clone https://github.com/infiniflow/ragflow.git
```
2. Switch to the latest, officially published release, e.g., `v0.18.0`:
2. Switch to the latest, officially published release, e.g., `v0.19.0`:
```bash
git checkout -f v0.18.0
git checkout -f v0.19.0
```
3. Update **ragflow/docker/.env** as follows:
```bash
RAGFLOW_IMAGE=infiniflow/ragflow:v0.18.0
RAGFLOW_IMAGE=infiniflow/ragflow:v0.19.0
```
4. Update the RAGFlow image and restart RAGFlow:
@ -92,10 +92,10 @@ To upgrade RAGFlow, you must upgrade **both** your code **and** your Docker imag
1. From an environment with Internet access, pull the required Docker image.
2. Save the Docker image to a **.tar** file.
```bash
docker save -o ragflow.v0.18.0.tar infiniflow/ragflow:v0.18.0
docker save -o ragflow.v0.19.0.tar infiniflow/ragflow:v0.19.0
```
3. Copy the **.tar** file to the target server.
4. Load the **.tar** file into Docker:
```bash
docker load -i ragflow.v0.18.0.tar
docker load -i ragflow.v0.19.0.tar
```

View File

@ -44,7 +44,7 @@ This section provides instructions on setting up the RAGFlow server on Linux. If
`vm.max_map_count`. This value sets the maximum number of memory map areas a process may have. Its default value is 65530. While most applications require fewer than a thousand maps, reducing this value can result in abnormal behaviors, and the system will throw out-of-memory errors when a process reaches the limitation.
RAGFlow v0.18.0 uses Elasticsearch or [Infinity](https://github.com/infiniflow/infinity) for multiple recall. Setting the value of `vm.max_map_count` correctly is crucial to the proper functioning of the Elasticsearch component.
RAGFlow v0.19.0 uses Elasticsearch or [Infinity](https://github.com/infiniflow/infinity) for multiple recall. Setting the value of `vm.max_map_count` correctly is crucial to the proper functioning of the Elasticsearch component.
<Tabs
defaultValue="linux"
@ -184,13 +184,13 @@ This section provides instructions on setting up the RAGFlow server on Linux. If
```bash
$ git clone https://github.com/infiniflow/ragflow.git
$ cd ragflow/docker
$ git checkout -f v0.18.0
$ git checkout -f v0.19.0
```
3. Use the pre-built Docker images and start up the server:
:::tip NOTE
The command below downloads the `v0.18.0-slim` edition of the RAGFlow Docker image. Refer to the following table for descriptions of different RAGFlow editions. To download a RAGFlow edition different from `v0.18.0-slim`, update the `RAGFLOW_IMAGE` variable accordingly in **docker/.env** before using `docker compose` to start the server. For example: set `RAGFLOW_IMAGE=infiniflow/ragflow:v0.18.0` for the full edition `v0.18.0`.
The command below downloads the `v0.19.0-slim` edition of the RAGFlow Docker image. Refer to the following table for descriptions of different RAGFlow editions. To download a RAGFlow edition different from `v0.19.0-slim`, update the `RAGFLOW_IMAGE` variable accordingly in **docker/.env** before using `docker compose` to start the server. For example: set `RAGFLOW_IMAGE=infiniflow/ragflow:v0.19.0` for the full edition `v0.19.0`.
:::
```bash
@ -205,10 +205,10 @@ This section provides instructions on setting up the RAGFlow server on Linux. If
<APITable>
```
| RAGFlow image tag | Image size (GB) | Has embedding models and Python packages? | Stable? |
| RAGFlow image tag | Image size (GB) | Has embedding models and Python packages? :collision: | Stable? |
| ------------------- | --------------- | ----------------------------------------- | ------------------------ |
| `v0.18.0` | &approx;9 | :heavy_check_mark: | Stable release |
| `v0.18.0-slim` | &approx;2 | ❌ | Stable release |
| `v0.19.0` | &approx;9 | :heavy_check_mark: | Stable release |
| `v0.19.0-slim` | &approx;2 | ❌ | Stable release |
| `nightly` | &approx;9 | :heavy_check_mark: | *Unstable* nightly build |
| `nightly-slim` | &approx;2 | ❌ | *Unstable* nightly build |
@ -216,6 +216,15 @@ This section provides instructions on setting up the RAGFlow server on Linux. If
</APITable>
```
:::danger IMPORTANT
:collision: The embedding models included in `v0.19.0` and `nightly` are:
- BAAI/bge-large-zh-v1.5
- maidalun1020/bce-embedding-base_v1
Please note these two embedding models support both English and Chinese. If your knowledge base contains other languages, the performance may be COMPROMISED.
:::
4. Check the server status after having the server up and running:
```bash

View File

@ -0,0 +1,26 @@
---
sidebar_position: 0
slug: /glossary
---
# Glossary
Definitions of key terms and basic concepts related to RAGFlow.
---
import TOCInline from '@theme/TOCInline';
<TOCInline toc={toc} />
---
## C
### Cross-language search
Cross-language search (also known as cross-lingual retrieval) is a feature introduced in version 0.19.0. It enables users to submit queries in one language (for example, English) and retrieve relevant documents written in other languages such as Chinese or Spanish. This feature is enabled by the system's default chat model, which translates queries to ensure accurate matching of semantic meaning across languages.
By enabling cross-language search, users can effortlessly access a broader range of information regardless of language barriers, significantly enhancing the system's usability and inclusiveness.
This feature is available in the retrieval test and chat assistant settings. See [Run retrieval test](../guides/dataset/run_retrieval_test.md) and [Start AI chat](../guides/chat/start_chat.md) for further details.
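
To make the mechanism concrete, here is a hedged sketch of the query-translation idea described above: translate the query into each selected target language with the default chat model, search once per language, and merge the hits. The `chat_model` and `retriever` objects are assumed duck-typed interfaces for illustration, not RAGFlow's internal API.

```python
def cross_language_search(query: str, target_languages: list[str], chat_model, retriever) -> list[dict]:
    """Translate the query per target language, retrieve for each, and merge results."""
    queries = [query]  # always search in the original language as well
    for lang in target_languages:
        prompt = f"Translate the following query into {lang}. Reply with the translation only:\n{query}"
        queries.append(chat_model.complete(prompt).strip())

    seen, merged = set(), []
    for q in queries:
        for chunk in retriever.search(q):
            if chunk["id"] not in seen:  # de-duplicate across languages
                seen.add(chunk["id"])
                merged.append(chunk)
    return sorted(merged, key=lambda c: c["score"], reverse=True)
```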

View File

@ -1,5 +1,5 @@
---
sidebar_position: 1
sidebar_position: 4
slug: /http_api_reference
---

View File

@ -1,5 +1,5 @@
---
sidebar_position: 2
sidebar_position: 5
slug: /python_api_reference
---

View File

@ -1,5 +1,5 @@
---
sidebar_position: 0
sidebar_position: 1
slug: /supported_models
---

View File

@ -9,10 +9,36 @@ Key features, improvements and bug fixes in the latest releases.
:::info
Each RAGFlow release is available in two editions:
- **Slim edition**: excludes built-in embedding models and is identified by a **-slim** suffix added to the version name. Example: `infiniflow/ragflow:v0.18.0-slim`
- **Full edition**: includes built-in embedding models and has no suffix added to the version name. Example: `infiniflow/ragflow:v0.18.0`
- **Slim edition**: excludes built-in embedding models and is identified by a **-slim** suffix added to the version name. Example: `infiniflow/ragflow:v0.19.0-slim`
- **Full edition**: includes built-in embedding models and has no suffix added to the version name. Example: `infiniflow/ragflow:v0.19.0`
:::
:::danger IMPORTANT
:collision: The embedding models included in a full edition are:
- BAAI/bge-large-zh-v1.5
- maidalun1020/bce-embedding-base_v1
Please note these two embedding models support both English and Chinese. If your knowledge base contains other languages, the performance may be COMPROMISED.
:::
## v0.19.0
Released on May 26, 2025.
### New features
- Cross-language search is supported in the Knowledge and Chat modules, enhancing search accuracy and user experience in multilingual environments, such as in Chinese-English knowledge bases.
- Agent component: A new Code component supports Python and JavaScript scripts, enabling developers to handle more complex tasks like dynamic data processing.
- Enhanced image display: Images in Chat and Search now render directly within responses, rather than as external references. Knowledge retrieval testing can retrieve images directly, instead of text extracted from images.
- Claude 4 and ChatGPT o3: Developers can now use the newly released, most advanced Claude model alongside OpenAI's latest o3 reasoning model.
> The following features are contributed by our community contributors:
- Agent component: Enables tool calling within the Generate Component. Thanks to [notsyncing](https://github.com/notsyncing).
- Markdown rendering: Image references in a markdown file can be displayed after chunking. Thanks to [Woody-Hu](https://github.com/Woody-Hu).
- Document engine support: OpenSearch can now be used as RAGFlow's document engine. Thanks to [pyyuhao](https://github.com/pyyuhao).
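Below is a minimal sketch of a Python script for the new Code component, as promised in the feature list above. It assumes the component calls a `main()` entry point, passes upstream component outputs in as arguments, and reads the returned dictionary as its output; the entry-point name, argument names, and return shape are assumptions for illustration rather than the definitive contract.
```python
# A minimal sketch of a Code component script, assuming a main() entry point
# that receives upstream outputs as arguments and returns a dict of results.
def main(text: str) -> dict:
    # Example of dynamic data processing: normalize the input and report counts.
    words = text.lower().split()
    return {
        "word_count": len(words),
        "unique_word_count": len(set(words)),
    }
```
Downstream components could then reference the returned keys (for example, `word_count`), assuming the component exposes its return values as variables.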
## v0.18.0
Released on April 23, 2025.

View File

@ -27,13 +27,13 @@ env:
REDIS_PASSWORD: infini_rag_flow_helm
# The RAGFlow Docker image to download.
# Defaults to the v0.18.0-slim edition, which is the RAGFlow Docker image without embedding models.
RAGFLOW_IMAGE: infiniflow/ragflow:v0.18.0-slim
# Defaults to the v0.19.0-slim edition, which is the RAGFlow Docker image without embedding models.
RAGFLOW_IMAGE: infiniflow/ragflow:v0.19.0-slim
#
# To download the RAGFlow Docker image with embedding models, uncomment the following line instead:
# RAGFLOW_IMAGE: infiniflow/ragflow:v0.18.0
# RAGFLOW_IMAGE: infiniflow/ragflow:v0.19.0
#
# The Docker image of the v0.18.0 edition includes:
# The Docker image of the v0.19.0 edition includes:
# - Built-in embedding models:
# - BAAI/bge-large-zh-v1.5
# - BAAI/bge-reranker-v2-m3

View File

@ -1,6 +1,6 @@
[project]
name = "ragflow"
version = "0.18.0"
version = "0.19.0"
description = "[RAGFlow](https://ragflow.io/) is an open-source RAG (Retrieval-Augmented Generation) engine based on deep document understanding. It offers a streamlined RAG workflow for businesses of any scale, combining LLM (Large Language Models) to provide truthful question-answering capabilities, backed by well-founded citations from various complex formatted data."
authors = [{ name = "Zhichang Yu", email = "yuzhichang@gmail.com" }]
license-files = ["LICENSE"]

View File

@ -1,6 +1,6 @@
[project]
name = "ragflow-sdk"
version = "0.18.0"
version = "0.19.0"
description = "Python client sdk of [RAGFlow](https://github.com/infiniflow/ragflow). RAGFlow is an open-source RAG (Retrieval-Augmented Generation) engine based on deep document understanding."
authors = [{ name = "Zhichang Yu", email = "yuzhichang@gmail.com" }]
license = { text = "Apache License, Version 2.0" }

View File

@ -20,7 +20,9 @@ import pytest
import requests
HOST_ADDRESS = os.getenv("HOST_ADDRESS", "http://127.0.0.1:9380")
ZHIPU_AI_API_KEY = os.getenv("ZHIPU_AI_API_KEY", "ca148e43209c40109e2bc2f56281dd11.BltyA2N1B043B7Ra")
if ZHIPU_AI_API_KEY is None:
pytest.exit("Error: Environment variable ZHIPU_AI_API_KEY must be set")
# def generate_random_email():
# return 'user_' + ''.join(random.choices(string.ascii_lowercase + string.digits, k=8))+'@1.com'
@ -87,3 +89,64 @@ def get_auth():
@pytest.fixture(scope="session")
def get_email():
return EMAIL
def get_my_llms(auth, name):
url = HOST_ADDRESS + "/v1/llm/my_llms"
authorization = {"Authorization": auth}
response = requests.get(url=url, headers=authorization)
res = response.json()
if res.get("code") != 0:
raise Exception(res.get("message"))
if name in res.get("data"):
return True
return False
def add_models(auth):
url = HOST_ADDRESS + "/v1/llm/set_api_key"
authorization = {"Authorization": auth}
models_info = {
"ZHIPU-AI": {"llm_factory": "ZHIPU-AI", "api_key": ZHIPU_AI_API_KEY},
}
for name, model_info in models_info.items():
if not get_my_llms(auth, name):
response = requests.post(url=url, headers=authorization, json=model_info)
res = response.json()
if res.get("code") != 0:
pytest.exit(f"Critical error in add_models: {res.get('message')}")
def get_tenant_info(auth):
url = HOST_ADDRESS + "/v1/user/tenant_info"
authorization = {"Authorization": auth}
response = requests.get(url=url, headers=authorization)
res = response.json()
if res.get("code") != 0:
raise Exception(res.get("message"))
return res["data"].get("tenant_id")
@pytest.fixture(scope="session", autouse=True)
def set_tenant_info(get_auth):
auth = get_auth
try:
add_models(auth)
tenant_id = get_tenant_info(auth)
except Exception as e:
pytest.exit(f"Error in set_tenant_info: {str(e)}")
url = HOST_ADDRESS + "/v1/user/set_tenant_info"
authorization = {"Authorization": get_auth}
tenant_info = {
"tenant_id": tenant_id,
"llm_id": "glm-4-flash@ZHIPU-AI",
"embd_id": "BAAI/bge-large-zh-v1.5@BAAI",
"img2txt_id": "glm-4v@ZHIPU-AI",
"asr_id": "",
"tts_id": None,
}
response = requests.post(url=url, headers=authorization, json=tenant_info)
res = response.json()
if res.get("code") != 0:
raise Exception(res.get("message"))

View File

@ -16,7 +16,6 @@
import os
import pytest
import requests
from common import (
add_chunk,
batch_create_datasets,
@ -49,9 +48,6 @@ MARKER_EXPRESSIONS = {
"p3": "p1 or p2 or p3",
}
HOST_ADDRESS = os.getenv("HOST_ADDRESS", "http://127.0.0.1:9380")
ZHIPU_AI_API_KEY = os.getenv("ZHIPU_AI_API_KEY", "ca148e43209c40109e2bc2f56281dd11.BltyA2N1B043B7Ra")
if ZHIPU_AI_API_KEY is None:
pytest.exit("Error: Environment variable ZHIPU_AI_API_KEY must be set")
def pytest_addoption(parser: pytest.Parser) -> None:
@ -85,67 +81,6 @@ def get_http_api_auth(get_api_key_fixture):
return RAGFlowHttpApiAuth(get_api_key_fixture)
def get_my_llms(auth, name):
url = HOST_ADDRESS + "/v1/llm/my_llms"
authorization = {"Authorization": auth}
response = requests.get(url=url, headers=authorization)
res = response.json()
if res.get("code") != 0:
raise Exception(res.get("message"))
if name in res.get("data"):
return True
return False
def add_models(auth):
url = HOST_ADDRESS + "/v1/llm/set_api_key"
authorization = {"Authorization": auth}
models_info = {
"ZHIPU-AI": {"llm_factory": "ZHIPU-AI", "api_key": ZHIPU_AI_API_KEY},
}
for name, model_info in models_info.items():
if not get_my_llms(auth, name):
response = requests.post(url=url, headers=authorization, json=model_info)
res = response.json()
if res.get("code") != 0:
pytest.exit(f"Critical error in add_models: {res.get('message')}")
def get_tenant_info(auth):
url = HOST_ADDRESS + "/v1/user/tenant_info"
authorization = {"Authorization": auth}
response = requests.get(url=url, headers=authorization)
res = response.json()
if res.get("code") != 0:
raise Exception(res.get("message"))
return res["data"].get("tenant_id")
@pytest.fixture(scope="session", autouse=True)
def set_tenant_info(get_auth):
auth = get_auth
try:
add_models(auth)
tenant_id = get_tenant_info(auth)
except Exception as e:
pytest.exit(f"Error in set_tenant_info: {str(e)}")
url = HOST_ADDRESS + "/v1/user/set_tenant_info"
authorization = {"Authorization": get_auth}
tenant_info = {
"tenant_id": tenant_id,
"llm_id": "glm-4-flash@ZHIPU-AI",
"embd_id": "BAAI/bge-large-zh-v1.5@BAAI",
"img2txt_id": "glm-4v@ZHIPU-AI",
"asr_id": "",
"tts_id": None,
}
response = requests.post(url=url, headers=authorization, json=tenant_info)
res = response.json()
if res.get("code") != 0:
raise Exception(res.get("message"))
@pytest.fixture(scope="function")
def clear_datasets(request, get_http_api_auth):
def cleanup():

View File

@ -14,8 +14,9 @@
# limitations under the License.
#
from ragflow_sdk import RAGFlow
from common import HOST_ADDRESS
from ragflow_sdk import RAGFlow
from ragflow_sdk.modules.chat import Chat
def test_create_chat_with_name(get_api_key_fixture):
@ -31,7 +32,18 @@ def test_create_chat_with_name(get_api_key_fixture):
docs = kb.upload_documents(documents)
for doc in docs:
doc.add_chunk("This is a test to add chunk")
rag.create_chat("test_create_chat", dataset_ids=[kb.id])
llm = Chat.LLM(
rag,
{
"model_name": "glm-4-flash@ZHIPU-AI",
"temperature": 0.1,
"top_p": 0.3,
"presence_penalty": 0.4,
"frequency_penalty": 0.7,
"max_tokens": 512,
},
)
rag.create_chat("test_create_chat", dataset_ids=[kb.id], llm=llm)
def test_update_chat_with_name(get_api_key_fixture):
@ -47,7 +59,18 @@ def test_update_chat_with_name(get_api_key_fixture):
docs = kb.upload_documents(documents)
for doc in docs:
doc.add_chunk("This is a test to add chunk")
chat = rag.create_chat("test_update_chat", dataset_ids=[kb.id])
llm = Chat.LLM(
rag,
{
"model_name": "glm-4-flash@ZHIPU-AI",
"temperature": 0.1,
"top_p": 0.3,
"presence_penalty": 0.4,
"frequency_penalty": 0.7,
"max_tokens": 512,
},
)
chat = rag.create_chat("test_update_chat", dataset_ids=[kb.id], llm=llm)
chat.update({"name": "new_chat"})
@ -64,7 +87,18 @@ def test_delete_chats_with_success(get_api_key_fixture):
docs = kb.upload_documents(documents)
for doc in docs:
doc.add_chunk("This is a test to add chunk")
chat = rag.create_chat("test_delete_chat", dataset_ids=[kb.id])
llm = Chat.LLM(
rag,
{
"model_name": "glm-4-flash@ZHIPU-AI",
"temperature": 0.1,
"top_p": 0.3,
"presence_penalty": 0.4,
"frequency_penalty": 0.7,
"max_tokens": 512,
},
)
chat = rag.create_chat("test_delete_chat", dataset_ids=[kb.id], llm=llm)
rag.delete_chats(ids=[chat.id])
@ -81,6 +115,17 @@ def test_list_chats_with_success(get_api_key_fixture):
docs = kb.upload_documents(documents)
for doc in docs:
doc.add_chunk("This is a test to add chunk")
rag.create_chat("test_list_1", dataset_ids=[kb.id])
rag.create_chat("test_list_2", dataset_ids=[kb.id])
llm = Chat.LLM(
rag,
{
"model_name": "glm-4-flash@ZHIPU-AI",
"temperature": 0.1,
"top_p": 0.3,
"presence_penalty": 0.4,
"frequency_penalty": 0.7,
"max_tokens": 512,
},
)
rag.create_chat("test_list_1", dataset_ids=[kb.id], llm=llm)
rag.create_chat("test_list_2", dataset_ids=[kb.id], llm=llm)
rag.list_chats()

sdk/python/uv.lock generated
View File

@ -342,7 +342,7 @@ wheels = [
[[package]]
name = "ragflow-sdk"
version = "0.18.0"
version = "0.19.0"
source = { virtual = "." }
dependencies = [
{ name = "beartype" },

uv.lock generated
View File

@ -4813,7 +4813,7 @@ wheels = [
[[package]]
name = "ragflow"
version = "0.18.0"
version = "0.19.0"
source = { virtual = "." }
dependencies = [
{ name = "akshare" },

View File

@ -5,7 +5,8 @@ import { DynamicInputVariable } from './dynamic-input-variable';
import { CodeTemplateStrMap, ProgrammingLanguage } from '@/constants/agent';
import { ICodeForm } from '@/interfaces/database/flow';
import { useEffect } from 'react';
import { useCallback } from 'react';
import useGraphStore from '../../store';
import styles from './index.less';
loader.config({ paths: { vs: '/vs' } });
@ -17,16 +18,20 @@ const options = [
const CodeForm = ({ onValuesChange, form, node }: IOperatorForm) => {
const formData = node?.data.form as ICodeForm;
const updateNodeForm = useGraphStore((state) => state.updateNodeForm);
useEffect(() => {
setTimeout(() => {
// TODO: Direct operation zustand is more elegant
form?.setFieldValue(
'script',
CodeTemplateStrMap[formData.lang as ProgrammingLanguage],
const handleChange = useCallback(
(value: ProgrammingLanguage) => {
if (node?.id) {
updateNodeForm(
node?.id,
CodeTemplateStrMap[value as ProgrammingLanguage],
['script'],
);
}
},
[node?.id, updateNodeForm],
);
}, 0);
}, [form, formData.lang]);
return (
<Form
@ -42,9 +47,10 @@ const CodeForm = ({ onValuesChange, form, node }: IOperatorForm) => {
label={
<Form.Item name={'lang'} className={styles.languageItem}>
<Select
defaultValue={'python'}
defaultValue={ProgrammingLanguage.Python}
popupMatchSelectWidth={false}
options={options}
onChange={handleChange}
/>
</Form.Item>
}