Mirror of https://github.com/infiniflow/ragflow.git (synced 2026-01-04 03:25:30 +08:00)

Compare commits: 40 commits, ff2c70608d...main
| SHA1 |
|---|
| 1f4a17863f |
| 4d3a3a97ef |
| ff1020ccfb |
| ca3bd2cf9f |
| eb661c028d |
| 10c28c5ecd |
| 96810b7d97 |
| 365f9b01ae |
| 7d4d687dde |
| 6a664fea3b |
| dcdc1b0ec7 |
| 4af4c36e60 |
| 05e5244d94 |
| c2ee2bf7fe |
| 461c81e14a |
| 675d18d359 |
| 750335978c |
| ae7c623a35 |
| f24bdc0f83 |
| 07ef35b7e6 |
| 7c9823a1ff |
| a0c3bcf798 |
| 1a4a7d1705 |
| f141947085 |
| a07e947644 |
| ae4692a845 |
| 7dac269429 |
| ec5575dce2 |
| 6fee60e110 |
| 52f91c2388 |
| 348265afc1 |
| a7e466142d |
| 2fccf3924d |
| 4705d07e11 |
| 68be3b9a3d |
| e2d17d808b |
| 95edbd43ba |
| b96d553cd8 |
| bffdb5fb11 |
| 109e782493 |
.github/workflows/release.yml (vendored): 22 lines changed
@@ -10,6 +10,12 @@ on:
  tags:
    - "v*.*.*" # normal release

permissions:
  contents: write
  actions: read
  checks: read
  statuses: read

# https://docs.github.com/en/actions/using-jobs/using-concurrency
concurrency:
  group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
@@ -76,6 +82,14 @@ jobs:
          # The body field does not support environment variable substitution directly.
          body_path: release_body.md

      - name: Build and push image
        run: |
          sudo docker login --username infiniflow --password-stdin <<< ${{ secrets.DOCKERHUB_TOKEN }}
          sudo docker build --build-arg NEED_MIRROR=1 --build-arg HTTPS_PROXY=${HTTPS_PROXY} --build-arg HTTP_PROXY=${HTTP_PROXY} -t infiniflow/ragflow:${RELEASE_TAG} -f Dockerfile .
          sudo docker tag infiniflow/ragflow:${RELEASE_TAG} infiniflow/ragflow:latest
          sudo docker push infiniflow/ragflow:${RELEASE_TAG}
          sudo docker push infiniflow/ragflow:latest

      - name: Build and push ragflow-sdk
        if: startsWith(github.ref, 'refs/tags/v')
        run: |
@@ -85,11 +99,3 @@ jobs:
        if: startsWith(github.ref, 'refs/tags/v')
        run: |
          cd admin/client && uv build && uv publish --token ${{ secrets.PYPI_API_TOKEN }}

      - name: Build and push image
        run: |
          sudo docker login --username infiniflow --password-stdin <<< ${{ secrets.DOCKERHUB_TOKEN }}
          sudo docker build --build-arg NEED_MIRROR=1 --build-arg HTTPS_PROXY=${HTTPS_PROXY} --build-arg HTTP_PROXY=${HTTP_PROXY} -t infiniflow/ragflow:${RELEASE_TAG} -f Dockerfile .
          sudo docker tag infiniflow/ragflow:${RELEASE_TAG} infiniflow/ragflow:latest
          sudo docker push infiniflow/ragflow:${RELEASE_TAG}
          sudo docker push infiniflow/ragflow:latest
@@ -19,17 +19,17 @@ RUN --mount=type=bind,from=infiniflow/ragflow_deps:latest,source=/huggingface.co
# This is the only way to run python-tika without internet access. Without this set, the default is to check the tika version and pull latest every time from Apache.
RUN --mount=type=bind,from=infiniflow/ragflow_deps:latest,source=/,target=/deps \
    cp -r /deps/nltk_data /root/ && \
    cp /deps/tika-server-standard-3.0.0.jar /deps/tika-server-standard-3.0.0.jar.md5 /ragflow/ && \
    cp /deps/tika-server-standard-3.2.3.jar /deps/tika-server-standard-3.2.3.jar.md5 /ragflow/ && \
    cp /deps/cl100k_base.tiktoken /ragflow/9b5ad71b2ce5302211f9c61530b329a4922fc6a4

ENV TIKA_SERVER_JAR="file:///ragflow/tika-server-standard-3.0.0.jar"
ENV TIKA_SERVER_JAR="file:///ragflow/tika-server-standard-3.2.3.jar"
ENV DEBIAN_FRONTEND=noninteractive

# Setup apt
# Python package and implicit dependencies:
# opencv-python: libglib2.0-0 libglx-mesa0 libgl1
# aspose-slides: pkg-config libicu-dev libgdiplus libssl1.1_1.1.1f-1ubuntu2_amd64.deb
# python-pptx: default-jdk tika-server-standard-3.0.0.jar
# python-pptx: default-jdk tika-server-standard-3.2.3.jar
# selenium: libatk-bridge2.0-0 chrome-linux64-121-0-6167-85
# Building C extensions: libpython3-dev libgtk-4-1 libnss3 xdg-utils libgbm-dev
RUN --mount=type=cache,id=ragflow_apt,target=/var/cache/apt,sharing=locked \

@@ -3,7 +3,7 @@
FROM scratch

# Copy resources downloaded via download_deps.py
COPY chromedriver-linux64-121-0-6167-85 chrome-linux64-121-0-6167-85 cl100k_base.tiktoken libssl1.1_1.1.1f-1ubuntu2_amd64.deb libssl1.1_1.1.1f-1ubuntu2_arm64.deb tika-server-standard-3.0.0.jar tika-server-standard-3.0.0.jar.md5 libssl*.deb uv-x86_64-unknown-linux-gnu.tar.gz /
COPY chromedriver-linux64-121-0-6167-85 chrome-linux64-121-0-6167-85 cl100k_base.tiktoken libssl1.1_1.1.1f-1ubuntu2_amd64.deb libssl1.1_1.1.1f-1ubuntu2_arm64.deb tika-server-standard-3.2.3.jar tika-server-standard-3.2.3.jar.md5 libssl*.deb uv-x86_64-unknown-linux-gnu.tar.gz /

COPY nltk_data /nltk_data
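The jar bump above matters at runtime because python-tika resolves the local server jar from the `TIKA_SERVER_JAR` environment variable that this Dockerfile exports. A minimal sketch of that behaviour, assuming the `tika` package and a local JDK are installed; the input file name is hypothetical:

```python
import os

# Must be set before the first `tika` import; inside the image the Dockerfile already
# exports this, so the client never tries to pull a server jar from Apache.
os.environ.setdefault("TIKA_SERVER_JAR", "file:///ragflow/tika-server-standard-3.2.3.jar")

from tika import parser  # noqa: E402

# Hypothetical input document; parser.from_file() starts the local Tika server on demand.
parsed = parser.from_file("sample.pptx")
print(parsed["metadata"].get("Content-Type"))
print((parsed["content"] or "")[:200])
```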
README.md: 16 lines changed
@ -22,7 +22,7 @@
|
||||
<img alt="Static Badge" src="https://img.shields.io/badge/Online-Demo-4e6b99">
|
||||
</a>
|
||||
<a href="https://hub.docker.com/r/infiniflow/ragflow" target="_blank">
|
||||
<img src="https://img.shields.io/docker/pulls/infiniflow/ragflow?label=Docker%20Pulls&color=0db7ed&logo=docker&logoColor=white&style=flat-square" alt="docker pull infiniflow/ragflow:v0.23.0">
|
||||
<img src="https://img.shields.io/docker/pulls/infiniflow/ragflow?label=Docker%20Pulls&color=0db7ed&logo=docker&logoColor=white&style=flat-square" alt="docker pull infiniflow/ragflow:v0.23.1">
|
||||
</a>
|
||||
<a href="https://github.com/infiniflow/ragflow/releases/latest">
|
||||
<img src="https://img.shields.io/github/v/release/infiniflow/ragflow?color=blue&label=Latest%20Release" alt="Latest Release">
|
||||
@ -37,7 +37,7 @@
|
||||
|
||||
<h4 align="center">
|
||||
<a href="https://ragflow.io/docs/dev/">Document</a> |
|
||||
<a href="https://github.com/infiniflow/ragflow/issues/4214">Roadmap</a> |
|
||||
<a href="https://github.com/infiniflow/ragflow/issues/12241">Roadmap</a> |
|
||||
<a href="https://twitter.com/infiniflowai">Twitter</a> |
|
||||
<a href="https://discord.gg/NjYzJD3GM3">Discord</a> |
|
||||
<a href="https://demo.ragflow.io">Demo</a>
|
||||
@ -72,7 +72,7 @@
|
||||
|
||||
## 💡 What is RAGFlow?
|
||||
|
||||
[RAGFlow](https://ragflow.io/) is a leading open-source Retrieval-Augmented Generation (RAG) engine that fuses cutting-edge RAG with Agent capabilities to create a superior context layer for LLMs. It offers a streamlined RAG workflow adaptable to enterprises of any scale. Powered by a converged context engine and pre-built agent templates, RAGFlow enables developers to transform complex data into high-fidelity, production-ready AI systems with exceptional efficiency and precision.
|
||||
[RAGFlow](https://ragflow.io/) is a leading open-source Retrieval-Augmented Generation ([RAG](https://ragflow.io/basics/what-is-rag)) engine that fuses cutting-edge RAG with Agent capabilities to create a superior context layer for LLMs. It offers a streamlined RAG workflow adaptable to enterprises of any scale. Powered by a converged [context engine](https://ragflow.io/basics/what-is-agent-context-engine) and pre-built agent templates, RAGFlow enables developers to transform complex data into high-fidelity, production-ready AI systems with exceptional efficiency and precision.
|
||||
|
||||
## 🎮 Demo
|
||||
|
||||
@ -188,15 +188,15 @@ releases! 🌟
|
||||
> All Docker images are built for x86 platforms. We don't currently offer Docker images for ARM64.
|
||||
> If you are on an ARM64 platform, follow [this guide](https://ragflow.io/docs/dev/build_docker_image) to build a Docker image compatible with your system.
|
||||
|
||||
> The command below downloads the `v0.23.0` edition of the RAGFlow Docker image. See the following table for descriptions of different RAGFlow editions. To download a RAGFlow edition different from `v0.23.0`, update the `RAGFLOW_IMAGE` variable accordingly in **docker/.env** before using `docker compose` to start the server.
|
||||
> The command below downloads the `v0.23.1` edition of the RAGFlow Docker image. See the following table for descriptions of different RAGFlow editions. To download a RAGFlow edition different from `v0.23.1`, update the `RAGFLOW_IMAGE` variable accordingly in **docker/.env** before using `docker compose` to start the server.
|
||||
|
||||
```bash
|
||||
$ cd ragflow/docker
|
||||
|
||||
# git checkout v0.23.0
|
||||
|
||||
# git checkout v0.23.1
|
||||
# Optional: use a stable tag (see releases: https://github.com/infiniflow/ragflow/releases)
|
||||
# This step ensures the **entrypoint.sh** file in the code matches the Docker image version.
|
||||
|
||||
|
||||
# Use CPU for DeepDoc tasks:
|
||||
$ docker compose -f docker-compose.yml up -d
|
||||
|
||||
@ -396,7 +396,7 @@ docker build --platform linux/amd64 \
|
||||
|
||||
## 📜 Roadmap
|
||||
|
||||
See the [RAGFlow Roadmap 2025](https://github.com/infiniflow/ragflow/issues/4214)
|
||||
See the [RAGFlow Roadmap 2026](https://github.com/infiniflow/ragflow/issues/12241)
|
||||
|
||||
## 🏄 Community
|
||||
|
||||
|
||||
README_id.md: 14 lines changed
@ -22,7 +22,7 @@
|
||||
<img alt="Lencana Daring" src="https://img.shields.io/badge/Online-Demo-4e6b99">
|
||||
</a>
|
||||
<a href="https://hub.docker.com/r/infiniflow/ragflow" target="_blank">
|
||||
<img src="https://img.shields.io/docker/pulls/infiniflow/ragflow?label=Docker%20Pulls&color=0db7ed&logo=docker&logoColor=white&style=flat-square" alt="docker pull infiniflow/ragflow:v0.23.0">
|
||||
<img src="https://img.shields.io/docker/pulls/infiniflow/ragflow?label=Docker%20Pulls&color=0db7ed&logo=docker&logoColor=white&style=flat-square" alt="docker pull infiniflow/ragflow:v0.23.1">
|
||||
</a>
|
||||
<a href="https://github.com/infiniflow/ragflow/releases/latest">
|
||||
<img src="https://img.shields.io/github/v/release/infiniflow/ragflow?color=blue&label=Rilis%20Terbaru" alt="Rilis Terbaru">
|
||||
@ -37,7 +37,7 @@
|
||||
|
||||
<h4 align="center">
|
||||
<a href="https://ragflow.io/docs/dev/">Dokumentasi</a> |
|
||||
<a href="https://github.com/infiniflow/ragflow/issues/4214">Peta Jalan</a> |
|
||||
<a href="https://github.com/infiniflow/ragflow/issues/12241">Peta Jalan</a> |
|
||||
<a href="https://twitter.com/infiniflowai">Twitter</a> |
|
||||
<a href="https://discord.gg/NjYzJD3GM3">Discord</a> |
|
||||
<a href="https://demo.ragflow.io">Demo</a>
|
||||
@ -72,7 +72,7 @@
|
||||
|
||||
## 💡 Apa Itu RAGFlow?
|
||||
|
||||
[RAGFlow](https://ragflow.io/) adalah mesin RAG (Retrieval-Augmented Generation) open-source terkemuka yang mengintegrasikan teknologi RAG mutakhir dengan kemampuan Agent untuk menciptakan lapisan kontekstual superior bagi LLM. Menyediakan alur kerja RAG yang efisien dan dapat diadaptasi untuk perusahaan segala skala. Didukung oleh mesin konteks terkonvergensi dan template Agent yang telah dipra-bangun, RAGFlow memungkinkan pengembang mengubah data kompleks menjadi sistem AI kesetiaan-tinggi dan siap-produksi dengan efisiensi dan presisi yang luar biasa.
|
||||
[RAGFlow](https://ragflow.io/) adalah mesin [RAG](https://ragflow.io/basics/what-is-rag) (Retrieval-Augmented Generation) open-source terkemuka yang mengintegrasikan teknologi RAG mutakhir dengan kemampuan Agent untuk menciptakan lapisan kontekstual superior bagi LLM. Menyediakan alur kerja RAG yang efisien dan dapat diadaptasi untuk perusahaan segala skala. Didukung oleh mesin konteks terkonvergensi dan template Agent yang telah dipra-bangun, RAGFlow memungkinkan pengembang mengubah data kompleks menjadi sistem AI kesetiaan-tinggi dan siap-produksi dengan efisiensi dan presisi yang luar biasa.
|
||||
|
||||
## 🎮 Demo
|
||||
|
||||
@ -188,12 +188,12 @@ Coba demo kami di [https://demo.ragflow.io](https://demo.ragflow.io).
|
||||
> Semua gambar Docker dibangun untuk platform x86. Saat ini, kami tidak menawarkan gambar Docker untuk ARM64.
|
||||
> Jika Anda menggunakan platform ARM64, [silakan gunakan panduan ini untuk membangun gambar Docker yang kompatibel dengan sistem Anda](https://ragflow.io/docs/dev/build_docker_image).
|
||||
|
||||
> Perintah di bawah ini mengunduh edisi v0.23.0 dari gambar Docker RAGFlow. Silakan merujuk ke tabel berikut untuk deskripsi berbagai edisi RAGFlow. Untuk mengunduh edisi RAGFlow yang berbeda dari v0.23.0, perbarui variabel RAGFLOW_IMAGE di docker/.env sebelum menggunakan docker compose untuk memulai server.
|
||||
> Perintah di bawah ini mengunduh edisi v0.23.1 dari gambar Docker RAGFlow. Silakan merujuk ke tabel berikut untuk deskripsi berbagai edisi RAGFlow. Untuk mengunduh edisi RAGFlow yang berbeda dari v0.23.1, perbarui variabel RAGFLOW_IMAGE di docker/.env sebelum menggunakan docker compose untuk memulai server.
|
||||
|
||||
```bash
|
||||
$ cd ragflow/docker
|
||||
|
||||
# git checkout v0.23.0
|
||||
|
||||
# git checkout v0.23.1
|
||||
# Opsional: gunakan tag stabil (lihat releases: https://github.com/infiniflow/ragflow/releases)
|
||||
# This steps ensures the **entrypoint.sh** file in the code matches the Docker image version.
|
||||
|
||||
@ -368,7 +368,7 @@ docker build --platform linux/amd64 \
|
||||
|
||||
## 📜 Roadmap
|
||||
|
||||
Lihat [Roadmap RAGFlow 2025](https://github.com/infiniflow/ragflow/issues/4214)
|
||||
Lihat [Roadmap RAGFlow 2026](https://github.com/infiniflow/ragflow/issues/12241)
|
||||
|
||||
## 🏄 Komunitas
|
||||
|
||||
|
||||
README_ja.md: 16 lines changed
@ -22,7 +22,7 @@
|
||||
<img alt="Static Badge" src="https://img.shields.io/badge/Online-Demo-4e6b99">
|
||||
</a>
|
||||
<a href="https://hub.docker.com/r/infiniflow/ragflow" target="_blank">
|
||||
<img src="https://img.shields.io/docker/pulls/infiniflow/ragflow?label=Docker%20Pulls&color=0db7ed&logo=docker&logoColor=white&style=flat-square" alt="docker pull infiniflow/ragflow:v0.23.0">
|
||||
<img src="https://img.shields.io/docker/pulls/infiniflow/ragflow?label=Docker%20Pulls&color=0db7ed&logo=docker&logoColor=white&style=flat-square" alt="docker pull infiniflow/ragflow:v0.23.1">
|
||||
</a>
|
||||
<a href="https://github.com/infiniflow/ragflow/releases/latest">
|
||||
<img src="https://img.shields.io/github/v/release/infiniflow/ragflow?color=blue&label=Latest%20Release" alt="Latest Release">
|
||||
@ -37,7 +37,7 @@
|
||||
|
||||
<h4 align="center">
|
||||
<a href="https://ragflow.io/docs/dev/">Document</a> |
|
||||
<a href="https://github.com/infiniflow/ragflow/issues/4214">Roadmap</a> |
|
||||
<a href="https://github.com/infiniflow/ragflow/issues/12241">Roadmap</a> |
|
||||
<a href="https://twitter.com/infiniflowai">Twitter</a> |
|
||||
<a href="https://discord.gg/NjYzJD3GM3">Discord</a> |
|
||||
<a href="https://demo.ragflow.io">Demo</a>
|
||||
@ -53,7 +53,7 @@
|
||||
|
||||
## 💡 RAGFlow とは?
|
||||
|
||||
[RAGFlow](https://ragflow.io/) は、先進的なRAG(Retrieval-Augmented Generation)技術と Agent 機能を融合し、大規模言語モデル(LLM)に優れたコンテキスト層を構築する最先端のオープンソース RAG エンジンです。あらゆる規模の企業に対応可能な合理化された RAG ワークフローを提供し、統合型コンテキストエンジンと事前構築されたAgentテンプレートにより、開発者が複雑なデータを驚異的な効率性と精度で高精細なプロダクションレディAIシステムへ変換することを可能にします。
|
||||
[RAGFlow](https://ragflow.io/) は、先進的な[RAG](https://ragflow.io/basics/what-is-rag)(Retrieval-Augmented Generation)技術と Agent 機能を融合し、大規模言語モデル(LLM)に優れたコンテキスト層を構築する最先端のオープンソース RAG エンジンです。あらゆる規模の企業に対応可能な合理化された RAG ワークフローを提供し、統合型[コンテキストエンジン](https://ragflow.io/basics/what-is-agent-context-engine)と事前構築されたAgentテンプレートにより、開発者が複雑なデータを驚異的な効率性と精度で高精細なプロダクションレディAIシステムへ変換することを可能にします。
|
||||
|
||||
## 🎮 Demo
|
||||
|
||||
@ -168,12 +168,12 @@
|
||||
> 現在、公式に提供されているすべての Docker イメージは x86 アーキテクチャ向けにビルドされており、ARM64 用の Docker イメージは提供されていません。
|
||||
> ARM64 アーキテクチャのオペレーティングシステムを使用している場合は、[このドキュメント](https://ragflow.io/docs/dev/build_docker_image)を参照して Docker イメージを自分でビルドしてください。
|
||||
|
||||
> 以下のコマンドは、RAGFlow Docker イメージの v0.23.0 エディションをダウンロードします。異なる RAGFlow エディションの説明については、以下の表を参照してください。v0.23.0 とは異なるエディションをダウンロードするには、docker/.env ファイルの RAGFLOW_IMAGE 変数を適宜更新し、docker compose を使用してサーバーを起動してください。
|
||||
> 以下のコマンドは、RAGFlow Docker イメージの v0.23.1 エディションをダウンロードします。異なる RAGFlow エディションの説明については、以下の表を参照してください。v0.23.1 とは異なるエディションをダウンロードするには、docker/.env ファイルの RAGFLOW_IMAGE 変数を適宜更新し、docker compose を使用してサーバーを起動してください。
|
||||
|
||||
```bash
|
||||
$ cd ragflow/docker
|
||||
|
||||
# git checkout v0.23.0
|
||||
# git checkout v0.23.1
|
||||
# 任意: 安定版タグを利用 (一覧: https://github.com/infiniflow/ragflow/releases)
|
||||
# この手順は、コード内の entrypoint.sh ファイルが Docker イメージのバージョンと一致していることを確認します。
|
||||
|
||||
@ -194,8 +194,8 @@
|
||||
|
||||
> `v0.22.0` 以降、当プロジェクトでは slim エディションのみを提供し、イメージタグに **-slim** サフィックスを付けなくなりました。
|
||||
|
||||
1. サーバーを立ち上げた後、サーバーの状態を確認する:
|
||||
|
||||
1. サーバーを立ち上げた後、サーバーの状態を確認する:
|
||||
|
||||
```bash
|
||||
$ docker logs -f docker-ragflow-cpu-1
|
||||
```
|
||||
@ -368,7 +368,7 @@ docker build --platform linux/amd64 \
|
||||
|
||||
## 📜 ロードマップ
|
||||
|
||||
[RAGFlow ロードマップ 2025](https://github.com/infiniflow/ragflow/issues/4214) を参照
|
||||
[RAGFlow ロードマップ 2026](https://github.com/infiniflow/ragflow/issues/12241) を参照
|
||||
|
||||
## 🏄 コミュニティ
|
||||
|
||||
|
||||
README_ko.md: 14 lines changed
@ -22,7 +22,7 @@
|
||||
<img alt="Static Badge" src="https://img.shields.io/badge/Online-Demo-4e6b99">
|
||||
</a>
|
||||
<a href="https://hub.docker.com/r/infiniflow/ragflow" target="_blank">
|
||||
<img src="https://img.shields.io/docker/pulls/infiniflow/ragflow?label=Docker%20Pulls&color=0db7ed&logo=docker&logoColor=white&style=flat-square" alt="docker pull infiniflow/ragflow:v0.23.0">
|
||||
<img src="https://img.shields.io/docker/pulls/infiniflow/ragflow?label=Docker%20Pulls&color=0db7ed&logo=docker&logoColor=white&style=flat-square" alt="docker pull infiniflow/ragflow:v0.23.1">
|
||||
</a>
|
||||
<a href="https://github.com/infiniflow/ragflow/releases/latest">
|
||||
<img src="https://img.shields.io/github/v/release/infiniflow/ragflow?color=blue&label=Latest%20Release" alt="Latest Release">
|
||||
@ -37,7 +37,7 @@
|
||||
|
||||
<h4 align="center">
|
||||
<a href="https://ragflow.io/docs/dev/">Document</a> |
|
||||
<a href="https://github.com/infiniflow/ragflow/issues/4214">Roadmap</a> |
|
||||
<a href="https://github.com/infiniflow/ragflow/issues/12241">Roadmap</a> |
|
||||
<a href="https://twitter.com/infiniflowai">Twitter</a> |
|
||||
<a href="https://discord.gg/NjYzJD3GM3">Discord</a> |
|
||||
<a href="https://demo.ragflow.io">Demo</a>
|
||||
@ -54,7 +54,7 @@
|
||||
|
||||
## 💡 RAGFlow란?
|
||||
|
||||
[RAGFlow](https://ragflow.io/) 는 최첨단 RAG(Retrieval-Augmented Generation)와 Agent 기능을 융합하여 대규모 언어 모델(LLM)을 위한 우수한 컨텍스트 계층을 생성하는 선도적인 오픈소스 RAG 엔진입니다. 모든 규모의 기업에 적용 가능한 효율적인 RAG 워크플로를 제공하며, 통합 컨텍스트 엔진과 사전 구축된 Agent 템플릿을 통해 개발자들이 복잡한 데이터를 예외적인 효율성과 정밀도로 고급 구현도의 프로덕션 준비 완료 AI 시스템으로 변환할 수 있도록 지원합니다.
|
||||
[RAGFlow](https://ragflow.io/) 는 최첨단 [RAG](https://ragflow.io/basics/what-is-rag)(Retrieval-Augmented Generation)와 Agent 기능을 융합하여 대규모 언어 모델(LLM)을 위한 우수한 컨텍스트 계층을 생성하는 선도적인 오픈소스 RAG 엔진입니다. 모든 규모의 기업에 적용 가능한 효율적인 RAG 워크플로를 제공하며, 통합 [컨텍스트 엔진](https://ragflow.io/basics/what-is-agent-context-engine)과 사전 구축된 Agent 템플릿을 통해 개발자들이 복잡한 데이터를 예외적인 효율성과 정밀도로 고급 구현도의 프로덕션 준비 완료 AI 시스템으로 변환할 수 있도록 지원합니다.
|
||||
|
||||
## 🎮 데모
|
||||
|
||||
@ -170,12 +170,12 @@
|
||||
> 모든 Docker 이미지는 x86 플랫폼을 위해 빌드되었습니다. 우리는 현재 ARM64 플랫폼을 위한 Docker 이미지를 제공하지 않습니다.
|
||||
> ARM64 플랫폼을 사용 중이라면, [시스템과 호환되는 Docker 이미지를 빌드하려면 이 가이드를 사용해 주세요](https://ragflow.io/docs/dev/build_docker_image).
|
||||
|
||||
> 아래 명령어는 RAGFlow Docker 이미지의 v0.23.0 버전을 다운로드합니다. 다양한 RAGFlow 버전에 대한 설명은 다음 표를 참조하십시오. v0.23.0과 다른 RAGFlow 버전을 다운로드하려면, docker/.env 파일에서 RAGFLOW_IMAGE 변수를 적절히 업데이트한 후 docker compose를 사용하여 서버를 시작하십시오.
|
||||
> 아래 명령어는 RAGFlow Docker 이미지의 v0.23.1 버전을 다운로드합니다. 다양한 RAGFlow 버전에 대한 설명은 다음 표를 참조하십시오. v0.23.1과 다른 RAGFlow 버전을 다운로드하려면, docker/.env 파일에서 RAGFLOW_IMAGE 변수를 적절히 업데이트한 후 docker compose를 사용하여 서버를 시작하십시오.
|
||||
|
||||
```bash
|
||||
$ cd ragflow/docker
|
||||
|
||||
# git checkout v0.23.0
|
||||
|
||||
# git checkout v0.23.1
|
||||
# Optional: use a stable tag (see releases: https://github.com/infiniflow/ragflow/releases)
|
||||
# 이 단계는 코드의 entrypoint.sh 파일이 Docker 이미지 버전과 일치하도록 보장합니다.
|
||||
|
||||
@ -372,7 +372,7 @@ docker build --platform linux/amd64 \
|
||||
|
||||
## 📜 로드맵
|
||||
|
||||
[RAGFlow 로드맵 2025](https://github.com/infiniflow/ragflow/issues/4214)을 확인하세요.
|
||||
[RAGFlow 로드맵 2026](https://github.com/infiniflow/ragflow/issues/12241)을 확인하세요.
|
||||
|
||||
## 🏄 커뮤니티
|
||||
|
||||
|
||||
@ -22,7 +22,7 @@
|
||||
<img alt="Badge Estático" src="https://img.shields.io/badge/Online-Demo-4e6b99">
|
||||
</a>
|
||||
<a href="https://hub.docker.com/r/infiniflow/ragflow" target="_blank">
|
||||
<img src="https://img.shields.io/docker/pulls/infiniflow/ragflow?label=Docker%20Pulls&color=0db7ed&logo=docker&logoColor=white&style=flat-square" alt="docker pull infiniflow/ragflow:v0.23.0">
|
||||
<img src="https://img.shields.io/docker/pulls/infiniflow/ragflow?label=Docker%20Pulls&color=0db7ed&logo=docker&logoColor=white&style=flat-square" alt="docker pull infiniflow/ragflow:v0.23.1">
|
||||
</a>
|
||||
<a href="https://github.com/infiniflow/ragflow/releases/latest">
|
||||
<img src="https://img.shields.io/github/v/release/infiniflow/ragflow?color=blue&label=Última%20Relese" alt="Última Versão">
|
||||
@ -37,7 +37,7 @@
|
||||
|
||||
<h4 align="center">
|
||||
<a href="https://ragflow.io/docs/dev/">Documentação</a> |
|
||||
<a href="https://github.com/infiniflow/ragflow/issues/4214">Roadmap</a> |
|
||||
<a href="https://github.com/infiniflow/ragflow/issues/12241">Roadmap</a> |
|
||||
<a href="https://twitter.com/infiniflowai">Twitter</a> |
|
||||
<a href="https://discord.gg/NjYzJD3GM3">Discord</a> |
|
||||
<a href="https://demo.ragflow.io">Demo</a>
|
||||
@ -73,7 +73,7 @@
|
||||
|
||||
## 💡 O que é o RAGFlow?
|
||||
|
||||
[RAGFlow](https://ragflow.io/) é um mecanismo de RAG (Retrieval-Augmented Generation) open-source líder que fusiona tecnologias RAG de ponta com funcionalidades Agent para criar uma camada contextual superior para LLMs. Oferece um fluxo de trabalho RAG otimizado adaptável a empresas de qualquer escala. Alimentado por um motor de contexto convergente e modelos Agent pré-construídos, o RAGFlow permite que desenvolvedores transformem dados complexos em sistemas de IA de alta fidelidade e pronto para produção com excepcional eficiência e precisão.
|
||||
[RAGFlow](https://ragflow.io/) é um mecanismo de [RAG](https://ragflow.io/basics/what-is-rag) (Retrieval-Augmented Generation) open-source líder que fusiona tecnologias RAG de ponta com funcionalidades Agent para criar uma camada contextual superior para LLMs. Oferece um fluxo de trabalho RAG otimizado adaptável a empresas de qualquer escala. Alimentado por [um motor de contexto](https://ragflow.io/basics/what-is-agent-context-engine) convergente e modelos Agent pré-construídos, o RAGFlow permite que desenvolvedores transformem dados complexos em sistemas de IA de alta fidelidade e pronto para produção com excepcional eficiência e precisão.
|
||||
|
||||
## 🎮 Demo
|
||||
|
||||
@ -188,12 +188,12 @@ Experimente nossa demo em [https://demo.ragflow.io](https://demo.ragflow.io).
|
||||
> Todas as imagens Docker são construídas para plataformas x86. Atualmente, não oferecemos imagens Docker para ARM64.
|
||||
> Se você estiver usando uma plataforma ARM64, por favor, utilize [este guia](https://ragflow.io/docs/dev/build_docker_image) para construir uma imagem Docker compatível com o seu sistema.
|
||||
|
||||
> O comando abaixo baixa a edição`v0.23.0` da imagem Docker do RAGFlow. Consulte a tabela a seguir para descrições de diferentes edições do RAGFlow. Para baixar uma edição do RAGFlow diferente da `v0.23.0`, atualize a variável `RAGFLOW_IMAGE` conforme necessário no **docker/.env** antes de usar `docker compose` para iniciar o servidor.
|
||||
> O comando abaixo baixa a edição`v0.23.1` da imagem Docker do RAGFlow. Consulte a tabela a seguir para descrições de diferentes edições do RAGFlow. Para baixar uma edição do RAGFlow diferente da `v0.23.1`, atualize a variável `RAGFLOW_IMAGE` conforme necessário no **docker/.env** antes de usar `docker compose` para iniciar o servidor.
|
||||
|
||||
```bash
|
||||
$ cd ragflow/docker
|
||||
|
||||
# git checkout v0.23.0
|
||||
|
||||
# git checkout v0.23.1
|
||||
# Opcional: use uma tag estável (veja releases: https://github.com/infiniflow/ragflow/releases)
|
||||
# Esta etapa garante que o arquivo entrypoint.sh no código corresponda à versão da imagem do Docker.
|
||||
|
||||
@ -385,7 +385,7 @@ docker build --platform linux/amd64 \
|
||||
|
||||
## 📜 Roadmap
|
||||
|
||||
Veja o [RAGFlow Roadmap 2025](https://github.com/infiniflow/ragflow/issues/4214)
|
||||
Veja o [RAGFlow Roadmap 2026](https://github.com/infiniflow/ragflow/issues/12241)
|
||||
|
||||
## 🏄 Comunidade
|
||||
|
||||
|
||||
@ -22,7 +22,7 @@
|
||||
<img alt="Static Badge" src="https://img.shields.io/badge/Online-Demo-4e6b99">
|
||||
</a>
|
||||
<a href="https://hub.docker.com/r/infiniflow/ragflow" target="_blank">
|
||||
<img src="https://img.shields.io/docker/pulls/infiniflow/ragflow?label=Docker%20Pulls&color=0db7ed&logo=docker&logoColor=white&style=flat-square" alt="docker pull infiniflow/ragflow:v0.23.0">
|
||||
<img src="https://img.shields.io/docker/pulls/infiniflow/ragflow?label=Docker%20Pulls&color=0db7ed&logo=docker&logoColor=white&style=flat-square" alt="docker pull infiniflow/ragflow:v0.23.1">
|
||||
</a>
|
||||
<a href="https://github.com/infiniflow/ragflow/releases/latest">
|
||||
<img src="https://img.shields.io/github/v/release/infiniflow/ragflow?color=blue&label=Latest%20Release" alt="Latest Release">
|
||||
@ -37,7 +37,7 @@
|
||||
|
||||
<h4 align="center">
|
||||
<a href="https://ragflow.io/docs/dev/">Document</a> |
|
||||
<a href="https://github.com/infiniflow/ragflow/issues/4214">Roadmap</a> |
|
||||
<a href="https://github.com/infiniflow/ragflow/issues/12241">Roadmap</a> |
|
||||
<a href="https://twitter.com/infiniflowai">Twitter</a> |
|
||||
<a href="https://discord.gg/NjYzJD3GM3">Discord</a> |
|
||||
<a href="https://demo.ragflow.io">Demo</a>
|
||||
@ -72,7 +72,7 @@
|
||||
|
||||
## 💡 RAGFlow 是什麼?
|
||||
|
||||
[RAGFlow](https://ragflow.io/) 是一款領先的開源 RAG(Retrieval-Augmented Generation)引擎,通過融合前沿的 RAG 技術與 Agent 能力,為大型語言模型提供卓越的上下文層。它提供可適配任意規模企業的端到端 RAG 工作流,憑藉融合式上下文引擎與預置的 Agent 模板,助力開發者以極致效率與精度將複雜數據轉化為高可信、生產級的人工智能系統。
|
||||
[RAGFlow](https://ragflow.io/) 是一款領先的開源 [RAG](https://ragflow.io/basics/what-is-rag)(Retrieval-Augmented Generation)引擎,通過融合前沿的 RAG 技術與 Agent 能力,為大型語言模型提供卓越的上下文層。它提供可適配任意規模企業的端到端 RAG 工作流,憑藉融合式[上下文引擎](https://ragflow.io/basics/what-is-agent-context-engine)與預置的 Agent 模板,助力開發者以極致效率與精度將複雜數據轉化為高可信、生產級的人工智能系統。
|
||||
|
||||
## 🎮 Demo 試用
|
||||
|
||||
@ -187,12 +187,12 @@
|
||||
> 所有 Docker 映像檔都是為 x86 平台建置的。目前,我們不提供 ARM64 平台的 Docker 映像檔。
|
||||
> 如果您使用的是 ARM64 平台,請使用 [這份指南](https://ragflow.io/docs/dev/build_docker_image) 來建置適合您系統的 Docker 映像檔。
|
||||
|
||||
> 執行以下指令會自動下載 RAGFlow Docker 映像 `v0.23.0`。請參考下表查看不同 Docker 發行版的說明。如需下載不同於 `v0.23.0` 的 Docker 映像,請在執行 `docker compose` 啟動服務之前先更新 **docker/.env** 檔案內的 `RAGFLOW_IMAGE` 變數。
|
||||
> 執行以下指令會自動下載 RAGFlow Docker 映像 `v0.23.1`。請參考下表查看不同 Docker 發行版的說明。如需下載不同於 `v0.23.1` 的 Docker 映像,請在執行 `docker compose` 啟動服務之前先更新 **docker/.env** 檔案內的 `RAGFLOW_IMAGE` 變數。
|
||||
|
||||
```bash
|
||||
$ cd ragflow/docker
|
||||
|
||||
# git checkout v0.23.0
|
||||
|
||||
# git checkout v0.23.1
|
||||
# 可選:使用穩定版標籤(查看發佈:https://github.com/infiniflow/ragflow/releases)
|
||||
# 此步驟確保程式碼中的 entrypoint.sh 檔案與 Docker 映像版本一致。
|
||||
|
||||
@ -399,7 +399,7 @@ docker build --platform linux/amd64 \
|
||||
|
||||
## 📜 路線圖
|
||||
|
||||
詳見 [RAGFlow Roadmap 2025](https://github.com/infiniflow/ragflow/issues/4214) 。
|
||||
詳見 [RAGFlow Roadmap 2026](https://github.com/infiniflow/ragflow/issues/12241) 。
|
||||
|
||||
## 🏄 開源社群
|
||||
|
||||
|
||||
README_zh.md: 16 lines changed
@ -22,7 +22,7 @@
|
||||
<img alt="Static Badge" src="https://img.shields.io/badge/Online-Demo-4e6b99">
|
||||
</a>
|
||||
<a href="https://hub.docker.com/r/infiniflow/ragflow" target="_blank">
|
||||
<img src="https://img.shields.io/docker/pulls/infiniflow/ragflow?label=Docker%20Pulls&color=0db7ed&logo=docker&logoColor=white&style=flat-square" alt="docker pull infiniflow/ragflow:v0.23.0">
|
||||
<img src="https://img.shields.io/docker/pulls/infiniflow/ragflow?label=Docker%20Pulls&color=0db7ed&logo=docker&logoColor=white&style=flat-square" alt="docker pull infiniflow/ragflow:v0.23.1">
|
||||
</a>
|
||||
<a href="https://github.com/infiniflow/ragflow/releases/latest">
|
||||
<img src="https://img.shields.io/github/v/release/infiniflow/ragflow?color=blue&label=Latest%20Release" alt="Latest Release">
|
||||
@ -37,7 +37,7 @@
|
||||
|
||||
<h4 align="center">
|
||||
<a href="https://ragflow.io/docs/dev/">Document</a> |
|
||||
<a href="https://github.com/infiniflow/ragflow/issues/4214">Roadmap</a> |
|
||||
<a href="https://github.com/infiniflow/ragflow/issues/12241">Roadmap</a> |
|
||||
<a href="https://twitter.com/infiniflowai">Twitter</a> |
|
||||
<a href="https://discord.gg/NjYzJD3GM3">Discord</a> |
|
||||
<a href="https://demo.ragflow.io">Demo</a>
|
||||
@ -72,7 +72,7 @@
|
||||
|
||||
## 💡 RAGFlow 是什么?
|
||||
|
||||
[RAGFlow](https://ragflow.io/) 是一款领先的开源检索增强生成(RAG)引擎,通过融合前沿的 RAG 技术与 Agent 能力,为大型语言模型提供卓越的上下文层。它提供可适配任意规模企业的端到端 RAG 工作流,凭借融合式上下文引擎与预置的 Agent 模板,助力开发者以极致效率与精度将复杂数据转化为高可信、生产级的人工智能系统。
|
||||
[RAGFlow](https://ragflow.io/) 是一款领先的开源检索增强生成([RAG](https://ragflow.io/basics/what-is-rag))引擎,通过融合前沿的 RAG 技术与 Agent 能力,为大型语言模型提供卓越的上下文层。它提供可适配任意规模企业的端到端 RAG 工作流,凭借融合式[上下文引擎](https://ragflow.io/basics/what-is-agent-context-engine)与预置的 Agent 模板,助力开发者以极致效率与精度将复杂数据转化为高可信、生产级的人工智能系统。
|
||||
|
||||
## 🎮 Demo 试用
|
||||
|
||||
@ -188,12 +188,12 @@
|
||||
> 请注意,目前官方提供的所有 Docker 镜像均基于 x86 架构构建,并不提供基于 ARM64 的 Docker 镜像。
|
||||
> 如果你的操作系统是 ARM64 架构,请参考[这篇文档](https://ragflow.io/docs/dev/build_docker_image)自行构建 Docker 镜像。
|
||||
|
||||
> 运行以下命令会自动下载 RAGFlow Docker 镜像 `v0.23.0`。请参考下表查看不同 Docker 发行版的描述。如需下载不同于 `v0.23.0` 的 Docker 镜像,请在运行 `docker compose` 启动服务之前先更新 **docker/.env** 文件内的 `RAGFLOW_IMAGE` 变量。
|
||||
> 运行以下命令会自动下载 RAGFlow Docker 镜像 `v0.23.1`。请参考下表查看不同 Docker 发行版的描述。如需下载不同于 `v0.23.1` 的 Docker 镜像,请在运行 `docker compose` 启动服务之前先更新 **docker/.env** 文件内的 `RAGFLOW_IMAGE` 变量。
|
||||
|
||||
```bash
|
||||
$ cd ragflow/docker
|
||||
|
||||
# git checkout v0.23.0
|
||||
|
||||
# git checkout v0.23.1
|
||||
# 可选:使用稳定版本标签(查看发布:https://github.com/infiniflow/ragflow/releases)
|
||||
# 这一步确保代码中的 entrypoint.sh 文件与 Docker 镜像的版本保持一致。
|
||||
|
||||
@ -204,7 +204,7 @@
|
||||
# sed -i '1i DEVICE=gpu' .env
|
||||
# docker compose -f docker-compose.yml up -d
|
||||
```
|
||||
|
||||
|
||||
> 注意:在 `v0.22.0` 之前的版本,我们会同时提供包含 embedding 模型的镜像和不含 embedding 模型的 slim 镜像。具体如下:
|
||||
|
||||
| RAGFlow image tag | Image size (GB) | Has embedding models? | Stable? |
|
||||
@ -402,7 +402,7 @@ docker build --platform linux/amd64 \
|
||||
|
||||
## 📜 路线图
|
||||
|
||||
详见 [RAGFlow Roadmap 2025](https://github.com/infiniflow/ragflow/issues/4214) 。
|
||||
详见 [RAGFlow Roadmap 2026](https://github.com/infiniflow/ragflow/issues/12241) 。
|
||||
|
||||
## 🏄 开源社区
|
||||
|
||||
|
||||
@@ -48,7 +48,7 @@ It consists of a server-side Service and a command-line client (CLI), both imple
1. Ensure the Admin Service is running.
2. Install ragflow-cli.
```bash
pip install ragflow-cli==0.23.0
pip install ragflow-cli==0.23.1
```
3. Launch the CLI client:
```bash
@@ -53,6 +53,8 @@ sql_command: list_services
    | alter_user_role
    | show_user_permission
    | show_version
    | grant_admin
    | revoke_admin

// meta command definition
meta_command: "\\" meta_command_name [meta_args]
@@ -77,6 +79,7 @@ DROP: "DROP"i
USER: "USER"i
ALTER: "ALTER"i
ACTIVE: "ACTIVE"i
ADMIN: "ADMIN"i
PASSWORD: "PASSWORD"i
DATASETS: "DATASETS"i
OF: "OF"i
@@ -123,6 +126,9 @@ revoke_permission: REVOKE action_list ON identifier FROM ROLE identifier ";"
alter_user_role: ALTER USER quoted_string SET ROLE identifier ";"
show_user_permission: SHOW USER PERMISSION quoted_string ";"

grant_admin: GRANT ADMIN quoted_string ";"
revoke_admin: REVOKE ADMIN quoted_string ";"

show_version: SHOW VERSION ";"

action_list: identifier ("," identifier)*
@@ -249,6 +255,14 @@ class AdminTransformer(Transformer):
    def show_version(self, items):
        return {"type": "show_version"}

    def grant_admin(self, items):
        user_name = items[2]
        return {"type": "grant_admin", "user_name": user_name}

    def revoke_admin(self, items):
        user_name = items[2]
        return {"type": "revoke_admin", "user_name": user_name}

    def action_list(self, items):
        return items
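Taken together, the grammar rule and the transformer method are all the parsing layer needs: Lark drops the anonymous `";"` token, keeps the named `GRANT`/`ADMIN` terminals, and hands the `quoted_string` subtree to the rule's method, which is why the project code reads the user name from `items[2]`. A minimal, self-contained sketch of that round trip (a toy grammar, not the project's full grammar file):

```python
from lark import Lark, Transformer

# Toy grammar containing only the two new statements; the real grammar defines many more
# rules and terminals (see the hunks above).
GRAMMAR = r"""
start: grant_admin | revoke_admin
grant_admin: GRANT ADMIN quoted_string ";"
revoke_admin: REVOKE ADMIN quoted_string ";"
quoted_string: ESCAPED_STRING
GRANT: "GRANT"i
REVOKE: "REVOKE"i
ADMIN: "ADMIN"i
%import common.ESCAPED_STRING
%import common.WS
%ignore WS
"""

class MiniAdminTransformer(Transformer):
    def grant_admin(self, items):
        # items = [GRANT, ADMIN, quoted_string subtree]; the anonymous ";" is filtered out.
        return {"type": "grant_admin", "user_name": items[2].children[0].strip('"')}

    def revoke_admin(self, items):
        return {"type": "revoke_admin", "user_name": items[2].children[0].strip('"')}

    def start(self, items):
        return items[0]

parser = Lark(GRAMMAR, start="start")
print(MiniAdminTransformer().transform(parser.parse('GRANT ADMIN "user@example.com";')))
# -> {'type': 'grant_admin', 'user_name': 'user@example.com'}
```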
@ -286,6 +300,43 @@ def encode_to_base64(input_string):
|
||||
return base64_encoded.decode("utf-8")
|
||||
|
||||
|
||||
def show_help():
|
||||
"""Help info"""
|
||||
help_text = """
|
||||
Commands:
|
||||
LIST SERVICES
|
||||
SHOW SERVICE <service>
|
||||
STARTUP SERVICE <service>
|
||||
SHUTDOWN SERVICE <service>
|
||||
RESTART SERVICE <service>
|
||||
LIST USERS
|
||||
SHOW USER <user>
|
||||
DROP USER <user>
|
||||
CREATE USER <user> <password>
|
||||
ALTER USER PASSWORD <user> <new_password>
|
||||
ALTER USER ACTIVE <user> <on/off>
|
||||
LIST DATASETS OF <user>
|
||||
LIST AGENTS OF <user>
|
||||
CREATE ROLE <role>
|
||||
DROP ROLE <role>
|
||||
ALTER ROLE <role> SET DESCRIPTION <description>
|
||||
LIST ROLES
|
||||
SHOW ROLE <role>
|
||||
GRANT <action_list> ON <function> TO ROLE <role>
|
||||
REVOKE <action_list> ON <function> TO ROLE <role>
|
||||
ALTER USER <user> SET ROLE <role>
|
||||
SHOW USER PERMISSION <user>
|
||||
SHOW VERSION
|
||||
GRANT ADMIN <user>
|
||||
REVOKE ADMIN <user>
|
||||
|
||||
Meta Commands:
|
||||
\\?, \\h, \\help Show this help
|
||||
\\q, \\quit, \\exit Quit the CLI
|
||||
"""
|
||||
print(help_text)
|
||||
|
||||
|
||||
class AdminCLI(Cmd):
|
||||
def __init__(self):
|
||||
super().__init__()
|
||||
@ -370,7 +421,7 @@ class AdminCLI(Cmd):
|
||||
res_json = response.json()
|
||||
error_code = res_json.get("code", -1)
|
||||
if error_code == 0:
|
||||
self.session.headers.update({"Content-Type": "application/json", "Authorization": response.headers["Authorization"], "User-Agent": "RAGFlow-CLI/0.23.0"})
|
||||
self.session.headers.update({"Content-Type": "application/json", "Authorization": response.headers["Authorization"], "User-Agent": "RAGFlow-CLI/0.23.1"})
|
||||
print("Authentication successful.")
|
||||
return True
|
||||
else:
|
||||
@ -566,6 +617,10 @@ class AdminCLI(Cmd):
|
||||
self._show_user_permission(command_dict)
|
||||
case "show_version":
|
||||
self._show_version(command_dict)
|
||||
case "grant_admin":
|
||||
self._grant_admin(command_dict)
|
||||
case "revoke_admin":
|
||||
self._revoke_admin(command_dict)
|
||||
case "meta":
|
||||
self._handle_meta_command(command_dict)
|
||||
case _:
|
||||
@ -698,6 +753,33 @@ class AdminCLI(Cmd):
|
||||
else:
|
||||
print(f"Unknown activate status: {activate_status}.")
|
||||
|
||||
|
||||
def _grant_admin(self, command):
|
||||
user_name_tree: Tree = command["user_name"]
|
||||
user_name: str = user_name_tree.children[0].strip("'\"")
|
||||
url = f"http://{self.host}:{self.port}/api/v1/admin/users/{user_name}/admin"
|
||||
# print(f"Grant admin: {url}")
|
||||
# return
|
||||
response = self.session.put(url)
|
||||
res_json = response.json()
|
||||
if response.status_code == 200:
|
||||
print(res_json["message"])
|
||||
else:
|
||||
print(f"Fail to grant {user_name} admin authorization, code: {res_json['code']}, message: {res_json['message']}")
|
||||
|
||||
def _revoke_admin(self, command):
|
||||
user_name_tree: Tree = command["user_name"]
|
||||
user_name: str = user_name_tree.children[0].strip("'\"")
|
||||
url = f"http://{self.host}:{self.port}/api/v1/admin/users/{user_name}/admin"
|
||||
# print(f"Revoke admin: {url}")
|
||||
# return
|
||||
response = self.session.delete(url)
|
||||
res_json = response.json()
|
||||
if response.status_code == 200:
|
||||
print(res_json["message"])
|
||||
else:
|
||||
print(f"Fail to revoke {user_name} admin authorization, code: {res_json['code']}, message: {res_json['message']}")
|
||||
|
||||
def _handle_list_datasets(self, command):
|
||||
username_tree: Tree = command["user_name"]
|
||||
user_name: str = username_tree.children[0].strip("'\"")
|
||||
@ -873,36 +955,12 @@ class AdminCLI(Cmd):
|
||||
args = command.get("args", [])
|
||||
|
||||
if meta_command in ["?", "h", "help"]:
|
||||
self.show_help()
|
||||
show_help()
|
||||
elif meta_command in ["q", "quit", "exit"]:
|
||||
print("Goodbye!")
|
||||
else:
|
||||
print(f"Meta command '{meta_command}' with args {args}")
|
||||
|
||||
def show_help(self):
|
||||
"""Help info"""
|
||||
help_text = """
|
||||
Commands:
|
||||
LIST SERVICES
|
||||
SHOW SERVICE <service>
|
||||
STARTUP SERVICE <service>
|
||||
SHUTDOWN SERVICE <service>
|
||||
RESTART SERVICE <service>
|
||||
LIST USERS
|
||||
SHOW USER <user>
|
||||
DROP USER <user>
|
||||
CREATE USER <user> <password>
|
||||
ALTER USER PASSWORD <user> <new_password>
|
||||
ALTER USER ACTIVE <user> <on/off>
|
||||
LIST DATASETS OF <user>
|
||||
LIST AGENTS OF <user>
|
||||
|
||||
Meta Commands:
|
||||
\\?, \\h, \\help Show this help
|
||||
\\q, \\quit, \\exit Quit the CLI
|
||||
"""
|
||||
print(help_text)
|
||||
|
||||
|
||||
def main():
|
||||
import sys
|
||||
|
||||
@@ -1,6 +1,6 @@
[project]
name = "ragflow-cli"
version = "0.23.0"
version = "0.23.1"
description = "Admin Service's client of [RAGFlow](https://github.com/infiniflow/ragflow). The Admin Service provides user management and system monitoring. "
authors = [{ name = "Lynn", email = "lynn_inf@hotmail.com" }]
license = { text = "Apache License, Version 2.0" }
admin/client/uv.lock (generated): 2 lines changed
@@ -196,7 +196,7 @@ wheels = [

[[package]]
name = "ragflow-cli"
version = "0.23.0"
version = "0.23.1"
source = { virtual = "." }
dependencies = [
    { name = "beartype" },
@@ -158,6 +158,36 @@ def alter_user_activate_status(username):
        return error_response(str(e), 500)


@admin_bp.route('/users/<username>/admin', methods=['PUT'])
@login_required
@check_admin_auth
def grant_admin(username):
    try:
        if current_user.email == username:
            return error_response(f"can't grant current user: {username}", 409)
        msg = UserMgr.grant_admin(username)
        return success_response(None, msg)

    except AdminException as e:
        return error_response(e.message, e.code)
    except Exception as e:
        return error_response(str(e), 500)


@admin_bp.route('/users/<username>/admin', methods=['DELETE'])
@login_required
@check_admin_auth
def revoke_admin(username):
    try:
        if current_user.email == username:
            return error_response(f"can't grant current user: {username}", 409)
        msg = UserMgr.revoke_admin(username)
        return success_response(None, msg)

    except AdminException as e:
        return error_response(e.message, e.code)
    except Exception as e:
        return error_response(str(e), 500)


@admin_bp.route('/users/<username>', methods=['GET'])
@login_required
@check_admin_auth
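Because the CLI's `_grant_admin`/`_revoke_admin` simply issue a PUT or DELETE against `/api/v1/admin/users/<username>/admin`, the same operations can be driven without the CLI. A hedged sketch with `requests`; the host/port are placeholder assumptions, and authentication (the CLI copies the `Authorization` header returned by login into its session) is elided:

```python
import requests

# Assumed Admin Service address; adjust host/port to your deployment.
BASE = "http://127.0.0.1:9381/api/v1/admin"

session = requests.Session()
# Authenticate first, e.g. copy the Authorization header the login response returns:
# session.headers.update({"Authorization": login_response.headers["Authorization"]})

def grant_admin(email: str) -> None:
    # PUT /users/<username>/admin -> grant superuser rights (mirrors the new route above)
    resp = session.put(f"{BASE}/users/{email}/admin")
    print(resp.status_code, resp.json().get("message"))

def revoke_admin(email: str) -> None:
    # DELETE /users/<username>/admin -> revoke superuser rights
    resp = session.delete(f"{BASE}/users/{email}/admin")
    print(resp.status_code, resp.json().get("message"))

if __name__ == "__main__":
    grant_admin("user@example.com")
    revoke_admin("user@example.com")
```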
@ -13,6 +13,8 @@
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
|
||||
import os
|
||||
import logging
|
||||
import re
|
||||
from werkzeug.security import check_password_hash
|
||||
@ -135,6 +137,38 @@ class UserMgr:
|
||||
UserService.update_user(usr.id, {"is_active": target_status})
|
||||
return f"Turn {_activate_status} user activate status successfully!"
|
||||
|
||||
@staticmethod
|
||||
def grant_admin(username: str):
|
||||
# use email to find user. check exist and unique.
|
||||
user_list = UserService.query_user_by_email(username)
|
||||
if not user_list:
|
||||
raise UserNotFoundError(username)
|
||||
elif len(user_list) > 1:
|
||||
raise AdminException(f"Exist more than 1 user: {username}!")
|
||||
# check activate status different from new
|
||||
usr = user_list[0]
|
||||
if usr.is_superuser:
|
||||
return f"{usr} is already superuser!"
|
||||
# update is_active
|
||||
UserService.update_user(usr.id, {"is_superuser": True})
|
||||
return "Grant successfully!"
|
||||
|
||||
@staticmethod
|
||||
def revoke_admin(username: str):
|
||||
# use email to find user. check exist and unique.
|
||||
user_list = UserService.query_user_by_email(username)
|
||||
if not user_list:
|
||||
raise UserNotFoundError(username)
|
||||
elif len(user_list) > 1:
|
||||
raise AdminException(f"Exist more than 1 user: {username}!")
|
||||
# check activate status different from new
|
||||
usr = user_list[0]
|
||||
if not usr.is_superuser:
|
||||
return f"{usr} isn't superuser, yet!"
|
||||
# update is_active
|
||||
UserService.update_user(usr.id, {"is_superuser": False})
|
||||
return "Revoke successfully!"
|
||||
|
||||
|
||||
class UserServiceMgr:
|
||||
|
||||
@ -179,10 +213,14 @@ class ServiceMgr:
|
||||
|
||||
@staticmethod
|
||||
def get_all_services():
|
||||
doc_engine = os.getenv('DOC_ENGINE', 'elasticsearch')
|
||||
result = []
|
||||
configs = SERVICE_CONFIGS.configs
|
||||
for service_id, config in enumerate(configs):
|
||||
config_dict = config.to_dict()
|
||||
if config_dict['service_type'] == 'retrieval':
|
||||
if config_dict['extra']['retrieval_type'] != doc_engine:
|
||||
continue
|
||||
try:
|
||||
service_detail = ServiceMgr.get_service_details(service_id)
|
||||
if "status" in service_detail:
|
||||
|
||||
@ -202,7 +202,7 @@ class Retrieval(ToolBase, ABC):
|
||||
kbinfos["chunks"] = settings.retriever.retrieval_by_children(kbinfos["chunks"],
|
||||
[kb.tenant_id for kb in kbs])
|
||||
if self._param.use_kg:
|
||||
ck = settings.kg_retriever.retrieval(query,
|
||||
ck = await settings.kg_retriever.retrieval(query,
|
||||
[kb.tenant_id for kb in kbs],
|
||||
kb_ids,
|
||||
embd_mdl,
|
||||
@ -215,7 +215,7 @@ class Retrieval(ToolBase, ABC):
|
||||
kbinfos = {"chunks": [], "doc_aggs": []}
|
||||
|
||||
if self._param.use_kg and kbs:
|
||||
ck = settings.kg_retriever.retrieval(query, [kb.tenant_id for kb in kbs], filtered_kb_ids, embd_mdl,
|
||||
ck = await settings.kg_retriever.retrieval(query, [kb.tenant_id for kb in kbs], filtered_kb_ids, embd_mdl,
|
||||
LLMBundle(kbs[0].tenant_id, LLMType.CHAT))
|
||||
if self.check_if_canceled("Retrieval processing"):
|
||||
return
|
||||
|
||||
@ -381,7 +381,7 @@ async def retrieval_test():
|
||||
rank_feature=labels
|
||||
)
|
||||
if use_kg:
|
||||
ck = settings.kg_retriever.retrieval(_question,
|
||||
ck = await settings.kg_retriever.retrieval(_question,
|
||||
tenant_ids,
|
||||
kb_ids,
|
||||
embd_mdl,
|
||||
|
||||
@ -150,7 +150,7 @@ async def retrieval(tenant_id):
|
||||
)
|
||||
|
||||
if use_kg:
|
||||
ck = settings.kg_retriever.retrieval(question,
|
||||
ck = await settings.kg_retriever.retrieval(question,
|
||||
[tenant_id],
|
||||
[kb_id],
|
||||
embd_mdl,
|
||||
|
||||
@ -1579,7 +1579,7 @@ async def retrieval_test(tenant_id):
|
||||
if cks:
|
||||
ranks["chunks"] = cks
|
||||
if use_kg:
|
||||
ck = settings.kg_retriever.retrieval(question, [k.tenant_id for k in kbs], kb_ids, embd_mdl, LLMBundle(kb.tenant_id, LLMType.CHAT))
|
||||
ck = await settings.kg_retriever.retrieval(question, [k.tenant_id for k in kbs], kb_ids, embd_mdl, LLMBundle(kb.tenant_id, LLMType.CHAT))
|
||||
if ck["content_with_weight"]:
|
||||
ranks["chunks"].insert(0, ck)
|
||||
|
||||
|
||||
@ -60,7 +60,7 @@ async def create(tenant_id, chat_id):
|
||||
"name": req.get("name", "New session"),
|
||||
"message": [{"role": "assistant", "content": dia[0].prompt_config.get("prologue")}],
|
||||
"user_id": req.get("user_id", ""),
|
||||
"reference": [{}],
|
||||
"reference": [],
|
||||
}
|
||||
if not conv.get("name"):
|
||||
return get_error_data_result(message="`name` can not be empty.")
|
||||
@ -1116,7 +1116,7 @@ async def retrieval_test_embedded():
|
||||
local_doc_ids, rerank_mdl=rerank_mdl, highlight=req.get("highlight"), rank_feature=labels
|
||||
)
|
||||
if use_kg:
|
||||
ck = settings.kg_retriever.retrieval(_question, tenant_ids, kb_ids, embd_mdl,
|
||||
ck = await settings.kg_retriever.retrieval(_question, tenant_ids, kb_ids, embd_mdl,
|
||||
LLMBundle(kb.tenant_id, LLMType.CHAT))
|
||||
if ck["content_with_weight"]:
|
||||
ranks["chunks"].insert(0, ck)
|
||||
|
||||
@ -421,7 +421,7 @@ async def async_chat(dialog, messages, stream=True, **kwargs):
|
||||
kbinfos["chunks"].extend(tav_res["chunks"])
|
||||
kbinfos["doc_aggs"].extend(tav_res["doc_aggs"])
|
||||
if prompt_config.get("use_kg"):
|
||||
ck = settings.kg_retriever.retrieval(" ".join(questions), tenant_ids, dialog.kb_ids, embd_mdl,
|
||||
ck = await settings.kg_retriever.retrieval(" ".join(questions), tenant_ids, dialog.kb_ids, embd_mdl,
|
||||
LLMBundle(dialog.tenant_id, LLMType.CHAT))
|
||||
if ck["content_with_weight"]:
|
||||
kbinfos["chunks"].insert(0, ck)
|
||||
|
||||
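The recurring change in the hunks above is a calling-convention fix: `kg_retriever.retrieval` is now a coroutine, so every call site must `await` it, otherwise it returns an un-run coroutine object instead of chunks. A minimal, self-contained sketch of the pattern (`ToyKGRetriever` is a stand-in, not the real retriever):

```python
import asyncio

class ToyKGRetriever:
    """Stand-in for settings.kg_retriever; not the real implementation."""

    async def retrieval(self, question: str, *args, **kwargs) -> dict:
        await asyncio.sleep(0)  # pretend this does I/O against the doc/graph store
        return {"content_with_weight": f"graph context for: {question}"}

async def main() -> None:
    retriever = ToyKGRetriever()
    # Before: ck = retriever.retrieval(...)  -> coroutine object, never executed
    # After (as in the hunks above): the call is awaited
    ck = await retriever.retrieval("what is ragflow?")
    if ck["content_with_weight"]:
        print(ck["content_with_weight"])

asyncio.run(main())
```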
@ -164,7 +164,7 @@ class UserService(CommonService):
|
||||
@classmethod
|
||||
@DB.connection_context()
|
||||
def get_all_users(cls):
|
||||
users = cls.model.select()
|
||||
users = cls.model.select().order_by(cls.model.email)
|
||||
return list(users)
|
||||
|
||||
|
||||
|
||||
@ -132,7 +132,10 @@ class FileSource(StrEnum):
|
||||
ASANA = "asana"
|
||||
GITHUB = "github"
|
||||
GITLAB = "gitlab"
|
||||
|
||||
IMAP = "imap"
|
||||
BITBUCKET = "bitbucket"
|
||||
ZENDESK = "zendesk"
|
||||
|
||||
class PipelineTaskType(StrEnum):
|
||||
PARSE = "Parse"
|
||||
DOWNLOAD = "Download"
|
||||
|
||||
@ -34,10 +34,11 @@ from .google_drive.connector import GoogleDriveConnector
|
||||
from .jira.connector import JiraConnector
|
||||
from .sharepoint_connector import SharePointConnector
|
||||
from .teams_connector import TeamsConnector
|
||||
from .webdav_connector import WebDAVConnector
|
||||
from .moodle_connector import MoodleConnector
|
||||
from .airtable_connector import AirtableConnector
|
||||
from .asana_connector import AsanaConnector
|
||||
from .imap_connector import ImapConnector
|
||||
from .zendesk_connector import ZendeskConnector
|
||||
from .config import BlobType, DocumentSource
|
||||
from .models import Document, TextSection, ImageSection, BasicExpertInfo
|
||||
from .exceptions import (
|
||||
@ -60,7 +61,6 @@ __all__ = [
|
||||
"JiraConnector",
|
||||
"SharePointConnector",
|
||||
"TeamsConnector",
|
||||
"WebDAVConnector",
|
||||
"MoodleConnector",
|
||||
"BlobType",
|
||||
"DocumentSource",
|
||||
@ -75,4 +75,6 @@ __all__ = [
|
||||
"UnexpectedValidationError",
|
||||
"AirtableConnector",
|
||||
"AsanaConnector",
|
||||
"ImapConnector",
|
||||
"ZendeskConnector",
|
||||
]
|
||||
|
||||
@@ -1,6 +1,6 @@
from datetime import datetime, timezone
import logging
from typing import Any
from typing import Any, Generator

import requests

@@ -8,8 +8,8 @@ from pyairtable import Api as AirtableApi

from common.data_source.config import AIRTABLE_CONNECTOR_SIZE_THRESHOLD, INDEX_BATCH_SIZE, DocumentSource
from common.data_source.exceptions import ConnectorMissingCredentialError
from common.data_source.interfaces import LoadConnector
from common.data_source.models import Document, GenerateDocumentsOutput
from common.data_source.interfaces import LoadConnector, PollConnector
from common.data_source.models import Document, GenerateDocumentsOutput, SecondsSinceUnixEpoch
from common.data_source.utils import extract_size_bytes, get_file_ext

class AirtableClientNotSetUpError(PermissionError):
@@ -19,7 +19,7 @@ class AirtableClientNotSetUpError(PermissionError):
    )


class AirtableConnector(LoadConnector):
class AirtableConnector(LoadConnector, PollConnector):
    """
    Lightweight Airtable connector.

@@ -132,6 +132,26 @@ class AirtableConnector(LoadConnector):
        if batch:
            yield batch

    def poll_source(self, start: SecondsSinceUnixEpoch, end: SecondsSinceUnixEpoch) -> Generator[list[Document], None, None]:
        """Poll source to get documents"""
        start_dt = datetime.fromtimestamp(start, tz=timezone.utc)
        end_dt = datetime.fromtimestamp(end, tz=timezone.utc)

        for batch in self.load_from_state():
            filtered: list[Document] = []

            for doc in batch:
                if not doc.doc_updated_at:
                    continue

                doc_dt = doc.doc_updated_at.astimezone(timezone.utc)

                if start_dt <= doc_dt < end_dt:
                    filtered.append(doc)

            if filtered:
                yield filtered

if __name__ == "__main__":
    import os
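The new `poll_source` is load-then-filter: it reuses `load_from_state()` and keeps only documents whose `doc_updated_at` falls inside the `[start, end)` UTC window, skipping documents with no timestamp. A self-contained sketch of that filter, using a stand-in `Doc` dataclass rather than the connector's `Document` model:

```python
from dataclasses import dataclass
from datetime import datetime, timezone
from typing import Iterable, Iterator, Optional

@dataclass
class Doc:
    id: str
    doc_updated_at: Optional[datetime] = None

def poll_window(batches: Iterable[list[Doc]], start: float, end: float) -> Iterator[list[Doc]]:
    """Keep only documents updated inside [start, end), mirroring poll_source() above."""
    start_dt = datetime.fromtimestamp(start, tz=timezone.utc)
    end_dt = datetime.fromtimestamp(end, tz=timezone.utc)
    for batch in batches:
        filtered = [
            d for d in batch
            if d.doc_updated_at and start_dt <= d.doc_updated_at.astimezone(timezone.utc) < end_dt
        ]
        if filtered:
            yield filtered

batches = [[Doc("a", datetime(2025, 1, 2, tzinfo=timezone.utc)), Doc("b")]]  # "b" has no timestamp
window = (datetime(2025, 1, 1, tzinfo=timezone.utc).timestamp(),
          datetime(2025, 2, 1, tzinfo=timezone.utc).timestamp())
print(list(poll_window(batches, *window)))  # -> only Doc "a" survives the filter
```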
common/data_source/bitbucket/__init__.py (new file, 0 lines)
common/data_source/bitbucket/connector.py (new file, 388 lines)
@ -0,0 +1,388 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import copy
|
||||
from collections.abc import Callable
|
||||
from collections.abc import Iterator
|
||||
from datetime import datetime
|
||||
from datetime import timezone
|
||||
from typing import Any
|
||||
from typing import TYPE_CHECKING
|
||||
|
||||
from typing_extensions import override
|
||||
|
||||
from common.data_source.config import INDEX_BATCH_SIZE
|
||||
from common.data_source.config import DocumentSource
|
||||
from common.data_source.config import REQUEST_TIMEOUT_SECONDS
|
||||
from common.data_source.exceptions import (
|
||||
ConnectorMissingCredentialError,
|
||||
CredentialExpiredError,
|
||||
InsufficientPermissionsError,
|
||||
UnexpectedValidationError,
|
||||
)
|
||||
from common.data_source.interfaces import CheckpointedConnector
|
||||
from common.data_source.interfaces import CheckpointOutput
|
||||
from common.data_source.interfaces import IndexingHeartbeatInterface
|
||||
from common.data_source.interfaces import SecondsSinceUnixEpoch
|
||||
from common.data_source.interfaces import SlimConnectorWithPermSync
|
||||
from common.data_source.models import ConnectorCheckpoint
|
||||
from common.data_source.models import ConnectorFailure
|
||||
from common.data_source.models import DocumentFailure
|
||||
from common.data_source.models import SlimDocument
|
||||
from common.data_source.bitbucket.utils import (
|
||||
build_auth_client,
|
||||
list_repositories,
|
||||
map_pr_to_document,
|
||||
paginate,
|
||||
PR_LIST_RESPONSE_FIELDS,
|
||||
SLIM_PR_LIST_RESPONSE_FIELDS,
|
||||
)
|
||||
|
||||
if TYPE_CHECKING:
|
||||
import httpx
|
||||
|
||||
|
||||
class BitbucketConnectorCheckpoint(ConnectorCheckpoint):
|
||||
"""Checkpoint state for resumable Bitbucket PR indexing.
|
||||
|
||||
Fields:
|
||||
repos_queue: Materialized list of repository slugs to process.
|
||||
current_repo_index: Index of the repository currently being processed.
|
||||
next_url: Bitbucket "next" URL for continuing pagination within the current repo.
|
||||
"""
|
||||
|
||||
repos_queue: list[str] = []
|
||||
current_repo_index: int = 0
|
||||
next_url: str | None = None
|
||||
|
||||
|
||||
class BitbucketConnector(
|
||||
CheckpointedConnector[BitbucketConnectorCheckpoint],
|
||||
SlimConnectorWithPermSync,
|
||||
):
|
||||
"""Connector for indexing Bitbucket Cloud pull requests.
|
||||
|
||||
Args:
|
||||
workspace: Bitbucket workspace ID.
|
||||
repositories: Comma-separated list of repository slugs to index.
|
||||
projects: Comma-separated list of project keys to index all repositories within.
|
||||
batch_size: Max number of documents to yield per batch.
|
||||
"""
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
workspace: str,
|
||||
repositories: str | None = None,
|
||||
projects: str | None = None,
|
||||
batch_size: int = INDEX_BATCH_SIZE,
|
||||
) -> None:
|
||||
self.workspace = workspace
|
||||
self._repositories = (
|
||||
[s.strip() for s in repositories.split(",") if s.strip()]
|
||||
if repositories
|
||||
else None
|
||||
)
|
||||
self._projects: list[str] | None = (
|
||||
[s.strip() for s in projects.split(",") if s.strip()] if projects else None
|
||||
)
|
||||
self.batch_size = batch_size
|
||||
self.email: str | None = None
|
||||
self.api_token: str | None = None
|
||||
|
||||
def load_credentials(self, credentials: dict[str, Any]) -> dict[str, Any] | None:
|
||||
"""Load API token-based credentials.
|
||||
|
||||
Expects a dict with keys: `bitbucket_email`, `bitbucket_api_token`.
|
||||
"""
|
||||
self.email = credentials.get("bitbucket_email")
|
||||
self.api_token = credentials.get("bitbucket_api_token")
|
||||
if not self.email or not self.api_token:
|
||||
raise ConnectorMissingCredentialError("Bitbucket")
|
||||
return None
|
||||
|
||||
def _client(self) -> httpx.Client:
|
||||
"""Build an authenticated HTTP client or raise if credentials missing."""
|
||||
if not self.email or not self.api_token:
|
||||
raise ConnectorMissingCredentialError("Bitbucket")
|
||||
return build_auth_client(self.email, self.api_token)
|
||||
|
||||
def _iter_pull_requests_for_repo(
|
||||
self,
|
||||
client: httpx.Client,
|
||||
repo_slug: str,
|
||||
params: dict[str, Any] | None = None,
|
||||
start_url: str | None = None,
|
||||
on_page: Callable[[str | None], None] | None = None,
|
||||
) -> Iterator[dict[str, Any]]:
|
||||
base = f"https://api.bitbucket.org/2.0/repositories/{self.workspace}/{repo_slug}/pullrequests"
|
||||
yield from paginate(
|
||||
client,
|
||||
base,
|
||||
params,
|
||||
start_url=start_url,
|
||||
on_page=on_page,
|
||||
)
|
||||
|
||||
def _build_params(
|
||||
self,
|
||||
fields: str = PR_LIST_RESPONSE_FIELDS,
|
||||
start: SecondsSinceUnixEpoch | None = None,
|
||||
end: SecondsSinceUnixEpoch | None = None,
|
||||
) -> dict[str, Any]:
|
||||
"""Build Bitbucket fetch params.
|
||||
|
||||
Always include OPEN, MERGED, and DECLINED PRs. If both ``start`` and
|
||||
``end`` are provided, apply a single updated_on time window.
|
||||
"""
|
||||
|
||||
def _iso(ts: SecondsSinceUnixEpoch) -> str:
|
||||
return datetime.fromtimestamp(ts, tz=timezone.utc).isoformat()
|
||||
|
||||
def _tc_epoch(
|
||||
lower_epoch: SecondsSinceUnixEpoch | None,
|
||||
upper_epoch: SecondsSinceUnixEpoch | None,
|
||||
) -> str | None:
|
||||
if lower_epoch is not None and upper_epoch is not None:
|
||||
lower_iso = _iso(lower_epoch)
|
||||
upper_iso = _iso(upper_epoch)
|
||||
return f'(updated_on > "{lower_iso}" AND updated_on <= "{upper_iso}")'
|
||||
return None
|
||||
|
||||
params: dict[str, Any] = {"fields": fields, "pagelen": 50}
|
||||
time_clause = _tc_epoch(start, end)
|
||||
q = '(state = "OPEN" OR state = "MERGED" OR state = "DECLINED")'
|
||||
if time_clause:
|
||||
q = f"{q} AND {time_clause}"
|
||||
params["q"] = q
|
||||
return params
|
||||
|
||||
def _iter_target_repositories(self, client: httpx.Client) -> Iterator[str]:
|
||||
"""Yield repository slugs based on configuration.
|
||||
|
||||
Priority:
|
||||
- repositories list
|
||||
- projects list (list repos by project key)
|
||||
- workspace (all repos)
|
||||
"""
|
||||
if self._repositories:
|
||||
for slug in self._repositories:
|
||||
yield slug
|
||||
return
|
||||
if self._projects:
|
||||
for project_key in self._projects:
|
||||
for repo in list_repositories(client, self.workspace, project_key):
|
||||
slug_val = repo.get("slug")
|
||||
if isinstance(slug_val, str) and slug_val:
|
||||
yield slug_val
|
||||
return
|
||||
for repo in list_repositories(client, self.workspace, None):
|
||||
slug_val = repo.get("slug")
|
||||
if isinstance(slug_val, str) and slug_val:
|
||||
yield slug_val
|
||||
|
||||
@override
|
||||
def load_from_checkpoint(
|
||||
self,
|
||||
start: SecondsSinceUnixEpoch,
|
||||
end: SecondsSinceUnixEpoch,
|
||||
checkpoint: BitbucketConnectorCheckpoint,
|
||||
) -> CheckpointOutput[BitbucketConnectorCheckpoint]:
|
||||
"""Resumable PR ingestion across repos and pages within a time window.
|
||||
|
||||
Yields Documents (or ConnectorFailure for per-PR mapping failures) and returns
|
||||
an updated checkpoint that records repo position and next page URL.
|
||||
"""
|
||||
new_checkpoint = copy.deepcopy(checkpoint)
|
||||
|
||||
with self._client() as client:
|
||||
# Materialize target repositories once
|
||||
if not new_checkpoint.repos_queue:
|
||||
# Deduplicate and sort so the repository order is deterministic across checkpoint runs
|
||||
repos_list = list(self._iter_target_repositories(client))
|
||||
new_checkpoint.repos_queue = sorted(set(repos_list))
|
||||
new_checkpoint.current_repo_index = 0
|
||||
new_checkpoint.next_url = None
|
||||
|
||||
repos = new_checkpoint.repos_queue
|
||||
if not repos or new_checkpoint.current_repo_index >= len(repos):
|
||||
new_checkpoint.has_more = False
|
||||
return new_checkpoint
|
||||
|
||||
repo_slug = repos[new_checkpoint.current_repo_index]
|
||||
|
||||
first_page_params = self._build_params(
|
||||
fields=PR_LIST_RESPONSE_FIELDS,
|
||||
start=start,
|
||||
end=end,
|
||||
)
|
||||
|
||||
def _on_page(next_url: str | None) -> None:
|
||||
new_checkpoint.next_url = next_url
|
||||
|
||||
for pr in self._iter_pull_requests_for_repo(
|
||||
client,
|
||||
repo_slug,
|
||||
params=first_page_params,
|
||||
start_url=new_checkpoint.next_url,
|
||||
on_page=_on_page,
|
||||
):
|
||||
try:
|
||||
document = map_pr_to_document(pr, self.workspace, repo_slug)
|
||||
yield document
|
||||
except Exception as e:
|
||||
pr_id = pr.get("id")
|
||||
pr_link = (
|
||||
f"https://bitbucket.org/{self.workspace}/{repo_slug}/pull-requests/{pr_id}"
|
||||
if pr_id is not None
|
||||
else None
|
||||
)
|
||||
yield ConnectorFailure(
|
||||
failed_document=DocumentFailure(
|
||||
document_id=(
|
||||
f"{DocumentSource.BITBUCKET.value}:{self.workspace}:{repo_slug}:pr:{pr_id}"
|
||||
if pr_id is not None
|
||||
else f"{DocumentSource.BITBUCKET.value}:{self.workspace}:{repo_slug}:pr:unknown"
|
||||
),
|
||||
document_link=pr_link,
|
||||
),
|
||||
failure_message=f"Failed to process Bitbucket PR: {e}",
|
||||
exception=e,
|
||||
)
|
||||
|
||||
# Advance to next repository (if any) and set has_more accordingly
|
||||
new_checkpoint.current_repo_index += 1
|
||||
new_checkpoint.next_url = None
|
||||
new_checkpoint.has_more = new_checkpoint.current_repo_index < len(repos)
|
||||
|
||||
return new_checkpoint
|
||||
|
||||
@override
|
||||
def build_dummy_checkpoint(self) -> BitbucketConnectorCheckpoint:
|
||||
"""Create an initial checkpoint with work remaining."""
|
||||
return BitbucketConnectorCheckpoint(has_more=True)
|
||||
|
||||
@override
|
||||
def validate_checkpoint_json(
|
||||
self, checkpoint_json: str
|
||||
) -> BitbucketConnectorCheckpoint:
|
||||
"""Validate and deserialize a checkpoint instance from JSON."""
|
||||
return BitbucketConnectorCheckpoint.model_validate_json(checkpoint_json)
|
||||
|
||||
def retrieve_all_slim_docs_perm_sync(
|
||||
self,
|
||||
start: SecondsSinceUnixEpoch | None = None,
|
||||
end: SecondsSinceUnixEpoch | None = None,
|
||||
callback: IndexingHeartbeatInterface | None = None,
|
||||
) -> Iterator[list[SlimDocument]]:
|
||||
"""Return only document IDs for all existing pull requests."""
|
||||
batch: list[SlimDocument] = []
|
||||
params = self._build_params(
|
||||
fields=SLIM_PR_LIST_RESPONSE_FIELDS,
|
||||
start=start,
|
||||
end=end,
|
||||
)
|
||||
with self._client() as client:
|
||||
for slug in self._iter_target_repositories(client):
|
||||
for pr in self._iter_pull_requests_for_repo(
|
||||
client, slug, params=params
|
||||
):
|
||||
pr_id = pr["id"]
|
||||
doc_id = f"{DocumentSource.BITBUCKET.value}:{self.workspace}:{slug}:pr:{pr_id}"
|
||||
batch.append(SlimDocument(id=doc_id))
|
||||
if len(batch) >= self.batch_size:
|
||||
yield batch
|
||||
batch = []
|
||||
if callback:
|
||||
if callback.should_stop():
|
||||
# Note: this is not actually used for permission sync yet, just pruning
|
||||
raise RuntimeError(
|
||||
"bitbucket_pr_sync: Stop signal detected"
|
||||
)
|
||||
callback.progress("bitbucket_pr_sync", len(batch))
|
||||
if batch:
|
||||
yield batch
|
||||
|
||||
def validate_connector_settings(self) -> None:
|
||||
"""Validate Bitbucket credentials and workspace access by probing a lightweight endpoint.
|
||||
|
||||
Raises:
|
||||
CredentialExpiredError: on HTTP 401
|
||||
InsufficientPermissionsError: on HTTP 403
|
||||
UnexpectedValidationError: on any other failure
|
||||
"""
|
||||
try:
|
||||
with self._client() as client:
|
||||
url = f"https://api.bitbucket.org/2.0/repositories/{self.workspace}"
|
||||
resp = client.get(
|
||||
url,
|
||||
params={"pagelen": 1, "fields": "pagelen"},
|
||||
timeout=REQUEST_TIMEOUT_SECONDS,
|
||||
)
|
||||
if resp.status_code == 401:
|
||||
raise CredentialExpiredError(
|
||||
"Invalid or expired Bitbucket credentials (HTTP 401)."
|
||||
)
|
||||
if resp.status_code == 403:
|
||||
raise InsufficientPermissionsError(
|
||||
"Insufficient permissions to access Bitbucket workspace (HTTP 403)."
|
||||
)
|
||||
if resp.status_code < 200 or resp.status_code >= 300:
|
||||
raise UnexpectedValidationError(
|
||||
f"Unexpected Bitbucket error (status={resp.status_code})."
|
||||
)
|
||||
except Exception as e:
|
||||
# Network or other unexpected errors
|
||||
if isinstance(
|
||||
e,
|
||||
(
|
||||
CredentialExpiredError,
|
||||
InsufficientPermissionsError,
|
||||
UnexpectedValidationError,
|
||||
ConnectorMissingCredentialError,
|
||||
),
|
||||
):
|
||||
raise
|
||||
raise UnexpectedValidationError(
|
||||
f"Unexpected error while validating Bitbucket settings: {e}"
|
||||
)
|
||||
|
||||
if __name__ == "__main__":
|
||||
bitbucket = BitbucketConnector(
|
||||
workspace="<YOUR_WORKSPACE>"
|
||||
)
|
||||
|
||||
bitbucket.load_credentials({
|
||||
"bitbucket_email": "<YOUR_EMAIL>",
|
||||
"bitbucket_api_token": "<YOUR_API_TOKEN>",
|
||||
})
|
||||
|
||||
bitbucket.validate_connector_settings()
|
||||
print("Credentials validated successfully.")
|
||||
|
||||
start_time = datetime.fromtimestamp(0, tz=timezone.utc)
|
||||
end_time = datetime.now(timezone.utc)
|
||||
|
||||
for doc_batch in bitbucket.retrieve_all_slim_docs_perm_sync(
|
||||
start=start_time.timestamp(),
|
||||
end=end_time.timestamp(),
|
||||
):
|
||||
for doc in doc_batch:
|
||||
print(doc)
|
||||
|
||||
|
||||
bitbucket_checkpoint = bitbucket.build_dummy_checkpoint()
|
||||
|
||||
while bitbucket_checkpoint.has_more:
|
||||
gen = bitbucket.load_from_checkpoint(
|
||||
start=start_time.timestamp(),
|
||||
end=end_time.timestamp(),
|
||||
checkpoint=bitbucket_checkpoint,
|
||||
)
|
||||
|
||||
while True:
|
||||
try:
|
||||
doc = next(gen)
|
||||
print(doc)
|
||||
except StopIteration as e:
|
||||
bitbucket_checkpoint = e.value
|
||||
break
|
||||
|
||||
common/data_source/bitbucket/utils.py (new file)
@ -0,0 +1,288 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import time
|
||||
from collections.abc import Callable
|
||||
from collections.abc import Iterator
|
||||
from datetime import datetime
|
||||
from datetime import timezone
|
||||
from typing import Any
|
||||
|
||||
import httpx
|
||||
|
||||
from common.data_source.config import REQUEST_TIMEOUT_SECONDS, DocumentSource
|
||||
from common.data_source.cross_connector_utils.rate_limit_wrapper import (
|
||||
rate_limit_builder,
|
||||
)
|
||||
from common.data_source.utils import sanitize_filename
|
||||
from common.data_source.models import BasicExpertInfo, Document
|
||||
from common.data_source.cross_connector_utils.retry_wrapper import retry_builder
|
||||
|
||||
# Fields requested from Bitbucket PR list endpoint to ensure rich PR data
|
||||
PR_LIST_RESPONSE_FIELDS: str = ",".join(
|
||||
[
|
||||
"next",
|
||||
"page",
|
||||
"pagelen",
|
||||
"values.author",
|
||||
"values.close_source_branch",
|
||||
"values.closed_by",
|
||||
"values.comment_count",
|
||||
"values.created_on",
|
||||
"values.description",
|
||||
"values.destination",
|
||||
"values.draft",
|
||||
"values.id",
|
||||
"values.links",
|
||||
"values.merge_commit",
|
||||
"values.participants",
|
||||
"values.reason",
|
||||
"values.rendered",
|
||||
"values.reviewers",
|
||||
"values.source",
|
||||
"values.state",
|
||||
"values.summary",
|
||||
"values.task_count",
|
||||
"values.title",
|
||||
"values.type",
|
||||
"values.updated_on",
|
||||
]
|
||||
)
|
||||
|
||||
# Minimal fields for slim retrieval (IDs only)
|
||||
SLIM_PR_LIST_RESPONSE_FIELDS: str = ",".join(
|
||||
[
|
||||
"next",
|
||||
"page",
|
||||
"pagelen",
|
||||
"values.id",
|
||||
]
|
||||
)
|
||||
|
||||
|
||||
# Minimal fields for repository list calls
|
||||
REPO_LIST_RESPONSE_FIELDS: str = ",".join(
|
||||
[
|
||||
"next",
|
||||
"page",
|
||||
"pagelen",
|
||||
"values.slug",
|
||||
"values.full_name",
|
||||
"values.project.key",
|
||||
]
|
||||
)
|
||||
|
||||
|
||||
class BitbucketRetriableError(Exception):
|
||||
"""Raised for retriable Bitbucket conditions (429, 5xx)."""
|
||||
|
||||
|
||||
class BitbucketNonRetriableError(Exception):
|
||||
"""Raised for non-retriable Bitbucket client errors (4xx except 429)."""
|
||||
|
||||
|
||||
@retry_builder(
|
||||
tries=6,
|
||||
delay=1,
|
||||
backoff=2,
|
||||
max_delay=30,
|
||||
exceptions=(BitbucketRetriableError, httpx.RequestError),
|
||||
)
|
||||
@rate_limit_builder(max_calls=60, period=60)
|
||||
def bitbucket_get(
|
||||
client: httpx.Client, url: str, params: dict[str, Any] | None = None
|
||||
) -> httpx.Response:
|
||||
"""Perform a GET against Bitbucket with retry and rate limiting.
|
||||
|
||||
Retries on 429 and 5xx responses, and on transport errors. Honors
|
||||
`Retry-After` header for 429 when present by sleeping before retrying.
|
||||
"""
|
||||
try:
|
||||
response = client.get(url, params=params, timeout=REQUEST_TIMEOUT_SECONDS)
|
||||
except httpx.RequestError:
|
||||
# Allow retry_builder to handle retries of transport errors
|
||||
raise
|
||||
|
||||
try:
|
||||
response.raise_for_status()
|
||||
except httpx.HTTPStatusError as e:
|
||||
status = e.response.status_code if e.response is not None else None
|
||||
if status == 429:
|
||||
retry_after = e.response.headers.get("Retry-After") if e.response is not None else None
|
||||
if retry_after is not None:
|
||||
try:
|
||||
time.sleep(int(retry_after))
|
||||
except (TypeError, ValueError):
|
||||
pass
|
||||
raise BitbucketRetriableError("Bitbucket rate limit exceeded (429)") from e
|
||||
if status is not None and 500 <= status < 600:
|
||||
raise BitbucketRetriableError(f"Bitbucket server error: {status}") from e
|
||||
if status is not None and 400 <= status < 500:
|
||||
raise BitbucketNonRetriableError(f"Bitbucket client error: {status}") from e
|
||||
# Unknown status, propagate
|
||||
raise
|
||||
|
||||
return response
|
||||
|
||||
|
||||
def build_auth_client(email: str, api_token: str) -> httpx.Client:
|
||||
"""Create an authenticated httpx client for Bitbucket Cloud API."""
|
||||
return httpx.Client(auth=(email, api_token), http2=True)
|
||||
|
||||
|
||||
def paginate(
|
||||
client: httpx.Client,
|
||||
url: str,
|
||||
params: dict[str, Any] | None = None,
|
||||
start_url: str | None = None,
|
||||
on_page: Callable[[str | None], None] | None = None,
|
||||
) -> Iterator[dict[str, Any]]:
|
||||
"""Iterate over paginated Bitbucket API responses yielding individual values.
|
||||
|
||||
Args:
|
||||
client: Authenticated HTTP client.
|
||||
url: Base collection URL (first page when start_url is None).
|
||||
params: Query params for the first page.
|
||||
start_url: If provided, start from this absolute URL (ignores params).
|
||||
on_page: Optional callback invoked after each page with the next page URL.
|
||||
"""
|
||||
next_url = start_url or url
|
||||
# If resuming from a next URL, do not pass params again
|
||||
query = params.copy() if params else None
|
||||
query = None if start_url else query
|
||||
while next_url:
|
||||
resp = bitbucket_get(client, next_url, params=query)
|
||||
data = resp.json()
|
||||
values = data.get("values", [])
|
||||
for item in values:
|
||||
yield item
|
||||
next_url = data.get("next")
|
||||
if on_page is not None:
|
||||
on_page(next_url)
|
||||
# only include params on first call, next_url will contain all necessary params
|
||||
query = None
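# Illustrative sketch (placeholder workspace/repo values): resuming pagination
# from a previously saved "next" URL by combining start_url with on_page.
#
#   saved_next_url: str | None = None
#
#   def remember(next_url: str | None) -> None:
#       global saved_next_url
#       saved_next_url = next_url
#
#   with build_auth_client("user@example.com", "api-token") as client:
#       for pr in paginate(
#           client,
#           "https://api.bitbucket.org/2.0/repositories/acme/demo-repo/pullrequests",
#           params={"pagelen": 50},
#           start_url=saved_next_url,   # None on the first run
#           on_page=remember,           # persists the page cursor after every page
#       ):
#           ...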
|
||||
|
||||
|
||||
def list_repositories(
|
||||
client: httpx.Client, workspace: str, project_key: str | None = None
|
||||
) -> Iterator[dict[str, Any]]:
|
||||
"""List repositories in a workspace, optionally filtered by project key."""
|
||||
base_url = f"https://api.bitbucket.org/2.0/repositories/{workspace}"
|
||||
params: dict[str, Any] = {
|
||||
"fields": REPO_LIST_RESPONSE_FIELDS,
|
||||
"pagelen": 100,
|
||||
# Ensure deterministic ordering
|
||||
"sort": "full_name",
|
||||
}
|
||||
if project_key:
|
||||
params["q"] = f'project.key="{project_key}"'
|
||||
yield from paginate(client, base_url, params)
|
||||
|
||||
|
||||
def map_pr_to_document(pr: dict[str, Any], workspace: str, repo_slug: str) -> Document:
|
||||
"""Map a Bitbucket pull request JSON to Onyx Document."""
|
||||
pr_id = pr["id"]
|
||||
title = pr.get("title") or f"PR {pr_id}"
|
||||
description = pr.get("description") or ""
|
||||
state = pr.get("state")
|
||||
draft = pr.get("draft", False)
|
||||
author = pr.get("author", {})
|
||||
reviewers = pr.get("reviewers", [])
|
||||
participants = pr.get("participants", [])
|
||||
|
||||
link = pr.get("links", {}).get("html", {}).get("href") or (
|
||||
f"https://bitbucket.org/{workspace}/{repo_slug}/pull-requests/{pr_id}"
|
||||
)
|
||||
|
||||
created_on = pr.get("created_on")
|
||||
updated_on = pr.get("updated_on")
|
||||
updated_dt = (
|
||||
datetime.fromisoformat(updated_on.replace("Z", "+00:00")).astimezone(
|
||||
timezone.utc
|
||||
)
|
||||
if isinstance(updated_on, str)
|
||||
else None
|
||||
)
|
||||
|
||||
source_branch = pr.get("source", {}).get("branch", {}).get("name", "")
|
||||
destination_branch = pr.get("destination", {}).get("branch", {}).get("name", "")
|
||||
|
||||
approved_by = [
|
||||
_get_user_name(p.get("user", {})) for p in participants if p.get("approved")
|
||||
]
|
||||
|
||||
primary_owner = None
|
||||
if author:
|
||||
primary_owner = BasicExpertInfo(
|
||||
display_name=_get_user_name(author),
|
||||
)
|
||||
|
||||
# secondary_owners = [
|
||||
# BasicExpertInfo(display_name=_get_user_name(r)) for r in reviewers
|
||||
# ] or None
|
||||
|
||||
reviewer_names = [_get_user_name(r) for r in reviewers]
|
||||
|
||||
# Create a concise summary of key PR info
|
||||
created_date = created_on.split("T")[0] if created_on else "N/A"
|
||||
updated_date = updated_on.split("T")[0] if updated_on else "N/A"
|
||||
content_text = (
|
||||
"Pull Request Information:\n"
|
||||
f"- Pull Request ID: {pr_id}\n"
|
||||
f"- Title: {title}\n"
|
||||
f"- State: {state or 'N/A'} {'(Draft)' if draft else ''}\n"
|
||||
)
|
||||
if state == "DECLINED":
|
||||
content_text += f"- Reason: {pr.get('reason', 'N/A')}\n"
|
||||
content_text += (
|
||||
f"- Author: {_get_user_name(author) if author else 'N/A'}\n"
|
||||
f"- Reviewers: {', '.join(reviewer_names) if reviewer_names else 'N/A'}\n"
|
||||
f"- Branch: {source_branch} -> {destination_branch}\n"
|
||||
f"- Created: {created_date}\n"
|
||||
f"- Updated: {updated_date}"
|
||||
)
|
||||
if description:
|
||||
content_text += f"\n\nDescription:\n{description}"
|
||||
|
||||
metadata: dict[str, str | list[str]] = {
|
||||
"object_type": "PullRequest",
|
||||
"workspace": workspace,
|
||||
"repository": repo_slug,
|
||||
"pr_key": f"{workspace}/{repo_slug}#{pr_id}",
|
||||
"id": str(pr_id),
|
||||
"title": title,
|
||||
"state": state or "",
|
||||
"draft": str(bool(draft)),
|
||||
"link": link,
|
||||
"author": _get_user_name(author) if author else "",
|
||||
"reviewers": reviewer_names,
|
||||
"approved_by": approved_by,
|
||||
"comment_count": str(pr.get("comment_count", "")),
|
||||
"task_count": str(pr.get("task_count", "")),
|
||||
"created_on": created_on or "",
|
||||
"updated_on": updated_on or "",
|
||||
"source_branch": source_branch,
|
||||
"destination_branch": destination_branch,
|
||||
"closed_by": (
|
||||
_get_user_name(pr.get("closed_by", {})) if pr.get("closed_by") else ""
|
||||
),
|
||||
"close_source_branch": str(bool(pr.get("close_source_branch", False))),
|
||||
}
|
||||
|
||||
name = sanitize_filename(title, "md")
|
||||
|
||||
return Document(
|
||||
id=f"{DocumentSource.BITBUCKET.value}:{workspace}:{repo_slug}:pr:{pr_id}",
|
||||
blob=content_text.encode("utf-8"),
|
||||
source=DocumentSource.BITBUCKET,
|
||||
extension=".md",
|
||||
semantic_identifier=f"#{pr_id}: {name}",
|
||||
size_bytes=len(content_text.encode("utf-8")),
|
||||
doc_updated_at=updated_dt,
|
||||
primary_owners=[primary_owner] if primary_owner else None,
|
||||
# secondary_owners=secondary_owners,
|
||||
metadata=metadata,
|
||||
)
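# Illustrative sketch (placeholder workspace/repo/PR values): the identifier
# fields the mapper derives from a minimal PR payload.
#
#   pr = {"id": 7, "title": "Fix typo", "state": "MERGED", "author": {"display_name": "Ada"}}
#   doc = map_pr_to_document(pr, workspace="acme", repo_slug="demo-repo")
#   doc.id                   # "bitbucket:acme:demo-repo:pr:7"
#   doc.semantic_identifier  # "#7: Fix typo.md"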
|
||||
|
||||
|
||||
def _get_user_name(user: dict[str, Any]) -> str:
|
||||
return user.get("display_name") or user.get("nickname") or "unknown"
|
||||
@ -13,6 +13,9 @@ def get_current_tz_offset() -> int:
|
||||
return round(time_diff.total_seconds() / 3600)
|
||||
|
||||
|
||||
# Default request timeout, mostly used by connectors
|
||||
REQUEST_TIMEOUT_SECONDS = int(os.environ.get("REQUEST_TIMEOUT_SECONDS") or 60)
|
||||
|
||||
ONE_MINUTE = 60
|
||||
ONE_HOUR = 3600
|
||||
ONE_DAY = ONE_HOUR * 24
|
||||
@ -57,8 +60,11 @@ class DocumentSource(str, Enum):
|
||||
ASANA = "asana"
|
||||
GITHUB = "github"
|
||||
GITLAB = "gitlab"
|
||||
IMAP = "imap"
|
||||
BITBUCKET = "bitbucket"
|
||||
ZENDESK = "zendesk"
|
||||
|
||||
|
||||
|
||||
class FileOrigin(str, Enum):
|
||||
"""File origins"""
|
||||
CONNECTOR = "connector"
|
||||
@ -266,6 +272,14 @@ ASANA_CONNECTOR_SIZE_THRESHOLD = int(
|
||||
os.environ.get("ASANA_CONNECTOR_SIZE_THRESHOLD", 10 * 1024 * 1024)
|
||||
)
|
||||
|
||||
IMAP_CONNECTOR_SIZE_THRESHOLD = int(
|
||||
os.environ.get("IMAP_CONNECTOR_SIZE_THRESHOLD", 10 * 1024 * 1024)
|
||||
)
|
||||
|
||||
ZENDESK_CONNECTOR_SKIP_ARTICLE_LABELS = os.environ.get(
|
||||
"ZENDESK_CONNECTOR_SKIP_ARTICLE_LABELS", ""
|
||||
).split(",")
|
||||
|
||||
_USER_NOT_FOUND = "Unknown Confluence User"
|
||||
|
||||
_COMMENT_EXPANSION_FIELDS = ["body.storage.value"]
|
||||
|
||||
common/data_source/cross_connector_utils/rate_limit_wrapper.py (new file)
@ -0,0 +1,126 @@
|
||||
import time
|
||||
import logging
|
||||
from collections.abc import Callable
|
||||
from functools import wraps
|
||||
from typing import Any
|
||||
from typing import cast
|
||||
from typing import TypeVar
|
||||
|
||||
import requests
|
||||
|
||||
F = TypeVar("F", bound=Callable[..., Any])
|
||||
|
||||
|
||||
class RateLimitTriedTooManyTimesError(Exception):
|
||||
pass
|
||||
|
||||
|
||||
class _RateLimitDecorator:
|
||||
"""Builds a generic wrapper/decorator for calls to external APIs that
|
||||
prevents making more than `max_calls` requests per `period`
|
||||
|
||||
Implementation inspired by the `ratelimit` library:
|
||||
https://github.com/tomasbasham/ratelimit.
|
||||
|
||||
NOTE: is not thread safe.
|
||||
"""
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
max_calls: int,
|
||||
period: float, # in seconds
|
||||
sleep_time: float = 2, # in seconds
|
||||
sleep_backoff: float = 2, # applies exponential backoff
|
||||
max_num_sleep: int = 0,
|
||||
):
|
||||
self.max_calls = max_calls
|
||||
self.period = period
|
||||
self.sleep_time = sleep_time
|
||||
self.sleep_backoff = sleep_backoff
|
||||
self.max_num_sleep = max_num_sleep
|
||||
|
||||
self.call_history: list[float] = []
|
||||
self.curr_calls = 0
|
||||
|
||||
def __call__(self, func: F) -> F:
|
||||
@wraps(func)
|
||||
def wrapped_func(*args: list, **kwargs: dict[str, Any]) -> Any:
|
||||
# cleanup calls which are no longer relevant
|
||||
self._cleanup()
|
||||
|
||||
# check if we've exceeded the rate limit
|
||||
sleep_cnt = 0
|
||||
while len(self.call_history) == self.max_calls:
|
||||
sleep_time = self.sleep_time * (self.sleep_backoff**sleep_cnt)
|
||||
logging.warning(
|
||||
f"Rate limit exceeded for function {func.__name__}. "
|
||||
f"Waiting {sleep_time} seconds before retrying."
|
||||
)
|
||||
time.sleep(sleep_time)
|
||||
sleep_cnt += 1
|
||||
if self.max_num_sleep != 0 and sleep_cnt >= self.max_num_sleep:
|
||||
raise RateLimitTriedTooManyTimesError(
|
||||
f"Exceeded '{self.max_num_sleep}' retries for function '{func.__name__}'"
|
||||
)
|
||||
|
||||
self._cleanup()
|
||||
|
||||
# add the current call to the call history
|
||||
self.call_history.append(time.monotonic())
|
||||
return func(*args, **kwargs)
|
||||
|
||||
return cast(F, wrapped_func)
|
||||
|
||||
def _cleanup(self) -> None:
|
||||
curr_time = time.monotonic()
|
||||
time_to_expire_before = curr_time - self.period
|
||||
self.call_history = [
|
||||
call_time
|
||||
for call_time in self.call_history
|
||||
if call_time > time_to_expire_before
|
||||
]
|
||||
|
||||
|
||||
rate_limit_builder = _RateLimitDecorator
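# Illustrative sketch: limiting a caller to 60 requests per minute. The
# decorated function below is a placeholder, not part of this module.
#
#   @rate_limit_builder(max_calls=60, period=60)
#   def fetch_page(url: str) -> requests.Response:
#       return requests.get(url, timeout=30)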
|
||||
|
||||
|
||||
"""If you want to allow the external service to tell you when you've hit the rate limit,
|
||||
use the following instead"""
|
||||
|
||||
R = TypeVar("R", bound=Callable[..., requests.Response])
|
||||
|
||||
|
||||
def wrap_request_to_handle_ratelimiting(
|
||||
request_fn: R, default_wait_time_sec: int = 30, max_waits: int = 30
|
||||
) -> R:
|
||||
def wrapped_request(*args: list, **kwargs: dict[str, Any]) -> requests.Response:
|
||||
for _ in range(max_waits):
|
||||
response = request_fn(*args, **kwargs)
|
||||
if response.status_code == 429:
|
||||
try:
|
||||
wait_time = int(
|
||||
response.headers.get("Retry-After", default_wait_time_sec)
|
||||
)
|
||||
except ValueError:
|
||||
wait_time = default_wait_time_sec
|
||||
|
||||
time.sleep(wait_time)
|
||||
continue
|
||||
|
||||
return response
|
||||
|
||||
raise RateLimitTriedTooManyTimesError(f"Exceeded '{max_waits}' retries")
|
||||
|
||||
return cast(R, wrapped_request)
|
||||
|
||||
|
||||
_rate_limited_get = wrap_request_to_handle_ratelimiting(requests.get)
|
||||
_rate_limited_post = wrap_request_to_handle_ratelimiting(requests.post)
|
||||
|
||||
|
||||
class _RateLimitedRequest:
|
||||
get = _rate_limited_get
|
||||
post = _rate_limited_post
|
||||
|
||||
|
||||
rl_requests = _RateLimitedRequest
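# Illustrative sketch (placeholder URL): rl_requests can stand in for the
# `requests` calls it wraps when the remote service signals rate limits itself
# via 429 + Retry-After.
#
#   resp = rl_requests.get("https://api.example.com/items", timeout=30)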
|
||||
common/data_source/cross_connector_utils/retry_wrapper.py (new file)
@ -0,0 +1,88 @@
|
||||
from collections.abc import Callable
|
||||
import logging
|
||||
from logging import Logger
|
||||
from typing import Any
|
||||
from typing import cast
|
||||
from typing import TypeVar
|
||||
import requests
|
||||
from retry import retry
|
||||
|
||||
from common.data_source.config import REQUEST_TIMEOUT_SECONDS
|
||||
|
||||
|
||||
F = TypeVar("F", bound=Callable[..., Any])
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
def retry_builder(
|
||||
tries: int = 20,
|
||||
delay: float = 0.1,
|
||||
max_delay: float | None = 60,
|
||||
backoff: float = 2,
|
||||
jitter: tuple[float, float] | float = 1,
|
||||
exceptions: type[Exception] | tuple[type[Exception], ...] = (Exception,),
|
||||
) -> Callable[[F], F]:
|
||||
"""Builds a generic wrapper/decorator for calls to external APIs that
|
||||
may fail due to rate limiting, flakes, or other reasons. Applies exponential
|
||||
backoff with jitter to retry the call."""
|
||||
|
||||
def retry_with_default(func: F) -> F:
|
||||
@retry(
|
||||
tries=tries,
|
||||
delay=delay,
|
||||
max_delay=max_delay,
|
||||
backoff=backoff,
|
||||
jitter=jitter,
|
||||
logger=cast(Logger, logger),
|
||||
exceptions=exceptions,
|
||||
)
|
||||
def wrapped_func(*args: list, **kwargs: dict[str, Any]) -> Any:
|
||||
return func(*args, **kwargs)
|
||||
|
||||
return cast(F, wrapped_func)
|
||||
|
||||
return retry_with_default
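# Illustrative sketch: wrapping a flaky HTTP call, mirroring how bitbucket_get
# stacks retry_builder elsewhere in this change. The function below is a
# placeholder, not part of this module.
#
#   @retry_builder(tries=6, delay=1, backoff=2, max_delay=30,
#                  exceptions=(requests.RequestException,))
#   def fetch_with_retries(url: str) -> requests.Response:
#       response = requests.get(url, timeout=REQUEST_TIMEOUT_SECONDS)
#       response.raise_for_status()
#       return response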
|
||||
|
||||
|
||||
def request_with_retries(
|
||||
method: str,
|
||||
url: str,
|
||||
*,
|
||||
data: dict[str, Any] | None = None,
|
||||
headers: dict[str, Any] | None = None,
|
||||
params: dict[str, Any] | None = None,
|
||||
timeout: int = REQUEST_TIMEOUT_SECONDS,
|
||||
stream: bool = False,
|
||||
tries: int = 8,
|
||||
delay: float = 1,
|
||||
backoff: float = 2,
|
||||
) -> requests.Response:
|
||||
@retry(tries=tries, delay=delay, backoff=backoff, logger=cast(Logger, logger))
|
||||
def _make_request() -> requests.Response:
|
||||
response = requests.request(
|
||||
method=method,
|
||||
url=url,
|
||||
data=data,
|
||||
headers=headers,
|
||||
params=params,
|
||||
timeout=timeout,
|
||||
stream=stream,
|
||||
)
|
||||
try:
|
||||
response.raise_for_status()
|
||||
except requests.exceptions.HTTPError:
|
||||
logging.exception(
|
||||
"Request failed:\n%s",
|
||||
{
|
||||
"method": method,
|
||||
"url": url,
|
||||
"data": data,
|
||||
"headers": headers,
|
||||
"params": params,
|
||||
"timeout": timeout,
|
||||
"stream": stream,
|
||||
},
|
||||
)
|
||||
raise
|
||||
return response
|
||||
|
||||
return _make_request()
|
||||
@ -19,7 +19,7 @@ from github.PaginatedList import PaginatedList
|
||||
from github.PullRequest import PullRequest
|
||||
from pydantic import BaseModel
|
||||
from typing_extensions import override
|
||||
from common.data_source.google_util.util import sanitize_filename
|
||||
from common.data_source.utils import sanitize_filename
|
||||
from common.data_source.config import DocumentSource, GITHUB_CONNECTOR_BASE_URL
|
||||
from common.data_source.exceptions import (
|
||||
ConnectorMissingCredentialError,
|
||||
|
||||
@ -8,10 +8,10 @@ from common.data_source.config import INDEX_BATCH_SIZE, SLIM_BATCH_SIZE, Documen
|
||||
from common.data_source.google_util.auth import get_google_creds
|
||||
from common.data_source.google_util.constant import DB_CREDENTIALS_PRIMARY_ADMIN_KEY, MISSING_SCOPES_ERROR_STR, SCOPE_INSTRUCTIONS, USER_FIELDS
|
||||
from common.data_source.google_util.resource import get_admin_service, get_gmail_service
|
||||
from common.data_source.google_util.util import _execute_single_retrieval, execute_paginated_retrieval, sanitize_filename, clean_string
|
||||
from common.data_source.google_util.util import _execute_single_retrieval, execute_paginated_retrieval, clean_string
|
||||
from common.data_source.interfaces import LoadConnector, PollConnector, SecondsSinceUnixEpoch, SlimConnectorWithPermSync
|
||||
from common.data_source.models import BasicExpertInfo, Document, ExternalAccess, GenerateDocumentsOutput, GenerateSlimDocumentOutput, SlimDocument, TextSection
|
||||
from common.data_source.utils import build_time_range_query, clean_email_and_extract_name, get_message_body, is_mail_service_disabled_error, gmail_time_str_to_utc
|
||||
from common.data_source.utils import build_time_range_query, clean_email_and_extract_name, get_message_body, is_mail_service_disabled_error, gmail_time_str_to_utc, sanitize_filename
|
||||
|
||||
# Constants for Gmail API fields
|
||||
THREAD_LIST_FIELDS = "nextPageToken, threads(id)"
|
||||
|
||||
@ -191,42 +191,6 @@ def get_credentials_from_env(email: str, oauth: bool = False, source="drive") ->
|
||||
DB_CREDENTIALS_AUTHENTICATION_METHOD: "uploaded",
|
||||
}
|
||||
|
||||
def sanitize_filename(name: str, extension: str = "txt") -> str:
|
||||
"""
|
||||
Soft sanitize for MinIO/S3:
|
||||
- Replace only prohibited characters with a space.
|
||||
- Preserve readability (no ugly underscores).
|
||||
- Collapse multiple spaces.
|
||||
"""
|
||||
if name is None:
|
||||
return f"file.{extension}"
|
||||
|
||||
name = str(name).strip()
|
||||
|
||||
# Characters that MUST NOT appear in S3/MinIO object keys
|
||||
# Replace them with a space (not underscore)
|
||||
forbidden = r'[\\\?\#\%\*\:\|\<\>"]'
|
||||
name = re.sub(forbidden, " ", name)
|
||||
|
||||
# Replace slashes "/" (S3 interprets as folder) with space
|
||||
name = name.replace("/", " ")
|
||||
|
||||
# Collapse multiple spaces into one
|
||||
name = re.sub(r"\s+", " ", name)
|
||||
|
||||
# Trim both ends
|
||||
name = name.strip()
|
||||
|
||||
# Enforce reasonable max length
|
||||
if len(name) > 200:
|
||||
base, ext = os.path.splitext(name)
|
||||
name = base[:180].rstrip() + ext
|
||||
|
||||
if not os.path.splitext(name)[1]:
|
||||
name += f".{extension}"
|
||||
|
||||
return name
|
||||
|
||||
|
||||
def clean_string(text: str | None) -> str | None:
|
||||
"""
|
||||
|
||||
common/data_source/imap_connector.py (new file)
@ -0,0 +1,724 @@
|
||||
import copy
|
||||
import email
|
||||
from email.header import decode_header
|
||||
import imaplib
|
||||
import logging
|
||||
import os
|
||||
import re
import uuid
|
||||
from datetime import datetime, timedelta
|
||||
from datetime import timezone
|
||||
from email.message import Message
|
||||
from email.utils import collapse_rfc2231_value, parseaddr
|
||||
from enum import Enum
|
||||
from typing import Any
|
||||
from typing import cast
|
||||
|
||||
import bs4
|
||||
from pydantic import BaseModel
|
||||
|
||||
from common.data_source.config import IMAP_CONNECTOR_SIZE_THRESHOLD, DocumentSource
|
||||
from common.data_source.interfaces import CheckpointOutput, CheckpointedConnectorWithPermSync, CredentialsConnector, CredentialsProviderInterface
|
||||
from common.data_source.models import BasicExpertInfo, ConnectorCheckpoint, Document, ExternalAccess, SecondsSinceUnixEpoch
|
||||
|
||||
_DEFAULT_IMAP_PORT_NUMBER = int(os.environ.get("IMAP_PORT", 993))
|
||||
_IMAP_OKAY_STATUS = "OK"
|
||||
_PAGE_SIZE = 100
|
||||
_USERNAME_KEY = "imap_username"
|
||||
_PASSWORD_KEY = "imap_password"
|
||||
|
||||
class Header(str, Enum):
|
||||
SUBJECT_HEADER = "subject"
|
||||
FROM_HEADER = "from"
|
||||
TO_HEADER = "to"
|
||||
CC_HEADER = "cc"
|
||||
DELIVERED_TO_HEADER = (
|
||||
"Delivered-To" # Used in mailing lists instead of the "to" header.
|
||||
)
|
||||
DATE_HEADER = "date"
|
||||
MESSAGE_ID_HEADER = "Message-ID"
|
||||
|
||||
|
||||
class EmailHeaders(BaseModel):
|
||||
"""
|
||||
Model for email headers extracted from IMAP messages.
|
||||
"""
|
||||
|
||||
id: str
|
||||
subject: str
|
||||
sender: str
|
||||
recipients: str | None
|
||||
cc: str | None
|
||||
date: datetime
|
||||
|
||||
@classmethod
|
||||
def from_email_msg(cls, email_msg: Message) -> "EmailHeaders":
|
||||
def _decode(header: str, default: str | None = None) -> str | None:
|
||||
value = email_msg.get(header, default)
|
||||
if not value:
|
||||
return None
|
||||
|
||||
decoded_fragments = decode_header(value)
|
||||
decoded_strings: list[str] = []
|
||||
|
||||
for decoded_value, encoding in decoded_fragments:
|
||||
if isinstance(decoded_value, bytes):
|
||||
try:
|
||||
decoded_strings.append(
|
||||
decoded_value.decode(encoding or "utf-8", errors="replace")
|
||||
)
|
||||
except LookupError:
|
||||
decoded_strings.append(
|
||||
decoded_value.decode("utf-8", errors="replace")
|
||||
)
|
||||
elif isinstance(decoded_value, str):
|
||||
decoded_strings.append(decoded_value)
|
||||
else:
|
||||
decoded_strings.append(str(decoded_value))
|
||||
|
||||
return "".join(decoded_strings)
|
||||
|
||||
def _parse_date(date_str: str | None) -> datetime | None:
|
||||
if not date_str:
|
||||
return None
|
||||
try:
|
||||
return email.utils.parsedate_to_datetime(date_str)
|
||||
except (TypeError, ValueError):
|
||||
return None
|
||||
|
||||
message_id = _decode(header=Header.MESSAGE_ID_HEADER)
|
||||
if not message_id:
|
||||
message_id = f"<generated-{uuid.uuid4()}@imap.local>"
|
||||
# It's possible for the subject line to not exist or be an empty string.
|
||||
subject = _decode(header=Header.SUBJECT_HEADER) or "Unknown Subject"
|
||||
from_ = _decode(header=Header.FROM_HEADER)
|
||||
to = _decode(header=Header.TO_HEADER)
|
||||
if not to:
|
||||
to = _decode(header=Header.DELIVERED_TO_HEADER)
|
||||
cc = _decode(header=Header.CC_HEADER)
|
||||
date_str = _decode(header=Header.DATE_HEADER)
|
||||
date = _parse_date(date_str=date_str)
|
||||
|
||||
if not date:
|
||||
date = datetime.now(tz=timezone.utc)
|
||||
|
||||
# If any of the above are `None`, model validation will fail.
|
||||
# Therefore, no guards (i.e.: `if <header> is None: raise RuntimeError(..)`) were written.
|
||||
return cls.model_validate(
|
||||
{
|
||||
"id": message_id,
|
||||
"subject": subject,
|
||||
"sender": from_,
|
||||
"recipients": to,
|
||||
"cc": cc,
|
||||
"date": date,
|
||||
}
|
||||
)
|
||||
|
||||
class CurrentMailbox(BaseModel):
|
||||
mailbox: str
|
||||
todo_email_ids: list[str]
|
||||
|
||||
|
||||
# An email has a list of mailboxes.
|
||||
# Each mailbox has a list of email-ids inside of it.
|
||||
#
|
||||
# Usage:
|
||||
# To use this checkpointer, first fetch all the mailboxes.
|
||||
# Then, pop a mailbox and fetch all of its email-ids.
|
||||
# Then, pop each email-id and fetch its content (and parse it, etc..).
|
||||
# When you have popped all email-ids for this mailbox, pop the next mailbox and repeat the above process until you're done.
|
||||
#
|
||||
# For initial checkpointing, set both fields to `None`.
|
||||
class ImapCheckpoint(ConnectorCheckpoint):
|
||||
todo_mailboxes: list[str] | None = None
|
||||
current_mailbox: CurrentMailbox | None = None
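# Illustrative sketch of the lifecycle described above (mailbox names and ids
# are placeholders, quoted the way _sanitize_mailbox_names produces them):
#
#   ImapCheckpoint(has_more=True)                         # dummy checkpoint
#   ImapCheckpoint(has_more=True,
#                  todo_mailboxes=['"INBOX"', '"Sent"'])  # after mailbox discovery
#   ImapCheckpoint(has_more=True,
#                  todo_mailboxes=['"INBOX"'],
#                  current_mailbox=CurrentMailbox(
#                      mailbox='"Sent"',
#                      todo_email_ids=["101", "102"]))    # draining one mailbox page by page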
|
||||
|
||||
|
||||
class LoginState(str, Enum):
|
||||
LoggedIn = "logged_in"
|
||||
LoggedOut = "logged_out"
|
||||
|
||||
|
||||
class ImapConnector(
|
||||
CredentialsConnector,
|
||||
CheckpointedConnectorWithPermSync,
|
||||
):
|
||||
def __init__(
|
||||
self,
|
||||
host: str,
|
||||
port: int = _DEFAULT_IMAP_PORT_NUMBER,
|
||||
mailboxes: list[str] | None = None,
|
||||
) -> None:
|
||||
self._host = host
|
||||
self._port = port
|
||||
self._mailboxes = mailboxes
|
||||
self._credentials: dict[str, Any] | None = None
|
||||
|
||||
@property
|
||||
def credentials(self) -> dict[str, Any]:
|
||||
if not self._credentials:
|
||||
raise RuntimeError(
|
||||
"Credentials have not been initialized; call `set_credentials_provider` first"
|
||||
)
|
||||
return self._credentials
|
||||
|
||||
def _get_mail_client(self) -> imaplib.IMAP4_SSL:
|
||||
"""
|
||||
Returns a new `imaplib.IMAP4_SSL` instance.
|
||||
|
||||
The `imaplib.IMAP4_SSL` object is supposed to be an "ephemeral" object; it's not something that you can login,
|
||||
logout, then log back into again. I.e., the following will fail:
|
||||
|
||||
```py
|
||||
mail_client.login(..)
|
||||
mail_client.logout();
|
||||
mail_client.login(..)
|
||||
```
|
||||
|
||||
Therefore, you need a fresh, new instance in order to operate with IMAP. This function gives one to you.
|
||||
|
||||
# Notes
|
||||
This function will throw an error if the credentials have not yet been set.
|
||||
"""
|
||||
|
||||
def get_or_raise(name: str) -> str:
|
||||
value = self.credentials.get(name)
|
||||
if not value:
|
||||
raise RuntimeError(f"Credential item {name=} was not found")
|
||||
if not isinstance(value, str):
|
||||
raise RuntimeError(
|
||||
f"Credential item {name=} must be of type str, instead received {type(value)=}"
|
||||
)
|
||||
return value
|
||||
|
||||
username = get_or_raise(_USERNAME_KEY)
|
||||
password = get_or_raise(_PASSWORD_KEY)
|
||||
|
||||
mail_client = imaplib.IMAP4_SSL(host=self._host, port=self._port)
|
||||
status, _data = mail_client.login(user=username, password=password)
|
||||
|
||||
if status != _IMAP_OKAY_STATUS:
|
||||
raise RuntimeError(f"Failed to log into imap server; {status=}")
|
||||
|
||||
return mail_client
|
||||
|
||||
def _load_from_checkpoint(
|
||||
self,
|
||||
start: SecondsSinceUnixEpoch,
|
||||
end: SecondsSinceUnixEpoch,
|
||||
checkpoint: ImapCheckpoint,
|
||||
include_perm_sync: bool,
|
||||
) -> CheckpointOutput[ImapCheckpoint]:
|
||||
checkpoint = cast(ImapCheckpoint, copy.deepcopy(checkpoint))
|
||||
checkpoint.has_more = True
|
||||
|
||||
mail_client = self._get_mail_client()
|
||||
|
||||
if checkpoint.todo_mailboxes is None:
|
||||
# This is the dummy checkpoint.
|
||||
# Fill it with mailboxes first.
|
||||
if self._mailboxes:
|
||||
checkpoint.todo_mailboxes = _sanitize_mailbox_names(self._mailboxes)
|
||||
else:
|
||||
fetched_mailboxes = _fetch_all_mailboxes_for_email_account(
|
||||
mail_client=mail_client
|
||||
)
|
||||
if not fetched_mailboxes:
|
||||
raise RuntimeError(
|
||||
"Failed to find any mailboxes for this email account"
|
||||
)
|
||||
checkpoint.todo_mailboxes = _sanitize_mailbox_names(fetched_mailboxes)
|
||||
|
||||
return checkpoint
|
||||
|
||||
if (
|
||||
not checkpoint.current_mailbox
|
||||
or not checkpoint.current_mailbox.todo_email_ids
|
||||
):
|
||||
if not checkpoint.todo_mailboxes:
|
||||
checkpoint.has_more = False
|
||||
return checkpoint
|
||||
|
||||
mailbox = checkpoint.todo_mailboxes.pop()
|
||||
email_ids = _fetch_email_ids_in_mailbox(
|
||||
mail_client=mail_client,
|
||||
mailbox=mailbox,
|
||||
start=start,
|
||||
end=end,
|
||||
)
|
||||
checkpoint.current_mailbox = CurrentMailbox(
|
||||
mailbox=mailbox,
|
||||
todo_email_ids=email_ids,
|
||||
)
|
||||
|
||||
_select_mailbox(
|
||||
mail_client=mail_client, mailbox=checkpoint.current_mailbox.mailbox
|
||||
)
|
||||
current_todos = cast(
|
||||
list, copy.deepcopy(checkpoint.current_mailbox.todo_email_ids[:_PAGE_SIZE])
|
||||
)
|
||||
checkpoint.current_mailbox.todo_email_ids = (
|
||||
checkpoint.current_mailbox.todo_email_ids[_PAGE_SIZE:]
|
||||
)
|
||||
|
||||
for email_id in current_todos:
|
||||
email_msg = _fetch_email(mail_client=mail_client, email_id=email_id)
|
||||
if not email_msg:
|
||||
logging.warning(f"Failed to fetch message {email_id=}; skipping")
|
||||
continue
|
||||
|
||||
email_headers = EmailHeaders.from_email_msg(email_msg=email_msg)
|
||||
msg_dt = email_headers.date
|
||||
if msg_dt.tzinfo is None:
|
||||
msg_dt = msg_dt.replace(tzinfo=timezone.utc)
|
||||
else:
|
||||
msg_dt = msg_dt.astimezone(timezone.utc)
|
||||
|
||||
start_dt = datetime.fromtimestamp(start, tz=timezone.utc)
|
||||
end_dt = datetime.fromtimestamp(end, tz=timezone.utc)
|
||||
|
||||
if not (start_dt < msg_dt <= end_dt):
|
||||
continue
|
||||
|
||||
email_doc = _convert_email_headers_and_body_into_document(
|
||||
email_msg=email_msg,
|
||||
email_headers=email_headers,
|
||||
include_perm_sync=include_perm_sync,
|
||||
)
|
||||
yield email_doc
|
||||
attachments = extract_attachments(email_msg)
|
||||
for att in attachments:
|
||||
yield attachment_to_document(email_doc, att, email_headers)
|
||||
|
||||
return checkpoint
|
||||
|
||||
# impls for BaseConnector
|
||||
|
||||
def load_credentials(self, credentials: dict[str, Any]) -> dict[str, Any] | None:
|
||||
self._credentials = credentials
|
||||
return None
|
||||
|
||||
def validate_connector_settings(self) -> None:
|
||||
self._get_mail_client()
|
||||
|
||||
# impls for CredentialsConnector
|
||||
|
||||
def set_credentials_provider(
|
||||
self, credentials_provider: CredentialsProviderInterface
|
||||
) -> None:
|
||||
self._credentials = credentials_provider.get_credentials()
|
||||
|
||||
# impls for CheckpointedConnector
|
||||
|
||||
def load_from_checkpoint(
|
||||
self,
|
||||
start: SecondsSinceUnixEpoch,
|
||||
end: SecondsSinceUnixEpoch,
|
||||
checkpoint: ImapCheckpoint,
|
||||
) -> CheckpointOutput[ImapCheckpoint]:
|
||||
return self._load_from_checkpoint(
|
||||
start=start, end=end, checkpoint=checkpoint, include_perm_sync=False
|
||||
)
|
||||
|
||||
def build_dummy_checkpoint(self) -> ImapCheckpoint:
|
||||
return ImapCheckpoint(has_more=True)
|
||||
|
||||
def validate_checkpoint_json(self, checkpoint_json: str) -> ImapCheckpoint:
|
||||
return ImapCheckpoint.model_validate_json(json_data=checkpoint_json)
|
||||
|
||||
# impls for CheckpointedConnectorWithPermSync
|
||||
|
||||
def load_from_checkpoint_with_perm_sync(
|
||||
self,
|
||||
start: SecondsSinceUnixEpoch,
|
||||
end: SecondsSinceUnixEpoch,
|
||||
checkpoint: ImapCheckpoint,
|
||||
) -> CheckpointOutput[ImapCheckpoint]:
|
||||
return self._load_from_checkpoint(
|
||||
start=start, end=end, checkpoint=checkpoint, include_perm_sync=True
|
||||
)
|
||||
|
||||
|
||||
def _fetch_all_mailboxes_for_email_account(mail_client: imaplib.IMAP4_SSL) -> list[str]:
|
||||
status, mailboxes_data = mail_client.list('""', "*")
|
||||
if status != _IMAP_OKAY_STATUS:
|
||||
raise RuntimeError(f"Failed to fetch mailboxes; {status=}")
|
||||
|
||||
mailboxes = []
|
||||
|
||||
for mailboxes_raw in mailboxes_data:
|
||||
if isinstance(mailboxes_raw, bytes):
|
||||
mailboxes_str = mailboxes_raw.decode()
|
||||
elif isinstance(mailboxes_raw, str):
|
||||
mailboxes_str = mailboxes_raw
|
||||
else:
|
||||
logging.warning(
|
||||
f"Expected the mailbox data to be of type bytes or str, instead got {type(mailboxes_raw)=} {mailboxes_raw}; skipping"
|
||||
)
|
||||
continue
|
||||
|
||||
# The mailbox LIST response output can be found here:
|
||||
# https://www.rfc-editor.org/rfc/rfc3501.html#section-7.2.2
|
||||
#
|
||||
# The general format is:
|
||||
# `(<name-attributes>) <hierarchy-delimiter> <mailbox-name>`
|
||||
#
|
||||
# The regex below matches that pattern; from there we take the second capture group, match.group(2), which is the mailbox-name.
|
||||
match = re.match(r'\([^)]*\)\s+"([^"]+)"\s+"?(.+?)"?$', mailboxes_str)
|
||||
if not match:
|
||||
logging.warning(
|
||||
f"Invalid mailbox-data formatting structure: {mailboxes_str=}; skipping"
|
||||
)
|
||||
continue
|
||||
|
||||
mailbox = match.group(2)
|
||||
mailboxes.append(mailbox)
|
||||
if not mailboxes:
|
||||
logging.warning(
|
||||
"No mailboxes parsed from LIST response; falling back to INBOX"
|
||||
)
|
||||
return ["INBOX"]
|
||||
|
||||
return mailboxes
|
||||
|
||||
|
||||
def _select_mailbox(mail_client: imaplib.IMAP4_SSL, mailbox: str) -> bool:
|
||||
try:
|
||||
status, _ = mail_client.select(mailbox=mailbox, readonly=True)
|
||||
if status != _IMAP_OKAY_STATUS:
|
||||
return False
|
||||
return True
|
||||
except Exception:
|
||||
return False
|
||||
|
||||
|
||||
def _fetch_email_ids_in_mailbox(
|
||||
mail_client: imaplib.IMAP4_SSL,
|
||||
mailbox: str,
|
||||
start: SecondsSinceUnixEpoch,
|
||||
end: SecondsSinceUnixEpoch,
|
||||
) -> list[str]:
|
||||
if not _select_mailbox(mail_client, mailbox):
|
||||
logging.warning(f"Skip mailbox: {mailbox}")
|
||||
return []
|
||||
|
||||
start_dt = datetime.fromtimestamp(start, tz=timezone.utc)
|
||||
end_dt = datetime.fromtimestamp(end, tz=timezone.utc) + timedelta(days=1)
|
||||
|
||||
start_str = start_dt.strftime("%d-%b-%Y")
|
||||
end_str = end_dt.strftime("%d-%b-%Y")
|
||||
search_criteria = f'(SINCE "{start_str}" BEFORE "{end_str}")'
|
||||
|
||||
status, email_ids_byte_array = mail_client.search(None, search_criteria)
|
||||
|
||||
if status != _IMAP_OKAY_STATUS or not email_ids_byte_array:
|
||||
raise RuntimeError(f"Failed to fetch email ids; {status=}")
|
||||
|
||||
email_ids: bytes = email_ids_byte_array[0]
|
||||
|
||||
return [email_id.decode() for email_id in email_ids.split()]
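# Illustrative sketch (placeholder dates): the SEARCH criteria produced for a
# one-day epoch window. The extra day added to end_dt compensates for IMAP's
# date-only granularity; the per-message datetime filter upstream narrows the
# results back to the exact window.
#
#   start = datetime(2024, 1, 1, tzinfo=timezone.utc).timestamp()
#   end = datetime(2024, 1, 2, tzinfo=timezone.utc).timestamp()
#   # search_criteria == '(SINCE "01-Jan-2024" BEFORE "03-Jan-2024")'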
|
||||
|
||||
|
||||
def _fetch_email(mail_client: imaplib.IMAP4_SSL, email_id: str) -> Message | None:
|
||||
status, msg_data = mail_client.fetch(message_set=email_id, message_parts="(RFC822)")
|
||||
if status != _IMAP_OKAY_STATUS or not msg_data:
|
||||
return None
|
||||
|
||||
data = msg_data[0]
|
||||
if not isinstance(data, tuple):
|
||||
raise RuntimeError(
|
||||
f"Message data should be a tuple; instead got a {type(data)=} {data=}"
|
||||
)
|
||||
|
||||
_, raw_email = data
|
||||
return email.message_from_bytes(raw_email)
|
||||
|
||||
|
||||
def _convert_email_headers_and_body_into_document(
|
||||
email_msg: Message,
|
||||
email_headers: EmailHeaders,
|
||||
include_perm_sync: bool,
|
||||
) -> Document:
|
||||
sender_name, sender_addr = _parse_singular_addr(raw_header=email_headers.sender)
|
||||
to_addrs = (
|
||||
_parse_addrs(email_headers.recipients)
|
||||
if email_headers.recipients
|
||||
else []
|
||||
)
|
||||
cc_addrs = (
|
||||
_parse_addrs(email_headers.cc)
|
||||
if email_headers.cc
|
||||
else []
|
||||
)
|
||||
all_participants = to_addrs + cc_addrs
|
||||
|
||||
expert_info_map = {
|
||||
recipient_addr: BasicExpertInfo(
|
||||
display_name=recipient_name, email=recipient_addr
|
||||
)
|
||||
for recipient_name, recipient_addr in all_participants
|
||||
}
|
||||
if sender_addr not in expert_info_map:
|
||||
expert_info_map[sender_addr] = BasicExpertInfo(
|
||||
display_name=sender_name, email=sender_addr
|
||||
)
|
||||
|
||||
email_body = _parse_email_body(email_msg=email_msg, email_headers=email_headers)
|
||||
primary_owners = list(expert_info_map.values())
|
||||
external_access = (
|
||||
ExternalAccess(
|
||||
external_user_emails=set(expert_info_map.keys()),
|
||||
external_user_group_ids=set(),
|
||||
is_public=False,
|
||||
)
|
||||
if include_perm_sync
|
||||
else None
|
||||
)
|
||||
return Document(
|
||||
id=email_headers.id,
|
||||
title=email_headers.subject,
|
||||
blob=email_body,
|
||||
size_bytes=len(email_body),
|
||||
semantic_identifier=email_headers.subject,
|
||||
metadata={},
|
||||
extension='.txt',
|
||||
doc_updated_at=email_headers.date,
|
||||
source=DocumentSource.IMAP,
|
||||
primary_owners=primary_owners,
|
||||
external_access=external_access,
|
||||
)
|
||||
|
||||
def extract_attachments(email_msg: Message, max_bytes: int = IMAP_CONNECTOR_SIZE_THRESHOLD):
|
||||
attachments = []
|
||||
|
||||
if not email_msg.is_multipart():
|
||||
return attachments
|
||||
|
||||
for part in email_msg.walk():
|
||||
if part.get_content_maintype() == "multipart":
|
||||
continue
|
||||
|
||||
disposition = (part.get("Content-Disposition") or "").lower()
|
||||
filename = part.get_filename()
|
||||
|
||||
if not (
|
||||
disposition.startswith("attachment")
|
||||
or (disposition.startswith("inline") and filename)
|
||||
):
|
||||
continue
|
||||
|
||||
payload = part.get_payload(decode=True)
|
||||
if not payload:
|
||||
continue
|
||||
|
||||
if len(payload) > max_bytes:
|
||||
continue
|
||||
|
||||
attachments.append({
|
||||
"filename": filename or "attachment.bin",
|
||||
"content_type": part.get_content_type(),
|
||||
"content_bytes": payload,
|
||||
"size_bytes": len(payload),
|
||||
})
|
||||
|
||||
return attachments
|
||||
|
||||
def decode_mime_filename(raw: str | None) -> str | None:
|
||||
if not raw:
|
||||
return None
|
||||
|
||||
try:
|
||||
raw = collapse_rfc2231_value(raw)
|
||||
except Exception:
|
||||
pass
|
||||
|
||||
parts = decode_header(raw)
|
||||
decoded = []
|
||||
|
||||
for value, encoding in parts:
|
||||
if isinstance(value, bytes):
|
||||
decoded.append(value.decode(encoding or "utf-8", errors="replace"))
|
||||
else:
|
||||
decoded.append(value)
|
||||
|
||||
return "".join(decoded)
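# Illustrative sketch (placeholder value): decoding an RFC 2047 encoded-word
# filename.
#
#   decode_mime_filename("=?utf-8?B?cmVwb3J0LnBkZg==?=")   # -> "report.pdf"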
|
||||
|
||||
def attachment_to_document(
|
||||
parent_doc: Document,
|
||||
att: dict,
|
||||
email_headers: EmailHeaders,
|
||||
):
|
||||
raw_filename = att["filename"]
|
||||
filename = decode_mime_filename(raw_filename) or "attachment.bin"
|
||||
ext = "." + filename.split(".")[-1] if "." in filename else ""
|
||||
|
||||
return Document(
|
||||
id=f"{parent_doc.id}#att:{filename}",
|
||||
source=DocumentSource.IMAP,
|
||||
semantic_identifier=filename,
|
||||
extension=ext,
|
||||
blob=att["content_bytes"],
|
||||
size_bytes=att["size_bytes"],
|
||||
doc_updated_at=email_headers.date,
|
||||
primary_owners=parent_doc.primary_owners,
|
||||
metadata={
|
||||
"parent_email_id": parent_doc.id,
|
||||
"parent_subject": email_headers.subject,
|
||||
"attachment_filename": filename,
|
||||
"attachment_content_type": att["content_type"],
|
||||
},
|
||||
)
|
||||
|
||||
def _parse_email_body(
|
||||
email_msg: Message,
|
||||
email_headers: EmailHeaders,
|
||||
) -> str:
|
||||
body = None
|
||||
for part in email_msg.walk():
|
||||
if part.is_multipart():
|
||||
# Multipart parts are *containers* for other parts, not the actual content itself.
|
||||
# Therefore, we skip until we find the individual parts instead.
|
||||
continue
|
||||
|
||||
charset = part.get_content_charset() or "utf-8"
|
||||
|
||||
try:
|
||||
raw_payload = part.get_payload(decode=True)
|
||||
if not isinstance(raw_payload, bytes):
|
||||
logging.warning(
|
||||
"Payload section from email was expected to be an array of bytes, instead got "
|
||||
f"{type(raw_payload)=}, {raw_payload=}"
|
||||
)
|
||||
continue
|
||||
body = raw_payload.decode(charset)
|
||||
break
|
||||
except (UnicodeDecodeError, LookupError) as e:
|
||||
logging.warning(f"Could not decode part with charset {charset}. Error: {e}")
|
||||
continue
|
||||
|
||||
if not body:
|
||||
logging.warning(
|
||||
f"Email with {email_headers.id=} has an empty body; returning an empty string"
|
||||
)
|
||||
return ""
|
||||
|
||||
soup = bs4.BeautifulSoup(markup=body, features="html.parser")
|
||||
|
||||
return " ".join(str_section for str_section in soup.stripped_strings)
|
||||
|
||||
|
||||
def _sanitize_mailbox_names(mailboxes: list[str]) -> list[str]:
|
||||
"""
|
||||
Mailboxes with special characters in them must be enclosed by double-quotes, as per the IMAP protocol.
|
||||
Just to be safe, we wrap *all* mailboxes with double-quotes.
|
||||
"""
|
||||
return [f'"{mailbox}"' for mailbox in mailboxes if mailbox]
|
||||
|
||||
|
||||
def _parse_addrs(raw_header: str) -> list[tuple[str, str]]:
|
||||
addrs = raw_header.split(",")
|
||||
name_addr_pairs = [parseaddr(addr=addr) for addr in addrs if addr]
|
||||
return [(name, addr) for name, addr in name_addr_pairs if addr]
|
||||
|
||||
|
||||
def _parse_singular_addr(raw_header: str) -> tuple[str, str]:
|
||||
addrs = _parse_addrs(raw_header=raw_header)
|
||||
if not addrs:
|
||||
return ("Unknown", "unknown@example.com")
|
||||
elif len(addrs) >= 2:
|
||||
raise RuntimeError(
|
||||
f"Expected a singular address, but instead got multiple; {raw_header=} {addrs=}"
|
||||
)
|
||||
|
||||
return addrs[0]
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
import time
|
||||
import uuid
|
||||
from types import TracebackType
|
||||
from common.data_source.utils import load_all_docs_from_checkpoint_connector
|
||||
|
||||
|
||||
class OnyxStaticCredentialsProvider(
|
||||
CredentialsProviderInterface["OnyxStaticCredentialsProvider"]
|
||||
):
|
||||
"""Implementation (a very simple one!) to handle static credentials."""
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
tenant_id: str | None,
|
||||
connector_name: str,
|
||||
credential_json: dict[str, Any],
|
||||
):
|
||||
self._tenant_id = tenant_id
|
||||
self._connector_name = connector_name
|
||||
self._credential_json = credential_json
|
||||
|
||||
self._provider_key = str(uuid.uuid4())
|
||||
|
||||
def __enter__(self) -> "OnyxStaticCredentialsProvider":
|
||||
return self
|
||||
|
||||
def __exit__(
|
||||
self,
|
||||
exc_type: type[BaseException] | None,
|
||||
exc_value: BaseException | None,
|
||||
traceback: TracebackType | None,
|
||||
) -> None:
|
||||
pass
|
||||
|
||||
def get_tenant_id(self) -> str | None:
|
||||
return self._tenant_id
|
||||
|
||||
def get_provider_key(self) -> str:
|
||||
return self._provider_key
|
||||
|
||||
def get_credentials(self) -> dict[str, Any]:
|
||||
return self._credential_json
|
||||
|
||||
def set_credentials(self, credential_json: dict[str, Any]) -> None:
|
||||
self._credential_json = credential_json
|
||||
|
||||
def is_dynamic(self) -> bool:
|
||||
return False
|
||||
# from tests.daily.connectors.utils import load_all_docs_from_checkpoint_connector
|
||||
# from onyx.connectors.credentials_provider import OnyxStaticCredentialsProvider
|
||||
|
||||
host = os.environ.get("IMAP_HOST")
|
||||
mailboxes_str = os.environ.get("IMAP_MAILBOXES","INBOX")
|
||||
username = os.environ.get("IMAP_USERNAME")
|
||||
password = os.environ.get("IMAP_PASSWORD")
|
||||
|
||||
mailboxes = (
|
||||
[mailbox.strip() for mailbox in mailboxes_str.split(",")]
|
||||
if mailboxes_str
|
||||
else []
|
||||
)
|
||||
|
||||
if not host:
|
||||
raise RuntimeError("`IMAP_HOST` must be set")
|
||||
|
||||
imap_connector = ImapConnector(
|
||||
host=host,
|
||||
mailboxes=mailboxes,
|
||||
)
|
||||
|
||||
imap_connector.set_credentials_provider(
|
||||
OnyxStaticCredentialsProvider(
|
||||
tenant_id=None,
|
||||
connector_name=DocumentSource.IMAP,
|
||||
credential_json={
|
||||
_USERNAME_KEY: username,
|
||||
_PASSWORD_KEY: password,
|
||||
},
|
||||
)
|
||||
)
|
||||
END = time.time()
|
||||
START = END - 1 * 24 * 60 * 60
|
||||
for doc in load_all_docs_from_checkpoint_connector(
|
||||
connector=imap_connector,
|
||||
start=START,
|
||||
end=END,
|
||||
):
|
||||
print(doc.id,doc.extension)
|
||||
@ -1149,3 +1149,137 @@ def parallel_yield(gens: list[Iterator[R]], max_workers: int = 10) -> Iterator[R
|
||||
future_to_index[executor.submit(_next_or_none, ind, gens[ind])] = next_ind
|
||||
next_ind += 1
|
||||
del future_to_index[future]
|
||||
|
||||
|
||||
def sanitize_filename(name: str, extension: str = "txt") -> str:
|
||||
"""
|
||||
Soft sanitize for MinIO/S3:
|
||||
- Replace only prohibited characters with a space.
|
||||
- Preserve readability (no ugly underscores).
|
||||
- Collapse multiple spaces.
|
||||
"""
|
||||
if name is None:
|
||||
return f"file.{extension}"
|
||||
|
||||
name = str(name).strip()
|
||||
|
||||
# Characters that MUST NOT appear in S3/MinIO object keys
|
||||
# Replace them with a space (not underscore)
|
||||
forbidden = r'[\\\?\#\%\*\:\|\<\>"]'
|
||||
name = re.sub(forbidden, " ", name)
|
||||
|
||||
# Replace slashes "/" (S3 interprets as folder) with space
|
||||
name = name.replace("/", " ")
|
||||
|
||||
# Collapse multiple spaces into one
|
||||
name = re.sub(r"\s+", " ", name)
|
||||
|
||||
# Trim both ends
|
||||
name = name.strip()
|
||||
|
||||
# Enforce reasonable max length
|
||||
if len(name) > 200:
|
||||
base, ext = os.path.splitext(name)
|
||||
name = base[:180].rstrip() + ext
|
||||
|
||||
if not os.path.splitext(name)[1]:
|
||||
name += f".{extension}"
|
||||
|
||||
return name
|
||||
F = TypeVar("F", bound=Callable[..., Any])
|
||||
|
||||
class _RateLimitDecorator:
|
||||
"""Builds a generic wrapper/decorator for calls to external APIs that
|
||||
prevents making more than `max_calls` requests per `period`
|
||||
|
||||
Implementation inspired by the `ratelimit` library:
|
||||
https://github.com/tomasbasham/ratelimit.
|
||||
|
||||
NOTE: is not thread safe.
|
||||
"""
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
max_calls: int,
|
||||
period: float, # in seconds
|
||||
sleep_time: float = 2, # in seconds
|
||||
sleep_backoff: float = 2, # applies exponential backoff
|
||||
max_num_sleep: int = 0,
|
||||
):
|
||||
self.max_calls = max_calls
|
||||
self.period = period
|
||||
self.sleep_time = sleep_time
|
||||
self.sleep_backoff = sleep_backoff
|
||||
self.max_num_sleep = max_num_sleep
|
||||
|
||||
self.call_history: list[float] = []
|
||||
self.curr_calls = 0
|
||||
|
||||
def __call__(self, func: F) -> F:
|
||||
@wraps(func)
|
||||
def wrapped_func(*args: list, **kwargs: dict[str, Any]) -> Any:
|
||||
# cleanup calls which are no longer relevant
|
||||
self._cleanup()
|
||||
|
||||
# check if we've exceeded the rate limit
|
||||
sleep_cnt = 0
|
||||
while len(self.call_history) == self.max_calls:
|
||||
sleep_time = self.sleep_time * (self.sleep_backoff**sleep_cnt)
|
||||
logging.warning(
|
||||
f"Rate limit exceeded for function {func.__name__}. "
|
||||
f"Waiting {sleep_time} seconds before retrying."
|
||||
)
|
||||
time.sleep(sleep_time)
|
||||
sleep_cnt += 1
|
||||
if self.max_num_sleep != 0 and sleep_cnt >= self.max_num_sleep:
|
||||
raise RateLimitTriedTooManyTimesError(
|
||||
f"Exceeded '{self.max_num_sleep}' retries for function '{func.__name__}'"
|
||||
)
|
||||
|
||||
self._cleanup()
|
||||
|
||||
# add the current call to the call history
|
||||
self.call_history.append(time.monotonic())
|
||||
return func(*args, **kwargs)
|
||||
|
||||
return cast(F, wrapped_func)
|
||||
|
||||
def _cleanup(self) -> None:
|
||||
curr_time = time.monotonic()
|
||||
time_to_expire_before = curr_time - self.period
|
||||
self.call_history = [
|
||||
call_time
|
||||
for call_time in self.call_history
|
||||
if call_time > time_to_expire_before
|
||||
]
|
||||
|
||||
rate_limit_builder = _RateLimitDecorator
|
||||
|
||||
def retry_builder(
|
||||
tries: int = 20,
|
||||
delay: float = 0.1,
|
||||
max_delay: float | None = 60,
|
||||
backoff: float = 2,
|
||||
jitter: tuple[float, float] | float = 1,
|
||||
exceptions: type[Exception] | tuple[type[Exception], ...] = (Exception,),
|
||||
) -> Callable[[F], F]:
|
||||
"""Builds a generic wrapper/decorator for calls to external APIs that
|
||||
may fail due to rate limiting, flakes, or other reasons. Applies exponential
|
||||
backoff with jitter to retry the call."""
|
||||
|
||||
def retry_with_default(func: F) -> F:
|
||||
@retry(
|
||||
tries=tries,
|
||||
delay=delay,
|
||||
max_delay=max_delay,
|
||||
backoff=backoff,
|
||||
jitter=jitter,
|
||||
logger=logging.getLogger(__name__),
|
||||
exceptions=exceptions,
|
||||
)
|
||||
def wrapped_func(*args: list, **kwargs: dict[str, Any]) -> Any:
|
||||
return func(*args, **kwargs)
|
||||
|
||||
return cast(F, wrapped_func)
|
||||
|
||||
return retry_with_default
|
||||
|
||||
@ -82,10 +82,6 @@ class WebDAVConnector(LoadConnector, PollConnector):
|
||||
base_url=self.base_url,
|
||||
auth=(username, password)
|
||||
)
|
||||
|
||||
# Test connection
|
||||
self.client.exists(self.remote_path)
|
||||
|
||||
except Exception as e:
|
||||
logging.error(f"Failed to connect to WebDAV server: {e}")
|
||||
raise ConnectorMissingCredentialError(
|
||||
@ -308,60 +304,79 @@ class WebDAVConnector(LoadConnector, PollConnector):
|
||||
yield batch
|
||||
|
||||
def validate_connector_settings(self) -> None:
|
||||
"""Validate WebDAV connector settings
|
||||
|
||||
Raises:
|
||||
ConnectorMissingCredentialError: If credentials are not loaded
|
||||
ConnectorValidationError: If settings are invalid
|
||||
"""Validate WebDAV connector settings.
|
||||
|
||||
Validation should exercise the same code-paths used by the connector
|
||||
(directory listing / PROPFIND), avoiding exists() which may probe with
|
||||
methods that differ across servers.
|
||||
"""
|
||||
if self.client is None:
|
||||
raise ConnectorMissingCredentialError(
|
||||
"WebDAV credentials not loaded."
|
||||
)
|
||||
raise ConnectorMissingCredentialError("WebDAV credentials not loaded.")
|
||||
|
||||
if not self.base_url:
|
||||
raise ConnectorValidationError(
|
||||
"No base URL was provided in connector settings."
|
||||
)
|
||||
raise ConnectorValidationError("No base URL was provided in connector settings.")
|
||||
|
||||
# Normalize directory path: for collections, many servers behave better with trailing '/'
|
||||
test_path = self.remote_path or "/"
|
||||
if not test_path.startswith("/"):
|
||||
test_path = f"/{test_path}"
|
||||
if test_path != "/" and not test_path.endswith("/"):
|
||||
test_path = f"{test_path}/"
|
||||
|
||||
try:
|
||||
if not self.client.exists(self.remote_path):
|
||||
raise ConnectorValidationError(
|
||||
f"Remote path '{self.remote_path}' does not exist on WebDAV server."
|
||||
)
|
||||
# Use the same behavior as real sync: list directory with details (PROPFIND)
|
||||
self.client.ls(test_path, detail=True)
|
||||
|
||||
except Exception as e:
|
||||
error_message = str(e)
|
||||
|
||||
if "401" in error_message or "unauthorized" in error_message.lower():
|
||||
raise CredentialExpiredError(
|
||||
"WebDAV credentials appear invalid or expired."
|
||||
)
|
||||
|
||||
if "403" in error_message or "forbidden" in error_message.lower():
|
||||
# Prefer structured status codes if present on the exception/response
|
||||
status = None
|
||||
for attr in ("status_code", "code"):
|
||||
v = getattr(e, attr, None)
|
||||
if isinstance(v, int):
|
||||
status = v
|
||||
break
|
||||
if status is None:
|
||||
resp = getattr(e, "response", None)
|
||||
v = getattr(resp, "status_code", None)
|
||||
if isinstance(v, int):
|
||||
status = v
|
||||
|
||||
# If we can classify by status code, do it
|
||||
if status == 401:
|
||||
raise CredentialExpiredError("WebDAV credentials appear invalid or expired.")
|
||||
if status == 403:
|
||||
raise InsufficientPermissionsError(
|
||||
f"Insufficient permissions to access path '{self.remote_path}' on WebDAV server."
|
||||
)
|
||||
|
||||
if "404" in error_message or "not found" in error_message.lower():
|
||||
if status == 404:
|
||||
raise ConnectorValidationError(
|
||||
f"Remote path '{self.remote_path}' does not exist on WebDAV server."
|
||||
)
|
||||
|
||||
# Fallback: avoid brittle substring matching that caused false positives.
|
||||
# Provide the original exception for diagnosis.
|
||||
raise ConnectorValidationError(
|
||||
f"Unexpected WebDAV client error: {e}"
|
||||
f"WebDAV validation failed for path '{test_path}': {repr(e)}"
|
||||
)
|
||||
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
credentials_dict = {
|
||||
"username": os.environ.get("WEBDAV_USERNAME"),
|
||||
"password": os.environ.get("WEBDAV_PASSWORD"),
|
||||
}
|
||||
|
||||
credentials_dict = {
|
||||
"username": "user",
|
||||
"password": "pass",
|
||||
}
|
||||
|
||||
|
||||
|
||||
connector = WebDAVConnector(
|
||||
base_url=os.environ.get("WEBDAV_URL") or "https://webdav.example.com",
|
||||
remote_path=os.environ.get("WEBDAV_PATH") or "/",
|
||||
base_url="http://172.17.0.1:8080/",
|
||||
remote_path="/",
|
||||
)
|
||||
|
||||
try:
|
||||
|
||||
667
common/data_source/zendesk_connector.py
Normal file
@ -0,0 +1,667 @@
|
||||
import copy
|
||||
import logging
|
||||
import time
|
||||
from collections.abc import Callable
|
||||
from collections.abc import Iterator
|
||||
from typing import Any
|
||||
|
||||
import requests
|
||||
from pydantic import BaseModel
|
||||
from requests.exceptions import HTTPError
|
||||
from typing_extensions import override
|
||||
|
||||
from common.data_source.config import ZENDESK_CONNECTOR_SKIP_ARTICLE_LABELS, DocumentSource
|
||||
from common.data_source.exceptions import ConnectorValidationError, CredentialExpiredError, InsufficientPermissionsError
|
||||
from common.data_source.html_utils import parse_html_page_basic
|
||||
from common.data_source.interfaces import CheckpointOutput, CheckpointOutputWrapper, CheckpointedConnector, IndexingHeartbeatInterface, SlimConnectorWithPermSync
|
||||
from common.data_source.models import BasicExpertInfo, ConnectorCheckpoint, ConnectorFailure, Document, DocumentFailure, GenerateSlimDocumentOutput, SecondsSinceUnixEpoch, SlimDocument
|
||||
from common.data_source.utils import retry_builder, time_str_to_utc, rate_limit_builder
|
||||
|
||||
MAX_PAGE_SIZE = 30 # Zendesk API maximum
|
||||
MAX_AUTHOR_MAP_SIZE = 50_000 # Reset author map cache if it gets too large
|
||||
_SLIM_BATCH_SIZE = 1000
|
||||
|
||||
|
||||
class ZendeskCredentialsNotSetUpError(PermissionError):
|
||||
def __init__(self) -> None:
|
||||
super().__init__(
|
||||
"Zendesk Credentials are not set up, was load_credentials called?"
|
||||
)
|
||||
|
||||
|
||||
class ZendeskClient:
|
||||
def __init__(
|
||||
self,
|
||||
subdomain: str,
|
||||
email: str,
|
||||
token: str,
|
||||
calls_per_minute: int | None = None,
|
||||
):
|
||||
self.base_url = f"https://{subdomain}.zendesk.com/api/v2"
|
||||
self.auth = (f"{email}/token", token)
|
||||
self.make_request = request_with_rate_limit(self, calls_per_minute)
|
||||
|
||||
|
||||
def request_with_rate_limit(
|
||||
client: ZendeskClient, max_calls_per_minute: int | None = None
|
||||
) -> Callable[[str, dict[str, Any]], dict[str, Any]]:
|
||||
@retry_builder()
|
||||
@(
|
||||
rate_limit_builder(max_calls=max_calls_per_minute, period=60)
|
||||
if max_calls_per_minute
|
||||
else lambda x: x
|
||||
)
|
||||
def make_request(endpoint: str, params: dict[str, Any]) -> dict[str, Any]:
|
||||
response = requests.get(
|
||||
f"{client.base_url}/{endpoint}", auth=client.auth, params=params
|
||||
)
|
||||
|
||||
if response.status_code == 429:
|
||||
retry_after = response.headers.get("Retry-After")
|
||||
if retry_after is not None:
|
||||
# Sleep for the duration indicated by the Retry-After header
|
||||
time.sleep(int(retry_after))
|
||||
|
||||
elif (
|
||||
response.status_code == 403
|
||||
and response.json().get("error") == "SupportProductInactive"
|
||||
):
|
||||
return response.json()
|
||||
|
||||
response.raise_for_status()
|
||||
return response.json()
|
||||
|
||||
return make_request
|
||||
|
||||
|
||||
class ZendeskPageResponse(BaseModel):
|
||||
data: list[dict[str, Any]]
|
||||
meta: dict[str, Any]
|
||||
has_more: bool
|
||||
|
||||
|
||||
def _get_content_tag_mapping(client: ZendeskClient) -> dict[str, str]:
|
||||
content_tags: dict[str, str] = {}
|
||||
params = {"page[size]": MAX_PAGE_SIZE}
|
||||
|
||||
try:
|
||||
while True:
|
||||
data = client.make_request("guide/content_tags", params)
|
||||
|
||||
for tag in data.get("records", []):
|
||||
content_tags[tag["id"]] = tag["name"]
|
||||
|
||||
# Check if there are more pages
|
||||
if data.get("meta", {}).get("has_more", False):
|
||||
params["page[after]"] = data["meta"]["after_cursor"]
|
||||
else:
|
||||
break
|
||||
|
||||
return content_tags
|
||||
except Exception as e:
|
||||
raise Exception(f"Error fetching content tags: {str(e)}")
|
||||
|
||||
|
||||
def _get_articles(
|
||||
client: ZendeskClient, start_time: int | None = None, page_size: int = MAX_PAGE_SIZE
|
||||
) -> Iterator[dict[str, Any]]:
|
||||
params = {"page[size]": page_size, "sort_by": "updated_at", "sort_order": "asc"}
|
||||
if start_time is not None:
|
||||
params["start_time"] = start_time
|
||||
|
||||
while True:
|
||||
data = client.make_request("help_center/articles", params)
|
||||
for article in data["articles"]:
|
||||
yield article
|
||||
|
||||
if not data.get("meta", {}).get("has_more"):
|
||||
break
|
||||
params["page[after]"] = data["meta"]["after_cursor"]
|
||||
|
||||
|
||||
def _get_article_page(
|
||||
client: ZendeskClient,
|
||||
start_time: int | None = None,
|
||||
after_cursor: str | None = None,
|
||||
page_size: int = MAX_PAGE_SIZE,
|
||||
) -> ZendeskPageResponse:
|
||||
params = {"page[size]": page_size, "sort_by": "updated_at", "sort_order": "asc"}
|
||||
if start_time is not None:
|
||||
params["start_time"] = start_time
|
||||
if after_cursor is not None:
|
||||
params["page[after]"] = after_cursor
|
||||
|
||||
data = client.make_request("help_center/articles", params)
|
||||
return ZendeskPageResponse(
|
||||
data=data["articles"],
|
||||
meta=data["meta"],
|
||||
has_more=bool(data["meta"].get("has_more", False)),
|
||||
)
|
||||
|
||||
|
||||
def _get_tickets(
|
||||
client: ZendeskClient, start_time: int | None = None
|
||||
) -> Iterator[dict[str, Any]]:
|
||||
params = {"start_time": start_time or 0}
|
||||
|
||||
while True:
|
||||
data = client.make_request("incremental/tickets.json", params)
|
||||
for ticket in data["tickets"]:
|
||||
yield ticket
|
||||
|
||||
if not data.get("end_of_stream", False):
|
||||
params["start_time"] = data["end_time"]
|
||||
else:
|
||||
break
|
||||
|
||||
|
||||
# TODO: maybe these don't need to be their own functions?
|
||||
def _get_tickets_page(
|
||||
client: ZendeskClient, start_time: int | None = None
|
||||
) -> ZendeskPageResponse:
|
||||
params = {"start_time": start_time or 0}
|
||||
|
||||
# NOTE: for some reason zendesk doesn't seem to be respecting the start_time param
|
||||
# in my local testing with very few tickets. We'll look into it if this becomes an
|
||||
# issue in larger deployments
|
||||
data = client.make_request("incremental/tickets.json", params)
|
||||
if data.get("error") == "SupportProductInactive":
|
||||
raise ValueError(
|
||||
"Zendesk Support Product is not active for this account, No tickets to index"
|
||||
)
|
||||
return ZendeskPageResponse(
|
||||
data=data["tickets"],
|
||||
meta={"end_time": data["end_time"]},
|
||||
has_more=not bool(data.get("end_of_stream", False)),
|
||||
)
|
||||
|
||||
|
||||
def _fetch_author(
|
||||
client: ZendeskClient, author_id: str | int
|
||||
) -> BasicExpertInfo | None:
|
||||
# Skip fetching if author_id is invalid
|
||||
# cast to str to avoid issues with zendesk changing their types
|
||||
if not author_id or str(author_id) == "-1":
|
||||
return None
|
||||
|
||||
try:
|
||||
author_data = client.make_request(f"users/{author_id}", {})
|
||||
user = author_data.get("user")
|
||||
return (
|
||||
BasicExpertInfo(display_name=user.get("name"), email=user.get("email"))
|
||||
if user and user.get("name") and user.get("email")
|
||||
else None
|
||||
)
|
||||
except requests.exceptions.HTTPError:
|
||||
# Handle any API errors gracefully
|
||||
return None
|
||||
|
||||
|
||||
def _article_to_document(
|
||||
article: dict[str, Any],
|
||||
content_tags: dict[str, str],
|
||||
author_map: dict[str, BasicExpertInfo],
|
||||
client: ZendeskClient,
|
||||
) -> tuple[dict[str, BasicExpertInfo] | None, Document]:
|
||||
author_id = article.get("author_id")
|
||||
if not author_id:
|
||||
author = None
|
||||
else:
|
||||
author = (
|
||||
author_map.get(author_id)
|
||||
if author_id in author_map
|
||||
else _fetch_author(client, author_id)
|
||||
)
|
||||
|
||||
new_author_mapping = {author_id: author} if author_id and author else None
|
||||
|
||||
updated_at = article.get("updated_at")
|
||||
update_time = time_str_to_utc(updated_at) if updated_at else None
|
||||
|
||||
text = parse_html_page_basic(article.get("body") or "")
|
||||
blob = text.encode("utf-8", errors="replace")
|
||||
# Build metadata
|
||||
metadata: dict[str, str | list[str]] = {
|
||||
"labels": [str(label) for label in article.get("label_names", []) if label],
|
||||
"content_tags": [
|
||||
content_tags[tag_id]
|
||||
for tag_id in article.get("content_tag_ids", [])
|
||||
if tag_id in content_tags
|
||||
],
|
||||
}
|
||||
|
||||
# Remove empty values
|
||||
metadata = {k: v for k, v in metadata.items() if v}
|
||||
|
||||
return new_author_mapping, Document(
|
||||
id=f"article:{article['id']}",
|
||||
source=DocumentSource.ZENDESK,
|
||||
semantic_identifier=article["title"],
|
||||
extension=".txt",
|
||||
blob=blob,
|
||||
size_bytes=len(blob),
|
||||
doc_updated_at=update_time,
|
||||
primary_owners=[author] if author else None,
|
||||
metadata=metadata,
|
||||
)
|
||||
|
||||
|
||||
def _get_comment_text(
|
||||
comment: dict[str, Any],
|
||||
author_map: dict[str, BasicExpertInfo],
|
||||
client: ZendeskClient,
|
||||
) -> tuple[dict[str, BasicExpertInfo] | None, str]:
|
||||
author_id = comment.get("author_id")
|
||||
if not author_id:
|
||||
author = None
|
||||
else:
|
||||
author = (
|
||||
author_map.get(author_id)
|
||||
if author_id in author_map
|
||||
else _fetch_author(client, author_id)
|
||||
)
|
||||
|
||||
new_author_mapping = {author_id: author} if author_id and author else None
|
||||
|
||||
comment_text = f"Comment{' by ' + author.display_name if author and author.display_name else ''}"
|
||||
comment_text += f"{' at ' + comment['created_at'] if comment.get('created_at') else ''}:\n{comment['body']}"
|
||||
|
||||
return new_author_mapping, comment_text
|
||||
|
||||
|
||||
def _ticket_to_document(
|
||||
ticket: dict[str, Any],
|
||||
author_map: dict[str, BasicExpertInfo],
|
||||
client: ZendeskClient,
|
||||
) -> tuple[dict[str, BasicExpertInfo] | None, Document]:
|
||||
submitter_id = ticket.get("submitter")
|
||||
if not submitter_id:
|
||||
submitter = None
|
||||
else:
|
||||
submitter = (
|
||||
author_map.get(submitter_id)
|
||||
if submitter_id in author_map
|
||||
else _fetch_author(client, submitter_id)
|
||||
)
|
||||
|
||||
new_author_mapping = (
|
||||
{submitter_id: submitter} if submitter_id and submitter else None
|
||||
)
|
||||
|
||||
updated_at = ticket.get("updated_at")
|
||||
update_time = time_str_to_utc(updated_at) if updated_at else None
|
||||
|
||||
metadata: dict[str, str | list[str]] = {}
|
||||
if status := ticket.get("status"):
|
||||
metadata["status"] = status
|
||||
if priority := ticket.get("priority"):
|
||||
metadata["priority"] = priority
|
||||
if tags := ticket.get("tags"):
|
||||
metadata["tags"] = tags
|
||||
if ticket_type := ticket.get("type"):
|
||||
metadata["ticket_type"] = ticket_type
|
||||
|
||||
# Fetch comments for the ticket
|
||||
comments_data = client.make_request(f"tickets/{ticket.get('id')}/comments", {})
|
||||
comments = comments_data.get("comments", [])
|
||||
|
||||
comment_texts = []
|
||||
for comment in comments:
|
||||
new_author_mapping, comment_text = _get_comment_text(
|
||||
comment, author_map, client
|
||||
)
|
||||
if new_author_mapping:
|
||||
author_map.update(new_author_mapping)
|
||||
comment_texts.append(comment_text)
|
||||
|
||||
comments_text = "\n\n".join(comment_texts)
|
||||
|
||||
subject = ticket.get("subject")
|
||||
full_text = f"Ticket Subject:\n{subject}\n\nComments:\n{comments_text}"
|
||||
|
||||
blob = full_text.encode("utf-8", errors="replace")
|
||||
return new_author_mapping, Document(
|
||||
id=f"zendesk_ticket_{ticket['id']}",
|
||||
blob=blob,
|
||||
extension=".txt",
|
||||
size_bytes=len(blob),
|
||||
source=DocumentSource.ZENDESK,
|
||||
semantic_identifier=f"Ticket #{ticket['id']}: {subject or 'No Subject'}",
|
||||
doc_updated_at=update_time,
|
||||
primary_owners=[submitter] if submitter else None,
|
||||
metadata=metadata,
|
||||
)
|
||||
|
||||
|
||||
class ZendeskConnectorCheckpoint(ConnectorCheckpoint):
|
||||
# We use cursor-based paginated retrieval for articles
|
||||
after_cursor_articles: str | None
|
||||
|
||||
# We use timestamp-based paginated retrieval for tickets
|
||||
next_start_time_tickets: int | None
|
||||
|
||||
cached_author_map: dict[str, BasicExpertInfo] | None
|
||||
cached_content_tags: dict[str, str] | None
|
||||
|
||||
|
||||
class ZendeskConnector(
|
||||
SlimConnectorWithPermSync, CheckpointedConnector[ZendeskConnectorCheckpoint]
|
||||
):
|
||||
def __init__(
|
||||
self,
|
||||
content_type: str = "articles",
|
||||
calls_per_minute: int | None = None,
|
||||
) -> None:
|
||||
self.content_type = content_type
|
||||
self.subdomain = ""
|
||||
# Fetch all tags ahead of time
|
||||
self.content_tags: dict[str, str] = {}
|
||||
self.calls_per_minute = calls_per_minute
|
||||
|
||||
def load_credentials(self, credentials: dict[str, Any]) -> dict[str, Any] | None:
|
||||
# Subdomain is actually the whole URL
|
||||
subdomain = (
|
||||
credentials["zendesk_subdomain"]
|
||||
.replace("https://", "")
|
||||
.split(".zendesk.com")[0]
|
||||
)
|
||||
self.subdomain = subdomain
|
||||
|
||||
self.client = ZendeskClient(
|
||||
subdomain,
|
||||
credentials["zendesk_email"],
|
||||
credentials["zendesk_token"],
|
||||
calls_per_minute=self.calls_per_minute,
|
||||
)
|
||||
return None
|
||||
|
||||
@override
|
||||
def load_from_checkpoint(
|
||||
self,
|
||||
start: SecondsSinceUnixEpoch,
|
||||
end: SecondsSinceUnixEpoch,
|
||||
checkpoint: ZendeskConnectorCheckpoint,
|
||||
) -> CheckpointOutput[ZendeskConnectorCheckpoint]:
|
||||
if self.client is None:
|
||||
raise ZendeskCredentialsNotSetUpError()
|
||||
if checkpoint.cached_content_tags is None:
|
||||
checkpoint.cached_content_tags = _get_content_tag_mapping(self.client)
|
||||
return checkpoint # save the content tags to the checkpoint
|
||||
self.content_tags = checkpoint.cached_content_tags
|
||||
|
||||
if self.content_type == "articles":
|
||||
checkpoint = yield from self._retrieve_articles(start, end, checkpoint)
|
||||
return checkpoint
|
||||
elif self.content_type == "tickets":
|
||||
checkpoint = yield from self._retrieve_tickets(start, end, checkpoint)
|
||||
return checkpoint
|
||||
else:
|
||||
raise ValueError(f"Unsupported content_type: {self.content_type}")
|
||||
|
||||
def _retrieve_articles(
|
||||
self,
|
||||
start: SecondsSinceUnixEpoch | None,
|
||||
end: SecondsSinceUnixEpoch | None,
|
||||
checkpoint: ZendeskConnectorCheckpoint,
|
||||
) -> CheckpointOutput[ZendeskConnectorCheckpoint]:
|
||||
checkpoint = copy.deepcopy(checkpoint)
|
||||
# This one is built on the fly as there may be many more authors than tags
|
||||
author_map: dict[str, BasicExpertInfo] = checkpoint.cached_author_map or {}
|
||||
after_cursor = checkpoint.after_cursor_articles
|
||||
doc_batch: list[Document] = []
|
||||
|
||||
response = _get_article_page(
|
||||
self.client,
|
||||
start_time=int(start) if start else None,
|
||||
after_cursor=after_cursor,
|
||||
)
|
||||
articles = response.data
|
||||
has_more = response.has_more
|
||||
after_cursor = response.meta.get("after_cursor")
|
||||
for article in articles:
|
||||
if (
|
||||
article.get("body") is None
|
||||
or article.get("draft")
|
||||
or any(
|
||||
label in ZENDESK_CONNECTOR_SKIP_ARTICLE_LABELS
|
||||
for label in article.get("label_names", [])
|
||||
)
|
||||
):
|
||||
continue
|
||||
|
||||
try:
|
||||
new_author_map, document = _article_to_document(
|
||||
article, self.content_tags, author_map, self.client
|
||||
)
|
||||
except Exception as e:
|
||||
logging.error(f"Error processing article {article['id']}: {e}")
|
||||
yield ConnectorFailure(
|
||||
failed_document=DocumentFailure(
|
||||
document_id=f"{article.get('id')}",
|
||||
document_link=article.get("html_url", ""),
|
||||
),
|
||||
failure_message=str(e),
|
||||
exception=e,
|
||||
)
|
||||
continue
|
||||
|
||||
if new_author_map:
|
||||
author_map.update(new_author_map)
|
||||
updated_at = document.doc_updated_at
|
||||
updated_ts = updated_at.timestamp() if updated_at else None
|
||||
if updated_ts is not None:
|
||||
if start is not None and updated_ts <= start:
|
||||
continue
|
||||
if end is not None and updated_ts > end:
|
||||
continue
|
||||
|
||||
doc_batch.append(document)
|
||||
|
||||
if not has_more:
|
||||
yield from doc_batch
|
||||
checkpoint.has_more = False
|
||||
return checkpoint
|
||||
|
||||
# Sometimes no documents are retrieved, but the cursor
|
||||
# is still updated so the connector makes progress.
|
||||
yield from doc_batch
|
||||
checkpoint.after_cursor_articles = after_cursor
|
||||
|
||||
last_doc_updated_at = doc_batch[-1].doc_updated_at if doc_batch else None
|
||||
checkpoint.has_more = bool(
|
||||
end is None
|
||||
or last_doc_updated_at is None
|
||||
or last_doc_updated_at.timestamp() <= end
|
||||
)
|
||||
checkpoint.cached_author_map = (
|
||||
author_map if len(author_map) <= MAX_AUTHOR_MAP_SIZE else None
|
||||
)
|
||||
return checkpoint
|
||||
|
||||
def _retrieve_tickets(
|
||||
self,
|
||||
start: SecondsSinceUnixEpoch | None,
|
||||
end: SecondsSinceUnixEpoch | None,
|
||||
checkpoint: ZendeskConnectorCheckpoint,
|
||||
) -> CheckpointOutput[ZendeskConnectorCheckpoint]:
|
||||
checkpoint = copy.deepcopy(checkpoint)
|
||||
if self.client is None:
|
||||
raise ZendeskCredentialsNotSetUpError()
|
||||
|
||||
author_map: dict[str, BasicExpertInfo] = checkpoint.cached_author_map or {}
|
||||
|
||||
doc_batch: list[Document] = []
|
||||
next_start_time = int(checkpoint.next_start_time_tickets or start or 0)
|
||||
ticket_response = _get_tickets_page(self.client, start_time=next_start_time)
|
||||
|
||||
tickets = ticket_response.data
|
||||
has_more = ticket_response.has_more
|
||||
next_start_time = ticket_response.meta["end_time"]
|
||||
for ticket in tickets:
|
||||
if ticket.get("status") == "deleted":
|
||||
continue
|
||||
|
||||
try:
|
||||
new_author_map, document = _ticket_to_document(
|
||||
ticket=ticket,
|
||||
author_map=author_map,
|
||||
client=self.client,
|
||||
)
|
||||
except Exception as e:
|
||||
logging.error(f"Error processing ticket {ticket['id']}: {e}")
|
||||
yield ConnectorFailure(
|
||||
failed_document=DocumentFailure(
|
||||
document_id=f"{ticket.get('id')}",
|
||||
document_link=ticket.get("url", ""),
|
||||
),
|
||||
failure_message=str(e),
|
||||
exception=e,
|
||||
)
|
||||
continue
|
||||
|
||||
if new_author_map:
|
||||
author_map.update(new_author_map)
|
||||
|
||||
updated_at = document.doc_updated_at
|
||||
updated_ts = updated_at.timestamp() if updated_at else None
|
||||
|
||||
if updated_ts is not None:
|
||||
if start is not None and updated_ts <= start:
|
||||
continue
|
||||
if end is not None and updated_ts > end:
|
||||
continue
|
||||
|
||||
doc_batch.append(document)
|
||||
|
||||
if not has_more:
|
||||
yield from doc_batch
|
||||
checkpoint.has_more = False
|
||||
return checkpoint
|
||||
|
||||
yield from doc_batch
|
||||
checkpoint.next_start_time_tickets = next_start_time
|
||||
last_doc_updated_at = doc_batch[-1].doc_updated_at if doc_batch else None
|
||||
checkpoint.has_more = bool(
|
||||
end is None
|
||||
or last_doc_updated_at is None
|
||||
or last_doc_updated_at.timestamp() <= end
|
||||
)
|
||||
checkpoint.cached_author_map = (
|
||||
author_map if len(author_map) <= MAX_AUTHOR_MAP_SIZE else None
|
||||
)
|
||||
return checkpoint
|
||||
|
||||
def retrieve_all_slim_docs_perm_sync(
|
||||
self,
|
||||
start: SecondsSinceUnixEpoch | None = None,
|
||||
end: SecondsSinceUnixEpoch | None = None,
|
||||
callback: IndexingHeartbeatInterface | None = None,
|
||||
) -> GenerateSlimDocumentOutput:
|
||||
slim_doc_batch: list[SlimDocument] = []
|
||||
if self.content_type == "articles":
|
||||
articles = _get_articles(
|
||||
self.client, start_time=int(start) if start else None
|
||||
)
|
||||
for article in articles:
|
||||
slim_doc_batch.append(
|
||||
SlimDocument(
|
||||
id=f"article:{article['id']}",
|
||||
)
|
||||
)
|
||||
if len(slim_doc_batch) >= _SLIM_BATCH_SIZE:
|
||||
yield slim_doc_batch
|
||||
slim_doc_batch = []
|
||||
elif self.content_type == "tickets":
|
||||
tickets = _get_tickets(
|
||||
self.client, start_time=int(start) if start else None
|
||||
)
|
||||
for ticket in tickets:
|
||||
slim_doc_batch.append(
|
||||
SlimDocument(
|
||||
id=f"zendesk_ticket_{ticket['id']}",
|
||||
)
|
||||
)
|
||||
if len(slim_doc_batch) >= _SLIM_BATCH_SIZE:
|
||||
yield slim_doc_batch
|
||||
slim_doc_batch = []
|
||||
else:
|
||||
raise ValueError(f"Unsupported content_type: {self.content_type}")
|
||||
if slim_doc_batch:
|
||||
yield slim_doc_batch
|
||||
|
||||
@override
|
||||
def validate_connector_settings(self) -> None:
|
||||
if self.client is None:
|
||||
raise ZendeskCredentialsNotSetUpError()
|
||||
|
||||
try:
|
||||
_get_article_page(self.client, start_time=0)
|
||||
except HTTPError as e:
|
||||
# Check for HTTP status codes
|
||||
if e.response.status_code == 401:
|
||||
raise CredentialExpiredError(
|
||||
"Your Zendesk credentials appear to be invalid or expired (HTTP 401)."
|
||||
) from e
|
||||
elif e.response.status_code == 403:
|
||||
raise InsufficientPermissionsError(
|
||||
"Your Zendesk token does not have sufficient permissions (HTTP 403)."
|
||||
) from e
|
||||
elif e.response.status_code == 404:
|
||||
raise ConnectorValidationError(
|
||||
"Zendesk resource not found (HTTP 404)."
|
||||
) from e
|
||||
else:
|
||||
raise ConnectorValidationError(
|
||||
f"Unexpected Zendesk error (status={e.response.status_code}): {e}"
|
||||
) from e
|
||||
|
||||
@override
|
||||
def validate_checkpoint_json(
|
||||
self, checkpoint_json: str
|
||||
) -> ZendeskConnectorCheckpoint:
|
||||
return ZendeskConnectorCheckpoint.model_validate_json(checkpoint_json)
|
||||
|
||||
@override
|
||||
def build_dummy_checkpoint(self) -> ZendeskConnectorCheckpoint:
|
||||
return ZendeskConnectorCheckpoint(
|
||||
after_cursor_articles=None,
|
||||
next_start_time_tickets=None,
|
||||
cached_author_map=None,
|
||||
cached_content_tags=None,
|
||||
has_more=True,
|
||||
)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
import os
|
||||
|
||||
connector = ZendeskConnector(content_type="articles")
|
||||
connector.load_credentials(
|
||||
{
|
||||
"zendesk_subdomain": os.environ["ZENDESK_SUBDOMAIN"],
|
||||
"zendesk_email": os.environ["ZENDESK_EMAIL"],
|
||||
"zendesk_token": os.environ["ZENDESK_TOKEN"],
|
||||
}
|
||||
)
|
||||
|
||||
current = time.time()
|
||||
one_day_ago = current - 24 * 60 * 60 # 1 day
|
||||
|
||||
checkpoint = connector.build_dummy_checkpoint()
|
||||
|
||||
while checkpoint.has_more:
|
||||
gen = connector.load_from_checkpoint(
|
||||
one_day_ago, current, checkpoint
|
||||
)
|
||||
|
||||
wrapper = CheckpointOutputWrapper()
|
||||
any_doc = False
|
||||
|
||||
for document, failure, next_checkpoint in wrapper(gen):
|
||||
if document:
|
||||
print("got document:", document.id)
|
||||
any_doc = True
|
||||
|
||||
checkpoint = next_checkpoint
|
||||
if any_doc:
|
||||
break
|
||||
@ -476,11 +476,13 @@ class RAGFlowPdfParser:
|
||||
self.boxes = bxs
|
||||
|
||||
def _naive_vertical_merge(self, zoomin=3):
|
||||
bxs = self._assign_column(self.boxes, zoomin)
|
||||
#bxs = self._assign_column(self.boxes, zoomin)
|
||||
bxs = self.boxes
|
||||
|
||||
grouped = defaultdict(list)
|
||||
for b in bxs:
|
||||
grouped[(b["page_number"], b.get("col_id", 0))].append(b)
|
||||
# grouped[(b["page_number"], b.get("col_id", 0))].append(b)
|
||||
grouped[(b["page_number"], "x")].append(b)
|
||||
|
||||
merged_boxes = []
|
||||
for (pg, col), bxs in grouped.items():
|
||||
@ -551,7 +553,7 @@ class RAGFlowPdfParser:
|
||||
|
||||
merged_boxes.extend(bxs)
|
||||
|
||||
self.boxes = sorted(merged_boxes, key=lambda x: (x["page_number"], x.get("col_id", 0), x["top"]))
|
||||
#self.boxes = sorted(merged_boxes, key=lambda x: (x["page_number"], x.get("col_id", 0), x["top"]))
|
||||
|
||||
def _final_reading_order_merge(self, zoomin=3):
|
||||
if not self.boxes:
|
||||
@ -1206,7 +1208,7 @@ class RAGFlowPdfParser:
|
||||
start = timer()
|
||||
self._text_merge()
|
||||
self._concat_downward()
|
||||
#self._naive_vertical_merge(zoomin)
|
||||
self._naive_vertical_merge(zoomin)
|
||||
if callback:
|
||||
callback(0.92, "Text merged ({:.2f}s)".format(timer() - start))
|
||||
|
||||
|
||||
@ -137,11 +137,11 @@ ADMIN_SVR_HTTP_PORT=9381
|
||||
SVR_MCP_PORT=9382
|
||||
|
||||
# The RAGFlow Docker image to download. v0.22+ doesn't include embedding models.
|
||||
RAGFLOW_IMAGE=infiniflow/ragflow:v0.23.0
|
||||
RAGFLOW_IMAGE=infiniflow/ragflow:v0.23.1
|
||||
|
||||
# If you cannot download the RAGFlow Docker image:
|
||||
# RAGFLOW_IMAGE=swr.cn-north-4.myhuaweicloud.com/infiniflow/ragflow:v0.23.0
|
||||
# RAGFLOW_IMAGE=registry.cn-hangzhou.aliyuncs.com/infiniflow/ragflow:v0.23.0
|
||||
# RAGFLOW_IMAGE=swr.cn-north-4.myhuaweicloud.com/infiniflow/ragflow:v0.23.1
|
||||
# RAGFLOW_IMAGE=registry.cn-hangzhou.aliyuncs.com/infiniflow/ragflow:v0.23.1
|
||||
#
|
||||
# - For the `nightly` edition, uncomment either of the following:
|
||||
# RAGFLOW_IMAGE=swr.cn-north-4.myhuaweicloud.com/infiniflow/ragflow:nightly
|
||||
|
||||
@ -77,7 +77,7 @@ The [.env](./.env) file contains important environment variables for Docker.
|
||||
- `SVR_HTTP_PORT`
|
||||
The port used to expose RAGFlow's HTTP API service to the host machine, allowing **external** access to the service running inside the Docker container. Defaults to `9380`.
|
||||
- `RAGFLOW_IMAGE`
|
||||
The Docker image edition. Defaults to `infiniflow/ragflow:v0.23.0`. The RAGFlow Docker image does not include embedding models.
|
||||
The Docker image edition. Defaults to `infiniflow/ragflow:v0.23.1`. The RAGFlow Docker image does not include embedding models.
|
||||
|
||||
|
||||
> [!TIP]
|
||||
|
||||
8
docs/basics/_category_.json
Normal file
@ -0,0 +1,8 @@
|
||||
{
|
||||
"label": "Basics",
|
||||
"position": 2,
|
||||
"link": {
|
||||
"type": "generated-index",
|
||||
"description": "Basic concepts."
|
||||
}
|
||||
}
|
||||
61
docs/basics/agent_context_engine.md
Normal file
@ -0,0 +1,61 @@
|
||||
---
|
||||
sidebar_position: 2
|
||||
slug: /what-is-agent-context-engine
|
||||
---
|
||||
|
||||
# What is an Agent Context Engine?
|
||||
|
||||
In 2025, a silent revolution began beneath the dazzling surface of AI Agents. While the world marveled at agents that could write code, analyze data, and automate workflows, a fundamental bottleneck emerged: why do even the most advanced agents still stumble on simple questions, forget previous conversations, or misuse available tools?
|
||||
|
||||
The answer lies not in the intelligence of the Large Language Model (LLM) itself, but in the quality of the Context it receives. An LLM, no matter how powerful, is only as good as the information we feed it. Today’s cutting-edge agents are often crippled by a cumbersome, manual, and error-prone process of context assembly—a process known as Context Engineering.
|
||||
|
||||
This is where the Agent Context Engine comes in. It is not merely an incremental improvement but a foundational shift, representing the evolution of RAG from a singular technique into the core data and intelligence substrate for the entire Agent ecosystem.
|
||||
|
||||
## Beyond the hype: The reality of today's "intelligent" Agents
|
||||
Today, the “intelligence” behind most AI Agents hides a mountain of human labor. Developers must:
|
||||
|
||||
- Hand-craft elaborate prompt templates
|
||||
- Hard-code document-retrieval logic for every task
|
||||
- Juggle tool descriptions, conversation history, and knowledge snippets inside a tiny context window
|
||||
- Repeat the whole process for each new scenario
|
||||
|
||||
This pattern is called Context Engineering. It is deeply tied to expert know-how, almost impossible to scale, and prohibitively expensive to maintain. When an enterprise needs to keep dozens of distinct agents alive, the artisanal workshop model collapses under its own weight.
|
||||
|
||||
The mission of an Agent Context Engine is to turn Context Engineering from an “art” into an industrial-grade science.
|
||||
|
||||
## Deconstructing the Agent Context Engine
|
||||
So, what exactly is an Agent Context Engine? It is a unified, intelligent, and automated platform responsible for the end-to-end process of assembling the optimal context for an LLM or Agent at the moment of inference. It moves from artisanal crafting to industrialized production.
|
||||
At its core, an Agent Context Engine is built on a triumvirate of next-generation retrieval capabilities, seamlessly integrated into a single service layer:
|
||||
|
||||
1. The Knowledge Core (Advanced RAG): This is the evolution of traditional RAG. It moves beyond simple chunk-and-embed to intelligently process static, private enterprise knowledge. Techniques like TreeRAG (building LLM-generated document outlines for "locate-then-expand" retrieval) and GraphRAG (extracting entity networks to find semantically distant connections) work to close the "semantic gap." The engine’s Ingestion Pipeline acts as the ETL for unstructured data, parsing multi-format documents and using LLMs to enrich content with summaries, metadata, and structure before indexing.
|
||||
|
||||
2. The Memory Layer: An Agent’s intelligence is defined by its ability to learn from interaction. The Memory Layer is a specialized retrieval system for dynamic, episodic data: conversation history, user preferences, and the agent’s own internal state (e.g., "waiting for human input"). It manages the lifecycle of this data—storing raw dialogue, triggering summarization into semantic memory, and retrieving relevant past interactions to provide continuity and personalization. Technologically, it is a close sibling to RAG, but focused on a temporal stream of data.
|
||||
|
||||
3. The Tool Orchestrator: As MCP (Model Context Protocol) enables the connection of hundreds of internal services as tools, a new problem arises: tool selection. The Context Engine solves this with Tool Retrieval. Instead of dumping all tool descriptions into the prompt, it maintains an index of tools and—critically—an index of Playbooks or Guidelines (best practices on when and how to use tools). For a given task, it retrieves only the most relevant tools and instructions, transforming the LLM’s job from "searching a haystack" to "following a recipe."
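To make this triumvirate concrete, the sketch below shows one way a context engine could pull from all three substrates at inference time. It is a minimal illustration rather than RAGFlow's actual implementation; the retriever objects and their `search` methods are assumed interfaces.

```python
from dataclasses import dataclass


@dataclass
class AssembledContext:
    knowledge: list[str]  # chunks from the Knowledge Core (advanced RAG)
    memories: list[str]   # relevant episodes from the Memory Layer
    tools: list[str]      # descriptions of the few tools worth exposing


def assemble_context(query: str, knowledge_index, memory_index, tool_index,
                     k_knowledge: int = 5, k_memory: int = 3, k_tools: int = 2) -> AssembledContext:
    """Retrieve from all three substrates and keep only what fits the context window."""
    return AssembledContext(
        knowledge=knowledge_index.search(query, top_k=k_knowledge),
        memories=memory_index.search(query, top_k=k_memory),
        tools=tool_index.search(query, top_k=k_tools),
    )
```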
|
||||
|
||||
## Why do we need a dedicated engine? The case for a unified substrate
|
||||
|
||||
The necessity of an Agent Context Engine becomes clear when we examine the alternative: siloed, manually wired components.
|
||||
|
||||
- The Data Silo Problem: Knowledge, memory, and tools reside in separate systems, requiring complex integration for each new agent.
|
||||
- The Assembly Line Bottleneck: Developers spend more time on context plumbing than on agent logic, slowing innovation to a crawl.
|
||||
- The "Context Ownership" Dilemma: In manually engineered systems, context logic is buried in code, owned by developers, and opaque to business users. An Engine makes context a configurable, observable, and customer-owned asset.
|
||||
|
||||
The shift from Context Engineering to a Context Platform/Engine marks the maturation of enterprise AI, as summarized in the table below:
|
||||
|
||||
| Dimension | Context engineering (present) | Context Engine/Platform (future) |
|
||||
| ------------------- | -------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------- |
|
||||
| Context creation | Manual, artisanal work by developers and prompt engineers. | Automated, driven by intelligent ingestion pipelines and configurable rules. |
|
||||
| Context delivery | Hard-coded prompts and static retrieval logic embedded in agent workflows. | Dynamic, real-time retrieval and assembly based on the agent's live state and intent. |
|
||||
| Context maintenance | A development and operational burden, logic locked in code. | A manageable platform function, with visibility and control returned to the business. |
|
||||
|
||||
|
||||
## RAGFlow: A resolute march toward the context engine of Agents
|
||||
|
||||
This is the future RAGFlow is forging.
|
||||
|
||||
We left behind the label of “yet another RAG system” long ago. From DeepDoc—our deeply-optimized, multimodal document parser—to the bleeding-edge architectures that bridge semantic chasms in complex RAG scenarios, all the way to a full-blown, enterprise-grade ingestion pipeline, every evolutionary step RAGFlow takes is a deliberate stride toward the ultimate form: an Agentic Context Engine.
|
||||
|
||||
We believe tomorrow’s enterprise AI advantage will hinge not on who owns the largest model, but on who can feed that model the highest-quality, most real-time, and most relevant context. An Agentic Context Engine is the critical infrastructure that turns this vision into reality.
|
||||
|
||||
In the paradigm shift from “hand-crafted prompts” to “intelligent context,” RAGFlow is determined to be the most steadfast propeller and enabler. We invite every developer, enterprise, and researcher who cares about the future of AI agents to follow RAGFlow’s journey—so together we can witness and build the cornerstone of the next-generation AI stack.
|
||||
107
docs/basics/rag.md
Normal file
@ -0,0 +1,107 @@
|
||||
---
|
||||
sidebar_position: 1
|
||||
slug: /what-is-rag
|
||||
---
|
||||
|
||||
# What is Retrieval-Augmented Generation (RAG)?
|
||||
|
||||
Since large language models (LLMs) became the focus of technology, their ability to handle general knowledge has been astonishing. However, when questions shift to internal corporate documents, proprietary knowledge bases, or real-time data, the limitations of LLMs become glaringly apparent: they cannot access private information outside their training data. Retrieval-Augmented Generation (RAG) was born precisely to address this core need. Before an LLM generates an answer, it first retrieves the most relevant context from an external knowledge base and inputs it as "reference material" to the LLM, thereby guiding it to produce accurate answers. In short, RAG elevates LLMs from "relying on memory" to "having evidence to rely on," significantly improving their accuracy and trustworthiness in specialized fields and real-time information queries.
|
||||
|
||||
## Why is RAG important?
|
||||
|
||||
Although LLMs excel in language understanding and generation, they have inherent limitations:
|
||||
|
||||
- Static Knowledge: The model's knowledge is based on a data snapshot from its training time and cannot be automatically updated, making it difficult to perceive the latest information.
|
||||
- Blind Spot to External Data: They cannot directly access corporate private documents, real-time information streams, or domain-specific content.
|
||||
- Hallucination Risk: When lacking accurate evidence, they may still fabricate plausible-sounding but false answers to maintain conversational fluency.
|
||||
|
||||
The introduction of RAG provides LLMs with real-time, credible "factual grounding." Its core mechanism is divided into two stages:
|
||||
|
||||
- Retrieval Stage: Based on the user's question, quickly retrieve the most relevant documents or data fragments from an external knowledge base.
|
||||
- Generation Stage: The LLM organizes and generates the final answer by incorporating the retrieved information as context, combined with its own linguistic capabilities.
|
||||
|
||||
This upgrades LLMs from "speaking from memory" to "speaking with documentation," significantly enhancing reliability in professional and enterprise-level applications.
|
||||
|
||||
## How does RAG work?
|
||||
|
||||
Retrieval-Augmented Generation enables LLMs to generate higher-quality responses by leveraging real-time, external, or private data sources through the introduction of an information retrieval mechanism. Its workflow can be divided into the following key steps:
|
||||
|
||||
### Data processing and vectorization
|
||||
|
||||
The knowledge required by RAG comes from unstructured data in various formats, such as documents, database records, or API return content. This data typically needs to be chunked, then transformed into vectors via an embedding model, and stored in a vector database.
|
||||
|
||||
Why is Chunking Needed? Indexing entire documents directly faces the following problems:
|
||||
|
||||
- Decreased Retrieval Precision: Vectorizing long documents leads to semantic "averaging," losing details.
|
||||
- Context Length Limitation: LLMs have a finite context window, requiring filtering of the most relevant parts for input.
|
||||
- Cost and Efficiency: Embedding computation and retrieval costs are higher for long texts.
|
||||
|
||||
Therefore, an intelligent chunking strategy is key to balancing information integrity, retrieval granularity, and computational efficiency.
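As a minimal sketch of this ingestion step (the `embed` callable and `vector_store` object are assumed interfaces, not a specific embedding model or database), chunking and indexing might look like:

```python
def chunk_text(text: str, chunk_size: int = 500, overlap: int = 50) -> list[str]:
    """Split text into overlapping character windows (a deliberately naive strategy)."""
    chunks, start = [], 0
    while start < len(text):
        chunks.append(text[start:start + chunk_size])
        start += chunk_size - overlap
    return chunks


def index_document(text: str, embed, vector_store) -> None:
    """embed: callable str -> list[float]; vector_store: any store exposing an `add` method."""
    for chunk in chunk_text(text):
        vector_store.add(vector=embed(chunk), payload={"text": chunk})
```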
|
||||
|
||||
### Retrieve relevant information
|
||||
|
||||
The user's query is also converted into a vector to perform semantic relevance searches (e.g., calculating cosine similarity) in the vector database, matching and recalling the most relevant text fragments.
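A minimal sketch of this retrieval step in pure Python, assuming the index is just a list of `(vector, chunk_text)` pairs built during ingestion:

```python
import math


def cosine_similarity(a: list[float], b: list[float]) -> float:
    dot = sum(x * y for x, y in zip(a, b))
    norm = math.sqrt(sum(x * x for x in a)) * math.sqrt(sum(y * y for y in b))
    return dot / norm if norm else 0.0


def retrieve(query_vec: list[float], index: list[tuple[list[float], str]], top_k: int = 5) -> list[str]:
    """Return the top_k chunk texts ranked by cosine similarity to the query vector."""
    scored = sorted(index, key=lambda item: cosine_similarity(query_vec, item[0]), reverse=True)
    return [text for _, text in scored[:top_k]]
```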
|
||||
|
||||
### Context construction and answer generation
|
||||
|
||||
The retrieved relevant content is added to the LLM's context as factual grounding, and the LLM finally generates the answer. Therefore, RAG can be seen as Context Engineering 1.0 for automated context construction.
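A sketch of this assembly step, where the retrieved chunks become the factual grounding in the prompt; the `llm` callable stands in for whatever model client is actually used:

```python
def answer_with_context(question: str, chunks: list[str], llm) -> str:
    # Number the chunks so the model can cite its reference material.
    context = "\n\n".join(f"[{i + 1}] {chunk}" for i, chunk in enumerate(chunks))
    prompt = (
        "Answer the question using only the reference material below.\n\n"
        f"Reference material:\n{context}\n\n"
        f"Question: {question}\nAnswer:"
    )
    return llm(prompt)
```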
|
||||
|
||||
## Deep dive into existing RAG architecture: beyond vector retrieval
|
||||
|
||||
An industrial-grade RAG system is far from being as simple as "vector search + LLM"; its complexity and challenges are primarily embedded in the retrieval process.
|
||||
|
||||
### Data complexity: multimodal document processing
|
||||
|
||||
Core Challenge: Corporate knowledge mostly exists in the form of multimodal documents containing text, charts, tables, and formulas. Simple OCR extraction loses a large amount of semantic information.
|
||||
|
||||
Advanced Practice: Leading solutions, such as RAGFlow, tend to use Visual Language Models (VLM) or specialized parsing models like DeepDoc to "translate" multimodal documents into unimodal text rich in structural and semantic information. Converting multimodal information into high-quality unimodal text has become standard practice for advanced RAG.
|
||||
|
||||
### The complexity of chunking: the trade-off between precision and context
|
||||
|
||||
A simple "chunk-embed-retrieve" pipeline has an inherent contradiction:
|
||||
- Semantic Matching requires small text chunks to ensure clear semantic focus.
|
||||
- Context Understanding requires large text chunks to ensure complete and coherent information.
|
||||
|
||||
This forces system design into a difficult trade-off between "precise but fragmented" and "complete but vague."
|
||||
|
||||
Advanced Practice: Leading solutions, such as RAGFlow, employ semantic enhancement techniques like constructing semantic tables of contents and knowledge graphs. These not only address semantic fragmentation caused by physical chunking but also enable the discovery of relevant content across documents based on entity-relationship networks.
|
||||
|
||||
### Why is a vector database insufficient for serving RAG?
|
||||
|
||||
Vector databases excel at semantic similarity search, but RAG requires precise and reliable answers, demanding more capabilities from the retrieval system:
|
||||
- Hybrid Search: Relying solely on vector retrieval may miss exact keyword matches (e.g., product codes, regulation numbers). Hybrid search, combining vector retrieval with keyword retrieval (BM25), ensures both semantic breadth and keyword precision.
|
||||
- Tensor or Multi-Vector Representation: To support cross-modal data, employing tensor or multi-vector representation has become an important trend.
|
||||
- Metadata Filtering: Filtering based on attributes like date, department, and type is a rigid requirement in business scenarios.
|
||||
|
||||
Therefore, the retrieval layer of RAG is a composite system based on vector search but must integrate capabilities like full-text search, re-ranking, and metadata filtering.
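As an illustration of this composite retrieval layer, the sketch below fuses vector and BM25 scores after applying a metadata filter; the scoring callables and document attributes are assumptions, not a specific engine's API:

```python
def hybrid_search(query_vec, query_terms, docs, vector_score, bm25_score,
                  alpha: float = 0.7, metadata_filter=None, top_k: int = 10):
    """docs: iterable of objects with .vector, .text, and .metadata attributes.
    vector_score / bm25_score: callables returning a relevance score per document."""
    # Metadata filtering narrows the candidate set before any scoring happens.
    candidates = [d for d in docs if metadata_filter is None or metadata_filter(d.metadata)]
    scored = [
        (alpha * vector_score(query_vec, d.vector) + (1 - alpha) * bm25_score(query_terms, d.text), d)
        for d in candidates
    ]
    scored.sort(key=lambda pair: pair[0], reverse=True)
    return [d for _, d in scored[:top_k]]
```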
|
||||
|
||||
## RAG and memory: Retrieval from the same source but different streams
|
||||
|
||||
Within the agent framework, the essence of the memory mechanism is the same as RAG: both retrieve relevant information from storage based on current needs. The key difference lies in the data source:
|
||||
- RAG: Targets pre-existing static or dynamic private data provided by the user in advance (e.g., documents, databases).
|
||||
- Memory: Targets dynamic data generated or perceived by the agent in real-time during interaction (e.g., conversation history, environmental state, tool execution results).
|
||||
They are highly consistent at the technical base (e.g., vector retrieval, keyword matching) and can be seen as the same retrieval capability applied in different scenarios ("existing knowledge" vs. "interaction memory"). A complete agent system often includes both an RAG module for inherent knowledge and a Memory module for interaction history.
|
||||
|
||||
## RAG applications
|
||||
|
||||
RAG has demonstrated clear value in several typical scenarios:
|
||||
|
||||
1. Enterprise Knowledge Q&A and Internal Search
|
||||
By vectorizing corporate private data and combining it with an LLM, RAG can directly return natural language answers based on authoritative sources, rather than document lists. While meeting intelligent Q&A needs, it inherently aligns with corporate requirements for data security, access control, and compliance.
|
||||
2. Complex Document Understanding and Professional Q&A
|
||||
For structurally complex documents like contracts and regulations, the value of RAG lies in its ability to generate accurate, verifiable answers while maintaining context integrity. Its system accuracy largely depends on text chunking and semantic understanding strategies.
|
||||
3. Dynamic Knowledge Fusion and Decision Support
|
||||
In business scenarios requiring the synthesis of information from multiple sources, RAG evolves into a knowledge orchestration and reasoning support system for business decisions. Through a multi-path recall mechanism, it fuses knowledge from different systems and formats, maintaining factual consistency and logical controllability during the generation phase.
|
||||
|
||||
## The future of RAG
|
||||
|
||||
The evolution of RAG is unfolding along several clear paths:
|
||||
|
||||
1. RAG as the data foundation for Agents
|
||||
RAG and agents have an architecture vs. scenario relationship. For agents to achieve autonomous and reliable decision-making and execution, they must rely on accurate and timely knowledge. RAG provides them with a standardized capability to access private domain knowledge and is an inevitable choice for building knowledge-aware agents.
|
||||
2. Advanced RAG: Using LLMs to optimize retrieval itself
|
||||
The core feature of next-generation RAG is fully utilizing the reasoning capabilities of LLMs to optimize the retrieval process, such as rewriting queries, summarizing or fusing results, or implementing intelligent routing. Empowering every aspect of retrieval with LLMs is key to breaking through current performance bottlenecks.
|
||||
3. Towards context engineering 2.0
|
||||
Current RAG can be viewed as Context Engineering 1.0, whose core is assembling static knowledge context for single Q&A tasks. The forthcoming Context Engineering 2.0 will extend with RAG technology at its core, becoming a system that automatically and dynamically assembles comprehensive context for agents. The context fused by this system will come not only from documents but also include interaction memory, available tools/skills, and real-time environmental information. This marks the transition of agent development from a "handicraft workshop" model to the industrial starting point of automated context engineering.
|
||||
|
||||
The essence of RAG is to build a dedicated, efficient, and trustworthy external data interface for large language models; its core is Retrieval, not Generation. Starting from the practical need to solve private data access, its technical depth is reflected in the optimization of retrieval for complex unstructured data. With its deep integration into agent architectures and its development towards automated context engineering, RAG is evolving from a technology that improves Q&A quality into the core infrastructure for building the next generation of trustworthy, controllable, and scalable intelligent applications.
|
||||
@ -99,7 +99,7 @@ RAGFlow utilizes MinIO as its object storage solution, leveraging its scalabilit
|
||||
- `SVR_HTTP_PORT`
|
||||
The port used to expose RAGFlow's HTTP API service to the host machine, allowing **external** access to the service running inside the Docker container. Defaults to `9380`.
|
||||
- `RAGFLOW_IMAGE`
|
||||
The Docker image edition. Defaults to `infiniflow/ragflow:v0.23.0` (the RAGFlow Docker image without embedding models).
|
||||
The Docker image edition. Defaults to `infiniflow/ragflow:v0.23.1` (the RAGFlow Docker image without embedding models).
|
||||
|
||||
:::tip NOTE
|
||||
If you cannot download the RAGFlow Docker image, try the following mirrors.
|
||||
|
||||
@ -47,7 +47,7 @@ After building the infiniflow/ragflow:nightly image, you are ready to launch a f
|
||||
|
||||
1. Edit Docker Compose Configuration
|
||||
|
||||
Open the `docker/.env` file. Find the `RAGFLOW_IMAGE` setting and change the image reference from `infiniflow/ragflow:v0.23.0` to `infiniflow/ragflow:nightly` to use the pre-built image.
|
||||
Open the `docker/.env` file. Find the `RAGFLOW_IMAGE` setting and change the image reference from `infiniflow/ragflow:v0.23.1` to `infiniflow/ragflow:nightly` to use the pre-built image.
|
||||
|
||||
|
||||
2. Launch the Service
|
||||
|
||||
8
docs/guides/admin/_category_.json
Normal file
@ -0,0 +1,8 @@
|
||||
{
|
||||
"label": "Administration",
|
||||
"position": 6,
|
||||
"link": {
|
||||
"type": "generated-index",
|
||||
"description": "RAGFlow administration"
|
||||
}
|
||||
}
|
||||
@ -1,43 +1,11 @@
|
||||
---
|
||||
sidebar_position: 6
|
||||
slug: /manage_users_and_services
|
||||
sidebar_position: 2
|
||||
slug: /admin_cli
|
||||
---
|
||||
|
||||
# Admin CLI
|
||||
|
||||
# Admin CLI and Admin Service
|
||||
|
||||
|
||||
|
||||
The Admin CLI and Admin Service form a client-server architectural suite for RAGFlow system administration. The Admin CLI serves as an interactive command-line interface that receives instructions and displays execution results from the Admin Service in real-time. This duo enables real-time monitoring of system operational status, supporting visibility into RAGFlow Server services and dependent components including MySQL, Elasticsearch, Redis, and MinIO. In administrator mode, they provide user management capabilities that allow viewing users and performing critical operations—such as user creation, password updates, activation status changes, and comprehensive user data deletion—even when corresponding web interface functionalities are disabled.
|
||||
|
||||
|
||||
|
||||
## Starting the Admin Service
|
||||
|
||||
### Launching from source code
|
||||
|
||||
1. Before starting the Admin Service, please make sure the RAGFlow system is already started.
|
||||
|
||||
2. Launch from source code:
|
||||
|
||||
```bash
|
||||
python admin/server/admin_server.py
|
||||
```
|
||||
|
||||
The service will start and listen for incoming connections from the CLI on the configured port.
|
||||
|
||||
### Using docker image
|
||||
|
||||
1. Before startup, please configure the `docker-compose.yml` file to enable the admin server:
|
||||
|
||||
```bash
|
||||
command:
|
||||
- --enable-adminserver
|
||||
```
|
||||
|
||||
2. Start the containers; the service will start and listen for incoming connections from the CLI on the configured port.
|
||||
|
||||
|
||||
The RAGFlow Admin CLI is a command-line-based system administration tool that offers administrators an efficient and flexible method for system interaction and control. Operating on a client-server architecture, it communicates in real-time with the Admin Service, receiving administrator commands and dynamically returning execution results.
|
||||
|
||||
## Using the Admin CLI
|
||||
|
||||
@ -46,7 +14,7 @@ The Admin CLI and Admin Service form a client-server architectural suite for RAG
|
||||
2. Install ragflow-cli.
|
||||
|
||||
```bash
|
||||
pip install ragflow-cli==0.23.0
|
||||
pip install ragflow-cli==0.23.1
|
||||
```
|
||||
|
||||
3. Launch the CLI client:
|
||||
39
docs/guides/admin/admin_service.md
Normal file
@ -0,0 +1,39 @@
|
||||
---
|
||||
sidebar_position: 0
|
||||
slug: /admin_service
|
||||
---
|
||||
|
||||
|
||||
# Admin Service
|
||||
|
||||
The Admin Service is the core backend management service of the RAGFlow system, providing comprehensive system administration capabilities through centralized API interfaces for managing and controlling the entire platform. Adopting a client-server architecture, it supports access and operations via both a Web UI and an Admin CLI, ensuring flexible and efficient execution of administrative tasks.
|
||||
|
||||
The core functions of the Admin Service include real-time monitoring of the operational status of the RAGFlow server and its critical dependent components—such as MySQL, Elasticsearch, Redis, and MinIO—along with full-featured user management. In administrator mode, it enables key operations such as viewing user information, creating users, updating passwords, modifying activation status, and performing complete user data deletion. These functions remain accessible via the Admin CLI even when the web management interface is disabled, ensuring the system stays under control at all times.
|
||||
|
||||
With its unified interface design, the Admin Service combines the convenience of visual administration with the efficiency and stability of command-line operations, serving as a crucial foundation for the reliable operation and secure management of the RAGFlow system.
|
||||
|
||||
## Starting the Admin Service
|
||||
|
||||
### Launching from source code
|
||||
|
||||
1. Before starting the Admin Service, please make sure the RAGFlow system is already started.
|
||||
|
||||
2. Launch from source code:
|
||||
|
||||
```bash
|
||||
python admin/server/admin_server.py
|
||||
```
|
||||
|
||||
The service will start and listen for incoming connections from the CLI on the configured port.
|
||||
|
||||
### Using docker image
|
||||
|
||||
1. Before startup, please configure the `docker-compose.yml` file to enable the admin server:
|
||||
|
||||
```bash
|
||||
command:
|
||||
- --enable-adminserver
|
||||
```
|
||||
|
||||
2. Start the containers; the service will start and listen for incoming connections from the CLI on the configured port.
|
||||
|
||||
@ -1,6 +1,6 @@
|
||||
---
|
||||
sidebar_position: 7
|
||||
slug: /accessing_admin_ui
|
||||
sidebar_position: 1
|
||||
slug: /admin_ui
|
||||
---
|
||||
|
||||
# Admin UI
|
||||
@ -133,7 +133,7 @@ See [Run retrieval test](./run_retrieval_test.md) for details.
|
||||
|
||||
## Search for dataset
|
||||
|
||||
As of RAGFlow v0.23.0, the search feature is still in a rudimentary form, supporting only dataset search by name.
|
||||
As of RAGFlow v0.23.1, the search feature is still in a rudimentary form, supporting only dataset search by name.
|
||||
|
||||

|
||||
|
||||
|
||||
@ -87,4 +87,4 @@ RAGFlow's file management allows you to download an uploaded file:
|
||||
|
||||

|
||||
|
||||
> As of RAGFlow v0.23.0, bulk download is not supported, nor can you download an entire folder.
|
||||
> As of RAGFlow v0.23.1, bulk download is not supported, nor can you download an entire folder.
|
||||
|
||||
8
docs/guides/migration/_category_.json
Normal file
@ -0,0 +1,8 @@
|
||||
{
|
||||
"label": "Migration",
|
||||
"position": 5,
|
||||
"link": {
|
||||
"type": "generated-index",
|
||||
"description": "RAGFlow migration guide"
|
||||
}
|
||||
}
|
||||
@@ -1,109 +0,0 @@
---
sidebar_position: 8
slug: /run_health_check
---

# Monitoring

Double-check the health status of RAGFlow's dependencies.

---

The operation of RAGFlow depends on four services:

- **Elasticsearch** (default) or [Infinity](https://github.com/infiniflow/infinity) as the document engine
- **MySQL**
- **Redis**
- **MinIO** for object storage

If an exception or error occurs related to any of the above services, such as `Exception: Can't connect to ES cluster`, refer to this document to check their health status.

You can also click your avatar in the top right corner of the page **>** System to view the visualized health status of RAGFlow's core services. The following screenshot shows that all services are 'green' (running healthily). The task executor displays the *cumulative* number of completed and failed document parsing tasks from the past 30 minutes:

Services with a yellow or red light are not running properly. The following is a screenshot of the system page after running `docker stop ragflow-es-10`:

You can click on a specific 30-second time interval to view the details of completed and failed tasks:

## API Health Check

In addition to checking the system dependencies from the **avatar > System** page in the UI, you can query the backend health check endpoint directly:

```bash
curl -i http://IP_OF_YOUR_MACHINE:<port>/v1/system/healthz
```

Here `<port>` refers to the actual port of your backend service (e.g., `7897`, `9222`, etc.).

Key points:

- **No login required** (no `@login_required` decorator)
- Returns results in JSON format
- If all dependencies are healthy → HTTP **200 OK**
- If any dependency fails → HTTP **500 Internal Server Error**

### Example 1: All services healthy (HTTP 200)

```bash
curl -i http://127.0.0.1/v1/system/healthz
```

Response:

```http
HTTP/1.1 200 OK
Content-Type: application/json
Content-Length: 120

```

Explanation:

- Database (MySQL/Postgres), Redis, document engine (Elasticsearch/Infinity), and object storage (MinIO) are all healthy.
- The `status` field returns `"ok"`.

### Example 2: One service unhealthy (HTTP 500)

For example, if Redis is down:

Response:

```http
HTTP/1.1 500 INTERNAL SERVER ERROR
Content-Type: application/json
Content-Length: 300

{
  "redis": "nok",
  "doc_engine": "ok",
  "storage": "ok",
  "status": "nok",
  "_meta": {
    "redis": {
      "elapsed": "5.2",
      "error": "Lost connection!"
    }
  }
}
```

Explanation:

- `redis` is marked as `"nok"`, with detailed error info under `_meta.redis.error`.
- The overall `status` is `"nok"`, so the endpoint returns 500.

---

This endpoint allows you to monitor RAGFlow’s core dependencies programmatically in scripts or external monitoring systems, without relying on the frontend UI.
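For example, an external monitor can poll this endpoint and treat anything other than HTTP 200 as unhealthy. Below is a minimal sketch in Python, assuming the `requests` package is installed; the URL (host plus the example port `7897`) and the 30-second interval are placeholders to adapt to your deployment:

```python
import time

import requests

# Placeholder URL: substitute your own host and backend port (e.g., 7897 or 9222).
HEALTHZ_URL = "http://127.0.0.1:7897/v1/system/healthz"


def check_ragflow_health(url: str = HEALTHZ_URL, timeout: float = 5.0) -> bool:
    """Return True when the healthz endpoint answers HTTP 200; report failures otherwise."""
    try:
        resp = requests.get(url, timeout=timeout)
    except requests.RequestException as exc:
        print(f"healthz request failed: {exc}")
        return False

    if resp.status_code == 200:
        return True

    # On HTTP 500 the JSON body marks each dependency "ok"/"nok" and puts details under "_meta".
    try:
        body = resp.json()
    except ValueError:
        print(f"healthz returned HTTP {resp.status_code} with a non-JSON body")
        return False

    failing = [name for name, state in body.items() if state == "nok" and name != "status"]
    print(f"unhealthy dependencies: {failing}; details: {body.get('_meta', {})}")
    return False


if __name__ == "__main__":
    while True:
        if not check_ragflow_health():
            pass  # hook your alerting (email, webhook, pager) here
        time.sleep(30)
```
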
@@ -60,16 +60,16 @@ To upgrade RAGFlow, you must upgrade **both** your code **and** your Docker imag
git pull
```

3. Switch to the latest, officially published release, e.g., `v0.23.0`:
3. Switch to the latest, officially published release, e.g., `v0.23.1`:

```bash
git checkout -f v0.23.0
git checkout -f v0.23.1
```

4. Update **ragflow/docker/.env**:

```bash
RAGFLOW_IMAGE=infiniflow/ragflow:v0.23.0
RAGFLOW_IMAGE=infiniflow/ragflow:v0.23.1
```

5. Update the RAGFlow image and restart RAGFlow:

@@ -90,10 +90,10 @@ No, you do not need to. Upgrading RAGFlow in itself will *not* remove your uploa
1. From an environment with Internet access, pull the required Docker image.
2. Save the Docker image to a **.tar** file.
```bash
docker save -o ragflow.v0.23.0.tar infiniflow/ragflow:v0.23.0
docker save -o ragflow.v0.23.1.tar infiniflow/ragflow:v0.23.1
```
3. Copy the **.tar** file to the target server.
4. Load the **.tar** file into Docker:
```bash
docker load -i ragflow.v0.23.0.tar
docker load -i ragflow.v0.23.1.tar
```
@@ -46,7 +46,7 @@ This section provides instructions on setting up the RAGFlow server on Linux. If

`vm.max_map_count`. This value sets the maximum number of memory map areas a process may have. Its default value is 65530. While most applications require fewer than a thousand maps, reducing this value can result in abnormal behaviors, and the system will throw out-of-memory errors when a process reaches the limitation.

RAGFlow v0.23.0 uses Elasticsearch or [Infinity](https://github.com/infiniflow/infinity) for multiple recall. Setting the value of `vm.max_map_count` correctly is crucial to the proper functioning of the Elasticsearch component.
RAGFlow v0.23.1 uses Elasticsearch or [Infinity](https://github.com/infiniflow/infinity) for multiple recall. Setting the value of `vm.max_map_count` correctly is crucial to the proper functioning of the Elasticsearch component.

<Tabs
defaultValue="linux"
@@ -186,7 +186,7 @@ This section provides instructions on setting up the RAGFlow server on Linux. If
```bash
$ git clone https://github.com/infiniflow/ragflow.git
$ cd ragflow/docker
$ git checkout -f v0.23.0
$ git checkout -f v0.23.1
```

3. Use the pre-built Docker images and start up the server:
@@ -202,7 +202,7 @@ This section provides instructions on setting up the RAGFlow server on Linux. If

| RAGFlow image tag | Image size (GB) | Stable?                  |
| ----------------- | --------------- | ------------------------ |
| v0.23.0           | ≈2              | Stable release           |
| v0.23.1           | ≈2              | Stable release           |
| nightly           | ≈2              | _Unstable_ nightly build |

```mdx-code-block

@ -13,61 +13,58 @@ A complete list of models supported by RAGFlow, which will continue to expand.
|
||||
<APITable>
|
||||
```
|
||||
|
||||
| Provider | Chat | Embedding | Rerank | Img2txt | Speech2txt | TTS |
|
||||
| --------------------- | ------------------ | ------------------ | ------------------ | ------------------ | ------------------ | ------------------ |
|
||||
| Anthropic | :heavy_check_mark: | | | | | |
|
||||
| Azure-OpenAI | :heavy_check_mark: | :heavy_check_mark: | | :heavy_check_mark: | :heavy_check_mark: | |
|
||||
| BAAI | | :heavy_check_mark: | :heavy_check_mark: | | | |
|
||||
| BaiChuan | :heavy_check_mark: | :heavy_check_mark: | | | | |
|
||||
| BaiduYiyan | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | | |
|
||||
| Bedrock | :heavy_check_mark: | :heavy_check_mark: | | | | |
|
||||
| Cohere | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | | |
|
||||
| DeepSeek | :heavy_check_mark: | | | | | |
|
||||
| FastEmbed | | :heavy_check_mark: | | | | |
|
||||
| Fish Audio | | | | | | :heavy_check_mark: |
|
||||
| Gemini | :heavy_check_mark: | :heavy_check_mark: | | :heavy_check_mark: | | |
|
||||
| Google Cloud | :heavy_check_mark: | | | | | |
|
||||
| GPUStack | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | | :heavy_check_mark: | :heavy_check_mark: |
|
||||
| Groq | :heavy_check_mark: | | | | | |
|
||||
| HuggingFace | :heavy_check_mark: | :heavy_check_mark: | | | | |
|
||||
| Jina | | :heavy_check_mark: | :heavy_check_mark: | | | |
|
||||
| LeptonAI | :heavy_check_mark: | | | | | |
|
||||
| LocalAI | :heavy_check_mark: | :heavy_check_mark: | | :heavy_check_mark: | | |
|
||||
| LM-Studio | :heavy_check_mark: | :heavy_check_mark: | | :heavy_check_mark: | | |
|
||||
| MiniMax | :heavy_check_mark: | | | | | |
|
||||
| Mistral | :heavy_check_mark: | :heavy_check_mark: | | | | |
|
||||
| ModelScope | :heavy_check_mark: | | | | | |
|
||||
| Moonshot | :heavy_check_mark: | | | :heavy_check_mark: | | |
|
||||
| Novita AI | :heavy_check_mark: | :heavy_check_mark: | | | | |
|
||||
| NVIDIA | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | | |
|
||||
| Ollama | :heavy_check_mark: | :heavy_check_mark: | | :heavy_check_mark: | | |
|
||||
| OpenAI | :heavy_check_mark: | :heavy_check_mark: | | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: |
|
||||
| OpenAI-API-Compatible | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | | |
|
||||
| OpenRouter | :heavy_check_mark: | | | :heavy_check_mark: | | |
|
||||
| PerfXCloud | :heavy_check_mark: | :heavy_check_mark: | | | | |
|
||||
| Replicate | :heavy_check_mark: | :heavy_check_mark: | | | | |
|
||||
| PPIO | :heavy_check_mark: | | | | | |
|
||||
| SILICONFLOW | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | | |
|
||||
| StepFun | :heavy_check_mark: | | | | | |
|
||||
| Tencent Hunyuan | :heavy_check_mark: | | | | | |
|
||||
| Tencent Cloud | | | | | :heavy_check_mark: | |
|
||||
| TogetherAI | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | | |
|
||||
| Tongyi-Qianwen | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: |
|
||||
| Upstage | :heavy_check_mark: | :heavy_check_mark: | | | | |
|
||||
| VLLM | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | | |
|
||||
| VolcEngine | :heavy_check_mark: | | | | | |
|
||||
| Voyage AI | | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | | |
|
||||
| Xinference | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: |
|
||||
| XunFei Spark | :heavy_check_mark: | | | | | :heavy_check_mark: |
|
||||
| xAI | :heavy_check_mark: | | | :heavy_check_mark: | | |
|
||||
| Youdao | | :heavy_check_mark: | :heavy_check_mark: | | | |
|
||||
| ZHIPU-AI | :heavy_check_mark: | :heavy_check_mark: | | :heavy_check_mark: | | |
|
||||
| 01.AI | :heavy_check_mark: | | | | | |
|
||||
| DeepInfra | :heavy_check_mark: | :heavy_check_mark: | | | :heavy_check_mark: | :heavy_check_mark: |
|
||||
| 302.AI | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | | |
|
||||
| CometAPI | :heavy_check_mark: | :heavy_check_mark: | | | | |
|
||||
| DeerAPI | :heavy_check_mark: | :heavy_check_mark: | | :heavy_check_mark: | | :heavy_check_mark: |
|
||||
| Jiekou.AI | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | | | |
|
||||
| Provider | LLM | Image2Text | Speech2text | TTS | Embedding | Rerank | OCR |
|
||||
| --------------------- | ------------------ | ------------------ | ------------------ | ------------------ | ------------------ | ------------------ | ------------------ |
|
||||
| Anthropic | :heavy_check_mark: | | | | | | |
|
||||
| Azure-OpenAI | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | | :heavy_check_mark: | | |
|
||||
| BaiChuan | :heavy_check_mark: | | | | :heavy_check_mark: | | |
|
||||
| BaiduYiyan | :heavy_check_mark: | :heavy_check_mark: | | | :heavy_check_mark: | :heavy_check_mark: | |
|
||||
| Bedrock | :heavy_check_mark: | | | | :heavy_check_mark: | | |
|
||||
| Cohere | :heavy_check_mark: | :heavy_check_mark: | | | :heavy_check_mark: | :heavy_check_mark: | |
|
||||
| DeepSeek | :heavy_check_mark: | | | | | | |
|
||||
| Fish Audio | | | | :heavy_check_mark: | | | |
|
||||
| Gemini | :heavy_check_mark: | :heavy_check_mark: | | | :heavy_check_mark: | | |
|
||||
| Google Cloud | :heavy_check_mark: | | | | | | |
|
||||
| GPUStack | :heavy_check_mark: | | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | |
|
||||
| Groq | :heavy_check_mark: | | | | | | |
|
||||
| HuggingFace | :heavy_check_mark: | | | | :heavy_check_mark: | | |
|
||||
| Jina | | | | | :heavy_check_mark: | :heavy_check_mark: | |
|
||||
| LocalAI | :heavy_check_mark: | :heavy_check_mark: | | | :heavy_check_mark: | | |
|
||||
| LongCat | :heavy_check_mark: | | | | | | |
|
||||
| LM-Studio | :heavy_check_mark: | :heavy_check_mark: | | | :heavy_check_mark: | | |
|
||||
| MiniMax | :heavy_check_mark: | | | | | | |
|
||||
| MinerU | | | | | | | :heavy_check_mark: |
|
||||
| Mistral | :heavy_check_mark: | | | | :heavy_check_mark: | | |
|
||||
| ModelScope | :heavy_check_mark: | | | | | | |
|
||||
| Moonshot | :heavy_check_mark: | :heavy_check_mark: | | | | | |
|
||||
| NovitaAI | :heavy_check_mark: | | | | :heavy_check_mark: | | |
|
||||
| NVIDIA | :heavy_check_mark: | :heavy_check_mark: | | | :heavy_check_mark: | :heavy_check_mark: | |
|
||||
| Ollama | :heavy_check_mark: | :heavy_check_mark: | | | :heavy_check_mark: | | |
|
||||
| OpenAI | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | | |
|
||||
| OpenAI-API-Compatible | :heavy_check_mark: | :heavy_check_mark: | | | :heavy_check_mark: | :heavy_check_mark: | |
|
||||
| OpenRouter | :heavy_check_mark: | :heavy_check_mark: | | | | | |
|
||||
| Replicate | :heavy_check_mark: | | | | :heavy_check_mark: | | |
|
||||
| PPIO | :heavy_check_mark: | | | | | | |
|
||||
| SILICONFLOW | :heavy_check_mark: | :heavy_check_mark: | | | :heavy_check_mark: | :heavy_check_mark: | |
|
||||
| StepFun | :heavy_check_mark: | | | | | | |
|
||||
| Tencent Hunyuan | :heavy_check_mark: | | | | | | |
|
||||
| Tencent Cloud | | | :heavy_check_mark: | | | | |
|
||||
| TogetherAI | :heavy_check_mark: | :heavy_check_mark: | | | :heavy_check_mark: | :heavy_check_mark: | |
|
||||
| TokenPony | :heavy_check_mark: | | | | | | |
|
||||
| Tongyi-Qianwen | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | |
|
||||
| Upstage | :heavy_check_mark: | | | | :heavy_check_mark: | | |
|
||||
| VLLM | :heavy_check_mark: | :heavy_check_mark: | | | :heavy_check_mark: | :heavy_check_mark: | |
|
||||
| VolcEngine | :heavy_check_mark: | | | | | | |
|
||||
| Voyage AI | | :heavy_check_mark: | | | :heavy_check_mark: | :heavy_check_mark: | |
|
||||
| Xinference | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | |
|
||||
| XunFei Spark | :heavy_check_mark: | | | :heavy_check_mark: | | | |
|
||||
| xAI | :heavy_check_mark: | :heavy_check_mark: | | | | | |
|
||||
| ZHIPU-AI | :heavy_check_mark: | :heavy_check_mark: | | | :heavy_check_mark: | | |
|
||||
| DeepInfra | :heavy_check_mark: | | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | | |
|
||||
| 302.AI | :heavy_check_mark: | :heavy_check_mark: | | | :heavy_check_mark: | :heavy_check_mark: | |
|
||||
| CometAPI | :heavy_check_mark: | | | | :heavy_check_mark: | | |
|
||||
| DeerAPI | :heavy_check_mark: | :heavy_check_mark: | | :heavy_check_mark: | :heavy_check_mark: | | |
|
||||
| Jiekou.AI | :heavy_check_mark: | | | | :heavy_check_mark: | :heavy_check_mark: | |
|
||||
|
||||
```mdx-code-block
|
||||
</APITable>
|
||||
|
||||
@@ -7,6 +7,31 @@ slug: /release_notes

Key features, improvements, and bug fixes in the latest releases.

## v0.23.1

Released on December 31, 2025.

### Improvements

- Memory: Enhances the stability of memory extraction when all memory types are selected.
- RAG: Refines the context window extraction strategy for images and tables.

### Fixed issues

- Memory:
  - The RAGFlow server failed to start if an empty memory object existed.
  - A newly created empty Memory could not be deleted.
- RAG: MDX file parsing was not supported.

### Data sources

- GitHub
- GitLab
- Asana
- IMAP

## v0.23.0

Released on December 27, 2025.
@@ -32,6 +57,7 @@ Released on December 27, 2025.

### Improvements

- RAG: Accelerates GraphRAG generation significantly.
- Bumps RAGFlow's document engine, [Infinity](https://github.com/infiniflow/infinity), to v0.6.15 (backward compatible).

### Data sources
@ -13,6 +13,7 @@
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
import asyncio
|
||||
import json
|
||||
import logging
|
||||
from collections import defaultdict
|
||||
@ -32,21 +33,21 @@ from common.doc_store.doc_store_base import OrderByExpr
|
||||
|
||||
|
||||
class KGSearch(Dealer):
|
||||
def _chat(self, llm_bdl, system, history, gen_conf):
|
||||
async def _chat(self, llm_bdl, system, history, gen_conf):
|
||||
response = get_llm_cache(llm_bdl.llm_name, system, history, gen_conf)
|
||||
if response:
|
||||
return response
|
||||
response = llm_bdl.chat(system, history, gen_conf)
|
||||
response = await llm_bdl.async_chat(system, history, gen_conf)
|
||||
if response.find("**ERROR**") >= 0:
|
||||
raise Exception(response)
|
||||
set_llm_cache(llm_bdl.llm_name, system, response, history, gen_conf)
|
||||
return response
|
||||
|
||||
def query_rewrite(self, llm, question, idxnms, kb_ids):
|
||||
async def query_rewrite(self, llm, question, idxnms, kb_ids):
|
||||
ty2ents = get_entity_type2samples(idxnms, kb_ids)
|
||||
hint_prompt = PROMPTS["minirag_query2kwd"].format(query=question,
|
||||
TYPE_POOL=json.dumps(ty2ents, ensure_ascii=False, indent=2))
|
||||
result = self._chat(llm, hint_prompt, [{"role": "user", "content": "Output:"}], {})
|
||||
result = await self._chat(llm, hint_prompt, [{"role": "user", "content": "Output:"}], {})
|
||||
try:
|
||||
keywords_data = json_repair.loads(result)
|
||||
type_keywords = keywords_data.get("answer_type_keywords", [])
|
||||
@ -138,7 +139,7 @@ class KGSearch(Dealer):
|
||||
idxnms, kb_ids)
|
||||
return self._ent_info_from_(es_res, 0)
|
||||
|
||||
def retrieval(self, question: str,
|
||||
async def retrieval(self, question: str,
|
||||
tenant_ids: str | list[str],
|
||||
kb_ids: list[str],
|
||||
emb_mdl,
|
||||
@ -158,7 +159,7 @@ class KGSearch(Dealer):
|
||||
idxnms = [index_name(tid) for tid in tenant_ids]
|
||||
ty_kwds = []
|
||||
try:
|
||||
ty_kwds, ents = self.query_rewrite(llm, qst, [index_name(tid) for tid in tenant_ids], kb_ids)
|
||||
ty_kwds, ents = await self.query_rewrite(llm, qst, [index_name(tid) for tid in tenant_ids], kb_ids)
|
||||
logging.info(f"Q: {qst}, Types: {ty_kwds}, Entities: {ents}")
|
||||
except Exception as e:
|
||||
logging.exception(e)
|
||||
@ -334,5 +335,5 @@ if __name__ == "__main__":
|
||||
embed_bdl = LLMBundle(args.tenant_id, LLMType.EMBEDDING, kb.embd_id)
|
||||
|
||||
kg = KGSearch(settings.docStoreConn)
|
||||
print(kg.retrieval({"question": args.question, "kb_ids": [kb_id]},
|
||||
search.index_name(kb.tenant_id), [kb_id], embed_bdl, llm_bdl))
|
||||
print(asyncio.run(kg.retrieval({"question": args.question, "kb_ids": [kb_id]},
|
||||
search.index_name(kb.tenant_id), [kb_id], embed_bdl, llm_bdl)))
|
||||
|
||||
@ -77,7 +77,7 @@ env:
|
||||
ragflow:
|
||||
image:
|
||||
repository: infiniflow/ragflow
|
||||
tag: v0.23.0
|
||||
tag: v0.23.1
|
||||
pullPolicy: IfNotPresent
|
||||
pullSecrets: []
|
||||
# Optional service configuration overrides
|
||||
|
||||
@ -1,6 +1,6 @@
|
||||
[project]
|
||||
name = "ragflow"
|
||||
version = "0.23.0"
|
||||
version = "0.23.1"
|
||||
description = "[RAGFlow](https://ragflow.io/) is an open-source RAG (Retrieval-Augmented Generation) engine based on deep document understanding. It offers a streamlined RAG workflow for businesses of any scale, combining LLM (Large Language Models) to provide truthful question-answering capabilities, backed by well-founded citations from various complex formatted data."
|
||||
authors = [{ name = "Zhichang Yu", email = "yuzhichang@gmail.com" }]
|
||||
license-files = ["LICENSE"]
|
||||
|
||||
@ -40,7 +40,7 @@ from deepdoc.parser.docling_parser import DoclingParser
|
||||
from deepdoc.parser.tcadp_parser import TCADPParser
|
||||
from common.parser_config_utils import normalize_layout_recognizer
|
||||
from rag.nlp import concat_img, find_codec, naive_merge, naive_merge_with_images, naive_merge_docx, rag_tokenizer, \
|
||||
tokenize_chunks, tokenize_chunks_with_images, tokenize_table, attach_media_context
|
||||
tokenize_chunks, tokenize_chunks_with_images, tokenize_table, attach_media_context, append_context2table_image4pdf
|
||||
|
||||
|
||||
def by_deepdoc(filename, binary=None, from_page=0, to_page=100000, lang="Chinese", callback=None, pdf_cls=None,
|
||||
@ -487,7 +487,7 @@ class Pdf(PdfParser):
|
||||
tbls = self._extract_table_figure(True, zoomin, True, True)
|
||||
self._naive_vertical_merge()
|
||||
self._concat_downward()
|
||||
self._final_reading_order_merge()
|
||||
# self._final_reading_order_merge()
|
||||
# self._filter_forpages()
|
||||
logging.info("layouts cost: {}s".format(timer() - first_start))
|
||||
return [(b["text"], self._line_tag(b, zoomin)) for b in self.boxes], tbls
|
||||
@ -776,6 +776,9 @@ def chunk(filename, binary=None, from_page=0, to_page=100000, lang="Chinese", ca
|
||||
if not sections and not tables:
|
||||
return []
|
||||
|
||||
if table_context_size or image_context_size:
|
||||
tables = append_context2table_image4pdf(sections, tables, image_context_size)
|
||||
|
||||
if name in ["tcadp", "docling", "mineru"]:
|
||||
parser_config["chunk_token_num"] = 0
|
||||
|
||||
@ -1006,8 +1009,8 @@ def chunk(filename, binary=None, from_page=0, to_page=100000, lang="Chinese", ca
|
||||
res.extend(embed_res)
|
||||
if url_res:
|
||||
res.extend(url_res)
|
||||
if table_context_size or image_context_size:
|
||||
attach_media_context(res, table_context_size, image_context_size)
|
||||
#if table_context_size or image_context_size:
|
||||
# attach_media_context(res, table_context_size, image_context_size)
|
||||
return res
|
||||
|
||||
|
||||
|
||||
@ -16,7 +16,7 @@
|
||||
|
||||
import logging
|
||||
import random
|
||||
from collections import Counter
|
||||
from collections import Counter, defaultdict
|
||||
|
||||
from common.token_utils import num_tokens_from_string
|
||||
import re
|
||||
@ -667,6 +667,94 @@ def attach_media_context(chunks, table_context_size=0, image_context_size=0):
|
||||
return chunks
|
||||
|
||||
|
||||
def append_context2table_image4pdf(sections: list, tabls: list, table_context_size=0):
|
||||
from deepdoc.parser import PdfParser
|
||||
if table_context_size <=0:
|
||||
return tabls
|
||||
|
||||
page_bucket = defaultdict(list)
|
||||
for i, (txt, poss) in enumerate(sections):
|
||||
poss = PdfParser.extract_positions(poss)
|
||||
for page, left, right, top, bottom in poss:
|
||||
page = page[0]
|
||||
page_bucket[page].append(((left, top, right, bottom), txt))
|
||||
|
||||
def upper_context(page, i):
|
||||
txt = ""
|
||||
if page not in page_bucket:
|
||||
i = -1
|
||||
while num_tokens_from_string(txt) < table_context_size:
|
||||
if i < 0:
|
||||
page -= 1
|
||||
if page < 0 or page not in page_bucket:
|
||||
break
|
||||
i = len(page_bucket[page]) -1
|
||||
blks = page_bucket[page]
|
||||
(_, _, _, _), cnt = blks[i]
|
||||
txts = re.split(r"([。!??;!\n]|\. )", cnt, flags=re.DOTALL)[::-1]
|
||||
for j in range(0, len(txts), 2):
|
||||
txt = (txts[j+1] if j+1<len(txts) else "") + txts[j] + txt
|
||||
if num_tokens_from_string(txt) > table_context_size:
|
||||
break
|
||||
i -= 1
|
||||
return txt
|
||||
|
||||
def lower_context(page, i):
|
||||
txt = ""
|
||||
if page not in page_bucket:
|
||||
return txt
|
||||
while num_tokens_from_string(txt) < table_context_size:
|
||||
if i >= len(page_bucket[page]):
|
||||
page += 1
|
||||
if page not in page_bucket:
|
||||
break
|
||||
i = 0
|
||||
blks = page_bucket[page]
|
||||
(_, _, _, _), cnt = blks[i]
|
||||
txts = re.split(r"([。!??;!\n]|\. )", cnt, flags=re.DOTALL)
|
||||
for j in range(0, len(txts), 2):
|
||||
txt += txts[j] + (txts[j+1] if j+1<len(txts) else "")
|
||||
if num_tokens_from_string(txt) > table_context_size:
|
||||
break
|
||||
i += 1
|
||||
return txt
|
||||
|
||||
res = []
|
||||
for (img, tb), poss in tabls:
|
||||
page, left, top, right, bott = poss[0]
|
||||
_page, _left, _top, _right, _bott = poss[-1]
|
||||
if isinstance(tb, list):
|
||||
tb = "\n".join(tb)
|
||||
|
||||
i = 0
|
||||
blks = page_bucket.get(page, [])
|
||||
_tb = tb
|
||||
while i < len(blks):
|
||||
if i + 1 >= len(blks):
|
||||
if _page > page:
|
||||
page += 1
|
||||
i = 0
|
||||
blks = page_bucket.get(page, [])
|
||||
continue
|
||||
tb = upper_context(page, i) + tb + lower_context(page+1, 0)
|
||||
break
|
||||
(_, t, r, b), txt = blks[i]
|
||||
if b > top:
|
||||
break
|
||||
(_, _t, _r, _b), _txt = blks[i+1]
|
||||
if _t < _bott:
|
||||
i += 1
|
||||
continue
|
||||
|
||||
tb = upper_context(page, i) + tb + lower_context(page, i)
|
||||
break
|
||||
|
||||
if _tb == tb:
|
||||
tb = upper_context(page, -1) + tb + lower_context(page+1, 0)
|
||||
res.append(((img, tb), poss))
|
||||
return res
|
||||
|
||||
|
||||
def add_positions(d, poss):
|
||||
if not poss:
|
||||
return
|
||||
|
||||
@ -729,6 +729,8 @@ TOC_FROM_TEXT_USER = load_prompt("toc_from_text_user")
|
||||
|
||||
# Generate TOC from text chunks with text llms
|
||||
async def gen_toc_from_text(txt_info: dict, chat_mdl, callback=None):
|
||||
if callback:
|
||||
callback(msg="")
|
||||
try:
|
||||
ans = await gen_json(
|
||||
PROMPT_JINJA_ENV.from_string(TOC_FROM_TEXT_SYSTEM).render(),
|
||||
@ -738,8 +740,6 @@ async def gen_toc_from_text(txt_info: dict, chat_mdl, callback=None):
|
||||
gen_conf={"temperature": 0.0, "top_p": 0.9}
|
||||
)
|
||||
txt_info["toc"] = ans if ans and not isinstance(ans, str) else []
|
||||
if callback:
|
||||
callback(msg="")
|
||||
except Exception as e:
|
||||
logging.exception(e)
|
||||
|
||||
|
||||
@ -1,14 +1,11 @@
|
||||
## Role: Metadata extraction expert
|
||||
## Constraints:
|
||||
- Core Directive: Extract important structured information from the given content. Output ONLY a valid JSON string. No Markdown (e.g., ```json), no explanations, and no notes.
|
||||
- Schema Parsing: In the `properties` object provided in Schema, the attribute name (e.g., 'author') is the target Key. Extract values based on the `description`; if no `description` is provided, refer to the key's literal meaning.
|
||||
- Extraction Rules: Extract only when there is an explicit semantic correlation. If multiple values or data points match a field's definition, extract and include all of them. Strictly follow the Schema below and only output matched key-value pairs. If the content is irrelevant or no matching information is identified, you **MUST** output {}.
|
||||
- Data Source: Extraction must be based solely on content below. Semantic mapping (synonyms) is allowed, but strictly prohibit hallucinations or fabricated facts.
|
||||
|
||||
## Enum Rules (Triggered ONLY if an enum list is present):
|
||||
- Value Lock: All extracted values MUST strictly match the provided enum list.
|
||||
- Normalization: Map synonyms or variants in the text back to the standard enum value (e.g., "Dec" to "December").
|
||||
- Fallback: Output {} if no explicit match or synonym is identified.
|
||||
## Role: Metadata extraction expert.
|
||||
## Rules:
|
||||
- Strict Evidence Only: Extract a value ONLY if it is explicitly mentioned in the Content.
|
||||
- Enum Filter: For any field with an 'enum' list, the list acts as a strict filter. If no element from the list (or its direct synonym) is found in the Content, you MUST NOT extract that field.
|
||||
- No Meta-Inference: Do not infer values based on the document's nature, format, or category. If the text does not literally state the information, treat it as missing.
|
||||
- Zero-Hallucination: Never invent information or pick a "likely" value from the enum to fill a field.
|
||||
- Empty Result: If no matches are found for any field, or if the content is irrelevant, output ONLY {}.
|
||||
- Output: ONLY a valid JSON string. No Markdown, no notes.
|
||||
|
||||
## Schema for extraction:
|
||||
{{ schema }}
|
||||
|
||||
@ -10542,6 +10542,5 @@
|
||||
"周五": ["礼拜五", "星期五"],
|
||||
"周六": ["礼拜六", "星期六"],
|
||||
"周日": ["礼拜日", "星期日", "星期天", "礼拜天"],
|
||||
"上班": "办公",
|
||||
"HELO":"agn"
|
||||
"上班": "办公"
|
||||
}
|
||||
|
||||
@ -39,24 +39,28 @@ from api.db.services.knowledgebase_service import KnowledgebaseService
|
||||
from common import settings
|
||||
from common.config_utils import show_configs
|
||||
from common.data_source import (
|
||||
BlobStorageConnector,
|
||||
NotionConnector,
|
||||
DiscordConnector,
|
||||
GoogleDriveConnector,
|
||||
MoodleConnector,
|
||||
JiraConnector,
|
||||
DropboxConnector,
|
||||
WebDAVConnector,
|
||||
AirtableConnector,
|
||||
BlobStorageConnector,
|
||||
NotionConnector,
|
||||
DiscordConnector,
|
||||
GoogleDriveConnector,
|
||||
MoodleConnector,
|
||||
JiraConnector,
|
||||
DropboxConnector,
|
||||
AirtableConnector,
|
||||
AsanaConnector,
|
||||
ImapConnector,
|
||||
ZendeskConnector,
|
||||
)
|
||||
from common.constants import FileSource, TaskStatus
|
||||
from common.data_source.config import INDEX_BATCH_SIZE
|
||||
from common.data_source.models import ConnectorFailure
|
||||
from common.data_source.webdav_connector import WebDAVConnector
|
||||
from common.data_source.confluence_connector import ConfluenceConnector
|
||||
from common.data_source.gmail_connector import GmailConnector
|
||||
from common.data_source.box_connector import BoxConnector
|
||||
from common.data_source.github.connector import GithubConnector
|
||||
from common.data_source.gitlab_connector import GitlabConnector
|
||||
from common.data_source.bitbucket.connector import BitbucketConnector
|
||||
from common.data_source.interfaces import CheckpointOutputWrapper
|
||||
from common.log_utils import init_root_logger
|
||||
from common.signal_utils import start_tracemalloc_and_snapshot, stop_tracemalloc
|
||||
@ -692,7 +696,12 @@ class WebDAV(SyncBase):
|
||||
self.conf.get("remote_path", "/"),
|
||||
begin_info
|
||||
))
|
||||
return document_batch_generator
|
||||
|
||||
async def async_wrapper():
|
||||
for document_batch in document_batch_generator:
|
||||
yield document_batch
|
||||
|
||||
return async_wrapper()
|
||||
|
||||
|
||||
class Moodle(SyncBase):
|
||||
@ -914,7 +923,155 @@ class Github(SyncBase):
|
||||
)
|
||||
|
||||
return async_wrapper()
|
||||
|
||||
|
||||
class IMAP(SyncBase):
|
||||
SOURCE_NAME: str = FileSource.IMAP
|
||||
|
||||
async def _generate(self, task):
|
||||
from common.data_source.config import DocumentSource
|
||||
from common.data_source.interfaces import StaticCredentialsProvider
|
||||
self.connector = ImapConnector(
|
||||
host=self.conf.get("imap_host"),
|
||||
port=self.conf.get("imap_port"),
|
||||
mailboxes=self.conf.get("imap_mailbox"),
|
||||
)
|
||||
credentials_provider = StaticCredentialsProvider(tenant_id=task["tenant_id"], connector_name=DocumentSource.IMAP, credential_json=self.conf["credentials"])
|
||||
self.connector.set_credentials_provider(credentials_provider)
|
||||
end_time = datetime.now(timezone.utc).timestamp()
|
||||
if task["reindex"] == "1" or not task["poll_range_start"]:
|
||||
start_time = end_time - self.conf.get("poll_range",30) * 24 * 60 * 60
|
||||
begin_info = "totally"
|
||||
else:
|
||||
start_time = task["poll_range_start"].timestamp()
|
||||
begin_info = f"from {task['poll_range_start']}"
|
||||
raw_batch_size = self.conf.get("sync_batch_size") or self.conf.get("batch_size") or INDEX_BATCH_SIZE
|
||||
try:
|
||||
batch_size = int(raw_batch_size)
|
||||
except (TypeError, ValueError):
|
||||
batch_size = INDEX_BATCH_SIZE
|
||||
if batch_size <= 0:
|
||||
batch_size = INDEX_BATCH_SIZE
|
||||
|
||||
def document_batches():
|
||||
checkpoint = self.connector.build_dummy_checkpoint()
|
||||
pending_docs = []
|
||||
iterations = 0
|
||||
iteration_limit = 100_000
|
||||
while checkpoint.has_more:
|
||||
wrapper = CheckpointOutputWrapper()
|
||||
doc_generator = wrapper(self.connector.load_from_checkpoint(start_time, end_time, checkpoint))
|
||||
for document, failure, next_checkpoint in doc_generator:
|
||||
if failure is not None:
|
||||
logging.warning("IMAP connector failure: %s", getattr(failure, "failure_message", failure))
|
||||
continue
|
||||
if document is not None:
|
||||
pending_docs.append(document)
|
||||
if len(pending_docs) >= batch_size:
|
||||
yield pending_docs
|
||||
pending_docs = []
|
||||
if next_checkpoint is not None:
|
||||
checkpoint = next_checkpoint
|
||||
|
||||
iterations += 1
|
||||
if iterations > iteration_limit:
|
||||
raise RuntimeError("Too many iterations while loading IMAP documents.")
|
||||
|
||||
if pending_docs:
|
||||
yield pending_docs
|
||||
|
||||
async def async_wrapper():
|
||||
for batch in document_batches():
|
||||
yield batch
|
||||
|
||||
logging.info(
|
||||
"Connect to IMAP: host(%s) port(%s) user(%s) folder(%s) %s",
|
||||
self.conf["imap_host"],
|
||||
self.conf["imap_port"],
|
||||
self.conf["credentials"]["imap_username"],
|
||||
self.conf["imap_mailbox"],
|
||||
begin_info
|
||||
)
|
||||
return async_wrapper()
|
||||
|
||||
class Zendesk(SyncBase):
|
||||
|
||||
SOURCE_NAME: str = FileSource.ZENDESK
|
||||
async def _generate(self, task: dict):
|
||||
self.connector = ZendeskConnector(content_type=self.conf.get("zendesk_content_type"))
|
||||
self.connector.load_credentials(self.conf["credentials"])
|
||||
|
||||
end_time = datetime.now(timezone.utc).timestamp()
|
||||
if task["reindex"] == "1" or not task.get("poll_range_start"):
|
||||
start_time = 0
|
||||
begin_info = "totally"
|
||||
else:
|
||||
start_time = task["poll_range_start"].timestamp()
|
||||
begin_info = f"from {task['poll_range_start']}"
|
||||
|
||||
raw_batch_size = (
|
||||
self.conf.get("sync_batch_size")
|
||||
or self.conf.get("batch_size")
|
||||
or INDEX_BATCH_SIZE
|
||||
)
|
||||
try:
|
||||
batch_size = int(raw_batch_size)
|
||||
except (TypeError, ValueError):
|
||||
batch_size = INDEX_BATCH_SIZE
|
||||
|
||||
if batch_size <= 0:
|
||||
batch_size = INDEX_BATCH_SIZE
|
||||
|
||||
def document_batches():
|
||||
checkpoint = self.connector.build_dummy_checkpoint()
|
||||
pending_docs = []
|
||||
iterations = 0
|
||||
iteration_limit = 100_000
|
||||
|
||||
while checkpoint.has_more:
|
||||
wrapper = CheckpointOutputWrapper()
|
||||
doc_generator = wrapper(
|
||||
self.connector.load_from_checkpoint(
|
||||
start_time, end_time, checkpoint
|
||||
)
|
||||
)
|
||||
|
||||
for document, failure, next_checkpoint in doc_generator:
|
||||
if failure is not None:
|
||||
logging.warning(
|
||||
"Zendesk connector failure: %s",
|
||||
getattr(failure, "failure_message", failure),
|
||||
)
|
||||
continue
|
||||
|
||||
if document is not None:
|
||||
pending_docs.append(document)
|
||||
if len(pending_docs) >= batch_size:
|
||||
yield pending_docs
|
||||
pending_docs = []
|
||||
|
||||
if next_checkpoint is not None:
|
||||
checkpoint = next_checkpoint
|
||||
|
||||
iterations += 1
|
||||
if iterations > iteration_limit:
|
||||
raise RuntimeError(
|
||||
"Too many iterations while loading Zendesk documents."
|
||||
)
|
||||
|
||||
if pending_docs:
|
||||
yield pending_docs
|
||||
|
||||
async def async_wrapper():
|
||||
for batch in document_batches():
|
||||
yield batch
|
||||
|
||||
logging.info(
|
||||
"Connect to Zendesk: subdomain(%s) %s",
|
||||
self.conf['credentials'].get("zendesk_subdomain"),
|
||||
begin_info,
|
||||
)
|
||||
|
||||
return async_wrapper()
|
||||
|
||||
|
||||
class Gitlab(SyncBase):
|
||||
@ -957,6 +1114,67 @@ class Gitlab(SyncBase):
|
||||
logging.info("Connect to Gitlab: ({}) {}".format(self.conf["project_name"], begin_info))
|
||||
return document_generator
|
||||
|
||||
|
||||
class Bitbucket(SyncBase):
|
||||
SOURCE_NAME: str = FileSource.BITBUCKET
|
||||
|
||||
async def _generate(self, task: dict):
|
||||
self.connector = BitbucketConnector(
|
||||
workspace=self.conf.get("workspace"),
|
||||
repositories=self.conf.get("repository_slugs"),
|
||||
projects=self.conf.get("projects"),
|
||||
)
|
||||
|
||||
self.connector.load_credentials(
|
||||
{
|
||||
"bitbucket_email": self.conf["credentials"].get("bitbucket_account_email"),
|
||||
"bitbucket_api_token": self.conf["credentials"].get("bitbucket_api_token"),
|
||||
}
|
||||
)
|
||||
|
||||
if task["reindex"] == "1" or not task["poll_range_start"]:
|
||||
start_time = datetime.fromtimestamp(0, tz=timezone.utc)
|
||||
begin_info = "totally"
|
||||
else:
|
||||
start_time = task.get("poll_range_start")
|
||||
begin_info = f"from {start_time}"
|
||||
|
||||
end_time = datetime.now(timezone.utc)
|
||||
|
||||
def document_batches():
|
||||
checkpoint = self.connector.build_dummy_checkpoint()
|
||||
|
||||
while checkpoint.has_more:
|
||||
gen = self.connector.load_from_checkpoint(
|
||||
start=start_time.timestamp(),
|
||||
end=end_time.timestamp(),
|
||||
checkpoint=checkpoint)
|
||||
|
||||
while True:
|
||||
try:
|
||||
item = next(gen)
|
||||
if isinstance(item, ConnectorFailure):
|
||||
logging.exception(
|
||||
"Bitbucket connector failure: %s",
|
||||
item.failure_message)
|
||||
break
|
||||
yield [item]
|
||||
except StopIteration as e:
|
||||
checkpoint = e.value
|
||||
break
|
||||
|
||||
async def async_wrapper():
|
||||
for batch in document_batches():
|
||||
yield batch
|
||||
|
||||
logging.info(
|
||||
"Connect to Bitbucket: workspace(%s), %s",
|
||||
self.conf.get("workspace"),
|
||||
begin_info,
|
||||
)
|
||||
|
||||
return async_wrapper()
|
||||
|
||||
func_factory = {
|
||||
FileSource.S3: S3,
|
||||
FileSource.R2: R2,
|
||||
@ -977,8 +1195,11 @@ func_factory = {
|
||||
FileSource.BOX: BOX,
|
||||
FileSource.AIRTABLE: Airtable,
|
||||
FileSource.ASANA: Asana,
|
||||
FileSource.IMAP: IMAP,
|
||||
FileSource.ZENDESK: Zendesk,
|
||||
FileSource.GITHUB: Github,
|
||||
FileSource.GITLAB: Gitlab,
|
||||
FileSource.BITBUCKET: Bitbucket,
|
||||
}
|
||||
|
||||
|
||||
|
||||
@ -332,6 +332,9 @@ async def build_chunks(task, progress_callback):
|
||||
async def doc_keyword_extraction(chat_mdl, d, topn):
|
||||
cached = get_llm_cache(chat_mdl.llm_name, d["content_with_weight"], "keywords", {"topn": topn})
|
||||
if not cached:
|
||||
if has_canceled(task["id"]):
|
||||
progress_callback(-1, msg="Task has been canceled.")
|
||||
return
|
||||
async with chat_limiter:
|
||||
cached = await keyword_extraction(chat_mdl, d["content_with_weight"], topn)
|
||||
set_llm_cache(chat_mdl.llm_name, d["content_with_weight"], cached, "keywords", {"topn": topn})
|
||||
@ -362,6 +365,9 @@ async def build_chunks(task, progress_callback):
|
||||
async def doc_question_proposal(chat_mdl, d, topn):
|
||||
cached = get_llm_cache(chat_mdl.llm_name, d["content_with_weight"], "question", {"topn": topn})
|
||||
if not cached:
|
||||
if has_canceled(task["id"]):
|
||||
progress_callback(-1, msg="Task has been canceled.")
|
||||
return
|
||||
async with chat_limiter:
|
||||
cached = await question_proposal(chat_mdl, d["content_with_weight"], topn)
|
||||
set_llm_cache(chat_mdl.llm_name, d["content_with_weight"], cached, "question", {"topn": topn})
|
||||
@ -392,6 +398,9 @@ async def build_chunks(task, progress_callback):
|
||||
cached = get_llm_cache(chat_mdl.llm_name, d["content_with_weight"], "metadata",
|
||||
task["parser_config"]["metadata"])
|
||||
if not cached:
|
||||
if has_canceled(task["id"]):
|
||||
progress_callback(-1, msg="Task has been canceled.")
|
||||
return
|
||||
async with chat_limiter:
|
||||
cached = await gen_metadata(chat_mdl,
|
||||
metadata_schema(task["parser_config"]["metadata"]),
|
||||
@ -457,6 +466,9 @@ async def build_chunks(task, progress_callback):
|
||||
async def doc_content_tagging(chat_mdl, d, topn_tags):
|
||||
cached = get_llm_cache(chat_mdl.llm_name, d["content_with_weight"], all_tags, {"topn": topn_tags})
|
||||
if not cached:
|
||||
if has_canceled(task["id"]):
|
||||
progress_callback(-1, msg="Task has been canceled.")
|
||||
return
|
||||
picked_examples = random.choices(examples, k=2) if len(examples) > 2 else examples
|
||||
if not picked_examples:
|
||||
picked_examples.append({"content": "This is an example", TAG_FLD: {'example': 1}})
|
||||
@ -890,6 +902,7 @@ async def do_handle_task(task):
|
||||
task_embedding_id = task["embd_id"]
|
||||
task_language = task["language"]
|
||||
task_llm_id = task["parser_config"].get("llm_id") or task["llm_id"]
|
||||
task["llm_id"] = task_llm_id
|
||||
task_dataset_id = task["kb_id"]
|
||||
task_doc_id = task["doc_id"]
|
||||
task_document_name = task["name"]
|
||||
|
||||
@ -1,6 +1,6 @@
|
||||
[project]
|
||||
name = "ragflow-sdk"
|
||||
version = "0.23.0"
|
||||
version = "0.23.1"
|
||||
description = "Python client sdk of [RAGFlow](https://github.com/infiniflow/ragflow). RAGFlow is an open-source RAG (Retrieval-Augmented Generation) engine based on deep document understanding."
|
||||
authors = [{ name = "Zhichang Yu", email = "yuzhichang@gmail.com" }]
|
||||
license = { text = "Apache License, Version 2.0" }
|
||||
|
||||
2
sdk/python/uv.lock
generated
@ -353,7 +353,7 @@ wheels = [
|
||||
|
||||
[[package]]
|
||||
name = "ragflow-sdk"
|
||||
version = "0.23.0"
|
||||
version = "0.23.1"
|
||||
source = { virtual = "." }
|
||||
dependencies = [
|
||||
{ name = "beartype" },
|
||||
|
||||
@ -29,6 +29,7 @@ DIALOG_APP_URL = f"/{VERSION}/dialog"
|
||||
# SESSION_WITH_CHAT_ASSISTANT_API_URL = "/api/v1/chats/{chat_id}/sessions"
|
||||
# SESSION_WITH_AGENT_API_URL = "/api/v1/agents/{agent_id}/sessions"
|
||||
MEMORY_API_URL = f"/{VERSION}/memories"
|
||||
MESSAGE_API_URL = f"/{VERSION}/messages"
|
||||
|
||||
|
||||
# KB APP
|
||||
@ -299,3 +300,76 @@ def get_memory_config(auth, memory_id:str):
|
||||
url = f"{HOST_ADDRESS}{MEMORY_API_URL}/{memory_id}/config"
|
||||
res = requests.get(url=url, headers=HEADERS, auth=auth)
|
||||
return res.json()
|
||||
|
||||
|
||||
def list_memory_message(auth, memory_id, params=None):
|
||||
url = f"{HOST_ADDRESS}{MEMORY_API_URL}/{memory_id}"
|
||||
if params:
|
||||
query_parts = []
|
||||
for key, value in params.items():
|
||||
if isinstance(value, list):
|
||||
for item in value:
|
||||
query_parts.append(f"{key}={item}")
|
||||
else:
|
||||
query_parts.append(f"{key}={value}")
|
||||
query_string = "&".join(query_parts)
|
||||
url = f"{url}?{query_string}"
|
||||
res = requests.get(url=url, headers=HEADERS, auth=auth)
|
||||
return res.json()
|
||||
|
||||
|
||||
def add_message(auth, payload=None):
|
||||
url = f"{HOST_ADDRESS}{MESSAGE_API_URL}"
|
||||
res = requests.post(url=url, headers=HEADERS, auth=auth, json=payload)
|
||||
return res.json()
|
||||
|
||||
|
||||
def forget_message(auth, memory_id: str, message_id: int):
|
||||
url = f"{HOST_ADDRESS}{MESSAGE_API_URL}/{memory_id}:{message_id}"
|
||||
res = requests.delete(url=url, headers=HEADERS, auth=auth)
|
||||
return res.json()
|
||||
|
||||
|
||||
def update_message_status(auth, memory_id: str, message_id: int, status: bool):
|
||||
url = f"{HOST_ADDRESS}{MESSAGE_API_URL}/{memory_id}:{message_id}"
|
||||
payload = {"status": status}
|
||||
res = requests.put(url=url, headers=HEADERS, auth=auth, json=payload)
|
||||
return res.json()
|
||||
|
||||
|
||||
def search_message(auth, params=None):
|
||||
url = f"{HOST_ADDRESS}{MESSAGE_API_URL}/search"
|
||||
if params:
|
||||
query_parts = []
|
||||
for key, value in params.items():
|
||||
if isinstance(value, list):
|
||||
for item in value:
|
||||
query_parts.append(f"{key}={item}")
|
||||
else:
|
||||
query_parts.append(f"{key}={value}")
|
||||
query_string = "&".join(query_parts)
|
||||
url = f"{url}?{query_string}"
|
||||
res = requests.get(url=url, headers=HEADERS, auth=auth)
|
||||
return res.json()
|
||||
|
||||
|
||||
def get_recent_message(auth, params=None):
|
||||
url = f"{HOST_ADDRESS}{MESSAGE_API_URL}"
|
||||
if params:
|
||||
query_parts = []
|
||||
for key, value in params.items():
|
||||
if isinstance(value, list):
|
||||
for item in value:
|
||||
query_parts.append(f"{key}={item}")
|
||||
else:
|
||||
query_parts.append(f"{key}={value}")
|
||||
query_string = "&".join(query_parts)
|
||||
url = f"{url}?{query_string}"
|
||||
res = requests.get(url=url, headers=HEADERS, auth=auth)
|
||||
return res.json()
|
||||
|
||||
|
||||
def get_message_content(auth, memory_id: str, message_id: int):
|
||||
url = f"{HOST_ADDRESS}{MESSAGE_API_URL}/{memory_id}:{message_id}/content"
|
||||
res = requests.get(url=url, headers=HEADERS, auth=auth)
|
||||
return res.json()
|
||||
|
||||
101
test/testcases/test_web_api/test_message_app/conftest.py
Normal file
@ -0,0 +1,101 @@
|
||||
#
|
||||
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
import time
|
||||
import uuid
|
||||
|
||||
import pytest
|
||||
import random
|
||||
from test_web_api.common import create_memory, list_memory, add_message, delete_memory
|
||||
|
||||
|
||||
@pytest.fixture(scope="class")
|
||||
def add_memory_with_multiple_message_func(request, WebApiAuth):
|
||||
def cleanup():
|
||||
memory_list_res = list_memory(WebApiAuth)
|
||||
exist_memory_ids = [memory["id"] for memory in memory_list_res["data"]["memory_list"]]
|
||||
for _memory_id in exist_memory_ids:
|
||||
delete_memory(WebApiAuth, _memory_id)
|
||||
|
||||
request.addfinalizer(cleanup)
|
||||
|
||||
payload = {
|
||||
"name": "test_memory_0",
|
||||
"memory_type": ["raw"] + random.choices(["semantic", "episodic", "procedural"], k=random.randint(1, 3)),
|
||||
"embd_id": "BAAI/bge-small-en-v1.5@Builtin",
|
||||
"llm_id": "glm-4-flash@ZHIPU-AI"
|
||||
}
|
||||
res = create_memory(WebApiAuth, payload)
|
||||
memory_id = res["data"]["id"]
|
||||
agent_id = uuid.uuid4().hex
|
||||
message_payload = {
|
||||
"memory_id": [memory_id],
|
||||
"agent_id": agent_id,
|
||||
"session_id": uuid.uuid4().hex,
|
||||
"user_id": "",
|
||||
"user_input": "what is coriander?",
|
||||
"agent_response": """
|
||||
Coriander is a versatile herb with two main edible parts, and its name can refer to both:
|
||||
1. Leaves and Stems (often called Cilantro or Fresh Coriander): These are the fresh, green, fragrant leaves and tender stems of the plant Coriandrum sativum. They have a bright, citrusy, and sometimes pungent flavor. Cilantro is widely used as a garnish or key ingredient in cuisines like Mexican, Indian, Thai, and Middle Eastern.
|
||||
2. Seeds (called Coriander Seeds): These are the dried, golden-brown seeds of the same plant. When ground, they become coriander powder. The seeds have a warm, nutty, floral, and slightly citrusy taste, completely different from the fresh leaves. They are a fundamental spice in curries, stews, pickles, and baking.
|
||||
Key Point of Confusion: The naming differs by region. In North America, "coriander" typically refers to the seeds, while "cilantro" refers to the fresh leaves. In the UK, Europe, and many other parts of the world, "coriander" refers to the fresh herb, and the seeds are called "coriander seeds."
|
||||
"""
|
||||
}
|
||||
add_message(WebApiAuth, message_payload)
|
||||
request.cls.memory_id = memory_id
|
||||
request.cls.agent_id = agent_id
|
||||
return memory_id
|
||||
|
||||
|
||||
@pytest.fixture(scope="class")
|
||||
def add_memory_with_5_raw_message_func(request, WebApiAuth):
|
||||
def cleanup():
|
||||
memory_list_res = list_memory(WebApiAuth)
|
||||
exist_memory_ids = [memory["id"] for memory in memory_list_res["data"]["memory_list"]]
|
||||
for _memory_id in exist_memory_ids:
|
||||
delete_memory(WebApiAuth, _memory_id)
|
||||
|
||||
request.addfinalizer(cleanup)
|
||||
|
||||
payload = {
|
||||
"name": "test_memory_1",
|
||||
"memory_type": ["raw"],
|
||||
"embd_id": "BAAI/bge-small-en-v1.5@Builtin",
|
||||
"llm_id": "glm-4-flash@ZHIPU-AI"
|
||||
}
|
||||
res = create_memory(WebApiAuth, payload)
|
||||
memory_id = res["data"]["id"]
|
||||
agent_ids = [uuid.uuid4().hex for _ in range(2)]
|
||||
session_ids = [uuid.uuid4().hex for _ in range(5)]
|
||||
for i in range(5):
|
||||
message_payload = {
|
||||
"memory_id": [memory_id],
|
||||
"agent_id": agent_ids[i % 2],
|
||||
"session_id": session_ids[i],
|
||||
"user_id": "",
|
||||
"user_input": "what is coriander?",
|
||||
"agent_response": """
|
||||
Coriander is a versatile herb with two main edible parts, and its name can refer to both:
|
||||
1. Leaves and Stems (often called Cilantro or Fresh Coriander): These are the fresh, green, fragrant leaves and tender stems of the plant Coriandrum sativum. They have a bright, citrusy, and sometimes pungent flavor. Cilantro is widely used as a garnish or key ingredient in cuisines like Mexican, Indian, Thai, and Middle Eastern.
|
||||
2. Seeds (called Coriander Seeds): These are the dried, golden-brown seeds of the same plant. When ground, they become coriander powder. The seeds have a warm, nutty, floral, and slightly citrusy taste, completely different from the fresh leaves. They are a fundamental spice in curries, stews, pickles, and baking.
|
||||
Key Point of Confusion: The naming differs by region. In North America, "coriander" typically refers to the seeds, while "cilantro" refers to the fresh leaves. In the UK, Europe, and many other parts of the world, "coriander" refers to the fresh herb, and the seeds are called "coriander seeds."
|
||||
"""
|
||||
}
|
||||
add_message(WebApiAuth, message_payload)
|
||||
request.cls.memory_id = memory_id
|
||||
request.cls.agent_ids = agent_ids
|
||||
request.cls.session_ids = session_ids
|
||||
time.sleep(2) # make sure refresh to index before search
|
||||
return memory_id
|
||||
@ -0,0 +1,68 @@
|
||||
#
|
||||
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
import random
|
||||
|
||||
import pytest
|
||||
from test_web_api.common import get_recent_message
|
||||
from configs import INVALID_API_TOKEN
|
||||
from libs.auth import RAGFlowWebApiAuth
|
||||
|
||||
|
||||
class TestAuthorization:
|
||||
@pytest.mark.p1
|
||||
@pytest.mark.parametrize(
|
||||
"invalid_auth, expected_code, expected_message",
|
||||
[
|
||||
(None, 401, "<Unauthorized '401: Unauthorized'>"),
|
||||
(RAGFlowWebApiAuth(INVALID_API_TOKEN), 401, "<Unauthorized '401: Unauthorized'>"),
|
||||
],
|
||||
)
|
||||
def test_auth_invalid(self, invalid_auth, expected_code, expected_message):
|
||||
res = get_recent_message(invalid_auth)
|
||||
assert res["code"] == expected_code, res
|
||||
assert res["message"] == expected_message, res
|
||||
|
||||
|
||||
@pytest.mark.usefixtures("add_memory_with_5_raw_message_func")
|
||||
class TestGetRecentMessage:
|
||||
|
||||
@pytest.mark.p1
|
||||
def test_get_recent_messages(self, WebApiAuth):
|
||||
memory_id = self.memory_id
|
||||
res = get_recent_message(WebApiAuth, params={"memory_id": memory_id})
|
||||
assert res["code"] == 0, res
|
||||
assert len(res["data"]) == 5, res
|
||||
|
||||
@pytest.mark.p2
|
||||
def test_filter_recent_messages_by_agent(self, WebApiAuth):
|
||||
memory_id = self.memory_id
|
||||
agent_ids = self.agent_ids
|
||||
agent_id = random.choice(agent_ids)
|
||||
res = get_recent_message(WebApiAuth, params={"agent_id": agent_id, "memory_id": memory_id})
|
||||
assert res["code"] == 0, res
|
||||
for message in res["data"]:
|
||||
assert message["agent_id"] == agent_id, message
|
||||
|
||||
@pytest.mark.p2
|
||||
def test_filter_recent_messages_by_session(self, WebApiAuth):
|
||||
memory_id = self.memory_id
|
||||
session_ids = self.session_ids
|
||||
session_id = random.choice(session_ids)
|
||||
res = get_recent_message(WebApiAuth, params={"session_id": session_id, "memory_id": memory_id})
|
||||
assert res["code"] == 0, res
|
||||
for message in res["data"]:
|
||||
assert message["session_id"] == session_id, message
|
||||
|
||||
@@ -0,0 +1,100 @@
#
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import random

import pytest
from test_web_api.common import list_memory_message
from configs import INVALID_API_TOKEN
from libs.auth import RAGFlowWebApiAuth


class TestAuthorization:
    @pytest.mark.p1
    @pytest.mark.parametrize(
        "invalid_auth, expected_code, expected_message",
        [
            (None, 401, "<Unauthorized '401: Unauthorized'>"),
            (RAGFlowWebApiAuth(INVALID_API_TOKEN), 401, "<Unauthorized '401: Unauthorized'>"),
        ],
    )
    def test_auth_invalid(self, invalid_auth, expected_code, expected_message):
        res = list_memory_message(invalid_auth, "")
        assert res["code"] == expected_code, res
        assert res["message"] == expected_message, res


@pytest.mark.usefixtures("add_memory_with_5_raw_message_func")
class TestMessageList:
    @pytest.mark.p1
    def test_params_unset(self, WebApiAuth):
        memory_id = self.memory_id
        res = list_memory_message(WebApiAuth, memory_id, params=None)
        assert res["code"] == 0, res
        assert len(res["data"]["messages"]["message_list"]) == 5, res

    @pytest.mark.p1
    def test_params_empty(self, WebApiAuth):
        memory_id = self.memory_id
        res = list_memory_message(WebApiAuth, memory_id, params={})
        assert res["code"] == 0, res
        assert len(res["data"]["messages"]["message_list"]) == 5, res

    @pytest.mark.p1
    @pytest.mark.parametrize(
        "params, expected_page_size",
        [
            ({"page": 1, "page_size": 10}, 5),
            ({"page": 2, "page_size": 10}, 0),
            ({"page": 1, "page_size": 2}, 2),
            ({"page": 3, "page_size": 2}, 1),
            ({"page": 5, "page_size": 10}, 0),
        ],
        ids=["normal_first_page", "beyond_max_page", "normal_last_partial_page", "normal_middle_page",
             "full_data_single_page"],
    )
    def test_page_size(self, WebApiAuth, params, expected_page_size):
        # The fixture has already added 5 messages
        memory_id = self.memory_id
        res = list_memory_message(WebApiAuth, memory_id, params=params)
        assert res["code"] == 0, res
        assert len(res["data"]["messages"]["message_list"]) == expected_page_size, res

    @pytest.mark.p2
    def test_filter_agent_id(self, WebApiAuth):
        memory_id = self.memory_id
        agent_ids = self.agent_ids
        agent_id = random.choice(agent_ids)
        res = list_memory_message(WebApiAuth, memory_id, params={"agent_id": agent_id})
        assert res["code"] == 0, res
        for message in res["data"]["messages"]["message_list"]:
            assert message["agent_id"] == agent_id, message

    @pytest.mark.p2
    @pytest.mark.skipif(os.getenv("DOC_ENGINE") == "infinity", reason="Not supported.")
    def test_search_keyword(self, WebApiAuth):
        memory_id = self.memory_id
        session_ids = self.session_ids
        session_id = random.choice(session_ids)
        slice_start = random.randint(0, len(session_id) - 2)
        slice_end = random.randint(slice_start + 1, len(session_id) - 1)
        keyword = session_id[slice_start:slice_end]
        res = list_memory_message(WebApiAuth, memory_id, params={"keywords": keyword})
        assert res["code"] == 0, res
        assert len(res["data"]["messages"]["message_list"]) > 0, res
        for message in res["data"]["messages"]["message_list"]:
            assert keyword in message["session_id"], message
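For reference, a minimal TypeScript sketch (not part of the patch) of the arithmetic behind the parametrized pagination cases above: with 5 stored messages, the expected length of `message_list` is whatever remains on the requested page, floored at zero. The function name is mine, chosen only for illustration.

// Expected number of items on a given page when `total` items exist in all.
function expectedPageSize(total: number, page: number, pageSize: number): number {
  const remaining = total - (page - 1) * pageSize;
  return Math.max(0, Math.min(pageSize, remaining));
}

// Mirrors the test table (5 messages in the fixture):
console.log(expectedPageSize(5, 1, 10)); // 5 -> normal_first_page
console.log(expectedPageSize(5, 2, 10)); // 0 -> beyond_max_page
console.log(expectedPageSize(5, 1, 2));  // 2 -> full page
console.log(expectedPageSize(5, 3, 2));  // 1 -> last partial page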
uv.lock (generated, 2 lines changed)
@@ -6163,7 +6163,7 @@ wheels = [

[[package]]
name = "ragflow"
version = "0.23.0"
version = "0.23.1"
source = { virtual = "." }
dependencies = [
    { name = "aiosmtplib" },
web/src/assets/svg/data-source/bitbucket.svg (new file, 7 lines)
@@ -0,0 +1,7 @@
<?xml version="1.0" encoding="utf-8"?><!-- Uploaded to: SVG Repo, www.svgrepo.com, Generator: SVG Repo Mixer Tools -->
<svg xmlns="http://www.w3.org/2000/svg"
     aria-label="Bitbucket" role="img"
     viewBox="0 0 512 512"><rect
     width="512" height="512"
     rx="15%"
     fill="#ffffff"/><path fill="#2684ff" d="M422 130a10 10 0 00-9.9-11.7H100.5a10 10 0 00-10 11.7L136 409a10 10 0 009.9 8.4h221c5 0 9.2-3.5 10 -8.4L422 130zM291 316.8h-69.3l-18.7-98h104.8z"/><path fill="url(#a)" d="M59.632 25.2H40.94l-3.1 18.3h-13v18.9H52c1 0 1.7-.7 1.8-1.6l5.8-35.6z" transform="translate(89.8 85) scale(5.3285)"/><linearGradient id="a" x2="1" gradientTransform="rotate(141 22.239 22.239) scale(31.4)" gradientUnits="userSpaceOnUse"><stop offset="0" stop-color="#0052cc"/><stop offset="1" stop-color="#2684ff"/></linearGradient></svg>
(After — Size: 803 B)

web/src/assets/svg/data-source/imap.svg (new file, 7 lines)
@@ -0,0 +1,7 @@
<svg stroke="currentColor" fill="none" stroke-width="2" viewBox="0 0 24 24"
     stroke-linecap="round" stroke-linejoin="round"
     class="text-text-04" height="32" width="32"
     xmlns="http://www.w3.org/2000/svg">
  <path d="M4 4h16c1.1 0 2 .9 2 2v12c0 1.1-.9 2-2 2H4c-1.1 0-2-.9-2-2V6c0-1.1.9-2 2-2z"></path>
  <polyline points="22,6 12,13 2,6"></polyline>
</svg>
(After — Size: 360 B)

web/src/assets/svg/data-source/zendesk.svg (new file, 8 lines)
@@ -0,0 +1,8 @@
<?xml version="1.0" encoding="UTF-8"?>
<!-- Uploaded to: SVG Repo, www.svgrepo.com, Generator: SVG Repo Mixer Tools -->
<svg width="800px" height="800px" viewBox="0 -30.5 256 256" version="1.1" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" preserveAspectRatio="xMidYMid">
  <g>
    <path d="M118.249172,51.2326115 L118.249172,194.005605 L0,194.005605 L118.249172,51.2326115 Z M118.249172,2.84217094e-14 C118.249172,32.6440764 91.7686624,59.124586 59.124586,59.124586 C26.4805096,59.124586 0,32.6440764 0,2.84217094e-14 L118.249172,2.84217094e-14 Z M137.750828,194.005605 C137.750828,161.328917 164.198726,134.881019 196.875414,134.881019 C229.552102,134.881019 256,161.361529 256,194.005605 L137.750828,194.005605 Z M137.750828,142.740382 L137.750828,0 L256,0 L137.750828,142.740382 Z" fill="#03363D">
    </path>
  </g>
</svg>
(After — Size: 864 B)
@ -1,11 +1,23 @@
|
||||
import CopyToClipboard from '@/components/copy-to-clipboard';
|
||||
import { Button } from '@/components/ui/button';
|
||||
import {
|
||||
Dialog,
|
||||
DialogContent,
|
||||
DialogHeader,
|
||||
DialogTitle,
|
||||
} from '@/components/ui/dialog';
|
||||
import {
|
||||
Table,
|
||||
TableBody,
|
||||
TableCell,
|
||||
TableHead,
|
||||
TableHeader,
|
||||
TableRow,
|
||||
} from '@/components/ui/table';
|
||||
import { useTranslate } from '@/hooks/common-hooks';
|
||||
import { IModalProps } from '@/interfaces/common';
|
||||
import { IToken } from '@/interfaces/database/chat';
|
||||
import { formatDate } from '@/utils/date';
|
||||
import { DeleteOutlined } from '@ant-design/icons';
|
||||
import type { TableProps } from 'antd';
|
||||
import { Button, Modal, Space, Table } from 'antd';
|
||||
import { Trash2 } from 'lucide-react';
|
||||
import { useOperateApiKey } from '../hooks';
|
||||
|
||||
const ChatApiKeyModal = ({
|
||||
@ -17,57 +29,59 @@ const ChatApiKeyModal = ({
|
||||
useOperateApiKey(idKey, dialogId);
|
||||
const { t } = useTranslate('chat');
|
||||
|
||||
const columns: TableProps<IToken>['columns'] = [
|
||||
{
|
||||
title: 'Token',
|
||||
dataIndex: 'token',
|
||||
key: 'token',
|
||||
render: (text) => <a>{text}</a>,
|
||||
},
|
||||
{
|
||||
title: t('created'),
|
||||
dataIndex: 'create_date',
|
||||
key: 'create_date',
|
||||
render: (text) => formatDate(text),
|
||||
},
|
||||
{
|
||||
title: t('action'),
|
||||
key: 'action',
|
||||
render: (_, record) => (
|
||||
<Space size="middle">
|
||||
<CopyToClipboard text={record.token}></CopyToClipboard>
|
||||
<DeleteOutlined onClick={() => removeToken(record.token)} />
|
||||
</Space>
|
||||
),
|
||||
},
|
||||
];
|
||||
|
||||
return (
|
||||
<>
|
||||
<Modal
|
||||
title={t('apiKey')}
|
||||
open
|
||||
onCancel={hideModal}
|
||||
cancelButtonProps={{ style: { display: 'none' } }}
|
||||
style={{ top: 300 }}
|
||||
onOk={hideModal}
|
||||
width={'50vw'}
|
||||
>
|
||||
<Table
|
||||
columns={columns}
|
||||
dataSource={tokenList}
|
||||
rowKey={'token'}
|
||||
loading={listLoading}
|
||||
pagination={false}
|
||||
/>
|
||||
<Button
|
||||
onClick={createToken}
|
||||
loading={creatingLoading}
|
||||
disabled={tokenList?.length > 0}
|
||||
>
|
||||
{t('createNewKey')}
|
||||
</Button>
|
||||
</Modal>
|
||||
<Dialog open onOpenChange={hideModal}>
|
||||
<DialogContent className="max-w-[50vw]">
|
||||
<DialogHeader>
|
||||
<DialogTitle>{t('apiKey')}</DialogTitle>
|
||||
</DialogHeader>
|
||||
<div className="space-y-4">
|
||||
{listLoading ? (
|
||||
<div className="flex justify-center py-8">Loading...</div>
|
||||
) : (
|
||||
<Table>
|
||||
<TableHeader>
|
||||
<TableRow>
|
||||
<TableHead>Token</TableHead>
|
||||
<TableHead>{t('created')}</TableHead>
|
||||
<TableHead>{t('action')}</TableHead>
|
||||
</TableRow>
|
||||
</TableHeader>
|
||||
<TableBody>
|
||||
{tokenList?.map((tokenItem) => (
|
||||
<TableRow key={tokenItem.token}>
|
||||
<TableCell className="font-medium break-all">
|
||||
{tokenItem.token}
|
||||
</TableCell>
|
||||
<TableCell>{formatDate(tokenItem.create_date)}</TableCell>
|
||||
<TableCell>
|
||||
<div className="flex items-center gap-2">
|
||||
<CopyToClipboard text={tokenItem.token} />
|
||||
<Button
|
||||
variant="ghost"
|
||||
size="icon"
|
||||
onClick={() => removeToken(tokenItem.token)}
|
||||
>
|
||||
<Trash2 className="h-4 w-4" />
|
||||
</Button>
|
||||
</div>
|
||||
</TableCell>
|
||||
</TableRow>
|
||||
))}
|
||||
</TableBody>
|
||||
</Table>
|
||||
)}
|
||||
<Button
|
||||
onClick={createToken}
|
||||
loading={creatingLoading}
|
||||
disabled={tokenList?.length > 0}
|
||||
>
|
||||
{t('createNewKey')}
|
||||
</Button>
|
||||
</div>
|
||||
</DialogContent>
|
||||
</Dialog>
|
||||
</>
|
||||
);
|
||||
};
|
||||
|
||||
@@ -0,0 +1,93 @@
import React, { useSyncExternalStore } from 'react';

export interface AnchorItem {
  key: string;
  href: string;
  title: string;
  children?: AnchorItem[];
}

interface SimpleAnchorProps {
  items: AnchorItem[];
  className?: string;
  style?: React.CSSProperties;
}

// Subscribe to URL hash changes
const subscribeHash = (callback: () => void) => {
  window.addEventListener('hashchange', callback);
  return () => window.removeEventListener('hashchange', callback);
};

const getHash = () => window.location.hash;

const Anchor: React.FC<SimpleAnchorProps> = ({
  items,
  className = '',
  style = {},
}) => {
  // Sync with URL hash changes, to highlight the active item
  const hash = useSyncExternalStore(subscribeHash, getHash);

  // Handle menu item click
  const handleClick = (
    e: React.MouseEvent<HTMLAnchorElement>,
    href: string,
  ) => {
    e.preventDefault();
    const targetId = href.replace('#', '');
    const targetElement = document.getElementById(targetId);

    if (targetElement) {
      // Update URL hash (triggers hashchange event)
      window.location.hash = href;
      // Smooth scroll to target
      targetElement.scrollIntoView({ behavior: 'smooth', block: 'start' });
    }
  };

  if (items.length === 0) return null;

  return (
    <nav className={className} style={style}>
      <ul className="list-none p-0 m-0">
        {items.map((item) => (
          <li key={item.key} className="mb-2">
            <a
              href={item.href}
              onClick={(e) => handleClick(e, item.href)}
              className={`block px-3 py-1.5 no-underline rounded cursor-pointer transition-all duration-300 hover:text-accent-primary/70 ${
                hash === item.href
                  ? 'text-accent-primary bg-accent-primary-5'
                  : 'text-text-secondary bg-transparent'
              }`}
            >
              {item.title}
            </a>
            {item.children && item.children.length > 0 && (
              <ul className="list-none p-0 ml-4 mt-1">
                {item.children.map((child) => (
                  <li key={child.key} className="mb-1">
                    <a
                      href={child.href}
                      onClick={(e) => handleClick(e, child.href)}
                      className={`block px-3 py-1 text-sm no-underline rounded cursor-pointer transition-all duration-300 hover:text-accent-primary/70 ${
                        hash === child.href
                          ? 'text-accent-primary bg-accent-primary-5'
                          : 'text-text-secondary bg-transparent'
                      }`}
                    >
                      {child.title}
                    </a>
                  </li>
                ))}
              </ul>
            )}
          </li>
        ))}
      </ul>
    </nav>
  );
};

export default Anchor;
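A hedged usage sketch (not in the patch) of how a consumer might feed the new Anchor component; the item values below are invented purely for illustration. The active entry is derived from window.location.hash via useSyncExternalStore, so clicking an item both scrolls and re-renders with the highlight moved.

import React from 'react';
import Anchor, { AnchorItem } from './anchor';

// Hypothetical table-of-contents data for illustration only.
const tocItems: AnchorItem[] = [
  {
    key: 'create-session',
    href: '#create-session',
    title: 'Create session',
    children: [
      { key: 'request', href: '#request', title: 'Request' },
      { key: 'response', href: '#response', title: 'Response' },
    ],
  },
];

export function ExampleToc() {
  // Renders a nested anchor list; the currently hashed entry is highlighted.
  return <Anchor items={tocItems} className="w-48" />;
}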
@@ -1,54 +1,35 @@
import { useSetModalState, useTranslate } from '@/hooks/common-hooks';
import { useIsDarkTheme } from '@/components/theme-provider';
import { useSetModalState } from '@/hooks/common-hooks';
import { LangfuseCard } from '@/pages/user-setting/setting-model/langfuse';
import apiDoc from '@parent/docs/references/http_api_reference.md';
import MarkdownPreview from '@uiw/react-markdown-preview';
import { Button, Card, Flex, Space } from 'antd';
import ChatApiKeyModal from '../chat-api-key-modal';
import { usePreviewChat } from '../hooks';
import BackendServiceApi from './backend-service-api';
import MarkdownToc from './markdown-toc';

const ApiContent = ({
  id,
  idKey,
  hideChatPreviewCard = false,
}: {
  id?: string;
  idKey: string;
  hideChatPreviewCard?: boolean;
}) => {
  const { t } = useTranslate('chat');
const ApiContent = ({ id, idKey }: { id?: string; idKey: string }) => {
  const {
    visible: apiKeyVisible,
    hideModal: hideApiKeyModal,
    showModal: showApiKeyModal,
  } = useSetModalState();
  // const { embedVisible, hideEmbedModal, showEmbedModal, embedToken } =
  //   useShowEmbedModal(idKey);

  const { handlePreview } = usePreviewChat(idKey);
  const isDarkTheme = useIsDarkTheme();

  return (
    <div className="pb-2">
      <Flex vertical gap={'middle'}>
      <section className="flex flex-col gap-2 pb-5">
        <BackendServiceApi show={showApiKeyModal}></BackendServiceApi>
        {!hideChatPreviewCard && (
          <Card title={`${name} Web App`}>
            <Flex gap={8} vertical>
              <Space size={'middle'}>
                <Button onClick={handlePreview}>{t('preview')}</Button>
                {/* <Button onClick={() => showEmbedModal(id)}>
                  {t('embedded')}
                </Button> */}
              </Space>
            </Flex>
          </Card>
        )}

        <div style={{ position: 'relative' }}>
          <MarkdownToc content={apiDoc} />
        </div>
        <MarkdownPreview source={apiDoc}></MarkdownPreview>
      </Flex>
        <MarkdownPreview
          source={apiDoc}
          wrapperElement={{ 'data-color-mode': isDarkTheme ? 'dark' : 'light' }}
        ></MarkdownPreview>
      </section>
      <LangfuseCard></LangfuseCard>
      {apiKeyVisible && (
        <ChatApiKeyModal
          hideModal={hideApiKeyModal}
@@ -56,14 +37,6 @@ const ApiContent = ({
          idKey={idKey}
        ></ChatApiKeyModal>
      )}
      {/* {embedVisible && (
        <EmbedModal
          token={embedToken}
          visible={embedVisible}
          hideModal={hideEmbedModal}
        ></EmbedModal>
      )} */}
      <LangfuseCard></LangfuseCard>
    </div>
  );
};

@@ -1,33 +1,28 @@
import { Button, Card, Flex, Space, Typography } from 'antd';
import { Button } from '@/components/ui/button';
import { Card, CardContent, CardHeader, CardTitle } from '@/components/ui/card';

import { CopyToClipboardWithText } from '@/components/copy-to-clipboard';
import { useTranslate } from '@/hooks/common-hooks';
import styles from './index.less';

const { Paragraph } = Typography;

const BackendServiceApi = ({ show }: { show(): void }) => {
  const { t } = useTranslate('chat');

  return (
    <Card
      title={
        <Space size={'large'}>
          <span>RAGFlow API</span>
          <Button onClick={show} type="primary">
            {t('apiKey')}
          </Button>
        </Space>
      }
    >
      <Flex gap={8} align="center">
        <b>{t('backendServiceApi')}</b>
        <Paragraph
          copyable={{ text: `${location.origin}` }}
          className={styles.apiLinkText}
        >
          {location.origin}
        </Paragraph>
      </Flex>
    <Card>
      <CardHeader>
        <div className="flex items-center gap-4">
          <CardTitle>RAGFlow API</CardTitle>
          <Button onClick={show}>{t('apiKey')}</Button>
        </div>
      </CardHeader>
      <CardContent>
        <div className="flex items-center gap-2">
          <b className="font-semibold">{t('backendServiceApi')}</b>
          <CopyToClipboardWithText
            text={location.origin}
          ></CopyToClipboardWithText>
        </div>
      </CardContent>
    </Card>
  );
};

@@ -1,31 +0,0 @@
import { useTranslate } from '@/hooks/common-hooks';
import { IModalProps } from '@/interfaces/common';
import { Modal } from 'antd';
import ApiContent from './api-content';

const ChatOverviewModal = ({
  visible,
  hideModal,
  id,
  idKey,
}: IModalProps<any> & { id: string; name?: string; idKey: string }) => {
  const { t } = useTranslate('chat');

  return (
    <>
      <Modal
        title={t('overview')}
        open={visible}
        onCancel={hideModal}
        cancelButtonProps={{ style: { display: 'none' } }}
        onOk={hideModal}
        width={'100vw'}
        okText={t('close', { keyPrefix: 'common' })}
      >
        <ApiContent id={id} idKey={idKey}></ApiContent>
      </Modal>
    </>
  );
};

export default ChatOverviewModal;
@@ -1,21 +1,27 @@
import { Anchor } from 'antd';
import type { AnchorLinkItemProps } from 'antd/es/anchor/Anchor';
import React, { useEffect, useState } from 'react';
import Anchor, { AnchorItem } from './anchor';

interface MarkdownTocProps {
  content: string;
}

const MarkdownToc: React.FC<MarkdownTocProps> = ({ content }) => {
  const [items, setItems] = useState<AnchorLinkItemProps[]>([]);
  const [items, setItems] = useState<AnchorItem[]>([]);

  useEffect(() => {
    const generateTocItems = () => {
      const headings = document.querySelectorAll(
        '.wmde-markdown h2, .wmde-markdown h3',
      );
      const tocItems: AnchorLinkItemProps[] = [];
      let currentH2Item: AnchorLinkItemProps | null = null;

      // If headings haven't rendered yet, wait for next frame
      if (headings.length === 0) {
        requestAnimationFrame(generateTocItems);
        return;
      }

      const tocItems: AnchorItem[] = [];
      let currentH2Item: AnchorItem | null = null;

      headings.forEach((heading) => {
        const title = heading.textContent || '';
@@ -23,7 +29,7 @@ const MarkdownToc: React.FC<MarkdownTocProps> = ({ content }) => {
        const isH2 = heading.tagName.toLowerCase() === 'h2';

        if (id && title) {
          const item: AnchorLinkItemProps = {
          const item: AnchorItem = {
            key: id,
            href: `#${id}`,
            title,
@@ -48,7 +54,10 @@ const MarkdownToc: React.FC<MarkdownTocProps> = ({ content }) => {
      setItems(tocItems.slice(1));
    };

    setTimeout(generateTocItems, 100);
    // Use requestAnimationFrame to ensure execution after DOM rendering
    requestAnimationFrame(() => {
      requestAnimationFrame(generateTocItems);
    });
  }, [content]);

  return (
@@ -56,7 +65,7 @@ const MarkdownToc: React.FC<MarkdownTocProps> = ({ content }) => {
      className="markdown-toc bg-bg-base text-text-primary shadow shadow-text-secondary"
      style={{
        position: 'fixed',
        right: 20,
        right: 30,
        top: 100,
        bottom: 150,
        width: 200,
@@ -66,7 +75,7 @@ const MarkdownToc: React.FC<MarkdownTocProps> = ({ content }) => {
        zIndex: 1000,
      }}
    >
      <Anchor items={items} affix={false} />
      <Anchor items={items} />
    </div>
  );
};

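A short sketch of the scheduling trick the TOC now relies on (the helper name is mine, not from the patch): two nested requestAnimationFrame calls defer the work until after the browser has laid out and painted the rendered markdown, which is why the previous setTimeout(generateTocItems, 100) guess is no longer needed.

// Run `callback` after the next paint has completed.
function afterNextPaint(callback: () => void): void {
  requestAnimationFrame(() => {
    // The outer frame fires before paint; the inner one fires after it.
    requestAnimationFrame(callback);
  });
}

// e.g. afterNextPaint(generateTocItems);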
@@ -1,21 +0,0 @@
.codeCard {
  .clearCardBody();
}

.codeText {
  padding: 10px;
  background-color: #ffffff09;
}

.id {
  .linkText();
}

.darkBg {
  background-color: rgb(69, 68, 68);
}

.darkId {
  color: white;
  .darkBg();
}
@ -1,170 +0,0 @@
|
||||
import CopyToClipboard from '@/components/copy-to-clipboard';
|
||||
import HighLightMarkdown from '@/components/highlight-markdown';
|
||||
import { SharedFrom } from '@/constants/chat';
|
||||
import { useTranslate } from '@/hooks/common-hooks';
|
||||
import { IModalProps } from '@/interfaces/common';
|
||||
import {
|
||||
Card,
|
||||
Checkbox,
|
||||
Form,
|
||||
Modal,
|
||||
Select,
|
||||
Tabs,
|
||||
TabsProps,
|
||||
Typography,
|
||||
} from 'antd';
|
||||
import { useMemo, useState } from 'react';
|
||||
|
||||
import { useIsDarkTheme } from '@/components/theme-provider';
|
||||
import {
|
||||
LanguageAbbreviation,
|
||||
LanguageAbbreviationMap,
|
||||
} from '@/constants/common';
|
||||
import { cn } from '@/lib/utils';
|
||||
import styles from './index.less';
|
||||
|
||||
const { Paragraph, Link } = Typography;
|
||||
|
||||
const EmbedModal = ({
|
||||
visible,
|
||||
hideModal,
|
||||
token = '',
|
||||
form,
|
||||
beta = '',
|
||||
isAgent,
|
||||
}: IModalProps<any> & {
|
||||
token: string;
|
||||
form: SharedFrom;
|
||||
beta: string;
|
||||
isAgent: boolean;
|
||||
}) => {
|
||||
const { t } = useTranslate('chat');
|
||||
const isDarkTheme = useIsDarkTheme();
|
||||
|
||||
const [visibleAvatar, setVisibleAvatar] = useState(false);
|
||||
const [locale, setLocale] = useState('');
|
||||
|
||||
const languageOptions = useMemo(() => {
|
||||
return Object.values(LanguageAbbreviation).map((x) => ({
|
||||
label: LanguageAbbreviationMap[x],
|
||||
value: x,
|
||||
}));
|
||||
}, []);
|
||||
|
||||
const generateIframeSrc = () => {
|
||||
let src = `${location.origin}/chat/share?shared_id=${token}&from=${form}&auth=${beta}`;
|
||||
if (visibleAvatar) {
|
||||
src += '&visible_avatar=1';
|
||||
}
|
||||
if (locale) {
|
||||
src += `&locale=${locale}`;
|
||||
}
|
||||
return src;
|
||||
};
|
||||
|
||||
const iframeSrc = generateIframeSrc();
|
||||
|
||||
const text = `
|
||||
~~~ html
|
||||
<iframe
|
||||
src="${iframeSrc}"
|
||||
style="width: 100%; height: 100%; min-height: 600px"
|
||||
frameborder="0"
|
||||
>
|
||||
</iframe>
|
||||
~~~
|
||||
`;
|
||||
|
||||
const items: TabsProps['items'] = [
|
||||
{
|
||||
key: '1',
|
||||
label: t('fullScreenTitle'),
|
||||
children: (
|
||||
<Card
|
||||
title={t('fullScreenDescription')}
|
||||
extra={<CopyToClipboard text={text}></CopyToClipboard>}
|
||||
className={styles.codeCard}
|
||||
>
|
||||
<div className="p-2">
|
||||
<h2 className="mb-3">Option:</h2>
|
||||
|
||||
<Form.Item
|
||||
label={t('avatarHidden')}
|
||||
labelCol={{ span: 6 }}
|
||||
wrapperCol={{ span: 18 }}
|
||||
>
|
||||
<Checkbox
|
||||
checked={visibleAvatar}
|
||||
onChange={(e) => setVisibleAvatar(e.target.checked)}
|
||||
></Checkbox>
|
||||
</Form.Item>
|
||||
<Form.Item
|
||||
label={t('locale')}
|
||||
labelCol={{ span: 6 }}
|
||||
wrapperCol={{ span: 18 }}
|
||||
>
|
||||
<Select
|
||||
placeholder="Select a locale"
|
||||
onChange={(value) => setLocale(value)}
|
||||
options={languageOptions}
|
||||
style={{ width: '100%' }}
|
||||
/>
|
||||
</Form.Item>
|
||||
</div>
|
||||
<HighLightMarkdown>{text}</HighLightMarkdown>
|
||||
</Card>
|
||||
),
|
||||
},
|
||||
{
|
||||
key: '2',
|
||||
label: t('partialTitle'),
|
||||
children: t('comingSoon'),
|
||||
},
|
||||
{
|
||||
key: '3',
|
||||
label: t('extensionTitle'),
|
||||
children: t('comingSoon'),
|
||||
},
|
||||
];
|
||||
|
||||
const onChange = (key: string) => {
|
||||
console.log(key);
|
||||
};
|
||||
|
||||
return (
|
||||
<Modal
|
||||
title={t('embedIntoSite', { keyPrefix: 'common' })}
|
||||
open={visible}
|
||||
style={{ top: 300 }}
|
||||
width={'50vw'}
|
||||
onOk={hideModal}
|
||||
onCancel={hideModal}
|
||||
>
|
||||
<Tabs defaultActiveKey="1" items={items} onChange={onChange} />
|
||||
<div className="text-base font-medium mt-4 mb-1">
|
||||
{t(isAgent ? 'flow' : 'chat', { keyPrefix: 'header' })}
|
||||
<span className="ml-1 inline-block">ID</span>
|
||||
</div>
|
||||
<Paragraph
|
||||
copyable={{ text: token }}
|
||||
className={cn(styles.id, {
|
||||
[styles.darkId]: isDarkTheme,
|
||||
})}
|
||||
>
|
||||
{token}
|
||||
</Paragraph>
|
||||
<Link
|
||||
href={
|
||||
isAgent
|
||||
? 'https://ragflow.io/docs/dev/http_api_reference#create-session-with-agent'
|
||||
: 'https://ragflow.io/docs/dev/http_api_reference#create-session-with-chat-assistant'
|
||||
}
|
||||
target="_blank"
|
||||
>
|
||||
{t('howUseId', { keyPrefix: isAgent ? 'flow' : 'chat' })}
|
||||
</Link>
|
||||
</Modal>
|
||||
);
|
||||
};
|
||||
|
||||
export default EmbedModal;
|
||||
@@ -1,4 +1,3 @@
import { SharedFrom } from '@/constants/chat';
import {
  useSetModalState,
  useShowDeleteConfirm,
@@ -80,11 +79,6 @@ export const useShowBetaEmptyError = () => {
  return { showBetaEmptyError };
};

const getUrlWithToken = (token: string, from: string = 'chat') => {
  const { protocol, host } = window.location;
  return `${protocol}//${host}/chat/share?shared_id=${token}&from=${from}`;
};

const useFetchTokenListBeforeOtherStep = () => {
  const { showTokenEmptyError } = useShowTokenEmptyError();
  const { showBetaEmptyError } = useShowBetaEmptyError();
@@ -149,31 +143,3 @@ export const useShowEmbedModal = () => {
    beta,
  };
};

export const usePreviewChat = (idKey: string) => {
  const { handleOperate } = useFetchTokenListBeforeOtherStep();

  const open = useCallback(
    (t: string) => {
      window.open(
        getUrlWithToken(
          t,
          idKey === 'canvasId' ? SharedFrom.Agent : SharedFrom.Chat,
        ),
        '_blank',
      );
    },
    [idKey],
  );

  const handlePreview = useCallback(async () => {
    const token = await handleOperate();
    if (token) {
      open(token);
    }
  }, [handleOperate, open]);

  return {
    handlePreview,
  };
};

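For reference, a sketch of the share URL that the removed getUrlWithToken helper produced; the token and enum string below are placeholders, not values from the patch.

const token = 'ragflow-demo-token'; // placeholder, not a real token
// 'agent' stands in for SharedFrom.Agent (used when idKey === 'canvasId');
// the actual enum string may differ from this placeholder.
const from = 'agent';
const shareUrl = `${window.location.protocol}//${window.location.host}/chat/share?shared_id=${token}&from=${from}`;
// e.g. https://example.com/chat/share?shared_id=ragflow-demo-token&from=agent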
@ -1,79 +1,72 @@
|
||||
import { DocumentParserType } from '@/constants/knowledge';
|
||||
import { useTranslate } from '@/hooks/common-hooks';
|
||||
import { useFetchKnowledgeList } from '@/hooks/use-knowledge-request';
|
||||
import { IKnowledge } from '@/interfaces/database/knowledge';
|
||||
import { useBuildQueryVariableOptions } from '@/pages/agent/hooks/use-get-begin-query';
|
||||
import { UserOutlined } from '@ant-design/icons';
|
||||
import { Avatar as AntAvatar, Form, Select, Space } from 'antd';
|
||||
import { toLower } from 'lodash';
|
||||
import { useMemo } from 'react';
|
||||
import { useEffect, useMemo, useState } from 'react';
|
||||
import { useFormContext } from 'react-hook-form';
|
||||
import { useTranslation } from 'react-i18next';
|
||||
import { RAGFlowAvatar } from './ragflow-avatar';
|
||||
import { FormControl, FormField, FormItem, FormLabel } from './ui/form';
|
||||
import { MultiSelect } from './ui/multi-select';
|
||||
|
||||
interface KnowledgeBaseItemProps {
|
||||
label?: string;
|
||||
tooltipText?: string;
|
||||
name?: string;
|
||||
required?: boolean;
|
||||
onChange?(): void;
|
||||
}
|
||||
|
||||
const KnowledgeBaseItem = ({
|
||||
label,
|
||||
tooltipText,
|
||||
name,
|
||||
required = true,
|
||||
onChange,
|
||||
}: KnowledgeBaseItemProps) => {
|
||||
const { t } = useTranslate('chat');
|
||||
|
||||
const { list: knowledgeList } = useFetchKnowledgeList(true);
|
||||
|
||||
const filteredKnowledgeList = knowledgeList.filter(
|
||||
(x) => x.parser_id !== DocumentParserType.Tag,
|
||||
);
|
||||
|
||||
const knowledgeOptions = filteredKnowledgeList.map((x) => ({
|
||||
label: (
|
||||
<Space>
|
||||
<AntAvatar size={20} icon={<UserOutlined />} src={x.avatar} />
|
||||
{x.name}
|
||||
</Space>
|
||||
),
|
||||
value: x.id,
|
||||
}));
|
||||
|
||||
return (
|
||||
<Form.Item
|
||||
label={label || t('knowledgeBases')}
|
||||
name={name || 'kb_ids'}
|
||||
tooltip={tooltipText || t('knowledgeBasesTip')}
|
||||
rules={[
|
||||
{
|
||||
required,
|
||||
message: t('knowledgeBasesMessage'),
|
||||
type: 'array',
|
||||
},
|
||||
]}
|
||||
>
|
||||
<Select
|
||||
mode="multiple"
|
||||
options={knowledgeOptions}
|
||||
placeholder={t('knowledgeBasesMessage')}
|
||||
onChange={onChange}
|
||||
></Select>
|
||||
</Form.Item>
|
||||
);
|
||||
};
|
||||
|
||||
export default KnowledgeBaseItem;
|
||||
import { MultiSelect, MultiSelectOptionType } from './ui/multi-select';
|
||||
|
||||
function buildQueryVariableOptionsByShowVariable(showVariable?: boolean) {
|
||||
return showVariable ? useBuildQueryVariableOptions : () => [];
|
||||
}
|
||||
|
||||
export function useDisableDifferenceEmbeddingDataset() {
|
||||
const [datasetOptions, setDatasetOptions] = useState<MultiSelectOptionType[]>(
|
||||
[],
|
||||
);
|
||||
const [datasetSelectEmbedId, setDatasetSelectEmbedId] = useState('');
|
||||
const { list: datasetListOrigin } = useFetchKnowledgeList(true);
|
||||
|
||||
useEffect(() => {
|
||||
const datasetListMap = datasetListOrigin
|
||||
.filter((x) => x.parser_id !== DocumentParserType.Tag)
|
||||
.map((item: IKnowledge) => {
|
||||
return {
|
||||
label: item.name,
|
||||
icon: () => (
|
||||
<RAGFlowAvatar
|
||||
className="size-4"
|
||||
avatar={item.avatar}
|
||||
name={item.name}
|
||||
/>
|
||||
),
|
||||
suffix: (
|
||||
<div className="text-xs px-4 p-1 bg-bg-card text-text-secondary rounded-lg border border-bg-card">
|
||||
{item.embd_id}
|
||||
</div>
|
||||
),
|
||||
value: item.id,
|
||||
disabled:
|
||||
item.embd_id !== datasetSelectEmbedId &&
|
||||
datasetSelectEmbedId !== '',
|
||||
};
|
||||
});
|
||||
setDatasetOptions(datasetListMap);
|
||||
}, [datasetListOrigin, datasetSelectEmbedId]);
|
||||
|
||||
const handleDatasetSelectChange = (
|
||||
value: string[],
|
||||
onChange: (value: string[]) => void,
|
||||
) => {
|
||||
if (value.length) {
|
||||
const data = datasetListOrigin?.find((item) => item.id === value[0]);
|
||||
setDatasetSelectEmbedId(data?.embd_id ?? '');
|
||||
} else {
|
||||
setDatasetSelectEmbedId('');
|
||||
}
|
||||
onChange?.(value);
|
||||
};
|
||||
|
||||
return {
|
||||
datasetOptions,
|
||||
handleDatasetSelectChange,
|
||||
};
|
||||
}
|
||||
|
||||
export function KnowledgeBaseFormField({
|
||||
showVariable = false,
|
||||
}: {
|
||||
@ -82,22 +75,12 @@ export function KnowledgeBaseFormField({
|
||||
const form = useFormContext();
|
||||
const { t } = useTranslation();
|
||||
|
||||
const { list: knowledgeList } = useFetchKnowledgeList(true);
|
||||
|
||||
const filteredKnowledgeList = knowledgeList.filter(
|
||||
(x) => x.parser_id !== DocumentParserType.Tag,
|
||||
);
|
||||
const { datasetOptions, handleDatasetSelectChange } =
|
||||
useDisableDifferenceEmbeddingDataset();
|
||||
|
||||
const nextOptions = buildQueryVariableOptionsByShowVariable(showVariable)();
|
||||
|
||||
const knowledgeOptions = filteredKnowledgeList.map((x) => ({
|
||||
label: x.name,
|
||||
value: x.id,
|
||||
icon: () => (
|
||||
<RAGFlowAvatar className="size-4 mr-2" avatar={x.avatar} name={x.name} />
|
||||
),
|
||||
}));
|
||||
|
||||
const knowledgeOptions = datasetOptions;
|
||||
const options = useMemo(() => {
|
||||
if (showVariable) {
|
||||
return [
|
||||
@ -140,11 +123,14 @@ export function KnowledgeBaseFormField({
|
||||
<FormControl>
|
||||
<MultiSelect
|
||||
options={options}
|
||||
onValueChange={field.onChange}
|
||||
onValueChange={(value) => {
|
||||
handleDatasetSelectChange(value, field.onChange);
|
||||
}}
|
||||
placeholder={t('chat.knowledgeBasesMessage')}
|
||||
variant="inverted"
|
||||
maxCount={100}
|
||||
defaultValue={field.value}
|
||||
showSelectAll={false}
|
||||
{...field}
|
||||
/>
|
||||
</FormControl>
|
||||
|
||||
@ -109,6 +109,19 @@ export const SelectWithSearch = forwardRef<
|
||||
}
|
||||
}, [options, value]);
|
||||
|
||||
const showSearch = useMemo(() => {
|
||||
if (Array.isArray(options) && options.length > 5) {
|
||||
return true;
|
||||
}
|
||||
if (Array.isArray(options)) {
|
||||
const optionsNum = options.reduce((acc, option) => {
|
||||
return acc + (option?.options?.length || 0);
|
||||
}, 0);
|
||||
return optionsNum > 5;
|
||||
}
|
||||
return false;
|
||||
}, [options]);
|
||||
|
||||
const handleSelect = useCallback(
|
||||
(val: string) => {
|
||||
setValue(val);
|
||||
@ -179,7 +192,7 @@ export const SelectWithSearch = forwardRef<
|
||||
align="start"
|
||||
>
|
||||
<Command className="p-5">
|
||||
{options && options.length > 5 && (
|
||||
{showSearch && (
|
||||
<CommandInput
|
||||
placeholder={t('common.search') + '...'}
|
||||
className=" placeholder:text-text-disabled"
|
||||
|
||||
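A minimal sketch of the counting rule introduced above, written against a hypothetical option shape: the search input is shown once more than five selectable options exist, whether they sit at the top level or are nested inside groups.

type FlatOrGroupedOption = { label: string; options?: { label: string }[] };

// Mirrors the new showSearch logic: flat count first, then nested group count.
function shouldShowSearch(options: FlatOrGroupedOption[]): boolean {
  if (options.length > 5) return true;
  const nestedCount = options.reduce(
    (acc, option) => acc + (option.options?.length || 0),
    0,
  );
  return nestedCount > 5;
}

// shouldShowSearch([{ label: 'Group', options: new Array(6).fill({ label: 'x' }) }]) === true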
@@ -1,35 +1,19 @@
import { toast } from 'sonner';
import { ExternalToast, toast } from 'sonner';

const duration = { duration: 2500 };
const configuration: ExternalToast = { duration: 2500, position: 'top-center' };

const message = {
  success: (msg: string) => {
    toast.success(msg, {
      position: 'top-center',
      closeButton: false,
      ...duration,
    });
    toast.success(msg, configuration);
  },
  error: (msg: string) => {
    toast.error(msg, {
      position: 'top-center',
      closeButton: false,
      ...duration,
    });
    toast.error(msg, configuration);
  },
  warning: (msg: string) => {
    toast.warning(msg, {
      position: 'top-center',
      closeButton: false,
      ...duration,
    });
    toast.warning(msg, configuration);
  },
  info: (msg: string) => {
    toast.info(msg, {
      position: 'top-center',
      closeButton: false,
      ...duration,
    });
    toast.info(msg, configuration);
  },
};
export default message;

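Call sites are unchanged by this refactor; only where the options live moves into the shared ExternalToast configuration. A quick, hedged illustration of typical usage (the message strings are invented):

import message from '@/components/ui/message';

message.success('Settings saved');   // shown for 2.5 s, centered at the top
message.error('Failed to connect');  // same shared configuration object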
||||
@ -211,3 +211,14 @@ export const WebhookJWTAlgorithmList = [
|
||||
'ps512',
|
||||
'none',
|
||||
] as const;
|
||||
|
||||
export enum AgentDialogueMode {
|
||||
Conversational = 'conversational',
|
||||
Task = 'task',
|
||||
Webhook = 'Webhook',
|
||||
}
|
||||
|
||||
export const initialBeginValues = {
|
||||
mode: AgentDialogueMode.Conversational,
|
||||
prologue: `Hi! I'm your assistant. What can I do for you?`,
|
||||
};
|
||||
|
||||
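A hedged sketch of how the new enum could be narrowed exhaustively in consumer code; the describeMode helper and its wording are illustrative assumptions, not part of the patch.

import { AgentDialogueMode } from '@/constants/agent';

// Illustrative descriptions only; the real semantics live in the agent runtime.
function describeMode(mode: AgentDialogueMode): string {
  switch (mode) {
    case AgentDialogueMode.Conversational:
      return 'Multi-turn chat driven by user messages';
    case AgentDialogueMode.Task:
      return 'One-shot task execution';
    case AgentDialogueMode.Webhook:
      return 'Triggered by an incoming webhook call';
  }
}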
@@ -1,7 +1,7 @@
import { FileUploadProps } from '@/components/file-upload';
import { useHandleFilterSubmit } from '@/components/list-filter-bar/use-handle-filter-submit';
import message from '@/components/ui/message';
import { AgentGlobals } from '@/constants/agent';
import { AgentGlobals, initialBeginValues } from '@/constants/agent';
import {
  IAgentLogsRequest,
  IAgentLogsResponse,
@@ -76,6 +76,7 @@ export const EmptyDsl = {
      data: {
        label: 'Begin',
        name: 'begin',
        form: initialBeginValues,
      },
      sourcePosition: 'left',
      targetPosition: 'right',

@@ -116,7 +116,7 @@ export interface ITenantInfo {
  tts_id: string;
}

export type ChunkDocType = 'image' | 'table';
export type ChunkDocType = 'image' | 'table' | 'text';

export interface IChunk {
  available_int: number; // Whether to enable, 0: not enabled, 1: enabled

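A sketch (not from the patch) of why widening ChunkDocType matters for consumers: any switch over the union must now also handle 'text'. The local type alias below simply mirrors the patched declaration, and the label strings are made up.

type ChunkDocType = 'image' | 'table' | 'text';

// Exhaustive handling of every chunk kind, including the new 'text' member.
function renderChunkBadge(docType: ChunkDocType): string {
  switch (docType) {
    case 'image':
      return 'image chunk';
    case 'table':
      return 'table chunk';
    case 'text':
      return 'text chunk';
  }
}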
@ -947,6 +947,19 @@ Beispiel: Virtual Hosted Style`,
|
||||
'Laden Sie das OAuth-JSON hoch, das von der Google Console generiert wurde. Wenn es nur Client-Anmeldeinformationen enthält, führen Sie die browserbasierte Überprüfung einmal durch, um langlebige Refresh-Token zu erstellen.',
|
||||
dropboxDescription:
|
||||
'Verbinden Sie Ihre Dropbox, um Dateien und Ordner von einem ausgewählten Konto zu synchronisieren.',
|
||||
bitbucketDescription:
|
||||
'Bitbucket verbinden, um PR-Inhalte zu synchronisieren.',
|
||||
zendeskDescription:
|
||||
'Verbinden Sie Ihr Zendesk, um Tickets, Artikel und andere Inhalte zu synchronisieren.',
|
||||
bitbucketTopWorkspaceTip:
|
||||
'Der zu indizierende Bitbucket-Workspace (z. B. "atlassian" aus https://bitbucket.org/atlassian/workspace )',
|
||||
bitbucketWorkspaceTip:
|
||||
'Dieser Connector indiziert alle Repositories im Workspace.',
|
||||
bitbucketProjectsTip: 'Kommagetrennte Projekt-Keys, z. B.: PROJ1,PROJ2',
|
||||
bitbucketRepositorySlugsTip:
|
||||
'Kommagetrennte Repository-Slugs, z. B.: repo-one,repo-two',
|
||||
connectorNameTip:
|
||||
'Geben Sie einen aussagekräftigen Namen für den Connector an',
|
||||
boxDescription:
|
||||
'Verbinden Sie Ihr Box-Laufwerk, um Dateien und Ordner zu synchronisieren.',
|
||||
githubDescription:
|
||||
|
||||
@ -147,6 +147,8 @@ Procedural Memory: Learned skills, habits, and automated procedures.`,
|
||||
action: 'Action',
|
||||
},
|
||||
config: {
|
||||
memorySizeTooltip: `Accounts for each message's content + its embedding vector (≈ Content + Dimensions × 8 Bytes).
|
||||
Example: A 1 KB message with 1024-dim embedding uses ~9 KB. The 5 MB default limit holds ~500 such messages.`,
|
||||
avatar: 'Avatar',
|
||||
description: 'Description',
|
||||
memorySize: 'Memory size',
|
||||
@ -181,6 +183,8 @@ Procedural Memory: Learned skills, habits, and automated procedures.`,
|
||||
},
|
||||
knowledgeDetails: {
|
||||
metadata: {
|
||||
toMetadataSetting: 'Generation settings',
|
||||
toMetadataSettingTip: 'Set auto-metadata in Configuration.',
|
||||
descriptionTip:
|
||||
'Provide descriptions or examples to guide LLM extract values for this field. If left empty, it will rely on the field name.',
|
||||
restrictTDefinedValuesTip:
|
||||
@ -875,6 +879,7 @@ This auto-tagging feature enhances retrieval by adding another layer of domain-s
|
||||
cropImage: 'Crop image',
|
||||
selectModelPlaceholder: 'Select model',
|
||||
configureModelTitle: 'Configure model',
|
||||
connectorNameTip: 'A descriptive name for the connector',
|
||||
confluenceIsCloudTip:
|
||||
'Check if this is a Confluence Cloud instance, uncheck for Confluence Server/Data Center',
|
||||
confluenceWikiBaseUrlTip:
|
||||
@ -919,7 +924,9 @@ Example: Virtual Hosted Style`,
|
||||
google_driveTokenTip:
|
||||
'Upload the OAuth token JSON generated from the OAuth helper or Google Cloud Console. You may also upload a client_secret JSON from an "installed" or "web" application. If this is your first sync, a browser window will open to complete the OAuth consent. If the JSON already contains a refresh token, it will be reused automatically.',
|
||||
google_drivePrimaryAdminTip:
|
||||
'Email address that has access to the Drive content being synced.',
|
||||
'Email address that has access to the Drive content being synced',
|
||||
zendeskDescription:
|
||||
'Connect your Zendesk to sync tickets, articles, and other content.',
|
||||
google_driveMyDriveEmailsTip:
|
||||
'Comma-separated emails whose "My Drive" contents should be indexed (include the primary admin).',
|
||||
google_driveSharedFoldersTip:
|
||||
@ -930,7 +937,16 @@ Example: Virtual Hosted Style`,
|
||||
'Upload the OAuth JSON generated from Google Console. If it only contains client credentials, run the browser-based verification once to mint long-lived refresh tokens.',
|
||||
dropboxDescription:
|
||||
'Connect your Dropbox to sync files and folders from a chosen account.',
|
||||
bitbucketDescription: 'Connect Bitbucket to sync PR content.',
|
||||
bitbucketTopWorkspaceTip:
|
||||
'The Bitbucket workspace to index (e.g., "atlassian" from https://bitbucket.org/atlassian/workspace ).',
|
||||
bitbucketRepositorySlugsTip:
|
||||
'Comma separated repository slugs. E.g., repo-one,repo-two',
|
||||
bitbucketProjectsTip: 'Comma separated project keys. E.g., PROJ1,PROJ2',
|
||||
bitbucketWorkspaceTip:
|
||||
'This connector will index all repositories in the workspace.',
|
||||
boxDescription: 'Connect your Box drive to sync files and folders.',
|
||||
|
||||
githubDescription:
|
||||
'Connect GitHub to sync pull requests and issues for retrieval.',
|
||||
airtableDescription:
|
||||
@ -939,6 +955,8 @@ Example: Virtual Hosted Style`,
|
||||
'Connect GitLab to sync repositories, issues, merge requests, and related documentation.',
|
||||
asanaDescription:
|
||||
'Connect to Asana and synchronize files from a specified workspace.',
|
||||
imapDescription:
|
||||
'Connect to your IMAP mailbox to sync emails for knowledge retrieval.',
|
||||
dropboxAccessTokenTip:
|
||||
'Generate a long-lived access token in the Dropbox App Console with files.metadata.read, files.content.read, and sharing.read scopes.',
|
||||
moodleDescription:
|
||||
|
||||
@ -731,6 +731,7 @@ export default {
|
||||
newDocs: 'Новые документы',
|
||||
timeStarted: 'Время начала',
|
||||
log: 'Лог',
|
||||
connectorNameTip: 'Укажите понятное имя для коннектора',
|
||||
confluenceDescription:
|
||||
'Интегрируйте ваше рабочее пространство Confluence для поиска документации.',
|
||||
s3Description:
|
||||
@ -747,6 +748,18 @@ export default {
|
||||
'Синхронизируйте страницы и базы данных из Notion для извлечения знаний.',
|
||||
boxDescription:
|
||||
'Подключите ваш диск Box для синхронизации файлов и папок.',
|
||||
bitbucketDescription:
|
||||
'Подключите Bitbucket для синхронизации содержимого PR.',
|
||||
zendeskDescription:
|
||||
'Подключите Zendesk для синхронизации тикетов, статей и другого контента.',
|
||||
bitbucketTopWorkspaceTip:
|
||||
'Рабочее пространство Bitbucket для индексации (например, "atlassian" из https://bitbucket.org/atlassian/workspace )',
|
||||
bitbucketWorkspaceTip:
|
||||
'Этот коннектор проиндексирует все репозитории в рабочем пространстве.',
|
||||
bitbucketProjectsTip:
|
||||
'Ключи проектов через запятую, например: PROJ1,PROJ2',
|
||||
bitbucketRepositorySlugsTip:
|
||||
'Слоги репозиториев через запятую, например: repo-one,repo-two',
|
||||
githubDescription:
|
||||
'Подключите GitHub для синхронизации содержимого Pull Request и Issue для поиска.',
|
||||
airtableDescription:
|
||||
@ -755,6 +768,8 @@ export default {
|
||||
'Подключите GitLab для синхронизации репозиториев, задач, merge requests и связанной документации.',
|
||||
asanaDescription:
|
||||
'Подключите Asana и синхронизируйте файлы из рабочего пространства.',
|
||||
imapDescription:
|
||||
'Подключите почтовый ящик IMAP для синхронизации писем из указанных почтовых ящиков (mailboxes) с целью поиска и анализа знаний.',
|
||||
google_driveDescription:
|
||||
'Подключите ваш Google Drive через OAuth и синхронизируйте определенные папки или диски.',
|
||||
gmailDescription:
|
||||
|
||||
@@ -726,6 +726,16 @@ export default {
    view: '查看',
    modelsToBeAddedTooltip:
      '若您的模型供應商未列於此處,但宣稱與 OpenAI 相容,可透過選擇「OpenAI-API-compatible」卡片來設定相關模型。',
    dropboxDescription: '連接 Dropbox,同步指定帳號下的文件與文件夾。',
    bitbucketDescription: '連接 Bitbucket,同步 PR 內容。',
    zendeskDescription: '連接 Zendesk,同步工單、文章及其他內容。',
    bitbucketTopWorkspaceTip:
      '要索引的 Bitbucket 工作區(例如:https://bitbucket.org/atlassian/workspace 中的 "atlassian")',
    bitbucketWorkspaceTip: '此連接器將索引工作區下的所有倉庫。',
    bitbucketRepositorySlugsTip:
      '以英文逗號分隔的倉庫 slug,例如:repo-one,repo-two',
    bitbucketProjectsTip: '以英文逗號分隔的項目鍵,例如:PROJ1,PROJ2',
    connectorNameTip: '為連接器填寫一個有意義的名稱',
  },
  message: {
    registered: '註冊成功',

Some files were not shown because too many files have changed in this diff.