Mirror of https://github.com/infiniflow/ragflow.git, synced 2025-12-08 20:42:30 +08:00

Compare commits
188 Commits
Only the SHA1 column of the commit table survived the capture; the compare spans these commits:

a5cf6fc546, 57fe5d0864, bfdc4944a3, a45ba3a91e, e513ad2f16, 1fdad50dac, 4764ca5ef7, 85f3d92816, 742eef028f, dfbdeaddaf, 50c2b9d562, f8cef73244, f8c9ec4d56, db74a3ef34, 00f99ecbd5, 0a3c6fff7c, 79e435fc2e, 163c2a70fc, bedc09f69c, 251592eeeb, 09436f6c60, e8b4e8b3d7, 000cd6d615, 1d65299791, bcccaccc2b, fddac1345d, 4a95349492, 0fcb564261, 96667696d2, ce1e855328, b5e4a5563c, 1053ef5551, cb6e9ce164, 8ea631a2a0, 7fb67c4f67, 44ac87aef4, 7ddccbb952, 4a7bc4df92, 3b7d182720, 78527acd88, e5c3083826, 9b9039de92, 9b2ef62aee, 86507af770, 93635674c3, 1defe0b19b, 0bca46ac3a, 1ecb687c51, 68d46b2a1e, 7559bbd46d, 275b5d14f2, 9ae81b42a3, d6c74ff131, e8d74108a5, c8b1a564aa, 301f95837c, 835fd7abcd, bb8f97c9cd, 6d19294ddc, f61c276f74, 409acf0d9f, 74c6b21f3b, beeacd3e3f, 95259af68f, 855455006b, b844ad6e06, e0533f19e9, 9a6d976252, 3d76f10a91, e9b8c30a38, 601d74160b, fc4e644e5f, 03f00c9e6f, 87e46b4425, d5a322a352, 7d4f1c0645, 927873bfa6, 5fe0791684, 3e134ac0ad, 7a6bf4326e, 41a0601735, 60486ecde5, 255f4ccffc, afe82feb57, 044afa83d1, 4b00be4173, 215e9361ea, aaec630759, 3d735dca87, dcedfc5ec8, 1254ecf445, 0d68a6cd1b, e267a026f3, 44d4686b20, 95614175e6, c817ff184b, f284578cea, e69e6b2274, 8cdb805c0b, 885418f3b0, b44321f9c3, f54a8d7748, 311a475b6f, 655b01a0a4, d4ee082735, 1f5a7c4b12, dab58b9311, e56a60b316, f189452446, f576c555e4, d8eea624e2, e64c7dfdf6, c76e7b1e28, 0d5486aa57, 3a0e9f9263, 1f0a153d0e, 8bdf1d98a3, 8037dc7b76, 56f473b680, b502dc7399, cfe23badb0, 593ffc4067, a88a1848ff, 5ae33184d5, 78601ee1bd, 84afb4259c, 1b817a5b4c, 1b589609a4, 289f4f1916, cf37e2ef1a, 41e2dadea7, f3318b2e49, 3f3469130b, fc38afcec4, efae7afd62, 285bc58364, 6657ca7cde, 87455d79e4, 821fdf02b4, 54980337e4, 92ab7ef659, 934dbc2e2b, 95da6de9e1, ccdeeda9cc, 74b28ef1b0, 7543047de3, e66addc82d, 7b6a5ffaff, 19545282aa, 6a0583f5ad, ed7e46b6ca, 9654e64a0a, 8b650fc9ef, 69fb323581, 9d093547e8, c5f13629af, c4b6df350a, 976d112280, 8fba5c4179, d19f059f34, deca6c1b72, 3ee9ca749d, 7058ac0041, a7efd3cac5, 59a5813f1b, 08c1a5e1e8, ea84cc2e33, b5f643681f, 5497ea34b9, e079656473, d00297a763, a19210daf1, b2abc36baa, fadbe23bfe, ea8a59d0b0, 381219aa41, 0f08b0f053, 0dafce31c4, c93e0355c3, 1e0fc76efa, d94386e00a, 0a62dd7a7e, 06a21d2031, 9a3febb7c5, 27cd765d6f, a0c0a957b4, b89f7c69ad, fcdc6ad085
.github/workflows/release.yml (vendored, new file, +124)

```yaml
name: release

on:
  schedule:
    - cron: '0 13 * * *' # This schedule runs every 13:00:00Z(21:00:00+08:00)
  # The "create tags" trigger is specifically focused on the creation of new tags, while the "push tags" trigger is activated when tags are pushed, including both new tag creations and updates to existing tags.
  create:
    tags:
      - "v*.*.*" # normal release
      - "nightly" # the only one mutable tag

# https://docs.github.com/en/actions/using-jobs/using-concurrency
concurrency:
  group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
  cancel-in-progress: true

jobs:
  release:
    runs-on: [ "self-hosted", "overseas" ]
    steps:
      - name: Ensure workspace ownership
        run: echo "chown -R $USER $GITHUB_WORKSPACE" && sudo chown -R $USER $GITHUB_WORKSPACE

      # https://github.com/actions/checkout/blob/v3/README.md
      - name: Check out code
        uses: actions/checkout@v4
        with:
          token: ${{ secrets.MY_GITHUB_TOKEN }} # Use the secret as an environment variable
          fetch-depth: 0
          fetch-tags: true

      - name: Prepare release body
        run: |
          if [[ $GITHUB_EVENT_NAME == 'create' ]]; then
            RELEASE_TAG=${GITHUB_REF#refs/tags/}
            if [[ $RELEASE_TAG == 'nightly' ]]; then
              PRERELEASE=true
            else
              PRERELEASE=false
            fi
            echo "Workflow triggered by create tag: $RELEASE_TAG"
          else
            RELEASE_TAG=nightly
            PRERELEASE=true
            echo "Workflow triggered by schedule"
          fi
          echo "RELEASE_TAG=$RELEASE_TAG" >> $GITHUB_ENV
          echo "PRERELEASE=$PRERELEASE" >> $GITHUB_ENV
          RELEASE_DATETIME=$(date --rfc-3339=seconds)
          echo Release $RELEASE_TAG created from $GITHUB_SHA at $RELEASE_DATETIME > release_body.md

      - name: Move the existing mutable tag
        # https://github.com/softprops/action-gh-release/issues/171
        run: |
          git fetch --tags
          if [[ $GITHUB_EVENT_NAME == 'schedule' ]]; then
            # Determine if a given tag exists and matches a specific Git commit.
            # actions/checkout@v4 fetch-tags doesn't work when triggered by schedule
            if [ "$(git rev-parse -q --verify "refs/tags/$RELEASE_TAG")" = "$GITHUB_SHA" ]; then
              echo "mutable tag $RELEASE_TAG exists and matches $GITHUB_SHA"
            else
              git tag -f $RELEASE_TAG $GITHUB_SHA
              git push -f origin $RELEASE_TAG:refs/tags/$RELEASE_TAG
              echo "created/moved mutable tag $RELEASE_TAG to $GITHUB_SHA"
            fi
          fi

      - name: Create or overwrite a release
        # https://github.com/actions/upload-release-asset has been replaced by https://github.com/softprops/action-gh-release
        uses: softprops/action-gh-release@v2
        with:
          token: ${{ secrets.MY_GITHUB_TOKEN }} # Use the secret as an environment variable
          prerelease: ${{ env.PRERELEASE }}
          tag_name: ${{ env.RELEASE_TAG }}
          # The body field does not support environment variable substitution directly.
          body_path: release_body.md

      - name: Set up QEMU
        uses: docker/setup-qemu-action@v3

      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v3

      # https://github.com/marketplace/actions/docker-login
      - name: Login to Docker Hub
        uses: docker/login-action@v3
        with:
          username: infiniflow
          password: ${{ secrets.DOCKERHUB_TOKEN }}

      # https://github.com/marketplace/actions/build-and-push-docker-images
      - name: Build and push full image
        uses: docker/build-push-action@v6
        with:
          context: .
          push: true
          tags: infiniflow/ragflow:${{ env.RELEASE_TAG }}
          file: Dockerfile
          platforms: linux/amd64

      # https://github.com/marketplace/actions/build-and-push-docker-images
      - name: Build and push slim image
        uses: docker/build-push-action@v6
        with:
          context: .
          push: true
          tags: infiniflow/ragflow:${{ env.RELEASE_TAG }}-slim
          file: Dockerfile
          build-args: LIGHTEN=1
          platforms: linux/amd64

      - name: Build ragflow-sdk
        if: startsWith(github.ref, 'refs/tags/v')
        run: |
          cd sdk/python && \
          poetry build

      - name: Publish package distributions to PyPI
        if: startsWith(github.ref, 'refs/tags/v')
        uses: pypa/gh-action-pypi-publish@release/v1
        with:
          packages-dir: dist/
          password: ${{ secrets.PYPI_API_TOKEN }}
          verbose: true
```
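In practice the workflow has two entry points: the nightly schedule, and tag creation. A minimal sketch of cutting a versioned release under these triggers (assuming push access, and that tag creation on your remote fires the `create` event) might look like:

```bash
# Hypothetical release flow, matching the "create: tags: v*.*.*" trigger above.
git checkout main && git pull --ff-only

# A versioned tag takes the non-prerelease path (PRERELEASE=false).
git tag -a v0.15.0 -m "RAGFlow v0.15.0"
git push origin v0.15.0

# "nightly" is the single mutable tag; the scheduled run force-moves it to
# HEAD itself, so it normally never needs to be pushed by hand.
```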
.github/workflows/tests.yml (vendored, 31 changed lines)

```diff
@@ -49,29 +49,36 @@ jobs:
           fetch-depth: 0
           fetch-tags: true
 
-      - name: Build ragflow:dev-slim
+      # https://github.com/astral-sh/ruff-action
+      - name: Static check with Ruff
+        uses: astral-sh/ruff-action@v2
+        with:
+          version: ">=0.8.2"
+          args: "check --ignore E402"
+
+      - name: Build ragflow:nightly-slim
         run: |
           RUNNER_WORKSPACE_PREFIX=${RUNNER_WORKSPACE_PREFIX:-$HOME}
           cp -r ${RUNNER_WORKSPACE_PREFIX}/huggingface.co ${RUNNER_WORKSPACE_PREFIX}/nltk_data ${RUNNER_WORKSPACE_PREFIX}/libssl*.deb ${RUNNER_WORKSPACE_PREFIX}/tika-server*.jar* ${RUNNER_WORKSPACE_PREFIX}/chrome* ${RUNNER_WORKSPACE_PREFIX}/cl100k_base.tiktoken .
           sudo docker pull ubuntu:22.04
-          sudo docker build --progress=plain -f Dockerfile.slim -t infiniflow/ragflow:dev-slim .
+          sudo docker build --progress=plain --build-arg LIGHTEN=1 --build-arg NEED_MIRROR=1 -f Dockerfile -t infiniflow/ragflow:nightly-slim .
 
-      - name: Build ragflow:dev
+      - name: Build ragflow:nightly
         run: |
-          sudo docker build --progress=plain -f Dockerfile -t infiniflow/ragflow:dev .
+          sudo docker build --progress=plain --build-arg NEED_MIRROR=1 -f Dockerfile -t infiniflow/ragflow:nightly .
 
-      - name: Start ragflow:dev-slim
+      - name: Start ragflow:nightly-slim
         run: |
+          echo "RAGFLOW_IMAGE=infiniflow/ragflow:nightly-slim" >> docker/.env
           sudo docker compose -f docker/docker-compose.yml up -d
 
-      - name: Stop ragflow:dev-slim
+      - name: Stop ragflow:nightly-slim
         if: always()  # always run this step even if previous steps failed
         run: |
           sudo docker compose -f docker/docker-compose.yml down -v
 
-      - name: Start ragflow:dev
+      - name: Start ragflow:nightly
         run: |
-          echo "RAGFLOW_IMAGE=infiniflow/ragflow:dev" >> docker/.env
+          echo "RAGFLOW_IMAGE=infiniflow/ragflow:nightly" >> docker/.env
           sudo docker compose -f docker/docker-compose.yml up -d
 
       - name: Run sdk tests against Elasticsearch
@@ -95,12 +102,12 @@ jobs:
           cd sdk/python && poetry install && source .venv/bin/activate && cd test/test_frontend_api && pytest -s --tb=short get_email.py test_dataset.py
 
-      - name: Stop ragflow:dev
+      - name: Stop ragflow:nightly
         if: always()  # always run this step even if previous steps failed
         run: |
           sudo docker compose -f docker/docker-compose.yml down -v
 
-      - name: Start ragflow:dev
+      - name: Start ragflow:nightly
         run: |
           sudo DOC_ENGINE=infinity docker compose -f docker/docker-compose.yml up -d
 
@@ -124,7 +131,7 @@ jobs:
           done
           cd sdk/python && poetry install && source .venv/bin/activate && cd test/test_frontend_api && pytest -s --tb=short get_email.py test_dataset.py
 
-      - name: Stop ragflow:dev
+      - name: Stop ragflow:nightly
         if: always()  # always run this step even if previous steps failed
         run: |
           sudo DOC_ENGINE=infinity docker compose -f docker/docker-compose.yml down -v
```
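The new Ruff step can be approximated locally before pushing; a rough equivalent, with the version pin and ignore flag taken from the step above, is:

```bash
# Local approximation of the CI lint step (the repo path "." is assumed).
pip install "ruff>=0.8.2"
ruff check --ignore E402 .
```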
.gitignore (vendored, 4 changed lines)

```diff
@@ -35,4 +35,6 @@ rag/res/deepdoc
 sdk/python/ragflow.egg-info/
 sdk/python/build/
 sdk/python/dist/
 sdk/python/ragflow_sdk.egg-info/
+huggingface.co/
+nltk_data/
```
Dockerfile (231 changed lines)

```diff
@@ -3,36 +3,71 @@ FROM ubuntu:22.04 AS base
 USER root
 SHELL ["/bin/bash", "-c"]
 
-ENV LIGHTEN=0
+ARG NEED_MIRROR=0
+ARG LIGHTEN=0
+ENV LIGHTEN=${LIGHTEN}
 
 WORKDIR /ragflow
 
-RUN rm -f /etc/apt/apt.conf.d/docker-clean \
-    && echo 'Binary::apt::APT::Keep-Downloaded-Packages "true";' > /etc/apt/apt.conf.d/keep-cache
+# Copy models downloaded via download_deps.py
+RUN mkdir -p /ragflow/rag/res/deepdoc /root/.ragflow
+RUN --mount=type=bind,from=infiniflow/ragflow_deps:latest,source=/huggingface.co,target=/huggingface.co \
+    cp /huggingface.co/InfiniFlow/huqie/huqie.txt.trie /ragflow/rag/res/ && \
+    tar --exclude='.*' -cf - \
+        /huggingface.co/InfiniFlow/text_concat_xgb_v1.0 \
+        /huggingface.co/InfiniFlow/deepdoc \
+        | tar -xf - --strip-components=3 -C /ragflow/rag/res/deepdoc
+RUN --mount=type=bind,from=infiniflow/ragflow_deps:latest,source=/huggingface.co,target=/huggingface.co \
+    if [ "$LIGHTEN" != "1" ]; then \
+        (tar -cf - \
+            /huggingface.co/BAAI/bge-large-zh-v1.5 \
+            /huggingface.co/BAAI/bge-reranker-v2-m3 \
+            /huggingface.co/maidalun1020/bce-embedding-base_v1 \
+            /huggingface.co/maidalun1020/bce-reranker-base_v1 \
+            | tar -xf - --strip-components=2 -C /root/.ragflow) \
+    fi
 
-RUN --mount=type=cache,id=ragflow_base_apt,target=/var/cache/apt,sharing=locked \
-    apt update && apt-get --no-install-recommends install -y ca-certificates
+# https://github.com/chrismattmann/tika-python
+# This is the only way to run python-tika without internet access. Without this set, the default is to check the tika version and pull latest every time from Apache.
+RUN --mount=type=bind,from=infiniflow/ragflow_deps:latest,source=/,target=/deps \
+    cp -r /deps/nltk_data /root/ && \
+    cp /deps/tika-server-standard-3.0.0.jar /deps/tika-server-standard-3.0.0.jar.md5 /ragflow/ && \
+    cp /deps/cl100k_base.tiktoken /ragflow/9b5ad71b2ce5302211f9c61530b329a4922fc6a4
 
-# Setup apt mirror site
-RUN sed -i 's|http://archive.ubuntu.com|https://mirrors.tuna.tsinghua.edu.cn|g' /etc/apt/sources.list
+ENV TIKA_SERVER_JAR="file:///ragflow/tika-server-standard-3.0.0.jar"
+ENV DEBIAN_FRONTEND=noninteractive
 
-RUN --mount=type=cache,id=ragflow_base_apt,target=/var/cache/apt,sharing=locked \
-    apt update && DEBIAN_FRONTEND=noninteractive apt install -y curl libpython3-dev nginx libglib2.0-0 libglx-mesa0 pkg-config libicu-dev libgdiplus default-jdk python3-pip pipx \
-    libatk-bridge2.0-0 libgtk-4-1 libnss3 xdg-utils unzip libgbm-dev wget git \
-    && rm -rf /var/lib/apt/lists/*
+# Setup apt
+# Python package and implicit dependencies:
+# opencv-python: libglib2.0-0 libglx-mesa0 libgl1
+# aspose-slides: pkg-config libicu-dev libgdiplus libssl1.1_1.1.1f-1ubuntu2_amd64.deb
+# python-pptx: default-jdk tika-server-standard-3.0.0.jar
+# selenium: libatk-bridge2.0-0 chrome-linux64-121-0-6167-85
+# Building C extensions: libpython3-dev libgtk-4-1 libnss3 xdg-utils libgbm-dev
+RUN --mount=type=cache,id=ragflow_apt,target=/var/cache/apt,sharing=locked \
+    if [ "$NEED_MIRROR" == "1" ]; then \
+        sed -i 's|http://archive.ubuntu.com|https://mirrors.tuna.tsinghua.edu.cn|g' /etc/apt/sources.list; \
+    fi; \
+    rm -f /etc/apt/apt.conf.d/docker-clean && \
+    echo 'Binary::apt::APT::Keep-Downloaded-Packages "true";' > /etc/apt/apt.conf.d/keep-cache && \
+    chmod 1777 /tmp && \
+    apt update && \
+    apt --no-install-recommends install -y ca-certificates && \
+    apt update && \
+    apt install -y libglib2.0-0 libglx-mesa0 libgl1 && \
+    apt install -y pkg-config libicu-dev libgdiplus && \
+    apt install -y default-jdk && \
+    apt install -y libatk-bridge2.0-0 && \
+    apt install -y libpython3-dev libgtk-4-1 libnss3 xdg-utils libgbm-dev && \
+    apt install -y python3-pip pipx nginx unzip curl wget git vim less
 
-RUN pip3 config set global.index-url https://pypi.tuna.tsinghua.edu.cn/simple && pip3 config set global.trusted-host "pypi.tuna.tsinghua.edu.cn mirrors.pku.edu.cn" && pip3 config set global.extra-index-url "https://mirrors.pku.edu.cn/pypi/web/simple" \
-    && pipx install poetry \
-    && /root/.local/bin/poetry self add poetry-plugin-pypi-mirror
-
-# https://forum.aspose.com/t/aspose-slides-for-net-no-usable-version-of-libssl-found-with-linux-server/271344/13
-# aspose-slides on linux/arm64 is unavailable
-RUN --mount=type=bind,source=libssl1.1_1.1.1f-1ubuntu2_amd64.deb,target=/root/libssl1.1_1.1.1f-1ubuntu2_amd64.deb \
-    --mount=type=bind,source=libssl1.1_1.1.1f-1ubuntu2_arm64.deb,target=/root/libssl1.1_1.1.1f-1ubuntu2_arm64.deb \
-    if [ "$(uname -m)" = "x86_64" ]; then \
-        dpkg -i /root/libssl1.1_1.1.1f-1ubuntu2_amd64.deb; \
-    elif [ "$(uname -m)" = "aarch64" ]; then \
-        dpkg -i /root/libssl1.1_1.1.1f-1ubuntu2_arm64.deb; \
-    fi
+RUN if [ "$NEED_MIRROR" == "1" ]; then \
+        pip3 config set global.index-url https://pypi.tuna.tsinghua.edu.cn/simple && \
+        pip3 config set global.trusted-host pypi.tuna.tsinghua.edu.cn; \
+    fi; \
+    pipx install poetry; \
+    if [ "$NEED_MIRROR" == "1" ]; then \
+        pipx inject poetry poetry-plugin-pypi-mirror; \
+    fi
 
 ENV PYTHONDONTWRITEBYTECODE=1 DOTNET_SYSTEM_GLOBALIZATION_INVARIANT=1
@@ -42,16 +77,53 @@ ENV POETRY_NO_INTERACTION=1
 ENV POETRY_VIRTUALENVS_IN_PROJECT=true
 ENV POETRY_VIRTUALENVS_CREATE=true
 ENV POETRY_REQUESTS_TIMEOUT=15
-ENV POETRY_PYPI_MIRROR_URL=https://pypi.tuna.tsinghua.edu.cn/simple/
 
 # nodejs 12.22 on Ubuntu 22.04 is too old
-RUN --mount=type=cache,id=ragflow_base_apt,target=/var/cache/apt,sharing=locked \
+RUN --mount=type=cache,id=ragflow_apt,target=/var/cache/apt,sharing=locked \
     curl -fsSL https://deb.nodesource.com/setup_20.x | bash - && \
     apt purge -y nodejs npm && \
     apt autoremove && \
     apt update && \
-    apt install -y nodejs cargo && \
-    rm -rf /var/lib/apt/lists/*
+    apt install -y nodejs cargo
 
+
+# Add msssql ODBC driver
+# macOS ARM64 environment, install msodbcsql18.
+# general x86_64 environment, install msodbcsql17.
+RUN --mount=type=cache,id=ragflow_apt,target=/var/cache/apt,sharing=locked \
+    curl https://packages.microsoft.com/keys/microsoft.asc | apt-key add - && \
+    curl https://packages.microsoft.com/config/ubuntu/22.04/prod.list > /etc/apt/sources.list.d/mssql-release.list && \
+    apt update && \
+    if [ -n "$ARCH" ] && [ "$ARCH" = "arm64" ]; then \
+        # MacOS ARM64
+        ACCEPT_EULA=Y apt install -y unixodbc-dev msodbcsql18; \
+    else \
+        # (x86_64)
+        ACCEPT_EULA=Y apt install -y unixodbc-dev msodbcsql17; \
+    fi || \
+    { echo "Failed to install ODBC driver"; exit 1; }
+
+
+# Add dependencies of selenium
+RUN --mount=type=bind,from=infiniflow/ragflow_deps:latest,source=/chrome-linux64-121-0-6167-85,target=/chrome-linux64.zip \
+    unzip /chrome-linux64.zip && \
+    mv chrome-linux64 /opt/chrome && \
+    ln -s /opt/chrome/chrome /usr/local/bin/
+RUN --mount=type=bind,from=infiniflow/ragflow_deps:latest,source=/chromedriver-linux64-121-0-6167-85,target=/chromedriver-linux64.zip \
+    unzip -j /chromedriver-linux64.zip chromedriver-linux64/chromedriver && \
+    mv chromedriver /usr/local/bin/ && \
+    rm -f /usr/bin/google-chrome
+
+# https://forum.aspose.com/t/aspose-slides-for-net-no-usable-version-of-libssl-found-with-linux-server/271344/13
+# aspose-slides on linux/arm64 is unavailable
+RUN --mount=type=bind,from=infiniflow/ragflow_deps:latest,source=/,target=/deps \
+    if [ "$(uname -m)" = "x86_64" ]; then \
+        dpkg -i /deps/libssl1.1_1.1.1f-1ubuntu2_amd64.deb; \
+    elif [ "$(uname -m)" = "aarch64" ]; then \
+        dpkg -i /deps/libssl1.1_1.1.1f-1ubuntu2_arm64.deb; \
+    fi
 
 # builder stage
 FROM base AS builder
@@ -59,17 +131,27 @@ USER root
 
 WORKDIR /ragflow
 
+# install dependencies from poetry.lock file
+COPY pyproject.toml poetry.toml poetry.lock ./
+
+RUN --mount=type=cache,id=ragflow_poetry,target=/root/.cache/pypoetry,sharing=locked \
+    if [ "$NEED_MIRROR" == "1" ]; then \
+        export POETRY_PYPI_MIRROR_URL=https://pypi.tuna.tsinghua.edu.cn/simple/; \
+    fi; \
+    if [ "$LIGHTEN" == "1" ]; then \
+        poetry install --no-root; \
+    else \
+        poetry install --no-root --with=full; \
+    fi
+
+COPY web web
+COPY docs docs
+RUN --mount=type=cache,id=ragflow_npm,target=/root/.npm,sharing=locked \
+    cd web && npm install --force && npm run build
+
 COPY .git /ragflow/.git
 
-RUN current_commit=$(git rev-parse --short HEAD); \
-    last_tag=$(git describe --tags --abbrev=0); \
-    commit_count=$(git rev-list --count "$last_tag..HEAD"); \
-    version_info=""; \
-    if [ "$commit_count" -eq 0 ]; then \
-        version_info=$last_tag; \
-    else \
-        version_info="$current_commit($last_tag~$commit_count)"; \
-    fi; \
+RUN version_info=$(git describe --tags --match=v* --first-parent --always); \
     if [ "$LIGHTEN" == "1" ]; then \
         version_info="$version_info slim"; \
     else \
@@ -78,34 +160,18 @@ RUN current_commit=$(git rev-parse --short HEAD); \
     echo "RAGFlow version: $version_info"; \
     echo $version_info > /ragflow/VERSION
 
-COPY web web
-COPY docs docs
-RUN --mount=type=cache,id=ragflow_builder_npm,target=/root/.npm,sharing=locked \
-    cd web && npm install --force && npm run build
-
-# install dependencies from poetry.lock file
-COPY pyproject.toml poetry.toml poetry.lock ./
-
-RUN --mount=type=cache,id=ragflow_builder_poetry,target=/root/.cache/pypoetry,sharing=locked \
-    if [ "$LIGHTEN" == "1" ]; then \
-        poetry install --no-root; \
-    else \
-        poetry install --no-root --with=full; \
-    fi
-
 # production stage
 FROM base AS production
 USER root
 
 WORKDIR /ragflow
 
-COPY --from=builder /ragflow/VERSION /ragflow/VERSION
+# Copy Python environment and packages
+ENV VIRTUAL_ENV=/ragflow/.venv
+COPY --from=builder ${VIRTUAL_ENV} ${VIRTUAL_ENV}
+ENV PATH="${VIRTUAL_ENV}/bin:${PATH}"
 
-# Install python packages' dependencies
-# cv2 requires libGL.so.1
-RUN --mount=type=cache,id=ragflow_production_apt,target=/var/cache/apt,sharing=locked \
-    apt update && apt install -y --no-install-recommends nginx libgl1 vim less && \
-    rm -rf /var/lib/apt/lists/*
+ENV PYTHONPATH=/ragflow/
 
 COPY web web
 COPY api api
@@ -116,55 +182,12 @@ COPY agent agent
 COPY graphrag graphrag
 COPY pyproject.toml poetry.toml poetry.lock ./
 
-# Copy models downloaded via download_deps.py
-RUN mkdir -p /ragflow/rag/res/deepdoc /root/.ragflow
-RUN --mount=type=bind,source=huggingface.co,target=/huggingface.co \
-    tar --exclude='.*' -cf - \
-        /huggingface.co/InfiniFlow/text_concat_xgb_v1.0 \
-        /huggingface.co/InfiniFlow/deepdoc \
-        | tar -xf - --strip-components=3 -C /ragflow/rag/res/deepdoc
-RUN --mount=type=bind,source=huggingface.co,target=/huggingface.co \
-    tar -cf - \
-        /huggingface.co/BAAI/bge-large-zh-v1.5 \
-        /huggingface.co/BAAI/bge-reranker-v2-m3 \
-        /huggingface.co/maidalun1020/bce-embedding-base_v1 \
-        /huggingface.co/maidalun1020/bce-reranker-base_v1 \
-        | tar -xf - --strip-components=2 -C /root/.ragflow
-
-# Copy nltk data downloaded via download_deps.py
-COPY nltk_data /root/nltk_data
-
-# https://github.com/chrismattmann/tika-python
-# This is the only way to run python-tika without internet access. Without this set, the default is to check the tika version and pull latest every time from Apache.
-COPY tika-server-standard-3.0.0.jar /ragflow/tika-server-standard.jar
-COPY tika-server-standard-3.0.0.jar.md5 /ragflow/tika-server-standard.jar.md5
-ENV TIKA_SERVER_JAR="file:///ragflow/tika-server-standard.jar"
-
-# Copy cl100k_base
-COPY cl100k_base.tiktoken /ragflow/9b5ad71b2ce5302211f9c61530b329a4922fc6a4
-
-# Add dependencies of selenium
-RUN --mount=type=bind,source=chrome-linux64-121-0-6167-85,target=/chrome-linux64.zip \
-    unzip /chrome-linux64.zip && \
-    mv chrome-linux64 /opt/chrome && \
-    ln -s /opt/chrome/chrome /usr/local/bin/
-RUN --mount=type=bind,source=chromedriver-linux64-121-0-6167-85,target=/chromedriver-linux64.zip \
-    unzip -j /chromedriver-linux64.zip chromedriver-linux64/chromedriver && \
-    mv chromedriver /usr/local/bin/ && \
-    rm -f /usr/bin/google-chrome
-
-# Copy compiled web pages
-COPY --from=builder /ragflow/web/dist /ragflow/web/dist
-
-# Copy Python environment and packages
-ENV VIRTUAL_ENV=/ragflow/.venv
-COPY --from=builder ${VIRTUAL_ENV} ${VIRTUAL_ENV}
-ENV PATH="${VIRTUAL_ENV}/bin:${PATH}"
-
-ENV PYTHONPATH=/ragflow/
-
 COPY docker/service_conf.yaml.template ./conf/service_conf.yaml.template
 COPY docker/entrypoint.sh ./entrypoint.sh
 RUN chmod +x ./entrypoint.sh
 
+# Copy compiled web pages
+COPY --from=builder /ragflow/web/dist /ragflow/web/dist
+
+COPY --from=builder /ragflow/VERSION /ragflow/VERSION
 ENTRYPOINT ["./entrypoint.sh"]
```
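The nine-line version script is replaced by a single `git describe` call. As a rough illustration of what that derivation yields (the concrete tag and hash values are assumed, not taken from this repo):

```bash
# Sketch of the new version derivation, depending on where HEAD sits:
git describe --tags --match=v* --first-parent --always
# - exactly on a release tag:      v0.15.0
# - N commits past the last tag:   v0.15.0-3-ga5cf6fc
# - no matching tag reachable:     a5cf6fc   (--always falls back to the commit hash)
```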
Dockerfile.deps (new file, +10)

```dockerfile
# This builds an image that contains the resources needed by Dockerfile
#
FROM scratch

# Copy resources downloaded via download_deps.py
COPY chromedriver-linux64-121-0-6167-85 chrome-linux64-121-0-6167-85 cl100k_base.tiktoken libssl1.1_1.1.1f-1ubuntu2_amd64.deb libssl1.1_1.1.1f-1ubuntu2_arm64.deb tika-server-standard-3.0.0.jar tika-server-standard-3.0.0.jar.md5 libssl*.deb /

COPY nltk_data /nltk_data

COPY huggingface.co /huggingface.co
```
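Because the main Dockerfile bind-mounts from `infiniflow/ragflow_deps:latest`, that image presumably has to exist locally (or be pullable) before a main build. A plausible local sequence, assuming `download_deps.py` fetches everything the COPY lines above expect, is:

```bash
# Hypothetical local build of the dependency image consumed by Dockerfile.
pip3 install huggingface-hub nltk
python3 download_deps.py
docker build -f Dockerfile.deps -t infiniflow/ragflow_deps:latest .
```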
Dockerfile.slim (deleted, −163; the slim variant is now built from Dockerfile with LIGHTEN=1). The removed file:

```dockerfile
# base stage
FROM ubuntu:22.04 AS base
USER root
SHELL ["/bin/bash", "-c"]

ENV LIGHTEN=1

WORKDIR /ragflow

RUN rm -f /etc/apt/apt.conf.d/docker-clean \
    && echo 'Binary::apt::APT::Keep-Downloaded-Packages "true";' > /etc/apt/apt.conf.d/keep-cache

RUN --mount=type=cache,id=ragflow_base_apt,target=/var/cache/apt,sharing=locked \
    apt update && apt-get --no-install-recommends install -y ca-certificates

# Setup apt mirror site
RUN sed -i 's|http://archive.ubuntu.com|https://mirrors.tuna.tsinghua.edu.cn|g' /etc/apt/sources.list

RUN --mount=type=cache,id=ragflow_base_apt,target=/var/cache/apt,sharing=locked \
    apt update && DEBIAN_FRONTEND=noninteractive apt install -y curl libpython3-dev nginx libglib2.0-0 libglx-mesa0 pkg-config libicu-dev libgdiplus default-jdk python3-pip pipx \
    libatk-bridge2.0-0 libgtk-4-1 libnss3 xdg-utils unzip libgbm-dev wget git \
    && rm -rf /var/lib/apt/lists/*

RUN pip3 config set global.index-url https://pypi.tuna.tsinghua.edu.cn/simple && pip3 config set global.trusted-host "pypi.tuna.tsinghua.edu.cn mirrors.pku.edu.cn" && pip3 config set global.extra-index-url "https://mirrors.pku.edu.cn/pypi/web/simple" \
    && pipx install poetry \
    && /root/.local/bin/poetry self add poetry-plugin-pypi-mirror

# https://forum.aspose.com/t/aspose-slides-for-net-no-usable-version-of-libssl-found-with-linux-server/271344/13
# aspose-slides on linux/arm64 is unavailable
RUN --mount=type=bind,source=libssl1.1_1.1.1f-1ubuntu2_amd64.deb,target=/root/libssl1.1_1.1.1f-1ubuntu2_amd64.deb \
    --mount=type=bind,source=libssl1.1_1.1.1f-1ubuntu2_arm64.deb,target=/root/libssl1.1_1.1.1f-1ubuntu2_arm64.deb \
    if [ "$(uname -m)" = "x86_64" ]; then \
        dpkg -i /root/libssl1.1_1.1.1f-1ubuntu2_amd64.deb; \
    elif [ "$(uname -m)" = "aarch64" ]; then \
        dpkg -i /root/libssl1.1_1.1.1f-1ubuntu2_arm64.deb; \
    fi

ENV PYTHONDONTWRITEBYTECODE=1 DOTNET_SYSTEM_GLOBALIZATION_INVARIANT=1
ENV PATH=/root/.local/bin:$PATH
# Configure Poetry
ENV POETRY_NO_INTERACTION=1
ENV POETRY_VIRTUALENVS_IN_PROJECT=true
ENV POETRY_VIRTUALENVS_CREATE=true
ENV POETRY_REQUESTS_TIMEOUT=15
ENV POETRY_PYPI_MIRROR_URL=https://pypi.tuna.tsinghua.edu.cn/simple/

# nodejs 12.22 on Ubuntu 22.04 is too old
RUN --mount=type=cache,id=ragflow_base_apt,target=/var/cache/apt,sharing=locked \
    curl -fsSL https://deb.nodesource.com/setup_20.x | bash - && \
    apt purge -y nodejs npm && \
    apt autoremove && \
    apt update && \
    apt install -y nodejs cargo && \
    rm -rf /var/lib/apt/lists/*

# builder stage
FROM base AS builder
USER root

WORKDIR /ragflow

COPY .git /ragflow/.git

RUN current_commit=$(git rev-parse --short HEAD); \
    last_tag=$(git describe --tags --abbrev=0); \
    commit_count=$(git rev-list --count "$last_tag..HEAD"); \
    version_info=""; \
    if [ "$commit_count" -eq 0 ]; then \
        version_info=$last_tag; \
    else \
        version_info="$current_commit($last_tag~$commit_count)"; \
    fi; \
    if [ "$LIGHTEN" == "1" ]; then \
        version_info="$version_info slim"; \
    else \
        version_info="$version_info full"; \
    fi; \
    echo "RAGFlow version: $version_info"; \
    echo $version_info > /ragflow/VERSION

COPY web web
COPY docs docs
RUN --mount=type=cache,id=ragflow_builder_npm,target=/root/.npm,sharing=locked \
    cd web && npm install --force && npm run build

# install dependencies from poetry.lock file
COPY pyproject.toml poetry.toml poetry.lock ./

RUN --mount=type=cache,id=ragflow_builder_poetry,target=/root/.cache/pypoetry,sharing=locked \
    if [ "$LIGHTEN" == "1" ]; then \
        poetry install --no-root; \
    else \
        poetry install --no-root --with=full; \
    fi

# production stage
FROM base AS production
USER root

WORKDIR /ragflow

COPY --from=builder /ragflow/VERSION /ragflow/VERSION

# Install python packages' dependencies
# cv2 requires libGL.so.1
RUN --mount=type=cache,id=ragflow_production_apt,target=/var/cache/apt,sharing=locked \
    apt update && apt install -y --no-install-recommends nginx libgl1 vim less && \
    rm -rf /var/lib/apt/lists/*

COPY web web
COPY api api
COPY conf conf
COPY deepdoc deepdoc
COPY rag rag
COPY agent agent
COPY graphrag graphrag
COPY pyproject.toml poetry.toml poetry.lock ./

# Copy models downloaded via download_deps.py
RUN mkdir -p /ragflow/rag/res/deepdoc /root/.ragflow
RUN --mount=type=bind,source=huggingface.co,target=/huggingface.co \
    tar --exclude='.*' -cf - \
        /huggingface.co/InfiniFlow/text_concat_xgb_v1.0 \
        /huggingface.co/InfiniFlow/deepdoc \
        | tar -xf - --strip-components=3 -C /ragflow/rag/res/deepdoc

# Copy nltk data downloaded via download_deps.py
COPY nltk_data /root/nltk_data

# https://github.com/chrismattmann/tika-python
# This is the only way to run python-tika without internet access. Without this set, the default is to check the tika version and pull latest every time from Apache.
COPY tika-server-standard-3.0.0.jar /ragflow/tika-server-standard.jar
COPY tika-server-standard-3.0.0.jar.md5 /ragflow/tika-server-standard.jar.md5
ENV TIKA_SERVER_JAR="file:///ragflow/tika-server-standard.jar"

# Copy cl100k_base
COPY cl100k_base.tiktoken /ragflow/9b5ad71b2ce5302211f9c61530b329a4922fc6a4

# Add dependencies of selenium
RUN --mount=type=bind,source=chrome-linux64-121-0-6167-85,target=/chrome-linux64.zip \
    unzip /chrome-linux64.zip && \
    mv chrome-linux64 /opt/chrome && \
    ln -s /opt/chrome/chrome /usr/local/bin/
RUN --mount=type=bind,source=chromedriver-linux64-121-0-6167-85,target=/chromedriver-linux64.zip \
    unzip -j /chromedriver-linux64.zip chromedriver-linux64/chromedriver && \
    mv chromedriver /usr/local/bin/ && \
    rm -f /usr/bin/google-chrome

# Copy compiled web pages
COPY --from=builder /ragflow/web/dist /ragflow/web/dist

# Copy Python environment and packages
ENV VIRTUAL_ENV=/ragflow/.venv
COPY --from=builder ${VIRTUAL_ENV} ${VIRTUAL_ENV}
ENV PATH="${VIRTUAL_ENV}/bin:${PATH}"

ENV PYTHONPATH=/ragflow/

COPY docker/service_conf.yaml.template ./conf/service_conf.yaml.template
COPY docker/entrypoint.sh ./entrypoint.sh
RUN chmod +x ./entrypoint.sh

ENTRYPOINT ["./entrypoint.sh"]
```
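The replacement invocation for what Dockerfile.slim used to produce, as the updated READMEs show, is a single build against the unified Dockerfile:

```bash
# Slim image from the single Dockerfile (replaces Dockerfile.slim)
docker build --build-arg LIGHTEN=1 -f Dockerfile -t infiniflow/ragflow:nightly-slim .
```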
README.md (48 changed lines)

````diff
@@ -20,7 +20,7 @@
     <img alt="Static Badge" src="https://img.shields.io/badge/Online-Demo-4e6b99">
   </a>
   <a href="https://hub.docker.com/r/infiniflow/ragflow" target="_blank">
-    <img src="https://img.shields.io/badge/docker_pull-ragflow:v0.14.1-brightgreen" alt="docker pull infiniflow/ragflow:v0.14.1">
+    <img src="https://img.shields.io/badge/docker_pull-ragflow:v0.15.0-brightgreen" alt="docker pull infiniflow/ragflow:v0.15.0">
   </a>
   <a href="https://github.com/infiniflow/ragflow/releases/latest">
     <img src="https://img.shields.io/github/v/release/infiniflow/ragflow?color=blue&label=Latest%20Release" alt="Latest Release">
@@ -75,9 +75,10 @@ Try our demo at [https://demo.ragflow.io](https://demo.ragflow.io).

 ## 🔥 Latest Updates

+- 2024-12-18 Upgrades Document Layout Analysis model in Deepdoc.
 - 2024-12-04 Adds support for pagerank score in knowledge base.
 - 2024-11-22 Adds more variables to Agent.
 - 2024-11-01 Adds keyword extraction and related question generation to the parsed chunks to improve the accuracy of retrieval.
 - 2024-09-13 Adds search mode for knowledge base Q&A.
 - 2024-08-22 Support text to SQL statements through RAG.
 - 2024-08-02 Supports GraphRAG inspired by [graphrag](https://github.com/microsoft/graphrag) and mind map.

@@ -165,29 +166,21 @@ releases! 🌟
    $ git clone https://github.com/infiniflow/ragflow.git
    ```

-3. Build the pre-built Docker images and start up the server:
+3. Start up the server using the pre-built Docker images:

-   > The command below downloads the dev version Docker image for RAGFlow slim (`dev-slim`). Note that RAGFlow slim Docker images do not include embedding models or Python libraries and hence are approximately 1GB in size.
+   > The command below downloads the `v0.15.0-slim` edition of the RAGFlow Docker image. Refer to the following table for descriptions of different RAGFlow editions. To download a RAGFlow edition different from `v0.15.0-slim`, update the `RAGFLOW_IMAGE` variable accordingly in **docker/.env** before using `docker compose` to start the server. For example: set `RAGFLOW_IMAGE=infiniflow/ragflow:v0.15.0` for the full edition `v0.15.0`.

    ```bash
-   $ cd ragflow/docker
-   $ docker compose -f docker-compose.yml up -d
+   $ cd ragflow
+   $ docker compose -f docker/docker-compose.yml up -d
    ```

-   > - To download a RAGFlow slim Docker image of a specific version, update the `RAGFLOW_IMAGE` variable in **docker/.env** to your desired version. For example, `RAGFLOW_IMAGE=infiniflow/ragflow:v0.14.1-slim`. After making this change, rerun the command above to initiate the download.
-   > - To download the dev version of RAGFlow Docker image *including* embedding models and Python libraries, update the `RAGFLOW_IMAGE` variable in **docker/.env** to `RAGFLOW_IMAGE=infiniflow/ragflow:dev`. After making this change, rerun the command above to initiate the download.
-   > - To download a specific version of RAGFlow Docker image *including* embedding models and Python libraries, update the `RAGFLOW_IMAGE` variable in **docker/.env** to your desired version. For example, `RAGFLOW_IMAGE=infiniflow/ragflow:v0.14.1`. After making this change, rerun the command above to initiate the download.
-
-   > **NOTE:** A RAGFlow Docker image that includes embedding models and Python libraries is approximately 9GB in size and may take significantly longer time to load.
+   | RAGFlow image tag | Image size (GB) | Has embedding models? | Stable?                  |
+   | ----------------- | --------------- | --------------------- | ------------------------ |
+   | v0.15.0           | ≈9              | :heavy_check_mark:    | Stable release           |
+   | v0.15.0-slim      | ≈2              | ❌                    | Stable release           |
+   | nightly           | ≈9              | :heavy_check_mark:    | *Unstable* nightly build |
+   | nightly-slim      | ≈2              | ❌                    | *Unstable* nightly build |

 4. Check the server status after having the server up and running:

@@ -267,14 +260,12 @@ RAGFlow uses Elasticsearch by default for storing full text and vectors. To swit

 ## 🔧 Build a Docker image without embedding models

-This image is approximately 1 GB in size and relies on external LLM and embedding services.
+This image is approximately 2 GB in size and relies on external LLM and embedding services.

 ```bash
 git clone https://github.com/infiniflow/ragflow.git
 cd ragflow/
 pip3 install huggingface-hub nltk
 python3 download_deps.py
-docker build -f Dockerfile.slim -t infiniflow/ragflow:dev-slim .
+docker build --build-arg LIGHTEN=1 -f Dockerfile -t infiniflow/ragflow:nightly-slim .
 ```

 ## 🔧 Build a Docker image including embedding models
@@ -284,23 +275,21 @@ This image is approximately 9 GB in size. As it includes embedding models, it re

 ```bash
 git clone https://github.com/infiniflow/ragflow.git
 cd ragflow/
 pip3 install huggingface-hub nltk
 python3 download_deps.py
-docker build -f Dockerfile -t infiniflow/ragflow:dev .
+docker build -f Dockerfile -t infiniflow/ragflow:nightly .
 ```

 ## 🔨 Launch service from source for development

 1. Install Poetry, or skip this step if it is already installed:
    ```bash
-   curl -sSL https://install.python-poetry.org | python3 -
+   pipx install poetry
+   export POETRY_VIRTUALENVS_CREATE=true POETRY_VIRTUALENVS_IN_PROJECT=true
    ```

 2. Clone the source code and install Python dependencies:
    ```bash
    git clone https://github.com/infiniflow/ragflow.git
    cd ragflow/
-   export POETRY_VIRTUALENVS_CREATE=true POETRY_VIRTUALENVS_IN_PROJECT=true
    ~/.local/bin/poetry install --sync --no-root --with=full # install RAGFlow dependent python modules
    ```

@@ -313,7 +302,6 @@ docker build -f Dockerfile -t infiniflow/ragflow:dev .
    ```
    127.0.0.1 es01 infinity mysql minio redis
    ```
    In **docker/service_conf.yaml.template**, update mysql port to `5455` and es port to `1200`, as specified in **docker/.env**.

 4. If you cannot access HuggingFace, set the `HF_ENDPOINT` environment variable to use a mirror site:
````
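As the new note instructs, switching editions is just a matter of editing `RAGFLOW_IMAGE` in **docker/.env** before composing. A hedged one-liner, assuming docker/.env contains a `RAGFLOW_IMAGE=` line in exactly that form, could be:

```bash
# Pin a specific edition, then start the stack (variable line format assumed).
sed -i 's|^RAGFLOW_IMAGE=.*|RAGFLOW_IMAGE=infiniflow/ragflow:v0.15.0|' docker/.env
docker compose -f docker/docker-compose.yml up -d
```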
README_id.md (66 changed lines)

````diff
@@ -20,7 +20,7 @@
     <img alt="Online Badge" src="https://img.shields.io/badge/Online-Demo-4e6b99">
   </a>
   <a href="https://hub.docker.com/r/infiniflow/ragflow" target="_blank">
-    <img src="https://img.shields.io/badge/docker_pull-ragflow:v0.14.1-brightgreen" alt="docker pull infiniflow/ragflow:v0.14.1">
+    <img src="https://img.shields.io/badge/docker_pull-ragflow:v0.15.0-brightgreen" alt="docker pull infiniflow/ragflow:v0.15.0">
   </a>
   <a href="https://github.com/infiniflow/ragflow/releases/latest">
     <img src="https://img.shields.io/github/v/release/infiniflow/ragflow?color=blue&label=Latest%20Release" alt="Latest Release">
@@ -72,11 +72,12 @@ Try our demo at [https://demo.ragflow.io](https://demo.ragflow.io).

 ## 🔥 Latest Updates

-- 22-11-2024 Improved definition and usage of variables in the Agent.
-- 2024-11-01: Adds keyword extraction and related question generation to improve retrieval accuracy.
-- 2024-09-13: Adds search mode for knowledge base Q&A.
-- 2024-08-22: Supports text to SQL statements through RAG.
-- 2024-08-02: Supports GraphRAG, inspired by [graphrag](https://github.com/microsoft/graphrag), and mind map.
+- 2024-12-18 Upgrades the Document Layout Analysis model in Deepdoc.
+- 2024-12-04 Supports pagerank scores in the knowledge base.
+- 2024-11-22 Improved definition and usage of variables in the Agent.
+- 2024-11-01 Adds keyword extraction and related question generation to improve retrieval accuracy.
+- 2024-08-22 Supports text to SQL statements through RAG.
+- 2024-08-02 Supports GraphRAG, inspired by [graphrag](https://github.com/microsoft/graphrag), and mind map.

 ## 🎉 Stay Tuned

@@ -160,26 +161,19 @@ Try our demo at [https://demo.ragflow.io](https://demo.ragflow.io).

 3. Build the pre-built Docker images and start up the server:

-   > The command below downloads the dev version of the RAGFlow slim Docker image (`dev-slim`). RAGFlow slim images do not include embedding models or Python libraries and are approximately 1GB in size.
+   > The command below downloads the v0.15.0-slim edition of the RAGFlow Docker image. Refer to the following table for descriptions of the different RAGFlow editions. To download an edition other than v0.15.0-slim, update the RAGFLOW_IMAGE variable in docker/.env before using docker compose to start the server. For example, set RAGFLOW_IMAGE=infiniflow/ragflow:v0.15.0 for the full v0.15.0 edition.

    ```bash
-   $ cd ragflow/docker
-   $ docker compose -f docker-compose.yml up -d
+   $ cd ragflow
+   $ docker compose -f docker/docker-compose.yml up -d
    ```

-   > - To download a specific version of the RAGFlow slim Docker image, update the `RAGFLOW_IMAGE` variable in **docker/.env** to the desired version, for example `RAGFLOW_IMAGE=infiniflow/ragflow:v0.14.1-slim`. After making this change, rerun the command above to start the download.
-   > - To download the dev version of the RAGFlow Docker image *including* embedding models and Python libraries, update the `RAGFLOW_IMAGE` variable in **docker/.env** to `RAGFLOW_IMAGE=infiniflow/ragflow:dev`. After making this change, rerun the command above to start the download.
-   > - To download a specific version of the RAGFlow Docker image *including* embedding models and Python libraries, update the `RAGFLOW_IMAGE` variable in **docker/.env** to the desired version, for example `RAGFLOW_IMAGE=infiniflow/ragflow:v0.14.1`. After making this change, rerun the command above to start the download.
-
-   > **NOTE:** A RAGFlow Docker image that includes embedding models and Python libraries is approximately 9GB in size and may take significantly longer to load.
+   | RAGFlow image tag | Image size (GB) | Has embedding models? | Stable?                  |
+   | ----------------- | --------------- | --------------------- | ------------------------ |
+   | v0.15.0           | ≈9              | :heavy_check_mark:    | Stable release           |
+   | v0.15.0-slim      | ≈2              | ❌                    | Stable release           |
+   | nightly           | ≈9              | :heavy_check_mark:    | *Unstable* nightly build |
+   | nightly-slim      | ≈2              | ❌                    | *Unstable* nightly build |

 4. Check the server status once it is up and running:

@@ -208,7 +202,7 @@ Try our demo at [https://demo.ragflow.io](https://demo.ragflow.io).
 5. Open your web browser, enter your server's IP address, and log in to RAGFlow.
    > With the default settings, you only need to enter `http://YOUR_DEVICE_IP` (**without** the port number), since the default HTTP port `80` can be omitted when using the default configuration.
-6. In [service_conf.yaml](./docker/service_conf.yaml), select the desired LLM factory in `user_default_llm` and update the `API_KEY` field with the corresponding API key.
+6. In [service_conf.yaml.template](./docker/service_conf.yaml.template), select the desired LLM factory in `user_default_llm` and update the `API_KEY` field with the corresponding API key.

    > See [llm_api_key_setup](https://ragflow.io/docs/dev/llm_api_key_setup) for more information.

@@ -221,16 +215,9 @@ For system configuration, you need to manage the following files:

 - [.env](./docker/.env): Holds basic system settings, such as `SVR_HTTP_PORT`, `MYSQL_PASSWORD`, and `MINIO_PASSWORD`.
-- [service_conf.yaml](./docker/service_conf.yaml): Configures the backend application.
+- [service_conf.yaml.template](./docker/service_conf.yaml.template): Configures the backend application.
 - [docker-compose.yml](./docker/docker-compose.yml): The system relies on [docker-compose.yml](./docker/docker-compose.yml) to start.

-You must ensure that changes to the [.env](./docker/.env) file match those in the [service_conf.yaml](./docker/service_conf.yaml) file.
-
-> The [./docker/README](./docker/README.md) file provides a detailed explanation of the environment settings and application configuration, and you are REQUIRED to ensure that all environment settings listed in [./docker/README](./docker/README.md) are aligned with the corresponding configuration in [service_conf.yaml](./docker/service_conf.yaml).
-
 To update the default HTTP port (80), open [docker-compose.yml](./docker/docker-compose.yml) and change `80:80` to `<YOUR_SERVING_PORT>:80`.

@@ -242,14 +229,12 @@ These configuration updates require a reboot of all containers to take effect:

 ## 🔧 Build a Docker image without embedding models

-This image is approximately 1 GB in size and relies on external LLM and embedding services.
+This image is approximately 2 GB in size and relies on external LLM and embedding services.

 ```bash
 git clone https://github.com/infiniflow/ragflow.git
 cd ragflow/
 pip3 install huggingface-hub nltk
 python3 download_deps.py
-docker build -f Dockerfile.slim -t infiniflow/ragflow:dev-slim .
+docker build --build-arg LIGHTEN=1 -f Dockerfile -t infiniflow/ragflow:nightly-slim .
 ```

 ## 🔧 Build a Docker image including embedding models
@@ -259,23 +244,21 @@ This image is approximately 9 GB in size. As it includes embedding models, it only relies on external LLM services.

 ```bash
 git clone https://github.com/infiniflow/ragflow.git
 cd ragflow/
 pip3 install huggingface-hub nltk
 python3 download_deps.py
-docker build -f Dockerfile -t infiniflow/ragflow:dev .
+docker build -f Dockerfile -t infiniflow/ragflow:nightly .
 ```

 ## 🔨 Launch the service from source for development

 1. Install Poetry, or skip this step if it is already installed:
    ```bash
-   curl -sSL https://install.python-poetry.org | python3 -
+   pipx install poetry
+   export POETRY_VIRTUALENVS_CREATE=true POETRY_VIRTUALENVS_IN_PROJECT=true
    ```

 2. Clone the source code and install Python dependencies:
    ```bash
    git clone https://github.com/infiniflow/ragflow.git
    cd ragflow/
-   export POETRY_VIRTUALENVS_CREATE=true POETRY_VIRTUALENVS_IN_PROJECT=true
    ~/.local/bin/poetry install --sync --no-root # install RAGFlow python modules
    ```

@@ -284,11 +267,10 @@
    docker compose -f docker/docker-compose-base.yml up -d
    ```

-   Add the following line to `/etc/hosts` to map all hosts specified in **docker/service_conf.yaml** to `127.0.0.1`:
+   Add the following line to `/etc/hosts` to map all hosts specified in **conf/service_conf.yaml** to `127.0.0.1`:
    ```
    127.0.0.1 es01 infinity mysql minio redis
    ```
    In **docker/service_conf.yaml**, update the mysql port to `5455` and the es port to `1200`, as specified in **docker/.env**.

 4. If you cannot access HuggingFace, set the `HF_ENDPOINT` environment variable to use a mirror site:
````
README_ja.md (45 changed lines)

````diff
@@ -20,7 +20,7 @@
     <img alt="Static Badge" src="https://img.shields.io/badge/Online-Demo-4e6b99">
   </a>
   <a href="https://hub.docker.com/r/infiniflow/ragflow" target="_blank">
-    <img src="https://img.shields.io/badge/docker_pull-ragflow:v0.14.1-brightgreen" alt="docker pull infiniflow/ragflow:v0.14.1">
+    <img src="https://img.shields.io/badge/docker_pull-ragflow:v0.15.0-brightgreen" alt="docker pull infiniflow/ragflow:v0.15.0">
   </a>
   <a href="https://github.com/infiniflow/ragflow/releases/latest">
     <img src="https://img.shields.io/github/v/release/infiniflow/ragflow?color=blue&label=Latest%20Release" alt="Latest Release">
@@ -54,9 +54,10 @@

 ## 🔥 Latest Updates

+- 2024-12-18 Upgrades the Document Layout Analysis model in Deepdoc.
 - 2024-12-04 Supports pagerank scores for the knowledge base.
 - 2024-11-22 Improved the definition and usage of variables in the Agent.
 - 2024-11-01 Added keyword extraction and related question generation to parsed chunks to improve recall accuracy.
 - 2024-09-13 Added a search mode for knowledge base Q&A.
 - 2024-08-22 Supports text to SQL statements through RAG.
 - 2024-08-02 Supports GraphRAG, inspired by [graphrag](https://github.com/microsoft/graphrag), and mind map.

@@ -141,18 +142,19 @@

 3. Build the pre-built Docker images and start the server:

-   > The command below downloads the dev version of the RAGFlow slim Docker image (`dev-slim`). RAGFlow slim Docker images do not include embedding models or Python libraries, so they are approximately 1GB in size.
+   > The command below downloads the v0.15.0-slim edition of the RAGFlow Docker image. See the table below for descriptions of the different RAGFlow editions. To download an edition other than v0.15.0-slim, update the RAGFLOW_IMAGE variable in docker/.env before starting the server with docker compose. For example, set RAGFLOW_IMAGE=infiniflow/ragflow:v0.15.0 for the full v0.15.0 edition.

    ```bash
-   $ cd ragflow/docker
-   $ docker compose -f docker-compose.yml up -d
+   $ cd ragflow
+   $ docker compose -f docker/docker-compose.yml up -d
    ```

-   > - To download a specific version of the RAGFlow slim Docker image, update the `RAGFLOW_IMAGE` variable in **docker/.env** to the desired version, for example `RAGFLOW_IMAGE=infiniflow/ragflow:v0.14.1-slim`. After making this change, rerun the command above to start the download.
-   > - To download the dev version of the RAGFlow Docker image *including* embedding models and Python libraries, update the `RAGFLOW_IMAGE` variable in **docker/.env** to `RAGFLOW_IMAGE=infiniflow/ragflow:dev`. After making this change, rerun the command above to start the download.
-   > - To download a specific version of the RAGFlow Docker image (including embedding models and Python libraries), update the `RAGFLOW_IMAGE` variable in **docker/.env** to the desired version, for example `RAGFLOW_IMAGE=infiniflow/ragflow:v0.14.1`. After making this change, rerun the command above to start the download.
-
-   > **NOTE:** A RAGFlow Docker image that includes embedding models and Python libraries is approximately 9GB in size and may take considerably longer to load.
+   | RAGFlow image tag | Image size (GB) | Has embedding models? | Stable?                  |
+   | ----------------- | --------------- | --------------------- | ------------------------ |
+   | v0.15.0           | ≈9              | :heavy_check_mark:    | Stable release           |
+   | v0.15.0-slim      | ≈2              | ❌                    | Stable release           |
+   | nightly           | ≈9              | :heavy_check_mark:    | *Unstable* nightly build |
+   | nightly-slim      | ≈2              | ❌                    | *Unstable* nightly build |

 4. Check the server status after the server is up and running:

@@ -178,7 +180,7 @@

 5. In your web browser, enter your server's IP address as prompted and log in to RAGFlow.
    > With the default configuration, the default HTTP serving port `80` can be omitted, so you only need to enter `http://IP_OF_YOUR_MACHINE` (without the port number).
-6. In [service_conf.yaml](./docker/service_conf.yaml), select the desired LLM factory in `user_default_llm` and update the `API_KEY` field with the corresponding API key.
+6. In [service_conf.yaml.template](./docker/service_conf.yaml.template), select the desired LLM factory in `user_default_llm` and update the `API_KEY` field with the corresponding API key.

    > See [llm_api_key_setup](https://ragflow.io/docs/dev/llm_api_key_setup) for details.

@@ -189,12 +191,12 @@
 For system configuration, you need to manage the following files:

 - [.env](./docker/.env): Holds basic system settings such as `SVR_HTTP_PORT`, `MYSQL_PASSWORD`, and `MINIO_PASSWORD`.
-- [service_conf.yaml](./docker/service_conf.yaml): Configures the backend services.
+- [service_conf.yaml.template](./docker/service_conf.yaml.template): Configures the backend services.
 - [docker-compose.yml](./docker/docker-compose.yml): System startup depends on [docker-compose.yml](./docker/docker-compose.yml).

-You must make sure that changes to the [.env](./docker/.env) file match the contents of the [service_conf.yaml](./docker/service_conf.yaml) file.
+You must make sure that changes to the [.env](./docker/.env) file match the contents of the [service_conf.yaml.template](./docker/service_conf.yaml.template) file.

-> The [./docker/README](./docker/README.md) file provides a detailed description of the environment settings and service configuration, and you are required to make sure that all environment settings listed in [./docker/README](./docker/README.md) match the corresponding configuration in [service_conf.yaml](./docker/service_conf.yaml).
+> The [./docker/README](./docker/README.md) file contains a detailed description of the environment settings and service configurations that can be used as `${ENV_VARS}` in the service_conf.yaml.template file.

 To update the default HTTP serving port (80), go to [docker-compose.yml](./docker/docker-compose.yml) and change `80:80` to `<YOUR_SERVING_PORT>:80`.

@@ -228,9 +230,7 @@ RAGFlow uses Elasticsearch by default for storing full text and vectors
 ```bash
 git clone https://github.com/infiniflow/ragflow.git
 cd ragflow/
 pip3 install huggingface-hub nltk
 python3 download_deps.py
-docker build -f Dockerfile.slim -t infiniflow/ragflow:dev-slim .
+docker build --build-arg LIGHTEN=1 -f Dockerfile -t infiniflow/ragflow:nightly-slim .
 ```

 ## 🔧 Docker image compiled from source (including embedding models)
@@ -240,23 +240,21 @@
 ```bash
 git clone https://github.com/infiniflow/ragflow.git
 cd ragflow/
 pip3 install huggingface-hub nltk
 python3 download_deps.py
-docker build -f Dockerfile -t infiniflow/ragflow:dev .
+docker build -f Dockerfile -t infiniflow/ragflow:nightly .
 ```

 ## 🔨 Launching the service from source

 1. Install Poetry, or skip this step if it is already installed:
    ```bash
-   curl -sSL https://install.python-poetry.org | python3 -
+   pipx install poetry
+   export POETRY_VIRTUALENVS_CREATE=true POETRY_VIRTUALENVS_IN_PROJECT=true
    ```

 2. Clone the source code and install the Python dependencies:
    ```bash
    git clone https://github.com/infiniflow/ragflow.git
    cd ragflow/
-   export POETRY_VIRTUALENVS_CREATE=true POETRY_VIRTUALENVS_IN_PROJECT=true
    ~/.local/bin/poetry install --sync --no-root # install RAGFlow dependent python modules
    ```

@@ -265,11 +263,10 @@
    docker compose -f docker/docker-compose-base.yml up -d
    ```

-   Add the following line to `/etc/hosts` to resolve all hosts specified in **docker/service_conf.yaml** to `127.0.0.1`:
+   Add the following line to `/etc/hosts` to resolve all hosts specified in **conf/service_conf.yaml** to `127.0.0.1`:
    ```
    127.0.0.1 es01 infinity mysql minio redis
    ```
    In **docker/service_conf.yaml**, update the mysql port to `5455` and the es port to `1200`, as specified in **docker/.env**.

 4. If you cannot access HuggingFace, set the `HF_ENDPOINT` environment variable to use a mirror site:
````
53
README_ko.md
53
README_ko.md

@@ -20,7 +20,7 @@
<img alt="Static Badge" src="https://img.shields.io/badge/Online-Demo-4e6b99">
</a>
<a href="https://hub.docker.com/r/infiniflow/ragflow" target="_blank">
-<img src="https://img.shields.io/badge/docker_pull-ragflow:v0.14.1-brightgreen" alt="docker pull infiniflow/ragflow:v0.14.1">
+<img src="https://img.shields.io/badge/docker_pull-ragflow:v0.15.0-brightgreen" alt="docker pull infiniflow/ragflow:v0.15.0">
</a>
<a href="https://github.com/infiniflow/ragflow/releases/latest">
<img src="https://img.shields.io/github/v/release/infiniflow/ragflow?color=blue&label=Latest%20Release" alt="Latest Release">

@@ -56,12 +56,14 @@

## 🔥 Latest Updates

+- 2024-12-18 Upgrades the document layout analysis model in Deepdoc.
+- 2024-12-04 Adds support for pagerank scores for knowledge bases.
- 2024-11-22 Improves the definition and usage of variables in Agent.
- 2024-11-01 Adds keyword extraction and related-question generation to parsed chunks, improving recall.
- 2024-09-13 Adds a knowledge base Q&A search mode.
- 2024-08-22 Supports converting text into SQL statements through RAG.
- 2024-08-02 Supports GraphRAG, inspired by [graphrag](https://github.com/microsoft/graphrag), and mind maps.

@@ -145,19 +147,19 @@

3. Start up the server with the pre-built Docker images:

-> The command below downloads the dev version of the RAGFlow slim Docker image (dev-slim). A RAGFlow slim Docker image does not include embedding models or Python libraries, so it is about 1 GB in size.
+> The command below downloads the v0.15.0-slim edition of the RAGFlow Docker image. Refer to the table below for descriptions of the different RAGFlow editions. To download a RAGFlow edition different from v0.15.0-slim, update the RAGFLOW_IMAGE variable accordingly in docker/.env, then use docker compose to start the server. For example, set RAGFLOW_IMAGE=infiniflow/ragflow:v0.14.1 to download the full edition v0.14.1.

```bash
-$ cd ragflow/docker
-$ docker compose -f docker-compose.yml up -d
+$ cd ragflow
+$ docker compose -f docker/docker-compose.yml up -d
```
-> - To download a specific version of the RAGFlow slim Docker image, update the `RAGFLOW_IMAGE` variable in **docker/.env** to the desired version, e.g. `RAGFLOW_IMAGE=infiniflow/ragflow:v0.14.1-slim`, then re-run the command above to start the download.
-> - To download the dev version of the RAGFlow Docker image including embedding models and Python libraries, update the `RAGFLOW_IMAGE` variable in **docker/.env** to `RAGFLOW_IMAGE=infiniflow/ragflow:dev`, then re-run the command above to start the download.
-> - To download a specific version of the RAGFlow Docker image including embedding models and Python libraries, update the `RAGFLOW_IMAGE` variable in **docker/.env** to the desired version, e.g. `RAGFLOW_IMAGE=infiniflow/ragflow:v0.14.1`, then re-run the command above to start the download.

> **NOTE:** A RAGFlow Docker image including embedding models and Python libraries is about 9 GB in size and may take considerably longer to load.

+| RAGFlow image tag | Image size (GB) | Has embedding models? | Stable?                  |
+| ----------------- | --------------- | --------------------- | ------------------------ |
+| v0.15.0           | ≈9              | :heavy_check_mark:    | Stable release           |
+| v0.15.0-slim      | ≈2              | ❌                     | Stable release           |
+| nightly           | ≈9              | :heavy_check_mark:    | *Unstable* nightly build |
+| nightly-slim      | ≈2              | ❌                     | *Unstable* nightly build |
4. Check the server status after the server starts up:
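
The status-check command itself falls outside this hunk; as a sketch, tailing the server container's log is the usual check (assuming the default container name `ragflow-server`):

```bash
docker logs -f ragflow-server
```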

@@ -183,7 +185,7 @@

5. In your web browser, enter the IP address of your server and log in to RAGFlow.
   > With the default settings, you only need to enter `http://IP_OF_YOUR_MACHINE` (excluding the port number), as the default HTTP serving port `80` can be omitted when using the default configuration.
-6. In [service_conf.yaml](./docker/service_conf.yaml), select the desired LLM factory in `user_default_llm` and update the `API_KEY` field with the corresponding API key.
+6. In [service_conf.yaml.template](./docker/service_conf.yaml.template), select the desired LLM factory in `user_default_llm` and update the `API_KEY` field with the corresponding API key.
   > See [llm_api_key_setup](https://ragflow.io/docs/dev/llm_api_key_setup) for more information.

_The show is on!_

@@ -193,12 +195,12 @@

Regarding system configuration, you will need to manage the following files:

- [.env](./docker/.env): Contains the fundamental setups for the system, such as `SVR_HTTP_PORT`, `MYSQL_PASSWORD`, and `MINIO_PASSWORD`.
-- [service_conf.yaml](./docker/service_conf.yaml): Configures the back-end services.
+- [service_conf.yaml.template](./docker/service_conf.yaml.template): Configures the back-end services.
- [docker-compose.yml](./docker/docker-compose.yml): The system is started using [docker-compose.yml](./docker/docker-compose.yml).

-You must ensure that changes to the [.env](./docker/.env) file are in line with what is in the [service_conf.yaml](./docker/service_conf.yaml) file.
+You must ensure that changes to the [.env](./docker/.env) file are in line with what is in the [service_conf.yaml.template](./docker/service_conf.yaml.template) file.

-> The [./docker/README](./docker/README.md) file provides a detailed description of the environment settings and service configurations, and you are required to ensure that all environment settings listed in [./docker/README](./docker/README.md) match the corresponding configurations in the [service_conf.yaml](./docker/service_conf.yaml) file.
+> The [./docker/README](./docker/README.md) file provides a detailed description of the environment settings and service configurations which can be used as `${ENV_VARS}` in the service_conf.yaml.template file.

To update the default HTTP serving port (80), go to [docker-compose.yml](./docker/docker-compose.yml) and change `80:80` to `<YOUR_SERVING_PORT>:80`.

@@ -213,12 +215,12 @@

By default, RAGFlow uses Elasticsearch to store full text and vectors. To switch to [Infinity](https://github.com/infiniflow/infinity/), follow these steps:

1. Stop all running containers:
```bash
-$ docker compose -f docker/docker-compose.yml down-v
+$ docker compose -f docker/docker-compose.yml down -v
```
2. Set `DOC_ENGINE` in **docker/.env** to `infinity`.
3. Boot up the containers:
```bash
-$ docker compose -f docker/docker-compose.yml up-d
+$ docker compose -f docker/docker-compose.yml up -d
```
> [!WARNING]
> Switching to Infinity on a Linux/arm64 machine is not yet officially supported.

@@ -230,9 +232,7 @@ By default, RAGFlow uses Elasticsearch to store full text and vectors

```bash
git clone https://github.com/infiniflow/ragflow.git
cd ragflow/
pip3 install huggingface-hub nltk
python3 download_deps.py
-docker build -f Dockerfile.slim -t infiniflow/ragflow:dev-slim .
+docker build --build-arg LIGHTEN=1 -f Dockerfile -t infiniflow/ragflow:nightly-slim .
```
## 🔧 Build a Docker image from source (with embedding models)

@@ -242,23 +242,21 @@ docker build -f Dockerfile.slim -t infiniflow/ragflow:dev-slim .
```bash
git clone https://github.com/infiniflow/ragflow.git
cd ragflow/
pip3 install huggingface-hub nltk
python3 download_deps.py
-docker build -f Dockerfile -t infiniflow/ragflow:dev .
+docker build -f Dockerfile -t infiniflow/ragflow:nightly .
```
## 🔨 Launch the service from source

1. Install Poetry. Skip this step if it is already installed:
```bash
-curl -sSL https://install.python-poetry.org | python3 -
+pipx install poetry
+export POETRY_VIRTUALENVS_CREATE=true POETRY_VIRTUALENVS_IN_PROJECT=true
```

2. Clone the source code and install Python dependencies:
```bash
git clone https://github.com/infiniflow/ragflow.git
cd ragflow/
-export POETRY_VIRTUALENVS_CREATE=true POETRY_VIRTUALENVS_IN_PROJECT=true
~/.local/bin/poetry install --sync --no-root # install RAGFlow dependent python modules
```

@@ -267,11 +265,10 @@ docker build -f Dockerfile -t infiniflow/ragflow:dev .

```bash
docker compose -f docker/docker-compose-base.yml up -d
```

-Add the following line to `/etc/hosts` to resolve all hosts specified in **docker/service_conf.yaml** to `127.0.0.1`:
+Add the following line to `/etc/hosts` to resolve all hosts specified in **conf/service_conf.yaml** to `127.0.0.1`:

```
127.0.0.1 es01 infinity mysql minio redis
```

In **docker/service_conf.yaml**, update the mysql port to `5455` and the es port to `1200`, as specified in **docker/.env**.
4. If you cannot access HuggingFace, set the `HF_ENDPOINT` environment variable to use a mirror site:

README_zh.md

@@ -20,7 +20,7 @@
<img alt="Static Badge" src="https://img.shields.io/badge/Online-Demo-4e6b99">
</a>
<a href="https://hub.docker.com/r/infiniflow/ragflow" target="_blank">
-<img src="https://img.shields.io/badge/docker_pull-ragflow:v0.14.1-brightgreen" alt="docker pull infiniflow/ragflow:v0.14.1">
+<img src="https://img.shields.io/badge/docker_pull-ragflow:v0.15.0-brightgreen" alt="docker pull infiniflow/ragflow:v0.15.0">
</a>
<a href="https://github.com/infiniflow/ragflow/releases/latest">
<img src="https://img.shields.io/github/v/release/infiniflow/ragflow?color=blue&label=Latest%20Release" alt="Latest Release">

@@ -54,9 +54,10 @@

## 🔥 Latest Updates

+- 2024-12-18 Upgrades the document layout analysis model in Deepdoc.
- 2024-12-04 Adds support for pagerank scores for knowledge bases.
- 2024-11-22 Improves the definition and usage of variables in Agent.
- 2024-11-01 Adds keyword extraction and related-question generation to parsed chunks, improving the accuracy of recall.
- 2024-09-13 Adds a knowledge base Q&A search mode.
- 2024-08-22 Supports converting natural language into SQL statements through RAG.
- 2024-08-02 Supports GraphRAG, inspired by [graphrag](https://github.com/microsoft/graphrag), and mind maps.

@@ -142,18 +143,25 @@

3. Enter the **docker** folder and use the pre-built Docker images to start the server:

-> Running the command below automatically downloads the dev edition of the RAGFlow slim Docker image (`dev-slim`). This image does not include embedding models or some Python libraries, so it is about 1 GB in size.
+> Running the command below automatically downloads the RAGFlow slim Docker image `v0.15.0-slim`. See the table below for descriptions of the different Docker releases. To download a Docker image other than `v0.15.0-slim`, update the `RAGFLOW_IMAGE` variable in **docker/.env** before starting the server with `docker compose`. For example, set `RAGFLOW_IMAGE=infiniflow/ragflow:v0.14.1` to download the full `v0.14.1` release of the RAGFlow image.

```bash
-$ cd ragflow/docker
-$ docker compose -f docker-compose.yml up -d
+$ cd ragflow
+$ docker compose -f docker/docker-compose.yml up -d
```
-> - To download and run a specific version of the RAGFlow slim Docker image, find the `RAGFLOW_IMAGE` variable in the **docker/.env** file and change it to the corresponding version, e.g. `RAGFLOW_IMAGE=infiniflow/ragflow:v0.14.1-slim`, then re-run the command above.
-> - To install the dev Docker image with built-in embedding models and Python libraries, change the `RAGFLOW_IMAGE` variable in **docker/.env** to `RAGFLOW_IMAGE=infiniflow/ragflow:dev`.
-> - To install a specific version of the RAGFlow Docker image with built-in embedding models and Python libraries, change the `RAGFLOW_IMAGE` variable in **docker/.env** to `RAGFLOW_IMAGE=infiniflow/ragflow:v0.14.1`, then re-run the command above.
> **Note:** A RAGFlow Docker image with built-in embedding models and Python libraries is about 9 GB in size and may take a while to download; please be patient.

+| RAGFlow image tag | Image size (GB) | Has embedding models? | Stable?                  |
+| ----------------- | --------------- | --------------------- | ------------------------ |
+| v0.15.0           | ≈9              | :heavy_check_mark:    | Stable release           |
+| v0.15.0-slim      | ≈2              | ❌                     | Stable release           |
+| nightly           | ≈9              | :heavy_check_mark:    | *Unstable* nightly build |
+| nightly-slim      | ≈2              | ❌                     | *Unstable* nightly build |

+> [!TIP]
+> If you run into problems pulling the Docker image, you can choose the corresponding Huawei Cloud or Alibaba Cloud image in the **docker/.env** file, following the comments on the `RAGFLOW_IMAGE` variable.
+> - Huawei Cloud image name: `swr.cn-north-4.myhuaweicloud.com/infiniflow/ragflow`
+> - Alibaba Cloud image name: `registry.cn-hangzhou.aliyuncs.com/infiniflow/ragflow`

4. Check the server status again after the server starts up successfully:

@@ -178,7 +186,7 @@
5. In your browser, enter the IP address of your server and log in to RAGFlow.
   > In the example above, you only need to enter http://IP_OF_YOUR_MACHINE: if you have not changed the configuration, there is no need to enter the port (the default HTTP serving port is 80).
-6. In the `user_default_llm` section of [service_conf.yaml](./docker/service_conf.yaml), configure an LLM factory, and fill in the `API_KEY` field with the API key for the model you chose.
+6. In the `user_default_llm` section of [service_conf.yaml.template](./docker/service_conf.yaml.template), configure an LLM factory, and fill in the `API_KEY` field with the API key for the model you chose.

   > See [llm_api_key_setup](https://ragflow.io/docs/dev/llm_api_key_setup) for details.

@@ -189,21 +197,21 @@

System configuration involves the following three files:

- [.env](./docker/.env): Stores basic system environment variables, such as `SVR_HTTP_PORT`, `MYSQL_PASSWORD`, and `MINIO_PASSWORD`.
-- [service_conf.yaml](./docker/service_conf.yaml): Configures the back-end services.
+- [service_conf.yaml.template](./docker/service_conf.yaml.template): Configures the back-end services.
- [docker-compose.yml](./docker/docker-compose.yml): The system relies on this file to start up.

-Make absolutely sure that the variable settings in the [.env](./docker/.env) file are consistent with the configurations in the [service_conf.yaml](./docker/service_conf.yaml) file!
+Make absolutely sure that the variable settings in the [.env](./docker/.env) file are consistent with the configurations in the [service_conf.yaml.template](./docker/service_conf.yaml.template) file!

If you cannot access the image site hub.docker.com or the model site huggingface.co, update `RAGFLOW_IMAGE` and `HF_ENDPOINT` as instructed by the comments in [.env](./docker/.env).
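
As a sketch, the relevant **docker/.env** entries might then look like this (the Huawei Cloud image name comes from the tip above; the `https://hf-mirror.com` endpoint is an assumption, so substitute any mirror you can reach):

```bash
RAGFLOW_IMAGE=swr.cn-north-4.myhuaweicloud.com/infiniflow/ragflow:v0.15.0-slim
HF_ENDPOINT=https://hf-mirror.com
```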

-> The [./docker/README](./docker/README.md) file provides detailed information on environment variable settings and service configuration. Please make **absolutely** sure that the values of the environment variables listed in [./docker/README](./docker/README.md) are consistent with the system configurations in the [service_conf.yaml](./docker/service_conf.yaml) file.
+> [./docker/README](./docker/README.md) explains the environment variable settings and service configurations used by [service_conf.yaml.template](./docker/service_conf.yaml.template).

To update the default HTTP serving port (80), change `80:80` in the [docker-compose.yml](./docker/docker-compose.yml) file to `<YOUR_SERVING_PORT>:80`.

> All system configuration changes require a system restart to take effect:
>
> ```bash
-> $ docker compose -f docker-compose.yml up -d
+> $ docker compose -f docker/docker-compose.yml up -d
> ```

### Switch the document engine from Elasticsearch to Infinity

@@ -230,14 +238,12 @@ By default, RAGFlow uses Elasticsearch to store text and vector data. To switch

## 🔧 Build a Docker image from source (without embedding models)

-This Docker image is about 1 GB in size and relies on external LLM and embedding services.
+This Docker image is about 2 GB in size and relies on external LLM and embedding services.

```bash
git clone https://github.com/infiniflow/ragflow.git
cd ragflow/
pip3 install huggingface-hub nltk
python3 download_deps.py
-docker build -f Dockerfile.slim -t infiniflow/ragflow:dev-slim .
+docker build --build-arg LIGHTEN=1 --build-arg NEED_MIRROR=1 -f Dockerfile -t infiniflow/ragflow:nightly-slim .
```
## 🔧 Build a Docker image from source (with embedding models)

@@ -247,23 +253,23 @@ docker build -f Dockerfile.slim -t infiniflow/ragflow:dev-slim .
```bash
git clone https://github.com/infiniflow/ragflow.git
cd ragflow/
pip3 install huggingface-hub nltk
python3 download_deps.py
-docker build -f Dockerfile -t infiniflow/ragflow:dev .
+docker build --build-arg NEED_MIRROR=1 -f Dockerfile -t infiniflow/ragflow:nightly .
```
## 🔨 Launch the service from source

1. Install Poetry. Skip this step if it is already installed:
```bash
-curl -sSL https://install.python-poetry.org | python3 -
+pipx install poetry
+pipx inject poetry poetry-plugin-pypi-mirror
+export POETRY_VIRTUALENVS_CREATE=true POETRY_VIRTUALENVS_IN_PROJECT=true
+export POETRY_PYPI_MIRROR_URL=https://pypi.tuna.tsinghua.edu.cn/simple/
```

2. Download the source code and install Python dependencies:
```bash
git clone https://github.com/infiniflow/ragflow.git
cd ragflow/
-export POETRY_VIRTUALENVS_CREATE=true POETRY_VIRTUALENVS_IN_PROJECT=true
~/.local/bin/poetry install --sync --no-root # install RAGFlow dependent python modules
```

@@ -272,11 +278,10 @@ docker build -f Dockerfile -t infiniflow/ragflow:dev .

```bash
docker compose -f docker/docker-compose-base.yml up -d
```

-Add the following line to `/etc/hosts` to resolve all host addresses specified in **docker/service_conf.yaml** to `127.0.0.1`:
+Add the following line to `/etc/hosts` to resolve all host addresses specified in **conf/service_conf.yaml** to `127.0.0.1`:

```
127.0.0.1 es01 infinity mysql minio redis
```

In **docker/service_conf.yaml**, update the mysql port to `5455` and the es port to `1200`, matching the configuration in **docker/.env**.

4. If you cannot access HuggingFace, set the `HF_ENDPOINT` environment variable to the corresponding mirror site:

@@ -10,7 +10,7 @@ It is used to compose a complex work flow or agent.
And this graph goes beyond a DAG: we can use cycles to describe our agent or workflow.
This folder provides a test tool, ./test/client.py, which can run DSL files such as the JSON files in ./test/dsl_examples.
Run this client from the same folder in which you start RAGFlow. If RAGFlow runs in Docker, enter the container before running the client.
-Otherwise, a correct configuration in conf/service_conf.yaml is essential.
+Otherwise, a correct configuration in service_conf.yaml is essential.

```bash
PYTHONPATH=path/to/ragflow python graph/test/client.py -h
```

@@ -11,7 +11,7 @@
Under this folder we provide a test tool, ./test/client.py,
which can test DSL files like those under ./test/dsl_examples.
Run this client from the same folder in which you start RAGFlow. If RAGFlow runs in Docker, enter the container before running the client.
-Otherwise, correctly configuring the conf/service_conf.yaml file is essential.
+Otherwise, correctly configuring the service_conf.yaml file is essential.

```bash
PYTHONPATH=path/to/ragflow python graph/test/client.py -h
```

@@ -0,0 +1,2 @@
+from beartype.claw import beartype_this_package
+beartype_this_package()

@@ -21,6 +21,7 @@ from functools import partial
from agent.component import component_class
from agent.component.base import ComponentBase


class Canvas(ABC):
    """
    dsl = {

@@ -133,7 +134,8 @@ class Canvas(ABC):
            "components": {}
        }
        for k in self.dsl.keys():
-           if k in ["components"]:continue
+           if k in ["components"]:
+               continue
            dsl[k] = deepcopy(self.dsl[k])

        for k, cpn in self.components.items():

@@ -158,7 +160,8 @@ class Canvas(ABC):

    def get_compnent_name(self, cid):
        for n in self.dsl["graph"]["nodes"]:
-           if cid == n["id"]: return n["data"]["name"]
+           if cid == n["id"]:
+               return n["data"]["name"]
        return ""

    def run(self, **kwargs):

@@ -173,7 +176,8 @@ class Canvas(ABC):
            if kwargs.get("stream"):
                for an in ans():
                    yield an
-           else: yield ans
+           else:
+               yield ans
            return

        if not self.path:

@@ -181,6 +185,7 @@ class Canvas(ABC):
            self.path.append(["begin"])

        self.path.append([])

        ran = -1
        waiting = []
        without_dependent_checking = []

@@ -188,7 +193,8 @@ class Canvas(ABC):
        def prepare2run(cpns):
            nonlocal ran, ans
            for c in cpns:
-               if self.path[-1] and c == self.path[-1][-1]: continue
+               if self.path[-1] and c == self.path[-1][-1]:
+                   continue
                cpn = self.components[c]["obj"]
                if cpn.component_name == "Answer":
                    self.answer.append(c)

@@ -197,10 +203,17 @@ class Canvas(ABC):
                if c not in without_dependent_checking:
                    cpids = cpn.get_dependent_components()
                    if any([cc not in self.path[-1] for cc in cpids]):
-                       if c not in waiting: waiting.append(c)
+                       if c not in waiting:
+                           waiting.append(c)
                        continue
                yield "*'{}'* is running...🕞".format(self.get_compnent_name(c))
-               ans = cpn.run(self.history, **kwargs)
+               try:
+                   ans = cpn.run(self.history, **kwargs)
+               except Exception as e:
+                   logging.exception(f"Canvas.run got exception: {e}")
+                   self.path[-1].append(c)
+                   ran += 1
+                   raise e
                self.path[-1].append(c)
                ran += 1

@@ -211,29 +224,23 @@ class Canvas(ABC):
            logging.debug(f"Canvas.run: {ran} {self.path}")
            cpn_id = self.path[-1][ran]
            cpn = self.get_component(cpn_id)
-           if not cpn["downstream"]: break
+           if not cpn["downstream"]:
+               break

            loop = self._find_loop()
-           if loop: raise OverflowError(f"Too much loops: {loop}")
+           if loop:
+               raise OverflowError(f"Too much loops: {loop}")

            if cpn["obj"].component_name.lower() in ["switch", "categorize", "relevant"]:
                switch_out = cpn["obj"].output()[1].iloc[0, 0]
                assert switch_out in self.components, \
                    "{}'s output: {} not valid.".format(cpn_id, switch_out)
-               try:
-                   for m in prepare2run([switch_out]):
-                       yield {"content": m, "running_status": True}
-               except Exception as e:
-                   yield {"content": "*Exception*: {}".format(e), "running_status": True}
-                   logging.exception("Canvas.run got exception")
+               for m in prepare2run([switch_out]):
+                   yield {"content": m, "running_status": True}
                continue

-           try:
-               for m in prepare2run(cpn["downstream"]):
-                   yield {"content": m, "running_status": True}
-           except Exception as e:
-               yield {"content": "*Exception*: {}".format(e), "running_status": True}
-               logging.exception("Canvas.run got exception")
+           for m in prepare2run(cpn["downstream"]):
+               yield {"content": m, "running_status": True}

            if ran >= len(self.path[-1]) and waiting:
                without_dependent_checking = waiting

@@ -283,19 +290,22 @@ class Canvas(ABC):

    def _find_loop(self, max_loops=6):
        path = self.path[-1][::-1]
-       if len(path) < 2: return False
+       if len(path) < 2:
+           return False

        for i in range(len(path)):
            if path[i].lower().find("answer") >= 0:
                path = path[:i]
                break

-       if len(path) < 2: return False
+       if len(path) < 2:
+           return False

-       for l in range(2, len(path) // 2):
-           pat = ",".join(path[0:l])
+       for loc in range(2, len(path) // 2):
+           pat = ",".join(path[0:loc])
            path_str = ",".join(path)
-           if len(pat) >= len(path_str): return False
+           if len(pat) >= len(path_str):
+               return False
            loop = max_loops
            while path_str.find(pat) == 0 and loop >= 0:
                loop -= 1

@@ -303,10 +313,23 @@ class Canvas(ABC):
                    return False
                path_str = path_str[len(pat)+1:]
            if loop < 0:
-               pat = " => ".join([p.split(":")[0] for p in path[0:l]])
+               pat = " => ".join([p.split(":")[0] for p in path[0:loc]])
                return pat + " => " + pat

        return False

    def get_prologue(self):
        return self.components["begin"]["obj"]._param.prologue

+   def set_global_param(self, **kwargs):
+       for k, v in kwargs.items():
+           for q in self.components["begin"]["obj"]._param.query:
+               if k != q["key"]:
+                   continue
+               q["value"] = v
+
+   def get_preset_param(self):
+       return self.components["begin"]["obj"]._param.query
+
+   def get_component_input_elements(self, cpnnm):
+       return self.components[cpnnm]["obj"].get_input_elements()

@@ -31,9 +31,81 @@ from .akshare import AkShare, AkShareParam
from .crawler import Crawler, CrawlerParam
from .invoke import Invoke, InvokeParam
from .template import Template, TemplateParam
+from .email import Email, EmailParam


def component_class(class_name):
    m = importlib.import_module("agent.component")
    c = getattr(m, class_name)
    return c

+__all__ = [
+    "Begin",
+    "BeginParam",
+    "Generate",
+    "GenerateParam",
+    "Retrieval",
+    "RetrievalParam",
+    "Answer",
+    "AnswerParam",
+    "Categorize",
+    "CategorizeParam",
+    "Switch",
+    "SwitchParam",
+    "Relevant",
+    "RelevantParam",
+    "Message",
+    "MessageParam",
+    "RewriteQuestion",
+    "RewriteQuestionParam",
+    "KeywordExtract",
+    "KeywordExtractParam",
+    "Concentrator",
+    "ConcentratorParam",
+    "Baidu",
+    "BaiduParam",
+    "DuckDuckGo",
+    "DuckDuckGoParam",
+    "Wikipedia",
+    "WikipediaParam",
+    "PubMed",
+    "PubMedParam",
+    "ArXiv",
+    "ArXivParam",
+    "Google",
+    "GoogleParam",
+    "Bing",
+    "BingParam",
+    "GoogleScholar",
+    "GoogleScholarParam",
+    "DeepL",
+    "DeepLParam",
+    "GitHub",
+    "GitHubParam",
+    "BaiduFanyi",
+    "BaiduFanyiParam",
+    "QWeather",
+    "QWeatherParam",
+    "ExeSQL",
+    "ExeSQLParam",
+    "YahooFinance",
+    "YahooFinanceParam",
+    "WenCai",
+    "WenCaiParam",
+    "Jin10",
+    "Jin10Param",
+    "TuShare",
+    "TuShareParam",
+    "AkShare",
+    "AkShareParam",
+    "Crawler",
+    "CrawlerParam",
+    "Invoke",
+    "InvokeParam",
+    "Template",
+    "TemplateParam",
+    "Email",
+    "EmailParam",
+    "component_class"
+]

@@ -37,6 +37,7 @@ class ComponentParamBase(ABC):
        self.message_history_window_size = 22
        self.query = []
        self.inputs = []
+       self.debug_inputs = []

    def set_name(self, name: str):
        self._name = name

@@ -410,6 +411,7 @@ class ComponentBase(ABC):
    def run(self, history, **kwargs):
        logging.debug("{}, history: {}, kwargs: {}".format(self, json.dumps(history, ensure_ascii=False),
                                                           json.dumps(kwargs, ensure_ascii=False)))
+       self._param.debug_inputs = []
        try:
            res = self._run(history, **kwargs)
            self.set_output(res)

@@ -425,7 +427,8 @@ class ComponentBase(ABC):
    def output(self, allow_partial=True) -> Tuple[str, Union[pd.DataFrame, partial]]:
        o = getattr(self._param, self._param.output_var_name)
        if not isinstance(o, partial) and not isinstance(o, pd.DataFrame):
-           if not isinstance(o, list): o = [o]
+           if not isinstance(o, list):
+               o = [o]
            o = pd.DataFrame(o)

        if allow_partial or not isinstance(o, partial):

@@ -437,17 +440,21 @@ class ComponentBase(ABC):
            for oo in o():
                if not isinstance(oo, pd.DataFrame):
                    outs = pd.DataFrame(oo if isinstance(oo, list) else [oo])
-               else: outs = oo
+               else:
+                   outs = oo
        return self._param.output_var_name, outs

    def reset(self):
        setattr(self._param, self._param.output_var_name, None)
        self._param.inputs = []

-   def set_output(self, v: partial | pd.DataFrame):
+   def set_output(self, v):
        setattr(self._param, self._param.output_var_name, v)

    def get_input(self):
+       if self._param.debug_inputs:
+           return pd.DataFrame([{"content": v["value"]} for v in self._param.debug_inputs])

        reversed_cpnts = []
        if len(self._canvas.path) > 1:
            reversed_cpnts.extend(self._canvas.path[-2])

@@ -457,7 +464,7 @@ class ComponentBase(ABC):
        self._param.inputs = []
        outs = []
        for q in self._param.query:
-           if q["component_id"]:
+           if q.get("component_id"):
                if q["component_id"].split("@")[0].lower().find("begin") >= 0:
                    cpn_id, key = q["component_id"].split("@")
                    for p in self._canvas.get_component(cpn_id)["obj"]._param.query:

@@ -474,18 +481,20 @@ class ComponentBase(ABC):
                    self._param.inputs.append({"component_id": q["component_id"],
                                               "content": "\n".join(
                                                   [str(d["content"]) for d in outs[-1].to_dict('records')])})
-           elif q["value"]:
+           elif q.get("value"):
                self._param.inputs.append({"component_id": None, "content": q["value"]})
                outs.append(pd.DataFrame([{"content": q["value"]}]))
        if outs:
            df = pd.concat(outs, ignore_index=True)
-           if "content" in df: df = df.drop_duplicates(subset=['content']).reset_index(drop=True)
+           if "content" in df:
+               df = df.drop_duplicates(subset=['content']).reset_index(drop=True)
            return df

        upstream_outs = []

        for u in reversed_cpnts[::-1]:
-           if self.get_component_name(u) in ["switch", "concentrator"]: continue
+           if self.get_component_name(u) in ["switch", "concentrator"]:
+               continue
            if self.component_name.lower() == "generate" and self.get_component_name(u) == "retrieval":
                o = self._canvas.get_component(u)["obj"].output(allow_partial=False)[1]
                if o is not None:

@@ -522,6 +531,22 @@ class ComponentBase(ABC):

        return df

+   def get_input_elements(self):
+       assert self._param.query, "Please identify input parameters firstly."
+       eles = []
+       for q in self._param.query:
+           if q.get("component_id"):
+               cpn_id = q["component_id"]
+               if cpn_id.split("@")[0].lower().find("begin") >= 0:
+                   cpn_id, key = cpn_id.split("@")
+                   eles.extend(self._canvas.get_component(cpn_id)["obj"]._param.query)
+                   continue
+
+               eles.append({"name": self._canvas.get_compnent_name(cpn_id), "key": cpn_id})
+           else:
+               eles.append({"key": q["value"], "name": q["value"], "value": q["value"]})
+       return eles
+
    def get_stream_input(self):
        reversed_cpnts = []
        if len(self._canvas.path) > 1:

@@ -529,7 +554,8 @@ class ComponentBase(ABC):
            reversed_cpnts.extend(self._canvas.path[-1])

        for u in reversed_cpnts[::-1]:
-           if self.get_component_name(u) in ["switch", "answer"]: continue
+           if self.get_component_name(u) in ["switch", "answer"]:
+               continue
            return self._canvas.get_component(u)["obj"].output()[1]

    @staticmethod

@@ -538,3 +564,6 @@ class ComponentBase(ABC):

    def get_component_name(self, cpn_id):
        return self._canvas.get_component(cpn_id)["obj"].component_name.lower()

+   def debug(self, **kwargs):
+       return self._run([], **kwargs)

@@ -26,6 +26,7 @@ class BeginParam(ComponentParamBase):
    def __init__(self):
        super().__init__()
        self.prologue = "Hi! I'm your smart assistant. What can I do for you?"
+       self.query = []

    def check(self):
        return True

@@ -42,7 +43,7 @@ class Begin(ComponentBase):
    def stream_output(self):
        res = {"content": self._param.prologue}
        yield res
-       self.set_output(res)
+       self.set_output(self.be_output(res))

@@ -34,15 +34,18 @@ class CategorizeParam(GenerateParam):
        super().check()
        self.check_empty(self.category_description, "[Categorize] Category examples")
        for k, v in self.category_description.items():
-           if not k: raise ValueError("[Categorize] Category name can not be empty!")
-           if not v.get("to"): raise ValueError(f"[Categorize] 'To' of category {k} can not be empty!")
+           if not k:
+               raise ValueError("[Categorize] Category name can not be empty!")
+           if not v.get("to"):
+               raise ValueError(f"[Categorize] 'To' of category {k} can not be empty!")

    def get_prompt(self):
        cate_lines = []
        for c, desc in self.category_description.items():
-           for l in desc.get("examples", "").split("\n"):
-               if not l: continue
-               cate_lines.append("Question: {}\tCategory: {}".format(l, c))
+           for line in desc.get("examples", "").split("\n"):
+               if not line:
+                   continue
+               cate_lines.append("Question: {}\tCategory: {}".format(line, c))
        descriptions = []
        for c, desc in self.category_description.items():
            if desc.get("description"):

@@ -84,4 +87,8 @@ class Categorize(Generate, ABC):

        return Categorize.be_output(list(self._param.category_description.items())[-1][1]["to"])

+   def debug(self, **kwargs):
+       df = self._run([], **kwargs)
+       cpn_id = df.iloc[0, 0]
+       return Categorize.be_output(self._canvas.get_compnent_name(cpn_id))

@@ -14,7 +14,6 @@
# limitations under the License.
#
from abc import ABC
-import re
from agent.component.base import ComponentBase, ComponentParamBase
import deepl

agent/component/email.py (new file)

@@ -0,0 +1,138 @@
#
#  Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
#
#  Licensed under the Apache License, Version 2.0 (the "License");
#  you may not use this file except in compliance with the License.
#  You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
#  Unless required by applicable law or agreed to in writing, software
#  distributed under the License is distributed on an "AS IS" BASIS,
#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#  See the License for the specific language governing permissions and
#  limitations under the License.
#

from abc import ABC
import json
import smtplib
import logging
from email.mime.text import MIMEText
from email.mime.multipart import MIMEMultipart
from email.header import Header
from email.utils import formataddr
from agent.component.base import ComponentBase, ComponentParamBase


class EmailParam(ComponentParamBase):
    """
    Define the Email component parameters.
    """
    def __init__(self):
        super().__init__()
        # Fixed configuration parameters
        self.smtp_server = ""  # SMTP server address
        self.smtp_port = 465  # SMTP port
        self.email = ""  # Sender email
        self.password = ""  # Email authorization code
        self.sender_name = ""  # Sender name

    def check(self):
        # Check required parameters
        self.check_empty(self.smtp_server, "SMTP Server")
        self.check_empty(self.email, "Email")
        self.check_empty(self.password, "Password")
        self.check_empty(self.sender_name, "Sender Name")


class Email(ComponentBase, ABC):
    component_name = "Email"

    def _run(self, history, **kwargs):
        # Get upstream component output and parse JSON
        ans = self.get_input()
        content = "".join(ans["content"]) if "content" in ans else ""
        if not content:
            return Email.be_output("No content to send")

        success = False
        try:
            # Parse JSON string passed from upstream
            email_data = json.loads(content)

            # Validate required fields
            if "to_email" not in email_data:
                return Email.be_output("Missing required field: to_email")

            # Create email object
            msg = MIMEMultipart('alternative')

            # Properly handle sender name encoding
            msg['From'] = formataddr((str(Header(self._param.sender_name, 'utf-8')), self._param.email))
            msg['To'] = email_data["to_email"]
            if "cc_email" in email_data and email_data["cc_email"]:
                msg['Cc'] = email_data["cc_email"]
            msg['Subject'] = Header(email_data.get("subject", "No Subject"), 'utf-8').encode()

            # Use content from email_data or default content
            email_content = email_data.get("content", "No content provided")
            # msg.attach(MIMEText(email_content, 'plain', 'utf-8'))
            msg.attach(MIMEText(email_content, 'html', 'utf-8'))

            # Connect to SMTP server and send
            logging.info(f"Connecting to SMTP server {self._param.smtp_server}:{self._param.smtp_port}")

            context = smtplib.ssl.create_default_context()
            with smtplib.SMTP_SSL(self._param.smtp_server, self._param.smtp_port, context=context) as server:
                # Login
                logging.info(f"Attempting to login with email: {self._param.email}")
                server.login(self._param.email, self._param.password)

                # Get all recipient list
                recipients = [email_data["to_email"]]
                if "cc_email" in email_data and email_data["cc_email"]:
                    recipients.extend(email_data["cc_email"].split(','))

                # Send email
                logging.info(f"Sending email to recipients: {recipients}")
                try:
                    server.send_message(msg, self._param.email, recipients)
                    success = True
                except Exception as e:
                    logging.error(f"Error during send_message: {str(e)}")
                    # Try alternative method
                    server.sendmail(self._param.email, recipients, msg.as_string())
                    success = True

                try:
                    server.quit()
                except Exception as e:
                    # Ignore errors when closing connection
                    logging.warning(f"Non-fatal error during connection close: {str(e)}")

            if success:
                return Email.be_output("Email sent successfully")

        except json.JSONDecodeError:
            error_msg = "Invalid JSON format in input"
            logging.error(error_msg)
            return Email.be_output(error_msg)

        except smtplib.SMTPAuthenticationError:
            error_msg = "SMTP Authentication failed. Please check your email and authorization code."
            logging.error(error_msg)
            return Email.be_output(f"Failed to send email: {error_msg}")

        except smtplib.SMTPConnectError:
            error_msg = f"Failed to connect to SMTP server {self._param.smtp_server}:{self._param.smtp_port}"
            logging.error(error_msg)
            return Email.be_output(f"Failed to send email: {error_msg}")

        except smtplib.SMTPException as e:
            error_msg = f"SMTP error occurred: {str(e)}"
            logging.error(error_msg)
            return Email.be_output(f"Failed to send email: {error_msg}")

        except Exception as e:
            error_msg = f"Unexpected error: {str(e)}"
            logging.error(error_msg)
            return Email.be_output(f"Failed to send email: {error_msg}")

@@ -19,7 +19,8 @@ import pandas as pd
import pymysql
import psycopg2
from agent.component.base import ComponentBase, ComponentParamBase
+import pyodbc
+import logging


class ExeSQLParam(ComponentParamBase):
    """

@@ -38,7 +39,7 @@ class ExeSQLParam(ComponentParamBase):
        self.top_n = 30

    def check(self):
-       self.check_valid_value(self.db_type, "Choose DB type", ['mysql', 'postgresql', 'mariadb'])
+       self.check_valid_value(self.db_type, "Choose DB type", ['mysql', 'postgresql', 'mariadb', 'mssql'])
        self.check_empty(self.database, "Database name")
        self.check_empty(self.username, "database username")
        self.check_empty(self.host, "IP Address")

@@ -46,8 +47,10 @@ class ExeSQLParam(ComponentParamBase):
        self.check_empty(self.password, "Database password")
        self.check_positive_integer(self.top_n, "Number of records")
        if self.database == "rag_flow":
-           if self.host == "ragflow-mysql": raise ValueError("The host is not accessible.")
-           if self.password == "infini_rag_flow": raise ValueError("The host is not accessible.")
+           if self.host == "ragflow-mysql":
+               raise ValueError("The host is not accessible.")
+           if self.password == "infini_rag_flow":
+               raise ValueError("The host is not accessible.")


class ExeSQL(ComponentBase, ABC):

@@ -62,20 +65,41 @@ class ExeSQL(ComponentBase, ABC):
            self._loop += 1

        ans = self.get_input()
-       ans = "".join(ans["content"]) if "content" in ans else ""
-       ans = re.sub(r'^.*?SELECT ', 'SELECT ', repr(ans), flags=re.IGNORECASE)
+       ans = "".join([str(a) for a in ans["content"]]) if "content" in ans else ""
+       if self._param.db_type == 'mssql':
+           # improve the information extraction, most llm return results in markdown format ```sql query ```
+           match = re.search(r"```sql\s*(.*?)\s*```", ans, re.DOTALL)
+           if match:
+               ans = match.group(1)  # Query content
+               print(ans)
+           else:
+               print("no markdown")
+           ans = re.sub(r'^.*?SELECT ', 'SELECT ', (ans), flags=re.IGNORECASE)
+       else:
+           ans = re.sub(r'^.*?SELECT ', 'SELECT ', repr(ans), flags=re.IGNORECASE)
        ans = re.sub(r';.*?SELECT ', '; SELECT ', ans, flags=re.IGNORECASE)
        ans = re.sub(r';[^;]*$', r';', ans)
        if not ans:
            raise Exception("SQL statement not found!")

+       logging.info("db_type: %s", self._param.db_type)
        if self._param.db_type in ["mysql", "mariadb"]:
            db = pymysql.connect(db=self._param.database, user=self._param.username, host=self._param.host,
                                 port=self._param.port, password=self._param.password)
        elif self._param.db_type == 'postgresql':
            db = psycopg2.connect(dbname=self._param.database, user=self._param.username, host=self._param.host,
                                  port=self._param.port, password=self._param.password)
+       elif self._param.db_type == 'mssql':
+           conn_str = (
+               r'DRIVER={ODBC Driver 17 for SQL Server};'
+               r'SERVER=' + self._param.host + ',' + str(self._param.port) + ';'
+               r'DATABASE=' + self._param.database + ';'
+               r'UID=' + self._param.username + ';'
+               r'PWD=' + self._param.password
+           )
+           db = pyodbc.connect(conn_str)
        try:
            cursor = db.cursor()
        except Exception as e:

@@ -85,11 +109,12 @@ class ExeSQL(ComponentBase, ABC):
            if not single_sql:
                continue
            try:
+               logging.info("single_sql: %s", single_sql)
                cursor.execute(single_sql)
                if cursor.rowcount == 0:
                    sql_res.append({"content": "\nTotal: 0\n No record in the database!"})
                    continue
-               single_res = pd.DataFrame([i for i in cursor.fetchmany(size=self._param.top_n)])
+               single_res = pd.DataFrame([i for i in cursor.fetchmany(self._param.top_n)])
                single_res.columns = [i[0] for i in cursor.description]
                sql_res.append({"content": "\nTotal: " + str(cursor.rowcount) + "\n" + single_res.to_markdown()})
            except Exception as e:

@@ -17,6 +17,7 @@ import re
from functools import partial
import pandas as pd
from api.db import LLMType
+from api.db.services.conversation_service import structure_answer
from api.db.services.dialog_service import message_fit_in
from api.db.services.llm_service import LLMBundle
from api import settings

@@ -51,11 +52,16 @@ class GenerateParam(ComponentParamBase):

    def gen_conf(self):
        conf = {}
-       if self.max_tokens > 0: conf["max_tokens"] = self.max_tokens
-       if self.temperature > 0: conf["temperature"] = self.temperature
-       if self.top_p > 0: conf["top_p"] = self.top_p
-       if self.presence_penalty > 0: conf["presence_penalty"] = self.presence_penalty
-       if self.frequency_penalty > 0: conf["frequency_penalty"] = self.frequency_penalty
+       if self.max_tokens > 0:
+           conf["max_tokens"] = self.max_tokens
+       if self.temperature > 0:
+           conf["temperature"] = self.temperature
+       if self.top_p > 0:
+           conf["top_p"] = self.top_p
+       if self.presence_penalty > 0:
+           conf["presence_penalty"] = self.presence_penalty
+       if self.frequency_penalty > 0:
+           conf["frequency_penalty"] = self.frequency_penalty
        return conf

@@ -83,7 +89,8 @@ class Generate(ComponentBase):
        recall_docs = []
        for i in idx:
            did = retrieval_res.loc[int(i), "doc_id"]
-           if did in doc_ids: continue
+           if did in doc_ids:
+               continue
            doc_ids.add(did)
            recall_docs.append({"doc_id": did, "doc_name": retrieval_res.loc[int(i), "docnm_kwd"]})

@@ -96,11 +103,18 @@ class Generate(ComponentBase):
        }

        if answer.lower().find("invalid key") >= 0 or answer.lower().find("invalid api") >= 0:
-           answer += " Please set LLM API-Key in 'User Setting -> Model Providers -> API-Key'"
+           answer += " Please set LLM API-Key in 'User Setting -> Model providers -> API-Key'"
        res = {"content": answer, "reference": reference}
+       res = structure_answer(None, res, "", "")

        return res

+   def get_input_elements(self):
+       if self._param.parameters:
+           return [{"key": "user", "name": "User"}, *self._param.parameters]
+
+       return [{"key": "user", "name": "User"}]
+
    def _run(self, history, **kwargs):
        chat_mdl = LLMBundle(self._canvas.get_tenant_id(), LLMType.CHAT, self._param.llm_id)
        prompt = self._param.prompt

@@ -108,7 +122,8 @@ class Generate(ComponentBase):
        retrieval_res = []
        self._param.inputs = []
        for para in self._param.parameters:
-           if not para.get("component_id"): continue
+           if not para.get("component_id"):
+               continue
            component_id = para["component_id"].split("@")[0]
            if para["component_id"].lower().find("@") >= 0:
                cpn_id, key = para["component_id"].split("@")

@@ -142,7 +157,8 @@ class Generate(ComponentBase):

        if retrieval_res:
            retrieval_res = pd.concat(retrieval_res, ignore_index=True)
-       else: retrieval_res = pd.DataFrame([])
+       else:
+           retrieval_res = pd.DataFrame([])

        for n, v in kwargs.items():
            prompt = re.sub(r"\{%s\}" % re.escape(n), str(v).replace("\\", " "), prompt)

@@ -164,9 +180,11 @@ class Generate(ComponentBase):
            return pd.DataFrame([res])

        msg = self._canvas.get_history(self._param.message_history_window_size)
-       if len(msg) < 1: msg.append({"role": "user", "content": ""})
+       if len(msg) < 1:
+           msg.append({"role": "user", "content": ""})
        _, msg = message_fit_in([{"role": "system", "content": prompt}, *msg], int(chat_mdl.max_length * 0.97))
-       if len(msg) < 2: msg.append({"role": "user", "content": ""})
+       if len(msg) < 2:
+           msg.append({"role": "user", "content": ""})
        ans = chat_mdl.chat(msg[0]["content"], msg[1:], self._param.gen_conf())

        if self._param.cite and "content_ltks" in retrieval_res.columns and "vector" in retrieval_res.columns:

@@ -185,9 +203,11 @@ class Generate(ComponentBase):
            return

        msg = self._canvas.get_history(self._param.message_history_window_size)
-       if len(msg) < 1: msg.append({"role": "user", "content": ""})
+       if len(msg) < 1:
+           msg.append({"role": "user", "content": ""})
        _, msg = message_fit_in([{"role": "system", "content": prompt}, *msg], int(chat_mdl.max_length * 0.97))
-       if len(msg) < 2: msg.append({"role": "user", "content": ""})
+       if len(msg) < 2:
+           msg.append({"role": "user", "content": ""})
        answer = ""
        for ans in chat_mdl.chat_streamly(msg[0]["content"], msg[1:], self._param.gen_conf()):
            res = {"content": ans, "reference": []}

@@ -198,4 +218,17 @@ class Generate(ComponentBase):
            res = self.set_cite(retrieval_res, answer)
            yield res

-       self.set_output(res)
+       self.set_output(Generate.be_output(res))

+   def debug(self, **kwargs):
+       chat_mdl = LLMBundle(self._canvas.get_tenant_id(), LLMType.CHAT, self._param.llm_id)
+       prompt = self._param.prompt
+
+       for para in self._param.debug_inputs:
+           kwargs[para["key"]] = para.get("value", "")
+
+       for n, v in kwargs.items():
+           prompt = re.sub(r"\{%s\}" % re.escape(n), str(v).replace("\\", " "), prompt)
+
+       ans = chat_mdl.chat(prompt, [{"role": "user", "content": kwargs.get("user", "")}], self._param.gen_conf())
+       return pd.DataFrame([ans])

@@ -60,3 +60,6 @@ class KeywordExtract(Generate, ABC):
        ans = re.sub(r".*keyword:", "", ans).strip()
        logging.debug(f"ans: {ans}")
        return KeywordExtract.be_output(ans)

+   def debug(self, **kwargs):
+       return self._run([], **kwargs)

@@ -78,4 +78,6 @@ class Relevant(Generate, ABC):
            return Relevant.be_output(self._param.no)
        assert False, f"Relevant component got: {ans}"

+   def debug(self, **kwargs):
+       return self._run([], **kwargs)

@@ -95,7 +95,8 @@ class RewriteQuestion(Generate, ABC):
        hist = self._canvas.get_history(4)
        conv = []
        for m in hist:
-           if m["role"] not in ["user", "assistant"]: continue
+           if m["role"] not in ["user", "assistant"]:
+               continue
            conv.append("{}: {}".format(m["role"].upper(), m["content"]))
        conv = "\n".join(conv)

@@ -109,3 +110,4 @@ class RewriteQuestion(Generate, ABC):
        return RewriteQuestion.be_output(ans)

@@ -41,7 +41,8 @@ class SwitchParam(ComponentParamBase):
    def check(self):
        self.check_empty(self.conditions, "[Switch] conditions")
        for cond in self.conditions:
-           if not cond["to"]: raise ValueError(f"[Switch] 'To' can not be empty!")
+           if not cond["to"]:
+               raise ValueError("[Switch] 'To' can not be empty!")


class Switch(ComponentBase, ABC):

@@ -51,7 +52,8 @@ class Switch(ComponentBase, ABC):
        res = []
        for cond in self._param.conditions:
            for item in cond["items"]:
-               if not item["cpn_id"]: continue
+               if not item["cpn_id"]:
+                   continue
                if item["cpn_id"].find("begin") >= 0:
                    continue
                cid = item["cpn_id"].split("@")[0]

@@ -63,7 +65,8 @@ class Switch(ComponentBase, ABC):
        for cond in self._param.conditions:
            res = []
            for item in cond["items"]:
-               if not item["cpn_id"]:continue
+               if not item["cpn_id"]:
+                   continue
                cid = item["cpn_id"].split("@")[0]
                if item["cpn_id"].find("@") > 0:
                    cpn_id, key = item["cpn_id"].split("@")

@@ -107,22 +110,22 @@ class Switch(ComponentBase, ABC):
        elif operator == ">":
            try:
                return True if float(input) > float(value) else False
-           except Exception as e:
+           except Exception:
                return True if input > value else False
        elif operator == "<":
            try:
                return True if float(input) < float(value) else False
-           except Exception as e:
+           except Exception:
                return True if input < value else False
        elif operator == "≥":
            try:
                return True if float(input) >= float(value) else False
-           except Exception as e:
+           except Exception:
                return True if input >= value else False
        elif operator == "≤":
            try:
                return True if float(input) <= float(value) else False
-           except Exception as e:
+           except Exception:
                return True if input <= value else False

        raise ValueError('Not supported operator' + operator)

@@ -47,7 +47,8 @@ class Template(ComponentBase):

        self._param.inputs = []
        for para in self._param.parameters:
-           if not para.get("component_id"): continue
+           if not para.get("component_id"):
+               continue
            component_id = para["component_id"].split("@")[0]
            if para["component_id"].lower().find("@") >= 0:
                cpn_id, key = para["component_id"].split("@")
@ -620,7 +620,7 @@
|
||||
"text": "Searches for description about meanings of tables and fields."
|
||||
},
|
||||
"label": "Note",
|
||||
"name": "N:DB Desctription"
|
||||
"name": "N:DB Description"
|
||||
},
|
||||
"dragging": false,
|
||||
"height": 128,
|
||||
@ -679,7 +679,7 @@
|
||||
{
|
||||
"data": {
|
||||
"form": {
|
||||
"text": "DDL(Data Definition Language).\n\nSearches for relevent database creation statements.\n\nIt should bind with a KB to which DDL is dumped in.\nYou could use 'General' as parsing method and ';' as delimiter."
|
||||
"text": "DDL(Data Definition Language).\n\nSearches for relevant database creation statements.\n\nIt should bind with a KB to which DDL is dumped in.\nYou could use 'General' as parsing method and ';' as delimiter."
|
||||
},
|
||||
"label": "Note",
|
||||
"name": "N: DDL"
|
||||
|
||||
@ -90,7 +90,7 @@
|
||||
"message_history_window_size": 12,
|
||||
"parameters": [],
|
||||
"presence_penalty": 0.4,
|
||||
"prompt": "Role: You are a customer support. \n\nTask: Please answer the question based on content of knowledge base. \n\nReuirements & restrictions:\n - DO NOT make things up when all knowledge base content is irrelevant to the question. \n - Answers need to consider chat history.\n - Request about customer's contact information like, Wechat number, LINE number, twitter, discord, etc,. , when knowlegebase content can't answer his question. So, product expert could contact him soon to solve his problem.\n\n Knowledge base content is as following:\n {input}\n The above is the content of knowledge base.",
|
||||
"prompt": "Role: You are a customer support. \n\nTask: Please answer the question based on content of knowledge base. \n\nRequirements & restrictions:\n - DO NOT make things up when all knowledge base content is irrelevant to the question. \n - Answers need to consider chat history.\n - Request about customer's contact information like, Wechat number, LINE number, twitter, discord, etc,. , when knowledge base content can't answer his question. So, product expert could contact him soon to solve his problem.\n\n Knowledge base content is as following:\n {input}\n The above is the content of knowledge base.",
|
||||
"temperature": 0.1,
|
||||
"top_p": 0.3
|
||||
}
|
||||
|
||||
@ -70,8 +70,8 @@
|
||||
"to": "QWeather:DeepKiwisTeach"
|
||||
},
|
||||
"2. finance": {
|
||||
"description": "Question is about finace/economic information, stock market, economic news.",
|
||||
"examples": "昨日涨幅大于5%的军工股?\nStocks have MACD buyin signals?\nWhen is the next interest rate cut by the Federal Reserve?\n国家救市都有哪些举措?",
|
||||
"description": "Question is about finance/economic information, stock market, economic news.",
|
||||
"examples": "Stocks have MACD buy signals?\nWhen is the next interest rate cut by the Federal Reserve?\n",
|
||||
"to": "Concentrator:TrueGeckosSlide"
|
||||
},
|
||||
"3. medical": {
|
||||
@ -268,7 +268,7 @@
|
||||
"message_history_window_size": 12,
|
||||
"parameters": [],
|
||||
"presence_penalty": 0.4,
|
||||
"prompt": "Role: You‘re warm-hearted lovely young girl, 22 years old, located at Shanghai in China. Your name is R. Who are talking to you is your very good old friend of yours.\n\nTask: \n- Chat with the friend.\n- Ask question and care about them.\n- Provide useful advice to your friend.\n- Tell jokes to make your firend happy.\n\nThe following is the weatcher information:\n{weather}",
|
||||
"prompt": "Role: You‘re warm-hearted lovely young girl, 22 years old, located at Shanghai in China. Your name is R. Who are talking to you is your very good old friend of yours.\n\nTask: \n- Chat with the friend.\n- Ask question and care about them.\n- Provide useful advice to your friend.\n- Tell jokes to make your friend happy.\n\nThe following is the weather information:\n{weather}",
|
||||
"temperature": 0.1,
|
||||
"top_p": 0.3
|
||||
}
|
||||
@ -497,7 +497,7 @@
|
||||
}
|
||||
],
|
||||
"presence_penalty": 0.4,
|
||||
"prompt": "Role: You‘re warm-hearted lovely young girl, 22 years old, located at Shanghai in China. Your name is R. Who are talking to you is your very good old friend of yours.\n\nTask: \n- Chat with the friend.\n- Ask question and care about them.\n- Tell your friend the weather if there's weather information provided. If your friend did not provide region information, ask about where he/she is.\n\nThe following is the weatcher information:\n{weather}\n",
|
||||
"prompt": "Role: You‘re warm-hearted lovely young girl, 22 years old, located at Shanghai in China. Your name is R. Who are talking to you is your very good old friend of yours.\n\nTask: \n- Chat with the friend.\n- Ask question and care about them.\n- Tell your friend the weather if there's weather information provided. If your friend did not provide region information, ask about where he/she is.\n\nThe following is the weather information:\n{weather}\n",
|
||||
"temperature": 0.1,
|
||||
"top_p": 0.3
|
||||
}
|
||||
@ -622,8 +622,8 @@
|
||||
"to": "QWeather:DeepKiwisTeach"
|
||||
},
|
||||
"2. finance": {
|
||||
"description": "Question is about finace/economic information, stock market, economic news.",
|
||||
"examples": "昨日涨幅大于5%的军工股?\nStocks have MACD buyin signals?\nWhen is the next interest rate cut by the Federal Reserve?\n国家救市都有哪些举措?",
|
||||
"description": "Question is about finance/economic information, stock market, economic news.",
|
||||
"examples": "Stocks have MACD buy signals?\nWhen is the next interest rate cut by the Federal Reserve?\n",
|
||||
"to": "Concentrator:TrueGeckosSlide"
|
||||
},
|
||||
"3. medical": {
|
||||
@ -927,7 +927,7 @@
|
||||
"parameters": [],
|
||||
"presencePenaltyEnabled": true,
|
||||
"presence_penalty": 0.4,
|
||||
"prompt": "Role: You‘re warm-hearted lovely young girl, 22 years old, located at Shanghai in China. Your name is R. Who are talking to you is your very good old friend of yours.\n\nTask: \n- Chat with the friend.\n- Ask question and care about them.\n- Provide useful advice to your friend.\n- Tell jokes to make your firend happy.\n\nThe following is the weatcher information:\n{weather}",
|
||||
"prompt": "Role: You‘re warm-hearted lovely young girl, 22 years old, located at Shanghai in China. Your name is R. Who are talking to you is your very good old friend of yours.\n\nTask: \n- Chat with the friend.\n- Ask question and care about them.\n- Provide useful advice to your friend.\n- Tell jokes to make your friend happy.\n\nThe following is the weather information:\n{weather}",
|
||||
"temperature": 0.1,
|
||||
"temperatureEnabled": true,
|
||||
"topPEnabled": true,
|
||||
@ -1011,7 +1011,7 @@
|
||||
"top_p": 0.3
|
||||
},
|
||||
"label": "Generate",
|
||||
"name": "tranlate to Chinese"
|
||||
"name": "translate to Chinese"
|
||||
},
|
||||
"dragging": false,
|
||||
"height": 86,
|
||||
@ -1276,7 +1276,7 @@
|
||||
],
|
||||
"presencePenaltyEnabled": true,
|
||||
"presence_penalty": 0.4,
|
||||
"prompt": "Role: You‘re warm-hearted lovely young girl, 22 years old, located at Shanghai in China. Your name is R. Who are talking to you is your very good old friend of yours.\n\nTask: \n- Chat with the friend.\n- Ask question and care about them.\n- Tell your friend the weather if there's weather information provided. If your friend did not provide region information, ask about where he/she is.\n\nThe following is the weatcher information:\n{weather}\n",
|
||||
"prompt": "Role: You‘re warm-hearted lovely young girl, 22 years old, located at Shanghai in China. Your name is R. Who are talking to you is your very good old friend of yours.\n\nTask: \n- Chat with the friend.\n- Ask question and care about them.\n- Tell your friend the weather if there's weather information provided. If your friend did not provide region information, ask about where he/she is.\n\nThe following is the weather information:\n{weather}\n",
|
||||
"temperature": 0.1,
|
||||
"temperatureEnabled": true,
|
||||
"topPEnabled": true,
|
||||
|
||||
@ -476,7 +476,7 @@
|
||||
"text": "Translation Agent: Agentic translation using reflection workflow\n\nThis is inspired by Andrew NG's project: https://github.com/andrewyng/translation-agent\n\n1. Prompt an LLM to translate a text into the target language;\n2. Have the LLM reflect on the translation and provide constructive suggestions for improvement;\n3. Use these suggestions to improve the translation."
|
||||
},
|
||||
"label": "Note",
|
||||
"name": "Breif"
|
||||
"name": "Brief"
|
||||
},
|
||||
"dragHandle": ".note-drag-handle",
|
||||
"dragging": false,
|
||||
|
||||
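The note above describes a three-step reflect-and-improve translation loop. A minimal sketch of that workflow, assuming a hypothetical prompt-in/text-out helper `llm` (not part of this repo's API; swap in any chat-completion client):

# Sketch of the reflection workflow from the "Brief" note; `llm` is a placeholder.
def translate_with_reflection(llm, text: str, target_lang: str) -> str:
    draft = llm(f"Translate the following text into {target_lang}:\n{text}")
    critique = llm(
        f"Here is a source text and its {target_lang} translation.\n"
        f"Source:\n{text}\nTranslation:\n{draft}\n"
        "List concrete suggestions to improve accuracy, fluency and style."
    )
    improved = llm(
        f"Rewrite the {target_lang} translation of:\n{text}\n"
        f"Current translation:\n{draft}\nSuggestions:\n{critique}\n"
        "Output only the improved translation."
    )
    return improved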
@@ -534,7 +534,7 @@
{
"data": {
"form": {
"text": "A prompt sumerize content from search result from PubMed and Q&A dataset."
"text": "A prompt summarize content from search result from PubMed and Q&A dataset."
},
"label": "Note",
"name": "N: LLM"

@@ -347,7 +347,7 @@
}
],
"presence_penalty": 0.4,
"prompt": "You are an SEO expert who writes in a direct, practical, educational style that is factual rather than storytelling or narrative, focusing on explaining to {audience} the \"how\" and \"what is\" and \u201cwhy\u201d rather than narrating to the audience. \n - Please write at a sixth grade reading level. \n - ONLY output in Markdown format.\n - Use positive, present tense expressions and avoid using complex words and sentence structures that lack narrative, such as \"reveal\" and \"dig deep.\"\n - Next, please continue writing articles related to our topic with a concise title, {title_0}{title} {keywords_0}{keywords}. \n - Please AVOID repeating what has already been written and do not use the same sentence structure. \n - JUST write the body of the article based on the outline.\n - DO NOT include introduction, title.\n - DO NOT miss anything mentioned in artical outline, except introduction and title.\n - Please use the information I provide to create in-depth, interesting and unique content. Also, incorporate the references and data points I provided earlier into the article to increase its value to the reader.\n - MUST be in language of \"{keywords_0} {title_0}\".\n\n<article_outline>\n{outline}\n\n<article_body>",
"prompt": "You are an SEO expert who writes in a direct, practical, educational style that is factual rather than storytelling or narrative, focusing on explaining to {audience} the \"how\" and \"what is\" and \u201cwhy\u201d rather than narrating to the audience. \n - Please write at a sixth grade reading level. \n - ONLY output in Markdown format.\n - Use positive, present tense expressions and avoid using complex words and sentence structures that lack narrative, such as \"reveal\" and \"dig deep.\"\n - Next, please continue writing articles related to our topic with a concise title, {title_0}{title} {keywords_0}{keywords}. \n - Please AVOID repeating what has already been written and do not use the same sentence structure. \n - JUST write the body of the article based on the outline.\n - DO NOT include introduction, title.\n - DO NOT miss anything mentioned in article outline, except introduction and title.\n - Please use the information I provide to create in-depth, interesting and unique content. Also, incorporate the references and data points I provided earlier into the article to increase its value to the reader.\n - MUST be in language of \"{keywords_0} {title_0}\".\n\n<article_outline>\n{outline}\n\n<article_body>",
"query": [],
"temperature": 0.1,
"top_p": 0.3

@@ -440,7 +440,7 @@
{
"data": {
"form": {
"text": "DDL(Data Definition Language).\n\nSearches for relevent database creation statements.\n\nIt should bind with a KB to which DDL is dumped in.\nYou could use 'General' as parsing method and ';' as delimiter."
"text": "DDL(Data Definition Language).\n\nSearches for relevant database creation statements.\n\nIt should bind with a KB to which DDL is dumped in.\nYou could use 'General' as parsing method and ';' as delimiter."
},
"label": "Note",
"name": "N: DDL"

@@ -577,7 +577,7 @@
"text": "Based on the keywords, searches on Wikipedia and returns the found content."
},
"label": "Note",
"name": "N: Wiukipedia"
"name": "N: Wikipedia"
},
"dragging": false,
"height": 128,

@@ -43,6 +43,7 @@ if __name__ == '__main__':
else:
print(ans["content"])

if DEBUG: print(canvas.path)
if DEBUG:
print(canvas.path)
question = input("\n==================== User =====================\n> ")
canvas.add_user_input(question)

@@ -0,0 +1,2 @@
from beartype.claw import beartype_this_package
beartype_this_package()
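The two-line file added above is a package `__init__` hook: `beartype_this_package()` from beartype's claw API switches on runtime type checking for every module of the package as it is imported. For a single function, the effect is roughly what hand-decorating gives you; a small sketch, assuming beartype is installed:

# Hand-decorated equivalent of what beartype_this_package() applies package-wide.
from beartype import beartype

@beartype
def add(a: int, b: int) -> int:
    return a + b

add(1, 2)       # fine
# add(1, "2")   # would raise a beartype type-check error at call time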
@@ -45,7 +45,7 @@ from agent.canvas import Canvas
from functools import partial


@manager.route('/new_token', methods=['POST'])
@manager.route('/new_token', methods=['POST'])  # noqa: F821
@login_required
def new_token():
req = request.json
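The recurring edit in this and the following hunks is cosmetic for the linter: `manager` is injected into each app module's namespace at load time rather than imported, so flake8 reports it as undefined (F821), and the trailing `# noqa: F821` silences that one warning per decorator. A sketch of the kind of loader that makes `manager` visible without an import, assuming a Flask `Blueprint` (the exact loader in this repo may differ):

# Sketch: executing a route module with `manager` pre-injected, so the module
# itself never imports it -- which is why flake8 flags F821 without the noqa.
import importlib.util
from flask import Blueprint

def load_app_module(path: str, name: str) -> Blueprint:
    manager = Blueprint(name, name)
    spec = importlib.util.spec_from_file_location(name, path)
    module = importlib.util.module_from_spec(spec)
    module.manager = manager          # inject before execution
    spec.loader.exec_module(module)   # decorators see `manager` as a global
    return manager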
@@ -75,7 +75,7 @@ def new_token():
return server_error_response(e)


@manager.route('/token_list', methods=['GET'])
@manager.route('/token_list', methods=['GET'])  # noqa: F821
@login_required
def token_list():
try:
@@ -90,7 +90,7 @@ def token_list():
return server_error_response(e)


@manager.route('/rm', methods=['POST'])
@manager.route('/rm', methods=['POST'])  # noqa: F821
@validate_request("tokens", "tenant_id")
@login_required
def rm():
@@ -104,7 +104,7 @@ def rm():
return server_error_response(e)


@manager.route('/stats', methods=['GET'])
@manager.route('/stats', methods=['GET'])  # noqa: F821
@login_required
def stats():
try:
@@ -135,14 +135,13 @@ def stats():
return server_error_response(e)


@manager.route('/new_conversation', methods=['GET'])
@manager.route('/new_conversation', methods=['GET'])  # noqa: F821
def set_conversation():
token = request.headers.get('Authorization').split()[1]
objs = APIToken.query(token=token)
if not objs:
return get_json_result(
data=False, message='Token is not valid!"', code=settings.RetCode.AUTHENTICATION_ERROR)
req = request.json
try:
if objs[0].source == "agent":
e, cvs = UserCanvasService.get_by_id(objs[0].dialog_id)
@@ -176,7 +175,7 @@ def set_conversation():
return server_error_response(e)


@manager.route('/completion', methods=['POST'])
@manager.route('/completion', methods=['POST'])  # noqa: F821
@validate_request("conversation_id", "messages")
def completion():
token = request.headers.get('Authorization').split()[1]
@@ -188,7 +187,8 @@ def completion():
e, conv = API4ConversationService.get_by_id(req["conversation_id"])
if not e:
return get_data_error_result(message="Conversation not found!")
if "quote" not in req: req["quote"] = False
if "quote" not in req:
req["quote"] = False

msg = []
for m in req["messages"]:
@@ -197,7 +197,8 @@ def completion():
if m["role"] == "assistant" and not msg:
continue
msg.append(m)
if not msg[-1].get("id"): msg[-1]["id"] = get_uuid()
if not msg[-1].get("id"):
msg[-1]["id"] = get_uuid()
message_id = msg[-1]["id"]

def fillin_conv(ans):
@@ -340,7 +341,7 @@ def completion():
return server_error_response(e)


@manager.route('/conversation/<conversation_id>', methods=['GET'])
@manager.route('/conversation/<conversation_id>', methods=['GET'])  # noqa: F821
# @login_required
def get(conversation_id):
token = request.headers.get('Authorization').split()[1]
@@ -371,7 +372,7 @@ def get(conversation_id):
return server_error_response(e)


@manager.route('/document/upload', methods=['POST'])
@manager.route('/document/upload', methods=['POST'])  # noqa: F821
@validate_request("kb_name")
def upload():
token = request.headers.get('Authorization').split()[1]
@@ -483,7 +484,7 @@ def upload():
return get_json_result(data=doc_result.to_json())


@manager.route('/document/upload_and_parse', methods=['POST'])
@manager.route('/document/upload_and_parse', methods=['POST'])  # noqa: F821
@validate_request("conversation_id")
def upload_parse():
token = request.headers.get('Authorization').split()[1]
@@ -506,7 +507,7 @@ def upload_parse():
return get_json_result(data=doc_ids)


@manager.route('/list_chunks', methods=['POST'])
@manager.route('/list_chunks', methods=['POST'])  # noqa: F821
# @login_required
def list_chunks():
token = request.headers.get('Authorization').split()[1]
@@ -546,7 +547,7 @@ def list_chunks():
return get_json_result(data=res)


@manager.route('/list_kb_docs', methods=['POST'])
@manager.route('/list_kb_docs', methods=['POST'])  # noqa: F821
# @login_required
def list_kb_docs():
token = request.headers.get('Authorization').split()[1]
@@ -586,7 +587,7 @@ def list_kb_docs():
return server_error_response(e)


@manager.route('/document/infos', methods=['POST'])
@manager.route('/document/infos', methods=['POST'])  # noqa: F821
@validate_request("doc_ids")
def docinfos():
token = request.headers.get('Authorization').split()[1]
@@ -600,7 +601,7 @@ def docinfos():
return get_json_result(data=list(docs.dicts()))


@manager.route('/document', methods=['DELETE'])
@manager.route('/document', methods=['DELETE'])  # noqa: F821
# @login_required
def document_rm():
token = request.headers.get('Authorization').split()[1]
@@ -659,7 +660,7 @@ def document_rm():
return get_json_result(data=True)


@manager.route('/completion_aibotk', methods=['POST'])
@manager.route('/completion_aibotk', methods=['POST'])  # noqa: F821
@validate_request("Authorization", "conversation_id", "word")
def completion_faq():
import base64
@@ -674,11 +675,13 @@ def completion_faq():
e, conv = API4ConversationService.get_by_id(req["conversation_id"])
if not e:
return get_data_error_result(message="Conversation not found!")
if "quote" not in req: req["quote"] = True
if "quote" not in req:
req["quote"] = True

msg = []
msg.append({"role": "user", "content": req["word"]})
if not msg[-1].get("id"): msg[-1]["id"] = get_uuid()
if not msg[-1].get("id"):
msg[-1]["id"] = get_uuid()
message_id = msg[-1]["id"]

def fillin_conv(ans):
@@ -799,7 +802,7 @@ def completion_faq():
return server_error_response(e)


@manager.route('/retrieval', methods=['POST'])
@manager.route('/retrieval', methods=['POST'])  # noqa: F821
@validate_request("kb_id", "question")
def retrieval():
token = request.headers.get('Authorization').split()[1]

@@ -13,10 +13,8 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#
import logging
import json
import traceback
from functools import partial
from flask import request, Response
from flask_login import login_required, current_user
from api.db.services.canvas_service import CanvasTemplateService, UserCanvasService
@@ -25,15 +23,16 @@ from api.utils import get_uuid
from api.utils.api_utils import get_json_result, server_error_response, validate_request, get_data_error_result
from agent.canvas import Canvas
from peewee import MySQLDatabase, PostgresqlDatabase
from api.db.db_models import APIToken


@manager.route('/templates', methods=['GET'])
@manager.route('/templates', methods=['GET'])  # noqa: F821
@login_required
def templates():
return get_json_result(data=[c.to_dict() for c in CanvasTemplateService.get_all()])


@manager.route('/list', methods=['GET'])
@manager.route('/list', methods=['GET'])  # noqa: F821
@login_required
def canvas_list():
return get_json_result(data=sorted([c.to_dict() for c in \
@@ -41,7 +40,7 @@ def canvas_list():
)


@manager.route('/rm', methods=['POST'])
@manager.route('/rm', methods=['POST'])  # noqa: F821
@validate_request("canvas_ids")
@login_required
def rm():
@@ -54,18 +53,19 @@ def rm():
return get_json_result(data=True)


@manager.route('/set', methods=['POST'])
@manager.route('/set', methods=['POST'])  # noqa: F821
@validate_request("dsl", "title")
@login_required
def save():
req = request.json
req["user_id"] = current_user.id
if not isinstance(req["dsl"], str): req["dsl"] = json.dumps(req["dsl"], ensure_ascii=False)
if not isinstance(req["dsl"], str):
req["dsl"] = json.dumps(req["dsl"], ensure_ascii=False)

req["dsl"] = json.loads(req["dsl"])
if "id" not in req:
if UserCanvasService.query(user_id=current_user.id, title=req["title"].strip()):
return server_error_response(ValueError("Duplicated title."))
return get_data_error_result(f"{req['title'].strip()} already exists.")
req["id"] = get_uuid()
if not UserCanvasService.save(**req):
return get_data_error_result(message="Fail to save canvas.")
@@ -78,7 +78,7 @@ def save():
return get_json_result(data=req)


@manager.route('/get/<canvas_id>', methods=['GET'])
@manager.route('/get/<canvas_id>', methods=['GET'])  # noqa: F821
@login_required
def get(canvas_id):
e, c = UserCanvasService.get_by_id(canvas_id)
@@ -86,8 +86,22 @@ def get(canvas_id):
return get_data_error_result(message="canvas not found.")
return get_json_result(data=c.to_dict())

@manager.route('/getsse/<canvas_id>', methods=['GET'])  # type: ignore # noqa: F821
def getsse(canvas_id):
token = request.headers.get('Authorization').split()
if len(token) != 2:
return get_data_error_result(message='Authorization is not valid!"')
token = token[1]
objs = APIToken.query(beta=token)
if not objs:
return get_data_error_result(message='Token is not valid!"')
e, c = UserCanvasService.get_by_id(canvas_id)
if not e:
return get_data_error_result(message="canvas not found.")
return get_json_result(data=c.to_dict())

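The new `getsse` endpoint mirrors `get` but authenticates with the beta API token from the `Authorization` header instead of a login session, so external clients can fetch canvas metadata. A sketch of the expected call; the host, port, route prefix, and token below are placeholders, not values confirmed by this diff:

# Sketch of calling the new endpoint; URL and token are assumptions.
import requests

resp = requests.get(
    "http://localhost:9380/v1/canvas/getsse/<canvas_id>",
    headers={"Authorization": "Bearer <beta-token>"},
)
print(resp.json())  # {"code": 0, "data": {...canvas dict...}} on success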
@manager.route('/completion', methods=['POST'])

@manager.route('/completion', methods=['POST'])  # noqa: F821
@validate_request("id")
@login_required
def run():
@@ -153,7 +167,8 @@ def run():
return resp

for answer in canvas.run(stream=False):
if answer.get("running_status"): continue
if answer.get("running_status"):
continue
final_ans["content"] = "\n".join(answer["content"]) if "content" in answer else ""
canvas.messages.append({"role": "assistant", "content": final_ans["content"], "id": message_id})
if final_ans.get("reference"):
@@ -163,7 +178,7 @@ def run():
return get_json_result(data={"answer": final_ans["content"], "reference": final_ans.get("reference", [])})


@manager.route('/reset', methods=['POST'])
@manager.route('/reset', methods=['POST'])  # noqa: F821
@validate_request("id")
@login_required
def reset():
@@ -186,7 +201,51 @@ def reset():
return server_error_response(e)


@manager.route('/test_db_connect', methods=['POST'])
@manager.route('/input_elements', methods=['GET'])  # noqa: F821
@login_required
def input_elements():
cvs_id = request.args.get("id")
cpn_id = request.args.get("component_id")
try:
e, user_canvas = UserCanvasService.get_by_id(cvs_id)
if not e:
return get_data_error_result(message="canvas not found.")
if not UserCanvasService.query(user_id=current_user.id, id=cvs_id):
return get_json_result(
data=False, message='Only owner of canvas authorized for this operation.',
code=RetCode.OPERATING_ERROR)

canvas = Canvas(json.dumps(user_canvas.dsl), current_user.id)
return get_json_result(data=canvas.get_component_input_elements(cpn_id))
except Exception as e:
return server_error_response(e)


@manager.route('/debug', methods=['POST'])  # noqa: F821
@validate_request("id", "component_id", "params")
@login_required
def debug():
req = request.json
for p in req["params"]:
assert p.get("key")
try:
e, user_canvas = UserCanvasService.get_by_id(req["id"])
if not e:
return get_data_error_result(message="canvas not found.")
if not UserCanvasService.query(user_id=current_user.id, id=req["id"]):
return get_json_result(
data=False, message='Only owner of canvas authorized for this operation.',
code=RetCode.OPERATING_ERROR)

canvas = Canvas(json.dumps(user_canvas.dsl), current_user.id)
canvas.get_component(req["component_id"])["obj"]._param.debug_inputs = req["params"]
df = canvas.get_component(req["component_id"])["obj"].debug()
return get_json_result(data=df.to_dict(orient="records"))
except Exception as e:
return server_error_response(e)


@manager.route('/test_db_connect', methods=['POST'])  # noqa: F821
@validate_request("db_type", "database", "username", "host", "port", "password")
@login_required
def test_db_connect():
@@ -198,8 +257,26 @@ def test_db_connect():
elif req["db_type"] == 'postgresql':
db = PostgresqlDatabase(req["database"], user=req["username"], host=req["host"], port=req["port"],
password=req["password"])
db.connect()
elif req["db_type"] == 'mssql':
import pyodbc
connection_string = (
f"DRIVER={{ODBC Driver 17 for SQL Server}};"
f"SERVER={req['host']},{req['port']};"
f"DATABASE={req['database']};"
f"UID={req['username']};"
f"PWD={req['password']};"
)
db = pyodbc.connect(connection_string)
cursor = db.cursor()
cursor.execute("SELECT 1")
cursor.close()
else:
return server_error_response("Unsupported database type.")
if req["db_type"] != 'mssql':
db.connect()
db.close()

return get_json_result(data="Database Connection Successful!")
except Exception as e:
return server_error_response(e)

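The new `mssql` branch builds an ODBC connection string and validates it with a `SELECT 1` round trip, while MySQL/PostgreSQL keep going through peewee's `connect()`; the `if req["db_type"] != 'mssql'` guard keeps the peewee-only call away from the pyodbc handle. A condensed sketch of the same probe logic, with the request dict replaced by plain parameters (names follow the hunk above):

# Condensed sketch of the connection probe in test_db_connect().
import pyodbc
from peewee import MySQLDatabase, PostgresqlDatabase

def probe(db_type, database, username, host, port, password):
    if db_type == "mysql":
        db = MySQLDatabase(database, user=username, host=host, port=port, password=password)
    elif db_type == "postgresql":
        db = PostgresqlDatabase(database, user=username, host=host, port=port, password=password)
    elif db_type == "mssql":
        conn = pyodbc.connect(
            f"DRIVER={{ODBC Driver 17 for SQL Server}};SERVER={host},{port};"
            f"DATABASE={database};UID={username};PWD={password};"
        )
        conn.cursor().execute("SELECT 1")  # round trip proves the credentials work
        conn.close()
        return True
    else:
        raise ValueError("Unsupported database type.")
    db.connect()  # peewee path: raises on bad credentials/host
    db.close()
    return True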
@@ -31,11 +31,11 @@ from api.utils.api_utils import server_error_response, get_data_error_result, va
from api.db.services.document_service import DocumentService
from api import settings
from api.utils.api_utils import get_json_result
import hashlib
import xxhash
import re


@manager.route('/list', methods=['POST'])
@manager.route('/list', methods=['POST'])  # noqa: F821
@login_required
@validate_request("doc_id")
def list_chunk():
@@ -68,9 +68,10 @@ def list_chunk():
"doc_id": sres.field[id]["doc_id"],
"docnm_kwd": sres.field[id]["docnm_kwd"],
"important_kwd": sres.field[id].get("important_kwd", []),
"question_kwd": sres.field[id].get("question_kwd", []),
"image_id": sres.field[id].get("img_id", ""),
"available_int": sres.field[id].get("available_int", 1),
"positions": json.loads(sres.field[id].get("position_list", "[]")),
"available_int": int(sres.field[id].get("available_int", 1)),
"positions": sres.field[id].get("position_int", []),
}
assert isinstance(d["positions"], list)
assert len(d["positions"]) == 0 or (isinstance(d["positions"][0], list) and len(d["positions"][0]) == 5)
@@ -83,7 +84,7 @@ def list_chunk():
return server_error_response(e)


@manager.route('/get', methods=['GET'])
@manager.route('/get', methods=['GET'])  # noqa: F821
@login_required
def get():
chunk_id = request.args["chunk_id"]
@@ -112,10 +113,10 @@ def get():
return server_error_response(e)


@manager.route('/set', methods=['POST'])
@manager.route('/set', methods=['POST'])  # noqa: F821
@login_required
@validate_request("doc_id", "chunk_id", "content_with_weight",
"important_kwd")
"important_kwd", "question_kwd")
def set():
req = request.json
d = {
@@ -125,6 +126,8 @@ def set():
d["content_sm_ltks"] = rag_tokenizer.fine_grained_tokenize(d["content_ltks"])
d["important_kwd"] = req["important_kwd"]
d["important_tks"] = rag_tokenizer.tokenize(" ".join(req["important_kwd"]))
d["question_kwd"] = req["question_kwd"]
d["question_tks"] = rag_tokenizer.tokenize("\n".join(req["question_kwd"]))
if "available_int" in req:
d["available_int"] = req["available_int"]

@@ -152,7 +155,7 @@ def set():
d = beAdoc(d, arr[0], arr[1], not any(
[rag_tokenizer.is_chinese(t) for t in q + a]))

v, c = embd_mdl.encode([doc.name, req["content_with_weight"]])
v, c = embd_mdl.encode([doc.name, req["content_with_weight"] if not d["question_kwd"] else "\n".join(d["question_kwd"])])
v = 0.1 * v[0] + 0.9 * v[1] if doc.parser_id != ParserType.QA else v[1]
d["q_%d_vec" % len(v)] = v.tolist()
settings.docStoreConn.update({"id": req["chunk_id"]}, d, search.index_name(tenant_id), doc.kb_id)
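The changed `encode` call means: when a chunk carries `question_kwd`, the joined questions are embedded in place of the raw chunk body, and the stored vector still mixes 10% title with 90% content (except Q&A-parsed chunks, which keep the content vector alone). A sketch of that weighting under the assumption that `encode` returns one vector per input text:

# Sketch of the vector mixing done around encode(); `embd` is any model whose
# encode() returns (vectors, token_count).
import numpy as np

def chunk_vector(embd, title, content, question_kwd, is_qa):
    text = "\n".join(question_kwd) if question_kwd else content
    v, _ = embd.encode([title, text])
    v = np.asarray(v)
    return v[1] if is_qa else 0.1 * v[0] + 0.9 * v[1]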
@@ -161,7 +164,7 @@ def set():
return server_error_response(e)


@manager.route('/switch', methods=['POST'])
@manager.route('/switch', methods=['POST'])  # noqa: F821
@login_required
@validate_request("chunk_ids", "available_int", "doc_id")
def switch():
@@ -181,7 +184,7 @@ def switch():
return server_error_response(e)


@manager.route('/rm', methods=['POST'])
@manager.route('/rm', methods=['POST'])  # noqa: F821
@login_required
@validate_request("chunk_ids", "doc_id")
def rm():
@@ -200,19 +203,19 @@ def rm():
return server_error_response(e)


@manager.route('/create', methods=['POST'])
@manager.route('/create', methods=['POST'])  # noqa: F821
@login_required
@validate_request("doc_id", "content_with_weight")
def create():
req = request.json
md5 = hashlib.md5()
md5.update((req["content_with_weight"] + req["doc_id"]).encode("utf-8"))
chunck_id = md5.hexdigest()
chunck_id = xxhash.xxh64((req["content_with_weight"] + req["doc_id"]).encode("utf-8")).hexdigest()
d = {"id": chunck_id, "content_ltks": rag_tokenizer.tokenize(req["content_with_weight"]),
"content_with_weight": req["content_with_weight"]}
d["content_sm_ltks"] = rag_tokenizer.fine_grained_tokenize(d["content_ltks"])
d["important_kwd"] = req.get("important_kwd", [])
d["important_tks"] = rag_tokenizer.tokenize(" ".join(req.get("important_kwd", [])))
d["question_kwd"] = req.get("question_kwd", [])
d["question_tks"] = rag_tokenizer.tokenize("\n".join(req.get("question_kwd", [])))
d["create_time"] = str(datetime.datetime.now()).replace("T", " ")[:19]
d["create_timestamp_flt"] = datetime.datetime.now().timestamp()

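`create()` swaps MD5 for xxHash when deriving chunk IDs from content plus doc id: xxh64 is a fast non-cryptographic hash, and its 64-bit hex digest (16 characters versus MD5's 32) is enough for deduplication here. A quick comparison sketch:

# Old vs new chunk-id derivation; same inputs, different digest.
import hashlib
import xxhash

payload = ("some chunk text" + "doc-42").encode("utf-8")
old_id = hashlib.md5(payload).hexdigest()    # 32 hex chars, cryptographic
new_id = xxhash.xxh64(payload).hexdigest()   # 16 hex chars, much faster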
@@ -222,16 +225,23 @@ def create():
return get_data_error_result(message="Document not found!")
d["kb_id"] = [doc.kb_id]
d["docnm_kwd"] = doc.name
d["title_tks"] = rag_tokenizer.tokenize(doc.name)
d["doc_id"] = doc.id

tenant_id = DocumentService.get_tenant_id(req["doc_id"])
if not tenant_id:
return get_data_error_result(message="Tenant not found!")

e, kb = KnowledgebaseService.get_by_id(doc.kb_id)
if not e:
return get_data_error_result(message="Knowledgebase not found!")
if kb.pagerank:
d["pagerank_fea"] = kb.pagerank

embd_id = DocumentService.get_embd_id(req["doc_id"])
embd_mdl = LLMBundle(tenant_id, LLMType.EMBEDDING.value, embd_id)

v, c = embd_mdl.encode([doc.name, req["content_with_weight"]])
v, c = embd_mdl.encode([doc.name, req["content_with_weight"] if not d["question_kwd"] else "\n".join(d["question_kwd"])])
v = 0.1 * v[0] + 0.9 * v[1]
d["q_%d_vec" % len(v)] = v.tolist()
settings.docStoreConn.insert([d], search.index_name(tenant_id), doc.kb_id)
@@ -243,7 +253,7 @@ def create():
return server_error_response(e)


@manager.route('/retrieval_test', methods=['POST'])
@manager.route('/retrieval_test', methods=['POST'])  # noqa: F821
@login_required
@validate_request("kb_id", "question")
def retrieval_test():
@@ -302,7 +312,7 @@ def retrieval_test():
return server_error_response(e)


@manager.route('/knowledge_graph', methods=['GET'])
@manager.route('/knowledge_graph', methods=['GET'])  # noqa: F821
@login_required
def knowledge_graph():
doc_id = request.args["doc_id"]

@@ -17,12 +17,15 @@ import json
import re
import traceback
from copy import deepcopy
from api.db.db_models import APIToken

from api.db.services.conversation_service import ConversationService, structure_answer
from api.db.services.user_service import UserTenantService
from flask import request, Response
from flask_login import login_required, current_user

from api.db import LLMType
from api.db.services.dialog_service import DialogService, ConversationService, chat, ask
from api.db.services.dialog_service import DialogService, chat, ask
from api.db.services.knowledgebase_service import KnowledgebaseService
from api.db.services.llm_service import LLMBundle, TenantService, TenantLLMService
from api import settings
@@ -30,8 +33,7 @@ from api.utils.api_utils import get_json_result
from api.utils.api_utils import server_error_response, get_data_error_result, validate_request
from graphrag.mind_map_extractor import MindMapExtractor


@manager.route('/set', methods=['POST'])
@manager.route('/set', methods=['POST'])  # noqa: F821
@login_required
def set_conversation():
req = request.json
@@ -72,29 +74,71 @@ def set_conversation():
return server_error_response(e)


@manager.route('/get', methods=['GET'])
@manager.route('/get', methods=['GET'])  # noqa: F821
@login_required
def get():
conv_id = request.args["conversation_id"]
try:

e, conv = ConversationService.get_by_id(conv_id)
if not e:
return get_data_error_result(message="Conversation not found!")
tenants = UserTenantService.query(user_id=current_user.id)
avatar =None
for tenant in tenants:
if DialogService.query(tenant_id=tenant.tenant_id, id=conv.dialog_id):
dialog = DialogService.query(tenant_id=tenant.tenant_id, id=conv.dialog_id)
if dialog and len(dialog)>0:
avatar = dialog[0].icon
break
else:
return get_json_result(
data=False, message='Only owner of conversation authorized for this operation.',
code=settings.RetCode.OPERATING_ERROR)

def get_value(d, k1, k2):
return d.get(k1, d.get(k2))

for ref in conv.reference:
if isinstance(ref, list):
continue
ref["chunks"] = [{
"id": get_value(ck, "chunk_id", "id"),
"content": get_value(ck, "content", "content_with_weight"),
"document_id": get_value(ck, "doc_id", "document_id"),
"document_name": get_value(ck, "docnm_kwd", "document_name"),
"dataset_id": get_value(ck, "kb_id", "dataset_id"),
"image_id": get_value(ck, "image_id", "img_id"),
"positions": get_value(ck, "positions", "position_int"),
} for ck in ref.get("chunks", [])]

conv = conv.to_dict()
conv["avatar"]=avatar
return get_json_result(data=conv)
except Exception as e:
return server_error_response(e)

@manager.route('/getsse/<dialog_id>', methods=['GET'])  # type: ignore # noqa: F821
def getsse(dialog_id):

token = request.headers.get('Authorization').split()
if len(token) != 2:
return get_data_error_result(message='Authorization is not valid!"')
token = token[1]
objs = APIToken.query(beta=token)
if not objs:
return get_data_error_result(message='Token is not valid!"')
try:
e, conv = DialogService.get_by_id(dialog_id)
if not e:
return get_data_error_result(message="Dialog not found!")
conv = conv.to_dict()
conv["avatar"]= conv["icon"]
del conv["icon"]
return get_json_result(data=conv)
except Exception as e:
return server_error_response(e)

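The `get_value` helper added above papers over two generations of chunk schemas (`chunk_id`/`id`, `doc_id`/`document_id`, and so on), so references stored by older conversations still render. A self-contained sketch of the same normalization:

# Sketch: normalizing a reference chunk that may use either key generation.
def get_value(d, k1, k2):
    return d.get(k1, d.get(k2))

legacy = {"chunk_id": "c1", "content_with_weight": "text", "doc_id": "d1"}
normalized = {
    "id": get_value(legacy, "chunk_id", "id"),
    "content": get_value(legacy, "content", "content_with_weight"),
    "document_id": get_value(legacy, "doc_id", "document_id"),
}
assert normalized == {"id": "c1", "content": "text", "document_id": "d1"}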
@manager.route('/rm', methods=['POST'])
@manager.route('/rm', methods=['POST'])  # noqa: F821
@login_required
def rm():
conv_ids = request.json["conversation_ids"]
@@ -117,7 +161,7 @@ def rm():
return server_error_response(e)


@manager.route('/list', methods=['GET'])
@manager.route('/list', methods=['GET'])  # noqa: F821
@login_required
def list_convsersation():
dialog_id = request.args["dialog_id"]
@@ -130,13 +174,14 @@ def list_convsersation():
dialog_id=dialog_id,
order_by=ConversationService.model.create_time,
reverse=True)

convs = [d.to_dict() for d in convs]
return get_json_result(data=convs)
except Exception as e:
return server_error_response(e)


@manager.route('/completion', methods=['POST'])
@manager.route('/completion', methods=['POST'])  # noqa: F821
@login_required
@validate_request("conversation_id", "messages")
def completion():
@@ -162,24 +207,31 @@ def completion():

if not conv.reference:
conv.reference = []
conv.message.append({"role": "assistant", "content": "", "id": message_id})
else:
def get_value(d, k1, k2):
return d.get(k1, d.get(k2))

for ref in conv.reference:
if isinstance(ref, list):
continue
ref["chunks"] = [{
"id": get_value(ck, "chunk_id", "id"),
"content": get_value(ck, "content", "content_with_weight"),
"document_id": get_value(ck, "doc_id", "document_id"),
"document_name": get_value(ck, "docnm_kwd", "document_name"),
"dataset_id": get_value(ck, "kb_id", "dataset_id"),
"image_id": get_value(ck, "image_id", "img_id"),
"positions": get_value(ck, "positions", "position_int"),
} for ck in ref.get("chunks", [])]

if not conv.reference:
conv.reference = []
conv.reference.append({"chunks": [], "doc_aggs": []})

def fillin_conv(ans):
nonlocal conv, message_id
if not conv.reference:
conv.reference.append(ans["reference"])
else:
conv.reference[-1] = ans["reference"]
conv.message[-1] = {"role": "assistant", "content": ans["answer"],
"id": message_id, "prompt": ans.get("prompt", "")}
ans["id"] = message_id

def stream():
nonlocal dia, msg, req, conv
try:
for ans in chat(dia, msg, True, **req):
fillin_conv(ans)
ans = structure_answer(conv, ans, message_id, conv.id)
yield "data:" + json.dumps({"code": 0, "message": "", "data": ans}, ensure_ascii=False) + "\n\n"
ConversationService.update_by_id(conv.id, conv.to_dict())
except Exception as e:
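`stream()` yields standard server-sent-event frames: each answer delta is serialized as a `data:{json}\n\n` line. A client-side sketch for consuming that stream, assuming the `requests` library; the URL and payload below are placeholders:

# Sketch of reading the SSE frames emitted by stream(); URL/ids are assumptions.
import json
import requests

with requests.post("http://localhost:9380/v1/conversation/completion",
                   json={"conversation_id": "<id>",
                         "messages": [{"role": "user", "content": "hi", "id": "m1"}]},
                   stream=True) as resp:
    for line in resp.iter_lines(decode_unicode=True):
        if line and line.startswith("data:"):
            frame = json.loads(line[len("data:"):])
            print(frame["data"])  # incremental answer plus references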
@@ -200,8 +252,7 @@ def completion():
else:
answer = None
for ans in chat(dia, msg, **req):
answer = ans
fillin_conv(ans)
answer = structure_answer(conv, ans, message_id, req["conversation_id"])
ConversationService.update_by_id(conv.id, conv.to_dict())
break
return get_json_result(data=answer)
@@ -209,7 +260,7 @@ def completion():
return server_error_response(e)


@manager.route('/tts', methods=['POST'])
@manager.route('/tts', methods=['POST'])  # noqa: F821
@login_required
def tts():
req = request.json
@@ -243,7 +294,7 @@ def tts():
return resp


@manager.route('/delete_msg', methods=['POST'])
@manager.route('/delete_msg', methods=['POST'])  # noqa: F821
@login_required
@validate_request("conversation_id", "message_id")
def delete_msg():
@@ -266,7 +317,7 @@ def delete_msg():
return get_json_result(data=conv)


@manager.route('/thumbup', methods=['POST'])
@manager.route('/thumbup', methods=['POST'])  # noqa: F821
@login_required
@validate_request("conversation_id", "message_id")
def thumbup():
@@ -281,17 +332,19 @@ def thumbup():
if req["message_id"] == msg.get("id", "") and msg.get("role", "") == "assistant":
if up_down:
msg["thumbup"] = True
if "feedback" in msg: del msg["feedback"]
if "feedback" in msg:
del msg["feedback"]
else:
msg["thumbup"] = False
if feedback: msg["feedback"] = feedback
if feedback:
msg["feedback"] = feedback
break

ConversationService.update_by_id(conv["id"], conv)
return get_json_result(data=conv)


@manager.route('/ask', methods=['POST'])
@manager.route('/ask', methods=['POST'])  # noqa: F821
@login_required
@validate_request("question", "kb_ids")
def ask_about():
@@ -317,7 +370,7 @@ def ask_about():
return resp


@manager.route('/mindmap', methods=['POST'])
@manager.route('/mindmap', methods=['POST'])  # noqa: F821
@login_required
@validate_request("question", "kb_ids")
def mindmap():
@@ -339,7 +392,7 @@ def mindmap():
return get_json_result(data=mind_map)


@manager.route('/related_questions', methods=['POST'])
@manager.route('/related_questions', methods=['POST'])  # noqa: F821
@login_required
@validate_request("question")
def related_questions():

@@ -26,21 +26,21 @@ from api.utils import get_uuid
from api.utils.api_utils import get_json_result


@manager.route('/set', methods=['POST'])
@manager.route('/set', methods=['POST'])  # noqa: F821
@login_required
def set_dialog():
req = request.json
dialog_id = req.get("dialog_id")
name = req.get("name", "New Dialog")
description = req.get("description", "A helpful Dialog")
description = req.get("description", "A helpful dialog")
icon = req.get("icon", "")
top_n = req.get("top_n", 6)
top_k = req.get("top_k", 1024)
rerank_id = req.get("rerank_id", "")
if not rerank_id: req["rerank_id"] = ""
if not rerank_id:
req["rerank_id"] = ""
similarity_threshold = req.get("similarity_threshold", 0.1)
vector_similarity_weight = req.get("vector_similarity_weight", 0.3)
if vector_similarity_weight is None: vector_similarity_weight = 0.3
llm_setting = req.get("llm_setting", {})
default_prompt = {
"system": """你是一个智能助手,请总结知识库的内容来回答问题,请列举知识库中的数据详细回答。当所有知识库内容都与问题无关时,你的回答必须包括“知识库中未找到您要的答案!”这句话。回答需要考虑聊天历史。
@@ -123,7 +123,7 @@ def set_dialog():
return server_error_response(e)


@manager.route('/get', methods=['GET'])
@manager.route('/get', methods=['GET'])  # noqa: F821
@login_required
def get():
dialog_id = request.args["dialog_id"]
@@ -149,7 +149,7 @@ def get_kb_names(kb_ids):
return ids, nms


@manager.route('/list', methods=['GET'])
@manager.route('/list', methods=['GET'])  # noqa: F821
@login_required
def list_dialogs():
try:
@@ -166,7 +166,7 @@ def list_dialogs():
return server_error_response(e)


@manager.route('/rm', methods=['POST'])
@manager.route('/rm', methods=['POST'])  # noqa: F821
@login_required
@validate_request("dialog_ids")
def rm():

@@ -13,7 +13,6 @@
# See the License for the specific language governing permissions and
# limitations under the License
#
import json
import os.path
import pathlib
import re
@@ -22,19 +21,25 @@ import flask
from flask import request
from flask_login import login_required, current_user

from api.db.db_models import Task, File
from api.db.services.file2document_service import File2DocumentService
from api.db.services.file_service import FileService
from api.db.services.task_service import TaskService, queue_tasks
from api.db.services.user_service import UserTenantService
from deepdoc.parser.html_parser import RAGFlowHtmlParser
from rag.nlp import search

from api.db import FileType, TaskStatus, ParserType, FileSource
from api.db.db_models import File, Task
from api.db.services.file2document_service import File2DocumentService
from api.db.services.file_service import FileService
from api.db.services.task_service import queue_tasks
from api.db.services.user_service import UserTenantService
from api.db.services import duplicate_name
from api.db.services.knowledgebase_service import KnowledgebaseService
from api.utils.api_utils import server_error_response, get_data_error_result, validate_request
from api.utils import get_uuid
from api.db import FileType, TaskStatus, ParserType, FileSource
from api.db.services.task_service import TaskService
from api.db.services.document_service import DocumentService, doc_upload_and_parse
from api.utils.api_utils import (
server_error_response,
get_data_error_result,
validate_request,
)
from api.utils import get_uuid
from api import settings
from api.utils.api_utils import get_json_result
from rag.utils.storage_factory import STORAGE_IMPL
@@ -43,7 +48,7 @@ from api.utils.web_utils import html2pdf, is_valid_url
from api.constants import IMG_BASE64_PREFIX


@manager.route('/upload', methods=['POST'])
@manager.route('/upload', methods=['POST'])  # noqa: F821
@login_required
@validate_request("kb_id")
def upload():
@@ -72,7 +77,7 @@ def upload():
return get_json_result(data=True)


@manager.route('/web_crawl', methods=['POST'])
@manager.route('/web_crawl', methods=['POST'])  # noqa: F821
@login_required
@validate_request("kb_id", "name", "url")
def web_crawl():
@@ -90,7 +95,8 @@ def web_crawl():
raise LookupError("Can't find this knowledgebase!")

blob = html2pdf(url)
if not blob: return server_error_response(ValueError("Download failure."))
if not blob:
return server_error_response(ValueError("Download failure."))

root_folder = FileService.get_root_folder(current_user.id)
pf_id = root_folder["id"]
@@ -138,7 +144,7 @@ def web_crawl():
return get_json_result(data=True)


@manager.route('/create', methods=['POST'])
@manager.route('/create', methods=['POST'])  # noqa: F821
@login_required
@validate_request("name", "kb_id")
def create():
@@ -174,7 +180,7 @@ def create():
return server_error_response(e)


@manager.route('/list', methods=['GET'])
@manager.route('/list', methods=['GET'])  # noqa: F821
@login_required
def list_docs():
kb_id = request.args.get("kb_id")
@@ -209,7 +215,7 @@ def list_docs():
return server_error_response(e)


@manager.route('/infos', methods=['POST'])
@manager.route('/infos', methods=['POST'])  # noqa: F821
@login_required
def docinfos():
req = request.json
@@ -225,7 +231,7 @@ def docinfos():
return get_json_result(data=list(docs.dicts()))


@manager.route('/thumbnails', methods=['GET'])
@manager.route('/thumbnails', methods=['GET'])  # noqa: F821
# @login_required
def thumbnails():
doc_ids = request.args.get("doc_ids").split(",")
@@ -245,7 +251,7 @@ def thumbnails():
return server_error_response(e)


@manager.route('/change_status', methods=['POST'])
@manager.route('/change_status', methods=['POST'])  # noqa: F821
@login_required
@validate_request("doc_id", "status")
def change_status():
@@ -284,13 +290,14 @@ def change_status():
return server_error_response(e)


@manager.route('/rm', methods=['POST'])
@manager.route('/rm', methods=['POST'])  # noqa: F821
@login_required
@validate_request("doc_id")
def rm():
req = request.json
doc_ids = req["doc_id"]
if isinstance(doc_ids, str): doc_ids = [doc_ids]
if isinstance(doc_ids, str):
doc_ids = [doc_ids]

for doc_id in doc_ids:
if not DocumentService.accessible4deletion(doc_id, current_user.id):
@@ -315,6 +322,7 @@ def rm():

b, n = File2DocumentService.get_storage_address(doc_id=doc_id)

TaskService.filter_delete([Task.doc_id == doc_id])
if not DocumentService.remove_document(doc, tenant_id):
return get_data_error_result(
message="Database error (Document removal)!")
@@ -333,7 +341,7 @@ def rm():
return get_json_result(data=True)


@manager.route('/run', methods=['POST'])
@manager.route('/run', methods=['POST'])  # noqa: F821
@login_required
@validate_request("doc_ids", "run")
def run():
@@ -348,23 +356,23 @@ def run():
try:
for id in req["doc_ids"]:
info = {"run": str(req["run"]), "progress": 0}
if str(req["run"]) == TaskStatus.RUNNING.value:
if str(req["run"]) == TaskStatus.RUNNING.value and req.get("delete", False):
info["progress_msg"] = ""
info["chunk_num"] = 0
info["token_num"] = 0
DocumentService.update_by_id(id, info)
# if str(req["run"]) == TaskStatus.CANCEL.value:
tenant_id = DocumentService.get_tenant_id(id)
if not tenant_id:
return get_data_error_result(message="Tenant not found!")
e, doc = DocumentService.get_by_id(id)
if not e:
return get_data_error_result(message="Document not found!")
if settings.docStoreConn.indexExist(search.index_name(tenant_id), doc.kb_id):
settings.docStoreConn.delete({"doc_id": id}, search.index_name(tenant_id), doc.kb_id)
if req.get("delete", False):
TaskService.filter_delete([Task.doc_id == id])
if settings.docStoreConn.indexExist(search.index_name(tenant_id), doc.kb_id):
settings.docStoreConn.delete({"doc_id": id}, search.index_name(tenant_id), doc.kb_id)

if str(req["run"]) == TaskStatus.RUNNING.value:
TaskService.filter_delete([Task.doc_id == id])
e, doc = DocumentService.get_by_id(id)
doc = doc.to_dict()
doc["tenant_id"] = tenant_id
@@ -376,7 +384,7 @@ def run():
return server_error_response(e)


@manager.route('/rename', methods=['POST'])
@manager.route('/rename', methods=['POST'])  # noqa: F821
@login_required
@validate_request("doc_id", "name")
def rename():
@@ -417,7 +425,7 @@ def rename():
return server_error_response(e)


@manager.route('/get/<doc_id>', methods=['GET'])
@manager.route('/get/<doc_id>', methods=['GET'])  # noqa: F821
# @login_required
def get(doc_id):
try:
@@ -442,7 +450,7 @@ def get(doc_id):
return server_error_response(e)


@manager.route('/change_parser', methods=['POST'])
@manager.route('/change_parser', methods=['POST'])  # noqa: F821
@login_required
@validate_request("doc_id", "parser_id")
def change_parser():
@@ -493,10 +501,13 @@ def change_parser():
return server_error_response(e)


@manager.route('/image/<image_id>', methods=['GET'])
@manager.route('/image/<image_id>', methods=['GET'])  # noqa: F821
# @login_required
def get_image(image_id):
try:
arr = image_id.split("-")
if len(arr) != 2:
return get_data_error_result(message="Image not found.")
bkt, nm = image_id.split("-")
response = flask.make_response(STORAGE_IMPL.get(bkt, nm))
response.headers.set('Content-Type', 'image/JPEG')
@@ -505,7 +516,7 @@ def get_image(image_id):
return server_error_response(e)


@manager.route('/upload_and_parse', methods=['POST'])
@manager.route('/upload_and_parse', methods=['POST'])  # noqa: F821
@login_required
@validate_request("conversation_id")
def upload_and_parse():
@@ -524,7 +535,7 @@ def upload_and_parse():
return get_json_result(data=doc_ids)


@manager.route('/parse', methods=['POST'])
@manager.route('/parse', methods=['POST'])  # noqa: F821
@login_required
def parse():
url = request.json.get("url") if request.json else ""
@@ -548,7 +559,7 @@ def parse():
})
driver = Chrome(options=options)
driver.get(url)
res_headers = [r.response.headers for r in driver.requests]
res_headers = [r.response.headers for r in driver.requests if r and r.response]
if len(res_headers) > 1:
sections = RAGFlowHtmlParser().parser_txt(driver.page_source)
driver.quit()

@@ -28,7 +28,7 @@ from api import settings
from api.utils.api_utils import get_json_result


@manager.route('/convert', methods=['POST'])
@manager.route('/convert', methods=['POST'])  # noqa: F821
@login_required
@validate_request("file_ids", "kb_ids")
def convert():
@@ -92,7 +92,7 @@ def convert():
return server_error_response(e)


@manager.route('/rm', methods=['POST'])
@manager.route('/rm', methods=['POST'])  # noqa: F821
@login_required
@validate_request("file_ids")
def rm():

@@ -34,7 +34,7 @@ from api.utils.file_utils import filename_type
from rag.utils.storage_factory import STORAGE_IMPL


@manager.route('/upload', methods=['POST'])
@manager.route('/upload', methods=['POST'])  # noqa: F821
@login_required
# @validate_request("parent_id")
def upload():
@@ -120,7 +120,7 @@ def upload():
return server_error_response(e)


@manager.route('/create', methods=['POST'])
@manager.route('/create', methods=['POST'])  # noqa: F821
@login_required
@validate_request("name")
def create():
@@ -160,7 +160,7 @@ def create():
return server_error_response(e)


@manager.route('/list', methods=['GET'])
@manager.route('/list', methods=['GET'])  # noqa: F821
@login_required
def list_files():
pf_id = request.args.get("parent_id")
@@ -192,7 +192,7 @@ def list_files():
return server_error_response(e)


@manager.route('/root_folder', methods=['GET'])
@manager.route('/root_folder', methods=['GET'])  # noqa: F821
@login_required
def get_root_folder():
try:
@@ -202,7 +202,7 @@ def get_root_folder():
return server_error_response(e)


@manager.route('/parent_folder', methods=['GET'])
@manager.route('/parent_folder', methods=['GET'])  # noqa: F821
@login_required
def get_parent_folder():
file_id = request.args.get("file_id")
@@ -217,7 +217,7 @@ def get_parent_folder():
return server_error_response(e)


@manager.route('/all_parent_folder', methods=['GET'])
@manager.route('/all_parent_folder', methods=['GET'])  # noqa: F821
@login_required
def get_all_parent_folders():
file_id = request.args.get("file_id")
@@ -235,7 +235,7 @@ def get_all_parent_folders():
return server_error_response(e)


@manager.route('/rm', methods=['POST'])
@manager.route('/rm', methods=['POST'])  # noqa: F821
@login_required
@validate_request("file_ids")
def rm():
@@ -284,7 +284,7 @@ def rm():
return server_error_response(e)


@manager.route('/rename', methods=['POST'])
@manager.route('/rename', methods=['POST'])  # noqa: F821
@login_required
@validate_request("file_id", "name")
def rename():
@@ -322,15 +322,20 @@ def rename():
return server_error_response(e)


@manager.route('/get/<file_id>', methods=['GET'])
# @login_required
@manager.route('/get/<file_id>', methods=['GET'])  # noqa: F821
@login_required
def get(file_id):
try:
e, file = FileService.get_by_id(file_id)
if not e:
return get_data_error_result(message="Document not found!")
b, n = File2DocumentService.get_storage_address(file_id=file_id)
response = flask.make_response(STORAGE_IMPL.get(b, n))

blob = STORAGE_IMPL.get(file.parent_id, file.location)
if not blob:
b, n = File2DocumentService.get_storage_address(file_id=file_id)
blob = STORAGE_IMPL.get(b, n)

response = flask.make_response(blob)
ext = re.search(r"\.([^.]+)$", file.name)
if ext:
if file.type == FileType.VISUAL.value:
@@ -345,7 +350,7 @@ def get(file_id):
return server_error_response(e)


@manager.route('/mv', methods=['POST'])
@manager.route('/mv', methods=['POST'])  # noqa: F821
@login_required
@validate_request("src_file_ids", "dest_file_id")
def move():

@@ -21,7 +21,7 @@ from api.db.services.document_service import DocumentService
from api.db.services.file2document_service import File2DocumentService
from api.db.services.file_service import FileService
from api.db.services.user_service import TenantService, UserTenantService
from api.utils.api_utils import server_error_response, get_data_error_result, validate_request
from api.utils.api_utils import server_error_response, get_data_error_result, validate_request, not_allowed_parameters
from api.utils import get_uuid
from api.db import StatusEnum, FileSource
from api.db.services.knowledgebase_service import KnowledgebaseService
@@ -32,7 +32,7 @@ from rag.nlp import search
from api.constants import DATASET_NAME_LIMIT


@manager.route('/create', methods=['post'])
@manager.route('/create', methods=['post'])  # noqa: F821
@login_required
@validate_request("name")
def create():
@@ -67,9 +67,10 @@ def create():
return server_error_response(e)


@manager.route('/update', methods=['post'])
@manager.route('/update', methods=['post'])  # noqa: F821
@login_required
@validate_request("kb_id", "name", "description", "permission", "parser_id")
@not_allowed_parameters("id", "tenant_id", "created_by", "create_time", "update_time", "create_date", "update_date", "created_by")
def update():
req = request.json
req["name"] = req["name"].strip()
@@ -101,6 +102,15 @@ def update():
if not KnowledgebaseService.update_by_id(kb.id, req):
return get_data_error_result()

if kb.pagerank != req.get("pagerank", 0):
if req.get("pagerank", 0) > 0:
settings.docStoreConn.update({"kb_id": kb.id}, {"pagerank_fea": req["pagerank"]},
search.index_name(kb.tenant_id), kb.id)
else:
# Elasticsearch requires pagerank_fea be non-zero!
settings.docStoreConn.update({"exist": "pagerank_fea"}, {"remove": "pagerank_fea"},
search.index_name(kb.tenant_id), kb.id)

e, kb = KnowledgebaseService.get_by_id(kb.id)
if not e:
return get_data_error_result(
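The new block propagates a knowledge-base pagerank change to every indexed chunk: a positive value is written to `pagerank_fea` across the KB, while zero removes the field entirely because, as the inline comment notes, Elasticsearch requires `pagerank_fea` to be non-zero. A sketch of the branch in isolation, with `doc_store` standing in for `settings.docStoreConn`:

# Sketch of the pagerank propagation; doc_store stands in for settings.docStoreConn.
def sync_pagerank(doc_store, index, kb_id, old_value, new_value):
    if old_value == new_value:
        return
    if new_value > 0:
        doc_store.update({"kb_id": kb_id}, {"pagerank_fea": new_value}, index, kb_id)
    else:
        # ES cannot keep a zero here, so drop the field instead of writing 0.
        doc_store.update({"exist": "pagerank_fea"}, {"remove": "pagerank_fea"}, index, kb_id)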
@ -111,7 +121,7 @@ def update():
|
||||
return server_error_response(e)
|
||||
|
||||
|
||||
@manager.route('/detail', methods=['GET'])
|
||||
@manager.route('/detail', methods=['GET']) # noqa: F821
|
||||
@login_required
|
||||
def detail():
|
||||
kb_id = request.args["kb_id"]
|
||||
@ -134,7 +144,7 @@ def detail():
|
||||
return server_error_response(e)
|
||||
|
||||
|
||||
@manager.route('/list', methods=['GET'])
|
||||
@manager.route('/list', methods=['GET']) # noqa: F821
|
||||
@login_required
|
||||
def list_kbs():
|
||||
keywords = request.args.get("keywords", "")
|
||||
@ -151,7 +161,7 @@ def list_kbs():
|
||||
return server_error_response(e)
|
||||
|
||||
|
||||
@manager.route('/rm', methods=['post'])
|
||||
@manager.route('/rm', methods=['post']) # noqa: F821
|
||||
@login_required
|
||||
@validate_request("kb_id")
|
||||
def rm():
|
||||
|
||||
@@ -28,7 +28,7 @@ from rag.llm import EmbeddingModel, ChatModel, RerankModel, CvModel, TTSModel
import requests


@manager.route('/factories', methods=['GET'])
@manager.route('/factories', methods=['GET']) # noqa: F821
@login_required
def factories():
    try:
@@ -50,7 +50,7 @@ def factories():
        return server_error_response(e)


@manager.route('/set_api_key', methods=['POST'])
@manager.route('/set_api_key', methods=['POST']) # noqa: F821
@login_required
@validate_request("llm_factory", "api_key")
def set_api_key():
@@ -129,7 +129,7 @@ def set_api_key():
    return get_json_result(data=True)


@manager.route('/add_llm', methods=['POST'])
@manager.route('/add_llm', methods=['POST']) # noqa: F821
@login_required
@validate_request("llm_factory")
def add_llm():
@@ -216,7 +216,7 @@ def add_llm():
                              base_url=llm["api_base"])
        try:
            arr, tc = mdl.encode(["Test if the api key is available"])
            if len(arr[0]) == 0 or tc == 0:
            if len(arr[0]) == 0:
                raise Exception("Fail")
        except Exception as e:
            msg += f"\nFail to access embedding model({llm['llm_name']})." + str(e)
@@ -242,7 +242,7 @@ def add_llm():
        )
        try:
            arr, tc = mdl.similarity("Hello~ Ragflower!", ["Hi, there!", "Ohh, my friend!"])
            if len(arr) == 0 or tc == 0:
            if len(arr) == 0:
                raise Exception("Not known.")
        except Exception as e:
            msg += f"\nFail to access model({llm['llm_name']})." + str(
@@ -255,9 +255,7 @@ def add_llm():
        )
        try:
            img_url = (
                "https://upload.wikimedia.org/wikipedia/comm"
                "ons/thumb/d/dd/Gfp-wisconsin-madison-the-nature-boardwalk.jpg/256"
                "0px-Gfp-wisconsin-madison-the-nature-boardwalk.jpg"
                "https://www.8848seo.cn/zb_users/upload/2022/07/20220705101240_99378.jpg"
            )
            res = requests.get(img_url)
            if res.status_code == 200:
@@ -292,7 +290,7 @@ def add_llm():
    return get_json_result(data=True)
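add_llm validates freshly registered credentials by issuing one tiny real call and checking for a non-empty result; the hunks above also drop the tc == 0 condition, presumably because some providers legitimately report a zero token count on success. A hedged sketch of the probe pattern, usable with any object exposing the encode contract shown above:

def probe_embedding_model(mdl):
    """Smoke-test an embedding model; returns (ok, message)."""
    try:
        arr, tc = mdl.encode(["Test if the api key is available"])
        if len(arr[0]) == 0:
            # an empty vector means the call "succeeded" but returned nothing usable
            return False, "empty embedding returned"
    except Exception as e:
        return False, str(e)
    return True, ""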
@manager.route('/delete_llm', methods=['POST'])
@manager.route('/delete_llm', methods=['POST']) # noqa: F821
@login_required
@validate_request("llm_factory", "llm_name")
def delete_llm():
@@ -303,7 +301,7 @@ def delete_llm():
    return get_json_result(data=True)


@manager.route('/delete_factory', methods=['POST'])
@manager.route('/delete_factory', methods=['POST']) # noqa: F821
@login_required
@validate_request("llm_factory")
def delete_factory():
@@ -313,7 +311,7 @@ def delete_factory():
    return get_json_result(data=True)


@manager.route('/my_llms', methods=['GET'])
@manager.route('/my_llms', methods=['GET']) # noqa: F821
@login_required
def my_llms():
    try:
@@ -334,7 +332,7 @@ def my_llms():
        return server_error_response(e)


@manager.route('/list', methods=['GET'])
@manager.route('/list', methods=['GET']) # noqa: F821
@login_required
def list_app():
    self_deploied = ["Youdao", "FastEmbed", "BAAI", "Ollama", "Xinference", "LocalAI", "LM-Studio"]
@@ -351,8 +349,10 @@ def list_app():

    llm_set = set([m["llm_name"] + "@" + m["fid"] for m in llms])
    for o in objs:
        if not o.api_key: continue
        if o.llm_name + "@" + o.llm_factory in llm_set: continue
        if not o.api_key:
            continue
        if o.llm_name + "@" + o.llm_factory in llm_set:
            continue
        llms.append({"llm_name": o.llm_name, "model_type": o.model_type, "fid": o.llm_factory, "available": True})

    res = {}
api/apps/sdk/agent.py (new file, 39 lines)
@@ -0,0 +1,39 @@
#
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

from api.db.services.canvas_service import UserCanvasService
from api.utils.api_utils import get_error_data_result, token_required
from api.utils.api_utils import get_result
from flask import request

@manager.route('/agents', methods=['GET']) # noqa: F821
@token_required
def list_agents(tenant_id):
    id = request.args.get("id")
    title = request.args.get("title")
    if id or title:
        canvas = UserCanvasService.query(id=id, title=title, user_id=tenant_id)
        if not canvas:
            return get_error_data_result("The agent doesn't exist.")
    page_number = int(request.args.get("page", 1))
    items_per_page = int(request.args.get("page_size", 30))
    orderby = request.args.get("orderby", "update_time")
    if request.args.get("desc") == "False" or request.args.get("desc") == "false":
        desc = False
    else:
        desc = True
    canvas = UserCanvasService.get_list(tenant_id,page_number,items_per_page,orderby,desc,id,title)
    return get_result(data=canvas)
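A usage sketch for the new GET /agents endpoint. The base URL, port, and token are placeholders, and the path assumes the SDK blueprint is mounted under /api/v1:

import requests

BASE = "http://127.0.0.1:9380/api/v1"                  # placeholder host and port
HEADERS = {"Authorization": "Bearer <YOUR_API_KEY>"}   # placeholder token

# list agents, newest first (the endpoint defaults to orderby=update_time, desc=True)
resp = requests.get(f"{BASE}/agents", headers=HEADERS,
                    params={"page": 1, "page_size": 30})
print(resp.json())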
@@ -26,7 +26,7 @@ from api.utils.api_utils import get_result


@manager.route('/chats', methods=['POST'])
@manager.route('/chats', methods=['POST']) # noqa: F821
@token_required
def create(tenant_id):
    req=request.json
@@ -82,7 +82,8 @@ def create(tenant_id):
    req["top_k"] = req.get("top_k", 1024)
    req["rerank_id"] = req.get("rerank_id", "")
    if req.get("rerank_id"):
        if not TenantLLMService.query(tenant_id=tenant_id,llm_name=req.get("rerank_id"),model_type="rerank"):
        value_rerank_model = ["BAAI/bge-reranker-v2-m3","maidalun1020/bce-reranker-base_v1"]
        if req["rerank_id"] not in value_rerank_model and not TenantLLMService.query(tenant_id=tenant_id,llm_name=req.get("rerank_id"),model_type="rerank"):
            return get_error_data_result(f"`rerank_model` {req.get('rerank_id')} doesn't exist")
    if not req.get("llm_id"):
        req["llm_id"] = tenant.llm_id
@@ -104,9 +105,12 @@ def create(tenant_id):
            "parameters": [
                {"key": "knowledge", "optional": False}
            ],
            "empty_response": "Sorry! No relevant content was found in the knowledge base!"
            "empty_response": "Sorry! No relevant content was found in the knowledge base!",
            "quote":True,
            "tts":False,
            "refine_multiturn":True
        }
        key_list_2 = ["system", "prologue", "parameters", "empty_response"]
        key_list_2 = ["system", "prologue", "parameters", "empty_response","quote","tts","refine_multiturn"]
        if "prompt_config" not in req:
            req['prompt_config'] = {}
        for key in key_list_2:
@@ -147,7 +151,7 @@ def create(tenant_id):
    res["avatar"] = res.pop("icon")
    return get_result(data=res)
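Both the create and update hunks now whitelist two built-in rerank models, so those pass validation even without a tenant-level registration. The check in isolation; tenant_has_model is a hypothetical stand-in for the TenantLLMService.query call:

BUILTIN_RERANK_MODELS = ["BAAI/bge-reranker-v2-m3", "maidalun1020/bce-reranker-base_v1"]

def validate_rerank_id(rerank_id, tenant_has_model):
    """tenant_has_model: callable(llm_name) -> bool, standing in for the service query."""
    if not rerank_id:
        return True  # rerank is optional
    # built-ins pass outright; anything else must be registered for the tenant
    return rerank_id in BUILTIN_RERANK_MODELS or tenant_has_model(rerank_id)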
@manager.route('/chats/<chat_id>', methods=['PUT'])
@manager.route('/chats/<chat_id>', methods=['PUT']) # noqa: F821
@token_required
def update(tenant_id,chat_id):
    if not DialogService.query(tenant_id=tenant_id, id=chat_id, status=StatusEnum.VALID.value):
@@ -158,10 +162,10 @@ def update(tenant_id,chat_id):
        req["do_refer"]=req.pop("show_quotation")
    if "dataset_ids" in req:
        if not ids:
            return get_error_data_result("`datasets` can't be empty")
            return get_error_data_result("`dataset_ids` can't be empty")
        if ids:
            for kb_id in ids:
                kbs = KnowledgebaseService.accessible(kb_id=chat_id, user_id=tenant_id)
                kbs = KnowledgebaseService.accessible(kb_id=kb_id, user_id=tenant_id)
                if not kbs:
                    return get_error_data_result(f"You don't own the dataset {kb_id}")
                kbs = KnowledgebaseService.query(id=kb_id)
@@ -185,9 +189,6 @@ def update(tenant_id,chat_id):
    e, tenant = TenantService.get_by_id(tenant_id)
    if not e:
        return get_error_data_result(message="Tenant not found!")
    if req.get("rerank_model"):
        if not TenantLLMService.query(tenant_id=tenant_id,llm_name=req.get("rerank_model"),model_type="rerank"):
            return get_error_data_result(f"`rerank_model` {req.get('rerank_model')} doesn't exist")
    # prompt
    prompt = req.get("prompt")
    key_mapping = {"parameters": "variables",
@@ -207,6 +208,10 @@
        req["prompt_config"] = req.pop("prompt")
    e, res = DialogService.get_by_id(chat_id)
    res = res.to_json()
    if req.get("rerank_id"):
        value_rerank_model = ["BAAI/bge-reranker-v2-m3","maidalun1020/bce-reranker-base_v1"]
        if req["rerank_id"] not in value_rerank_model and not TenantLLMService.query(tenant_id=tenant_id,llm_name=req.get("rerank_id"),model_type="rerank"):
            return get_error_data_result(f"`rerank_model` {req.get('rerank_id')} doesn't exist")
    if "name" in req:
        if not req.get("name"):
            return get_error_data_result(message="`name` is not empty.")
@@ -235,7 +240,7 @@
    return get_result()


@manager.route('/chats', methods=['DELETE'])
@manager.route('/chats', methods=['DELETE']) # noqa: F821
@token_required
def delete(tenant_id):
    req = request.json
@@ -257,7 +262,7 @@ def delete(tenant_id):
        DialogService.update_by_id(id, temp_dict)
    return get_result()

@manager.route('/chats', methods=['GET'])
@manager.route('/chats', methods=['GET']) # noqa: F821
@token_required
def list_chat(tenant_id):
    id = request.args.get("id")
@@ -34,7 +34,7 @@ from api.utils.api_utils import (
)


@manager.route("/datasets", methods=["POST"])
@manager.route("/datasets", methods=["POST"]) # noqa: F821
@token_required
def create(tenant_id):
    """
@@ -190,7 +190,7 @@ def create(tenant_id):
    return get_result(data=renamed_data)


@manager.route("/datasets", methods=["DELETE"])
@manager.route("/datasets", methods=["DELETE"]) # noqa: F821
@token_required
def delete(tenant_id):
    """
@@ -260,7 +260,7 @@ def delete(tenant_id):
    return get_result(code=settings.RetCode.SUCCESS)


@manager.route("/datasets/<dataset_id>", methods=["PUT"])
@manager.route("/datasets/<dataset_id>", methods=["PUT"]) # noqa: F821
@token_required
def update(tenant_id, dataset_id):
    """
@@ -429,7 +429,7 @@ def update(tenant_id, dataset_id):
    return get_result(code=settings.RetCode.SUCCESS)


@manager.route("/datasets", methods=["GET"])
@manager.route("/datasets", methods=["GET"]) # noqa: F821
@token_required
def list(tenant_id):
    """
@@ -22,7 +22,7 @@ from api import settings
from api.utils.api_utils import validate_request, build_error_result, apikey_required


@manager.route('/dify/retrieval', methods=['POST'])
@manager.route('/dify/retrieval', methods=['POST']) # noqa: F821
@apikey_required
@validate_request("knowledge_id", "query")
def retrieval(tenant_id):
@@ -22,7 +22,7 @@ from rag.nlp import rag_tokenizer
from api.db import LLMType, ParserType
from api.db.services.llm_service import TenantLLMService
from api import settings
import hashlib
import xxhash
import re
from api.utils.api_utils import token_required
from api.db.db_models import Task
@@ -41,12 +41,11 @@ from api.utils.api_utils import construct_json_result, get_parser_config
from rag.nlp import search
from rag.utils import rmSpace
from rag.utils.storage_factory import STORAGE_IMPL
import os

MAXIMUM_OF_UPLOADING_FILES = 256
@manager.route("/datasets/<dataset_id>/documents", methods=["POST"])
|
||||
@manager.route("/datasets/<dataset_id>/documents", methods=["POST"]) # noqa: F821
|
||||
@token_required
|
||||
def upload(dataset_id, tenant_id):
|
||||
"""
|
||||
@ -154,7 +153,7 @@ def upload(dataset_id, tenant_id):
|
||||
return get_result(data=renamed_doc_list)
|
||||
|
||||
|
||||
@manager.route("/datasets/<dataset_id>/documents/<document_id>", methods=["PUT"])
|
||||
@manager.route("/datasets/<dataset_id>/documents/<document_id>", methods=["PUT"]) # noqa: F821
|
||||
@token_required
|
||||
def update_doc(tenant_id, dataset_id, document_id):
|
||||
"""
|
||||
@ -297,7 +296,7 @@ def update_doc(tenant_id, dataset_id, document_id):
|
||||
return get_result()
|
||||
|
||||
|
||||
@manager.route("/datasets/<dataset_id>/documents/<document_id>", methods=["GET"])
|
||||
@manager.route("/datasets/<dataset_id>/documents/<document_id>", methods=["GET"]) # noqa: F821
|
||||
@token_required
|
||||
def download(tenant_id, dataset_id, document_id):
|
||||
"""
|
||||
@ -361,7 +360,7 @@ def download(tenant_id, dataset_id, document_id):
|
||||
)
|
||||
|
||||
|
||||
@manager.route("/datasets/<dataset_id>/documents", methods=["GET"])
|
||||
@manager.route("/datasets/<dataset_id>/documents", methods=["GET"]) # noqa: F821
|
||||
@token_required
|
||||
def list_docs(dataset_id, tenant_id):
|
||||
"""
|
||||
@ -495,7 +494,7 @@ def list_docs(dataset_id, tenant_id):
|
||||
return get_result(data={"total": tol, "docs": renamed_doc_list})
|
||||
|
||||
|
||||
@manager.route("/datasets/<dataset_id>/documents", methods=["DELETE"])
|
||||
@manager.route("/datasets/<dataset_id>/documents", methods=["DELETE"]) # noqa: F821
|
||||
@token_required
|
||||
def delete(tenant_id, dataset_id):
|
||||
"""
|
||||
@ -587,7 +586,7 @@ def delete(tenant_id, dataset_id):
|
||||
return get_result()
|
||||
|
||||
|
||||
@manager.route("/datasets/<dataset_id>/chunks", methods=["POST"])
|
||||
@manager.route("/datasets/<dataset_id>/chunks", methods=["POST"]) # noqa: F821
|
||||
@token_required
|
||||
def parse(tenant_id, dataset_id):
|
||||
"""
|
||||
@ -654,7 +653,7 @@ def parse(tenant_id, dataset_id):
|
||||
return get_result()
|
||||
|
||||
|
||||
@manager.route("/datasets/<dataset_id>/chunks", methods=["DELETE"])
|
||||
@manager.route("/datasets/<dataset_id>/chunks", methods=["DELETE"]) # noqa: F821
|
||||
@token_required
|
||||
def stop_parsing(tenant_id, dataset_id):
|
||||
"""
|
||||
@ -712,7 +711,7 @@ def stop_parsing(tenant_id, dataset_id):
|
||||
return get_result()
|
||||
|
||||
|
||||
@manager.route("/datasets/<dataset_id>/documents/<document_id>/chunks", methods=["GET"])
|
||||
@manager.route("/datasets/<dataset_id>/documents/<document_id>/chunks", methods=["GET"]) # noqa: F821
|
||||
@token_required
|
||||
def list_chunks(tenant_id, dataset_id, document_id):
|
||||
"""
|
||||
@ -844,9 +843,10 @@ def list_chunks(tenant_id, dataset_id, document_id):
|
||||
"doc_id": sres.field[id]["doc_id"],
|
||||
"docnm_kwd": sres.field[id]["docnm_kwd"],
|
||||
"important_kwd": sres.field[id].get("important_kwd", []),
|
||||
"question_kwd": sres.field[id].get("question_kwd", []),
|
||||
"img_id": sres.field[id].get("img_id", ""),
|
||||
"available_int": sres.field[id].get("available_int", 1),
|
||||
"positions": sres.field[id].get("position_int", "").split("\t"),
|
||||
"positions": sres.field[id].get("position_int", []),
|
||||
}
|
||||
if len(d["positions"]) % 5 == 0:
|
||||
poss = []
|
||||
@ -879,6 +879,7 @@ def list_chunks(tenant_id, dataset_id, document_id):
|
||||
"content_with_weight": "content",
|
||||
"doc_id": "document_id",
|
||||
"important_kwd": "important_keywords",
|
||||
"question_kwd": "questions",
|
||||
"img_id": "image_id",
|
||||
"available_int": "available",
|
||||
}
|
||||
@ -894,7 +895,7 @@ def list_chunks(tenant_id, dataset_id, document_id):
|
||||
return get_result(data=res)
|
||||
|
||||
|
||||
@manager.route(
@manager.route( # noqa: F821
    "/datasets/<dataset_id>/documents/<document_id>/chunks", methods=["POST"]
)
@token_required
@@ -974,14 +975,16 @@ def add_chunk(tenant_id, dataset_id, document_id):
    if not req.get("content"):
        return get_error_data_result(message="`content` is required")
    if "important_keywords" in req:
        if type(req["important_keywords"]) != list:
        if not isinstance(req["important_keywords"], list):
            return get_error_data_result(
                "`important_keywords` is required to be a list"
            )
    md5 = hashlib.md5()
    md5.update((req["content"] + document_id).encode("utf-8"))

    chunk_id = md5.hexdigest()
    if "questions" in req:
        if not isinstance(req["questions"], list):
            return get_error_data_result(
                "`questions` is required to be a list"
            )
    chunk_id = xxhash.xxh64((req["content"] + document_id).encode("utf-8")).hexdigest()
    d = {
        "id": chunk_id,
        "content_ltks": rag_tokenizer.tokenize(req["content"]),
@@ -992,6 +995,10 @@ def add_chunk(tenant_id, dataset_id, document_id):
    d["important_tks"] = rag_tokenizer.tokenize(
        " ".join(req.get("important_keywords", []))
    )
    d["question_kwd"] = req.get("questions", [])
    d["question_tks"] = rag_tokenizer.tokenize(
        "\n".join(req.get("questions", []))
    )
    d["create_time"] = str(datetime.datetime.now()).replace("T", " ")[:19]
    d["create_timestamp_flt"] = datetime.datetime.now().timestamp()
    d["kb_id"] = dataset_id
@@ -1001,7 +1008,7 @@ def add_chunk(tenant_id, dataset_id, document_id):
    embd_mdl = TenantLLMService.model_instance(
        tenant_id, LLMType.EMBEDDING.value, embd_id
    )
    v, c = embd_mdl.encode([doc.name, req["content"]])
    v, c = embd_mdl.encode([doc.name, req["content"] if not d["question_kwd"] else "\n".join(d["question_kwd"])])
    v = 0.1 * v[0] + 0.9 * v[1]
    d["q_%d_vec" % len(v)] = v.tolist()
    settings.docStoreConn.insert([d], search.index_name(tenant_id), dataset_id)
@@ -1013,6 +1020,7 @@ def add_chunk(tenant_id, dataset_id, document_id):
        "content_with_weight": "content",
        "doc_id": "document_id",
        "important_kwd": "important_keywords",
        "question_kwd": "questions",
        "kb_id": "dataset_id",
        "create_timestamp_flt": "create_timestamp",
        "create_time": "create_time",
@@ -1027,7 +1035,7 @@ def add_chunk(tenant_id, dataset_id, document_id):
    # return get_result(data={"chunk_id": chunk_id})
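Two behavioral changes in add_chunk are worth isolating: chunk ids move from MD5 to the faster non-cryptographic xxhash64, and when a chunk carries questions the embedding is computed over them instead of the raw content, still blended 10/90 with the document-title vector. A standalone sketch of both; xxhash is the real PyPI package, while embed stands in for the embedding model's encode:

import xxhash

def chunk_id_for(content: str, document_id: str) -> str:
    # deterministic id over content + parent document; xxhash64 is far cheaper than MD5
    return xxhash.xxh64((content + document_id).encode("utf-8")).hexdigest()

def chunk_vector(embed, doc_name: str, content: str, questions=None):
    # embed the questions when present, otherwise the content itself
    text = "\n".join(questions) if questions else content
    v_name, v_text = embed([doc_name, text])
    # 10% document-title signal, 90% chunk signal, the same weights as the hunk above
    return [0.1 * a + 0.9 * b for a, b in zip(v_name, v_text)]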
@manager.route(
@manager.route( # noqa: F821
    "datasets/<dataset_id>/documents/<document_id>/chunks", methods=["DELETE"]
)
@token_required
@@ -1087,7 +1095,7 @@ def rm_chunk(tenant_id, dataset_id, document_id):
    return get_result(message=f"deleted {chunk_number} chunks")


@manager.route(
@manager.route( # noqa: F821
    "/datasets/<dataset_id>/documents/<document_id>/chunks/<chunk_id>", methods=["PUT"]
)
@token_required
@@ -1166,8 +1174,13 @@ def update_chunk(tenant_id, dataset_id, document_id, chunk_id):
    if "important_keywords" in req:
        if not isinstance(req["important_keywords"], list):
            return get_error_data_result("`important_keywords` should be a list")
        d["important_kwd"] = req.get("important_keywords")
        d["important_kwd"] = req.get("important_keywords", [])
        d["important_tks"] = rag_tokenizer.tokenize(" ".join(req["important_keywords"]))
    if "questions" in req:
        if not isinstance(req["questions"], list):
            return get_error_data_result("`questions` should be a list")
        d["question_kwd"] = req.get("questions")
        d["question_tks"] = rag_tokenizer.tokenize("\n".join(req["questions"]))
    if "available" in req:
        d["available_int"] = int(req["available"])
    embd_id = DocumentService.get_embd_id(document_id)
@@ -1185,14 +1198,14 @@ def update_chunk(tenant_id, dataset_id, document_id, chunk_id):
            d, arr[0], arr[1], not any([rag_tokenizer.is_chinese(t) for t in q + a])
        )

    v, c = embd_mdl.encode([doc.name, d["content_with_weight"]])
    v, c = embd_mdl.encode([doc.name, d["content_with_weight"] if not d.get("question_kwd") else "\n".join(d["question_kwd"])])
    v = 0.1 * v[0] + 0.9 * v[1] if doc.parser_id != ParserType.QA else v[1]
    d["q_%d_vec" % len(v)] = v.tolist()
    settings.docStoreConn.update({"id": chunk_id}, d, search.index_name(tenant_id), dataset_id)
    return get_result()
@manager.route("/retrieval", methods=["POST"])
|
||||
@manager.route("/retrieval", methods=["POST"]) # noqa: F821
|
||||
@token_required
|
||||
def retrieval_test(tenant_id):
|
||||
"""
|
||||
@ -1353,6 +1366,7 @@ def retrieval_test(tenant_id):
|
||||
"content_with_weight": "content",
|
||||
"doc_id": "document_id",
|
||||
"important_kwd": "important_keywords",
|
||||
"question_kwd": "questions",
|
||||
"docnm_kwd": "document_keyword",
|
||||
}
|
||||
rename_chunk = {}
|
||||
|
||||
@@ -15,17 +15,19 @@
#
import re
import json
from copy import deepcopy
from uuid import uuid4
from api.db import LLMType
from flask import request, Response

from api.db.services.conversation_service import ConversationService, iframe_completion
from api.db.services.conversation_service import completion as rag_completion
from api.db.services.canvas_service import completion as agent_completion
from api.db.services.dialog_service import ask
from agent.canvas import Canvas
from api.db import StatusEnum
from api.db.db_models import API4Conversation
from api.db.db_models import APIToken
from api.db.services.api_service import API4ConversationService
from api.db.services.canvas_service import UserCanvasService
from api.db.services.dialog_service import DialogService, ConversationService, chat
from api.db.services.dialog_service import DialogService
from api.db.services.knowledgebase_service import KnowledgebaseService
from api.utils import get_uuid
from api.utils.api_utils import get_error_data_result
@@ -33,9 +35,9 @@ from api.utils.api_utils import get_result, token_required
from api.db.services.llm_service import LLMBundle
@manager.route('/chats/<chat_id>/sessions', methods=['POST'])
@manager.route('/chats/<chat_id>/sessions', methods=['POST']) # noqa: F821
@token_required
def create(tenant_id,chat_id):
def create(tenant_id, chat_id):
    req = request.json
    req["dialog_id"] = chat_id
    dia = DialogService.query(tenant_id=tenant_id, id=req["dialog_id"], status=StatusEnum.VALID.value)
@@ -45,7 +47,7 @@ def create(tenant_id,chat_id):
        "id": get_uuid(),
        "dialog_id": req["dialog_id"],
        "name": req.get("name", "New session"),
        "message": [{"role": "assistant", "content": "Hi! I am your assistant,can I help you?"}]
        "message": [{"role": "assistant", "content": dia[0].prompt_config.get("prologue")}]
    }
    if not conv.get("name"):
        return get_error_data_result(message="`name` can not be empty.")
@@ -60,39 +62,39 @@ def create(tenant_id,chat_id):
    return get_result(data=conv)


@manager.route('/agents/<agent_id>/sessions', methods=['POST'])
@manager.route('/agents/<agent_id>/sessions', methods=['POST']) # noqa: F821
@token_required
def create_agent_session(tenant_id, agent_id):
    req = request.json
    e, cvs = UserCanvasService.get_by_id(agent_id)
    if not e:
        return get_error_data_result("Agent not found.")
    if cvs.user_id != tenant_id:
        return get_error_data_result(message="You do not own the agent.")

    if not isinstance(cvs.dsl, str):
        cvs.dsl = json.dumps(cvs.dsl, ensure_ascii=False)

    canvas = Canvas(cvs.dsl, tenant_id)
    if canvas.get_preset_param():
        return get_error_data_result("The agent can't create a session directly")
    conv = {
        "id": get_uuid(),
        "dialog_id": cvs.id,
        "user_id": req.get("usr_id","") if isinstance(req, dict) else "",
        "user_id": tenant_id,
        "message": [{"role": "assistant", "content": canvas.get_prologue()}],
        "source": "agent"
        "source": "agent",
        "dsl": json.loads(cvs.dsl)
    }
    API4ConversationService.save(**conv)
    conv["agent_id"] = conv.pop("dialog_id")
    return get_result(data=conv)
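A usage sketch for creating an agent session through the endpoint above. Host and token are placeholders, and per the new guard the agent must have no preset parameters or the call is refused:

import requests

BASE = "http://127.0.0.1:9380/api/v1"                  # placeholder host and port
HEADERS = {"Authorization": "Bearer <YOUR_API_KEY>"}   # placeholder token

agent_id = "<AGENT_ID>"                                # placeholder
resp = requests.post(f"{BASE}/agents/{agent_id}/sessions", headers=HEADERS, json={})
session = resp.json()["data"]  # carries id, agent_id, the prologue message, and the dsl
print(session["id"])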
@manager.route('/chats/<chat_id>/sessions/<session_id>', methods=['PUT'])
@manager.route('/chats/<chat_id>/sessions/<session_id>', methods=['PUT']) # noqa: F821
@token_required
def update(tenant_id,chat_id,session_id):
def update(tenant_id, chat_id, session_id):
    req = request.json
    req["dialog_id"] = chat_id
    conv_id = session_id
    conv = ConversationService.query(id=conv_id,dialog_id=chat_id)
    conv = ConversationService.query(id=conv_id, dialog_id=chat_id)
    if not conv:
        return get_error_data_result(message="Session does not exist")
    if not DialogService.query(id=chat_id, tenant_id=tenant_id, status=StatusEnum.VALID.value):
@@ -108,276 +110,63 @@ def update(tenant_id,chat_id,session_id):
    return get_result()
@manager.route('/chats/<chat_id>/completions', methods=['POST'])
@manager.route('/chats/<chat_id>/completions', methods=['POST']) # noqa: F821
@token_required
def completion(tenant_id, chat_id):
def chat_completion(tenant_id, chat_id):
    req = request.json
    if not req.get("session_id"):
        conv = {
            "id": get_uuid(),
            "dialog_id": chat_id,
            "name": req.get("name", "New session"),
            "message": [{"role": "assistant", "content": "Hi! I am your assistant,can I help you?"}]
        }
        if not conv.get("name"):
            return get_error_data_result(message="`name` can not be empty.")
        ConversationService.save(**conv)
        e, conv = ConversationService.get_by_id(conv["id"])
        session_id=conv.id
    else:
        session_id = req.get("session_id")
    if not req.get("question"):
        return get_error_data_result(message="Please input your question.")
    conv = ConversationService.query(id=session_id,dialog_id=chat_id)
    if not conv:
        return get_error_data_result(message="Session does not exist")
    conv = conv[0]
    if not DialogService.query(id=chat_id, tenant_id=tenant_id, status=StatusEnum.VALID.value):
        return get_error_data_result(message="You do not own the chat")
    msg = []
    question = {
        "content": req.get("question"),
        "role": "user",
        "id": str(uuid4())
    }
    conv.message.append(question)
    for m in conv.message:
        if m["role"] == "system": continue
        if m["role"] == "assistant" and not msg: continue
        msg.append(m)
    message_id = msg[-1].get("id")
    e, dia = DialogService.get_by_id(conv.dialog_id)

    if not conv.reference:
        conv.reference = []
    conv.message.append({"role": "assistant", "content": "", "id": message_id})
    conv.reference.append({"chunks": [], "doc_aggs": []})

    def fillin_conv(ans):
        reference = ans["reference"]
        temp_reference = deepcopy(ans["reference"])
        nonlocal conv, message_id
        if not conv.reference:
            conv.reference.append(temp_reference)
        else:
            conv.reference[-1] = temp_reference
        conv.message[-1] = {"role": "assistant", "content": ans["answer"],
                            "id": message_id, "prompt": ans.get("prompt", "")}
        if "chunks" in reference:
            chunks = reference.get("chunks")
            chunk_list = []
            for chunk in chunks:
                new_chunk = {
                    "id": chunk["chunk_id"],
                    "content": chunk["content_with_weight"],
                    "document_id": chunk["doc_id"],
                    "document_name": chunk["docnm_kwd"],
                    "dataset_id": chunk["kb_id"],
                    "image_id": chunk.get("image_id", ""),
                    "similarity": chunk["similarity"],
                    "vector_similarity": chunk["vector_similarity"],
                    "term_similarity": chunk["term_similarity"],
                    "positions": chunk.get("positions", []),
                }
                chunk_list.append(new_chunk)
            reference["chunks"] = chunk_list
        ans["id"] = message_id
        ans["session_id"]=session_id

    def stream():
        nonlocal dia, msg, req, conv
        try:
            for ans in chat(dia, msg, **req):
                fillin_conv(ans)
                yield "data:" + json.dumps({"code": 0, "data": ans}, ensure_ascii=False) + "\n\n"
            ConversationService.update_by_id(conv.id, conv.to_dict())
        except Exception as e:
            yield "data:" + json.dumps({"code": 500, "message": str(e),
                                        "data": {"answer": "**ERROR**: " + str(e),"reference": []}},
                                       ensure_ascii=False) + "\n\n"
        yield "data:" + json.dumps({"code": 0, "data": True}, ensure_ascii=False) + "\n\n"

    if not req or not req.get("session_id"):
        req = {"question":""}
    if not DialogService.query(tenant_id=tenant_id,id=chat_id,status=StatusEnum.VALID.value):
        return get_error_data_result(f"You don't own the chat {chat_id}")
    if req.get("session_id"):
        if not ConversationService.query(id=req["session_id"],dialog_id=chat_id):
            return get_error_data_result(f"You don't own the session {req['session_id']}")
    if req.get("stream", True):
        resp = Response(stream(), mimetype="text/event-stream")
        resp = Response(rag_completion(tenant_id, chat_id, **req), mimetype="text/event-stream")
        resp.headers.add_header("Cache-control", "no-cache")
        resp.headers.add_header("Connection", "keep-alive")
        resp.headers.add_header("X-Accel-Buffering", "no")
        resp.headers.add_header("Content-Type", "text/event-stream; charset=utf-8")

        return resp

    else:
        answer = None
        for ans in chat(dia, msg, **req):
        for ans in rag_completion(tenant_id, chat_id, **req):
            answer = ans
            fillin_conv(ans)
            ConversationService.update_by_id(conv.id, conv.to_dict())
            break
        return get_result(data=answer)
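With stream left at its default of True, the rewritten endpoint returns Server-Sent Events: each line is data: followed by a JSON envelope, and the stream ends with an envelope whose data field is literally true. A hedged client sketch, with base URL and headers as placeholders:

import json
import requests

def stream_answer(base, headers, chat_id, question, session_id=None):
    # yields incremental answer strings from the SSE stream
    payload = {"question": question, "stream": True}
    if session_id:
        payload["session_id"] = session_id
    with requests.post(f"{base}/chats/{chat_id}/completions",
                       headers=headers, json=payload, stream=True) as resp:
        for line in resp.iter_lines(decode_unicode=True):
            if not line or not line.startswith("data:"):
                continue
            event = json.loads(line[len("data:"):])
            if event.get("data") is True:  # end-of-stream marker
                break
            yield event["data"].get("answer", "")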
@manager.route('/agents/<agent_id>/completions', methods=['POST'])
@manager.route('/agents/<agent_id>/completions', methods=['POST']) # noqa: F821
@token_required
def agent_completion(tenant_id, agent_id):
    req = request.json

    e, cvs = UserCanvasService.get_by_id(agent_id)
    if not e:
        return get_error_data_result("Agent not found.")
    if cvs.user_id != tenant_id:
        return get_error_data_result(message="You do not own the agent.")
    if not isinstance(cvs.dsl, str):
        cvs.dsl = json.dumps(cvs.dsl, ensure_ascii=False)
    canvas = Canvas(cvs.dsl, tenant_id)

    if not req.get("session_id"):
        session_id = get_uuid()
        conv = {
            "id": session_id,
            "dialog_id": cvs.id,
            "user_id": req.get("user_id",""),
            "message": [{"role": "assistant", "content": canvas.get_prologue()}],
            "source": "agent"
        }
        API4ConversationService.save(**conv)
        conv = API4Conversation(**conv)
    else:
        session_id = req.get("session_id")
        e, conv = API4ConversationService.get_by_id(req["session_id"])
        if not e:
            return get_error_data_result(message="Session not found!")

    messages = conv.message
    question = req.get("question")
    if not question:
        return get_error_data_result("`question` is required.")
    question={
        "role":"user",
        "content":question,
        "id": str(uuid4())
    }
    messages.append(question)
    msg = []
    for m in messages:
        if m["role"] == "system":
            continue
        if m["role"] == "assistant" and not msg:
            continue
        msg.append(m)
    if not msg[-1].get("id"): msg[-1]["id"] = get_uuid()
    message_id = msg[-1]["id"]

    if "quote" not in req: req["quote"] = False
    stream = req.get("stream", True)

    def fillin_conv(ans):
        reference = ans["reference"]
        temp_reference = deepcopy(ans["reference"])
        nonlocal conv, message_id
        if not conv.reference:
            conv.reference.append(temp_reference)
def agent_completions(tenant_id, agent_id):
    req = request.json
    cvs = UserCanvasService.query(user_id=tenant_id, id=agent_id)
    if not cvs:
        return get_error_data_result(f"You don't own the agent {agent_id}")
    if req.get("session_id"):
        conv = API4ConversationService.query(id=req["session_id"], dialog_id=agent_id)
        if not conv:
            return get_error_data_result(f"You don't own the session {req['session_id']}")
    else:
        conv.reference[-1] = temp_reference
        conv.message[-1] = {"role": "assistant", "content": ans["answer"], "id": message_id}
        if "chunks" in reference:
            chunks = reference.get("chunks")
            chunk_list = []
            for chunk in chunks:
                new_chunk = {
                    "id": chunk["chunk_id"],
                    "content": chunk["content_with_weight"],
                    "document_id": chunk["doc_id"],
                    "document_name": chunk["docnm_kwd"],
                    "dataset_id": chunk["kb_id"],
                    "image_id": chunk["image_id"],
                    "similarity": chunk["similarity"],
                    "vector_similarity": chunk["vector_similarity"],
                    "term_similarity": chunk["term_similarity"],
                    "positions": chunk["positions"],
                }
                chunk_list.append(new_chunk)
            reference["chunks"] = chunk_list
        ans["id"] = message_id
        ans["session_id"] = session_id

    def rename_field(ans):
        reference = ans['reference']
        if not isinstance(reference, dict):
            return
        for chunk_i in reference.get('chunks', []):
            if 'docnm_kwd' in chunk_i:
                chunk_i['doc_name'] = chunk_i['docnm_kwd']
                chunk_i.pop('docnm_kwd')
    conv.message.append(msg[-1])

    if not conv.reference:
        conv.reference = []
    conv.message.append({"role": "assistant", "content": "", "id": message_id})
    conv.reference.append({"chunks": [], "doc_aggs": []})

    final_ans = {"reference": [], "content": ""}

    canvas.messages.append(msg[-1])
    canvas.add_user_input(msg[-1]["content"])

    if stream:
        def sse():
            nonlocal answer, cvs
            try:
                for ans in canvas.run(stream=True):
                    if ans.get("running_status"):
                        yield "data:" + json.dumps({"code": 0, "message": "",
                                                    "data": {"answer": ans["content"],
                                                             "running_status": True}},
                                                   ensure_ascii=False) + "\n\n"
                        continue
                    for k in ans.keys():
                        final_ans[k] = ans[k]
                    ans = {"answer": ans["content"], "reference": ans.get("reference", [])}
                    fillin_conv(ans)
                    rename_field(ans)
                    yield "data:" + json.dumps({"code": 0, "message": "", "data": ans},
                                               ensure_ascii=False) + "\n\n"

                canvas.messages.append({"role": "assistant", "content": final_ans["content"], "id": message_id})
                canvas.history.append(("assistant", final_ans["content"]))
                if final_ans.get("reference"):
                    canvas.reference.append(final_ans["reference"])
                cvs.dsl = json.loads(str(canvas))
                API4ConversationService.append_message(conv.id, conv.to_dict())
            except Exception as e:
                cvs.dsl = json.loads(str(canvas))
                API4ConversationService.append_message(conv.id, conv.to_dict())
                yield "data:" + json.dumps({"code": 500, "message": str(e),
                                            "data": {"answer": "**ERROR**: " + str(e), "reference": []}},
                                           ensure_ascii=False) + "\n\n"
            yield "data:" + json.dumps({"code": 0, "message": "", "data": True}, ensure_ascii=False) + "\n\n"

        resp = Response(sse(), mimetype="text/event-stream")
        resp.headers.add_header("Cache-control", "no-cache")
        resp.headers.add_header("Connection", "keep-alive")
        resp.headers.add_header("X-Accel-Buffering", "no")
        resp.headers.add_header("Content-Type", "text/event-stream; charset=utf-8")
        return resp

    for answer in canvas.run(stream=False):
        if answer.get("running_status"): continue
        final_ans["content"] = "\n".join(answer["content"]) if "content" in answer else ""
        canvas.messages.append({"role": "assistant", "content": final_ans["content"], "id": message_id})
        if final_ans.get("reference"):
            canvas.reference.append(final_ans["reference"])
        cvs.dsl = json.loads(str(canvas))

    result = {"answer": final_ans["content"], "reference": final_ans.get("reference", [])}
    fillin_conv(result)
    API4ConversationService.append_message(conv.id, conv.to_dict())
    rename_field(result)
    return get_result(data=result)
    req["question"]=""
    if req.get("stream", True):
        resp = Response(agent_completion(tenant_id, agent_id, **req), mimetype="text/event-stream")
        resp.headers.add_header("Cache-control", "no-cache")
        resp.headers.add_header("Connection", "keep-alive")
        resp.headers.add_header("X-Accel-Buffering", "no")
        resp.headers.add_header("Content-Type", "text/event-stream; charset=utf-8")
        return resp
    try:
        for answer in agent_completion(tenant_id, agent_id, **req):
            return get_result(data=answer)
    except Exception as e:
        return get_error_data_result(str(e))
@manager.route('/chats/<chat_id>/sessions', methods=['GET'])
@manager.route('/chats/<chat_id>/sessions', methods=['GET']) # noqa: F821
@token_required
def list_session(chat_id,tenant_id):
def list_session(tenant_id, chat_id):
    if not DialogService.query(tenant_id=tenant_id, id=chat_id, status=StatusEnum.VALID.value):
        return get_error_data_result(message=f"You don't own the assistant {chat_id}.")
    id = request.args.get("id")
@@ -389,7 +178,7 @@ def list_session(chat_id,tenant_id):
        desc = False
    else:
        desc = True
    convs = ConversationService.get_list(chat_id,page_number,items_per_page,orderby,desc,id,name)
    convs = ConversationService.get_list(chat_id, page_number, items_per_page, orderby, desc, id, name)
    if not convs:
        return get_result(data=[])
    for conv in convs:
@@ -415,7 +204,7 @@ def list_session(chat_id,tenant_id):
                    "document_id": chunk["doc_id"],
                    "document_name": chunk["docnm_kwd"],
                    "dataset_id": chunk["kb_id"],
                    "image_id": chunk["image_id"],
                    "image_id": chunk.get("image_id", ""),
                    "similarity": chunk["similarity"],
                    "vector_similarity": chunk["vector_similarity"],
                    "term_similarity": chunk["term_similarity"],
@@ -429,9 +218,64 @@ def list_session(chat_id,tenant_id):
    return get_result(data=convs)
@manager.route('/chats/<chat_id>/sessions', methods=["DELETE"])
|
||||
@manager.route('/agents/<agent_id>/sessions', methods=['GET']) # noqa: F821
|
||||
@token_required
|
||||
def delete(tenant_id,chat_id):
|
||||
def list_agent_session(tenant_id, agent_id):
|
||||
if not UserCanvasService.query(user_id=tenant_id, id=agent_id):
|
||||
return get_error_data_result(message=f"You don't own the agent {agent_id}.")
|
||||
id = request.args.get("id")
|
||||
if not API4ConversationService.query(id=id, user_id=tenant_id):
|
||||
return get_error_data_result(f"You don't own the session {id}")
|
||||
page_number = int(request.args.get("page", 1))
|
||||
items_per_page = int(request.args.get("page_size", 30))
|
||||
orderby = request.args.get("orderby", "update_time")
|
||||
if request.args.get("desc") == "False" or request.args.get("desc") == "false":
|
||||
desc = False
|
||||
else:
|
||||
desc = True
|
||||
convs = API4ConversationService.get_list(agent_id, tenant_id, page_number, items_per_page, orderby, desc, id)
|
||||
if not convs:
|
||||
return get_result(data=[])
|
||||
for conv in convs:
|
||||
conv['messages'] = conv.pop("message")
|
||||
infos = conv["messages"]
|
||||
for info in infos:
|
||||
if "prompt" in info:
|
||||
info.pop("prompt")
|
||||
conv["agent_id"] = conv.pop("dialog_id")
|
||||
if conv["reference"]:
|
||||
messages = conv["messages"]
|
||||
message_num = 0
|
||||
chunk_num = 0
|
||||
while message_num < len(messages):
|
||||
if message_num != 0 and messages[message_num]["role"] != "user":
|
||||
chunk_list = []
|
||||
if "chunks" in conv["reference"][chunk_num]:
|
||||
chunks = conv["reference"][chunk_num]["chunks"]
|
||||
for chunk in chunks:
|
||||
new_chunk = {
|
||||
"id": chunk["chunk_id"],
|
||||
"content": chunk["content"],
|
||||
"document_id": chunk["doc_id"],
|
||||
"document_name": chunk["docnm_kwd"],
|
||||
"dataset_id": chunk["kb_id"],
|
||||
"image_id": chunk.get("image_id", ""),
|
||||
"similarity": chunk["similarity"],
|
||||
"vector_similarity": chunk["vector_similarity"],
|
||||
"term_similarity": chunk["term_similarity"],
|
||||
"positions": chunk["positions"],
|
||||
}
|
||||
chunk_list.append(new_chunk)
|
||||
chunk_num += 1
|
||||
messages[message_num]["reference"] = chunk_list
|
||||
message_num += 1
|
||||
del conv["reference"]
|
||||
return get_result(data=convs)
|
||||
|
||||
|
||||
@manager.route('/chats/<chat_id>/sessions', methods=["DELETE"]) # noqa: F821
|
||||
@token_required
|
||||
def delete(tenant_id, chat_id):
|
||||
if not DialogService.query(id=chat_id, tenant_id=tenant_id, status=StatusEnum.VALID.value):
|
||||
return get_error_data_result(message="You don't own the chat")
|
||||
req = request.json
|
||||
@ -439,22 +283,23 @@ def delete(tenant_id,chat_id):
|
||||
if not req:
|
||||
ids = None
|
||||
else:
|
||||
ids=req.get("ids")
|
||||
ids = req.get("ids")
|
||||
|
||||
if not ids:
|
||||
conv_list = []
|
||||
for conv in convs:
|
||||
conv_list.append(conv.id)
|
||||
else:
|
||||
conv_list=ids
|
||||
conv_list = ids
|
||||
for id in conv_list:
|
||||
conv = ConversationService.query(id=id,dialog_id=chat_id)
|
||||
conv = ConversationService.query(id=id, dialog_id=chat_id)
|
||||
if not conv:
|
||||
return get_error_data_result(message="The chat doesn't own the session")
|
||||
ConversationService.delete_by_id(id)
|
||||
return get_result()
|
||||
|
||||
@manager.route('/sessions/ask', methods=['POST'])
|
||||
|
||||
@manager.route('/sessions/ask', methods=['POST']) # noqa: F821
|
||||
@token_required
|
||||
def ask_about(tenant_id):
|
||||
req = request.json
|
||||
@ -462,17 +307,18 @@ def ask_about(tenant_id):
|
||||
return get_error_data_result("`question` is required.")
|
||||
if not req.get("dataset_ids"):
|
||||
return get_error_data_result("`dataset_ids` is required.")
|
||||
if not isinstance(req.get("dataset_ids"),list):
|
||||
if not isinstance(req.get("dataset_ids"), list):
|
||||
return get_error_data_result("`dataset_ids` should be a list.")
|
||||
req["kb_ids"]=req.pop("dataset_ids")
|
||||
req["kb_ids"] = req.pop("dataset_ids")
|
||||
for kb_id in req["kb_ids"]:
|
||||
if not KnowledgebaseService.accessible(kb_id,tenant_id):
|
||||
if not KnowledgebaseService.accessible(kb_id, tenant_id):
|
||||
return get_error_data_result(f"You don't own the dataset {kb_id}.")
|
||||
kbs = KnowledgebaseService.query(id=kb_id)
|
||||
kb = kbs[0]
|
||||
if kb.chunk_num == 0:
|
||||
return get_error_data_result(f"The dataset {kb_id} doesn't own parsed file")
|
||||
uid = tenant_id
|
||||
|
||||
def stream():
|
||||
nonlocal req, uid
|
||||
try:
|
||||
@ -492,7 +338,7 @@ def ask_about(tenant_id):
|
||||
return resp
|
||||
|
||||
|
||||
@manager.route('/sessions/related_questions', methods=['POST'])
|
||||
@manager.route('/sessions/related_questions', methods=['POST']) # noqa: F821
|
||||
@token_required
|
||||
def related_questions(tenant_id):
|
||||
req = request.json
|
||||
@ -529,3 +375,59 @@ Keywords: {question}
|
||||
Related search terms:
|
||||
"""}], {"temperature": 0.9})
|
||||
return get_result(data=[re.sub(r"^[0-9]\. ", "", a) for a in ans.split("\n") if re.match(r"^[0-9]\. ", a)])
|
||||
|
||||
|
||||
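related_questions trusts the chat model to return a numbered list, then keeps only lines that really look like "N. ..." and strips the prefix. The same filter in isolation, slightly generalized to multi-digit indices:

import re

def parse_numbered_list(text: str) -> list:
    # keep only lines shaped like "3. some suggestion", then drop the "3. " prefix
    return [re.sub(r"^[0-9]+\. ", "", line)
            for line in text.split("\n")
            if re.match(r"^[0-9]+\. ", line)]

assert parse_numbered_list("1. foo\nnoise\n2. bar") == ["foo", "bar"]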
@manager.route('/chatbots/<dialog_id>/completions', methods=['POST']) # noqa: F821
def chatbot_completions(dialog_id):
    req = request.json

    token = request.headers.get('Authorization').split()
    if len(token) != 2:
        return get_error_data_result(message='Authorization is not valid!"')
    token = token[1]
    objs = APIToken.query(beta=token)
    if not objs:
        return get_error_data_result(message='Token is not valid!"')

    if "quote" not in req:
        req["quote"] = False

    if req.get("stream", True):
        resp = Response(iframe_completion(dialog_id, **req), mimetype="text/event-stream")
        resp.headers.add_header("Cache-control", "no-cache")
        resp.headers.add_header("Connection", "keep-alive")
        resp.headers.add_header("X-Accel-Buffering", "no")
        resp.headers.add_header("Content-Type", "text/event-stream; charset=utf-8")
        return resp

    for answer in iframe_completion(dialog_id, **req):
        return get_result(data=answer)


@manager.route('/agentbots/<agent_id>/completions', methods=['POST']) # noqa: F821
def agent_bot_completions(agent_id):
    req = request.json

    token = request.headers.get('Authorization').split()
    if len(token) != 2:
        return get_error_data_result(message='Authorization is not valid!"')
    token = token[1]
    objs = APIToken.query(beta=token)
    if not objs:
        return get_error_data_result(message='Token is not valid!"')

    if "quote" not in req:
        req["quote"] = False

    if req.get("stream", True):
        resp = Response(agent_completion(objs[0].tenant_id, agent_id, **req), mimetype="text/event-stream")
        resp.headers.add_header("Cache-control", "no-cache")
        resp.headers.add_header("Connection", "keep-alive")
        resp.headers.add_header("X-Accel-Buffering", "no")
        resp.headers.add_header("Content-Type", "text/event-stream; charset=utf-8")
        return resp

    for answer in agent_completion(objs[0].tenant_id, agent_id, **req):
        return get_result(data=answer)
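Both chatbot endpoints above authenticate with the beta token carried in a two-part Authorization header; note that they call .split() on the raw header, so a request with no Authorization header at all would raise before the length check. A tolerant standalone parse looks like:

def parse_bearer(header_value):
    # expects "<scheme> <token>", e.g. "Bearer xyz"; returns the token or None
    parts = (header_value or "").split()
    return parts[1] if len(parts) == 2 else None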
@@ -38,7 +38,7 @@ from timeit import default_timer as timer
from rag.utils.redis_conn import REDIS_CONN


@manager.route("/version", methods=["GET"])
@manager.route("/version", methods=["GET"]) # noqa: F821
@login_required
def version():
    """
@@ -61,7 +61,7 @@ def version():
    return get_json_result(data=get_ragflow_version())


@manager.route("/status", methods=["GET"])
@manager.route("/status", methods=["GET"]) # noqa: F821
@login_required
def status():
    """
@@ -170,7 +170,7 @@ def status():
    return get_json_result(data=res)


@manager.route("/new_token", methods=["POST"])
@manager.route("/new_token", methods=["POST"]) # noqa: F821
@login_required
def new_token():
    """
@@ -205,6 +205,7 @@ def new_token():
        obj = {
            "tenant_id": tenant_id,
            "token": generate_confirmation_token(tenant_id),
            "beta": generate_confirmation_token(generate_confirmation_token(tenant_id)).replace("ragflow-", "")[:32],
            "create_time": current_timestamp(),
            "create_date": datetime_format(datetime.now()),
            "update_time": None,
@@ -219,7 +220,7 @@ def new_token():
        return server_error_response(e)


@manager.route("/token_list", methods=["GET"])
@manager.route("/token_list", methods=["GET"]) # noqa: F821
@login_required
def token_list():
    """
@@ -255,13 +256,19 @@ def token_list():
        if not tenants:
            return get_data_error_result(message="Tenant not found!")

        objs = APITokenService.query(tenant_id=tenants[0].tenant_id)
        return get_json_result(data=[o.to_dict() for o in objs])
        tenant_id = tenants[0].tenant_id
        objs = APITokenService.query(tenant_id=tenant_id)
        objs = [o.to_dict() for o in objs]
        for o in objs:
            if not o["beta"]:
                o["beta"] = generate_confirmation_token(generate_confirmation_token(tenants[0].tenant_id)).replace("ragflow-", "")[:32]
                APITokenService.filter_update([APIToken.tenant_id == tenant_id, APIToken.token == o["token"]], o)
        return get_json_result(data=objs)
    except Exception as e:
        return server_error_response(e)


@manager.route("/token/<token>", methods=["DELETE"])
@manager.route("/token/<token>", methods=["DELETE"]) # noqa: F821
@login_required
def rm(token):
    """
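The beta token used by the embeddable chatbot and agent-bot endpoints is derived deterministically from the tenant's main token rather than stored only at creation, which is what lets token_list backfill it for rows created before the column existed. The derivation under a stand-in make_token; the real code uses generate_confirmation_token:

def derive_beta_token(tenant_id: str, make_token) -> str:
    # double-wrap the tenant id, drop the "ragflow-" prefix, keep 32 chars,
    # mirroring the derivation used by new_token and the token_list backfill
    return make_token(make_token(tenant_id)).replace("ragflow-", "")[:32]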
@@ -26,7 +26,7 @@ from api.utils import get_uuid, delta_seconds
from api.utils.api_utils import get_json_result, validate_request, server_error_response, get_data_error_result


@manager.route("/<tenant_id>/user/list", methods=["GET"])
@manager.route("/<tenant_id>/user/list", methods=["GET"]) # noqa: F821
@login_required
def user_list(tenant_id):
    if current_user.id != tenant_id:
@@ -44,7 +44,7 @@ def user_list(tenant_id):
        return server_error_response(e)


@manager.route('/<tenant_id>/user', methods=['POST'])
@manager.route('/<tenant_id>/user', methods=['POST']) # noqa: F821
@login_required
@validate_request("email")
def create(tenant_id):
@@ -55,32 +55,36 @@ def create(tenant_id):
            code=settings.RetCode.AUTHENTICATION_ERROR)

    req = request.json
    usrs = UserService.query(email=req["email"])
    if not usrs:
    invite_user_email = req["email"]
    invite_users = UserService.query(email=invite_user_email)
    if not invite_users:
        return get_data_error_result(message="User not found.")

    user_id = usrs[0].id
    user_tenants = UserTenantService.query(user_id=user_id, tenant_id=tenant_id)
    user_id_to_invite = invite_users[0].id
    user_tenants = UserTenantService.query(user_id=user_id_to_invite, tenant_id=tenant_id)
    if user_tenants:
        if user_tenants[0].status == UserTenantRole.NORMAL.value:
            return get_data_error_result(message="This user is in the team already.")
        return get_data_error_result(message="Invitation notification is sent.")
        user_tenant_role = user_tenants[0].role
        if user_tenant_role == UserTenantRole.NORMAL:
            return get_data_error_result(message=f"{invite_user_email} is already in the team.")
        if user_tenant_role == UserTenantRole.OWNER:
            return get_data_error_result(message=f"{invite_user_email} is the owner of the team.")
        return get_data_error_result(message=f"{invite_user_email} is in the team, but the role: {user_tenant_role} is invalid.")

    UserTenantService.save(
        id=get_uuid(),
        user_id=user_id,
        user_id=user_id_to_invite,
        tenant_id=tenant_id,
        invited_by=current_user.id,
        role=UserTenantRole.INVITE,
        status=StatusEnum.VALID.value)

    usr = usrs[0].to_dict()
    usr = invite_users[0].to_dict()
    usr = {k: v for k, v in usr.items() if k in ["id", "avatar", "email", "nickname"]}

    return get_json_result(data=usr)


@manager.route('/<tenant_id>/user/<user_id>', methods=['DELETE'])
@manager.route('/<tenant_id>/user/<user_id>', methods=['DELETE']) # noqa: F821
@login_required
def rm(tenant_id, user_id):
    if current_user.id != tenant_id and current_user.id != user_id:
@@ -96,7 +100,7 @@ def rm(tenant_id, user_id):
        return server_error_response(e)


@manager.route("/list", methods=["GET"])
@manager.route("/list", methods=["GET"]) # noqa: F821
@login_required
def tenant_list():
    try:
@@ -108,7 +112,7 @@ def tenant_list():
        return server_error_response(e)


@manager.route("/agree/<tenant_id>", methods=["PUT"])
@manager.route("/agree/<tenant_id>", methods=["PUT"]) # noqa: F821
@login_required
def agree(tenant_id):
    try:
@@ -44,7 +44,7 @@ from api.db.services.file_service import FileService
from api.utils.api_utils import get_json_result, construct_response


@manager.route("/login", methods=["POST", "GET"])
@manager.route("/login", methods=["POST", "GET"]) # noqa: F821
def login():
    """
    User login endpoint.
@@ -115,7 +115,7 @@ def login():
    )


@manager.route("/github_callback", methods=["GET"])
@manager.route("/github_callback", methods=["GET"]) # noqa: F821
def github_callback():
    """
    GitHub OAuth callback endpoint.
@@ -200,7 +200,7 @@ def github_callback():
    return redirect("/?auth=%s" % user.get_id())


@manager.route("/feishu_callback", methods=["GET"])
@manager.route("/feishu_callback", methods=["GET"]) # noqa: F821
def feishu_callback():
    """
    Feishu OAuth callback endpoint.
@@ -330,12 +330,12 @@ def user_info_from_github(access_token):
        headers=headers,
    ).json()
    user_info["email"] = next(
        (email for email in email_info if email["primary"] == True), None
        (email for email in email_info if email["primary"]), None
    )["email"]
    return user_info


@manager.route("/logout", methods=["GET"])
@manager.route("/logout", methods=["GET"]) # noqa: F821
@login_required
def log_out():
    """
@@ -357,7 +357,7 @@ def log_out():
    return get_json_result(data=True)


@manager.route("/setting", methods=["POST"])
@manager.route("/setting", methods=["POST"]) # noqa: F821
@login_required
def setting_user():
    """
@@ -429,7 +429,7 @@ def setting_user():
    )


@manager.route("/info", methods=["GET"])
@manager.route("/info", methods=["GET"]) # noqa: F821
@login_required
def user_profile():
    """
@@ -531,7 +531,7 @@ def user_register(user_id, user):
    return UserService.query(email=user["email"])


@manager.route("/register", methods=["POST"])
@manager.route("/register", methods=["POST"]) # noqa: F821
@validate_request("nickname", "email", "password")
def user_add():
    """
@@ -617,7 +617,7 @@ def user_add():
    )


@manager.route("/tenant_info", methods=["GET"])
@manager.route("/tenant_info", methods=["GET"]) # noqa: F821
@login_required
def tenant_info():
    """
@@ -655,7 +655,7 @@ def tenant_info():
        return server_error_response(e)


@manager.route("/set_tenant_info", methods=["POST"])
@manager.route("/set_tenant_info", methods=["POST"]) # noqa: F821
@login_required
@validate_request("tenant_id", "asr_id", "embd_id", "img2txt_id", "llm_id")
def set_tenant_info():
@@ -130,7 +130,7 @@ def is_continuous_field(cls: typing.Type) -> bool:
    for p in cls.__bases__:
        if p in CONTINUOUS_FIELD_TYPE:
            return True
        elif p != Field and p != object:
        elif p is not Field and p is not object:
            if is_continuous_field(p):
                return True
    else:
@@ -703,6 +703,7 @@ class Knowledgebase(DataBaseModel):
        default=ParserType.NAIVE.value,
        index=True)
    parser_config = JSONField(null=False, default={"pages": [[1, 1000000]]})
    pagerank = IntegerField(default=0, index=False)
    status = CharField(
        max_length=1,
        null=True,
@@ -854,6 +855,8 @@ class Task(DataBaseModel):
        help_text="process message",
        default="")
    retry_count = IntegerField(default=0)
    digest = TextField(null=True, help_text="task digest", default="")
    chunk_ids = LongTextField(null=True, help_text="chunk ids", default="")


class Dialog(DataBaseModel):
@@ -933,6 +936,7 @@ class APIToken(DataBaseModel):
    token = CharField(max_length=255, null=False, index=True)
    dialog_id = CharField(max_length=32, null=False, index=True)
    source = CharField(max_length=16, null=True, help_text="none|agent|dialog", index=True)
    beta = CharField(max_length=255, null=True, index=True)

    class Meta:
        db_table = "api_token"
@@ -947,7 +951,7 @@ class API4Conversation(DataBaseModel):
    reference = JSONField(null=True, default=[])
    tokens = IntegerField(default=0)
    source = CharField(max_length=16, null=True, help_text="none|agent|dialog", index=True)

    dsl = JSONField(null=True, default={})
    duration = FloatField(default=0, index=True)
    round = IntegerField(default=0, index=True)
    thumb_up = IntegerField(default=0, index=True)
@@ -1070,3 +1074,34 @@ def migrate_db():
        )
    except Exception:
        pass
    try:
        migrate(
            migrator.add_column("api_4_conversation","dsl",JSONField(null=True, default={}))
        )
    except Exception:
        pass
    try:
        migrate(
            migrator.add_column("knowledgebase", "pagerank", IntegerField(default=0, index=False))
        )
    except Exception:
        pass
    try:
        migrate(
            migrator.add_column("api_token", "beta", CharField(max_length=255, null=True, index=True))
        )
    except Exception:
        pass
    try:
        migrate(
            migrator.add_column("task", "digest", TextField(null=True, help_text="task digest", default=""))
        )
    except Exception:
        pass

    try:
        migrate(
            migrator.add_column("task", "chunk_ids", LongTextField(null=True, help_text="chunk ids", default=""))
        )
    except Exception:
        pass
@@ -170,7 +170,7 @@ def add_graph_templates():
             cnvs = json.load(open(os.path.join(dir, fnm), "r"))
             try:
                 CanvasTemplateService.save(**cnvs)
-            except:
+            except Exception:
                 CanvasTemplateService.update_by_id(cnvs["id"], cnvs)
     except Exception:
         logging.exception("Add graph templates error: ")
@@ -15,13 +15,14 @@
 #
 import pathlib
 import re
-from .user_service import UserService
+from .user_service import UserService as UserService


 def duplicate_name(query_func, **kwargs):
     fnm = kwargs["name"]
     objs = query_func(**kwargs)
-    if not objs: return fnm
+    if not objs:
+        return fnm
     ext = pathlib.Path(fnm).suffix #.jpg
     nm = re.sub(r"%s$"%ext, "", fnm)
     r = re.search(r"\(([0-9]+)\)$", nm)
@@ -31,8 +32,8 @@ def duplicate_name(query_func, **kwargs):
         nm = re.sub(r"\([0-9]+\)$", "", nm)
         c += 1
         nm = f"{nm}({c})"
-    if ext: nm += f"{ext}"
+    if ext:
+        nm += f"{ext}"

     kwargs["name"] = nm
     return duplicate_name(query_func, **kwargs)
@@ -39,6 +39,22 @@ class APITokenService(CommonService):
 class API4ConversationService(CommonService):
     model = API4Conversation

+    @classmethod
+    @DB.connection_context()
+    def get_list(cls,dialog_id, tenant_id,
+                 page_number, items_per_page, orderby, desc, id):
+        sessions = cls.model.select().where(cls.model.dialog_id ==dialog_id)
+        if id:
+            sessions = sessions.where(cls.model.id == id)
+        if desc:
+            sessions = sessions.order_by(cls.model.getter_by(orderby).desc())
+        else:
+            sessions = sessions.order_by(cls.model.getter_by(orderby).asc())
+        sessions = sessions.where(cls.model.user_id == tenant_id)
+        sessions = sessions.paginate(page_number, items_per_page)
+
+        return list(sessions.dicts())
+
     @classmethod
     @DB.connection_context()
     def append_message(cls, id, conversation):
@@ -48,7 +64,8 @@ class API4ConversationService(CommonService):
     @classmethod
     @DB.connection_context()
     def stats(cls, tenant_id, from_date, to_date, source=None):
-        if len(to_date) == 10: to_date += " 23:59:59"
+        if len(to_date) == 10:
+            to_date += " 23:59:59"
         return cls.model.select(
             cls.model.create_date.truncate("day").alias("dt"),
             peewee.fn.COUNT(
@@ -13,10 +13,15 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 #
-from datetime import datetime
-import peewee
-from api.db.db_models import DB, API4Conversation, APIToken, Dialog, CanvasTemplate, UserCanvas
+import json
+import traceback
+from uuid import uuid4
+from agent.canvas import Canvas
+from api.db.db_models import DB, CanvasTemplate, UserCanvas, API4Conversation
+from api.db.services.api_service import API4ConversationService
 from api.db.services.common_service import CommonService
+from api.db.services.conversation_service import structure_answer
+from api.utils import get_uuid


 class CanvasTemplateService(CommonService):
@@ -25,3 +30,126 @@ class CanvasTemplateService(CommonService):

 class UserCanvasService(CommonService):
     model = UserCanvas
+
+    @classmethod
+    @DB.connection_context()
+    def get_list(cls, tenant_id,
+                 page_number, items_per_page, orderby, desc, id, title):
+        agents = cls.model.select()
+        if id:
+            agents = agents.where(cls.model.id == id)
+        if title:
+            agents = agents.where(cls.model.title == title)
+        agents = agents.where(cls.model.user_id == tenant_id)
+        if desc:
+            agents = agents.order_by(cls.model.getter_by(orderby).desc())
+        else:
+            agents = agents.order_by(cls.model.getter_by(orderby).asc())
+
+        agents = agents.paginate(page_number, items_per_page)
+
+        return list(agents.dicts())
+
+
+def completion(tenant_id, agent_id, question, session_id=None, stream=True, **kwargs):
+    e, cvs = UserCanvasService.get_by_id(agent_id)
+    assert e, "Agent not found."
+    assert cvs.user_id == tenant_id, "You do not own the agent."
+    if not isinstance(cvs.dsl,str):
+        cvs.dsl = json.dumps(cvs.dsl, ensure_ascii=False)
+    canvas = Canvas(cvs.dsl, tenant_id)
+    canvas.reset()
+    message_id = str(uuid4())
+    if not session_id:
+        query = canvas.get_preset_param()
+        if query:
+            for ele in query:
+                if not ele["optional"]:
+                    if not kwargs.get(ele["key"]):
+                        assert False, f"`{ele['key']}` is required"
+                    ele["value"] = kwargs[ele["key"]]
+                if ele["optional"]:
+                    if kwargs.get(ele["key"]):
+                        ele["value"] = kwargs[ele['key']]
+                    else:
+                        if "value" in ele:
+                            ele.pop("value")
+            cvs.dsl = json.loads(str(canvas))
+            temp_dsl = cvs.dsl
+            UserCanvasService.update_by_id(agent_id, cvs.to_dict())
+        else:
+            temp_dsl = json.loads(cvs.dsl)
+        session_id = get_uuid()
+        conv = {
+            "id": session_id,
+            "dialog_id": cvs.id,
+            "user_id": kwargs.get("user_id", ""),
+            "source": "agent",
+            "dsl": temp_dsl
+        }
+        API4ConversationService.save(**conv)
+        conv = API4Conversation(**conv)
+    else:
+        e, conv = API4ConversationService.get_by_id(session_id)
+        assert e, "Session not found!"
+        canvas = Canvas(json.dumps(conv.dsl), tenant_id)
+        canvas.messages.append({"role": "user", "content": question, "id": message_id})
+        canvas.add_user_input(question)
+        if not conv.message:
+            conv.message = []
+        conv.message.append({
+            "role": "user",
+            "content": question,
+            "id": message_id
+        })
+        if not conv.reference:
+            conv.reference = []
+        conv.reference.append({"chunks": [], "doc_aggs": []})

+    final_ans = {"reference": [], "content": ""}
+    if stream:
+        try:
+            for ans in canvas.run(stream=stream):
+                if ans.get("running_status"):
+                    yield "data:" + json.dumps({"code": 0, "message": "",
+                                                "data": {"answer": ans["content"],
+                                                         "running_status": True}},
+                                               ensure_ascii=False) + "\n\n"
+                    continue
+                for k in ans.keys():
+                    final_ans[k] = ans[k]
+                ans = {"answer": ans["content"], "reference": ans.get("reference", [])}
+                ans = structure_answer(conv, ans, message_id, session_id)
+                yield "data:" + json.dumps({"code": 0, "message": "", "data": ans},
+                                           ensure_ascii=False) + "\n\n"

+            canvas.messages.append({"role": "assistant", "content": final_ans["content"], "id": message_id})
+            canvas.history.append(("assistant", final_ans["content"]))
+            if final_ans.get("reference"):
+                canvas.reference.append(final_ans["reference"])
+            conv.dsl = json.loads(str(canvas))
+            API4ConversationService.append_message(conv.id, conv.to_dict())
+        except Exception as e:
+            traceback.print_exc()
+            conv.dsl = json.loads(str(canvas))
+            API4ConversationService.append_message(conv.id, conv.to_dict())
+            yield "data:" + json.dumps({"code": 500, "message": str(e),
+                                        "data": {"answer": "**ERROR**: " + str(e), "reference": []}},
+                                       ensure_ascii=False) + "\n\n"
+        yield "data:" + json.dumps({"code": 0, "message": "", "data": True}, ensure_ascii=False) + "\n\n"

+    else:
+        for answer in canvas.run(stream=False):
+            if answer.get("running_status"):
+                continue
+            final_ans["content"] = "\n".join(answer["content"]) if "content" in answer else ""
+            canvas.messages.append({"role": "assistant", "content": final_ans["content"], "id": message_id})
+            if final_ans.get("reference"):
+                canvas.reference.append(final_ans["reference"])
+            conv.dsl = json.loads(str(canvas))

+            result = {"answer": final_ans["content"], "reference": final_ans.get("reference", [])}
+            result = structure_answer(conv, result, message_id, session_id)
+            API4ConversationService.append_message(conv.id, conv.to_dict())
+            yield result
+            break
@@ -115,7 +115,7 @@ class CommonService:
         try:
             obj = cls.model.query(id=pid)[0]
             return True, obj
-        except Exception as e:
+        except Exception:
             return False, None

     @classmethod
api/db/services/conversation_service.py (new file, 229 additions)
@@ -0,0 +1,229 @@
+#
+# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+from uuid import uuid4
+from api.db import StatusEnum
+from api.db.db_models import Conversation, DB
+from api.db.services.api_service import API4ConversationService
+from api.db.services.common_service import CommonService
+from api.db.services.dialog_service import DialogService, chat
+from api.utils import get_uuid
+import json
+
+
+class ConversationService(CommonService):
+    model = Conversation
+
+    @classmethod
+    @DB.connection_context()
+    def get_list(cls,dialog_id,page_number, items_per_page, orderby, desc, id , name):
+        sessions = cls.model.select().where(cls.model.dialog_id ==dialog_id)
+        if id:
+            sessions = sessions.where(cls.model.id == id)
+        if name:
+            sessions = sessions.where(cls.model.name == name)
+        if desc:
+            sessions = sessions.order_by(cls.model.getter_by(orderby).desc())
+        else:
+            sessions = sessions.order_by(cls.model.getter_by(orderby).asc())
+
+        sessions = sessions.paginate(page_number, items_per_page)
+
+        return list(sessions.dicts())
+
+
+def structure_answer(conv, ans, message_id, session_id):
+    reference = ans["reference"]
+    if not isinstance(reference, dict):
+        reference = {}
+        ans["reference"] = {}
+
+    def get_value(d, k1, k2):
+        return d.get(k1, d.get(k2))
+    chunk_list = [{
+        "id": get_value(chunk, "chunk_id", "id"),
+        "content": get_value(chunk, "content", "content_with_weight"),
+        "document_id": get_value(chunk, "doc_id", "document_id"),
+        "document_name": get_value(chunk, "docnm_kwd", "document_name"),
+        "dataset_id": get_value(chunk, "kb_id", "dataset_id"),
+        "image_id": get_value(chunk, "image_id", "img_id"),
+        "positions": get_value(chunk, "positions", "position_int"),
+    } for chunk in reference.get("chunks", [])]
+
+    reference["chunks"] = chunk_list
+    ans["id"] = message_id
+    ans["session_id"] = session_id
+
+    if not conv:
+        return ans
+
+    if not conv.message:
+        conv.message = []
+    if not conv.message or conv.message[-1].get("role", "") != "assistant":
+        conv.message.append({"role": "assistant", "content": ans["answer"], "id": message_id})
+    else:
+        conv.message[-1] = {"role": "assistant", "content": ans["answer"], "id": message_id}
+    if conv.reference:
+        conv.reference[-1] = reference
+    return ans
+
+
+def completion(tenant_id, chat_id, question, name="New session", session_id=None, stream=True, **kwargs):
+    assert name, "`name` can not be empty."
+    dia = DialogService.query(id=chat_id, tenant_id=tenant_id, status=StatusEnum.VALID.value)
+    assert dia, "You do not own the chat."
+
+    if not session_id:
+        session_id = get_uuid()
+        conv = {
+            "id":session_id ,
+            "dialog_id": chat_id,
+            "name": name,
+            "message": [{"role": "assistant", "content": dia[0].prompt_config.get("prologue")}]
+        }
+        ConversationService.save(**conv)
+        yield "data:" + json.dumps({"code": 0, "message": "",
+                                    "data": {
+                                        "answer": conv["message"][0]["content"],
+                                        "reference": {},
+                                        "audio_binary": None,
+                                        "id": None,
+                                        "session_id": session_id
+                                    }},
+                                   ensure_ascii=False) + "\n\n"
+        yield "data:" + json.dumps({"code": 0, "message": "", "data": True}, ensure_ascii=False) + "\n\n"
+        return
+
+    conv = ConversationService.query(id=session_id, dialog_id=chat_id)
+    if not conv:
+        raise LookupError("Session does not exist")
+
+    conv = conv[0]
+    msg = []
+    question = {
+        "content": question,
+        "role": "user",
+        "id": str(uuid4())
+    }
+    conv.message.append(question)
+    for m in conv.message:
+        if m["role"] == "system":
+            continue
+        if m["role"] == "assistant" and not msg:
+            continue
+        msg.append(m)
+    message_id = msg[-1].get("id")
+    e, dia = DialogService.get_by_id(conv.dialog_id)
+
+    if not conv.reference:
+        conv.reference = []
+    conv.message.append({"role": "assistant", "content": "", "id": message_id})
+    conv.reference.append({"chunks": [], "doc_aggs": []})
+
+    if stream:
+        try:
+            for ans in chat(dia, msg, True, **kwargs):
+                ans = structure_answer(conv, ans, message_id, session_id)
+                yield "data:" + json.dumps({"code": 0, "data": ans}, ensure_ascii=False) + "\n\n"
+            ConversationService.update_by_id(conv.id, conv.to_dict())
+        except Exception as e:
+            yield "data:" + json.dumps({"code": 500, "message": str(e),
+                                        "data": {"answer": "**ERROR**: " + str(e), "reference": []}},
+                                       ensure_ascii=False) + "\n\n"
+        yield "data:" + json.dumps({"code": 0, "data": True}, ensure_ascii=False) + "\n\n"

+    else:
+        answer = None
+        for ans in chat(dia, msg, False, **kwargs):
+            answer = structure_answer(conv, ans, message_id, session_id)
+            ConversationService.update_by_id(conv.id, conv.to_dict())
+            break
+        yield answer
+
+
+def iframe_completion(dialog_id, question, session_id=None, stream=True, **kwargs):
+    e, dia = DialogService.get_by_id(dialog_id)
+    assert e, "Dialog not found"
+    if not session_id:
+        session_id = get_uuid()
+        conv = {
+            "id": session_id,
+            "dialog_id": dialog_id,
+            "user_id": kwargs.get("user_id", ""),
+            "message": [{"role": "assistant", "content": dia.prompt_config["prologue"]}]
+        }
+        API4ConversationService.save(**conv)
+        yield "data:" + json.dumps({"code": 0, "message": "",
+                                    "data": {
+                                        "answer": conv["message"][0]["content"],
+                                        "reference": {},
+                                        "audio_binary": None,
+                                        "id": None,
+                                        "session_id": session_id
+                                    }},
+                                   ensure_ascii=False) + "\n\n"
+        yield "data:" + json.dumps({"code": 0, "message": "", "data": True}, ensure_ascii=False) + "\n\n"
+        return
+    else:
+        session_id = session_id
+    e, conv = API4ConversationService.get_by_id(session_id)
+    assert e, "Session not found!"
+
+    if not conv.message:
+        conv.message = []
+    messages = conv.message
+    question = {
+        "role": "user",
+        "content": question,
+        "id": str(uuid4())
+    }
+    messages.append(question)
+
+    msg = []
+    for m in messages:
+        if m["role"] == "system":
+            continue
+        if m["role"] == "assistant" and not msg:
+            continue
+        msg.append(m)
+    if not msg[-1].get("id"):
+        msg[-1]["id"] = get_uuid()
+    message_id = msg[-1]["id"]
+
+    if not conv.reference:
+        conv.reference = []
+    conv.reference.append({"chunks": [], "doc_aggs": []})
+
+    if stream:
+        try:
+            for ans in chat(dia, msg, True, **kwargs):
+                ans = structure_answer(conv, ans, message_id, session_id)
+                yield "data:" + json.dumps({"code": 0, "message": "", "data": ans},
+                                           ensure_ascii=False) + "\n\n"
+            API4ConversationService.append_message(conv.id, conv.to_dict())
+        except Exception as e:
+            yield "data:" + json.dumps({"code": 500, "message": str(e),
+                                        "data": {"answer": "**ERROR**: " + str(e), "reference": []}},
+                                       ensure_ascii=False) + "\n\n"
+        yield "data:" + json.dumps({"code": 0, "message": "", "data": True}, ensure_ascii=False) + "\n\n"

+    else:
+        answer = None
+        for ans in chat(dia, msg, False, **kwargs):
+            answer = structure_answer(conv, ans, message_id, session_id)
+            API4ConversationService.append_message(conv.id, conv.to_dict())
+            break
+        yield answer
@@ -18,12 +18,13 @@ import binascii
 import os
 import json
 import re
+from collections import defaultdict
 from copy import deepcopy
 from timeit import default_timer as timer
 import datetime
 from datetime import timedelta
 from api.db import LLMType, ParserType,StatusEnum
-from api.db.db_models import Dialog, Conversation,DB
+from api.db.db_models import Dialog, DB
 from api.db.services.common_service import CommonService
 from api.db.services.knowledgebase_service import KnowledgebaseService
 from api.db.services.llm_service import LLMService, TenantLLMService, LLMBundle
@@ -60,27 +61,6 @@ class DialogService(CommonService):
         return list(chats.dicts())


-class ConversationService(CommonService):
-    model = Conversation
-
-    @classmethod
-    @DB.connection_context()
-    def get_list(cls,dialog_id,page_number, items_per_page, orderby, desc, id , name):
-        sessions = cls.model.select().where(cls.model.dialog_id ==dialog_id)
-        if id:
-            sessions = sessions.where(cls.model.id == id)
-        if name:
-            sessions = sessions.where(cls.model.name == name)
-        if desc:
-            sessions = sessions.order_by(cls.model.getter_by(orderby).desc())
-        else:
-            sessions = sessions.order_by(cls.model.getter_by(orderby).asc())
-
-        sessions = sessions.paginate(page_number, items_per_page)
-
-        return list(sessions.dicts())
-
-
 def message_fit_in(msg, max_length=4000):
     def count():
         nonlocal msg
@@ -106,21 +86,21 @@ def message_fit_in(msg, max_length=4000):
         return c, msg

     ll = num_tokens_from_string(msg_[0]["content"])
-    l = num_tokens_from_string(msg_[-1]["content"])
-    if ll / (ll + l) > 0.8:
+    ll2 = num_tokens_from_string(msg_[-1]["content"])
+    if ll / (ll + ll2) > 0.8:
         m = msg_[0]["content"]
-        m = encoder.decode(encoder.encode(m)[:max_length - l])
+        m = encoder.decode(encoder.encode(m)[:max_length - ll2])
         msg[0]["content"] = m
         return max_length, msg

     m = msg_[1]["content"]
-    m = encoder.decode(encoder.encode(m)[:max_length - l])
+    m = encoder.decode(encoder.encode(m)[:max_length - ll2])
     msg[1]["content"] = m
     return max_length, msg


 def llm_id2llm_type(llm_id):
-    llm_id = llm_id.split("@")[0]
+    llm_id, _ = TenantLLMService.split_model_name_and_factory(llm_id)
     fnm = os.path.join(get_project_base_directory(), "conf")
     llm_factories = json.load(open(os.path.join(fnm, "llm_factories.json"), "r"))
     for llm_factory in llm_factories["factory_llm_infos"]:
@@ -129,14 +109,36 @@ def llm_id2llm_type(llm_id):
             return llm["model_type"].strip(",")[-1]


+def kb_prompt(kbinfos, max_tokens):
+    knowledges = [ck["content_with_weight"] for ck in kbinfos["chunks"]]
+    used_token_count = 0
+    chunks_num = 0
+    for i, c in enumerate(knowledges):
+        used_token_count += num_tokens_from_string(c)
+        chunks_num += 1
+        if max_tokens * 0.97 < used_token_count:
+            knowledges = knowledges[:i]
+            break
+
+    doc2chunks = defaultdict(list)
+    for i, ck in enumerate(kbinfos["chunks"]):
+        if i >= chunks_num:
+            break
+        doc2chunks["docnm_kwd"].append(ck["content_with_weight"])
+
+    knowledges = []
+    for nm, chunks in doc2chunks.items():
+        txt = f"Document: {nm} \nContains the following relevant fragments:\n"
+        for i, chunk in enumerate(chunks, 1):
+            txt += f"{i}. {chunk}\n"
+        knowledges.append(txt)
+    return knowledges
+
+
 def chat(dialog, messages, stream=True, **kwargs):
     assert messages[-1]["role"] == "user", "The last content of this conversation is not from user."
     st = timer()
-    tmp = dialog.llm_id.split("@")
-    fid = None
-    llm_id = tmp[0]
-    if len(tmp)>1: fid = tmp[1]
-
+    llm_id, fid = TenantLLMService.split_model_name_and_factory(dialog.llm_id)
     llm = LLMService.query(llm_name=llm_id) if not fid else LLMService.query(llm_name=llm_id, fid=fid)
     if not llm:
         llm = TenantLLMService.query(tenant_id=dialog.tenant_id, llm_name=llm_id) if not fid else \
@@ -220,7 +222,7 @@ def chat(dialog, messages, stream=True, **kwargs):
                                         dialog.vector_similarity_weight,
                                         doc_ids=attachments,
                                         top=dialog.top_k, aggs=False, rerank_mdl=rerank_mdl)
-        knowledges = [ck["content_with_weight"] for ck in kbinfos["chunks"]]
+        knowledges = kb_prompt(kbinfos, max_tokens)
         logging.debug(
             "{}->{}".format(" ".join(questions), "\n->".join(knowledges)))
     retrieval_tm = timer()
@@ -261,7 +263,8 @@ def chat(dialog, messages, stream=True, **kwargs):
         idx = set([kbinfos["chunks"][int(i)]["doc_id"] for i in idx])
         recall_docs = [
             d for d in kbinfos["doc_aggs"] if d["doc_id"] in idx]
-        if not recall_docs: recall_docs = kbinfos["doc_aggs"]
+        if not recall_docs:
+            recall_docs = kbinfos["doc_aggs"]
         kbinfos["doc_aggs"] = recall_docs

         refs = deepcopy(kbinfos)
@@ -270,7 +273,7 @@ def chat(dialog, messages, stream=True, **kwargs):
             del c["vector"]

         if answer.lower().find("invalid key") >= 0 or answer.lower().find("invalid api") >= 0:
-            answer += " Please set LLM API-Key in 'User Setting -> Model Providers -> API-Key'"
+            answer += " Please set LLM API-Key in 'User Setting -> Model providers -> API-Key'"
         done_tm = timer()
         prompt += "\n\n### Elapsed\n - Refine Question: %.1f ms\n - Keywords: %.1f ms\n - Retrieval: %.1f ms\n - LLM: %.1f ms" % (
             (refineQ_tm - st) * 1000, (keyword_tm - refineQ_tm) * 1000, (retrieval_tm - keyword_tm) * 1000,
@@ -437,13 +440,15 @@ def relevant(tenant_id, llm_id, question, contents: list):
 Give a binary score 'yes' or 'no' score to indicate whether the document is relevant to the question.
 No other words needed except 'yes' or 'no'.
 """
-    if not contents:return False
+    if not contents:
+        return False
     contents = "Documents: \n" + " - ".join(contents)
     contents = f"Question: {question}\n" + contents
     if num_tokens_from_string(contents) >= chat_mdl.max_length - 4:
         contents = encoder.decode(encoder.encode(contents)[:chat_mdl.max_length - 4])
     ans = chat_mdl.chat(prompt, [{"role": "user", "content": contents}], {"temperature": 0.01})
-    if ans.lower().find("yes") >= 0: return True
+    if ans.lower().find("yes") >= 0:
+        return True
     return False
@@ -485,8 +490,10 @@ Requirements:
     ]
     _, msg = message_fit_in(msg, chat_mdl.max_length)
     kwd = chat_mdl.chat(prompt, msg[1:], {"temperature": 0.2})
-    if isinstance(kwd, tuple): kwd = kwd[0]
-    if kwd.find("**ERROR**") >=0: return ""
+    if isinstance(kwd, tuple):
+        kwd = kwd[0]
+    if kwd.find("**ERROR**") >=0:
+        return ""
     return kwd
@@ -512,8 +519,10 @@ Requirements:
     ]
     _, msg = message_fit_in(msg, chat_mdl.max_length)
     kwd = chat_mdl.chat(prompt, msg[1:], {"temperature": 0.2})
-    if isinstance(kwd, tuple): kwd = kwd[0]
-    if kwd.find("**ERROR**") >= 0: return ""
+    if isinstance(kwd, tuple):
+        kwd = kwd[0]
+    if kwd.find("**ERROR**") >= 0:
+        return ""
     return kwd
@@ -524,7 +533,8 @@ def full_question(tenant_id, llm_id, messages):
     chat_mdl = LLMBundle(tenant_id, LLMType.CHAT, llm_id)
     conv = []
     for m in messages:
-        if m["role"] not in ["user", "assistant"]: continue
+        if m["role"] not in ["user", "assistant"]:
+            continue
         conv.append("{}: {}".format(m["role"].upper(), m["content"]))
     conv = "\n".join(conv)
     today = datetime.date.today().isoformat()
@@ -585,7 +595,8 @@ Output: What's the weather in Rochester on {tomorrow}?


 def tts(tts_mdl, text):
-    if not tts_mdl or not text: return
+    if not tts_mdl or not text:
+        return
     bin = b""
     for chunk in tts_mdl.tts(text):
         bin += chunk
@@ -594,7 +605,6 @@ def tts(tts_mdl, text):

 def ask(question, kb_ids, tenant_id):
     kbs = KnowledgebaseService.get_by_ids(kb_ids)
-    tenant_ids = [kb.tenant_id for kb in kbs]
     embd_nms = list(set([kb.embd_id for kb in kbs]))

     is_kg = all([kb.parser_id == ParserType.KG for kb in kbs])
@@ -603,17 +613,9 @@ def ask(question, kb_ids, tenant_id):
     embd_mdl = LLMBundle(tenant_id, LLMType.EMBEDDING, embd_nms[0])
     chat_mdl = LLMBundle(tenant_id, LLMType.CHAT)
     max_tokens = chat_mdl.max_length
-
+    tenant_ids = list(set([kb.tenant_id for kb in kbs]))
     kbinfos = retr.retrieval(question, embd_mdl, tenant_ids, kb_ids, 1, 12, 0.1, 0.3, aggs=False)
-    knowledges = [ck["content_with_weight"] for ck in kbinfos["chunks"]]
-
-    used_token_count = 0
-    for i, c in enumerate(knowledges):
-        used_token_count += num_tokens_from_string(c)
-        if max_tokens * 0.97 < used_token_count:
-            knowledges = knowledges[:i]
-            break
-
+    knowledges = kb_prompt(kbinfos, max_tokens)
     prompt = """
 Role: You're a smart assistant. Your name is Miss R.
 Task: Summarize the information from knowledge bases and answer user's question.
@@ -623,29 +625,30 @@ def ask(question, kb_ids, tenant_id):
  - Answer with markdown format text.
  - Answer in language of user's question.
  - DO NOT make things up, especially for numbers.


 ### Information from knowledge bases
 %s


 The above is information from knowledge bases.

-"""%"\n".join(knowledges)
-
+""" % "\n".join(knowledges)
     msg = [{"role": "user", "content": question}]

     def decorate_answer(answer):
         nonlocal knowledges, kbinfos, prompt
         answer, idx = retr.insert_citations(answer,
-                                            [ck["content_ltks"]
-                                             for ck in kbinfos["chunks"]],
-                                            [ck["vector"]
-                                             for ck in kbinfos["chunks"]],
-                                            embd_mdl,
-                                            tkweight=0.7,
-                                            vtweight=0.3)
+                                             [ck["content_ltks"]
+                                              for ck in kbinfos["chunks"]],
+                                             [ck["vector"]
+                                              for ck in kbinfos["chunks"]],
+                                             embd_mdl,
+                                             tkweight=0.7,
+                                             vtweight=0.3)
         idx = set([kbinfos["chunks"][int(i)]["doc_id"] for i in idx])
         recall_docs = [
             d for d in kbinfos["doc_aggs"] if d["doc_id"] in idx]
-        if not recall_docs: recall_docs = kbinfos["doc_aggs"]
+        if not recall_docs:
+            recall_docs = kbinfos["doc_aggs"]
         kbinfos["doc_aggs"] = recall_docs
         refs = deepcopy(kbinfos)
         for c in refs["chunks"]:
@@ -14,7 +14,7 @@
 # limitations under the License.
 #
 import logging
-import hashlib
+import xxhash
 import json
 import random
 import re
@@ -282,6 +282,31 @@ class DocumentService(CommonService):
             return
         return docs[0]["embd_id"]

+    @classmethod
+    @DB.connection_context()
+    def get_chunking_config(cls, doc_id):
+        configs = (
+            cls.model.select(
+                cls.model.id,
+                cls.model.kb_id,
+                cls.model.parser_id,
+                cls.model.parser_config,
+                Knowledgebase.language,
+                Knowledgebase.embd_id,
+                Tenant.id.alias("tenant_id"),
+                Tenant.img2txt_id,
+                Tenant.asr_id,
+                Tenant.llm_id,
+            )
+            .join(Knowledgebase, on=(cls.model.kb_id == Knowledgebase.id))
+            .join(Tenant, on=(Knowledgebase.tenant_id == Tenant.id))
+            .where(cls.model.id == doc_id)
+        )
+        configs = configs.dicts()
+        if not configs:
+            return None
+        return configs[0]
+
     @classmethod
     @DB.connection_context()
     def get_doc_id_by_doc_name(cls, doc_name):
@@ -319,6 +344,8 @@ class DocumentService(CommonService):
                 old[k] = v

         dfs_update(d.parser_config, config)
+        if not config.get("raptor") and d.parser_config.get("raptor"):
+            del d.parser_config["raptor"]
         cls.update_by_id(id, {"parser_config": d.parser_config})

     @classmethod
@@ -407,17 +434,25 @@ class DocumentService(CommonService):


 def queue_raptor_tasks(doc):
+    chunking_config = DocumentService.get_chunking_config(doc["id"])
+    hasher = xxhash.xxh64()
+    for field in sorted(chunking_config.keys()):
+        hasher.update(str(chunking_config[field]).encode("utf-8"))
+
     def new_task():
         nonlocal doc
         return {
             "id": get_uuid(),
             "doc_id": doc["id"],
-            "from_page": 0,
-            "to_page": -1,
+            "from_page": 100000000,
+            "to_page": 100000000,
             "progress_msg": "Start to do RAPTOR (Recursive Abstractive Processing for Tree-Organized Retrieval)."
         }

     task = new_task()
+    for field in ["doc_id", "from_page", "to_page"]:
+        hasher.update(str(task.get(field, "")).encode("utf-8"))
+    task["digest"] = hasher.hexdigest()
     bulk_insert_into_db(Task, [task], True)
     task["type"] = "raptor"
     assert REDIS_CONN.queue_product(SVR_QUEUE_NAME, message=task), "Can't access Redis. Please check the Redis' status."
@@ -425,11 +460,12 @@ def queue_raptor_tasks(doc):

 def doc_upload_and_parse(conversation_id, file_objs, user_id):
     from rag.app import presentation, picture, naive, audio, email
-    from api.db.services.dialog_service import ConversationService, DialogService
+    from api.db.services.dialog_service import DialogService
     from api.db.services.file_service import FileService
     from api.db.services.llm_service import LLMBundle
     from api.db.services.user_service import TenantService
     from api.db.services.api_service import API4ConversationService
+    from api.db.services.conversation_service import ConversationService

     e, conv = ConversationService.get_by_id(conversation_id)
     if not e:
@@ -482,10 +518,7 @@ def doc_upload_and_parse(conversation_id, file_objs, user_id):
             for ck in th.result():
                 d = deepcopy(doc)
                 d.update(ck)
-                md5 = hashlib.md5()
-                md5.update((ck["content_with_weight"] +
-                            str(d["doc_id"])).encode("utf-8"))
-                d["id"] = md5.hexdigest()
+                d["id"] = xxhash.xxh64((ck["content_with_weight"] + str(d["doc_id"])).encode("utf-8")).hexdigest()
                 d["create_time"] = str(datetime.now()).replace("T", " ")[:19]
                 d["create_timestamp_flt"] = datetime.now().timestamp()
                 if not d.get("image"):
@@ -532,7 +565,8 @@ def doc_upload_and_parse(conversation_id, file_objs, user_id):
         try:
             mind_map = json.dumps(mindmap([c["content_with_weight"] for c in docs if c["doc_id"] == doc_id]).output,
                                   ensure_ascii=False, indent=2)
-            if len(mind_map) < 32: raise Exception("Few content: " + mind_map)
+            if len(mind_map) < 32:
+                raise Exception("Few content: " + mind_map)
             cks.append({
                 "id": get_uuid(),
                 "doc_id": doc_id,
@@ -20,7 +20,7 @@ from api.db.db_models import DB
 from api.db.db_models import File, File2Document
 from api.db.services.common_service import CommonService
 from api.db.services.document_service import DocumentService
-from api.utils import current_timestamp, datetime_format, get_uuid
+from api.utils import current_timestamp, datetime_format


 class File2DocumentService(CommonService):
@@ -63,7 +63,7 @@ class File2DocumentService(CommonService):
     def update_by_file_id(cls, file_id, obj):
         obj["update_time"] = current_timestamp()
         obj["update_date"] = datetime_format(datetime.now())
-        num = cls.model.update(obj).where(cls.model.id == file_id).execute()
+        # num = cls.model.update(obj).where(cls.model.id == file_id).execute()
         e, obj = cls.get_by_id(cls.model.id)
         return obj
@@ -85,7 +85,8 @@ class FileService(CommonService):
            .join(Document, on=(File2Document.document_id == Document.id))
            .join(Knowledgebase, on=(Knowledgebase.id == Document.kb_id))
            .where(cls.model.id == file_id))
-        if not kbs: return []
+        if not kbs:
+            return []
         kbs_info_list = []
         for kb in list(kbs.dicts()):
             kbs_info_list.append({"kb_id": kb['id'], "kb_name": kb['name']})
@@ -304,7 +305,8 @@ class FileService(CommonService):
     @classmethod
     @DB.connection_context()
     def add_file_from_kb(cls, doc, kb_folder_id, tenant_id):
-        for _ in File2DocumentService.get_by_document_id(doc["id"]): return
+        for _ in File2DocumentService.get_by_document_id(doc["id"]):
+            return
         file = {
             "id": get_uuid(),
             "parent_id": kb_folder_id,
@@ -104,7 +104,8 @@ class KnowledgebaseService(CommonService):
             cls.model.token_num,
             cls.model.chunk_num,
             cls.model.parser_id,
-            cls.model.parser_config]
+            cls.model.parser_config,
+            cls.model.pagerank]
         kbs = cls.model.select(*fields).join(Tenant, on=(
             (Tenant.id == cls.model.tenant_id) & (Tenant.status == StatusEnum.VALID.value))).where(
             (cls.model.id == kb_id),
@@ -13,8 +13,12 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 #
+import json
+import logging
+import os
+
 from api.db.services.user_service import TenantService
 from api.utils.file_utils import get_project_base_directory
 from rag.llm import EmbeddingModel, CvModel, ChatModel, RerankModel, Seq2txtModel, TTSModel
 from api.db import LLMType
 from api.db.db_models import DB
@@ -36,11 +40,11 @@ class TenantLLMService(CommonService):
     @classmethod
     @DB.connection_context()
     def get_api_key(cls, tenant_id, model_name):
-        arr = model_name.split("@")
-        if len(arr) < 2:
-            objs = cls.query(tenant_id=tenant_id, llm_name=model_name)
+        mdlnm, fid = TenantLLMService.split_model_name_and_factory(model_name)
+        if not fid:
+            objs = cls.query(tenant_id=tenant_id, llm_name=mdlnm)
         else:
-            objs = cls.query(tenant_id=tenant_id, llm_name=arr[0], llm_factory=arr[1])
+            objs = cls.query(tenant_id=tenant_id, llm_name=mdlnm, llm_factory=fid)
         if not objs:
             return
         return objs[0]
@@ -61,6 +65,23 @@ class TenantLLMService(CommonService):

         return list(objs)

+    @staticmethod
+    def split_model_name_and_factory(model_name):
+        arr = model_name.split("@")
+        if len(arr) < 2:
+            return model_name, None
+        if len(arr) > 2:
+            return "@".join(arr[0:-1]), arr[-1]
+        try:
+            fact = json.load(open(os.path.join(get_project_base_directory(), "conf/llm_factories.json"), "r"))["factory_llm_infos"]
+            fact = set([f["name"] for f in fact])
+            if arr[-1] not in fact:
+                return model_name, None
+            return arr[0], arr[-1]
+        except Exception as e:
+            logging.exception(f"TenantLLMService.split_model_name_and_factory got exception: {e}")
+        return model_name, None
+
     @classmethod
     @DB.connection_context()
     def model_instance(cls, tenant_id, llm_type,
@@ -85,10 +106,9 @@ class TenantLLMService(CommonService):
             assert False, "LLM type error"

         model_config = cls.get_api_key(tenant_id, mdlnm)
-        tmp = mdlnm.split("@")
-        fid = None if len(tmp) < 2 else tmp[1]
-        mdlnm = tmp[0]
-        if model_config: model_config = model_config.to_dict()
+        mdlnm, fid = TenantLLMService.split_model_name_and_factory(mdlnm)
+        if model_config:
+            model_config = model_config.to_dict()
         if not model_config:
             if llm_type in [LLMType.EMBEDDING, LLMType.RERANK]:
                 llm = LLMService.query(llm_name=mdlnm) if not fid else LLMService.query(llm_name=mdlnm, fid=fid)
@@ -168,16 +188,23 @@ class TenantLLMService(CommonService):
         else:
             assert False, "LLM type error"

-        llm_name = mdlnm.split("@")[0] if "@" in mdlnm else mdlnm
+        llm_name, llm_factory = TenantLLMService.split_model_name_and_factory(mdlnm)

         num = 0
         try:
-            for u in cls.query(tenant_id=tenant_id, llm_name=llm_name):
-                num += cls.model.update(used_tokens=u.used_tokens + used_tokens)\
-                    .where(cls.model.tenant_id == tenant_id, cls.model.llm_name == llm_name)\
+            if llm_factory:
+                tenant_llms = cls.query(tenant_id=tenant_id, llm_name=llm_name, llm_factory=llm_factory)
+            else:
+                tenant_llms = cls.query(tenant_id=tenant_id, llm_name=llm_name)
+            if not tenant_llms:
+                return num
+            else:
+                tenant_llm = tenant_llms[0]
+                num = cls.model.update(used_tokens=tenant_llm.used_tokens + used_tokens)\
+                    .where(cls.model.tenant_id == tenant_id, cls.model.llm_factory == tenant_llm.llm_factory, cls.model.llm_name == llm_name)\
                     .execute()
-        except Exception as e:
-            pass
+        except Exception:
+            logging.exception("TenantLLMService.increase_usage got exception")
         return num

     @classmethod
@@ -205,13 +232,13 @@ class LLMBundle(object):
             self.max_length = lm.max_tokens
             break

-    def encode(self, texts: list, batch_size=32):
-        emd, used_tokens = self.mdl.encode(texts, batch_size)
+    def encode(self, texts: list):
+        embeddings, used_tokens = self.mdl.encode(texts)
         if not TenantLLMService.increase_usage(
                 self.tenant_id, self.llm_type, used_tokens):
             logging.error(
                 "LLMBundle.encode can't update token usage for {}/EMBEDDING used_tokens: {}".format(self.tenant_id, used_tokens))
-        return emd, used_tokens
+        return embeddings, used_tokens

     def encode_queries(self, query: str):
         emd, used_tokens = self.mdl.encode_queries(query)
@@ -253,7 +280,7 @@ class LLMBundle(object):
                 logging.error(
                     "LLMBundle.tts can't update token usage for {}/TTS".format(self.tenant_id))
                 return
-                yield chunk
+            yield chunk

     def chat(self, system, history, gen_conf):
         txt, used_tokens = self.mdl.chat(system, history, gen_conf)
@@ -15,6 +15,8 @@
 #
 import os
 import random
+import xxhash
+import bisect

 from api.db.db_utils import bulk_insert_into_db
 from deepdoc.parser import PdfParser
@@ -29,6 +31,18 @@ from deepdoc.parser.excel_parser import RAGFlowExcelParser
 from rag.settings import SVR_QUEUE_NAME
 from rag.utils.storage_factory import STORAGE_IMPL
 from rag.utils.redis_conn import REDIS_CONN
+from api import settings
+from rag.nlp import search
+
+
+def trim_header_by_lines(text: str, max_length) -> str:
+    len_text = len(text)
+    if len_text <= max_length:
+        return text
+    for i in range(len_text):
+        if text[i] == '\n' and len_text - i <= max_length:
+            return text[i + 1:]
+    return text


 class TaskService(CommonService):
@@ -53,92 +67,143 @@ class TaskService(CommonService):
             Knowledgebase.tenant_id,
             Knowledgebase.language,
             Knowledgebase.embd_id,
+            Knowledgebase.pagerank,
             Tenant.img2txt_id,
             Tenant.asr_id,
             Tenant.llm_id,
-            cls.model.update_time]
-        docs = cls.model.select(*fields) \
-            .join(Document, on=(cls.model.doc_id == Document.id)) \
-            .join(Knowledgebase, on=(Document.kb_id == Knowledgebase.id)) \
-            .join(Tenant, on=(Knowledgebase.tenant_id == Tenant.id)) \
-            .where(cls.model.id == task_id)
+            cls.model.update_time,
+        ]
+        docs = (
+            cls.model.select(*fields)
+            .join(Document, on=(cls.model.doc_id == Document.id))
+            .join(Knowledgebase, on=(Document.kb_id == Knowledgebase.id))
+            .join(Tenant, on=(Knowledgebase.tenant_id == Tenant.id))
+            .where(cls.model.id == task_id)
+        )
         docs = list(docs.dicts())
-        if not docs: return None
+        if not docs:
+            return None

         msg = "\nTask has been received."
-        prog = random.random() / 10.
+        prog = random.random() / 10.0
         if docs[0]["retry_count"] >= 3:
             msg = "\nERROR: Task is abandoned after 3 times attempts."
             prog = -1

-        cls.model.update(progress_msg=cls.model.progress_msg + msg,
-                         progress=prog,
-                         retry_count=docs[0]["retry_count"]+1
-                         ).where(
-            cls.model.id == docs[0]["id"]).execute()
+        cls.model.update(
+            progress_msg=cls.model.progress_msg + msg,
+            progress=prog,
+            retry_count=docs[0]["retry_count"] + 1,
+        ).where(cls.model.id == docs[0]["id"]).execute()

-        if docs[0]["retry_count"] >= 3: return None
+        if docs[0]["retry_count"] >= 3:
+            return None

         return docs[0]

+    @classmethod
+    @DB.connection_context()
+    def get_tasks(cls, doc_id: str):
+        fields = [
+            cls.model.id,
+            cls.model.from_page,
+            cls.model.progress,
+            cls.model.digest,
+            cls.model.chunk_ids,
+        ]
+        tasks = (
+            cls.model.select(*fields).order_by(cls.model.from_page.asc(), cls.model.create_time.desc())
+            .where(cls.model.doc_id == doc_id)
+        )
+        tasks = list(tasks.dicts())
+        if not tasks:
+            return None
+        return tasks
+
+    @classmethod
+    @DB.connection_context()
+    def update_chunk_ids(cls, id: str, chunk_ids: str):
+        cls.model.update(chunk_ids=chunk_ids).where(cls.model.id == id).execute()
+
     @classmethod
     @DB.connection_context()
     def get_ongoing_doc_name(cls):
         with DB.lock("get_task", -1):
-            docs = cls.model.select(*[Document.id, Document.kb_id, Document.location, File.parent_id]) \
-                .join(Document, on=(cls.model.doc_id == Document.id)) \
-                .join(File2Document, on=(File2Document.document_id == Document.id), join_type=JOIN.LEFT_OUTER) \
-                .join(File, on=(File2Document.file_id == File.id), join_type=JOIN.LEFT_OUTER) \
-                .where(
+            docs = (
+                cls.model.select(
+                    *[Document.id, Document.kb_id, Document.location, File.parent_id]
+                )
+                .join(Document, on=(cls.model.doc_id == Document.id))
+                .join(
+                    File2Document,
+                    on=(File2Document.document_id == Document.id),
+                    join_type=JOIN.LEFT_OUTER,
+                )
+                .join(
+                    File,
+                    on=(File2Document.file_id == File.id),
+                    join_type=JOIN.LEFT_OUTER,
+                )
+                .where(
                     Document.status == StatusEnum.VALID.value,
                     Document.run == TaskStatus.RUNNING.value,
                     ~(Document.type == FileType.VIRTUAL.value),
                     cls.model.progress < 1,
-                    cls.model.create_time >= current_timestamp() - 1000 * 600
+                    cls.model.create_time >= current_timestamp() - 1000 * 600,
                 )
+            )
             docs = list(docs.dicts())
-            if not docs: return []
+            if not docs:
+                return []

-            return list(set([(d["parent_id"] if d["parent_id"] else d["kb_id"], d["location"]) for d in docs]))
+            return list(
+                set(
+                    [
+                        (
+                            d["parent_id"] if d["parent_id"] else d["kb_id"],
+                            d["location"],
+                        )
+                        for d in docs
+                    ]
+                )
+            )

     @classmethod
     @DB.connection_context()
     def do_cancel(cls, id):
-        try:
-            task = cls.model.get_by_id(id)
-            _, doc = DocumentService.get_by_id(task.doc_id)
-            return doc.run == TaskStatus.CANCEL.value or doc.progress < 0
-        except Exception:
-            pass
-        return False
+        task = cls.model.get_by_id(id)
+        _, doc = DocumentService.get_by_id(task.doc_id)
+        return doc.run == TaskStatus.CANCEL.value or doc.progress < 0

     @classmethod
     @DB.connection_context()
     def update_progress(cls, id, info):
         if os.environ.get("MACOS"):
             if info["progress_msg"]:
-                cls.model.update(progress_msg=cls.model.progress_msg + "\n" + info["progress_msg"]).where(
-                    cls.model.id == id).execute()
+                task = cls.model.get_by_id(id)
+                progress_msg = trim_header_by_lines(task.progress_msg + "\n" + info["progress_msg"], 1000)
+                cls.model.update(progress_msg=progress_msg).where(cls.model.id == id).execute()
             if "progress" in info:
                 cls.model.update(progress=info["progress"]).where(
-                    cls.model.id == id).execute()
+                    cls.model.id == id
+                ).execute()
             return

         with DB.lock("update_progress", -1):
             if info["progress_msg"]:
-                cls.model.update(progress_msg=cls.model.progress_msg + "\n" + info["progress_msg"]).where(
-                    cls.model.id == id).execute()
+                task = cls.model.get_by_id(id)
+                progress_msg = trim_header_by_lines(task.progress_msg + "\n" + info["progress_msg"], 1000)
+                cls.model.update(progress_msg=progress_msg).where(cls.model.id == id).execute()
             if "progress" in info:
                 cls.model.update(progress=info["progress"]).where(
-                    cls.model.id == id).execute()
+                    cls.model.id == id
+                ).execute()


 def queue_tasks(doc: dict, bucket: str, name: str):
     def new_task():
-        return {
-            "id": get_uuid(),
-            "doc_id": doc["id"]
-        }
+        return {"id": get_uuid(), "doc_id": doc["id"], "progress": 0.0, "from_page": 0, "to_page": 100000000}

     tsks = []

     if doc["type"] == FileType.PDF.value:
@@ -172,8 +237,57 @@ def queue_tasks(doc: dict, bucket: str, name: str):
     else:
         tsks.append(new_task())

+    chunking_config = DocumentService.get_chunking_config(doc["id"])
+    for task in tsks:
+        hasher = xxhash.xxh64()
+        for field in sorted(chunking_config.keys()):
+            hasher.update(str(chunking_config[field]).encode("utf-8"))
+        for field in ["doc_id", "from_page", "to_page"]:
+            hasher.update(str(task.get(field, "")).encode("utf-8"))
+        task_digest = hasher.hexdigest()
+        task["digest"] = task_digest
+        task["progress"] = 0.0
+
+    prev_tasks = TaskService.get_tasks(doc["id"])
+    ck_num = 0
+    if prev_tasks:
+        for task in tsks:
+            ck_num += reuse_prev_task_chunks(task, prev_tasks, chunking_config)
+        TaskService.filter_delete([Task.doc_id == doc["id"]])
+        chunk_ids = []
+        for task in prev_tasks:
+            if task["chunk_ids"]:
+                chunk_ids.extend(task["chunk_ids"].split())
+        if chunk_ids:
+            settings.docStoreConn.delete({"id": chunk_ids}, search.index_name(chunking_config["tenant_id"]),
+                                         chunking_config["kb_id"])
+    DocumentService.update_by_id(doc["id"], {"chunk_num": ck_num})
+
     bulk_insert_into_db(Task, tsks, True)
     DocumentService.begin2parse(doc["id"])

+    tsks = [task for task in tsks if task["progress"] < 1.0]
     for t in tsks:
-        assert REDIS_CONN.queue_product(SVR_QUEUE_NAME, message=t), "Can't access Redis. Please check the Redis' status."
+        assert REDIS_CONN.queue_product(
+            SVR_QUEUE_NAME, message=t
+        ), "Can't access Redis. Please check the Redis' status."
+
+
+def reuse_prev_task_chunks(task: dict, prev_tasks: list[dict], chunking_config: dict):
+    idx = bisect.bisect_left(prev_tasks, (task.get("from_page", 0), task.get("digest", "")),
+                             key=lambda x: (x.get("from_page", 0), x.get("digest", "")))
+    if idx >= len(prev_tasks):
+        return 0
+    prev_task = prev_tasks[idx]
+    if prev_task["progress"] < 1.0 or prev_task["digest"] != task["digest"] or not prev_task["chunk_ids"]:
+        return 0
+    task["chunk_ids"] = prev_task["chunk_ids"]
+    task["progress"] = 1.0
+    if "from_page" in task and "to_page" in task:
+        task["progress_msg"] = f"Page({task['from_page']}~{task['to_page']}): "
+    else:
+        task["progress_msg"] = ""
+    task["progress_msg"] += "reused previous task's chunks."
+    prev_task["chunk_ids"] = ""
+
+    return len(task["chunk_ids"].split())
@@ -22,7 +22,7 @@ from api.db import UserTenantRole
 from api.db.db_models import DB, UserTenant
 from api.db.db_models import User, Tenant
 from api.db.services.common_service import CommonService
-from api.utils import get_uuid, get_format_time, current_timestamp, datetime_format
+from api.utils import get_uuid, current_timestamp, datetime_format
 from api.db import StatusEnum
@@ -18,17 +18,10 @@
 # from beartype.claw import beartype_all # <-- you didn't sign up for this
 # beartype_all(conf=BeartypeConf(violation_type=UserWarning)) # <-- emit warnings from all code

-import logging
 from api.utils.log_utils import initRootLogger
 initRootLogger("ragflow_server")
-for module in ["pdfminer"]:
-    module_logger = logging.getLogger(module)
-    module_logger.setLevel(logging.WARNING)
-for module in ["peewee"]:
-    module_logger = logging.getLogger(module)
-    module_logger.handlers.clear()
-    module_logger.propagate = True

+import logging
 import os
 import signal
 import sys
@@ -47,6 +40,7 @@ from api.db.db_models import init_database_tables as init_web_db
 from api.db.init_data import init_web_data
 from api.versions import get_ragflow_version
 from api.utils import show_configs
+from rag.settings import print_rag_settings


 def update_progress():
@@ -75,6 +69,7 @@ if __name__ == '__main__':
     )
     show_configs()
     settings.init_settings()
+    print_rag_settings()

     # init db
     init_web_db()
@@ -163,9 +163,10 @@ def init_settings():

     global DOC_ENGINE, docStoreConn, retrievaler, kg_retrievaler
     DOC_ENGINE = os.environ.get('DOC_ENGINE', "elasticsearch")
-    if DOC_ENGINE == "elasticsearch":
+    lower_case_doc_engine = DOC_ENGINE.lower()
+    if lower_case_doc_engine == "elasticsearch":
         docStoreConn = rag.utils.es_conn.ESConnection()
-    elif DOC_ENGINE == "infinity":
+    elif lower_case_doc_engine == "infinity":
         docStoreConn = rag.utils.infinity_conn.InfinityConnection()
     else:
         raise Exception(f"Not supported doc engine: {DOC_ENGINE}")
@@ -36,7 +36,6 @@ from werkzeug.http import HTTP_STATUS_CODES
 from api.db.db_models import APIToken
 from api import settings

-from api import settings
 from api.utils import CustomJSONEncoder, get_uuid
 from api.utils import json_dumps
 from api.constants import REQUEST_WAIT_SEC, REQUEST_MAX_WAIT_SEC
@@ -174,6 +173,18 @@ def validate_request(*args, **kwargs):

     return wrapper

+
+def not_allowed_parameters(*params):
+    def decorator(f):
+        def wrapper(*args, **kwargs):
+            input_arguments = flask_request.json or flask_request.form.to_dict()
+            for param in params:
+                if param in input_arguments:
+                    return get_json_result(
+                        code=settings.RetCode.ARGUMENT_ERROR, message=f"Parameter {param} isn't allowed")
+            return f(*args, **kwargs)
+        return wrapper
+    return decorator


 def is_localhost(ip):
     return ip in {'127.0.0.1', '::1', '[::1]', 'localhost'}
@@ -28,7 +28,7 @@ def get_project_base_directory():
         )
     return PROJECT_BASE

-def initRootLogger(logfile_basename: str, log_level: int = logging.INFO, log_format: str = "%(asctime)-15s %(levelname)-8s %(process)d %(message)s"):
+def initRootLogger(logfile_basename: str, log_format: str = "%(asctime)-15s %(levelname)-8s %(process)d %(message)s"):
     logger = logging.getLogger()
     if logger.hasHandlers():
         return
@@ -36,19 +36,40 @@ def initRootLogger(logfile_basename: str, log_level: int = logging.INFO, log_format: str = "%(asctime)-15s %(levelname)-8s %(process)d %(message)s"):
     log_path = os.path.abspath(os.path.join(get_project_base_directory(), "logs", f"{logfile_basename}.log"))

     os.makedirs(os.path.dirname(log_path), exist_ok=True)
-    logger.setLevel(log_level)
     formatter = logging.Formatter(log_format)

     handler1 = RotatingFileHandler(log_path, maxBytes=10*1024*1024, backupCount=5)
-    handler1.setLevel(log_level)
     handler1.setFormatter(formatter)
     logger.addHandler(handler1)

     handler2 = logging.StreamHandler()
-    handler2.setLevel(log_level)
     handler2.setFormatter(formatter)
     logger.addHandler(handler2)

     logging.captureWarnings(True)
-    msg = f"{logfile_basename} log path: {log_path}"
+
+    LOG_LEVELS = os.environ.get("LOG_LEVELS", "")
+    pkg_levels = {}
+    for pkg_name_level in LOG_LEVELS.split(","):
+        terms = pkg_name_level.split("=")
+        if len(terms)!= 2:
+            continue
+        pkg_name, pkg_level = terms[0], terms[1]
+        pkg_name = pkg_name.strip()
+        pkg_level = logging.getLevelName(pkg_level.strip().upper())
+        if not isinstance(pkg_level, int):
+            pkg_level = logging.INFO
+        pkg_levels[pkg_name] = logging.getLevelName(pkg_level)
+
+    for pkg_name in ['peewee', 'pdfminer']:
+        if pkg_name not in pkg_levels:
+            pkg_levels[pkg_name] = logging.getLevelName(logging.WARNING)
+    if 'root' not in pkg_levels:
+        pkg_levels['root'] = logging.getLevelName(logging.INFO)
+
+    for pkg_name, pkg_level in pkg_levels.items():
+        pkg_logger = logging.getLogger(pkg_name)
+        pkg_logger.setLevel(pkg_level)
+
+    msg = f"{logfile_basename} log path: {log_path}, log levels: {pkg_levels}"
     logger.info(msg)
@@ -45,5 +45,5 @@ try:
     pool = Pool(processes=1)
     thread = pool.apply_async(download_nltk_data)
     binary = thread.get(timeout=60)
-except Exception as e:
+except Exception:
     print('\x1b[6;37;41m WARNING \x1b[0m' + "Downloading NLTK data failure.", flush=True)
@@ -42,28 +42,11 @@ def get_ragflow_version() -> str:
 def get_closest_tag_and_count():
     try:
-        # Get the current commit hash
-        commit_id = (
-            subprocess.check_output(["git", "rev-parse", "--short", "HEAD"])
+        version_info = (
+            subprocess.check_output(["git", "describe", "--tags", "--match=v*", "--first-parent", "--always"])
             .strip()
             .decode("utf-8")
         )
-        # Get the closest tag
-        closest_tag = (
-            subprocess.check_output(["git", "describe", "--tags", "--abbrev=0"])
-            .strip()
-            .decode("utf-8")
-        )
-        # Get the commit count since the closest tag
-        process = subprocess.Popen(
-            ["git", "rev-list", "--count", f"{closest_tag}..HEAD"],
-            stdout=subprocess.PIPE,
-        )
-        commits_count, _ = process.communicate()
-        commits_count = int(commits_count.strip())
-
-        if commits_count == 0:
-            return closest_tag
-        else:
-            return f"{commit_id}({closest_tag}~{commits_count})"
+        return version_info
     except Exception:
         return "unknown"
@@ -5,22 +5,27 @@
   "create_time": {"type": "varchar", "default": ""},
   "create_timestamp_flt": {"type": "float", "default": 0.0},
   "img_id": {"type": "varchar", "default": ""},
-  "docnm_kwd": {"type": "varchar", "default": ""},
-  "title_tks": {"type": "varchar", "default": ""},
-  "title_sm_tks": {"type": "varchar", "default": ""},
-  "name_kwd": {"type": "varchar", "default": ""},
-  "important_kwd": {"type": "varchar", "default": ""},
-  "important_tks": {"type": "varchar", "default": ""},
+  "docnm_kwd": {"type": "varchar", "default": "", "analyzer": "whitespace"},
+  "title_tks": {"type": "varchar", "default": "", "analyzer": "whitespace"},
+  "title_sm_tks": {"type": "varchar", "default": "", "analyzer": "whitespace"},
+  "name_kwd": {"type": "varchar", "default": "", "analyzer": "whitespace"},
+  "important_kwd": {"type": "varchar", "default": "", "analyzer": "whitespace"},
+  "important_tks": {"type": "varchar", "default": "", "analyzer": "whitespace"},
+  "question_kwd": {"type": "varchar", "default": "", "analyzer": "whitespace"},
+  "question_tks": {"type": "varchar", "default": "", "analyzer": "whitespace"},
   "content_with_weight": {"type": "varchar", "default": ""},
-  "content_ltks": {"type": "varchar", "default": ""},
-  "content_sm_ltks": {"type": "varchar", "default": ""},
-  "page_num_list": {"type": "varchar", "default": ""},
-  "top_list": {"type": "varchar", "default": ""},
-  "position_list": {"type": "varchar", "default": ""},
+  "content_ltks": {"type": "varchar", "default": "", "analyzer": "whitespace"},
+  "content_sm_ltks": {"type": "varchar", "default": "", "analyzer": "whitespace"},
+  "authors_tks": {"type": "varchar", "default": "", "analyzer": "whitespace"},
+  "authors_sm_tks": {"type": "varchar", "default": "", "analyzer": "whitespace"},
+  "page_num_int": {"type": "varchar", "default": ""},
+  "top_int": {"type": "varchar", "default": ""},
+  "position_int": {"type": "varchar", "default": ""},
   "weight_int": {"type": "integer", "default": 0},
   "weight_flt": {"type": "float", "default": 0.0},
   "rank_int": {"type": "integer", "default": 0},
   "available_int": {"type": "integer", "default": 1},
-  "knowledge_graph_kwd": {"type": "varchar", "default": ""},
-  "entities_kwd": {"type": "varchar", "default": ""}
+  "knowledge_graph_kwd": {"type": "varchar", "default": "", "analyzer": "whitespace"},
+  "entities_kwd": {"type": "varchar", "default": "", "analyzer": "whitespace"},
+  "pagerank_fea": {"type": "integer", "default": 0}
 }
@@ -525,6 +525,18 @@
    "tags": "TEXT EMBEDDING",
    "max_tokens": 8196,
    "model_type": "embedding"
},
{
    "llm_name": "jina-reranker-v2-base-multilingual",
    "tags": "RE-RANK,8k",
    "max_tokens": 8196,
    "model_type": "rerank"
},
{
    "llm_name": "jina-embeddings-v3",
    "tags": "TEXT EMBEDDING",
    "max_tokens": 8196,
    "model_type": "embedding"
}
]
},
@@ -606,26 +618,32 @@
},
{
    "llm_name": "open-mistral-7b",
    "tags": "LLM,CHAT,32k",
    "max_tokens": 32000,
    "tags": "LLM,CHAT,128k",
    "max_tokens": 128000,
    "model_type": "chat"
},
{
    "llm_name": "ministral-8b-latest",
    "tags": "LLM,CHAT,128k",
    "max_tokens": 128000,
    "model_type": "chat"
},
{
    "llm_name": "ministral-3b-latest",
    "tags": "LLM,CHAT,128k",
    "max_tokens": 128000,
    "model_type": "chat"
},
{
    "llm_name": "mistral-large-latest",
    "tags": "LLM,CHAT,32k",
    "max_tokens": 32000,
    "tags": "LLM,CHAT,128k",
    "max_tokens": 128000,
    "model_type": "chat"
},
{
    "llm_name": "mistral-small-latest",
    "tags": "LLM,CHAT,32k",
    "max_tokens": 32000,
    "model_type": "chat"
},
{
    "llm_name": "mistral-medium-latest",
    "tags": "LLM,CHAT,32k",
    "max_tokens": 32000,
    "tags": "LLM,CHAT,128k",
    "max_tokens": 128000,
    "model_type": "chat"
},
{
@@ -634,11 +652,29 @@
    "max_tokens": 32000,
    "model_type": "chat"
},
{
    "llm_name": "mistral-nemo",
    "tags": "LLM,CHAT,128k",
    "max_tokens": 128000,
    "model_type": "chat"
},
{
    "llm_name": "mistral-embed",
    "tags": "LLM,CHAT,8k",
    "max_tokens": 8192,
    "model_type": "embedding"
},
{
    "llm_name": "pixtral-large-latest",
    "tags": "LLM,CHAT,32k",
    "max_tokens": 32000,
    "model_type": "image2text"
},
{
    "llm_name": "pixtral-12b",
    "tags": "LLM,CHAT,32k",
    "max_tokens": 32000,
    "model_type": "image2text"
}
]
},
@@ -2432,6 +2468,18 @@
    "max_tokens": 4000,
    "model_type": "embedding"
},
{
    "llm_name": "voyage-3",
    "tags": "TEXT EMBEDDING,32000",
    "max_tokens": 32000,
    "model_type": "embedding"
},
{
    "llm_name": "voyage-3-lite",
    "tags": "TEXT EMBEDDING,32000",
    "max_tokens": 32000,
    "model_type": "embedding"
},
{
    "llm_name": "rerank-1",
    "tags": "RE-RANK, 8000",
@@ -2443,6 +2491,18 @@
    "tags": "RE-RANK, 4000",
    "max_tokens": 4000,
    "model_type": "rerank"
},
{
    "llm_name": "rerank-2",
    "tags": "RE-RANK, 16000",
    "max_tokens": 16000,
    "model_type": "rerank"
},
{
    "llm_name": "rerank-2-lite",
    "tags": "RE-RANK, 8000",
    "max_tokens": 8000,
    "model_type": "rerank"
}
]
},
@@ -140,13 +140,21 @@
        }
    },
    {
        "string": {
        "rank_feature": {
            "match": "*_fea",
            "mapping": {
                "type": "rank_feature"
            }
        }
    },
    {
        "rank_features": {
            "match": "*_feas",
            "mapping": {
                "type": "rank_features"
            }
        }
    },
    {
        "dense_vector": {
            "match": "*_512_vec",
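These dynamic templates map any field ending in _fea (such as the new pagerank_fea above) to Elasticsearch's rank_feature type, and *_feas to rank_features. A hedged sketch of how such a field can then shape ranking at query time (the index layout and values are illustrative):

# Hypothetical query body: full-text match plus a rank_feature boost.
query = {
    "query": {
        "bool": {
            "must": [{"match": {"content_ltks": "retrieval augmented generation"}}],
            "should": [{"rank_feature": {"field": "pagerank_fea", "boost": 10}}],
        }
    }
}
# rank_feature clauses never filter documents; they only add to the score,
# so chunks from high-pagerank documents rise without excluding the rest.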
@@ -0,0 +1,2 @@
from beartype.claw import beartype_this_package
beartype_this_package()
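The new two-line module turns on beartype's import hook, which wraps every function in the package with runtime type checking against its annotations. A minimal sketch of the effect on a single function (the function itself is illustrative, not from the codebase):

from beartype import beartype

@beartype  # beartype_this_package() applies this decorator package-wide
def chunk_count(pages: int) -> int:
    return pages * 2

chunk_count(3)    # passes
chunk_count("3")  # raises a beartype violation at call time, not deep inside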
@@ -18,4 +18,16 @@ from .ppt_parser import RAGFlowPptParser as PptParser
from .html_parser import RAGFlowHtmlParser as HtmlParser
from .json_parser import RAGFlowJsonParser as JsonParser
from .markdown_parser import RAGFlowMarkdownParser as MarkdownParser
from .txt_parser import RAGFlowTxtParser as TxtParser
from .txt_parser import RAGFlowTxtParser as TxtParser

__all__ = [
    "PdfParser",
    "PlainParser",
    "DocxParser",
    "ExcelParser",
    "PptParser",
    "HtmlParser",
    "JsonParser",
    "MarkdownParser",
    "TxtParser",
]
@@ -29,7 +29,8 @@ class RAGFlowExcelParser:
        for sheetname in wb.sheetnames:
            ws = wb[sheetname]
            rows = list(ws.rows)
            if not rows: continue
            if not rows:
                continue

            tb_rows_0 = "<tr>"
            for t in list(rows[0]):
@@ -40,7 +41,9 @@ class RAGFlowExcelParser:
                tb = ""
                tb += f"<table><caption>{sheetname}</caption>"
                tb += tb_rows_0
                for r in list(rows[1 + chunk_i * chunk_rows:1 + (chunk_i + 1) * chunk_rows]):
                for r in list(
                    rows[1 + chunk_i * chunk_rows : 1 + (chunk_i + 1) * chunk_rows]
                ):
                    tb += "<tr>"
                    for i, c in enumerate(r):
                        if c.value is None:
@@ -62,20 +65,21 @@ class RAGFlowExcelParser:
        for sheetname in wb.sheetnames:
            ws = wb[sheetname]
            rows = list(ws.rows)
            if not rows:continue
            if not rows:
                continue
            ti = list(rows[0])
            for r in list(rows[1:]):
                l = []
                fields = []
                for i, c in enumerate(r):
                    if not c.value:
                        continue
                    t = str(ti[i].value) if i < len(ti) else ""
                    t += (":" if t else "") + str(c.value)
                    l.append(t)
                l = "; ".join(l)
                    fields.append(t)
                line = "; ".join(fields)
                if sheetname.lower().find("sheet") < 0:
                    l += " ——" + sheetname
                res.append(l)
                    line += " ——" + sheetname
                res.append(line)
        return res

    @staticmethod
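The reflowed slice walks the data rows in fixed-size chunks after the header row. A small sketch of the arithmetic with illustrative values (chunk_rows=2, five data rows):

rows = ["hdr", "r1", "r2", "r3", "r4", "r5"]
chunk_rows = 2  # illustrative chunk size
for chunk_i in range((len(rows) - 1 + chunk_rows - 1) // chunk_rows):
    # Same slice as the parser: skip the header, take chunk_rows data rows.
    print(rows[1 + chunk_i * chunk_rows : 1 + (chunk_i + 1) * chunk_rows])
# -> ['r1', 'r2'] / ['r3', 'r4'] / ['r5']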
@@ -36,7 +36,7 @@ class RAGFlowHtmlParser:

    @classmethod
    def parser_txt(cls, txt):
        if type(txt) != str:
        if not isinstance(txt, str):
            raise TypeError("txt type should be str!")
        html_doc = readability.Document(txt)
        title = html_doc.title()
@@ -4,6 +4,7 @@

import json
from typing import Any

from rag.nlp import find_codec
class RAGFlowJsonParser:
    def __init__(
@@ -22,7 +23,7 @@ class RAGFlowJsonParser:
        txt = binary.decode(encoding, errors="ignore")
        json_data = json.loads(txt)
        chunks = self.split_json(json_data, True)
        sections = [json.dumps(l, ensure_ascii=False) for l in chunks if l]
        sections = [json.dumps(line, ensure_ascii=False) for line in chunks if line]
        return sections

    @staticmethod
@@ -53,7 +54,7 @@ class RAGFlowJsonParser:

    def _json_split(
        self,
        data: dict[str, Any],
        data,
        current_path: list[str] | None,
        chunks: list[dict] | None,
    ) -> list[dict]:
@@ -86,15 +87,16 @@ class RAGFlowJsonParser:

    def split_json(
        self,
        json_data: dict[str, Any],
        json_data,
        convert_lists: bool = False,
    ) -> list[dict]:
        """Splits JSON into a list of JSON chunks"""

        if convert_lists:
            chunks = self._json_split(self._list_to_dict_preprocessing(json_data))
            preprocessed_data = self._list_to_dict_preprocessing(json_data)
            chunks = self._json_split(preprocessed_data, None, None)
        else:
            chunks = self._json_split(json_data)
            chunks = self._json_split(json_data, None, None)

        # Remove the last chunk if it's empty
        if not chunks[-1]:
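The fixed call path now always passes the two optional arguments explicitly; with convert_lists=True, lists are first rewritten as index-keyed dicts so the splitter only ever recurses over mappings. A hedged mini-version of that preprocessing step (sample data illustrative):

def list_to_dict(value):
    # Simplified stand-in for _list_to_dict_preprocessing.
    if isinstance(value, list):
        return {str(i): list_to_dict(v) for i, v in enumerate(value)}
    if isinstance(value, dict):
        return {k: list_to_dict(v) for k, v in value.items()}
    return value

doc = {"title": "spec", "sections": [{"h": "intro"}, {"h": "api"}]}
print(list_to_dict(doc))
# -> {'title': 'spec', 'sections': {'0': {'h': 'intro'}, '1': {'h': 'api'}}}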
@@ -21,7 +21,6 @@ import re
import pdfplumber
from PIL import Image
import numpy as np
from timeit import default_timer as timer
from pypdf import PdfReader as pdf2_read

from api import settings
@@ -152,7 +151,7 @@ class RAGFlowPdfParser:
                max(len(up["text"]), len(down["text"])),
                len(tks_all) - len(tks_up) - len(tks_down),
                len(tks_down) - len(tks_up),
                tks_down[-1] == tks_up[-1],
                tks_down[-1] == tks_up[-1] if tks_down and tks_up else False,
                max(down["in_row"], up["in_row"]),
                abs(down["in_row"] - up["in_row"]),
                len(tks_down) == 1 and rag_tokenizer.tag(tks_down[0]).find("n") >= 0,
@@ -752,7 +751,7 @@ class RAGFlowPdfParser:
            "x1": np.max([b["x1"] for b in bxs]),
            "bottom": np.max([b["bottom"] for b in bxs]) - ht
        }
        louts = [l for l in self.page_layout[pn] if l["type"] == ltype]
        louts = [layout for layout in self.page_layout[pn] if layout["type"] == ltype]
        ii = Recognizer.find_overlapped(b, louts, naive=True)
        if ii is not None:
            b = louts[ii]
@@ -763,7 +762,8 @@ class RAGFlowPdfParser:
                "layoutno", "")))

        left, top, right, bott = b["x0"], b["top"], b["x1"], b["bottom"]
        if right < left: right = left + 1
        if right < left:
            right = left + 1
        poss.append((pn + self.page_from, left, right, top, bott))
        return self.page_images[pn] \
            .crop((left * ZM, top * ZM,
@@ -845,7 +845,8 @@ class RAGFlowPdfParser:
        top = bx["top"] - self.page_cum_height[pn[0] - 1]
        bott = bx["bottom"] - self.page_cum_height[pn[0] - 1]
        page_images_cnt = len(self.page_images)
        if pn[-1] - 1 >= page_images_cnt: return ""
        if pn[-1] - 1 >= page_images_cnt:
            return ""
        while bott * ZM > self.page_images[pn[-1] - 1].size[1]:
            bott -= self.page_images[pn[-1] - 1].size[1] / ZM
            pn.append(pn[-1] + 1)
@@ -889,7 +890,6 @@ class RAGFlowPdfParser:
            nonlocal mh, pw, lines, widths
            lines.append(line)
            widths.append(width(line))
            width_mean = np.mean(widths)
            mmj = self.proj_match(
                line["text"]) or line.get(
                "layout_type",
@@ -948,7 +948,6 @@ class RAGFlowPdfParser:
        self.page_cum_height = [0]
        self.page_layout = []
        self.page_from = page_from
        st = timer()
        try:
            self.pdf = pdfplumber.open(fnm) if isinstance(
                fnm, str) else pdfplumber.open(BytesIO(fnm))
@@ -956,8 +955,12 @@ class RAGFlowPdfParser:
                enumerate(self.pdf.pages[page_from:page_to])]
            self.page_images_x2 = [p.to_image(resolution=72 * zoomin * 2).annotated for i, p in
                                   enumerate(self.pdf.pages[page_from:page_to])]
            self.page_chars = [[{**c, 'top': c['top'], 'bottom': c['bottom']} for c in page.dedupe_chars().chars if self._has_color(c)] for page in
                               self.pdf.pages[page_from:page_to]]
            try:
                self.page_chars = [[{**c, 'top': c['top'], 'bottom': c['bottom']} for c in page.dedupe_chars().chars if self._has_color(c)] for page in self.pdf.pages[page_from:page_to]]
            except Exception as e:
                logging.warning(f"Failed to extract characters for pages {page_from}-{page_to}: {str(e)}")
                self.page_chars = [[] for _ in range(page_to - page_from)]  # If extraction fails, fall back to empty lists.

            self.total_page = len(self.pdf.pages)
        except Exception:
            logging.exception("RAGFlowPdfParser __images__")
@@ -990,7 +993,7 @@ class RAGFlowPdfParser:
        else:
            self.is_english = False

        st = timer()
        # st = timer()
        for i, img in enumerate(self.page_images_x2):
            chars = self.page_chars[i] if not self.is_english else []
            self.mean_height.append(
@@ -1024,8 +1027,8 @@ class RAGFlowPdfParser:

        self.page_cum_height = np.cumsum(self.page_cum_height)
        assert len(self.page_cum_height) == len(self.page_images) + 1
        if len(self.boxes) == 0 and zoomin < 9: self.__images__(fnm, zoomin * 3, page_from,
                                                                page_to, callback)
        if len(self.boxes) == 0 and zoomin < 9:
            self.__images__(fnm, zoomin * 3, page_from, page_to, callback)

    def __call__(self, fnm, need_image=True, zoomin=3, return_html=False):
        self.__images__(fnm, zoomin)
@@ -1164,7 +1167,7 @@ class PlainParser(object):
        if not self.outlines:
            logging.warning("Miss outlines")

        return [(l, "") for l in lines], []
        return [(line, "") for line in lines], []

    def crop(self, ck, need_position):
        raise NotImplementedError
@@ -15,21 +15,42 @@ import datetime


def refactor(cv):
    for n in ["raw_txt", "parser_name", "inference", "ori_text", "use_time", "time_stat"]:
        if n in cv and cv[n] is not None: del cv[n]
    for n in [
        "raw_txt",
        "parser_name",
        "inference",
        "ori_text",
        "use_time",
        "time_stat",
    ]:
        if n in cv and cv[n] is not None:
            del cv[n]
    cv["is_deleted"] = 0
    if "basic" not in cv: cv["basic"] = {}
    if cv["basic"].get("photo2"): del cv["basic"]["photo2"]
    if "basic" not in cv:
        cv["basic"] = {}
    if cv["basic"].get("photo2"):
        del cv["basic"]["photo2"]

    for n in ["education", "work", "certificate", "project", "language", "skill", "training"]:
        if n not in cv or cv[n] is None: continue
        if type(cv[n]) == type({}): cv[n] = [v for _, v in cv[n].items()]
        if type(cv[n]) != type([]):
    for n in [
        "education",
        "work",
        "certificate",
        "project",
        "language",
        "skill",
        "training",
    ]:
        if n not in cv or cv[n] is None:
            continue
        if isinstance(cv[n], dict):
            cv[n] = [v for _, v in cv[n].items()]
        if not isinstance(cv[n], list):
            del cv[n]
            continue
        vv = []
        for v in cv[n]:
            if "external" in v and v["external"] is not None: del v["external"]
            if "external" in v and v["external"] is not None:
                del v["external"]
            vv.append(v)
        cv[n] = {str(i): vv[i] for i in range(len(vv))}

@@ -42,24 +63,44 @@ def refactor(cv):
        cv["basic"][t] = cv["basic"][n]
        del cv["basic"][n]

    work = sorted([v for _, v in cv.get("work", {}).items()], key=lambda x: x.get("start_time", ""))
    edu = sorted([v for _, v in cv.get("education", {}).items()], key=lambda x: x.get("start_time", ""))
    work = sorted(
        [v for _, v in cv.get("work", {}).items()],
        key=lambda x: x.get("start_time", ""),
    )
    edu = sorted(
        [v for _, v in cv.get("education", {}).items()],
        key=lambda x: x.get("start_time", ""),
    )

    if work:
        cv["basic"]["work_start_time"] = work[0].get("start_time", "")
        cv["basic"]["management_experience"] = 'Y' if any(
            [w.get("management_experience", '') == 'Y' for w in work]) else 'N'
        cv["basic"]["management_experience"] = (
            "Y"
            if any([w.get("management_experience", "") == "Y" for w in work])
            else "N"
        )
        cv["basic"]["annual_salary"] = work[-1].get("annual_salary_from", "0")

        for n in ["annual_salary_from", "annual_salary_to", "industry_name", "position_name", "responsibilities",
                  "corporation_type", "scale", "corporation_name"]:
        for n in [
            "annual_salary_from",
            "annual_salary_to",
            "industry_name",
            "position_name",
            "responsibilities",
            "corporation_type",
            "scale",
            "corporation_name",
        ]:
            cv["basic"][n] = work[-1].get(n, "")

    if edu:
        for n in ["school_name", "discipline_name"]:
            if n in edu[-1]: cv["basic"][n] = edu[-1][n]
            if n in edu[-1]:
                cv["basic"][n] = edu[-1][n]

    cv["basic"]["updated_at"] = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
    if "contact" not in cv: cv["contact"] = {}
    if not cv["contact"].get("name"): cv["contact"]["name"] = cv["basic"].get("name", "")
    return cv
    if "contact" not in cv:
        cv["contact"] = {}
    if not cv["contact"].get("name"):
        cv["contact"]["name"] = cv["basic"].get("name", "")
    return cv
@@ -21,13 +21,18 @@ from . import regions


current_file_path = os.path.dirname(os.path.abspath(__file__))
GOODS = pd.read_csv(os.path.join(current_file_path, "res/corp_baike_len.csv"), sep="\t", header=0).fillna(0)
GOODS = pd.read_csv(
    os.path.join(current_file_path, "res/corp_baike_len.csv"), sep="\t", header=0
).fillna(0)
GOODS["cid"] = GOODS["cid"].astype(str)
GOODS = GOODS.set_index(["cid"])
CORP_TKS = json.load(open(os.path.join(current_file_path, "res/corp.tks.freq.json"), "r"))
CORP_TKS = json.load(
    open(os.path.join(current_file_path, "res/corp.tks.freq.json"), "r")
)
GOOD_CORP = json.load(open(os.path.join(current_file_path, "res/good_corp.json"), "r"))
CORP_TAG = json.load(open(os.path.join(current_file_path, "res/corp_tag.json"), "r"))


def baike(cid, default_v=0):
    global GOODS
    try:
@@ -39,27 +44,41 @@ def baike(cid, default_v=0):

def corpNorm(nm, add_region=True):
    global CORP_TKS
    if not nm or type(nm)!=type(""):return ""
    if not nm or not isinstance(nm, str):
        return ""
    nm = rag_tokenizer.tradi2simp(rag_tokenizer.strQ2B(nm)).lower()
    nm = re.sub(r"&amp;", "&", nm)
    nm = re.sub(r"[\(\)()\+'\"\t \*\\【】-]+", " ", nm)
    nm = re.sub(r"([—-]+.*| +co\..*|corp\..*| +inc\..*| +ltd.*)", "", nm, 10000, re.IGNORECASE)
    nm = re.sub(r"(计算机|技术|(技术|科技|网络)*有限公司|公司|有限|研发中心|中国|总部)$", "", nm, 10000, re.IGNORECASE)
    if not nm or (len(nm)<5 and not regions.isName(nm[0:2])):return nm
    nm = re.sub(
        r"([—-]+.*| +co\..*|corp\..*| +inc\..*| +ltd.*)", "", nm, 10000, re.IGNORECASE
    )
    nm = re.sub(
        r"(计算机|技术|(技术|科技|网络)*有限公司|公司|有限|研发中心|中国|总部)$",
        "",
        nm,
        10000,
        re.IGNORECASE,
    )
    if not nm or (len(nm) < 5 and not regions.isName(nm[0:2])):
        return nm

    tks = rag_tokenizer.tokenize(nm).split()
    reg = [t for i,t in enumerate(tks) if regions.isName(t) and (t != "中国" or i > 0)]
    reg = [t for i, t in enumerate(tks) if regions.isName(t) and (t != "中国" or i > 0)]
    nm = ""
    for t in tks:
        if regions.isName(t) or t in CORP_TKS:continue
        if re.match(r"[0-9a-zA-Z\\,.]+", t) and re.match(r".*[0-9a-zA-Z\,.]+$", nm):nm += " "
        if regions.isName(t) or t in CORP_TKS:
            continue
        if re.match(r"[0-9a-zA-Z\\,.]+", t) and re.match(r".*[0-9a-zA-Z\,.]+$", nm):
            nm += " "
        nm += t

    r = re.search(r"^([^a-z0-9 \(\)&]{2,})[a-z ]{4,}$", nm.strip())
    if r:nm = r.group(1)
    if r:
        nm = r.group(1)
    r = re.search(r"^([a-z ]{3,})[^a-z0-9 \(\)&]{2,}$", nm.strip())
    if r:nm = r.group(1)
    return nm.strip() + (("" if not reg else "(%s)"%reg[0]) if add_region else "")
    if r:
        nm = r.group(1)
    return nm.strip() + (("" if not reg else "(%s)" % reg[0]) if add_region else "")
def rmNoise(n):
@@ -67,33 +86,40 @@ def rmNoise(n):
    n = re.sub(r"[,. &()()]+", "", n)
    return n


GOOD_CORP = set([corpNorm(rmNoise(c), False) for c in GOOD_CORP])
for c,v in CORP_TAG.items():
for c, v in CORP_TAG.items():
    cc = corpNorm(rmNoise(c), False)
    if not cc:
        logging.debug(c)
CORP_TAG = {corpNorm(rmNoise(c), False):v for c,v in CORP_TAG.items()}
CORP_TAG = {corpNorm(rmNoise(c), False): v for c, v in CORP_TAG.items()}


def is_good(nm):
    global GOOD_CORP
    if nm.find("外派")>=0:return False
    if nm.find("外派") >= 0:
        return False
    nm = rmNoise(nm)
    nm = corpNorm(nm, False)
    for n in GOOD_CORP:
        if re.match(r"[0-9a-zA-Z]+$", n):
            if n == nm: return True
        elif nm.find(n)>=0:return True
            if n == nm:
                return True
        elif nm.find(n) >= 0:
            return True
    return False


def corp_tag(nm):
    global CORP_TAG
    nm = rmNoise(nm)
    nm = corpNorm(nm, False)
    for n in CORP_TAG.keys():
        if re.match(r"[0-9a-zA-Z., ]+$", n):
            if n == nm: return CORP_TAG[n]
        elif nm.find(n)>=0:
            if len(n)<3 and len(nm)/len(n)>=2:continue
            if n == nm:
                return CORP_TAG[n]
        elif nm.find(n) >= 0:
            if len(n) < 3 and len(nm) / len(n) >= 2:
                continue
            return CORP_TAG[n]
    return []
@@ -11,27 +11,31 @@
# limitations under the License.
#

TBL = {"94":"EMBA",
       "6":"MBA",
       "95":"MPA",
       "92":"专升本",
       "4":"专科",
       "90":"中专",
       "91":"中技",
       "86":"初中",
       "3":"博士",
       "10":"博士后",
       "1":"本科",
       "2":"硕士",
       "87":"职高",
       "89":"高中"
TBL = {
    "94": "EMBA",
    "6": "MBA",
    "95": "MPA",
    "92": "专升本",
    "4": "专科",
    "90": "中专",
    "91": "中技",
    "86": "初中",
    "3": "博士",
    "10": "博士后",
    "1": "本科",
    "2": "硕士",
    "87": "职高",
    "89": "高中",
}

TBL_ = {v:k for k,v in TBL.items()}
TBL_ = {v: k for k, v in TBL.items()}


def get_name(id):
    return TBL.get(str(id), "")


def get_id(nm):
    if not nm:return ""
    if not nm:
        return ""
    return TBL_.get(nm.upper().strip(), "")
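A quick usage sketch of the lookup pair above, assuming the module is imported (calls and outputs illustrative):

print(get_name(6))      # -> "MBA": ids are normalized with str() before lookup
print(get_id("mba"))    # -> "6": names are upper-cased and stripped first
print(get_id("不存在"))  # -> "": unknown names fall back to the empty string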
File diff suppressed because it is too large
File diff suppressed because it is too large
@@ -16,8 +16,11 @@ import json
import re
import copy
import pandas as pd

current_file_path = os.path.dirname(os.path.abspath(__file__))
TBL = pd.read_csv(os.path.join(current_file_path, "res/schools.csv"), sep="\t", header=0).fillna("")
TBL = pd.read_csv(
    os.path.join(current_file_path, "res/schools.csv"), sep="\t", header=0
).fillna("")
TBL["name_en"] = TBL["name_en"].map(lambda x: x.lower().strip())
GOOD_SCH = json.load(open(os.path.join(current_file_path, "res/good_sch.json"), "r"))
GOOD_SCH = set([re.sub(r"[,. &()()]+", "", c) for c in GOOD_SCH])
@@ -26,14 +29,15 @@ GOOD_SCH = set([re.sub(r"[,. &()()]+", "", c) for c in GOOD_SCH])
def loadRank(fnm):
    global TBL
    TBL["rank"] = 1000000
    with open(fnm, "r", encoding='utf-8') as f:
    with open(fnm, "r", encoding="utf-8") as f:
        while True:
            l = f.readline()
            if not l:break
            l = l.strip("\n").split(",")
            line = f.readline()
            if not line:
                break
            line = line.strip("\n").split(",")
            try:
                nm,rk = l[0].strip(),int(l[1])
                #assert len(TBL[((TBL.name_cn == nm) | (TBL.name_en == nm))]),f"<{nm}>"
                nm, rk = line[0].strip(), int(line[1])
                # assert len(TBL[((TBL.name_cn == nm) | (TBL.name_en == nm))]),f"<{nm}>"
                TBL.loc[((TBL.name_cn == nm) | (TBL.name_en == nm)), "rank"] = rk
            except Exception:
                pass
@@ -44,27 +48,35 @@ loadRank(os.path.join(current_file_path, "res/school.rank.csv"))

def split(txt):
    tks = []
    for t in re.sub(r"[ \t]+", " ",txt).split():
        if tks and re.match(r".*[a-zA-Z]$", tks[-1]) and \
                re.match(r"[a-zA-Z]", t) and tks:
    for t in re.sub(r"[ \t]+", " ", txt).split():
        if (
            tks
            and re.match(r".*[a-zA-Z]$", tks[-1])
            and re.match(r"[a-zA-Z]", t)
            and tks
        ):
            tks[-1] = tks[-1] + " " + t
        else:tks.append(t)
        else:
            tks.append(t)
    return tks


def select(nm):
    global TBL
    if not nm:return
    if isinstance(nm, list):nm = str(nm[0])
    if not nm:
        return
    if isinstance(nm, list):
        nm = str(nm[0])
    nm = split(nm)[0]
    nm = str(nm).lower().strip()
    nm = re.sub(r"[((][^()()]+[))]", "", nm.lower())
    nm = re.sub(r"(^the |[,.&()();;·]+|^(英国|美国|瑞士))", "", nm)
    nm = re.sub(r"大学.*学院", "大学", nm)
    tbl = copy.deepcopy(TBL)
    tbl["hit_alias"] = tbl["alias"].map(lambda x:nm in set(x.split("+")))
    res = tbl[((tbl.name_cn == nm) | (tbl.name_en == nm) | (tbl.hit_alias == True))]
    if res.empty:return
    tbl["hit_alias"] = tbl["alias"].map(lambda x: nm in set(x.split("+")))
    res = tbl[((tbl.name_cn == nm) | (tbl.name_en == nm) | tbl.hit_alias)]
    if res.empty:
        return

    return json.loads(res.to_json(orient="records"))[0]

@@ -74,4 +86,3 @@ def is_good(nm):
    nm = re.sub(r"[((][^()()]+[))]", "", nm.lower())
    nm = re.sub(r"[''`‘’“”,. &()();;]+", "", nm)
    return nm in GOOD_SCH
@@ -25,7 +25,8 @@ from xpinyin import Pinyin
from contextlib import contextmanager


class TimeoutException(Exception): pass
class TimeoutException(Exception):
    pass


@contextmanager
@@ -50,8 +51,10 @@ def rmHtmlTag(line):


def highest_degree(dg):
    if not dg: return ""
    if type(dg) == type(""): dg = [dg]
    if not dg:
        return ""
    if isinstance(dg, str):
        dg = [dg]
    m = {"初中": 0, "高中": 1, "中专": 2, "大专": 3, "专升本": 4, "本科": 5, "硕士": 6, "博士": 7, "博士后": 8}
    return sorted([(d, m.get(d, -1)) for d in dg], key=lambda x: x[1] * -1)[0][0]

@@ -68,10 +71,12 @@ def forEdu(cv):
    for ii, n in enumerate(sorted(cv["education_obj"], key=lambda x: x.get("start_time", "3"))):
        e = {}
        if n.get("end_time"):
            if n["end_time"] > edu_end_dt: edu_end_dt = n["end_time"]
            if n["end_time"] > edu_end_dt:
                edu_end_dt = n["end_time"]
            try:
                dt = n["end_time"]
                if re.match(r"[0-9]{9,}", dt): dt = turnTm2Dt(dt)
                if re.match(r"[0-9]{9,}", dt):
                    dt = turnTm2Dt(dt)
                y, m, d = getYMD(dt)
                ed_dt.append(str(y))
                e["end_dt_kwd"] = str(y)
@@ -80,7 +85,8 @@ def forEdu(cv):
        if n.get("start_time"):
            try:
                dt = n["start_time"]
                if re.match(r"[0-9]{9,}", dt): dt = turnTm2Dt(dt)
                if re.match(r"[0-9]{9,}", dt):
                    dt = turnTm2Dt(dt)
                y, m, d = getYMD(dt)
                st_dt.append(str(y))
                e["start_dt_kwd"] = str(y)
@@ -89,13 +95,20 @@ def forEdu(cv):

        r = schools.select(n.get("school_name", ""))
        if r:
            if str(r.get("type", "")) == "1": fea.append("211")
            if str(r.get("type", "")) == "2": fea.append("211")
            if str(r.get("is_abroad", "")) == "1": fea.append("留学")
            if str(r.get("is_double_first", "")) == "1": fea.append("双一流")
            if str(r.get("is_985", "")) == "1": fea.append("985")
            if str(r.get("is_world_known", "")) == "1": fea.append("海外知名")
            if r.get("rank") and cv["school_rank_int"] > r["rank"]: cv["school_rank_int"] = r["rank"]
            if str(r.get("type", "")) == "1":
                fea.append("211")
            if str(r.get("type", "")) == "2":
                fea.append("211")
            if str(r.get("is_abroad", "")) == "1":
                fea.append("留学")
            if str(r.get("is_double_first", "")) == "1":
                fea.append("双一流")
            if str(r.get("is_985", "")) == "1":
                fea.append("985")
            if str(r.get("is_world_known", "")) == "1":
                fea.append("海外知名")
            if r.get("rank") and cv["school_rank_int"] > r["rank"]:
                cv["school_rank_int"] = r["rank"]

        if n.get("school_name") and isinstance(n["school_name"], str):
            sch.append(re.sub(r"(211|985|重点大学|[,&;;-])", "", n["school_name"]))
@@ -106,22 +119,25 @@ def forEdu(cv):
            maj.append(n["discipline_name"])
            e["major_kwd"] = n["discipline_name"]

        if not n.get("degree") and "985" in fea and not first_fea: n["degree"] = "1"
        if not n.get("degree") and "985" in fea and not first_fea:
            n["degree"] = "1"

        if n.get("degree"):
            d = degrees.get_name(n["degree"])
            if d: e["degree_kwd"] = d
            if d == "本科" and ("专科" in deg or "专升本" in deg or "中专" in deg or "大专" in deg or re.search(r"(成人|自考|自学考试)",
                                                                                                    n.get(
                                                                                                        "school_name",
                                                                                                        ""))): d = "专升本"
            if d: deg.append(d)
            if d:
                e["degree_kwd"] = d
            if d == "本科" and ("专科" in deg or "专升本" in deg or "中专" in deg or "大专" in deg or re.search(r"(成人|自考|自学考试)", n.get("school_name",""))):
                d = "专升本"
            if d:
                deg.append(d)

            # for first degree
            if not fdeg and d in ["中专", "专升本", "专科", "本科", "大专"]:
                fdeg = [d]
                if n.get("school_name"): fsch = [n["school_name"]]
                if n.get("discipline_name"): fmaj = [n["discipline_name"]]
                if n.get("school_name"):
                    fsch = [n["school_name"]]
                if n.get("discipline_name"):
                    fmaj = [n["discipline_name"]]
                first_fea = copy.deepcopy(fea)

        edu_nst.append(e)
@@ -140,16 +156,26 @@ def forEdu(cv):
    else:
        cv["sch_rank_kwd"].append("一般学校")

    if edu_nst: cv["edu_nst"] = edu_nst
    if fea: cv["edu_fea_kwd"] = list(set(fea))
    if first_fea: cv["edu_first_fea_kwd"] = list(set(first_fea))
    if maj: cv["major_kwd"] = maj
    if fsch: cv["first_school_name_kwd"] = fsch
    if fdeg: cv["first_degree_kwd"] = fdeg
    if fmaj: cv["first_major_kwd"] = fmaj
    if st_dt: cv["edu_start_kwd"] = st_dt
    if ed_dt: cv["edu_end_kwd"] = ed_dt
    if ed_dt: cv["edu_end_int"] = max([int(t) for t in ed_dt])
    if edu_nst:
        cv["edu_nst"] = edu_nst
    if fea:
        cv["edu_fea_kwd"] = list(set(fea))
    if first_fea:
        cv["edu_first_fea_kwd"] = list(set(first_fea))
    if maj:
        cv["major_kwd"] = maj
    if fsch:
        cv["first_school_name_kwd"] = fsch
    if fdeg:
        cv["first_degree_kwd"] = fdeg
    if fmaj:
        cv["first_major_kwd"] = fmaj
    if st_dt:
        cv["edu_start_kwd"] = st_dt
    if ed_dt:
        cv["edu_end_kwd"] = ed_dt
    if ed_dt:
        cv["edu_end_int"] = max([int(t) for t in ed_dt])
    if deg:
        if "本科" in deg and "专科" in deg:
            deg.append("专升本")
@@ -158,8 +184,10 @@ def forEdu(cv):
        cv["highest_degree_kwd"] = highest_degree(deg)
    if edu_end_dt:
        try:
            if re.match(r"[0-9]{9,}", edu_end_dt): edu_end_dt = turnTm2Dt(edu_end_dt)
            if edu_end_dt.strip("\n") == "至今": edu_end_dt = cv.get("updated_at_dt", str(datetime.date.today()))
            if re.match(r"[0-9]{9,}", edu_end_dt):
                edu_end_dt = turnTm2Dt(edu_end_dt)
            if edu_end_dt.strip("\n") == "至今":
                edu_end_dt = cv.get("updated_at_dt", str(datetime.date.today()))
            y, m, d = getYMD(edu_end_dt)
            cv["work_exp_flt"] = min(int(str(datetime.date.today())[0:4]) - int(y), cv.get("work_exp_flt", 1000))
        except Exception as e:
@@ -171,7 +199,8 @@ def forEdu(cv):
            or not cv.get("degree_kwd"):
        for c in sch:
            if schools.is_good(c):
                if "tag_kwd" not in cv: cv["tag_kwd"] = []
                if "tag_kwd" not in cv:
                    cv["tag_kwd"] = []
                cv["tag_kwd"].append("好学校")
                cv["tag_kwd"].append("好学历")
                break
@@ -180,28 +209,39 @@ def forEdu(cv):
            any([d.lower() in ["硕士", "博士", "mba", "博士"] for d in cv.get("degree_kwd", [])])) \
            or all([d.lower() in ["硕士", "博士", "mba", "博士后"] for d in cv.get("degree_kwd", [])]) \
            or any([d in ["mba", "emba", "博士后"] for d in cv.get("degree_kwd", [])]):
        if "tag_kwd" not in cv: cv["tag_kwd"] = []
        if "好学历" not in cv["tag_kwd"]: cv["tag_kwd"].append("好学历")
        if "tag_kwd" not in cv:
            cv["tag_kwd"] = []
        if "好学历" not in cv["tag_kwd"]:
            cv["tag_kwd"].append("好学历")

    if cv.get("major_kwd"): cv["major_tks"] = rag_tokenizer.tokenize(" ".join(maj))
    if cv.get("school_name_kwd"): cv["school_name_tks"] = rag_tokenizer.tokenize(" ".join(sch))
    if cv.get("first_school_name_kwd"): cv["first_school_name_tks"] = rag_tokenizer.tokenize(" ".join(fsch))
    if cv.get("first_major_kwd"): cv["first_major_tks"] = rag_tokenizer.tokenize(" ".join(fmaj))
    if cv.get("major_kwd"):
        cv["major_tks"] = rag_tokenizer.tokenize(" ".join(maj))
    if cv.get("school_name_kwd"):
        cv["school_name_tks"] = rag_tokenizer.tokenize(" ".join(sch))
    if cv.get("first_school_name_kwd"):
        cv["first_school_name_tks"] = rag_tokenizer.tokenize(" ".join(fsch))
    if cv.get("first_major_kwd"):
        cv["first_major_tks"] = rag_tokenizer.tokenize(" ".join(fmaj))

    return cv


def forProj(cv):
    if not cv.get("project_obj"): return cv
    if not cv.get("project_obj"):
        return cv

    pro_nms, desc = [], []
    for i, n in enumerate(
            sorted(cv.get("project_obj", []), key=lambda x: str(x.get("updated_at", "")) if type(x) == type({}) else "",
            sorted(cv.get("project_obj", []), key=lambda x: str(x.get("updated_at", "")) if isinstance(x, dict) else "",
                   reverse=True)):
        if n.get("name"): pro_nms.append(n["name"])
        if n.get("describe"): desc.append(str(n["describe"]))
        if n.get("responsibilities"): desc.append(str(n["responsibilities"]))
        if n.get("achivement"): desc.append(str(n["achivement"]))
        if n.get("name"):
            pro_nms.append(n["name"])
        if n.get("describe"):
            desc.append(str(n["describe"]))
        if n.get("responsibilities"):
            desc.append(str(n["responsibilities"]))
        if n.get("achivement"):
            desc.append(str(n["achivement"]))

    if pro_nms:
        # cv["pro_nms_tks"] = rag_tokenizer.tokenize(" ".join(pro_nms))
@@ -233,15 +273,16 @@ def forWork(cv):
    work_st_tm = ""
    corp_tags = []
    for i, n in enumerate(
            sorted(cv.get("work_obj", []), key=lambda x: str(x.get("start_time", "")) if type(x) == type({}) else "",
            sorted(cv.get("work_obj", []), key=lambda x: str(x.get("start_time", "")) if isinstance(x, dict) else "",
                   reverse=True)):
        if type(n) == type(""):
        if isinstance(n, str):
            try:
                n = json_loads(n)
            except Exception:
                continue

        if n.get("start_time") and (not work_st_tm or n["start_time"] < work_st_tm): work_st_tm = n["start_time"]
        if n.get("start_time") and (not work_st_tm or n["start_time"] < work_st_tm):
            work_st_tm = n["start_time"]
        for c in flds:
            if not n.get(c) or str(n[c]) == '0':
                fea[c].append("")
@@ -262,14 +303,18 @@ def forWork(cv):
            fea[c].append(rmHtmlTag(str(n[c]).lower()))

        y, m, d = getYMD(n.get("start_time"))
        if not y or not m: continue
        if not y or not m:
            continue
        st = "%s-%02d-%02d" % (y, int(m), int(d))
        latest_job_tm = st

        y, m, d = getYMD(n.get("end_time"))
        if (not y or not m) and i > 0: continue
        if not y or not m or int(y) > 2022: y, m, d = getYMD(str(n.get("updated_at", "")))
        if not y or not m: continue
        if (not y or not m) and i > 0:
            continue
        if not y or not m or int(y) > 2022:
            y, m, d = getYMD(str(n.get("updated_at", "")))
        if not y or not m:
            continue
        ed = "%s-%02d-%02d" % (y, int(m), int(d))

        try:
@@ -279,22 +324,28 @@ def forWork(cv):

        if n.get("scale"):
            r = re.search(r"^([0-9]+)", str(n["scale"]))
            if r: scales.append(int(r.group(1)))
            if r:
                scales.append(int(r.group(1)))

    if goodcorp:
        if "tag_kwd" not in cv: cv["tag_kwd"] = []
        if "tag_kwd" not in cv:
            cv["tag_kwd"] = []
        cv["tag_kwd"].append("好公司")
    if goodcorp_:
        if "tag_kwd" not in cv: cv["tag_kwd"] = []
        if "tag_kwd" not in cv:
            cv["tag_kwd"] = []
        cv["tag_kwd"].append("好公司(曾)")

    if corp_tags:
        if "tag_kwd" not in cv: cv["tag_kwd"] = []
        if "tag_kwd" not in cv:
            cv["tag_kwd"] = []
        cv["tag_kwd"].extend(corp_tags)
        cv["corp_tag_kwd"] = [c for c in corp_tags if re.match(r"(综合|行业)", c)]

    if latest_job_tm: cv["latest_job_dt"] = latest_job_tm
    if fea["corporation_id"]: cv["corporation_id"] = fea["corporation_id"]
    if latest_job_tm:
        cv["latest_job_dt"] = latest_job_tm
    if fea["corporation_id"]:
        cv["corporation_id"] = fea["corporation_id"]

    if fea["position_name"]:
        cv["position_name_tks"] = rag_tokenizer.tokenize(fea["position_name"][0])
@@ -317,18 +368,23 @@ def forWork(cv):
        cv["responsibilities_ltks"] = rag_tokenizer.tokenize(fea["responsibilities"][0])
        cv["resp_ltks"] = rag_tokenizer.tokenize(" ".join(fea["responsibilities"][1:]))

    if fea["subordinates_count"]: fea["subordinates_count"] = [int(i) for i in fea["subordinates_count"] if
                                                               re.match(r"[^0-9]+$", str(i))]
    if fea["subordinates_count"]: cv["max_sub_cnt_int"] = np.max(fea["subordinates_count"])
    if fea["subordinates_count"]:
        fea["subordinates_count"] = [int(i) for i in fea["subordinates_count"] if
                                     re.match(r"[^0-9]+$", str(i))]
    if fea["subordinates_count"]:
        cv["max_sub_cnt_int"] = np.max(fea["subordinates_count"])

    if type(cv.get("corporation_id")) == type(1): cv["corporation_id"] = [str(cv["corporation_id"])]
    if not cv.get("corporation_id"): cv["corporation_id"] = []
    if isinstance(cv.get("corporation_id"), int):
        cv["corporation_id"] = [str(cv["corporation_id"])]
    if not cv.get("corporation_id"):
        cv["corporation_id"] = []
    for i in cv.get("corporation_id", []):
        cv["baike_flt"] = max(corporations.baike(i), cv["baike_flt"] if "baike_flt" in cv else 0)

    if work_st_tm:
        try:
            if re.match(r"[0-9]{9,}", work_st_tm): work_st_tm = turnTm2Dt(work_st_tm)
            if re.match(r"[0-9]{9,}", work_st_tm):
                work_st_tm = turnTm2Dt(work_st_tm)
            y, m, d = getYMD(work_st_tm)
            cv["work_exp_flt"] = min(int(str(datetime.date.today())[0:4]) - int(y), cv.get("work_exp_flt", 1000))
        except Exception as e:
@@ -339,28 +395,37 @@ def forWork(cv):
        cv["dua_flt"] = np.mean(duas)
        cv["cur_dua_int"] = duas[0]
        cv["job_num_int"] = len(duas)
    if scales: cv["scale_flt"] = np.max(scales)
    if scales:
        cv["scale_flt"] = np.max(scales)
    return cv


def turnTm2Dt(b):
    if not b: return
    if not b:
        return
    b = str(b).strip()
    if re.match(r"[0-9]{10,}", b): b = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(int(b[:10])))
    if re.match(r"[0-9]{10,}", b):
        b = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(int(b[:10])))
    return b


def getYMD(b):
    y, m, d = "", "", "01"
    if not b: return (y, m, d)
    if not b:
        return (y, m, d)
    b = turnTm2Dt(b)
    if re.match(r"[0-9]{4}", b): y = int(b[:4])
    if re.match(r"[0-9]{4}", b):
        y = int(b[:4])
    r = re.search(r"[0-9]{4}.?([0-9]{1,2})", b)
    if r: m = r.group(1)
    if r:
        m = r.group(1)
    r = re.search(r"[0-9]{4}.?[0-9]{,2}.?([0-9]{1,2})", b)
    if r: d = r.group(1)
    if not d or int(d) == 0 or int(d) > 31: d = "1"
    if not m or int(m) > 12 or int(m) < 1: m = "1"
    if r:
        d = r.group(1)
    if not d or int(d) == 0 or int(d) > 31:
        d = "1"
    if not m or int(m) > 12 or int(m) < 1:
        m = "1"
    return (y, m, d)
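A short sketch of how the two date helpers above behave on common inputs (values illustrative):

print(getYMD("2021-03-15"))     # -> (2021, '03', '15')
print(getYMD(""))               # -> ('', '', '01'): defaults survive empty input
print(turnTm2Dt("1694102400"))  # 10+ digit epoch seconds are converted via
                                # time.localtime, so the result is timezone-local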
|
||||
@ -369,7 +434,8 @@ def birth(cv):
|
||||
cv["integerity_flt"] *= 0.9
|
||||
return cv
|
||||
y, m, d = getYMD(cv["birth"])
|
||||
if not m or not y: return cv
|
||||
if not m or not y:
|
||||
return cv
|
||||
b = "%s-%02d-%02d" % (y, int(m), int(d))
|
||||
cv["birth_dt"] = b
|
||||
cv["birthday_kwd"] = "%02d%02d" % (int(m), int(d))
|
||||
@ -380,7 +446,8 @@ def birth(cv):
|
||||
|
||||
def parse(cv):
|
||||
for k in cv.keys():
|
||||
if cv[k] == '\\N': cv[k] = ''
|
||||
if cv[k] == '\\N':
|
||||
cv[k] = ''
|
||||
# cv = cv.asDict()
|
||||
tks_fld = ["address", "corporation_name", "discipline_name", "email", "expect_city_names",
|
||||
"expect_industry_name", "expect_position_name", "industry_name", "industry_names", "name",
|
||||
@ -402,9 +469,12 @@ def parse(cv):
|
||||
|
||||
rmkeys = []
|
||||
for k in cv.keys():
|
||||
if cv[k] is None: rmkeys.append(k)
|
||||
if (type(cv[k]) == type([]) or type(cv[k]) == type("")) and len(cv[k]) == 0: rmkeys.append(k)
|
||||
for k in rmkeys: del cv[k]
|
||||
if cv[k] is None:
|
||||
rmkeys.append(k)
|
||||
if (isinstance(cv[k], list) or isinstance(cv[k], str)) and len(cv[k]) == 0:
|
||||
rmkeys.append(k)
|
||||
for k in rmkeys:
|
||||
del cv[k]
|
||||
|
||||
integerity = 0.
|
||||
flds_num = 0.
|
||||
@ -414,7 +484,8 @@ def parse(cv):
|
||||
flds_num += len(flds)
|
||||
for f in flds:
|
||||
v = str(cv.get(f, ""))
|
||||
if len(v) > 0 and v != '0' and v != '[]': integerity += 1
|
||||
if len(v) > 0 and v != '0' and v != '[]':
|
||||
integerity += 1
|
||||
|
||||
hasValues(tks_fld)
|
||||
hasValues(small_tks_fld)
|
||||
@ -433,7 +504,8 @@ def parse(cv):
|
||||
(r"[ ()\(\)人/·0-9-]+", ""),
|
||||
(r".*(元|规模|于|=|北京|上海|至今|中国|工资|州|shanghai|强|餐饮|融资|职).*", "")]:
|
||||
cv["corporation_type"] = re.sub(p, r, cv["corporation_type"], 1000, re.IGNORECASE)
|
||||
if len(cv["corporation_type"]) < 2: del cv["corporation_type"]
|
||||
if len(cv["corporation_type"]) < 2:
|
||||
del cv["corporation_type"]
|
||||
|
||||
if cv.get("political_status"):
|
||||
for p, r in [
|
||||
@ -441,9 +513,11 @@ def parse(cv):
|
||||
(r".*(无党派|公民).*", "群众"),
|
||||
(r".*团员.*", "团员")]:
|
||||
cv["political_status"] = re.sub(p, r, cv["political_status"])
|
||||
if not re.search(r"[党团群]", cv["political_status"]): del cv["political_status"]
|
||||
if not re.search(r"[党团群]", cv["political_status"]):
|
||||
del cv["political_status"]
|
||||
|
||||
if cv.get("phone"): cv["phone"] = re.sub(r"^0*86([0-9]{11})", r"\1", re.sub(r"[^0-9]+", "", cv["phone"]))
|
||||
if cv.get("phone"):
|
||||
cv["phone"] = re.sub(r"^0*86([0-9]{11})", r"\1", re.sub(r"[^0-9]+", "", cv["phone"]))
|
||||
|
||||
keys = list(cv.keys())
|
||||
for k in keys:
|
||||
@ -454,9 +528,11 @@ def parse(cv):
|
||||
cv[k] = [a for _, a in cv[k].items()]
|
||||
nms = []
|
||||
for n in cv[k]:
|
||||
if type(n) != type({}) or "name" not in n or not n.get("name"): continue
|
||||
if not isinstance(n, dict) or "name" not in n or not n.get("name"):
|
||||
continue
|
||||
n["name"] = re.sub(r"((442)|\t )", "", n["name"]).strip().lower()
|
||||
if not n["name"]: continue
|
||||
if not n["name"]:
|
||||
continue
|
||||
nms.append(n["name"])
|
||||
if nms:
|
||||
t = k[:-4]
|
||||
@ -469,15 +545,18 @@ def parse(cv):
|
||||
# tokenize fields
|
||||
if k in tks_fld:
|
||||
cv[f"{k}_tks"] = rag_tokenizer.tokenize(cv[k])
|
||||
if k in small_tks_fld: cv[f"{k}_sm_tks"] = rag_tokenizer.tokenize(cv[f"{k}_tks"])
|
||||
if k in small_tks_fld:
|
||||
cv[f"{k}_sm_tks"] = rag_tokenizer.tokenize(cv[f"{k}_tks"])
|
||||
|
||||
# keyword fields
|
||||
if k in kwd_fld: cv[f"{k}_kwd"] = [n.lower()
|
||||
if k in kwd_fld:
|
||||
cv[f"{k}_kwd"] = [n.lower()
|
||||
for n in re.split(r"[\t,,;;. ]",
|
||||
re.sub(r"([^a-zA-Z])[ ]+([^a-zA-Z ])", r"\1,\2", cv[k])
|
||||
) if n]
|
||||
|
||||
if k in num_fld and cv.get(k): cv[f"{k}_int"] = cv[k]
|
||||
if k in num_fld and cv.get(k):
|
||||
cv[f"{k}_int"] = cv[k]
|
||||
|
||||
cv["email_kwd"] = cv.get("email_tks", "").replace(" ", "")
|
||||
# for name field
|
||||
@ -501,10 +580,12 @@ def parse(cv):
|
||||
cv["name_py_pref0_tks"] = ""
|
||||
cv["name_py_pref_tks"] = ""
|
||||
for py in PY.get_pinyins(nm[:20], ''):
|
||||
for i in range(2, len(py) + 1): cv["name_py_pref_tks"] += " " + py[:i]
|
||||
for i in range(2, len(py) + 1):
|
||||
cv["name_py_pref_tks"] += " " + py[:i]
|
||||
for py in PY.get_pinyins(nm[:20], ' '):
|
||||
py = py.split()
|
||||
for i in range(1, len(py) + 1): cv["name_py_pref0_tks"] += " " + "".join(py[:i])
|
||||
for i in range(1, len(py) + 1):
|
||||
cv["name_py_pref0_tks"] += " " + "".join(py[:i])
|
||||
|
||||
cv["name_kwd"] = name
|
||||
cv["name_pinyin_kwd"] = PY.get_pinyins(nm[:20], ' ')[:3]
|
||||
@ -526,22 +607,30 @@ def parse(cv):
|
||||
cv["updated_at_dt"] = cv["updated_at"].strftime('%Y-%m-%d %H:%M:%S')
|
||||
else:
|
||||
y, m, d = getYMD(str(cv.get("updated_at", "")))
|
||||
if not y: y = "2012"
|
||||
if not m: m = "01"
|
||||
if not d: d = "01"
|
||||
if not y:
|
||||
y = "2012"
|
||||
if not m:
|
||||
m = "01"
|
||||
if not d:
|
||||
d = "01"
|
||||
cv["updated_at_dt"] = "%s-%02d-%02d 00:00:00" % (y, int(m), int(d))
|
||||
# long text tokenize
|
||||
|
||||
if cv.get("responsibilities"): cv["responsibilities_ltks"] = rag_tokenizer.tokenize(rmHtmlTag(cv["responsibilities"]))
|
||||
if cv.get("responsibilities"):
|
||||
cv["responsibilities_ltks"] = rag_tokenizer.tokenize(rmHtmlTag(cv["responsibilities"]))
|
||||
|
||||
# for yes or no field
|
||||
fea = []
|
||||
for f, y, n in is_fld:
|
||||
if f not in cv: continue
|
||||
if cv[f] == '是': fea.append(y)
|
||||
if cv[f] == '否': fea.append(n)
|
||||
if f not in cv:
|
||||
continue
|
||||
if cv[f] == '是':
|
||||
fea.append(y)
|
||||
if cv[f] == '否':
|
||||
fea.append(n)
|
||||
|
||||
if fea: cv["tag_kwd"] = fea
|
||||
if fea:
|
||||
cv["tag_kwd"] = fea
|
||||
|
||||
cv = forEdu(cv)
|
||||
cv = forProj(cv)
|
||||
@ -550,9 +639,11 @@ def parse(cv):
|
||||
|
||||
cv["corp_proj_sch_deg_kwd"] = [c for c in cv.get("corp_tag_kwd", [])]
|
||||
for i in range(len(cv["corp_proj_sch_deg_kwd"])):
|
||||
for j in cv.get("sch_rank_kwd", []): cv["corp_proj_sch_deg_kwd"][i] += "+" + j
|
||||
for j in cv.get("sch_rank_kwd", []):
|
||||
cv["corp_proj_sch_deg_kwd"][i] += "+" + j
|
||||
for i in range(len(cv["corp_proj_sch_deg_kwd"])):
|
||||
if cv.get("highest_degree_kwd"): cv["corp_proj_sch_deg_kwd"][i] += "+" + cv["highest_degree_kwd"]
|
||||
if cv.get("highest_degree_kwd"):
|
||||
cv["corp_proj_sch_deg_kwd"][i] += "+" + cv["highest_degree_kwd"]
|
||||
|
||||
try:
|
||||
if not cv.get("work_exp_flt") and cv.get("work_start_time"):
|
||||
@ -565,17 +656,21 @@ def parse(cv):
|
||||
cv["work_exp_flt"] = int(str(datetime.date.today())[0:4]) - int(y)
|
||||
except Exception as e:
|
||||
logging.exception("parse {} ==> {}".format(e, cv.get("work_start_time")))
|
||||
if "work_exp_flt" not in cv and cv.get("work_experience", 0): cv["work_exp_flt"] = int(cv["work_experience"]) / 12.
|
||||
if "work_exp_flt" not in cv and cv.get("work_experience", 0):
|
||||
cv["work_exp_flt"] = int(cv["work_experience"]) / 12.
|
||||
|
||||
keys = list(cv.keys())
|
||||
for k in keys:
|
||||
if not re.search(r"_(fea|tks|nst|dt|int|flt|ltks|kwd|id)$", k): del cv[k]
|
||||
if not re.search(r"_(fea|tks|nst|dt|int|flt|ltks|kwd|id)$", k):
|
||||
del cv[k]
|
||||
for k in cv.keys():
|
||||
if not re.search("_(kwd|id)$", k) or type(cv[k]) != type([]): continue
|
||||
if not re.search("_(kwd|id)$", k) or not isinstance(cv[k], list):
|
||||
continue
|
||||
cv[k] = list(set([re.sub("(市)$", "", str(n)) for n in cv[k] if n not in ['中国', '0']]))
|
||||
keys = [k for k in cv.keys() if re.search(r"_feas*$", k)]
|
||||
for k in keys:
|
||||
if cv[k] <= 0: del cv[k]
|
||||
if cv[k] <= 0:
|
||||
del cv[k]
|
||||
|
||||
cv["tob_resume_id"] = str(cv["tob_resume_id"])
|
||||
cv["id"] = cv["tob_resume_id"]
|
||||
@ -592,5 +687,6 @@ def dealWithInt64(d):
|
||||
if isinstance(d, list):
|
||||
d = [dealWithInt64(t) for t in d]
|
||||
|
||||
if isinstance(d, np.integer): d = int(d)
|
||||
if isinstance(d, np.integer):
|
||||
d = int(d)
|
||||
return d
|
||||
|
||||
@@ -51,6 +51,7 @@ class RAGFlowTxtParser:
        dels = [d for d in dels if d]
        dels = "|".join(dels)
        secs = re.split(r"(%s)" % dels, txt)
        for sec in secs: add_chunk(sec)
        for sec in secs:
            add_chunk(sec)

        return [[c, ""] for c in cks]
@@ -15,7 +15,7 @@ import pdfplumber

from .ocr import OCR
from .recognizer import Recognizer
from .layout_recognizer import LayoutRecognizer
from .layout_recognizer import LayoutRecognizer4YOLOv10 as LayoutRecognizer
from .table_structure_recognizer import TableStructureRecognizer
@@ -47,7 +47,7 @@ def init_in_out(args):
        try:
            images.append(Image.open(fnm))
            outputs.append(os.path.split(fnm)[-1])
        except Exception as e:
        except Exception:
            traceback.print_exc()

    if os.path.isdir(args.inputs):
@@ -56,6 +56,16 @@ def init_in_out(args):
    else:
        images_and_outputs(args.inputs)

    for i in range(len(outputs)): outputs[i] = os.path.join(args.output_dir, outputs[i])
    for i in range(len(outputs)):
        outputs[i] = os.path.join(args.output_dir, outputs[i])

    return images, outputs
    return images, outputs


__all__ = [
    "OCR",
    "Recognizer",
    "LayoutRecognizer",
    "TableStructureRecognizer",
    "init_in_out",
]
@@ -14,11 +14,14 @@ import os
import re
from collections import Counter
from copy import deepcopy

import cv2
import numpy as np
from huggingface_hub import snapshot_download

from api.utils.file_utils import get_project_base_directory
from deepdoc.vision import Recognizer
from deepdoc.vision.operators import nms


class LayoutRecognizer(Recognizer):
@@ -42,7 +45,7 @@ class LayoutRecognizer(Recognizer):
                get_project_base_directory(),
                "rag/res/deepdoc")
            super().__init__(self.labels, domain, model_dir)
        except Exception as e:
        except Exception:
            model_dir = snapshot_download(repo_id="InfiniFlow/deepdoc",
                                          local_dir=os.path.join(get_project_base_directory(), "rag/res/deepdoc"),
                                          local_dir_use_symlinks=False)
@@ -77,7 +80,7 @@ class LayoutRecognizer(Recognizer):
                "page_number": pn,
            } for b in lts if float(b["score"]) >= 0.8 or b["type"] not in self.garbage_layouts]
            lts = self.sort_Y_firstly(lts, np.mean(
                [l["bottom"] - l["top"] for l in lts]) / 2)
                [lt["bottom"] - lt["top"] for lt in lts]) / 2)
            lts = self.layouts_cleanup(bxs, lts)
            page_layout.append(lts)

@@ -149,3 +152,88 @@ class LayoutRecognizer(Recognizer):

        ocr_res = [b for b in ocr_res if b["text"].strip() not in garbag_set]
        return ocr_res, page_layout


class LayoutRecognizer4YOLOv10(LayoutRecognizer):
    labels = [
        "title",
        "Text",
        "Reference",
        "Figure",
        "Figure caption",
        "Table",
        "Table caption",
        "Table caption",
        "Equation",
        "Figure caption",
    ]

    def __init__(self, domain):
        domain = "layout"
        super().__init__(domain)
        self.auto = False
        self.scaleFill = False
        self.scaleup = True
        self.stride = 32
        self.center = True

    def preprocess(self, image_list):
        inputs = []
        new_shape = self.input_shape  # height, width
        for img in image_list:
            shape = img.shape[:2]  # current shape [height, width]
            # Scale ratio (new / old)
            r = min(new_shape[0] / shape[0], new_shape[1] / shape[1])
            # Compute padding
            new_unpad = int(round(shape[1] * r)), int(round(shape[0] * r))
            dw, dh = new_shape[1] - new_unpad[0], new_shape[0] - new_unpad[1]  # wh padding
            dw /= 2  # divide padding into 2 sides
            dh /= 2
            ww, hh = new_unpad
            img = np.array(cv2.cvtColor(img, cv2.COLOR_BGR2RGB)).astype(np.float32)
            img = cv2.resize(img, new_unpad, interpolation=cv2.INTER_LINEAR)
            top, bottom = int(round(dh - 0.1)) if self.center else 0, int(round(dh + 0.1))
            left, right = int(round(dw - 0.1)) if self.center else 0, int(round(dw + 0.1))
            img = cv2.copyMakeBorder(
                img, top, bottom, left, right, cv2.BORDER_CONSTANT, value=(114, 114, 114)
            )  # add border
            img /= 255.0
            img = img.transpose(2, 0, 1)
            img = img[np.newaxis, :, :, :].astype(np.float32)
            inputs.append({self.input_names[0]: img, "scale_factor": [shape[1]/ww, shape[0]/hh, dw, dh]})

        return inputs

    def postprocess(self, boxes, inputs, thr):
        thr = 0.08
        boxes = np.squeeze(boxes)
        scores = boxes[:, 4]
        boxes = boxes[scores > thr, :]
        scores = scores[scores > thr]
        if len(boxes) == 0:
            return []
        class_ids = boxes[:, -1].astype(int)
        boxes = boxes[:, :4]
        boxes[:, 0] -= inputs["scale_factor"][2]
        boxes[:, 2] -= inputs["scale_factor"][2]
        boxes[:, 1] -= inputs["scale_factor"][3]
        boxes[:, 3] -= inputs["scale_factor"][3]
        input_shape = np.array([inputs["scale_factor"][0], inputs["scale_factor"][1], inputs["scale_factor"][0],
                                inputs["scale_factor"][1]])
        boxes = np.multiply(boxes, input_shape, dtype=np.float32)

        unique_class_ids = np.unique(class_ids)
        indices = []
        for class_id in unique_class_ids:
            class_indices = np.where(class_ids == class_id)[0]
            class_boxes = boxes[class_indices, :]
            class_scores = scores[class_indices]
            class_keep_boxes = nms(class_boxes, class_scores, 0.45)
            indices.extend(class_indices[class_keep_boxes])

        return [{
            "type": self.label_list[class_ids[i]].lower(),
            "bbox": [float(t) for t in boxes[i].tolist()],
            "score": float(scores[i])
        } for i in indices]
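The preprocess above is a standard YOLO letterbox: scale the page image to fit the model input, then pad symmetrically with gray (114) pixels. A small sketch of the arithmetic, assuming a 640x640 input shape and an 800x600 (h, w) page image (both values illustrative):

new_shape = (640, 640)
shape = (800, 600)

r = min(new_shape[0] / shape[0], new_shape[1] / shape[1])        # 0.8
new_unpad = int(round(shape[1] * r)), int(round(shape[0] * r))   # (480, 640) as (w, h)
dw = (new_shape[1] - new_unpad[0]) / 2                           # 80.0 px left and right
dh = (new_shape[0] - new_unpad[1]) / 2                           # 0.0 px top and bottom
print(r, new_unpad, dw, dh)
# The per-image scale_factor saved above lets postprocess undo exactly this:
# subtract (dw, dh) from the box corners, then rescale by the resize ratios.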
@@ -18,8 +18,10 @@ import os
from huggingface_hub import snapshot_download

from api.utils.file_utils import get_project_base_directory
from .operators import *
from .operators import *  # noqa: F403
import math
import numpy as np
import cv2
import onnxruntime as ort

from .postprocess import build_post_process
@@ -484,7 +486,7 @@ class OCR(object):
                "rag/res/deepdoc")
            self.text_detector = TextDetector(model_dir)
            self.text_recognizer = TextRecognizer(model_dir)
        except Exception as e:
        except Exception:
            model_dir = snapshot_download(repo_id="InfiniFlow/deepdoc",
                                          local_dir=os.path.join(get_project_base_directory(), "rag/res/deepdoc"),
                                          local_dir_use_symlinks=False)
@@ -232,7 +232,7 @@ class LinearResize(object):
        """
        assert len(self.target_size) == 2
        assert self.target_size[0] > 0 and self.target_size[1] > 0
        im_channel = im.shape[2]
        _im_channel = im.shape[2]
        im_scale_y, im_scale_x = self.generate_scale(im)
        im = cv2.resize(
            im,
@@ -255,7 +255,7 @@ class LinearResize(object):
            im_scale_y: the resize ratio of Y
        """
        origin_shape = im.shape[:2]
        im_c = im.shape[2]
        _im_c = im.shape[2]
        if self.keep_ratio:
            im_size_min = np.min(origin_shape)
            im_size_max = np.max(origin_shape)
@@ -581,7 +581,7 @@ class SRResize(object):
            return data

        images_HR = data["image_hr"]
        label_strs = data["label"]
        _label_strs = data["label"]
        transform = ResizeNormalize((imgW, imgH))
        images_HR = transform(images_HR)
        data["img_hr"] = images_HR
@@ -709,3 +709,29 @@ def preprocess(im, preprocess_ops):
    for operator in preprocess_ops:
        im, im_info = operator(im, im_info)
    return im, im_info


def nms(bboxes, scores, iou_thresh):
    import numpy as np
    x1 = bboxes[:, 0]
    y1 = bboxes[:, 1]
    x2 = bboxes[:, 2]
    y2 = bboxes[:, 3]
    areas = (y2 - y1) * (x2 - x1)

    indices = []
    index = scores.argsort()[::-1]
    while index.size > 0:
        i = index[0]
        indices.append(i)
        x11 = np.maximum(x1[i], x1[index[1:]])
        y11 = np.maximum(y1[i], y1[index[1:]])
        x22 = np.minimum(x2[i], x2[index[1:]])
        y22 = np.minimum(y2[i], y2[index[1:]])
        w = np.maximum(0, x22 - x11 + 1)
        h = np.maximum(0, y22 - y11 + 1)
        overlaps = w * h
        ious = overlaps / (areas[i] + areas[index[1:]] - overlaps)
        idx = np.where(ious <= iou_thresh)[0]
        index = index[idx + 1]
    return indices
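The new nms helper is the classic greedy non-maximum suppression: keep the highest-scoring box, drop everything that overlaps it beyond the IoU threshold, repeat. A hedged usage sketch with illustrative coordinates:

import numpy as np

boxes = np.array([
    [0, 0, 100, 100],    # box A
    [10, 10, 110, 110],  # box B, heavily overlapping A
    [200, 200, 300, 300],
], dtype=np.float32)
scores = np.array([0.9, 0.8, 0.7], dtype=np.float32)

print(nms(boxes, scores, 0.45))
# -> [0, 2]: B is suppressed because its IoU with the higher-scoring A
#    (about 0.71) exceeds 0.45, while the distant third box survives.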
@@ -121,7 +121,7 @@ class DBPostProcess(object):
        outs = cv2.findContours((bitmap * 255).astype(np.uint8), cv2.RETR_LIST,
                                cv2.CHAIN_APPROX_SIMPLE)
        if len(outs) == 3:
            img, contours, _ = outs[0], outs[1], outs[2]
            _img, contours, _ = outs[0], outs[1], outs[2]
        elif len(outs) == 2:
            contours, _ = outs[0], outs[1]
Some files were not shown because too many files have changed in this diff.