Mirror of https://github.com/infiniflow/ragflow.git, synced 2025-12-08 20:42:30 +08:00.

Compare commits (214 commits)
| Author | SHA1 | Date |
|---|---|---|
|  | 49494d4e3c |  |
|  | 3839d8abc7 |  |
|  | d8b150a34c |  |
|  | 4454b33e51 |  |
|  | ce6b4c0e05 |  |
|  | ddf01e0450 |  |
|  | 86e48179a1 |  |
|  | b2c33b4df7 |  |
|  | 9348616659 |  |
|  | a0e9b62de5 |  |
|  | 7874aaaf60 |  |
|  | 08ead81dde |  |
|  | e5af18d5ea |  |
|  | 609236f5c1 |  |
|  | 6a3f9bc32a |  |
|  | 934d6d9ad1 |  |
|  | 875096384b |  |
|  | a10c2f2eff |  |
|  | 646ac1f2b4 |  |
|  | 8872aed512 |  |
|  | 55692e4da6 |  |
|  | 6314d3c727 |  |
|  | 06b9256972 |  |
|  | cc219ff648 |  |
|  | ee33bf71eb |  |
|  | ee7fd71fdc |  |
|  | d56f52eef8 |  |
|  | 9f3141804f |  |
|  | 60a3e1a8dc |  |
|  | 9541d7e7bc |  |
|  | 811c49d7a2 |  |
|  | 482c1b59c8 |  |
|  | 691ea287c2 |  |
|  | b87d14492f |  |
|  | cc5960b88e |  |
|  | ee50f78d99 |  |
|  | 193b08a3ed |  |
|  | 3a3e23d8d9 |  |
|  | 30f111edb3 |  |
|  | d47ee88454 |  |
|  | 13ff463845 |  |
|  | bf9ebda3c8 |  |
|  | 85dd9fde43 |  |
|  | c7c8b3812f |  |
|  | 0ac6dc8f8c |  |
|  | 58a2200b80 |  |
|  | e10b0e6b60 |  |
|  | d9c882399d |  |
|  | 8930bfcff8 |  |
|  | 9b9afa9d6e |  |
|  | 362db857d0 |  |
|  | 541272eb99 |  |
|  | 5b44b99cfd |  |
|  | 6be7901df2 |  |
|  | 4d42bcd517 |  |
|  | 9b4c2868bd |  |
|  | d02a2b131a |  |
|  | 81c7b6afc5 |  |
|  | cad341e794 |  |
|  | e559cebcdc |  |
|  | 8b4407a68c |  |
|  | 289034f36e |  |
|  | 17a7ea42eb |  |
|  | 2044bb0039 |  |
|  | c4f2464935 |  |
|  | bcb6f7168f |  |
|  | 361cff34fc |  |
|  | 0cd5b64c3b |  |
|  | 16fbe9920d |  |
|  | e4280be5e5 |  |
|  | d42362deb6 |  |
|  | 883fafde72 |  |
|  | 568322aeaf |  |
|  | 31decadd8e |  |
|  | dec9b3e540 |  |
|  | d0f94a42ff |  |
|  | ed0d47fc8a |  |
|  | aa9a16e073 |  |
|  | eef84a86bf |  |
|  | ed72d1100b |  |
|  | f4e9dae33a |  |
|  | 50f7b7e0a3 |  |
|  | 01c2712941 |  |
|  | 4413683898 |  |
|  | 3824c1fec0 |  |
|  | 4b3eeaa6ef |  |
|  | 70cd5c1599 |  |
|  | f9643adc43 |  |
|  | 7b9e0723d6 |  |
|  | a1d01a1b2f |  |
|  | dc05f43eee |  |
|  | 77bdeb32bd |  |
|  | af18217d78 |  |
|  | 4ed5ca2666 |  |
|  | 1e90a1bf36 |  |
|  | ac033b62cf |  |
|  | cb3b9d7ada |  |
|  | ca9e97d2f2 |  |
|  | 6d451dbe06 |  |
|  | e0659a4f0e |  |
|  | a854bc22d1 |  |
|  | 48e060aa53 |  |
|  | 47abfc32d4 |  |
|  | a1ba228bc2 |  |
|  | 996c94a8e7 |  |
|  | 220aaddc62 |  |
|  | 6878d23a57 |  |
|  | df9d054551 |  |
|  | 30c1f7ee29 |  |
|  | e4c4fdabbd |  |
|  | 30f6421760 |  |
|  | ab4384e011 |  |
|  | 201bbef7c0 |  |
|  | 95d21e5d9f |  |
|  | c5368c7745 |  |
|  | 0657a09e2c |  |
|  | 4caf932808 |  |
|  | 400fc3f5e9 |  |
|  | e44e3a67b0 |  |
|  | 9d395ab74e |  |
|  | 83c6b1f308 |  |
|  | 7ab9715b0e |  |
|  | 632b23486f |  |
|  | ccf189cb7f |  |
|  | 1fe9a2e6fd |  |
|  | 9fc092a911 |  |
|  | fa54cd5f5c |  |
|  | 667d0e5537 |  |
|  | 91332fa0f8 |  |
|  | 0c95a3382b |  |
|  | 7274420ecd |  |
|  | a2a5631da4 |  |
|  | 567a7563e7 |  |
|  | 62a9afd382 |  |
|  | aa68d3b8db |  |
|  | 784ae896d1 |  |
|  | f4c52371ab |  |
|  | 00b6000b76 |  |
|  | db23d62827 |  |
|  | 70ea6661ed |  |
|  | a01fceb328 |  |
|  | e9e98ea093 |  |
|  | 528646a958 |  |
|  | 8536335e63 |  |
|  | 88072b1e90 |  |
|  | 34d1daac67 |  |
|  | 3faae0b2c2 |  |
|  | 5e5a35191e |  |
|  | 7c486ee3f9 |  |
|  | 20d686737a |  |
|  | 85047e7e36 |  |
|  | ac64e35a45 |  |
|  | 004487cca0 |  |
|  | 74d1eeb4d3 |  |
|  | 464a4d6ead |  |
|  | 3d3913419b |  |
|  | 63f7d3bae2 |  |
|  | 8b6e272197 |  |
|  | 5205bdab24 |  |
|  | 37d4708880 |  |
|  | d88f0d43ea |  |
|  | a2153d61ce |  |
|  | f16ef57979 |  |
|  | ff2bbb487f |  |
|  | 416efbe7e8 |  |
|  | 9c6cc20356 |  |
|  | 7c0d28b62d |  |
|  | 48ab6d7a45 |  |
|  | 96b5d2b3a9 |  |
|  | f45c29360c |  |
|  | cdcbe6c2b3 |  |
|  | 5038552ed9 |  |
|  | 1b3e39dd12 |  |
|  | fbcc0bb408 |  |
|  | d3bb5e9f3d |  |
|  | 4097912d59 |  |
|  | f3aaa0d453 |  |
|  | 0dff64f6ad |  |
|  | 601a128cd3 |  |
|  | af74bf01c0 |  |
|  | a418a343d1 |  |
|  | ab6e6019a7 |  |
|  | 13053172cb |  |
|  | 38ebf6b2c0 |  |
|  | a7bf4ca8fc |  |
|  | 7e89be5ed1 |  |
|  | b7b30c4b57 |  |
|  | 55953819c1 |  |
|  | 677f02c2a7 |  |
|  | 185c6a0c71 |  |
|  | 339639a9db |  |
|  | 18ae8a4091 |  |
|  | cbca7dfce6 |  |
|  | a9344e6838 |  |
|  | aa733b1ea4 |  |
|  | 8305632852 |  |
|  | 57f23e0808 |  |
|  | 16b6a78c1e |  |
|  | dd1146ec64 |  |
|  | 07c453500b |  |
|  | 3e4fc12d30 |  |
|  | 285fd6ae14 |  |
|  | 8d9238db14 |  |
|  | c06e765a5b |  |
|  | c7ea7e9974 |  |
|  | 37d71dfa90 |  |
|  | 44ad9a6cd7 |  |
|  | 7eafccf78a |  |
|  | b42d24575c |  |
|  | 3963aaa23e |  |
|  | 33e5e5db5b |  |
|  | 039cde7893 |  |
|  | fa9d76224b |  |
|  | 35a451c024 |  |
.github/ISSUE_TEMPLATE/bug_report.yml (vendored): 12 changed lines
````diff
@@ -15,16 +15,16 @@ body:
       value: "Please provide the following information to help us understand the issue."
   - type: input
     attributes:
-      label: Branch name
-      description: Enter the name of the branch where you encountered the issue.
-      placeholder: e.g., main
+      label: RAGFlow workspace code commit ID
+      description: Enter the commit ID associated with the issue.
+      placeholder: e.g., 26d3480e
     validations:
       required: true
   - type: input
     attributes:
-      label: Commit ID
-      description: Enter the commit ID associated with the issue.
-      placeholder: e.g., c3b2a1
+      label: RAGFlow image version
+      description: Enter the image version(shown in RAGFlow UI, `System` page) associated with the issue.
+      placeholder: e.g., 26d3480e(v0.13.0~174)
     validations:
       required: true
   - type: textarea
````
.github/workflows/tests.yml (vendored): 56 changed lines
````diff
@@ -42,19 +42,23 @@ jobs:
     - name: Ensure workspace ownership
       run: echo "chown -R $USER $GITHUB_WORKSPACE" && sudo chown -R $USER $GITHUB_WORKSPACE
 
+    # https://github.com/actions/checkout/issues/1781
     - name: Check out code
       uses: actions/checkout@v4
+      with:
+        fetch-depth: 0
+        fetch-tags: true
 
     - name: Build ragflow:dev-slim
       run: |
         RUNNER_WORKSPACE_PREFIX=${RUNNER_WORKSPACE_PREFIX:-$HOME}
-        cp -r ${RUNNER_WORKSPACE_PREFIX}/huggingface.co ${RUNNER_WORKSPACE_PREFIX}/nltk_data ${RUNNER_WORKSPACE_PREFIX}/libssl*.deb .
-        sudo docker pull ubuntu:24.04
-        sudo docker build -f Dockerfile.slim -t infiniflow/ragflow:dev-slim .
+        cp -r ${RUNNER_WORKSPACE_PREFIX}/huggingface.co ${RUNNER_WORKSPACE_PREFIX}/nltk_data ${RUNNER_WORKSPACE_PREFIX}/libssl*.deb ${RUNNER_WORKSPACE_PREFIX}/tika-server*.jar* ${RUNNER_WORKSPACE_PREFIX}/chrome* ${RUNNER_WORKSPACE_PREFIX}/cl100k_base.tiktoken .
+        sudo docker pull ubuntu:22.04
+        sudo docker build --progress=plain -f Dockerfile.slim -t infiniflow/ragflow:dev-slim .
 
     - name: Build ragflow:dev
       run: |
-        sudo docker build -f Dockerfile -t infiniflow/ragflow:dev .
+        sudo docker build --progress=plain -f Dockerfile -t infiniflow/ragflow:dev .
 
     - name: Start ragflow:dev-slim
       run: |
@@ -70,7 +74,7 @@ jobs:
         echo "RAGFLOW_IMAGE=infiniflow/ragflow:dev" >> docker/.env
         sudo docker compose -f docker/docker-compose.yml up -d
 
-    - name: Run tests
+    - name: Run sdk tests against Elasticsearch
       run: |
         export http_proxy=""; export https_proxy=""; export no_proxy=""; export HTTP_PROXY=""; export HTTPS_PROXY=""; export NO_PROXY=""
         export HOST_ADDRESS=http://host.docker.internal:9380
@@ -78,9 +82,49 @@ jobs:
           echo "Waiting for service to be available..."
           sleep 5
         done
-        cd sdk/python && poetry install && source .venv/bin/activate && cd test && pytest t_dataset.py t_chat.py t_session.py
+        cd sdk/python && poetry install && source .venv/bin/activate && cd test/test_sdk_api && pytest -s --tb=short get_email.py t_dataset.py t_chat.py t_session.py t_document.py t_chunk.py
+
+    - name: Run frontend api tests against Elasticsearch
+      run: |
+        export http_proxy=""; export https_proxy=""; export no_proxy=""; export HTTP_PROXY=""; export HTTPS_PROXY=""; export NO_PROXY=""
+        export HOST_ADDRESS=http://host.docker.internal:9380
+        until sudo docker exec ragflow-server curl -s --connect-timeout 5 ${HOST_ADDRESS} > /dev/null; do
+          echo "Waiting for service to be available..."
+          sleep 5
+        done
+        cd sdk/python && poetry install && source .venv/bin/activate && cd test/test_frontend_api && pytest -s --tb=short get_email.py test_dataset.py
+
 
     - name: Stop ragflow:dev
       if: always() # always run this step even if previous steps failed
       run: |
         sudo docker compose -f docker/docker-compose.yml down -v
+
+    - name: Start ragflow:dev
+      run: |
+        sudo DOC_ENGINE=infinity docker compose -f docker/docker-compose.yml up -d
+
+    - name: Run sdk tests against Infinity
+      run: |
+        export http_proxy=""; export https_proxy=""; export no_proxy=""; export HTTP_PROXY=""; export HTTPS_PROXY=""; export NO_PROXY=""
+        export HOST_ADDRESS=http://host.docker.internal:9380
+        until sudo docker exec ragflow-server curl -s --connect-timeout 5 ${HOST_ADDRESS} > /dev/null; do
+          echo "Waiting for service to be available..."
+          sleep 5
+        done
+        cd sdk/python && poetry install && source .venv/bin/activate && cd test/test_sdk_api && pytest -s --tb=short get_email.py t_dataset.py t_chat.py t_session.py t_document.py t_chunk.py
+
+    - name: Run frontend api tests against Infinity
+      run: |
+        export http_proxy=""; export https_proxy=""; export no_proxy=""; export HTTP_PROXY=""; export HTTPS_PROXY=""; export NO_PROXY=""
+        export HOST_ADDRESS=http://host.docker.internal:9380
+        until sudo docker exec ragflow-server curl -s --connect-timeout 5 ${HOST_ADDRESS} > /dev/null; do
+          echo "Waiting for service to be available..."
+          sleep 5
+        done
+        cd sdk/python && poetry install && source .venv/bin/activate && cd test/test_frontend_api && pytest -s --tb=short get_email.py test_dataset.py
+
+    - name: Stop ragflow:dev
+      if: always() # always run this step even if previous steps failed
+      run: |
+        sudo DOC_ENGINE=infinity docker compose -f docker/docker-compose.yml down -v
````
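The renamed test steps split the old `Run tests` job into SDK and frontend-API suites and repeat each against both doc engines. For orientation, a minimal sketch of the kind of check a `t_dataset.py`-style test performs, assuming the `ragflow-sdk` Python package of that era (`RAGFlow`, `create_dataset`, `delete_datasets` are its documented names; the API key is a placeholder):

```python
# Minimal SDK smoke-test sketch (pytest style), assuming the ragflow-sdk
# package. HOST_ADDRESS matches the variable exported in the workflow above.
import os

from ragflow_sdk import RAGFlow  # assumption: v0.14-era SDK package name

HOST_ADDRESS = os.getenv("HOST_ADDRESS", "http://127.0.0.1:9380")
API_KEY = os.getenv("RAGFLOW_API_KEY", "<your-api-key>")  # placeholder


def test_create_and_delete_dataset():
    rag = RAGFlow(api_key=API_KEY, base_url=HOST_ADDRESS)
    dataset = rag.create_dataset(name="ci_smoke_test")  # create a knowledge base
    assert dataset.name == "ci_smoke_test"
    rag.delete_datasets(ids=[dataset.id])  # clean up after the assertion
```

Running such a test requires a live server at `HOST_ADDRESS`, which is why the workflow polls with `curl` before invoking pytest.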
Dockerfile: 85 changed lines
````diff
@@ -1,8 +1,8 @@
 # base stage
-FROM ubuntu:24.04 AS base
+FROM ubuntu:22.04 AS base
 USER root
 SHELL ["/bin/bash", "-c"]
 
-ARG ARCH=amd64
+ENV LIGHTEN=0
 
 WORKDIR /ragflow
@@ -13,29 +13,45 @@ RUN rm -f /etc/apt/apt.conf.d/docker-clean \
 RUN --mount=type=cache,id=ragflow_base_apt,target=/var/cache/apt,sharing=locked \
     apt update && apt-get --no-install-recommends install -y ca-certificates
 
-# If you download Python modules too slow, you can use a pip mirror site to speed up apt and poetry
-RUN sed -i 's|http://archive.ubuntu.com|https://mirrors.tuna.tsinghua.edu.cn|g' /etc/apt/sources.list.d/ubuntu.sources
-ENV POETRY_PYPI_MIRROR_URL=https://pypi.tuna.tsinghua.edu.cn/simple/
+# Setup apt mirror site
+RUN sed -i 's|http://archive.ubuntu.com|https://mirrors.tuna.tsinghua.edu.cn|g' /etc/apt/sources.list
 
 RUN --mount=type=cache,id=ragflow_base_apt,target=/var/cache/apt,sharing=locked \
-    apt update && apt install -y curl libpython3-dev nginx libglib2.0-0 libglx-mesa0 pkg-config libicu-dev libgdiplus python3-pip python3-poetry \
-    && pip3 install --user --break-system-packages poetry-plugin-pypi-mirror --index-url https://pypi.tuna.tsinghua.edu.cn/simple/ \
+    apt update && DEBIAN_FRONTEND=noninteractive apt install -y curl libpython3-dev nginx libglib2.0-0 libglx-mesa0 pkg-config libicu-dev libgdiplus default-jdk python3-pip pipx \
+    libatk-bridge2.0-0 libgtk-4-1 libnss3 xdg-utils unzip libgbm-dev wget git \
     && rm -rf /var/lib/apt/lists/*
 
+RUN pip3 config set global.index-url https://pypi.tuna.tsinghua.edu.cn/simple && pip3 config set global.trusted-host "pypi.tuna.tsinghua.edu.cn mirrors.pku.edu.cn" && pip3 config set global.extra-index-url "https://mirrors.pku.edu.cn/pypi/web/simple" \
+    && pipx install poetry \
+    && /root/.local/bin/poetry self add poetry-plugin-pypi-mirror
+
 # https://forum.aspose.com/t/aspose-slides-for-net-no-usable-version-of-libssl-found-with-linux-server/271344/13
 # aspose-slides on linux/arm64 is unavailable
 RUN --mount=type=bind,source=libssl1.1_1.1.1f-1ubuntu2_amd64.deb,target=/root/libssl1.1_1.1.1f-1ubuntu2_amd64.deb \
-    if [ "${ARCH}" = "amd64" ]; then \
+    --mount=type=bind,source=libssl1.1_1.1.1f-1ubuntu2_arm64.deb,target=/root/libssl1.1_1.1.1f-1ubuntu2_arm64.deb \
+    if [ "$(uname -m)" = "x86_64" ]; then \
         dpkg -i /root/libssl1.1_1.1.1f-1ubuntu2_amd64.deb; \
+    elif [ "$(uname -m)" = "aarch64" ]; then \
+        dpkg -i /root/libssl1.1_1.1.1f-1ubuntu2_arm64.deb; \
     fi
 
 ENV PYTHONDONTWRITEBYTECODE=1 DOTNET_SYSTEM_GLOBALIZATION_INVARIANT=1
 
+ENV PATH=/root/.local/bin:$PATH
 # Configure Poetry
 ENV POETRY_NO_INTERACTION=1
 ENV POETRY_VIRTUALENVS_IN_PROJECT=true
 ENV POETRY_VIRTUALENVS_CREATE=true
 ENV POETRY_REQUESTS_TIMEOUT=15
+ENV POETRY_PYPI_MIRROR_URL=https://pypi.tuna.tsinghua.edu.cn/simple/
 
+# nodejs 12.22 on Ubuntu 22.04 is too old
+RUN --mount=type=cache,id=ragflow_base_apt,target=/var/cache/apt,sharing=locked \
+    curl -fsSL https://deb.nodesource.com/setup_20.x | bash - && \
+    apt purge -y nodejs npm && \
+    apt autoremove && \
+    apt update && \
+    apt install -y nodejs cargo && \
+    rm -rf /var/lib/apt/lists/*
+
 # builder stage
 FROM base AS builder
@@ -43,23 +59,38 @@ USER root
 
 WORKDIR /ragflow
 
-RUN --mount=type=cache,id=ragflow_builder_apt,target=/var/cache/apt,sharing=locked \
-    apt update && apt install -y nodejs npm cargo && \
-    rm -rf /var/lib/apt/lists/*
+COPY .git /ragflow/.git
+
+RUN current_commit=$(git rev-parse --short HEAD); \
+    last_tag=$(git describe --tags --abbrev=0); \
+    commit_count=$(git rev-list --count "$last_tag..HEAD"); \
+    version_info=""; \
+    if [ "$commit_count" -eq 0 ]; then \
+        version_info=$last_tag; \
+    else \
+        version_info="$current_commit($last_tag~$commit_count)"; \
+    fi; \
+    if [ "$LIGHTEN" == "1" ]; then \
+        version_info="$version_info slim"; \
+    else \
+        version_info="$version_info full"; \
+    fi; \
+    echo "RAGFlow version: $version_info"; \
+    echo $version_info > /ragflow/VERSION
 
 COPY web web
 COPY docs docs
 RUN --mount=type=cache,id=ragflow_builder_npm,target=/root/.npm,sharing=locked \
-    cd web && npm i --force && npm run build
+    cd web && npm install --force && npm run build
 
 # install dependencies from poetry.lock file
 COPY pyproject.toml poetry.toml poetry.lock ./
 
 RUN --mount=type=cache,id=ragflow_builder_poetry,target=/root/.cache/pypoetry,sharing=locked \
-    if [ "$LIGHTEN" -eq 0 ]; then \
-        poetry install --sync --no-root --with=full; \
+    if [ "$LIGHTEN" == "1" ]; then \
+        poetry install --no-root; \
     else \
-        poetry install --sync --no-root; \
+        poetry install --no-root --with=full; \
     fi
 
 # production stage
@@ -68,6 +99,8 @@ USER root
 
 WORKDIR /ragflow
 
+COPY --from=builder /ragflow/VERSION /ragflow/VERSION
+
 # Install python packages' dependencies
 # cv2 requires libGL.so.1
 RUN --mount=type=cache,id=ragflow_production_apt,target=/var/cache/apt,sharing=locked \
@@ -101,6 +134,25 @@ RUN --mount=type=bind,source=huggingface.co,target=/huggingface.co \
 # Copy nltk data downloaded via download_deps.py
 COPY nltk_data /root/nltk_data
 
+# https://github.com/chrismattmann/tika-python
+# This is the only way to run python-tika without internet access. Without this set, the default is to check the tika version and pull latest every time from Apache.
+COPY tika-server-standard-3.0.0.jar /ragflow/tika-server-standard.jar
+COPY tika-server-standard-3.0.0.jar.md5 /ragflow/tika-server-standard.jar.md5
+ENV TIKA_SERVER_JAR="file:///ragflow/tika-server-standard.jar"
````
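The two `COPY` lines and the `TIKA_SERVER_JAR` setting above let tika-python run fully offline: instead of checking Apache for the latest server jar, it launches the bundled one (which is also why `default-jdk` is installed in the base stage). A minimal usage sketch; the input file name is hypothetical:

```python
# Offline Tika sketch: with TIKA_SERVER_JAR set, tika-python starts the
# bundled jar instead of downloading one from Apache.
import os

os.environ.setdefault("TIKA_SERVER_JAR", "file:///ragflow/tika-server-standard.jar")

from tika import parser  # pip install tika; needs a JDK on PATH

parsed = parser.from_file("sample.docx")    # hypothetical input document
print(parsed["metadata"])                   # metadata extracted by Tika
print(parsed["content"][:200])              # first 200 chars of extracted text
```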
````diff
+
+# Copy cl100k_base
+COPY cl100k_base.tiktoken /ragflow/9b5ad71b2ce5302211f9c61530b329a4922fc6a4
````
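The odd destination name is tiktoken's cache convention: cached encodings are stored under the SHA-1 hash of their download URL, and `9b5ad71b2ce5302211f9c61530b329a4922fc6a4` corresponds to the `cl100k_base` blob. A sketch of loading it offline, assuming the cache directory is pointed at `/ragflow` as laid out above:

```python
# tiktoken offline sketch: the cache file name is the SHA-1 of the blob URL,
# which is why the Dockerfile renames cl100k_base.tiktoken to that hash.
import os

os.environ["TIKTOKEN_CACHE_DIR"] = "/ragflow"  # assumes the image layout above

import tiktoken

enc = tiktoken.get_encoding("cl100k_base")  # resolved from the local cache, no network
tokens = enc.encode("How many tokens is this?")
print(len(tokens), tokens)
```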
````diff
+
+# Add dependencies of selenium
+RUN --mount=type=bind,source=chrome-linux64-121-0-6167-85,target=/chrome-linux64.zip \
+    unzip /chrome-linux64.zip && \
+    mv chrome-linux64 /opt/chrome && \
+    ln -s /opt/chrome/chrome /usr/local/bin/
+RUN --mount=type=bind,source=chromedriver-linux64-121-0-6167-85,target=/chromedriver-linux64.zip \
+    unzip -j /chromedriver-linux64.zip chromedriver-linux64/chromedriver && \
+    mv chromedriver /usr/local/bin/ && \
+    rm -f /usr/bin/google-chrome
````
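Chrome 121.0.6167.85 and its matching chromedriver are unpacked into fixed paths, so Selenium can be pinned to those binaries instead of fetching a driver at runtime. A sketch with Selenium 4 (the target URL is just an example):

```python
# Headless Chrome sketch pinned to the binaries installed above.
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.chrome.service import Service

options = Options()
options.binary_location = "/opt/chrome/chrome"  # the unpacked chrome-linux64 build
options.add_argument("--headless=new")
options.add_argument("--no-sandbox")  # needed when running as root in a container

driver = webdriver.Chrome(service=Service("/usr/local/bin/chromedriver"), options=options)
try:
    driver.get("https://example.com")
    print(driver.title)
finally:
    driver.quit()  # always release the browser process
```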
````diff
+
 # Copy compiled web pages
 COPY --from=builder /ragflow/web/dist /ragflow/web/dist
 
@@ -111,6 +163,7 @@ ENV PATH="${VIRTUAL_ENV}/bin:${PATH}"
 
 ENV PYTHONPATH=/ragflow/
 
+COPY docker/service_conf.yaml.template ./conf/service_conf.yaml.template
 COPY docker/entrypoint.sh ./entrypoint.sh
 RUN chmod +x ./entrypoint.sh
````
````diff
@@ -53,6 +53,7 @@ RUN conda run -n py11 python -m nltk.downloader wordnet
 ENV PYTHONPATH=/ragflow/
+ENV HF_ENDPOINT=https://hf-mirror.com
 
 COPY docker/service_conf.yaml.template ./conf/service_conf.yaml.template
 ADD docker/entrypoint.sh ./entrypoint.sh
 RUN chmod +x ./entrypoint.sh
````
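The `HF_ENDPOINT` default in the hunk above (from one of the repository's other Dockerfile variants) routes HuggingFace downloads through the hf-mirror.com mirror. The same variable works anywhere `huggingface_hub` is used; a sketch with an example model name:

```python
# HF mirror sketch: huggingface_hub honors HF_ENDPOINT, so setting it before
# import redirects all downloads to the mirror.
import os

os.environ.setdefault("HF_ENDPOINT", "https://hf-mirror.com")

from huggingface_hub import snapshot_download

# Example model; the repo_id is illustrative, not one RAGFlow necessarily pins.
path = snapshot_download(repo_id="BAAI/bge-small-en-v1.5")
print("downloaded to", path)
```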
Dockerfile.slim

````diff
@@ -1,8 +1,8 @@
 # base stage
-FROM ubuntu:24.04 AS base
+FROM ubuntu:22.04 AS base
 USER root
 SHELL ["/bin/bash", "-c"]
 
-ARG ARCH=amd64
+ENV LIGHTEN=1
 
 WORKDIR /ragflow
@@ -13,28 +13,45 @@ RUN rm -f /etc/apt/apt.conf.d/docker-clean \
 RUN --mount=type=cache,id=ragflow_base_apt,target=/var/cache/apt,sharing=locked \
     apt update && apt-get --no-install-recommends install -y ca-certificates
 
-# If you download Python modules too slow, you can use a pip mirror site to speed up apt and poetry
-RUN sed -i 's|http://archive.ubuntu.com|https://mirrors.tuna.tsinghua.edu.cn|g' /etc/apt/sources.list.d/ubuntu.sources
-ENV POETRY_PYPI_MIRROR_URL=https://pypi.tuna.tsinghua.edu.cn/simple/
+# Setup apt mirror site
+RUN sed -i 's|http://archive.ubuntu.com|https://mirrors.tuna.tsinghua.edu.cn|g' /etc/apt/sources.list
 
 RUN --mount=type=cache,id=ragflow_base_apt,target=/var/cache/apt,sharing=locked \
-    apt update && apt install -y curl libpython3-dev nginx libglib2.0-0 libglx-mesa0 pkg-config libicu-dev libgdiplus python3-pip python3-poetry \
-    && pip3 install --user --break-system-packages poetry-plugin-pypi-mirror --index-url https://pypi.tuna.tsinghua.edu.cn/simple/ \
+    apt update && DEBIAN_FRONTEND=noninteractive apt install -y curl libpython3-dev nginx libglib2.0-0 libglx-mesa0 pkg-config libicu-dev libgdiplus default-jdk python3-pip pipx \
+    libatk-bridge2.0-0 libgtk-4-1 libnss3 xdg-utils unzip libgbm-dev wget git \
     && rm -rf /var/lib/apt/lists/*
 
+RUN pip3 config set global.index-url https://pypi.tuna.tsinghua.edu.cn/simple && pip3 config set global.trusted-host "pypi.tuna.tsinghua.edu.cn mirrors.pku.edu.cn" && pip3 config set global.extra-index-url "https://mirrors.pku.edu.cn/pypi/web/simple" \
+    && pipx install poetry \
+    && /root/.local/bin/poetry self add poetry-plugin-pypi-mirror
+
 # https://forum.aspose.com/t/aspose-slides-for-net-no-usable-version-of-libssl-found-with-linux-server/271344/13
 # aspose-slides on linux/arm64 is unavailable
-RUN if [ "${ARCH}" = "amd64" ]; then \
-    curl -o libssl1.deb http://archive.ubuntu.com/ubuntu/pool/main/o/openssl/libssl1.1_1.1.1f-1ubuntu2_amd64.deb && dpkg -i libssl1.deb && rm -f libssl1.deb; \
+RUN --mount=type=bind,source=libssl1.1_1.1.1f-1ubuntu2_amd64.deb,target=/root/libssl1.1_1.1.1f-1ubuntu2_amd64.deb \
+    --mount=type=bind,source=libssl1.1_1.1.1f-1ubuntu2_arm64.deb,target=/root/libssl1.1_1.1.1f-1ubuntu2_arm64.deb \
+    if [ "$(uname -m)" = "x86_64" ]; then \
+        dpkg -i /root/libssl1.1_1.1.1f-1ubuntu2_amd64.deb; \
+    elif [ "$(uname -m)" = "aarch64" ]; then \
+        dpkg -i /root/libssl1.1_1.1.1f-1ubuntu2_arm64.deb; \
     fi
 
 ENV PYTHONDONTWRITEBYTECODE=1 DOTNET_SYSTEM_GLOBALIZATION_INVARIANT=1
 
+ENV PATH=/root/.local/bin:$PATH
 # Configure Poetry
 ENV POETRY_NO_INTERACTION=1
 ENV POETRY_VIRTUALENVS_IN_PROJECT=true
 ENV POETRY_VIRTUALENVS_CREATE=true
 ENV POETRY_REQUESTS_TIMEOUT=15
+ENV POETRY_PYPI_MIRROR_URL=https://pypi.tuna.tsinghua.edu.cn/simple/
 
+# nodejs 12.22 on Ubuntu 22.04 is too old
+RUN --mount=type=cache,id=ragflow_base_apt,target=/var/cache/apt,sharing=locked \
+    curl -fsSL https://deb.nodesource.com/setup_20.x | bash - && \
+    apt purge -y nodejs npm && \
+    apt autoremove && \
+    apt update && \
+    apt install -y nodejs cargo && \
+    rm -rf /var/lib/apt/lists/*
+
 # builder stage
 FROM base AS builder
@@ -42,23 +59,38 @@ USER root
 
 WORKDIR /ragflow
 
-RUN --mount=type=cache,id=ragflow_builder_apt,target=/var/cache/apt,sharing=locked \
-    apt update && apt install -y nodejs npm cargo && \
-    rm -rf /var/lib/apt/lists/*
+COPY .git /ragflow/.git
+
+RUN current_commit=$(git rev-parse --short HEAD); \
+    last_tag=$(git describe --tags --abbrev=0); \
+    commit_count=$(git rev-list --count "$last_tag..HEAD"); \
+    version_info=""; \
+    if [ "$commit_count" -eq 0 ]; then \
+        version_info=$last_tag; \
+    else \
+        version_info="$current_commit($last_tag~$commit_count)"; \
+    fi; \
+    if [ "$LIGHTEN" == "1" ]; then \
+        version_info="$version_info slim"; \
+    else \
+        version_info="$version_info full"; \
+    fi; \
+    echo "RAGFlow version: $version_info"; \
+    echo $version_info > /ragflow/VERSION
 
 COPY web web
 COPY docs docs
 RUN --mount=type=cache,id=ragflow_builder_npm,target=/root/.npm,sharing=locked \
-    cd web && npm i && npm run build
+    cd web && npm install --force && npm run build
 
 # install dependencies from poetry.lock file
 COPY pyproject.toml poetry.toml poetry.lock ./
 
 RUN --mount=type=cache,id=ragflow_builder_poetry,target=/root/.cache/pypoetry,sharing=locked \
-    if [ "$LIGHTEN" -eq 0 ]; then \
-        poetry install --sync --no-root --with=full; \
+    if [ "$LIGHTEN" == "1" ]; then \
+        poetry install --no-root; \
     else \
-        poetry install --sync --no-root; \
+        poetry install --no-root --with=full; \
     fi
 
 # production stage
@@ -67,6 +99,8 @@ USER root
 
 WORKDIR /ragflow
 
+COPY --from=builder /ragflow/VERSION /ragflow/VERSION
+
 # Install python packages' dependencies
 # cv2 requires libGL.so.1
 RUN --mount=type=cache,id=ragflow_production_apt,target=/var/cache/apt,sharing=locked \
@@ -93,6 +127,25 @@ RUN --mount=type=bind,source=huggingface.co,target=/huggingface.co \
 # Copy nltk data downloaded via download_deps.py
 COPY nltk_data /root/nltk_data
 
+# https://github.com/chrismattmann/tika-python
+# This is the only way to run python-tika without internet access. Without this set, the default is to check the tika version and pull latest every time from Apache.
+COPY tika-server-standard-3.0.0.jar /ragflow/tika-server-standard.jar
+COPY tika-server-standard-3.0.0.jar.md5 /ragflow/tika-server-standard.jar.md5
+ENV TIKA_SERVER_JAR="file:///ragflow/tika-server-standard.jar"
+
+# Copy cl100k_base
+COPY cl100k_base.tiktoken /ragflow/9b5ad71b2ce5302211f9c61530b329a4922fc6a4
+
+# Add dependencies of selenium
+RUN --mount=type=bind,source=chrome-linux64-121-0-6167-85,target=/chrome-linux64.zip \
+    unzip /chrome-linux64.zip && \
+    mv chrome-linux64 /opt/chrome && \
+    ln -s /opt/chrome/chrome /usr/local/bin/
+RUN --mount=type=bind,source=chromedriver-linux64-121-0-6167-85,target=/chromedriver-linux64.zip \
+    unzip -j /chromedriver-linux64.zip chromedriver-linux64/chromedriver && \
+    mv chromedriver /usr/local/bin/ && \
+    rm -f /usr/bin/google-chrome
+
 # Copy compiled web pages
 COPY --from=builder /ragflow/web/dist /ragflow/web/dist
 
@@ -103,6 +156,7 @@ ENV PATH="${VIRTUAL_ENV}/bin:${PATH}"
 
 ENV PYTHONPATH=/ragflow/
 
+COPY docker/service_conf.yaml.template ./conf/service_conf.yaml.template
 COPY docker/entrypoint.sh ./entrypoint.sh
 RUN chmod +x ./entrypoint.sh
````
README.md: 66 changed lines
````diff
@@ -8,7 +8,8 @@
   <a href="./README.md">English</a> |
   <a href="./README_zh.md">简体中文</a> |
   <a href="./README_ja.md">日本語</a> |
-  <a href="./README_ko.md">한국어</a>
+  <a href="./README_ko.md">한국어</a> |
+  <a href="./README_id.md">Bahasa Indonesia</a>
 </p>
 
 <p align="center">
@@ -19,7 +20,7 @@
     <img alt="Static Badge" src="https://img.shields.io/badge/Online-Demo-4e6b99">
   </a>
   <a href="https://hub.docker.com/r/infiniflow/ragflow" target="_blank">
-    <img src="https://img.shields.io/badge/docker_pull-ragflow:v0.13.0-brightgreen" alt="docker pull infiniflow/ragflow:v0.13.0">
+    <img src="https://img.shields.io/badge/docker_pull-ragflow:v0.14.0-brightgreen" alt="docker pull infiniflow/ragflow:v0.14.0">
   </a>
   <a href="https://github.com/infiniflow/ragflow/releases/latest">
     <img src="https://img.shields.io/github/v/release/infiniflow/ragflow?color=blue&label=Latest%20Release" alt="Latest Release">
@@ -69,14 +70,14 @@ data.
 Try our demo at [https://demo.ragflow.io](https://demo.ragflow.io).
 <div align="center" style="margin-top:20px;margin-bottom:20px;">
 <img src="https://github.com/infiniflow/ragflow/assets/7248/2f6baa3e-1092-4f11-866d-36f6a9d075e5" width="1200"/>
-<img src="https://github.com/infiniflow/ragflow/assets/12318111/b083d173-dadc-4ea9-bdeb-180d7df514eb" width="1200"/>
+<img src="https://github.com/user-attachments/assets/504bbbf1-c9f7-4d83-8cc5-e9cb63c26db6" width="1200"/>
 </div>
 
 ## 🔥 Latest Updates
 
-- 2024-09-29 Optimizes multi-round conversations.
+- 2024-11-22 Adds more variables to Agent.
+- 2024-11-01 Adds keyword extraction and related question generation to the parsed chunks to improve the accuracy of retrieval.
 - 2024-09-13 Adds search mode for knowledge base Q&A.
-- 2024-09-09 Adds a medical consultant agent template.
 - 2024-08-22 Support text to SQL statements through RAG.
 - 2024-08-02 Supports GraphRAG inspired by [graphrag](https://github.com/microsoft/graphrag) and mind map.
 
@@ -174,15 +175,15 @@ releases! 🌟
    $ docker compose -f docker-compose.yml up -d
    ```
 
-   > - To download a RAGFlow slim Docker image of a specific version, update the `RAGFlow_IMAGE` variable in *
-   *docker/.env** to your desired version. For example, `RAGFLOW_IMAGE=infiniflow/ragflow:v0.13.0-slim`. After
+   > - To download a RAGFlow slim Docker image of a specific version, update the `RAGFLOW_IMAGE` variable in *
+   *docker/.env** to your desired version. For example, `RAGFLOW_IMAGE=infiniflow/ragflow:v0.14.0-slim`. After
    making this change, rerun the command above to initiate the download.
    > - To download the dev version of RAGFlow Docker image *including* embedding models and Python libraries, update the
-   `RAGFlow_IMAGE` variable in **docker/.env** to `RAGFLOW_IMAGE=infiniflow/ragflow:dev`. After making this change,
+   `RAGFLOW_IMAGE` variable in **docker/.env** to `RAGFLOW_IMAGE=infiniflow/ragflow:dev`. After making this change,
    rerun the command above to initiate the download.
    > - To download a specific version of RAGFlow Docker image *including* embedding models and Python libraries, update
-   the `RAGFlow_IMAGE` variable in **docker/.env** to your desired version. For example,
-   `RAGFLOW_IMAGE=infiniflow/ragflow:v0.13.0`. After making this change, rerun the command above to initiate the
+   the `RAGFLOW_IMAGE` variable in **docker/.env** to your desired version. For example,
+   `RAGFLOW_IMAGE=infiniflow/ragflow:v0.14.0`. After making this change, rerun the command above to initiate the
    download.
 
 > **NOTE:** A RAGFlow Docker image that includes embedding models and Python libraries is approximately 9GB in size
@@ -209,13 +210,13 @@ releases! 🌟
        * Running on http://x.x.x.x:9380
        INFO:werkzeug:Press CTRL+C to quit
    ```
-   > If you skip this confirmation step and directly log in to RAGFlow, your browser may prompt a `network abnormal`
+   > If you skip this confirmation step and directly log in to RAGFlow, your browser may prompt a `network anormal`
    error because, at that moment, your RAGFlow may not be fully initialized.
 
 5. In your web browser, enter the IP address of your server and log in to RAGFlow.
    > With the default settings, you only need to enter `http://IP_OF_YOUR_MACHINE` (**sans** port number) as the default
    HTTP serving port `80` can be omitted when using the default configurations.
-6. In [service_conf.yaml](./docker/service_conf.yaml), select the desired LLM factory in `user_default_llm` and update
+6. In [service_conf.yaml.template](./docker/service_conf.yaml.template), select the desired LLM factory in `user_default_llm` and update
    the `API_KEY` field with the corresponding API key.
 
    > See [llm_api_key_setup](https://ragflow.io/docs/dev/llm_api_key_setup) for more information.
@@ -228,17 +229,11 @@ When it comes to system configurations, you will need to manage the following fi
 
 - [.env](./docker/.env): Keeps the fundamental setups for the system, such as `SVR_HTTP_PORT`, `MYSQL_PASSWORD`, and
   `MINIO_PASSWORD`.
-- [service_conf.yaml](./docker/service_conf.yaml): Configures the back-end services.
-- [docker-compose.yml](./docker/docker-compose.yml): The system relies
-  on [docker-compose.yml](./docker/docker-compose.yml) to start up.
-
-You must ensure that changes to the [.env](./docker/.env) file are in line with what are in
-the [service_conf.yaml](./docker/service_conf.yaml) file.
+- [service_conf.yaml.template](./docker/service_conf.yaml.template): Configures the back-end services. The environment variables in this file will be automatically populated when the Docker container starts. Any environment variables set within the Docker container will be available for use, allowing you to customize service behavior based on the deployment environment.
+- [docker-compose.yml](./docker/docker-compose.yml): The system relies on [docker-compose.yml](./docker/docker-compose.yml) to start up.
 
 > The [./docker/README](./docker/README.md) file provides a detailed description of the environment settings and service
-> configurations, and you are REQUIRED to ensure that all environment settings listed in
-> the [./docker/README](./docker/README.md) file are aligned with the corresponding configurations in
-> the [service_conf.yaml](./docker/service_conf.yaml) file.
+> configurations which can be used as `${ENV_VARS}` in the [service_conf.yaml.template](./docker/service_conf.yaml.template) file.
 
 To update the default HTTP serving port (80), go to [docker-compose.yml](./docker/docker-compose.yml) and change `80:80`
 to `<YOUR_SERVING_PORT>:80`.
@@ -249,6 +244,27 @@ Updates to the above configurations require a reboot of all containers to take e
 > $ docker compose -f docker/docker-compose.yml up -d
 > ```
 
+### Switch doc engine from Elasticsearch to Infinity
+
+RAGFlow uses Elasticsearch by default for storing full text and vectors. To switch to [Infinity](https://github.com/infiniflow/infinity/), follow these steps:
+
+1. Stop all running containers:
+
+   ```bash
+   $ docker compose -f docker/docker-compose.yml down -v
+   ```
+
+2. Set `DOC_ENGINE` in **docker/.env** to `infinity`.
+
+3. Start the containers:
+
+   ```bash
+   $ docker compose -f docker/docker-compose.yml up -d
+   ```
+
+> [!WARNING]
+> Switching to Infinity on a Linux/arm64 machine is not yet officially supported.
+
 ## 🔧 Build a Docker image without embedding models
 
 This image is approximately 1 GB in size and relies on external LLM and embedding services.
@@ -285,7 +301,7 @@ docker build -f Dockerfile -t infiniflow/ragflow:dev .
 git clone https://github.com/infiniflow/ragflow.git
 cd ragflow/
 export POETRY_VIRTUALENVS_CREATE=true POETRY_VIRTUALENVS_IN_PROJECT=true
-~/.local/bin/poetry install --sync --no-root # install RAGFlow dependent python modules
+~/.local/bin/poetry install --sync --no-root --with=full # install RAGFlow dependent python modules
 ```
 
 3. Launch the dependent services (MinIO, Elasticsearch, Redis, and MySQL) using Docker Compose:
@@ -293,11 +309,11 @@ docker build -f Dockerfile -t infiniflow/ragflow:dev .
    docker compose -f docker/docker-compose-base.yml up -d
    ```
 
-   Add the following line to `/etc/hosts` to resolve all hosts specified in **docker/service_conf.yaml** to `127.0.0.1`:
+   Add the following line to `/etc/hosts` to resolve all hosts specified in **docker/.env** to `127.0.0.1`:
   ```
-   127.0.0.1       es01 mysql minio redis
+   127.0.0.1       es01 infinity mysql minio redis
   ```
-   In **docker/service_conf.yaml**, update mysql port to `5455` and es port to `1200`, as specified in **docker/.env**.
+   In **docker/service_conf.yaml.template**, update mysql port to `5455` and es port to `1200`, as specified in **docker/.env**.
 
 4. If you cannot access HuggingFace, set the `HF_ENDPOINT` environment variable to use a mirror site:
````
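The reworked configuration section explains that **service_conf.yaml.template** is rendered from environment variables when the container starts. A sketch of that `${ENV_VARS}` substitution idea, illustrative only and not the project's actual entrypoint logic:

```python
# Sketch of ${ENV_VAR} substitution for service_conf.yaml.template;
# string.Template recognizes the same ${NAME} placeholder syntax.
import os
from string import Template

with open("docker/service_conf.yaml.template") as f:
    template = Template(f.read())

# safe_substitute leaves unknown ${VARS} in place instead of raising.
rendered = template.safe_substitute(os.environ)

with open("conf/service_conf.yaml", "w") as f:
    f.write(rendered)
```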
README_id.md (new file): 341 lines
````markdown
<div align="center">
<a href="https://demo.ragflow.io/">
<img src="web/src/assets/logo-with-text.png" width="520" alt="Logo ragflow">
</a>
</div>

<p align="center">
  <a href="./README.md">English</a> |
  <a href="./README_zh.md">简体中文</a> |
  <a href="./README_ja.md">日本語</a> |
  <a href="./README_ko.md">한국어</a> |
  <a href="./README_id.md">Bahasa Indonesia</a>
</p>

<p align="center">
    <a href="https://x.com/intent/follow?screen_name=infiniflowai" target="_blank">
        <img src="https://img.shields.io/twitter/follow/infiniflow?logo=X&color=%20%23f5f5f5" alt="Ikuti di X (Twitter)">
    </a>
    <a href="https://demo.ragflow.io" target="_blank">
        <img alt="Lencana Daring" src="https://img.shields.io/badge/Online-Demo-4e6b99">
    </a>
    <a href="https://hub.docker.com/r/infiniflow/ragflow" target="_blank">
        <img src="https://img.shields.io/badge/docker_pull-ragflow:v0.14.0-brightgreen" alt="docker pull infiniflow/ragflow:v0.14.0">
    </a>
    <a href="https://github.com/infiniflow/ragflow/releases/latest">
        <img src="https://img.shields.io/github/v/release/infiniflow/ragflow?color=blue&label=Rilis%20Terbaru" alt="Rilis Terbaru">
    </a>
    <a href="https://github.com/infiniflow/ragflow/blob/main/LICENSE">
        <img height="21" src="https://img.shields.io/badge/Lisensi-Apache--2.0-ffffff?labelColor=d4eaf7&color=2e6cc4" alt="Lisensi">
    </a>
</p>

<h4 align="center">
  <a href="https://ragflow.io/docs/dev/">Dokumentasi</a> |
  <a href="https://github.com/infiniflow/ragflow/issues/162">Peta Jalan</a> |
  <a href="https://twitter.com/infiniflowai">Twitter</a> |
  <a href="https://discord.gg/4XxujFgUN7">Discord</a> |
  <a href="https://demo.ragflow.io">Demo</a>
</h4>

<details open>
<summary></b>📕 Daftar Isi</b></summary>

- 💡 [Apa Itu RAGFlow?](#-apa-itu-ragflow)
- 🎮 [Demo](#-demo)
- 📌 [Pembaruan Terbaru](#-pembaruan-terbaru)
- 🌟 [Fitur Utama](#-fitur-utama)
- 🔎 [Arsitektur Sistem](#-arsitektur-sistem)
- 🎬 [Mulai](#-mulai)
- 🔧 [Konfigurasi](#-konfigurasi)
- 🔧 [Membangun Image Docker tanpa Model Embedding](#-membangun-image-docker-tanpa-model-embedding)
- 🔧 [Membangun Image Docker dengan Model Embedding](#-membangun-image-docker-dengan-model-embedding)
- 🔨 [Meluncurkan aplikasi dari Sumber untuk Pengembangan](#-meluncurkan-aplikasi-dari-sumber-untuk-pengembangan)
- 📚 [Dokumentasi](#-dokumentasi)
- 📜 [Peta Jalan](#-peta-jalan)
- 🏄 [Komunitas](#-komunitas)
- 🙌 [Kontribusi](#-kontribusi)

</details>

## 💡 Apa Itu RAGFlow?

[RAGFlow](https://ragflow.io/) adalah mesin RAG (Retrieval-Augmented Generation) open-source berbasis pemahaman dokumen yang mendalam. Platform ini menyediakan alur kerja RAG yang efisien untuk bisnis dengan berbagai skala, menggabungkan LLM (Large Language Models) untuk menyediakan kemampuan tanya-jawab yang benar dan didukung oleh referensi dari data terstruktur kompleks.

## 🎮 Demo

Coba demo kami di [https://demo.ragflow.io](https://demo.ragflow.io).
<div align="center" style="margin-top:20px;margin-bottom:20px;">
<img src="https://github.com/infiniflow/ragflow/assets/7248/2f6baa3e-1092-4f11-866d-36f6a9d075e5" width="1200"/>
<img src="https://github.com/user-attachments/assets/504bbbf1-c9f7-4d83-8cc5-e9cb63c26db6" width="1200"/>
</div>

## 🔥 Pembaruan Terbaru

- 22-11-2024 Peningkatan definisi dan penggunaan variabel di Agen.
- 2024-11-01: Penambahan ekstraksi kata kunci dan pembuatan pertanyaan terkait untuk meningkatkan akurasi pengambilan.
- 2024-09-13: Penambahan mode pencarian untuk Q&A basis pengetahuan.
- 2024-08-22: Dukungan untuk teks ke pernyataan SQL melalui RAG.
- 2024-08-02: Dukungan GraphRAG yang terinspirasi oleh [graphrag](https://github.com/microsoft/graphrag) dan mind map.

## 🎉 Tetap Terkini

⭐️ Star repositori kami untuk tetap mendapat informasi tentang fitur baru dan peningkatan menarik! 🌟
<div align="center" style="margin-top:20px;margin-bottom:20px;">
<img src="https://github.com/user-attachments/assets/18c9707e-b8aa-4caf-a154-037089c105ba" width="1200"/>
</div>

## 🌟 Fitur Utama

### 🍭 **"Kualitas Masuk, Kualitas Keluar"**

- Ekstraksi pengetahuan berbasis pemahaman dokumen mendalam dari data tidak terstruktur dengan format yang rumit.
- Menemukan "jarum di tumpukan data" dengan token yang hampir tidak terbatas.

### 🍱 **Pemotongan Berbasis Template**

- Cerdas dan dapat dijelaskan.
- Banyak pilihan template yang tersedia.

### 🌱 **Referensi yang Didasarkan pada Data untuk Mengurangi Hallusinasi**

- Visualisasi pemotongan teks memungkinkan intervensi manusia.
- Tampilan cepat referensi kunci dan referensi yang dapat dilacak untuk mendukung jawaban yang didasarkan pada fakta.

### 🍔 **Kompatibilitas dengan Sumber Data Heterogen**

- Mendukung Word, slide, excel, txt, gambar, salinan hasil scan, data terstruktur, halaman web, dan banyak lagi.

### 🛀 **Alur Kerja RAG yang Otomatis dan Mudah**

- Orkestrasi RAG yang ramping untuk bisnis kecil dan besar.
- LLM yang dapat dikonfigurasi serta model embedding.
- Peringkat ulang berpasangan dengan beberapa pengambilan ulang.
- API intuitif untuk integrasi yang mudah dengan bisnis.

## 🔎 Arsitektur Sistem

<div align="center" style="margin-top:20px;margin-bottom:20px;">
<img src="https://github.com/infiniflow/ragflow/assets/12318111/d6ac5664-c237-4200-a7c2-a4a00691b485" width="1000"/>
</div>

## 🎬 Mulai

### 📝 Prasyarat

- CPU >= 4 inti
- RAM >= 16 GB
- Disk >= 50 GB
- Docker >= 24.0.0 & Docker Compose >= v2.26.1

### 🚀 Menjalankan Server

1. Pastikan `vm.max_map_count` >= 262144:

   > Untuk memeriksa nilai `vm.max_map_count`:
   >
   > ```bash
   > $ sysctl vm.max_map_count
   > ```
   >
   > Jika nilainya kurang dari 262144, setel ulang `vm.max_map_count` ke setidaknya 262144:
   >
   > ```bash
   > # Dalam contoh ini, kita atur menjadi 262144:
   > $ sudo sysctl -w vm.max_map_count=262144
   > ```
   >
   > Perubahan ini akan hilang setelah sistem direboot. Untuk membuat perubahan ini permanen, tambahkan atau perbarui nilai
   `vm.max_map_count` di **/etc/sysctl.conf**:
   >
   > ```bash
   > vm.max_map_count=262144
   > ```

2. Clone repositori:

   ```bash
   $ git clone https://github.com/infiniflow/ragflow.git
   ```

3. Bangun image Docker pre-built dan jalankan server:

   > Perintah di bawah ini akan mengunduh versi dev dari Docker image RAGFlow slim (`dev-slim`). Image RAGFlow slim
   tidak termasuk model embedding atau library Python dan berukuran sekitar 1GB.

   ```bash
   $ cd ragflow/docker
   $ docker compose -f docker-compose.yml up -d
   ```

   > - Untuk mengunduh versi tertentu dari image Docker RAGFlow slim, perbarui variabel `RAGFlow_IMAGE` di *
   *docker/.env** sesuai dengan versi yang diinginkan. Misalnya, `RAGFLOW_IMAGE=infiniflow/ragflow:v0.14.0-slim`.
   Setelah mengubah ini, jalankan ulang perintah di atas untuk memulai unduhan.
   > - Untuk mengunduh versi dev dari image Docker RAGFlow *termasuk* model embedding dan library Python, perbarui
   variabel `RAGFlow_IMAGE` di **docker/.env** menjadi `RAGFLOW_IMAGE=infiniflow/ragflow:dev`. Setelah mengubah ini,
   jalankan ulang perintah di atas untuk memulai unduhan.
   > - Untuk mengunduh versi tertentu dari image Docker RAGFlow *termasuk* model embedding dan library Python, perbarui
   variabel `RAGFlow_IMAGE` di **docker/.env** sesuai dengan versi yang diinginkan. Misalnya,
   `RAGFLOW_IMAGE=infiniflow/ragflow:v0.14.0`. Setelah mengubah ini, jalankan ulang perintah di atas untuk memulai unduhan.

   > **CATATAN:** Image Docker RAGFlow yang mencakup model embedding dan library Python berukuran sekitar 9GB
   dan mungkin memerlukan waktu lebih lama untuk dimuat.

4. Periksa status server setelah server aktif dan berjalan:

   ```bash
   $ docker logs -f ragflow-server
   ```

   _Output berikut menandakan bahwa sistem berhasil diluncurkan:_

   ```bash

        ____   ___    ______ ______ __
       / __ \ /   |  / ____// ____// /____  _      __
      / /_/ // /| | / / __ / /_   / // __ \| | /| / /
     / _, _// ___ |/ /_/ // __/  / // /_/ /| |/ |/ /
    /_/ |_|/_/  |_|\____//_/    /_/ \____/ |__/|__/

    * Running on all addresses (0.0.0.0)
    * Running on http://127.0.0.1:9380
    * Running on http://x.x.x.x:9380
    INFO:werkzeug:Press CTRL+C to quit
   ```
   > Jika Anda melewatkan langkah ini dan langsung login ke RAGFlow, browser Anda mungkin menampilkan error `network anormal`
   karena RAGFlow mungkin belum sepenuhnya siap.

5. Buka browser web Anda, masukkan alamat IP server Anda, dan login ke RAGFlow.
   > Dengan pengaturan default, Anda hanya perlu memasukkan `http://IP_DEVICE_ANDA` (**tanpa** nomor port) karena
   port HTTP default `80` bisa dihilangkan saat menggunakan konfigurasi default.
6. Dalam [service_conf.yaml](./docker/service_conf.yaml), pilih LLM factory yang diinginkan di `user_default_llm` dan perbarui
   bidang `API_KEY` dengan kunci API yang sesuai.

   > Lihat [llm_api_key_setup](https://ragflow.io/docs/dev/llm_api_key_setup) untuk informasi lebih lanjut.

_Sistem telah siap digunakan!_

## 🔧 Konfigurasi

Untuk konfigurasi sistem, Anda perlu mengelola file-file berikut:

- [.env](./docker/.env): Menyimpan pengaturan dasar sistem, seperti `SVR_HTTP_PORT`, `MYSQL_PASSWORD`, dan
  `MINIO_PASSWORD`.
- [service_conf.yaml](./docker/service_conf.yaml): Mengonfigurasi aplikasi backend.
- [docker-compose.yml](./docker/docker-compose.yml): Sistem ini bergantung pada [docker-compose.yml](./docker/docker-compose.yml) untuk memulai.

Anda harus memastikan bahwa perubahan pada file [.env](./docker/.env) sesuai dengan yang ada di file [service_conf.yaml](./docker/service_conf.yaml).

> File [./docker/README](./docker/README.md) menyediakan penjelasan detail tentang pengaturan lingkungan dan konfigurasi aplikasi,
> dan Anda DIWAJIBKAN memastikan bahwa semua pengaturan lingkungan yang tercantum di
> [./docker/README](./docker/README.md) selaras dengan konfigurasi yang sesuai di
> [service_conf.yaml](./docker/service_conf.yaml).

Untuk memperbarui port HTTP default (80), buka [docker-compose.yml](./docker/docker-compose.yml) dan ubah `80:80`
menjadi `<YOUR_SERVING_PORT>:80`.

Pembaruan konfigurasi ini memerlukan reboot semua kontainer agar efektif:

> ```bash
> $ docker compose -f docker/docker-compose.yml up -d
> ```

## 🔧 Membangun Docker Image tanpa Model Embedding

Image ini berukuran sekitar 1 GB dan bergantung pada aplikasi LLM eksternal dan embedding.

```bash
git clone https://github.com/infiniflow/ragflow.git
cd ragflow/
pip3 install huggingface-hub nltk
python3 download_deps.py
docker build -f Dockerfile.slim -t infiniflow/ragflow:dev-slim .
```

## 🔧 Membangun Docker Image Termasuk Model Embedding

Image ini berukuran sekitar 9 GB. Karena sudah termasuk model embedding, ia hanya bergantung pada aplikasi LLM eksternal.

```bash
git clone https://github.com/infiniflow/ragflow.git
cd ragflow/
pip3 install huggingface-hub nltk
python3 download_deps.py
docker build -f Dockerfile -t infiniflow/ragflow:dev .
```

## 🔨 Menjalankan Aplikasi dari untuk Pengembangan

1. Instal Poetry, atau lewati langkah ini jika sudah terinstal:
   ```bash
   curl -sSL https://install.python-poetry.org | python3 -
   ```

2. Clone kode sumber dan instal dependensi Python:
   ```bash
   git clone https://github.com/infiniflow/ragflow.git
   cd ragflow/
   export POETRY_VIRTUALENVS_CREATE=true POETRY_VIRTUALENVS_IN_PROJECT=true
   ~/.local/bin/poetry install --sync --no-root # install modul python RAGFlow
   ```

3. Jalankan aplikasi yang diperlukan (MinIO, Elasticsearch, Redis, dan MySQL) menggunakan Docker Compose:
   ```bash
   docker compose -f docker/docker-compose-base.yml up -d
   ```

   Tambahkan baris berikut ke `/etc/hosts` untuk memetakan semua host yang ditentukan di **docker/service_conf.yaml** ke `127.0.0.1`:
   ```
   127.0.0.1       es01 infinity mysql minio redis
   ```
   Di **docker/service_conf.yaml**, perbarui port mysql ke `5455` dan es ke `1200`, sesuai dengan yang ditentukan di **docker/.env**.

4. Jika Anda tidak dapat mengakses HuggingFace, atur variabel lingkungan `HF_ENDPOINT` untuk menggunakan situs mirror:

   ```bash
   export HF_ENDPOINT=https://hf-mirror.com
   ```

5. Jalankan aplikasi backend:
   ```bash
   source .venv/bin/activate
   export PYTHONPATH=$(pwd)
   bash docker/launch_backend_service.sh
   ```

6. Instal dependensi frontend:
   ```bash
   cd web
   npm install --force
   ```
7. Konfigurasikan frontend untuk memperbarui `proxy.target` di **.umirc.ts** menjadi `http://127.0.0.1:9380`:
8. Jalankan aplikasi frontend:
   ```bash
   npm run dev
   ```

   _Output berikut menandakan bahwa sistem berhasil diluncurkan:_

   

## 📚 Dokumentasi

- [Quickstart](https://ragflow.io/docs/dev/)
- [Panduan Pengguna](https://ragflow.io/docs/dev/category/guides)
- [Referensi](https://ragflow.io/docs/dev/category/references)
- [FAQ](https://ragflow.io/docs/dev/faq)

## 📜 Roadmap

Lihat [Roadmap RAGFlow 2024](https://github.com/infiniflow/ragflow/issues/162)

## 🏄 Komunitas

- [Discord](https://discord.gg/4XxujFgUN7)
- [Twitter](https://twitter.com/infiniflowai)
- [GitHub Discussions](https://github.com/orgs/infiniflow/discussions)

## 🙌 Kontribusi

RAGFlow berkembang melalui kolaborasi open-source. Dalam semangat ini, kami menerima kontribusi dari komunitas.
Jika Anda ingin berpartisipasi, tinjau terlebih dahulu [Panduan Kontribusi](./CONTRIBUTING.md).
````
README_ja.md: 34 changed lines
````diff
@@ -8,7 +8,8 @@
   <a href="./README.md">English</a> |
   <a href="./README_zh.md">简体中文</a> |
   <a href="./README_ja.md">日本語</a> |
-  <a href="./README_ko.md">한국어</a>
+  <a href="./README_ko.md">한국어</a> |
+  <a href="./README_id.md">Bahasa Indonesia</a>
 </p>
 
 <p align="center">
@@ -19,7 +20,7 @@
     <img alt="Static Badge" src="https://img.shields.io/badge/Online-Demo-4e6b99">
   </a>
   <a href="https://hub.docker.com/r/infiniflow/ragflow" target="_blank">
-    <img src="https://img.shields.io/badge/docker_pull-ragflow:v0.13.0-brightgreen" alt="docker pull infiniflow/ragflow:v0.13.0">
+    <img src="https://img.shields.io/badge/docker_pull-ragflow:v0.14.0-brightgreen" alt="docker pull infiniflow/ragflow:v0.14.0">
   </a>
   <a href="https://github.com/infiniflow/ragflow/releases/latest">
     <img src="https://img.shields.io/github/v/release/infiniflow/ragflow?color=blue&label=Latest%20Release" alt="Latest Release">
@@ -47,15 +48,15 @@
 デモをお試しください:[https://demo.ragflow.io](https://demo.ragflow.io)。
 <div align="center" style="margin-top:20px;margin-bottom:20px;">
 <img src="https://github.com/infiniflow/ragflow/assets/7248/2f6baa3e-1092-4f11-866d-36f6a9d075e5" width="1200"/>
-<img src="https://github.com/infiniflow/ragflow/assets/12318111/b083d173-dadc-4ea9-bdeb-180d7df514eb" width="1200"/>
+<img src="https://github.com/user-attachments/assets/504bbbf1-c9f7-4d83-8cc5-e9cb63c26db6" width="1200"/>
 </div>
 
 
 ## 🔥 最新情報
 
-- 2024-09-29 マルチラウンドダイアログを最適化。
+- 2024-11-22 エージェントでの変数の定義と使用法を改善しました。
+- 2024-11-01 再現の精度を向上させるために、解析されたチャンクにキーワード抽出と関連質問の生成を追加しました。
 - 2024-09-13 ナレッジベース Q&A の検索モードを追加しました。
-- 2024-09-09 エージェントに医療相談テンプレートを追加しました。
 - 2024-08-22 RAG を介して SQL ステートメントへのテキストをサポートします。
 - 2024-08-02 [graphrag](https://github.com/microsoft/graphrag) からインスピレーションを得た GraphRAG とマインド マップをサポートします。
 
@@ -147,9 +148,9 @@
    $ docker compose -f docker-compose.yml up -d
    ```
 
-   > - 特定のバージョンのRAGFlow slim Dockerイメージをダウンロードするには、**docker/.env**内の`RAGFlow_IMAGE`変数を希望のバージョンに更新します。例えば、`RAGFLOW_IMAGE=infiniflow/ragflow:v0.13.0`とします。この変更を行った後、上記のコマンドを再実行してダウンロードを開始してください。
+   > - 特定のバージョンのRAGFlow slim Dockerイメージをダウンロードするには、**docker/.env**内の`RAGFlow_IMAGE`変数を希望のバージョンに更新します。例えば、`RAGFLOW_IMAGE=infiniflow/ragflow:v0.14.0`とします。この変更を行った後、上記のコマンドを再実行してダウンロードを開始してください。
    > - RAGFlowの埋め込みモデルとPythonライブラリを含む開発版Dockerイメージをダウンロードするには、**docker/.env**内の`RAGFlow_IMAGE`変数を`RAGFLOW_IMAGE=infiniflow/ragflow:dev`に更新します。この変更を行った後、上記のコマンドを再実行してダウンロードを開始してください。
-   > - 特定のバージョンのRAGFlow Dockerイメージ(埋め込みモデルとPythonライブラリを含む)をダウンロードするには、**docker/.env**内の`RAGFlow_IMAGE`変数を希望のバージョンに更新します。例えば、`RAGFLOW_IMAGE=infiniflow/ragflow:v0.13.0`とします。この変更を行った後、上記のコマンドを再実行してダウンロードを開始してください。
+   > - 特定のバージョンのRAGFlow Dockerイメージ(埋め込みモデルとPythonライブラリを含む)をダウンロードするには、**docker/.env**内の`RAGFlow_IMAGE`変数を希望のバージョンに更新します。例えば、`RAGFLOW_IMAGE=infiniflow/ragflow:v0.14.0`とします。この変更を行った後、上記のコマンドを再実行してダウンロードを開始してください。
 
 > **NOTE:** 埋め込みモデルとPythonライブラリを含むRAGFlow Dockerイメージのサイズは約9GBであり、読み込みにかなりの時間がかかる場合があります。
 
@@ -203,6 +204,23 @@
 > $ docker compose -f docker/docker-compose.yml up -d
 > ```
 
+### Elasticsearch から Infinity にドキュメントエンジンを切り替えます
+
+RAGFlow はデフォルトで Elasticsearch を使用して全文とベクトルを保存します。[Infinity]に切り替え(https://github.com/infiniflow/infinity/)、次の手順に従います。
+
+1. 実行中のすべてのコンテナを停止するには:
+   ```bash
+   $ docker compose -f docker/docker-compose.yml down -v
+   ```
+2. **docker/.env** の「DOC _ ENGINE」を「infinity」に設定します。
+
+3. 起動コンテナ:
+   ```bash
+   $ docker compose -f docker/docker-compose.yml up -d
+   ```
+> [!WARNING]
+> Linux/arm64 マシンでの Infinity への切り替えは正式にサポートされていません。
+
 ## 🔧 ソースコードでDockerイメージを作成(埋め込みモデルなし)
 
 この Docker イメージのサイズは約 1GB で、外部の大モデルと埋め込みサービスに依存しています。
@@ -249,7 +267,7 @@ docker build -f Dockerfile -t infiniflow/ragflow:dev .
 
 `/etc/hosts` に以下の行を追加して、**docker/service_conf.yaml** に指定されたすべてのホストを `127.0.0.1` に解決します:
 ```
-127.0.0.1       es01 mysql minio redis
+127.0.0.1       es01 infinity mysql minio redis
 ```
 **docker/service_conf.yaml** で mysql のポートを `5455` に、es のポートを `1200` に更新します(**docker/.env** に指定された通り).
````
README_ko.md: 34 changed lines
@ -9,6 +9,7 @@

<a href="./README_zh.md">简体中文</a> |
<a href="./README_ja.md">日本語</a> |
<a href="./README_ko.md">한국어</a> |
<a href="./README_id.md">Bahasa Indonesia</a>
</p>

<p align="center">

@ -19,7 +20,7 @@

<img alt="Static Badge" src="https://img.shields.io/badge/Online-Demo-4e6b99">
</a>
<a href="https://hub.docker.com/r/infiniflow/ragflow" target="_blank">
<img src="https://img.shields.io/badge/docker_pull-ragflow:v0.13.0-brightgreen" alt="docker pull infiniflow/ragflow:v0.13.0">
<img src="https://img.shields.io/badge/docker_pull-ragflow:v0.14.0-brightgreen" alt="docker pull infiniflow/ragflow:v0.14.0">
</a>
<a href="https://github.com/infiniflow/ragflow/releases/latest">
<img src="https://img.shields.io/github/v/release/infiniflow/ragflow?color=blue&label=Latest%20Release" alt="Latest Release">

@ -49,18 +50,18 @@

Try the demo at [https://demo.ragflow.io](https://demo.ragflow.io).
<div align="center" style="margin-top:20px;margin-bottom:20px;">
<img src="https://github.com/infiniflow/ragflow/assets/7248/2f6baa3e-1092-4f11-866d-36f6a9d075e5" width="1200"/>
<img src="https://github.com/infiniflow/ragflow/assets/12318111/b083d173-dadc-4ea9-bdeb-180d7df514eb" width="1200"/>
<img src="https://github.com/user-attachments/assets/504bbbf1-c9f7-4d83-8cc5-e9cb63c26db6" width="1200"/>
</div>

## 🔥 Latest Updates

- 2024-09-29 Optimizes multi-turn conversations.
- 2024-11-22 Improved the definition and usage of variables in Agent.
- 2024-11-01 Added keyword extraction and related-question generation to parsed chunks to improve recall.
- 2024-09-13 Added a knowledge base Q&A search mode.
- 2024-09-09 Added a medical consultation template to Agent.
- 2024-08-22 Supports text to SQL statements through RAG.
- 2024-08-02: Supports GraphRAG, inspired by [graphrag](https://github.com/microsoft/graphrag), and mind maps.

@ -151,9 +152,9 @@

$ docker compose -f docker-compose.yml up -d
```

> - To download a specific version of the RAGFlow slim Docker image, update the `RAGFLOW_IMAGE` variable in **docker/.env** to the desired version, for example `RAGFLOW_IMAGE=infiniflow/ragflow:v0.13.0-slim`. Once done, rerun the command above to start the download.
> - To download a specific version of the RAGFlow slim Docker image, update the `RAGFLOW_IMAGE` variable in **docker/.env** to the desired version, for example `RAGFLOW_IMAGE=infiniflow/ragflow:v0.14.0-slim`. Once done, rerun the command above to start the download.
> - To download the dev version of the RAGFlow Docker image, which includes embedding models and Python libraries, update the `RAGFLOW_IMAGE` variable in **docker/.env** to `RAGFLOW_IMAGE=infiniflow/ragflow:dev`. Once done, rerun the command above to start the download.
> - To download a specific version of the RAGFlow Docker image including embedding models and Python libraries, update the `RAGFLOW_IMAGE` variable in **docker/.env** to the desired version, for example `RAGFLOW_IMAGE=infiniflow/ragflow:v0.13.0`. Once done, rerun the command above to start the download.
> - To download a specific version of the RAGFlow Docker image including embedding models and Python libraries, update the `RAGFLOW_IMAGE` variable in **docker/.env** to the desired version, for example `RAGFLOW_IMAGE=infiniflow/ragflow:v0.14.0`. Once done, rerun the command above to start the download.

> **NOTE:** A RAGFlow Docker image that includes embedding models and Python libraries is about 9 GB in size and may take considerably longer to load.

@ -178,7 +179,7 @@

* Running on http://x.x.x.x:9380
INFO:werkzeug:Press CTRL+C to quit
```
> If you skip the confirmation step and log in to RAGFlow right away, your browser may raise a `network abnormal` error because RAGFlow is not fully initialized.
> If you skip the confirmation step and log in to RAGFlow right away, your browser may raise a `network anormal` error because RAGFlow is not fully initialized.

5. Enter the IP address of your server in your web browser and log in to RAGFlow.
> With the default settings, you only need to enter `http://IP_OF_YOUR_MACHINE` (excluding the port number), as the default HTTP serving port `80` can be omitted when using the default configuration.

@ -207,6 +208,21 @@

> $ docker compose -f docker/docker-compose.yml up -d
> ```

### Switch the document engine from Elasticsearch to Infinity

RAGFlow uses Elasticsearch by default for storing full text and vectors. To switch to [Infinity](https://github.com/infiniflow/infinity/), follow these steps:
1. Stop all running containers:
```bash
$ docker compose -f docker/docker-compose.yml down -v
```
2. Set `DOC_ENGINE` in **docker/.env** to `infinity`.
3. Start the containers:
```bash
$ docker compose -f docker/docker-compose.yml up -d
```
> [!WARNING]
> Switching to Infinity on a Linux/arm64 machine is not yet officially supported.

## 🔧 Build a Docker image from source (without embedding models)

This Docker image is about 1 GB in size and relies on external LLM and embedding services.

@ -253,7 +269,7 @@ docker build -f Dockerfile -t infiniflow/ragflow:dev .

Add the following line to `/etc/hosts` to resolve all hosts specified in **docker/service_conf.yaml** to `127.0.0.1`:
```
127.0.0.1 es01 mysql minio redis
127.0.0.1 es01 infinity mysql minio redis
```
In **docker/service_conf.yaml**, update the mysql port to `5455` and the es port to `1200`, as specified in **docker/.env**.
README_zh.md (41 changes)
@ -8,7 +8,8 @@

<a href="./README.md">English</a> |
<a href="./README_zh.md">简体中文</a> |
<a href="./README_ja.md">日本語</a> |
<a href="./README_ko.md">한국어</a>
<a href="./README_ko.md">한국어</a> |
<a href="./README_id.md">Bahasa Indonesia</a>
</p>

<p align="center">

@ -19,7 +20,7 @@

<img alt="Static Badge" src="https://img.shields.io/badge/Online-Demo-4e6b99">
</a>
<a href="https://hub.docker.com/r/infiniflow/ragflow" target="_blank">
<img src="https://img.shields.io/badge/docker_pull-ragflow:v0.13.0-brightgreen" alt="docker pull infiniflow/ragflow:v0.13.0">
<img src="https://img.shields.io/badge/docker_pull-ragflow:v0.14.0-brightgreen" alt="docker pull infiniflow/ragflow:v0.14.0">
</a>
<a href="https://github.com/infiniflow/ragflow/releases/latest">
<img src="https://img.shields.io/github/v/release/infiniflow/ragflow?color=blue&label=Latest%20Release" alt="Latest Release">

@ -47,15 +48,15 @@

Try the demo at [https://demo.ragflow.io](https://demo.ragflow.io).
<div align="center" style="margin-top:20px;margin-bottom:20px;">
<img src="https://github.com/infiniflow/ragflow/assets/7248/2f6baa3e-1092-4f11-866d-36f6a9d075e5" width="1200"/>
<img src="https://github.com/infiniflow/ragflow/assets/12318111/b083d173-dadc-4ea9-bdeb-180d7df514eb" width="1200"/>
<img src="https://github.com/user-attachments/assets/504bbbf1-c9f7-4d83-8cc5-e9cb63c26db6" width="1200"/>
</div>

## 🔥 Latest Updates

- 2024-09-29 Optimizes multi-turn conversations.
- 2024-11-22 Improved the definition and usage of variables in Agent.
- 2024-11-01 Added keyword extraction and related-question generation to parsed chunks to improve recall accuracy.
- 2024-09-13 Added a knowledge base Q&A search mode.
- 2024-09-09 Added a medical consultation template to Agent.
- 2024-08-22 Supports converting natural language to SQL statements with RAG.
- 2024-08-02 Supports GraphRAG, inspired by [graphrag](https://github.com/microsoft/graphrag), and mind maps.

@ -148,9 +149,9 @@

$ docker compose -f docker-compose.yml up -d
```

> - To download and run a specific version of the RAGFlow slim Docker image, find the `RAGFLOW_IMAGE` variable in **docker/.env** and change it to the corresponding version, for example `RAGFLOW_IMAGE=infiniflow/ragflow:v0.13.0-slim`, then rerun the command above.
> - To download and run a specific version of the RAGFlow slim Docker image, find the `RAGFLOW_IMAGE` variable in **docker/.env** and change it to the corresponding version, for example `RAGFLOW_IMAGE=infiniflow/ragflow:v0.14.0-slim`, then rerun the command above.
> - To install the dev Docker image with built-in embedding models and Python libraries, change the `RAGFLOW_IMAGE` variable in **docker/.env** to `RAGFLOW_IMAGE=infiniflow/ragflow:dev`.
> - To install a specific version of the RAGFlow Docker image with built-in embedding models and Python libraries, change the `RAGFLOW_IMAGE` variable in **docker/.env** to `RAGFLOW_IMAGE=infiniflow/ragflow:v0.13.0`, then rerun the command above.
> - To install a specific version of the RAGFlow Docker image with built-in embedding models and Python libraries, change the `RAGFLOW_IMAGE` variable in **docker/.env** to `RAGFLOW_IMAGE=infiniflow/ragflow:v0.14.0`, then rerun the command above.
> **NOTE:** A RAGFlow Docker image with built-in embedding models and Python libraries is about 9 GB in size and may take considerably longer to download; please be patient.

4. Confirm the server status again after the server has started up:

@ -173,7 +174,7 @@

* Running on http://x.x.x.x:9380
INFO:werkzeug:Press CTRL+C to quit
```
> If you skip this confirmation step and log in to RAGFlow directly, your browser may report `network abnormal` or `网络异常`, because RAGFlow may not be fully started.
> If you skip this confirmation step and log in to RAGFlow directly, your browser may report `network anormal` or `网络异常`, because RAGFlow may not be fully started.

5. Enter the IP address of your server in your browser and log in to RAGFlow.
> In the example above, you only need to enter http://IP_OF_YOUR_MACHINE: with the default configuration unchanged, there is no need to enter a port (the default HTTP serving port is 80).

@ -205,6 +206,28 @@

> $ docker compose -f docker-compose.yml up -d
> ```

### Switch the document engine from Elasticsearch to Infinity

RAGFlow uses Elasticsearch by default for storing text and vector data. To switch to [Infinity](https://github.com/infiniflow/infinity/), follow these steps:

1. Stop all running containers:

```bash
$ docker compose -f docker/docker-compose.yml down -v
```

2. Set `DOC_ENGINE` in **docker/.env** to `infinity`.

3. Start the containers:

```bash
$ docker compose -f docker/docker-compose.yml up -d
```

> [!WARNING]
> Running Infinity on Linux/arm64 machines is not yet officially supported.

## 🔧 Build a Docker image from source (without embedding models)

This Docker image is about 1 GB in size and relies on external LLM and embedding services.

@ -251,7 +274,7 @@ docker build -f Dockerfile -t infiniflow/ragflow:dev .

Add the following line to `/etc/hosts` to resolve all hosts specified in **docker/service_conf.yaml** to `127.0.0.1`:
```
127.0.0.1 es01 mysql minio redis
127.0.0.1 es01 infinity mysql minio redis
```
In **docker/service_conf.yaml**, update the mysql port to `5455` and the es port to `1200`, as specified in **docker/.env**.
@ -13,19 +13,13 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#
import importlib
import logging
import json
import traceback
from abc import ABC
from copy import deepcopy
from functools import partial

import pandas as pd

from agent.component import component_class
from agent.component.base import ComponentBase
from agent.settings import flow_logger, DEBUG


class Canvas(ABC):
"""
@ -162,8 +156,12 @@ class Canvas(ABC):
self.components[k]["obj"].reset()
self._embed_id = ""

def get_compnent_name(self, cid):
for n in self.dsl["graph"]["nodes"]:
if cid == n["id"]: return n["data"]["name"]
return ""

def run(self, **kwargs):
ans = ""
if self.answer:
cpn_id = self.answer[0]
self.answer.pop(0)
@ -173,10 +171,10 @@ class Canvas(ABC):
ans = ComponentBase.be_output(str(e))
self.path[-1].append(cpn_id)
if kwargs.get("stream"):
assert isinstance(ans, partial)
return ans
self.history.append(("assistant", ans.to_dict("records")))
return ans
for an in ans():
yield an
else: yield ans
return

if not self.path:
self.components["begin"]["obj"].run(self.history, **kwargs)
@ -184,6 +182,8 @@ class Canvas(ABC):

self.path.append([])
ran = -1
waiting = []
without_dependent_checking = []

def prepare2run(cpns):
nonlocal ran, ans
@ -193,18 +193,22 @@ class Canvas(ABC):
if cpn.component_name == "Answer":
self.answer.append(c)
else:
if DEBUG: print("RUN: ", c)
if cpn.component_name == "Generate":
logging.debug(f"Canvas.prepare2run: {c}")
if c not in without_dependent_checking:
cpids = cpn.get_dependent_components()
if any([c not in self.path[-1] for c in cpids]):
if any([cc not in self.path[-1] for cc in cpids]):
if c not in waiting: waiting.append(c)
continue
yield "*'{}'* is running...🕞".format(self.get_compnent_name(c))
ans = cpn.run(self.history, **kwargs)
self.path[-1].append(c)
ran += 1

prepare2run(self.components[self.path[-2][-1]]["downstream"])
for m in prepare2run(self.components[self.path[-2][-1]]["downstream"]):
yield {"content": m, "running_status": True}

while 0 <= ran < len(self.path[-1]):
if DEBUG: print(ran, self.path)
logging.debug(f"Canvas.run: {ran} {self.path}")
cpn_id = self.path[-1][ran]
cpn = self.get_component(cpn_id)
if not cpn["downstream"]: break
@ -217,27 +221,26 @@ class Canvas(ABC):
assert switch_out in self.components, \
"{}'s output: {} not valid.".format(cpn_id, switch_out)
try:
prepare2run([switch_out])
for m in prepare2run([switch_out]):
yield {"content": m, "running_status": True}
except Exception as e:
for p in [c for p in self.path for c in p][::-1]:
if p.lower().find("answer") >= 0:
self.get_component(p)["obj"].set_exception(e)
prepare2run([p])
break
traceback.print_exc()
break
yield {"content": "*Exception*: {}".format(e), "running_status": True}
logging.exception("Canvas.run got exception")
continue

try:
prepare2run(cpn["downstream"])
for m in prepare2run(cpn["downstream"]):
yield {"content": m, "running_status": True}
except Exception as e:
for p in [c for p in self.path for c in p][::-1]:
if p.lower().find("answer") >= 0:
self.get_component(p)["obj"].set_exception(e)
prepare2run([p])
break
traceback.print_exc()
break
yield {"content": "*Exception*: {}".format(e), "running_status": True}
logging.exception("Canvas.run got exception")

if ran >= len(self.path[-1]) and waiting:
without_dependent_checking = waiting
waiting = []
for m in prepare2run(without_dependent_checking):
yield {"content": m, "running_status": True}
ran -= 1

if self.answer:
cpn_id = self.answer[0]
@ -246,11 +249,13 @@ class Canvas(ABC):
self.path[-1].append(cpn_id)
if kwargs.get("stream"):
assert isinstance(ans, partial)
return ans
for an in ans():
yield an
else:
yield ans

self.history.append(("assistant", ans.to_dict("records")))

return ans
else:
raise Exception("The dialog flow has no way to interact with you. Please add an 'Interact' component to the end of the flow.")

def get_component(self, cpn_id):
return self.components[cpn_id]
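Since `run()` is now a generator that interleaves running-status events with answer chunks, a minimal consumer might look like the sketch below. The `Canvas(dsl_json, tenant_id)` constructor arguments and the DSL string are assumptions for illustration; only the event shapes (`{"content": ..., "running_status": True}` and streamed answer chunks) come from the code above.

```python
# Hypothetical consumer of the reworked Canvas.run() generator.
canvas = Canvas(dsl_json, tenant_id)          # dsl_json: a canvas DSL string (assumption)
canvas.add_user_input("Hello")
for event in canvas.run(stream=True):
    if isinstance(event, dict) and event.get("running_status"):
        print("status:", event["content"])    # e.g. "*'Generate'* is running...🕞"
    else:
        print("answer:", event)               # streamed answer chunks
```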
@ -261,8 +266,10 @@ class Canvas(ABC):

def get_history(self, window_size):
convs = []
for role, obj in self.history[window_size * -1:]:
convs.append({"role": role, "content": (obj if role == "user" else
'\n'.join([str(s) for s in pd.DataFrame(obj)['content']]))})
if isinstance(obj, list) and obj and all([isinstance(o, dict) for o in obj]):
convs.append({"role": role, "content": '\n'.join([str(s.get("content", "")) for s in obj])})
else:
convs.append({"role": role, "content": str(obj)})
return convs
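A standalone illustration of the new branch: an assistant turn stored as the `to_dict("records")` list produced in `run()` has its `content` fields joined, while anything else falls back to `str()`.

```python
# Mirrors the two branches added to get_history() above.
obj = [{"content": "hello"}, {"content": "world"}]   # records-style assistant turn
assert "\n".join(str(o.get("content", "")) for o in obj) == "hello\nworld"
assert str(42) == "42"                               # any other turn is stringified
```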
def add_user_input(self, question):

@ -30,6 +30,7 @@ from .tushare import TuShare, TuShareParam
from .akshare import AkShare, AkShareParam
from .crawler import Crawler, CrawlerParam
from .invoke import Invoke, InvokeParam
from .template import Template, TemplateParam


def component_class(class_name):

@ -13,13 +13,12 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#
import logging
from abc import ABC
import arxiv
import pandas as pd
from agent.settings import DEBUG
from agent.component.base import ComponentBase, ComponentParamBase


class ArXivParam(ComponentParamBase):
"""
Define the ArXiv component parameters.
@ -65,5 +64,5 @@ class ArXiv(ComponentBase, ABC):
return ArXiv.be_output("")

df = pd.DataFrame(arxiv_res)
if DEBUG: print(df, ":::::::::::::::::::::::::::::::::")
logging.debug(f"df: {str(df)}")
return df

@ -13,13 +13,11 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#
import random
import logging
from abc import ABC
from functools import partial
import pandas as pd
import requests
import re
from agent.settings import DEBUG
from agent.component.base import ComponentBase, ComponentParamBase


@ -64,6 +62,6 @@ class Baidu(ComponentBase, ABC):
return Baidu.be_output("")

df = pd.DataFrame(baidu_res)
if DEBUG: print(df, ":::::::::::::::::::::::::::::::::")
logging.debug(f"df: {str(df)}")
return df


@ -17,14 +17,13 @@ from abc import ABC
import builtins
import json
import os
from copy import deepcopy
import logging
from functools import partial
from typing import List, Dict, Tuple, Union
from typing import Tuple, Union

import pandas as pd

from agent import settings
from agent.settings import flow_logger, DEBUG

_FEEDED_DEPRECATED_PARAMS = "_feeded_deprecated_params"
_DEPRECATED_PARAMS = "_deprecated_params"
@ -36,6 +35,8 @@ class ComponentParamBase(ABC):
def __init__(self):
self.output_var_name = "output"
self.message_history_window_size = 22
self.query = []
self.inputs = []

def set_name(self, name: str):
self._name = name
@ -81,7 +82,6 @@ class ComponentParamBase(ABC):
return {name: True for name in self.get_feeded_deprecated_params()}

def __str__(self):

return json.dumps(self.as_dict(), ensure_ascii=False)

def as_dict(self):
@ -359,13 +359,13 @@ class ComponentParamBase(ABC):

def _warn_deprecated_param(self, param_name, descr):
if self._deprecated_params_set.get(param_name):
flow_logger.warning(
logging.warning(
f"{descr} {param_name} is deprecated and ignored in this version."
)

def _warn_to_deprecate_param(self, param_name, descr, new_param):
if self._deprecated_params_set.get(param_name):
flow_logger.warning(
logging.warning(
f"{descr} {param_name} will be deprecated in future release; "
f"please use {new_param} instead."
)
@ -385,10 +385,14 @@ class ComponentBase(ABC):
"""
return """{{
"component_name": "{}",
"params": {}
"params": {},
"output": {},
"inputs": {}
}}""".format(self.component_name,
self._param
)
self._param,
json.dumps(json.loads(str(self._param)).get("output", {}), ensure_ascii=False),
json.dumps(json.loads(str(self._param)).get("inputs", []), ensure_ascii=False)
)

def __init__(self, canvas, id, param: ComponentParamBase):
self._canvas = canvas
@ -396,8 +400,15 @@ class ComponentBase(ABC):
self._param = param
self._param.check()

def get_dependent_components(self):
cpnts = set([para["component_id"].split("@")[0] for para in self._param.query \
if para.get("component_id") \
and para["component_id"].lower().find("answer") < 0 \
and para["component_id"].lower().find("begin") < 0])
return list(cpnts)

def run(self, history, **kwargs):
flow_logger.info("{}, history: {}, kwargs: {}".format(self, json.dumps(history, ensure_ascii=False),
logging.debug("{}, history: {}, kwargs: {}".format(self, json.dumps(history, ensure_ascii=False),
json.dumps(kwargs, ensure_ascii=False)))
try:
res = self._run(history, **kwargs)
@ -431,48 +442,85 @@ class ComponentBase(ABC):

def reset(self):
setattr(self._param, self._param.output_var_name, None)
self._param.inputs = []

def set_output(self, v: pd.DataFrame):
def set_output(self, v: partial | pd.DataFrame):
setattr(self._param, self._param.output_var_name, v)

def get_input(self):
upstream_outs = []
reversed_cpnts = []
if len(self._canvas.path) > 1:
reversed_cpnts.extend(self._canvas.path[-2])
reversed_cpnts.extend(self._canvas.path[-1])

if DEBUG: print(self.component_name, reversed_cpnts[::-1])
if self._param.query:
self._param.inputs = []
outs = []
for q in self._param.query:
if q["component_id"]:
if q["component_id"].split("@")[0].lower().find("begin") >= 0:
cpn_id, key = q["component_id"].split("@")
for p in self._canvas.get_component(cpn_id)["obj"]._param.query:
if p["key"] == key:
outs.append(pd.DataFrame([{"content": p.get("value", "")}]))
self._param.inputs.append({"component_id": q["component_id"],
"content": p.get("value", "")})
break
else:
assert False, f"Can't find parameter '{key}' for {cpn_id}"
continue

outs.append(self._canvas.get_component(q["component_id"])["obj"].output(allow_partial=False)[1])
self._param.inputs.append({"component_id": q["component_id"],
"content": "\n".join(
[str(d["content"]) for d in outs[-1].to_dict('records')])})
elif q["value"]:
self._param.inputs.append({"component_id": None, "content": q["value"]})
outs.append(pd.DataFrame([{"content": q["value"]}]))
if outs:
df = pd.concat(outs, ignore_index=True)
if "content" in df: df = df.drop_duplicates(subset=['content']).reset_index(drop=True)
return df

upstream_outs = []

for u in reversed_cpnts[::-1]:
if self.get_component_name(u) in ["switch", "concentrator"]: continue
if self.component_name.lower() == "generate" and self.get_component_name(u) == "retrieval":
o = self._canvas.get_component(u)["obj"].output(allow_partial=False)[1]
if o is not None:
o["component_id"] = u
upstream_outs.append(o)
continue
if u not in self._canvas.get_component(self._id)["upstream"]: continue
#if self.component_name.lower()!="answer" and u not in self._canvas.get_component(self._id)["upstream"]: continue
if self.component_name.lower().find("switch") < 0 \
and self.get_component_name(u) in ["relevant", "categorize"]:
continue
if u.lower().find("answer") >= 0:
for r, c in self._canvas.history[::-1]:
if r == "user":
upstream_outs.append(pd.DataFrame([{"content": c}]))
upstream_outs.append(pd.DataFrame([{"content": c, "component_id": u}]))
break
break
if self.component_name.lower().find("answer") >= 0 and self.get_component_name(u) in ["relevant"]:
continue
o = self._canvas.get_component(u)["obj"].output(allow_partial=False)[1]
if o is not None:
o["component_id"] = u
upstream_outs.append(o)
break

if upstream_outs:
df = pd.concat(upstream_outs, ignore_index=True)
if "content" in df:
df = df.drop_duplicates(subset=['content']).reset_index(drop=True)
return df
return pd.DataFrame(self._canvas.get_history(3)[-1:])
assert upstream_outs, "Can't inference the where the component input is. Please identify whose output is this component's input."

df = pd.concat(upstream_outs, ignore_index=True)
if "content" in df:
df = df.drop_duplicates(subset=['content']).reset_index(drop=True)

self._param.inputs = []
for _, r in df.iterrows():
self._param.inputs.append({"component_id": r["component_id"], "content": r["content"]})

return df
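The `"begin@key"` branch at the top of `get_input()` can be shown in isolation; the query data below is hypothetical:

```python
# Standalone sketch of the "begin@key" lookup performed in get_input() above.
import pandas as pd

begin_query = [{"key": "jd", "value": "Senior Python engineer"}]  # hypothetical Begin parameters
cpn_id, key = "begin@jd".split("@")
row = next(p for p in begin_query if p["key"] == key)
df = pd.DataFrame([{"content": row.get("value", "")}])
print(df)  # one row whose "content" is the Begin parameter's value
```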
def get_stream_input(self):
reversed_cpnts = []

@ -13,13 +13,12 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#
import logging
from abc import ABC
import requests
import pandas as pd
from agent.settings import DEBUG
from agent.component.base import ComponentBase, ComponentParamBase


class BingParam(ComponentParamBase):
"""
Define the Bing component parameters.
@ -81,5 +80,5 @@ class Bing(ComponentBase, ABC):
return Bing.be_output("")

df = pd.DataFrame(bing_res)
if DEBUG: print(df, ":::::::::::::::::::::::::::::::::")
logging.debug(f"df: {str(df)}")
return df

@ -13,11 +13,11 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#
import logging
from abc import ABC
from api.db import LLMType
from api.db.services.llm_service import LLMBundle
from agent.component import GenerateParam, Generate
from agent.settings import DEBUG


class CategorizeParam(GenerateParam):
@ -34,7 +34,7 @@ class CategorizeParam(GenerateParam):
super().check()
self.check_empty(self.category_description, "[Categorize] Category examples")
for k, v in self.category_description.items():
if not k: raise ValueError(f"[Categorize] Category name can not be empty!")
if not k: raise ValueError("[Categorize] Category name can not be empty!")
if not v.get("to"): raise ValueError(f"[Categorize] 'To' of category {k} can not be empty!")

def get_prompt(self):
@ -77,7 +77,7 @@ class Categorize(Generate, ABC):
chat_mdl = LLMBundle(self._canvas.get_tenant_id(), LLMType.CHAT, self._param.llm_id)
ans = chat_mdl.chat(self._param.get_prompt(), [{"role": "user", "content": input}],
self._param.gen_conf())
if DEBUG: print(ans, ":::::::::::::::::::::::::::::::::", input)
logging.debug(f"input: {input}, answer: {str(ans)}")
for c in self._param.category_description.keys():
if ans.lower().find(c.lower()) >= 0:
return Categorize.be_output(self._param.category_description[c]["to"])

@ -1,75 +0,0 @@
#
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from abc import ABC

import pandas as pd

from api.db import LLMType
from api.db.services.knowledgebase_service import KnowledgebaseService
from api.db.services.llm_service import LLMBundle
from api.settings import retrievaler
from agent.component.base import ComponentBase, ComponentParamBase


class CiteParam(ComponentParamBase):

"""
Define the Retrieval component parameters.
"""
def __init__(self):
super().__init__()
self.cite_sources = []

def check(self):
self.check_empty(self.cite_source, "Please specify where you want to cite from.")


class Cite(ComponentBase, ABC):
component_name = "Cite"

def _run(self, history, **kwargs):
input = "\n- ".join(self.get_input()["content"])
sources = [self._canvas.get_component(cpn_id).output()[1] for cpn_id in self._param.cite_source]
query = []
for role, cnt in history[::-1][:self._param.message_history_window_size]:
if role != "user":continue
query.append(cnt)
query = "\n".join(query)

kbs = KnowledgebaseService.get_by_ids(self._param.kb_ids)
if not kbs:
raise ValueError("Can't find knowledgebases by {}".format(self._param.kb_ids))
embd_nms = list(set([kb.embd_id for kb in kbs]))
assert len(embd_nms) == 1, "Knowledge bases use different embedding models."

embd_mdl = LLMBundle(kbs[0].tenant_id, LLMType.EMBEDDING, embd_nms[0])

rerank_mdl = None
if self._param.rerank_id:
rerank_mdl = LLMBundle(kbs[0].tenant_id, LLMType.RERANK, self._param.rerank_id)

kbinfos = retrievaler.retrieval(query, embd_mdl, kbs[0].tenant_id, self._param.kb_ids,
1, self._param.top_n,
self._param.similarity_threshold, 1 - self._param.keywords_similarity_weight,
aggs=False, rerank_mdl=rerank_mdl)

if not kbinfos["chunks"]: return pd.DataFrame()
df = pd.DataFrame(kbinfos["chunks"])
df["content"] = df["content_with_weight"]
del df["content_with_weight"]
return df


@ -13,10 +13,10 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#
import logging
from abc import ABC
from duckduckgo_search import DDGS
import pandas as pd
from agent.settings import DEBUG
from agent.component.base import ComponentBase, ComponentParamBase


@ -62,5 +62,5 @@ class DuckDuckGo(ComponentBase, ABC):
return DuckDuckGo.be_output("")

df = pd.DataFrame(duck_res)
if DEBUG: print(df, ":::::::::::::::::::::::::::::::::")
logging.debug("df: {df}")
return df

@ -19,7 +19,7 @@ import pandas as pd
from api.db import LLMType
from api.db.services.dialog_service import message_fit_in
from api.db.services.llm_service import LLMBundle
from api.settings import retrievaler
from api import settings
from agent.component.base import ComponentBase, ComponentParamBase


@ -63,18 +63,22 @@ class Generate(ComponentBase):
component_name = "Generate"

def get_dependent_components(self):
cpnts = [para["component_id"] for para in self._param.parameters]
return cpnts
cpnts = set([para["component_id"].split("@")[0] for para in self._param.parameters \
if para.get("component_id") \
and para["component_id"].lower().find("answer") < 0 \
and para["component_id"].lower().find("begin") < 0])
return list(cpnts)

def set_cite(self, retrieval_res, answer):
retrieval_res = retrieval_res.dropna(subset=["vector", "content_ltks"]).reset_index(drop=True)
if "empty_response" in retrieval_res.columns:
retrieval_res["empty_response"].fillna("", inplace=True)
answer, idx = retrievaler.insert_citations(answer, [ck["content_ltks"] for _, ck in retrieval_res.iterrows()],
[ck["vector"] for _, ck in retrieval_res.iterrows()],
LLMBundle(self._canvas.get_tenant_id(), LLMType.EMBEDDING,
self._canvas.get_embedding_model()), tkweight=0.7,
vtweight=0.3)
answer, idx = settings.retrievaler.insert_citations(answer,
[ck["content_ltks"] for _, ck in retrieval_res.iterrows()],
[ck["vector"] for _, ck in retrieval_res.iterrows()],
LLMBundle(self._canvas.get_tenant_id(), LLMType.EMBEDDING,
self._canvas.get_embedding_model()), tkweight=0.7,
vtweight=0.3)
doc_ids = set([])
recall_docs = []
for i in idx:
@ -101,22 +105,53 @@ class Generate(ComponentBase):
chat_mdl = LLMBundle(self._canvas.get_tenant_id(), LLMType.CHAT, self._param.llm_id)
prompt = self._param.prompt

retrieval_res = self.get_input()
input = (" - "+"\n - ".join([c for c in retrieval_res["content"] if isinstance(c, str)])) if "content" in retrieval_res else ""
retrieval_res = []
self._param.inputs = []
for para in self._param.parameters:
cpn = self._canvas.get_component(para["component_id"])["obj"]
if not para.get("component_id"): continue
component_id = para["component_id"].split("@")[0]
if para["component_id"].lower().find("@") >= 0:
cpn_id, key = para["component_id"].split("@")
for p in self._canvas.get_component(cpn_id)["obj"]._param.query:
if p["key"] == key:
kwargs[para["key"]] = p.get("value", "")
self._param.inputs.append(
{"component_id": para["component_id"], "content": kwargs[para["key"]]})
break
else:
assert False, f"Can't find parameter '{key}' for {cpn_id}"
continue

cpn = self._canvas.get_component(component_id)["obj"]
if cpn.component_name.lower() == "answer":
kwargs[para["key"]] = self._canvas.get_history(1)[0]["content"]
hist = self._canvas.get_history(1)
if hist:
hist = hist[0]["content"]
else:
hist = ""
kwargs[para["key"]] = hist
continue
_, out = cpn.output(allow_partial=False)
if "content" not in out.columns:
kwargs[para["key"]] = "Nothing"
kwargs[para["key"]] = ""
else:
if cpn.component_name.lower() == "retrieval":
retrieval_res.append(out)
kwargs[para["key"]] = " - "+"\n - ".join([o if isinstance(o, str) else str(o) for o in out["content"]])
self._param.inputs.append({"component_id": para["component_id"], "content": kwargs[para["key"]]})

if retrieval_res:
retrieval_res = pd.concat(retrieval_res, ignore_index=True)
else: retrieval_res = pd.DataFrame([])

kwargs["input"] = input
for n, v in kwargs.items():
prompt = re.sub(r"\{%s\}" % re.escape(n), re.escape(str(v)), prompt)
prompt = re.sub(r"\{%s\}" % re.escape(n), str(v).replace("\\", " "), prompt)

if not self._param.inputs and prompt.find("{input}") >= 0:
retrieval_res = self.get_input()
input = (" - " + "\n - ".join(
[c for c in retrieval_res["content"] if isinstance(c, str)])) if "content" in retrieval_res else ""
prompt = re.sub(r"\{input\}", re.escape(input), prompt)
downstreams = self._canvas.get_component(self._id)["downstream"]
if kwargs.get("stream") and len(downstreams) == 1 and self._canvas.get_component(downstreams[0])[
@ -129,7 +164,9 @@ class Generate(ComponentBase):
return pd.DataFrame([res])

msg = self._canvas.get_history(self._param.message_history_window_size)
if len(msg) < 1: msg.append({"role": "user", "content": ""})
_, msg = message_fit_in([{"role": "system", "content": prompt}, *msg], int(chat_mdl.max_length * 0.97))
if len(msg) < 2: msg.append({"role": "user", "content": ""})
ans = chat_mdl.chat(msg[0]["content"], msg[1:], self._param.gen_conf())

if self._param.cite and "content_ltks" in retrieval_res.columns and "vector" in retrieval_res.columns:
@ -148,7 +185,9 @@ class Generate(ComponentBase):
return

msg = self._canvas.get_history(self._param.message_history_window_size)
if len(msg) < 1: msg.append({"role": "user", "content": ""})
_, msg = message_fit_in([{"role": "system", "content": prompt}, *msg], int(chat_mdl.max_length * 0.97))
if len(msg) < 2: msg.append({"role": "user", "content": ""})
answer = ""
for ans in chat_mdl.chat_streamly(msg[0]["content"], msg[1:], self._param.gen_conf()):
res = {"content": ans, "reference": []}

@ -13,10 +13,10 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#
import logging
from abc import ABC
import pandas as pd
import requests
from agent.settings import DEBUG
from agent.component.base import ComponentBase, ComponentParamBase


@ -57,5 +57,5 @@ class GitHub(ComponentBase, ABC):
return GitHub.be_output("")

df = pd.DataFrame(github_res)
if DEBUG: print(df, ":::::::::::::::::::::::::::::::::")
logging.debug(f"df: {df}")
return df

@ -13,10 +13,10 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#
import logging
from abc import ABC
from serpapi import GoogleSearch
import pandas as pd
from agent.settings import DEBUG
from agent.component.base import ComponentBase, ComponentParamBase


@ -85,12 +85,12 @@ class Google(ComponentBase, ABC):
"hl": self._param.language, "num": self._param.top_n})
google_res = [{"content": '<a href="' + i["link"] + '">' + i["title"] + '</a> ' + i["snippet"]} for i in
client.get_dict()["organic_results"]]
except Exception as e:
except Exception:
return Google.be_output("**ERROR**: Existing Unavailable Parameters!")

if not google_res:
return Google.be_output("")

df = pd.DataFrame(google_res)
if DEBUG: print(df, ":::::::::::::::::::::::::::::::::")
logging.debug(f"df: {df}")
return df

@ -13,9 +13,9 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#
import logging
from abc import ABC
import pandas as pd
from agent.settings import DEBUG
from agent.component.base import ComponentBase, ComponentParamBase
from scholarly import scholarly

@ -58,13 +58,13 @@ class GoogleScholar(ComponentBase, ABC):
'pub_url'] + '"></a> ' + "\n author: " + ",".join(pub['bib']['author']) + '\n Abstract: ' + pub[
'bib'].get('abstract', 'no abstract')})

except StopIteration or Exception as e:
print("**ERROR** " + str(e))
except StopIteration or Exception:
logging.exception("GoogleScholar")
break

if not scholar_res:
return GoogleScholar.be_output("")

df = pd.DataFrame(scholar_res)
if DEBUG: print(df, ":::::::::::::::::::::::::::::::::")
logging.debug(f"df: {df}")
return df

@ -51,6 +51,9 @@ class Invoke(ComponentBase, ABC):
for para in self._param.variables:
if para.get("component_id"):
cpn = self._canvas.get_component(para["component_id"])["obj"]
if cpn.component_name.lower() == "answer":
args[para["key"]] = self._canvas.get_history(1)[0]["content"]
continue
_, out = cpn.output(allow_partial=False)
args[para["key"]] = "\n".join(out["content"])
else:

@ -13,12 +13,12 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#
import logging
import re
from abc import ABC
from api.db import LLMType
from api.db.services.llm_service import LLMBundle
from agent.component import GenerateParam, Generate
from agent.settings import DEBUG


class KeywordExtractParam(GenerateParam):
@ -50,16 +50,13 @@ class KeywordExtract(Generate, ABC):
component_name = "KeywordExtract"

def _run(self, history, **kwargs):
q = ""
for r, c in self._canvas.history[::-1]:
if r == "user":
q += c
break
query = self.get_input()
query = str(query["content"][0]) if "content" in query else ""

chat_mdl = LLMBundle(self._canvas.get_tenant_id(), LLMType.CHAT, self._param.llm_id)
ans = chat_mdl.chat(self._param.get_prompt(), [{"role": "user", "content": q}],
ans = chat_mdl.chat(self._param.get_prompt(), [{"role": "user", "content": query}],
self._param.gen_conf())

ans = re.sub(r".*keyword:", "", ans).strip()
if DEBUG: print(ans, ":::::::::::::::::::::::::::::::::")
logging.debug(f"ans: {ans}")
return KeywordExtract.be_output(ans)

@ -13,12 +13,12 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#
import logging
from abc import ABC
from Bio import Entrez
import re
import pandas as pd
import xml.etree.ElementTree as ET
from agent.settings import DEBUG
from agent.component.base import ComponentBase, ComponentParamBase


@ -65,5 +65,5 @@ class PubMed(ComponentBase, ABC):
return PubMed.be_output("")

df = pd.DataFrame(pubmed_res)
if DEBUG: print(df, ":::::::::::::::::::::::::::::::::")
logging.debug(f"df: {df}")
return df

@ -13,6 +13,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#
import logging
from abc import ABC
from api.db import LLMType
from api.db.services.llm_service import LLMBundle
@ -70,7 +71,7 @@ class Relevant(Generate, ABC):
ans = chat_mdl.chat(self._param.get_prompt(), [{"role": "user", "content": ans}],
self._param.gen_conf())

print(ans, ":::::::::::::::::::::::::::::::::")
logging.debug(ans)
if ans.lower().find("yes") >= 0:
return Relevant.be_output(self._param.yes)
if ans.lower().find("no") >= 0:

@ -13,6 +13,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#
import logging
from abc import ABC

import pandas as pd
@ -20,7 +21,7 @@ import pandas as pd
from api.db import LLMType
from api.db.services.knowledgebase_service import KnowledgebaseService
from api.db.services.llm_service import LLMBundle
from api.settings import retrievaler
from api import settings
from agent.component.base import ComponentBase, ComponentParamBase


@ -66,7 +67,7 @@ class Retrieval(ComponentBase, ABC):
if self._param.rerank_id:
rerank_mdl = LLMBundle(kbs[0].tenant_id, LLMType.RERANK, self._param.rerank_id)

kbinfos = retrievaler.retrieval(query, embd_mdl, kbs[0].tenant_id, self._param.kb_ids,
kbinfos = settings.retrievaler.retrieval(query, embd_mdl, kbs[0].tenant_id, self._param.kb_ids,
1, self._param.top_n,
self._param.similarity_threshold, 1 - self._param.keywords_similarity_weight,
aggs=False, rerank_mdl=rerank_mdl)
@ -80,7 +81,7 @@ class Retrieval(ComponentBase, ABC):
df = pd.DataFrame(kbinfos["chunks"])
df["content"] = df["content_with_weight"]
del df["content_with_weight"]
print(">>>>>>>>>>>>>>>>>>>>>>>>>>\n", query, df)
logging.debug("{} {}".format(query, df))
return df


@ -13,6 +13,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#
import logging
from abc import ABC
from api.db import LLMType
from api.db.services.llm_service import LLMBundle
@ -104,7 +105,7 @@ class RewriteQuestion(Generate, ABC):
self._canvas.history.pop()
self._canvas.history.append(("user", ans))

print(ans, ":::::::::::::::::::::::::::::::::")
logging.debug(ans)
return RewriteQuestion.be_output(ans)


@ -47,13 +47,35 @@ class SwitchParam(ComponentParamBase):
class Switch(ComponentBase, ABC):
component_name = "Switch"

def get_dependent_components(self):
res = []
for cond in self._param.conditions:
for item in cond["items"]:
if not item["cpn_id"]: continue
if item["cpn_id"].find("begin") >= 0:
continue
cid = item["cpn_id"].split("@")[0]
res.append(cid)

return list(set(res))

def _run(self, history, **kwargs):
for cond in self._param.conditions:
res = []
for item in cond["items"]:
out = self._canvas.get_component(item["cpn_id"])["obj"].output()[1]
cpn_input = "" if "content" not in out.columns else " ".join(out["content"])
res.append(self.process_operator(cpn_input, item["operator"], item["value"]))
if not item["cpn_id"]:continue
cid = item["cpn_id"].split("@")[0]
if item["cpn_id"].find("@") > 0:
cpn_id, key = item["cpn_id"].split("@")
for p in self._canvas.get_component(cid)["obj"]._param.query:
if p["key"] == key:
res.append(self.process_operator(p.get("value",""), item["operator"], item.get("value", "")))
break
else:
out = self._canvas.get_component(cid)["obj"].output()[1]
cpn_input = "" if "content" not in out.columns else " ".join([str(s) for s in out["content"]])
res.append(self.process_operator(cpn_input, item["operator"], item.get("value", "")))

if cond["logical_operator"] != "and" and any(res):
return Switch.be_output(cond["to"])
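For reference, a condition using the new `"cpn_id@key"` form might look like the hypothetical DSL fragment below; the component IDs are borrowed from templates elsewhere in this diff, and the operator names are assumptions:

```python
# Hypothetical Switch condition illustrating the "cpn_id@key" handling above.
cond = {
    "logical_operator": "or",
    "items": [
        {"cpn_id": "begin@lang", "operator": "=", "value": "en"},   # Begin parameter reference
        {"cpn_id": "Generate:ProudEarsWorry", "operator": "contains", "value": "yes"},
    ],
    "to": "Answer:TwentyMugsDeny",
}
```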
agent/component/template.py (new file, 85 lines)

@ -0,0 +1,85 @@
#
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import re
from agent.component.base import ComponentBase, ComponentParamBase


class TemplateParam(ComponentParamBase):
"""
Define the Generate component parameters.
"""

def __init__(self):
super().__init__()
self.content = ""
self.parameters = []

def check(self):
self.check_empty(self.content, "[Template] Content")
return True


class Template(ComponentBase):
component_name = "Template"

def get_dependent_components(self):
cpnts = set([para["component_id"].split("@")[0] for para in self._param.parameters \
if para.get("component_id") \
and para["component_id"].lower().find("answer") < 0 \
and para["component_id"].lower().find("begin") < 0])
return list(cpnts)

def _run(self, history, **kwargs):
content = self._param.content

self._param.inputs = []
for para in self._param.parameters:
if not para.get("component_id"): continue
component_id = para["component_id"].split("@")[0]
if para["component_id"].lower().find("@") >= 0:
cpn_id, key = para["component_id"].split("@")
for p in self._canvas.get_component(cpn_id)["obj"]._param.query:
if p["key"] == key:
kwargs[para["key"]] = p.get("value", "")
self._param.inputs.append(
{"component_id": para["component_id"], "content": kwargs[para["key"]]})
break
else:
assert False, f"Can't find parameter '{key}' for {cpn_id}"
continue

cpn = self._canvas.get_component(component_id)["obj"]
if cpn.component_name.lower() == "answer":
hist = self._canvas.get_history(1)
if hist:
hist = hist[0]["content"]
else:
hist = ""
kwargs[para["key"]] = hist
continue

_, out = cpn.output(allow_partial=False)
if "content" not in out.columns:
kwargs[para["key"]] = ""
else:
kwargs[para["key"]] = " - "+"\n - ".join([o if isinstance(o, str) else str(o) for o in out["content"]])
self._param.inputs.append({"component_id": para["component_id"], "content": kwargs[para["key"]]})

for n, v in kwargs.items():
content = re.sub(r"\{%s\}" % re.escape(n), str(v).replace("\\", " "), content)

return Template.be_output(content)
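A minimal sketch of how this new Template component could be parameterized; the component ID is borrowed from templates elsewhere in this diff, and the exact DSL wiring is an assumption:

```python
# Hypothetical parameterization of the new Template component: every "{name}"
# placeholder in `content` is replaced by the referenced component's output.
param = TemplateParam()
param.content = "Job description:\n{jd}\n\nWrite a short summary."
param.parameters = [
    {"key": "jd", "component_id": "Retrieval:ColdEelsArrive"},  # upstream component's output
]
param.check()  # passes: content is non-empty
```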
@ -13,12 +13,10 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#
import random
import logging
from abc import ABC
from functools import partial
import wikipedia
import pandas as pd
from agent.settings import DEBUG
from agent.component.base import ComponentBase, ComponentParamBase


@ -65,5 +63,5 @@ class Wikipedia(ComponentBase, ABC):
return Wikipedia.be_output("")

df = pd.DataFrame(wiki_res)
if DEBUG: print(df, ":::::::::::::::::::::::::::::::::")
logging.debug(f"df: {df}")
return df

@ -13,6 +13,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#
import logging
from abc import ABC
import pandas as pd
from agent.component.base import ComponentBase, ComponentParamBase
@ -74,8 +75,8 @@ class YahooFinance(ComponentBase, ABC):
{"content": "quarterly cash flow statement:\n" + msft.quarterly_cashflow.to_markdown() + "\n"})
if self._param.news:
yohoo_res.append({"content": "news:\n" + pd.DataFrame(msft.news).to_markdown() + "\n"})
except Exception as e:
print("**ERROR** " + str(e))
except Exception:
logging.exception("YahooFinance got exception")

if not yohoo_res:
return YahooFinance.be_output("")

@ -13,22 +13,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Logger
import os

from api.utils.file_utils import get_project_base_directory
from api.utils.log_utils import LoggerFactory, getLogger

DEBUG = 0
LoggerFactory.set_directory(
os.path.join(
get_project_base_directory(),
"logs",
"flow"))
# {CRITICAL: 50, FATAL:50, ERROR:40, WARNING:30, WARN:30, INFO:20, DEBUG:10, NOTSET:0}
LoggerFactory.LEVEL = 30

flow_logger = getLogger("flow")
database_logger = getLogger("database")
FLOAT_ZERO = 1e-8
PARAM_MAXDEPTH = 5
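With `flow_logger` and `LoggerFactory` removed here, the components in this diff log through the stdlib `logging` module directly, e.g.:

```python
# The pattern the rest of this diff migrates to (logging.debug/logging.exception
# replacing flow_logger and bare print calls throughout).
import logging

logging.debug("Canvas.prepare2run: %s", "Generate:ProudEarsWorry")
```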
@ -152,7 +152,8 @@
"Generate:ToughLawsCheat",
"Generate:KindCarrotsSit",
"Generate:DirtyToolsTrain",
"Generate:FluffyPillowsGrow"
"Generate:FluffyPillowsGrow",
"Generate:ProudEarsWorry"
]
},
"Retrieval:ShaggyRadiosRetire": {
@ -212,7 +213,9 @@
"top_p": 0.3
}
},
"downstream": [],
"downstream": [
"Answer:TwentyMugsDeny"
],
"upstream": [
"categorize:0"
]
@ -331,9 +334,9 @@
"message_history_window_size": 12,
"parameters": [
{
"component_id": "Retrieval:ColdEelsArrive",
"id": "5166a107-e859-4c71-99a2-3a216c775347",
"key": "jd",
"component_id": "Retrieval:ColdEelsArrive"
"key": "jd"
}
],
"presence_penalty": 0.4,
@ -1266,9 +1269,9 @@
"parameter": "Precise",
"parameters": [
{
"component_id": "Retrieval:ColdEelsArrive",
"id": "5166a107-e859-4c71-99a2-3a216c775347",
"key": "jd",
"component_id": "Retrieval:ColdEelsArrive"
"key": "jd"
}
],
"presencePenaltyEnabled": true,
@ -1541,6 +1544,19 @@
"target": "Answer:TwentyMugsDeny",
"targetHandle": "c",
"type": "buttonEdge"
},
{
"type": "buttonEdge",
"markerEnd": "logo",
"style": {
"strokeWidth": 2,
"stroke": "rgb(202 197 245)"
},
"source": "Generate:ProudEarsWorry",
"sourceHandle": "b",
"target": "Answer:TwentyMugsDeny",
"targetHandle": "c",
"id": "reactflow__edge-Generate:ProudEarsWorryb-Answer:TwentyMugsDenyc"
}
]
},

File diff suppressed because one or more lines are too long

@ -1,7 +1,7 @@
{
"id": 8,
"title": "Intelligent investment advisor",
"description": "An intelligent investment advisor that can answer your financial questions based on real-time domestic financial data and financial information.",
"description": "An intelligent investment advisor that answers your financial questions using real-time domestic financial data.",
"canvas_type": "chatbot",
"dsl": {
"answer": [],

@ -1,7 +1,7 @@
{
"id": 7,
"title": "Medical consultation",
"description": "Medical Consultation Assistant, can provide you with some professional consultation suggestions for your reference. Please note that the content provided by the medical assistant is for reference only and may not be authentic or available. Knowledge Base Content Reference: <a href = 'https://huggingface.co/datasets/InfiniFlow/medical_QA/tree/main'> Medical Knowledge Base Reference</a>",
"description": "A consultant that offers medical suggestions using an internal QA dataset and PubMed search results. Note that this agent's answers are for reference only and may not be valid. The dataset can be found at https://huggingface.co/datasets/InfiniFlow/medical_QA/tree/main",
"canvas_type": "chatbot",
"dsl": {
"answer": [],

agent/templates/seo_blog.json (new file, 1410 lines) — file diff suppressed because one or more lines are too long

@ -13,14 +13,16 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#
import logging
import os
import sys
import logging
from importlib.util import module_from_spec, spec_from_file_location
from pathlib import Path
from flask import Blueprint, Flask
from werkzeug.wrappers.request import Request
from flask_cors import CORS
from flasgger import Swagger
from itsdangerous.url_safe import URLSafeTimedSerializer as Serializer

from api.db import StatusEnum
from api.db.db_models import close_connection
@ -29,32 +31,60 @@ from api.utils import CustomJSONEncoder, commands

from flask_session import Session
from flask_login import LoginManager
from api.settings import SECRET_KEY, stat_logger
from api.settings import API_VERSION, access_logger
from api import settings
from api.utils.api_utils import server_error_response
from itsdangerous.url_safe import URLSafeTimedSerializer as Serializer
from api.constants import API_VERSION

__all__ = ['app']


logger = logging.getLogger('flask.app')
for h in access_logger.handlers:
logger.addHandler(h)
__all__ = ["app"]

Request.json = property(lambda self: self.get_json(force=True, silent=True))

app = Flask(__name__)
CORS(app, supports_credentials=True,max_age=2592000)

# Add this at the beginning of your file to configure Swagger UI
swagger_config = {
"headers": [],
"specs": [
{
"endpoint": "apispec",
"route": "/apispec.json",
"rule_filter": lambda rule: True,  # Include all endpoints
"model_filter": lambda tag: True,  # Include all models
}
],
"static_url_path": "/flasgger_static",
"swagger_ui": True,
"specs_route": "/apidocs/",
}

swagger = Swagger(
app,
config=swagger_config,
template={
"swagger": "2.0",
"info": {
"title": "RAGFlow API",
"description": "",
"version": "1.0.0",
},
"securityDefinitions": {
"ApiKeyAuth": {"type": "apiKey", "name": "Authorization", "in": "header"}
},
},
)
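Given the `swagger_config` above, the spec is served at `/apispec.json` and the interactive UI at `/apidocs/`. A hedged smoke test, assuming the API server is listening on port 9380 as in the startup log quoted earlier:

```python
# Smoke test for the Swagger wiring above; host and port are assumptions based
# on the "Running on http://x.x.x.x:9380" log line quoted earlier in this diff.
import requests

spec = requests.get("http://127.0.0.1:9380/apispec.json").json()
assert spec["info"]["title"] == "RAGFlow API"
print("Swagger UI available at http://127.0.0.1:9380/apidocs/")
```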
CORS(app, supports_credentials=True, max_age=2592000)
app.url_map.strict_slashes = False
app.json_encoder = CustomJSONEncoder
app.errorhandler(Exception)(server_error_response)


## convince for dev and debug
#app.config["LOGIN_DISABLED"] = True
# app.config["LOGIN_DISABLED"] = True
app.config["SESSION_PERMANENT"] = False
app.config["SESSION_TYPE"] = "filesystem"
app.config['MAX_CONTENT_LENGTH'] = int(os.environ.get("MAX_CONTENT_LENGTH", 128 * 1024 * 1024))
app.config["MAX_CONTENT_LENGTH"] = int(
os.environ.get("MAX_CONTENT_LENGTH", 128 * 1024 * 1024)
)

Session(app)
login_manager = LoginManager()
@ -64,17 +94,23 @@ commands.register_commands(app)


def search_pages_path(pages_dir):
app_path_list = [path for path in pages_dir.glob('*_app.py') if not path.name.startswith('.')]
api_path_list = [path for path in pages_dir.glob('*sdk/*.py') if not path.name.startswith('.')]
app_path_list = [
path for path in pages_dir.glob("*_app.py") if not path.name.startswith(".")
]
api_path_list = [
path for path in pages_dir.glob("*sdk/*.py") if not path.name.startswith(".")
]
app_path_list.extend(api_path_list)
return app_path_list


def register_page(page_path):
path = f'{page_path}'
path = f"{page_path}"

page_name = page_path.stem.rstrip('_app')
module_name = '.'.join(page_path.parts[page_path.parts.index('api'):-1] + (page_name,))
page_name = page_path.stem.rstrip("_app")
module_name = ".".join(
page_path.parts[page_path.parts.index("api"): -1] + (page_name,)
)

spec = spec_from_file_location(module_name, page_path)
page = module_from_spec(spec)
@ -82,8 +118,10 @@ def register_page(page_path):
page.manager = Blueprint(page_name, module_name)
sys.modules[module_name] = page
spec.loader.exec_module(page)
page_name = getattr(page, 'page_name', page_name)
url_prefix = f'/api/{API_VERSION}' if "/sdk/" in path else f'/{API_VERSION}/{page_name}'
page_name = getattr(page, "page_name", page_name)
url_prefix = (
f"/api/{API_VERSION}" if "/sdk/" in path else f"/{API_VERSION}/{page_name}"
)

app.register_blueprint(page.manager, url_prefix=url_prefix)
return url_prefix
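The reformatted `url_prefix` expression routes SDK modules and regular page apps differently; a small sketch, assuming `API_VERSION` is `"v1"` (a value this diff does not show):

```python
# Mirrors the url_prefix rule in register_page(); the API_VERSION value is assumed.
def url_prefix_for(path: str, page_name: str, api_version: str = "v1") -> str:
    return f"/api/{api_version}" if "/sdk/" in path else f"/{api_version}/{page_name}"

assert url_prefix_for("api/apps/sdk/doc.py", "doc") == "/api/v1"
assert url_prefix_for("api/apps/user_app.py", "user") == "/v1/user"
```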
|
||||
@ -91,31 +129,31 @@ def register_page(page_path):
|
||||
|
||||
pages_dir = [
|
||||
Path(__file__).parent,
|
||||
Path(__file__).parent.parent / 'api' / 'apps',
|
||||
Path(__file__).parent.parent / 'api' / 'apps' / 'sdk',
|
||||
Path(__file__).parent.parent / "api" / "apps",
|
||||
Path(__file__).parent.parent / "api" / "apps" / "sdk",
|
||||
]
|
||||
|
||||
client_urls_prefix = [
|
||||
register_page(path)
|
||||
for dir in pages_dir
|
||||
for path in search_pages_path(dir)
|
||||
register_page(path) for dir in pages_dir for path in search_pages_path(dir)
|
||||
]
|
||||
|
||||
|
||||
@login_manager.request_loader
|
||||
def load_user(web_request):
|
||||
jwt = Serializer(secret_key=SECRET_KEY)
|
||||
jwt = Serializer(secret_key=settings.SECRET_KEY)
|
||||
authorization = web_request.headers.get("Authorization")
|
||||
if authorization:
|
||||
try:
|
||||
access_token = str(jwt.loads(authorization))
|
||||
user = UserService.query(access_token=access_token, status=StatusEnum.VALID.value)
|
||||
user = UserService.query(
|
||||
access_token=access_token, status=StatusEnum.VALID.value
|
||||
)
|
||||
if user:
|
||||
return user[0]
|
||||
else:
|
||||
return None
|
||||
except Exception as e:
|
||||
stat_logger.exception(e)
|
||||
except Exception:
|
||||
logging.exception("load_user got exception")
|
||||
return None
|
||||
else:
|
||||
return None
|
||||
@ -123,4 +161,4 @@ def load_user(web_request):
|
||||
|
||||
@app.teardown_request
|
||||
def _db_close(exc):
|
||||
close_connection()
|
||||
close_connection()
|
||||
|
||||
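A note on what the new Swagger wiring above gives you: flasgger serves the generated spec at /apispec.json and the interactive UI at /apidocs/, and the ApiKeyAuth security definition tells clients to send the token verbatim in the Authorization header, which is exactly the header load_user() deserializes. A minimal client sketch under those assumptions; the host, port, and token value are placeholders, not something this diff specifies:

import requests

BASE_URL = "http://127.0.0.1:9380"  # placeholder host:port, adjust to your deployment
TOKEN = "<access-token>"            # placeholder; load_user() expects the serialized token

# Fetch the generated spec; swagger_config routes it at /apispec.json.
spec = requests.get(f"{BASE_URL}/apispec.json").json()
print(spec["info"]["title"])        # "RAGFlow API", per the template above

# Authenticated calls put the raw token in the Authorization header,
# matching the ApiKeyAuth securityDefinition (no "Bearer " prefix is shown in the diff).
headers = {"Authorization": TOKEN}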
@@ -32,7 +32,7 @@ from api.db.services.file_service import FileService
 from api.db.services.knowledgebase_service import KnowledgebaseService
 from api.db.services.task_service import queue_tasks, TaskService
 from api.db.services.user_service import UserTenantService
-from api.settings import RetCode, retrievaler
+from api import settings
 from api.utils import get_uuid, current_timestamp, datetime_format
 from api.utils.api_utils import server_error_response, get_data_error_result, get_json_result, validate_request, \
     generate_confirmation_token

@@ -52,7 +52,7 @@ def new_token():
     try:
         tenants = UserTenantService.query(user_id=current_user.id)
         if not tenants:
-            return get_data_error_result(retmsg="Tenant not found!")
+            return get_data_error_result(message="Tenant not found!")

         tenant_id = tenants[0].tenant_id
         obj = {"tenant_id": tenant_id, "token": generate_confirmation_token(tenant_id),

@@ -68,7 +68,7 @@ def new_token():
             obj["dialog_id"] = req["dialog_id"]

         if not APITokenService.save(**obj):
-            return get_data_error_result(retmsg="Fail to new a dialog!")
+            return get_data_error_result(message="Fail to new a dialog!")

         return get_json_result(data=obj)
     except Exception as e:

@@ -81,7 +81,7 @@ def token_list():
     try:
         tenants = UserTenantService.query(user_id=current_user.id)
         if not tenants:
-            return get_data_error_result(retmsg="Tenant not found!")
+            return get_data_error_result(message="Tenant not found!")

         id = request.args["dialog_id"] if "dialog_id" in request.args else request.args["canvas_id"]
         objs = APITokenService.query(tenant_id=tenants[0].tenant_id, dialog_id=id)

@@ -110,7 +110,7 @@ def stats():
     try:
         tenants = UserTenantService.query(user_id=current_user.id)
         if not tenants:
-            return get_data_error_result(retmsg="Tenant not found!")
+            return get_data_error_result(message="Tenant not found!")
         objs = API4ConversationService.stats(
             tenants[0].tenant_id,
             request.args.get(

@@ -141,7 +141,7 @@ def set_conversation():
     objs = APIToken.query(token=token)
     if not objs:
         return get_json_result(
-            data=False, retmsg='Token is not valid!"', retcode=RetCode.AUTHENTICATION_ERROR)
+            data=False, message='Token is not valid!"', code=settings.RetCode.AUTHENTICATION_ERROR)
     req = request.json
     try:
         if objs[0].source == "agent":

@@ -163,7 +163,7 @@ def set_conversation():
         else:
             e, dia = DialogService.get_by_id(objs[0].dialog_id)
             if not e:
-                return get_data_error_result(retmsg="Dialog not found")
+                return get_data_error_result(message="Dialog not found")
             conv = {
                 "id": get_uuid(),
                 "dialog_id": dia.id,

@@ -183,11 +183,11 @@ def completion():
     objs = APIToken.query(token=token)
     if not objs:
         return get_json_result(
-            data=False, retmsg='Token is not valid!"', retcode=RetCode.AUTHENTICATION_ERROR)
+            data=False, message='Token is not valid!"', code=settings.RetCode.AUTHENTICATION_ERROR)
     req = request.json
     e, conv = API4ConversationService.get_by_id(req["conversation_id"])
     if not e:
-        return get_data_error_result(retmsg="Conversation not found!")
+        return get_data_error_result(message="Conversation not found!")
     if "quote" not in req: req["quote"] = False

     msg = []

@@ -257,19 +257,20 @@ def completion():
                     ans = {"answer": ans["content"], "reference": ans.get("reference", [])}
                     fillin_conv(ans)
                     rename_field(ans)
-                    yield "data:" + json.dumps({"retcode": 0, "retmsg": "", "data": ans},
+                    yield "data:" + json.dumps({"code": 0, "message": "", "data": ans},
                                                ensure_ascii=False) + "\n\n"

                 canvas.messages.append({"role": "assistant", "content": final_ans["content"], "id": message_id})
                 canvas.history.append(("assistant", final_ans["content"]))
                 if final_ans.get("reference"):
                     canvas.reference.append(final_ans["reference"])
                 cvs.dsl = json.loads(str(canvas))
                 API4ConversationService.append_message(conv.id, conv.to_dict())
             except Exception as e:
-                yield "data:" + json.dumps({"retcode": 500, "retmsg": str(e),
+                yield "data:" + json.dumps({"code": 500, "message": str(e),
                                             "data": {"answer": "**ERROR**: " + str(e), "reference": []}},
                                            ensure_ascii=False) + "\n\n"
-            yield "data:" + json.dumps({"retcode": 0, "retmsg": "", "data": True}, ensure_ascii=False) + "\n\n"
+            yield "data:" + json.dumps({"code": 0, "message": "", "data": True}, ensure_ascii=False) + "\n\n"

         resp = Response(sse(), mimetype="text/event-stream")
         resp.headers.add_header("Cache-control", "no-cache")

@@ -289,12 +290,12 @@ def completion():
             API4ConversationService.append_message(conv.id, conv.to_dict())
             rename_field(result)
             return get_json_result(data=result)

-    #******************For dialog******************
+    # ******************For dialog******************
     conv.message.append(msg[-1])
     e, dia = DialogService.get_by_id(conv.dialog_id)
     if not e:
-        return get_data_error_result(retmsg="Dialog not found!")
+        return get_data_error_result(message="Dialog not found!")
     del req["conversation_id"]
     del req["messages"]

@@ -309,14 +310,14 @@ def completion():
             for ans in chat(dia, msg, True, **req):
                 fillin_conv(ans)
                 rename_field(ans)
-                yield "data:" + json.dumps({"retcode": 0, "retmsg": "", "data": ans},
+                yield "data:" + json.dumps({"code": 0, "message": "", "data": ans},
                                            ensure_ascii=False) + "\n\n"
                 API4ConversationService.append_message(conv.id, conv.to_dict())
         except Exception as e:
-            yield "data:" + json.dumps({"retcode": 500, "retmsg": str(e),
+            yield "data:" + json.dumps({"code": 500, "message": str(e),
                                         "data": {"answer": "**ERROR**: " + str(e), "reference": []}},
                                        ensure_ascii=False) + "\n\n"
-        yield "data:" + json.dumps({"retcode": 0, "retmsg": "", "data": True}, ensure_ascii=False) + "\n\n"
+        yield "data:" + json.dumps({"code": 0, "message": "", "data": True}, ensure_ascii=False) + "\n\n"

     if req.get("stream", True):
         resp = Response(stream(), mimetype="text/event-stream")

@@ -325,7 +326,7 @@ def completion():
         resp.headers.add_header("X-Accel-Buffering", "no")
         resp.headers.add_header("Content-Type", "text/event-stream; charset=utf-8")
         return resp

     answer = None
     for ans in chat(dia, msg, **req):
         answer = ans

@@ -346,18 +347,18 @@ def get(conversation_id):
     objs = APIToken.query(token=token)
     if not objs:
         return get_json_result(
-            data=False, retmsg='Token is not valid!"', retcode=RetCode.AUTHENTICATION_ERROR)
+            data=False, message='Token is not valid!"', code=settings.RetCode.AUTHENTICATION_ERROR)

     try:
         e, conv = API4ConversationService.get_by_id(conversation_id)
         if not e:
-            return get_data_error_result(retmsg="Conversation not found!")
+            return get_data_error_result(message="Conversation not found!")

         conv = conv.to_dict()
         if token != APIToken.query(dialog_id=conv['dialog_id'])[0].token:
-            return get_json_result(data=False, retmsg='Token is not valid for this conversation_id!"',
-                                   retcode=RetCode.AUTHENTICATION_ERROR)
+            return get_json_result(data=False, message='Token is not valid for this conversation_id!"',
+                                   code=settings.RetCode.AUTHENTICATION_ERROR)

         for referenct_i in conv['reference']:
             if referenct_i is None or len(referenct_i) == 0:
                 continue

@@ -377,7 +378,7 @@ def upload():
     objs = APIToken.query(token=token)
     if not objs:
         return get_json_result(
-            data=False, retmsg='Token is not valid!"', retcode=RetCode.AUTHENTICATION_ERROR)
+            data=False, message='Token is not valid!"', code=settings.RetCode.AUTHENTICATION_ERROR)

     kb_name = request.form.get("kb_name").strip()
     tenant_id = objs[0].tenant_id

@@ -386,19 +387,19 @@ def upload():
         e, kb = KnowledgebaseService.get_by_name(kb_name, tenant_id)
         if not e:
             return get_data_error_result(
-                retmsg="Can't find this knowledgebase!")
+                message="Can't find this knowledgebase!")
         kb_id = kb.id
     except Exception as e:
         return server_error_response(e)

     if 'file' not in request.files:
         return get_json_result(
-            data=False, retmsg='No file part!', retcode=RetCode.ARGUMENT_ERROR)
+            data=False, message='No file part!', code=settings.RetCode.ARGUMENT_ERROR)

     file = request.files['file']
     if file.filename == '':
         return get_json_result(
-            data=False, retmsg='No file selected!', retcode=RetCode.ARGUMENT_ERROR)
+            data=False, message='No file selected!', code=settings.RetCode.ARGUMENT_ERROR)

     root_folder = FileService.get_root_folder(tenant_id)
     pf_id = root_folder["id"]

@@ -409,7 +410,7 @@ def upload():
     try:
         if DocumentService.get_doc_count(kb.tenant_id) >= int(os.environ.get('MAX_FILE_NUM_PER_USER', 8192)):
             return get_data_error_result(
-                retmsg="Exceed the maximum file number of a free user!")
+                message="Exceed the maximum file number of a free user!")

         filename = duplicate_name(
             DocumentService.query,

@@ -418,7 +419,7 @@ def upload():
         filetype = filename_type(filename)
         if not filetype:
             return get_data_error_result(
-                retmsg="This type of file has not been supported yet!")
+                message="This type of file has not been supported yet!")

         location = filename
         while STORAGE_IMPL.obj_exist(kb_id, location):

@@ -467,7 +468,7 @@ def upload():
         # if str(req["run"]) == TaskStatus.CANCEL.value:
         tenant_id = DocumentService.get_tenant_id(doc["id"])
         if not tenant_id:
-            return get_data_error_result(retmsg="Tenant not found!")
+            return get_data_error_result(message="Tenant not found!")

         # e, doc = DocumentService.get_by_id(doc["id"])
         TaskService.filter_delete([Task.doc_id == doc["id"]])

@@ -489,17 +490,17 @@ def upload_parse():
     objs = APIToken.query(token=token)
     if not objs:
         return get_json_result(
-            data=False, retmsg='Token is not valid!"', retcode=RetCode.AUTHENTICATION_ERROR)
+            data=False, message='Token is not valid!"', code=settings.RetCode.AUTHENTICATION_ERROR)

     if 'file' not in request.files:
         return get_json_result(
-            data=False, retmsg='No file part!', retcode=RetCode.ARGUMENT_ERROR)
+            data=False, message='No file part!', code=settings.RetCode.ARGUMENT_ERROR)

     file_objs = request.files.getlist('file')
     for file_obj in file_objs:
         if file_obj.filename == '':
             return get_json_result(
-                data=False, retmsg='No file selected!', retcode=RetCode.ARGUMENT_ERROR)
+                data=False, message='No file selected!', code=settings.RetCode.ARGUMENT_ERROR)

     doc_ids = doc_upload_and_parse(request.form.get("conversation_id"), file_objs, objs[0].tenant_id)
     return get_json_result(data=doc_ids)

@@ -512,7 +513,7 @@ def list_chunks():
     objs = APIToken.query(token=token)
     if not objs:
         return get_json_result(
-            data=False, retmsg='Token is not valid!"', retcode=RetCode.AUTHENTICATION_ERROR)
+            data=False, message='Token is not valid!"', code=settings.RetCode.AUTHENTICATION_ERROR)

     req = request.json

@@ -526,15 +527,16 @@ def list_chunks():
             doc_id = req['doc_id']
         else:
             return get_json_result(
-                data=False, retmsg="Can't find doc_name or doc_id"
+                data=False, message="Can't find doc_name or doc_id"
             )
+        kb_ids = KnowledgebaseService.get_kb_ids(tenant_id)

-        res = retrievaler.chunk_list(doc_id=doc_id, tenant_id=tenant_id)
+        res = settings.retrievaler.chunk_list(doc_id, tenant_id, kb_ids)
         res = [
             {
                 "content": res_item["content_with_weight"],
                 "doc_name": res_item["docnm_kwd"],
-                "img_id": res_item["img_id"]
+                "image_id": res_item["img_id"]
             } for res_item in res
         ]

@@ -551,7 +553,7 @@ def list_kb_docs():
     objs = APIToken.query(token=token)
     if not objs:
         return get_json_result(
-            data=False, retmsg='Token is not valid!"', retcode=RetCode.AUTHENTICATION_ERROR)
+            data=False, message='Token is not valid!"', code=settings.RetCode.AUTHENTICATION_ERROR)

     req = request.json
     tenant_id = objs[0].tenant_id

@@ -561,7 +563,7 @@ def list_kb_docs():
         e, kb = KnowledgebaseService.get_by_name(kb_name, tenant_id)
         if not e:
             return get_data_error_result(
-                retmsg="Can't find this knowledgebase!")
+                message="Can't find this knowledgebase!")
         kb_id = kb.id

     except Exception as e:

@@ -583,6 +585,7 @@ def list_kb_docs():
     except Exception as e:
         return server_error_response(e)


 @manager.route('/document/infos', methods=['POST'])
 @validate_request("doc_ids")
 def docinfos():

@@ -590,7 +593,7 @@ def docinfos():
     objs = APIToken.query(token=token)
     if not objs:
         return get_json_result(
-            data=False, retmsg='Token is not valid!"', retcode=RetCode.AUTHENTICATION_ERROR)
+            data=False, message='Token is not valid!"', code=settings.RetCode.AUTHENTICATION_ERROR)
     req = request.json
     doc_ids = req["doc_ids"]
     docs = DocumentService.get_by_ids(doc_ids)

@@ -604,7 +607,7 @@ def document_rm():
     objs = APIToken.query(token=token)
     if not objs:
         return get_json_result(
-            data=False, retmsg='Token is not valid!"', retcode=RetCode.AUTHENTICATION_ERROR)
+            data=False, message='Token is not valid!"', code=settings.RetCode.AUTHENTICATION_ERROR)

     tenant_id = objs[0].tenant_id
     req = request.json

@@ -616,7 +619,7 @@ def document_rm():

         if not doc_ids:
             return get_json_result(
-                data=False, retmsg="Can't find doc_names or doc_ids"
+                data=False, message="Can't find doc_names or doc_ids"
             )

     except Exception as e:

@@ -631,16 +634,16 @@ def document_rm():
         try:
             e, doc = DocumentService.get_by_id(doc_id)
             if not e:
-                return get_data_error_result(retmsg="Document not found!")
+                return get_data_error_result(message="Document not found!")
             tenant_id = DocumentService.get_tenant_id(doc_id)
             if not tenant_id:
-                return get_data_error_result(retmsg="Tenant not found!")
+                return get_data_error_result(message="Tenant not found!")

             b, n = File2DocumentService.get_storage_address(doc_id=doc_id)

             if not DocumentService.remove_document(doc, tenant_id):
                 return get_data_error_result(
-                    retmsg="Database error (Document removal)!")
+                    message="Database error (Document removal)!")

             f2d = File2DocumentService.get_by_document_id(doc_id)
             FileService.filter_delete([File.source_type == FileSource.KNOWLEDGEBASE, File.id == f2d[0].file_id])

@@ -651,7 +654,7 @@ def document_rm():
             errors += str(e)

     if errors:
-        return get_json_result(data=False, retmsg=errors, retcode=RetCode.SERVER_ERROR)
+        return get_json_result(data=False, message=errors, code=settings.RetCode.SERVER_ERROR)

     return get_json_result(data=True)

@@ -666,11 +669,11 @@ def completion_faq():
     objs = APIToken.query(token=token)
     if not objs:
         return get_json_result(
-            data=False, retmsg='Token is not valid!"', retcode=RetCode.AUTHENTICATION_ERROR)
+            data=False, message='Token is not valid!"', code=settings.RetCode.AUTHENTICATION_ERROR)

     e, conv = API4ConversationService.get_by_id(req["conversation_id"])
     if not e:
-        return get_data_error_result(retmsg="Conversation not found!")
+        return get_data_error_result(message="Conversation not found!")
     if "quote" not in req: req["quote"] = True

     msg = []

@@ -751,7 +754,7 @@ def completion_faq():
     conv.message.append(msg[-1])
     e, dia = DialogService.get_by_id(conv.dialog_id)
     if not e:
-        return get_data_error_result(retmsg="Dialog not found!")
+        return get_data_error_result(message="Dialog not found!")
     del req["conversation_id"]

     if not conv.reference:

@@ -803,10 +806,10 @@ def retrieval():
     objs = APIToken.query(token=token)
     if not objs:
         return get_json_result(
-            data=False, retmsg='Token is not valid!"', retcode=RetCode.AUTHENTICATION_ERROR)
+            data=False, message='Token is not valid!"', code=settings.RetCode.AUTHENTICATION_ERROR)

     req = request.json
-    kb_ids = req.get("kb_id",[])
+    kb_ids = req.get("kb_id", [])
     doc_ids = req.get("doc_ids", [])
     question = req.get("question")
     page = int(req.get("page", 1))

@@ -820,26 +823,26 @@ def retrieval():
         embd_nms = list(set([kb.embd_id for kb in kbs]))
         if len(embd_nms) != 1:
             return get_json_result(
-                data=False, retmsg='Knowledge bases use different embedding models or does not exist."', retcode=RetCode.AUTHENTICATION_ERROR)
+                data=False, message='Knowledge bases use different embedding models or does not exist."',
+                code=settings.RetCode.AUTHENTICATION_ERROR)

         embd_mdl = TenantLLMService.model_instance(
             kbs[0].tenant_id, LLMType.EMBEDDING.value, llm_name=kbs[0].embd_id)
         rerank_mdl = None
         if req.get("rerank_id"):
             rerank_mdl = TenantLLMService.model_instance(
-                kbs[0].tenant_id, LLMType.RERANK.value, llm_name=req["rerank_id"])
+                kbs[0].tenant_id, LLMType.RERANK.value, llm_name=req["rerank_id"])
         if req.get("keyword", False):
             chat_mdl = TenantLLMService.model_instance(kbs[0].tenant_id, LLMType.CHAT)
             question += keyword_extraction(chat_mdl, question)
-        ranks = retrievaler.retrieval(question, embd_mdl, kbs[0].tenant_id, kb_ids, page, size,
-                                      similarity_threshold, vector_similarity_weight, top,
-                                      doc_ids, rerank_mdl=rerank_mdl)
+        ranks = settings.retrievaler.retrieval(question, embd_mdl, kbs[0].tenant_id, kb_ids, page, size,
+                                               similarity_threshold, vector_similarity_weight, top,
+                                               doc_ids, rerank_mdl=rerank_mdl)
         for c in ranks["chunks"]:
-            if "vector" in c:
-                del c["vector"]
+            c.pop("vector", None)
         return get_json_result(data=ranks)
     except Exception as e:
         if str(e).find("not_found") > 0:
-            return get_json_result(data=False, retmsg=f'No chunk found! Check the chunk status please!',
-                                   retcode=RetCode.DATA_ERROR)
+            return get_json_result(data=False, message='No chunk found! Check the chunk status please!',
+                                   code=settings.RetCode.DATA_ERROR)
         return server_error_response(e)
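Most of the changes in this app are one mechanical rename of the response envelope: retmsg becomes message, retcode becomes code, and RetCode is reached through the settings module instead of being imported directly. Clients written against the old keys will see empty fields after upgrading; as a sketch, a small tolerant parser can accept both shapes during migration:

import json

def parse_envelope(payload: str):
    """Accept both the old and the new response envelope.

    Old: {"retcode": 0, "retmsg": "", "data": ...}
    New: {"code": 0, "message": "", "data": ...}
    """
    body = json.loads(payload)
    code = body.get("code", body.get("retcode", -1))
    message = body.get("message", body.get("retmsg", ""))
    return code, message, body.get("data")

code, message, data = parse_envelope('{"code": 0, "message": "", "data": true}')
assert code == 0 and data is True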
@@ -13,7 +13,9 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 #
+import logging
 import json
+import traceback
 from functools import partial
 from flask import request, Response
 from flask_login import login_required, current_user

@@ -46,8 +48,8 @@ def rm():
     for i in request.json["canvas_ids"]:
         if not UserCanvasService.query(user_id=current_user.id,id=i):
             return get_json_result(
-                data=False, retmsg=f'Only owner of canvas authorized for this operation.',
-                retcode=RetCode.OPERATING_ERROR)
+                data=False, message='Only owner of canvas authorized for this operation.',
+                code=RetCode.OPERATING_ERROR)
         UserCanvasService.delete_by_id(i)
     return get_json_result(data=True)

@@ -66,12 +68,12 @@ def save():
             return server_error_response(ValueError("Duplicated title."))
         req["id"] = get_uuid()
         if not UserCanvasService.save(**req):
-            return get_data_error_result(retmsg="Fail to save canvas.")
+            return get_data_error_result(message="Fail to save canvas.")
     else:
         if not UserCanvasService.query(user_id=current_user.id, id=req["id"]):
             return get_json_result(
-                data=False, retmsg=f'Only owner of canvas authorized for this operation.',
-                retcode=RetCode.OPERATING_ERROR)
+                data=False, message='Only owner of canvas authorized for this operation.',
+                code=RetCode.OPERATING_ERROR)
         UserCanvasService.update_by_id(req["id"], req)
     return get_json_result(data=req)

@@ -81,7 +83,7 @@ def save():
 def get(canvas_id):
     e, c = UserCanvasService.get_by_id(canvas_id)
     if not e:
-        return get_data_error_result(retmsg="canvas not found.")
+        return get_data_error_result(message="canvas not found.")
     return get_json_result(data=c.to_dict())


@@ -93,11 +95,11 @@ def run():
     stream = req.get("stream", True)
     e, cvs = UserCanvasService.get_by_id(req["id"])
     if not e:
-        return get_data_error_result(retmsg="canvas not found.")
+        return get_data_error_result(message="canvas not found.")
     if not UserCanvasService.query(user_id=current_user.id, id=req["id"]):
         return get_json_result(
-            data=False, retmsg=f'Only owner of canvas authorized for this operation.',
-            retcode=RetCode.OPERATING_ERROR)
+            data=False, message='Only owner of canvas authorized for this operation.',
+            code=RetCode.OPERATING_ERROR)

     if not isinstance(cvs.dsl, str):
         cvs.dsl = json.dumps(cvs.dsl, ensure_ascii=False)

@@ -108,40 +110,40 @@ def run():
         canvas = Canvas(cvs.dsl, current_user.id)
         if "message" in req:
             canvas.messages.append({"role": "user", "content": req["message"], "id": message_id})
-            if len([m for m in canvas.messages if m["role"] == "user"]) > 1:
-                #ten = TenantService.get_info_by(current_user.id)[0]
-                #req["message"] = full_question(ten["tenant_id"], ten["llm_id"], canvas.messages)
-                pass
             canvas.add_user_input(req["message"])
-        answer = canvas.run(stream=stream)
-        print(canvas)
     except Exception as e:
         return server_error_response(e)

-    assert answer is not None, "Nothing. Is it over?"

     if stream:
-        assert isinstance(answer, partial), "Nothing. Is it over?"

         def sse():
             nonlocal answer, cvs
             try:
-                for ans in answer():
+                for ans in canvas.run(stream=True):
+                    if ans.get("running_status"):
+                        yield "data:" + json.dumps({"code": 0, "message": "",
+                                                    "data": {"answer": ans["content"],
+                                                             "running_status": True}},
+                                                   ensure_ascii=False) + "\n\n"
+                        continue
                     for k in ans.keys():
                         final_ans[k] = ans[k]
                     ans = {"answer": ans["content"], "reference": ans.get("reference", [])}
-                    yield "data:" + json.dumps({"retcode": 0, "retmsg": "", "data": ans}, ensure_ascii=False) + "\n\n"
+                    yield "data:" + json.dumps({"code": 0, "message": "", "data": ans}, ensure_ascii=False) + "\n\n"

                 canvas.messages.append({"role": "assistant", "content": final_ans["content"], "id": message_id})
                 canvas.history.append(("assistant", final_ans["content"]))
                 if final_ans.get("reference"):
                     canvas.reference.append(final_ans["reference"])
                 cvs.dsl = json.loads(str(canvas))
                 UserCanvasService.update_by_id(req["id"], cvs.to_dict())
             except Exception as e:
-                yield "data:" + json.dumps({"retcode": 500, "retmsg": str(e),
+                cvs.dsl = json.loads(str(canvas))
+                UserCanvasService.update_by_id(req["id"], cvs.to_dict())
+                traceback.print_exc()
+                yield "data:" + json.dumps({"code": 500, "message": str(e),
                                             "data": {"answer": "**ERROR**: " + str(e), "reference": []}},
                                            ensure_ascii=False) + "\n\n"
-            yield "data:" + json.dumps({"retcode": 0, "retmsg": "", "data": True}, ensure_ascii=False) + "\n\n"
+            yield "data:" + json.dumps({"code": 0, "message": "", "data": True}, ensure_ascii=False) + "\n\n"

         resp = Response(sse(), mimetype="text/event-stream")
         resp.headers.add_header("Cache-control", "no-cache")

@@ -150,13 +152,15 @@ def run():
         resp.headers.add_header("Content-Type", "text/event-stream; charset=utf-8")
         return resp

-    final_ans["content"] = "\n".join(answer["content"]) if "content" in answer else ""
-    canvas.messages.append({"role": "assistant", "content": final_ans["content"], "id": message_id})
-    if final_ans.get("reference"):
-        canvas.reference.append(final_ans["reference"])
-    cvs.dsl = json.loads(str(canvas))
-    UserCanvasService.update_by_id(req["id"], cvs.to_dict())
-    return get_json_result(data={"answer": final_ans["content"], "reference": final_ans.get("reference", [])})
+    for answer in canvas.run(stream=False):
+        if answer.get("running_status"): continue
+        final_ans["content"] = "\n".join(answer["content"]) if "content" in answer else ""
+        canvas.messages.append({"role": "assistant", "content": final_ans["content"], "id": message_id})
+        if final_ans.get("reference"):
+            canvas.reference.append(final_ans["reference"])
+        cvs.dsl = json.loads(str(canvas))
+        UserCanvasService.update_by_id(req["id"], cvs.to_dict())
+        return get_json_result(data={"answer": final_ans["content"], "reference": final_ans.get("reference", [])})


 @manager.route('/reset', methods=['POST'])

@@ -167,11 +171,11 @@ def reset():
     try:
         e, user_canvas = UserCanvasService.get_by_id(req["id"])
         if not e:
-            return get_data_error_result(retmsg="canvas not found.")
+            return get_data_error_result(message="canvas not found.")
         if not UserCanvasService.query(user_id=current_user.id, id=req["id"]):
             return get_json_result(
-                data=False, retmsg=f'Only owner of canvas authorized for this operation.',
-                retcode=RetCode.OPERATING_ERROR)
+                data=False, message='Only owner of canvas authorized for this operation.',
+                code=RetCode.OPERATING_ERROR)

         canvas = Canvas(json.dumps(user_canvas.dsl), current_user.id)
         canvas.reset()
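The reworked sse() generator above drives canvas.run(stream=True) directly: it emits intermediate frames tagged running_status, then answer frames, and finally a {"code": 0, ..., "data": true} sentinel, with errors arriving as code 500 frames after the canvas state has been persisted. A client-side sketch under the assumption that the canvas blueprint is mounted at /v1/canvas, which follows from the url_prefix rule earlier in this diff but is not stated here:

import json
import requests

url = "http://127.0.0.1:9380/v1/canvas/run"    # assumed path, adjust to your deployment
headers = {"Authorization": "<access-token>"}  # placeholder

payload = {"id": "<canvas-id>", "message": "hi", "stream": True}
with requests.post(url, json=payload, headers=headers, stream=True) as resp:
    for line in resp.iter_lines(decode_unicode=True):
        if not line or not line.startswith("data:"):
            continue                           # each SSE event is a single "data:..." line
        event = json.loads(line[len("data:"):])
        if event["data"] is True:              # end-of-stream sentinel
            break
        if event["code"] != 0:                 # error frame carries the message
            raise RuntimeError(event["message"])
        if event["data"].get("running_status"):
            continue                           # progress ping, not an answer chunk
        print(event["data"]["answer"])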
@@ -15,16 +15,13 @@
 #
 import datetime
 import json
 import traceback

 from flask import request
 from flask_login import login_required, current_user
-from elasticsearch_dsl import Q

 from api.db.services.dialog_service import keyword_extraction
 from rag.app.qa import rmPrefix, beAdoc
 from rag.nlp import search, rag_tokenizer
-from rag.utils.es_conn import ELASTICSEARCH
 from rag.utils import rmSpace
 from api.db import LLMType, ParserType
 from api.db.services.knowledgebase_service import KnowledgebaseService

@@ -32,7 +29,7 @@ from api.db.services.llm_service import LLMBundle
 from api.db.services.user_service import UserTenantService
 from api.utils.api_utils import server_error_response, get_data_error_result, validate_request
 from api.db.services.document_service import DocumentService
-from api.settings import RetCode, retrievaler, kg_retrievaler
+from api import settings
 from api.utils.api_utils import get_json_result
 import hashlib
 import re

@@ -50,16 +47,17 @@ def list_chunk():
     try:
         tenant_id = DocumentService.get_tenant_id(req["doc_id"])
         if not tenant_id:
-            return get_data_error_result(retmsg="Tenant not found!")
+            return get_data_error_result(message="Tenant not found!")
         e, doc = DocumentService.get_by_id(doc_id)
         if not e:
-            return get_data_error_result(retmsg="Document not found!")
+            return get_data_error_result(message="Document not found!")
+        kb_ids = KnowledgebaseService.get_kb_ids(tenant_id)
         query = {
             "doc_ids": [doc_id], "page": page, "size": size, "question": question, "sort": True
         }
         if "available_int" in req:
             query["available_int"] = int(req["available_int"])
-        sres = retrievaler.search(query, search.index_name(tenant_id), highlight=True)
+        sres = settings.retrievaler.search(query, search.index_name(tenant_id), kb_ids, highlight=True)
         res = {"total": sres.total, "chunks": [], "doc": doc.to_dict()}
         for id in sres.ids:
             d = {

@@ -70,22 +68,18 @@ def list_chunk():
                 "doc_id": sres.field[id]["doc_id"],
                 "docnm_kwd": sres.field[id]["docnm_kwd"],
                 "important_kwd": sres.field[id].get("important_kwd", []),
-                "img_id": sres.field[id].get("img_id", ""),
+                "image_id": sres.field[id].get("img_id", ""),
                 "available_int": sres.field[id].get("available_int", 1),
-                "positions": sres.field[id].get("position_int", "").split("\t")
+                "positions": json.loads(sres.field[id].get("position_list", "[]")),
             }
-            if len(d["positions"]) % 5 == 0:
-                poss = []
-                for i in range(0, len(d["positions"]), 5):
-                    poss.append([float(d["positions"][i]), float(d["positions"][i + 1]), float(d["positions"][i + 2]),
-                                 float(d["positions"][i + 3]), float(d["positions"][i + 4])])
-                d["positions"] = poss
+            assert isinstance(d["positions"], list)
+            assert len(d["positions"]) == 0 or (isinstance(d["positions"][0], list) and len(d["positions"][0]) == 5)
             res["chunks"].append(d)
         return get_json_result(data=res)
     except Exception as e:
         if str(e).find("not_found") > 0:
-            return get_json_result(data=False, retmsg=f'No chunk found!',
-                                   retcode=RetCode.DATA_ERROR)
+            return get_json_result(data=False, message='No chunk found!',
+                                   code=settings.RetCode.DATA_ERROR)
         return server_error_response(e)


@@ -96,27 +90,25 @@ def get():
     try:
         tenants = UserTenantService.query(user_id=current_user.id)
         if not tenants:
-            return get_data_error_result(retmsg="Tenant not found!")
-        res = ELASTICSEARCH.get(
-            chunk_id, search.index_name(
-                tenants[0].tenant_id))
-        if not res.get("found"):
+            return get_data_error_result(message="Tenant not found!")
+        tenant_id = tenants[0].tenant_id
+
+        kb_ids = KnowledgebaseService.get_kb_ids(tenant_id)
+        chunk = settings.docStoreConn.get(chunk_id, search.index_name(tenant_id), kb_ids)
+        if chunk is None:
             return server_error_response("Chunk not found")
-        id = res["_id"]
-        res = res["_source"]
-        res["chunk_id"] = id
         k = []
-        for n in res.keys():
+        for n in chunk.keys():
             if re.search(r"(_vec$|_sm_|_tks|_ltks)", n):
                 k.append(n)
         for n in k:
-            del res[n]
+            del chunk[n]

-        return get_json_result(data=res)
+        return get_json_result(data=chunk)
     except Exception as e:
         if str(e).find("NotFoundError") >= 0:
-            return get_json_result(data=False, retmsg=f'Chunk not found!',
-                                   retcode=RetCode.DATA_ERROR)
+            return get_json_result(data=False, message='Chunk not found!',
+                                   code=settings.RetCode.DATA_ERROR)
         return server_error_response(e)


@@ -139,14 +131,14 @@ def set():
     try:
         tenant_id = DocumentService.get_tenant_id(req["doc_id"])
         if not tenant_id:
-            return get_data_error_result(retmsg="Tenant not found!")
+            return get_data_error_result(message="Tenant not found!")

         embd_id = DocumentService.get_embd_id(req["doc_id"])
         embd_mdl = LLMBundle(tenant_id, LLMType.EMBEDDING, embd_id)

         e, doc = DocumentService.get_by_id(req["doc_id"])
         if not e:
-            return get_data_error_result(retmsg="Document not found!")
+            return get_data_error_result(message="Document not found!")

         if doc.parser_id == ParserType.QA:
             arr = [

@@ -155,7 +147,7 @@ def set():
                 req["content_with_weight"]) if len(t) > 1]
             if len(arr) != 2:
                 return get_data_error_result(
-                    retmsg="Q&A must be separated by TAB/ENTER key.")
+                    message="Q&A must be separated by TAB/ENTER key.")
             q, a = rmPrefix(arr[0]), rmPrefix(arr[1])
             d = beAdoc(d, arr[0], arr[1], not any(
                 [rag_tokenizer.is_chinese(t) for t in q + a]))

@@ -163,7 +155,7 @@ def set():
         v, c = embd_mdl.encode([doc.name, req["content_with_weight"]])
         v = 0.1 * v[0] + 0.9 * v[1] if doc.parser_id != ParserType.QA else v[1]
         d["q_%d_vec" % len(v)] = v.tolist()
-        ELASTICSEARCH.upsert([d], search.index_name(tenant_id))
+        settings.docStoreConn.insert([d], search.index_name(tenant_id), doc.kb_id)
         return get_json_result(data=True)
     except Exception as e:
         return server_error_response(e)

@@ -175,12 +167,15 @@ def set():
 def switch():
     req = request.json
     try:
-        tenant_id = DocumentService.get_tenant_id(req["doc_id"])
-        if not tenant_id:
-            return get_data_error_result(retmsg="Tenant not found!")
-        if not ELASTICSEARCH.upsert([{"id": i, "available_int": int(req["available_int"])} for i in req["chunk_ids"]],
-                                    search.index_name(tenant_id)):
-            return get_data_error_result(retmsg="Index updating failure")
+        e, doc = DocumentService.get_by_id(req["doc_id"])
+        if not e:
+            return get_data_error_result(message="Document not found!")
+        for cid in req["chunk_ids"]:
+            if not settings.docStoreConn.update({"id": cid},
+                                                {"available_int": int(req["available_int"])},
+                                                search.index_name(DocumentService.get_tenant_id(req["doc_id"])),
+                                                doc.kb_id):
+                return get_data_error_result(message="Index updating failure")
         return get_json_result(data=True)
     except Exception as e:
         return server_error_response(e)

@@ -192,12 +187,11 @@ def switch():
 def rm():
     req = request.json
     try:
-        if not ELASTICSEARCH.deleteByQuery(
-                Q("ids", values=req["chunk_ids"]), search.index_name(current_user.id)):
-            return get_data_error_result(retmsg="Index updating failure")
         e, doc = DocumentService.get_by_id(req["doc_id"])
         if not e:
-            return get_data_error_result(retmsg="Document not found!")
+            return get_data_error_result(message="Document not found!")
+        if not settings.docStoreConn.delete({"id": req["chunk_ids"]}, search.index_name(current_user.id), doc.kb_id):
+            return get_data_error_result(message="Index updating failure")
         deleted_chunk_ids = req["chunk_ids"]
         chunk_number = len(deleted_chunk_ids)
         DocumentService.decrement_chunk_num(doc.id, doc.kb_id, 1, chunk_number, 0)

@@ -225,14 +219,14 @@ def create():
     try:
         e, doc = DocumentService.get_by_id(req["doc_id"])
         if not e:
-            return get_data_error_result(retmsg="Document not found!")
+            return get_data_error_result(message="Document not found!")
         d["kb_id"] = [doc.kb_id]
         d["docnm_kwd"] = doc.name
         d["doc_id"] = doc.id

         tenant_id = DocumentService.get_tenant_id(req["doc_id"])
         if not tenant_id:
-            return get_data_error_result(retmsg="Tenant not found!")
+            return get_data_error_result(message="Tenant not found!")

         embd_id = DocumentService.get_embd_id(req["doc_id"])
         embd_mdl = LLMBundle(tenant_id, LLMType.EMBEDDING.value, embd_id)

@@ -240,7 +234,7 @@ def create():
         v, c = embd_mdl.encode([doc.name, req["content_with_weight"]])
         v = 0.1 * v[0] + 0.9 * v[1]
         d["q_%d_vec" % len(v)] = v.tolist()
-        ELASTICSEARCH.upsert([d], search.index_name(tenant_id))
+        settings.docStoreConn.insert([d], search.index_name(tenant_id), doc.kb_id)

         DocumentService.increment_chunk_num(
             doc.id, doc.kb_id, c, 1, 0)

@@ -257,28 +251,31 @@ def retrieval_test():
     page = int(req.get("page", 1))
     size = int(req.get("size", 30))
     question = req["question"]
-    kb_id = req["kb_id"]
-    if isinstance(kb_id, str): kb_id = [kb_id]
+    kb_ids = req["kb_id"]
+    if isinstance(kb_ids, str):
+        kb_ids = [kb_ids]
     doc_ids = req.get("doc_ids", [])
     similarity_threshold = float(req.get("similarity_threshold", 0.0))
     vector_similarity_weight = float(req.get("vector_similarity_weight", 0.3))
     top = int(req.get("top_k", 1024))
+    tenant_ids = []

     try:
         tenants = UserTenantService.query(user_id=current_user.id)
-        for kid in kb_id:
+        for kb_id in kb_ids:
             for tenant in tenants:
                 if KnowledgebaseService.query(
-                        tenant_id=tenant.tenant_id, id=kid):
+                        tenant_id=tenant.tenant_id, id=kb_id):
+                    tenant_ids.append(tenant.tenant_id)
                     break
             else:
                 return get_json_result(
-                    data=False, retmsg=f'Only owner of knowledgebase authorized for this operation.',
-                    retcode=RetCode.OPERATING_ERROR)
+                    data=False, message='Only owner of knowledgebase authorized for this operation.',
+                    code=settings.RetCode.OPERATING_ERROR)

-        e, kb = KnowledgebaseService.get_by_id(kb_id[0])
+        e, kb = KnowledgebaseService.get_by_id(kb_ids[0])
         if not e:
-            return get_data_error_result(retmsg="Knowledgebase not found!")
+            return get_data_error_result(message="Knowledgebase not found!")

         embd_mdl = LLMBundle(kb.tenant_id, LLMType.EMBEDDING.value, llm_name=kb.embd_id)

@@ -290,19 +287,18 @@ def retrieval_test():
             chat_mdl = LLMBundle(kb.tenant_id, LLMType.CHAT)
             question += keyword_extraction(chat_mdl, question)

-        retr = retrievaler if kb.parser_id != ParserType.KG else kg_retrievaler
-        ranks = retr.retrieval(question, embd_mdl, kb.tenant_id, kb_id, page, size,
+        retr = settings.retrievaler if kb.parser_id != ParserType.KG else settings.kg_retrievaler
+        ranks = retr.retrieval(question, embd_mdl, tenant_ids, kb_ids, page, size,
                                similarity_threshold, vector_similarity_weight, top,
                                doc_ids, rerank_mdl=rerank_mdl, highlight=req.get("highlight"))
         for c in ranks["chunks"]:
-            if "vector" in c:
-                del c["vector"]
+            c.pop("vector", None)

         return get_json_result(data=ranks)
     except Exception as e:
         if str(e).find("not_found") > 0:
-            return get_json_result(data=False, retmsg=f'No chunk found! Check the chunk status please!',
-                                   retcode=RetCode.DATA_ERROR)
+            return get_json_result(data=False, message='No chunk found! Check the chunk status please!',
+                                   code=settings.RetCode.DATA_ERROR)
         return server_error_response(e)


@@ -310,18 +306,19 @@ def retrieval_test():
 @login_required
 def knowledge_graph():
     doc_id = request.args["doc_id"]
+    tenant_id = DocumentService.get_tenant_id(doc_id)
+    kb_ids = KnowledgebaseService.get_kb_ids(tenant_id)
     req = {
-        "doc_ids":[doc_id],
+        "doc_ids": [doc_id],
         "knowledge_graph_kwd": ["graph", "mind_map"]
     }
-    tenant_id = DocumentService.get_tenant_id(doc_id)
-    sres = retrievaler.search(req, search.index_name(tenant_id))
+    sres = settings.retrievaler.search(req, search.index_name(tenant_id), kb_ids)
     obj = {"graph": {}, "mind_map": {}}
     for id in sres.ids[:2]:
         ty = sres.field[id]["knowledge_graph_kwd"]
         try:
             content_json = json.loads(sres.field[id]["content_with_weight"])
-        except Exception as e:
+        except Exception:
             continue

         if ty == 'mind_map':

@@ -344,4 +341,3 @@ def knowledge_graph():
             obj[ty] = content_json

     return get_json_result(data=obj)
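The positions handling in list_chunk() changes shape here: the old code split a tab-separated position_int string into a flat list and regrouped it five numbers per box, while the new code reads position_list, which already holds JSON for a list of five-element lists; the added asserts only verify that shape. A small sketch of the equivalence, with made-up coordinates (the interpretation of the five numbers is illustrative, not defined in this diff):

import json

# Old storage: flat, tab-separated, five numbers per bounding box.
old_position_int = "1\t10.0\t250.0\t30.0\t60.0"
flat = old_position_int.split("\t")
old_positions = [[float(x) for x in flat[i:i + 5]] for i in range(0, len(flat), 5)]

# New storage: the same boxes, serialized as JSON up front.
new_position_list = json.dumps(old_positions)
assert json.loads(new_position_list) == [[1.0, 10.0, 250.0, 30.0, 60.0]]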
@@ -25,7 +25,7 @@ from api.db import LLMType
 from api.db.services.dialog_service import DialogService, ConversationService, chat, ask
 from api.db.services.knowledgebase_service import KnowledgebaseService
 from api.db.services.llm_service import LLMBundle, TenantService, TenantLLMService
-from api.settings import RetCode, retrievaler
+from api import settings
 from api.utils.api_utils import get_json_result
 from api.utils.api_utils import server_error_response, get_data_error_result, validate_request
 from graphrag.mind_map_extractor import MindMapExtractor

@@ -42,11 +42,11 @@ def set_conversation():
         del req["conversation_id"]
         try:
             if not ConversationService.update_by_id(conv_id, req):
-                return get_data_error_result(retmsg="Conversation not found!")
+                return get_data_error_result(message="Conversation not found!")
             e, conv = ConversationService.get_by_id(conv_id)
             if not e:
                 return get_data_error_result(
-                    retmsg="Fail to update a conversation!")
+                    message="Fail to update a conversation!")
             conv = conv.to_dict()
             return get_json_result(data=conv)
         except Exception as e:

@@ -55,7 +55,7 @@ def set_conversation():
     try:
         e, dia = DialogService.get_by_id(req["dialog_id"])
         if not e:
-            return get_data_error_result(retmsg="Dialog not found")
+            return get_data_error_result(message="Dialog not found")
         conv = {
             "id": conv_id,
             "dialog_id": req["dialog_id"],

@@ -65,7 +65,7 @@ def set_conversation():
         ConversationService.save(**conv)
         e, conv = ConversationService.get_by_id(conv["id"])
         if not e:
-            return get_data_error_result(retmsg="Fail to new a conversation!")
+            return get_data_error_result(message="Fail to new a conversation!")
         conv = conv.to_dict()
         return get_json_result(data=conv)
     except Exception as e:

@@ -79,15 +79,15 @@ def get():
     try:
         e, conv = ConversationService.get_by_id(conv_id)
         if not e:
-            return get_data_error_result(retmsg="Conversation not found!")
+            return get_data_error_result(message="Conversation not found!")
         tenants = UserTenantService.query(user_id=current_user.id)
         for tenant in tenants:
             if DialogService.query(tenant_id=tenant.tenant_id, id=conv.dialog_id):
                 break
         else:
             return get_json_result(
-                data=False, retmsg=f'Only owner of conversation authorized for this operation.',
-                retcode=RetCode.OPERATING_ERROR)
+                data=False, message='Only owner of conversation authorized for this operation.',
+                code=settings.RetCode.OPERATING_ERROR)
         conv = conv.to_dict()
         return get_json_result(data=conv)
     except Exception as e:

@@ -102,15 +102,15 @@ def rm():
         for cid in conv_ids:
             exist, conv = ConversationService.get_by_id(cid)
             if not exist:
-                return get_data_error_result(retmsg="Conversation not found!")
+                return get_data_error_result(message="Conversation not found!")
             tenants = UserTenantService.query(user_id=current_user.id)
             for tenant in tenants:
                 if DialogService.query(tenant_id=tenant.tenant_id, id=conv.dialog_id):
                     break
             else:
                 return get_json_result(
-                    data=False, retmsg=f'Only owner of conversation authorized for this operation.',
-                    retcode=RetCode.OPERATING_ERROR)
+                    data=False, message='Only owner of conversation authorized for this operation.',
+                    code=settings.RetCode.OPERATING_ERROR)
             ConversationService.delete_by_id(cid)
         return get_json_result(data=True)
     except Exception as e:

@@ -124,8 +124,8 @@ def list_convsersation():
     try:
         if not DialogService.query(tenant_id=current_user.id, id=dialog_id):
             return get_json_result(
-                data=False, retmsg=f'Only owner of dialog authorized for this operation.',
-                retcode=RetCode.OPERATING_ERROR)
+                data=False, message='Only owner of dialog authorized for this operation.',
+                code=settings.RetCode.OPERATING_ERROR)
         convs = ConversationService.query(
             dialog_id=dialog_id,
             order_by=ConversationService.model.create_time,

@@ -152,11 +152,11 @@ def completion():
     try:
         e, conv = ConversationService.get_by_id(req["conversation_id"])
         if not e:
-            return get_data_error_result(retmsg="Conversation not found!")
+            return get_data_error_result(message="Conversation not found!")
         conv.message = deepcopy(req["messages"])
         e, dia = DialogService.get_by_id(conv.dialog_id)
         if not e:
-            return get_data_error_result(retmsg="Dialog not found!")
+            return get_data_error_result(message="Dialog not found!")
         del req["conversation_id"]
         del req["messages"]

@@ -180,14 +180,14 @@ def completion():
         try:
             for ans in chat(dia, msg, True, **req):
                 fillin_conv(ans)
-                yield "data:" + json.dumps({"retcode": 0, "retmsg": "", "data": ans}, ensure_ascii=False) + "\n\n"
+                yield "data:" + json.dumps({"code": 0, "message": "", "data": ans}, ensure_ascii=False) + "\n\n"
             ConversationService.update_by_id(conv.id, conv.to_dict())
         except Exception as e:
             traceback.print_exc()
-            yield "data:" + json.dumps({"retcode": 500, "retmsg": str(e),
+            yield "data:" + json.dumps({"code": 500, "message": str(e),
                                         "data": {"answer": "**ERROR**: " + str(e), "reference": []}},
                                        ensure_ascii=False) + "\n\n"
-        yield "data:" + json.dumps({"retcode": 0, "retmsg": "", "data": True}, ensure_ascii=False) + "\n\n"
+        yield "data:" + json.dumps({"code": 0, "message": "", "data": True}, ensure_ascii=False) + "\n\n"

     if req.get("stream", True):
         resp = Response(stream(), mimetype="text/event-stream")

@@ -217,11 +217,11 @@ def tts():

     tenants = TenantService.get_info_by(current_user.id)
     if not tenants:
-        return get_data_error_result(retmsg="Tenant not found!")
+        return get_data_error_result(message="Tenant not found!")

     tts_id = tenants[0]["tts_id"]
     if not tts_id:
-        return get_data_error_result(retmsg="No default TTS model is set")
+        return get_data_error_result(message="No default TTS model is set")

     tts_mdl = LLMBundle(tenants[0]["tenant_id"], LLMType.TTS, tts_id)

@@ -231,7 +231,7 @@ def tts():
             for chunk in tts_mdl.tts(txt):
                 yield chunk
         except Exception as e:
-            yield ("data:" + json.dumps({"retcode": 500, "retmsg": str(e),
+            yield ("data:" + json.dumps({"code": 500, "message": str(e),
                                          "data": {"answer": "**ERROR**: " + str(e)}},
                                         ensure_ascii=False)).encode('utf-8')

@@ -250,7 +250,7 @@ def delete_msg():
     req = request.json
     e, conv = ConversationService.get_by_id(req["conversation_id"])
     if not e:
-        return get_data_error_result(retmsg="Conversation not found!")
+        return get_data_error_result(message="Conversation not found!")

     conv = conv.to_dict()
     for i, msg in enumerate(conv["message"]):

@@ -273,7 +273,7 @@ def thumbup():
     req = request.json
     e, conv = ConversationService.get_by_id(req["conversation_id"])
     if not e:
-        return get_data_error_result(retmsg="Conversation not found!")
+        return get_data_error_result(message="Conversation not found!")
     up_down = req.get("set")
     feedback = req.get("feedback", "")
     conv = conv.to_dict()

@@ -297,16 +297,17 @@ def thumbup():
 def ask_about():
     req = request.json
     uid = current_user.id

     def stream():
         nonlocal req, uid
         try:
             for ans in ask(req["question"], req["kb_ids"], uid):
-                yield "data:" + json.dumps({"retcode": 0, "retmsg": "", "data": ans}, ensure_ascii=False) + "\n\n"
+                yield "data:" + json.dumps({"code": 0, "message": "", "data": ans}, ensure_ascii=False) + "\n\n"
         except Exception as e:
-            yield "data:" + json.dumps({"retcode": 500, "retmsg": str(e),
+            yield "data:" + json.dumps({"code": 500, "message": str(e),
                                         "data": {"answer": "**ERROR**: " + str(e), "reference": []}},
                                        ensure_ascii=False) + "\n\n"
-        yield "data:" + json.dumps({"retcode": 0, "retmsg": "", "data": True}, ensure_ascii=False) + "\n\n"
+        yield "data:" + json.dumps({"code": 0, "message": "", "data": True}, ensure_ascii=False) + "\n\n"

     resp = Response(stream(), mimetype="text/event-stream")
     resp.headers.add_header("Cache-control", "no-cache")

@@ -324,13 +325,13 @@ def mindmap():
     kb_ids = req["kb_ids"]
     e, kb = KnowledgebaseService.get_by_id(kb_ids[0])
     if not e:
-        return get_data_error_result(retmsg="Knowledgebase not found!")
+        return get_data_error_result(message="Knowledgebase not found!")

     embd_mdl = TenantLLMService.model_instance(
         kb.tenant_id, LLMType.EMBEDDING.value, llm_name=kb.embd_id)
     chat_mdl = LLMBundle(current_user.id, LLMType.CHAT)
-    ranks = retrievaler.retrieval(req["question"], embd_mdl, kb.tenant_id, kb_ids, 1, 12,
-                                  0.3, 0.3, aggs=False)
+    ranks = settings.retrievaler.retrieval(req["question"], embd_mdl, kb.tenant_id, kb_ids, 1, 12,
+                                           0.3, 0.3, aggs=False)
     mindmap = MindMapExtractor(chat_mdl)
     mind_map = mindmap([c["content_with_weight"] for c in ranks["chunks"]]).output
     if "error" in mind_map:
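One subtlety in tts() above: on success it yields raw audio chunks, but on failure it yields a single UTF-8-encoded "data:{...}" frame using the renamed code/message keys, so a consumer has to sniff each chunk. A sketch under two assumptions that are not in this diff: the route is mounted under the conversation blueprint, and the request field carrying the text is named "text":

import json
import requests

url = "http://127.0.0.1:9380/v1/conversation/tts"  # assumed path, not shown in the diff
headers = {"Authorization": "<access-token>"}      # placeholder

audio = bytearray()
with requests.post(url, json={"text": "hello"}, headers=headers, stream=True) as resp:
    for chunk in resp.iter_content(chunk_size=None):
        if chunk.startswith(b"data:"):             # error frame, per the except branch above
            err = json.loads(chunk[len(b"data:"):])
            raise RuntimeError(f'TTS failed ({err["code"]}): {err["message"]}')
        audio.extend(chunk)                        # otherwise raw audio bytes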
@@ -20,7 +20,7 @@ from api.db.services.dialog_service import DialogService
 from api.db import StatusEnum
 from api.db.services.knowledgebase_service import KnowledgebaseService
 from api.db.services.user_service import TenantService, UserTenantService
-from api.settings import RetCode
+from api import settings
 from api.utils.api_utils import server_error_response, get_data_error_result, validate_request
 from api.utils import get_uuid
 from api.utils.api_utils import get_json_result

@@ -68,17 +68,17 @@ def set_dialog():
             continue
         if prompt_config["system"].find("{%s}" % p["key"]) < 0:
             return get_data_error_result(
-                retmsg="Parameter '{}' is not used".format(p["key"]))
+                message="Parameter '{}' is not used".format(p["key"]))

     try:
         e, tenant = TenantService.get_by_id(current_user.id)
         if not e:
-            return get_data_error_result(retmsg="Tenant not found!")
+            return get_data_error_result(message="Tenant not found!")
         llm_id = req.get("llm_id", tenant.llm_id)
         if not dialog_id:
             if not req.get("kb_ids"):
                 return get_data_error_result(
-                    retmsg="Fail! Please select knowledgebase!")
+                    message="Fail! Please select knowledgebase!")
             dia = {
                 "id": get_uuid(),
                 "tenant_id": current_user.id,

@@ -96,20 +96,20 @@ def set_dialog():
                 "icon": icon
             }
             if not DialogService.save(**dia):
-                return get_data_error_result(retmsg="Fail to new a dialog!")
+                return get_data_error_result(message="Fail to new a dialog!")
             e, dia = DialogService.get_by_id(dia["id"])
             if not e:
-                return get_data_error_result(retmsg="Fail to new a dialog!")
+                return get_data_error_result(message="Fail to new a dialog!")
             return get_json_result(data=dia.to_json())
         else:
             del req["dialog_id"]
             if "kb_names" in req:
                 del req["kb_names"]
             if not DialogService.update_by_id(dialog_id, req):
-                return get_data_error_result(retmsg="Dialog not found!")
+                return get_data_error_result(message="Dialog not found!")
             e, dia = DialogService.get_by_id(dialog_id)
             if not e:
-                return get_data_error_result(retmsg="Fail to update a dialog!")
+                return get_data_error_result(message="Fail to update a dialog!")
             dia = dia.to_dict()
             dia["kb_ids"], dia["kb_names"] = get_kb_names(dia["kb_ids"])
             return get_json_result(data=dia)

@@ -124,7 +124,7 @@ def get():
     try:
         e, dia = DialogService.get_by_id(dialog_id)
         if not e:
-            return get_data_error_result(retmsg="Dialog not found!")
+            return get_data_error_result(message="Dialog not found!")
         dia = dia.to_dict()
         dia["kb_ids"], dia["kb_names"] = get_kb_names(dia["kb_ids"])
         return get_json_result(data=dia)

@@ -174,8 +174,8 @@ def rm():
                 break
             else:
                 return get_json_result(
-                    data=False, retmsg=f'Only owner of dialog authorized for this operation.',
-                    retcode=RetCode.OPERATING_ERROR)
+                    data=False, message='Only owner of dialog authorized for this operation.',
+                    code=settings.RetCode.OPERATING_ERROR)
             dialog_list.append({"id": id,"status":StatusEnum.INVALID.value})
         DialogService.update_many_by_id(dialog_list)
         return get_json_result(data=True)
@@ -13,11 +13,12 @@
# See the License for the specific language governing permissions and
# limitations under the License
#
import json
+import os.path
import pathlib
import re

import flask
from elasticsearch_dsl import Q
from flask import request
from flask_login import login_required, current_user

@@ -26,20 +27,20 @@ from api.db.services.file2document_service import File2DocumentService
from api.db.services.file_service import FileService
from api.db.services.task_service import TaskService, queue_tasks
from api.db.services.user_service import UserTenantService
+from deepdoc.parser.html_parser import RAGFlowHtmlParser
from rag.nlp import search
-from rag.utils.es_conn import ELASTICSEARCH
from api.db.services import duplicate_name
from api.db.services.knowledgebase_service import KnowledgebaseService
from api.utils.api_utils import server_error_response, get_data_error_result, validate_request
from api.utils import get_uuid
from api.db import FileType, TaskStatus, ParserType, FileSource
from api.db.services.document_service import DocumentService, doc_upload_and_parse
-from api.settings import RetCode
+from api import settings
from api.utils.api_utils import get_json_result
from rag.utils.storage_factory import STORAGE_IMPL
-from api.utils.file_utils import filename_type, thumbnail
+from api.utils.file_utils import filename_type, thumbnail, get_project_base_directory
from api.utils.web_utils import html2pdf, is_valid_url
-from api.contants import IMG_BASE64_PREFIX
+from api.constants import IMG_BASE64_PREFIX


@manager.route('/upload', methods=['POST'])
@@ -49,16 +50,16 @@ def upload():
kb_id = request.form.get("kb_id")
if not kb_id:
return get_json_result(
-data=False, retmsg='Lack of "KB ID"', retcode=RetCode.ARGUMENT_ERROR)
+data=False, message='Lack of "KB ID"', code=settings.RetCode.ARGUMENT_ERROR)
if 'file' not in request.files:
return get_json_result(
-data=False, retmsg='No file part!', retcode=RetCode.ARGUMENT_ERROR)
+data=False, message='No file part!', code=settings.RetCode.ARGUMENT_ERROR)

file_objs = request.files.getlist('file')
for file_obj in file_objs:
if file_obj.filename == '':
return get_json_result(
-data=False, retmsg='No file selected!', retcode=RetCode.ARGUMENT_ERROR)
+data=False, message='No file selected!', code=settings.RetCode.ARGUMENT_ERROR)

e, kb = KnowledgebaseService.get_by_id(kb_id)
if not e:
@@ -67,7 +68,7 @@ def upload():
err, _ = FileService.upload_document(kb, file_objs, current_user.id)
if err:
return get_json_result(
-data=False, retmsg="\n".join(err), retcode=RetCode.SERVER_ERROR)
+data=False, message="\n".join(err), code=settings.RetCode.SERVER_ERROR)
return get_json_result(data=True)


@@ -78,12 +79,12 @@ def web_crawl():
kb_id = request.form.get("kb_id")
if not kb_id:
return get_json_result(
-data=False, retmsg='Lack of "KB ID"', retcode=RetCode.ARGUMENT_ERROR)
+data=False, message='Lack of "KB ID"', code=settings.RetCode.ARGUMENT_ERROR)
name = request.form.get("name")
url = request.form.get("url")
if not is_valid_url(url):
return get_json_result(
-data=False, retmsg='The URL format is invalid', retcode=RetCode.ARGUMENT_ERROR)
+data=False, message='The URL format is invalid', code=settings.RetCode.ARGUMENT_ERROR)
e, kb = KnowledgebaseService.get_by_id(kb_id)
if not e:
raise LookupError("Can't find this knowledgebase!")
@@ -145,17 +146,17 @@ def create():
kb_id = req["kb_id"]
if not kb_id:
return get_json_result(
-data=False, retmsg='Lack of "KB ID"', retcode=RetCode.ARGUMENT_ERROR)
+data=False, message='Lack of "KB ID"', code=settings.RetCode.ARGUMENT_ERROR)

try:
e, kb = KnowledgebaseService.get_by_id(kb_id)
if not e:
return get_data_error_result(
-retmsg="Can't find this knowledgebase!")
+message="Can't find this knowledgebase!")

if DocumentService.query(name=req["name"], kb_id=kb_id):
return get_data_error_result(
-retmsg="Duplicated document name in the same knowledgebase.")
+message="Duplicated document name in the same knowledgebase.")

doc = DocumentService.insert({
"id": get_uuid(),
@@ -179,7 +180,7 @@ def list_docs():
kb_id = request.args.get("kb_id")
if not kb_id:
return get_json_result(
-data=False, retmsg='Lack of "KB ID"', retcode=RetCode.ARGUMENT_ERROR)
+data=False, message='Lack of "KB ID"', code=settings.RetCode.ARGUMENT_ERROR)
tenants = UserTenantService.query(user_id=current_user.id)
for tenant in tenants:
if KnowledgebaseService.query(
@@ -187,8 +188,8 @@ def list_docs():
break
else:
return get_json_result(
-data=False, retmsg=f'Only owner of knowledgebase authorized for this operation.',
-retcode=RetCode.OPERATING_ERROR)
+data=False, message='Only owner of knowledgebase authorized for this operation.',
+code=settings.RetCode.OPERATING_ERROR)
keywords = request.args.get("keywords", "")

page_number = int(request.args.get("page", 1))
@@ -217,20 +218,20 @@ def docinfos():
if not DocumentService.accessible(doc_id, current_user.id):
return get_json_result(
data=False,
-retmsg='No authorization.',
-retcode=RetCode.AUTHENTICATION_ERROR
+message='No authorization.',
+code=settings.RetCode.AUTHENTICATION_ERROR
)
docs = DocumentService.get_by_ids(doc_ids)
return get_json_result(data=list(docs.dicts()))


@manager.route('/thumbnails', methods=['GET'])
-#@login_required
+# @login_required
def thumbnails():
doc_ids = request.args.get("doc_ids").split(",")
if not doc_ids:
return get_json_result(
-data=False, retmsg='Lack of "Document ID"', retcode=RetCode.ARGUMENT_ERROR)
+data=False, message='Lack of "Document ID"', code=settings.RetCode.ARGUMENT_ERROR)

try:
docs = DocumentService.get_thumbnails(doc_ids)
@@ -252,41 +253,32 @@ def change_status():
if str(req["status"]) not in ["0", "1"]:
return get_json_result(
data=False,
-retmsg='"Status" must be either 0 or 1!',
-retcode=RetCode.ARGUMENT_ERROR)
+message='"Status" must be either 0 or 1!',
+code=settings.RetCode.ARGUMENT_ERROR)

if not DocumentService.accessible(req["doc_id"], current_user.id):
return get_json_result(
data=False,
-retmsg='No authorization.',
-retcode=RetCode.AUTHENTICATION_ERROR)
+message='No authorization.',
+code=settings.RetCode.AUTHENTICATION_ERROR)

try:
e, doc = DocumentService.get_by_id(req["doc_id"])
if not e:
-return get_data_error_result(retmsg="Document not found!")
+return get_data_error_result(message="Document not found!")
e, kb = KnowledgebaseService.get_by_id(doc.kb_id)
if not e:
return get_data_error_result(
-retmsg="Can't find this knowledgebase!")
+message="Can't find this knowledgebase!")

if not DocumentService.update_by_id(
req["doc_id"], {"status": str(req["status"])}):
return get_data_error_result(
-retmsg="Database error (Document update)!")
+message="Database error (Document update)!")

-if str(req["status"]) == "0":
-ELASTICSEARCH.updateScriptByQuery(Q("term", doc_id=req["doc_id"]),
-scripts="ctx._source.available_int=0;",
-idxnm=search.index_name(
-kb.tenant_id)
-)
-else:
-ELASTICSEARCH.updateScriptByQuery(Q("term", doc_id=req["doc_id"]),
-scripts="ctx._source.available_int=1;",
-idxnm=search.index_name(
-kb.tenant_id)
-)
+status = int(req["status"])
+settings.docStoreConn.update({"doc_id": req["doc_id"]}, {"available_int": status},
+search.index_name(kb.tenant_id), doc.kb_id)
return get_json_result(data=True)
except Exception as e:
return server_error_response(e)
@@ -304,8 +296,8 @@ def rm():
if not DocumentService.accessible4deletion(doc_id, current_user.id):
return get_json_result(
data=False,
-retmsg='No authorization.',
-retcode=RetCode.AUTHENTICATION_ERROR
+message='No authorization.',
+code=settings.RetCode.AUTHENTICATION_ERROR
)

root_folder = FileService.get_root_folder(current_user.id)
@@ -316,16 +308,16 @@ def rm():
try:
e, doc = DocumentService.get_by_id(doc_id)
if not e:
-return get_data_error_result(retmsg="Document not found!")
+return get_data_error_result(message="Document not found!")
tenant_id = DocumentService.get_tenant_id(doc_id)
if not tenant_id:
-return get_data_error_result(retmsg="Tenant not found!")
+return get_data_error_result(message="Tenant not found!")

b, n = File2DocumentService.get_storage_address(doc_id=doc_id)

if not DocumentService.remove_document(doc, tenant_id):
return get_data_error_result(
-retmsg="Database error (Document removal)!")
+message="Database error (Document removal)!")

f2d = File2DocumentService.get_by_document_id(doc_id)
FileService.filter_delete([File.source_type == FileSource.KNOWLEDGEBASE, File.id == f2d[0].file_id])
@@ -336,7 +328,7 @@ def rm():
errors += str(e)

if errors:
-return get_json_result(data=False, retmsg=errors, retcode=RetCode.SERVER_ERROR)
+return get_json_result(data=False, message=errors, code=settings.RetCode.SERVER_ERROR)

return get_json_result(data=True)

@@ -350,8 +342,8 @@ def run():
if not DocumentService.accessible(doc_id, current_user.id):
return get_json_result(
data=False,
-retmsg='No authorization.',
-retcode=RetCode.AUTHENTICATION_ERROR
+message='No authorization.',
+code=settings.RetCode.AUTHENTICATION_ERROR
)
try:
for id in req["doc_ids"]:
@@ -364,9 +356,12 @@ def run():
# if str(req["run"]) == TaskStatus.CANCEL.value:
tenant_id = DocumentService.get_tenant_id(id)
if not tenant_id:
-return get_data_error_result(retmsg="Tenant not found!")
-ELASTICSEARCH.deleteByQuery(
-Q("match", doc_id=id), idxnm=search.index_name(tenant_id))
+return get_data_error_result(message="Tenant not found!")
+e, doc = DocumentService.get_by_id(id)
+if not e:
+return get_data_error_result(message="Document not found!")
+if settings.docStoreConn.indexExist(search.index_name(tenant_id), doc.kb_id):
+settings.docStoreConn.delete({"doc_id": id}, search.index_name(tenant_id), doc.kb_id)

if str(req["run"]) == TaskStatus.RUNNING.value:
TaskService.filter_delete([Task.doc_id == id])
@@ -389,28 +384,28 @@ def rename():
if not DocumentService.accessible(req["doc_id"], current_user.id):
return get_json_result(
data=False,
-retmsg='No authorization.',
-retcode=RetCode.AUTHENTICATION_ERROR
+message='No authorization.',
+code=settings.RetCode.AUTHENTICATION_ERROR
)
try:
e, doc = DocumentService.get_by_id(req["doc_id"])
if not e:
-return get_data_error_result(retmsg="Document not found!")
+return get_data_error_result(message="Document not found!")
if pathlib.Path(req["name"].lower()).suffix != pathlib.Path(
doc.name.lower()).suffix:
return get_json_result(
data=False,
-retmsg="The extension of file can't be changed",
-retcode=RetCode.ARGUMENT_ERROR)
+message="The extension of file can't be changed",
+code=settings.RetCode.ARGUMENT_ERROR)
for d in DocumentService.query(name=req["name"], kb_id=doc.kb_id):
if d.name == req["name"]:
return get_data_error_result(
-retmsg="Duplicated document name in the same knowledgebase.")
+message="Duplicated document name in the same knowledgebase.")

if not DocumentService.update_by_id(
req["doc_id"], {"name": req["name"]}):
return get_data_error_result(
-retmsg="Database error (Document rename)!")
+message="Database error (Document rename)!")

informs = File2DocumentService.get_by_document_id(req["doc_id"])
if informs:
@@ -428,7 +423,7 @@ def get(doc_id):
try:
e, doc = DocumentService.get_by_id(doc_id)
if not e:
-return get_data_error_result(retmsg="Document not found!")
+return get_data_error_result(message="Document not found!")

b, n = File2DocumentService.get_storage_address(doc_id=doc_id)
response = flask.make_response(STORAGE_IMPL.get(b, n))
@@ -456,13 +451,13 @@ def change_parser():
if not DocumentService.accessible(req["doc_id"], current_user.id):
return get_json_result(
data=False,
-retmsg='No authorization.',
-retcode=RetCode.AUTHENTICATION_ERROR
+message='No authorization.',
+code=settings.RetCode.AUTHENTICATION_ERROR
)
try:
e, doc = DocumentService.get_by_id(req["doc_id"])
if not e:
-return get_data_error_result(retmsg="Document not found!")
+return get_data_error_result(message="Document not found!")
if doc.parser_id.lower() == req["parser_id"].lower():
if "parser_config" in req:
if req["parser_config"] == doc.parser_config:
@@ -473,25 +468,25 @@ def change_parser():
if ((doc.type == FileType.VISUAL and req["parser_id"] != "picture")
or (re.search(
r"\.(ppt|pptx|pages)$", doc.name) and req["parser_id"] != "presentation")):
-return get_data_error_result(retmsg="Not supported yet!")
+return get_data_error_result(message="Not supported yet!")

e = DocumentService.update_by_id(doc.id,
{"parser_id": req["parser_id"], "progress": 0, "progress_msg": "",
"run": TaskStatus.UNSTART.value})
if not e:
-return get_data_error_result(retmsg="Document not found!")
+return get_data_error_result(message="Document not found!")
if "parser_config" in req:
DocumentService.update_parser_config(doc.id, req["parser_config"])
if doc.token_num > 0:
e = DocumentService.increment_chunk_num(doc.id, doc.kb_id, doc.token_num * -1, doc.chunk_num * -1,
doc.process_duation * -1)
if not e:
-return get_data_error_result(retmsg="Document not found!")
+return get_data_error_result(message="Document not found!")
tenant_id = DocumentService.get_tenant_id(req["doc_id"])
if not tenant_id:
-return get_data_error_result(retmsg="Tenant not found!")
-ELASTICSEARCH.deleteByQuery(
-Q("match", doc_id=doc.id), idxnm=search.index_name(tenant_id))
+return get_data_error_result(message="Tenant not found!")
+if settings.docStoreConn.indexExist(search.index_name(tenant_id), doc.kb_id):
+settings.docStoreConn.delete({"doc_id": doc.id}, search.index_name(tenant_id), doc.kb_id)

return get_json_result(data=True)
except Exception as e:
@@ -516,14 +511,74 @@ def get_image(image_id):
def upload_and_parse():
if 'file' not in request.files:
return get_json_result(
-data=False, retmsg='No file part!', retcode=RetCode.ARGUMENT_ERROR)
+data=False, message='No file part!', code=settings.RetCode.ARGUMENT_ERROR)

file_objs = request.files.getlist('file')
for file_obj in file_objs:
if file_obj.filename == '':
return get_json_result(
-data=False, retmsg='No file selected!', retcode=RetCode.ARGUMENT_ERROR)
+data=False, message='No file selected!', code=settings.RetCode.ARGUMENT_ERROR)

doc_ids = doc_upload_and_parse(request.form.get("conversation_id"), file_objs, current_user.id)

return get_json_result(data=doc_ids)


+@manager.route('/parse', methods=['POST'])
+@login_required
+def parse():
+url = request.json.get("url") if request.json else ""
+if url:
+if not is_valid_url(url):
+return get_json_result(
+data=False, message='The URL format is invalid', code=settings.RetCode.ARGUMENT_ERROR)
+download_path = os.path.join(get_project_base_directory(), "logs/downloads")
+os.makedirs(download_path, exist_ok=True)
+from seleniumwire.webdriver import Chrome, ChromeOptions
+options = ChromeOptions()
+options.add_argument('--headless')
+options.add_argument('--disable-gpu')
+options.add_argument('--no-sandbox')
+options.add_argument('--disable-dev-shm-usage')
+options.add_experimental_option('prefs', {
+'download.default_directory': download_path,
+'download.prompt_for_download': False,
+'download.directory_upgrade': True,
+'safebrowsing.enabled': True
+})
+driver = Chrome(options=options)
+driver.get(url)
+res_headers = [r.response.headers for r in driver.requests]
+if len(res_headers) > 1:
+sections = RAGFlowHtmlParser().parser_txt(driver.page_source)
+driver.quit()
+return get_json_result(data="\n".join(sections))
+
+class File:
+filename: str
+filepath: str
+
+def __init__(self, filename, filepath):
+self.filename = filename
+self.filepath = filepath
+
+def read(self):
+with open(self.filepath, "rb") as f:
+return f.read()
+
+r = re.search(r"filename=\"([^\"]+)\"", str(res_headers))
+if not r or not r.group(1):
+return get_json_result(
+data=False, message="Can't not identify downloaded file", code=settings.RetCode.ARGUMENT_ERROR)
+f = File(r.group(1), os.path.join(download_path, r.group(1)))
+txt = FileService.parse_docs([f], current_user.id)
+return get_json_result(data=txt)
+
+if 'file' not in request.files:
+return get_json_result(
+data=False, message='No file part!', code=settings.RetCode.ARGUMENT_ERROR)
+
+file_objs = request.files.getlist('file')
+txt = FileService.parse_docs(file_objs, current_user.id)
+
+return get_json_result(data=txt)
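Note: the hunks above swap the direct `ELASTICSEARCH.updateScriptByQuery` / `deleteByQuery` calls for a store-agnostic `settings.docStoreConn`. A hedged sketch of the interface those new call sites assume (method names come from this diff; parameter names and the return types are illustrative):

```python
# Hedged sketch of the document-store connection the new call sites rely on:
# a condition dict plus the per-tenant index name and kb_id, instead of
# elasticsearch_dsl queries. Not the actual implementation.
from abc import ABC, abstractmethod

class DocStoreConnection(ABC):
    @abstractmethod
    def indexExist(self, index_name: str, kb_id: str) -> bool:
        """Return True if the per-tenant index already exists."""

    @abstractmethod
    def update(self, condition: dict, new_values: dict,
               index_name: str, kb_id: str) -> bool:
        """Update every chunk matching `condition`, e.g. flipping available_int."""

    @abstractmethod
    def delete(self, condition: dict, index_name: str, kb_id: str) -> int:
        """Delete every chunk matching `condition`; assumed to return a count."""

# Usage as in change_status() above:
#   settings.docStoreConn.update({"doc_id": doc_id}, {"available_int": 0},
#                                search.index_name(tenant_id), kb_id)
```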
@@ -13,9 +13,7 @@
# See the License for the specific language governing permissions and
# limitations under the License
#
-from elasticsearch_dsl import Q

from api.db.db_models import File2Document
from api.db.services.file2document_service import File2DocumentService
from api.db.services.file_service import FileService

@@ -26,10 +24,8 @@ from api.utils.api_utils import server_error_response, get_data_error_result, validate_request
from api.utils import get_uuid
from api.db import FileType
from api.db.services.document_service import DocumentService
-from api.settings import RetCode
+from api import settings
from api.utils.api_utils import get_json_result
-from rag.nlp import search
-from rag.utils.es_conn import ELASTICSEARCH


@manager.route('/convert', methods=['POST'])
@@ -54,13 +50,13 @@ def convert():
doc_id = inform.document_id
e, doc = DocumentService.get_by_id(doc_id)
if not e:
-return get_data_error_result(retmsg="Document not found!")
+return get_data_error_result(message="Document not found!")
tenant_id = DocumentService.get_tenant_id(doc_id)
if not tenant_id:
-return get_data_error_result(retmsg="Tenant not found!")
+return get_data_error_result(message="Tenant not found!")
if not DocumentService.remove_document(doc, tenant_id):
return get_data_error_result(
-retmsg="Database error (Document removal)!")
+message="Database error (Document removal)!")
File2DocumentService.delete_by_file_id(id)

# insert
@@ -68,11 +64,11 @@ def convert():
e, kb = KnowledgebaseService.get_by_id(kb_id)
if not e:
return get_data_error_result(
-retmsg="Can't find this knowledgebase!")
+message="Can't find this knowledgebase!")
e, file = FileService.get_by_id(id)
if not e:
return get_data_error_result(
-retmsg="Can't find this file!")
+message="Can't find this file!")

doc = DocumentService.insert({
"id": get_uuid(),
@@ -104,26 +100,26 @@ def rm():
file_ids = req["file_ids"]
if not file_ids:
return get_json_result(
-data=False, retmsg='Lack of "Files ID"', retcode=RetCode.ARGUMENT_ERROR)
+data=False, message='Lack of "Files ID"', code=settings.RetCode.ARGUMENT_ERROR)
try:
for file_id in file_ids:
informs = File2DocumentService.get_by_file_id(file_id)
if not informs:
-return get_data_error_result(retmsg="Inform not found!")
+return get_data_error_result(message="Inform not found!")
for inform in informs:
if not inform:
-return get_data_error_result(retmsg="Inform not found!")
+return get_data_error_result(message="Inform not found!")
File2DocumentService.delete_by_file_id(file_id)
doc_id = inform.document_id
e, doc = DocumentService.get_by_id(doc_id)
if not e:
-return get_data_error_result(retmsg="Document not found!")
+return get_data_error_result(message="Document not found!")
tenant_id = DocumentService.get_tenant_id(doc_id)
if not tenant_id:
-return get_data_error_result(retmsg="Tenant not found!")
+return get_data_error_result(message="Tenant not found!")
if not DocumentService.remove_document(doc, tenant_id):
return get_data_error_result(
-retmsg="Database error (Document removal)!")
+message="Database error (Document removal)!")
return get_json_result(data=True)
except Exception as e:
return server_error_response(e)
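The convert() hunks above follow a delete-then-recreate pattern: any document already generated from the file is removed (index data included) before a fresh Document row is inserted into the target knowledgebase. A condensed sketch of that flow, with service calls taken from the hunks and the wrapper function name purely illustrative:

```python
# Condensed, assumption-laden sketch of the convert() flow; error handling
# and most inserted fields are trimmed for brevity.
def convert_file_to_document(file_id, kb_id, user_id):
    # 1. Drop any document previously linked to this file.
    for inform in File2DocumentService.get_by_file_id(file_id):
        e, doc = DocumentService.get_by_id(inform.document_id)
        if e:
            tenant_id = DocumentService.get_tenant_id(doc.id)
            DocumentService.remove_document(doc, tenant_id)  # rows + chunks
    File2DocumentService.delete_by_file_id(file_id)

    # 2. Re-insert the file as a new document in the target knowledgebase.
    e, kb = KnowledgebaseService.get_by_id(kb_id)
    e, file = FileService.get_by_id(file_id)
    return DocumentService.insert({"id": get_uuid(), "kb_id": kb.id,
                                   "name": file.name, "created_by": user_id})
```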
@@ -18,7 +18,6 @@ import pathlib
import re

import flask
-from elasticsearch_dsl import Q
from flask import request
from flask_login import login_required, current_user

@@ -29,11 +28,9 @@ from api.utils import get_uuid
from api.db import FileType, FileSource
from api.db.services import duplicate_name
from api.db.services.file_service import FileService
-from api.settings import RetCode
+from api import settings
from api.utils.api_utils import get_json_result
from api.utils.file_utils import filename_type
-from rag.nlp import search
-from rag.utils.es_conn import ELASTICSEARCH
from rag.utils.storage_factory import STORAGE_IMPL


@@ -49,24 +46,24 @@ def upload():

if 'file' not in request.files:
return get_json_result(
-data=False, retmsg='No file part!', retcode=RetCode.ARGUMENT_ERROR)
+data=False, message='No file part!', code=settings.RetCode.ARGUMENT_ERROR)
file_objs = request.files.getlist('file')

for file_obj in file_objs:
if file_obj.filename == '':
return get_json_result(
-data=False, retmsg='No file selected!', retcode=RetCode.ARGUMENT_ERROR)
+data=False, message='No file selected!', code=settings.RetCode.ARGUMENT_ERROR)
file_res = []
try:
for file_obj in file_objs:
e, file = FileService.get_by_id(pf_id)
if not e:
return get_data_error_result(
-retmsg="Can't find this folder!")
+message="Can't find this folder!")
MAX_FILE_NUM_PER_USER = int(os.environ.get('MAX_FILE_NUM_PER_USER', 0))
if MAX_FILE_NUM_PER_USER > 0 and DocumentService.get_doc_count(current_user.id) >= MAX_FILE_NUM_PER_USER:
return get_data_error_result(
-retmsg="Exceed the maximum file number of a free user!")
+message="Exceed the maximum file number of a free user!")

# split file name path
if not file_obj.filename:
@@ -85,13 +82,13 @@ def upload():
if file_len != len_id_list:
e, file = FileService.get_by_id(file_id_list[len_id_list - 1])
if not e:
-return get_data_error_result(retmsg="Folder not found!")
+return get_data_error_result(message="Folder not found!")
last_folder = FileService.create_folder(file, file_id_list[len_id_list - 1], file_obj_names,
len_id_list)
else:
e, file = FileService.get_by_id(file_id_list[len_id_list - 2])
if not e:
-return get_data_error_result(retmsg="Folder not found!")
+return get_data_error_result(message="Folder not found!")
last_folder = FileService.create_folder(file, file_id_list[len_id_list - 2], file_obj_names,
len_id_list)

@@ -137,10 +134,10 @@ def create():
try:
if not FileService.is_parent_folder_exist(pf_id):
return get_json_result(
-data=False, retmsg="Parent Folder Doesn't Exist!", retcode=RetCode.OPERATING_ERROR)
+data=False, message="Parent Folder Doesn't Exist!", code=settings.RetCode.OPERATING_ERROR)
if FileService.query(name=req["name"], parent_id=pf_id):
return get_data_error_result(
-retmsg="Duplicated folder name in the same folder.")
+message="Duplicated folder name in the same folder.")

if input_file_type == FileType.FOLDER.value:
file_type = FileType.FOLDER.value
@@ -181,14 +178,14 @@ def list_files():
try:
e, file = FileService.get_by_id(pf_id)
if not e:
-return get_data_error_result(retmsg="Folder not found!")
+return get_data_error_result(message="Folder not found!")

files, total = FileService.get_by_pf_id(
current_user.id, pf_id, page_number, items_per_page, orderby, desc, keywords)

parent_folder = FileService.get_parent_folder(pf_id)
if not FileService.get_parent_folder(pf_id):
-return get_json_result(retmsg="File not found!")
+return get_json_result(message="File not found!")

return get_json_result(data={"total": total, "files": files, "parent_folder": parent_folder.to_json()})
except Exception as e:
@@ -212,7 +209,7 @@ def get_parent_folder():
try:
e, file = FileService.get_by_id(file_id)
if not e:
-return get_data_error_result(retmsg="Folder not found!")
+return get_data_error_result(message="Folder not found!")

parent_folder = FileService.get_parent_folder(file_id)
return get_json_result(data={"parent_folder": parent_folder.to_json()})
@@ -227,7 +224,7 @@ def get_all_parent_folders():
try:
e, file = FileService.get_by_id(file_id)
if not e:
-return get_data_error_result(retmsg="Folder not found!")
+return get_data_error_result(message="Folder not found!")

parent_folders = FileService.get_all_parent_folders(file_id)
parent_folders_res = []
@@ -248,9 +245,9 @@ def rm():
for file_id in file_ids:
e, file = FileService.get_by_id(file_id)
if not e:
-return get_data_error_result(retmsg="File or Folder not found!")
+return get_data_error_result(message="File or Folder not found!")
if not file.tenant_id:
-return get_data_error_result(retmsg="Tenant not found!")
+return get_data_error_result(message="Tenant not found!")
if file.source_type == FileSource.KNOWLEDGEBASE:
continue

@@ -259,13 +256,13 @@ def rm():
for inner_file_id in file_id_list:
e, file = FileService.get_by_id(inner_file_id)
if not e:
-return get_data_error_result(retmsg="File not found!")
+return get_data_error_result(message="File not found!")
STORAGE_IMPL.rm(file.parent_id, file.location)
FileService.delete_folder_by_pf_id(current_user.id, file_id)
else:
if not FileService.delete(file):
return get_data_error_result(
-retmsg="Database error (File removal)!")
+message="Database error (File removal)!")

# delete file2document
informs = File2DocumentService.get_by_file_id(file_id)
@@ -273,13 +270,13 @@ def rm():
doc_id = inform.document_id
e, doc = DocumentService.get_by_id(doc_id)
if not e:
-return get_data_error_result(retmsg="Document not found!")
+return get_data_error_result(message="Document not found!")
tenant_id = DocumentService.get_tenant_id(doc_id)
if not tenant_id:
-return get_data_error_result(retmsg="Tenant not found!")
+return get_data_error_result(message="Tenant not found!")
if not DocumentService.remove_document(doc, tenant_id):
return get_data_error_result(
-retmsg="Database error (Document removal)!")
+message="Database error (Document removal)!")
File2DocumentService.delete_by_file_id(file_id)

return get_json_result(data=True)
@@ -295,30 +292,30 @@ def rename():
try:
e, file = FileService.get_by_id(req["file_id"])
if not e:
-return get_data_error_result(retmsg="File not found!")
+return get_data_error_result(message="File not found!")
if file.type != FileType.FOLDER.value \
and pathlib.Path(req["name"].lower()).suffix != pathlib.Path(
file.name.lower()).suffix:
return get_json_result(
data=False,
-retmsg="The extension of file can't be changed",
-retcode=RetCode.ARGUMENT_ERROR)
+message="The extension of file can't be changed",
+code=settings.RetCode.ARGUMENT_ERROR)
for file in FileService.query(name=req["name"], pf_id=file.parent_id):
if file.name == req["name"]:
return get_data_error_result(
-retmsg="Duplicated file name in the same folder.")
+message="Duplicated file name in the same folder.")

if not FileService.update_by_id(
req["file_id"], {"name": req["name"]}):
return get_data_error_result(
-retmsg="Database error (File rename)!")
+message="Database error (File rename)!")

informs = File2DocumentService.get_by_file_id(req["file_id"])
if informs:
if not DocumentService.update_by_id(
informs[0].document_id, {"name": req["name"]}):
return get_data_error_result(
-retmsg="Database error (Document rename)!")
+message="Database error (Document rename)!")

return get_json_result(data=True)
except Exception as e:
@@ -331,7 +328,7 @@ def get(file_id):
try:
e, file = FileService.get_by_id(file_id)
if not e:
-return get_data_error_result(retmsg="Document not found!")
+return get_data_error_result(message="Document not found!")
b, n = File2DocumentService.get_storage_address(file_id=file_id)
response = flask.make_response(STORAGE_IMPL.get(b, n))
ext = re.search(r"\.([^.]+)$", file.name)
@@ -359,12 +356,12 @@ def move():
for file_id in file_ids:
e, file = FileService.get_by_id(file_id)
if not e:
-return get_data_error_result(retmsg="File or Folder not found!")
+return get_data_error_result(message="File or Folder not found!")
if not file.tenant_id:
-return get_data_error_result(retmsg="Tenant not found!")
+return get_data_error_result(message="Tenant not found!")
fe, _ = FileService.get_by_id(parent_id)
if not fe:
-return get_data_error_result(retmsg="Parent Folder not found!")
+return get_data_error_result(message="Parent Folder not found!")
FileService.move_file(file_ids, parent_id)
return get_json_result(data=True)
except Exception as e:
@@ -26,8 +26,9 @@ from api.utils import get_uuid
from api.db import StatusEnum, FileSource
from api.db.services.knowledgebase_service import KnowledgebaseService
from api.db.db_models import File
-from api.settings import RetCode
from api.utils.api_utils import get_json_result
+from api import settings
+from rag.nlp import search


@manager.route('/create', methods=['post'])
@@ -47,7 +48,7 @@ def create():
req["created_by"] = current_user.id
e, t = TenantService.get_by_id(current_user.id)
if not e:
-return get_data_error_result(retmsg="Tenant not found.")
+return get_data_error_result(message="Tenant not found.")
req["embd_id"] = t.embd_id
if not KnowledgebaseService.save(**req):
return get_data_error_result()
@@ -65,24 +66,24 @@ def update():
if not KnowledgebaseService.accessible4deletion(req["kb_id"], current_user.id):
return get_json_result(
data=False,
-retmsg='No authorization.',
-retcode=RetCode.AUTHENTICATION_ERROR
+message='No authorization.',
+code=settings.RetCode.AUTHENTICATION_ERROR
)
try:
if not KnowledgebaseService.query(
created_by=current_user.id, id=req["kb_id"]):
return get_json_result(
-data=False, retmsg=f'Only owner of knowledgebase authorized for this operation.', retcode=RetCode.OPERATING_ERROR)
+data=False, message='Only owner of knowledgebase authorized for this operation.', code=settings.RetCode.OPERATING_ERROR)

e, kb = KnowledgebaseService.get_by_id(req["kb_id"])
if not e:
return get_data_error_result(
-retmsg="Can't find this knowledgebase!")
+message="Can't find this knowledgebase!")

if req["name"].lower() != kb.name.lower() \
and len(KnowledgebaseService.query(name=req["name"], tenant_id=current_user.id, status=StatusEnum.VALID.value)) > 1:
return get_data_error_result(
-retmsg="Duplicated knowledgebase name.")
+message="Duplicated knowledgebase name.")

del req["kb_id"]
if not KnowledgebaseService.update_by_id(kb.id, req):
@@ -91,7 +92,7 @@ def update():
e, kb = KnowledgebaseService.get_by_id(kb.id)
if not e:
return get_data_error_result(
-retmsg="Database error (Knowledgebase rename)!")
+message="Database error (Knowledgebase rename)!")

return get_json_result(data=kb.to_json())
except Exception as e:
@@ -110,12 +111,12 @@ def detail():
break
else:
return get_json_result(
-data=False, retmsg=f'Only owner of knowledgebase authorized for this operation.',
-retcode=RetCode.OPERATING_ERROR)
+data=False, message='Only owner of knowledgebase authorized for this operation.',
+code=settings.RetCode.OPERATING_ERROR)
kb = KnowledgebaseService.get_detail(kb_id)
if not kb:
return get_data_error_result(
-retmsg="Can't find this knowledgebase!")
+message="Can't find this knowledgebase!")
return get_json_result(data=kb)
except Exception as e:
return server_error_response(e)
@@ -145,27 +146,29 @@ def rm():
if not KnowledgebaseService.accessible4deletion(req["kb_id"], current_user.id):
return get_json_result(
data=False,
-retmsg='No authorization.',
-retcode=RetCode.AUTHENTICATION_ERROR
+message='No authorization.',
+code=settings.RetCode.AUTHENTICATION_ERROR
)
try:
kbs = KnowledgebaseService.query(
created_by=current_user.id, id=req["kb_id"])
if not kbs:
return get_json_result(
-data=False, retmsg=f'Only owner of knowledgebase authorized for this operation.', retcode=RetCode.OPERATING_ERROR)
+data=False, message='Only owner of knowledgebase authorized for this operation.', code=settings.RetCode.OPERATING_ERROR)

for doc in DocumentService.query(kb_id=req["kb_id"]):
if not DocumentService.remove_document(doc, kbs[0].tenant_id):
return get_data_error_result(
-retmsg="Database error (Document removal)!")
+message="Database error (Document removal)!")
f2d = File2DocumentService.get_by_document_id(doc.id)
FileService.filter_delete([File.source_type == FileSource.KNOWLEDGEBASE, File.id == f2d[0].file_id])
+FileService.filter_delete([File.source_type == FileSource.KNOWLEDGEBASE, File.type == "folder", File.name == kbs[0].name])
File2DocumentService.delete_by_document_id(doc.id)

if not KnowledgebaseService.delete_by_id(req["kb_id"]):
return get_data_error_result(
-retmsg="Database error (Knowledgebase removal)!")
+message="Database error (Knowledgebase removal)!")
+settings.docStoreConn.delete({"kb_id": req["kb_id"]}, search.index_name(kbs[0].tenant_id), req["kb_id"])
return get_json_result(data=True)
except Exception as e:
return server_error_response(e)
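The new rm() above orders the cascade deliberately: database rows and file links go first, the knowledgebase row next, and the indexed chunks last, keyed on `kb_id`. A sketch of that ordering under the same assumption, so a failed database step never leaves live rows pointing at already-deleted chunks:

```python
# Sketch of the deletion order in rm() above; names come from this diff,
# the standalone function shape is illustrative.
def remove_knowledgebase(kb_id, tenant_id):
    for doc in DocumentService.query(kb_id=kb_id):
        DocumentService.remove_document(doc, tenant_id)      # doc rows + blobs
        File2DocumentService.delete_by_document_id(doc.id)   # file links
    KnowledgebaseService.delete_by_id(kb_id)                  # the KB row itself
    settings.docStoreConn.delete({"kb_id": kb_id},            # remaining chunks
                                 search.index_name(tenant_id), kb_id)
```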
@@ -13,12 +13,13 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#
+import logging
import json

from flask import request
from flask_login import login_required, current_user
from api.db.services.llm_service import LLMFactoriesService, TenantLLMService, LLMService
-from api.settings import LIGHTEN
+from api import settings
from api.utils.api_utils import server_error_response, get_data_error_result, validate_request
from api.db import StatusEnum, LLMType
from api.db.db_models import TenantLLM
@@ -73,9 +74,9 @@ def set_api_key():
mdl = ChatModel[factory](
req["api_key"], llm.llm_name, base_url=req.get("base_url"))
try:
-m, tc = mdl.chat(None, [{"role": "user", "content": "Hello! How are you doing!"}],
-{"temperature": 0.9,'max_tokens':50})
-if m.find("**ERROR**") >=0:
+m, tc = mdl.chat(None, [{"role": "user", "content": "Hello! How are you doing!"}],
+{"temperature": 0.9, 'max_tokens': 50})
+if m.find("**ERROR**") >= 0:
raise Exception(m)
chat_passed = True
except Exception as e:
@@ -89,7 +90,7 @@ def set_api_key():
if len(arr) == 0 or tc == 0:
raise Exception("Fail")
rerank_passed = True
-print(f'passed model rerank{llm.llm_name}',flush=True)
+logging.debug(f'passed model rerank {llm.llm_name}')
except Exception as e:
msg += f"\nFail to access model({llm.llm_name}) using this api key." + str(
e)
@@ -98,7 +99,7 @@ def set_api_key():
break

if msg:
-return get_data_error_result(retmsg=msg)
+return get_data_error_result(message=msg)

llm_config = {
"api_key": req["api_key"],
@@ -109,6 +110,7 @@ def set_api_key():
llm_config[n] = req[n]

for llm in LLMService.query(fid=factory):
+llm_config["max_tokens"]=llm.max_tokens
if not TenantLLMService.filter_update(
[TenantLLM.tenant_id == current_user.id,
TenantLLM.llm_factory == factory,
@@ -120,7 +122,8 @@ def set_api_key():
llm_name=llm.llm_name,
model_type=llm.model_type,
api_key=llm_config["api_key"],
-api_base=llm_config["api_base"]
+api_base=llm_config["api_base"],
+max_tokens=llm_config["max_tokens"]
)

return get_json_result(data=True)
@@ -157,23 +160,23 @@ def add_llm():
api_key = apikey_json(["bedrock_ak", "bedrock_sk", "bedrock_region"])

elif factory == "LocalAI":
-llm_name = req["llm_name"]+"___LocalAI"
+llm_name = req["llm_name"] + "___LocalAI"
api_key = "xxxxxxxxxxxxxxx"


elif factory == "HuggingFace":
-llm_name = req["llm_name"]+"___HuggingFace"
+llm_name = req["llm_name"] + "___HuggingFace"
api_key = "xxxxxxxxxxxxxxx"

elif factory == "OpenAI-API-Compatible":
-llm_name = req["llm_name"]+"___OpenAI-API"
-api_key = req.get("api_key","xxxxxxxxxxxxxxx")
+llm_name = req["llm_name"] + "___OpenAI-API"
+api_key = req.get("api_key", "xxxxxxxxxxxxxxx")

-elif factory =="XunFei Spark":
+elif factory == "XunFei Spark":
llm_name = req["llm_name"]
if req["model_type"] == "chat":
api_key = req.get("spark_api_password", "xxxxxxxxxxxxxxx")
elif req["model_type"] == "tts":
-api_key = apikey_json(["spark_app_id", "spark_api_secret","spark_api_key"])
+api_key = apikey_json(["spark_app_id", "spark_api_secret", "spark_api_key"])

elif factory == "BaiduYiyan":
llm_name = req["llm_name"]
@@ -201,14 +204,15 @@ def add_llm():
"model_type": req["model_type"],
"llm_name": llm_name,
"api_base": req.get("api_base", ""),
-"api_key": api_key
+"api_key": api_key,
+"max_tokens": req.get("max_tokens")
}

msg = ""
if llm["model_type"] == LLMType.EMBEDDING.value:
mdl = EmbeddingModel[factory](
key=llm['api_key'],
-model_name=llm["llm_name"],
+model_name=llm["llm_name"],
base_url=llm["api_base"])
try:
arr, tc = mdl.encode(["Test if the api key is available"])
@@ -224,7 +228,7 @@ def add_llm():
)
try:
m, tc = mdl.chat(None, [{"role": "user", "content": "Hello! How are you doing!"}], {
-"temperature": 0.9})
+"temperature": 0.9})
if not tc:
raise Exception(m)
except Exception as e:
@@ -232,12 +236,12 @@ def add_llm():
e)
elif llm["model_type"] == LLMType.RERANK:
mdl = RerankModel[factory](
-key=llm["api_key"],
-model_name=llm["llm_name"],
+key=llm["api_key"],
+model_name=llm["llm_name"],
base_url=llm["api_base"]
)
try:
-arr, tc = mdl.similarity("Hello~ Ragflower!", ["Hi, there!"])
+arr, tc = mdl.similarity("Hello~ Ragflower!", ["Hi, there!", "Ohh, my friend!"])
if len(arr) == 0 or tc == 0:
raise Exception("Not known.")
except Exception as e:
@@ -245,8 +249,8 @@ def add_llm():
e)
elif llm["model_type"] == LLMType.IMAGE2TEXT.value:
mdl = CvModel[factory](
-key=llm["api_key"],
-model_name=llm["llm_name"],
+key=llm["api_key"],
+model_name=llm["llm_name"],
base_url=llm["api_base"]
)
try:
@@ -278,10 +282,11 @@ def add_llm():
pass

if msg:
-return get_data_error_result(retmsg=msg)
+return get_data_error_result(message=msg)

if not TenantLLMService.filter_update(
-[TenantLLM.tenant_id == current_user.id, TenantLLM.llm_factory == factory, TenantLLM.llm_name == llm["llm_name"]], llm):
+[TenantLLM.tenant_id == current_user.id, TenantLLM.llm_factory == factory,
+TenantLLM.llm_name == llm["llm_name"]], llm):
TenantLLMService.save(**llm)

return get_json_result(data=True)
@@ -293,7 +298,8 @@ def add_llm():
def delete_llm():
req = request.json
TenantLLMService.filter_delete(
-[TenantLLM.tenant_id == current_user.id, TenantLLM.llm_factory == req["llm_factory"], TenantLLM.llm_name == req["llm_name"]])
+[TenantLLM.tenant_id == current_user.id, TenantLLM.llm_factory == req["llm_factory"],
+TenantLLM.llm_name == req["llm_name"]])
return get_json_result(data=True)


@@ -303,7 +309,7 @@ def delete_llm():
def delete_factory():
req = request.json
TenantLLMService.filter_delete(
-[TenantLLM.tenant_id == current_user.id, TenantLLM.llm_factory == req["llm_factory"]])
+[TenantLLM.tenant_id == current_user.id, TenantLLM.llm_factory == req["llm_factory"]])
return get_json_result(data=True)


@@ -331,8 +337,8 @@ def my_llms():
@manager.route('/list', methods=['GET'])
@login_required
def list_app():
-self_deploied = ["Youdao","FastEmbed", "BAAI", "Ollama", "Xinference", "LocalAI", "LM-Studio"]
-weighted = ["Youdao","FastEmbed", "BAAI"] if LIGHTEN != 0 else []
+self_deploied = ["Youdao", "FastEmbed", "BAAI", "Ollama", "Xinference", "LocalAI", "LM-Studio"]
+weighted = ["Youdao", "FastEmbed", "BAAI"] if settings.LIGHTEN != 0 else []
model_type = request.args.get("model_type")
try:
objs = TenantLLMService.query(tenant_id=current_user.id)
@@ -343,15 +349,15 @@ def list_app():
for m in llms:
m["available"] = m["fid"] in facts or m["llm_name"].lower() == "flag-embedding" or m["fid"] in self_deploied

-llm_set = set([m["llm_name"]+"@"+m["fid"] for m in llms])
+llm_set = set([m["llm_name"] + "@" + m["fid"] for m in llms])
for o in objs:
-if not o.api_key:continue
-if o.llm_name+"@"+o.llm_factory in llm_set:continue
+if not o.api_key: continue
+if o.llm_name + "@" + o.llm_factory in llm_set: continue
llms.append({"llm_name": o.llm_name, "model_type": o.model_type, "fid": o.llm_factory, "available": True})

res = {}
for m in llms:
-if model_type and m["model_type"].find(model_type)<0:
+if model_type and m["model_type"].find(model_type) < 0:
continue
if m["fid"] not in res:
res[m["fid"]] = []
@@ -359,4 +365,4 @@ def list_app():

return get_json_result(data=res)
except Exception as e:
-return server_error_response(e)
+return server_error_response(e)
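set_api_key() above validates a credential by firing tiny chat, embedding, and rerank probes before anything is persisted. A minimal standalone sketch of the chat probe, assuming the `ChatModel` factory interface and the in-band `**ERROR**` convention shown in the hunks (the wrapper function itself is illustrative):

```python
# Hedged sketch of the chat probe in set_api_key(); ChatModel and the
# "**ERROR**" marker come from this diff, probe_chat_key() does not exist
# in the codebase.
def probe_chat_key(factory, llm_name, api_key, base_url=None):
    mdl = ChatModel[factory](api_key, llm_name, base_url=base_url)
    try:
        m, tc = mdl.chat(None,
                         [{"role": "user", "content": "Hello! How are you doing!"}],
                         {"temperature": 0.9, "max_tokens": 50})
        # Providers are wrapped to return "**ERROR**..." in-band instead of raising.
        if m.find("**ERROR**") >= 0:
            raise Exception(m)
        return True, ""
    except Exception as e:
        return False, f"Fail to access model({llm_name}) using this api key. {e}"
```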
@@ -14,7 +14,7 @@
# limitations under the License.
#
from flask import request
-from api.settings import RetCode
+from api import settings
from api.db import StatusEnum
from api.db.services.dialog_service import DialogService
from api.db.services.knowledgebase_service import KnowledgebaseService
@@ -32,18 +32,19 @@ def create(tenant_id):
req=request.json
ids= req.get("dataset_ids")
if not ids:
-return get_error_data_result(retmsg="`dataset_ids` is required")
+return get_error_data_result(message="`dataset_ids` is required")
for kb_id in ids:
-kbs = KnowledgebaseService.query(id=kb_id,tenant_id=tenant_id)
+kbs = KnowledgebaseService.accessible(kb_id=kb_id,user_id=tenant_id)
if not kbs:
return get_error_data_result(f"You don't own the dataset {kb_id}")
-kb=kbs[0]
+kbs = KnowledgebaseService.query(id=kb_id)
+kb = kbs[0]
if kb.chunk_num == 0:
return get_error_data_result(f"The dataset {kb_id} doesn't own parsed file")
kbs = KnowledgebaseService.get_by_ids(ids)
embd_count = list(set([kb.embd_id for kb in kbs]))
if len(embd_count) != 1:
-return get_result(retmsg='Datasets use different embedding models."',retcode=RetCode.AUTHENTICATION_ERROR)
+return get_result(message='Datasets use different embedding models."',code=settings.RetCode.AUTHENTICATION_ERROR)
req["kb_ids"] = ids
# llm
llm = req.get("llm")
@@ -55,7 +56,7 @@ def create(tenant_id):
req["llm_setting"] = req.pop("llm")
e, tenant = TenantService.get_by_id(tenant_id)
if not e:
-return get_error_data_result(retmsg="Tenant not found!")
+return get_error_data_result(message="Tenant not found!")
# prompt
prompt = req.get("prompt")
key_mapping = {"parameters": "variables",
@@ -86,12 +87,12 @@ def create(tenant_id):
if not req.get("llm_id"):
req["llm_id"] = tenant.llm_id
if not req.get("name"):
-return get_error_data_result(retmsg="`name` is required.")
+return get_error_data_result(message="`name` is required.")
if DialogService.query(name=req["name"], tenant_id=tenant_id, status=StatusEnum.VALID.value):
-return get_error_data_result(retmsg="Duplicated chat name in creating chat.")
+return get_error_data_result(message="Duplicated chat name in creating chat.")
# tenant_id
if req.get("tenant_id"):
-return get_error_data_result(retmsg="`tenant_id` must not be provided.")
+return get_error_data_result(message="`tenant_id` must not be provided.")
req["tenant_id"] = tenant_id
# prompt more parameter
default_prompt = {
@@ -110,21 +111,21 @@ def create(tenant_id):
req['prompt_config'] = {}
for key in key_list_2:
temp = req['prompt_config'].get(key)
-if not temp:
+if (not temp and key == 'system') or (key not in req["prompt_config"]):
req['prompt_config'][key] = default_prompt[key]
for p in req['prompt_config']["parameters"]:
if p["optional"]:
continue
if req['prompt_config']["system"].find("{%s}" % p["key"]) < 0:
return get_error_data_result(
-retmsg="Parameter '{}' is not used".format(p["key"]))
+message="Parameter '{}' is not used".format(p["key"]))
# save
if not DialogService.save(**req):
-return get_error_data_result(retmsg="Fail to new a chat!")
+return get_error_data_result(message="Fail to new a chat!")
# response
e, res = DialogService.get_by_id(req["id"])
if not e:
-return get_error_data_result(retmsg="Fail to new a chat!")
+return get_error_data_result(message="Fail to new a chat!")
res = res.to_json()
renamed_dict = {}
for key, value in res["prompt_config"].items():
@@ -150,7 +151,7 @@ def create(tenant_id):
@token_required
def update(tenant_id,chat_id):
if not DialogService.query(tenant_id=tenant_id, id=chat_id, status=StatusEnum.VALID.value):
-return get_error_data_result(retmsg='You do not own the chat')
+return get_error_data_result(message='You do not own the chat')
req =request.json
ids = req.get("dataset_ids")
if "show_quotation" in req:
@@ -160,9 +161,10 @@ def update(tenant_id,chat_id):
return get_error_data_result("`datasets` can't be empty")
if ids:
for kb_id in ids:
-kbs = KnowledgebaseService.query(id=kb_id, tenant_id=tenant_id)
+kbs = KnowledgebaseService.accessible(kb_id=chat_id, user_id=tenant_id)
if not kbs:
return get_error_data_result(f"You don't own the dataset {kb_id}")
+kbs = KnowledgebaseService.query(id=kb_id)
kb = kbs[0]
if kb.chunk_num == 0:
return get_error_data_result(f"The dataset {kb_id} doesn't own parsed file")
@@ -170,8 +172,8 @@ def update(tenant_id,chat_id):
embd_count=list(set([kb.embd_id for kb in kbs]))
if len(embd_count) != 1 :
return get_result(
-retmsg='Datasets use different embedding models."',
-retcode=RetCode.AUTHENTICATION_ERROR)
+message='Datasets use different embedding models."',
+code=settings.RetCode.AUTHENTICATION_ERROR)
req["kb_ids"] = ids
llm = req.get("llm")
if llm:
@@ -182,7 +184,7 @@ def update(tenant_id,chat_id):
req["llm_setting"] = req.pop("llm")
e, tenant = TenantService.get_by_id(tenant_id)
if not e:
-return get_error_data_result(retmsg="Tenant not found!")
+return get_error_data_result(message="Tenant not found!")
if req.get("rerank_model"):
if not TenantLLMService.query(tenant_id=tenant_id,llm_name=req.get("rerank_model"),model_type="rerank"):
return get_error_data_result(f"`rerank_model` {req.get('rerank_model')} doesn't exist")
@@ -207,18 +209,18 @@ def update(tenant_id,chat_id):
res = res.to_json()
if "name" in req:
if not req.get("name"):
-return get_error_data_result(retmsg="`name` is not empty.")
+return get_error_data_result(message="`name` is not empty.")
if req["name"].lower() != res["name"].lower() \
and len(
DialogService.query(name=req["name"], tenant_id=tenant_id, status=StatusEnum.VALID.value)) > 0:
-return get_error_data_result(retmsg="Duplicated chat name in updating dataset.")
+return get_error_data_result(message="Duplicated chat name in updating dataset.")
if "prompt_config" in req:
res["prompt_config"].update(req["prompt_config"])
for p in res["prompt_config"]["parameters"]:
if p["optional"]:
continue
if res["prompt_config"]["system"].find("{%s}" % p["key"]) < 0:
-return get_error_data_result(retmsg="Parameter '{}' is not used".format(p["key"]))
+return get_error_data_result(message="Parameter '{}' is not used".format(p["key"]))
if "llm_setting" in req:
res["llm_setting"].update(req["llm_setting"])
req["prompt_config"] = res["prompt_config"]
@@ -229,7 +231,7 @@ def update(tenant_id,chat_id):
if "dataset_ids" in req:
req.pop("dataset_ids")
if not DialogService.update_by_id(chat_id, req):
-return get_error_data_result(retmsg="Chat not found!")
+return get_error_data_result(message="Chat not found!")
return get_result()


@@ -250,7 +252,7 @@ def delete(tenant_id):
id_list=ids
for id in id_list:
if not DialogService.query(tenant_id=tenant_id, id=id, status=StatusEnum.VALID.value):
-return get_error_data_result(retmsg=f"You don't own the chat {id}")
+return get_error_data_result(message=f"You don't own the chat {id}")
temp_dict = {"status": StatusEnum.INVALID.value}
DialogService.update_by_id(id, temp_dict)
return get_result()
@@ -260,11 +262,11 @@ def delete(tenant_id):
def list_chat(tenant_id):
id = request.args.get("id")
name = request.args.get("name")
-chat = DialogService.query(id=id,name=name,status=StatusEnum.VALID.value)
+chat = DialogService.query(id=id,name=name,status=StatusEnum.VALID.value,tenant_id=tenant_id)
if not chat:
-return get_error_data_result(retmsg="The chat doesn't exist")
+return get_error_data_result(message="The chat doesn't exist")
page_number = int(request.args.get("page", 1))
-items_per_page = int(request.args.get("page_size", 1024))
+items_per_page = int(request.args.get("page_size", 30))
orderby = request.args.get("orderby", "create_time")
if request.args.get("desc") == "False" or request.args.get("desc") == "false":
desc = False
@@ -302,10 +304,10 @@ def list_chat(tenant_id):
for kb_id in res["kb_ids"]:
kb = KnowledgebaseService.query(id=kb_id)
if not kb :
-return get_error_data_result(retmsg=f"Don't exist the kb {kb_id}")
+return get_error_data_result(message=f"Don't exist the kb {kb_id}")
kb_list.append(kb[0].to_json())
del res["kb_ids"]
res["datasets"] = kb_list
res["avatar"] = res.pop("icon")
list_assts.append(res)
-return get_result(data=list_assts)
+return get_result(data=list_assts)
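Both create() and update() above reject a chat whose bound datasets mix embedding models, since one assistant can only retrieve from a single vector space and similarity scores from different embedders are not comparable. A self-contained sketch of that check:

```python
# Sketch of the embedding-consistency rule enforced in create()/update();
# shared_embedding_model() is an illustrative name, not a codebase function.
def shared_embedding_model(kbs):
    embd_ids = {kb.embd_id for kb in kbs}
    if len(embd_ids) != 1:
        # Mirrors the "Datasets use different embedding models." rejection.
        raise ValueError("Datasets use different embedding models.")
    return embd_ids.pop()
```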
@@ -1,232 +1,531 @@
#
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

from flask import request
from api.db import StatusEnum, FileSource
from api.db.db_models import File
from api.db.services.document_service import DocumentService
from api.db.services.file2document_service import File2DocumentService
from api.db.services.file_service import FileService
from api.db.services.knowledgebase_service import KnowledgebaseService
from api.db.services.llm_service import TenantLLMService,LLMService
from api.db.services.user_service import TenantService
from api.settings import RetCode
from api.utils import get_uuid
from api.utils.api_utils import get_result, token_required, get_error_data_result, valid,get_parser_config


@manager.route('/datasets', methods=['POST'])
@token_required
def create(tenant_id):
req = request.json
e, t = TenantService.get_by_id(tenant_id)
permission = req.get("permission")
language = req.get("language")
chunk_method = req.get("chunk_method")
parser_config = req.get("parser_config")
valid_permission = ["me", "team"]
valid_language =["Chinese", "English"]
valid_chunk_method = ["naive","manual","qa","table","paper","book","laws","presentation","picture","one","knowledge_graph","email"]
check_validation=valid(permission,valid_permission,language,valid_language,chunk_method,valid_chunk_method)
if check_validation:
return check_validation
req["parser_config"]=get_parser_config(chunk_method,parser_config)
if "tenant_id" in req:
return get_error_data_result(
retmsg="`tenant_id` must not be provided")
if "chunk_count" in req or "document_count" in req:
return get_error_data_result(retmsg="`chunk_count` or `document_count` must not be provided")
if "name" not in req:
return get_error_data_result(
retmsg="`name` is not empty!")
req['id'] = get_uuid()
req["name"] = req["name"].strip()
if req["name"] == "":
return get_error_data_result(
retmsg="`name` is not empty string!")
if KnowledgebaseService.query(name=req["name"], tenant_id=tenant_id, status=StatusEnum.VALID.value):
return get_error_data_result(
retmsg="Duplicated dataset name in creating dataset.")
req["tenant_id"] = req['created_by'] = tenant_id
if not req.get("embedding_model"):
req['embedding_model'] = t.embd_id
else:
valid_embedding_models=["BAAI/bge-large-zh-v1.5","BAAI/bge-base-en-v1.5","BAAI/bge-large-en-v1.5","BAAI/bge-small-en-v1.5",
"BAAI/bge-small-zh-v1.5","jinaai/jina-embeddings-v2-base-en","jinaai/jina-embeddings-v2-small-en",
"nomic-ai/nomic-embed-text-v1.5","sentence-transformers/all-MiniLM-L6-v2","text-embedding-v2",
"text-embedding-v3","maidalun1020/bce-embedding-base_v1"]
embd_model=LLMService.query(llm_name=req["embedding_model"],model_type="embedding")
if not embd_model:
return get_error_data_result(f"`embedding_model` {req.get('embedding_model')} doesn't exist")
if embd_model:
if req["embedding_model"] not in valid_embedding_models and not TenantLLMService.query(tenant_id=tenant_id,model_type="embedding", llm_name=req.get("embedding_model")):
return get_error_data_result(f"`embedding_model` {req.get('embedding_model')} doesn't exist")
key_mapping = {
"chunk_num": "chunk_count",
"doc_num": "document_count",
"parser_id": "chunk_method",
"embd_id": "embedding_model"
}
mapped_keys = {new_key: req[old_key] for new_key, old_key in key_mapping.items() if old_key in req}
req.update(mapped_keys)
if not KnowledgebaseService.save(**req):
return get_error_data_result(retmsg="Create dataset error.(Database error)")
renamed_data = {}
e, k = KnowledgebaseService.get_by_id(req["id"])
for key, value in k.to_dict().items():
new_key = key_mapping.get(key, key)
renamed_data[new_key] = value
return get_result(data=renamed_data)

@manager.route('/datasets', methods=['DELETE'])
@token_required
def delete(tenant_id):
req = request.json
if not req:
ids=None
else:
ids=req.get("ids")
if not ids:
id_list = []
kbs=KnowledgebaseService.query(tenant_id=tenant_id)
for kb in kbs:
id_list.append(kb.id)
else:
id_list=ids
for id in id_list:
kbs = KnowledgebaseService.query(id=id, tenant_id=tenant_id)
if not kbs:
return get_error_data_result(retmsg=f"You don't own the dataset {id}")
for doc in DocumentService.query(kb_id=id):
if not DocumentService.remove_document(doc, tenant_id):
return get_error_data_result(
retmsg="Remove document error.(Database error)")
f2d = File2DocumentService.get_by_document_id(doc.id)
FileService.filter_delete([File.source_type == FileSource.KNOWLEDGEBASE, File.id == f2d[0].file_id])
File2DocumentService.delete_by_document_id(doc.id)
if not KnowledgebaseService.delete_by_id(id):
return get_error_data_result(
retmsg="Delete dataset error.(Database error)")
return get_result(retcode=RetCode.SUCCESS)

@manager.route('/datasets/<dataset_id>', methods=['PUT'])
@token_required
def update(tenant_id,dataset_id):
if not KnowledgebaseService.query(id=dataset_id,tenant_id=tenant_id):
return get_error_data_result(retmsg="You don't own the dataset")
req = request.json
e, t = TenantService.get_by_id(tenant_id)
invalid_keys = {"id", "embd_id", "chunk_num", "doc_num", "parser_id"}
if any(key in req for key in invalid_keys):
return get_error_data_result(retmsg="The input parameters are invalid.")
permission = req.get("permission")
language = req.get("language")
chunk_method = req.get("chunk_method")
parser_config = req.get("parser_config")
valid_permission = ["me", "team"]
valid_language = ["Chinese", "English"]
valid_chunk_method = ["naive", "manual", "qa", "table", "paper", "book", "laws", "presentation", "picture", "one",
"knowledge_graph", "email"]
check_validation = valid(permission, valid_permission, language, valid_language, chunk_method, valid_chunk_method)
if check_validation:
return check_validation
if "tenant_id" in req:
if req["tenant_id"] != tenant_id:
return get_error_data_result(
retmsg="Can't change `tenant_id`.")
e, kb = KnowledgebaseService.get_by_id(dataset_id)
|
||||
if "parser_config" in req:
|
||||
temp_dict=kb.parser_config
|
||||
temp_dict.update(req["parser_config"])
|
||||
req["parser_config"] = temp_dict
|
||||
if "chunk_count" in req:
|
||||
if req["chunk_count"] != kb.chunk_num:
|
||||
return get_error_data_result(
|
||||
retmsg="Can't change `chunk_count`.")
|
||||
req.pop("chunk_count")
|
||||
if "document_count" in req:
|
||||
if req['document_count'] != kb.doc_num:
|
||||
return get_error_data_result(
|
||||
retmsg="Can't change `document_count`.")
|
||||
req.pop("document_count")
|
||||
if "chunk_method" in req:
|
||||
if kb.chunk_num != 0 and req['chunk_method'] != kb.parser_id:
|
||||
return get_error_data_result(
|
||||
retmsg="If `chunk_count` is not 0, `chunk_method` is not changeable.")
|
||||
req['parser_id'] = req.pop('chunk_method')
|
||||
if req['parser_id'] != kb.parser_id:
|
||||
if not req.get("parser_config"):
|
||||
req["parser_config"] = get_parser_config(chunk_method, parser_config)
|
||||
if "embedding_model" in req:
|
||||
if kb.chunk_num != 0 and req['embedding_model'] != kb.embd_id:
|
||||
return get_error_data_result(
|
||||
retmsg="If `chunk_count` is not 0, `embedding_model` is not changeable.")
|
||||
if not req.get("embedding_model"):
|
||||
return get_error_data_result("`embedding_model` can't be empty")
|
||||
valid_embedding_models=["BAAI/bge-large-zh-v1.5","BAAI/bge-base-en-v1.5","BAAI/bge-large-en-v1.5","BAAI/bge-small-en-v1.5",
|
||||
"BAAI/bge-small-zh-v1.5","jinaai/jina-embeddings-v2-base-en","jinaai/jina-embeddings-v2-small-en",
|
||||
"nomic-ai/nomic-embed-text-v1.5","sentence-transformers/all-MiniLM-L6-v2","text-embedding-v2",
|
||||
"text-embedding-v3","maidalun1020/bce-embedding-base_v1"]
|
||||
embd_model=LLMService.query(llm_name=req["embedding_model"],model_type="embedding")
|
||||
if not embd_model:
|
||||
return get_error_data_result(f"`embedding_model` {req.get('embedding_model')} doesn't exist")
|
||||
if embd_model:
|
||||
if req["embedding_model"] not in valid_embedding_models and not TenantLLMService.query(tenant_id=tenant_id,model_type="embedding", llm_name=req.get("embedding_model")):
|
||||
return get_error_data_result(f"`embedding_model` {req.get('embedding_model')} doesn't exist")
|
||||
req['embd_id'] = req.pop('embedding_model')
|
||||
if "name" in req:
|
||||
req["name"] = req["name"].strip()
|
||||
if req["name"].lower() != kb.name.lower() \
|
||||
and len(KnowledgebaseService.query(name=req["name"], tenant_id=tenant_id,
|
||||
status=StatusEnum.VALID.value)) > 0:
|
||||
return get_error_data_result(
|
||||
retmsg="Duplicated dataset name in updating dataset.")
|
||||
if not KnowledgebaseService.update_by_id(kb.id, req):
|
||||
return get_error_data_result(retmsg="Update dataset error.(Database error)")
|
||||
return get_result(retcode=RetCode.SUCCESS)
|
||||
|
||||
@manager.route('/datasets', methods=['GET'])
|
||||
@token_required
|
||||
def list(tenant_id):
|
||||
id = request.args.get("id")
|
||||
name = request.args.get("name")
|
||||
kbs = KnowledgebaseService.query(id=id,name=name,status=1)
|
||||
if not kbs:
|
||||
return get_error_data_result(retmsg="The dataset doesn't exist")
|
||||
page_number = int(request.args.get("page", 1))
|
||||
items_per_page = int(request.args.get("page_size", 1024))
|
||||
orderby = request.args.get("orderby", "create_time")
|
||||
if request.args.get("desc") == "False" or request.args.get("desc") == "false" :
|
||||
desc = False
|
||||
else:
|
||||
desc = True
|
||||
tenants = TenantService.get_joined_tenants_by_user_id(tenant_id)
|
||||
kbs = KnowledgebaseService.get_list(
|
||||
[m["tenant_id"] for m in tenants], tenant_id, page_number, items_per_page, orderby, desc, id, name)
|
||||
renamed_list = []
|
||||
for kb in kbs:
|
||||
key_mapping = {
|
||||
"chunk_num": "chunk_count",
|
||||
"doc_num": "document_count",
|
||||
"parser_id": "chunk_method",
|
||||
"embd_id": "embedding_model"
|
||||
}
|
||||
renamed_data = {}
|
||||
for key, value in kb.items():
|
||||
new_key = key_mapping.get(key, key)
|
||||
renamed_data[new_key] = value
|
||||
renamed_list.append(renamed_data)
|
||||
return get_result(data=renamed_list)
|
||||
#
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

from flask import request

from api.db import StatusEnum, FileSource
from api.db.db_models import File
from api.db.services.document_service import DocumentService
from api.db.services.file2document_service import File2DocumentService
from api.db.services.file_service import FileService
from api.db.services.knowledgebase_service import KnowledgebaseService
from api.db.services.llm_service import TenantLLMService, LLMService
from api.db.services.user_service import TenantService
from api import settings
from api.utils import get_uuid
from api.utils.api_utils import (
    get_result,
    token_required,
    get_error_data_result,
    valid,
    get_parser_config,
)


@manager.route("/datasets", methods=["POST"])
@token_required
def create(tenant_id):
    """
    Create a new dataset.
    ---
    tags:
      - Datasets
    security:
      - ApiKeyAuth: []
    parameters:
      - in: header
        name: Authorization
        type: string
        required: true
        description: Bearer token for authentication.
      - in: body
        name: body
        description: Dataset creation parameters.
        required: true
        schema:
          type: object
          required:
            - name
          properties:
            name:
              type: string
              description: Name of the dataset.
            permission:
              type: string
              enum: ['me', 'team']
              description: Dataset permission.
            language:
              type: string
              enum: ['Chinese', 'English']
              description: Language of the dataset.
            chunk_method:
              type: string
              enum: ["naive", "manual", "qa", "table", "paper", "book", "laws",
                     "presentation", "picture", "one", "knowledge_graph", "email"]
              description: Chunking method.
            parser_config:
              type: object
              description: Parser configuration.
    responses:
      200:
        description: Successful operation.
        schema:
          type: object
          properties:
            data:
              type: object
    """
    req = request.json
    e, t = TenantService.get_by_id(tenant_id)
    permission = req.get("permission")
    language = req.get("language")
    chunk_method = req.get("chunk_method")
    parser_config = req.get("parser_config")
    valid_permission = ["me", "team"]
    valid_language = ["Chinese", "English"]
    valid_chunk_method = [
        "naive",
        "manual",
        "qa",
        "table",
        "paper",
        "book",
        "laws",
        "presentation",
        "picture",
        "one",
        "knowledge_graph",
        "email",
    ]
    check_validation = valid(
        permission,
        valid_permission,
        language,
        valid_language,
        chunk_method,
        valid_chunk_method,
    )
    if check_validation:
        return check_validation
    req["parser_config"] = get_parser_config(chunk_method, parser_config)
    if "tenant_id" in req:
        return get_error_data_result(message="`tenant_id` must not be provided")
    if "chunk_count" in req or "document_count" in req:
        return get_error_data_result(
            message="`chunk_count` or `document_count` must not be provided"
        )
    if "name" not in req:
        return get_error_data_result(message="`name` is not empty!")
    req["id"] = get_uuid()
    req["name"] = req["name"].strip()
    if req["name"] == "":
        return get_error_data_result(message="`name` is not empty string!")
    if KnowledgebaseService.query(
        name=req["name"], tenant_id=tenant_id, status=StatusEnum.VALID.value
    ):
        return get_error_data_result(
            message="Duplicated dataset name in creating dataset."
        )
    req["tenant_id"] = req["created_by"] = tenant_id
    if not req.get("embedding_model"):
        req["embedding_model"] = t.embd_id
    else:
        valid_embedding_models = [
            "BAAI/bge-large-zh-v1.5",
            "BAAI/bge-base-en-v1.5",
            "BAAI/bge-large-en-v1.5",
            "BAAI/bge-small-en-v1.5",
            "BAAI/bge-small-zh-v1.5",
            "jinaai/jina-embeddings-v2-base-en",
            "jinaai/jina-embeddings-v2-small-en",
            "nomic-ai/nomic-embed-text-v1.5",
            "sentence-transformers/all-MiniLM-L6-v2",
            "text-embedding-v2",
            "text-embedding-v3",
            "maidalun1020/bce-embedding-base_v1",
        ]
        embd_model = LLMService.query(
            llm_name=req["embedding_model"], model_type="embedding"
        )
        if embd_model:
            if req["embedding_model"] not in valid_embedding_models and not TenantLLMService.query(tenant_id=tenant_id, model_type="embedding", llm_name=req.get("embedding_model"),):
                return get_error_data_result(f"`embedding_model` {req.get('embedding_model')} doesn't exist")
        if not embd_model:
            embd_model = TenantLLMService.query(tenant_id=tenant_id, model_type="embedding", llm_name=req.get("embedding_model"))
            if not embd_model:
                return get_error_data_result(
                    f"`embedding_model` {req.get('embedding_model')} doesn't exist"
                )
    key_mapping = {
        "chunk_num": "chunk_count",
        "doc_num": "document_count",
        "parser_id": "chunk_method",
        "embd_id": "embedding_model",
    }
    mapped_keys = {
        new_key: req[old_key]
        for new_key, old_key in key_mapping.items()
        if old_key in req
    }
    req.update(mapped_keys)
    if not KnowledgebaseService.save(**req):
        return get_error_data_result(message="Create dataset error.(Database error)")
    renamed_data = {}
    e, k = KnowledgebaseService.get_by_id(req["id"])
    for key, value in k.to_dict().items():
        new_key = key_mapping.get(key, key)
        renamed_data[new_key] = value
    return get_result(data=renamed_data)
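
# Example (editor's hedged sketch, not part of the compared commits): calling the
# POST /datasets endpoint above over HTTP. It assumes the SDK blueprint is served
# under /api/v1 at http://localhost:9380 and that `api_key` holds a valid RAGFlow
# API token; both values are illustrative assumptions, as is the `requests` library.
def _example_create_dataset(api_key="YOUR_API_KEY"):
    import requests  # third-party: pip install requests
    resp = requests.post(
        "http://localhost:9380/api/v1/datasets",
        headers={"Authorization": f"Bearer {api_key}"},
        json={"name": "demo_kb", "chunk_method": "naive", "permission": "me"},
    )
    # On success the handler returns {"code": 0, "data": {...}} with the renamed
    # keys chunk_count / document_count / chunk_method / embedding_model.
    return resp.json()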


@manager.route("/datasets", methods=["DELETE"])
@token_required
def delete(tenant_id):
    """
    Delete datasets.
    ---
    tags:
      - Datasets
    security:
      - ApiKeyAuth: []
    parameters:
      - in: header
        name: Authorization
        type: string
        required: true
        description: Bearer token for authentication.
      - in: body
        name: body
        description: Dataset deletion parameters.
        required: true
        schema:
          type: object
          properties:
            ids:
              type: array
              items:
                type: string
              description: List of dataset IDs to delete.
    responses:
      200:
        description: Successful operation.
        schema:
          type: object
    """
    req = request.json
    if not req:
        ids = None
    else:
        ids = req.get("ids")
    if not ids:
        id_list = []
        kbs = KnowledgebaseService.query(tenant_id=tenant_id)
        for kb in kbs:
            id_list.append(kb.id)
    else:
        id_list = ids
    for id in id_list:
        kbs = KnowledgebaseService.query(id=id, tenant_id=tenant_id)
        if not kbs:
            return get_error_data_result(message=f"You don't own the dataset {id}")
        for doc in DocumentService.query(kb_id=id):
            if not DocumentService.remove_document(doc, tenant_id):
                return get_error_data_result(
                    message="Remove document error.(Database error)"
                )
            f2d = File2DocumentService.get_by_document_id(doc.id)
            FileService.filter_delete(
                [
                    File.source_type == FileSource.KNOWLEDGEBASE,
                    File.id == f2d[0].file_id,
                ]
            )
            FileService.filter_delete(
                [File.source_type == FileSource.KNOWLEDGEBASE, File.type == "folder", File.name == kbs[0].name])
            File2DocumentService.delete_by_document_id(doc.id)
        if not KnowledgebaseService.delete_by_id(id):
            return get_error_data_result(message="Delete dataset error.(Database error)")
    return get_result(code=settings.RetCode.SUCCESS)
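
# Example (editor's hedged sketch, not part of the compared commits): deleting
# datasets by ID. Note that, per the handler above, sending no "ids" deletes every
# dataset owned by the tenant. URL, port, and token are illustrative assumptions.
def _example_delete_datasets(api_key="YOUR_API_KEY", dataset_ids=("id1", "id2")):
    import requests
    resp = requests.delete(
        "http://localhost:9380/api/v1/datasets",
        headers={"Authorization": f"Bearer {api_key}"},
        json={"ids": list(dataset_ids)},
    )
    return resp.json()  # {"code": 0} on success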


@manager.route("/datasets/<dataset_id>", methods=["PUT"])
@token_required
def update(tenant_id, dataset_id):
    """
    Update a dataset.
    ---
    tags:
      - Datasets
    security:
      - ApiKeyAuth: []
    parameters:
      - in: path
        name: dataset_id
        type: string
        required: true
        description: ID of the dataset to update.
      - in: header
        name: Authorization
        type: string
        required: true
        description: Bearer token for authentication.
      - in: body
        name: body
        description: Dataset update parameters.
        required: true
        schema:
          type: object
          properties:
            name:
              type: string
              description: New name of the dataset.
            permission:
              type: string
              enum: ['me', 'team']
              description: Updated permission.
            language:
              type: string
              enum: ['Chinese', 'English']
              description: Updated language.
            chunk_method:
              type: string
              enum: ["naive", "manual", "qa", "table", "paper", "book", "laws",
                     "presentation", "picture", "one", "knowledge_graph", "email"]
              description: Updated chunking method.
            parser_config:
              type: object
              description: Updated parser configuration.
    responses:
      200:
        description: Successful operation.
        schema:
          type: object
    """
    if not KnowledgebaseService.query(id=dataset_id, tenant_id=tenant_id):
        return get_error_data_result(message="You don't own the dataset")
    req = request.json
    e, t = TenantService.get_by_id(tenant_id)
    invalid_keys = {"id", "embd_id", "chunk_num", "doc_num", "parser_id"}
    if any(key in req for key in invalid_keys):
        return get_error_data_result(message="The input parameters are invalid.")
    permission = req.get("permission")
    language = req.get("language")
    chunk_method = req.get("chunk_method")
    parser_config = req.get("parser_config")
    valid_permission = ["me", "team"]
    valid_language = ["Chinese", "English"]
    valid_chunk_method = [
        "naive",
        "manual",
        "qa",
        "table",
        "paper",
        "book",
        "laws",
        "presentation",
        "picture",
        "one",
        "knowledge_graph",
        "email",
    ]
    check_validation = valid(
        permission,
        valid_permission,
        language,
        valid_language,
        chunk_method,
        valid_chunk_method,
    )
    if check_validation:
        return check_validation
    if "tenant_id" in req:
        if req["tenant_id"] != tenant_id:
            return get_error_data_result(message="Can't change `tenant_id`.")
    e, kb = KnowledgebaseService.get_by_id(dataset_id)
    if "parser_config" in req:
        temp_dict = kb.parser_config
        temp_dict.update(req["parser_config"])
        req["parser_config"] = temp_dict
    if "chunk_count" in req:
        if req["chunk_count"] != kb.chunk_num:
            return get_error_data_result(message="Can't change `chunk_count`.")
        req.pop("chunk_count")
    if "document_count" in req:
        if req["document_count"] != kb.doc_num:
            return get_error_data_result(message="Can't change `document_count`.")
        req.pop("document_count")
    if "chunk_method" in req:
        if kb.chunk_num != 0 and req["chunk_method"] != kb.parser_id:
            return get_error_data_result(
                message="If `chunk_count` is not 0, `chunk_method` is not changeable."
            )
        req["parser_id"] = req.pop("chunk_method")
        if req["parser_id"] != kb.parser_id:
            if not req.get("parser_config"):
                req["parser_config"] = get_parser_config(chunk_method, parser_config)
    if "embedding_model" in req:
        if kb.chunk_num != 0 and req["embedding_model"] != kb.embd_id:
            return get_error_data_result(
                message="If `chunk_count` is not 0, `embedding_model` is not changeable."
            )
        if not req.get("embedding_model"):
            return get_error_data_result("`embedding_model` can't be empty")
        valid_embedding_models = [
            "BAAI/bge-large-zh-v1.5",
            "BAAI/bge-base-en-v1.5",
            "BAAI/bge-large-en-v1.5",
            "BAAI/bge-small-en-v1.5",
            "BAAI/bge-small-zh-v1.5",
            "jinaai/jina-embeddings-v2-base-en",
            "jinaai/jina-embeddings-v2-small-en",
            "nomic-ai/nomic-embed-text-v1.5",
            "sentence-transformers/all-MiniLM-L6-v2",
            "text-embedding-v2",
            "text-embedding-v3",
            "maidalun1020/bce-embedding-base_v1",
        ]
        embd_model = LLMService.query(
            llm_name=req["embedding_model"], model_type="embedding"
        )
        if embd_model:
            if req["embedding_model"] not in valid_embedding_models and not TenantLLMService.query(tenant_id=tenant_id, model_type="embedding", llm_name=req.get("embedding_model"),):
                return get_error_data_result(f"`embedding_model` {req.get('embedding_model')} doesn't exist")
        if not embd_model:
            embd_model = TenantLLMService.query(tenant_id=tenant_id, model_type="embedding", llm_name=req.get("embedding_model"))

            if not embd_model:
                return get_error_data_result(
                    f"`embedding_model` {req.get('embedding_model')} doesn't exist"
                )
        req["embd_id"] = req.pop("embedding_model")
    if "name" in req:
        req["name"] = req["name"].strip()
        if (
            req["name"].lower() != kb.name.lower()
            and len(
                KnowledgebaseService.query(
                    name=req["name"], tenant_id=tenant_id, status=StatusEnum.VALID.value
                )
            )
            > 0
        ):
            return get_error_data_result(
                message="Duplicated dataset name in updating dataset."
            )
    if not KnowledgebaseService.update_by_id(kb.id, req):
        return get_error_data_result(message="Update dataset error.(Database error)")
    return get_result(code=settings.RetCode.SUCCESS)
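
# Example (editor's hedged sketch, not part of the compared commits): renaming a
# dataset. Per the handler above, `chunk_method` and `embedding_model` are only
# changeable while chunk_count is still 0. URL and token are illustrative.
def _example_update_dataset(api_key="YOUR_API_KEY", dataset_id="DATASET_ID"):
    import requests
    resp = requests.put(
        f"http://localhost:9380/api/v1/datasets/{dataset_id}",
        headers={"Authorization": f"Bearer {api_key}"},
        json={"name": "renamed_kb"},
    )
    return resp.json()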


@manager.route("/datasets", methods=["GET"])
@token_required
def list(tenant_id):
    """
    List datasets.
    ---
    tags:
      - Datasets
    security:
      - ApiKeyAuth: []
    parameters:
      - in: query
        name: id
        type: string
        required: false
        description: Dataset ID to filter.
      - in: query
        name: name
        type: string
        required: false
        description: Dataset name to filter.
      - in: query
        name: page
        type: integer
        required: false
        default: 1
        description: Page number.
      - in: query
        name: page_size
        type: integer
        required: false
        default: 30
        description: Number of items per page.
      - in: query
        name: orderby
        type: string
        required: false
        default: "create_time"
        description: Field to order by.
      - in: query
        name: desc
        type: boolean
        required: false
        default: true
        description: Order in descending.
      - in: header
        name: Authorization
        type: string
        required: true
        description: Bearer token for authentication.
    responses:
      200:
        description: Successful operation.
        schema:
          type: array
          items:
            type: object
    """
    id = request.args.get("id")
    name = request.args.get("name")
    if id:
        kbs = KnowledgebaseService.get_kb_by_id(id, tenant_id)
        if not kbs:
            return get_error_data_result(f"You don't own the dataset {id}")
    if name:
        kbs = KnowledgebaseService.get_kb_by_name(name, tenant_id)
        if not kbs:
            return get_error_data_result(f"You don't own the dataset {name}")
    page_number = int(request.args.get("page", 1))
    items_per_page = int(request.args.get("page_size", 30))
    orderby = request.args.get("orderby", "create_time")
    if request.args.get("desc") == "False" or request.args.get("desc") == "false":
        desc = False
    else:
        desc = True
    tenants = TenantService.get_joined_tenants_by_user_id(tenant_id)
    kbs = KnowledgebaseService.get_list(
        [m["tenant_id"] for m in tenants],
        tenant_id,
        page_number,
        items_per_page,
        orderby,
        desc,
        id,
        name,
    )
    renamed_list = []
    for kb in kbs:
        key_mapping = {
            "chunk_num": "chunk_count",
            "doc_num": "document_count",
            "parser_id": "chunk_method",
            "embd_id": "embedding_model",
        }
        renamed_data = {}
        for key, value in kb.items():
            new_key = key_mapping.get(key, key)
            renamed_data[new_key] = value
        renamed_list.append(renamed_data)
    return get_result(data=renamed_list)
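
# Example (editor's hedged sketch, not part of the compared commits): listing
# datasets with the pagination parameters accepted above. Note that "desc" is
# compared against the literal strings "False"/"false". URL/token are illustrative.
def _example_list_datasets(api_key="YOUR_API_KEY"):
    import requests
    resp = requests.get(
        "http://localhost:9380/api/v1/datasets",
        headers={"Authorization": f"Bearer {api_key}"},
        params={"page": 1, "page_size": 30, "orderby": "create_time", "desc": "true"},
    )
    return resp.json()["data"]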

@@ -18,7 +18,7 @@ from flask import request, jsonify
 from api.db import LLMType, ParserType
 from api.db.services.knowledgebase_service import KnowledgebaseService
 from api.db.services.llm_service import LLMBundle
-from api.settings import retrievaler, kg_retrievaler, RetCode
+from api import settings
 from api.utils.api_utils import validate_request, build_error_result, apikey_required

@@ -37,14 +37,14 @@ def retrieval(tenant_id):

     e, kb = KnowledgebaseService.get_by_id(kb_id)
     if not e:
-        return build_error_result(error_msg="Knowledgebase not found!", retcode=RetCode.NOT_FOUND)
+        return build_error_result(message="Knowledgebase not found!", code=settings.RetCode.NOT_FOUND)

     if kb.tenant_id != tenant_id:
-        return build_error_result(error_msg="Knowledgebase not found!", retcode=RetCode.NOT_FOUND)
+        return build_error_result(message="Knowledgebase not found!", code=settings.RetCode.NOT_FOUND)

     embd_mdl = LLMBundle(kb.tenant_id, LLMType.EMBEDDING.value, llm_name=kb.embd_id)

-    retr = retrievaler if kb.parser_id != ParserType.KG else kg_retrievaler
+    retr = settings.retrievaler if kb.parser_id != ParserType.KG else settings.kg_retrievaler
     ranks = retr.retrieval(
         question,
         embd_mdl,
@@ -58,8 +58,7 @@ def retrieval(tenant_id):
     )
     records = []
     for c in ranks["chunks"]:
-        if "vector" in c:
-            del c["vector"]
+        c.pop("vector", None)
         records.append({
             "content": c["content_ltks"],
             "score": c["similarity"],
@@ -71,7 +70,7 @@ def retrieval(tenant_id):
     except Exception as e:
         if str(e).find("not_found") > 0:
             return build_error_result(
-                error_msg=f'No chunk found! Check the chunk status please!',
-                retcode=RetCode.NOT_FOUND
+                message='No chunk found! Check the chunk status please!',
+                code=settings.RetCode.NOT_FOUND
             )
-        return build_error_result(error_msg=str(e), retcode=RetCode.SERVER_ERROR)
+        return build_error_result(message=str(e), code=settings.RetCode.SERVER_ERROR)

api/apps/sdk/doc.py: 1230 changed lines (file diff suppressed because it is too large).
@@ -1,237 +1,531 @@
#
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import json
from uuid import uuid4

from flask import request, Response

from api.db import StatusEnum
from api.db.services.dialog_service import DialogService, ConversationService, chat
from api.utils import get_uuid
from api.utils.api_utils import get_error_data_result
from api.utils.api_utils import get_result, token_required


@manager.route('/chats/<chat_id>/sessions', methods=['POST'])
@token_required
def create(tenant_id, chat_id):
    req = request.json
    req["dialog_id"] = chat_id
    dia = DialogService.query(tenant_id=tenant_id, id=req["dialog_id"], status=StatusEnum.VALID.value)
    if not dia:
        return get_error_data_result(retmsg="You do not own the assistant")
    conv = {
        "id": get_uuid(),
        "dialog_id": req["dialog_id"],
        "name": req.get("name", "New session"),
        "message": [{"role": "assistant", "content": "Hi! I am your assistant,can I help you?"}]
    }
    if not conv.get("name"):
        return get_error_data_result(retmsg="`name` can not be empty.")
    ConversationService.save(**conv)
    e, conv = ConversationService.get_by_id(conv["id"])
    if not e:
        return get_error_data_result(retmsg="Fail to create a session!")
    conv = conv.to_dict()
    conv['messages'] = conv.pop("message")
    conv["chat_id"] = conv.pop("dialog_id")
    del conv["reference"]
    return get_result(data=conv)


@manager.route('/chats/<chat_id>/sessions/<session_id>', methods=['PUT'])
@token_required
def update(tenant_id, chat_id, session_id):
    req = request.json
    req["dialog_id"] = chat_id
    conv_id = session_id
    conv = ConversationService.query(id=conv_id, dialog_id=chat_id)
    if not conv:
        return get_error_data_result(retmsg="Session does not exist")
    if not DialogService.query(id=chat_id, tenant_id=tenant_id, status=StatusEnum.VALID.value):
        return get_error_data_result(retmsg="You do not own the session")
    if "message" in req or "messages" in req:
        return get_error_data_result(retmsg="`message` can not be change")
    if "reference" in req:
        return get_error_data_result(retmsg="`reference` can not be change")
    if "name" in req and not req.get("name"):
        return get_error_data_result(retmsg="`name` can not be empty.")
    if not ConversationService.update_by_id(conv_id, req):
        return get_error_data_result(retmsg="Session updates error")
    return get_result()


@manager.route('/chats/<chat_id>/completions', methods=['POST'])
@token_required
def completion(tenant_id, chat_id):
    req = request.json
    if not req.get("session_id"):
        conv = {
            "id": get_uuid(),
            "dialog_id": chat_id,
            "name": req.get("name", "New session"),
            "message": [{"role": "assistant", "content": "Hi! I am your assistant,can I help you?"}]
        }
        if not conv.get("name"):
            return get_error_data_result(retmsg="`name` can not be empty.")
        ConversationService.save(**conv)
        e, conv = ConversationService.get_by_id(conv["id"])
        session_id = conv.id
    else:
        session_id = req.get("session_id")
    if not req.get("question"):
        return get_error_data_result(retmsg="Please input your question.")
    conv = ConversationService.query(id=session_id, dialog_id=chat_id)
    if not conv:
        return get_error_data_result(retmsg="Session does not exist")
    conv = conv[0]
    if not DialogService.query(id=chat_id, tenant_id=tenant_id, status=StatusEnum.VALID.value):
        return get_error_data_result(retmsg="You do not own the chat")
    msg = []
    question = {
        "content": req.get("question"),
        "role": "user",
        "id": str(uuid4())
    }
    conv.message.append(question)
    for m in conv.message:
        if m["role"] == "system": continue
        if m["role"] == "assistant" and not msg: continue
        msg.append(m)
    message_id = msg[-1].get("id")
    e, dia = DialogService.get_by_id(conv.dialog_id)

    if not conv.reference:
        conv.reference = []
    conv.message.append({"role": "assistant", "content": "", "id": message_id})
    conv.reference.append({"chunks": [], "doc_aggs": []})

    def fillin_conv(ans):
        nonlocal conv, message_id
        if not conv.reference:
            conv.reference.append(ans["reference"])
        else:
            conv.reference[-1] = ans["reference"]
        conv.message[-1] = {"role": "assistant", "content": ans["answer"],
                            "id": message_id, "prompt": ans.get("prompt", "")}
        ans["id"] = message_id
        ans["session_id"] = session_id

    def stream():
        nonlocal dia, msg, req, conv
        try:
            for ans in chat(dia, msg, **req):
                fillin_conv(ans)
                yield "data:" + json.dumps({"code": 0, "data": ans}, ensure_ascii=False) + "\n\n"
            ConversationService.update_by_id(conv.id, conv.to_dict())
        except Exception as e:
            yield "data:" + json.dumps({"code": 500, "message": str(e),
                                        "data": {"answer": "**ERROR**: " + str(e), "reference": []}},
                                       ensure_ascii=False) + "\n\n"
        yield "data:" + json.dumps({"code": 0, "data": True}, ensure_ascii=False) + "\n\n"

    if req.get("stream", True):
        resp = Response(stream(), mimetype="text/event-stream")
        resp.headers.add_header("Cache-control", "no-cache")
        resp.headers.add_header("Connection", "keep-alive")
        resp.headers.add_header("X-Accel-Buffering", "no")
        resp.headers.add_header("Content-Type", "text/event-stream; charset=utf-8")
        return resp

    else:
        answer = None
        for ans in chat(dia, msg, **req):
            answer = ans
            fillin_conv(ans)
            ConversationService.update_by_id(conv.id, conv.to_dict())
            break
        return get_result(data=answer)


@manager.route('/chats/<chat_id>/sessions', methods=['GET'])
@token_required
def list(chat_id, tenant_id):
    if not DialogService.query(tenant_id=tenant_id, id=chat_id, status=StatusEnum.VALID.value):
        return get_error_data_result(retmsg=f"You don't own the assistant {chat_id}.")
    id = request.args.get("id")
    name = request.args.get("name")
    page_number = int(request.args.get("page", 1))
    items_per_page = int(request.args.get("page_size", 1024))
    orderby = request.args.get("orderby", "create_time")
    if request.args.get("desc") == "False" or request.args.get("desc") == "false":
        desc = False
    else:
        desc = True
    convs = ConversationService.get_list(chat_id, page_number, items_per_page, orderby, desc, id, name)
    if not convs:
        return get_result(data=[])
    for conv in convs:
        conv['messages'] = conv.pop("message")
        infos = conv["messages"]
        for info in infos:
            if "prompt" in info:
                info.pop("prompt")
        conv["chat"] = conv.pop("dialog_id")
        if conv["reference"]:
            messages = conv["messages"]
            message_num = 0
            chunk_num = 0
            while message_num < len(messages):
                if message_num != 0 and messages[message_num]["role"] != "user":
                    chunk_list = []
                    if "chunks" in conv["reference"][chunk_num]:
                        chunks = conv["reference"][chunk_num]["chunks"]
                        for chunk in chunks:
                            new_chunk = {
                                "id": chunk["chunk_id"],
                                "content": chunk["content_with_weight"],
                                "document_id": chunk["doc_id"],
                                "document_name": chunk["docnm_kwd"],
                                "dataset_id": chunk["kb_id"],
                                "image_id": chunk["img_id"],
                                "similarity": chunk["similarity"],
                                "vector_similarity": chunk["vector_similarity"],
                                "term_similarity": chunk["term_similarity"],
                                "positions": chunk["positions"],
                            }
                            chunk_list.append(new_chunk)
                    chunk_num += 1
                    messages[message_num]["reference"] = chunk_list
                message_num += 1
        del conv["reference"]
    return get_result(data=convs)


@manager.route('/chats/<chat_id>/sessions', methods=["DELETE"])
@token_required
def delete(tenant_id, chat_id):
    if not DialogService.query(id=chat_id, tenant_id=tenant_id, status=StatusEnum.VALID.value):
        return get_error_data_result(retmsg="You don't own the chat")
    req = request.json
    convs = ConversationService.query(dialog_id=chat_id)
    if not req:
        ids = None
    else:
        ids = req.get("ids")

    if not ids:
        conv_list = []
        for conv in convs:
            conv_list.append(conv.id)
    else:
        conv_list = ids
    for id in conv_list:
        conv = ConversationService.query(id=id, dialog_id=chat_id)
        if not conv:
            return get_error_data_result(retmsg="The chat doesn't own the session")
        ConversationService.delete_by_id(id)
    return get_result()
#
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import re
import json
from copy import deepcopy
from uuid import uuid4
from api.db import LLMType
from flask import request, Response
from api.db.services.dialog_service import ask
from agent.canvas import Canvas
from api.db import StatusEnum
from api.db.db_models import API4Conversation
from api.db.services.api_service import API4ConversationService
from api.db.services.canvas_service import UserCanvasService
from api.db.services.dialog_service import DialogService, ConversationService, chat
from api.db.services.knowledgebase_service import KnowledgebaseService
from api.utils import get_uuid
from api.utils.api_utils import get_error_data_result
from api.utils.api_utils import get_result, token_required
from api.db.services.llm_service import LLMBundle


@manager.route('/chats/<chat_id>/sessions', methods=['POST'])
@token_required
def create(tenant_id, chat_id):
    req = request.json
    req["dialog_id"] = chat_id
    dia = DialogService.query(tenant_id=tenant_id, id=req["dialog_id"], status=StatusEnum.VALID.value)
    if not dia:
        return get_error_data_result(message="You do not own the assistant.")
    conv = {
        "id": get_uuid(),
        "dialog_id": req["dialog_id"],
        "name": req.get("name", "New session"),
        "message": [{"role": "assistant", "content": "Hi! I am your assistant,can I help you?"}]
    }
    if not conv.get("name"):
        return get_error_data_result(message="`name` can not be empty.")
    ConversationService.save(**conv)
    e, conv = ConversationService.get_by_id(conv["id"])
    if not e:
        return get_error_data_result(message="Fail to create a session!")
    conv = conv.to_dict()
    conv['messages'] = conv.pop("message")
    conv["chat_id"] = conv.pop("dialog_id")
    del conv["reference"]
    return get_result(data=conv)
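
# Example (editor's hedged sketch, not part of the compared commits): opening a
# session for an existing chat assistant. The returned "id" is the session_id to
# reuse in completion calls. URL, token, and chat_id are illustrative assumptions.
def _example_create_session(api_key="YOUR_API_KEY", chat_id="CHAT_ID"):
    import requests
    resp = requests.post(
        f"http://localhost:9380/api/v1/chats/{chat_id}/sessions",
        headers={"Authorization": f"Bearer {api_key}"},
        json={"name": "my first session"},
    )
    return resp.json()["data"]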


@manager.route('/agents/<agent_id>/sessions', methods=['POST'])
@token_required
def create_agent_session(tenant_id, agent_id):
    req = request.json
    e, cvs = UserCanvasService.get_by_id(agent_id)
    if not e:
        return get_error_data_result("Agent not found.")
    if cvs.user_id != tenant_id:
        return get_error_data_result(message="You do not own the agent.")

    if not isinstance(cvs.dsl, str):
        cvs.dsl = json.dumps(cvs.dsl, ensure_ascii=False)

    canvas = Canvas(cvs.dsl, tenant_id)
    conv = {
        "id": get_uuid(),
        "dialog_id": cvs.id,
        "user_id": req.get("usr_id", "") if isinstance(req, dict) else "",
        "message": [{"role": "assistant", "content": canvas.get_prologue()}],
        "source": "agent"
    }
    API4ConversationService.save(**conv)
    conv["agent_id"] = conv.pop("dialog_id")
    return get_result(data=conv)


@manager.route('/chats/<chat_id>/sessions/<session_id>', methods=['PUT'])
@token_required
def update(tenant_id, chat_id, session_id):
    req = request.json
    req["dialog_id"] = chat_id
    conv_id = session_id
    conv = ConversationService.query(id=conv_id, dialog_id=chat_id)
    if not conv:
        return get_error_data_result(message="Session does not exist")
    if not DialogService.query(id=chat_id, tenant_id=tenant_id, status=StatusEnum.VALID.value):
        return get_error_data_result(message="You do not own the session")
    if "message" in req or "messages" in req:
        return get_error_data_result(message="`message` can not be change")
    if "reference" in req:
        return get_error_data_result(message="`reference` can not be change")
    if "name" in req and not req.get("name"):
        return get_error_data_result(message="`name` can not be empty.")
    if not ConversationService.update_by_id(conv_id, req):
        return get_error_data_result(message="Session updates error")
    return get_result()


@manager.route('/chats/<chat_id>/completions', methods=['POST'])
@token_required
def completion(tenant_id, chat_id):
    req = request.json
    if not req.get("session_id"):
        conv = {
            "id": get_uuid(),
            "dialog_id": chat_id,
            "name": req.get("name", "New session"),
            "message": [{"role": "assistant", "content": "Hi! I am your assistant,can I help you?"}]
        }
        if not conv.get("name"):
            return get_error_data_result(message="`name` can not be empty.")
        ConversationService.save(**conv)
        e, conv = ConversationService.get_by_id(conv["id"])
        session_id = conv.id
    else:
        session_id = req.get("session_id")
    if not req.get("question"):
        return get_error_data_result(message="Please input your question.")
    conv = ConversationService.query(id=session_id, dialog_id=chat_id)
    if not conv:
        return get_error_data_result(message="Session does not exist")
    conv = conv[0]
    if not DialogService.query(id=chat_id, tenant_id=tenant_id, status=StatusEnum.VALID.value):
        return get_error_data_result(message="You do not own the chat")
    msg = []
    question = {
        "content": req.get("question"),
        "role": "user",
        "id": str(uuid4())
    }
    conv.message.append(question)
    for m in conv.message:
        if m["role"] == "system": continue
        if m["role"] == "assistant" and not msg: continue
        msg.append(m)
    message_id = msg[-1].get("id")
    e, dia = DialogService.get_by_id(conv.dialog_id)

    if not conv.reference:
        conv.reference = []
    conv.message.append({"role": "assistant", "content": "", "id": message_id})
    conv.reference.append({"chunks": [], "doc_aggs": []})

    def fillin_conv(ans):
        reference = ans["reference"]
        temp_reference = deepcopy(ans["reference"])
        nonlocal conv, message_id
        if not conv.reference:
            conv.reference.append(temp_reference)
        else:
            conv.reference[-1] = temp_reference
        conv.message[-1] = {"role": "assistant", "content": ans["answer"],
                            "id": message_id, "prompt": ans.get("prompt", "")}
        if "chunks" in reference:
            chunks = reference.get("chunks")
            chunk_list = []
            for chunk in chunks:
                new_chunk = {
                    "id": chunk["chunk_id"],
                    "content": chunk["content_with_weight"],
                    "document_id": chunk["doc_id"],
                    "document_name": chunk["docnm_kwd"],
                    "dataset_id": chunk["kb_id"],
                    "image_id": chunk.get("image_id", ""),
                    "similarity": chunk["similarity"],
                    "vector_similarity": chunk["vector_similarity"],
                    "term_similarity": chunk["term_similarity"],
                    "positions": chunk.get("positions", []),
                }
                chunk_list.append(new_chunk)
            reference["chunks"] = chunk_list
        ans["id"] = message_id
        ans["session_id"] = session_id

    def stream():
        nonlocal dia, msg, req, conv
        try:
            for ans in chat(dia, msg, **req):
                fillin_conv(ans)
                yield "data:" + json.dumps({"code": 0, "data": ans}, ensure_ascii=False) + "\n\n"
            ConversationService.update_by_id(conv.id, conv.to_dict())
        except Exception as e:
            yield "data:" + json.dumps({"code": 500, "message": str(e),
                                        "data": {"answer": "**ERROR**: " + str(e), "reference": []}},
                                       ensure_ascii=False) + "\n\n"
        yield "data:" + json.dumps({"code": 0, "data": True}, ensure_ascii=False) + "\n\n"

    if req.get("stream", True):
        resp = Response(stream(), mimetype="text/event-stream")
        resp.headers.add_header("Cache-control", "no-cache")
        resp.headers.add_header("Connection", "keep-alive")
        resp.headers.add_header("X-Accel-Buffering", "no")
        resp.headers.add_header("Content-Type", "text/event-stream; charset=utf-8")

        return resp

    else:
        answer = None
        for ans in chat(dia, msg, **req):
            answer = ans
            fillin_conv(ans)
            ConversationService.update_by_id(conv.id, conv.to_dict())
            break
        return get_result(data=answer)
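
# Example (editor's hedged sketch, not part of the compared commits): consuming
# the SSE stream produced by completion() above. Each event is a line of the
# form "data:" + JSON followed by a blank line, and a final {"data": true}
# sentinel closes the stream. URL, token, and chat_id are illustrative assumptions.
def _example_stream_completion(api_key="YOUR_API_KEY", chat_id="CHAT_ID"):
    import requests
    url = f"http://localhost:9380/api/v1/chats/{chat_id}/completions"
    headers = {"Authorization": f"Bearer {api_key}"}
    payload = {"question": "What is RAGFlow?", "stream": True}
    with requests.post(url, headers=headers, json=payload, stream=True) as resp:
        for line in resp.iter_lines(decode_unicode=True):
            if not line or not line.startswith("data:"):
                continue
            event = json.loads(line[len("data:"):])
            if event.get("data") is True:  # end-of-stream sentinel
                break
            print(event["data"].get("answer", ""))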


@manager.route('/agents/<agent_id>/completions', methods=['POST'])
@token_required
def agent_completion(tenant_id, agent_id):
    req = request.json

    e, cvs = UserCanvasService.get_by_id(agent_id)
    if not e:
        return get_error_data_result("Agent not found.")
    if cvs.user_id != tenant_id:
        return get_error_data_result(message="You do not own the agent.")
    if not isinstance(cvs.dsl, str):
        cvs.dsl = json.dumps(cvs.dsl, ensure_ascii=False)
    canvas = Canvas(cvs.dsl, tenant_id)

    if not req.get("session_id"):
        session_id = get_uuid()
        conv = {
            "id": session_id,
            "dialog_id": cvs.id,
            "user_id": req.get("user_id", ""),
            "message": [{"role": "assistant", "content": canvas.get_prologue()}],
            "source": "agent"
        }
        API4ConversationService.save(**conv)
        conv = API4Conversation(**conv)
    else:
        session_id = req.get("session_id")
        e, conv = API4ConversationService.get_by_id(req["session_id"])
        if not e:
            return get_error_data_result(message="Session not found!")

    messages = conv.message
    question = req.get("question")
    if not question:
        return get_error_data_result("`question` is required.")
    question = {
        "role": "user",
        "content": question,
        "id": str(uuid4())
    }
    messages.append(question)
    msg = []
    for m in messages:
        if m["role"] == "system":
            continue
        if m["role"] == "assistant" and not msg:
            continue
        msg.append(m)
    if not msg[-1].get("id"): msg[-1]["id"] = get_uuid()
    message_id = msg[-1]["id"]

    if "quote" not in req: req["quote"] = False
    stream = req.get("stream", True)

    def fillin_conv(ans):
        reference = ans["reference"]
        temp_reference = deepcopy(ans["reference"])
        nonlocal conv, message_id
        if not conv.reference:
            conv.reference.append(temp_reference)
        else:
            conv.reference[-1] = temp_reference
        conv.message[-1] = {"role": "assistant", "content": ans["answer"], "id": message_id}
        if "chunks" in reference:
            chunks = reference.get("chunks")
            chunk_list = []
            for chunk in chunks:
                new_chunk = {
                    "id": chunk["chunk_id"],
                    "content": chunk["content_with_weight"],
                    "document_id": chunk["doc_id"],
                    "document_name": chunk["docnm_kwd"],
                    "dataset_id": chunk["kb_id"],
                    "image_id": chunk["image_id"],
                    "similarity": chunk["similarity"],
                    "vector_similarity": chunk["vector_similarity"],
                    "term_similarity": chunk["term_similarity"],
                    "positions": chunk["positions"],
                }
                chunk_list.append(new_chunk)
            reference["chunks"] = chunk_list
        ans["id"] = message_id
        ans["session_id"] = session_id

    def rename_field(ans):
        reference = ans['reference']
        if not isinstance(reference, dict):
            return
        for chunk_i in reference.get('chunks', []):
            if 'docnm_kwd' in chunk_i:
                chunk_i['doc_name'] = chunk_i['docnm_kwd']
                chunk_i.pop('docnm_kwd')

    conv.message.append(msg[-1])

    if not conv.reference:
        conv.reference = []
    conv.message.append({"role": "assistant", "content": "", "id": message_id})
    conv.reference.append({"chunks": [], "doc_aggs": []})

    final_ans = {"reference": [], "content": ""}

    canvas.messages.append(msg[-1])
    canvas.add_user_input(msg[-1]["content"])

    if stream:
        def sse():
            nonlocal answer, cvs
            try:
                for ans in canvas.run(stream=True):
                    if ans.get("running_status"):
                        yield "data:" + json.dumps({"code": 0, "message": "",
                                                    "data": {"answer": ans["content"],
                                                             "running_status": True}},
                                                   ensure_ascii=False) + "\n\n"
                        continue
                    for k in ans.keys():
                        final_ans[k] = ans[k]
                    ans = {"answer": ans["content"], "reference": ans.get("reference", [])}
                    fillin_conv(ans)
                    rename_field(ans)
                    yield "data:" + json.dumps({"code": 0, "message": "", "data": ans},
                                               ensure_ascii=False) + "\n\n"

                canvas.messages.append({"role": "assistant", "content": final_ans["content"], "id": message_id})
                canvas.history.append(("assistant", final_ans["content"]))
                if final_ans.get("reference"):
                    canvas.reference.append(final_ans["reference"])
                cvs.dsl = json.loads(str(canvas))
                API4ConversationService.append_message(conv.id, conv.to_dict())
            except Exception as e:
                cvs.dsl = json.loads(str(canvas))
                API4ConversationService.append_message(conv.id, conv.to_dict())
                yield "data:" + json.dumps({"code": 500, "message": str(e),
                                            "data": {"answer": "**ERROR**: " + str(e), "reference": []}},
                                           ensure_ascii=False) + "\n\n"
            yield "data:" + json.dumps({"code": 0, "message": "", "data": True}, ensure_ascii=False) + "\n\n"

        resp = Response(sse(), mimetype="text/event-stream")
        resp.headers.add_header("Cache-control", "no-cache")
        resp.headers.add_header("Connection", "keep-alive")
        resp.headers.add_header("X-Accel-Buffering", "no")
        resp.headers.add_header("Content-Type", "text/event-stream; charset=utf-8")
        return resp

    for answer in canvas.run(stream=False):
        if answer.get("running_status"): continue
        final_ans["content"] = "\n".join(answer["content"]) if "content" in answer else ""
        canvas.messages.append({"role": "assistant", "content": final_ans["content"], "id": message_id})
        if final_ans.get("reference"):
            canvas.reference.append(final_ans["reference"])
        cvs.dsl = json.loads(str(canvas))

    result = {"answer": final_ans["content"], "reference": final_ans.get("reference", [])}
    fillin_conv(result)
    API4ConversationService.append_message(conv.id, conv.to_dict())
    rename_field(result)
    return get_result(data=result)
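
# Example (editor's hedged sketch, not part of the compared commits): a
# non-streaming agent completion. Per the handler above, the response data
# carries "session_id", which can be sent back on the next call to continue the
# same agent conversation. URL, token, and agent_id are illustrative assumptions.
def _example_agent_completion(api_key="YOUR_API_KEY", agent_id="AGENT_ID"):
    import requests
    resp = requests.post(
        f"http://localhost:9380/api/v1/agents/{agent_id}/completions",
        headers={"Authorization": f"Bearer {api_key}"},
        json={"question": "Summarize my data.", "stream": False},
    )
    return resp.json()["data"]  # {"answer": ..., "reference": ..., "session_id": ...}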


@manager.route('/chats/<chat_id>/sessions', methods=['GET'])
@token_required
def list_session(chat_id, tenant_id):
    if not DialogService.query(tenant_id=tenant_id, id=chat_id, status=StatusEnum.VALID.value):
        return get_error_data_result(message=f"You don't own the assistant {chat_id}.")
    id = request.args.get("id")
    name = request.args.get("name")
    page_number = int(request.args.get("page", 1))
    items_per_page = int(request.args.get("page_size", 30))
    orderby = request.args.get("orderby", "create_time")
    if request.args.get("desc") == "False" or request.args.get("desc") == "false":
        desc = False
    else:
        desc = True
    convs = ConversationService.get_list(chat_id, page_number, items_per_page, orderby, desc, id, name)
    if not convs:
        return get_result(data=[])
    for conv in convs:
        conv['messages'] = conv.pop("message")
        infos = conv["messages"]
        for info in infos:
            if "prompt" in info:
                info.pop("prompt")
        conv["chat_id"] = conv.pop("dialog_id")
        if conv["reference"]:
            messages = conv["messages"]
            message_num = 0
            chunk_num = 0
            while message_num < len(messages):
                if message_num != 0 and messages[message_num]["role"] != "user":
                    chunk_list = []
                    if "chunks" in conv["reference"][chunk_num]:
                        chunks = conv["reference"][chunk_num]["chunks"]
                        for chunk in chunks:
                            new_chunk = {
                                "id": chunk["chunk_id"],
                                "content": chunk["content_with_weight"],
                                "document_id": chunk["doc_id"],
                                "document_name": chunk["docnm_kwd"],
                                "dataset_id": chunk["kb_id"],
                                "image_id": chunk["image_id"],
                                "similarity": chunk["similarity"],
                                "vector_similarity": chunk["vector_similarity"],
                                "term_similarity": chunk["term_similarity"],
                                "positions": chunk["positions"],
                            }
                            chunk_list.append(new_chunk)
                    chunk_num += 1
                    messages[message_num]["reference"] = chunk_list
                message_num += 1
        del conv["reference"]
    return get_result(data=convs)


@manager.route('/chats/<chat_id>/sessions', methods=["DELETE"])
@token_required
def delete(tenant_id, chat_id):
    if not DialogService.query(id=chat_id, tenant_id=tenant_id, status=StatusEnum.VALID.value):
        return get_error_data_result(message="You don't own the chat")
    req = request.json
    convs = ConversationService.query(dialog_id=chat_id)
    if not req:
        ids = None
    else:
        ids = req.get("ids")

    if not ids:
        conv_list = []
        for conv in convs:
            conv_list.append(conv.id)
    else:
        conv_list = ids
    for id in conv_list:
        conv = ConversationService.query(id=id, dialog_id=chat_id)
        if not conv:
            return get_error_data_result(message="The chat doesn't own the session")
        ConversationService.delete_by_id(id)
    return get_result()


@manager.route('/sessions/ask', methods=['POST'])
@token_required
def ask_about(tenant_id):
    req = request.json
    if not req.get("question"):
        return get_error_data_result("`question` is required.")
    if not req.get("dataset_ids"):
        return get_error_data_result("`dataset_ids` is required.")
    if not isinstance(req.get("dataset_ids"), list):
        return get_error_data_result("`dataset_ids` should be a list.")
    req["kb_ids"] = req.pop("dataset_ids")
    for kb_id in req["kb_ids"]:
        if not KnowledgebaseService.accessible(kb_id, tenant_id):
            return get_error_data_result(f"You don't own the dataset {kb_id}.")
        kbs = KnowledgebaseService.query(id=kb_id)
        kb = kbs[0]
        if kb.chunk_num == 0:
            return get_error_data_result(f"The dataset {kb_id} doesn't own parsed file")
    uid = tenant_id

    def stream():
        nonlocal req, uid
        try:
            for ans in ask(req["question"], req["kb_ids"], uid):
                yield "data:" + json.dumps({"code": 0, "message": "", "data": ans}, ensure_ascii=False) + "\n\n"
        except Exception as e:
            yield "data:" + json.dumps({"code": 500, "message": str(e),
                                        "data": {"answer": "**ERROR**: " + str(e), "reference": []}},
                                       ensure_ascii=False) + "\n\n"
        yield "data:" + json.dumps({"code": 0, "message": "", "data": True}, ensure_ascii=False) + "\n\n"

    resp = Response(stream(), mimetype="text/event-stream")
    resp.headers.add_header("Cache-control", "no-cache")
    resp.headers.add_header("Connection", "keep-alive")
    resp.headers.add_header("X-Accel-Buffering", "no")
    resp.headers.add_header("Content-Type", "text/event-stream; charset=utf-8")
    return resp
|
||||
|
||||
|
||||
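# A minimal client-side sketch for consuming this event stream (assumes the
# `requests` package and a reachable server; the URL, key, and the "answer"
# field on the streamed payload are assumptions, mirroring the error frame):
import json
import requests

resp = requests.post(
    "http://localhost:9380/api/v1/sessions/ask",
    headers={"Authorization": "Bearer <API_KEY>"},
    json={"question": "What is RAGFlow?", "dataset_ids": ["<dataset_id>"]},
    stream=True,
)
for line in resp.iter_lines(decode_unicode=True):
    if not line or not line.startswith("data:"):
        continue  # skip the blank separator lines between SSE frames
    payload = json.loads(line[len("data:"):])
    if payload["data"] is True:
        break  # final sentinel frame, emitted after the answer completes
    print(payload["data"].get("answer", ""))
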
@manager.route('/sessions/related_questions', methods=['POST'])
@token_required
def related_questions(tenant_id):
    req = request.json
    if not req.get("question"):
        return get_error_data_result("`question` is required.")
    question = req["question"]
    chat_mdl = LLMBundle(tenant_id, LLMType.CHAT)
    prompt = """
Objective: To generate search terms related to the user's search keywords, helping users find more valuable information.

Instructions:
 - Based on the keywords provided by the user, generate 5-10 related search terms.
 - Each search term should be directly or indirectly related to the keyword, guiding the user to find more valuable information.
 - Use common, general terms as much as possible, avoiding obscure words or technical jargon.
 - Keep each term between 2 and 4 words, concise and clear.
 - DO NOT translate; use the language of the original keywords.

### Example:
Keywords: Chinese football
Related search terms:
1. Current status of Chinese football
2. Reform of Chinese football
3. Youth training of Chinese football
4. Chinese football in the Asian Cup
5. Chinese football in the World Cup

Reason:
 - When searching, users often only use one or two keywords, making it difficult to fully express their information needs.
 - Generating related search terms can help users dig deeper into relevant information and improve search efficiency.
 - At the same time, related terms can also help search engines better understand user needs and return more accurate results.

"""
    ans = chat_mdl.chat(prompt, [{"role": "user", "content": f"""
Keywords: {question}
Related search terms:
    """}], {"temperature": 0.9})
    return get_result(data=[re.sub(r"^[0-9]\. ", "", a) for a in ans.split("\n") if re.match(r"^[0-9]\. ", a)])

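# The final list comprehension keeps only lines the model numbered "1. ", "2. ",
# and so on, then strips the numbering. A quick self-contained illustration
# (the sample answer text is made up):
import re

sample = "Related search terms:\n1. Current status of Chinese football\n2. Reform of Chinese football"
terms = [re.sub(r"^[0-9]\. ", "", a) for a in sample.split("\n") if re.match(r"^[0-9]\. ", a)]
assert terms == ["Current status of Chinese football", "Reform of Chinese football"]
# Note the single-digit pattern: a line numbered "10. " would not match and
# would be dropped, even though the prompt allows up to ten terms.
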
@@ -13,8 +13,9 @@
# See the License for the specific language governing permissions and
# limitations under the License
#
import json
import logging
from datetime import datetime

from flask_login import login_required, current_user

@@ -22,110 +23,237 @@ from api.db.db_models import APIToken
from api.db.services.api_service import APITokenService
from api.db.services.knowledgebase_service import KnowledgebaseService
from api.db.services.user_service import UserTenantService
from api import settings
from api.utils import current_timestamp, datetime_format
from api.utils.api_utils import (
    get_json_result,
    get_data_error_result,
    server_error_response,
    generate_confirmation_token,
)
from api.versions import get_ragflow_version
from rag.utils.storage_factory import STORAGE_IMPL, STORAGE_IMPL_TYPE
from timeit import default_timer as timer

from rag.utils.redis_conn import REDIS_CONN

@manager.route("/version", methods=["GET"])
@login_required
def version():
    """
    Get the current version of the application.
    ---
    tags:
      - System
    security:
      - ApiKeyAuth: []
    responses:
      200:
        description: Version retrieved successfully.
        schema:
          type: object
          properties:
            version:
              type: string
              description: Version number.
    """
    return get_json_result(data=get_ragflow_version())

@manager.route("/status", methods=["GET"])
@login_required
def status():
    """
    Get the system status.
    ---
    tags:
      - System
    security:
      - ApiKeyAuth: []
    responses:
      200:
        description: System is operational.
        schema:
          type: object
          properties:
            es:
              type: object
              description: Elasticsearch status.
            storage:
              type: object
              description: Storage status.
            database:
              type: object
              description: Database status.
      503:
        description: Service unavailable.
        schema:
          type: object
          properties:
            error:
              type: string
              description: Error message.
    """
    res = {}
    st = timer()
    try:
        res["doc_engine"] = settings.docStoreConn.health()
        res["doc_engine"]["elapsed"] = "{:.1f}".format((timer() - st) * 1000.0)
    except Exception as e:
        res["doc_engine"] = {
            "type": "unknown",
            "status": "red",
            "elapsed": "{:.1f}".format((timer() - st) * 1000.0),
            "error": str(e),
        }

    st = timer()
    try:
        STORAGE_IMPL.health()
        res["storage"] = {
            "storage": STORAGE_IMPL_TYPE.lower(),
            "status": "green",
            "elapsed": "{:.1f}".format((timer() - st) * 1000.0),
        }
    except Exception as e:
        res["storage"] = {
            "storage": STORAGE_IMPL_TYPE.lower(),
            "status": "red",
            "elapsed": "{:.1f}".format((timer() - st) * 1000.0),
            "error": str(e),
        }

    st = timer()
    try:
        KnowledgebaseService.get_by_id("x")
        res["database"] = {
            "database": settings.DATABASE_TYPE.lower(),
            "status": "green",
            "elapsed": "{:.1f}".format((timer() - st) * 1000.0),
        }
    except Exception as e:
        res["database"] = {
            "database": settings.DATABASE_TYPE.lower(),
            "status": "red",
            "elapsed": "{:.1f}".format((timer() - st) * 1000.0),
            "error": str(e),
        }

    st = timer()
    try:
        if not REDIS_CONN.health():
            raise Exception("Lost connection!")
        res["redis"] = {
            "status": "green",
            "elapsed": "{:.1f}".format((timer() - st) * 1000.0),
        }
    except Exception as e:
        res["redis"] = {
            "status": "red",
            "elapsed": "{:.1f}".format((timer() - st) * 1000.0),
            "error": str(e),
        }

    task_executor_heartbeats = {}
    try:
        task_executors = REDIS_CONN.smembers("TASKEXE")
        now = datetime.now().timestamp()
        for task_executor_id in task_executors:
            heartbeats = REDIS_CONN.zrangebyscore(task_executor_id, now - 60 * 30, now)
            heartbeats = [json.loads(heartbeat) for heartbeat in heartbeats]
            task_executor_heartbeats[task_executor_id] = heartbeats
    except Exception:
        logging.exception("get task executor heartbeats failed!")
    res["task_executor_heartbeats"] = task_executor_heartbeats

    return get_json_result(data=res)

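# For reference, a sketch of a healthy response from this endpoint; the timing
# strings and engine names are illustrative assumptions, not captured output:
#
# {
#     "doc_engine": {"status": "green", "elapsed": "12.3", ...},
#     "storage": {"storage": "minio", "status": "green", "elapsed": "1.5"},
#     "database": {"database": "mysql", "status": "green", "elapsed": "3.2"},
#     "redis": {"status": "green", "elapsed": "0.8"},
#     "task_executor_heartbeats": {"<executor_id>": [{...heartbeat...}, ...]}
# }
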
@manager.route("/new_token", methods=["POST"])
@login_required
def new_token():
    """
    Generate a new API token.
    ---
    tags:
      - API Tokens
    security:
      - ApiKeyAuth: []
    parameters:
      - in: query
        name: name
        type: string
        required: false
        description: Name of the token.
    responses:
      200:
        description: Token generated successfully.
        schema:
          type: object
          properties:
            token:
              type: string
              description: The generated API token.
    """
    try:
        tenants = UserTenantService.query(user_id=current_user.id)
        if not tenants:
            return get_data_error_result(message="Tenant not found!")

        tenant_id = tenants[0].tenant_id
        obj = {
            "tenant_id": tenant_id,
            "token": generate_confirmation_token(tenant_id),
            "create_time": current_timestamp(),
            "create_date": datetime_format(datetime.now()),
            "update_time": None,
            "update_date": None,
        }

        if not APITokenService.save(**obj):
            return get_data_error_result(message="Fail to create a new token!")

        return get_json_result(data=obj)
    except Exception as e:
        return server_error_response(e)

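# Usage sketch. The endpoint prefix, host, and auth header are assumptions
# about how these session-authenticated routes are typically mounted, not
# facts from this changeset:
#
#   curl -X POST "http://localhost:9380/v1/system/new_token" \
#        -H "Authorization: <session_authorization>"
#
# The response echoes the saved object, including the generated "token" field.
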
@manager.route("/token_list", methods=["GET"])
@login_required
def token_list():
    """
    List all API tokens for the current user.
    ---
    tags:
      - API Tokens
    security:
      - ApiKeyAuth: []
    responses:
      200:
        description: List of API tokens.
        schema:
          type: object
          properties:
            tokens:
              type: array
              items:
                type: object
                properties:
                  token:
                    type: string
                    description: The API token.
                  name:
                    type: string
                    description: Name of the token.
                  create_time:
                    type: string
                    description: Token creation time.
    """
    try:
        tenants = UserTenantService.query(user_id=current_user.id)
        if not tenants:
            return get_data_error_result(message="Tenant not found!")

        objs = APITokenService.query(tenant_id=tenants[0].tenant_id)
        return get_json_result(data=[o.to_dict() for o in objs])
@@ -133,9 +261,33 @@ def token_list():
        return server_error_response(e)

@manager.route("/token/<token>", methods=["DELETE"])
@login_required
def rm(token):
    """
    Remove an API token.
    ---
    tags:
      - API Tokens
    security:
      - ApiKeyAuth: []
    parameters:
      - in: path
        name: token
        type: string
        required: true
        description: The API token to remove.
    responses:
      200:
        description: Token removed successfully.
        schema:
          type: object
          properties:
            success:
              type: boolean
              description: Deletion status.
    """
    APITokenService.filter_delete(
        [APIToken.tenant_id == current_user.id, APIToken.token == token]
    )
    return get_json_result(data=True)

@@ -17,6 +17,7 @@
from flask import request
from flask_login import login_required, current_user

from api import settings
from api.db import UserTenantRole, StatusEnum
from api.db.db_models import UserTenant
from api.db.services.user_service import UserTenantService, UserService
@@ -28,6 +29,12 @@ from api.utils.api_utils import get_json_result, validate_request, server_error_
@manager.route("/<tenant_id>/user/list", methods=["GET"])
@login_required
def user_list(tenant_id):
    if current_user.id != tenant_id:
        return get_json_result(
            data=False,
            message='No authorization.',
            code=settings.RetCode.AUTHENTICATION_ERROR)

    try:
        users = UserTenantService.get_by_tenant_id(tenant_id)
        for u in users:
@@ -41,17 +48,23 @@ def user_list(tenant_id):
@login_required
@validate_request("email")
def create(tenant_id):
    if current_user.id != tenant_id:
        return get_json_result(
            data=False,
            message='No authorization.',
            code=settings.RetCode.AUTHENTICATION_ERROR)

    req = request.json
    usrs = UserService.query(email=req["email"])
    if not usrs:
        return get_data_error_result(message="User not found.")

    user_id = usrs[0].id
    user_tenants = UserTenantService.query(user_id=user_id, tenant_id=tenant_id)
    if user_tenants:
        if user_tenants[0].status == UserTenantRole.NORMAL.value:
            return get_data_error_result(message="This user is in the team already.")
        return get_data_error_result(message="Invitation notification is sent.")

    UserTenantService.save(
        id=get_uuid(),
@@ -70,6 +83,12 @@ def create(tenant_id):
@manager.route('/<tenant_id>/user/<user_id>', methods=['DELETE'])
@login_required
def rm(tenant_id, user_id):
    if current_user.id != tenant_id and current_user.id != user_id:
        return get_json_result(
            data=False,
            message='No authorization.',
            code=settings.RetCode.AUTHENTICATION_ERROR)

    try:
        UserTenantService.filter_delete([UserTenant.tenant_id == tenant_id, UserTenant.user_id == user_id])
        return get_json_result(data=True)

@@ -13,6 +13,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#
import logging
import json
import re
from datetime import datetime
@@ -23,65 +24,127 @@ from flask_login import login_required, current_user, login_user, logout_user

from api.db.db_models import TenantLLM
from api.db.services.llm_service import TenantLLMService, LLMService
from api.utils.api_utils import (
    server_error_response,
    validate_request,
    get_data_error_result,
)
from api.utils import (
    get_uuid,
    get_format_time,
    decrypt,
    download_img,
    current_timestamp,
    datetime_format,
)
from api.db import UserTenantRole, FileType
from api import settings
from api.db.services.user_service import UserService, TenantService, UserTenantService
from api.db.services.file_service import FileService
from api.utils.api_utils import get_json_result, construct_response

@manager.route("/login", methods=["POST", "GET"])
def login():
    """
    User login endpoint.
    ---
    tags:
      - User
    parameters:
      - in: body
        name: body
        description: Login credentials.
        required: true
        schema:
          type: object
          properties:
            email:
              type: string
              description: User email.
            password:
              type: string
              description: User password.
    responses:
      200:
        description: Login successful.
        schema:
          type: object
      401:
        description: Authentication failed.
        schema:
          type: object
    """
    if not request.json:
        return get_json_result(
            data=False, code=settings.RetCode.AUTHENTICATION_ERROR, message="Unauthorized!"
        )

    email = request.json.get("email", "")
    users = UserService.query(email=email)
    if not users:
        return get_json_result(
            data=False,
            code=settings.RetCode.AUTHENTICATION_ERROR,
            message=f"Email: {email} is not registered!",
        )

    password = request.json.get("password")
    try:
        password = decrypt(password)
    except BaseException:
        return get_json_result(
            data=False, code=settings.RetCode.SERVER_ERROR, message="Fail to decrypt password"
        )

    user = UserService.query_user(email, password)
    if user:
        response_data = user.to_json()
        user.access_token = get_uuid()
        login_user(user)
        user.update_time = (current_timestamp(),)
        user.update_date = (datetime_format(datetime.now()),)
        user.save()
        msg = "Welcome back!"
        return construct_response(data=response_data, auth=user.get_id(), message=msg)
    else:
        return get_json_result(
            data=False,
            code=settings.RetCode.AUTHENTICATION_ERROR,
            message="Email and password do not match!",
        )

@manager.route("/github_callback", methods=["GET"])
def github_callback():
    """
    GitHub OAuth callback endpoint.
    ---
    tags:
      - OAuth
    parameters:
      - in: query
        name: code
        type: string
        required: true
        description: Authorization code from GitHub.
    responses:
      200:
        description: Authentication successful.
        schema:
          type: object
    """
    import requests

    res = requests.post(
        settings.GITHUB_OAUTH.get("url"),
        data={
            "client_id": settings.GITHUB_OAUTH.get("client_id"),
            "client_secret": settings.GITHUB_OAUTH.get("secret_key"),
            "code": request.args.get("code"),
        },
        headers={"Accept": "application/json"},
    )
    res = res.json()
    if "error" in res:
        return redirect("/?error=%s" % res["error_description"])
@@ -101,21 +164,24 @@ def github_callback():
        try:
            avatar = download_img(user_info["avatar_url"])
        except Exception as e:
            logging.exception(e)
            avatar = ""
        users = user_register(
            user_id,
            {
                "access_token": session["access_token"],
                "email": email_address,
                "avatar": avatar,
                "nickname": user_info["login"],
                "login_channel": "github",
                "last_login_time": get_format_time(),
                "is_superuser": False,
            },
        )
        if not users:
            raise Exception(f"Fail to register {email_address}.")
        if len(users) > 1:
            raise Exception(f"Same email: {email_address} exists!")

        # Try to log in
        user = users[0]
@@ -123,7 +189,7 @@ def github_callback():
        return redirect("/?auth=%s" % user.get_id())
    except Exception as e:
        rollback_user_registration(user_id)
        logging.exception(e)
        return redirect("/?error=%s" % str(e))

    # User has already registered, try to log in
@@ -134,30 +200,56 @@ def github_callback():
    return redirect("/?auth=%s" % user.get_id())

@manager.route("/feishu_callback", methods=["GET"])
def feishu_callback():
    """
    Feishu OAuth callback endpoint.
    ---
    tags:
      - OAuth
    parameters:
      - in: query
        name: code
        type: string
        required: true
        description: Authorization code from Feishu.
    responses:
      200:
        description: Authentication successful.
        schema:
          type: object
    """
    import requests

    app_access_token_res = requests.post(
        settings.FEISHU_OAUTH.get("app_access_token_url"),
        data=json.dumps(
            {
                "app_id": settings.FEISHU_OAUTH.get("app_id"),
                "app_secret": settings.FEISHU_OAUTH.get("app_secret"),
            }
        ),
        headers={"Content-Type": "application/json; charset=utf-8"},
    )
    app_access_token_res = app_access_token_res.json()
    if app_access_token_res["code"] != 0:
        return redirect("/?error=%s" % app_access_token_res)

    res = requests.post(
        settings.FEISHU_OAUTH.get("user_access_token_url"),
        data=json.dumps(
            {
                "grant_type": settings.FEISHU_OAUTH.get("grant_type"),
                "code": request.args.get("code"),
            }
        ),
        headers={
            "Content-Type": "application/json; charset=utf-8",
            "Authorization": f"Bearer {app_access_token_res['app_access_token']}",
        },
    )
    res = res.json()
    if res["code"] != 0:
        return redirect("/?error=%s" % res["message"])

    if "contact:user.email:readonly" not in res["data"]["scope"].split(" "):
@@ -174,21 +266,24 @@ def feishu_callback():
        try:
            avatar = download_img(user_info["avatar_url"])
        except Exception as e:
            logging.exception(e)
            avatar = ""
        users = user_register(
            user_id,
            {
                "access_token": session["access_token"],
                "email": email_address,
                "avatar": avatar,
                "nickname": user_info["en_name"],
                "login_channel": "feishu",
                "last_login_time": get_format_time(),
                "is_superuser": False,
            },
        )
        if not users:
            raise Exception(f"Fail to register {email_address}.")
        if len(users) > 1:
            raise Exception(f"Same email: {email_address} exists!")

        # Try to log in
        user = users[0]
@@ -196,7 +291,7 @@ def feishu_callback():
        return redirect("/?auth=%s" % user.get_id())
    except Exception as e:
        rollback_user_registration(user_id)
        logging.exception(e)
        return redirect("/?error=%s" % str(e))

    # User has already registered, try to log in
@@ -209,11 +304,14 @@ def feishu_callback():

def user_info_from_feishu(access_token):
    import requests

    headers = {
        "Content-Type": "application/json; charset=utf-8",
        "Authorization": f"Bearer {access_token}",
    }
    res = requests.get(
        "https://open.feishu.cn/open-apis/authen/v1/user_info", headers=headers
    )
    user_info = res.json()["data"]
    user_info["email"] = None if user_info.get("email") == "" else user_info["email"]
    return user_info
@@ -221,24 +319,38 @@ def user_info_from_feishu(access_token):

def user_info_from_github(access_token):
    import requests

    headers = {"Accept": "application/json", "Authorization": f"token {access_token}"}
    res = requests.get(
        f"https://api.github.com/user?access_token={access_token}", headers=headers
    )
    user_info = res.json()
    email_info = requests.get(
        f"https://api.github.com/user/emails?access_token={access_token}",
        headers=headers,
    ).json()
    user_info["email"] = next(
        (email for email in email_info if email["primary"] == True), None
    )["email"]
    return user_info

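# The next(...) above assumes GitHub marks exactly one address as primary; if
# none is marked, subscripting None raises a TypeError. A more defensive
# variant of the same lookup (a sketch under that assumption, not repo code):
def primary_email(email_info):
    primary = next((e for e in email_info if e.get("primary")), None)
    return primary["email"] if primary else None
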
@manager.route("/logout", methods=['GET'])
|
||||
@manager.route("/logout", methods=["GET"])
|
||||
@login_required
|
||||
def log_out():
|
||||
"""
|
||||
User logout endpoint.
|
||||
---
|
||||
tags:
|
||||
- User
|
||||
security:
|
||||
- ApiKeyAuth: []
|
||||
responses:
|
||||
200:
|
||||
description: Logout successful.
|
||||
schema:
|
||||
type: object
|
||||
"""
|
||||
current_user.access_token = ""
|
||||
current_user.save()
|
||||
logout_user()
|
||||
@ -248,20 +360,62 @@ def log_out():
|
||||
@manager.route("/setting", methods=["POST"])
|
||||
@login_required
|
||||
def setting_user():
|
||||
"""
|
||||
Update user settings.
|
||||
---
|
||||
tags:
|
||||
- User
|
||||
security:
|
||||
- ApiKeyAuth: []
|
||||
parameters:
|
||||
- in: body
|
||||
name: body
|
||||
description: User settings to update.
|
||||
required: true
|
||||
schema:
|
||||
type: object
|
||||
properties:
|
||||
nickname:
|
||||
type: string
|
||||
description: New nickname.
|
||||
email:
|
||||
type: string
|
||||
description: New email.
|
||||
responses:
|
||||
200:
|
||||
description: Settings updated successfully.
|
||||
schema:
|
||||
type: object
|
||||
"""
|
||||
update_dict = {}
|
||||
request_data = request.json
|
||||
if request_data.get("password"):
|
||||
new_password = request_data.get("new_password")
|
||||
if not check_password_hash(
|
||||
current_user.password, decrypt(request_data["password"])):
|
||||
return get_json_result(data=False, retcode=RetCode.AUTHENTICATION_ERROR, retmsg='Password error!')
|
||||
current_user.password, decrypt(request_data["password"])
|
||||
):
|
||||
return get_json_result(
|
||||
data=False,
|
||||
code=settings.RetCode.AUTHENTICATION_ERROR,
|
||||
message="Password error!",
|
||||
)
|
||||
|
||||
if new_password:
|
||||
update_dict["password"] = generate_password_hash(decrypt(new_password))
|
||||
|
||||
for k in request_data.keys():
|
||||
if k in ["password", "new_password", "email", "status", "is_superuser", "login_channel", "is_anonymous",
|
||||
"is_active", "is_authenticated", "last_login_time"]:
|
||||
if k in [
|
||||
"password",
|
||||
"new_password",
|
||||
"email",
|
||||
"status",
|
||||
"is_superuser",
|
||||
"login_channel",
|
||||
"is_anonymous",
|
||||
"is_active",
|
||||
"is_authenticated",
|
||||
"last_login_time",
|
||||
]:
|
||||
continue
|
||||
update_dict[k] = request_data[k]
|
||||
|
||||
@ -269,34 +423,59 @@ def setting_user():
|
||||
UserService.update_by_id(current_user.id, update_dict)
|
||||
return get_json_result(data=True)
|
||||
except Exception as e:
|
||||
stat_logger.exception(e)
|
||||
return get_json_result(data=False, retmsg='Update failure!', retcode=RetCode.EXCEPTION_ERROR)
|
||||
logging.exception(e)
|
||||
return get_json_result(
|
||||
data=False, message="Update failure!", code=settings.RetCode.EXCEPTION_ERROR
|
||||
)
|
||||
|
||||
|
||||
@manager.route("/info", methods=["GET"])
|
||||
@login_required
|
||||
def user_profile():
|
||||
"""
|
||||
Get user profile information.
|
||||
---
|
||||
tags:
|
||||
- User
|
||||
security:
|
||||
- ApiKeyAuth: []
|
||||
responses:
|
||||
200:
|
||||
description: User profile retrieved successfully.
|
||||
schema:
|
||||
type: object
|
||||
properties:
|
||||
id:
|
||||
type: string
|
||||
description: User ID.
|
||||
nickname:
|
||||
type: string
|
||||
description: User nickname.
|
||||
email:
|
||||
type: string
|
||||
description: User email.
|
||||
"""
|
||||
return get_json_result(data=current_user.to_dict())
|
||||
|
||||
|
||||
def rollback_user_registration(user_id):
    try:
        UserService.delete_by_id(user_id)
    except Exception:
        pass
    try:
        TenantService.delete_by_id(user_id)
    except Exception:
        pass
    try:
        u = UserTenantService.query(tenant_id=user_id)
        if u:
            UserTenantService.delete_by_id(u[0].id)
    except Exception:
        pass
    try:
        TenantLLM.delete().where(TenantLLM.tenant_id == user_id).execute()
    except Exception:
        pass

@@ -305,18 +484,18 @@ def user_register(user_id, user):
    tenant = {
        "id": user_id,
        "name": user["nickname"] + "‘s Kingdom",
        "llm_id": settings.CHAT_MDL,
        "embd_id": settings.EMBEDDING_MDL,
        "asr_id": settings.ASR_MDL,
        "parser_ids": settings.PARSERS,
        "img2txt_id": settings.IMAGE2TEXT_MDL,
        "rerank_id": settings.RERANK_MDL,
    }
    usr_tenant = {
        "tenant_id": user_id,
        "user_id": user_id,
        "invited_by": user_id,
        "role": UserTenantRole.OWNER,
    }
    file_id = get_uuid()
    file = {
@@ -330,14 +509,18 @@ def user_register(user_id, user):
        "location": "",
    }
    tenant_llm = []
    for llm in LLMService.query(fid=settings.LLM_FACTORY):
        tenant_llm.append(
            {
                "tenant_id": user_id,
                "llm_factory": settings.LLM_FACTORY,
                "llm_name": llm.llm_name,
                "model_type": llm.model_type,
                "api_key": settings.API_KEY,
                "api_base": settings.LLM_BASE_URL,
                "max_tokens": llm.max_tokens if llm.max_tokens else 8192
            }
        )

    if not UserService.save(**user):
        return
@@ -351,21 +534,52 @@ def user_register(user_id, user):
@manager.route("/register", methods=["POST"])
@validate_request("nickname", "email", "password")
def user_add():
    """
    Register a new user.
    ---
    tags:
      - User
    parameters:
      - in: body
        name: body
        description: Registration details.
        required: true
        schema:
          type: object
          properties:
            nickname:
              type: string
              description: User nickname.
            email:
              type: string
              description: User email.
            password:
              type: string
              description: User password.
    responses:
      200:
        description: Registration successful.
        schema:
          type: object
    """
    req = request.json
    email_address = req["email"]

    # Validate the email address
    if not re.match(r"^[\w\._-]+@([\w_-]+\.)+[\w-]{2,5}$", email_address):
        return get_json_result(
            data=False,
            message=f"Invalid email address: {email_address}!",
            code=settings.RetCode.OPERATING_ERROR,
        )

    # Check if the email address is already used
    if UserService.query(email=email_address):
        return get_json_result(
            data=False,
            message=f"Email: {email_address} has already registered!",
            code=settings.RetCode.OPERATING_ERROR,
        )

    # Construct user info data
    nickname = req["nickname"]
@@ -383,29 +597,59 @@ def user_add():
    try:
        users = user_register(user_id, user_dict)
        if not users:
            raise Exception(f"Fail to register {email_address}.")
        if len(users) > 1:
            raise Exception(f"Same email: {email_address} exists!")
        user = users[0]
        login_user(user)
        return construct_response(
            data=user.to_json(),
            auth=user.get_id(),
            message=f"{nickname}, welcome aboard!",
        )
    except Exception as e:
        rollback_user_registration(user_id)
        logging.exception(e)
        return get_json_result(
            data=False,
            message=f"User registration failure, error: {str(e)}",
            code=settings.RetCode.EXCEPTION_ERROR,
        )

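# A quick illustration of the email-validation regex used in user_add above
# (the sample addresses are made up):
import re

EMAIL_RE = r"^[\w\._-]+@([\w_-]+\.)+[\w-]{2,5}$"
assert re.match(EMAIL_RE, "alice@example.com")
assert re.match(EMAIL_RE, "a.b-c_d@mail.example.io")
assert not re.match(EMAIL_RE, "not-an-email")
# The {2,5} quantifier caps the top-level domain at five characters, so long
# TLDs such as ".technology" would be rejected by this pattern.
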
@manager.route("/tenant_info", methods=["GET"])
|
||||
@login_required
|
||||
def tenant_info():
|
||||
"""
|
||||
Get tenant information.
|
||||
---
|
||||
tags:
|
||||
- Tenant
|
||||
security:
|
||||
- ApiKeyAuth: []
|
||||
responses:
|
||||
200:
|
||||
description: Tenant information retrieved successfully.
|
||||
schema:
|
||||
type: object
|
||||
properties:
|
||||
tenant_id:
|
||||
type: string
|
||||
description: Tenant ID.
|
||||
name:
|
||||
type: string
|
||||
description: Tenant name.
|
||||
llm_id:
|
||||
type: string
|
||||
description: LLM ID.
|
||||
embd_id:
|
||||
type: string
|
||||
description: Embedding model ID.
|
||||
"""
|
||||
try:
|
||||
tenants = TenantService.get_info_by(current_user.id)
|
||||
if not tenants:
|
||||
return get_data_error_result(retmsg="Tenant not found!")
|
||||
return get_data_error_result(message="Tenant not found!")
|
||||
return get_json_result(data=tenants[0])
|
||||
except Exception as e:
|
||||
return server_error_response(e)
|
||||
@ -415,10 +659,45 @@ def tenant_info():
|
||||
@login_required
|
||||
@validate_request("tenant_id", "asr_id", "embd_id", "img2txt_id", "llm_id")
|
||||
def set_tenant_info():
|
||||
"""
|
||||
Update tenant information.
|
||||
---
|
||||
tags:
|
||||
- Tenant
|
||||
security:
|
||||
- ApiKeyAuth: []
|
||||
parameters:
|
||||
- in: body
|
||||
name: body
|
||||
description: Tenant information to update.
|
||||
required: true
|
||||
schema:
|
||||
type: object
|
||||
properties:
|
||||
tenant_id:
|
||||
type: string
|
||||
description: Tenant ID.
|
||||
llm_id:
|
||||
type: string
|
||||
description: LLM ID.
|
||||
embd_id:
|
||||
type: string
|
||||
description: Embedding model ID.
|
||||
asr_id:
|
||||
type: string
|
||||
description: ASR model ID.
|
||||
img2txt_id:
|
||||
type: string
|
||||
description: Image to Text model ID.
|
||||
responses:
|
||||
200:
|
||||
description: Tenant information updated successfully.
|
||||
schema:
|
||||
type: object
|
||||
"""
|
||||
req = request.json
|
||||
try:
|
||||
tid = req["tenant_id"]
|
||||
del req["tenant_id"]
|
||||
tid = req.pop("tenant_id")
|
||||
TenantService.update_by_id(tid, req)
|
||||
return get_json_result(data=True)
|
||||
except Exception as e:
|
||||
|
||||
@@ -15,4 +15,11 @@

NAME_LENGTH_LIMIT = 2 ** 10

IMG_BASE64_PREFIX = 'data:image/png;base64,'

SERVICE_CONF = "service_conf.yaml"

API_VERSION = "v1"
RAG_FLOW_SERVICE_NAME = "ragflow"
REQUEST_WAIT_SEC = 2
REQUEST_MAX_WAIT_SEC = 300

@@ -13,6 +13,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#
import logging
import inspect
import os
import sys
@@ -29,12 +30,11 @@ from peewee import (
    Field, Model, Metadata
)
from playhouse.pool import PooledMySQLDatabase, PooledPostgresqlDatabase

from api.db import SerializedType, ParserType
from api import settings
from api import utils


def singleton(cls, *args, **kw):
@@ -65,7 +65,7 @@ class TextFieldType(Enum):


class LongTextField(TextField):
    field_type = TextFieldType[settings.DATABASE_TYPE.upper()].value


class JSONField(LongTextField):
@@ -272,6 +272,7 @@ class JsonSerializedField(SerializedField):
        super(JsonSerializedField, self).__init__(serialized_type=SerializedType.JSON, object_hook=object_hook,
                                                  object_pairs_hook=object_pairs_hook, **kwargs)


class PooledDatabase(Enum):
    MYSQL = PooledMySQLDatabase
    POSTGRES = PooledPostgresqlDatabase
@@ -285,10 +286,11 @@ class DatabaseMigrator(Enum):
@singleton
class BaseDataBase:
    def __init__(self):
        database_config = settings.DATABASE.copy()
        db_name = database_config.pop("name")
        self.database_connection = PooledDatabase[settings.DATABASE_TYPE.upper()].value(db_name, **database_config)
        logging.info('init database on cluster mode successfully')


class PostgresDatabaseLock:
    def __init__(self, lock_name, timeout=10, db=None):
@@ -334,6 +336,7 @@ class PostgresDatabaseLock:

        return magic


class MysqlDatabaseLock:
    def __init__(self, lock_name, timeout=10, db=None):
        self.lock_name = lock_name
@@ -388,7 +391,7 @@ class DatabaseLock(Enum):


DB = BaseDataBase().database_connection
DB.lock = DatabaseLock[settings.DATABASE_TYPE.upper()].value


def close_connection():
@@ -396,7 +399,7 @@ def close_connection():
        if DB:
            DB.close_stale(age=30)
    except Exception as e:
        logging.exception(e)


class DataBaseModel(BaseModel):
@@ -412,15 +415,15 @@ def init_database_tables(alter_fields=[]):
    for name, obj in members:
        if obj != DataBaseModel and issubclass(obj, DataBaseModel):
            table_objs.append(obj)
            logging.debug(f"start create table {obj.__name__}")
            try:
                obj.create_table()
                logging.debug(f"create table success: {obj.__name__}")
            except Exception as e:
                logging.exception(e)
                create_failed_list.append(obj.__name__)
    if create_failed_list:
        logging.error(f"create tables failed: {create_failed_list}")
        raise Exception(f"create tables failed: {create_failed_list}")
    migrate_db()

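# The PooledDatabase/DatabaseMigrator/DatabaseLock enums above all share one
# dispatch idiom: index the Enum by the upper-cased settings.DATABASE_TYPE and
# take .value. A self-contained sketch of the pattern ("mysql" is illustrative):
from enum import Enum

class Backend(Enum):
    MYSQL = "PooledMySQLDatabase"
    POSTGRES = "PooledPostgresqlDatabase"

database_type = "mysql"  # stand-in for settings.DATABASE_TYPE
impl = Backend[database_type.upper()].value  # -> "PooledMySQLDatabase"
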
@@ -470,7 +473,7 @@ class User(DataBaseModel, UserMixin):
    status = CharField(
        max_length=1,
        null=True,
        help_text="is it validate(0: wasted, 1: validate)",
        default="1",
        index=True)
    is_superuser = BooleanField(null=True, help_text="is root", default=False, index=True)
@@ -479,7 +482,7 @@ class User(DataBaseModel, UserMixin):
        return self.email

    def get_id(self):
        jwt = Serializer(secret_key=settings.SECRET_KEY)
        return jwt.dumps(str(self.access_token))

    class Meta:
@@ -525,7 +528,7 @@ class Tenant(DataBaseModel):
    status = CharField(
        max_length=1,
        null=True,
        help_text="is it validate(0: wasted, 1: validate)",
        default="1",
        index=True)

@@ -542,7 +545,7 @@ class UserTenant(DataBaseModel):
    status = CharField(
        max_length=1,
        null=True,
        help_text="is it validate(0: wasted, 1: validate)",
        default="1",
        index=True)

@@ -559,7 +562,7 @@ class InvitationCode(DataBaseModel):
    status = CharField(
        max_length=1,
        null=True,
        help_text="is it validate(0: wasted, 1: validate)",
        default="1",
        index=True)

@@ -582,7 +585,7 @@ class LLMFactories(DataBaseModel):
    status = CharField(
        max_length=1,
        null=True,
        help_text="is it validate(0: wasted, 1: validate)",
        default="1",
        index=True)

@@ -616,7 +619,7 @@ class LLM(DataBaseModel):
    status = CharField(
        max_length=1,
        null=True,
        help_text="is it validate(0: wasted, 1: validate)",
        default="1",
        index=True)

@@ -648,7 +651,7 @@ class TenantLLM(DataBaseModel):
        index=True)
    api_key = CharField(max_length=1024, null=True, help_text="API KEY", index=True)
    api_base = CharField(max_length=255, null=True, help_text="API Base")

    max_tokens = IntegerField(default=8192, index=True)
    used_tokens = IntegerField(default=0, index=True)

    def __str__(self):
@@ -703,7 +706,7 @@ class Knowledgebase(DataBaseModel):
    status = CharField(
        max_length=1,
        null=True,
        help_text="is it validate(0: wasted, 1: validate)",
        default="1",
        index=True)

@@ -767,7 +770,7 @@ class Document(DataBaseModel):
    status = CharField(
        max_length=1,
        null=True,
        help_text="is it validate(0: wasted, 1: validate)",
        default="1",
        index=True)

@@ -840,7 +843,7 @@ class Task(DataBaseModel):
    doc_id = CharField(max_length=32, null=False, index=True)
    from_page = IntegerField(default=0)

    to_page = IntegerField(default=100000000)

    begin_at = DateTimeField(null=True, index=True)
    process_duation = FloatField(default=0)
@@ -879,8 +882,10 @@ class Dialog(DataBaseModel):
        default="simple",
        help_text="simple|advanced",
        index=True)
    prompt_config = JSONField(null=False,
                              default={"system": "", "prologue": "Hi! I'm your assistant, what can I do for you?",
                                       "parameters": [],
                                       "empty_response": "Sorry! No relevant content was found in the knowledge base!"})

    similarity_threshold = FloatField(default=0.2)
    vector_similarity_weight = FloatField(default=0.3)
@@ -894,7 +899,7 @@ class Dialog(DataBaseModel):
        null=False,
        default="1",
        help_text="it needs to insert reference index into answer or not")

    rerank_id = CharField(
        max_length=128,
        null=False,
@@ -904,7 +909,7 @@ class Dialog(DataBaseModel):
    status = CharField(
        max_length=1,
        null=True,
        help_text="is it validate(0: wasted, 1: validate)",
        default="1",
        index=True)

@@ -980,14 +985,14 @@ class CanvasTemplate(DataBaseModel):

def migrate_db():
    with DB.transaction():
        migrator = DatabaseMigrator[settings.DATABASE_TYPE.upper()].value(DB)
        try:
            migrate(
                migrator.add_column('file', 'source_type', CharField(max_length=128, null=False, default="",
                                                                     help_text="where does this document come from",
                                                                     index=True))
            )
        except Exception:
            pass
        try:
            migrate(
@@ -996,7 +1001,7 @@ def migrate_db():
                                           help_text="default rerank model ID"))
            )
        except Exception:
            pass
        try:
            migrate(
@@ -1004,59 +1009,64 @@ def migrate_db():
                                           help_text="default rerank model ID"))
            )
        except Exception:
            pass
        try:
            migrate(
                migrator.add_column('dialog', 'top_k', IntegerField(default=1024))
            )
        except Exception:
            pass
        try:
            migrate(
                migrator.alter_column_type('tenant_llm', 'api_key',
                                           CharField(max_length=1024, null=True, help_text="API KEY", index=True))
            )
        except Exception:
            pass
        try:
            migrate(
                migrator.add_column('api_token', 'source',
                                    CharField(max_length=16, null=True, help_text="none|agent|dialog", index=True))
            )
        except Exception:
            pass
        try:
            migrate(
                migrator.add_column("tenant", "tts_id",
                                    CharField(max_length=256, null=True, help_text="default tts model ID", index=True))
            )
        except Exception:
            pass
        try:
            migrate(
                migrator.add_column('api_4_conversation', 'source',
                                    CharField(max_length=16, null=True, help_text="none|agent|dialog", index=True))
            )
        except Exception:
            pass
        try:
            DB.execute_sql('ALTER TABLE llm DROP PRIMARY KEY;')
            DB.execute_sql('ALTER TABLE llm ADD PRIMARY KEY (llm_name,fid);')
        except Exception:
            pass
        try:
            migrate(
                migrator.add_column('task', 'retry_count', IntegerField(default=0))
            )
        except Exception:
            pass
        try:
            migrate(
                migrator.alter_column_type('api_token', 'dialog_id',
                                           CharField(max_length=32, null=True, index=True))
            )
        except Exception:
            pass
        try:
            migrate(
                migrator.add_column("tenant_llm", "max_tokens", IntegerField(default=8192, index=True))
            )
        except Exception:
            pass

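# migrate_db above is deliberately idempotent: each schema change sits in its
# own try/except, so re-running it against an already-migrated database is a
# no-op. A condensed sketch of the same pattern (the helper name is ours, not
# the repo's):
from playhouse.migrate import migrate

def _try_migrate(operation):
    try:
        migrate(operation)
    except Exception:
        pass  # e.g. the column already exists; skip and continue
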
@@ -15,19 +15,12 @@
#
import operator
from functools import reduce

from playhouse.pool import PooledMySQLDatabase

from api.utils import current_timestamp, timestamp_to_date

from api.db.db_models import DB, DataBaseModel
from api.db.runtime_config import RuntimeConfig
from enum import Enum


@DB.connection_context()
@@ -93,7 +86,7 @@ supported_operators = {


def query_dict2expression(
        model: type[DataBaseModel], query: dict[str, bool | int | str | list | tuple]):
    expression = []

    for field, value in query.items():
@@ -111,8 +104,8 @@ def query_dict2expression(
    return reduce(operator.iand, expression)


def query_db(model: type[DataBaseModel], limit: int = 0, offset: int = 0,
             query: dict = None, order_by: str | list | tuple | None = None):
    data = model.select()
    if query:
        data = data.where(query_dict2expression(model, query))

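# A hedged usage sketch of the typing-modernized helpers above; the model and
# field names are assumptions for illustration, not calls from this changeset:
#
#   rows = query_db(User, limit=10, offset=0,
#                   query={"status": "1"},
#                   order_by="create_time")
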
@ -13,6 +13,7 @@
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
import logging
|
||||
import base64
|
||||
import json
|
||||
import os
|
||||
@ -28,7 +29,7 @@ from api.db.services.document_service import DocumentService
|
||||
from api.db.services.knowledgebase_service import KnowledgebaseService
|
||||
from api.db.services.llm_service import LLMFactoriesService, LLMService, TenantLLMService, LLMBundle
|
||||
from api.db.services.user_service import TenantService, UserTenantService
from api.settings import CHAT_MDL, EMBEDDING_MDL, ASR_MDL, IMAGE2TEXT_MDL, PARSERS, LLM_FACTORY, API_KEY, LLM_BASE_URL
from api import settings
from api.utils.file_utils import get_project_base_directory

@@ -50,11 +51,11 @@ def init_superuser():
    tenant = {
        "id": user_info["id"],
        "name": user_info["nickname"] + "‘s Kingdom",
        "llm_id": CHAT_MDL,
        "embd_id": EMBEDDING_MDL,
        "asr_id": ASR_MDL,
        "parser_ids": PARSERS,
        "img2txt_id": IMAGE2TEXT_MDL
        "llm_id": settings.CHAT_MDL,
        "embd_id": settings.EMBEDDING_MDL,
        "asr_id": settings.ASR_MDL,
        "parser_ids": settings.PARSERS,
        "img2txt_id": settings.IMAGE2TEXT_MDL
    }
    usr_tenant = {
        "tenant_id": user_info["id"],
@@ -63,42 +64,43 @@ def init_superuser():
        "role": UserTenantRole.OWNER
    }
    tenant_llm = []
    for llm in LLMService.query(fid=LLM_FACTORY):
    for llm in LLMService.query(fid=settings.LLM_FACTORY):
        tenant_llm.append(
            {"tenant_id": user_info["id"], "llm_factory": LLM_FACTORY, "llm_name": llm.llm_name, "model_type": llm.model_type,
             "api_key": API_KEY, "api_base": LLM_BASE_URL})
            {"tenant_id": user_info["id"], "llm_factory": settings.LLM_FACTORY, "llm_name": llm.llm_name,
             "model_type": llm.model_type,
             "api_key": settings.API_KEY, "api_base": settings.LLM_BASE_URL})

    if not UserService.save(**user_info):
        print("\033[93m【ERROR】\033[0mcan't init admin.")
        logging.error("can't init admin.")
        return
    TenantService.insert(**tenant)
    UserTenantService.insert(**usr_tenant)
    TenantLLMService.insert_many(tenant_llm)
    print(
        "【INFO】Super user initialized. \033[93memail: admin@ragflow.io, password: admin\033[0m. Changing the password after logining is strongly recomanded.")
    logging.info(
        "Super user initialized. email: admin@ragflow.io, password: admin. Changing the password after login is strongly recommended.")

    chat_mdl = LLMBundle(tenant["id"], LLMType.CHAT, tenant["llm_id"])
    msg = chat_mdl.chat(system="", history=[
        {"role": "user", "content": "Hello!"}], gen_conf={})
        {"role": "user", "content": "Hello!"}], gen_conf={})
    if msg.find("ERROR: ") == 0:
        print(
            "\33[91m【ERROR】\33[0m: ",
        logging.error(
            "'{}' dosen't work. {}".format(
                tenant["llm_id"],
                msg))
    embd_mdl = LLMBundle(tenant["id"], LLMType.EMBEDDING, tenant["embd_id"])
    v, c = embd_mdl.encode(["Hello!"])
    if c == 0:
        print(
            "\33[91m【ERROR】\33[0m:",
            " '{}' dosen't work!".format(
        logging.error(
            "'{}' dosen't work!".format(
                tenant["embd_id"]))


def init_llm_factory():
    try:
        LLMService.filter_delete([(LLM.fid == "MiniMax" or LLM.fid == "Minimax")])
    except Exception as e:
        LLMService.filter_delete([(LLM.fid == "cohere")])
        LLMFactoriesService.filter_delete([LLMFactories.name == "cohere"])
    except Exception:
        pass

    factory_llm_infos = json.load(
@@ -111,14 +113,14 @@ def init_llm_factory():
        llm_infos = factory_llm_info.pop("llm")
        try:
            LLMFactoriesService.save(**factory_llm_info)
        except Exception as e:
        except Exception:
            pass
        LLMService.filter_delete([LLM.fid == factory_llm_info["name"]])
        for llm_info in llm_infos:
            llm_info["fid"] = factory_llm_info["name"]
            try:
                LLMService.save(**llm_info)
            except Exception as e:
            except Exception:
                pass

    LLMFactoriesService.filter_delete([LLMFactories.name == "Local"])
@@ -129,6 +131,7 @@ def init_llm_factory():
    LLMFactoriesService.filter_delete([LLMFactoriesService.model.name == "QAnything"])
    LLMService.filter_delete([LLMService.model.fid == "QAnything"])
    TenantLLMService.filter_update([TenantLLMService.model.llm_factory == "QAnything"], {"llm_factory": "Youdao"})
    TenantLLMService.filter_update([TenantLLMService.model.llm_factory == "cohere"], {"llm_factory": "Cohere"})
    TenantService.filter_update([1 == 1], {
        "parser_ids": "naive:General,qa:Q&A,resume:Resume,manual:Manual,table:Table,paper:Paper,book:Book,laws:Laws,presentation:Presentation,picture:Picture,one:One,audio:Audio,knowledge_graph:Knowledge Graph,email:Email"})
    ## insert openai two embedding models to the current openai user.
@@ -145,7 +148,7 @@ def init_llm_factory():
            row = deepcopy(row)
            row["llm_name"] = "text-embedding-3-large"
            TenantLLMService.save(**row)
        except Exception as e:
        except Exception:
            pass
        break
    for kb_id in KnowledgebaseService.get_all_ids():
@@ -169,20 +172,19 @@ def add_graph_templates():
            CanvasTemplateService.save(**cnvs)
        except:
            CanvasTemplateService.update_by_id(cnvs["id"], cnvs)
    except Exception as e:
        print("Add graph templates error: ", e)
        print("------------", flush=True)
    except Exception:
        logging.exception("Add graph templates error: ")


def init_web_data():
    start_time = time.time()

    init_llm_factory()
    #if not UserService.get_all().count():
    # if not UserService.get_all().count():
    #     init_superuser()

    add_graph_templates()
    print("init web data success:{}".format(time.time() - start_time))
    logging.info("init web data success:{}".format(time.time() - start_time))


if __name__ == '__main__':
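
The recurring edit in this file, swapping `from api.settings import CHAT_MDL, ...` for `from api import settings` plus attribute access, defers every read until after `settings.init_settings()` has filled the values in. A minimal stand-alone sketch of that late-binding pattern, with an environment variable standing in for RAGFlow's service_conf.yaml (the default model names below are illustrative):

import os

# Module-level placeholders; real values are assigned by init_settings() at startup.
CHAT_MDL = ""
EMBEDDING_MDL = ""

def init_settings():
    """Populate module globals once configuration is available."""
    global CHAT_MDL, EMBEDDING_MDL
    # RAGFlow reads service_conf.yaml here; environment variables stand in.
    CHAT_MDL = os.environ.get("CHAT_MDL", "qwen-plus@Tongyi-Qianwen")
    EMBEDDING_MDL = os.environ.get("EMBEDDING_MDL", "BAAI/bge-large-zh-v1.5@BAAI")

if __name__ == "__main__":
    init_settings()
    # Attribute access (settings.CHAT_MDL) sees the late-bound value;
    # `from settings import CHAT_MDL` at import time would capture the empty default.
    print(CHAT_MDL, EMBEDDING_MDL)
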
@@ -1,21 +0,0 @@
#
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

import operator
import time
import typing
from api.utils.log_utils import sql_logger
import peewee
@@ -13,7 +13,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#
from api.versions import get_versions
from api.versions import get_ragflow_version
from .reload_config_base import ReloadConfigBase


@@ -35,7 +35,7 @@ class RuntimeConfig(ReloadConfigBase):

    @classmethod
    def init_env(cls):
        cls.ENV.update(get_versions())
        cls.ENV.update({"version": get_ragflow_version()})

    @classmethod
    def load_config_manager(cls):
@@ -22,5 +22,6 @@ from api.db.services.common_service import CommonService
class CanvasTemplateService(CommonService):
    model = CanvasTemplate


class UserCanvasService(CommonService):
    model = UserCanvas
@@ -13,20 +13,21 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#
import logging
import binascii
import os
import json
import re
from copy import deepcopy
from timeit import default_timer as timer

import datetime
from datetime import timedelta
from api.db import LLMType, ParserType,StatusEnum
from api.db.db_models import Dialog, Conversation,DB
from api.db.services.common_service import CommonService
from api.db.services.knowledgebase_service import KnowledgebaseService
from api.db.services.llm_service import LLMService, TenantLLMService, LLMBundle
from api.settings import chat_logger, retrievaler, kg_retrievaler
from api import settings
from rag.app.resume import forbidden_select_fields4resume
from rag.nlp.search import index_name
from rag.utils import rmSpace, num_tokens_from_string, encoder
@@ -97,7 +98,8 @@ def message_fit_in(msg, max_length=4000):
        return c, msg

    msg_ = [m for m in msg[:-1] if m["role"] == "system"]
    msg_.append(msg[-1])
    if len(msg) > 1:
        msg_.append(msg[-1])
    msg = msg_
    c = count()
    if c < max_length:
@@ -151,7 +153,7 @@ def chat(dialog, messages, stream=True, **kwargs):
        return {"answer": "**ERROR**: Knowledge bases use different embedding models.", "reference": []}

    is_kg = all([kb.parser_id == ParserType.KG for kb in kbs])
    retr = retrievaler if not is_kg else kg_retrievaler
    retr = settings.retrievaler if not is_kg else settings.kg_retrievaler

    questions = [m["content"] for m in messages if m["role"] == "user"][-3:]
    attachments = kwargs["doc_ids"].split(",") if "doc_ids" in kwargs else None
@@ -162,6 +164,9 @@ def chat(dialog, messages, stream=True, **kwargs):
            attachments.extend(m["doc_ids"])

    embd_mdl = LLMBundle(dialog.tenant_id, LLMType.EMBEDDING, embd_nms[0])
    if not embd_mdl:
        raise LookupError("Embedding model(%s) not found" % embd_nms[0])

    if llm_id2llm_type(dialog.llm_id) == "image2text":
        chat_mdl = LLMBundle(dialog.tenant_id, LLMType.IMAGE2TEXT, dialog.llm_id)
    else:
@@ -174,7 +179,7 @@ def chat(dialog, messages, stream=True, **kwargs):
        tts_mdl = LLMBundle(dialog.tenant_id, LLMType.TTS)
    # try to use sql if field mapping is good to go
    if field_map:
        chat_logger.info("Use SQL to retrieval:{}".format(questions[-1]))
        logging.debug("Use SQL to retrieval:{}".format(questions[-1]))
        ans = use_sql(questions[-1], field_map, dialog.tenant_id, chat_mdl, prompt_config.get("quote", True))
        if ans:
            yield ans
@@ -193,6 +198,8 @@ def chat(dialog, messages, stream=True, **kwargs):
        questions = [full_question(dialog.tenant_id, dialog.llm_id, messages)]
    else:
        questions = questions[-1:]
    refineQ_tm = timer()
    keyword_tm = timer()

    rerank_mdl = None
    if dialog.rerank_id:
@@ -205,6 +212,7 @@ def chat(dialog, messages, stream=True, **kwargs):
    else:
        if prompt_config.get("keyword", False):
            questions[-1] += keyword_extraction(chat_mdl, questions[-1])
            keyword_tm = timer()

        tenant_ids = list(set([kb.tenant_id for kb in kbs]))
        kbinfos = retr.retrieval(" ".join(questions), embd_mdl, tenant_ids, dialog.kb_ids, 1, dialog.top_n,
@@ -213,7 +221,7 @@ def chat(dialog, messages, stream=True, **kwargs):
                                 doc_ids=attachments,
                                 top=dialog.top_k, aggs=False, rerank_mdl=rerank_mdl)
        knowledges = [ck["content_with_weight"] for ck in kbinfos["chunks"]]
    chat_logger.info(
    logging.debug(
        "{}->{}".format(" ".join(questions), "\n->".join(knowledges)))
    retrieval_tm = timer()

@@ -264,7 +272,9 @@ def chat(dialog, messages, stream=True, **kwargs):
        if answer.lower().find("invalid key") >= 0 or answer.lower().find("invalid api") >= 0:
            answer += " Please set LLM API-Key in 'User Setting -> Model Providers -> API-Key'"
        done_tm = timer()
        prompt += "\n\n### Elapsed\n - Retrieval: %.1f ms\n - LLM: %.1f ms"%((retrieval_tm-st)*1000, (done_tm-st)*1000)
        prompt += "\n\n### Elapsed\n - Refine Question: %.1f ms\n - Keywords: %.1f ms\n - Retrieval: %.1f ms\n - LLM: %.1f ms" % (
            (refineQ_tm - st) * 1000, (keyword_tm - refineQ_tm) * 1000, (retrieval_tm - keyword_tm) * 1000,
            (done_tm - retrieval_tm) * 1000)
        return {"answer": answer, "reference": refs, "prompt": prompt}

    if stream:
@@ -283,7 +293,7 @@ def chat(dialog, messages, stream=True, **kwargs):
        yield decorate_answer(answer)
    else:
        answer = chat_mdl.chat(prompt, msg[1:], gen_conf)
        chat_logger.info("User: {}|Assistant: {}".format(
        logging.debug("User: {}|Assistant: {}".format(
            msg[-1]["content"], answer))
        res = decorate_answer(answer)
        res["audio_binary"] = tts(tts_mdl, answer)
@@ -311,8 +321,7 @@ def use_sql(question, field_map, tenant_id, chat_mdl, quota=True):
        nonlocal sys_prompt, user_promt, question, tried_times
        sql = chat_mdl.chat(sys_prompt, [{"role": "user", "content": user_promt}], {
            "temperature": 0.06})
        print(user_promt, sql)
        chat_logger.info(f"“{question}”==>{user_promt} get SQL: {sql}")
        logging.debug(f"{question} ==> {user_promt} get SQL: {sql}")
        sql = re.sub(r"[\r\n]+", " ", sql.lower())
        sql = re.sub(r".*select ", "select ", sql.lower())
        sql = re.sub(r" +", " ", sql)
@@ -332,11 +341,9 @@ def use_sql(question, field_map, tenant_id, chat_mdl, quota=True):
                flds.append(k)
            sql = "select doc_id,docnm_kwd," + ",".join(flds) + sql[8:]

        print(f"“{question}” get SQL(refined): {sql}")

        chat_logger.info(f"“{question}” get SQL(refined): {sql}")
        logging.debug(f"{question} get SQL(refined): {sql}")
        tried_times += 1
        return retrievaler.sql_retrieval(sql, format="json"), sql
        return settings.retrievaler.sql_retrieval(sql, format="json"), sql

    tbl, sql = get_table()
    if tbl is None:
@@ -363,10 +370,9 @@ def use_sql(question, field_map, tenant_id, chat_mdl, quota=True):
            question, sql, tbl["error"]
        )
        tbl, sql = get_table()
        chat_logger.info("TRY it again: {}".format(sql))
        logging.debug("TRY it again: {}".format(sql))

    chat_logger.info("GET table: {}".format(tbl))
    print(tbl)
    logging.debug("GET table: {}".format(tbl))
    if tbl.get("error") or len(tbl["rows"]) == 0:
        return None

@@ -388,6 +394,7 @@ def use_sql(question, field_map, tenant_id, chat_mdl, quota=True):
    rows = ["|" +
            "|".join([rmSpace(str(r[i])) for i in clmn_idx]).replace("None", " ") +
            "|" for r in tbl["rows"]]
    rows = [r for r in rows if re.sub(r"[ |]+", "", r)]
    if quota:
        rows = "\n".join([r + f" ##{ii}$$ |" for ii, r in enumerate(rows)])
    else:
@@ -395,7 +402,7 @@ def use_sql(question, field_map, tenant_id, chat_mdl, quota=True):
    rows = re.sub(r"T[0-9]{2}:[0-9]{2}:[0-9]{2}(\.[0-9]+Z)?\|", "|", rows)

    if not docid_idx or not docnm_idx:
        chat_logger.warning("SQL missing field: " + sql)
        logging.warning("SQL missing field: " + sql)
        return {
            "answer": "\n".join([clmns, line, rows]),
            "reference": {"chunks": [], "doc_aggs": []},
@@ -520,9 +527,16 @@ def full_question(tenant_id, llm_id, messages):
        if m["role"] not in ["user", "assistant"]: continue
        conv.append("{}: {}".format(m["role"].upper(), m["content"]))
    conv = "\n".join(conv)
    today = datetime.date.today().isoformat()
    yesterday = (datetime.date.today() - timedelta(days=1)).isoformat()
    tomorrow = (datetime.date.today() + timedelta(days=1)).isoformat()
    prompt = f"""
Role: A helpful assistant
Task: Generate a full user question that would follow the conversation.

Task and steps:
1. Generate a full user question that would follow the conversation.
2. If the user's question involves relative date, you need to convert it into absolute date based on the current date, which is {today}. For example: 'yesterday' would be converted to {yesterday}.

Requirements & Restrictions:
- Text generated MUST be in the same language of the original user's question.
- If the user's latest question is completely, don't do anything, just return the original question.
@@ -551,6 +565,14 @@ User: What's her full name?
###############
Output: What's the full name of Donald Trump's mother Mary Trump?

------------
# Example 3
## Conversation
USER: What's the weather today in London?
ASSISTANT: Cloudy.
USER: What's about tomorrow in Rochester?
###############
Output: What's the weather in Rochester on {tomorrow}?
######################

# Real Data
@@ -572,16 +594,17 @@ def tts(tts_mdl, text):

def ask(question, kb_ids, tenant_id):
    kbs = KnowledgebaseService.get_by_ids(kb_ids)
    tenant_ids = [kb.tenant_id for kb in kbs]
    embd_nms = list(set([kb.embd_id for kb in kbs]))

    is_kg = all([kb.parser_id == ParserType.KG for kb in kbs])
    retr = retrievaler if not is_kg else kg_retrievaler
    retr = settings.retrievaler if not is_kg else settings.kg_retrievaler

    embd_mdl = LLMBundle(tenant_id, LLMType.EMBEDDING, embd_nms[0])
    chat_mdl = LLMBundle(tenant_id, LLMType.CHAT)
    max_tokens = chat_mdl.max_length

    kbinfos = retr.retrieval(question, embd_mdl, tenant_id, kb_ids, 1, 12, 0.1, 0.3, aggs=False)
    kbinfos = retr.retrieval(question, embd_mdl, tenant_ids, kb_ids, 1, 12, 0.1, 0.3, aggs=False)
    knowledges = [ck["content_with_weight"] for ck in kbinfos["chunks"]]

    used_token_count = 0
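
The `refineQ_tm` / `keyword_tm` / `retrieval_tm` checkpoints added in `chat()` turn one elapsed figure into a per-stage breakdown, where each stage is the delta to the previous checkpoint. A small self-contained sketch of the same checkpoint pattern, with sleeps standing in for the real stages:

from timeit import default_timer as timer
import time

def fake_stage(seconds):
    time.sleep(seconds)

st = timer()
fake_stage(0.01)          # stands in for question refinement
refineQ_tm = timer()
fake_stage(0.02)          # stands in for keyword extraction
keyword_tm = timer()
fake_stage(0.03)          # stands in for retrieval
retrieval_tm = timer()

# Report each stage as the difference between adjacent checkpoints, in ms.
print("Refine: %.1f ms, Keywords: %.1f ms, Retrieval: %.1f ms" % (
    (refineQ_tm - st) * 1000,
    (keyword_tm - refineQ_tm) * 1000,
    (retrieval_tm - keyword_tm) * 1000))
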
@@ -13,27 +13,23 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#
import logging
import hashlib
import json
import os
import random
import re
import traceback
from concurrent.futures import ThreadPoolExecutor
from copy import deepcopy
from datetime import datetime
from io import BytesIO

from elasticsearch_dsl import Q
from peewee import fn

from api.db.db_utils import bulk_insert_into_db
from api.settings import stat_logger
from api import settings
from api.utils import current_timestamp, get_format_time, get_uuid
from api.utils.file_utils import get_project_base_directory
from graphrag.mind_map_extractor import MindMapExtractor
from rag.settings import SVR_QUEUE_NAME
from rag.utils.es_conn import ELASTICSEARCH
from rag.utils.storage_factory import STORAGE_IMPL
from rag.nlp import search, rag_tokenizer

@@ -52,11 +48,15 @@ class DocumentService(CommonService):
    @classmethod
    @DB.connection_context()
    def get_list(cls, kb_id, page_number, items_per_page,
                 orderby, desc, keywords, id):
        docs =cls.model.select().where(cls.model.kb_id==kb_id)
                 orderby, desc, keywords, id, name):
        docs = cls.model.select().where(cls.model.kb_id == kb_id)
        if id:
            docs = docs.where(
                cls.model.id== id )
                cls.model.id == id)
        if name:
            docs = docs.where(
                cls.model.name == name
            )
        if keywords:
            docs = docs.where(
                fn.LOWER(cls.model.name).contains(keywords.lower())
@@ -70,7 +70,6 @@ class DocumentService(CommonService):
        count = docs.count()
        return list(docs.dicts()), count


    @classmethod
    @DB.connection_context()
    def get_by_kb_id(cls, kb_id, page_number, items_per_page,
@@ -92,35 +91,6 @@ class DocumentService(CommonService):

        return list(docs.dicts()), count

    @classmethod
    @DB.connection_context()
    def list_documents_in_dataset(cls, dataset_id, offset, count, order_by, descend, keywords):
        if keywords:
            docs = cls.model.select().where(
                (cls.model.kb_id == dataset_id),
                (fn.LOWER(cls.model.name).contains(keywords.lower()))
            )
        else:
            docs = cls.model.select().where(cls.model.kb_id == dataset_id)

        total = docs.count()

        if descend == 'True':
            docs = docs.order_by(cls.model.getter_by(order_by).desc())
        if descend == 'False':
            docs = docs.order_by(cls.model.getter_by(order_by).asc())

        docs = list(docs.dicts())
        docs_length = len(docs)

        if offset < 0 or offset > docs_length:
            raise IndexError("Offset is out of the valid range.")

        if count == -1:
            return docs[offset:], total

        return docs[offset:offset + count], total

    @classmethod
    @DB.connection_context()
    def insert(cls, doc):
@@ -138,8 +108,7 @@ class DocumentService(CommonService):
    @classmethod
    @DB.connection_context()
    def remove_document(cls, doc, tenant_id):
        ELASTICSEARCH.deleteByQuery(
            Q("match", doc_id=doc.id), idxnm=search.index_name(tenant_id))
        settings.docStoreConn.delete({"doc_id": doc.id}, search.index_name(tenant_id), doc.kb_id)
        cls.clear_chunk_num(doc.id)
        return cls.delete_by_id(doc.id)

@@ -162,26 +131,27 @@ class DocumentService(CommonService):
            cls.model.update_time]
        docs = cls.model.select(*fields) \
            .join(Knowledgebase, on=(cls.model.kb_id == Knowledgebase.id)) \
            .join(Tenant, on=(Knowledgebase.tenant_id == Tenant.id))\
            .join(Tenant, on=(Knowledgebase.tenant_id == Tenant.id)) \
            .where(
            cls.model.status == StatusEnum.VALID.value,
            ~(cls.model.type == FileType.VIRTUAL.value),
            cls.model.progress == 0,
            cls.model.update_time >= current_timestamp() - 1000 * 600,
            cls.model.run == TaskStatus.RUNNING.value)\
                cls.model.status == StatusEnum.VALID.value,
                ~(cls.model.type == FileType.VIRTUAL.value),
                cls.model.progress == 0,
                cls.model.update_time >= current_timestamp() - 1000 * 600,
                cls.model.run == TaskStatus.RUNNING.value) \
            .order_by(cls.model.update_time.asc())
        return list(docs.dicts())

    @classmethod
    @DB.connection_context()
    def get_unfinished_docs(cls):
        fields = [cls.model.id, cls.model.process_begin_at, cls.model.parser_config, cls.model.progress_msg, cls.model.run]
        fields = [cls.model.id, cls.model.process_begin_at, cls.model.parser_config, cls.model.progress_msg,
                  cls.model.run]
        docs = cls.model.select(*fields) \
            .where(
            cls.model.status == StatusEnum.VALID.value,
            ~(cls.model.type == FileType.VIRTUAL.value),
            cls.model.progress < 1,
            cls.model.progress > 0)
                cls.model.status == StatusEnum.VALID.value,
                ~(cls.model.type == FileType.VIRTUAL.value),
                cls.model.progress < 1,
                cls.model.progress > 0)
        return list(docs.dicts())

    @classmethod
@@ -196,12 +166,12 @@ class DocumentService(CommonService):
                "Document not found which is supposed to be there")
        num = Knowledgebase.update(
            token_num=Knowledgebase.token_num +
            token_num,
                      token_num,
            chunk_num=Knowledgebase.chunk_num +
            chunk_num).where(
                      chunk_num).where(
            Knowledgebase.id == kb_id).execute()
        return num

    @classmethod
    @DB.connection_context()
    def decrement_chunk_num(cls, doc_id, kb_id, token_num, chunk_num, duation):
@@ -214,13 +184,13 @@ class DocumentService(CommonService):
                "Document not found which is supposed to be there")
        num = Knowledgebase.update(
            token_num=Knowledgebase.token_num -
            token_num,
                      token_num,
            chunk_num=Knowledgebase.chunk_num -
            chunk_num
                      chunk_num
        ).where(
            Knowledgebase.id == kb_id).execute()
        return num

    @classmethod
    @DB.connection_context()
    def clear_chunk_num(cls, doc_id):
@@ -229,10 +199,10 @@ class DocumentService(CommonService):

        num = Knowledgebase.update(
            token_num=Knowledgebase.token_num -
            doc.token_num,
                      doc.token_num,
            chunk_num=Knowledgebase.chunk_num -
            doc.chunk_num,
            doc_num=Knowledgebase.doc_num-1
                      doc.chunk_num,
            doc_num=Knowledgebase.doc_num - 1
        ).where(
            Knowledgebase.id == doc.kb_id).execute()
        return num
@@ -243,13 +213,22 @@ class DocumentService(CommonService):
        docs = cls.model.select(
            Knowledgebase.tenant_id).join(
            Knowledgebase, on=(
                Knowledgebase.id == cls.model.kb_id)).where(
            cls.model.id == doc_id, Knowledgebase.status == StatusEnum.VALID.value)
                Knowledgebase.id == cls.model.kb_id)).where(
                cls.model.id == doc_id, Knowledgebase.status == StatusEnum.VALID.value)
        docs = docs.dicts()
        if not docs:
            return
        return docs[0]["tenant_id"]

    @classmethod
    @DB.connection_context()
    def get_knowledgebase_id(cls, doc_id):
        docs = cls.model.select(cls.model.kb_id).where(cls.model.id == doc_id)
        docs = docs.dicts()
        if not docs:
            return
        return docs[0]["kb_id"]

    @classmethod
    @DB.connection_context()
    def get_tenant_id_by_name(cls, name):
@@ -270,8 +249,8 @@ class DocumentService(CommonService):
            cls.model.id).join(
            Knowledgebase, on=(
                Knowledgebase.id == cls.model.kb_id)
        ).join(UserTenant, on=(UserTenant.tenant_id == Knowledgebase.tenant_id)
               ).where(cls.model.id == doc_id, UserTenant.user_id == user_id).paginate(0, 1)
        ).join(UserTenant, on=(UserTenant.tenant_id == Knowledgebase.tenant_id)
               ).where(cls.model.id == doc_id, UserTenant.user_id == user_id).paginate(0, 1)
        docs = docs.dicts()
        if not docs:
            return False
@@ -284,7 +263,7 @@ class DocumentService(CommonService):
            cls.model.id).join(
            Knowledgebase, on=(
                Knowledgebase.id == cls.model.kb_id)
        ).where(cls.model.id == doc_id, Knowledgebase.created_by == user_id).paginate(0, 1)
        ).where(cls.model.id == doc_id, Knowledgebase.created_by == user_id).paginate(0, 1)
        docs = docs.dicts()
        if not docs:
            return False
@@ -296,13 +275,13 @@ class DocumentService(CommonService):
        docs = cls.model.select(
            Knowledgebase.embd_id).join(
            Knowledgebase, on=(
                Knowledgebase.id == cls.model.kb_id)).where(
            cls.model.id == doc_id, Knowledgebase.status == StatusEnum.VALID.value)
                Knowledgebase.id == cls.model.kb_id)).where(
                cls.model.id == doc_id, Knowledgebase.status == StatusEnum.VALID.value)
        docs = docs.dicts()
        if not docs:
            return
        return docs[0]["embd_id"]


    @classmethod
    @DB.connection_context()
    def get_doc_id_by_doc_name(cls, doc_name):
@@ -338,6 +317,7 @@ class DocumentService(CommonService):
                dfs_update(old[k], v)
            else:
                old[k] = v

        dfs_update(d.parser_config, config)
        cls.update_by_id(id, {"parser_config": d.parser_config})

@@ -354,7 +334,7 @@ class DocumentService(CommonService):
    def begin2parse(cls, docid):
        cls.update_by_id(
            docid, {"progress": random.random() * 1 / 100.,
                    "progress_msg": "Task dispatched...",
                    "progress_msg": "Task is queued...",
                    "process_begin_at": get_format_time()
                    })

@@ -372,7 +352,7 @@ class DocumentService(CommonService):
            finished = True
            bad = 0
            e, doc = DocumentService.get_by_id(d["id"])
            status = doc.run#TaskStatus.RUNNING.value
            status = doc.run  # TaskStatus.RUNNING.value
            for t in tsks:
                if 0 <= t.progress < 1:
                    finished = False
@@ -386,9 +366,10 @@ class DocumentService(CommonService):
                prg = -1
                status = TaskStatus.FAIL.value
            elif finished:
                if d["parser_config"].get("raptor", {}).get("use_raptor") and d["progress_msg"].lower().find(" raptor")<0:
                if d["parser_config"].get("raptor", {}).get("use_raptor") and d["progress_msg"].lower().find(
                        " raptor") < 0:
                    queue_raptor_tasks(d)
                    prg = 0.98 * len(tsks)/(len(tsks)+1)
                    prg = 0.98 * len(tsks) / (len(tsks) + 1)
                    msg.append("------ RAPTOR -------")
                else:
                    status = TaskStatus.DONE.value
@@ -406,7 +387,7 @@ class DocumentService(CommonService):
                cls.update_by_id(d["id"], info)
            except Exception as e:
                if str(e).find("'0'") < 0:
                    stat_logger.error("fetch task exception:" + str(e))
                    logging.exception("fetch task exception")

    @classmethod
    @DB.connection_context()
@@ -414,14 +395,13 @@ class DocumentService(CommonService):
        return len(cls.model.select(cls.model.id).where(
            cls.model.kb_id == kb_id).dicts())


    @classmethod
    @DB.connection_context()
    def do_cancel(cls, doc_id):
        try:
            _, doc = DocumentService.get_by_id(doc_id)
            return doc.run == TaskStatus.CANCEL.value or doc.progress < 0
        except Exception as e:
        except Exception:
            pass
        return False

@@ -434,7 +414,7 @@ def queue_raptor_tasks(doc):
        "doc_id": doc["id"],
        "from_page": 0,
        "to_page": -1,
        "progress_msg": "Start to do RAPTOR (Recursive Abstractive Processing For Tree-Organized Retrieval)."
        "progress_msg": "Start to do RAPTOR (Recursive Abstractive Processing for Tree-Organized Retrieval)."
    }

    task = new_task()
@@ -462,11 +442,6 @@ def doc_upload_and_parse(conversation_id, file_objs, user_id):
    if not e:
        raise LookupError("Can't find this knowledgebase!")

    idxnm = search.index_name(kb.tenant_id)
    if not ELASTICSEARCH.indexExist(idxnm):
        ELASTICSEARCH.createIdx(idxnm, json.load(
            open(os.path.join(get_project_base_directory(), "conf", "mapping.json"), "r")))

    embd_mdl = LLMBundle(kb.tenant_id, LLMType.EMBEDDING, llm_name=kb.embd_id, lang=kb.language)

    err, files = FileService.upload_document(kb, file_objs, user_id)
@@ -510,7 +485,7 @@ def doc_upload_and_parse(conversation_id, file_objs, user_id):
            md5 = hashlib.md5()
            md5.update((ck["content_with_weight"] +
                        str(d["doc_id"])).encode("utf-8"))
            d["_id"] = md5.hexdigest()
            d["id"] = md5.hexdigest()
            d["create_time"] = str(datetime.now()).replace("T", " ")[:19]
            d["create_timestamp_flt"] = datetime.now().timestamp()
            if not d.get("image"):
@@ -523,9 +498,9 @@ def doc_upload_and_parse(conversation_id, file_objs, user_id):
            else:
                d["image"].save(output_buffer, format='JPEG')

            STORAGE_IMPL.put(kb.id, d["_id"], output_buffer.getvalue())
            d["img_id"] = "{}-{}".format(kb.id, d["_id"])
            del d["image"]
            STORAGE_IMPL.put(kb.id, d["id"], output_buffer.getvalue())
            d["img_id"] = "{}-{}".format(kb.id, d["id"])
            d.pop("image", None)
            docs.append(d)

    parser_ids = {d["id"]: d["parser_id"] for d, _ in files}
@@ -544,6 +519,9 @@ def doc_upload_and_parse(conversation_id, file_objs, user_id):
            token_counts[doc_id] += c
        return vects

    idxnm = search.index_name(kb.tenant_id)
    try_create_idx = True

    _, tenant = TenantService.get_by_id(kb.tenant_id)
    llm_bdl = LLMBundle(kb.tenant_id, LLMType.CHAT, tenant.llm_id)
    for doc_id in docids:
@@ -566,7 +544,7 @@ def doc_upload_and_parse(conversation_id, file_objs, user_id):
                    "knowledge_graph_kwd": "mind_map"
                })
        except Exception as e:
            stat_logger.error("Mind map generation error:", traceback.format_exc())
            logging.exception("Mind map generation error")

        vects = embedding(doc_id, [c["content_with_weight"] for c in cks])
        assert len(cks) == len(vects)
@@ -574,9 +552,13 @@ def doc_upload_and_parse(conversation_id, file_objs, user_id):
            v = vects[i]
            d["q_%d_vec" % len(v)] = v
        for b in range(0, len(cks), es_bulk_size):
            ELASTICSEARCH.bulk(cks[b:b + es_bulk_size], idxnm)
            if try_create_idx:
                if not settings.docStoreConn.indexExist(idxnm, kb_id):
                    settings.docStoreConn.createIdx(idxnm, kb_id, len(vects[0]))
                try_create_idx = False
            settings.docStoreConn.insert(cks[b:b + es_bulk_size], idxnm, kb_id)

        DocumentService.increment_chunk_num(
            doc_id, kb.id, token_counts[doc_id], chunk_counts[doc_id], 0)

    return [d["id"] for d,_ in files]
    return [d["id"] for d, _ in files]
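
In `doc_upload_and_parse`, the eager Elasticsearch index creation at the top of the function is replaced by a `try_create_idx` flag checked inside the bulk-insert loop, so the index is created only once the first embedding batch (and hence the vector dimension) is known. A trimmed, runnable sketch of that lazy-creation loop; the connection object below is a stub, not the real `settings.docStoreConn`:

class FakeDocStore:
    def __init__(self):
        self._indexes = set()
    def indexExist(self, idxnm, kb_id):
        return (idxnm, kb_id) in self._indexes
    def createIdx(self, idxnm, kb_id, vector_size):
        self._indexes.add((idxnm, kb_id))
        print(f"created {idxnm}/{kb_id} for {vector_size}-d vectors")
    def insert(self, batch, idxnm, kb_id):
        print(f"inserted {len(batch)} chunks")

docStoreConn = FakeDocStore()
cks = [{"id": i} for i in range(10)]
vects = [[0.0] * 4 for _ in cks]          # dimension comes from the embedder
idxnm, kb_id, bulk_size = "ragflow_t1", "kb1", 4

try_create_idx = True
for b in range(0, len(cks), bulk_size):
    if try_create_idx:
        # The index dimension is taken from the first batch of vectors.
        if not docStoreConn.indexExist(idxnm, kb_id):
            docStoreConn.createIdx(idxnm, kb_id, len(vects[0]))
        try_create_idx = False
    docStoreConn.insert(cks[b:b + bulk_size], idxnm, kb_id)
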
@@ -13,8 +13,11 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#
import logging
import re
import os
from concurrent.futures import ThreadPoolExecutor

from flask_login import current_user
from peewee import fn

@@ -272,8 +275,8 @@ class FileService(CommonService):
                cls.delete_folder_by_pf_id(user_id, file.id)
            return cls.model.delete().where((cls.model.tenant_id == user_id)
                                            & (cls.model.id == folder_id)).execute(),
        except Exception as e:
            print(e)
        except Exception:
            logging.exception("delete_folder_by_pf_id")
            raise RuntimeError("Database error (File retrieval)!")

    @classmethod
@@ -321,8 +324,8 @@ class FileService(CommonService):
    def move_file(cls, file_ids, folder_id):
        try:
            cls.filter_update((cls.model.id << file_ids, ), { 'parent_id': folder_id })
        except Exception as e:
            print(e)
        except Exception:
            logging.exception("move_file")
            raise RuntimeError("Database error (File move)!")

    @classmethod
@@ -384,6 +387,41 @@ class FileService(CommonService):

        return err, files

    @staticmethod
    def parse_docs(file_objs, user_id):
        from rag.app import presentation, picture, naive, audio, email

        def dummy(prog=None, msg=""):
            pass

        FACTORY = {
            ParserType.PRESENTATION.value: presentation,
            ParserType.PICTURE.value: picture,
            ParserType.AUDIO.value: audio,
            ParserType.EMAIL.value: email
        }
        parser_config = {"chunk_token_num": 16096, "delimiter": "\n!?;。;!?", "layout_recognize": False}
        exe = ThreadPoolExecutor(max_workers=12)
        threads = []
        for file in file_objs:
            kwargs = {
                "lang": "English",
                "callback": dummy,
                "parser_config": parser_config,
                "from_page": 0,
                "to_page": 100000,
                "tenant_id": user_id
            }
            filetype = filename_type(file.filename)
            blob = file.read()
            threads.append(exe.submit(FACTORY.get(FileService.get_parser(filetype, file.filename, ""), naive).chunk, file.filename, blob, **kwargs))

        res = []
        for th in threads:
            res.append("\n".join([ck["content_with_weight"] for ck in th.result()]))

        return "\n\n".join(res)

    @staticmethod
    def get_parser(doc_type, filename, default):
        if doc_type == FileType.VISUAL:
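
The new `parse_docs` fans file parsing out over a thread pool and picks a chunker per file type from a small factory dict, falling back to the `naive` parser for anything unrecognized. A condensed sketch of that dispatch shape, with the parser modules reduced to plain functions and made-up file tuples:

from concurrent.futures import ThreadPoolExecutor

def naive(name, blob):        # default chunker for unknown types
    return [f"naive:{name}"]

def picture(name, blob):      # stands in for rag.app.picture.chunk
    return [f"picture:{name}"]

FACTORY = {"picture": picture}          # unknown types fall back to naive

files = [("a.txt", b"..", "text"), ("b.jpg", b"..", "picture")]
with ThreadPoolExecutor(max_workers=12) as exe:
    # Submit one chunking job per file; FACTORY.get() selects the parser.
    threads = [exe.submit(FACTORY.get(ftype, naive), name, blob)
               for name, blob, ftype in files]
    results = ["\n".join(th.result()) for th in threads]
print("\n\n".join(results))
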
@@ -68,28 +68,13 @@ class KnowledgebaseService(CommonService):

    @classmethod
    @DB.connection_context()
    def get_by_tenant_ids_by_offset(cls, joined_tenant_ids, user_id, offset, count, orderby, desc):
        kbs = cls.model.select().where(
            ((cls.model.tenant_id.in_(joined_tenant_ids) & (cls.model.permission ==
                                                            TenantPermission.TEAM.value)) | (
                cls.model.tenant_id == user_id))
            & (cls.model.status == StatusEnum.VALID.value)
        )
        if desc:
            kbs = kbs.order_by(cls.model.getter_by(orderby).desc())
        else:
            kbs = kbs.order_by(cls.model.getter_by(orderby).asc())

        kbs = list(kbs.dicts())

        kbs_length = len(kbs)
        if offset < 0 or offset > kbs_length:
            raise IndexError("Offset is out of the valid range.")

        if count == -1:
            return kbs[offset:]

        return kbs[offset:offset + count]
    def get_kb_ids(cls, tenant_id):
        fields = [
            cls.model.id,
        ]
        kbs = cls.model.select(*fields).where(cls.model.tenant_id == tenant_id)
        kb_ids = [kb.id for kb in kbs]
        return kb_ids

    @classmethod
    @DB.connection_context()
@@ -204,6 +189,22 @@ class KnowledgebaseService(CommonService):
            return False
        return True

    @classmethod
    @DB.connection_context()
    def get_kb_by_id(cls, kb_id, user_id):
        kbs = cls.model.select().join(UserTenant, on=(UserTenant.tenant_id == Knowledgebase.tenant_id)
                                      ).where(cls.model.id == kb_id, UserTenant.user_id == user_id).paginate(0, 1)
        kbs = kbs.dicts()
        return list(kbs)

    @classmethod
    @DB.connection_context()
    def get_kb_by_name(cls, kb_name, user_id):
        kbs = cls.model.select().join(UserTenant, on=(UserTenant.tenant_id == Knowledgebase.tenant_id)
                                      ).where(cls.model.name == kb_name, UserTenant.user_id == user_id).paginate(0, 1)
        kbs = kbs.dicts()
        return list(kbs)

    @classmethod
    @DB.connection_context()
    def accessible4deletion(cls, kb_id, user_id):
@@ -13,8 +13,8 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#
import logging
from api.db.services.user_service import TenantService
from api.settings import database_logger
from rag.llm import EmbeddingModel, CvModel, ChatModel, RerankModel, Seq2txtModel, TTSModel
from api.db import LLMType
from api.db.db_models import DB
@@ -209,40 +209,40 @@ class LLMBundle(object):
        emd, used_tokens = self.mdl.encode(texts, batch_size)
        if not TenantLLMService.increase_usage(
                self.tenant_id, self.llm_type, used_tokens):
            database_logger.error(
                "Can't update token usage for {}/EMBEDDING used_tokens: {}".format(self.tenant_id, used_tokens))
            logging.error(
                "LLMBundle.encode can't update token usage for {}/EMBEDDING used_tokens: {}".format(self.tenant_id, used_tokens))
        return emd, used_tokens

    def encode_queries(self, query: str):
        emd, used_tokens = self.mdl.encode_queries(query)
        if not TenantLLMService.increase_usage(
                self.tenant_id, self.llm_type, used_tokens):
            database_logger.error(
                "Can't update token usage for {}/EMBEDDING used_tokens: {}".format(self.tenant_id, used_tokens))
            logging.error(
                "LLMBundle.encode_queries can't update token usage for {}/EMBEDDING used_tokens: {}".format(self.tenant_id, used_tokens))
        return emd, used_tokens

    def similarity(self, query: str, texts: list):
        sim, used_tokens = self.mdl.similarity(query, texts)
        if not TenantLLMService.increase_usage(
                self.tenant_id, self.llm_type, used_tokens):
            database_logger.error(
                "Can't update token usage for {}/RERANK used_tokens: {}".format(self.tenant_id, used_tokens))
            logging.error(
                "LLMBundle.similarity can't update token usage for {}/RERANK used_tokens: {}".format(self.tenant_id, used_tokens))
        return sim, used_tokens

    def describe(self, image, max_tokens=300):
        txt, used_tokens = self.mdl.describe(image, max_tokens)
        if not TenantLLMService.increase_usage(
                self.tenant_id, self.llm_type, used_tokens):
            database_logger.error(
                "Can't update token usage for {}/IMAGE2TEXT used_tokens: {}".format(self.tenant_id, used_tokens))
            logging.error(
                "LLMBundle.describe can't update token usage for {}/IMAGE2TEXT used_tokens: {}".format(self.tenant_id, used_tokens))
        return txt

    def transcription(self, audio):
        txt, used_tokens = self.mdl.transcription(audio)
        if not TenantLLMService.increase_usage(
                self.tenant_id, self.llm_type, used_tokens):
            database_logger.error(
                "Can't update token usage for {}/SEQUENCE2TXT used_tokens: {}".format(self.tenant_id, used_tokens))
            logging.error(
                "LLMBundle.transcription can't update token usage for {}/SEQUENCE2TXT used_tokens: {}".format(self.tenant_id, used_tokens))
        return txt

    def tts(self, text):
@@ -250,8 +250,8 @@ class LLMBundle(object):
            if isinstance(chunk,int):
                if not TenantLLMService.increase_usage(
                        self.tenant_id, self.llm_type, chunk, self.llm_name):
                    database_logger.error(
                        "Can't update token usage for {}/TTS".format(self.tenant_id))
                    logging.error(
                        "LLMBundle.tts can't update token usage for {}/TTS".format(self.tenant_id))
                return
            yield chunk

@@ -259,8 +259,8 @@ class LLMBundle(object):
        txt, used_tokens = self.mdl.chat(system, history, gen_conf)
        if isinstance(txt, int) and not TenantLLMService.increase_usage(
                self.tenant_id, self.llm_type, used_tokens, self.llm_name):
            database_logger.error(
                "Can't update token usage for {}/CHAT llm_name: {}, used_tokens: {}".format(self.tenant_id, self.llm_name, used_tokens))
            logging.error(
                "LLMBundle.chat can't update token usage for {}/CHAT llm_name: {}, used_tokens: {}".format(self.tenant_id, self.llm_name, used_tokens))
        return txt

    def chat_streamly(self, system, history, gen_conf):
@@ -268,7 +268,7 @@ class LLMBundle(object):
            if isinstance(txt, int):
                if not TenantLLMService.increase_usage(
                        self.tenant_id, self.llm_type, txt, self.llm_name):
                    database_logger.error(
                        "Can't update token usage for {}/CHAT llm_name: {}, content: {}".format(self.tenant_id, self.llm_name, txt))
                    logging.error(
                        "LLMBundle.chat_streamly can't update token usage for {}/CHAT llm_name: {}, content: {}".format(self.tenant_id, self.llm_name, txt))
                return
            yield txt
@@ -36,7 +36,7 @@ class TaskService(CommonService):

    @classmethod
    @DB.connection_context()
    def get_tasks(cls, task_id):
    def get_task(cls, task_id):
        fields = [
            cls.model.id,
            cls.model.doc_id,
@@ -63,7 +63,7 @@ class TaskService(CommonService):
            .join(Tenant, on=(Knowledgebase.tenant_id == Tenant.id)) \
            .where(cls.model.id == task_id)
        docs = list(docs.dicts())
        if not docs: return []
        if not docs: return None

        msg = "\nTask has been received."
        prog = random.random() / 10.
@@ -77,9 +77,9 @@ class TaskService(CommonService):
        ).where(
            cls.model.id == docs[0]["id"]).execute()

        if docs[0]["retry_count"] >= 3: return []
        if docs[0]["retry_count"] >= 3: return None

        return docs
        return docs[0]

    @classmethod
    @DB.connection_context()
@@ -108,7 +108,7 @@ class TaskService(CommonService):
            task = cls.model.get_by_id(id)
            _, doc = DocumentService.get_by_id(task.doc_id)
            return doc.run == TaskStatus.CANCEL.value or doc.progress < 0
        except Exception as e:
        except Exception:
            pass
        return False
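
Renaming `get_tasks` to `get_task` and returning `docs[0]` or `None` instead of a list changes the caller contract: callers now test for `None` rather than for an empty list. A hedged stub illustrating the new convention; everything except the `get_task` name and the retry-count cutoff of 3 is invented for the example:

from typing import Optional

def get_task(rows: list, retry_limit: int = 3) -> Optional[dict]:
    """Return a single task row, or None when there is nothing to run."""
    if not rows:
        return None
    if rows[0].get("retry_count", 0) >= retry_limit:
        return None                  # too many retries: treat as exhausted
    return rows[0]

task = get_task([{"id": "t1", "retry_count": 1}])
if task is None:
    print("nothing to do")
else:
    print("dispatch", task["id"])
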
@@ -14,7 +14,21 @@
# limitations under the License.
#

# from beartype import BeartypeConf
# from beartype.claw import beartype_all  # <-- you didn't sign up for this
# beartype_all(conf=BeartypeConf(violation_type=UserWarning))  # <-- emit warnings from all code

import logging
from api.utils.log_utils import initRootLogger
initRootLogger("ragflow_server")
for module in ["pdfminer"]:
    module_logger = logging.getLogger(module)
    module_logger.setLevel(logging.WARNING)
for module in ["peewee"]:
    module_logger = logging.getLogger(module)
    module_logger.handlers.clear()
    module_logger.propagate = True

import os
import signal
import sys
@@ -23,17 +37,16 @@ import traceback
from concurrent.futures import ThreadPoolExecutor

from werkzeug.serving import run_simple
from api import settings
from api.apps import app
from api.db.runtime_config import RuntimeConfig
from api.db.services.document_service import DocumentService
from api.settings import (
    HOST, HTTP_PORT, access_logger, database_logger, stat_logger,
)
from api import utils

from api.db.db_models import init_database_tables as init_web_db
from api.db.init_data import init_web_data
from api.versions import get_versions
from api.versions import get_ragflow_version
from api.utils import show_configs


def update_progress():
@@ -41,59 +54,67 @@ def update_progress():
        time.sleep(3)
        try:
            DocumentService.update_progress()
        except Exception as e:
            stat_logger.error("update_progress exception:" + str(e))
        except Exception:
            logging.exception("update_progress exception")


if __name__ == '__main__':
    print(r"""
    logging.info(r"""
        ____   ___    ______ ______ __
       / __ \ /   |  / ____// ____// /____  _      __
      / /_/ // /| | / / __ / /_   / // __ \| | /| / /
     / _, _// ___ |/ /_/ // __/  / // /_/ /| |/ |/ /
    /_/ |_|/_/  |_|\____//_/    /_/ \____/ |__/|__/

    """, flush=True)
    stat_logger.info(
    """)
    logging.info(
        f'RAGFlow version: {get_ragflow_version()}'
    )
    logging.info(
        f'project base: {utils.file_utils.get_project_base_directory()}'
    )
    show_configs()
    settings.init_settings()

    # init db
    init_web_db()
    init_web_data()
    # init runtime config
    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument('--version', default=False, help="rag flow version", action='store_true')
    parser.add_argument('--debug', default=False, help="debug mode", action='store_true')
    parser.add_argument(
        "--version", default=False, help="RAGFlow version", action="store_true"
    )
    parser.add_argument(
        "--debug", default=False, help="debug mode", action="store_true"
    )
    args = parser.parse_args()
    if args.version:
        print(get_versions())
        print(get_ragflow_version())
        sys.exit(0)

    RuntimeConfig.DEBUG = args.debug
    if RuntimeConfig.DEBUG:
        stat_logger.info("run on debug mode")
        logging.info("run on debug mode")

    RuntimeConfig.init_env()
    RuntimeConfig.init_config(JOB_SERVER_HOST=HOST, HTTP_PORT=HTTP_PORT)
    RuntimeConfig.init_config(JOB_SERVER_HOST=settings.HOST_IP, HTTP_PORT=settings.HOST_PORT)

    peewee_logger = logging.getLogger('peewee')
    peewee_logger.propagate = False
    # rag_arch.common.log.ROpenHandler
    peewee_logger.addHandler(database_logger.handlers[0])
    peewee_logger.setLevel(database_logger.level)

    thr = ThreadPoolExecutor(max_workers=1)
    thr.submit(update_progress)
    thread = ThreadPoolExecutor(max_workers=1)
    thread.submit(update_progress)

    # start http server
    try:
        stat_logger.info("RAG Flow http server start...")
        werkzeug_logger = logging.getLogger("werkzeug")
        for h in access_logger.handlers:
            werkzeug_logger.addHandler(h)
        run_simple(hostname=HOST, port=HTTP_PORT, application=app, threaded=True, use_reloader=RuntimeConfig.DEBUG, use_debugger=RuntimeConfig.DEBUG)
        logging.info("RAGFlow HTTP server start...")
        run_simple(
            hostname=settings.HOST_IP,
            port=settings.HOST_PORT,
            application=app,
            threaded=True,
            use_reloader=RuntimeConfig.DEBUG,
            use_debugger=RuntimeConfig.DEBUG,
        )
    except Exception:
        traceback.print_exc()
        os.kill(os.getpid(), signal.SIGKILL)
        os.kill(os.getpid(), signal.SIGKILL)
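
The server entry point now configures a single root logger first and then tunes third-party loggers (`pdfminer` quieted, `peewee` re-routed through the root handlers) instead of wiring dedicated `stat_logger`/`access_logger` objects. A minimal stand-alone sketch of that bootstrap; the handler details below are assumptions, not RAGFlow's exact `initRootLogger`, which also adds a rotating file handler named after its argument:

import logging

def init_root_logger(name: str):
    # Stand-in for api.utils.log_utils.initRootLogger: one console handler
    # on the root logger. In RAGFlow, `name` selects the log file; it is
    # unused in this console-only sketch.
    handler = logging.StreamHandler()
    handler.setFormatter(logging.Formatter(
        "%(asctime)s %(levelname)s %(name)s %(message)s"))
    root = logging.getLogger()
    root.addHandler(handler)
    root.setLevel(logging.INFO)

init_root_logger("ragflow_server")
for module in ["pdfminer"]:
    logging.getLogger(module).setLevel(logging.WARNING)   # drop noisy INFO output
for module in ["peewee"]:
    logger = logging.getLogger(module)
    logger.handlers.clear()      # remove peewee's own handlers
    logger.propagate = True      # let its records flow to the root handlers

logging.info("server starting")
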
api/settings.py (323 changed lines)
@@ -16,197 +16,162 @@
import os
from datetime import date
from enum import IntEnum, Enum
from api.utils.file_utils import get_project_base_directory
from api.utils.log_utils import LoggerFactory, getLogger
import rag.utils.es_conn
import rag.utils.infinity_conn

# Logger
LoggerFactory.set_directory(
    os.path.join(
        get_project_base_directory(),
        "logs",
        "api"))
# {CRITICAL: 50, FATAL:50, ERROR:40, WARNING:30, WARN:30, INFO:20, DEBUG:10, NOTSET:0}
LoggerFactory.LEVEL = 30

stat_logger = getLogger("stat")
access_logger = getLogger("access")
database_logger = getLogger("database")
chat_logger = getLogger("chat")

from rag.utils.es_conn import ELASTICSEARCH
import rag.utils
from rag.nlp import search
from graphrag import search as kg_search
from api.utils import get_base_config, decrypt_database_config
from api.constants import RAG_FLOW_SERVICE_NAME

API_VERSION = "v1"
RAG_FLOW_SERVICE_NAME = "ragflow"
SERVER_MODULE = "rag_flow_server.py"
TEMP_DIRECTORY = os.path.join(get_project_base_directory(), "temp")
RAG_FLOW_CONF_PATH = os.path.join(get_project_base_directory(), "conf")
LIGHTEN = int(os.environ.get('LIGHTEN', "0"))

SUBPROCESS_STD_LOG_NAME = "std.log"

ERROR_REPORT = True
ERROR_REPORT_WITH_PATH = False

MAX_TIMESTAMP_INTERVAL = 60
SESSION_VALID_PERIOD = 7 * 24 * 60 * 60

REQUEST_TRY_TIMES = 3
REQUEST_WAIT_SEC = 2
REQUEST_MAX_WAIT_SEC = 300

USE_REGISTRY = get_base_config("use_registry")

LLM = get_base_config("user_default_llm", {})
LLM_FACTORY = LLM.get("factory", "Tongyi-Qianwen")
LLM_BASE_URL = LLM.get("base_url")

if not LIGHTEN:
    default_llm = {
        "Tongyi-Qianwen": {
            "chat_model": "qwen-plus",
            "embedding_model": "text-embedding-v2",
            "image2text_model": "qwen-vl-max",
            "asr_model": "paraformer-realtime-8k-v1",
        },
        "OpenAI": {
            "chat_model": "gpt-3.5-turbo",
            "embedding_model": "text-embedding-ada-002",
            "image2text_model": "gpt-4-vision-preview",
            "asr_model": "whisper-1",
        },
        "Azure-OpenAI": {
            "chat_model": "gpt-35-turbo",
            "embedding_model": "text-embedding-ada-002",
            "image2text_model": "gpt-4-vision-preview",
            "asr_model": "whisper-1",
        },
        "ZHIPU-AI": {
            "chat_model": "glm-3-turbo",
            "embedding_model": "embedding-2",
            "image2text_model": "glm-4v",
            "asr_model": "",
        },
        "Ollama": {
            "chat_model": "qwen-14B-chat",
            "embedding_model": "flag-embedding",
            "image2text_model": "",
            "asr_model": "",
        },
        "Moonshot": {
            "chat_model": "moonshot-v1-8k",
            "embedding_model": "",
            "image2text_model": "",
            "asr_model": "",
        },
        "DeepSeek": {
            "chat_model": "deepseek-chat",
            "embedding_model": "",
            "image2text_model": "",
            "asr_model": "",
        },
        "VolcEngine": {
            "chat_model": "",
            "embedding_model": "",
            "image2text_model": "",
            "asr_model": "",
        },
        "BAAI": {
            "chat_model": "",
            "embedding_model": "BAAI/bge-large-zh-v1.5",
            "image2text_model": "",
            "asr_model": "",
            "rerank_model": "BAAI/bge-reranker-v2-m3",
        }
    }

    CHAT_MDL = default_llm[LLM_FACTORY]["chat_model"]
    EMBEDDING_MDL = default_llm["BAAI"]["embedding_model"]
    RERANK_MDL = default_llm["BAAI"]["rerank_model"]
    ASR_MDL = default_llm[LLM_FACTORY]["asr_model"]
    IMAGE2TEXT_MDL = default_llm[LLM_FACTORY]["image2text_model"]
else:
    CHAT_MDL = EMBEDDING_MDL = RERANK_MDL = ASR_MDL = IMAGE2TEXT_MDL = ""

API_KEY = LLM.get("api_key", "")
PARSERS = LLM.get(
    "parsers",
    "naive:General,qa:Q&A,resume:Resume,manual:Manual,table:Table,paper:Paper,book:Book,laws:Laws,presentation:Presentation,picture:Picture,one:One,audio:Audio,knowledge_graph:Knowledge Graph,email:Email")

# distribution
DEPENDENT_DISTRIBUTION = get_base_config("dependent_distribution", False)
RAG_FLOW_UPDATE_CHECK = False

HOST = get_base_config(RAG_FLOW_SERVICE_NAME, {}).get("host", "127.0.0.1")
HTTP_PORT = get_base_config(RAG_FLOW_SERVICE_NAME, {}).get("http_port")

SECRET_KEY = get_base_config(
    RAG_FLOW_SERVICE_NAME,
    {}).get("secret_key", str(date.today()))

TOKEN_EXPIRE_IN = get_base_config(
    RAG_FLOW_SERVICE_NAME, {}).get(
    "token_expires_in", 3600)

NGINX_HOST = get_base_config(
    RAG_FLOW_SERVICE_NAME, {}).get(
    "nginx", {}).get("host") or HOST
NGINX_HTTP_PORT = get_base_config(
    RAG_FLOW_SERVICE_NAME, {}).get(
    "nginx", {}).get("http_port") or HTTP_PORT

RANDOM_INSTANCE_ID = get_base_config(
    RAG_FLOW_SERVICE_NAME, {}).get(
    "random_instance_id", False)

PROXY = get_base_config(RAG_FLOW_SERVICE_NAME, {}).get("proxy")
PROXY_PROTOCOL = get_base_config(RAG_FLOW_SERVICE_NAME, {}).get("protocol")
LLM = None
LLM_FACTORY = None
LLM_BASE_URL = None
CHAT_MDL = ""
EMBEDDING_MDL = ""
RERANK_MDL = ""
ASR_MDL = ""
IMAGE2TEXT_MDL = ""
API_KEY = None
PARSERS = None
HOST_IP = None
HOST_PORT = None
SECRET_KEY = None

DATABASE_TYPE = os.getenv("DB_TYPE", 'mysql')
DATABASE = decrypt_database_config(name=DATABASE_TYPE)

# Switch
# upload
UPLOAD_DATA_FROM_CLIENT = True

# authentication
AUTHENTICATION_CONF = get_base_config("authentication", {})
AUTHENTICATION_CONF = None

# client
CLIENT_AUTHENTICATION = AUTHENTICATION_CONF.get(
    "client", {}).get(
CLIENT_AUTHENTICATION = None
HTTP_APP_KEY = None
GITHUB_OAUTH = None
FEISHU_OAUTH = None

DOC_ENGINE = None
docStoreConn = None

retrievaler = None
kg_retrievaler = None


def init_settings():
    global LLM, LLM_FACTORY, LLM_BASE_URL, LIGHTEN, DATABASE_TYPE, DATABASE
    LIGHTEN = int(os.environ.get('LIGHTEN', "0"))
    DATABASE_TYPE = os.getenv("DB_TYPE", 'mysql')
    DATABASE = decrypt_database_config(name=DATABASE_TYPE)
    LLM = get_base_config("user_default_llm", {})
    LLM_FACTORY = LLM.get("factory", "Tongyi-Qianwen")
    LLM_BASE_URL = LLM.get("base_url")

    global CHAT_MDL, EMBEDDING_MDL, RERANK_MDL, ASR_MDL, IMAGE2TEXT_MDL
    if not LIGHTEN:
        default_llm = {
            "Tongyi-Qianwen": {
                "chat_model": "qwen-plus",
                "embedding_model": "text-embedding-v2",
                "image2text_model": "qwen-vl-max",
                "asr_model": "paraformer-realtime-8k-v1",
            },
            "OpenAI": {
                "chat_model": "gpt-3.5-turbo",
                "embedding_model": "text-embedding-ada-002",
                "image2text_model": "gpt-4-vision-preview",
                "asr_model": "whisper-1",
            },
            "Azure-OpenAI": {
                "chat_model": "gpt-35-turbo",
                "embedding_model": "text-embedding-ada-002",
                "image2text_model": "gpt-4-vision-preview",
                "asr_model": "whisper-1",
            },
            "ZHIPU-AI": {
                "chat_model": "glm-3-turbo",
                "embedding_model": "embedding-2",
                "image2text_model": "glm-4v",
                "asr_model": "",
            },
            "Ollama": {
                "chat_model": "qwen-14B-chat",
                "embedding_model": "flag-embedding",
                "image2text_model": "",
                "asr_model": "",
            },
            "Moonshot": {
                "chat_model": "moonshot-v1-8k",
                "embedding_model": "",
                "image2text_model": "",
                "asr_model": "",
            },
            "DeepSeek": {
                "chat_model": "deepseek-chat",
                "embedding_model": "",
                "image2text_model": "",
                "asr_model": "",
            },
            "VolcEngine": {
                "chat_model": "",
                "embedding_model": "",
                "image2text_model": "",
                "asr_model": "",
            },
            "BAAI": {
                "chat_model": "",
                "embedding_model": "BAAI/bge-large-zh-v1.5",
                "image2text_model": "",
                "asr_model": "",
                "rerank_model": "BAAI/bge-reranker-v2-m3",
            }
        }

        if LLM_FACTORY:
            CHAT_MDL = default_llm[LLM_FACTORY]["chat_model"] + f"@{LLM_FACTORY}"
            ASR_MDL = default_llm[LLM_FACTORY]["asr_model"] + f"@{LLM_FACTORY}"
            IMAGE2TEXT_MDL = default_llm[LLM_FACTORY]["image2text_model"] + f"@{LLM_FACTORY}"
        EMBEDDING_MDL = default_llm["BAAI"]["embedding_model"] + "@BAAI"
        RERANK_MDL = default_llm["BAAI"]["rerank_model"] + "@BAAI"

    global API_KEY, PARSERS, HOST_IP, HOST_PORT, SECRET_KEY
    API_KEY = LLM.get("api_key", "")
    PARSERS = LLM.get(
        "parsers",
        "naive:General,qa:Q&A,resume:Resume,manual:Manual,table:Table,paper:Paper,book:Book,laws:Laws,presentation:Presentation,picture:Picture,one:One,audio:Audio,knowledge_graph:Knowledge Graph,email:Email")

    HOST_IP = get_base_config(RAG_FLOW_SERVICE_NAME, {}).get("host", "127.0.0.1")
    HOST_PORT = get_base_config(RAG_FLOW_SERVICE_NAME, {}).get("http_port")

    SECRET_KEY = get_base_config(
        RAG_FLOW_SERVICE_NAME,
        {}).get("secret_key", str(date.today()))

    global AUTHENTICATION_CONF, CLIENT_AUTHENTICATION, HTTP_APP_KEY, GITHUB_OAUTH, FEISHU_OAUTH
    # authentication
    AUTHENTICATION_CONF = get_base_config("authentication", {})

    # client
    CLIENT_AUTHENTICATION = AUTHENTICATION_CONF.get(
        "client", {}).get(
        "switch", False)
HTTP_APP_KEY = AUTHENTICATION_CONF.get("client", {}).get("http_app_key")
GITHUB_OAUTH = get_base_config("oauth", {}).get("github")
FEISHU_OAUTH = get_base_config("oauth", {}).get("feishu")
WECHAT_OAUTH = get_base_config("oauth", {}).get("wechat")
    HTTP_APP_KEY = AUTHENTICATION_CONF.get("client", {}).get("http_app_key")
    GITHUB_OAUTH = get_base_config("oauth", {}).get("github")
    FEISHU_OAUTH = get_base_config("oauth", {}).get("feishu")

# site
SITE_AUTHENTICATION = AUTHENTICATION_CONF.get("site", {}).get("switch", False)
    global DOC_ENGINE, docStoreConn, retrievaler, kg_retrievaler
    DOC_ENGINE = os.environ.get('DOC_ENGINE', "elasticsearch")
    if DOC_ENGINE == "elasticsearch":
        docStoreConn = rag.utils.es_conn.ESConnection()
    elif DOC_ENGINE == "infinity":
        docStoreConn = rag.utils.infinity_conn.InfinityConnection()
    else:
        raise Exception(f"Not supported doc engine: {DOC_ENGINE}")

# permission
PERMISSION_CONF = get_base_config("permission", {})
PERMISSION_SWITCH = PERMISSION_CONF.get("switch")
COMPONENT_PERMISSION = PERMISSION_CONF.get("component")
DATASET_PERMISSION = PERMISSION_CONF.get("dataset")

HOOK_MODULE = get_base_config("hook_module")
HOOK_SERVER_NAME = get_base_config("hook_server_name")

ENABLE_MODEL_STORE = get_base_config('enable_model_store', False)
# authentication
USE_AUTHENTICATION = False
USE_DATA_AUTHENTICATION = False
AUTOMATIC_AUTHORIZATION_OUTPUT_DATA = True
USE_DEFAULT_TIMEOUT = False
AUTHENTICATION_DEFAULT_TIMEOUT = 7 * 24 * 60 * 60  # s
PRIVILEGE_COMMAND_WHITELIST = []
CHECK_NODES_IDENTITY = False

retrievaler = search.Dealer(ELASTICSEARCH)
kg_retrievaler = kg_search.KGSearch(ELASTICSEARCH)
    retrievaler = search.Dealer(docStoreConn)
    kg_retrievaler = kg_search.KGSearch(docStoreConn)


class CustomEnum(Enum):
@@ -227,16 +192,6 @@ class CustomEnum(Enum):
        return [member.name for member in cls.__members__.values()]


class PythonDependenceName(CustomEnum):
    Rag_Source_Code = "python"
    Python_Env = "miniconda"


class ModelStorage(CustomEnum):
    REDIS = "redis"
    MYSQL = "mysql"


class RetCode(IntEnum, CustomEnum):
    SUCCESS = 0
    NOT_EFFECTIVE = 10
@@ -23,56 +23,64 @@ import socket
import time
import uuid
import requests
import logging
from enum import Enum, IntEnum
import importlib
from Cryptodome.PublicKey import RSA
from Cryptodome.Cipher import PKCS1_v1_5 as Cipher_pkcs1_v1_5

from filelock import FileLock
from api.constants import SERVICE_CONF

from . import file_utils

SERVICE_CONF = "service_conf.yaml"


def conf_realpath(conf_name):
    conf_path = f"conf/{conf_name}"
    return os.path.join(file_utils.get_project_base_directory(), conf_path)


def get_base_config(key, default=None, conf_name=SERVICE_CONF) -> dict:
def read_config(conf_name=SERVICE_CONF):
    local_config = {}
    local_path = conf_realpath(f'local.{conf_name}')
    if default is None:
        default = os.environ.get(key.upper())

    # load local config file
    if os.path.exists(local_path):
        local_config = file_utils.load_yaml_conf(local_path)
        if not isinstance(local_config, dict):
            raise ValueError(f'Invalid config file: "{local_path}".')

        if key is not None and key in local_config:
            return local_config[key]
    global_config_path = conf_realpath(conf_name)
    global_config = file_utils.load_yaml_conf(global_config_path)

    config_path = conf_realpath(conf_name)
    config = file_utils.load_yaml_conf(config_path)
    if not isinstance(global_config, dict):
        raise ValueError(f'Invalid config file: "{global_config_path}".')

    if not isinstance(config, dict):
        raise ValueError(f'Invalid config file: "{config_path}".')
    global_config.update(local_config)
    return global_config

    config.update(local_config)
    return config.get(key, default) if key is not None else config

CONFIGS = read_config()


def show_configs():
    msg = f"Current configs, from {conf_realpath(SERVICE_CONF)}:"
    for k, v in CONFIGS.items():
        msg += f"\n\t{k}: {v}"
    logging.info(msg)


def get_base_config(key, default=None):
    if key is None:
        return None
    if default is None:
        default = os.environ.get(key.upper())
    return CONFIGS.get(key, default)


use_deserialize_safe_module = get_base_config(
    'use_deserialize_safe_module', False)

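Note: after this refactor the YAML is parsed once into CONFIGS instead of re-read on every lookup, with the local file still overriding the global one. A minimal usage sketch (keys taken from conf/service_conf.yaml later in this diff):

    from api.utils import get_base_config, read_config

    conf = read_config()                    # conf/service_conf.yaml merged with
                                            # conf/local.service_conf.yaml if present
    es_conf = get_base_config("es", {})     # merged dict for one top-level key
    secret = get_base_config("secret_key")  # missing key falls back to $SECRET_KEY, else None
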
class CoordinationCommunicationProtocol(object):
    HTTP = "http"
    GRPC = "grpc"


class BaseType:
    def to_dict(self):
        return dict([(k.lstrip("_"), v) for k, v in self.__dict__.items()])
@@ -98,6 +106,7 @@ class BaseType:
                data = obj
            return {"type": obj.__class__.__name__,
                    "data": data, "module": module}
        return _dict(self)


@@ -245,7 +254,7 @@ def get_lan_ip():
        try:
            ip = get_interface_ip(ifname)
            break
        except IOError as e:
        except IOError:
            pass
    return ip or ''

@@ -342,8 +351,8 @@ def download_img(url):
        return ""
    response = requests.get(url)
    return "data:" + \
        response.headers.get('Content-Type', 'image/jpg') + ";" + \
        "base64," + base64.b64encode(response.content).decode("utf-8")
        response.headers.get('Content-Type', 'image/jpg') + ";" + \
        "base64," + base64.b64encode(response.content).decode("utf-8")


def delta_seconds(date_string: str):

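Note: download_img embeds the fetched image as a data URI. Illustrative shape of the return value (URL and payload hypothetical):

    # "data:image/png;base64,iVBORw0KGgo..."
    # i.e. "data:" + response Content-Type + ";base64," + base64-encoded body
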
@@ -13,6 +13,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#
import logging
import functools
import json
import random
@@ -33,13 +34,12 @@ from itsdangerous import URLSafeTimedSerializer
from werkzeug.http import HTTP_STATUS_CODES

from api.db.db_models import APIToken
from api.settings import (
    REQUEST_MAX_WAIT_SEC, REQUEST_WAIT_SEC,
    stat_logger, CLIENT_AUTHENTICATION, HTTP_APP_KEY, SECRET_KEY
)
from api.settings import RetCode
from api import settings

from api import settings
from api.utils import CustomJSONEncoder, get_uuid
from api.utils import json_dumps
from api.constants import REQUEST_WAIT_SEC, REQUEST_MAX_WAIT_SEC

requests.models.complexjson.dumps = functools.partial(
    json.dumps, cls=CustomJSONEncoder)
@@ -58,13 +58,13 @@ def request(**kwargs):
        {}).items()}
    prepped = requests.Request(**kwargs).prepare()

    if CLIENT_AUTHENTICATION and HTTP_APP_KEY and SECRET_KEY:
    if settings.CLIENT_AUTHENTICATION and settings.HTTP_APP_KEY and settings.SECRET_KEY:
        timestamp = str(round(time() * 1000))
        nonce = str(uuid1())
        signature = b64encode(HMAC(SECRET_KEY.encode('ascii'), b'\n'.join([
        signature = b64encode(HMAC(settings.SECRET_KEY.encode('ascii'), b'\n'.join([
            timestamp.encode('ascii'),
            nonce.encode('ascii'),
            HTTP_APP_KEY.encode('ascii'),
            settings.HTTP_APP_KEY.encode('ascii'),
            prepped.path_url.encode('ascii'),
            prepped.body if kwargs.get('json') else b'',
            urlencode(
@@ -78,7 +78,7 @@ def request(**kwargs):
        prepped.headers.update({
            'TIMESTAMP': timestamp,
            'NONCE': nonce,
            'APP-KEY': HTTP_APP_KEY,
            'APP-KEY': settings.HTTP_APP_KEY,
            'SIGNATURE': signature,
        })

@@ -97,19 +97,19 @@ def get_exponential_backoff_interval(retries, full_jitter=False):
    return max(0, countdown)


def get_data_error_result(retcode=RetCode.DATA_ERROR,
                          retmsg='Sorry! Data missing!'):
def get_data_error_result(code=settings.RetCode.DATA_ERROR,
                          message='Sorry! Data missing!'):
    import re
    result_dict = {
        "retcode": retcode,
        "retmsg": re.sub(
        "code": code,
        "message": re.sub(
            r"rag",
            "seceum",
            retmsg,
            message,
            flags=re.IGNORECASE)}
    response = {}
    for key, value in result_dict.items():
        if value is None and key != "retcode":
        if value is None and key != "code":
            continue
        else:
            response[key] = value
@@ -117,29 +117,25 @@ def get_data_error_result(retcode=RetCode.DATA_ERROR,


def server_error_response(e):
    stat_logger.exception(e)
    logging.exception(e)
    try:
        if e.code == 401:
            return get_json_result(retcode=401, retmsg=repr(e))
            return get_json_result(code=401, message=repr(e))
    except BaseException:
        pass
    if len(e.args) > 1:
        return get_json_result(
            retcode=RetCode.EXCEPTION_ERROR, retmsg=repr(e.args[0]), data=e.args[1])
    if repr(e).find("index_not_found_exception") >= 0:
        return get_json_result(retcode=RetCode.EXCEPTION_ERROR,
                               retmsg="No chunk found, please upload file and parse it.")

    return get_json_result(retcode=RetCode.EXCEPTION_ERROR, retmsg=repr(e))
            code=settings.RetCode.EXCEPTION_ERROR, message=repr(e.args[0]), data=e.args[1])
    return get_json_result(code=settings.RetCode.EXCEPTION_ERROR, message=repr(e))


def error_response(response_code, retmsg=None):
    if retmsg is None:
        retmsg = HTTP_STATUS_CODES.get(response_code, 'Unknown Error')
def error_response(response_code, message=None):
    if message is None:
        message = HTTP_STATUS_CODES.get(response_code, 'Unknown Error')

    return Response(json.dumps({
        'retmsg': retmsg,
        'retcode': response_code,
        'message': message,
        'code': response_code,
    }), status=response_code, mimetype='application/json')


@@ -171,7 +167,7 @@ def validate_request(*args, **kwargs):
            error_string += "required argument values: {}".format(
                ",".join(["{}={}".format(a[0], a[1]) for a in error_arguments]))
            return get_json_result(
                retcode=RetCode.ARGUMENT_ERROR, retmsg=error_string)
                code=settings.RetCode.ARGUMENT_ERROR, message=error_string)
        return func(*_args, **_kwargs)

    return decorated_function
@@ -196,8 +192,8 @@ def send_file_in_mem(data, filename):
    return send_file(f, as_attachment=True, attachment_filename=filename)


def get_json_result(retcode=RetCode.SUCCESS, retmsg='success', data=None):
    response = {"retcode": retcode, "retmsg": retmsg, "data": data}
def get_json_result(code=settings.RetCode.SUCCESS, message='success', data=None):
    response = {"code": code, "message": message, "data": data}
    return jsonify(response)

def apikey_required(func):
@@ -207,7 +203,7 @@ def apikey_required(func):
        objs = APIToken.query(token=token)
        if not objs:
            return build_error_result(
                error_msg='API-KEY is invalid!', retcode=RetCode.FORBIDDEN
                message='API-KEY is invalid!', code=settings.RetCode.FORBIDDEN
            )
        kwargs['tenant_id'] = objs[0].tenant_id
        return func(*args, **kwargs)
@@ -215,19 +211,19 @@ def apikey_required(func):
    return decorated_function


def build_error_result(retcode=RetCode.FORBIDDEN, error_msg='success'):
    response = {"error_code": retcode, "error_msg": error_msg}
def build_error_result(code=settings.RetCode.FORBIDDEN, message='success'):
    response = {"code": code, "message": message}
    response = jsonify(response)
    response.status_code = retcode
    response.status_code = code
    return response


def construct_response(retcode=RetCode.SUCCESS,
                       retmsg='success', data=None, auth=None):
    result_dict = {"retcode": retcode, "retmsg": retmsg, "data": data}
def construct_response(code=settings.RetCode.SUCCESS,
                       message='success', data=None, auth=None):
    result_dict = {"code": code, "message": message, "data": data}
    response_dict = {}
    for key, value in result_dict.items():
        if value is None and key != "retcode":
        if value is None and key != "code":
            continue
        else:
            response_dict[key] = value
@@ -242,7 +238,7 @@ def construct_response(retcode=RetCode.SUCCESS,
    return response


def construct_result(code=RetCode.DATA_ERROR, message='data is missing'):
def construct_result(code=settings.RetCode.DATA_ERROR, message='data is missing'):
    import re
    result_dict = {"code": code, "message": re.sub(r"rag", "seceum", message, flags=re.IGNORECASE)}
    response = {}
@@ -254,7 +250,7 @@ def construct_result(code=RetCode.DATA_ERROR, message='data is missing'):
    return jsonify(response)


def construct_json_result(code=RetCode.SUCCESS, message='success', data=None):
def construct_json_result(code=settings.RetCode.SUCCESS, message='success', data=None):
    if data is None:
        return jsonify({"code": code, "message": message})
    else:
@@ -262,29 +258,28 @@ def construct_json_result(code=RetCode.SUCCESS, message='success', data=None):


def construct_error_response(e):
    stat_logger.exception(e)
    logging.exception(e)
    try:
        if e.code == 401:
            return construct_json_result(code=RetCode.UNAUTHORIZED, message=repr(e))
            return construct_json_result(code=settings.RetCode.UNAUTHORIZED, message=repr(e))
    except BaseException:
        pass
    if len(e.args) > 1:
        return construct_json_result(code=RetCode.EXCEPTION_ERROR, message=repr(e.args[0]), data=e.args[1])
    if repr(e).find("index_not_found_exception") >= 0:
        return construct_json_result(code=RetCode.EXCEPTION_ERROR,
                                     message="No chunk found, please upload file and parse it.")

    return construct_json_result(code=RetCode.EXCEPTION_ERROR, message=repr(e))
        return construct_json_result(code=settings.RetCode.EXCEPTION_ERROR, message=repr(e.args[0]), data=e.args[1])
    return construct_json_result(code=settings.RetCode.EXCEPTION_ERROR, message=repr(e))


def token_required(func):
    @wraps(func)
    def decorated_function(*args, **kwargs):
        token = flask_request.headers.get('Authorization').split()[1]
        authorization_list = flask_request.headers.get('Authorization').split()
        if len(authorization_list) < 2:
            return get_json_result(data=False, message="Please check your authorization format.")
        token = authorization_list[1]
        objs = APIToken.query(token=token)
        if not objs:
            return get_json_result(
                data=False, retmsg='Token is not valid!', retcode=RetCode.AUTHENTICATION_ERROR
                data=False, message='Token is not valid!', code=settings.RetCode.AUTHENTICATION_ERROR
            )
        kwargs['tenant_id'] = objs[0].tenant_id
        return func(*args, **kwargs)
@@ -292,26 +287,26 @@ def token_required(func):
    return decorated_function


def get_result(retcode=RetCode.SUCCESS, retmsg='error', data=None):
    if retcode == 0:
def get_result(code=settings.RetCode.SUCCESS, message="", data=None):
    if code == 0:
        if data is not None:
            response = {"code": retcode, "data": data}
            response = {"code": code, "data": data}
        else:
            response = {"code": retcode}
            response = {"code": code}
    else:
        response = {"code": retcode, "message": retmsg}
        response = {"code": code, "message": message}
    return jsonify(response)


def get_error_data_result(retmsg='Sorry! Data missing!', retcode=RetCode.DATA_ERROR,
def get_error_data_result(message='Sorry! Data missing!', code=settings.RetCode.DATA_ERROR,
                          ):
    import re
    result_dict = {
        "code": retcode,
        "code": code,
        "message": re.sub(
            r"rag",
            "seceum",
            retmsg,
            message,
            flags=re.IGNORECASE)}
    response = {}
    for key, value in result_dict.items():

@@ -25,7 +25,7 @@ from cachetools import LRUCache, cached
from ruamel.yaml import YAML

from api.db import FileType
from api.contants import IMG_BASE64_PREFIX
from api.constants import IMG_BASE64_PREFIX

PROJECT_BASE = os.getenv("RAG_PROJECT_BASE") or os.getenv("RAG_DEPLOY_BASE")
RAG_BASE = os.getenv("RAG_BASE")
@@ -71,7 +71,7 @@ def get_home_cache_dir():
    dir = os.path.join(os.path.expanduser('~'), ".ragflow")
    try:
        os.mkdir(dir)
    except OSError as error:
    except OSError:
        pass
    return dir

@@ -193,7 +193,7 @@ def thumbnail_img(filename, blob):
            presentation.slides[0].get_thumbnail(0.03, 0.03).save(
                buffered, drawing.imaging.ImageFormat.png)
            return buffered.getvalue()
    except Exception as e:
    except Exception:
        pass
    return None

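Note on the api_utils.py hunks above: the public JSON envelope is renamed from retcode/retmsg to code/message, and token_required now validates the Authorization header before indexing into it. A minimal illustrative sketch (requires a Flask app context; not part of the diff):

    from api.utils.api_utils import get_json_result

    # Clients send: Authorization: Bearer <api-token>  (two space-separated parts)
    resp = get_json_result(data={"total": 1})
    # resp.json -> {"code": 0, "message": "success", "data": {"total": 1}}
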
@@ -14,300 +14,41 @@
# limitations under the License.
#
import os
import typing
import traceback
import os.path
import logging
import inspect
from logging.handlers import TimedRotatingFileHandler
from threading import RLock
from logging.handlers import RotatingFileHandler

from api.utils import file_utils
def get_project_base_directory():
    PROJECT_BASE = os.path.abspath(
        os.path.join(
            os.path.dirname(os.path.realpath(__file__)),
            os.pardir,
            os.pardir,
        )
    )
    return PROJECT_BASE

def initRootLogger(logfile_basename: str, log_level: int = logging.INFO, log_format: str = "%(asctime)-15s %(levelname)-8s %(process)d %(message)s"):
    logger = logging.getLogger()
    if logger.hasHandlers():
        return

class LoggerFactory(object):
    TYPE = "FILE"
    LOG_FORMAT = "[%(levelname)s] [%(asctime)s] [%(module)s.%(funcName)s] [line:%(lineno)d]: %(message)s"
    logging.basicConfig(format=LOG_FORMAT)
    LEVEL = logging.DEBUG
    logger_dict = {}
    global_handler_dict = {}
    log_path = os.path.abspath(os.path.join(get_project_base_directory(), "logs", f"{logfile_basename}.log"))

    LOG_DIR = None
    PARENT_LOG_DIR = None
    log_share = True
    os.makedirs(os.path.dirname(log_path), exist_ok=True)
    logger.setLevel(log_level)
    formatter = logging.Formatter(log_format)

    append_to_parent_log = None
    handler1 = RotatingFileHandler(log_path, maxBytes=10*1024*1024, backupCount=5)
    handler1.setLevel(log_level)
    handler1.setFormatter(formatter)
    logger.addHandler(handler1)

    lock = RLock()
    # CRITICAL = 50
    # FATAL = CRITICAL
    # ERROR = 40
    # WARNING = 30
    # WARN = WARNING
    # INFO = 20
    # DEBUG = 10
    # NOTSET = 0
    levels = (10, 20, 30, 40)
    schedule_logger_dict = {}
    handler2 = logging.StreamHandler()
    handler2.setLevel(log_level)
    handler2.setFormatter(formatter)
    logger.addHandler(handler2)

    @staticmethod
    def set_directory(directory=None, parent_log_dir=None,
                      append_to_parent_log=None, force=False):
        if parent_log_dir:
            LoggerFactory.PARENT_LOG_DIR = parent_log_dir
        if append_to_parent_log:
            LoggerFactory.append_to_parent_log = append_to_parent_log
        with LoggerFactory.lock:
            if not directory:
                directory = file_utils.get_project_base_directory("logs")
            if not LoggerFactory.LOG_DIR or force:
                LoggerFactory.LOG_DIR = directory
            if LoggerFactory.log_share:
                oldmask = os.umask(000)
                os.makedirs(LoggerFactory.LOG_DIR, exist_ok=True)
                os.umask(oldmask)
            else:
                os.makedirs(LoggerFactory.LOG_DIR, exist_ok=True)
            for loggerName, ghandler in LoggerFactory.global_handler_dict.items():
                for className, (logger,
                                handler) in LoggerFactory.logger_dict.items():
                    logger.removeHandler(ghandler)
                ghandler.close()
            LoggerFactory.global_handler_dict = {}
            for className, (logger,
                            handler) in LoggerFactory.logger_dict.items():
                logger.removeHandler(handler)
                _handler = None
                if handler:
                    handler.close()
                if className != "default":
                    _handler = LoggerFactory.get_handler(className)
                    logger.addHandler(_handler)
                LoggerFactory.assemble_global_handler(logger)
                LoggerFactory.logger_dict[className] = logger, _handler

    @staticmethod
    def new_logger(name):
        logger = logging.getLogger(name)
        logger.propagate = False
        logger.setLevel(LoggerFactory.LEVEL)
        return logger

    @staticmethod
    def get_logger(class_name=None):
        with LoggerFactory.lock:
            if class_name in LoggerFactory.logger_dict.keys():
                logger, handler = LoggerFactory.logger_dict[class_name]
                if not logger:
                    logger, handler = LoggerFactory.init_logger(class_name)
            else:
                logger, handler = LoggerFactory.init_logger(class_name)
            return logger

    @staticmethod
    def get_global_handler(logger_name, level=None, log_dir=None):
        if not LoggerFactory.LOG_DIR:
            return logging.StreamHandler()
        if log_dir:
            logger_name_key = logger_name + "_" + log_dir
        else:
            logger_name_key = logger_name + "_" + LoggerFactory.LOG_DIR
        # if loggerName not in LoggerFactory.globalHandlerDict:
        if logger_name_key not in LoggerFactory.global_handler_dict:
            with LoggerFactory.lock:
                if logger_name_key not in LoggerFactory.global_handler_dict:
                    handler = LoggerFactory.get_handler(
                        logger_name, level, log_dir)
                    LoggerFactory.global_handler_dict[logger_name_key] = handler
        return LoggerFactory.global_handler_dict[logger_name_key]

    @staticmethod
    def get_handler(class_name, level=None, log_dir=None,
                    log_type=None, job_id=None):
        if not log_type:
            if not LoggerFactory.LOG_DIR or not class_name:
                return logging.StreamHandler()
            # return Diy_StreamHandler()

            if not log_dir:
                log_file = os.path.join(
                    LoggerFactory.LOG_DIR,
                    "{}.log".format(class_name))
            else:
                log_file = os.path.join(log_dir, "{}.log".format(class_name))
        else:
            log_file = os.path.join(log_dir, "rag_flow_{}.log".format(
                log_type) if level == LoggerFactory.LEVEL else 'rag_flow_{}_error.log'.format(log_type))

        os.makedirs(os.path.dirname(log_file), exist_ok=True)
        if LoggerFactory.log_share:
            handler = ROpenHandler(log_file,
                                   when='D',
                                   interval=1,
                                   backupCount=14,
                                   delay=True)
        else:
            handler = TimedRotatingFileHandler(log_file,
                                               when='D',
                                               interval=1,
                                               backupCount=14,
                                               delay=True)
        if level:
            handler.level = level

        return handler

    @staticmethod
    def init_logger(class_name):
        with LoggerFactory.lock:
            logger = LoggerFactory.new_logger(class_name)
            handler = None
            if class_name:
                handler = LoggerFactory.get_handler(class_name)
                logger.addHandler(handler)
                LoggerFactory.logger_dict[class_name] = logger, handler

            else:
                LoggerFactory.logger_dict["default"] = logger, handler

            LoggerFactory.assemble_global_handler(logger)
            return logger, handler

    @staticmethod
    def assemble_global_handler(logger):
        if LoggerFactory.LOG_DIR:
            for level in LoggerFactory.levels:
                if level >= LoggerFactory.LEVEL:
                    level_logger_name = logging._levelToName[level]
                    logger.addHandler(
                        LoggerFactory.get_global_handler(
                            level_logger_name, level))
        if LoggerFactory.append_to_parent_log and LoggerFactory.PARENT_LOG_DIR:
            for level in LoggerFactory.levels:
                if level >= LoggerFactory.LEVEL:
                    level_logger_name = logging._levelToName[level]
                    logger.addHandler(
                        LoggerFactory.get_global_handler(level_logger_name, level, LoggerFactory.PARENT_LOG_DIR))


def setDirectory(directory=None):
    LoggerFactory.set_directory(directory)


def setLevel(level):
    LoggerFactory.LEVEL = level


def getLogger(className=None, useLevelFile=False):
    if className is None:
        frame = inspect.stack()[1]
        module = inspect.getmodule(frame[0])
        className = 'stat'
    return LoggerFactory.get_logger(className)


def exception_to_trace_string(ex):
    return "".join(traceback.TracebackException.from_exception(ex).format())


class ROpenHandler(TimedRotatingFileHandler):
    def _open(self):
        prevumask = os.umask(000)
        rtv = TimedRotatingFileHandler._open(self)
        os.umask(prevumask)
        return rtv


def sql_logger(job_id='', log_type='sql'):
    key = job_id + log_type
    if key in LoggerFactory.schedule_logger_dict.keys():
        return LoggerFactory.schedule_logger_dict[key]
    return get_job_logger(job_id=job_id, log_type=log_type)


def ready_log(msg, job=None, task=None, role=None, party_id=None, detail=None):
    prefix, suffix = base_msg(job, task, role, party_id, detail)
    return f"{prefix}{msg} ready{suffix}"


def start_log(msg, job=None, task=None, role=None, party_id=None, detail=None):
    prefix, suffix = base_msg(job, task, role, party_id, detail)
    return f"{prefix}start to {msg}{suffix}"


def successful_log(msg, job=None, task=None, role=None,
                   party_id=None, detail=None):
    prefix, suffix = base_msg(job, task, role, party_id, detail)
    return f"{prefix}{msg} successfully{suffix}"


def warning_log(msg, job=None, task=None, role=None,
                party_id=None, detail=None):
    prefix, suffix = base_msg(job, task, role, party_id, detail)
    return f"{prefix}{msg} is not effective{suffix}"


def failed_log(msg, job=None, task=None, role=None,
               party_id=None, detail=None):
    prefix, suffix = base_msg(job, task, role, party_id, detail)
    return f"{prefix}failed to {msg}{suffix}"


def base_msg(job=None, task=None, role: str = None,
             party_id: typing.Union[str, int] = None, detail=None):
    if detail:
        detail_msg = f" detail: \n{detail}"
    else:
        detail_msg = ""
    if task is not None:
        return f"task {task.f_task_id} {task.f_task_version} ", f" on {task.f_role} {task.f_party_id}{detail_msg}"
    elif job is not None:
        return "", f" on {job.f_role} {job.f_party_id}{detail_msg}"
    elif role and party_id:
        return "", f" on {role} {party_id}{detail_msg}"
    else:
        return "", f"{detail_msg}"


def exception_to_trace_string(ex):
    return "".join(traceback.TracebackException.from_exception(ex).format())


def get_logger_base_dir():
    job_log_dir = file_utils.get_rag_flow_directory('logs')
    return job_log_dir


def get_job_logger(job_id, log_type):
    rag_flow_log_dir = file_utils.get_rag_flow_directory('logs', 'rag_flow')
    job_log_dir = file_utils.get_rag_flow_directory('logs', job_id)
    if not job_id:
        log_dirs = [rag_flow_log_dir]
    else:
        if log_type == 'audit':
            log_dirs = [job_log_dir, rag_flow_log_dir]
        else:
            log_dirs = [job_log_dir]
    if LoggerFactory.log_share:
        oldmask = os.umask(000)
        os.makedirs(job_log_dir, exist_ok=True)
        os.makedirs(rag_flow_log_dir, exist_ok=True)
        os.umask(oldmask)
    else:
        os.makedirs(job_log_dir, exist_ok=True)
        os.makedirs(rag_flow_log_dir, exist_ok=True)
    logger = LoggerFactory.new_logger(f"{job_id}_{log_type}")
    for job_log_dir in log_dirs:
        handler = LoggerFactory.get_handler(class_name=None, level=LoggerFactory.LEVEL,
                                            log_dir=job_log_dir, log_type=log_type, job_id=job_id)
        error_handler = LoggerFactory.get_handler(
            class_name=None,
            level=logging.ERROR,
            log_dir=job_log_dir,
            log_type=log_type,
            job_id=job_id)
        logger.addHandler(handler)
        logger.addHandler(error_handler)
    with LoggerFactory.lock:
        LoggerFactory.schedule_logger_dict[job_id + log_type] = logger
    return logger
    logging.captureWarnings(True)
    msg = f"{logfile_basename} log path: {log_path}"
    logger.info(msg)

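Note: the whole LoggerFactory machinery above is removed in favor of one idempotent root-logger setup. A minimal usage sketch under that signature:

    import logging
    from api.utils.log_utils import initRootLogger

    initRootLogger("ragflow_server")   # logs/ragflow_server.log (rotating) + stderr
    logging.info("service started")    # reaches both handlers
    initRootLogger("ragflow_server")   # no-op: the root logger already has handlers
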
@@ -77,4 +77,4 @@ def __get_pdf_from_html(


def is_valid_url(url: str) -> bool:
    return bool(re.match(r"(https?|ftp|file)://[-A-Za-z0-9+&@#/%?=~_|!:,.;]+[-A-Za-z0-9+&@#/%=~_|]", url))
    return bool(re.match(r"(https?)://[-A-Za-z0-9+&@#/%?=~_|!:,.;]+[-A-Za-z0-9+&@#/%=~_|]", url))

api/validation.py (new file, 49 lines)
@@ -0,0 +1,49 @@
#
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

import logging
import sys


def python_version_validation():
    # Check python version
    required_python_version = (3, 10)
    if sys.version_info < required_python_version:
        logging.info(
            f"Required Python: >= {required_python_version[0]}.{required_python_version[1]}. Current Python version: {sys.version_info[0]}.{sys.version_info[1]}."
        )
        sys.exit(1)
    else:
        logging.info(f"Python version: {sys.version_info[0]}.{sys.version_info[1]}")


python_version_validation()


# Download nltk data
def download_nltk_data():
    import nltk
    nltk.download('wordnet', halt_on_error=False, quiet=True)
    nltk.download('punkt_tab', halt_on_error=False, quiet=True)


try:
    from multiprocessing import Pool
    pool = Pool(processes=1)
    thread = pool.apply_async(download_nltk_data)
    binary = thread.get(timeout=60)
except Exception as e:
    print('\x1b[6;37;41m WARNING \x1b[0m' + "Downloading NLTK data failure.", flush=True)

@@ -13,14 +13,57 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#
import dotenv
import typing

import os
import subprocess

RAGFLOW_VERSION_INFO = "unknown"


def get_versions() -> typing.Mapping[str, typing.Any]:
    dotenv.load_dotenv(dotenv.find_dotenv())
    return dotenv.dotenv_values()
def get_ragflow_version() -> str:
    global RAGFLOW_VERSION_INFO
    if RAGFLOW_VERSION_INFO != "unknown":
        return RAGFLOW_VERSION_INFO
    version_path = os.path.abspath(
        os.path.join(
            os.path.dirname(os.path.realpath(__file__)), os.pardir, "VERSION"
        )
    )
    if os.path.exists(version_path):
        with open(version_path, "r") as f:
            RAGFLOW_VERSION_INFO = f.read().strip()
    else:
        RAGFLOW_VERSION_INFO = get_closest_tag_and_count()
        LIGHTEN = int(os.environ.get("LIGHTEN", "0"))
        RAGFLOW_VERSION_INFO += " slim" if LIGHTEN == 1 else " full"
    return RAGFLOW_VERSION_INFO


def get_rag_version() -> typing.Optional[str]:
    return get_versions().get("RAGFLOW_IMAGE", "infiniflow/ragflow:dev").split(":")[-1]
def get_closest_tag_and_count():
    try:
        # Get the current commit hash
        commit_id = (
            subprocess.check_output(["git", "rev-parse", "--short", "HEAD"])
            .strip()
            .decode("utf-8")
        )
        # Get the closest tag
        closest_tag = (
            subprocess.check_output(["git", "describe", "--tags", "--abbrev=0"])
            .strip()
            .decode("utf-8")
        )
        # Get the commit count since the closest tag
        process = subprocess.Popen(
            ["git", "rev-list", "--count", f"{closest_tag}..HEAD"],
            stdout=subprocess.PIPE,
        )
        commits_count, _ = process.communicate()
        commits_count = int(commits_count.strip())

        if commits_count == 0:
            return closest_tag
        else:
            return f"{commit_id}({closest_tag}~{commits_count})"
    except Exception:
        return "unknown"

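Note: versioning moves from reading .env to a VERSION file with a git fallback. Illustrative call (version strings hypothetical; exact formatting depends on which branch above is taken):

    from api.versions import get_ragflow_version

    print(get_ragflow_version())
    # e.g. "v0.13.0" when a VERSION file exists, or a git-derived form like
    # "4097912d(v0.12.0~214) slim" when LIGHTEN=1 and no VERSION file is present
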
conf/infinity_mapping.json (new file, 26 lines)
@@ -0,0 +1,26 @@
{
    "id": {"type": "varchar", "default": ""},
    "doc_id": {"type": "varchar", "default": ""},
    "kb_id": {"type": "varchar", "default": ""},
    "create_time": {"type": "varchar", "default": ""},
    "create_timestamp_flt": {"type": "float", "default": 0.0},
    "img_id": {"type": "varchar", "default": ""},
    "docnm_kwd": {"type": "varchar", "default": ""},
    "title_tks": {"type": "varchar", "default": ""},
    "title_sm_tks": {"type": "varchar", "default": ""},
    "name_kwd": {"type": "varchar", "default": ""},
    "important_kwd": {"type": "varchar", "default": ""},
    "important_tks": {"type": "varchar", "default": ""},
    "content_with_weight": {"type": "varchar", "default": ""},
    "content_ltks": {"type": "varchar", "default": ""},
    "content_sm_ltks": {"type": "varchar", "default": ""},
    "page_num_list": {"type": "varchar", "default": ""},
    "top_list": {"type": "varchar", "default": ""},
    "position_list": {"type": "varchar", "default": ""},
    "weight_int": {"type": "integer", "default": 0},
    "weight_flt": {"type": "float", "default": 0.0},
    "rank_int": {"type": "integer", "default": 0},
    "available_int": {"type": "integer", "default": 1},
    "knowledge_graph_kwd": {"type": "varchar", "default": ""},
    "entities_kwd": {"type": "varchar", "default": ""}
}

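Note: this file describes the chunk table schema for the Infinity engine. A hypothetical reader sketch, only to show the shape of each entry:

    import json

    with open("conf/infinity_mapping.json") as f:
        schema = json.load(f)
    for name, spec in schema.items():
        # each field carries a column type and a default value
        print(f"{name}: {spec['type']} (default={spec['default']!r})")
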
@@ -3,7 +3,7 @@
    {
        "name": "OpenAI",
        "logo": "",
        "tags": "LLM,TEXT EMBEDDING,SPEECH2TEXT,MODERATION",
        "tags": "LLM,TEXT EMBEDDING,TTS,TEXT RE-RANK,SPEECH2TEXT,MODERATION",
        "status": "1",
        "llm": [
            {
@@ -89,7 +89,7 @@
    {
        "name": "Tongyi-Qianwen",
        "logo": "",
        "tags": "LLM,TEXT EMBEDDING,TEXT RE-RANK,SPEECH2TEXT,MODERATION",
        "tags": "LLM,TEXT EMBEDDING,TEXT RE-RANK,TTS,SPEECH2TEXT,MODERATION",
        "status": "1",
        "llm": [
            {
@@ -160,6 +160,18 @@
        "tags": "LLM,TEXT EMBEDDING,SPEECH2TEXT,MODERATION",
        "status": "1",
        "llm": [
            {
                "llm_name": "glm-4-plus",
                "tags": "LLM,CHAT,",
                "max_tokens": 128000,
                "model_type": "chat"
            },
            {
                "llm_name": "glm-4-0520",
                "tags": "LLM,CHAT,",
                "max_tokens": 128000,
                "model_type": "chat"
            },
            {
                "llm_name": "glm-4",
                "tags": "LLM,CHAT,",
@@ -184,6 +196,12 @@
                "max_tokens": 128000,
                "model_type": "chat"
            },
            {
                "llm_name": "glm-4-flashx",
                "tags": "LLM,CHAT,",
                "max_tokens": 128000,
                "model_type": "chat"
            },
            {
                "llm_name": "glm-4-long",
                "tags": "LLM,CHAT,",
@@ -334,7 +352,7 @@
    {
        "name": "Xinference",
        "logo": "",
        "tags": "LLM,TEXT EMBEDDING,SPEECH2TEXT,MODERATION,TEXT RE-RANK",
        "tags": "LLM,TEXT EMBEDDING,TTS,SPEECH2TEXT,MODERATION,TEXT RE-RANK",
        "status": "1",
        "llm": []
    },
@@ -640,7 +658,7 @@
            "llm_name": "gpt-4o",
            "tags": "LLM,CHAT,128K",
            "max_tokens": 128000,
            "model_type": "chat,image2text"
            "model_type": "image2text"
        },
        {
            "llm_name": "gpt-3.5-turbo",
@@ -1299,7 +1317,7 @@
        "llm": []
    },
    {
        "name": "cohere",
        "name": "Cohere",
        "logo": "",
        "tags": "LLM,TEXT EMBEDDING, TEXT RE-RANK",
        "status": "1",
@@ -2017,6 +2035,60 @@
            "max_tokens": 32768,
            "model_type": "chat"
        },
        {
            "llm_name": "Qwen/Qwen2.5-72B-Instruct-128K",
            "tags": "LLM,CHAT,128k",
            "max_tokens": 131072,
            "model_type": "chat"
        },
        {
            "llm_name": "Qwen/Qwen2.5-72B-Instruct",
            "tags": "LLM,CHAT,32k",
            "max_tokens": 32768,
            "model_type": "chat"
        },
        {
            "llm_name": "Qwen/Qwen2.5-7B-Instruct",
            "tags": "LLM,CHAT,32k",
            "max_tokens": 32768,
            "model_type": "chat"
        },
        {
            "llm_name": "Qwen/Qwen2.5-14B-Instruct",
            "tags": "LLM,CHAT,32k",
            "max_tokens": 32768,
            "model_type": "chat"
        },
        {
            "llm_name": "Qwen/Qwen2.5-32B-Instruct",
            "tags": "LLM,CHAT,32k",
            "max_tokens": 32768,
            "model_type": "chat"
        },
        {
            "llm_name": "Qwen/Qwen2.5-Math-72B-Instruct",
            "tags": "LLM,CHAT,Math,4k",
            "max_tokens": 4096,
            "model_type": "chat"
        },
        {
            "llm_name": "Qwen/Qwen2.5-Coder-7B-Instruct",
            "tags": "LLM,CHAT,FIM,Coder,32k",
            "max_tokens": 32768,
            "model_type": "chat"
        },
        {
            "llm_name": "Pro/Qwen/Qwen2.5-7B-Instruct",
            "tags": "LLM,CHAT,32k",
            "max_tokens": 32768,
            "model_type": "chat"
        },
        {
            "llm_name": "Pro/Qwen/Qwen2.5-Coder-7B-Instruct",
            "tags": "LLM,CHAT,FIM,Coder,32k",
            "max_tokens": 32768,
            "model_type": "chat"
        },
        {
            "llm_name": "01-ai/Yi-1.5-34B-Chat-16K",
            "tags": "LLM,CHAT,16k",
@@ -2231,7 +2303,7 @@
    {
        "name": "XunFei Spark",
        "logo": "",
        "tags": "LLM",
        "tags": "LLM,TTS",
        "status": "1",
        "llm": []
    },
@@ -2268,6 +2340,12 @@
            "max_tokens": 204800,
            "model_type": "chat"
        },
        {
            "llm_name": "claude-3-5-sonnet-20241022",
            "tags": "LLM,CHAT,200k",
            "max_tokens": 204800,
            "model_type": "chat"
        },
        {
            "llm_name": "claude-3-opus-20240229",
            "tags": "LLM,CHAT,200k",
@@ -2299,8 +2377,8 @@
            "model_type": "chat"
        },
        {
            "llm_name": "claude-instant-1.2",
            "tags": "LLM,CHAT,100k",
            "llm_name": "claude-3-5-sonnet-20241022",
            "tags": "LLM,CHAT,200k",
            "max_tokens": 102400,
            "model_type": "chat"
        }
@@ -2376,11 +2454,11 @@
        "llm": []
    },
    {
        "name": "HuggingFace",
        "logo": "",
        "tags": "TEXT EMBEDDING",
        "status": "1",
        "llm": []
    }
        "name": "HuggingFace",
        "logo": "",
        "tags": "TEXT EMBEDDING",
        "status": "1",
        "llm": []
    }
]
}

@@ -1,200 +1,203 @@
{
{
    "settings": {
        "index": {
            "number_of_shards": 2,
            "number_of_replicas": 0,
            "refresh_interval" : "1000ms"
            "refresh_interval": "1000ms"
        },
        "similarity": {
            "scripted_sim": {
                "type": "scripted",
                "script": {
                    "source": "double idf = Math.log(1+(field.docCount-term.docFreq+0.5)/(term.docFreq + 0.5))/Math.log(1+((field.docCount-0.5)/1.5)); return query.boost * idf * Math.min(doc.freq, 1);"
                }
            "scripted_sim": {
                "type": "scripted",
                "script": {
                    "source": "double idf = Math.log(1+(field.docCount-term.docFreq+0.5)/(term.docFreq + 0.5))/Math.log(1+((field.docCount-0.5)/1.5)); return query.boost * idf * Math.min(doc.freq, 1);"
                }
            }
        }
    },
    "mappings": {
        "properties": {
            "lat_lon": {"type": "geo_point", "store":"true"}
        },
        "date_detection": "true",
        "dynamic_templates": [
            {
                "int": {
                    "match": "*_int",
                    "mapping": {
                        "type": "integer",
                        "store": "true"
                    }
        "properties": {
            "lat_lon": {
                "type": "geo_point",
                "store": "true"
            }
        },
        "date_detection": "true",
        "dynamic_templates": [
            {
                "int": {
                    "match": "*_int",
                    "mapping": {
                        "type": "integer",
                        "store": "true"
                    }
                }
            },
            {
                "ulong": {
                    "match": "*_ulong",
                    "mapping": {
                        "type": "unsigned_long",
                        "store": "true"
                    }
                }
            },
            {
                "long": {
                    "match": "*_long",
                    "mapping": {
                        "type": "long",
                        "store": "true"
                    }
                }
            },
            {
                "short": {
                    "match": "*_short",
                    "mapping": {
                        "type": "short",
                        "store": "true"
                    }
                }
            },
            {
                "numeric": {
                    "match": "*_flt",
                    "mapping": {
                        "type": "float",
                        "store": true
                    }
                }
            },
            {
                "tks": {
                    "match": "*_tks",
                    "mapping": {
                        "type": "text",
                        "similarity": "scripted_sim",
                        "analyzer": "whitespace",
                        "store": true
                    }
                }
            },
            {
                "ltks":{
                    "match": "*_ltks",
                    "mapping": {
                        "type": "text",
                        "analyzer": "whitespace",
                        "store": true
                    }
                }
            },
            {
                "kwd": {
                    "match_pattern": "regex",
                    "match": "^(.*_(kwd|id|ids|uid|uids)|uid)$",
                    "mapping": {
                        "type": "keyword",
                        "similarity": "boolean",
                        "store": true
                    }
                }
            },
            {
                "dt": {
                    "match_pattern": "regex",
                    "match": "^.*(_dt|_time|_at)$",
                    "mapping": {
                        "type": "date",
                        "format": "yyyy-MM-dd HH:mm:ss||yyyy-MM-dd||yyyy-MM-dd_HH:mm:ss",
                        "store": true
                    }
                }
            },
            {
                "nested": {
                    "match": "*_nst",
                    "mapping": {
                        "type": "nested"
                    }
                }
            },
            {
                "object": {
                    "match": "*_obj",
                    "mapping": {
                        "type": "object",
                        "dynamic": "true"
                    }
                }
            },
            {
                "string": {
                    "match": "*_with_weight",
                    "mapping": {
                        "type": "text",
                        "index": "false",
                        "store": true
                    }
                }
            },
            {
                "string": {
                    "match": "*_fea",
                    "mapping": {
                        "type": "rank_feature"
                    }
                }
            },
            {
                "dense_vector": {
                    "match": "*_512_vec",
                    "mapping": {
                        "type": "dense_vector",
                        "index": true,
                        "similarity": "cosine",
                        "dims": 512
                    }
                }
            },
            {
                "dense_vector": {
                    "match": "*_768_vec",
                    "mapping": {
                        "type": "dense_vector",
                        "index": true,
                        "similarity": "cosine",
                        "dims": 768
                    }
                }
            },
            {
                "dense_vector": {
                    "match": "*_1024_vec",
                    "mapping": {
                        "type": "dense_vector",
                        "index": true,
                        "similarity": "cosine",
                        "dims": 1024
                    }
                }
            },
            {
                "dense_vector": {
                    "match": "*_1536_vec",
                    "mapping": {
                        "type": "dense_vector",
                        "index": true,
                        "similarity": "cosine",
                        "dims": 1536
                    }
                }
            },
            {
                "binary": {
                    "match": "*_bin",
                    "mapping": {
                        "type": "binary"
                    }
                }
            }
        ]
    }
}
            },
            {
                "ulong": {
                    "match": "*_ulong",
                    "mapping": {
                        "type": "unsigned_long",
                        "store": "true"
                    }
                }
            },
            {
                "long": {
                    "match": "*_long",
                    "mapping": {
                        "type": "long",
                        "store": "true"
                    }
                }
            },
            {
                "short": {
                    "match": "*_short",
                    "mapping": {
                        "type": "short",
                        "store": "true"
                    }
                }
            },
            {
                "numeric": {
                    "match": "*_flt",
                    "mapping": {
                        "type": "float",
                        "store": true
                    }
                }
            },
            {
                "tks": {
                    "match": "*_tks",
                    "mapping": {
                        "type": "text",
                        "similarity": "scripted_sim",
                        "analyzer": "whitespace",
                        "store": true
                    }
                }
            },
            {
                "ltks": {
                    "match": "*_ltks",
                    "mapping": {
                        "type": "text",
                        "analyzer": "whitespace",
                        "store": true
                    }
                }
            },
            {
                "kwd": {
                    "match_pattern": "regex",
                    "match": "^(.*_(kwd|id|ids|uid|uids)|uid)$",
                    "mapping": {
                        "type": "keyword",
                        "similarity": "boolean",
                        "store": true
                    }
                }
            },
            {
                "dt": {
                    "match_pattern": "regex",
                    "match": "^.*(_dt|_time|_at)$",
                    "mapping": {
                        "type": "date",
                        "format": "yyyy-MM-dd HH:mm:ss||yyyy-MM-dd||yyyy-MM-dd_HH:mm:ss",
                        "store": true
                    }
                }
            },
            {
                "nested": {
                    "match": "*_nst",
                    "mapping": {
                        "type": "nested"
                    }
                }
            },
            {
                "object": {
                    "match": "*_obj",
                    "mapping": {
                        "type": "object",
                        "dynamic": "true"
                    }
                }
            },
            {
                "string": {
                    "match": "*_(with_weight|list)$",
                    "mapping": {
                        "type": "text",
                        "index": "false",
                        "store": true
                    }
                }
            },
            {
                "string": {
                    "match": "*_fea",
                    "mapping": {
                        "type": "rank_feature"
                    }
                }
            },
            {
                "dense_vector": {
                    "match": "*_512_vec",
                    "mapping": {
                        "type": "dense_vector",
                        "index": true,
                        "similarity": "cosine",
                        "dims": 512
                    }
                }
            },
            {
                "dense_vector": {
                    "match": "*_768_vec",
                    "mapping": {
                        "type": "dense_vector",
                        "index": true,
                        "similarity": "cosine",
                        "dims": 768
                    }
                }
            },
            {
                "dense_vector": {
                    "match": "*_1024_vec",
                    "mapping": {
                        "type": "dense_vector",
                        "index": true,
                        "similarity": "cosine",
                        "dims": 1024
                    }
                }
            },
            {
                "dense_vector": {
                    "match": "*_1536_vec",
                    "mapping": {
                        "type": "dense_vector",
                        "index": true,
                        "similarity": "cosine",
                        "dims": 1536
                    }
                }
            },
            {
                "binary": {
                    "match": "*_bin",
                    "mapping": {
                        "type": "binary"
                    }
                }
            }
        ]
    }
}

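Note: in this mapping, field-name suffixes select the dynamic template, so new fields need no mapping changes. An illustrative document (field names chosen for the demo):

    doc = {
        "docnm_kwd": "handbook.pdf",        # matches ^.*_kwd$   -> keyword, boolean similarity
        "content_ltks": "intro chapter",    # matches *_ltks     -> text, whitespace analyzer
        "create_timestamp_flt": 1730.0,     # matches *_flt      -> float
        "q_1024_vec": [0.1] * 1024,         # matches *_1024_vec -> dense_vector, cosine, 1024 dims
    }
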
@@ -1 +0,0 @@
../docker/service_conf.yaml
conf/service_conf.yaml (new file, 77 lines)
@@ -0,0 +1,77 @@
ragflow:
  host: 0.0.0.0
  http_port: 9380
mysql:
  name: 'rag_flow'
  user: 'root'
  password: 'infini_rag_flow'
  host: 'mysql'
  port: 5455
  max_connections: 100
  stale_timeout: 30
minio:
  user: 'rag_flow'
  password: 'infini_rag_flow'
  host: 'minio:9000'
es:
  hosts: 'http://es01:1200'
  username: 'elastic'
  password: 'infini_rag_flow'
infinity:
  uri: 'infinity:23817'
  db_name: 'default_db'
redis:
  db: 1
  password: 'infini_rag_flow'
  host: 'redis:6379'

# postgres:
#   name: 'rag_flow'
#   user: 'rag_flow'
#   password: 'infini_rag_flow'
#   host: 'postgres'
#   port: 5432
#   max_connections: 100
#   stale_timeout: 30
# s3:
#   endpoint: 'endpoint'
#   access_key: 'access_key'
#   secret_key: 'secret_key'
#   region: 'region'
# azure:
#   auth_type: 'sas'
#   container_url: 'container_url'
#   sas_token: 'sas_token'
# azure:
#   auth_type: 'spn'
#   account_url: 'account_url'
#   client_id: 'client_id'
#   secret: 'secret'
#   tenant_id: 'tenant_id'
#   container_name: 'container_name'
# user_default_llm:
#   factory: 'Tongyi-Qianwen'
#   api_key: 'sk-xxxxxxxxxxxxx'
#   base_url: ''
# oauth:
#   github:
#     client_id: xxxxxxxxxxxxxxxxxxxxxxxxx
#     secret_key: xxxxxxxxxxxxxxxxxxxxxxxxxxxx
#     url: https://github.com/login/oauth/access_token
#   feishu:
#     app_id: cli_xxxxxxxxxxxxxxxxxxx
#     app_secret: xxxxxxxxxxxxxxxxxxxxxxxxxxxx
#     app_access_token_url: https://open.feishu.cn/open-apis/auth/v3/app_access_token/internal
#     user_access_token_url: https://open.feishu.cn/open-apis/authen/v1/oidc/access_token
#     grant_type: 'authorization_code'
# authentication:
#   client:
#     switch: false
#     http_app_key:
#     http_secret_key:
#   site:
#     switch: false
# permission:
#   switch: false
#   component: false
#   dataset: false

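Note: the symlink to docker/service_conf.yaml is replaced by a real config file, which is exactly what the get_base_config refactor earlier in this diff reads. Illustrative lookup against the defaults above:

    from api.utils import get_base_config

    es = get_base_config("es", {})
    assert es.get("hosts") == "http://es01:1200"
    mysql_port = get_base_config("mysql", {}).get("port")   # 5455
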
@@ -110,7 +110,7 @@ class RAGFlowDocxParser:
            return lines
        return ["\n".join(lines)]

    def __call__(self, fnm, from_page=0, to_page=100000):
    def __call__(self, fnm, from_page=0, to_page=100000000):
        self.doc = Document(fnm) if isinstance(
            fnm, str) else Document(BytesIO(fnm))
        pn = 0  # parsed page
@@ -130,7 +130,7 @@ class RAGFlowDocxParser:
                    if 'lastRenderedPageBreak' in run._element.xml:
                        pn += 1

            secs.append(("".join(runs_within_single_paragraph), p.style.name))  # then concat run.text as part of the paragraph
            secs.append(("".join(runs_within_single_paragraph), p.style.name if hasattr(p.style, 'name') else ''))  # then concat run.text as part of the paragraph

        tbls = [self.__extract_table_content(tb) for tb in self.doc.tables]
        return secs, tbls

@@ -3,12 +3,11 @@
# from https://github.com/langchain-ai/langchain/blob/master/libs/text-splitters/langchain_text_splitters/json.py

import json
from typing import Any, Dict, List, Optional
from typing import Any
from rag.nlp import find_codec

class RAGFlowJsonParser:
    def __init__(
        self, max_chunk_size: int = 2000, min_chunk_size: Optional[int] = None
        self, max_chunk_size: int = 2000, min_chunk_size: int | None = None
    ):
        super().__init__()
        self.max_chunk_size = max_chunk_size * 2
@@ -27,12 +26,12 @@ class RAGFlowJsonParser:
        return sections

    @staticmethod
    def _json_size(data: Dict) -> int:
    def _json_size(data: dict) -> int:
        """Calculate the size of the serialized JSON object."""
        return len(json.dumps(data, ensure_ascii=False))

    @staticmethod
    def _set_nested_dict(d: Dict, path: List[str], value: Any) -> None:
    def _set_nested_dict(d: dict, path: list[str], value: Any) -> None:
        """Set a value in a nested dictionary based on the given path."""
        for key in path[:-1]:
            d = d.setdefault(key, {})
@@ -54,10 +53,10 @@ class RAGFlowJsonParser:

    def _json_split(
        self,
        data: Dict[str, Any],
        current_path: Optional[List[str]] = None,
        chunks: Optional[List[Dict]] = None,
    ) -> List[Dict]:
        data: dict[str, Any],
        current_path: list[str] | None,
        chunks: list[dict] | None,
    ) -> list[dict]:
        """
        Split json into maximum size dictionaries while preserving structure.
        """
@@ -87,9 +86,9 @@ class RAGFlowJsonParser:

    def split_json(
        self,
        json_data: Dict[str, Any],
        json_data: dict[str, Any],
        convert_lists: bool = False,
    ) -> List[Dict]:
    ) -> list[dict]:
        """Splits JSON into a list of JSON chunks"""

        if convert_lists:
@@ -104,10 +103,10 @@ class RAGFlowJsonParser:

    def split_text(
        self,
        json_data: Dict[str, Any],
        json_data: dict[str, Any],
        convert_lists: bool = False,
        ensure_ascii: bool = True,
    ) -> List[str]:
    ) -> list[str]:
        """Splits JSON into a list of JSON formatted strings"""

        chunks = self.split_json(json_data=json_data, convert_lists=convert_lists)

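Note: these hunks only modernize the type hints (PEP 604/585 builtins), so behavior is unchanged. A minimal usage sketch (module path assumed from the repo layout):

    from deepdoc.parser.json_parser import RAGFlowJsonParser  # path assumed

    parser = RAGFlowJsonParser(max_chunk_size=2000)
    data = {"title": "RAGFlow", "sections": {"intro": "...", "setup": "..."}}
    for chunk in parser.split_text(data, convert_lists=True, ensure_ascii=False):
        print(len(chunk), chunk[:40])   # each chunk is a JSON-formatted string
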
@ -11,6 +11,7 @@
|
||||
# limitations under the License.
|
||||
#
|
||||
|
||||
import logging
|
||||
import os
|
||||
import random
|
||||
|
||||
@ -18,22 +19,18 @@ import xgboost as xgb
|
||||
from io import BytesIO
|
||||
import re
|
||||
import pdfplumber
|
||||
import logging
|
||||
from PIL import Image, ImageDraw
|
||||
from PIL import Image
|
||||
import numpy as np
|
||||
from timeit import default_timer as timer
|
||||
from pypdf import PdfReader as pdf2_read
|
||||
|
||||
from api.settings import LIGHTEN
|
||||
from api import settings
|
||||
from api.utils.file_utils import get_project_base_directory
|
||||
from deepdoc.vision import OCR, Recognizer, LayoutRecognizer, TableStructureRecognizer
|
||||
from rag.nlp import rag_tokenizer
|
||||
from copy import deepcopy
|
||||
from huggingface_hub import snapshot_download
|
||||
|
||||
logging.getLogger("pdfminer").setLevel(logging.WARNING)
|
||||
|
||||
|
||||
class RAGFlowPdfParser:
|
||||
def __init__(self):
|
||||
self.ocr = OCR()
|
||||
@ -44,20 +41,20 @@ class RAGFlowPdfParser:
|
||||
self.tbl_det = TableStructureRecognizer()
|
||||
|
||||
self.updown_cnt_mdl = xgb.Booster()
|
||||
if not LIGHTEN:
|
||||
if not settings.LIGHTEN:
|
||||
try:
|
||||
import torch
|
||||
if torch.cuda.is_available():
|
||||
self.updown_cnt_mdl.set_param({"device": "cuda"})
|
||||
except Exception as e:
|
||||
logging.error(str(e))
|
||||
except Exception:
|
||||
logging.exception("RAGFlowPdfParser __init__")
|
||||
try:
|
||||
model_dir = os.path.join(
|
||||
get_project_base_directory(),
|
||||
"rag/res/deepdoc")
|
||||
self.updown_cnt_mdl.load_model(os.path.join(
|
||||
model_dir, "updown_concat_xgb.model"))
|
||||
except Exception as e:
|
||||
except Exception:
|
||||
model_dir = snapshot_download(
|
||||
repo_id="InfiniFlow/text_concat_xgb_v1.0",
|
||||
local_dir=os.path.join(get_project_base_directory(), "rag/res/deepdoc"),
|
||||
@ -187,7 +184,7 @@ class RAGFlowPdfParser:
|
||||
return True
|
||||
|
||||
def _table_transformer_job(self, ZM):
|
||||
logging.info("Table processing...")
|
||||
logging.debug("Table processing...")
|
||||
imgs, pos = [], []
|
||||
tbcnt = [0]
|
||||
MARGIN = 10
|
||||
@ -425,12 +422,12 @@ class RAGFlowPdfParser:
|
||||
detach_feats = [b["x1"] < b_["x0"],
|
||||
b["x0"] > b_["x1"]]
|
||||
if (any(feats) and not any(concatting_feats)) or any(detach_feats):
|
||||
print(
|
||||
logging.debug("{} {} {} {}".format(
|
||||
b["text"],
|
||||
b_["text"],
|
||||
any(feats),
|
||||
any(concatting_feats),
|
||||
any(detach_feats))
|
||||
))
|
||||
i += 1
|
||||
continue
|
||||
# merge up and down
|
||||
@ -760,7 +757,7 @@ class RAGFlowPdfParser:
|
||||
if ii is not None:
|
||||
b = louts[ii]
|
||||
else:
|
||||
logging.warn(
|
||||
logging.warning(
|
||||
f"Missing layout match: {pn + 1},%s" %
|
||||
(bxs[0].get(
|
||||
"layoutno", "")))
|
||||
@ -919,7 +916,7 @@ class RAGFlowPdfParser:
|
||||
dfs(boxes[0], 0)
|
||||
else:
|
||||
logging.debug("WASTE: " + boxes[0]["text"])
|
||||
except Exception as e:
|
||||
except Exception:
|
||||
pass
|
||||
boxes.pop(0)
|
||||
mw = np.mean(widths)
|
||||
@ -938,8 +935,8 @@ class RAGFlowPdfParser:
|
||||
pdf = pdfplumber.open(
|
||||
fnm) if not binary else pdfplumber.open(BytesIO(binary))
|
||||
return len(pdf.pages)
|
||||
except Exception as e:
|
||||
logging.error(str(e))
|
||||
except Exception:
|
||||
logging.exception("total_page_number")
|
||||
|
||||
def __images__(self, fnm, zoomin=3, page_from=0,
|
||||
page_to=299, callback=None):
|
||||
@ -962,8 +959,8 @@ class RAGFlowPdfParser:
|
||||
self.page_chars = [[{**c, 'top': c['top'], 'bottom': c['bottom']} for c in page.dedupe_chars().chars if self._has_color(c)] for page in
|
||||
self.pdf.pages[page_from:page_to]]
|
||||
self.total_page = len(self.pdf.pages)
|
||||
except Exception as e:
|
||||
logging.error(str(e))
|
||||
except Exception:
|
||||
logging.exception("RAGFlowPdfParser __images__")
|
||||
|
||||
self.outlines = []
|
||||
try:
|
||||
@ -981,9 +978,9 @@ class RAGFlowPdfParser:
|
||||
except Exception as e:
|
||||
logging.warning(f"Outlines exception: {e}")
|
||||
if not self.outlines:
|
||||
logging.warning(f"Miss outlines")
|
||||
logging.warning("Miss outlines")
|
||||
|
||||
logging.info("Images converted.")
|
||||
logging.debug("Images converted.")
|
||||
self.is_english = [re.search(r"[a-zA-Z0-9,/¸;:'\[\]\(\)!@#$%^&*\"?<>._-]{30,}", "".join(
|
||||
random.choices([c["text"] for c in self.page_chars[i]], k=min(100, len(self.page_chars[i]))))) for i in
|
||||
range(len(self.page_chars))]
|
||||
@ -1023,7 +1020,7 @@ class RAGFlowPdfParser:
|
||||
self.is_english = re.search(r"[\na-zA-Z0-9,/¸;:'\[\]\(\)!@#$%^&*\"?<>._-]{30,}",
|
||||
"".join([b["text"] for b in random.choices(bxes, k=min(30, len(bxes)))]))
|
||||
|
||||
logging.info("Is it English:", self.is_english)
|
||||
logging.debug("Is it English:", self.is_english)
|
||||
|
||||
self.page_cum_height = np.cumsum(self.page_cum_height)
|
||||
assert len(self.page_cum_height) == len(self.page_images) + 1
|
||||
@ -1162,10 +1159,10 @@ class PlainParser(object):
|
||||
dfs(a, depth + 1)
|
||||
|
||||
dfs(outlines, 0)
|
||||
except Exception as e:
|
||||
logging.warning(f"Outlines exception: {e}")
|
||||
except Exception:
|
||||
logging.exception("Outlines exception")
|
||||
if not self.outlines:
|
||||
logging.warning(f"Miss outlines")
|
||||
logging.warning("Miss outlines")
|
||||
|
||||
return [(l, "") for l in lines], []
@@ -11,10 +11,15 @@
 # limitations under the License.
 #

-import re,json,os
+import logging
+import re
+import json
+import os
 import pandas as pd
 from rag.nlp import rag_tokenizer
 from . import regions


 current_file_path = os.path.dirname(os.path.abspath(__file__))
 GOODS = pd.read_csv(os.path.join(current_file_path, "res/corp_baike_len.csv"), sep="\t", header=0).fillna(0)
 GOODS["cid"] = GOODS["cid"].astype(str)
@@ -27,7 +32,7 @@ def baike(cid, default_v=0):
     global GOODS
     try:
         return GOODS.loc[str(cid), "len"]
-    except Exception as e:
+    except Exception:
         pass
     return default_v

@@ -65,7 +70,8 @@ def rmNoise(n):
 GOOD_CORP = set([corpNorm(rmNoise(c), False) for c in GOOD_CORP])
 for c,v in CORP_TAG.items():
     cc = corpNorm(rmNoise(c), False)
-    if not cc: print (c)
+    if not cc:
+        logging.debug(c)
 CORP_TAG = {corpNorm(rmNoise(c), False):v for c,v in CORP_TAG.items()}

 def is_good(nm):

@@ -10,9 +10,14 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 #

-import re, copy, time, datetime, demjson3, \
-    traceback, signal
+import logging
+import re
+import copy
+import time
+import datetime
+import demjson3
+import traceback
+import signal
 import numpy as np
 from deepdoc.parser.resume.entities import degrees, schools, corporations
 from rag.nlp import rag_tokenizer, surname
@@ -79,7 +84,7 @@ def forEdu(cv):
                 y, m, d = getYMD(dt)
                 st_dt.append(str(y))
                 e["start_dt_kwd"] = str(y)
-            except Exception as e:
+            except Exception:
                 pass

         r = schools.select(n.get("school_name", ""))
@@ -158,7 +163,7 @@ def forEdu(cv):
             y, m, d = getYMD(edu_end_dt)
             cv["work_exp_flt"] = min(int(str(datetime.date.today())[0:4]) - int(y), cv.get("work_exp_flt", 1000))
         except Exception as e:
-            print("EXCEPTION: ", e, edu_end_dt, cv.get("work_exp_flt"))
+            logging.exception("forEdu {} {} {}".format(e, edu_end_dt, cv.get("work_exp_flt")))
     if sch:
         cv["school_name_kwd"] = sch
     if (len(cv.get("degree_kwd", [])) >= 1 and "本科" in cv["degree_kwd"]) \
@@ -233,7 +238,7 @@ def forWork(cv):
         if type(n) == type(""):
             try:
                 n = json_loads(n)
-            except Exception as e:
+            except Exception:
                 continue

         if n.get("start_time") and (not work_st_tm or n["start_time"] < work_st_tm): work_st_tm = n["start_time"]
@@ -269,8 +274,8 @@ def forWork(cv):

         try:
             duas.append((datetime.datetime.strptime(ed, "%Y-%m-%d") - datetime.datetime.strptime(st, "%Y-%m-%d")).days)
-        except Exception as e:
-            print("kkkkkkkkkkkkkkkkkkkk", n.get("start_time"), n.get("end_time"))
+        except Exception:
+            logging.exception("forWork {} {}".format(n.get("start_time"), n.get("end_time")))

         if n.get("scale"):
             r = re.search(r"^([0-9]+)", str(n["scale"]))
@@ -327,7 +332,7 @@ def forWork(cv):
            y, m, d = getYMD(work_st_tm)
            cv["work_exp_flt"] = min(int(str(datetime.date.today())[0:4]) - int(y), cv.get("work_exp_flt", 1000))
        except Exception as e:
-           print("EXCEPTION: ", e, work_st_tm, cv.get("work_exp_flt"))
+           logging.exception("forWork {} {} {}".format(e, work_st_tm, cv.get("work_exp_flt")))

    cv["job_num_int"] = 0
    if duas:
@@ -457,8 +462,8 @@ def parse(cv):
            t = k[:-4]
            cv[f"{t}_kwd"] = nms
            cv[f"{t}_tks"] = rag_tokenizer.tokenize(" ".join(nms))
-        except Exception as e:
-            print("【EXCEPTION】:", str(traceback.format_exc()), cv[k])
+        except Exception:
+            logging.exception("parse {} {}".format(str(traceback.format_exc()), cv[k]))
            cv[k] = []

    # tokenize fields
@@ -524,7 +529,7 @@ def parse(cv):
    if not y: y = "2012"
    if not m: m = "01"
    if not d: d = "01"
-   cv["updated_at_dt"] = f"%s-%02d-%02d 00:00:00" % (y, int(m), int(d))
+   cv["updated_at_dt"] = "%s-%02d-%02d 00:00:00" % (y, int(m), int(d))
    # long text tokenize

    if cv.get("responsibilities"): cv["responsibilities_ltks"] = rag_tokenizer.tokenize(rmHtmlTag(cv["responsibilities"]))
@@ -556,10 +561,10 @@ def parse(cv):
            cv["work_exp_flt"] = (time.time() - int(int(cv["work_start_time"]) / 1000)) / 3600. / 24. / 365.
        elif re.match(r"[0-9]{4}[^0-9]", str(cv["work_start_time"])):
            y, m, d = getYMD(str(cv["work_start_time"]))
-           cv["work_start_dt"] = f"%s-%02d-%02d 00:00:00" % (y, int(m), int(d))
+           cv["work_start_dt"] = "%s-%02d-%02d 00:00:00" % (y, int(m), int(d))
            cv["work_exp_flt"] = int(str(datetime.date.today())[0:4]) - int(y)
    except Exception as e:
-       print("【EXCEPTION】", e, "==>", cv.get("work_start_time"))
+       logging.exception("parse {} ==> {}".format(e, cv.get("work_start_time")))
    if "work_exp_flt" not in cv and cv.get("work_experience", 0): cv["work_exp_flt"] = int(cv["work_experience"]) / 12.

    keys = list(cv.keys())
@@ -574,7 +579,7 @@ def parse(cv):

    cv["tob_resume_id"] = str(cv["tob_resume_id"])
    cv["id"] = cv["tob_resume_id"]
-   print("CCCCCCCCCCCCCCC")
+   logging.debug("CCCCCCCCCCCCCCC")

    return dealWithInt64(cv)

@@ -589,4 +594,3 @@ def dealWithInt64(d):

    if isinstance(d, np.integer): d = int(d)
    return d
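`dealWithInt64` exists because NumPy integer scalars leak out of pandas computations and are not JSON-serializable; the hunk above shows only the scalar case. A self-contained sketch of the full conversion, where the recursive dict/list handling is an assumption inferred from how `parse` returns `dealWithInt64(cv)`, not a quote of the source:

```python
import json
import numpy as np

def deal_with_int64(d):
    # Recursively replace numpy integer scalars with plain Python ints so
    # the resulting structure can be serialized by the standard json module.
    if isinstance(d, dict):
        return {k: deal_with_int64(v) for k, v in d.items()}
    if isinstance(d, list):
        return [deal_with_int64(v) for v in d]
    if isinstance(d, np.integer):
        return int(d)
    return d

cv = {"job_num_int": np.int64(3), "degree_kwd": [np.int64(1)]}
print(json.dumps(deal_with_int64(cv)))  # {"job_num_int": 3, "degree_kwd": [1]}
```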
@@ -10,6 +10,8 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 #
+import re

 from deepdoc.parser.utils import get_text
+from rag.nlp import num_tokens_from_string

@@ -29,8 +31,6 @@ class RAGFlowTxtParser:
         def add_chunk(t):
             nonlocal cks, tk_nums, delimiter
             tnum = num_tokens_from_string(t)
-            if tnum < 8:
-                pos = ""
             if tk_nums[-1] > chunk_token_num:
                 cks.append(t)
                 tk_nums.append(tnum)
@@ -38,15 +38,19 @@ class RAGFlowTxtParser:
                 cks[-1] += t
                 tk_nums[-1] += tnum

-        s, e = 0, 1
-        while e < len(txt):
-            if txt[e] in delimiter:
-                add_chunk(txt[s: e + 1])
-                s = e + 1
-                e = s + 1
-            else:
-                e += 1
-        if s < e:
-            add_chunk(txt[s: e + 1])
+        dels = []
+        s = 0
+        for m in re.finditer(r"`([^`]+)`", delimiter, re.I):
+            f, t = m.span()
+            dels.append(m.group(1))
+            dels.extend(list(delimiter[s: f]))
+            s = t
+        if s < len(delimiter):
+            dels.extend(list(delimiter[s:]))
+        dels = [re.escape(d) for d in dels if d]
+        dels = [d for d in dels if d]
+        dels = "|".join(dels)
+        secs = re.split(r"(%s)" % dels, txt)
+        for sec in secs: add_chunk(sec)

-        return [[c,""] for c in cks]
+        return [[c, ""] for c in cks]
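The rewritten chunker above supports multi-character delimiters written between backticks in the `delimiter` string: backtick-quoted tokens are extracted whole, every remaining character becomes a single-character delimiter, and everything is joined into one alternation regex whose capturing group keeps the separators in the split output. A standalone sketch of that parsing step (hypothetical inputs, same logic as the hunk):

```python
import re

def split_with_delimiters(txt: str, delimiter: str) -> list:
    dels, s = [], 0
    # Backtick-quoted tokens such as `##` become multi-character delimiters.
    for m in re.finditer(r"`([^`]+)`", delimiter):
        dels.append(m.group(1))
        dels.extend(list(delimiter[s: m.start()]))
        s = m.end()
    dels.extend(list(delimiter[s:]))
    # Escape each delimiter; the capturing group keeps separators in the output.
    pattern = "|".join(re.escape(d) for d in dels if d)
    return [sec for sec in re.split(r"(%s)" % pattern, txt) if sec]

print(split_with_delimiters("a。b\nc##d", "\n。`##`"))
# ['a', '。', 'b', '\n', 'c', '##', 'd']
```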
@@ -14,6 +14,7 @@
 # limitations under the License.
 #

+import logging
 import sys
 import six
 import cv2
@@ -402,7 +403,7 @@ class DetResizeForTest(object):
                 return None, (None, None)
             img = cv2.resize(img, (int(resize_w), int(resize_h)))
         except BaseException:
-            print(img.shape, resize_w, resize_h)
+            logging.exception("{} {} {}".format(img.shape, resize_w, resize_h))
             sys.exit(0)
         ratio_h = resize_h / float(h)
         ratio_w = resize_w / float(w)
@@ -452,7 +453,6 @@ class E2EResizeForTest(object):
         return data

     def resize_image_for_totaltext(self, im, max_side_len=512):
-
         h, w, _ = im.shape
         resize_w = w
         resize_h = h

@@ -11,6 +11,7 @@
 # limitations under the License.
 #

+import logging
 import os
 from copy import deepcopy

@@ -439,7 +440,7 @@ class Recognizer(object):
             end_index = min((i + 1) * batch_size, len(imgs))
             batch_image_list = imgs[start_index:end_index]
             inputs = self.preprocess(batch_image_list)
-            print("preprocess")
+            logging.debug("preprocess")
             for ins in inputs:
                 bb = self.postprocess(self.ort_sess.run(None, {k:v for k,v in ins.items() if k in self.input_names})[0], ins, thr)
                 res.append(bb)
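For context on the loop above: images are preprocessed and run through the ONNX session in fixed-size batches, with `min((i + 1) * batch_size, len(imgs))` clamping the final slice. A generic sketch of that batching arithmetic (hypothetical helper, not the Recognizer API):

```python
def iter_batches(items: list, batch_size: int):
    # Yield fixed-size slices; the last batch may be shorter, exactly like
    # the end_index = min((i + 1) * batch_size, len(imgs)) clamp above.
    for start in range(0, len(items), batch_size):
        yield items[start: min(start + batch_size, len(items))]

for batch in iter_batches(list(range(10)), 4):
    print(batch)  # [0, 1, 2, 3], [4, 5, 6, 7], [8, 9]
```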
@@ -11,6 +11,7 @@
 # limitations under the License.
 #

+import logging
 import os
 import PIL
 from PIL import ImageDraw
@@ -24,7 +25,7 @@ def save_results(image_list, results, labels, output_dir='output/', threshold=0.5):
         out_path = os.path.join(output_dir, f"{idx}.jpg")
         im.save(out_path, quality=95)
-        print("save result to: " + out_path)
+        logging.debug("save result to: " + out_path)


 def draw_box(im, result, lables, threshold=0.5):

@@ -10,7 +10,10 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 #
-import os, sys
+import logging
+import os
+import sys

 sys.path.insert(
     0,
     os.path.abspath(
@@ -56,7 +59,7 @@ def main(args):
         } for t in lyt]
         img = draw_box(images[i], lyt, labels, float(args.threshold))
         img.save(outputs[i], quality=95)
-        print("save result to: " + outputs[i])
+        logging.info("save result to: " + outputs[i])


 def get_table_html(img, tb_cpns, ocr):

@@ -38,7 +38,7 @@ class TableStructureRecognizer(Recognizer):
             super().__init__(self.labels, "tsr", os.path.join(
                 get_project_base_directory(),
                 "rag/res/deepdoc"))
-        except Exception as e:
+        except Exception:
             super().__init__(self.labels, "tsr", snapshot_download(repo_id="InfiniFlow/deepdoc",
                               local_dir=os.path.join(get_project_base_directory(), "rag/res/deepdoc"),
                               local_dir_use_symlinks=False))
docker/.env (138 changed lines)
@@ -1,75 +1,135 @@
-# Version of Elastic products
+# The type of doc engine to use.
+# Available options:
+# - `elasticsearch` (default)
+# - `infinity` (https://github.com/infiniflow/infinity)
+DOC_ENGINE=${DOC_ENGINE:-elasticsearch}
+
+# ------------------------------
+# docker env var for specifying vector db type at startup
+# (based on the vector db type, the corresponding docker
+# compose profile will be used)
+# ------------------------------
+COMPOSE_PROFILES=${DOC_ENGINE}
+
+# The version of Elasticsearch.
 STACK_VERSION=8.11.3

-# Port to expose Elasticsearch HTTP API to the host
+# The hostname where the Elasticsearch service is exposed
+ES_HOST=es01
+
+# The port used to expose the Elasticsearch service to the host machine,
+# allowing EXTERNAL access to the service running inside the Docker container.
 ES_PORT=1200

-# Set the Elasticsearch password
+# The password for Elasticsearch.
+# When updated, you must revise the `es.password` entry in service_conf.yaml accordingly.
 ELASTIC_PASSWORD=infini_rag_flow

-# Port to expose Kibana to the host
+# The port used to expose the Kibana service to the host machine,
+# allowing EXTERNAL access to the service running inside the Docker container.
 KIBANA_PORT=6601
+KIBANA_USER=rag_flow
+KIBANA_PASSWORD=infini_rag_flow

-# Update according to the available host memory (in bytes)
+# The maximum amount of the memory, in bytes, that a specific Docker container can use while running.
+# Update it according to the available memory in the host machine.
 MEM_LIMIT=8073741824

+# The hostname where the Infinity service is exposed
+INFINITY_HOST=infinity
+
+# Port to expose Infinity API to the host
+INFINITY_THRIFT_PORT=23817
+INFINITY_HTTP_PORT=23820
+INFINITY_PSQL_PORT=5432
+
+# The password for MySQL.
+# When updated, you must revise the `mysql.password` entry in service_conf.yaml.
 MYSQL_PASSWORD=infini_rag_flow
+# The hostname where the MySQL service is exposed
+MYSQL_HOST=mysql
+# The database of the MySQL service to use
+MYSQL_DBNAME=rag_flow
+# The port used to expose the MySQL service to the host machine,
+# allowing EXTERNAL access to the MySQL database running inside the Docker container.
 MYSQL_PORT=5455

-# Port to expose minio to the host
+# The hostname where the MinIO service is exposed
+MINIO_HOST=minio
+# The port used to expose the MinIO console interface to the host machine,
+# allowing EXTERNAL access to the web-based console running inside the Docker container.
 MINIO_CONSOLE_PORT=9001
+# The port used to expose the MinIO API service to the host machine,
+# allowing EXTERNAL access to the MinIO object storage service running inside the Docker container.
 MINIO_PORT=9000

+# The username for MinIO.
+# When updated, you must revise the `minio.user` entry in service_conf.yaml accordingly.
 MINIO_USER=rag_flow
+# The password for MinIO.
+# When updated, you must revise the `minio.password` entry in service_conf.yaml accordingly.
 MINIO_PASSWORD=infini_rag_flow

+# The hostname where the Redis service is exposed
+REDIS_HOST=redis
+# The port used to expose the Redis service to the host machine,
+# allowing EXTERNAL access to the Redis service running inside the Docker container.
 REDIS_PORT=6379
+# The password for Redis.
+# When updated, you must revise the `redis.password` entry in service_conf.yaml accordingly.
 REDIS_PASSWORD=infini_rag_flow

+# The port used to expose RAGFlow's HTTP API service to the host machine,
+# allowing EXTERNAL access to the service running inside the Docker container.
 SVR_HTTP_PORT=9380

-# the Docker image for the slim version
+# The RAGFlow Docker image to download.
+# Defaults to the dev-slim edition, which is the RAGFlow Docker image without embedding models.
 RAGFLOW_IMAGE=infiniflow/ragflow:dev-slim
+#
+# To download the RAGFlow Docker image with embedding models, uncomment the following line instead:
+# RAGFLOW_IMAGE=infiniflow/ragflow:dev
+#
+# The Docker image of the dev edition includes:
+# - Built-in embedding models:
+#   - BAAI/bge-large-zh-v1.5
+#   - BAAI/bge-reranker-v2-m3
+#   - maidalun1020/bce-embedding-base_v1
+#   - maidalun1020/bce-reranker-base_v1
+# - Embedding models that will be downloaded once you select them in the RAGFlow UI:
+#   - BAAI/bge-base-en-v1.5
+#   - BAAI/bge-large-en-v1.5
+#   - BAAI/bge-small-en-v1.5
+#   - BAAI/bge-small-zh-v1.5
+#   - jinaai/jina-embeddings-v2-base-en
+#   - jinaai/jina-embeddings-v2-small-en
+#   - nomic-ai/nomic-embed-text-v1.5
+#   - sentence-transformers/all-MiniLM-L6-v2
+#
+#

-# If you cannot download the RAGFlow Docker image, try uncommenting either of the following hub.docker.com mirrors:
+# If you cannot download the RAGFlow Docker image:
+#
+# - For the `dev-slim` edition, uncomment either of the following:
 # RAGFLOW_IMAGE=swr.cn-north-4.myhuaweicloud.com/infiniflow/ragflow:dev-slim
 # RAGFLOW_IMAGE=registry.cn-hangzhou.aliyuncs.com/infiniflow/ragflow:dev-slim

-# To download the RAGFlow Docker image with embedding models, modify the line above as follows:
-# RAGFLOW_IMAGE=infiniflow/ragflow:dev

-# This Docker image includes the following four models:
-# - BAAI/bge-large-zh-v1.5
-# - BAAI/bge-reranker-v2-m3
-# - maidalun1020/bce-embedding-base_v1
-# - maidalun1020/bce-reranker-base_v1

-# And the following models will be downloaded if you select them in the RAGFlow UI.
-# - BAAI/bge-base-en-v1.5
-# - BAAI/bge-large-en-v1.5
-# - BAAI/bge-small-en-v1.5
-# - BAAI/bge-small-zh-v1.5
-# - jinaai/jina-embeddings-v2-base-en
-# - jinaai/jina-embeddings-v2-small-en
-# - nomic-ai/nomic-embed-text-v1.5
-# - sentence-transformers/all-MiniLM-L6-v2

-# If you cannot download the RAGFlow Docker image, try uncommenting either of the following hub.docker.com mirrors:
+#
+# - For the `dev` edition, uncomment either of the following:
 # RAGFLOW_IMAGE=swr.cn-north-4.myhuaweicloud.com/infiniflow/ragflow:dev
 # RAGFLOW_IMAGE=registry.cn-hangzhou.aliyuncs.com/infiniflow/ragflow:dev


+# The local time zone.
 TIMEZONE='Asia/Shanghai'

-# If you cannot download the RAGFlow Docker image, try uncommenting the following huggingface.co mirror:
+# Uncomment the following line if you have limited access to huggingface.co:
 # HF_ENDPOINT=https://hf-mirror.com

-######## OS setup for ES ###########
-# sysctl vm.max_map_count
-# sudo sysctl -w vm.max_map_count=262144
-# Note that this change is not permanent and will be reset after a system reboot.
-# To make your change permanent, update /etc/sysctl.conf by:
-# Adding or modifying the following line:
-# vm.max_map_count=262144
+# Optimizations for MacOS
+# Uncomment the following line if your OS is MacOS:
+# MACOS=1

+# The maximum file size for each uploaded file, in bytes.
+# You can uncomment this line and update the value if you wish to change the 128M file size limit.
+# MAX_CONTENT_LENGTH=134217728
docker/README.md (190 changed lines)
@@ -1,80 +1,162 @@
 # README

-# Docker Environment Variable
+<details open>
+<summary><b>📗 Table of Contents</b></summary>

-Look into [.env](./.env), there're some important variables.
+- 🐳 [Docker Compose](#-docker-compose)
+- 🐬 [Docker environment variables](#-docker-environment-variables)
+- 🐋 [Service configuration](#-service-configuration)

-## MYSQL_PASSWORD
-The mysql password could be changed by this variable. But you need to change *mysql.password* in [service_conf.yaml](./service_conf.yaml) at the same time.
+</details>

+## 🐳 Docker Compose

-## MYSQL_PORT
-It refers to exported port number of mysql docker container, it's useful if you want to access the database outside the docker containers.
+- **docker-compose.yml**
+  Sets up environment for RAGFlow and its dependencies.
+- **docker-compose-base.yml**
+  Sets up environment for RAGFlow's base services: Elasticsearch, MySQL, MinIO, and Redis.

-## MINIO_USER
-It refers to user name of [Mino](https://github.com/minio/minio). The modification should be synchronous updating at minio.user of [service_conf.yaml](./service_conf.yaml).
+## 🐬 Docker environment variables

-## MINIO_PASSWORD
-It refers to user password of [Mino](https://github.com/minio/minio). The modification should be synchronous updating at minio.password of [service_conf.yaml](./service_conf.yaml).
+The [.env](./.env) file contains important environment variables for Docker.

+### Elasticsearch

-## SVR_HTTP_PORT
-It refers to The API server serving port.
+- `STACK_VERSION`
+  The version of Elasticsearch. Defaults to `8.11.3`
+- `ES_PORT`
+  The port used to expose the Elasticsearch service to the host machine, allowing **external** access to the service running inside the Docker container. Defaults to `1200`.
+- `ELASTIC_PASSWORD`
+  The password for Elasticsearch.

+### Kibana

-# Service Configuration
-[service_conf.yaml](./service_conf.yaml) is used by the *API server* and *task executor*. It's the most important configuration of the system.
+- `KIBANA_PORT`
+  The port used to expose the Kibana service to the host machine, allowing **external** access to the service running inside the Docker container. Defaults to `6601`.
+- `KIBANA_USER`
+  The username for Kibana. Defaults to `rag_flow`.
+- `KIBANA_PASSWORD`
+  The password for Kibana. Defaults to `infini_rag_flow`.

-## ragflow
+### Resource management

-### host
-The IP address used by the API server.
+- `MEM_LIMIT`
+  The maximum amount of the memory, in bytes, that *a specific* Docker container can use while running. Defaults to `8073741824`.

-### port
-The serving port of API server.
+### MySQL

-## mysql
+- `MYSQL_PASSWORD`
+  The password for MySQL.
+- `MYSQL_PORT`
+  The port used to expose the MySQL service to the host machine, allowing **external** access to the MySQL database running inside the Docker container. Defaults to `5455`.

-### name
-The database name in mysql used by this system.
+### MinIO

-### user
-The database user name.
+- `MINIO_CONSOLE_PORT`
+  The port used to expose the MinIO console interface to the host machine, allowing **external** access to the web-based console running inside the Docker container. Defaults to `9001`
+- `MINIO_PORT`
+  The port used to expose the MinIO API service to the host machine, allowing **external** access to the MinIO object storage service running inside the Docker container. Defaults to `9000`.
+- `MINIO_USER`
+  The username for MinIO.
+- `MINIO_PASSWORD`
+  The password for MinIO.

-### password
-The database password. The modification should be synchronous updating at *MYSQL_PASSWORD* in [.env](./.env).
+### Redis

-### port
-The serving port of mysql inside the container. The modification should be synchronous updating at [docker-compose.yml](./docker-compose.yml)
+- `REDIS_PORT`
+  The port used to expose the Redis service to the host machine, allowing **external** access to the Redis service running inside the Docker container. Defaults to `6379`.
+- `REDIS_PASSWORD`
+  The password for Redis.

-### max_connections
-The max database connection.
+### RAGFlow

-### stale_timeout
-The timeout duration in seconds.

-## minio

-### user
-The username of minio. The modification should be synchronous updating at *MINIO_USER* in [.env](./.env).

-### password
-The password of minio. The modification should be synchronous updating at *MINIO_PASSWORD* in [.env](./.env).

-### host
-The serving IP and port inside the docker container. This is not updating until changing the minio part in [docker-compose.yml](./docker-compose.yml)

-## user_default_llm
-Newly signed-up users use LLM configured by this part. Otherwise, user need to configure his own LLM in *setting*.
+- `SVR_HTTP_PORT`
+  The port used to expose RAGFlow's HTTP API service to the host machine, allowing **external** access to the service running inside the Docker container. Defaults to `9380`.
+- `RAGFLOW_IMAGE`
+  The Docker image edition. Available editions:

-### factory
-The LLM suppliers. "OpenAI", "Tongyi-Qianwen", "ZHIPU-AI", "Moonshot", "DeepSeek", "Baichuan", and "VolcEngine" are supported.
+  - `infiniflow/ragflow:dev-slim` (default): The RAGFlow Docker image without embedding models.
+  - `infiniflow/ragflow:dev`: The RAGFlow Docker image with embedding models including:
+    - Built-in embedding models:
+      - `BAAI/bge-large-zh-v1.5`
+      - `BAAI/bge-reranker-v2-m3`
+      - `maidalun1020/bce-embedding-base_v1`
+      - `maidalun1020/bce-reranker-base_v1`
+    - Embedding models that will be downloaded once you select them in the RAGFlow UI:
+      - `BAAI/bge-base-en-v1.5`
+      - `BAAI/bge-large-en-v1.5`
+      - `BAAI/bge-small-en-v1.5`
+      - `BAAI/bge-small-zh-v1.5`
+      - `jinaai/jina-embeddings-v2-base-en`
+      - `jinaai/jina-embeddings-v2-small-en`
+      - `nomic-ai/nomic-embed-text-v1.5`
+      - `sentence-transformers/all-MiniLM-L6-v2`
+
+> [!TIP]
+> If you cannot download the RAGFlow Docker image, try the following mirrors.
+>
+> - For the `dev-slim` edition:
+>   - `RAGFLOW_IMAGE=swr.cn-north-4.myhuaweicloud.com/infiniflow/ragflow:dev-slim` or,
+>   - `RAGFLOW_IMAGE=registry.cn-hangzhou.aliyuncs.com/infiniflow/ragflow:dev-slim`.
+> - For the `dev` edition:
+>   - `RAGFLOW_IMAGE=swr.cn-north-4.myhuaweicloud.com/infiniflow/ragflow:dev` or,
+>   - `RAGFLOW_IMAGE=registry.cn-hangzhou.aliyuncs.com/infiniflow/ragflow:dev`.

-### api_key
-The corresponding API key of your assigned LLM vendor.
+### Timezone

-## oauth
-This is OAuth configuration which allows your system using the third-party account to sign-up and sign-in to the system.
+- `TIMEZONE`
+  The local time zone. Defaults to `'Asia/Shanghai'`.

-### github
-Got to [Github](https://github.com/settings/developers), register new application, the *client_id* and *secret_key* will be given.
+### Hugging Face mirror site

+- `HF_ENDPOINT`
+  The mirror site for huggingface.co. It is disabled by default. You can uncomment this line if you have limited access to the primary Hugging Face domain.

+### MacOS

+- `MACOS`
+  Optimizations for MacOS. It is disabled by default. You can uncomment this line if your OS is MacOS.

+### Maximum file size

+- `MAX_CONTENT_LENGTH`
+  The maximum file size for each uploaded file, in bytes. You can uncomment this line if you wish to change the 128M file size limit.

+## 🐋 Service configuration

+[service_conf.yaml](./service_conf.yaml) specifies the system-level configuration for RAGFlow and is used by its API server and task executor. In a dockerized setup, this file is automatically created based on the [service_conf.yaml.template](./service_conf.yaml.template) file (replacing all environment variables by their values).
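The diff does not show the substitution mechanism itself; conceptually it is plain `${VAR}` expansion over the template. A hedged sketch of that idea (the `render_template` helper below is illustrative, not the project's implementation):

```python
import os
import re

def render_template(text: str) -> str:
    # Replace ${VAR} with the environment value, leaving unknown variables untouched.
    return re.sub(r"\$\{(\w+)\}",
                  lambda m: os.environ.get(m.group(1), m.group(0)),
                  text)

os.environ["MYSQL_PASSWORD"] = "infini_rag_flow"
print(render_template("password: '${MYSQL_PASSWORD}'"))  # password: 'infini_rag_flow'
```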
+- `ragflow`
+  - `host`: The API server's IP address inside the Docker container. Defaults to `0.0.0.0`.
+  - `port`: The API server's serving port inside the Docker container. Defaults to `9380`.
+
+- `mysql`
+  - `name`: The MySQL database name. Defaults to `rag_flow`.
+  - `user`: The username for MySQL.
+  - `password`: The password for MySQL. When updated, you must revise the `MYSQL_PASSWORD` variable in [.env](./.env) accordingly.
+  - `port`: The MySQL serving port inside the Docker container. Defaults to `3306`.
+  - `max_connections`: The maximum number of concurrent connections to the MySQL database. Defaults to `100`.
+  - `stale_timeout`: Timeout in seconds.
+
+- `minio`
+  - `user`: The username for MinIO. When updated, you must revise the `MINIO_USER` variable in [.env](./.env) accordingly.
+  - `password`: The password for MinIO. When updated, you must revise the `MINIO_PASSWORD` variable in [.env](./.env) accordingly.
+  - `host`: The MinIO serving IP *and* port inside the Docker container. Defaults to `minio:9000`.
+
+- `oauth`
+  The OAuth configuration for signing up or signing in to RAGFlow using a third-party account. It is disabled by default. To enable this feature, uncomment the corresponding lines in **service_conf.yaml.template**.
+  - `github`: The GitHub authentication settings for your application. Visit the [Github Developer Settings page](https://github.com/settings/developers) to obtain your client_id and secret_key.
+
+- `user_default_llm`
+  The default LLM to use for a new RAGFlow user. It is disabled by default. To enable this feature, uncomment the corresponding lines in **service_conf.yaml.template**.
+  - `factory`: The LLM supplier. Available options:
+    - `"OpenAI"`
+    - `"DeepSeek"`
+    - `"Moonshot"`
+    - `"Tongyi-Qianwen"`
+    - `"VolcEngine"`
+    - `"ZHIPU-AI"`
+  - `api_key`: The API key for the specified LLM. You will need to apply for your model API key online.
+
+> [!TIP]
+> If you do not set the default LLM here, configure the default LLM on the **Settings** page in the RAGFlow UI.
@@ -1,11 +1,14 @@
 services:
   es01:
     container_name: ragflow-es-01
+    profiles:
+      - elasticsearch
     image: docker.elastic.co/elasticsearch/elasticsearch:${STACK_VERSION}
     volumes:
       - esdata01:/usr/share/elasticsearch/data
     ports:
       - ${ES_PORT}:9200
+    env_file: .env
     environment:
       - node.name=es01
       - ELASTIC_PASSWORD=${ELASTIC_PASSWORD}
@@ -27,12 +30,42 @@ services:
       retries: 120
     networks:
       - ragflow
-    restart: always
+    restart: on-failure

+  infinity:
+    container_name: ragflow-infinity
+    profiles:
+      - infinity
+    image: infiniflow/infinity:v0.5.0-dev5
+    volumes:
+      - infinity_data:/var/infinity
+    ports:
+      - ${INFINITY_THRIFT_PORT}:23817
+      - ${INFINITY_HTTP_PORT}:23820
+      - ${INFINITY_PSQL_PORT}:5432
+    env_file: .env
+    environment:
+      - TZ=${TIMEZONE}
+    mem_limit: ${MEM_LIMIT}
+    ulimits:
+      nofile:
+        soft: 500000
+        hard: 500000
+    networks:
+      - ragflow
+    healthcheck:
+      test: ["CMD", "curl", "http://localhost:23820/admin/node/current"]
+      interval: 10s
+      timeout: 10s
+      retries: 120
+    restart: on-failure
+
+
   mysql:
     # mysql:5.7 linux/arm64 image is unavailable.
     image: mysql:8.0.39
     container_name: ragflow-mysql
     env_file: .env
     environment:
       - MYSQL_ROOT_PASSWORD=${MYSQL_PASSWORD}
       - TZ=${TIMEZONE}
@@ -55,7 +88,7 @@ services:
       interval: 10s
       timeout: 10s
       retries: 3
-    restart: always
+    restart: on-failure

   minio:
     image: quay.io/minio/minio:RELEASE.2023-12-20T01-00-02Z
@@ -64,6 +97,7 @@ services:
     ports:
       - ${MINIO_PORT}:9000
       - ${MINIO_CONSOLE_PORT}:9001
+    env_file: .env
     environment:
       - MINIO_ROOT_USER=${MINIO_USER}
       - MINIO_ROOT_PASSWORD=${MINIO_PASSWORD}
@@ -72,25 +106,28 @@ services:
       - minio_data:/data
     networks:
       - ragflow
-    restart: always
+    restart: on-failure

   redis:
-    image: redis:7.2.4
+    image: valkey/valkey:8
     container_name: ragflow-redis
     command: redis-server --requirepass ${REDIS_PASSWORD} --maxmemory 128mb --maxmemory-policy allkeys-lru
+    env_file: .env
     ports:
       - ${REDIS_PORT}:6379
     volumes:
       - redis_data:/data
     networks:
       - ragflow
-    restart: always
+    restart: on-failure



 volumes:
   esdata01:
     driver: local
+  infinity_data:
+    driver: local
   mysql_data:
     driver: local
   minio_data:

@@ -16,7 +16,6 @@ services:
       - 80:80
       - 443:443
     volumes:
-      - ./service_conf.yaml:/ragflow/conf/service_conf.yaml
       - ./ragflow-logs:/ragflow/logs
       - ./nginx/ragflow.conf:/etc/nginx/conf.d/ragflow.conf
       - ./nginx/proxy.conf:/etc/nginx/proxy.conf
Some files were not shown because too many files have changed in this diff.