ragflow/docker/.env

# ------------------------------
# Docker env vars for specifying the doc engine type at startup
# (the corresponding Docker Compose profile is selected based on
# the doc engine type)
# ------------------------------
# The type of doc engine to use.
# Available options:
# - `elasticsearch` (default)
# - `infinity` (https://github.com/infiniflow/infinity)
# - `opensearch` (https://github.com/opensearch-project/OpenSearch)
DOC_ENGINE=${DOC_ENGINE:-elasticsearch}
# The device on which DeepDoc inference runs.
# Available options:
# - `cpu` (default)
# - `gpu`
DEVICE=${DEVICE:-cpu}
COMPOSE_PROFILES=${DOC_ENGINE},${DEVICE}
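# For example, to run the Infinity doc engine with GPU-based DeepDoc inference,
# you could set the following before starting Docker Compose, which resolves
# COMPOSE_PROFILES to `infinity,gpu`:
# DOC_ENGINE=infinity
# DEVICE=gpu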
# The version of Elasticsearch.
STACK_VERSION=8.11.3
# The hostname where the Elasticsearch service is exposed
ES_HOST=es01
# The port used to expose the Elasticsearch service to the host machine,
# allowing EXTERNAL access to the service running inside the Docker container.
ES_PORT=1200
# The password for Elasticsearch.
ELASTIC_PASSWORD=infini_rag_flow
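# As a quick sanity check (a sketch, assuming the default compose setup serves
# Elasticsearch over plain HTTP with the built-in `elastic` user), you can verify
# that Elasticsearch is reachable from the host with:
# curl -u elastic:infini_rag_flow http://localhost:1200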
# The port used to expose the OpenSearch service to the host machine;
# must be different from ES_PORT.
OS_PORT=1201
# The hostname where the OpenSearch service is exposed
OS_HOST=opensearch01
# The password for OpenSearch.
# Must contain at least one uppercase letter, one lowercase letter, one digit, and one special character.
OPENSEARCH_PASSWORD=infini_rag_flow_OS_01
# The port used to expose the Kibana service to the host machine,
# allowing EXTERNAL access to the service running inside the Docker container.
# To enable Kibana, you need to:
# 1. Ensure that COMPOSE_PROFILES includes kibana, for example: COMPOSE_PROFILES=${COMPOSE_PROFILES},kibana
# 2. Comment out or delete the following settings of the es service in docker-compose-base.yml: xpack.security.enabled, xpack.security.http.ssl.enabled, xpack.security.transport.ssl.enabled (for details: https://www.elastic.co/docs/deploy-manage/security/self-auto-setup#stack-existing-settings-detected)
# 3. Adjust es.hosts in conf/service_config.yaml or docker/service_conf.yaml.template to 'https://localhost:1200'
# 4. After Elasticsearch starts successfully, generate the Kibana enrollment token inside the es container with `bin/elasticsearch-create-enrollment-token -s kibana` (see the example command after KIBANA_PORT below); Kibana can then be used normally.
KIBANA_PORT=6601
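# Example for step 4 above (the container name is an assumption; check `docker ps`
# for the actual name on your host):
# docker exec -it ragflow-es-01 bin/elasticsearch-create-enrollment-token -s kibana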
# The maximum amount of memory, in bytes, that a specific Docker container can use while running.
# Update it according to the available memory in the host machine.
MEM_LIMIT=8073741824
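# For example, to allow roughly 16 GiB instead, set:
# MEM_LIMIT=17179869184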
# The hostname where the Infinity service is exposed
INFINITY_HOST=infinity
# The ports used to expose the Infinity Thrift, HTTP, and PostgreSQL APIs to the host machine.
INFINITY_THRIFT_PORT=23817
INFINITY_HTTP_PORT=23820
INFINITY_PSQL_PORT=5432
# The password for MySQL.
MYSQL_PASSWORD=infini_rag_flow
# The hostname where the MySQL service is exposed
MYSQL_HOST=mysql
# The database of the MySQL service to use
MYSQL_DBNAME=rag_flow
# The port used to expose the MySQL service to the host machine,
# allowing EXTERNAL access to the MySQL database running inside the Docker container.
MYSQL_PORT=5455
# The maximum size of communication packets sent to the MySQL server
MYSQL_MAX_PACKET=1073741824
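# As a quick connectivity check (a sketch, assuming the compose file maps MYSQL_PORT
# to MySQL's internal 3306 and the `root` account uses MYSQL_PASSWORD), you can reach
# the database from the host with:
# mysql -h 127.0.0.1 -P 5455 -u root -p rag_flow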
# The hostname where the MinIO service is exposed
MINIO_HOST=minio
# The port used to expose the MinIO console interface to the host machine,
# allowing EXTERNAL access to the web-based console running inside the Docker container.
MINIO_CONSOLE_PORT=9001
# The port used to expose the MinIO API service to the host machine,
# allowing EXTERNAL access to the MinIO object storage service running inside the Docker container.
MINIO_PORT=9000
# The username for MinIO.
# When updated, you must revise the `minio.user` entry in service_conf.yaml accordingly.
MINIO_USER=rag_flow
# The password for MinIO.
# When updated, you must revise the `minio.password` entry in service_conf.yaml accordingly.
MINIO_PASSWORD=infini_rag_flow
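# With the defaults above, the MinIO web console should be reachable from the host at
# http://localhost:9001; log in with MINIO_USER / MINIO_PASSWORD.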
# The hostname where the Redis service is exposed
REDIS_HOST=redis
# The port used to expose the Redis service to the host machine,
# allowing EXTERNAL access to the Redis service running inside the Docker container.
REDIS_PORT=6379
# The password for Redis.
REDIS_PASSWORD=infini_rag_flow
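# As a quick connectivity check from the host (expects a PONG reply):
# redis-cli -h 127.0.0.1 -p 6379 -a infini_rag_flow ping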
# The ports used to expose RAGFlow's services to the host machine,
# allowing EXTERNAL access to the services running inside the Docker container.
SVR_WEB_HTTP_PORT=80
SVR_WEB_HTTPS_PORT=443
SVR_HTTP_PORT=9380
ADMIN_SVR_HTTP_PORT=9381
SVR_MCP_PORT=9382
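# With the defaults above, the web UI is reachable at http://<host-ip> (port 80, HTTPS on 443),
# while the remaining ports expose the backend HTTP API server, the admin server, and the MCP server.
# Adjust any of them if they clash with services already running on the host.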
# The RAGFlow Docker image to download. Note that images from v0.22 onwards no longer include embedding models.
# Defaults to the v0.21.1-slim edition, which is the RAGFlow Docker image without embedding models.
RAGFLOW_IMAGE=infiniflow/ragflow:v0.21.1-slim
# RAGFLOW_IMAGE=infiniflow/ragflow:v0.21.1
# The Docker image of the v0.21.1 edition includes built-in embedding models:
# - BAAI/bge-large-zh-v1.5
# - maidalun1020/bce-embedding-base_v1
#
# If you cannot download the RAGFlow Docker image, try one of the following mirrors:
# RAGFLOW_IMAGE=swr.cn-north-4.myhuaweicloud.com/infiniflow/ragflow:v0.21.1
# RAGFLOW_IMAGE=registry.cn-hangzhou.aliyuncs.com/infiniflow/ragflow:v0.21.1
#
# For the `nightly` edition, uncomment either of the following:
# RAGFLOW_IMAGE=swr.cn-north-4.myhuaweicloud.com/infiniflow/ragflow:nightly
# RAGFLOW_IMAGE=registry.cn-hangzhou.aliyuncs.com/infiniflow/ragflow:nightly
# The embedding service image, model, host, and port.
# Important: To enable the embedding service, you need to uncomment one of the following two lines:
# COMPOSE_PROFILES=${COMPOSE_PROFILES},tei-cpu
# COMPOSE_PROFILES=${COMPOSE_PROFILES},tei-gpu
# The embedding service image:
TEI_IMAGE_CPU=infiniflow/text-embeddings-inference:cpu-1.8
TEI_IMAGE_GPU=infiniflow/text-embeddings-inference:1.8
# The embedding service model:
# Available options:
# - `Qwen/Qwen3-Embedding-0.6B` (default, requires 25GB RAM/vRAM to load)
# - `BAAI/bge-m3` (requires 21GB RAM/vRAM to load)
# - `BAAI/bge-small-en-v1.5` (requires 1.2GB RAM/vRAM to load)
TEI_MODEL=${TEI_MODEL:-Qwen/Qwen3-Embedding-0.6B}
# The hostname where the TEI embedding service is exposed
TEI_HOST=tei
# The port used to expose the TEI service to the host machine,
# allowing EXTERNAL access to the service running inside the Docker container.
TEI_PORT=6380
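# For example, to run the CPU embedding service with the lightest model, you could set:
# COMPOSE_PROFILES=${COMPOSE_PROFILES},tei-cpu
# TEI_MODEL=BAAI/bge-small-en-v1.5
# Once the container is up, a quick check against the standard text-embeddings-inference
# /embed endpoint (assuming TEI_PORT maps to the service's HTTP port) might look like:
# curl http://localhost:6380/embed -H 'Content-Type: application/json' -d '{"inputs": "hello"}'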
# The local time zone.
TZ=Asia/Shanghai
# Uncomment the following line if you have limited access to huggingface.co:
# HF_ENDPOINT=https://hf-mirror.com
# Optimizations for macOS
# Uncomment the following line if your operating system is macOS:
# MACOS=1
# The maximum file size limit (in bytes) for each upload to your knowledge base or File Management.
# To change the 1GB file size limit, uncomment the line below and update as needed.
# MAX_CONTENT_LENGTH=1073741824
# After updating, ensure `client_max_body_size` in nginx/nginx.conf is updated accordingly.
# Note that neither `MAX_CONTENT_LENGTH` nor `client_max_body_size` sets the maximum size for files uploaded to an agent.
# See https://ragflow.io/docs/dev/begin_component for details.
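# For example, to raise the limit to 2 GiB:
# MAX_CONTENT_LENGTH=2147483648
# and in nginx/nginx.conf:
# client_max_body_size 2048m;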
# Controls how many documents are processed in a single batch.
# Defaults to 4 if DOC_BULK_SIZE is not explicitly set.
DOC_BULK_SIZE=${DOC_BULK_SIZE:-4}
# Defines the number of items to process per batch when generating embeddings.
# Defaults to 16 if EMBEDDING_BATCH_SIZE is not set in the environment.
EMBEDDING_BATCH_SIZE=${EMBEDDING_BATCH_SIZE:-16}
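# For example, on a host with ample memory and a fast embedding backend, you might raise both:
# DOC_BULK_SIZE=8
# EMBEDDING_BATCH_SIZE=32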
# Log levels for RAGFlow's own and imported packages.
# Available levels:
# - `DEBUG`
# - `INFO` (default)
# - `WARNING`
# - `ERROR`
# For example, the following line changes the log level of `ragflow.es_conn` to `DEBUG`:
# LOG_LEVELS=ragflow.es_conn=DEBUG
# Alibaba Cloud (Aliyun) OSS configuration
# STORAGE_IMPL=OSS
# ACCESS_KEY=xxx
# SECRET_KEY=eee
# ENDPOINT=http://oss-cn-hangzhou.aliyuncs.com
# REGION=cn-hangzhou
# BUCKET=ragflow65536
# A user registration switch:
# - Enable registration: 1
# - Disable registration: 0
REGISTER_ENABLED=1
# Important: To enable the sandbox, you need to uncomment the following two lines:
# SANDBOX_ENABLED=1
# COMPOSE_PROFILES=${COMPOSE_PROFILES},sandbox
# Sandbox settings
# Double-check that `sandbox-executor-manager` has been added to your `/etc/hosts` (see the example entry at the end of this section).
# Pull the required base images before running:
# docker pull infiniflow/sandbox-base-nodejs:latest
# docker pull infiniflow/sandbox-base-python:latest
# Our default sandbox environments include:
# - Node.js base image: includes axios
# - Python base image: includes requests, numpy, and pandas
# Specify custom executor images below if you're using non-default environments.
# SANDBOX_HOST=sandbox-executor-manager
# SANDBOX_EXECUTOR_MANAGER_IMAGE=infiniflow/sandbox-executor-manager:latest
# SANDBOX_EXECUTOR_MANAGER_POOL_SIZE=3
# SANDBOX_BASE_PYTHON_IMAGE=infiniflow/sandbox-base-python:latest
# SANDBOX_BASE_NODEJS_IMAGE=infiniflow/sandbox-base-nodejs:latest
# SANDBOX_EXECUTOR_MANAGER_PORT=9385
# SANDBOX_ENABLE_SECCOMP=false
# SANDBOX_MAX_MEMORY=256m # b, k, m, g
# SANDBOX_TIMEOUT=10s # s, m, 1m30s
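# Example `/etc/hosts` entry when the sandbox executor manager runs on the same host
# (a sketch; adjust the IP if it runs elsewhere):
# 127.0.0.1 sandbox-executor-manager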
# Enable the Docling and MinerU document parsers
USE_DOCLING=false
USE_MINERU=false
# PPTX support: runs .NET-based components in globalization-invariant mode (no ICU libraries required)
DOTNET_SYSTEM_GLOBALIZATION_INVARIANT=1