mirror of
https://github.com/infiniflow/ragflow.git
synced 2025-12-08 12:32:30 +08:00
### What problem does this PR solve? - The original rag/nlp/rag_tokenizer.py was moved into Infinity and infinity-sdk via https://github.com/infiniflow/infinity/pull/3117. The new rag/nlp/rag_tokenizer.py imports rag_tokenizer from infinity and inherits from rag_tokenizer.RagTokenizer. - Bump infinity to 0.6.8. ### Type of change - [x] Refactoring
300 lines
7.7 KiB
YAML
300 lines
7.7 KiB
YAML
services:
  # ---------------------------------------------------------------------------
  # Elasticsearch document store (enabled via the "elasticsearch" profile).
  # ---------------------------------------------------------------------------
  es01:
    profiles:
      - elasticsearch
    image: elasticsearch:${STACK_VERSION}
    volumes:
      - esdata01:/usr/share/elasticsearch/data
    ports:
      # Port mappings are quoted so they always parse as strings
      # (Compose best practice; avoids YAML number-parsing surprises).
      - "${ES_PORT}:9200"
    env_file: .env
    environment:
      - node.name=es01
      - ELASTIC_PASSWORD=${ELASTIC_PASSWORD}
      - bootstrap.memory_lock=false
      - discovery.type=single-node
      - xpack.security.enabled=true
      - xpack.security.http.ssl.enabled=false
      - xpack.security.transport.ssl.enabled=false
      # Lowered disk watermarks so ES keeps allocating shards on small disks.
      - cluster.routing.allocation.disk.watermark.low=5gb
      - cluster.routing.allocation.disk.watermark.high=3gb
      - cluster.routing.allocation.disk.watermark.flood_stage=2gb
    mem_limit: ${MEM_LIMIT}
    ulimits:
      memlock:
        soft: -1
        hard: -1
    healthcheck:
      test: ["CMD-SHELL", "curl http://localhost:9200"]
      interval: 10s
      timeout: 10s
      retries: 120
    networks:
      - ragflow
    restart: on-failure
# -----------------------------------------------------------------------------
# OpenSearch alternative document store ("opensearch" profile).
# -----------------------------------------------------------------------------
  opensearch01:
    profiles:
      - opensearch
    image: hub.icert.top/opensearchproject/opensearch:2.19.1
    volumes:
      - osdata01:/usr/share/opensearch/data
    ports:
      # Quoted string form for the port mapping (Compose best practice).
      - "${OS_PORT}:9201"
    env_file: .env
    environment:
      - node.name=opensearch01
      - OPENSEARCH_PASSWORD=${OPENSEARCH_PASSWORD}
      - OPENSEARCH_INITIAL_ADMIN_PASSWORD=${OPENSEARCH_PASSWORD}
      - bootstrap.memory_lock=false
      - discovery.type=single-node
      - plugins.security.disabled=false
      - plugins.security.ssl.http.enabled=false
      - plugins.security.ssl.transport.enabled=true
      # Lowered disk watermarks so shards keep allocating on small disks.
      - cluster.routing.allocation.disk.watermark.low=5gb
      - cluster.routing.allocation.disk.watermark.high=3gb
      - cluster.routing.allocation.disk.watermark.flood_stage=2gb
      # Serve HTTP on 9201 to avoid clashing with Elasticsearch's 9200.
      - http.port=9201
    mem_limit: ${MEM_LIMIT}
    ulimits:
      memlock:
        soft: -1
        hard: -1
    healthcheck:
      test: ["CMD-SHELL", "curl http://localhost:9201"]
      interval: 10s
      timeout: 10s
      retries: 120
    networks:
      - ragflow
    restart: on-failure
# -----------------------------------------------------------------------------
# Infinity vector database ("infinity" profile).
# -----------------------------------------------------------------------------
  infinity:
    profiles:
      - infinity
    image: infiniflow/infinity:v0.6.8
    volumes:
      - infinity_data:/var/infinity
      - ./infinity_conf.toml:/infinity_conf.toml
    command: ["-f", "/infinity_conf.toml"]
    ports:
      # Quoted string form for port mappings (Compose best practice).
      - "${INFINITY_THRIFT_PORT}:23817"
      - "${INFINITY_HTTP_PORT}:23820"
      - "${INFINITY_PSQL_PORT}:5432"
    env_file: .env
    mem_limit: ${MEM_LIMIT}
    ulimits:
      # Infinity opens many files; raise the descriptor limit well above defaults.
      nofile:
        soft: 500000
        hard: 500000
    networks:
      - ragflow
    healthcheck:
      test: ["CMD", "curl", "http://localhost:23820/admin/node/current"]
      interval: 10s
      timeout: 10s
      retries: 120
    restart: on-failure
# -----------------------------------------------------------------------------
# OceanBase relational store ("oceanbase" profile).
# -----------------------------------------------------------------------------
  oceanbase:
    profiles:
      - oceanbase
    image: oceanbase/oceanbase-ce:4.4.1.0-100000032025101610
    volumes:
      - ./oceanbase/data:/root/ob
      - ./oceanbase/conf:/root/.obd/cluster
      - ./oceanbase/init.d:/root/boot/init.d
    ports:
      # Quoted string form for the port mapping (Compose best practice).
      - "${OCEANBASE_PORT:-2881}:2881"
    env_file: .env
    environment:
      - MODE=normal
      - OB_SERVER_IP=127.0.0.1
    mem_limit: ${MEM_LIMIT}
    healthcheck:
      # The probe doubles as initialization: it creates the document database
      # on first successful connection (idempotent via IF NOT EXISTS).
      test: ["CMD-SHELL", 'obclient -h127.0.0.1 -P2881 -uroot@${OB_TENANT_NAME:-ragflow} -p${OB_TENANT_PASSWORD:-infini_rag_flow} -e "CREATE DATABASE IF NOT EXISTS ${OCEANBASE_DOC_DBNAME:-ragflow_doc};"']
      interval: 10s
      retries: 30
      start_period: 30s
      timeout: 10s
    networks:
      - ragflow
    restart: on-failure
# -----------------------------------------------------------------------------
# Sandbox executor manager ("sandbox" profile). Runs privileged and mounts the
# Docker socket so it can spawn executor containers on the host daemon.
# -----------------------------------------------------------------------------
  sandbox-executor-manager:
    profiles:
      - sandbox
    # NOTE(review): this default uses `-` (substitute only when unset) while most
    # other defaults in this file use `:-` (also when empty) — confirm intended.
    image: ${SANDBOX_EXECUTOR_MANAGER_IMAGE-infiniflow/sandbox-executor-manager:latest}
    privileged: true
    ports:
      # Quoted string form for the port mapping (Compose best practice).
      - "${SANDBOX_EXECUTOR_MANAGER_PORT-9385}:9385"
    env_file: .env
    volumes:
      - /var/run/docker.sock:/var/run/docker.sock
    networks:
      - ragflow
    security_opt:
      - no-new-privileges:true
    environment:
      - SANDBOX_EXECUTOR_MANAGER_POOL_SIZE=${SANDBOX_EXECUTOR_MANAGER_POOL_SIZE:-3}
      - SANDBOX_BASE_PYTHON_IMAGE=${SANDBOX_BASE_PYTHON_IMAGE:-infiniflow/sandbox-base-python:latest}
      - SANDBOX_BASE_NODEJS_IMAGE=${SANDBOX_BASE_NODEJS_IMAGE:-infiniflow/sandbox-base-nodejs:latest}
      - SANDBOX_ENABLE_SECCOMP=${SANDBOX_ENABLE_SECCOMP:-false}
      - SANDBOX_MAX_MEMORY=${SANDBOX_MAX_MEMORY:-256m}
      - SANDBOX_TIMEOUT=${SANDBOX_TIMEOUT:-10s}
    healthcheck:
      test: ["CMD", "curl", "http://localhost:9385/healthz"]
      interval: 10s
      timeout: 10s
      retries: 120
    restart: on-failure
# -----------------------------------------------------------------------------
# MySQL metadata store (always on — no profile).
# -----------------------------------------------------------------------------
  mysql:
    # mysql:5.7 linux/arm64 image is unavailable.
    image: mysql:8.0.39
    env_file: .env
    environment:
      - MYSQL_ROOT_PASSWORD=${MYSQL_PASSWORD}
    command:
      --max_connections=1000
      --character-set-server=utf8mb4
      --collation-server=utf8mb4_unicode_ci
      --default-authentication-plugin=mysql_native_password
      --tls_version="TLSv1.2,TLSv1.3"
      --init-file /data/application/init.sql
      --binlog_expire_logs_seconds=604800
    ports:
      # Quoted string form for the port mapping (Compose best practice).
      - "${MYSQL_PORT}:3306"
    volumes:
      - mysql_data:/var/lib/mysql
      - ./init.sql:/data/application/init.sql
    networks:
      - ragflow
    healthcheck:
      test: ["CMD", "mysqladmin", "ping", "-uroot", "-p${MYSQL_PASSWORD}"]
      interval: 10s
      timeout: 10s
      retries: 120
    restart: on-failure
# -----------------------------------------------------------------------------
# MinIO object storage (always on — no profile).
# -----------------------------------------------------------------------------
  minio:
    image: quay.io/minio/minio:RELEASE.2025-06-13T11-33-47Z
    command: ["server", "--console-address", ":9001", "/data"]
    ports:
      # Quoted string form for port mappings (Compose best practice).
      - "${MINIO_PORT}:9000"
      - "${MINIO_CONSOLE_PORT}:9001"
    env_file: .env
    environment:
      - MINIO_ROOT_USER=${MINIO_USER}
      - MINIO_ROOT_PASSWORD=${MINIO_PASSWORD}
    volumes:
      - minio_data:/data
    networks:
      - ragflow
    restart: on-failure
    healthcheck:
      test: ["CMD", "curl", "-f", "http://localhost:9000/minio/health/live"]
      interval: 10s
      timeout: 10s
      retries: 120
# -----------------------------------------------------------------------------
# Redis-compatible cache/queue, served by Valkey (always on — no profile).
# -----------------------------------------------------------------------------
  redis:
    # swr.cn-north-4.myhuaweicloud.com/ddn-k8s/docker.io/valkey/valkey:8
    image: valkey/valkey:8
    command: ["redis-server", "--requirepass", "${REDIS_PASSWORD}", "--maxmemory", "128mb", "--maxmemory-policy", "allkeys-lru"]
    env_file: .env
    ports:
      # Quoted string form for the port mapping (Compose best practice).
      - "${REDIS_PORT}:6379"
    volumes:
      - redis_data:/data
    networks:
      - ragflow
    restart: on-failure
    healthcheck:
      test: ["CMD", "redis-cli", "-a", "${REDIS_PASSWORD}", "ping"]
      interval: 10s
      timeout: 10s
      retries: 120
# -----------------------------------------------------------------------------
# Text-embeddings-inference server, CPU image ("tei-cpu" profile).
# Shares the "tei" hostname with tei-gpu; enable only one of the two profiles.
# -----------------------------------------------------------------------------
  tei-cpu:
    profiles:
      - tei-cpu
    image: ${TEI_IMAGE_CPU}
    hostname: tei
    ports:
      # Quoted string form for the port mapping (Compose best practice).
      - "${TEI_PORT-6380}:80"
    env_file: .env
    networks:
      - ragflow
    command: ["--model-id", "/data/${TEI_MODEL}", "--auto-truncate"]
    restart: on-failure
# -----------------------------------------------------------------------------
# Text-embeddings-inference server, GPU image ("tei-gpu" profile).
# Shares the "tei" hostname with tei-cpu; enable only one of the two profiles.
# -----------------------------------------------------------------------------
  tei-gpu:
    profiles:
      - tei-gpu
    image: ${TEI_IMAGE_GPU}
    hostname: tei
    ports:
      # Quoted string form for the port mapping (Compose best practice).
      - "${TEI_PORT-6380}:80"
    env_file: .env
    networks:
      - ragflow
    command: ["--model-id", "/data/${TEI_MODEL}", "--auto-truncate"]
    deploy:
      resources:
        reservations:
          # Reserve all NVIDIA GPUs on the host for this container.
          devices:
            - driver: nvidia
              count: all
              capabilities: [gpu]
    restart: on-failure
# -----------------------------------------------------------------------------
# Kibana UI for Elasticsearch ("kibana" profile).
# -----------------------------------------------------------------------------
  kibana:
    profiles:
      - kibana
    image: kibana:${STACK_VERSION}
    ports:
      # Quoted string form for the port mapping (Compose best practice).
      - "${KIBANA_PORT-5601}:5601"
    env_file: .env
    volumes:
      - kibana_data:/usr/share/kibana/data
    depends_on:
      es01:
        # NOTE(review): es01 defines a healthcheck, so `service_healthy` would
        # gate startup on ES readiness; kept as `service_started` to preserve
        # current behavior — confirm intended ordering.
        condition: service_started
    healthcheck:
      test: ["CMD", "curl", "-f", "http://localhost:5601/api/status"]
      interval: 10s
      timeout: 10s
      retries: 120
    networks:
      - ragflow
    restart: on-failure
# Named volumes backing the stateful services above.
volumes:
  esdata01:
    driver: local
  osdata01:
    driver: local
  infinity_data:
    driver: local
  # NOTE(review): ob_data and tei_data are not referenced by any service in
  # this file (oceanbase uses bind mounts; tei services mount nothing) —
  # presumably kept for overlays or future use; verify before removing.
  ob_data:
    driver: local
  mysql_data:
    driver: local
  minio_data:
    driver: local
  redis_data:
    driver: local
  tei_data:
    driver: local
  kibana_data:
    driver: local
# Shared bridge network joined by every service in the stack.
networks:
  ragflow:
    driver: bridge