update qwen3 tts docker

This commit is contained in:
yourwilliam
2026-01-29 14:14:24 +08:00
commit dc6bd6b17f
13 changed files with 338 additions and 0 deletions

1
.env Normal file
View File

@ -0,0 +1 @@
# Image tag shared by all docker-compose files in this repo; each compose
# file falls back to 1.2.1 via ${QWEN_TTS_VERSION:-1.2.1} if this is unset.
QWEN_TTS_VERSION=1.2.1

4
.gitignore vendored Normal file
View File

@ -0,0 +1,4 @@
# Exported/saved Docker image archives.
*.tar
# Local model directories (mounted into the containers at run time; kept out
# of version control — see README).
/qwen3-tts-base/
/qwen3-tts-customer/
/qwen3-tts-design/

27
README.md Normal file
View File

@ -0,0 +1,27 @@
qwen3-tts 项目
本项目用于构建和运行 qwen3-tts 相关服务,包含多个不同用途的镜像:
- qwen3-tts-base:基础 TTS 服务
- qwen3-tts-customer:面向客户场景的 TTS 服务
- qwen3-tts-design:面向配音设计场景的 TTS 服务
仓库中包含若干 docker-compose 配置文件,可按需选择启动:
- docker-compose.yml:默认组合
- docker-compose-base.yml:仅基础服务
- docker-compose-customer.yml:客户版服务
- docker-compose-design.yml:配音设计版服务
基本使用
1. 确保已安装 Docker 与 Docker Compose
2. 在仓库根目录执行:
docker compose -f docker-compose.yml up -d
3. 根据需要选择其他 docker-compose 文件替换上面的配置文件名称
注意
- 模型目录 qwen3-tts-base、qwen3-tts-customer、qwen3-tts-design 以及生成的 tar 镜像文件已通过 .gitignore 排除,不会被提交到仓库

1
docker-build/.env Normal file
View File

@ -0,0 +1 @@
# Image tag used by the docker-build compose files; they fall back to 1.2.1
# via ${QWEN_TTS_VERSION:-1.2.1} if this is unset.
QWEN_TTS_VERSION=1.2.1

10
docker-build/Dockerfile Normal file
View File

@ -0,0 +1,10 @@
# Runtime image for the qwen3-tts services. Models are NOT baked in — the
# docker-compose files bind-mount them read-only at run time — so this image
# only carries the CUDA/PyTorch runtime plus audio tooling.
FROM pytorch/pytorch:2.4.0-cuda12.4-cudnn9-runtime

WORKDIR /app

# Audio/codec tooling (ffmpeg, sox) needed by the TTS pipeline, plus git.
# Cleaning the apt lists in the same layer keeps the image small.
RUN apt-get update && apt-get install -y --no-install-recommends \
    git ffmpeg sox libsox-fmt-all \
    && rm -rf /var/lib/apt/lists/*

# --no-cache-dir keeps pip's download cache out of the image layer.
# The Aliyun mirror is used for faster package downloads in-region.
RUN pip install -U --no-cache-dir pip \
    && pip install -U --no-cache-dir qwen-tts soundfile -i https://mirrors.aliyun.com/pypi/simple/

View File

@ -0,0 +1,23 @@
# docker-build variant of the base (voice-clone) service: builds the image
# from the local Dockerfile and serves the Base model on host port 8002.
services:
  qwen3-tts-clone:
    image: qwen3-tts-clone:${QWEN_TTS_VERSION:-1.2.1}
    build: .
    container_name: qwen3-tts-clone
    restart: unless-stopped
    gpus: all
    environment:
      # Keep all Hugging Face caches under the shared /cache mount.
      HF_HOME: /cache/hf
      HUGGINGFACE_HUB_CACHE: /cache/hf/hub
      TRANSFORMERS_CACHE: /cache/hf/transformers
    volumes:
      # Persist the HF cache on the host. Without this mount the env vars
      # above point at a container-local path and the cache is discarded on
      # re-create (added for consistency with the customer/design files).
      - ./cache:/cache
      # Replace with your local Base model directory (used for voice clone).
      - ../qwen3-tts-base:/models/qwen3-base:ro
    ports:
      - "8002:8000"
    command: >
      bash -lc "
      qwen-tts-demo /models/qwen3-base
      --ip 0.0.0.0
      --port 8000
      --no-flash-attn
      "

View File

@ -0,0 +1,32 @@
# docker-build variant of the customer-facing service: builds the image from
# the local Dockerfile and serves the customer model on host port 8000.
services:
  qwen3-tts:
    image: qwen3-tts-custom:${QWEN_TTS_VERSION:-1.2.1}
    build: .
    container_name: qwen3-tts-custom
    restart: unless-stopped
    gpus: all
    environment:
      # HF cache is still configured (the tokenizer may still use it).
      HF_HOME: /cache/hf
      HUGGINGFACE_HUB_CACHE: /cache/hf/hub
      TRANSFORMERS_CACHE: /cache/hf/transformers
    volumes:
      # HF cache (optional, but recommended).
      - ./cache:/cache
      # Your local model directory, mounted read-only into the container.
      - ../qwen3-tts-customer:/models/qwen3-tts:ro
    ports:
      - "8000:8000"
    command: >
      bash -lc "
      qwen-tts-demo /models/qwen3-tts
      --ip 0.0.0.0
      --port 8000
      --no-flash-attn
      "

View File

@ -0,0 +1,24 @@
# docker-build variant of the voice-design service: builds the image from the
# local Dockerfile and serves the design model on host port 8001.
services:
  qwen3-tts-voicedesign:
    image: qwen3-tts-voicedesign:${QWEN_TTS_VERSION:-1.2.1}
    build: .
    container_name: qwen3-tts-voicedesign
    restart: unless-stopped
    gpus: all
    environment:
      # Keep all Hugging Face caches under the shared /cache mount.
      HF_HOME: /cache/hf
      HUGGINGFACE_HUB_CACHE: /cache/hf/hub
      TRANSFORMERS_CACHE: /cache/hf/transformers
    volumes:
      - ./cache:/cache
      # Replace with your local VoiceDesign model directory (download it in
      # advance).
      - ../qwen3-tts-design:/models/qwen3-voicedesign:ro
    ports:
      - "8001:8000"
    command: >
      bash -lc "
      qwen-tts-demo /models/qwen3-voicedesign
      --ip 0.0.0.0
      --port 8000
      --no-flash-attn
      "

View File

@ -0,0 +1,71 @@
services:
qwen3-tts-custom:
image: qwen3-tts-custom:${QWEN_TTS_VERSION:-1.2.1}
build: .
container_name: qwen3-tts-custom
restart: unless-stopped
gpus: all
environment:
HF_HOME: /cache/hf
HUGGINGFACE_HUB_CACHE: /cache/hf/hub
TRANSFORMERS_CACHE: /cache/hf/transformers
volumes:
- ./cache:/cache
- ./qwen3-tts-customer:/models/qwen3-custom:ro
ports:
- "8000:8000"
command: >
bash -lc "
qwen-tts-demo /models/qwen3-custom
--ip 0.0.0.0
--port 8000
--no-flash-attn
"
qwen3-tts-voicedesign:
image: qwen3-tts-voicedesign:${QWEN_TTS_VERSION:-1.2.1}
build: .
container_name: qwen3-tts-voicedesign
restart: unless-stopped
gpus: all
environment:
HF_HOME: /cache/hf
HUGGINGFACE_HUB_CACHE: /cache/hf/hub
TRANSFORMERS_CACHE: /cache/hf/transformers
volumes:
- ./cache:/cache
# 这里换成你本地 VoiceDesign 模型目录(你需要提前下载好)
- ./qwen3-tts-design:/models/qwen3-voicedesign:ro
ports:
- "8001:8000"
command: >
bash -lc "
qwen-tts-demo /models/qwen3-voicedesign
--ip 0.0.0.0
--port 8000
--no-flash-attn
"
qwen3-tts-clone:
image: qwen3-tts-base:${QWEN_TTS_VERSION:-1.2.1}
build: .
container_name: qwen3-tts-base
restart: unless-stopped
gpus: all
environment:
HF_HOME: /cache/hf
HUGGINGFACE_HUB_CACHE: /cache/hf/hub
TRANSFORMERS_CACHE: /cache/hf/transformers
volumes:
- ./cache:/cache
# 这里换成你本地 Base 模型目录(用于 voice clone
- ./qwen3-tts-base:/models/qwen3-base:ro
ports:
- "8002:8000"
command: >
bash -lc "
qwen-tts-demo /models/qwen3-base
--ip 0.0.0.0
--port 8000
--no-flash-attn
"

23
docker-compose-base.yml Normal file
View File

@ -0,0 +1,23 @@
# Root compose file for the base (voice-clone) service only. Runs a
# pre-built image (no build: key) and serves the Base model on host port
# 8002.
services:
  qwen3-tts-clone:
    # NOTE(review): this tags the image "qwen3-tts-clone" while the combined
    # docker-compose.yml uses "qwen3-tts-base" for the same role — confirm
    # which image tag actually exists before deploying.
    image: qwen3-tts-clone:${QWEN_TTS_VERSION:-1.2.1}
    container_name: qwen3-tts-clone
    restart: unless-stopped
    gpus: all
    environment:
      # Keep all Hugging Face caches under the shared /cache mount.
      HF_HOME: /cache/hf
      HUGGINGFACE_HUB_CACHE: /cache/hf/hub
      TRANSFORMERS_CACHE: /cache/hf/transformers
    volumes:
      - ./cache:/cache
      # Replace with your local Base model directory (used for voice clone).
      - ./qwen3-tts-base:/models/qwen3-base:ro
    ports:
      - "8002:8000"
    command: >
      bash -lc "
      qwen-tts-demo /models/qwen3-base
      --ip 0.0.0.0
      --port 8000
      --no-flash-attn
      "

View File

@ -0,0 +1,31 @@
# Root compose file for the customer-facing service only. Runs a pre-built
# image (no build: key) and serves the customer model on host port 8000.
services:
  qwen3-tts:
    image: qwen3-tts-custom:${QWEN_TTS_VERSION:-1.2.1}
    container_name: qwen3-tts
    restart: unless-stopped
    gpus: all
    environment:
      # HF cache is still configured (the tokenizer may still use it).
      HF_HOME: /cache/hf
      HUGGINGFACE_HUB_CACHE: /cache/hf/hub
      TRANSFORMERS_CACHE: /cache/hf/transformers
    volumes:
      # HF cache (optional, but recommended).
      - ./cache:/cache
      # Your local model directory, mounted read-only into the container.
      - ./qwen3-tts-customer:/models/qwen3-tts:ro
    ports:
      - "8000:8000"
    command: >
      bash -lc "
      qwen-tts-demo /models/qwen3-tts
      --ip 0.0.0.0
      --port 8000
      --no-flash-attn
      "

23
docker-compose-design.yml Normal file
View File

@ -0,0 +1,23 @@
# Root compose file for the voice-design service only. Runs a pre-built
# image (no build: key) and serves the design model on host port 8001.
services:
  qwen3-tts-voicedesign:
    image: qwen3-tts-voicedesign:${QWEN_TTS_VERSION:-1.2.1}
    container_name: qwen3-tts-voicedesign
    restart: unless-stopped
    gpus: all
    environment:
      # Keep all Hugging Face caches under the shared /cache mount.
      HF_HOME: /cache/hf
      HUGGINGFACE_HUB_CACHE: /cache/hf/hub
      TRANSFORMERS_CACHE: /cache/hf/transformers
    volumes:
      - ./cache:/cache
      # Replace with your local VoiceDesign model directory (download it in
      # advance).
      - ./qwen3-tts-design:/models/qwen3-voicedesign:ro
    ports:
      - "8001:8000"
    command: >
      bash -lc "
      qwen-tts-demo /models/qwen3-voicedesign
      --ip 0.0.0.0
      --port 8000
      --no-flash-attn
      "

68
docker-compose.yml Normal file
View File

@ -0,0 +1,68 @@
# Default root compose file: runs all three pre-built TTS services
# (customer: 8000, voice-design: 8001, base: 8002). Model directories are
# bind-mounted read-only from the repo root.
services:
  qwen3-tts-custom:
    image: qwen3-tts-custom:${QWEN_TTS_VERSION:-1.2.1}
    container_name: qwen3-tts-custom
    restart: unless-stopped
    gpus: all
    environment:
      # Keep all Hugging Face caches under the shared /cache mount.
      HF_HOME: /cache/hf
      HUGGINGFACE_HUB_CACHE: /cache/hf/hub
      TRANSFORMERS_CACHE: /cache/hf/transformers
    volumes:
      - ./cache:/cache
      - ./qwen3-tts-customer:/models/qwen3-custom:ro
    ports:
      - "8000:8000"
    command: >
      bash -lc "
      qwen-tts-demo /models/qwen3-custom
      --ip 0.0.0.0
      --port 8000
      --no-flash-attn
      "

  qwen3-tts-voicedesign:
    image: qwen3-tts-voicedesign:${QWEN_TTS_VERSION:-1.2.1}
    container_name: qwen3-tts-voicedesign
    restart: unless-stopped
    gpus: all
    environment:
      HF_HOME: /cache/hf
      HUGGINGFACE_HUB_CACHE: /cache/hf/hub
      TRANSFORMERS_CACHE: /cache/hf/transformers
    volumes:
      - ./cache:/cache
      # Replace with your local VoiceDesign model directory (download it in
      # advance).
      - ./qwen3-tts-design:/models/qwen3-voicedesign:ro
    ports:
      - "8001:8000"
    command: >
      bash -lc "
      qwen-tts-demo /models/qwen3-voicedesign
      --ip 0.0.0.0
      --port 8000
      --no-flash-attn
      "

  # NOTE(review): this tags the image "qwen3-tts-base" while
  # docker-compose-base.yml uses "qwen3-tts-clone" for the same role —
  # confirm which image tag actually exists before deploying.
  qwen3-tts-base:
    image: qwen3-tts-base:${QWEN_TTS_VERSION:-1.2.1}
    container_name: qwen3-tts-base
    restart: unless-stopped
    gpus: all
    environment:
      HF_HOME: /cache/hf
      HUGGINGFACE_HUB_CACHE: /cache/hf/hub
      TRANSFORMERS_CACHE: /cache/hf/transformers
    volumes:
      - ./cache:/cache
      # Replace with your local Base model directory (used for voice clone).
      - ./qwen3-tts-base:/models/qwen3-base:ro
    ports:
      - "8002:8000"
    command: >
      bash -lc "
      qwen-tts-demo /models/qwen3-base
      --ip 0.0.0.0
      --port 8000
      --no-flash-attn
      "