add Qwen models (#10263)
### What problem does this PR solve?

Add Qwen models.

### Type of change

- [x] New Feature (non-breaking change which adds functionality)
```diff
@@ -408,6 +408,48 @@
             "model_type": "chat",
             "is_tools": true
         },
+        {
+            "llm_name": "Qwen/Qwen3-Max",
+            "tags": "LLM,CHAT,256k",
+            "max_tokens": 256000,
+            "model_type": "chat",
+            "is_tools": true
+        },
+        {
+            "llm_name": "Qwen/Qwen3-VL-Plus",
+            "tags": "LLM,IMAGE2TEXT",
+            "max_tokens": 256000,
+            "model_type": "chat",
+            "is_tools": true
+        },
+        {
+            "llm_name": "Qwen/Qwen3-VL-23B-A22B-Thinking",
+            "tags": "LLM,IMAGE2TEXT",
+            "max_tokens": 124000,
+            "model_type": "chat",
+            "is_tools": true
+        },
+        {
+            "llm_name": "Qwen/Qwen3-Omni-Flash-Realtime",
+            "tags": "LLM,IMAGE2TEXT",
+            "max_tokens": 64000,
+            "model_type": "chat",
+            "is_tools": true
+        },
+        {
+            "llm_name": "Qwen/Qwen3-Omni-Flash",
+            "tags": "LLM,IMAGE2TEXT",
+            "max_tokens": 64000,
+            "model_type": "chat",
+            "is_tools": true
+        },
+        {
+            "llm_name": "Qwen/Qwen-Image-Plus",
+            "tags": "LLM,IMAGE,IMAGE2TEXT",
+            "max_tokens": 0,
+            "model_type": "image",
+            "is_tools": true
+        },
         {
             "llm_name": "qwen3-coder-480b-a35b-instruct",
             "tags": "LLM,CHAT,256k",
```
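Each added entry uses the same five-field shape as the surrounding models (`llm_name`, `tags`, `max_tokens`, `model_type`, `is_tools`). The sketch below is not part of the PR; it is a minimal validation pass over entries of that shape. The field names and types come straight from the diff, while the config path (`conf/llm_factories.json`) and the top-level keys (`factory_llm_infos`, `llm`) are assumptions for illustration.

```python
# Minimal sketch (not from the PR): check that model entries carry the
# expected keys and types before relying on them.
# The config path and top-level keys below are assumptions for illustration.
import json

REQUIRED_FIELDS = {
    "llm_name": str,
    "tags": str,
    "max_tokens": int,
    "model_type": str,
    "is_tools": bool,
}


def check_llm_entry(entry: dict) -> list[str]:
    """Return a list of problems found in a single model entry."""
    problems = []
    for field, expected_type in REQUIRED_FIELDS.items():
        if field not in entry:
            problems.append(f"missing field: {field}")
        elif not isinstance(entry[field], expected_type):
            problems.append(f"{field} should be {expected_type.__name__}")
    return problems


if __name__ == "__main__":
    # Assumed location of the model factory config; adjust as needed.
    with open("conf/llm_factories.json", encoding="utf-8") as f:
        config = json.load(f)
    for factory in config.get("factory_llm_infos", []):
        for entry in factory.get("llm", []):
            for problem in check_llm_entry(entry):
                print(f"{entry.get('llm_name', '<unnamed>')}: {problem}")
```

Note that `Qwen/Qwen-Image-Plus` is added with `"max_tokens": 0` and `"model_type": "image"`, marking it as an image-generation entry rather than a chat model, while the vision-language models keep `"model_type": "chat"` with an `IMAGE2TEXT` tag.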