Mirror of https://github.com/infiniflow/ragflow.git, synced 2025-12-08 12:32:30 +08:00.
Chore(config): remove Youdao and BAAI embedding model providers (#10873)

### What problem does this PR solve?

This commit removes the Youdao and BAAI entries from the LLM factories configuration, as they are no longer needed or supported.

### Type of change

- [x] Config update
This commit is contained in:
@ -368,7 +368,7 @@ def my_llms():
|
||||
@manager.route('/list', methods=['GET']) # noqa: F821
|
||||
@login_required
|
||||
def list_app():
|
||||
self_deployed = ["Youdao", "FastEmbed", "BAAI", "Ollama", "Xinference", "LocalAI", "LM-Studio", "GPUStack"]
|
||||
self_deployed = ["FastEmbed", "Ollama", "Xinference", "LocalAI", "LM-Studio", "GPUStack"]
|
||||
weighted = []
|
||||
model_type = request.args.get("model_type")
|
||||
try:
|
||||
|
||||
@ -974,20 +974,6 @@
|
||||
"status": "1",
|
||||
"llm": []
|
||||
},
|
||||
{
|
||||
"name": "Youdao",
|
||||
"logo": "",
|
||||
"tags": "TEXT EMBEDDING",
|
||||
"status": "1",
|
||||
"llm": [
|
||||
{
|
||||
"llm_name": "maidalun1020/bce-embedding-base_v1",
|
||||
"tags": "TEXT EMBEDDING,",
|
||||
"max_tokens": 512,
|
||||
"model_type": "embedding"
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "DeepSeek",
|
||||
"logo": "",
|
||||
@ -1140,20 +1126,6 @@
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "BAAI",
|
||||
"logo": "",
|
||||
"tags": "TEXT EMBEDDING",
|
||||
"status": "1",
|
||||
"llm": [
|
||||
{
|
||||
"llm_name": "BAAI/bge-large-zh-v1.5",
|
||||
"tags": "TEXT EMBEDDING,",
|
||||
"max_tokens": 1024,
|
||||
"model_type": "embedding"
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "Builtin",
|
||||
"logo": "",
|
||||
|
||||
Reference in New Issue
Block a user