add support for SILICONFLOW (#1926)

### What problem does this PR solve?

Resolves #1853: adds SILICONFLOW as a model provider, registering its chat, text-embedding, and rerank models.

### Type of change


- [x] New Feature (non-breaking change which adds functionality)

---------

Co-authored-by: Zhedong Cen <cenzhedong2@126.com>
Author: 黄腾
Date: 2024-08-13 16:09:10 +08:00 (committed by GitHub)
Parent: 06700850df
Commit: e013ac52af
7 changed files with 349 additions and 7 deletions


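For context, here is a minimal client-side sketch of what the new provider entry enables: calling one of the chat models registered below through SiliconFlow's OpenAI-compatible endpoint. The base URL and the `SILICONFLOW_API_KEY` environment variable are assumptions following SiliconFlow's published conventions, not anything this diff defines; the project's own wiring of the provider lives in the other changed files. The diff itself follows.

```python
# Client-side sketch, not part of this PR. Assumes SiliconFlow's
# OpenAI-compatible endpoint and an API key exported as
# SILICONFLOW_API_KEY; both are assumptions, not defined by this diff.
import os

from openai import OpenAI

client = OpenAI(
    base_url="https://api.siliconflow.cn/v1",
    api_key=os.environ["SILICONFLOW_API_KEY"],
)

# "Qwen/Qwen2-7B-Instruct" is the first chat model registered below.
resp = client.chat.completions.create(
    model="Qwen/Qwen2-7B-Instruct",
    messages=[{"role": "user", "content": "Say hello in one sentence."}],
    max_tokens=64,
)
print(resp.choices[0].message.content)
```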
@@ -2767,6 +2767,290 @@
"model_type": "chat"
}
]
}
},
{
"name": "SILICONFLOW",
"logo": "",
"tags": "LLM,TEXT EMBEDDING,TEXT RE-RANK",
"status": "1",
"llm": [
{
"llm_name": "Qwen/Qwen2-7B-Instruct",
"tags": "LLM,CHAT,32k",
"max_tokens": 32768,
"model_type": "chat"
},
{
"llm_name": "Qwen/Qwen2-1.5B-Instruct",
"tags": "LLM,CHAT,32k",
"max_tokens": 32768,
"model_type": "chat"
},
{
"llm_name": "Qwen/Qwen1.5-7B-Chat",
"tags": "LLM,CHAT,32k",
"max_tokens": 32768,
"model_type": "chat"
},
{
"llm_name": "THUDM/glm-4-9b-chat",
"tags": "LLM,CHAT,32k",
"max_tokens": 32768,
"model_type": "chat"
},
{
"llm_name": "THUDM/chatglm3-6b",
"tags": "LLM,CHAT,32k",
"max_tokens": 32768,
"model_type": "chat"
},
{
"llm_name": "01-ai/Yi-1.5-9B-Chat-16K",
"tags": "LLM,CHAT,16k",
"max_tokens": 16384,
"model_type": "chat"
},
{
"llm_name": "01-ai/Yi-1.5-6B-Chat",
"tags": "LLM,CHAT,4k",
"max_tokens": 4096,
"model_type": "chat"
},
{
"llm_name": "google/gemma-2-9b-it",
"tags": "LLM,CHAT,8k",
"max_tokens": 8192,
"model_type": "chat"
},
{
"llm_name": "internlm/internlm2_5-7b-chat",
"tags": "LLM,CHAT,32k",
"max_tokens": 32768,
"model_type": "chat"
},
{
"llm_name": "meta-llama/Meta-Llama-3-8B-Instruct",
"tags": "LLM,CHAT,8k",
"max_tokens": 8192,
"model_type": "chat"
},
{
"llm_name": "meta-llama/Meta-Llama-3.1-8B-Instruct",
"tags": "LLM,CHAT,8k",
"max_tokens": 8192,
"model_type": "chat"
},
{
"llm_name": "mistralai/Mistral-7B-Instruct-v0.2",
"tags": "LLM,CHAT,32k",
"max_tokens": 32768,
"model_type": "chat"
},
{
"llm_name": "Pro/Qwen/Qwen2-7B-Instruct",
"tags": "LLM,CHAT,32k",
"max_tokens": 32768,
"model_type": "chat"
},
{
"llm_name": "Pro/Qwen/Qwen2-1.5B-Instruct",
"tags": "LLM,CHAT,32k",
"max_tokens": 32768,
"model_type": "chat"
},
{
"llm_name": "Pro/Qwen/Qwen1.5-7B-Chat",
"tags": "LLM,CHAT,32k",
"max_tokens": 32768,
"model_type": "chat"
},
{
"llm_name": "Pro/THUDM/glm-4-9b-chat",
"tags": "LLM,CHAT,32k",
"max_tokens": 32768,
"model_type": "chat"
},
{
"llm_name": "Pro/THUDM/chatglm3-6b",
"tags": "LLM,CHAT,32k",
"max_tokens": 32768,
"model_type": "chat"
},
{
"llm_name": "Pro/01-ai/Yi-1.5-9B-Chat-16K",
"tags": "LLM,CHAT,16k",
"max_tokens": 16384,
"model_type": "chat"
},
{
"llm_name": "Pro/01-ai/Yi-1.5-6B-Chat",
"tags": "LLM,CHAT,4k",
"max_tokens": 4096,
"model_type": "chat"
},
{
"llm_name": "Pro/internlm/internlm2_5-7b-chat",
"tags": "LLM,CHAT,32k",
"max_tokens": 32768,
"model_type": "chat"
},
{
"llm_name": "Pro/google/gemma-2-9b-it",
"tags": "LLM,CHAT,8k",
"max_tokens": 8192,
"model_type": "chat"
},
{
"llm_name": "Pro/meta-llama/Meta-Llama-3.1-8B-Instruct",
"tags": "LLM,CHAT,32k",
"max_tokens": 32768,
"model_type": "chat"
},
{
"llm_name": "Pro/meta-llama/Meta-Llama-3-8B-Instruct",
"tags": "LLM,CHAT,8k",
"max_tokens": 8192,
"model_type": "chat"
},
{
"llm_name": "Pro/mistralai/Mistral-7B-Instruct-v0.2",
"tags": "LLM,CHAT,32k",
"max_tokens": 32768,
"model_type": "chat"
},
{
"llm_name": "Qwen/Qwen2-72B-Instruct",
"tags": "LLM,CHAT,32k",
"max_tokens": 32768,
"model_type": "chat"
},
{
"llm_name": "Qwen/Qwen2-Math-72B-Instruct",
"tags": "LLM,CHAT,32k",
"max_tokens": 32768,
"model_type": "chat"
},
{
"llm_name": "Qwen/Qwen2-57B-A14B-Instruct",
"tags": "LLM,CHAT,32k",
"max_tokens": 32768,
"model_type": "chat"
},
{
"llm_name": "Qwen/Qwen1.5-110B-Chat",
"tags": "LLM,CHAT,32k",
"max_tokens": 32768,
"model_type": "chat"
},
{
"llm_name": "Qwen/Qwen1.5-32B-Chat",
"tags": "LLM,CHAT,32k",
"max_tokens": 32768,
"model_type": "chat"
},
{
"llm_name": "Qwen/Qwen1.5-14B-Chat",
"tags": "LLM,CHAT,32k",
"max_tokens": 32768,
"model_type": "chat"
},
{
"llm_name": "01-ai/Yi-1.5-34B-Chat-16K",
"tags": "LLM,CHAT,16k",
"max_tokens": 16384,
"model_type": "chat"
},
{
"llm_name": "deepseek-ai/DeepSeek-Coder-V2-Instruct",
"tags": "LLM,CHAT,32k",
"max_tokens": 32768,
"model_type": "chat"
},
{
"llm_name": "deepseek-ai/DeepSeek-V2-Chat",
"tags": "LLM,CHAT,32k",
"max_tokens": 32768,
"model_type": "chat"
},
{
"llm_name": "deepseek-ai/deepseek-llm-67b-chat",
"tags": "LLM,CHAT,4k",
"max_tokens": 4096,
"model_type": "chat"
},
{
"llm_name": "internlm/internlm2_5-20b-chat",
"tags": "LLM,CHAT,32k",
"max_tokens": 32768,
"model_type": "chat"
},
{
"llm_name": "meta-llama/Meta-Llama-3.1-405B-Instruct",
"tags": "LLM,CHAT,32k",
"max_tokens": 32768,
"model_type": "chat"
},
{
"llm_name": "meta-llama/Meta-Llama-3.1-70B-Instruct",
"tags": "LLM,CHAT,32k",
"max_tokens": 32768,
"model_type": "chat"
},
{
"llm_name": "meta-llama/Meta-Llama-3-70B-Instruct",
"tags": "LLM,CHAT,32k",
"max_tokens": 32768,
"model_type": "chat"
},
{
"llm_name": "mistralai/Mixtral-8x7B-Instruct-v0.1",
"tags": "LLM,CHAT,32k",
"max_tokens": 32768,
"model_type": "chat"
},
{
"llm_name": "google/gemma-2-27b-it",
"tags": "LLM,CHAT,8k",
"max_tokens": 8192,
"model_type": "chat"
},
{
"llm_name": "BAAI/bge-m3 ",
"tags": "TEXT EMBEDDING,8K",
"max_tokens": 8192,
"model_type": "embedding"
},
{
"llm_name": "BAAI/bge-large-en-v1.5 ",
"tags": "TEXT EMBEDDING,512",
"max_tokens": 512,
"model_type": "embedding"
},
{
"llm_name": "BAAI/bge-large-zh-v1.5 ",
"tags": "TEXT EMBEDDING,512",
"max_tokens": 512,
"model_type": "embedding"
},
{
"llm_name": "netease-youdao/bce-embedding-base_vl",
"tags": "TEXT EMBEDDING,512",
"max_tokens": 512,
"model_type": "embedding"
},
{
"llm_name": "BAAI/bge-reranker-v2-m3",
"tags": "RE-RANK, 512",
"max_tokens": 1024,
"model_type": "rerank"
},
{
"llm_name": "netease-youdao/bce-reranker-base-v1",
"tags": "RE-RANK, 512",
"max_tokens": 1024,
"model_type": "rerank"
}
]
}
]
}
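Every entry in this list pairs a human-readable size tag with a machine-readable `max_tokens`, so a small consistency check is cheap: it would flag stray whitespace in a model name (which a provider API would reject verbatim) and mismatches such as the two rerank entries above, tagged `512` but set to `max_tokens: 1024`. A hedged sketch; the `conf/llm_factories.json` path and the `factory_llm_infos` top-level key are assumptions about the surrounding repository, neither is shown in this diff.

```python
# Sanity-check sketch for factory entries like the SILICONFLOW block above.
# Assumptions: the registry lives at conf/llm_factories.json under a
# "factory_llm_infos" key; neither is shown in this diff.
import json
import re

REQUIRED = {"llm_name", "tags", "max_tokens", "model_type"}

with open("conf/llm_factories.json", encoding="utf-8") as f:
    factories = json.load(f)["factory_llm_infos"]

for factory in factories:
    for llm in factory.get("llm", []):
        name = llm.get("llm_name", "?")
        missing = REQUIRED - llm.keys()
        if missing:
            print(f"{name}: missing keys {missing}")
            continue
        # Whitespace around a model name is sent verbatim to the
        # provider and rejected, so flag it early.
        if name != name.strip():
            print(f"{name!r}: surrounding whitespace in llm_name")
        # The trailing tag is a size hint: "32k" means 32 * 1024 tokens,
        # while a bare number like "512" is taken literally.
        m = re.search(r"(\d+)\s*([kK]?)\s*$", llm["tags"])
        if m:
            declared = int(m.group(1)) * (1024 if m.group(2) else 1)
            if declared != llm["max_tokens"]:
                print(f"{name}: tag says {declared}, "
                      f"max_tokens is {llm['max_tokens']}")
```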