feat: replace models of novita (#7360)

### What problem does this PR solve?

Replaces the model list for the Novita provider: renames the factory from `novita.ai` to `NovitaAI`, updates its tags from `LLM,IMAGE2TEXT` to `LLM,TEXT EMBEDDING`, removes the outdated model entries, and adds the current chat models along with the `baai/bge-m3` embedding model (whose `model_type` is also corrected from `chat` to `embedding`).

### Type of change

- [x] Other (please describe): replace the model list of the NovitaAI provider

Author: Neal Davis
Date: 2025-04-28 13:35:09 +08:00
Committed by: GitHub
Parent: af770c5ced · Commit: 23dcbc94ef
5 changed files with 74 additions and 166 deletions
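
Each entry in the factory list edited below follows the same shape: an `llm_name`, a comma-separated `tags` string, a `max_tokens` window, a `model_type` (`chat`, `embedding`, or `image2text`), and an optional `is_tools` flag. As a rough aid for reviewing changes like this one, here is a minimal consistency-check sketch; the file path and the `factory_llm_infos` key are assumptions, since the page does not show the file name:

```python
import json

# Keys every model entry in the diff below carries.
REQUIRED_KEYS = {"llm_name", "tags", "max_tokens", "model_type"}

def check_factory(factory: dict) -> list[str]:
    """Return problems found in one provider's model list."""
    problems = []
    for model in factory.get("llm", []):
        missing = REQUIRED_KEYS - model.keys()
        if missing:
            problems.append(f"{model.get('llm_name', '?')}: missing {sorted(missing)}")
        # An embedding tag should come with an embedding model_type
        # (the last hunk below fixes exactly this for baai/bge-m3).
        if "TEXT EMBEDDING" in model.get("tags", "") and model.get("model_type") != "embedding":
            problems.append(f"{model['llm_name']}: tagged TEXT EMBEDDING but model_type is {model.get('model_type')!r}")
    return problems

if __name__ == "__main__":
    with open("llm_factories.json") as f:               # assumed path
        data = json.load(f)
    for factory in data.get("factory_llm_infos", []):   # assumed top-level key
        for problem in check_factory(factory):
            print(f"{factory['name']}: {problem}")
```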


@@ -2203,58 +2203,21 @@
             ]
         },
         {
-            "name": "novita.ai",
+            "name": "NovitaAI",
             "logo": "",
-            "tags": "LLM,IMAGE2TEXT",
+            "tags": "LLM,TEXT EMBEDDING",
             "status": "1",
             "llm": [
                 {
-                    "llm_name": "deepseek/deepseek-r1",
-                    "tags": "LLM,CHAT,64k",
-                    "max_tokens": 64000,
-                    "model_type": "chat"
-                },
-                {
-                    "llm_name": "deepseek/deepseek_v3",
-                    "tags": "LLM,CHAT,64k",
-                    "max_tokens": 64000,
-                    "model_type": "chat"
-                },
-                {
-                    "llm_name": "deepseek/deepseek-r1-distill-llama-70b",
-                    "tags": "LLM,CHAT,64k",
-                    "max_tokens": 64000,
-                    "model_type": "chat"
-                },
-                {
-                    "llm_name": "deepseek/deepseek-r1-distill-qwen-32b",
-                    "tags": "LLM,CHAT,64k",
-                    "max_tokens": 64000,
-                    "model_type": "chat"
-                },
-                {
-                    "llm_name": "deepseek/deepseek-r1-distill-qwen-14b",
-                    "tags": "LLM,CHAT,64k",
-                    "max_tokens": 64000,
-                    "model_type": "chat"
-                },
-                {
-                    "llm_name": "deepseek/deepseek-r1-distill-llama-8b",
+                    "llm_name": "qwen/qwen2.5-7b-instruct",
                     "tags": "LLM,CHAT,32k",
                     "max_tokens": 32000,
                     "model_type": "chat"
                 },
                 {
-                    "llm_name": "meta-llama/llama-3.3-70b-instruct",
-                    "tags": "LLM,CHAT,128k",
-                    "max_tokens": 131072,
-                    "model_type": "chat",
-                    "is_tools": true
-                },
-                {
-                    "llm_name": "meta-llama/llama-3.2-11b-vision-instruct",
-                    "tags": "LLM,CHAT,32k",
-                    "max_tokens": 32768,
+                    "llm_name": "meta-llama/llama-3.2-1b-instruct",
+                    "tags": "LLM,CHAT,131k",
+                    "max_tokens": 131000,
                     "model_type": "chat"
                 },
                 {
@@ -2264,15 +2227,21 @@
                     "model_type": "chat"
                 },
                 {
-                    "llm_name": "meta-llama/llama-3.2-1b-instruct",
+                    "llm_name": "thudm/glm-4-9b-0414",
                     "tags": "LLM,CHAT,32k",
-                    "max_tokens": 32768,
+                    "max_tokens": 32000,
                     "model_type": "chat"
                 },
                 {
-                    "llm_name": "meta-llama/llama-3.1-70b-instruct",
+                    "llm_name": "thudm/glm-z1-9b-0414",
                     "tags": "LLM,CHAT,32k",
-                    "max_tokens": 32768,
+                    "max_tokens": 32000,
                     "model_type": "chat"
                 },
                 {
+                    "llm_name": "meta-llama/llama-3.1-8b-instruct-bf16",
+                    "tags": "LLM,CHAT,8k",
+                    "max_tokens": 8192,
+                    "model_type": "chat",
+                    "is_tools": true
+                },
@@ -2284,55 +2253,15 @@
                     "is_tools": true
                 },
                 {
-                    "llm_name": "meta-llama/llama-3.1-8b-instruct-bf16",
-                    "tags": "LLM,CHAT,8k",
-                    "max_tokens": 8192,
-                    "model_type": "chat",
-                    "is_tools": true
-                },
-                {
-                    "llm_name": "meta-llama/llama-3-70b-instruct",
-                    "tags": "LLM,CHAT,8k",
-                    "max_tokens": 8192,
-                    "model_type": "chat",
-                    "is_tools": true
-                },
-                {
-                    "llm_name": "meta-llama/llama-3-8b-instruct",
-                    "tags": "LLM,CHAT,8k",
-                    "max_tokens": 8192,
-                    "model_type": "chat",
-                    "is_tools": true
-                },
-                {
-                    "llm_name": "qwen/qwen-2.5-72b-instruct",
-                    "tags": "LLM,CHAT,32k",
-                    "max_tokens": 32000,
-                    "model_type": "chat",
-                    "is_tools": true
-                },
-                {
-                    "llm_name": "qwen/qwen-2-vl-72b-instruct",
-                    "tags": "LLM,IMAGE2TEXT,32k",
-                    "max_tokens": 32768,
-                    "model_type": "image2text"
-                },
-                {
-                    "llm_name": "qwen/qwen-2-7b-instruct",
-                    "tags": "LLM,CHAT,32k",
-                    "max_tokens": 32768,
-                    "model_type": "chat"
-                },
-                {
-                    "llm_name": "mistralai/mistral-nemo",
+                    "llm_name": "deepseek/deepseek-v3-0324",
                     "tags": "LLM,CHAT,128k",
-                    "max_tokens": 131072,
+                    "max_tokens": 128000,
                     "model_type": "chat"
                 },
                 {
-                    "llm_name": "mistralai/mistral-7b-instruct",
-                    "tags": "LLM,CHAT,32k",
-                    "max_tokens": 32768,
+                    "llm_name": "deepseek/deepseek-r1-turbo",
+                    "tags": "LLM,CHAT,64k",
+                    "max_tokens": 64000,
                     "model_type": "chat"
                 },
                 {
@@ -2342,82 +2271,61 @@
                     "model_type": "chat"
                 },
                 {
-                    "llm_name": "sao10k/l3-70b-euryale-v2.1",
+                    "llm_name": "meta-llama/llama-3.3-70b-instruct",
                     "tags": "LLM,CHAT,128k",
                     "max_tokens": 131072,
                     "model_type": "chat",
                     "is_tools": true
                 },
                 {
+                    "llm_name": "deepseek/deepseek-r1-distill-llama-8b",
+                    "tags": "LLM,CHAT,32k",
+                    "max_tokens": 32000,
+                    "model_type": "chat"
+                },
+                {
+                    "llm_name": "mistralai/mistral-nemo",
+                    "tags": "LLM,CHAT,128k",
+                    "max_tokens": 131072,
+                    "model_type": "chat"
+                },
+                {
+                    "llm_name": "meta-llama/llama-3-8b-instruct",
+                    "tags": "LLM,CHAT,8k",
+                    "max_tokens": 8192,
+                    "model_type": "chat",
+                    "is_tools": true
+                },
+                {
+                    "llm_name": "deepseek/deepseek-v3-turbo",
+                    "tags": "LLM,CHAT,64k",
+                    "max_tokens": 64000,
+                    "model_type": "chat",
+                    "is_tools": true
+                },
+                {
+                    "llm_name": "mistralai/mistral-7b-instruct",
+                    "tags": "LLM,CHAT,32k",
+                    "max_tokens": 32768,
+                    "model_type": "chat"
+                },
+                {
-                    "llm_name": "sao10k/l3-8b-lunaris",
-                    "tags": "LLM,CHAT,8k",
+                    "llm_name": "deepseek/deepseek-r1",
+                    "tags": "LLM,CHAT,64k",
+                    "max_tokens": 64000,
+                    "model_type": "chat"
+                },
+                {
+                    "llm_name": "deepseek/deepseek-r1-distill-qwen-14b",
+                    "tags": "LLM,CHAT,64k",
+                    "max_tokens": 64000,
+                    "model_type": "chat"
+                },
+                {
+                    "llm_name": "baai/bge-m3",
+                    "tags": "TEXT EMBEDDING,8K",
+                    "max_tokens": 8192,
-                    "model_type": "chat"
-                },
-                {
-                    "llm_name": "sao10k/l31-70b-euryale-v2.2",
-                    "tags": "LLM,CHAT,8k",
-                    "max_tokens": 8192,
-                    "model_type": "chat"
-                },
-                {
-                    "llm_name": "nousresearch/hermes-2-pro-llama-3-8b",
-                    "tags": "LLM,CHAT,8k",
-                    "max_tokens": 8192,
-                    "model_type": "chat"
-                },
-                {
-                    "llm_name": "nousresearch/nous-hermes-llama2-13b",
-                    "tags": "LLM,CHAT,4k",
-                    "max_tokens": 4096,
-                    "model_type": "chat"
-                },
-                {
-                    "llm_name": "openchat/openchat-7b",
-                    "tags": "LLM,CHAT,4k",
-                    "max_tokens": 4096,
-                    "model_type": "chat"
-                },
-                {
-                    "llm_name": "gryphe/mythomax-l2-13b",
-                    "tags": "LLM,CHAT,4k",
-                    "max_tokens": 4096,
-                    "model_type": "chat"
-                },
-                {
-                    "llm_name": "microsoft/wizardlm-2-8x22b",
-                    "tags": "LLM,CHAT,65k",
-                    "max_tokens": 65535,
-                    "model_type": "chat"
-                },
-                {
-                    "llm_name": "google/gemma-2-9b-it",
-                    "tags": "LLM,CHAT,8k",
-                    "max_tokens": 8192,
-                    "model_type": "chat"
-                },
-                {
-                    "llm_name": "cognitivecomputations/dolphin-mixtral-8x22b",
-                    "tags": "LLM,CHAT,16k",
-                    "max_tokens": 16000,
-                    "model_type": "chat"
-                },
-                {
-                    "llm_name": "jondurbin/airoboros-l2-70b",
-                    "tags": "LLM,CHAT,4k",
-                    "max_tokens": 4096,
-                    "model_type": "chat"
-                },
-                {
-                    "llm_name": "teknium/openhermes-2.5-mistral-7b",
-                    "tags": "LLM,CHAT,4k",
-                    "max_tokens": 4096,
-                    "model_type": "chat"
-                },
-                {
-                    "llm_name": "sophosympatheia/midnight-rose-70b",
-                    "tags": "LLM,CHAT,4k",
-                    "max_tokens": 4096,
-                    "model_type": "chat"
+                    "model_type": "embedding"
                 }
             ]
         },
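
Worth noting in the last hunk: `baai/bge-m3` previously carried `"model_type": "chat"` despite its `TEXT EMBEDDING` tag, and the replacement entry ends with `"model_type": "embedding"`. That field is what consumers typically dispatch on, so the mislabel would have surfaced the embedding model as a chat model. Below is a minimal sketch of such dispatch, with hypothetical wrapper classes rather than the project's actual API:

```python
from dataclasses import dataclass

# Hypothetical wrappers for illustration; the project's real classes are not
# shown on this page.
@dataclass
class ChatModel:
    name: str
    max_tokens: int

@dataclass
class EmbeddingModel:
    name: str
    max_tokens: int

HANDLERS = {"chat": ChatModel, "embedding": EmbeddingModel}

def bind_model(entry: dict):
    """Pick a wrapper class based on model_type, the field this diff corrects."""
    try:
        cls = HANDLERS[entry["model_type"]]
    except KeyError as exc:
        raise ValueError(f"unsupported model_type: {entry['model_type']!r}") from exc
    return cls(entry["llm_name"], entry["max_tokens"])

# Under the old entry ("model_type": "chat"), bge-m3 would have been bound as a
# chat model; the corrected entry routes it to EmbeddingModel.
print(bind_model({"llm_name": "baai/bge-m3", "tags": "TEXT EMBEDDING,8K",
                  "max_tokens": 8192, "model_type": "embedding"}))
```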