Feat: add gitee as LLM provider. (#8545)

### What problem does this PR solve?

Adds Gitee AI as an LLM provider: registers the GiteeAI factory (chat, embedding, rerank, image2text and speech2text models) and implements the corresponding GiteeChat, GiteeEmbed, GiteeRerank and GiteeSeq2txt client classes against Gitee AI's OpenAI-compatible endpoints.
### Type of change

- [x] New Feature (non-breaking change which adds functionality)
This commit is contained in:
Kevin Hu
2025-06-30 09:22:31 +08:00
committed by GitHub
parent e441c17c2c
commit aafeffa292
6 changed files with 452 additions and 5 deletions

View File

@ -3287,6 +3287,420 @@
} }
] ]
}, },
{
"name": "GiteeAI",
"logo": "",
"tags": "LLM,TEXT EMBEDDING,IMAGE2TEXT,SPEECH2TEXT,TEXT RE-RANK",
"status": "1",
"llm": [
{
"llm_name": "ERNIE-4.5-Turbo",
"tags": "LLM,CHAT",
"max_tokens": 32768,
"model_type": "chat",
"is_tools": false
},
{
"llm_name": "ERNIE-X1-Turbo",
"tags": "LLM,CHAT",
"max_tokens": 4096,
"model_type": "chat",
"is_tools": true
},
{
"llm_name": "DeepSeek-R1",
"tags": "LLM,CHAT",
"max_tokens": 65792,
"model_type": "chat",
"is_tools": true
},
{
"llm_name": "DeepSeek-V3",
"tags": "LLM,CHAT",
"max_tokens": 65792,
"model_type": "chat",
"is_tools": false
},
{
"llm_name": "Qwen3-235B-A22B",
"tags": "LLM,CHAT",
"max_tokens": 128000,
"model_type": "chat",
"is_tools": false
},
{
"llm_name": "Qwen3-30B-A3B",
"tags": "LLM,CHAT",
"max_tokens": 128000,
"model_type": "chat",
"is_tools": false
},
{
"llm_name": "Qwen3-32B",
"tags": "LLM,CHAT",
"max_tokens": 128000,
"model_type": "chat",
"is_tools": false
},
{
"llm_name": "Qwen3-8B",
"tags": "LLM,CHAT",
"max_tokens": 128000,
"model_type": "chat",
"is_tools": false
},
{
"llm_name": "Qwen3-4B",
"tags": "LLM,CHAT",
"max_tokens": 128000,
"model_type": "chat",
"is_tools": false
},
{
"llm_name": "Qwen3-0.6B",
"tags": "LLM,CHAT",
"max_tokens": 32000,
"model_type": "chat",
"is_tools": false
},
{
"llm_name": "QwQ-32B",
"tags": "LLM,CHAT",
"max_tokens": 131072,
"model_type": "chat",
"is_tools": true
},
{
"llm_name": "DeepSeek-R1-Distill-Qwen-32B",
"tags": "LLM,CHAT",
"max_tokens": 65792,
"model_type": "chat",
"is_tools": false
},
{
"llm_name": "DeepSeek-R1-Distill-Qwen-14B",
"tags": "LLM,CHAT",
"max_tokens": 65792,
"model_type": "chat",
"is_tools": false
},
{
"llm_name": "DeepSeek-R1-Distill-Qwen-7B",
"tags": "LLM,CHAT",
"max_tokens": 65792,
"model_type": "chat",
"is_tools": false
},
{
"llm_name": "DeepSeek-R1-Distill-Qwen-1.5B",
"tags": "LLM,CHAT",
"max_tokens": 65792,
"model_type": "chat",
"is_tools": false
},
{
"llm_name": "Qwen2.5-72B-Instruct",
"tags": "LLM,CHAT",
"max_tokens": 4096,
"model_type": "chat",
"is_tools": true
},
{
"llm_name": "Qwen2.5-32B-Instruct",
"tags": "LLM,CHAT",
"max_tokens": 4096,
"model_type": "chat",
"is_tools": false
},
{
"llm_name": "Qwen2.5-14B-Instruct",
"tags": "LLM,CHAT",
"max_tokens": 4096,
"model_type": "chat",
"is_tools": true
},
{
"llm_name": "Qwen2.5-7B-Instruct",
"tags": "LLM,CHAT",
"max_tokens": 131072,
"model_type": "chat",
"is_tools": true
},
{
"llm_name": "Qwen2-72B-Instruct",
"tags": "LLM,CHAT",
"max_tokens": 131072,
"model_type": "chat",
"is_tools": false
},
{
"llm_name": "Qwen2-7B-Instruct",
"tags": "LLM,CHAT",
"max_tokens": 131072,
"model_type": "chat",
"is_tools": false
},
{
"llm_name": "GLM-4-32B",
"tags": "LLM,CHAT",
"max_tokens": 128000,
"model_type": "chat",
"is_tools": false
},
{
"llm_name": "GLM-4-9B-0414",
"tags": "LLM,CHAT",
"max_tokens": 128000,
"model_type": "chat",
"is_tools": false
},
{
"llm_name": "glm-4-9b-chat",
"tags": "LLM,CHAT",
"max_tokens": 128000,
"model_type": "chat",
"is_tools": false
},
{
"llm_name": "internlm3-8b-instruct",
"tags": "LLM,CHAT",
"max_tokens": 4096,
"model_type": "chat",
"is_tools": false
},
{
"llm_name": "Yi-34B-Chat",
"tags": "LLM,CHAT",
"max_tokens": 32768,
"model_type": "chat",
"is_tools": false
},
{
"llm_name": "ERNIE-4.5-Turbo-VL",
"tags": "LLM,IMAGE2TEXT",
"max_tokens": 4096,
"model_type": "image2text",
"is_tools": false
},
{
"llm_name": "Qwen2.5-VL-32B-Instruct",
"tags": "LLM,IMAGE2TEXT",
"max_tokens": 32768,
"model_type": "image2text",
"is_tools": true
},
{
"llm_name": "Qwen2-VL-72B",
"tags": "LLM,IMAGE2TEXT",
"max_tokens": 4096,
"model_type": "image2text",
"is_tools": false
},
{
"llm_name": "Align-DS-V",
"tags": "LLM,IMAGE2TEXT",
"max_tokens": 4096,
"model_type": "image2text",
"is_tools": false
},
{
"llm_name": "InternVL3-78B",
"tags": "LLM,IMAGE2TEXT",
"max_tokens": 32768,
"model_type": "image2text",
"is_tools": false
},
{
"llm_name": "InternVL3-38B",
"tags": "LLM,IMAGE2TEXT",
"max_tokens": 32768,
"model_type": "image2text",
"is_tools": false
},
{
"llm_name": "InternVL2.5-78B",
"tags": "LLM,IMAGE2TEXT",
"max_tokens": 32768,
"model_type": "image2text",
"is_tools": false
},
{
"llm_name": "InternVL2.5-26B",
"tags": "LLM,IMAGE2TEXT",
"max_tokens": 16384,
"model_type": "image2text",
"is_tools": false
},
{
"llm_name": "InternVL2-8B",
"tags": "LLM,IMAGE2TEXT",
"max_tokens": 8192,
"model_type": "image2text",
"is_tools": false
},
{
"llm_name": "Qwen2-Audio-7B-Instruct",
"tags": "LLM,SPEECH2TEXT,IMAGE2TEXT",
"max_tokens": 8192,
"model_type": "speech2text",
"is_tools": false
},
{
"llm_name": "whisper-base",
"tags": "SPEECH2TEXT",
"max_tokens": 512,
"model_type": "speech2text",
"is_tools": false
},
{
"llm_name": "whisper-large",
"tags": "SPEECH2TEXT",
"max_tokens": 512,
"model_type": "speech2text",
"is_tools": false
},
{
"llm_name": "whisper-large-v3-turbo",
"tags": "SPEECH2TEXT",
"max_tokens": 512,
"model_type": "speech2text",
"is_tools": false
},
{
"llm_name": "whisper-large-v3",
"tags": "SPEECH2TEXT",
"max_tokens": 512,
"model_type": "speech2text",
"is_tools": false
},
{
"llm_name": "SenseVoiceSmall",
"tags": "SPEECH2TEXT",
"max_tokens": 512,
"model_type": "speech2text",
"is_tools": false
},
{
"llm_name": "Qwen3-Reranker-8B",
"tags": "RE-RANK",
"max_tokens": 32768,
"model_type": "rerank",
"is_tools": false
},
{
"llm_name": "Qwen3-Reranker-4B",
"tags": "RE-RANK",
"max_tokens": 32768,
"model_type": "rerank",
"is_tools": false
},
{
"llm_name": "Qwen3-Reranker-0.6B",
"tags": "RE-RANK",
"max_tokens": 32768,
"model_type": "rerank",
"is_tools": false
},
{
"llm_name": "Qwen3-Embedding-8B",
"tags": "TEXT EMBEDDING,TEXT RE-RANK",
"max_tokens": 8192,
"model_type": "embedding",
"is_tools": false
},
{
"llm_name": "Qwen3-Embedding-4B",
"tags": "TEXT EMBEDDING,TEXT RE-RANK",
"max_tokens": 4096,
"model_type": "embedding",
"is_tools": false
},
{
"llm_name": "Qwen3-Embedding-0.6B",
"tags": "TEXT EMBEDDING,TEXT RE-RANK",
"max_tokens": 4096,
"model_type": "embedding",
"is_tools": false
},
{
"llm_name": "jina-clip-v1",
"tags": "TEXT EMBEDDING,TEXT RE-RANK",
"max_tokens": 512,
"model_type": "embedding",
"is_tools": false
},
{
"llm_name": "jina-clip-v2",
"tags": "TEXT EMBEDDING,TEXT RE-RANK",
"max_tokens": 8192,
"model_type": "embedding",
"is_tools": false
},
{
"llm_name": "jina-reranker-m0",
"tags": "RE-RANK",
"max_tokens": 10240,
"model_type": "rerank",
"is_tools": false
},
{
"llm_name": "bce-embedding-base_v1",
"tags": "TEXT EMBEDDING,TEXT RE-RANK",
"max_tokens": 512,
"model_type": "embedding",
"is_tools": false
},
{
"llm_name": "bce-reranker-base_v1",
"tags": "RE-RANK",
"max_tokens": 512,
"model_type": "rerank",
"is_tools": false
},
{
"llm_name": "bge-m3",
"tags": "TEXT EMBEDDING,TEXT RE-RANK",
"max_tokens": 8192,
"model_type": "embedding",
"is_tools": false
},
{
"llm_name": "bge-reranker-v2-m3",
"tags": "RE-RANK",
"max_tokens": 8192,
"model_type": "rerank",
"is_tools": false
},
{
"llm_name": "bge-large-zh-v1.5",
"tags": "TEXT EMBEDDING,TEXT RE-RANK",
"max_tokens": 1024,
"model_type": "embedding",
"is_tools": false
},
{
"llm_name": "bge-small-zh-v1.5",
"tags": "TEXT EMBEDDING,TEXT RE-RANK",
"max_tokens": 512,
"model_type": "embedding",
"is_tools": false
},
{
"llm_name": "nomic-embed-code",
"tags": "TEXT EMBEDDING,TEXT RE-RANK",
"max_tokens": 512,
"model_type": "embedding",
"is_tools": false
},
{
"llm_name": "all-mpnet-base-v2",
"tags": "TEXT EMBEDDING,TEXT RE-RANK",
"max_tokens": 512,
"model_type": "embedding",
"is_tools": false
}
]
},
{ {
"name": "Google Cloud", "name": "Google Cloud",
"logo": "", "logo": "",

View File

@ -45,7 +45,8 @@ from .embedding_model import (
HuggingFaceEmbed, HuggingFaceEmbed,
VolcEngineEmbed, VolcEngineEmbed,
GPUStackEmbed, GPUStackEmbed,
NovitaEmbed NovitaEmbed,
GiteeEmbed
) )
from .chat_model import ( from .chat_model import (
GptTurbo, GptTurbo,
@ -87,6 +88,7 @@ from .chat_model import (
HuggingFaceChat, HuggingFaceChat,
GPUStackChat, GPUStackChat,
ModelScopeChat, ModelScopeChat,
GiteeChat
) )
from .cv_model import ( from .cv_model import (
@ -129,7 +131,8 @@ from .rerank_model import (
QWenRerank, QWenRerank,
GPUStackRerank, GPUStackRerank,
HuggingfaceRerank, HuggingfaceRerank,
NovitaRerank NovitaRerank,
GiteeRerank
) )
from .sequence2txt_model import ( from .sequence2txt_model import (
@ -139,6 +142,7 @@ from .sequence2txt_model import (
XinferenceSeq2txt, XinferenceSeq2txt,
TencentCloudSeq2txt, TencentCloudSeq2txt,
GPUStackSeq2txt, GPUStackSeq2txt,
GiteeSeq2txt
) )
from .tts_model import ( from .tts_model import (
@ -182,7 +186,8 @@ EmbeddingModel = {
"HuggingFace": HuggingFaceEmbed, "HuggingFace": HuggingFaceEmbed,
"VolcEngine": VolcEngineEmbed, "VolcEngine": VolcEngineEmbed,
"GPUStack": GPUStackEmbed, "GPUStack": GPUStackEmbed,
"NovitaAI": NovitaEmbed "NovitaAI": NovitaEmbed,
"GiteeAI": GiteeEmbed
} }
CvModel = { CvModel = {
@ -206,7 +211,7 @@ CvModel = {
"Tencent Hunyuan": HunyuanCV, "Tencent Hunyuan": HunyuanCV,
"Anthropic": AnthropicCV, "Anthropic": AnthropicCV,
"SILICONFLOW": SILICONFLOWCV, "SILICONFLOW": SILICONFLOWCV,
"GPUStack": GPUStackCV, "GPUStack": GPUStackCV
} }
ChatModel = { ChatModel = {
@ -250,6 +255,7 @@ ChatModel = {
"HuggingFace": HuggingFaceChat, "HuggingFace": HuggingFaceChat,
"GPUStack": GPUStackChat, "GPUStack": GPUStackChat,
"ModelScope":ModelScopeChat, "ModelScope":ModelScopeChat,
"GiteeAI": GiteeChat
} }
RerankModel = { RerankModel = {
@ -270,7 +276,8 @@ RerankModel = {
"Tongyi-Qianwen": QWenRerank, "Tongyi-Qianwen": QWenRerank,
"GPUStack": GPUStackRerank, "GPUStack": GPUStackRerank,
"HuggingFace": HuggingfaceRerank, "HuggingFace": HuggingfaceRerank,
"NovitaAI": NovitaRerank "NovitaAI": NovitaRerank,
"GiteeAI": GiteeRerank
} }
Seq2txtModel = { Seq2txtModel = {
@ -280,6 +287,7 @@ Seq2txtModel = {
"Xinference": XinferenceSeq2txt, "Xinference": XinferenceSeq2txt,
"Tencent Cloud": TencentCloudSeq2txt, "Tencent Cloud": TencentCloudSeq2txt,
"GPUStack": GPUStackSeq2txt, "GPUStack": GPUStackSeq2txt,
"GiteeAI": GiteeSeq2txt
} }
TTSModel = { TTSModel = {

View File

@ -1253,6 +1253,13 @@ class YiChat(Base):
super().__init__(key, model_name, base_url, **kwargs) super().__init__(key, model_name, base_url, **kwargs)
class GiteeChat(Base):
    """Chat-completion client for Gitee AI's OpenAI-compatible serving endpoint."""

    def __init__(self, key, model_name, base_url="https://ai.gitee.com/v1/", **kwargs):
        # Callers may pass an explicit None/"" base_url; fall back to the default endpoint.
        super().__init__(key, model_name, base_url or "https://ai.gitee.com/v1/", **kwargs)
class ReplicateChat(Base): class ReplicateChat(Base):
def __init__(self, key, model_name, base_url=None, **kwargs): def __init__(self, key, model_name, base_url=None, **kwargs):
super().__init__(key, model_name, base_url=base_url, **kwargs) super().__init__(key, model_name, base_url=base_url, **kwargs)

View File

@ -912,3 +912,8 @@ class GPUStackEmbed(OpenAIEmbed):
class NovitaEmbed(SILICONFLOWEmbed): class NovitaEmbed(SILICONFLOWEmbed):
def __init__(self, key, model_name, base_url="https://api.novita.ai/v3/openai/embeddings"): def __init__(self, key, model_name, base_url="https://api.novita.ai/v3/openai/embeddings"):
super().__init__(key, model_name, base_url) super().__init__(key, model_name, base_url)
class GiteeEmbed(SILICONFLOWEmbed):
    """Embedding client for Gitee AI's SiliconFlow-compatible /embeddings endpoint."""

    def __init__(self, key, model_name, base_url="https://ai.gitee.com/v1/embeddings"):
        # Consistency with GiteeChat: an explicitly passed falsy base_url means
        # "use the provider default" rather than handing an empty URL downstream.
        if not base_url:
            base_url = "https://ai.gitee.com/v1/embeddings"
        super().__init__(key, model_name, base_url)

View File

@ -630,3 +630,8 @@ class GPUStackRerank(Base):
class NovitaRerank(JinaRerank): class NovitaRerank(JinaRerank):
def __init__(self, key, model_name, base_url="https://api.novita.ai/v3/openai/rerank"): def __init__(self, key, model_name, base_url="https://api.novita.ai/v3/openai/rerank"):
super().__init__(key, model_name, base_url) super().__init__(key, model_name, base_url)
class GiteeRerank(JinaRerank):
    """Rerank client for Gitee AI's Jina-compatible /rerank endpoint."""

    def __init__(self, key, model_name, base_url="https://ai.gitee.com/v1/rerank"):
        # Consistency with GiteeChat: an explicitly passed falsy base_url means
        # "use the provider default" rather than handing an empty URL downstream.
        if not base_url:
            base_url = "https://ai.gitee.com/v1/rerank"
        super().__init__(key, model_name, base_url)

View File

@ -203,3 +203,11 @@ class GPUStackSeq2txt(Base):
self.base_url = base_url self.base_url = base_url
self.model_name = model_name self.model_name = model_name
self.key = key self.key = key
class GiteeSeq2txt(Base):
    """Speech-to-text client backed by Gitee AI's OpenAI-compatible audio API."""

    def __init__(self, key, model_name="whisper-1", base_url="https://ai.gitee.com/v1/"):
        # An explicitly passed falsy base_url falls back to the provider default.
        if not base_url:
            base_url = "https://ai.gitee.com/v1/"
        self.client = OpenAI(api_key=key, base_url=base_url)
        self.model_name = model_name
        # Retain the credential like sibling classes (e.g. GPUStackSeq2txt) do,
        # so downstream code that reads .key works uniformly across providers.
        self.key = key
        # NOTE(review): default "whisper-1" is not among GiteeAI's listed
        # speech2text models (whisper-base/large/...)  — confirm the intended default.