mirror of
https://github.com/infiniflow/ragflow.git
synced 2026-01-23 03:26:53 +08:00
feat: Add n1n provider (#12680)
This PR adds n1n as an LLM provider to RAGFlow. Co-authored-by: Qun <qun@ip-10-5-5-38.us-west-2.compute.internal>
This commit is contained in:
@ -56,6 +56,7 @@ class SupportedLiteLLMProvider(StrEnum):
|
||||
GPUStack = "GPUStack"
|
||||
OpenAI = "OpenAI"
|
||||
Azure_OpenAI = "Azure-OpenAI"
|
||||
n1n = "n1n"
|
||||
|
||||
|
||||
FACTORY_DEFAULT_BASE_URL = {
|
||||
@ -81,6 +82,7 @@ FACTORY_DEFAULT_BASE_URL = {
|
||||
SupportedLiteLLMProvider.MiniMax: "https://api.minimaxi.com/v1",
|
||||
SupportedLiteLLMProvider.DeerAPI: "https://api.deerapi.com/v1",
|
||||
SupportedLiteLLMProvider.OpenAI: "https://api.openai.com/v1",
|
||||
SupportedLiteLLMProvider.n1n: "https://api.n1n.ai/v1",
|
||||
}
|
||||
|
||||
|
||||
@ -118,6 +120,7 @@ LITELLM_PROVIDER_PREFIX = {
|
||||
SupportedLiteLLMProvider.GPUStack: "openai/",
|
||||
SupportedLiteLLMProvider.OpenAI: "openai/",
|
||||
SupportedLiteLLMProvider.Azure_OpenAI: "azure/",
|
||||
SupportedLiteLLMProvider.n1n: "openai/",
|
||||
}
|
||||
|
||||
ChatModel = globals().get("ChatModel", {})
|
||||
|
||||
@ -1165,6 +1165,15 @@ class TokenPonyChat(Base):
|
||||
super().__init__(key, model_name, base_url, **kwargs)
|
||||
|
||||
|
||||
class N1nChat(Base):
    """Chat-model client for the n1n provider (OpenAI-compatible endpoint).

    Follows the same pattern as the other provider-specific ``Base``
    subclasses in this module: it pins the factory name and supplies the
    provider's default base URL, then delegates everything else to
    ``Base.__init__``.
    """

    _FACTORY_NAME = "n1n"

    # Single source of truth for the endpoint. Previously this literal was
    # duplicated in the parameter default and the falsy-fallback branch,
    # which could silently drift apart on a future edit.
    _DEFAULT_BASE_URL = "https://api.n1n.ai/v1"

    def __init__(self, key, model_name, base_url=_DEFAULT_BASE_URL, **kwargs):
        """Initialize the n1n chat client.

        Args:
            key: API key for the n1n service.
            model_name: Identifier of the model to request.
            base_url: Endpoint root. Falsy values (``None``, ``""``) fall
                back to the provider default, so callers that explicitly
                pass ``base_url=None`` still get a working endpoint.
            **kwargs: Forwarded unchanged to ``Base.__init__``.
        """
        if not base_url:
            base_url = self._DEFAULT_BASE_URL
        super().__init__(key, model_name, base_url, **kwargs)
|
||||
|
||||
|
||||
class LiteLLMBase(ABC):
|
||||
_FACTORY_NAME = [
|
||||
"Tongyi-Qianwen",
|
||||
|
||||
Reference in New Issue
Block a user