From e9e39d57cea43d482bfd8cb0ee7336a32e058ae1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E9=BB=84=E8=85=BE?= <101850389+hangters@users.noreply.github.com> Date: Mon, 12 Aug 2024 11:06:25 +0800 Subject: [PATCH] add support for Upstage (#1902) ### What problem does this PR solve? #1853 add support for Upstage ### Type of change - [x] New Feature (non-breaking change which adds functionality) Co-authored-by: Zhedong Cen --- conf/llm_factories.json | 32 +++++++++++++++++++ rag/llm/__init__.py | 4 ++- rag/llm/chat_model.py | 7 ++++ rag/llm/embedding_model.py | 8 ++++- web/src/assets/svg/llm/upstage.svg | 21 ++++++++++++ .../user-setting/setting-model/constant.ts | 3 +- 6 files changed, 72 insertions(+), 3 deletions(-) create mode 100644 web/src/assets/svg/llm/upstage.svg diff --git a/conf/llm_factories.json b/conf/llm_factories.json index cb8020c14..64982895f 100644 --- a/conf/llm_factories.json +++ b/conf/llm_factories.json @@ -2601,6 +2601,38 @@ "model_type": "embedding" } ] + }, + { + "name": "Upstage", + "logo": "", + "tags": "LLM,TEXT EMBEDDING", + "status": "1", + "llm": [ + { + "llm_name": "solar-1-mini-chat", + "tags": "LLM,CHAT,32k", + "max_tokens": 32768, + "model_type": "chat" + }, + { + "llm_name": "solar-1-mini-chat-ja", + "tags": "LLM,CHAT,32k", + "max_tokens": 32768, + "model_type": "chat" + }, + { + "llm_name": "solar-embedding-1-large-query", + "tags": "TEXT EMBEDDING", + "max_tokens": 4000, + "model_type": "embedding" + }, + { + "llm_name": "solar-embedding-1-large-passage", + "tags": "TEXT EMBEDDING", + "max_tokens": 4000, + "model_type": "embedding" + } + ] } ] } diff --git a/rag/llm/__init__.py b/rag/llm/__init__.py index a6e2aa417..7d1d1731c 100644 --- a/rag/llm/__init__.py +++ b/rag/llm/__init__.py @@ -41,6 +41,7 @@ EmbeddingModel = { "cohere": CoHereEmbed, "TogetherAI": TogetherAIEmbed, "PerfXCloud": PerfXCloudEmbed, + "Upstage": UpstageEmbed } @@ -89,7 +90,8 @@ ChatModel = { "cohere": CoHereChat, "LeptonAI": LeptonAIChat, "TogetherAI": 
TogetherAIChat, - "PerfXCloud": PerfXCloudChat + "PerfXCloud": PerfXCloudChat, + "Upstage": UpstageChat } diff --git a/rag/llm/chat_model.py b/rag/llm/chat_model.py index 256e0f0ba..89dbfc304 100644 --- a/rag/llm/chat_model.py +++ b/rag/llm/chat_model.py @@ -1002,3 +1002,10 @@ class PerfXCloudChat(Base): if not base_url: base_url = "https://cloud.perfxlab.cn/v1" super().__init__(key, model_name, base_url) + + +class UpstageChat(Base): + def __init__(self, key, model_name, base_url="https://api.upstage.ai/v1/solar"): + if not base_url: + base_url = "https://api.upstage.ai/v1/solar" + super().__init__(key, model_name, base_url) diff --git a/rag/llm/embedding_model.py b/rag/llm/embedding_model.py index c663a762e..db5f450a4 100644 --- a/rag/llm/embedding_model.py +++ b/rag/llm/embedding_model.py @@ -567,4 +567,10 @@ class PerfXCloudEmbed(OpenAIEmbed): if not base_url: base_url = "https://cloud.perfxlab.cn/v1" super().__init__(key, model_name, base_url) - \ No newline at end of file + + +class UpstageEmbed(OpenAIEmbed): + def __init__(self, key, model_name, base_url="https://api.upstage.ai/v1/solar"): + if not base_url: + base_url = "https://api.upstage.ai/v1/solar" + super().__init__(key, model_name, base_url) diff --git a/web/src/assets/svg/llm/upstage.svg b/web/src/assets/svg/llm/upstage.svg new file mode 100644 index 000000000..584be623a --- /dev/null +++ b/web/src/assets/svg/llm/upstage.svg @@ -0,0 +1,21 @@ + + + + + + + + + + + + + + + + + + + + + diff --git a/web/src/pages/user-setting/setting-model/constant.ts b/web/src/pages/user-setting/setting-model/constant.ts index 2fa10d554..ab4863954 100644 --- a/web/src/pages/user-setting/setting-model/constant.ts +++ b/web/src/pages/user-setting/setting-model/constant.ts @@ -26,7 +26,8 @@ export const IconMap = { cohere: 'cohere', Lepton: 'lepton', TogetherAI:'together-ai', - PerfXCould: 'perfx-could' + PerfXCould: 'perfx-could', + Upstage: 'upstage' }; export const BedrockRegionList = [