Refactor: replace Chat Ollama implementation with LiteLLM (#9693)

### What problem does this PR solve?

Replace the Chat Ollama implementation with LiteLLM.

### Type of change

- [x] Refactoring
This commit is contained in:
Yongteng Lei
2025-08-25 17:56:31 +08:00
committed by GitHub
parent d367c7e226
commit b6c1ca828e
2 changed files with 3 additions and 69 deletions

View File

@ -36,6 +36,7 @@ class SupportedLiteLLMProvider(StrEnum):
Nvidia = "NVIDIA"
TogetherAI = "TogetherAI"
Anthropic = "Anthropic"
Ollama = "Ollama"
FACTORY_DEFAULT_BASE_URL = {
@ -59,6 +60,7 @@ LITELLM_PROVIDER_PREFIX = {
SupportedLiteLLMProvider.Nvidia: "nvidia_nim/",
SupportedLiteLLMProvider.TogetherAI: "together_ai/",
SupportedLiteLLMProvider.Anthropic: "", # don't need a prefix
SupportedLiteLLMProvider.Ollama: "ollama_chat/",
}
ChatModel = globals().get("ChatModel", {})