Mirror of https://github.com/infiniflow/ragflow.git — synced 2025-12-08 20:42:30 +08:00.
feat: support Xinference (#319)
### What problem does this PR solve? Support Xorbits Inference (Xinference) as a model provider. Issue link: #299 ### Type of change - [x] New Feature (non-breaking change which adds functionality)
This commit is contained in:
@ -132,6 +132,7 @@ export const useSelectModelProvidersLoading = () => {
|
||||
|
||||
export const useSubmitOllama = () => {
|
||||
const loading = useOneNamespaceEffectsLoading('settingModel', ['add_llm']);
|
||||
const [selectedLlmFactory, setSelectedLlmFactory] = useState<string>('');
|
||||
const addLlm = useAddLlm();
|
||||
const {
|
||||
visible: llmAddingVisible,
|
||||
@ -149,11 +150,17 @@ export const useSubmitOllama = () => {
|
||||
[hideLlmAddingModal, addLlm],
|
||||
);
|
||||
|
||||
const handleShowLlmAddingModal = (llmFactory: string) => {
|
||||
setSelectedLlmFactory(llmFactory);
|
||||
showLlmAddingModal();
|
||||
};
|
||||
|
||||
return {
|
||||
llmAddingLoading: loading,
|
||||
onLlmAddingOk,
|
||||
llmAddingVisible,
|
||||
hideLlmAddingModal,
|
||||
showLlmAddingModal,
|
||||
showLlmAddingModal: handleShowLlmAddingModal,
|
||||
selectedLlmFactory,
|
||||
};
|
||||
};
|
||||
|
||||
Reference in New Issue
Block a user