Fix: ollama model list issue. (#11175)

### Type of change

- [x] Bug Fix (non-breaking change which fixes an issue)
Kevin Hu authored on 2025-11-11 19:46:41 +08:00 (committed by GitHub)
parent 6dcff7db97 · commit c30ffb5716
5 changed files with 9 additions and 9 deletions


@@ -358,7 +358,7 @@ def list_app():
for o in objs:
if o.llm_name + "@" + o.llm_factory in llm_set:
continue
-llms.append({"llm_name": o.llm_name, "model_type": o.model_type, "fid": o.llm_factory, "available": True})
+llms.append({"llm_name": o.llm_name, "model_type": o.model_type, "fid": o.llm_factory, "available": True, "status": StatusEnum.VALID.value})
res = {}
for m in llms:
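
For locally discovered models such as Ollama deployments, the entries built here previously carried no `status` field. The sketch below is illustrative only (the filter and the `StatusEnum` stand-in are assumptions, not the project's actual code): it shows how an entry without a `status` key disappears once anything downstream keeps only `StatusEnum.VALID` entries.

```python
# Illustrative sketch, not the project's code: a stand-in StatusEnum and a
# downstream filter that keeps only entries marked VALID.
from enum import Enum


class StatusEnum(Enum):
    VALID = "1"


llms = [
    # old shape: no "status" key
    {"llm_name": "qwen2:7b", "fid": "Ollama", "available": True},
    # new shape: carries the status key added by this commit
    {"llm_name": "qwen2:7b", "fid": "Ollama", "available": True, "status": StatusEnum.VALID.value},
]

# Any consumer that filters on status silently drops the first entry.
visible = [m for m in llms if m.get("status") == StatusEnum.VALID.value]
print(len(visible))  # 1
```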


@@ -101,6 +101,7 @@ def init_llm_factory():
info = deepcopy(factory_llm_info)
llm_infos = info.pop("llm")
try:
+LLMFactoriesService.filter_delete([LLMFactories.name == factory_llm_info["name"]])
LLMFactoriesService.save(**info)
except Exception:
pass
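
Deleting the factory row by name before saving makes re-initialization idempotent: the existing record for that factory is removed before the fresh metadata is written, so repeated runs neither collide with nor keep a stale row. A minimal sketch of that delete-then-save pattern, with a plain list standing in for the real `LLMFactoriesService`/`LLMFactories` table (all names and values below are illustrative):

```python
# Minimal sketch of the delete-then-save pattern; a list stands in for the
# LLMFactories table and the factory records are made up for illustration.
factories: list[dict] = []


def init_factory(info: dict) -> None:
    # ~ LLMFactoriesService.filter_delete([LLMFactories.name == info["name"]])
    factories[:] = [f for f in factories if f["name"] != info["name"]]
    # ~ LLMFactoriesService.save(**info)
    factories.append(info)


init_factory({"name": "Ollama", "tags": "LLM,TEXT EMBEDDING"})
# Re-running initialization with updated metadata replaces the old row
# instead of duplicating it or leaving it stale.
init_factory({"name": "Ollama", "tags": "LLM,TEXT EMBEDDING,TEXT RE-RANK"})
print(factories)  # [{'name': 'Ollama', 'tags': 'LLM,TEXT EMBEDDING,TEXT RE-RANK'}]
```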


@@ -846,7 +846,7 @@ def queue_raptor_o_graphrag_tasks(sample_doc_id, ty, priority, fake_doc_id="", d
"to_page": 100000000,
"task_type": ty,
"progress_msg": datetime.now().strftime("%H:%M:%S") + " created task " + ty,
-"begin_at": datetime.now(),
+"begin_at": datetime.now().strftime("%Y-%m-%d %H:%M:%S"),
}
task = new_task()
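
Formatting `begin_at` up front keeps the task payload plain-string-serializable. The snippet below only illustrates the general failure mode, under the assumption that the task dict is JSON-encoded somewhere on its way to the queue or store; the actual serialization path is not shown in this diff.

```python
# Illustration of why a raw datetime in a payload is fragile (assumes the task
# dict is JSON-encoded at some point; the real serialization path may differ).
import json
from datetime import datetime

task = {"task_type": "raptor", "begin_at": datetime.now()}
try:
    json.dumps(task)
except TypeError as err:
    print(err)  # Object of type datetime is not JSON serializable

task["begin_at"] = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
print(json.dumps(task))  # serializes cleanly
```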


@@ -97,7 +97,7 @@ class RecursiveAbstractiveProcessing4TreeOrganizedRetrieval:
async def __call__(self, chunks, random_state, callback=None, task_id: str = ""):
if len(chunks) <= 1:
return []
-chunks = [(s, a) for s, a in chunks if s and a and len(a) > 0]
+chunks = [(s, a) for s, a in chunks if s and a is not None and len(a) > 0]
layers = [(0, len(chunks))]
start, end = 0, len(chunks)
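
The second element of each chunk tuple is an embedding vector (elsewhere in this change the chunks are built as `(d["content_with_weight"], np.array(d[vctr_nm]))`), and a NumPy array cannot be used directly as a boolean, so the old `if s and a and len(a) > 0` check raises instead of filtering. A small reproduction:

```python
# A multi-element NumPy array raises in a boolean context, so `a` alone is not a
# safe emptiness check; `a is not None and len(a) > 0` is explicit and never raises.
import numpy as np

a = np.array([0.1, 0.2, 0.3])
try:
    if a:  # old-style check
        pass
except ValueError as err:
    print(err)  # The truth value of an array with more than one element is ambiguous...

if a is not None and len(a) > 0:  # new check
    print("embedding kept")
```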


@@ -647,7 +647,7 @@ async def run_raptor_for_kb(row, kb_parser_config, chat_mdl, embd_mdl, vector_si
res = []
tk_count = 0
-async def generate(chunks):
+async def generate(chunks, did):
nonlocal tk_count, res
raptor = Raptor(
raptor_config.get("max_cluster", 64),
@@ -660,7 +660,7 @@
original_length = len(chunks)
chunks = await raptor(chunks, kb_parser_config["raptor"]["random_seed"], callback, row["id"])
doc = {
-"doc_id": fake_doc_id,
+"doc_id": did,
"kb_id": [str(row["kb_id"])],
"docnm_kwd": row["name"],
"title_tks": rag_tokenizer.tokenize(row["name"]),
@@ -688,9 +688,8 @@
fields=["content_with_weight", vctr_nm],
sort_by_position=True):
chunks.append((d["content_with_weight"], np.array(d[vctr_nm])))
-callback(progress=(x+1.)/len(doc_ids))
-await generate(chunks)
+await generate(chunks, doc_id)
+callback(prog=(x+1.)/len(doc_ids))
else:
chunks = []
for doc_id in doc_ids:
@@ -699,7 +698,7 @@ async def run_raptor_for_kb(row, kb_parser_config, chat_mdl, embd_mdl, vector_si
sort_by_position=True):
chunks.append((d["content_with_weight"], np.array(d[vctr_nm])))
-await generate(chunks)
+await generate(chunks, fake_doc_id)
return res, tk_count
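
Threading the document id through `generate()` matters in the per-document branch: previously the inner helper closed over `fake_doc_id`, so every RAPTOR summary chunk was attributed to the placeholder id even when chunks were processed one document at a time. A minimal before/after sketch, using illustrative names rather than the real indexing code:

```python
# Illustrative sketch only: shows how passing the id explicitly fixes attribution,
# compared with an inner helper that always used the fallback id.
fake_doc_id = "raptor-merged"


def run(doc_ids: list[str], per_document: bool = True) -> list[dict]:
    res = []

    def generate(chunks: list[str], did: str) -> None:  # new signature: caller supplies the id
        res.append({"doc_id": did, "chunks": len(chunks)})

    if per_document:
        for doc_id in doc_ids:
            generate(["summary"], doc_id)  # each document keeps its own id
    else:
        generate(["summary"] * len(doc_ids), fake_doc_id)  # merged mode keeps the placeholder
    return res


print(run(["doc-1", "doc-2"]))
# [{'doc_id': 'doc-1', 'chunks': 1}, {'doc_id': 'doc-2', 'chunks': 1}]
```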