Mirror of https://github.com/infiniflow/ragflow.git, synced 2025-12-08 20:42:30 +08:00
Fix: ollama model list issue. (#11175)
### Type of change

- [x] Bug Fix (non-breaking change which fixes an issue)
@@ -358,7 +358,7 @@ def list_app():
     for o in objs:
         if o.llm_name + "@" + o.llm_factory in llm_set:
             continue
-        llms.append({"llm_name": o.llm_name, "model_type": o.model_type, "fid": o.llm_factory, "available": True})
+        llms.append({"llm_name": o.llm_name, "model_type": o.model_type, "fid": o.llm_factory, "available": True, "status": StatusEnum.VALID.value})

     res = {}
     for m in llms:
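The added `status` key gives locally configured Ollama models the same shape as the factory-default entries, so downstream code that filters on `status` no longer trips over a missing key. A minimal sketch of the effect, with an illustrative `StatusEnum` modeled on ragflow's (the enum values here are assumptions, not quoted from the repo):

```python
from enum import Enum

class StatusEnum(Enum):  # illustrative; modeled on ragflow's api/db StatusEnum
    VALID = "1"
    INVALID = "0"

llms = [
    {"llm_name": "llama3", "model_type": "chat", "fid": "Ollama",
     "available": True, "status": StatusEnum.VALID.value},
]
# A consumer that keys on "status" no longer raises KeyError for
# locally added Ollama models, and filtering stays uniform:
visible = [m for m in llms if m["status"] == StatusEnum.VALID.value]
print(visible)
```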
@@ -101,6 +101,7 @@ def init_llm_factory():
         info = deepcopy(factory_llm_info)
         llm_infos = info.pop("llm")
         try:
+            LLMFactoriesService.filter_delete([LLMFactories.name == factory_llm_info["name"]])
             LLMFactoriesService.save(**info)
         except Exception:
             pass
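Deleting any existing row before `save` turns factory initialization into an idempotent replace: re-running it refreshes a stale record instead of hitting a uniqueness conflict that the bare `except` would silently swallow. A rough, self-contained equivalent in plain peewee, assuming `LLMFactories` is a peewee model with a unique `name` (a sketch of the pattern, not the service-layer API):

```python
from peewee import CharField, Model, SqliteDatabase

db = SqliteDatabase(":memory:")

class LLMFactories(Model):  # stand-in for ragflow's model
    name = CharField(unique=True)
    logo = CharField(default="")

    class Meta:
        database = db

db.create_tables([LLMFactories])

def replace_factory(info: dict) -> None:
    # Drop any stale row first (a no-op when absent), then insert fresh,
    # so re-running initialization updates instead of raising on the
    # unique name.
    LLMFactories.delete().where(LLMFactories.name == info["name"]).execute()
    LLMFactories.create(**info)

replace_factory({"name": "Ollama", "logo": "old.svg"})
replace_factory({"name": "Ollama", "logo": "new.svg"})  # idempotent re-run
```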
@@ -846,7 +846,7 @@ def queue_raptor_o_graphrag_tasks(sample_doc_id, ty, priority, fake_doc_id="", d
         "to_page": 100000000,
         "task_type": ty,
         "progress_msg": datetime.now().strftime("%H:%M:%S") + " created task " + ty,
-        "begin_at": datetime.now(),
+        "begin_at": datetime.now().strftime("%Y-%m-%d %H:%M:%S"),
     }

     task = new_task()
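Formatting `begin_at` as a string likely matters because the task dict is serialized on its way to the queue, and a raw `datetime` is not JSON-serializable. The failure mode is easy to reproduce:

```python
import json
from datetime import datetime

task = {"begin_at": datetime.now()}
try:
    json.dumps(task)
except TypeError as e:
    print(e)  # Object of type datetime is not JSON serializable

task["begin_at"] = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
json.dumps(task)  # now fine, e.g. '{"begin_at": "2025-12-08 20:42:30"}'
```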
@@ -97,7 +97,7 @@ class RecursiveAbstractiveProcessing4TreeOrganizedRetrieval:
     async def __call__(self, chunks, random_state, callback=None, task_id: str = ""):
         if len(chunks) <= 1:
             return []
-        chunks = [(s, a) for s, a in chunks if s and a and len(a) > 0]
+        chunks = [(s, a) for s, a in chunks if s and a is not None and len(a) > 0]
         layers = [(0, len(chunks))]
         start, end = 0, len(chunks)

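`a` here is an embedding vector, typically a numpy array, and arrays refuse plain truth-testing: `if a` raises instead of answering. `a is not None` sidesteps that while still guarding the `len(a)` call:

```python
import numpy as np

a = np.array([0.1, 0.2, 0.3])
try:
    bool(a)  # what the old "if s and a and ..." predicate evaluates
except ValueError as e:
    print(e)  # The truth value of an array with more than one element
              # is ambiguous. Use a.any() or a.all()

# The fixed predicate never truth-tests the array itself:
ok = a is not None and len(a) > 0  # True
```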
@@ -647,7 +647,7 @@ async def run_raptor_for_kb(row, kb_parser_config, chat_mdl, embd_mdl, vector_si

     res = []
     tk_count = 0
-    async def generate(chunks):
+    async def generate(chunks, did):
         nonlocal tk_count, res
         raptor = Raptor(
             raptor_config.get("max_cluster", 64),
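Passing the document id into `generate` as a parameter (`did`), instead of letting the nested coroutine always use the outer `fake_doc_id`, is what allows the per-document branch below to stamp each batch with its own `doc_id` while the combined branch keeps `fake_doc_id`. A self-contained sketch of the pattern, with illustrative names only:

```python
import asyncio

async def run(doc_ids):
    res = []

    # The owning document id is a per-call argument rather than a value
    # the closure captures once, so each batch is stamped correctly.
    async def generate(chunks, did):
        res.extend({"doc_id": did, "content": c} for c in chunks)

    for did in doc_ids:
        await generate([f"summary of {did}"], did)
    return res

print(asyncio.run(run(["doc-a", "doc-b"])))
# [{'doc_id': 'doc-a', ...}, {'doc_id': 'doc-b', ...}]
```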
@@ -660,7 +660,7 @@ async def run_raptor_for_kb(row, kb_parser_config, chat_mdl, embd_mdl, vector_si
         original_length = len(chunks)
         chunks = await raptor(chunks, kb_parser_config["raptor"]["random_seed"], callback, row["id"])
         doc = {
-            "doc_id": fake_doc_id,
+            "doc_id": did,
             "kb_id": [str(row["kb_id"])],
             "docnm_kwd": row["name"],
             "title_tks": rag_tokenizer.tokenize(row["name"]),
@@ -688,9 +688,8 @@ async def run_raptor_for_kb(row, kb_parser_config, chat_mdl, embd_mdl, vector_si
                 fields=["content_with_weight", vctr_nm],
                 sort_by_position=True):
                 chunks.append((d["content_with_weight"], np.array(d[vctr_nm])))
-            callback(progress=(x+1.)/len(doc_ids))
-            await generate(chunks)
-
+            await generate(chunks, doc_id)
+            callback(prog=(x+1.)/len(doc_ids))
     else:
         chunks = []
         for doc_id in doc_ids:
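Two things change at this call site: the summary generation now runs before the progress callback, so the reported progress reflects completed work for that document, and the callback keyword is corrected from `progress=` to `prog=`. Assuming the callback follows ragflow's `set_progress`-style signature, the old keyword was a hard error, as this illustrative stub shows:

```python
def callback(prog=None, msg=""):
    # Illustrative stand-in for the task progress callback; the real
    # set_progress-style callbacks take "prog", not "progress".
    print(f"progress={prog} msg={msg!r}")

callback(prog=0.5)            # OK
try:
    callback(progress=0.5)    # the pre-fix spelling
except TypeError as e:
    print(e)                  # unexpected keyword argument 'progress'
```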
@@ -699,7 +698,7 @@ async def run_raptor_for_kb(row, kb_parser_config, chat_mdl, embd_mdl, vector_si
                 sort_by_position=True):
                 chunks.append((d["content_with_weight"], np.array(d[vctr_nm])))

-        await generate(chunks)
+        await generate(chunks, fake_doc_id)

     return res, tk_count
