Mirror of https://github.com/infiniflow/ragflow.git (synced 2026-01-04 03:25:30 +08:00)

Compare commits: 9 commits, 4ec6a4e493...ff2c70608d
| SHA1 |
|---|
| ff2c70608d |
| 5903d1c8f1 |
| f0392e7501 |
| 4037788e0c |
| 59884ab0fb |
| 4a6d37f0e8 |
| 731e2d5f26 |
| df3cbb9b9e |
| 5402666b19 |
@@ -233,7 +233,7 @@ releases! 🌟
* Running on all addresses (0.0.0.0)
```

> If you skip this confirmation step and directly log in to RAGFlow, your browser may prompt a `network anormal`
> If you skip this confirmation step and directly log in to RAGFlow, your browser may prompt a `network abnormal`
> error because, at that moment, your RAGFlow may not be fully initialized.
>
5. In your web browser, enter the IP address of your server and log in to RAGFlow.

@@ -233,7 +233,7 @@ Coba demo kami di [https://demo.ragflow.io](https://demo.ragflow.io).
* Running on all addresses (0.0.0.0)
```

> Jika Anda melewatkan langkah ini dan langsung login ke RAGFlow, browser Anda mungkin menampilkan error `network anormal`
> Jika Anda melewatkan langkah ini dan langsung login ke RAGFlow, browser Anda mungkin menampilkan error `network abnormal`
> karena RAGFlow mungkin belum sepenuhnya siap.
>
2. Buka browser web Anda, masukkan alamat IP server Anda, dan login ke RAGFlow.

@@ -214,7 +214,7 @@
* Running on all addresses (0.0.0.0)
```

> 만약 확인 단계를 건너뛰고 바로 RAGFlow에 로그인하면, RAGFlow가 완전히 초기화되지 않았기 때문에 브라우저에서 `network anormal` 오류가 발생할 수 있습니다.
> 만약 확인 단계를 건너뛰고 바로 RAGFlow에 로그인하면, RAGFlow가 완전히 초기화되지 않았기 때문에 브라우저에서 `network abnormal` 오류가 발생할 수 있습니다.

2. 웹 브라우저에 서버의 IP 주소를 입력하고 RAGFlow에 로그인하세요.
> 기본 설정을 사용할 경우, `http://IP_OF_YOUR_MACHINE`만 입력하면 됩니다 (포트 번호는 제외). 기본 HTTP 서비스 포트 `80`은 기본 구성으로 사용할 때 생략할 수 있습니다.

@@ -232,7 +232,7 @@ Experimente nossa demo em [https://demo.ragflow.io](https://demo.ragflow.io).
* Rodando em todos os endereços (0.0.0.0)
```

> Se você pular essa etapa de confirmação e acessar diretamente o RAGFlow, seu navegador pode exibir um erro `network anormal`, pois, nesse momento, seu RAGFlow pode não estar totalmente inicializado.
> Se você pular essa etapa de confirmação e acessar diretamente o RAGFlow, seu navegador pode exibir um erro `network abnormal`, pois, nesse momento, seu RAGFlow pode não estar totalmente inicializado.
>
5. No seu navegador, insira o endereço IP do seu servidor e faça login no RAGFlow.

@@ -125,7 +125,7 @@

### 🍔 **相容各類異質資料來源**

- 支援豐富的文件類型，包括 Word 文件、PPT、excel 表格、txt 檔案、圖片、PDF、影印件、影印件、結構化資料、網頁等。
- 支援豐富的文件類型，包括 Word 文件、PPT、excel 表格、txt 檔案、圖片、PDF、影印件、複印件、結構化資料、網頁等。

### 🛀 **全程無憂、自動化的 RAG 工作流程**

@@ -237,7 +237,7 @@
* Running on all addresses (0.0.0.0)
```

> 如果您跳過這一步驟系統確認步驟就登入 RAGFlow，你的瀏覽器有可能會提示 `network anormal` 或 `網路異常`，因為 RAGFlow 可能並未完全啟動成功。
> 如果您跳過這一步驟系統確認步驟就登入 RAGFlow，你的瀏覽器有可能會提示 `network abnormal` 或 `網路異常`，因為 RAGFlow 可能並未完全啟動成功。
>
5. 在你的瀏覽器中輸入你的伺服器對應的 IP 位址並登入 RAGFlow。

@@ -238,7 +238,7 @@
* Running on all addresses (0.0.0.0)
```

> 如果您在没有看到上面的提示信息出来之前，就尝试登录 RAGFlow，你的浏览器有可能会提示 `network anormal` 或 `网络异常`。
> 如果您在没有看到上面的提示信息出来之前，就尝试登录 RAGFlow，你的浏览器有可能会提示 `network abnormal` 或 `网络异常`。

5. 在你的浏览器中输入你的服务器对应的 IP 地址并登录 RAGFlow。
> 上面这个例子中，您只需输入 http://IP_OF_YOUR_MACHINE 即可：未改动过配置则无需输入端口（默认的 HTTP 服务端口 80）。

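Most of the README hunks above make the same one-word fix to the startup note, changing `network anormal` to `network abnormal` across the localized READMEs (English, Indonesian, Korean, Portuguese, Simplified and Traditional Chinese); the Traditional Chinese hunk additionally repairs a duplicated word in the supported file-type list. The readiness check those notes describe can also be scripted instead of watching `docker logs`. Below is a minimal sketch, not part of this changeset, assuming a plain-HTTP deployment on the default port 80:

```python
# Illustrative readiness probe; not part of the RAGFlow repository.
import time
import urllib.error
import urllib.request


def wait_for_ragflow(host: str, timeout_s: int = 300) -> bool:
    """Poll http://<host>/ until the web server answers or the timeout expires."""
    deadline = time.monotonic() + timeout_s
    url = f"http://{host}/"
    while time.monotonic() < deadline:
        try:
            with urllib.request.urlopen(url, timeout=5):
                return True  # got an HTTP response; the web server is up
        except urllib.error.HTTPError:
            return True  # an HTTP error page still means the server answered
        except (urllib.error.URLError, OSError):
            pass  # connection refused or reset; still starting up
        time.sleep(3)
    return False


if __name__ == "__main__":
    print("ready" if wait_for_ragflow("IP_OF_YOUR_MACHINE") else "timed out")
```

A successful HTTP response only shows that the web server is reachable; the `* Running on all addresses (0.0.0.0)` banner in the logs remains the authoritative signal that initialization finished.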
@@ -33,7 +33,7 @@ from common.connection_utils import timeout
from common.misc_utils import get_uuid
from common import settings

from api.db.joint_services.memory_message_service import save_to_memory
from api.db.joint_services.memory_message_service import queue_save_to_memory_task


class MessageParam(ComponentParamBase):
@@ -437,17 +437,4 @@ class Message(ComponentBase):
            "user_input": self._canvas.get_sys_query(),
            "agent_response": content
        }
        res = []
        for memory_id in self._param.memory_ids:
            success, msg = await save_to_memory(memory_id, message_dict)
            res.append({
                "memory_id": memory_id,
                "success": success,
                "msg": msg
            })
        if all([r["success"] for r in res]):
            return True, "Successfully added to memories."

        error_text = "Some messages failed to add. " + " ".join([f"Add to memory {r['memory_id']} failed, detail: {r['msg']}" for r in res if not r["success"]])
        logging.error(error_text)
        return False, error_text
        return await queue_save_to_memory_task(self._param.memory_ids, message_dict)

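The second hunk above swaps the component's inline per-memory loop (awaiting `save_to_memory` for each id and aggregating results) for a single call to `queue_save_to_memory_task`, which persists the raw turn and queues the LLM extraction as a background task. A minimal sketch of the new call site; the component and canvas accessors for the identifier fields are hypothetical placeholders inferred from the service's docstring, not taken from this hunk:

```python
# Illustrative call site; `component` stands in for a Message component instance.
from api.db.joint_services.memory_message_service import queue_save_to_memory_task


async def persist_turn(component, content: str) -> tuple[bool, str]:
    message_dict = {
        "user_id": "",                                       # per the service docstring
        "agent_id": component._canvas.get_agent_id(),        # hypothetical accessor
        "session_id": component._canvas.get_session_id(),    # hypothetical accessor
        "user_input": component._canvas.get_sys_query(),
        "agent_response": content,
    }
    # One awaited call replaces the old save_to_memory() loop; embedding and
    # extraction now run in the task executor instead of the agent flow.
    return await queue_save_to_memory_task(component._param.memory_ids, message_dict)
```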
@@ -159,7 +159,8 @@ async def delete_memory(memory_id):
        return get_json_result(message=True, code=RetCode.NOT_FOUND)
    try:
        MemoryService.delete_memory(memory_id)
        MessageService.delete_message({"memory_id": memory_id}, memory.tenant_id, memory_id)
        if MessageService.has_index(memory.tenant_id, memory_id):
            MessageService.delete_message({"memory_id": memory_id}, memory.tenant_id, memory_id)
        return get_json_result(message=True)
    except Exception as e:
        logging.error(e)

@@ -177,7 +177,7 @@ def healthz():
    return jsonify(result), (200 if all_ok else 500)


@manager.route("/ping", methods=["GET"])  # noqa: F821
@manager.route("/ping", methods=["GET"])  # noqa: F821
def ping():
    return "pong", 200

@@ -213,7 +213,7 @@ def new_token():
        if not tenants:
            return get_data_error_result(message="Tenant not found!")

        tenant_id = [tenant for tenant in tenants if tenant.role == 'owner'][0].tenant_id
        tenant_id = [tenant for tenant in tenants if tenant.role == "owner"][0].tenant_id
        obj = {
            "tenant_id": tenant_id,
            "token": generate_confirmation_token(),
@@ -268,13 +268,12 @@ def token_list():
        if not tenants:
            return get_data_error_result(message="Tenant not found!")

        tenant_id = [tenant for tenant in tenants if tenant.role == 'owner'][0].tenant_id
        tenant_id = [tenant for tenant in tenants if tenant.role == "owner"][0].tenant_id
        objs = APITokenService.query(tenant_id=tenant_id)
        objs = [o.to_dict() for o in objs]
        for o in objs:
            if not o["beta"]:
                o["beta"] = generate_confirmation_token().replace(
                    "ragflow-", "")[:32]
                o["beta"] = generate_confirmation_token().replace("ragflow-", "")[:32]
                APITokenService.filter_update([APIToken.tenant_id == tenant_id, APIToken.token == o["token"]], o)
        return get_json_result(data=objs)
    except Exception as e:
@@ -307,13 +306,19 @@ def rm(token):
            type: boolean
            description: Deletion status.
    """
    APITokenService.filter_delete(
        [APIToken.tenant_id == current_user.id, APIToken.token == token]
    )
    return get_json_result(data=True)
    try:
        tenants = UserTenantService.query(user_id=current_user.id)
        if not tenants:
            return get_data_error_result(message="Tenant not found!")

        tenant_id = tenants[0].tenant_id
        APITokenService.filter_delete([APIToken.tenant_id == tenant_id, APIToken.token == token])
        return get_json_result(data=True)
    except Exception as e:
        return server_error_response(e)


@manager.route('/config', methods=['GET'])  # noqa: F821
@manager.route("/config", methods=["GET"])  # noqa: F821
def get_config():
    """
    Get system configuration.
@@ -330,6 +335,4 @@ def get_config():
            type: integer 0 means disabled, 1 means enabled
            description: Whether user registration is enabled
    """
    return get_json_result(data={
        "registerEnabled": settings.REGISTER_ENABLED
    })
    return get_json_result(data={"registerEnabled": settings.REGISTER_ENABLED})

@@ -16,9 +16,14 @@
import logging
from typing import List

from api.db.services.task_service import TaskService
from common import settings
from common.time_utils import current_timestamp, timestamp_to_date, format_iso_8601_to_ymd_hms
from common.constants import MemoryType, LLMType
from common.doc_store.doc_store_base import FusionExpr
from common.misc_utils import get_uuid
from api.db.db_utils import bulk_insert_into_db
from api.db.db_models import Task
from api.db.services.memory_service import MemoryService
from api.db.services.tenant_llm_service import TenantLLMService
from api.db.services.llm_service import LLMBundle
@@ -82,32 +87,44 @@ async def save_to_memory(memory_id: str, message_dict: dict):
|
||||
"forget_at": None,
|
||||
"status": True
|
||||
} for content in extracted_content]]
|
||||
embedding_model = LLMBundle(tenant_id, llm_type=LLMType.EMBEDDING, llm_name=memory.embd_id)
|
||||
vector_list, _ = embedding_model.encode([msg["content"] for msg in message_list])
|
||||
for idx, msg in enumerate(message_list):
|
||||
msg["content_embed"] = vector_list[idx]
|
||||
vector_dimension = len(vector_list[0])
|
||||
if not MessageService.has_index(tenant_id, memory_id):
|
||||
created = MessageService.create_index(tenant_id, memory_id, vector_size=vector_dimension)
|
||||
if not created:
|
||||
return False, "Failed to create message index."
|
||||
return await embed_and_save(memory, message_list)
|
||||
|
||||
new_msg_size = sum([MessageService.calculate_message_size(m) for m in message_list])
|
||||
current_memory_size = get_memory_size_cache(memory_id, tenant_id)
|
||||
if new_msg_size + current_memory_size > memory.memory_size:
|
||||
size_to_delete = current_memory_size + new_msg_size - memory.memory_size
|
||||
if memory.forgetting_policy == "FIFO":
|
||||
message_ids_to_delete, delete_size = MessageService.pick_messages_to_delete_by_fifo(memory_id, tenant_id, size_to_delete)
|
||||
MessageService.delete_message({"message_id": message_ids_to_delete}, tenant_id, memory_id)
|
||||
decrease_memory_size_cache(memory_id, delete_size)
|
||||
else:
|
||||
return False, "Failed to insert message into memory. Memory size reached limit and cannot decide which to delete."
|
||||
fail_cases = MessageService.insert_message(message_list, tenant_id, memory_id)
|
||||
if fail_cases:
|
||||
return False, "Failed to insert message into memory. Details: " + "; ".join(fail_cases)
|
||||
|
||||
increase_memory_size_cache(memory_id, new_msg_size)
|
||||
return True, "Message saved successfully."
|
||||
async def save_extracted_to_memory_only(memory_id: str, message_dict, source_message_id: int):
|
||||
memory = MemoryService.get_by_memory_id(memory_id)
|
||||
if not memory:
|
||||
return False, f"Memory '{memory_id}' not found."
|
||||
|
||||
if memory.memory_type == MemoryType.RAW.value:
|
||||
return True, f"Memory '{memory_id}' don't need to extract."
|
||||
|
||||
tenant_id = memory.tenant_id
|
||||
extracted_content = await extract_by_llm(
|
||||
tenant_id,
|
||||
memory.llm_id,
|
||||
{"temperature": memory.temperature},
|
||||
get_memory_type_human(memory.memory_type),
|
||||
message_dict.get("user_input", ""),
|
||||
message_dict.get("agent_response", "")
|
||||
)
|
||||
message_list = [{
|
||||
"message_id": REDIS_CONN.generate_auto_increment_id(namespace="memory"),
|
||||
"message_type": content["message_type"],
|
||||
"source_id": source_message_id,
|
||||
"memory_id": memory_id,
|
||||
"user_id": "",
|
||||
"agent_id": message_dict["agent_id"],
|
||||
"session_id": message_dict["session_id"],
|
||||
"content": content["content"],
|
||||
"valid_at": content["valid_at"],
|
||||
"invalid_at": content["invalid_at"] if content["invalid_at"] else None,
|
||||
"forget_at": None,
|
||||
"status": True
|
||||
} for content in extracted_content]
|
||||
if not message_list:
|
||||
return True, "No memory extracted from raw message."
|
||||
|
||||
return await embed_and_save(memory, message_list)
|
||||
|
||||
|
||||
async def extract_by_llm(tenant_id: str, llm_id: str, extract_conf: dict, memory_type: List[str], user_input: str,
|
||||
@@ -136,6 +153,36 @@ async def extract_by_llm(tenant_id: str, llm_id: str, extract_conf: dict, memory
|
||||
} for message_type, extracted_content_list in res_json.items() for extracted_content in extracted_content_list]
|
||||
|
||||
|
||||
async def embed_and_save(memory, message_list: list[dict]):
|
||||
embedding_model = LLMBundle(memory.tenant_id, llm_type=LLMType.EMBEDDING, llm_name=memory.embd_id)
|
||||
vector_list, _ = embedding_model.encode([msg["content"] for msg in message_list])
|
||||
for idx, msg in enumerate(message_list):
|
||||
msg["content_embed"] = vector_list[idx]
|
||||
vector_dimension = len(vector_list[0])
|
||||
if not MessageService.has_index(memory.tenant_id, memory.id):
|
||||
created = MessageService.create_index(memory.tenant_id, memory.id, vector_size=vector_dimension)
|
||||
if not created:
|
||||
return False, "Failed to create message index."
|
||||
|
||||
new_msg_size = sum([MessageService.calculate_message_size(m) for m in message_list])
|
||||
current_memory_size = get_memory_size_cache(memory.tenant_id, memory.id)
|
||||
if new_msg_size + current_memory_size > memory.memory_size:
|
||||
size_to_delete = current_memory_size + new_msg_size - memory.memory_size
|
||||
if memory.forgetting_policy == "FIFO":
|
||||
message_ids_to_delete, delete_size = MessageService.pick_messages_to_delete_by_fifo(memory.id, memory.tenant_id,
|
||||
size_to_delete)
|
||||
MessageService.delete_message({"message_id": message_ids_to_delete}, memory.tenant_id, memory.id)
|
||||
decrease_memory_size_cache(memory.id, delete_size)
|
||||
else:
|
||||
return False, "Failed to insert message into memory. Memory size reached limit and cannot decide which to delete."
|
||||
fail_cases = MessageService.insert_message(message_list, memory.tenant_id, memory.id)
|
||||
if fail_cases:
|
||||
return False, "Failed to insert message into memory. Details: " + "; ".join(fail_cases)
|
||||
|
||||
increase_memory_size_cache(memory.id, new_msg_size)
|
||||
return True, "Message saved successfully."
|
||||
|
||||
|
||||
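The size-budget check inside `embed_and_save` above is easier to follow with concrete numbers. A standalone sketch with made-up sizes; the real values come from `MessageService.calculate_message_size` and the Redis-backed size cache:

```python
# Worked example of the forgetting-budget arithmetic used in embed_and_save().
memory_limit = 1000          # memory.memory_size
current_memory_size = 900    # get_memory_size_cache(...)
new_msg_size = 250           # sum of calculate_message_size(...) over incoming messages

overflow = current_memory_size + new_msg_size - memory_limit
if overflow > 0:
    # With a FIFO forgetting policy, at least `overflow` units of the oldest
    # messages are picked for deletion before the new ones are inserted;
    # with any other policy the save is rejected outright.
    size_to_delete = overflow
    print(f"must forget at least {size_to_delete} units")  # -> 150
```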
def query_message(filter_dict: dict, params: dict):
|
||||
"""
|
||||
:param filter_dict: {
|
||||
@@ -231,3 +278,112 @@ def init_memory_size_cache():
|
||||
def judge_system_prompt_is_default(system_prompt: str, memory_type: int|list[str]):
|
||||
memory_type_list = memory_type if isinstance(memory_type, list) else get_memory_type_human(memory_type)
|
||||
return system_prompt == PromptAssembler.assemble_system_prompt({"memory_type": memory_type_list})
|
||||
|
||||
|
||||
async def queue_save_to_memory_task(memory_ids: list[str], message_dict: dict):
|
||||
"""
|
||||
:param memory_ids:
|
||||
:param message_dict: {
|
||||
"user_id": str,
|
||||
"agent_id": str,
|
||||
"session_id": str,
|
||||
"user_input": str,
|
||||
"agent_response": str
|
||||
}
|
||||
"""
|
||||
def new_task(_memory_id: str, _source_id: int):
|
||||
return {
|
||||
"id": get_uuid(),
|
||||
"doc_id": _memory_id,
|
||||
"task_type": "memory",
|
||||
"progress": 0.0,
|
||||
"digest": str(_source_id)
|
||||
}
|
||||
|
||||
not_found_memory = []
|
||||
failed_memory = []
|
||||
for memory_id in memory_ids:
|
||||
memory = MemoryService.get_by_memory_id(memory_id)
|
||||
if not memory:
|
||||
not_found_memory.append(memory_id)
|
||||
continue
|
||||
|
||||
raw_message_id = REDIS_CONN.generate_auto_increment_id(namespace="memory")
|
||||
raw_message = {
|
||||
"message_id": raw_message_id,
|
||||
"message_type": MemoryType.RAW.name.lower(),
|
||||
"source_id": 0,
|
||||
"memory_id": memory_id,
|
||||
"user_id": "",
|
||||
"agent_id": message_dict["agent_id"],
|
||||
"session_id": message_dict["session_id"],
|
||||
"content": f"User Input: {message_dict.get('user_input')}\nAgent Response: {message_dict.get('agent_response')}",
|
||||
"valid_at": timestamp_to_date(current_timestamp()),
|
||||
"invalid_at": None,
|
||||
"forget_at": None,
|
||||
"status": True
|
||||
}
|
||||
res, msg = await embed_and_save(memory, [raw_message])
|
||||
if not res:
|
||||
failed_memory.append({"memory_id": memory_id, "fail_msg": msg})
|
||||
continue
|
||||
|
||||
task = new_task(memory_id, raw_message_id)
|
||||
bulk_insert_into_db(Task, [task], replace_on_conflict=True)
|
||||
task_message = {
|
||||
"id": task["id"],
|
||||
"task_id": task["id"],
|
||||
"task_type": task["task_type"],
|
||||
"memory_id": memory_id,
|
||||
"source_id": raw_message_id,
|
||||
"message_dict": message_dict
|
||||
}
|
||||
if not REDIS_CONN.queue_product(settings.get_svr_queue_name(priority=0), message=task_message):
|
||||
failed_memory.append({"memory_id": memory_id, "fail_msg": "Can't access Redis."})
|
||||
|
||||
error_msg = ""
|
||||
if not_found_memory:
|
||||
error_msg = f"Memory {not_found_memory} not found."
|
||||
if failed_memory:
|
||||
error_msg += "".join([f"Memory {fm['memory_id']} failed. Detail: {fm['fail_msg']}" for fm in failed_memory])
|
||||
|
||||
if error_msg:
|
||||
return False, error_msg
|
||||
|
||||
return True, "All add to task."
|
||||
|
||||
|
||||
async def handle_save_to_memory_task(task_param: dict):
|
||||
"""
|
||||
:param task_param: {
|
||||
"id": task_id
|
||||
"memory_id": id
|
||||
"source_id": id
|
||||
"message_dict": {
|
||||
"user_id": str,
|
||||
"agent_id": str,
|
||||
"session_id": str,
|
||||
"user_input": str,
|
||||
"agent_response": str
|
||||
}
|
||||
}
|
||||
"""
|
||||
_, task = TaskService.get_by_id(task_param["id"])
|
||||
if not task:
|
||||
return False, f"Task {task_param['id']} is not found."
|
||||
if task.progress == -1:
|
||||
return False, f"Task {task_param['id']} is already failed."
|
||||
now_time = current_timestamp()
|
||||
TaskService.update_by_id(task_param["id"], {"begin_at": timestamp_to_date(now_time)})
|
||||
|
||||
memory_id = task_param["memory_id"]
|
||||
source_id = task_param["source_id"]
|
||||
message_dict = task_param["message_dict"]
|
||||
success, msg = await save_extracted_to_memory_only(memory_id, message_dict, source_id)
|
||||
if success:
|
||||
TaskService.update_progress(task.id, {"progress": 1.0, "progress_msg": msg})
|
||||
return True, msg
|
||||
|
||||
logging.error(msg)
|
||||
TaskService.update_progress(task.id, {"progress": -1, "progress_msg": None})
|
||||
return False, msg
|
||||
|
||||
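`handle_save_to_memory_task` above is the task-executor entry point for the payloads that `queue_save_to_memory_task` pushes onto the Redis queue. The consumer loop itself is not part of this diff; the sketch below is an assumption-labeled stand-in that relies only on the documented payload shape and on the `(success, msg)` return value:

```python
# Illustrative consumer loop; fetch_next_task() is a hypothetical stand-in for
# however the task executor actually pulls messages off the Redis queue.
import asyncio

from api.db.joint_services.memory_message_service import handle_save_to_memory_task


async def memory_worker(fetch_next_task):
    while True:
        task_param = await fetch_next_task()  # dicts shaped like the docstring above
        if task_param is None:
            await asyncio.sleep(1)
            continue
        if task_param.get("task_type") != "memory":
            continue  # other task types are handled by other executors
        ok, msg = await handle_save_to_memory_task(task_param)
        if not ok:
            # handle_save_to_memory_task already logs and marks the task failed
            print(f"memory task {task_param['id']} failed: {msg}")
```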
@@ -130,6 +130,7 @@ class FileSource(StrEnum):
    GOOGLE_CLOUD_STORAGE = "google_cloud_storage"
    AIRTABLE = "airtable"
    ASANA = "asana"
    GITHUB = "github"
    GITLAB = "gitlab"

class PipelineTaskType(StrEnum):
@@ -138,6 +139,7 @@ class PipelineTaskType(StrEnum):
    RAPTOR = "RAPTOR"
    GRAPH_RAG = "GraphRAG"
    MINDMAP = "Mindmap"
    MEMORY = "Memory"


VALID_PIPELINE_TASK_TYPES = {PipelineTaskType.PARSE, PipelineTaskType.DOWNLOAD, PipelineTaskType.RAPTOR,

@@ -58,6 +58,7 @@ class DocumentSource(str, Enum):
    GITHUB = "github"
    GITLAB = "gitlab"


class FileOrigin(str, Enum):
    """File origins"""
    CONNECTOR = "connector"
@@ -234,6 +235,8 @@ _REPLACEMENT_EXPANSIONS = "body.view.value"

BOX_WEB_OAUTH_REDIRECT_URI = os.environ.get("BOX_WEB_OAUTH_REDIRECT_URI", "http://localhost:9380/v1/connector/box/oauth/web/callback")

GITHUB_CONNECTOR_BASE_URL = os.environ.get("GITHUB_CONNECTOR_BASE_URL") or None

class HtmlBasedConnectorTransformLinksStrategy(str, Enum):
    # remove links entirely
    STRIP = "strip"

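The new `GITHUB_CONNECTOR_BASE_URL` setting above defaults to `None`; the trailing `or None` also maps an empty string to `None`. A hedged sketch of how a GitHub Enterprise deployment might set it; the host name is a placeholder:

```python
# Illustrative only; the URL is a placeholder for a GitHub Enterprise API root.
import os

# Must be set before common.data_source.config is imported, because the module
# reads the environment variable at import time.
os.environ["GITHUB_CONNECTOR_BASE_URL"] = "https://github.example.com/api/v3"

from common.data_source.config import GITHUB_CONNECTOR_BASE_URL

print(GITHUB_CONNECTOR_BASE_URL)  # unset or empty values fall back to None (api.github.com)
```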
common/data_source/connector_runner.py (new file, 217 lines)

@@ -0,0 +1,217 @@
import sys
|
||||
import time
|
||||
import logging
|
||||
from collections.abc import Generator
|
||||
from datetime import datetime
|
||||
from typing import Generic
|
||||
from typing import TypeVar
|
||||
from common.data_source.interfaces import (
|
||||
BaseConnector,
|
||||
CheckpointedConnector,
|
||||
CheckpointedConnectorWithPermSync,
|
||||
CheckpointOutput,
|
||||
LoadConnector,
|
||||
PollConnector,
|
||||
)
|
||||
from common.data_source.models import ConnectorCheckpoint, ConnectorFailure, Document
|
||||
|
||||
|
||||
TimeRange = tuple[datetime, datetime]
|
||||
|
||||
CT = TypeVar("CT", bound=ConnectorCheckpoint)
|
||||
|
||||
|
||||
def batched_doc_ids(
|
||||
checkpoint_connector_generator: CheckpointOutput[CT],
|
||||
batch_size: int,
|
||||
) -> Generator[set[str], None, None]:
|
||||
batch: set[str] = set()
|
||||
for document, failure, next_checkpoint in CheckpointOutputWrapper[CT]()(
|
||||
checkpoint_connector_generator
|
||||
):
|
||||
if document is not None:
|
||||
batch.add(document.id)
|
||||
elif (
|
||||
failure and failure.failed_document and failure.failed_document.document_id
|
||||
):
|
||||
batch.add(failure.failed_document.document_id)
|
||||
|
||||
if len(batch) >= batch_size:
|
||||
yield batch
|
||||
batch = set()
|
||||
if len(batch) > 0:
|
||||
yield batch
|
||||
|
||||
|
||||
class CheckpointOutputWrapper(Generic[CT]):
|
||||
"""
|
||||
Wraps a CheckpointOutput generator to give things back in a more digestible format,
|
||||
specifically for Document outputs.
|
||||
The connector format is easier for the connector implementor (e.g. it enforces exactly
|
||||
one new checkpoint is returned AND that the checkpoint is at the end), thus the different
|
||||
formats.
|
||||
"""
|
||||
|
||||
def __init__(self) -> None:
|
||||
self.next_checkpoint: CT | None = None
|
||||
|
||||
def __call__(
|
||||
self,
|
||||
checkpoint_connector_generator: CheckpointOutput[CT],
|
||||
) -> Generator[
|
||||
tuple[Document | None, ConnectorFailure | None, CT | None],
|
||||
None,
|
||||
None,
|
||||
]:
|
||||
# grabs the final return value and stores it in the `next_checkpoint` variable
|
||||
def _inner_wrapper(
|
||||
checkpoint_connector_generator: CheckpointOutput[CT],
|
||||
) -> CheckpointOutput[CT]:
|
||||
self.next_checkpoint = yield from checkpoint_connector_generator
|
||||
return self.next_checkpoint # not used
|
||||
|
||||
for document_or_failure in _inner_wrapper(checkpoint_connector_generator):
|
||||
if isinstance(document_or_failure, Document):
|
||||
yield document_or_failure, None, None
|
||||
elif isinstance(document_or_failure, ConnectorFailure):
|
||||
yield None, document_or_failure, None
|
||||
else:
|
||||
raise ValueError(
|
||||
f"Invalid document_or_failure type: {type(document_or_failure)}"
|
||||
)
|
||||
|
||||
if self.next_checkpoint is None:
|
||||
raise RuntimeError(
|
||||
"Checkpoint is None. This should never happen - the connector should always return a checkpoint."
|
||||
)
|
||||
|
||||
yield None, None, self.next_checkpoint
|
||||
|
||||
|
||||
class ConnectorRunner(Generic[CT]):
|
||||
"""
|
||||
Handles:
|
||||
- Batching
|
||||
- Additional exception logging
|
||||
- Combining different connector types to a single interface
|
||||
"""
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
connector: BaseConnector,
|
||||
batch_size: int,
|
||||
# cannot be True for non-checkpointed connectors
|
||||
include_permissions: bool,
|
||||
time_range: TimeRange | None = None,
|
||||
):
|
||||
if not isinstance(connector, CheckpointedConnector) and include_permissions:
|
||||
raise ValueError(
|
||||
"include_permissions cannot be True for non-checkpointed connectors"
|
||||
)
|
||||
|
||||
self.connector = connector
|
||||
self.time_range = time_range
|
||||
self.batch_size = batch_size
|
||||
self.include_permissions = include_permissions
|
||||
|
||||
self.doc_batch: list[Document] = []
|
||||
|
||||
def run(self, checkpoint: CT) -> Generator[
|
||||
tuple[list[Document] | None, ConnectorFailure | None, CT | None],
|
||||
None,
|
||||
None,
|
||||
]:
|
||||
"""Adds additional exception logging to the connector."""
|
||||
try:
|
||||
if isinstance(self.connector, CheckpointedConnector):
|
||||
if self.time_range is None:
|
||||
raise ValueError("time_range is required for CheckpointedConnector")
|
||||
|
||||
start = time.monotonic()
|
||||
if self.include_permissions:
|
||||
if not isinstance(
|
||||
self.connector, CheckpointedConnectorWithPermSync
|
||||
):
|
||||
raise ValueError(
|
||||
"Connector does not support permission syncing"
|
||||
)
|
||||
load_from_checkpoint = (
|
||||
self.connector.load_from_checkpoint_with_perm_sync
|
||||
)
|
||||
else:
|
||||
load_from_checkpoint = self.connector.load_from_checkpoint
|
||||
checkpoint_connector_generator = load_from_checkpoint(
|
||||
start=self.time_range[0].timestamp(),
|
||||
end=self.time_range[1].timestamp(),
|
||||
checkpoint=checkpoint,
|
||||
)
|
||||
next_checkpoint: CT | None = None
|
||||
# this is guaranteed to always run at least once with next_checkpoint being non-None
|
||||
for document, failure, next_checkpoint in CheckpointOutputWrapper[CT]()(
|
||||
checkpoint_connector_generator
|
||||
):
|
||||
if document is not None and isinstance(document, Document):
|
||||
self.doc_batch.append(document)
|
||||
|
||||
if failure is not None:
|
||||
yield None, failure, None
|
||||
|
||||
if len(self.doc_batch) >= self.batch_size:
|
||||
yield self.doc_batch, None, None
|
||||
self.doc_batch = []
|
||||
|
||||
# yield remaining documents
|
||||
if len(self.doc_batch) > 0:
|
||||
yield self.doc_batch, None, None
|
||||
self.doc_batch = []
|
||||
|
||||
yield None, None, next_checkpoint
|
||||
|
||||
logging.debug(
|
||||
f"Connector took {time.monotonic() - start} seconds to get to the next checkpoint."
|
||||
)
|
||||
|
||||
else:
|
||||
finished_checkpoint = self.connector.build_dummy_checkpoint()
|
||||
finished_checkpoint.has_more = False
|
||||
|
||||
if isinstance(self.connector, PollConnector):
|
||||
if self.time_range is None:
|
||||
raise ValueError("time_range is required for PollConnector")
|
||||
|
||||
for document_batch in self.connector.poll_source(
|
||||
start=self.time_range[0].timestamp(),
|
||||
end=self.time_range[1].timestamp(),
|
||||
):
|
||||
yield document_batch, None, None
|
||||
|
||||
yield None, None, finished_checkpoint
|
||||
elif isinstance(self.connector, LoadConnector):
|
||||
for document_batch in self.connector.load_from_state():
|
||||
yield document_batch, None, None
|
||||
|
||||
yield None, None, finished_checkpoint
|
||||
else:
|
||||
raise ValueError(f"Invalid connector. type: {type(self.connector)}")
|
||||
except Exception:
|
||||
exc_type, _, exc_traceback = sys.exc_info()
|
||||
|
||||
# Traverse the traceback to find the last frame where the exception was raised
|
||||
tb = exc_traceback
|
||||
if tb is None:
|
||||
logging.error("No traceback found for exception")
|
||||
raise
|
||||
|
||||
while tb.tb_next:
|
||||
tb = tb.tb_next # Move to the next frame in the traceback
|
||||
|
||||
# Get the local variables from the frame where the exception occurred
|
||||
local_vars = tb.tb_frame.f_locals
|
||||
local_vars_str = "\n".join(
|
||||
f"{key}: {value}" for key, value in local_vars.items()
|
||||
)
|
||||
logging.error(
|
||||
f"Error in connector. type: {exc_type};\n"
|
||||
f"local_vars below -> \n{local_vars_str[:1024]}"
|
||||
)
|
||||
raise
|
||||
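`connector_runner.py` above is a generic driver that normalizes batching, exception logging, and the different connector interfaces. A minimal sketch of how it might be driven with the GitHub connector introduced later in this changeset; the credentials, owner, and date range are placeholders, and the loop shape is an assumption based on the yielded `(documents, failure, next_checkpoint)` tuples:

```python
# Illustrative driver loop; not part of the committed code.
from datetime import datetime, timezone

from common.data_source.connector_runner import ConnectorRunner
from common.data_source.github.connector import (
    GithubConnector,
    GithubConnectorCheckpoint,
    GithubConnectorStage,
)

connector = GithubConnector(repo_owner="infiniflow", repositories="ragflow")
connector.load_credentials({"github_access_token": "<token>"})  # placeholder token

runner = ConnectorRunner(
    connector,
    batch_size=50,
    include_permissions=False,
    time_range=(datetime(2024, 1, 1, tzinfo=timezone.utc), datetime.now(timezone.utc)),
)

checkpoint = GithubConnectorCheckpoint(
    stage=GithubConnectorStage.START, curr_page=0, num_retrieved=0, has_more=True
)
while checkpoint.has_more:
    for documents, failure, next_checkpoint in runner.run(checkpoint):
        if documents:
            print(f"indexing {len(documents)} documents")
        if failure:
            print(f"failure: {failure.failure_message}")
        if next_checkpoint:
            checkpoint = next_checkpoint  # resume from here on the next pass
```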
common/data_source/github/__init__.py (new file, empty)

common/data_source/github/connector.py (new file, 973 lines)

@@ -0,0 +1,973 @@
import copy
|
||||
import logging
|
||||
from collections.abc import Callable
|
||||
from collections.abc import Generator
|
||||
from datetime import datetime
|
||||
from datetime import timedelta
|
||||
from datetime import timezone
|
||||
from enum import Enum
|
||||
from typing import Any
|
||||
from typing import cast
|
||||
|
||||
from github import Github, Auth
|
||||
from github import RateLimitExceededException
|
||||
from github import Repository
|
||||
from github.GithubException import GithubException
|
||||
from github.Issue import Issue
|
||||
from github.NamedUser import NamedUser
|
||||
from github.PaginatedList import PaginatedList
|
||||
from github.PullRequest import PullRequest
|
||||
from pydantic import BaseModel
|
||||
from typing_extensions import override
|
||||
from common.data_source.google_util.util import sanitize_filename
|
||||
from common.data_source.config import DocumentSource, GITHUB_CONNECTOR_BASE_URL
|
||||
from common.data_source.exceptions import (
|
||||
ConnectorMissingCredentialError,
|
||||
ConnectorValidationError,
|
||||
CredentialExpiredError,
|
||||
InsufficientPermissionsError,
|
||||
UnexpectedValidationError,
|
||||
)
|
||||
from common.data_source.interfaces import CheckpointedConnectorWithPermSyncGH, CheckpointOutput
|
||||
from common.data_source.models import (
|
||||
ConnectorCheckpoint,
|
||||
ConnectorFailure,
|
||||
Document,
|
||||
DocumentFailure,
|
||||
ExternalAccess,
|
||||
SecondsSinceUnixEpoch,
|
||||
)
|
||||
from common.data_source.connector_runner import ConnectorRunner
|
||||
from .models import SerializedRepository
|
||||
from .rate_limit_utils import sleep_after_rate_limit_exception
|
||||
from .utils import deserialize_repository
|
||||
from .utils import get_external_access_permission
|
||||
|
||||
ITEMS_PER_PAGE = 100
|
||||
CURSOR_LOG_FREQUENCY = 50
|
||||
|
||||
_MAX_NUM_RATE_LIMIT_RETRIES = 5
|
||||
|
||||
ONE_DAY = timedelta(days=1)
|
||||
SLIM_BATCH_SIZE = 100
|
||||
# Cases
|
||||
# X (from start) standard run, no fallback to cursor-based pagination
|
||||
# X (from start) standard run errors, fallback to cursor-based pagination
|
||||
# X error in the middle of a page
|
||||
# X no errors: run to completion
|
||||
# X (from checkpoint) standard run, no fallback to cursor-based pagination
|
||||
# X (from checkpoint) continue from cursor-based pagination
|
||||
# - retrying
|
||||
# - no retrying
|
||||
|
||||
# things to check:
|
||||
# checkpoint state on return
|
||||
# checkpoint progress (no infinite loop)
|
||||
|
||||
|
||||
class DocMetadata(BaseModel):
|
||||
repo: str
|
||||
|
||||
|
||||
def get_nextUrl_key(pag_list: PaginatedList[PullRequest | Issue]) -> str:
|
||||
if "_PaginatedList__nextUrl" in pag_list.__dict__:
|
||||
return "_PaginatedList__nextUrl"
|
||||
for key in pag_list.__dict__:
|
||||
if "__nextUrl" in key:
|
||||
return key
|
||||
for key in pag_list.__dict__:
|
||||
if "nextUrl" in key:
|
||||
return key
|
||||
return ""
|
||||
|
||||
|
||||
def get_nextUrl(
|
||||
pag_list: PaginatedList[PullRequest | Issue], nextUrl_key: str
|
||||
) -> str | None:
|
||||
return getattr(pag_list, nextUrl_key) if nextUrl_key else None
|
||||
|
||||
|
||||
def set_nextUrl(
|
||||
pag_list: PaginatedList[PullRequest | Issue], nextUrl_key: str, nextUrl: str
|
||||
) -> None:
|
||||
if nextUrl_key:
|
||||
setattr(pag_list, nextUrl_key, nextUrl)
|
||||
elif nextUrl:
|
||||
raise ValueError("Next URL key not found: " + str(pag_list.__dict__))
|
||||
|
||||
|
||||
def _paginate_until_error(
|
||||
git_objs: Callable[[], PaginatedList[PullRequest | Issue]],
|
||||
cursor_url: str | None,
|
||||
prev_num_objs: int,
|
||||
cursor_url_callback: Callable[[str | None, int], None],
|
||||
retrying: bool = False,
|
||||
) -> Generator[PullRequest | Issue, None, None]:
|
||||
num_objs = prev_num_objs
|
||||
pag_list = git_objs()
|
||||
nextUrl_key = get_nextUrl_key(pag_list)
|
||||
if cursor_url:
|
||||
set_nextUrl(pag_list, nextUrl_key, cursor_url)
|
||||
elif retrying:
|
||||
# if we are retrying, we want to skip the objects retrieved
|
||||
# over previous calls. Unfortunately, this WILL retrieve all
|
||||
# pages before the one we are resuming from, so we really
|
||||
# don't want this case to be hit often
|
||||
logging.warning(
|
||||
"Retrying from a previous cursor-based pagination call. "
|
||||
"This will retrieve all pages before the one we are resuming from, "
|
||||
"which may take a while and consume many API calls."
|
||||
)
|
||||
pag_list = cast(PaginatedList[PullRequest | Issue], pag_list[prev_num_objs:])
|
||||
num_objs = 0
|
||||
|
||||
try:
|
||||
# this for loop handles cursor-based pagination
|
||||
for issue_or_pr in pag_list:
|
||||
num_objs += 1
|
||||
yield issue_or_pr
|
||||
# used to store the current cursor url in the checkpoint. This value
|
||||
# is updated during iteration over pag_list.
|
||||
cursor_url_callback(get_nextUrl(pag_list, nextUrl_key), num_objs)
|
||||
|
||||
if num_objs % CURSOR_LOG_FREQUENCY == 0:
|
||||
logging.info(
|
||||
f"Retrieved {num_objs} objects with current cursor url: {get_nextUrl(pag_list, nextUrl_key)}"
|
||||
)
|
||||
|
||||
except Exception as e:
|
||||
logging.exception(f"Error during cursor-based pagination: {e}")
|
||||
if num_objs - prev_num_objs > 0:
|
||||
raise
|
||||
|
||||
if get_nextUrl(pag_list, nextUrl_key) is not None and not retrying:
|
||||
logging.info(
|
||||
"Assuming that this error is due to cursor "
|
||||
"expiration because no objects were retrieved. "
|
||||
"Retrying from the first page."
|
||||
)
|
||||
yield from _paginate_until_error(
|
||||
git_objs, None, prev_num_objs, cursor_url_callback, retrying=True
|
||||
)
|
||||
return
|
||||
|
||||
# for no cursor url or if we reach this point after a retry, raise the error
|
||||
raise
|
||||
|
||||
|
||||
def _get_batch_rate_limited(
|
||||
# We pass in a callable because we want git_objs to produce a fresh
|
||||
# PaginatedList each time it's called to avoid using the same object for cursor-based pagination
|
||||
# from a partial offset-based pagination call.
|
||||
git_objs: Callable[[], PaginatedList],
|
||||
page_num: int,
|
||||
cursor_url: str | None,
|
||||
prev_num_objs: int,
|
||||
cursor_url_callback: Callable[[str | None, int], None],
|
||||
github_client: Github,
|
||||
attempt_num: int = 0,
|
||||
) -> Generator[PullRequest | Issue, None, None]:
|
||||
if attempt_num > _MAX_NUM_RATE_LIMIT_RETRIES:
|
||||
raise RuntimeError(
|
||||
"Re-tried fetching batch too many times. Something is going wrong with fetching objects from Github"
|
||||
)
|
||||
try:
|
||||
if cursor_url:
|
||||
# when this is set, we are resuming from an earlier
|
||||
# cursor-based pagination call.
|
||||
yield from _paginate_until_error(
|
||||
git_objs, cursor_url, prev_num_objs, cursor_url_callback
|
||||
)
|
||||
return
|
||||
objs = list(git_objs().get_page(page_num))
|
||||
# fetch all data here to disable lazy loading later
|
||||
# this is needed to capture the rate limit exception here (if one occurs)
|
||||
for obj in objs:
|
||||
if hasattr(obj, "raw_data"):
|
||||
getattr(obj, "raw_data")
|
||||
yield from objs
|
||||
except RateLimitExceededException:
|
||||
sleep_after_rate_limit_exception(github_client)
|
||||
yield from _get_batch_rate_limited(
|
||||
git_objs,
|
||||
page_num,
|
||||
cursor_url,
|
||||
prev_num_objs,
|
||||
cursor_url_callback,
|
||||
github_client,
|
||||
attempt_num + 1,
|
||||
)
|
||||
except GithubException as e:
|
||||
if not (
|
||||
e.status == 422
|
||||
and (
|
||||
"cursor" in (e.message or "")
|
||||
or "cursor" in (e.data or {}).get("message", "")
|
||||
)
|
||||
):
|
||||
raise
|
||||
# Fallback to a cursor-based pagination strategy
|
||||
# This can happen for "large datasets," but there's no documentation
|
||||
# On the error on the web as far as we can tell.
|
||||
# Error message:
|
||||
# "Pagination with the page parameter is not supported for large datasets,
|
||||
# please use cursor based pagination (after/before)"
|
||||
yield from _paginate_until_error(
|
||||
git_objs, cursor_url, prev_num_objs, cursor_url_callback
|
||||
)
|
||||
|
||||
|
||||
def _get_userinfo(user: NamedUser) -> dict[str, str]:
|
||||
def _safe_get(attr_name: str) -> str | None:
|
||||
try:
|
||||
return cast(str | None, getattr(user, attr_name))
|
||||
except GithubException:
|
||||
logging.debug(f"Error getting {attr_name} for user")
|
||||
return None
|
||||
|
||||
return {
|
||||
k: v
|
||||
for k, v in {
|
||||
"login": _safe_get("login"),
|
||||
"name": _safe_get("name"),
|
||||
"email": _safe_get("email"),
|
||||
}.items()
|
||||
if v is not None
|
||||
}
|
||||
|
||||
|
||||
def _convert_pr_to_document(
|
||||
pull_request: PullRequest, repo_external_access: ExternalAccess | None
|
||||
) -> Document:
|
||||
repo_name = pull_request.base.repo.full_name if pull_request.base else ""
|
||||
doc_metadata = DocMetadata(repo=repo_name)
|
||||
file_content_byte = pull_request.body.encode('utf-8') if pull_request.body else b""
|
||||
name = sanitize_filename(pull_request.title, "md")
|
||||
|
||||
return Document(
|
||||
id=pull_request.html_url,
|
||||
blob= file_content_byte,
|
||||
source=DocumentSource.GITHUB,
|
||||
external_access=repo_external_access,
|
||||
semantic_identifier=f"{pull_request.number}:{name}",
|
||||
# updated_at is UTC time but is timezone unaware, explicitly add UTC
|
||||
# as there is logic in indexing to prevent wrong timestamped docs
|
||||
# due to local time discrepancies with UTC
|
||||
doc_updated_at=(
|
||||
pull_request.updated_at.replace(tzinfo=timezone.utc)
|
||||
if pull_request.updated_at
|
||||
else None
|
||||
),
|
||||
extension=".md",
|
||||
# this metadata is used in perm sync
|
||||
size_bytes=len(file_content_byte) if file_content_byte else 0,
|
||||
primary_owners=[],
|
||||
doc_metadata=doc_metadata.model_dump(),
|
||||
metadata={
|
||||
k: [str(vi) for vi in v] if isinstance(v, list) else str(v)
|
||||
for k, v in {
|
||||
"object_type": "PullRequest",
|
||||
"id": pull_request.number,
|
||||
"merged": pull_request.merged,
|
||||
"state": pull_request.state,
|
||||
"user": _get_userinfo(pull_request.user) if pull_request.user else None,
|
||||
"assignees": [
|
||||
_get_userinfo(assignee) for assignee in pull_request.assignees
|
||||
],
|
||||
"repo": (
|
||||
pull_request.base.repo.full_name if pull_request.base else None
|
||||
),
|
||||
"num_commits": str(pull_request.commits),
|
||||
"num_files_changed": str(pull_request.changed_files),
|
||||
"labels": [label.name for label in pull_request.labels],
|
||||
"created_at": (
|
||||
pull_request.created_at.replace(tzinfo=timezone.utc)
|
||||
if pull_request.created_at
|
||||
else None
|
||||
),
|
||||
"updated_at": (
|
||||
pull_request.updated_at.replace(tzinfo=timezone.utc)
|
||||
if pull_request.updated_at
|
||||
else None
|
||||
),
|
||||
"closed_at": (
|
||||
pull_request.closed_at.replace(tzinfo=timezone.utc)
|
||||
if pull_request.closed_at
|
||||
else None
|
||||
),
|
||||
"merged_at": (
|
||||
pull_request.merged_at.replace(tzinfo=timezone.utc)
|
||||
if pull_request.merged_at
|
||||
else None
|
||||
),
|
||||
"merged_by": (
|
||||
_get_userinfo(pull_request.merged_by)
|
||||
if pull_request.merged_by
|
||||
else None
|
||||
),
|
||||
}.items()
|
||||
if v is not None
|
||||
},
|
||||
)
|
||||
|
||||
|
||||
def _fetch_issue_comments(issue: Issue) -> str:
|
||||
comments = issue.get_comments()
|
||||
return "\nComment: ".join(comment.body for comment in comments)
|
||||
|
||||
|
||||
def _convert_issue_to_document(
|
||||
issue: Issue, repo_external_access: ExternalAccess | None
|
||||
) -> Document:
|
||||
repo_name = issue.repository.full_name if issue.repository else ""
|
||||
doc_metadata = DocMetadata(repo=repo_name)
|
||||
file_content_byte = issue.body.encode('utf-8') if issue.body else b""
|
||||
name = sanitize_filename(issue.title, "md")
|
||||
|
||||
return Document(
|
||||
id=issue.html_url,
|
||||
blob=file_content_byte,
|
||||
source=DocumentSource.GITHUB,
|
||||
extension=".md",
|
||||
external_access=repo_external_access,
|
||||
semantic_identifier=f"{issue.number}:{name}",
|
||||
# updated_at is UTC time but is timezone unaware
|
||||
doc_updated_at=issue.updated_at.replace(tzinfo=timezone.utc),
|
||||
# this metadata is used in perm sync
|
||||
doc_metadata=doc_metadata.model_dump(),
|
||||
size_bytes=len(file_content_byte) if file_content_byte else 0,
|
||||
primary_owners=[_get_userinfo(issue.user) if issue.user else None],
|
||||
metadata={
|
||||
k: [str(vi) for vi in v] if isinstance(v, list) else str(v)
|
||||
for k, v in {
|
||||
"object_type": "Issue",
|
||||
"id": issue.number,
|
||||
"state": issue.state,
|
||||
"user": _get_userinfo(issue.user) if issue.user else None,
|
||||
"assignees": [_get_userinfo(assignee) for assignee in issue.assignees],
|
||||
"repo": issue.repository.full_name if issue.repository else None,
|
||||
"labels": [label.name for label in issue.labels],
|
||||
"created_at": (
|
||||
issue.created_at.replace(tzinfo=timezone.utc)
|
||||
if issue.created_at
|
||||
else None
|
||||
),
|
||||
"updated_at": (
|
||||
issue.updated_at.replace(tzinfo=timezone.utc)
|
||||
if issue.updated_at
|
||||
else None
|
||||
),
|
||||
"closed_at": (
|
||||
issue.closed_at.replace(tzinfo=timezone.utc)
|
||||
if issue.closed_at
|
||||
else None
|
||||
),
|
||||
"closed_by": (
|
||||
_get_userinfo(issue.closed_by) if issue.closed_by else None
|
||||
),
|
||||
}.items()
|
||||
if v is not None
|
||||
},
|
||||
)
|
||||
|
||||
|
||||
class GithubConnectorStage(Enum):
|
||||
START = "start"
|
||||
PRS = "prs"
|
||||
ISSUES = "issues"
|
||||
|
||||
|
||||
class GithubConnectorCheckpoint(ConnectorCheckpoint):
|
||||
stage: GithubConnectorStage
|
||||
curr_page: int
|
||||
|
||||
cached_repo_ids: list[int] | None = None
|
||||
cached_repo: SerializedRepository | None = None
|
||||
|
||||
# Used for the fallback cursor-based pagination strategy
|
||||
num_retrieved: int
|
||||
cursor_url: str | None = None
|
||||
|
||||
def reset(self) -> None:
|
||||
"""
|
||||
Resets curr_page, num_retrieved, and cursor_url to their initial values (0, 0, None)
|
||||
"""
|
||||
self.curr_page = 0
|
||||
self.num_retrieved = 0
|
||||
self.cursor_url = None
|
||||
|
||||
|
||||
def make_cursor_url_callback(
|
||||
checkpoint: GithubConnectorCheckpoint,
|
||||
) -> Callable[[str | None, int], None]:
|
||||
def cursor_url_callback(cursor_url: str | None, num_objs: int) -> None:
|
||||
# we want to maintain the old cursor url so code after retrieval
|
||||
# can determine that we are using the fallback cursor-based pagination strategy
|
||||
if cursor_url:
|
||||
checkpoint.cursor_url = cursor_url
|
||||
checkpoint.num_retrieved = num_objs
|
||||
|
||||
return cursor_url_callback
|
||||
|
||||
|
||||
class GithubConnector(CheckpointedConnectorWithPermSyncGH[GithubConnectorCheckpoint]):
|
||||
def __init__(
|
||||
self,
|
||||
repo_owner: str,
|
||||
repositories: str | None = None,
|
||||
state_filter: str = "all",
|
||||
include_prs: bool = True,
|
||||
include_issues: bool = False,
|
||||
) -> None:
|
||||
self.repo_owner = repo_owner
|
||||
self.repositories = repositories
|
||||
self.state_filter = state_filter
|
||||
self.include_prs = include_prs
|
||||
self.include_issues = include_issues
|
||||
self.github_client: Github | None = None
|
||||
|
||||
def load_credentials(self, credentials: dict[str, Any]) -> dict[str, Any] | None:
|
||||
# defaults to 30 items per page, can be set to as high as 100
|
||||
token = credentials["github_access_token"]
|
||||
auth = Auth.Token(token)
|
||||
|
||||
if GITHUB_CONNECTOR_BASE_URL:
|
||||
self.github_client = Github(
|
||||
auth=auth,
|
||||
base_url=GITHUB_CONNECTOR_BASE_URL,
|
||||
per_page=ITEMS_PER_PAGE,
|
||||
)
|
||||
else:
|
||||
self.github_client = Github(
|
||||
auth=auth,
|
||||
per_page=ITEMS_PER_PAGE,
|
||||
)
|
||||
|
||||
return None
|
||||
|
||||
def get_github_repo(
|
||||
self, github_client: Github, attempt_num: int = 0
|
||||
) -> Repository.Repository:
|
||||
if attempt_num > _MAX_NUM_RATE_LIMIT_RETRIES:
|
||||
raise RuntimeError(
|
||||
"Re-tried fetching repo too many times. Something is going wrong with fetching objects from Github"
|
||||
)
|
||||
|
||||
try:
|
||||
return github_client.get_repo(f"{self.repo_owner}/{self.repositories}")
|
||||
except RateLimitExceededException:
|
||||
sleep_after_rate_limit_exception(github_client)
|
||||
return self.get_github_repo(github_client, attempt_num + 1)
|
||||
|
||||
def get_github_repos(
|
||||
self, github_client: Github, attempt_num: int = 0
|
||||
) -> list[Repository.Repository]:
|
||||
"""Get specific repositories based on comma-separated repo_name string."""
|
||||
if attempt_num > _MAX_NUM_RATE_LIMIT_RETRIES:
|
||||
raise RuntimeError(
|
||||
"Re-tried fetching repos too many times. Something is going wrong with fetching objects from Github"
|
||||
)
|
||||
|
||||
try:
|
||||
repos = []
|
||||
# Split repo_name by comma and strip whitespace
|
||||
repo_names = [
|
||||
name.strip() for name in (cast(str, self.repositories)).split(",")
|
||||
]
|
||||
|
||||
for repo_name in repo_names:
|
||||
if repo_name: # Skip empty strings
|
||||
try:
|
||||
repo = github_client.get_repo(f"{self.repo_owner}/{repo_name}")
|
||||
repos.append(repo)
|
||||
except GithubException as e:
|
||||
logging.warning(
|
||||
f"Could not fetch repo {self.repo_owner}/{repo_name}: {e}"
|
||||
)
|
||||
|
||||
return repos
|
||||
except RateLimitExceededException:
|
||||
sleep_after_rate_limit_exception(github_client)
|
||||
return self.get_github_repos(github_client, attempt_num + 1)
|
||||
|
||||
def get_all_repos(
|
||||
self, github_client: Github, attempt_num: int = 0
|
||||
) -> list[Repository.Repository]:
|
||||
if attempt_num > _MAX_NUM_RATE_LIMIT_RETRIES:
|
||||
raise RuntimeError(
|
||||
"Re-tried fetching repos too many times. Something is going wrong with fetching objects from Github"
|
||||
)
|
||||
|
||||
try:
|
||||
# Try to get organization first
|
||||
try:
|
||||
org = github_client.get_organization(self.repo_owner)
|
||||
return list(org.get_repos())
|
||||
|
||||
except GithubException:
|
||||
# If not an org, try as a user
|
||||
user = github_client.get_user(self.repo_owner)
|
||||
return list(user.get_repos())
|
||||
except RateLimitExceededException:
|
||||
sleep_after_rate_limit_exception(github_client)
|
||||
return self.get_all_repos(github_client, attempt_num + 1)
|
||||
|
||||
def _pull_requests_func(
|
||||
self, repo: Repository.Repository
|
||||
) -> Callable[[], PaginatedList[PullRequest]]:
|
||||
return lambda: repo.get_pulls(
|
||||
state=self.state_filter, sort="updated", direction="desc"
|
||||
)
|
||||
|
||||
def _issues_func(
|
||||
self, repo: Repository.Repository
|
||||
) -> Callable[[], PaginatedList[Issue]]:
|
||||
return lambda: repo.get_issues(
|
||||
state=self.state_filter, sort="updated", direction="desc"
|
||||
)
|
||||
|
||||
def _fetch_from_github(
|
||||
self,
|
||||
checkpoint: GithubConnectorCheckpoint,
|
||||
start: datetime | None = None,
|
||||
end: datetime | None = None,
|
||||
include_permissions: bool = False,
|
||||
) -> Generator[Document | ConnectorFailure, None, GithubConnectorCheckpoint]:
|
||||
if self.github_client is None:
|
||||
raise ConnectorMissingCredentialError("GitHub")
|
||||
|
||||
checkpoint = copy.deepcopy(checkpoint)
|
||||
|
||||
# First run of the connector, fetch all repos and store in checkpoint
|
||||
if checkpoint.cached_repo_ids is None:
|
||||
repos = []
|
||||
if self.repositories:
|
||||
if "," in self.repositories:
|
||||
# Multiple repositories specified
|
||||
repos = self.get_github_repos(self.github_client)
|
||||
else:
|
||||
# Single repository (backward compatibility)
|
||||
repos = [self.get_github_repo(self.github_client)]
|
||||
else:
|
||||
# All repositories
|
||||
repos = self.get_all_repos(self.github_client)
|
||||
if not repos:
|
||||
checkpoint.has_more = False
|
||||
return checkpoint
|
||||
|
||||
curr_repo = repos.pop()
|
||||
checkpoint.cached_repo_ids = [repo.id for repo in repos]
|
||||
checkpoint.cached_repo = SerializedRepository(
|
||||
id=curr_repo.id,
|
||||
headers=curr_repo.raw_headers,
|
||||
raw_data=curr_repo.raw_data,
|
||||
)
|
||||
checkpoint.stage = GithubConnectorStage.PRS
|
||||
checkpoint.curr_page = 0
|
||||
# save checkpoint with repo ids retrieved
|
||||
return checkpoint
|
||||
|
||||
if checkpoint.cached_repo is None:
|
||||
raise ValueError("No repo saved in checkpoint")
|
||||
|
||||
# Deserialize the repository from the checkpoint
|
||||
repo = deserialize_repository(checkpoint.cached_repo, self.github_client)
|
||||
|
||||
cursor_url_callback = make_cursor_url_callback(checkpoint)
|
||||
repo_external_access: ExternalAccess | None = None
|
||||
if include_permissions:
|
||||
repo_external_access = get_external_access_permission(
|
||||
repo, self.github_client
|
||||
)
|
||||
if self.include_prs and checkpoint.stage == GithubConnectorStage.PRS:
|
||||
logging.info(f"Fetching PRs for repo: {repo.name}")
|
||||
|
||||
pr_batch = _get_batch_rate_limited(
|
||||
self._pull_requests_func(repo),
|
||||
checkpoint.curr_page,
|
||||
checkpoint.cursor_url,
|
||||
checkpoint.num_retrieved,
|
||||
cursor_url_callback,
|
||||
self.github_client,
|
||||
)
|
||||
checkpoint.curr_page += 1 # NOTE: not used for cursor-based fallback
|
||||
done_with_prs = False
|
||||
num_prs = 0
|
||||
pr = None
|
||||
print("start: ", start)
|
||||
for pr in pr_batch:
|
||||
num_prs += 1
|
||||
print("-"*40)
|
||||
print("PR name", pr.title)
|
||||
print("updated at", pr.updated_at)
|
||||
print("-"*40)
|
||||
print("\n")
|
||||
# we iterate backwards in time, so at this point we stop processing prs
|
||||
if (
|
||||
start is not None
|
||||
and pr.updated_at
|
||||
and pr.updated_at.replace(tzinfo=timezone.utc) <= start
|
||||
):
|
||||
done_with_prs = True
|
||||
break
|
||||
# Skip PRs updated after the end date
|
||||
if (
|
||||
end is not None
|
||||
and pr.updated_at
|
||||
and pr.updated_at.replace(tzinfo=timezone.utc) > end
|
||||
):
|
||||
continue
|
||||
try:
|
||||
yield _convert_pr_to_document(
|
||||
cast(PullRequest, pr), repo_external_access
|
||||
)
|
||||
except Exception as e:
|
||||
error_msg = f"Error converting PR to document: {e}"
|
||||
logging.exception(error_msg)
|
||||
yield ConnectorFailure(
|
||||
failed_document=DocumentFailure(
|
||||
document_id=str(pr.id), document_link=pr.html_url
|
||||
),
|
||||
failure_message=error_msg,
|
||||
exception=e,
|
||||
)
|
||||
continue
|
||||
|
||||
# If we reach this point with a cursor url in the checkpoint, we were using
|
||||
# the fallback cursor-based pagination strategy. That strategy tries to get all
|
||||
# PRs, so having cursor_url set means we are done with PRs. However, we need to
|
||||
# return AFTER the checkpoint reset to avoid infinite loops.
|
||||
|
||||
# if we found any PRs on the page and there are more PRs to get, return the checkpoint.
|
||||
# In offset mode, while indexing without time constraints, the pr batch
|
||||
# will be empty when we're done.
|
||||
used_cursor = checkpoint.cursor_url is not None
|
||||
if num_prs > 0 and not done_with_prs and not used_cursor:
|
||||
return checkpoint
|
||||
|
||||
# if we went past the start date during the loop or there are no more
|
||||
# prs to get, we move on to issues
|
||||
checkpoint.stage = GithubConnectorStage.ISSUES
|
||||
checkpoint.reset()
|
||||
|
||||
if used_cursor:
|
||||
# save the checkpoint after changing stage; next run will continue from issues
|
||||
return checkpoint
|
||||
|
||||
checkpoint.stage = GithubConnectorStage.ISSUES
|
||||
|
||||
if self.include_issues and checkpoint.stage == GithubConnectorStage.ISSUES:
|
||||
logging.info(f"Fetching issues for repo: {repo.name}")
|
||||
|
||||
issue_batch = list(
|
||||
_get_batch_rate_limited(
|
||||
self._issues_func(repo),
|
||||
checkpoint.curr_page,
|
||||
checkpoint.cursor_url,
|
||||
checkpoint.num_retrieved,
|
||||
cursor_url_callback,
|
||||
self.github_client,
|
||||
)
|
||||
)
|
||||
checkpoint.curr_page += 1
|
||||
done_with_issues = False
|
||||
num_issues = 0
|
||||
for issue in issue_batch:
|
||||
num_issues += 1
|
||||
issue = cast(Issue, issue)
|
||||
# we iterate backwards in time, so at this point we stop processing prs
|
||||
if (
|
||||
start is not None
|
||||
and issue.updated_at.replace(tzinfo=timezone.utc) <= start
|
||||
):
|
||||
done_with_issues = True
|
||||
break
|
||||
# Skip PRs updated after the end date
|
||||
if (
|
||||
end is not None
|
||||
and issue.updated_at.replace(tzinfo=timezone.utc) > end
|
||||
):
|
||||
continue
|
||||
|
||||
if issue.pull_request is not None:
|
||||
# PRs are handled separately
|
||||
continue
|
||||
|
||||
try:
|
||||
yield _convert_issue_to_document(issue, repo_external_access)
|
||||
except Exception as e:
|
||||
error_msg = f"Error converting issue to document: {e}"
|
||||
logging.exception(error_msg)
|
||||
yield ConnectorFailure(
|
||||
failed_document=DocumentFailure(
|
||||
document_id=str(issue.id),
|
||||
document_link=issue.html_url,
|
||||
),
|
||||
failure_message=error_msg,
|
||||
exception=e,
|
||||
)
|
||||
continue
|
||||
|
||||
# if we found any issues on the page, and we're not done, return the checkpoint.
|
||||
# don't return if we're using cursor-based pagination to avoid infinite loops
|
||||
if num_issues > 0 and not done_with_issues and not checkpoint.cursor_url:
|
||||
return checkpoint
|
||||
|
||||
# if we went past the start date during the loop or there are no more
|
||||
# issues to get, we move on to the next repo
|
||||
        checkpoint.stage = GithubConnectorStage.PRS
        checkpoint.reset()

        checkpoint.has_more = len(checkpoint.cached_repo_ids) > 0
        if checkpoint.cached_repo_ids:
            next_id = checkpoint.cached_repo_ids.pop()
            next_repo = self.github_client.get_repo(next_id)
            checkpoint.cached_repo = SerializedRepository(
                id=next_id,
                headers=next_repo.raw_headers,
                raw_data=next_repo.raw_data,
            )
            checkpoint.stage = GithubConnectorStage.PRS
            checkpoint.reset()

        if checkpoint.cached_repo_ids:
            logging.info(
                f"{len(checkpoint.cached_repo_ids)} repos remaining (IDs: {checkpoint.cached_repo_ids})"
            )
        else:
            logging.info("No more repos remaining")

        return checkpoint

    def _load_from_checkpoint(
        self,
        start: SecondsSinceUnixEpoch,
        end: SecondsSinceUnixEpoch,
        checkpoint: GithubConnectorCheckpoint,
        include_permissions: bool = False,
    ) -> CheckpointOutput[GithubConnectorCheckpoint]:
        start_datetime = datetime.fromtimestamp(start, tz=timezone.utc)
        # add a day for timezone safety
        end_datetime = datetime.fromtimestamp(end, tz=timezone.utc) + ONE_DAY

        # Move start time back by 3 hours, since some Issues/PRs are getting dropped
        # Could be due to delayed processing on GitHub side
        # The non-updated issues since last poll will be shortcut-ed and not embedded
        # adjusted_start_datetime = start_datetime - timedelta(hours=3)

        adjusted_start_datetime = start_datetime

        epoch = datetime.fromtimestamp(0, tz=timezone.utc)
        if adjusted_start_datetime < epoch:
            adjusted_start_datetime = epoch

        return self._fetch_from_github(
            checkpoint,
            start=adjusted_start_datetime,
            end=end_datetime,
            include_permissions=include_permissions,
        )

    @override
    def load_from_checkpoint(
        self,
        start: SecondsSinceUnixEpoch,
        end: SecondsSinceUnixEpoch,
        checkpoint: GithubConnectorCheckpoint,
    ) -> CheckpointOutput[GithubConnectorCheckpoint]:
        return self._load_from_checkpoint(
            start, end, checkpoint, include_permissions=False
        )

    @override
    def load_from_checkpoint_with_perm_sync(
        self,
        start: SecondsSinceUnixEpoch,
        end: SecondsSinceUnixEpoch,
        checkpoint: GithubConnectorCheckpoint,
    ) -> CheckpointOutput[GithubConnectorCheckpoint]:
        return self._load_from_checkpoint(
            start, end, checkpoint, include_permissions=True
        )

    def validate_connector_settings(self) -> None:
        if self.github_client is None:
            raise ConnectorMissingCredentialError("GitHub credentials not loaded.")

        if not self.repo_owner:
            raise ConnectorValidationError(
                "Invalid connector settings: 'repo_owner' must be provided."
            )

        try:
            if self.repositories:
                if "," in self.repositories:
                    # Multiple repositories specified
                    repo_names = [name.strip() for name in self.repositories.split(",")]
                    if not repo_names:
                        raise ConnectorValidationError(
                            "Invalid connector settings: No valid repository names provided."
                        )

                    # Validate at least one repository exists and is accessible
                    valid_repos = False
                    validation_errors = []

                    for repo_name in repo_names:
                        if not repo_name:
                            continue

                        try:
                            test_repo = self.github_client.get_repo(
                                f"{self.repo_owner}/{repo_name}"
                            )
                            logging.info(
                                f"Successfully accessed repository: {self.repo_owner}/{repo_name}"
                            )
                            test_repo.get_contents("")
                            valid_repos = True
                            # If at least one repo is valid, we can proceed
                            break
                        except GithubException as e:
                            validation_errors.append(
                                f"Repository '{repo_name}': {e.data.get('message', str(e))}"
                            )

                    if not valid_repos:
                        error_msg = (
                            "None of the specified repositories could be accessed: "
                        )
                        error_msg += ", ".join(validation_errors)
                        raise ConnectorValidationError(error_msg)
                else:
                    # Single repository (backward compatibility)
                    test_repo = self.github_client.get_repo(
                        f"{self.repo_owner}/{self.repositories}"
                    )
                    test_repo.get_contents("")
            else:
                # Try to get organization first
                try:
                    org = self.github_client.get_organization(self.repo_owner)
                    total_count = org.get_repos().totalCount
                    if total_count == 0:
                        raise ConnectorValidationError(
                            f"Found no repos for organization: {self.repo_owner}. "
                            "Does the credential have the right scopes?"
                        )
                except GithubException as e:
                    # Check for missing SSO
                    MISSING_SSO_ERROR_MESSAGE = "You must grant your Personal Access token access to this organization".lower()
                    if MISSING_SSO_ERROR_MESSAGE in str(e).lower():
                        SSO_GUIDE_LINK = (
                            "https://docs.github.com/en/enterprise-cloud@latest/authentication/"
                            "authenticating-with-saml-single-sign-on/"
                            "authorizing-a-personal-access-token-for-use-with-saml-single-sign-on"
                        )
                        raise ConnectorValidationError(
                            f"Your GitHub token is missing authorization to access the "
                            f"`{self.repo_owner}` organization. Please follow the guide to "
                            f"authorize your token: {SSO_GUIDE_LINK}"
                        )
                    # If not an org, try as a user
                    user = self.github_client.get_user(self.repo_owner)

                    # Check if we can access any repos
                    total_count = user.get_repos().totalCount
                    if total_count == 0:
                        raise ConnectorValidationError(
                            f"Found no repos for user: {self.repo_owner}. "
                            "Does the credential have the right scopes?"
                        )

        except RateLimitExceededException:
            raise UnexpectedValidationError(
                "Validation failed due to GitHub rate-limits being exceeded. Please try again later."
            )

        except GithubException as e:
            if e.status == 401:
                raise CredentialExpiredError(
                    "GitHub credential appears to be invalid or expired (HTTP 401)."
                )
            elif e.status == 403:
                raise InsufficientPermissionsError(
                    "Your GitHub token does not have sufficient permissions for this repository (HTTP 403)."
                )
            elif e.status == 404:
                if self.repositories:
                    if "," in self.repositories:
                        raise ConnectorValidationError(
                            f"None of the specified GitHub repositories could be found for owner: {self.repo_owner}"
                        )
                    else:
                        raise ConnectorValidationError(
                            f"GitHub repository not found with name: {self.repo_owner}/{self.repositories}"
                        )
                else:
                    raise ConnectorValidationError(
                        f"GitHub user or organization not found: {self.repo_owner}"
                    )
            else:
                raise ConnectorValidationError(
                    f"Unexpected GitHub error (status={e.status}): {e.data}"
                )

        except Exception as exc:
            raise Exception(
                f"Unexpected error during GitHub settings validation: {exc}"
            )

    def validate_checkpoint_json(
        self, checkpoint_json: str
    ) -> GithubConnectorCheckpoint:
        return GithubConnectorCheckpoint.model_validate_json(checkpoint_json)

    def build_dummy_checkpoint(self) -> GithubConnectorCheckpoint:
        return GithubConnectorCheckpoint(
            stage=GithubConnectorStage.PRS, curr_page=0, has_more=True, num_retrieved=0
        )


if __name__ == "__main__":
    # Initialize the connector
    connector = GithubConnector(
        repo_owner="EvoAgentX",
        repositories="EvoAgentX",
        include_issues=True,
        include_prs=False,
    )
    connector.load_credentials(
        {"github_access_token": "<Your_GitHub_Access_Token>"}
    )

    if connector.github_client:
        get_external_access_permission(
            connector.get_github_repos(connector.github_client).pop(),
            connector.github_client,
        )

    # Create a time range from epoch to now
    end_time = datetime.now(timezone.utc)
    start_time = datetime.fromtimestamp(0, tz=timezone.utc)
    time_range = (start_time, end_time)

    # Initialize the runner with a batch size of 10
    runner: ConnectorRunner[GithubConnectorCheckpoint] = ConnectorRunner(
        connector, batch_size=10, include_permissions=False, time_range=time_range
    )

    # Get initial checkpoint
    checkpoint = connector.build_dummy_checkpoint()

    # Run the connector
    while checkpoint.has_more:
        for doc_batch, failure, next_checkpoint in runner.run(checkpoint):
            if doc_batch:
                print(f"Retrieved batch of {len(doc_batch)} documents")
                for doc in doc_batch:
                    print(f"Document: {doc.semantic_identifier}")
            if failure:
                print(f"Failure: {failure.failure_message}")
            if next_checkpoint:
                checkpoint = next_checkpoint
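
An aside, not part of the patch: because `GithubConnectorCheckpoint` is a Pydantic model (it exposes `model_validate_json` above), persisting a checkpoint between polling runs is a plain JSON round-trip. A minimal sketch, assuming Pydantic v2 and only the connector API shown in this file; the token string is a placeholder:

```python
# Illustrative sketch only -- relies on the connector/checkpoint API shown above.
connector = GithubConnector(repo_owner="EvoAgentX", repositories="EvoAgentX")
connector.load_credentials({"github_access_token": "<Your_GitHub_Access_Token>"})

checkpoint = connector.build_dummy_checkpoint()
saved = checkpoint.model_dump_json()            # Pydantic v2 serialization
restored = connector.validate_checkpoint_json(saved)
assert restored.has_more and restored.curr_page == 0
```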
common/data_source/github/models.py (new file, 17 lines)
@ -0,0 +1,17 @@
from typing import Any

from github import Repository
from github.Requester import Requester
from pydantic import BaseModel


class SerializedRepository(BaseModel):
    # id is part of the raw_data as well, just pulled out for convenience
    id: int
    headers: dict[str, str | int]
    raw_data: dict[str, Any]

    def to_Repository(self, requester: Requester) -> Repository.Repository:
        return Repository.Repository(
            requester, self.headers, self.raw_data, completed=True
        )
common/data_source/github/rate_limit_utils.py (new file, 24 lines)
@ -0,0 +1,24 @@
import time
import logging
from datetime import datetime
from datetime import timedelta
from datetime import timezone

from github import Github


def sleep_after_rate_limit_exception(github_client: Github) -> None:
    """
    Sleep until the GitHub rate limit resets.

    Args:
        github_client: The GitHub client that hit the rate limit
    """
    sleep_time = github_client.get_rate_limit().core.reset.replace(
        tzinfo=timezone.utc
    ) - datetime.now(tz=timezone.utc)
    sleep_time += timedelta(minutes=1)  # add an extra minute just to be safe
    logging.info(
        "Ran into Github rate-limit. Sleeping %s seconds.", sleep_time.seconds
    )
    time.sleep(sleep_time.total_seconds())
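
An aside, not part of the patch: this helper is meant to be wrapped around PyGithub calls that raise `RateLimitExceededException`. A minimal, hedged sketch of such a wrapper (the function name and single-retry policy are illustrative, not from this diff):

```python
from github import Github, RateLimitExceededException


def fetch_repo_with_retry(client: Github, full_name: str):
    # Retry once after sleeping out the rate-limit window.
    for attempt in range(2):
        try:
            return client.get_repo(full_name)
        except RateLimitExceededException:
            if attempt == 0:
                sleep_after_rate_limit_exception(client)
            else:
                raise
```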
common/data_source/github/utils.py (new file, 44 lines)
@ -0,0 +1,44 @@
import logging

from github import Github
from github.Repository import Repository

from common.data_source.models import ExternalAccess

from .models import SerializedRepository


def get_external_access_permission(
    repo: Repository, github_client: Github
) -> ExternalAccess:
    """
    Get the external access permission for a repository.
    This functionality requires Enterprise Edition.
    """
    # RAGFlow doesn't implement the Onyx EE external-permissions system.
    # Default to private/unknown permissions.
    return ExternalAccess.empty()


def deserialize_repository(
    cached_repo: SerializedRepository, github_client: Github
) -> Repository:
    """
    Deserialize a SerializedRepository back into a Repository object.
    """
    # Try to access the requester - different PyGithub versions may use different attribute names
    try:
        # Try to get the requester using getattr to avoid linter errors
        requester = getattr(github_client, "_requester", None)
        if requester is None:
            requester = getattr(github_client, "_Github__requester", None)
            if requester is None:
                # If we can't find the requester attribute, we need to fall back to recreating the repo
                raise AttributeError("Could not find requester attribute")

        return cached_repo.to_Repository(requester)
    except Exception as e:
        # If all else fails, re-fetch the repo directly
        logging.warning("Failed to deserialize repository: %s. Attempting to re-fetch.", e)
        repo_id = cached_repo.id
        return github_client.get_repo(repo_id)
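
An aside, not part of the patch: the intended round trip between `SerializedRepository` and `deserialize_repository` looks roughly like the sketch below; the client, token, and repository name are placeholders:

```python
from github import Github

client = Github("<token>")                       # placeholder credentials
repo = client.get_repo("EvoAgentX/EvoAgentX")    # placeholder repository

cached = SerializedRepository(
    id=repo.id,
    headers=repo.raw_headers,
    raw_data=repo.raw_data,
)
# Later, possibly after the checkpoint is reloaded, rebuild the object without
# an extra API call; deserialize_repository falls back to get_repo(cached.id).
restored = deserialize_repository(cached, client)
assert restored.full_name == repo.full_name
```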
@ -191,7 +191,7 @@ def get_credentials_from_env(email: str, oauth: bool = False, source="drive") ->
|
||||
DB_CREDENTIALS_AUTHENTICATION_METHOD: "uploaded",
|
||||
}
|
||||
|
||||
def sanitize_filename(name: str) -> str:
|
||||
def sanitize_filename(name: str, extension: str = "txt") -> str:
|
||||
"""
|
||||
Soft sanitize for MinIO/S3:
|
||||
- Replace only prohibited characters with a space.
|
||||
@ -199,7 +199,7 @@ def sanitize_filename(name: str) -> str:
|
||||
- Collapse multiple spaces.
|
||||
"""
|
||||
if name is None:
|
||||
return "file.txt"
|
||||
return f"file.{extension}"
|
||||
|
||||
name = str(name).strip()
|
||||
|
||||
@ -222,9 +222,8 @@ def sanitize_filename(name: str) -> str:
|
||||
base, ext = os.path.splitext(name)
|
||||
name = base[:180].rstrip() + ext
|
||||
|
||||
# Ensure there is an extension (your original logic)
|
||||
if not os.path.splitext(name)[1]:
|
||||
name += ".txt"
|
||||
name += f".{extension}"
|
||||
|
||||
return name
|
||||
|
||||
|
||||
@ -237,16 +237,13 @@ class BaseConnector(abc.ABC, Generic[CT]):
|
||||
|
||||
def validate_perm_sync(self) -> None:
|
||||
"""
|
||||
Don't override this; add a function to perm_sync_valid.py in the ee package
|
||||
to do permission sync validation
|
||||
Permission-sync validation hook.
|
||||
|
||||
RAGFlow doesn't ship the Onyx EE permission-sync validation package.
|
||||
Connectors that support permission sync should override
|
||||
`validate_connector_settings()` as needed.
|
||||
"""
|
||||
"""
|
||||
validate_connector_settings_fn = fetch_ee_implementation_or_noop(
|
||||
"onyx.connectors.perm_sync_valid",
|
||||
"validate_perm_sync",
|
||||
noop_return_value=None,
|
||||
)
|
||||
validate_connector_settings_fn(self)"""
|
||||
return None
|
||||
|
||||
def set_allow_images(self, value: bool) -> None:
|
||||
"""Implement if the underlying connector wants to skip/allow image downloading
|
||||
@ -345,6 +342,17 @@ class CheckpointOutputWrapper(Generic[CT]):
|
||||
yield None, None, self.next_checkpoint
|
||||
|
||||
|
||||
class CheckpointedConnectorWithPermSyncGH(CheckpointedConnector[CT]):
|
||||
@abc.abstractmethod
|
||||
def load_from_checkpoint_with_perm_sync(
|
||||
self,
|
||||
start: SecondsSinceUnixEpoch,
|
||||
end: SecondsSinceUnixEpoch,
|
||||
checkpoint: CT,
|
||||
) -> CheckpointOutput[CT]:
|
||||
raise NotImplementedError
|
||||
|
||||
|
||||
# Slim connectors retrieve just the ids of documents
|
||||
class SlimConnector(BaseConnector):
|
||||
@abc.abstractmethod
|
||||
|
||||
@ -94,8 +94,10 @@ class Document(BaseModel):
|
||||
blob: bytes
|
||||
doc_updated_at: datetime
|
||||
size_bytes: int
|
||||
externale_access: Optional[ExternalAccess] = None
|
||||
primary_owners: Optional[list] = None
|
||||
metadata: Optional[dict[str, Any]] = None
|
||||
doc_metadata: Optional[dict[str, Any]] = None
|
||||
|
||||
|
||||
class BasicExpertInfo(BaseModel):
|
||||
|
||||
@ -13,6 +13,7 @@
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
import ast
|
||||
import logging
|
||||
from typing import Any, Callable, Dict
|
||||
|
||||
@ -49,8 +50,8 @@ def meta_filter(metas: dict, filters: list[dict], logic: str = "and"):
|
||||
try:
|
||||
if isinstance(input, list):
|
||||
input = input[0]
|
||||
input = float(input)
|
||||
value = float(value)
|
||||
input = ast.literal_eval(input)
|
||||
value = ast.literal_eval(value)
|
||||
except Exception:
|
||||
pass
|
||||
if isinstance(input, str):
|
||||
@ -58,28 +59,41 @@ def meta_filter(metas: dict, filters: list[dict], logic: str = "and"):
|
||||
if isinstance(value, str):
|
||||
value = value.lower()
|
||||
|
||||
for conds in [
|
||||
(operator == "contains", input in value if not isinstance(input, list) else all([i in value for i in input])),
|
||||
(operator == "not contains", input not in value if not isinstance(input, list) else all([i not in value for i in input])),
|
||||
(operator == "in", input in value if not isinstance(input, list) else all([i in value for i in input])),
|
||||
(operator == "not in", input not in value if not isinstance(input, list) else all([i not in value for i in input])),
|
||||
(operator == "start with", str(input).lower().startswith(str(value).lower()) if not isinstance(input, list) else "".join([str(i).lower() for i in input]).startswith(str(value).lower())),
|
||||
(operator == "end with", str(input).lower().endswith(str(value).lower()) if not isinstance(input, list) else "".join([str(i).lower() for i in input]).endswith(str(value).lower())),
|
||||
(operator == "empty", not input),
|
||||
(operator == "not empty", input),
|
||||
(operator == "=", input == value),
|
||||
(operator == "≠", input != value),
|
||||
(operator == ">", input > value),
|
||||
(operator == "<", input < value),
|
||||
(operator == "≥", input >= value),
|
||||
(operator == "≤", input <= value),
|
||||
]:
|
||||
try:
|
||||
if all(conds):
|
||||
ids.extend(docids)
|
||||
break
|
||||
except Exception:
|
||||
pass
|
||||
matched = False
|
||||
try:
|
||||
if operator == "contains":
|
||||
matched = input in value if not isinstance(input, list) else all(i in value for i in input)
|
||||
elif operator == "not contains":
|
||||
matched = input not in value if not isinstance(input, list) else all(i not in value for i in input)
|
||||
elif operator == "in":
|
||||
matched = input in value if not isinstance(input, list) else all(i in value for i in input)
|
||||
elif operator == "not in":
|
||||
matched = input not in value if not isinstance(input, list) else all(i not in value for i in input)
|
||||
elif operator == "start with":
|
||||
matched = str(input).lower().startswith(str(value).lower()) if not isinstance(input, list) else "".join([str(i).lower() for i in input]).startswith(str(value).lower())
|
||||
elif operator == "end with":
|
||||
matched = str(input).lower().endswith(str(value).lower()) if not isinstance(input, list) else "".join([str(i).lower() for i in input]).endswith(str(value).lower())
|
||||
elif operator == "empty":
|
||||
matched = not input
|
||||
elif operator == "not empty":
|
||||
matched = bool(input)
|
||||
elif operator == "=":
|
||||
matched = input == value
|
||||
elif operator == "≠":
|
||||
matched = input != value
|
||||
elif operator == ">":
|
||||
matched = input > value
|
||||
elif operator == "<":
|
||||
matched = input < value
|
||||
elif operator == "≥":
|
||||
matched = input >= value
|
||||
elif operator == "≤":
|
||||
matched = input <= value
|
||||
except Exception:
|
||||
pass
|
||||
|
||||
if matched:
|
||||
ids.extend(docids)
|
||||
return ids
|
||||
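
An aside, not part of the patch: the list-versus-scalar handling in the rewritten operator branches above reduces to the pattern below; the values are hypothetical:

```python
# Mirrors the "contains" branch: a scalar input must be a member/substring of value,
# while a list input requires every element to match.
def contains(input_value, value):
    if isinstance(input_value, list):
        return all(i in value for i in input_value)
    return input_value in value


assert contains("llm", "llm, rag")
assert contains(["llm", "rag"], "llm, rag, agent")
assert not contains(["llm", "kg"], "llm, rag")
```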
|
||||
for k, v2docs in metas.items():
|
||||
|
||||
@ -170,7 +170,7 @@ def init_settings():
|
||||
global DATABASE_TYPE, DATABASE
|
||||
DATABASE_TYPE = os.getenv("DB_TYPE", "mysql")
|
||||
DATABASE = decrypt_database_config(name=DATABASE_TYPE)
|
||||
|
||||
|
||||
global ALLOWED_LLM_FACTORIES, LLM_FACTORY, LLM_BASE_URL
|
||||
llm_settings = get_base_config("user_default_llm", {}) or {}
|
||||
llm_default_models = llm_settings.get("default_models", {}) or {}
|
||||
@ -334,6 +334,9 @@ def init_settings():
|
||||
DOC_BULK_SIZE = int(os.environ.get("DOC_BULK_SIZE", 4))
|
||||
EMBEDDING_BATCH_SIZE = int(os.environ.get("EMBEDDING_BATCH_SIZE", 16))
|
||||
|
||||
os.environ["DOTNET_SYSTEM_GLOBALIZATION_INVARIANT"] = "1"
|
||||
|
||||
|
||||
def check_and_install_torch():
|
||||
global PARALLEL_DEVICES
|
||||
try:
|
||||
|
||||
@ -1061,8 +1061,8 @@ class RAGFlowPdfParser:
|
||||
|
||||
self.total_page = len(self.pdf.pages)
|
||||
|
||||
except Exception:
|
||||
logging.exception("RAGFlowPdfParser __images__")
|
||||
except Exception as e:
|
||||
logging.exception(f"RAGFlowPdfParser __images__, exception: {e}")
|
||||
logging.info(f"__images__ dedupe_chars cost {timer() - start}s")
|
||||
|
||||
self.outlines = []
|
||||
|
||||
@ -149,6 +149,7 @@ dependencies = [
|
||||
# "cryptography==46.0.3",
|
||||
# "jinja2>=3.1.0",
|
||||
"pyairtable>=3.3.0",
|
||||
"pygithub>=2.8.1",
|
||||
"asana>=5.2.2",
|
||||
"python-gitlab>=7.0.0",
|
||||
]
|
||||
|
||||
@ -13,7 +13,7 @@
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
|
||||
import logging
|
||||
import os
|
||||
import re
|
||||
import tempfile
|
||||
@ -28,7 +28,7 @@ def chunk(filename, binary, tenant_id, lang, callback=None, **kwargs):
|
||||
doc["title_sm_tks"] = rag_tokenizer.fine_grained_tokenize(doc["title_tks"])
|
||||
|
||||
# is it English
|
||||
eng = lang.lower() == "english" # is_english(sections)
|
||||
is_english = lang.lower() == "english" # is_english(sections)
|
||||
try:
|
||||
_, ext = os.path.splitext(filename)
|
||||
if not ext:
|
||||
@ -49,7 +49,7 @@ def chunk(filename, binary, tenant_id, lang, callback=None, **kwargs):
|
||||
ans = seq2txt_mdl.transcription(tmp_path)
|
||||
callback(0.8, "Sequence2Txt LLM respond: %s ..." % ans[:32])
|
||||
|
||||
tokenize(doc, ans, eng)
|
||||
tokenize(doc, ans, is_english)
|
||||
return [doc]
|
||||
except Exception as e:
|
||||
callback(prog=-1, msg=str(e))
|
||||
@ -57,6 +57,7 @@ def chunk(filename, binary, tenant_id, lang, callback=None, **kwargs):
|
||||
if tmp_path and os.path.exists(tmp_path):
|
||||
try:
|
||||
os.unlink(tmp_path)
|
||||
except Exception:
|
||||
except Exception as e:
|
||||
logging.exception(f"Failed to remove temporary file: {tmp_path}, exception: {e}")
|
||||
pass
|
||||
return []
|
||||
|
||||
@ -210,8 +210,8 @@ class Docx(DocxParser):
|
||||
except UnicodeDecodeError:
|
||||
logging.info("The recognized image stream appears to be corrupted. Skipping image.")
|
||||
continue
|
||||
except Exception:
|
||||
logging.info("The recognized image stream appears to be corrupted. Skipping image.")
|
||||
except Exception as e:
|
||||
logging.warning(f"The recognized image stream appears to be corrupted. Skipping image, exception: {e}")
|
||||
continue
|
||||
try:
|
||||
image = Image.open(BytesIO(image_blob)).convert('RGB')
|
||||
@ -219,7 +219,8 @@ class Docx(DocxParser):
|
||||
res_img = image
|
||||
else:
|
||||
res_img = concat_img(res_img, image)
|
||||
except Exception:
|
||||
except Exception as e:
|
||||
logging.warning(f"Fail to open or concat images, exception: {e}")
|
||||
continue
|
||||
|
||||
return res_img
|
||||
@ -553,7 +554,8 @@ class Markdown(MarkdownParser):
|
||||
if (src, line_no) not in seen:
|
||||
urls.append({"url": src, "line": line_no})
|
||||
seen.add((src, line_no))
|
||||
except Exception:
|
||||
except Exception as e:
|
||||
logging.error("Failed to extract image urls: {}".format(e))
|
||||
pass
|
||||
|
||||
return urls
|
||||
@ -698,8 +700,10 @@ def chunk(filename, binary=None, from_page=0, to_page=100000, lang="Chinese", ca
|
||||
**kwargs) or []
|
||||
embed_res.extend(sub_res)
|
||||
except Exception as e:
|
||||
error_msg = f"Failed to chunk embed {embed_filename}: {e}"
|
||||
logging.error(error_msg)
|
||||
if callback:
|
||||
callback(0.05, f"Failed to chunk embed {embed_filename}: {e}")
|
||||
callback(0.05, error_msg)
|
||||
continue
|
||||
|
||||
if re.search(r"\.docx$", filename, re.IGNORECASE):
|
||||
@ -839,7 +843,8 @@ def chunk(filename, binary=None, from_page=0, to_page=100000, lang="Chinese", ca
|
||||
try:
|
||||
vision_model = LLMBundle(kwargs["tenant_id"], LLMType.IMAGE2TEXT)
|
||||
callback(0.2, "Visual model detected. Attempting to enhance figure extraction...")
|
||||
except Exception:
|
||||
except Exception as e:
|
||||
logging.warning(f"Failed to detect figure extraction: {e}")
|
||||
vision_model = None
|
||||
|
||||
if vision_model:
|
||||
@ -905,8 +910,9 @@ def chunk(filename, binary=None, from_page=0, to_page=100000, lang="Chinese", ca
|
||||
sections = [(_, "") for _ in sections if _]
|
||||
callback(0.8, "Finish parsing.")
|
||||
else:
|
||||
callback(0.8, f"tika.parser got empty content from {filename}.")
|
||||
logging.warning(f"tika.parser got empty content from {filename}.")
|
||||
error_msg = f"tika.parser got empty content from {filename}."
|
||||
callback(0.8, error_msg)
|
||||
logging.warning(error_msg)
|
||||
return []
|
||||
else:
|
||||
raise NotImplementedError(
|
||||
|
||||
@ -42,16 +42,16 @@ class Excel(ExcelParser):
|
||||
else:
|
||||
wb = Excel._load_excel_to_workbook(BytesIO(binary))
|
||||
total = 0
|
||||
for sheetname in wb.sheetnames:
|
||||
total += len(list(wb[sheetname].rows))
|
||||
for sheet_name in wb.sheetnames:
|
||||
total += len(list(wb[sheet_name].rows))
|
||||
res, fails, done = [], [], 0
|
||||
rn = 0
|
||||
flow_images = []
|
||||
pending_cell_images = []
|
||||
tables = []
|
||||
for sheetname in wb.sheetnames:
|
||||
ws = wb[sheetname]
|
||||
images = Excel._extract_images_from_worksheet(ws, sheetname=sheetname)
|
||||
for sheet_name in wb.sheetnames:
|
||||
ws = wb[sheet_name]
|
||||
images = Excel._extract_images_from_worksheet(ws, sheetname=sheet_name)
|
||||
if images:
|
||||
image_descriptions = vision_figure_parser_figure_xlsx_wrapper(images=images, callback=callback,
|
||||
**kwargs)
|
||||
@ -59,7 +59,7 @@ class Excel(ExcelParser):
|
||||
for i, bf in enumerate(image_descriptions):
|
||||
images[i]["image_description"] = "\n".join(bf[0][1])
|
||||
for img in images:
|
||||
if (img["span_type"] == "single_cell" and img.get("image_description")):
|
||||
if img["span_type"] == "single_cell" and img.get("image_description"):
|
||||
pending_cell_images.append(img)
|
||||
else:
|
||||
flow_images.append(img)
|
||||
@ -67,7 +67,7 @@ class Excel(ExcelParser):
|
||||
try:
|
||||
rows = list(ws.rows)
|
||||
except Exception as e:
|
||||
logging.warning(f"Skip sheet '{sheetname}' due to rows access error: {e}")
|
||||
logging.warning(f"Skip sheet '{sheet_name}' due to rows access error: {e}")
|
||||
continue
|
||||
if not rows:
|
||||
continue
|
||||
@ -303,7 +303,8 @@ class Excel(ExcelParser):
|
||||
def trans_datatime(s):
|
||||
try:
|
||||
return datetime_parse(s.strip()).strftime("%Y-%m-%d %H:%M:%S")
|
||||
except Exception:
|
||||
except Exception as e:
|
||||
logging.warning(f"Failed to parse date from {s}, error: {e}")
|
||||
pass
|
||||
|
||||
|
||||
@ -312,6 +313,7 @@ def trans_bool(s):
|
||||
return "yes"
|
||||
if re.match(r"(false|no|否|⍻|×)$", str(s).strip(), flags=re.IGNORECASE):
|
||||
return "no"
|
||||
return None
|
||||
|
||||
|
||||
def column_data_type(arr):
|
||||
@ -346,8 +348,9 @@ def column_data_type(arr):
|
||||
continue
|
||||
try:
|
||||
arr[i] = trans[ty](str(arr[i]))
|
||||
except Exception:
|
||||
except Exception as e:
|
||||
arr[i] = None
|
||||
logging.warning(f"Column {i}: {e}")
|
||||
# if ty == "text":
|
||||
# if len(arr) > 128 and uni / len(arr) < 0.1:
|
||||
# ty = "keyword"
|
||||
|
||||
@ -55,6 +55,7 @@ from common.data_source.config import INDEX_BATCH_SIZE
|
||||
from common.data_source.confluence_connector import ConfluenceConnector
|
||||
from common.data_source.gmail_connector import GmailConnector
|
||||
from common.data_source.box_connector import BoxConnector
|
||||
from common.data_source.github.connector import GithubConnector
|
||||
from common.data_source.gitlab_connector import GitlabConnector
|
||||
from common.data_source.interfaces import CheckpointOutputWrapper
|
||||
from common.log_utils import init_root_logger
|
||||
@ -108,7 +109,7 @@ class SyncBase:
|
||||
if task["poll_range_start"]:
|
||||
next_update = task["poll_range_start"]
|
||||
|
||||
for document_batch in document_batch_generator:
|
||||
async for document_batch in document_batch_generator:
|
||||
if not document_batch:
|
||||
continue
|
||||
|
||||
@ -706,20 +707,17 @@ class Moodle(SyncBase):
|
||||
self.connector.load_credentials(self.conf["credentials"])
|
||||
|
||||
# Determine the time range for synchronization based on reindex or poll_range_start
|
||||
if task["reindex"] == "1" or not task.get("poll_range_start"):
|
||||
poll_start = task.get("poll_range_start")
|
||||
|
||||
if task["reindex"] == "1" or poll_start is None:
|
||||
document_generator = self.connector.load_from_state()
|
||||
begin_info = "totally"
|
||||
else:
|
||||
poll_start = task["poll_range_start"]
|
||||
if poll_start is None:
|
||||
document_generator = self.connector.load_from_state()
|
||||
begin_info = "totally"
|
||||
else:
|
||||
document_generator = self.connector.poll_source(
|
||||
poll_start.timestamp(),
|
||||
datetime.now(timezone.utc).timestamp()
|
||||
)
|
||||
begin_info = "from {}".format(poll_start)
|
||||
document_generator = self.connector.poll_source(
|
||||
poll_start.timestamp(),
|
||||
datetime.now(timezone.utc).timestamp(),
|
||||
)
|
||||
begin_info = f"from {poll_start}"
|
||||
|
||||
logging.info("Connect to Moodle: {} {}".format(self.conf["moodle_url"], begin_info))
|
||||
return document_generator
|
||||
@ -749,20 +747,17 @@ class BOX(SyncBase):
|
||||
auth.token_storage.store(token)
|
||||
|
||||
self.connector.load_credentials(auth)
|
||||
if task["reindex"] == "1" or not task["poll_range_start"]:
|
||||
poll_start = task["poll_range_start"]
|
||||
|
||||
if task["reindex"] == "1" or poll_start is None:
|
||||
document_generator = self.connector.load_from_state()
|
||||
begin_info = "totally"
|
||||
else:
|
||||
poll_start = task["poll_range_start"]
|
||||
if poll_start is None:
|
||||
document_generator = self.connector.load_from_state()
|
||||
begin_info = "totally"
|
||||
else:
|
||||
document_generator = self.connector.poll_source(
|
||||
poll_start.timestamp(),
|
||||
datetime.now(timezone.utc).timestamp()
|
||||
)
|
||||
begin_info = "from {}".format(poll_start)
|
||||
document_generator = self.connector.poll_source(
|
||||
poll_start.timestamp(),
|
||||
datetime.now(timezone.utc).timestamp(),
|
||||
)
|
||||
begin_info = f"from {poll_start}"
|
||||
logging.info("Connect to Box: folder_id({}) {}".format(self.conf["folder_id"], begin_info))
|
||||
return document_generator
|
||||
|
||||
@ -788,20 +783,17 @@ class Airtable(SyncBase):
|
||||
{"airtable_access_token": credentials["airtable_access_token"]}
|
||||
)
|
||||
|
||||
if task.get("reindex") == "1" or not task.get("poll_range_start"):
|
||||
poll_start = task.get("poll_range_start")
|
||||
|
||||
if task.get("reindex") == "1" or poll_start is None:
|
||||
document_generator = self.connector.load_from_state()
|
||||
begin_info = "totally"
|
||||
else:
|
||||
poll_start = task.get("poll_range_start")
|
||||
if poll_start is None:
|
||||
document_generator = self.connector.load_from_state()
|
||||
begin_info = "totally"
|
||||
else:
|
||||
document_generator = self.connector.poll_source(
|
||||
poll_start.timestamp(),
|
||||
datetime.now(timezone.utc).timestamp(),
|
||||
)
|
||||
begin_info = f"from {poll_start}"
|
||||
document_generator = self.connector.poll_source(
|
||||
poll_start.timestamp(),
|
||||
datetime.now(timezone.utc).timestamp(),
|
||||
)
|
||||
begin_info = f"from {poll_start}"
|
||||
|
||||
logging.info(
|
||||
"Connect to Airtable: base_id(%s), table(%s) %s",
|
||||
@ -854,6 +846,75 @@ class Asana(SyncBase):
|
||||
|
||||
return document_generator
|
||||
|
||||
class Github(SyncBase):
    SOURCE_NAME: str = FileSource.GITHUB

    async def _generate(self, task: dict):
        """
        Sync files from Github repositories.
        """
        from common.data_source.connector_runner import ConnectorRunner

        self.connector = GithubConnector(
            repo_owner=self.conf.get("repository_owner"),
            repositories=self.conf.get("repository_name"),
            include_prs=self.conf.get("include_pull_requests", False),
            include_issues=self.conf.get("include_issues", False),
        )

        credentials = self.conf.get("credentials", {})
        if "github_access_token" not in credentials:
            raise ValueError("Missing github_access_token in credentials")

        self.connector.load_credentials(
            {"github_access_token": credentials["github_access_token"]}
        )

        if task.get("reindex") == "1" or not task.get("poll_range_start"):
            start_time = datetime.fromtimestamp(0, tz=timezone.utc)
            begin_info = "totally"
        else:
            start_time = task.get("poll_range_start")
            begin_info = f"from {start_time}"

        end_time = datetime.now(timezone.utc)

        runner = ConnectorRunner(
            connector=self.connector,
            batch_size=self.conf.get("batch_size", INDEX_BATCH_SIZE),
            include_permissions=False,
            time_range=(start_time, end_time)
        )

        def document_batches():
            checkpoint = self.connector.build_dummy_checkpoint()

            while checkpoint.has_more:
                for doc_batch, failure, next_checkpoint in runner.run(checkpoint):
                    if failure is not None:
                        logging.warning(
                            "Github connector failure: %s",
                            getattr(failure, "failure_message", failure),
                        )
                        continue
                    if doc_batch is not None:
                        yield doc_batch
                    if next_checkpoint is not None:
                        checkpoint = next_checkpoint

        async def async_wrapper():
            for batch in document_batches():
                yield batch

        logging.info(
            "Connect to Github: org_name(%s), repo_names(%s) for %s",
            self.conf.get("repository_owner"),
            self.conf.get("repository_name"),
            begin_info,
        )

        return async_wrapper()
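
An aside, not part of the patch: since `_generate` returns an async generator, it is consumed with the same `async for document_batch in document_batch_generator:` pattern shown earlier in this diff. A hedged sketch with a hypothetical `github_sync` instance and `task` dict:

```python
import asyncio


async def drain(github_sync, task: dict) -> int:
    # `github_sync` is a hypothetical Github(SyncBase) instance; `task` a scheduler task dict.
    document_batch_generator = await github_sync._generate(task)
    total = 0
    async for document_batch in document_batch_generator:  # same pattern as the sync loop above
        if not document_batch:
            continue
        total += len(document_batch)
    return total

# asyncio.run(drain(github_sync, task))  # hypothetical invocation
```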
|
||||
|
||||
|
||||
class Gitlab(SyncBase):
|
||||
@ -915,8 +976,9 @@ func_factory = {
|
||||
FileSource.WEBDAV: WebDAV,
|
||||
FileSource.BOX: BOX,
|
||||
FileSource.AIRTABLE: Airtable,
|
||||
FileSource.GITLAB: Gitlab,
|
||||
FileSource.ASANA: Asana,
|
||||
FileSource.GITHUB: Github,
|
||||
FileSource.GITLAB: Gitlab,
|
||||
}
|
||||
|
||||
|
||||
|
||||
@ -26,6 +26,7 @@ import time
|
||||
from api.db import PIPELINE_SPECIAL_PROGRESS_FREEZE_TASK_TYPES
|
||||
from api.db.services.knowledgebase_service import KnowledgebaseService
|
||||
from api.db.services.pipeline_operation_log_service import PipelineOperationLogService
|
||||
from api.db.joint_services.memory_message_service import handle_save_to_memory_task
|
||||
from common.connection_utils import timeout
|
||||
from common.metadata_utils import update_metadata_to, metadata_schema
|
||||
from rag.utils.base64_image import image2id
|
||||
@ -96,6 +97,7 @@ TASK_TYPE_TO_PIPELINE_TASK_TYPE = {
|
||||
"raptor": PipelineTaskType.RAPTOR,
|
||||
"graphrag": PipelineTaskType.GRAPH_RAG,
|
||||
"mindmap": PipelineTaskType.MINDMAP,
|
||||
"memory": PipelineTaskType.MEMORY,
|
||||
}
|
||||
|
||||
UNACKED_ITERATOR = None
|
||||
@ -157,8 +159,8 @@ def set_progress(task_id, from_page=0, to_page=-1, prog=None, msg="Processing...
|
||||
logging.info(f"set_progress({task_id}), progress: {prog}, progress_msg: {msg}")
|
||||
except DoesNotExist:
|
||||
logging.warning(f"set_progress({task_id}) got exception DoesNotExist")
|
||||
except Exception:
|
||||
logging.exception(f"set_progress({task_id}), progress: {prog}, progress_msg: {msg}, got exception")
|
||||
except Exception as e:
|
||||
logging.exception(f"set_progress({task_id}), progress: {prog}, progress_msg: {msg}, got exception: {e}")
|
||||
|
||||
|
||||
async def collect():
|
||||
@ -166,6 +168,7 @@ async def collect():
|
||||
global UNACKED_ITERATOR
|
||||
|
||||
svr_queue_names = settings.get_svr_queue_names()
|
||||
redis_msg = None
|
||||
try:
|
||||
if not UNACKED_ITERATOR:
|
||||
UNACKED_ITERATOR = REDIS_CONN.get_unacked_iterator(svr_queue_names, SVR_CONSUMER_GROUP_NAME, CONSUMER_NAME)
|
||||
@ -176,8 +179,8 @@ async def collect():
|
||||
redis_msg = REDIS_CONN.queue_consumer(svr_queue_name, SVR_CONSUMER_GROUP_NAME, CONSUMER_NAME)
|
||||
if redis_msg:
|
||||
break
|
||||
except Exception:
|
||||
logging.exception("collect got exception")
|
||||
except Exception as e:
|
||||
logging.exception(f"collect got exception: {e}")
|
||||
return None, None
|
||||
|
||||
if not redis_msg:
|
||||
@ -196,6 +199,9 @@ async def collect():
|
||||
if task:
|
||||
task["doc_id"] = msg["doc_id"]
|
||||
task["doc_ids"] = msg.get("doc_ids", []) or []
|
||||
elif msg.get("task_type") == PipelineTaskType.MEMORY.lower():
|
||||
_, task_obj = TaskService.get_by_id(msg["id"])
|
||||
task = task_obj.to_dict()
|
||||
else:
|
||||
task = TaskService.get_task(msg["id"])
|
||||
|
||||
@ -214,6 +220,10 @@ async def collect():
|
||||
task["tenant_id"] = msg["tenant_id"]
|
||||
task["dataflow_id"] = msg["dataflow_id"]
|
||||
task["kb_id"] = msg.get("kb_id", "")
|
||||
if task_type[:6] == "memory":
|
||||
task["memory_id"] = msg["memory_id"]
|
||||
task["source_id"] = msg["source_id"]
|
||||
task["message_dict"] = msg["message_dict"]
|
||||
return redis_msg, task
|
||||
|
||||
|
||||
@ -865,6 +875,10 @@ async def insert_es(task_id, task_tenant_id, task_dataset_id, chunks, progress_c
|
||||
async def do_handle_task(task):
|
||||
task_type = task.get("task_type", "")
|
||||
|
||||
if task_type == "memory":
|
||||
await handle_save_to_memory_task(task)
|
||||
return
|
||||
|
||||
if task_type == "dataflow" and task.get("doc_id", "") == CANVAS_DEBUG_DOC_ID:
|
||||
await run_dataflow(task)
|
||||
return
|
||||
@ -1050,8 +1064,8 @@ async def do_handle_task(task):
|
||||
async def _maybe_insert_es(_chunks):
|
||||
if has_canceled(task_id):
|
||||
return True
|
||||
e = await insert_es(task_id, task_tenant_id, task_dataset_id, _chunks, progress_callback)
|
||||
return bool(e)
|
||||
insert_result = await insert_es(task_id, task_tenant_id, task_dataset_id, _chunks, progress_callback)
|
||||
return bool(insert_result)
|
||||
|
||||
try:
|
||||
if not await _maybe_insert_es(chunks):
|
||||
@ -1101,10 +1115,9 @@ async def do_handle_task(task):
|
||||
search.index_name(task_tenant_id),
|
||||
task_dataset_id,
|
||||
)
|
||||
except Exception:
|
||||
except Exception as e:
|
||||
logging.exception(
|
||||
f"Remove doc({task_doc_id}) from docStore failed when task({task_id}) canceled."
|
||||
)
|
||||
f"Remove doc({task_doc_id}) from docStore failed when task({task_id}) canceled, exception: {e}")
|
||||
|
||||
|
||||
async def handle_task():
|
||||
@ -1117,24 +1130,25 @@ async def handle_task():
|
||||
task_type = task["task_type"]
|
||||
pipeline_task_type = TASK_TYPE_TO_PIPELINE_TASK_TYPE.get(task_type,
|
||||
PipelineTaskType.PARSE) or PipelineTaskType.PARSE
|
||||
|
||||
task_id = task["id"]
|
||||
try:
|
||||
logging.info(f"handle_task begin for task {json.dumps(task)}")
|
||||
CURRENT_TASKS[task["id"]] = copy.deepcopy(task)
|
||||
await do_handle_task(task)
|
||||
DONE_TASKS += 1
|
||||
CURRENT_TASKS.pop(task["id"], None)
|
||||
CURRENT_TASKS.pop(task_id, None)
|
||||
logging.info(f"handle_task done for task {json.dumps(task)}")
|
||||
except Exception as e:
|
||||
FAILED_TASKS += 1
|
||||
CURRENT_TASKS.pop(task["id"], None)
|
||||
CURRENT_TASKS.pop(task_id, None)
|
||||
try:
|
||||
err_msg = str(e)
|
||||
while isinstance(e, exceptiongroup.ExceptionGroup):
|
||||
e = e.exceptions[0]
|
||||
err_msg += ' -- ' + str(e)
|
||||
set_progress(task["id"], prog=-1, msg=f"[Exception]: {err_msg}")
|
||||
except Exception:
|
||||
set_progress(task_id, prog=-1, msg=f"[Exception]: {err_msg}")
|
||||
except Exception as e:
|
||||
logging.exception(f"[Exception]: {str(e)}")
|
||||
pass
|
||||
logging.exception(f"handle_task got exception for task {json.dumps(task)}")
|
||||
finally:
|
||||
@ -1207,8 +1221,8 @@ async def report_status():
|
||||
logging.info(f"{consumer_name} expired, removed")
|
||||
REDIS_CONN.srem("TASKEXE", consumer_name)
|
||||
REDIS_CONN.delete(consumer_name)
|
||||
except Exception:
|
||||
logging.exception("report_status got exception")
|
||||
except Exception as e:
|
||||
logging.exception(f"report_status got exception: {e}")
|
||||
finally:
|
||||
redis_lock.release()
|
||||
await asyncio.sleep(30)
|
||||
|
||||
uv.lock (generated, 55 lines changed)
@ -5509,6 +5509,22 @@ dependencies = [
|
||||
]
|
||||
sdist = { url = "https://pypi.tuna.tsinghua.edu.cn/packages/ba/8e/aedef81641c8dca6fd0fb7294de5bed9c45f3397d67fddf755c1042c2642/PyExecJS-1.5.1.tar.gz", hash = "sha256:34cc1d070976918183ff7bdc0ad71f8157a891c92708c00c5fbbff7a769f505c", size = 13344, upload-time = "2018-01-18T04:33:55.126Z" }
|
||||
|
||||
[[package]]
|
||||
name = "pygithub"
|
||||
version = "2.8.1"
|
||||
source = { registry = "https://pypi.tuna.tsinghua.edu.cn/simple" }
|
||||
dependencies = [
|
||||
{ name = "pyjwt", extra = ["crypto"] },
|
||||
{ name = "pynacl" },
|
||||
{ name = "requests" },
|
||||
{ name = "typing-extensions" },
|
||||
{ name = "urllib3" },
|
||||
]
|
||||
sdist = { url = "https://pypi.tuna.tsinghua.edu.cn/packages/c1/74/e560bdeffea72ecb26cff27f0fad548bbff5ecc51d6a155311ea7f9e4c4c/pygithub-2.8.1.tar.gz", hash = "sha256:341b7c78521cb07324ff670afd1baa2bf5c286f8d9fd302c1798ba594a5400c9", size = 2246994, upload-time = "2025-09-02T17:41:54.674Z" }
|
||||
wheels = [
|
||||
{ url = "https://pypi.tuna.tsinghua.edu.cn/packages/07/ba/7049ce39f653f6140aac4beb53a5aaf08b4407b6a3019aae394c1c5244ff/pygithub-2.8.1-py3-none-any.whl", hash = "sha256:23a0a5bca93baef082e03411bf0ce27204c32be8bfa7abc92fe4a3e132936df0", size = 432709, upload-time = "2025-09-02T17:41:52.947Z" },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "pygments"
|
||||
version = "2.19.2"
|
||||
@ -5541,6 +5557,43 @@ wheels = [
|
||||
{ url = "https://pypi.tuna.tsinghua.edu.cn/packages/7c/4c/ad33b92b9864cbde84f259d5df035a6447f91891f5be77788e2a3892bce3/pymysql-1.1.2-py3-none-any.whl", hash = "sha256:e6b1d89711dd51f8f74b1631fe08f039e7d76cf67a42a323d3178f0f25762ed9", size = 45300, upload-time = "2025-08-24T12:55:53.394Z" },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "pynacl"
|
||||
version = "1.6.1"
|
||||
source = { registry = "https://pypi.tuna.tsinghua.edu.cn/simple" }
|
||||
dependencies = [
|
||||
{ name = "cffi", marker = "platform_python_implementation != 'PyPy'" },
|
||||
]
|
||||
sdist = { url = "https://pypi.tuna.tsinghua.edu.cn/packages/b2/46/aeca065d227e2265125aea590c9c47fbf5786128c9400ee0eb7c88931f06/pynacl-1.6.1.tar.gz", hash = "sha256:8d361dac0309f2b6ad33b349a56cd163c98430d409fa503b10b70b3ad66eaa1d", size = 3506616, upload-time = "2025-11-10T16:02:13.195Z" }
|
||||
wheels = [
|
||||
{ url = "https://pypi.tuna.tsinghua.edu.cn/packages/75/d6/4b2dca33ed512de8f54e5c6074aa06eaeb225bfbcd9b16f33a414389d6bd/pynacl-1.6.1-cp314-cp314t-macosx_10_10_universal2.whl", hash = "sha256:7d7c09749450c385301a3c20dca967a525152ae4608c0a096fe8464bfc3df93d", size = 389109, upload-time = "2025-11-10T16:01:28.79Z" },
|
||||
{ url = "https://pypi.tuna.tsinghua.edu.cn/packages/3c/30/e8dbb8ff4fa2559bbbb2187ba0d0d7faf728d17cb8396ecf4a898b22d3da/pynacl-1.6.1-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:fc734c1696ffd49b40f7c1779c89ba908157c57345cf626be2e0719488a076d3", size = 808254, upload-time = "2025-11-10T16:01:37.839Z" },
|
||||
{ url = "https://pypi.tuna.tsinghua.edu.cn/packages/44/f9/f5449c652f31da00249638dbab065ad4969c635119094b79b17c3a4da2ab/pynacl-1.6.1-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:3cd787ec1f5c155dc8ecf39b1333cfef41415dc96d392f1ce288b4fe970df489", size = 1407365, upload-time = "2025-11-10T16:01:40.454Z" },
|
||||
{ url = "https://pypi.tuna.tsinghua.edu.cn/packages/eb/2f/9aa5605f473b712065c0a193ebf4ad4725d7a245533f0cd7e5dcdbc78f35/pynacl-1.6.1-cp314-cp314t-manylinux_2_26_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:6b35d93ab2df03ecb3aa506be0d3c73609a51449ae0855c2e89c7ed44abde40b", size = 843842, upload-time = "2025-11-10T16:01:30.524Z" },
|
||||
{ url = "https://pypi.tuna.tsinghua.edu.cn/packages/32/8d/748f0f6956e207453da8f5f21a70885fbbb2e060d5c9d78e0a4a06781451/pynacl-1.6.1-cp314-cp314t-manylinux_2_26_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:dece79aecbb8f4640a1adbb81e4aa3bfb0e98e99834884a80eb3f33c7c30e708", size = 1445559, upload-time = "2025-11-10T16:01:33.663Z" },
|
||||
{ url = "https://pypi.tuna.tsinghua.edu.cn/packages/78/d0/2387f0dcb0e9816f38373999e48db4728ed724d31accdd4e737473319d35/pynacl-1.6.1-cp314-cp314t-manylinux_2_34_aarch64.whl", hash = "sha256:c2228054f04bf32d558fb89bb99f163a8197d5a9bf4efa13069a7fa8d4b93fc3", size = 825791, upload-time = "2025-11-10T16:01:34.823Z" },
|
||||
{ url = "https://pypi.tuna.tsinghua.edu.cn/packages/18/3d/ef6fb7eb072aaf15f280bc66f26ab97e7fc9efa50fb1927683013ef47473/pynacl-1.6.1-cp314-cp314t-manylinux_2_34_x86_64.whl", hash = "sha256:2b12f1b97346f177affcdfdc78875ff42637cb40dcf79484a97dae3448083a78", size = 1410843, upload-time = "2025-11-10T16:01:36.401Z" },
|
||||
{ url = "https://pypi.tuna.tsinghua.edu.cn/packages/e3/fb/23824a017526850ee7d8a1cc4cd1e3e5082800522c10832edbbca8619537/pynacl-1.6.1-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:e735c3a1bdfde3834503baf1a6d74d4a143920281cb724ba29fb84c9f49b9c48", size = 801140, upload-time = "2025-11-10T16:01:42.013Z" },
|
||||
{ url = "https://pypi.tuna.tsinghua.edu.cn/packages/5d/d1/ebc6b182cb98603a35635b727d62f094bc201bf610f97a3bb6357fe688d2/pynacl-1.6.1-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:3384a454adf5d716a9fadcb5eb2e3e72cd49302d1374a60edc531c9957a9b014", size = 1371966, upload-time = "2025-11-10T16:01:43.297Z" },
|
||||
{ url = "https://pypi.tuna.tsinghua.edu.cn/packages/64/f4/c9d7b6f02924b1f31db546c7bd2a83a2421c6b4a8e6a2e53425c9f2802e0/pynacl-1.6.1-cp314-cp314t-win32.whl", hash = "sha256:d8615ee34d01c8e0ab3f302dcdd7b32e2bcf698ba5f4809e7cc407c8cdea7717", size = 230482, upload-time = "2025-11-10T16:01:47.688Z" },
|
||||
{ url = "https://pypi.tuna.tsinghua.edu.cn/packages/c4/2c/942477957fba22da7bf99131850e5ebdff66623418ab48964e78a7a8293e/pynacl-1.6.1-cp314-cp314t-win_amd64.whl", hash = "sha256:5f5b35c1a266f8a9ad22525049280a600b19edd1f785bccd01ae838437dcf935", size = 243232, upload-time = "2025-11-10T16:01:45.208Z" },
|
||||
{ url = "https://pypi.tuna.tsinghua.edu.cn/packages/7a/0c/bdbc0d04a53b96a765ab03aa2cf9a76ad8653d70bf1665459b9a0dedaa1c/pynacl-1.6.1-cp314-cp314t-win_arm64.whl", hash = "sha256:d984c91fe3494793b2a1fb1e91429539c6c28e9ec8209d26d25041ec599ccf63", size = 187907, upload-time = "2025-11-10T16:01:46.328Z" },
|
||||
{ url = "https://pypi.tuna.tsinghua.edu.cn/packages/49/41/3cfb3b4f3519f6ff62bf71bf1722547644bcfb1b05b8fdbdc300249ba113/pynacl-1.6.1-cp38-abi3-macosx_10_10_universal2.whl", hash = "sha256:a6f9fd6d6639b1e81115c7f8ff16b8dedba1e8098d2756275d63d208b0e32021", size = 387591, upload-time = "2025-11-10T16:01:49.1Z" },
|
||||
{ url = "https://pypi.tuna.tsinghua.edu.cn/packages/18/21/b8a6563637799f617a3960f659513eccb3fcc655d5fc2be6e9dc6416826f/pynacl-1.6.1-cp38-abi3-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:e49a3f3d0da9f79c1bec2aa013261ab9fa651c7da045d376bd306cf7c1792993", size = 798866, upload-time = "2025-11-10T16:01:55.688Z" },
|
||||
{ url = "https://pypi.tuna.tsinghua.edu.cn/packages/e8/6c/dc38033bc3ea461e05ae8f15a81e0e67ab9a01861d352ae971c99de23e7c/pynacl-1.6.1-cp38-abi3-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:7713f8977b5d25f54a811ec9efa2738ac592e846dd6e8a4d3f7578346a841078", size = 1398001, upload-time = "2025-11-10T16:01:57.101Z" },
|
||||
{ url = "https://pypi.tuna.tsinghua.edu.cn/packages/9f/05/3ec0796a9917100a62c5073b20c4bce7bf0fea49e99b7906d1699cc7b61b/pynacl-1.6.1-cp38-abi3-manylinux_2_26_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:5a3becafc1ee2e5ea7f9abc642f56b82dcf5be69b961e782a96ea52b55d8a9fc", size = 834024, upload-time = "2025-11-10T16:01:50.228Z" },
|
||||
{ url = "https://pypi.tuna.tsinghua.edu.cn/packages/f0/b7/ae9982be0f344f58d9c64a1c25d1f0125c79201634efe3c87305ac7cb3e3/pynacl-1.6.1-cp38-abi3-manylinux_2_26_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:4ce50d19f1566c391fedc8dc2f2f5be265ae214112ebe55315e41d1f36a7f0a9", size = 1436766, upload-time = "2025-11-10T16:01:51.886Z" },
|
||||
{ url = "https://pypi.tuna.tsinghua.edu.cn/packages/b4/51/b2ccbf89cf3025a02e044dd68a365cad593ebf70f532299f2c047d2b7714/pynacl-1.6.1-cp38-abi3-manylinux_2_34_aarch64.whl", hash = "sha256:543f869140f67d42b9b8d47f922552d7a967e6c116aad028c9bfc5f3f3b3a7b7", size = 817275, upload-time = "2025-11-10T16:01:53.351Z" },
|
||||
{ url = "https://pypi.tuna.tsinghua.edu.cn/packages/a8/6c/dd9ee8214edf63ac563b08a9b30f98d116942b621d39a751ac3256694536/pynacl-1.6.1-cp38-abi3-manylinux_2_34_x86_64.whl", hash = "sha256:a2bb472458c7ca959aeeff8401b8efef329b0fc44a89d3775cffe8fad3398ad8", size = 1401891, upload-time = "2025-11-10T16:01:54.587Z" },
|
||||
{ url = "https://pypi.tuna.tsinghua.edu.cn/packages/0f/c1/97d3e1c83772d78ee1db3053fd674bc6c524afbace2bfe8d419fd55d7ed1/pynacl-1.6.1-cp38-abi3-musllinux_1_1_aarch64.whl", hash = "sha256:3206fa98737fdc66d59b8782cecc3d37d30aeec4593d1c8c145825a345bba0f0", size = 772291, upload-time = "2025-11-10T16:01:58.111Z" },
|
||||
{ url = "https://pypi.tuna.tsinghua.edu.cn/packages/4d/ca/691ff2fe12f3bb3e43e8e8df4b806f6384593d427f635104d337b8e00291/pynacl-1.6.1-cp38-abi3-musllinux_1_1_x86_64.whl", hash = "sha256:53543b4f3d8acb344f75fd4d49f75e6572fce139f4bfb4815a9282296ff9f4c0", size = 1370839, upload-time = "2025-11-10T16:01:59.252Z" },
|
||||
{ url = "https://pypi.tuna.tsinghua.edu.cn/packages/30/27/06fe5389d30391fce006442246062cc35773c84fbcad0209fbbf5e173734/pynacl-1.6.1-cp38-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:319de653ef84c4f04e045eb250e6101d23132372b0a61a7acf91bac0fda8e58c", size = 791371, upload-time = "2025-11-10T16:02:01.075Z" },
|
||||
{ url = "https://pypi.tuna.tsinghua.edu.cn/packages/2c/7a/e2bde8c9d39074a5aa046c7d7953401608d1f16f71e237f4bef3fb9d7e49/pynacl-1.6.1-cp38-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:262a8de6bba4aee8a66f5edf62c214b06647461c9b6b641f8cd0cb1e3b3196fe", size = 1363031, upload-time = "2025-11-10T16:02:02.656Z" },
|
||||
{ url = "https://pypi.tuna.tsinghua.edu.cn/packages/dd/b6/63fd77264dae1087770a1bb414bc604470f58fbc21d83822fc9c76248076/pynacl-1.6.1-cp38-abi3-win32.whl", hash = "sha256:9fd1a4eb03caf8a2fe27b515a998d26923adb9ddb68db78e35ca2875a3830dde", size = 226585, upload-time = "2025-11-10T16:02:07.116Z" },
|
||||
{ url = "https://pypi.tuna.tsinghua.edu.cn/packages/12/c8/b419180f3fdb72ab4d45e1d88580761c267c7ca6eda9a20dcbcba254efe6/pynacl-1.6.1-cp38-abi3-win_amd64.whl", hash = "sha256:a569a4069a7855f963940040f35e87d8bc084cb2d6347428d5ad20550a0a1a21", size = 238923, upload-time = "2025-11-10T16:02:04.401Z" },
|
||||
{ url = "https://pypi.tuna.tsinghua.edu.cn/packages/35/76/c34426d532e4dce7ff36e4d92cb20f4cbbd94b619964b93d24e8f5b5510f/pynacl-1.6.1-cp38-abi3-win_arm64.whl", hash = "sha256:5953e8b8cfadb10889a6e7bd0f53041a745d1b3d30111386a1bb37af171e6daf", size = 183970, upload-time = "2025-11-10T16:02:05.786Z" },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "pynndescent"
|
||||
version = "0.5.13"
|
||||
@ -6184,6 +6237,7 @@ dependencies = [
|
||||
{ name = "pyairtable" },
|
||||
{ name = "pyclipper" },
|
||||
{ name = "pycryptodomex" },
|
||||
{ name = "pygithub" },
|
||||
{ name = "pyobvector" },
|
||||
{ name = "pyodbc" },
|
||||
{ name = "pypandoc" },
|
||||
@ -6315,6 +6369,7 @@ requires-dist = [
|
||||
{ name = "pyairtable", specifier = ">=3.3.0" },
|
||||
{ name = "pyclipper", specifier = ">=1.4.0,<2.0.0" },
|
||||
{ name = "pycryptodomex", specifier = "==3.20.0" },
|
||||
{ name = "pygithub", specifier = ">=2.8.1" },
|
||||
{ name = "pyobvector", specifier = "==0.2.18" },
|
||||
{ name = "pyodbc", specifier = ">=5.2.0,<6.0.0" },
|
||||
{ name = "pypandoc", specifier = ">=1.16" },
|
||||
|
||||
web/src/assets/svg/data-source/github.svg (new file, 3 lines)
@ -0,0 +1,3 @@
|
||||
<svg width="1024" height="1024" viewBox="0 0 1024 1024" fill="none" xmlns="http://www.w3.org/2000/svg">
|
||||
<path fill-rule="evenodd" clip-rule="evenodd" d="M8 0C3.58 0 0 3.58 0 8C0 11.54 2.29 14.53 5.47 15.59C5.87 15.66 6.02 15.42 6.02 15.21C6.02 15.02 6.01 14.39 6.01 13.72C4 14.09 3.48 13.23 3.32 12.78C3.23 12.55 2.84 11.84 2.5 11.65C2.22 11.5 1.82 11.13 2.49 11.12C3.12 11.11 3.57 11.7 3.72 11.94C4.44 13.15 5.59 12.81 6.05 12.6C6.12 12.08 6.33 11.73 6.56 11.53C4.78 11.33 2.92 10.64 2.92 7.58C2.92 6.71 3.23 5.99 3.74 5.43C3.66 5.23 3.38 4.41 3.82 3.31C3.82 3.31 4.49 3.1 6.02 4.13C6.66 3.95 7.34 3.86 8.02 3.86C8.7 3.86 9.38 3.95 10.02 4.13C11.55 3.09 12.22 3.31 12.22 3.31C12.66 4.41 12.38 5.23 12.3 5.43C12.81 5.99 13.12 6.7 13.12 7.58C13.12 10.65 11.25 11.33 9.47 11.53C9.76 11.78 10.01 12.26 10.01 13.01C10.01 14.08 10 14.94 10 15.21C10 15.42 10.15 15.67 10.55 15.59C13.71 14.53 16 11.53 16 8C16 3.58 12.42 0 8 0Z" transform="scale(64)" fill="#1B1F23"/>
|
||||
</svg>
|
||||
|
@ -278,7 +278,7 @@ export const useRunDocument = () => {
|
||||
const ret = await kbService.document_run({
|
||||
doc_ids: documentIds,
|
||||
run,
|
||||
...option,
|
||||
...(option || {}),
|
||||
});
|
||||
const code = get(ret, 'data.code');
|
||||
if (code === 0) {
|
||||
|
||||
@ -949,6 +949,8 @@ Beispiel: Virtual Hosted Style`,
|
||||
'Verbinden Sie Ihre Dropbox, um Dateien und Ordner von einem ausgewählten Konto zu synchronisieren.',
|
||||
boxDescription:
|
||||
'Verbinden Sie Ihr Box-Laufwerk, um Dateien und Ordner zu synchronisieren.',
|
||||
githubDescription:
|
||||
'Verbinden Sie GitHub, um Pull Requests und Issues zur Recherche zu synchronisieren.',
|
||||
dropboxAccessTokenTip:
|
||||
'Generieren Sie ein langlebiges Zugriffstoken in der Dropbox App Console mit den Bereichen files.metadata.read, files.content.read und sharing.read.',
|
||||
moodleDescription:
|
||||
|
||||
@ -931,6 +931,8 @@ Example: Virtual Hosted Style`,
|
||||
dropboxDescription:
|
||||
'Connect your Dropbox to sync files and folders from a chosen account.',
|
||||
boxDescription: 'Connect your Box drive to sync files and folders.',
|
||||
githubDescription:
|
||||
'Connect GitHub to sync pull requests and issues for retrieval.',
|
||||
airtableDescription:
|
||||
'Connect to Airtable and synchronize files from a specified table within a designated workspace.',
|
||||
gitlabDescription:
|
||||
|
||||
@ -747,6 +747,8 @@ export default {
|
||||
'Синхронизируйте страницы и базы данных из Notion для извлечения знаний.',
|
||||
boxDescription:
|
||||
'Подключите ваш диск Box для синхронизации файлов и папок.',
|
||||
githubDescription:
|
||||
'Подключите GitHub для синхронизации содержимого Pull Request и Issue для поиска.',
|
||||
airtableDescription:
|
||||
'Подключите Airtable и синхронизируйте файлы из указанной таблицы в заданном рабочем пространстве.',
|
||||
gitlabDescription:
|
||||
|
||||
@ -552,10 +552,10 @@ export default {
|
||||
注意您需要指定的条目类型。</p>`,
|
||||
tag: `<p>使用“Tag”分块方法的知识库用作标签集.其他知识库可以把标签集当中的标签按照相似度匹配到自己对应的文本块中,对这些知识库的查询也将根据此标签集对自己进行标记。</p>
|
||||
<p>标签集<b>不会</b>直接参与 RAG 检索过程。</p>
|
||||
<p>标签集中的每个文本分块是都是相互独立的标签和标签描述的文本对。</p>
|
||||
<p>标签集中的每个文本分块都是相互独立的标签和标签描述的文本对。</p>
|
||||
|
||||
<p>Tag 分块方法支持<b>XLSX</b>和<b>CSV/TXT</b>文件格式。</p>
|
||||
<p>如果文件为<b>XLSX</b>格式,则它应该包含两列无标题:一列用于标签描述,另一列用于标签,标签描述列位于标签列之前。支持多个工作表,只要列结构正确即可。</p>
|
||||
<p>如果文件为<b>XLSX</b>格式,则它应该包含无标题的两列:一列用于标签描述,另一列用于标签,标签描述列位于标签列之前。支持多个工作表,只要列结构正确即可。</p>
|
||||
<p>如果文件为 <b>CSV/TXT</b> 格式,则必须使用 UTF-8 编码并以 TAB 作为分隔符来分隔内容和标签。</p>
|
||||
<p>在标签列中,标签之间使用英文逗号分隔。</p>
|
||||
<i>不符合上述规则的文本行将被忽略。</i>
|
||||
@ -861,6 +861,8 @@ General:实体和关系提取提示来自 GitHub - microsoft/graphrag:基于
|
||||
'请上传由 Google Console 生成的 OAuth JSON。如果仅包含 client credentials,请通过浏览器授权一次以获取长期有效的刷新 Token。',
|
||||
dropboxDescription: '连接 Dropbox,同步指定账号下的文件与文件夹。',
|
||||
boxDescription: '连接你的 Box 云盘以同步文件和文件夹。',
|
||||
githubDescription:
|
||||
'连接 GitHub,可同步 Pull Request 与 Issue 内容用于检索。',
|
||||
airtableDescription: '连接 Airtable,同步指定工作区下指定表格中的文件。',
|
||||
gitlabDescription:
|
||||
'连接 GitLab,同步仓库、Issue、合并请求(MR)及相关文档内容。',
|
||||
|
||||
@ -129,6 +129,7 @@ export const util = {
|
||||
metaDataSettingJSONToMetaDataTableData(
|
||||
data: IMetaDataReturnJSONSettings,
|
||||
): IMetaDataTableData[] {
|
||||
if (!Array.isArray(data)) return [];
|
||||
return data.map((item) => {
|
||||
return {
|
||||
field: item.key,
|
||||
|
||||
@ -216,7 +216,7 @@ export default function Dataset() {
|
||||
{reparseDialogVisible && (
|
||||
<ReparseDialog
|
||||
// hidden={isZeroChunk || isRunning}
|
||||
hidden={false}
|
||||
hidden={true}
|
||||
handleOperationIconClick={handleOperationIconClick}
|
||||
chunk_num={0}
|
||||
visible={reparseDialogVisible}
|
||||
|
||||
@ -72,7 +72,7 @@ export function ParsingStatusCell({
|
||||
const isRunning = isParserRunning(run);
|
||||
const isZeroChunk = chunk_num === 0;
|
||||
|
||||
const handleOperationIconClick = (option: {
|
||||
const handleOperationIconClick = (option?: {
|
||||
delete: boolean;
|
||||
apply_kb: boolean;
|
||||
}) => {
|
||||
@ -183,8 +183,8 @@ export function ParsingStatusCell({
|
||||
)}
|
||||
{reparseDialogVisible && (
|
||||
<ReparseDialog
|
||||
// hidden={isZeroChunk || isRunning}
|
||||
hidden={false}
|
||||
hidden={isZeroChunk || isRunning}
|
||||
// hidden={false}
|
||||
handleOperationIconClick={handleOperationIconClick}
|
||||
chunk_num={chunk_num}
|
||||
visible={reparseDialogVisible}
|
||||
|
||||
@ -7,136 +7,134 @@ import {
import { Checkbox } from '@/components/ui/checkbox';
import { DialogProps } from '@radix-ui/react-dialog';
import { t } from 'i18next';
import { useCallback, useState } from 'react';
import { memo, useCallback, useEffect, useRef } from 'react';

export const ReparseDialog = ({
handleOperationIconClick,
chunk_num,
hidden = false,
visible = true,
hideModal,
children,
}: DialogProps & {
chunk_num: number;
handleOperationIconClick: (options: {
delete: boolean;
apply_kb: boolean;
}) => void;
visible: boolean;
hideModal: () => void;
hidden?: boolean;
}) => {
const [formInstance, setFormInstance] = useState<DynamicFormRef | null>(null);
export const ReparseDialog = memo(
({
handleOperationIconClick,
chunk_num,
hidden = false,
visible = true,
hideModal,
}: DialogProps & {
chunk_num: number;
handleOperationIconClick: (options?: {
delete: boolean;
apply_kb: boolean;
}) => void;
visible: boolean;
hideModal: () => void;
hidden?: boolean;
}) => {
// const [formInstance, setFormInstance] = useState<DynamicFormRef | null>(
// null,
// );

const formCallbackRef = useCallback((node: DynamicFormRef | null) => {
if (node) {
setFormInstance(node);
console.log('Form instance assigned:', node);
} else {
console.log('Form instance removed');
}
}, []);
// const formCallbackRef = useCallback((node: DynamicFormRef | null) => {
// if (node) {
// setFormInstance(node);
// console.log('Form instance assigned:', node);
// } else {
// console.log('Form instance removed');
// }
// }, []);

const handleCancel = useCallback(() => {
// handleOperationIconClick(false);
hideModal?.();
formInstance?.reset();
}, [formInstance]);
const formCallbackRef = useRef<DynamicFormRef>(null);

const handleSave = useCallback(async () => {
const instance = formInstance;
if (!instance) {
console.error('Form instance is null');
return;
}
const handleCancel = useCallback(() => {
// handleOperationIconClick(false);
hideModal?.();
// formInstance?.reset();
formCallbackRef?.current?.reset();
}, [formCallbackRef, hideModal]);

const check = await instance.trigger();
if (check) {
instance.submit();
const formValues = instance.getValues();
console.log(formValues);
handleOperationIconClick({
delete: formValues.delete,
apply_kb: formValues.apply_kb,
});
}
}, [formInstance, handleOperationIconClick]);
const handleSave = useCallback(async () => {
// const instance = formInstance;
const instance = formCallbackRef?.current;
if (!instance) {
console.error('Form instance is null');
return;
}

// useEffect(() => {
// if (!hidden) {
// const timer = setTimeout(() => {
// if (!formInstance) {
// console.warn(
// 'Form ref is still null after component should be mounted',
// );
// } else {
// console.log('Form ref is properly set');
// }
// }, 1000);
const check = await instance.trigger();
if (check) {
instance.submit();
const formValues = instance.getValues();
console.log(formValues);
handleOperationIconClick({
delete: formValues.delete,
apply_kb: formValues.apply_kb,
});
}
}, [formCallbackRef, handleOperationIconClick]);

// return () => clearTimeout(timer);
// }
// }, [hidden, formInstance]);
useEffect(() => {
if (hidden) {
handleOperationIconClick();
}
}, []);

return (
<ConfirmDeleteDialog
title={t(`knowledgeDetails.parseFile`)}
onOk={() => handleSave()}
onCancel={() => handleCancel()}
hidden={hidden}
open={visible}
okButtonText={t('common.confirm')}
content={{
title: t(`knowledgeDetails.parseFileTip`),
node: (
<div>
<DynamicForm.Root
onSubmit={(data) => {
console.log('submit', data);
}}
ref={formCallbackRef}
fields={[
{
name: 'delete',
label: '',
type: FormFieldType.Checkbox,
render: (fieldProps) => (
<div className="flex items-center text-text-secondary p-5 border border-border-button rounded-lg">
<Checkbox
{...fieldProps}
onCheckedChange={(checked: boolean) => {
fieldProps.onChange(checked);
}}
/>
<span className="ml-2">
{chunk_num > 0
? t(`knowledgeDetails.redo`, { chunkNum: chunk_num })
: t('knowledgeDetails.redoAll')}
</span>
</div>
),
},
{
name: 'apply_kb',
label: '',
type: FormFieldType.Checkbox,
render: (fieldProps) => (
<div className="flex items-center text-text-secondary p-5 border border-border-button rounded-lg">
<Checkbox
{...fieldProps}
onCheckedChange={(checked: boolean) => {
fieldProps.onChange(checked);
}}
/>
<span className="ml-2">
{t('knowledgeDetails.applyAutoMetadataSettings')}
</span>
</div>
),
},
]}
>
{/* <DynamicForm.CancelButton
return (
<ConfirmDeleteDialog
title={t(`knowledgeDetails.parseFile`)}
onOk={() => handleSave()}
onCancel={() => handleCancel()}
hidden={hidden}
open={visible}
okButtonText={t('common.confirm')}
content={{
title: t(`knowledgeDetails.parseFileTip`),
node: (
<div>
<DynamicForm.Root
onSubmit={(data) => {
console.log('submit', data);
}}
ref={formCallbackRef}
fields={[
{
name: 'delete',
label: '',
type: FormFieldType.Checkbox,
render: (fieldProps) => (
<div className="flex items-center text-text-secondary p-5 border border-border-button rounded-lg">
<Checkbox
{...fieldProps}
onCheckedChange={(checked: boolean) => {
fieldProps.onChange(checked);
}}
/>
<span className="ml-2">
{chunk_num > 0
? t(`knowledgeDetails.redo`, {
chunkNum: chunk_num,
})
: t('knowledgeDetails.redoAll')}
</span>
</div>
),
},
{
name: 'apply_kb',
label: '',
type: FormFieldType.Checkbox,
render: (fieldProps) => (
<div className="flex items-center text-text-secondary p-5 border border-border-button rounded-lg">
<Checkbox
{...fieldProps}
onCheckedChange={(checked: boolean) => {
fieldProps.onChange(checked);
}}
/>
<span className="ml-2">
{t('knowledgeDetails.applyAutoMetadataSettings')}
</span>
</div>
),
},
]}
>
{/* <DynamicForm.CancelButton
handleCancel={() => handleOperationIconClick(false)}
cancelText={t('common.cancel')}
/>
@ -144,12 +142,13 @@ export const ReparseDialog = ({
buttonText={t('common.confirm')}
submitFunc={handleSave}
/> */}
</DynamicForm.Root>
</div>
),
}}
>
{/* {children} */}
</ConfirmDeleteDialog>
);
};
</DynamicForm.Root>
</div>
),
}}
>
{/* {children} */}
</ConfirmDeleteDialog>
);
},
);
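
The refactor above swaps the callback-ref-plus-`useState` bookkeeping for a plain `useRef<DynamicFormRef>(null)` and wraps the dialog in `memo`. A minimal sketch (not part of the diff) of that ref-handle pattern, assuming `DynamicFormRef` exposes the `trigger`/`submit`/`getValues`/`reset` methods called in the diff; the component and prop names here are illustrative stand-ins, not RAGFlow APIs:

```tsx
import { forwardRef, useImperativeHandle, useRef, useState } from 'react';

// Assumed shape of the imperative handle exposed by DynamicForm.Root;
// the method names mirror the calls made in the diff above, but the
// real interface lives in the RAGFlow codebase.
interface DynamicFormRef {
  trigger: () => Promise<boolean>;
  submit: () => void;
  getValues: () => Record<string, any>;
  reset: () => void;
}

// Minimal stand-in form that exposes an imperative handle via a ref,
// instead of handing its instance back through a callback ref + useState.
const SketchForm = forwardRef<DynamicFormRef>((_props, ref) => {
  const [values, setValues] = useState<Record<string, any>>({});
  useImperativeHandle(ref, () => ({
    trigger: async () => true, // pretend validation always passes
    submit: () => console.log('submit', values),
    getValues: () => values,
    reset: () => setValues({}),
  }));
  return <input onChange={(e) => setValues({ text: e.target.value })} />;
});

// Parent mirrors the refactored ReparseDialog: it reads the handle from
// formRef.current at save time, so no extra state or re-render is needed.
function SketchParent() {
  const formRef = useRef<DynamicFormRef>(null);
  const handleSave = async () => {
    const instance = formRef.current;
    if (!instance) return;
    if (await instance.trigger()) {
      instance.submit();
      console.log(instance.getValues());
    }
  };
  return (
    <div>
      <SketchForm ref={formRef} />
      <button onClick={handleSave}>Save</button>
    </div>
  );
}
```

Reading the handle from `formRef.current` at save time avoids the extra state update (and re-render) that the previous callback-ref approach triggered when the form mounted.
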
@ -54,7 +54,7 @@ export function useBulkOperateDataset({
);

const handleRunClick = useCallback(
(option: { delete: boolean; apply_kb: boolean }) => {
(option?: { delete: boolean; apply_kb: boolean }) => {
runDocument(1, option);
},
[runDocument],
@ -10,7 +10,7 @@ export const useHandleRunDocumentByIds = (id: string) => {
const handleRunDocumentByIds = async (
documentId: string,
isRunning: boolean,
option: { delete: boolean; apply_kb: boolean },
option?: { delete: boolean; apply_kb: boolean },
) => {
if (isLoading) {
return;
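
Both hooks above now accept the options object as optional, matching the dialog-side change. A hedged sketch (not part of the diff) of one way a missing option could be tolerated when building the run request; the payload shape is purely illustrative, not the actual RAGFlow service call:

```ts
// Illustrative only: `RunOption` mirrors the optional parameter added above,
// while the payload construction is an assumption, not RAGFlow's real API.
type RunOption = { delete: boolean; apply_kb: boolean };

function buildRunPayload(documentIds: string[], option?: RunOption) {
  // Treat absent flags as false so callers may omit the object entirely.
  const { delete: del = false, apply_kb = false } = option ?? {};
  return { doc_ids: documentIds, delete: del, apply_kb };
}

console.log(buildRunPayload(['doc-1'], { delete: true, apply_kb: false }));
console.log(buildRunPayload(['doc-1'])); // option omitted
```
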
@ -27,6 +27,7 @@ export enum DataSourceKey {
AIRTABLE = 'airtable',
GITLAB = 'gitlab',
ASANA = 'asana',
GITHUB = 'github',
// SHAREPOINT = 'sharepoint',
// SLACK = 'slack',
// TEAMS = 'teams',
@ -121,6 +122,11 @@ export const generateDataSourceInfo = (t: TFunction) => {
description: t(`setting.${DataSourceKey.ASANA}Description`),
icon: <SvgIcon name={'data-source/asana'} width={38} />,
},
[DataSourceKey.GITHUB]: {
name: 'GitHub',
description: t(`setting.${DataSourceKey.GITHUB}Description`),
icon: <SvgIcon name={'data-source/github'} width={38} />,
},
};
};
@ -738,6 +744,40 @@ export const DataSourceFormFields = {
required: false,
},
],
[DataSourceKey.GITHUB]: [
{
label: 'Repository Owner',
name: 'config.repository_owner',
type: FormFieldType.Text,
required: true,
},
{
label: 'Repository Name',
name: 'config.repository_name',
type: FormFieldType.Text,
required: true,
},
{
label: 'GitHub Access Token',
name: 'config.credentials.github_access_token',
type: FormFieldType.Password,
required: true,
},
{
label: 'Include Pull Requests',
name: 'config.include_pull_requests',
type: FormFieldType.Checkbox,
required: false,
defaultValue: false,
},
{
label: 'Include Issues',
name: 'config.include_issues',
type: FormFieldType.Checkbox,
required: false,
defaultValue: false,
},
],
};

export const DataSourceFormDefaultValues = {
@ -964,4 +1004,17 @@ export const DataSourceFormDefaultValues = {
},
},
},
[DataSourceKey.GITHUB]: {
name: '',
source: DataSourceKey.GITHUB,
config: {
repository_owner: '',
repository_name: '',
include_pull_requests: false,
include_issues: false,
credentials: {
github_access_token: '',
},
},
},
};
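
The last two hunks register the new GitHub data source: form fields for the repository coordinates and access token, plus matching defaults. A minimal sketch (not part of the diff) of the configuration shape those fields produce, typed from the `name` and `defaultValue` entries above; the interface name is an editorial stand-in:

```ts
// Editorial stand-in type inferred from the form fields and defaults above.
interface GithubDataSourceConfig {
  repository_owner: string;
  repository_name: string;
  include_pull_requests: boolean;
  include_issues: boolean;
  credentials: {
    github_access_token: string;
  };
}

// Mirrors the default values added for DataSourceKey.GITHUB.
const githubDefaults: GithubDataSourceConfig = {
  repository_owner: '',
  repository_name: '',
  include_pull_requests: false,
  include_issues: false,
  credentials: { github_access_token: '' },
};

console.log(githubDefaults);
```
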