mirror of https://github.com/infiniflow/ragflow.git
synced 2026-01-04 03:25:30 +08:00

Compare commits: 12 commits (ff2c70608d ... 6fee60e110)

- 6fee60e110
- 52f91c2388
- 348265afc1
- a7e466142d
- 2fccf3924d
- 4705d07e11
- 68be3b9a3d
- e2d17d808b
- 95edbd43ba
- b96d553cd8
- bffdb5fb11
- 109e782493
.github/workflows/release.yml (vendored): 22 changed lines
@ -10,6 +10,12 @@ on:
|
||||
tags:
|
||||
- "v*.*.*" # normal release
|
||||
|
||||
permissions:
|
||||
contents: write
|
||||
actions: read
|
||||
checks: read
|
||||
statuses: read
|
||||
|
||||
# https://docs.github.com/en/actions/using-jobs/using-concurrency
|
||||
concurrency:
|
||||
group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
|
||||
@ -76,6 +82,14 @@ jobs:
|
||||
# The body field does not support environment variable substitution directly.
|
||||
body_path: release_body.md
|
||||
|
||||
- name: Build and push image
|
||||
run: |
|
||||
sudo docker login --username infiniflow --password-stdin <<< ${{ secrets.DOCKERHUB_TOKEN }}
|
||||
sudo docker build --build-arg NEED_MIRROR=1 --build-arg HTTPS_PROXY=${HTTPS_PROXY} --build-arg HTTP_PROXY=${HTTP_PROXY} -t infiniflow/ragflow:${RELEASE_TAG} -f Dockerfile .
|
||||
sudo docker tag infiniflow/ragflow:${RELEASE_TAG} infiniflow/ragflow:latest
|
||||
sudo docker push infiniflow/ragflow:${RELEASE_TAG}
|
||||
sudo docker push infiniflow/ragflow:latest
|
||||
|
||||
- name: Build and push ragflow-sdk
|
||||
if: startsWith(github.ref, 'refs/tags/v')
|
||||
run: |
|
||||
@ -85,11 +99,3 @@ jobs:
|
||||
if: startsWith(github.ref, 'refs/tags/v')
|
||||
run: |
|
||||
cd admin/client && uv build && uv publish --token ${{ secrets.PYPI_API_TOKEN }}
|
||||
|
||||
- name: Build and push image
|
||||
run: |
|
||||
sudo docker login --username infiniflow --password-stdin <<< ${{ secrets.DOCKERHUB_TOKEN }}
|
||||
sudo docker build --build-arg NEED_MIRROR=1 --build-arg HTTPS_PROXY=${HTTPS_PROXY} --build-arg HTTP_PROXY=${HTTP_PROXY} -t infiniflow/ragflow:${RELEASE_TAG} -f Dockerfile .
|
||||
sudo docker tag infiniflow/ragflow:${RELEASE_TAG} infiniflow/ragflow:latest
|
||||
sudo docker push infiniflow/ragflow:${RELEASE_TAG}
|
||||
sudo docker push infiniflow/ragflow:latest
|
||||
|
||||
@ -132,7 +132,8 @@ class FileSource(StrEnum):
|
||||
ASANA = "asana"
|
||||
GITHUB = "github"
|
||||
GITLAB = "gitlab"
|
||||
|
||||
IMAP = "imap"
|
||||
|
||||
class PipelineTaskType(StrEnum):
|
||||
PARSE = "Parse"
|
||||
DOWNLOAD = "Download"
|
||||
|
||||
@ -38,6 +38,7 @@ from .webdav_connector import WebDAVConnector
|
||||
from .moodle_connector import MoodleConnector
|
||||
from .airtable_connector import AirtableConnector
|
||||
from .asana_connector import AsanaConnector
|
||||
from .imap_connector import ImapConnector
|
||||
from .config import BlobType, DocumentSource
|
||||
from .models import Document, TextSection, ImageSection, BasicExpertInfo
|
||||
from .exceptions import (
|
||||
@ -75,4 +76,5 @@ __all__ = [
|
||||
"UnexpectedValidationError",
|
||||
"AirtableConnector",
|
||||
"AsanaConnector",
|
||||
"ImapConnector"
|
||||
]
|
||||
|
||||
@ -1,6 +1,6 @@
|
||||
from datetime import datetime, timezone
|
||||
import logging
|
||||
from typing import Any
|
||||
from typing import Any, Generator
|
||||
|
||||
import requests
|
||||
|
||||
@ -8,8 +8,8 @@ from pyairtable import Api as AirtableApi
|
||||
|
||||
from common.data_source.config import AIRTABLE_CONNECTOR_SIZE_THRESHOLD, INDEX_BATCH_SIZE, DocumentSource
|
||||
from common.data_source.exceptions import ConnectorMissingCredentialError
|
||||
from common.data_source.interfaces import LoadConnector
|
||||
from common.data_source.models import Document, GenerateDocumentsOutput
|
||||
from common.data_source.interfaces import LoadConnector, PollConnector
|
||||
from common.data_source.models import Document, GenerateDocumentsOutput, SecondsSinceUnixEpoch
|
||||
from common.data_source.utils import extract_size_bytes, get_file_ext
|
||||
|
||||
class AirtableClientNotSetUpError(PermissionError):
|
||||
@ -19,7 +19,7 @@ class AirtableClientNotSetUpError(PermissionError):
|
||||
)
|
||||
|
||||
|
||||
class AirtableConnector(LoadConnector):
|
||||
class AirtableConnector(LoadConnector, PollConnector):
|
||||
"""
|
||||
Lightweight Airtable connector.
|
||||
|
||||
@ -132,6 +132,26 @@ class AirtableConnector(LoadConnector):
|
||||
if batch:
|
||||
yield batch
|
||||
|
||||
def poll_source(self, start: SecondsSinceUnixEpoch, end: SecondsSinceUnixEpoch) -> Generator[list[Document], None, None]:
|
||||
"""Poll source to get documents"""
|
||||
start_dt = datetime.fromtimestamp(start, tz=timezone.utc)
|
||||
end_dt = datetime.fromtimestamp(end, tz=timezone.utc)
|
||||
|
||||
for batch in self.load_from_state():
|
||||
filtered: list[Document] = []
|
||||
|
||||
for doc in batch:
|
||||
if not doc.doc_updated_at:
|
||||
continue
|
||||
|
||||
doc_dt = doc.doc_updated_at.astimezone(timezone.utc)
|
||||
|
||||
if start_dt <= doc_dt < end_dt:
|
||||
filtered.append(doc)
|
||||
|
||||
if filtered:
|
||||
yield filtered
|
||||
|
||||
if __name__ == "__main__":
|
||||
import os
|
||||
|
||||
|
||||
@ -57,6 +57,7 @@ class DocumentSource(str, Enum):
|
||||
ASANA = "asana"
|
||||
GITHUB = "github"
|
||||
GITLAB = "gitlab"
|
||||
IMAP = "imap"
|
||||
|
||||
|
||||
class FileOrigin(str, Enum):
|
||||
@ -266,6 +267,10 @@ ASANA_CONNECTOR_SIZE_THRESHOLD = int(
|
||||
os.environ.get("ASANA_CONNECTOR_SIZE_THRESHOLD", 10 * 1024 * 1024)
|
||||
)
|
||||
|
||||
IMAP_CONNECTOR_SIZE_THRESHOLD = int(
|
||||
os.environ.get("IMAP_CONNECTOR_SIZE_THRESHOLD", 10 * 1024 * 1024)
|
||||
)
|
||||
|
||||
_USER_NOT_FOUND = "Unknown Confluence User"
|
||||
|
||||
_COMMENT_EXPANSION_FIELDS = ["body.storage.value"]
|
||||
|
||||
common/data_source/imap_connector.py (new file): 724 lines
@ -0,0 +1,724 @@
|
||||
import copy
|
||||
import email
|
||||
from email.header import decode_header
|
||||
import imaplib
|
||||
import logging
|
||||
import os
|
||||
import re
import uuid
|
||||
from datetime import datetime, timedelta
|
||||
from datetime import timezone
|
||||
from email.message import Message
|
||||
from email.utils import collapse_rfc2231_value, parseaddr
|
||||
from enum import Enum
|
||||
from typing import Any
|
||||
from typing import cast
|
||||
|
||||
import bs4
|
||||
from pydantic import BaseModel
|
||||
|
||||
from common.data_source.config import IMAP_CONNECTOR_SIZE_THRESHOLD, DocumentSource
|
||||
from common.data_source.interfaces import CheckpointOutput, CheckpointedConnectorWithPermSync, CredentialsConnector, CredentialsProviderInterface
|
||||
from common.data_source.models import BasicExpertInfo, ConnectorCheckpoint, Document, ExternalAccess, SecondsSinceUnixEpoch
|
||||
|
||||
_DEFAULT_IMAP_PORT_NUMBER = int(os.environ.get("IMAP_PORT", 993))
|
||||
_IMAP_OKAY_STATUS = "OK"
|
||||
_PAGE_SIZE = 100
|
||||
_USERNAME_KEY = "imap_username"
|
||||
_PASSWORD_KEY = "imap_password"
|
||||
|
||||
class Header(str, Enum):
|
||||
SUBJECT_HEADER = "subject"
|
||||
FROM_HEADER = "from"
|
||||
TO_HEADER = "to"
|
||||
CC_HEADER = "cc"
|
||||
DELIVERED_TO_HEADER = (
|
||||
"Delivered-To" # Used in mailing lists instead of the "to" header.
|
||||
)
|
||||
DATE_HEADER = "date"
|
||||
MESSAGE_ID_HEADER = "Message-ID"
|
||||
|
||||
|
||||
class EmailHeaders(BaseModel):
|
||||
"""
|
||||
Model for email headers extracted from IMAP messages.
|
||||
"""
|
||||
|
||||
id: str
|
||||
subject: str
|
||||
sender: str
|
||||
recipients: str | None
|
||||
cc: str | None
|
||||
date: datetime
|
||||
|
||||
@classmethod
|
||||
def from_email_msg(cls, email_msg: Message) -> "EmailHeaders":
|
||||
def _decode(header: str, default: str | None = None) -> str | None:
|
||||
value = email_msg.get(header, default)
|
||||
if not value:
|
||||
return None
|
||||
|
||||
decoded_fragments = decode_header(value)
|
||||
decoded_strings: list[str] = []
|
||||
|
||||
for decoded_value, encoding in decoded_fragments:
|
||||
if isinstance(decoded_value, bytes):
|
||||
try:
|
||||
decoded_strings.append(
|
||||
decoded_value.decode(encoding or "utf-8", errors="replace")
|
||||
)
|
||||
except LookupError:
|
||||
decoded_strings.append(
|
||||
decoded_value.decode("utf-8", errors="replace")
|
||||
)
|
||||
elif isinstance(decoded_value, str):
|
||||
decoded_strings.append(decoded_value)
|
||||
else:
|
||||
decoded_strings.append(str(decoded_value))
|
||||
|
||||
return "".join(decoded_strings)
|
||||
|
||||
def _parse_date(date_str: str | None) -> datetime | None:
|
||||
if not date_str:
|
||||
return None
|
||||
try:
|
||||
return email.utils.parsedate_to_datetime(date_str)
|
||||
except (TypeError, ValueError):
|
||||
return None
|
||||
|
||||
message_id = _decode(header=Header.MESSAGE_ID_HEADER)
|
||||
if not message_id:
|
||||
message_id = f"<generated-{uuid.uuid4()}@imap.local>"
|
||||
# It's possible for the subject line to not exist or be an empty string.
|
||||
subject = _decode(header=Header.SUBJECT_HEADER) or "Unknown Subject"
|
||||
from_ = _decode(header=Header.FROM_HEADER)
|
||||
to = _decode(header=Header.TO_HEADER)
|
||||
if not to:
|
||||
to = _decode(header=Header.DELIVERED_TO_HEADER)
|
||||
cc = _decode(header=Header.CC_HEADER)
|
||||
date_str = _decode(header=Header.DATE_HEADER)
|
||||
date = _parse_date(date_str=date_str)
|
||||
|
||||
if not date:
|
||||
date = datetime.now(tz=timezone.utc)
|
||||
|
||||
# If any of the above are `None`, model validation will fail.
|
||||
# Therefore, no guards (i.e.: `if <header> is None: raise RuntimeError(..)`) were written.
|
||||
return cls.model_validate(
|
||||
{
|
||||
"id": message_id,
|
||||
"subject": subject,
|
||||
"sender": from_,
|
||||
"recipients": to,
|
||||
"cc": cc,
|
||||
"date": date,
|
||||
}
|
||||
)
|
||||
|
||||
class CurrentMailbox(BaseModel):
|
||||
mailbox: str
|
||||
todo_email_ids: list[str]
|
||||
|
||||
|
||||
# An email has a list of mailboxes.
|
||||
# Each mailbox has a list of email-ids inside of it.
|
||||
#
|
||||
# Usage:
|
||||
# To use this checkpointer, first fetch all the mailboxes.
|
||||
# Then, pop a mailbox and fetch all of its email-ids.
|
||||
# Then, pop each email-id and fetch its content (and parse it, etc..).
|
||||
# When you have popped all email-ids for this mailbox, pop the next mailbox and repeat the above process until you're done.
|
||||
#
|
||||
# For initial checkpointing, set both fields to `None`.
|
||||
class ImapCheckpoint(ConnectorCheckpoint):
|
||||
todo_mailboxes: list[str] | None = None
|
||||
current_mailbox: CurrentMailbox | None = None
|
||||
|
||||
|
||||
class LoginState(str, Enum):
|
||||
LoggedIn = "logged_in"
|
||||
LoggedOut = "logged_out"
|
||||
|
||||
|
||||
class ImapConnector(
|
||||
CredentialsConnector,
|
||||
CheckpointedConnectorWithPermSync,
|
||||
):
|
||||
def __init__(
|
||||
self,
|
||||
host: str,
|
||||
port: int = _DEFAULT_IMAP_PORT_NUMBER,
|
||||
mailboxes: list[str] | None = None,
|
||||
) -> None:
|
||||
self._host = host
|
||||
self._port = port
|
||||
self._mailboxes = mailboxes
|
||||
self._credentials: dict[str, Any] | None = None
|
||||
|
||||
@property
|
||||
def credentials(self) -> dict[str, Any]:
|
||||
if not self._credentials:
|
||||
raise RuntimeError(
|
||||
"Credentials have not been initialized; call `set_credentials_provider` first"
|
||||
)
|
||||
return self._credentials
|
||||
|
||||
def _get_mail_client(self) -> imaplib.IMAP4_SSL:
|
||||
"""
|
||||
Returns a new `imaplib.IMAP4_SSL` instance.
|
||||
|
||||
The `imaplib.IMAP4_SSL` object is supposed to be an "ephemeral" object; it's not something that you can login,
|
||||
logout, then log back into again. I.e., the following will fail:
|
||||
|
||||
```py
|
||||
mail_client.login(..)
|
||||
mail_client.logout();
|
||||
mail_client.login(..)
|
||||
```
|
||||
|
||||
Therefore, you need a fresh, new instance in order to operate with IMAP. This function gives one to you.
|
||||
|
||||
# Notes
|
||||
This function will throw an error if the credentials have not yet been set.
|
||||
"""
|
||||
|
||||
def get_or_raise(name: str) -> str:
|
||||
value = self.credentials.get(name)
|
||||
if not value:
|
||||
raise RuntimeError(f"Credential item {name=} was not found")
|
||||
if not isinstance(value, str):
|
||||
raise RuntimeError(
|
||||
f"Credential item {name=} must be of type str, instead received {type(name)=}"
|
||||
)
|
||||
return value
|
||||
|
||||
username = get_or_raise(_USERNAME_KEY)
|
||||
password = get_or_raise(_PASSWORD_KEY)
|
||||
|
||||
mail_client = imaplib.IMAP4_SSL(host=self._host, port=self._port)
|
||||
status, _data = mail_client.login(user=username, password=password)
|
||||
|
||||
if status != _IMAP_OKAY_STATUS:
|
||||
raise RuntimeError(f"Failed to log into imap server; {status=}")
|
||||
|
||||
return mail_client
|
||||
|
||||
def _load_from_checkpoint(
|
||||
self,
|
||||
start: SecondsSinceUnixEpoch,
|
||||
end: SecondsSinceUnixEpoch,
|
||||
checkpoint: ImapCheckpoint,
|
||||
include_perm_sync: bool,
|
||||
) -> CheckpointOutput[ImapCheckpoint]:
|
||||
checkpoint = cast(ImapCheckpoint, copy.deepcopy(checkpoint))
|
||||
checkpoint.has_more = True
|
||||
|
||||
mail_client = self._get_mail_client()
|
||||
|
||||
if checkpoint.todo_mailboxes is None:
|
||||
# This is the dummy checkpoint.
|
||||
# Fill it with mailboxes first.
|
||||
if self._mailboxes:
|
||||
checkpoint.todo_mailboxes = _sanitize_mailbox_names(self._mailboxes)
|
||||
else:
|
||||
fetched_mailboxes = _fetch_all_mailboxes_for_email_account(
|
||||
mail_client=mail_client
|
||||
)
|
||||
if not fetched_mailboxes:
|
||||
raise RuntimeError(
|
||||
"Failed to find any mailboxes for this email account"
|
||||
)
|
||||
checkpoint.todo_mailboxes = _sanitize_mailbox_names(fetched_mailboxes)
|
||||
|
||||
return checkpoint
|
||||
|
||||
if (
|
||||
not checkpoint.current_mailbox
|
||||
or not checkpoint.current_mailbox.todo_email_ids
|
||||
):
|
||||
if not checkpoint.todo_mailboxes:
|
||||
checkpoint.has_more = False
|
||||
return checkpoint
|
||||
|
||||
mailbox = checkpoint.todo_mailboxes.pop()
|
||||
email_ids = _fetch_email_ids_in_mailbox(
|
||||
mail_client=mail_client,
|
||||
mailbox=mailbox,
|
||||
start=start,
|
||||
end=end,
|
||||
)
|
||||
checkpoint.current_mailbox = CurrentMailbox(
|
||||
mailbox=mailbox,
|
||||
todo_email_ids=email_ids,
|
||||
)
|
||||
|
||||
_select_mailbox(
|
||||
mail_client=mail_client, mailbox=checkpoint.current_mailbox.mailbox
|
||||
)
|
||||
current_todos = cast(
|
||||
list, copy.deepcopy(checkpoint.current_mailbox.todo_email_ids[:_PAGE_SIZE])
|
||||
)
|
||||
checkpoint.current_mailbox.todo_email_ids = (
|
||||
checkpoint.current_mailbox.todo_email_ids[_PAGE_SIZE:]
|
||||
)
|
||||
|
||||
for email_id in current_todos:
|
||||
email_msg = _fetch_email(mail_client=mail_client, email_id=email_id)
|
||||
if not email_msg:
|
||||
logging.warning(f"Failed to fetch message {email_id=}; skipping")
|
||||
continue
|
||||
|
||||
email_headers = EmailHeaders.from_email_msg(email_msg=email_msg)
|
||||
msg_dt = email_headers.date
|
||||
if msg_dt.tzinfo is None:
|
||||
msg_dt = msg_dt.replace(tzinfo=timezone.utc)
|
||||
else:
|
||||
msg_dt = msg_dt.astimezone(timezone.utc)
|
||||
|
||||
start_dt = datetime.fromtimestamp(start, tz=timezone.utc)
|
||||
end_dt = datetime.fromtimestamp(end, tz=timezone.utc)
|
||||
|
||||
if not (start_dt < msg_dt <= end_dt):
|
||||
continue
|
||||
|
||||
email_doc = _convert_email_headers_and_body_into_document(
|
||||
email_msg=email_msg,
|
||||
email_headers=email_headers,
|
||||
include_perm_sync=include_perm_sync,
|
||||
)
|
||||
yield email_doc
|
||||
attachments = extract_attachments(email_msg)
|
||||
for att in attachments:
|
||||
yield attachment_to_document(email_doc, att, email_headers)
|
||||
|
||||
return checkpoint
|
||||
|
||||
# impls for BaseConnector
|
||||
|
||||
def load_credentials(self, credentials: dict[str, Any]) -> dict[str, Any] | None:
|
||||
self._credentials = credentials
|
||||
return None
|
||||
|
||||
def validate_connector_settings(self) -> None:
|
||||
self._get_mail_client()
|
||||
|
||||
# impls for CredentialsConnector
|
||||
|
||||
def set_credentials_provider(
|
||||
self, credentials_provider: CredentialsProviderInterface
|
||||
) -> None:
|
||||
self._credentials = credentials_provider.get_credentials()
|
||||
|
||||
# impls for CheckpointedConnector
|
||||
|
||||
def load_from_checkpoint(
|
||||
self,
|
||||
start: SecondsSinceUnixEpoch,
|
||||
end: SecondsSinceUnixEpoch,
|
||||
checkpoint: ImapCheckpoint,
|
||||
) -> CheckpointOutput[ImapCheckpoint]:
|
||||
return self._load_from_checkpoint(
|
||||
start=start, end=end, checkpoint=checkpoint, include_perm_sync=False
|
||||
)
|
||||
|
||||
def build_dummy_checkpoint(self) -> ImapCheckpoint:
|
||||
return ImapCheckpoint(has_more=True)
|
||||
|
||||
def validate_checkpoint_json(self, checkpoint_json: str) -> ImapCheckpoint:
|
||||
return ImapCheckpoint.model_validate_json(json_data=checkpoint_json)
|
||||
|
||||
# impls for CheckpointedConnectorWithPermSync
|
||||
|
||||
def load_from_checkpoint_with_perm_sync(
|
||||
self,
|
||||
start: SecondsSinceUnixEpoch,
|
||||
end: SecondsSinceUnixEpoch,
|
||||
checkpoint: ImapCheckpoint,
|
||||
) -> CheckpointOutput[ImapCheckpoint]:
|
||||
return self._load_from_checkpoint(
|
||||
start=start, end=end, checkpoint=checkpoint, include_perm_sync=True
|
||||
)
|
||||
|
||||
|
||||
def _fetch_all_mailboxes_for_email_account(mail_client: imaplib.IMAP4_SSL) -> list[str]:
|
||||
status, mailboxes_data = mail_client.list('""', "*")
|
||||
if status != _IMAP_OKAY_STATUS:
|
||||
raise RuntimeError(f"Failed to fetch mailboxes; {status=}")
|
||||
|
||||
mailboxes = []
|
||||
|
||||
for mailboxes_raw in mailboxes_data:
|
||||
if isinstance(mailboxes_raw, bytes):
|
||||
mailboxes_str = mailboxes_raw.decode()
|
||||
elif isinstance(mailboxes_raw, str):
|
||||
mailboxes_str = mailboxes_raw
|
||||
else:
|
||||
logging.warning(
|
||||
f"Expected the mailbox data to be of type str, instead got {type(mailboxes_raw)=} {mailboxes_raw}; skipping"
|
||||
)
|
||||
continue
|
||||
|
||||
# The mailbox LIST response output can be found here:
|
||||
# https://www.rfc-editor.org/rfc/rfc3501.html#section-7.2.2
|
||||
#
|
||||
# The general format is:
|
||||
# `(<name-attributes>) <hierarchy-delimiter> <mailbox-name>`
|
||||
#
|
||||
# The below regex matches on that pattern; from there, we take the second capture group, which is the mailbox name.
|
||||
match = re.match(r'\([^)]*\)\s+"([^"]+)"\s+"?(.+?)"?$', mailboxes_str)
|
||||
if not match:
|
||||
logging.warning(
|
||||
f"Invalid mailbox-data formatting structure: {mailboxes_str=}; skipping"
|
||||
)
|
||||
continue
|
||||
|
||||
mailbox = match.group(2)
|
||||
mailboxes.append(mailbox)
|
||||
if not mailboxes:
|
||||
logging.warning(
|
||||
"No mailboxes parsed from LIST response; falling back to INBOX"
|
||||
)
|
||||
return ["INBOX"]
|
||||
|
||||
return mailboxes
|
||||
|
||||
|
||||
def _select_mailbox(mail_client: imaplib.IMAP4_SSL, mailbox: str) -> bool:
|
||||
try:
|
||||
status, _ = mail_client.select(mailbox=mailbox, readonly=True)
|
||||
if status != _IMAP_OKAY_STATUS:
|
||||
return False
|
||||
return True
|
||||
except Exception:
|
||||
return False
|
||||
|
||||
|
||||
def _fetch_email_ids_in_mailbox(
|
||||
mail_client: imaplib.IMAP4_SSL,
|
||||
mailbox: str,
|
||||
start: SecondsSinceUnixEpoch,
|
||||
end: SecondsSinceUnixEpoch,
|
||||
) -> list[str]:
|
||||
if not _select_mailbox(mail_client, mailbox):
|
||||
logging.warning(f"Skip mailbox: {mailbox}")
|
||||
return []
|
||||
|
||||
start_dt = datetime.fromtimestamp(start, tz=timezone.utc)
|
||||
end_dt = datetime.fromtimestamp(end, tz=timezone.utc) + timedelta(days=1)
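# IMAP SEARCH compares dates only and BEFORE is exclusive, so the end date is padded by one day above to keep messages from the final day in range.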
|
||||
|
||||
start_str = start_dt.strftime("%d-%b-%Y")
|
||||
end_str = end_dt.strftime("%d-%b-%Y")
|
||||
search_criteria = f'(SINCE "{start_str}" BEFORE "{end_str}")'
|
||||
|
||||
status, email_ids_byte_array = mail_client.search(None, search_criteria)
|
||||
|
||||
if status != _IMAP_OKAY_STATUS or not email_ids_byte_array:
|
||||
raise RuntimeError(f"Failed to fetch email ids; {status=}")
|
||||
|
||||
email_ids: bytes = email_ids_byte_array[0]
|
||||
|
||||
return [email_id.decode() for email_id in email_ids.split()]
|
||||
|
||||
|
||||
def _fetch_email(mail_client: imaplib.IMAP4_SSL, email_id: str) -> Message | None:
|
||||
status, msg_data = mail_client.fetch(message_set=email_id, message_parts="(RFC822)")
|
||||
if status != _IMAP_OKAY_STATUS or not msg_data:
|
||||
return None
|
||||
|
||||
data = msg_data[0]
|
||||
if not isinstance(data, tuple):
|
||||
raise RuntimeError(
|
||||
f"Message data should be a tuple; instead got a {type(data)=} {data=}"
|
||||
)
|
||||
|
||||
_, raw_email = data
|
||||
return email.message_from_bytes(raw_email)
|
||||
|
||||
|
||||
def _convert_email_headers_and_body_into_document(
|
||||
email_msg: Message,
|
||||
email_headers: EmailHeaders,
|
||||
include_perm_sync: bool,
|
||||
) -> Document:
|
||||
sender_name, sender_addr = _parse_singular_addr(raw_header=email_headers.sender)
|
||||
to_addrs = (
|
||||
_parse_addrs(email_headers.recipients)
|
||||
if email_headers.recipients
|
||||
else []
|
||||
)
|
||||
cc_addrs = (
|
||||
_parse_addrs(email_headers.cc)
|
||||
if email_headers.cc
|
||||
else []
|
||||
)
|
||||
all_participants = to_addrs + cc_addrs
|
||||
|
||||
expert_info_map = {
|
||||
recipient_addr: BasicExpertInfo(
|
||||
display_name=recipient_name, email=recipient_addr
|
||||
)
|
||||
for recipient_name, recipient_addr in all_participants
|
||||
}
|
||||
if sender_addr not in expert_info_map:
|
||||
expert_info_map[sender_addr] = BasicExpertInfo(
|
||||
display_name=sender_name, email=sender_addr
|
||||
)
|
||||
|
||||
email_body = _parse_email_body(email_msg=email_msg, email_headers=email_headers)
|
||||
primary_owners = list(expert_info_map.values())
|
||||
external_access = (
|
||||
ExternalAccess(
|
||||
external_user_emails=set(expert_info_map.keys()),
|
||||
external_user_group_ids=set(),
|
||||
is_public=False,
|
||||
)
|
||||
if include_perm_sync
|
||||
else None
|
||||
)
|
||||
return Document(
|
||||
id=email_headers.id,
|
||||
title=email_headers.subject,
|
||||
blob=email_body,
|
||||
size_bytes=len(email_body),
|
||||
semantic_identifier=email_headers.subject,
|
||||
metadata={},
|
||||
extension='.txt',
|
||||
doc_updated_at=email_headers.date,
|
||||
source=DocumentSource.IMAP,
|
||||
primary_owners=primary_owners,
|
||||
external_access=external_access,
|
||||
)
|
||||
|
||||
def extract_attachments(email_msg: Message, max_bytes: int = IMAP_CONNECTOR_SIZE_THRESHOLD):
|
||||
attachments = []
|
||||
|
||||
if not email_msg.is_multipart():
|
||||
return attachments
|
||||
|
||||
for part in email_msg.walk():
|
||||
if part.get_content_maintype() == "multipart":
|
||||
continue
|
||||
|
||||
disposition = (part.get("Content-Disposition") or "").lower()
|
||||
filename = part.get_filename()
|
||||
|
||||
if not (
|
||||
disposition.startswith("attachment")
|
||||
or (disposition.startswith("inline") and filename)
|
||||
):
|
||||
continue
|
||||
|
||||
payload = part.get_payload(decode=True)
|
||||
if not payload:
|
||||
continue
|
||||
|
||||
if len(payload) > max_bytes:
|
||||
continue
|
||||
|
||||
attachments.append({
|
||||
"filename": filename or "attachment.bin",
|
||||
"content_type": part.get_content_type(),
|
||||
"content_bytes": payload,
|
||||
"size_bytes": len(payload),
|
||||
})
|
||||
|
||||
return attachments
|
||||
|
||||
def decode_mime_filename(raw: str | None) -> str | None:
|
||||
if not raw:
|
||||
return None
|
||||
|
||||
try:
|
||||
raw = collapse_rfc2231_value(raw)
|
||||
except Exception:
|
||||
pass
|
||||
|
||||
parts = decode_header(raw)
|
||||
decoded = []
|
||||
|
||||
for value, encoding in parts:
|
||||
if isinstance(value, bytes):
|
||||
decoded.append(value.decode(encoding or "utf-8", errors="replace"))
|
||||
else:
|
||||
decoded.append(value)
|
||||
|
||||
return "".join(decoded)
|
||||
|
||||
def attachment_to_document(
|
||||
parent_doc: Document,
|
||||
att: dict,
|
||||
email_headers: EmailHeaders,
|
||||
):
|
||||
raw_filename = att["filename"]
|
||||
filename = decode_mime_filename(raw_filename) or "attachment.bin"
|
||||
ext = "." + filename.split(".")[-1] if "." in filename else ""
|
||||
|
||||
return Document(
|
||||
id=f"{parent_doc.id}#att:{filename}",
|
||||
source=DocumentSource.IMAP,
|
||||
semantic_identifier=filename,
|
||||
extension=ext,
|
||||
blob=att["content_bytes"],
|
||||
size_bytes=att["size_bytes"],
|
||||
doc_updated_at=email_headers.date,
|
||||
primary_owners=parent_doc.primary_owners,
|
||||
metadata={
|
||||
"parent_email_id": parent_doc.id,
|
||||
"parent_subject": email_headers.subject,
|
||||
"attachment_filename": filename,
|
||||
"attachment_content_type": att["content_type"],
|
||||
},
|
||||
)
|
||||
|
||||
def _parse_email_body(
|
||||
email_msg: Message,
|
||||
email_headers: EmailHeaders,
|
||||
) -> str:
|
||||
body = None
|
||||
for part in email_msg.walk():
|
||||
if part.is_multipart():
|
||||
# Multipart parts are *containers* for other parts, not the actual content itself.
|
||||
# Therefore, we skip until we find the individual parts instead.
|
||||
continue
|
||||
|
||||
charset = part.get_content_charset() or "utf-8"
|
||||
|
||||
try:
|
||||
raw_payload = part.get_payload(decode=True)
|
||||
if not isinstance(raw_payload, bytes):
|
||||
logging.warning(
|
||||
"Payload section from email was expected to be an array of bytes, instead got "
|
||||
f"{type(raw_payload)=}, {raw_payload=}"
|
||||
)
|
||||
continue
|
||||
body = raw_payload.decode(charset)
|
||||
break
|
||||
except (UnicodeDecodeError, LookupError) as e:
|
||||
logging.warning(f"Could not decode part with charset {charset}. Error: {e}")
|
||||
continue
|
||||
|
||||
if not body:
|
||||
logging.warning(
|
||||
f"Email with {email_headers.id=} has an empty body; returning an empty string"
|
||||
)
|
||||
return ""
|
||||
|
||||
soup = bs4.BeautifulSoup(markup=body, features="html.parser")
|
||||
|
||||
return " ".join(str_section for str_section in soup.stripped_strings)
|
||||
|
||||
|
||||
def _sanitize_mailbox_names(mailboxes: list[str]) -> list[str]:
|
||||
"""
|
||||
Mailboxes with special characters in them must be enclosed by double-quotes, as per the IMAP protocol.
|
||||
Just to be safe, we wrap *all* mailboxes with double-quotes.
|
||||
"""
|
||||
return [f'"{mailbox}"' for mailbox in mailboxes if mailbox]
|
||||
|
||||
|
||||
def _parse_addrs(raw_header: str) -> list[tuple[str, str]]:
|
||||
addrs = raw_header.split(",")
|
||||
name_addr_pairs = [parseaddr(addr=addr) for addr in addrs if addr]
|
||||
return [(name, addr) for name, addr in name_addr_pairs if addr]
|
||||
|
||||
|
||||
def _parse_singular_addr(raw_header: str) -> tuple[str, str]:
|
||||
addrs = _parse_addrs(raw_header=raw_header)
|
||||
if not addrs:
|
||||
return ("Unknown", "unknown@example.com")
|
||||
elif len(addrs) >= 2:
|
||||
raise RuntimeError(
|
||||
f"Expected a singular address, but instead got multiple; {raw_header=} {addrs=}"
|
||||
)
|
||||
|
||||
return addrs[0]
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
import time
|
||||
import uuid
|
||||
from types import TracebackType
|
||||
from common.data_source.utils import load_all_docs_from_checkpoint_connector
|
||||
|
||||
|
||||
class OnyxStaticCredentialsProvider(
|
||||
CredentialsProviderInterface["OnyxStaticCredentialsProvider"]
|
||||
):
|
||||
"""Implementation (a very simple one!) to handle static credentials."""
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
tenant_id: str | None,
|
||||
connector_name: str,
|
||||
credential_json: dict[str, Any],
|
||||
):
|
||||
self._tenant_id = tenant_id
|
||||
self._connector_name = connector_name
|
||||
self._credential_json = credential_json
|
||||
|
||||
self._provider_key = str(uuid.uuid4())
|
||||
|
||||
def __enter__(self) -> "OnyxStaticCredentialsProvider":
|
||||
return self
|
||||
|
||||
def __exit__(
|
||||
self,
|
||||
exc_type: type[BaseException] | None,
|
||||
exc_value: BaseException | None,
|
||||
traceback: TracebackType | None,
|
||||
) -> None:
|
||||
pass
|
||||
|
||||
def get_tenant_id(self) -> str | None:
|
||||
return self._tenant_id
|
||||
|
||||
def get_provider_key(self) -> str:
|
||||
return self._provider_key
|
||||
|
||||
def get_credentials(self) -> dict[str, Any]:
|
||||
return self._credential_json
|
||||
|
||||
def set_credentials(self, credential_json: dict[str, Any]) -> None:
|
||||
self._credential_json = credential_json
|
||||
|
||||
def is_dynamic(self) -> bool:
|
||||
return False
|
||||
# from tests.daily.connectors.utils import load_all_docs_from_checkpoint_connector
|
||||
# from onyx.connectors.credentials_provider import OnyxStaticCredentialsProvider
|
||||
|
||||
host = os.environ.get("IMAP_HOST")
|
||||
mailboxes_str = os.environ.get("IMAP_MAILBOXES","INBOX")
|
||||
username = os.environ.get("IMAP_USERNAME")
|
||||
password = os.environ.get("IMAP_PASSWORD")
|
||||
|
||||
mailboxes = (
|
||||
[mailbox.strip() for mailbox in mailboxes_str.split(",")]
|
||||
if mailboxes_str
|
||||
else []
|
||||
)
|
||||
|
||||
if not host:
|
||||
raise RuntimeError("`IMAP_HOST` must be set")
|
||||
|
||||
imap_connector = ImapConnector(
|
||||
host=host,
|
||||
mailboxes=mailboxes,
|
||||
)
|
||||
|
||||
imap_connector.set_credentials_provider(
|
||||
OnyxStaticCredentialsProvider(
|
||||
tenant_id=None,
|
||||
connector_name=DocumentSource.IMAP,
|
||||
credential_json={
|
||||
_USERNAME_KEY: username,
|
||||
_PASSWORD_KEY: password,
|
||||
},
|
||||
)
|
||||
)
|
||||
END = time.time()
|
||||
START = END - 1 * 24 * 60 * 60
|
||||
for doc in load_all_docs_from_checkpoint_connector(
|
||||
connector=imap_connector,
|
||||
start=START,
|
||||
end=END,
|
||||
):
|
||||
print(doc.id,doc.extension)
|
||||
@ -476,11 +476,13 @@ class RAGFlowPdfParser:
|
||||
self.boxes = bxs
|
||||
|
||||
def _naive_vertical_merge(self, zoomin=3):
|
||||
bxs = self._assign_column(self.boxes, zoomin)
|
||||
#bxs = self._assign_column(self.boxes, zoomin)
|
||||
bxs = self.boxes
|
||||
|
||||
grouped = defaultdict(list)
|
||||
for b in bxs:
|
||||
grouped[(b["page_number"], b.get("col_id", 0))].append(b)
|
||||
# grouped[(b["page_number"], b.get("col_id", 0))].append(b)
|
||||
grouped[(b["page_number"], "x")].append(b)
|
||||
|
||||
merged_boxes = []
|
||||
for (pg, col), bxs in grouped.items():
|
||||
@ -551,7 +553,7 @@ class RAGFlowPdfParser:
|
||||
|
||||
merged_boxes.extend(bxs)
|
||||
|
||||
self.boxes = sorted(merged_boxes, key=lambda x: (x["page_number"], x.get("col_id", 0), x["top"]))
|
||||
#self.boxes = sorted(merged_boxes, key=lambda x: (x["page_number"], x.get("col_id", 0), x["top"]))
|
||||
|
||||
def _final_reading_order_merge(self, zoomin=3):
|
||||
if not self.boxes:
|
||||
@ -1206,7 +1208,7 @@ class RAGFlowPdfParser:
|
||||
start = timer()
|
||||
self._text_merge()
|
||||
self._concat_downward()
|
||||
#self._naive_vertical_merge(zoomin)
|
||||
self._naive_vertical_merge(zoomin)
|
||||
if callback:
|
||||
callback(0.92, "Text merged ({:.2f}s)".format(timer() - start))
|
||||
|
||||
|
||||
docs/basics/_category_.json (new file): 8 lines
@ -0,0 +1,8 @@
|
||||
{
|
||||
"label": "Basics",
|
||||
"position": 2,
|
||||
"link": {
|
||||
"type": "generated-index",
|
||||
"description": "Basic concepts."
|
||||
}
|
||||
}
|
||||
docs/basics/agent_context_engine.md (new file): 61 lines
@ -0,0 +1,61 @@
|
||||
---
|
||||
sidebar_position: 2
|
||||
slug: /what_is_agent_context_engine
|
||||
---
|
||||
|
||||
# What is an Agent Context Engine?
|
||||
|
||||
In 2025, a silent revolution began beneath the dazzling surface of AI Agents. While the world marveled at agents that could write code, analyze data, and automate workflows, a fundamental bottleneck emerged: why do even the most advanced agents still stumble on simple questions, forget previous conversations, or misuse available tools?
|
||||
|
||||
The answer lies not in the intelligence of the Large Language Model (LLM) itself, but in the quality of the Context it receives. An LLM, no matter how powerful, is only as good as the information we feed it. Today’s cutting-edge agents are often crippled by a cumbersome, manual, and error-prone process of context assembly—a process known as Context Engineering.
|
||||
|
||||
This is where the Agent Context Engine comes in. It is not merely an incremental improvement but a foundational shift, representing the evolution of RAG from a singular technique into the core data and intelligence substrate for the entire Agent ecosystem.
|
||||
|
||||
## Beyond the hype: The reality of today's "intelligent" Agents
|
||||
Today, the “intelligence” behind most AI Agents hides a mountain of human labor. Developers must:
|
||||
|
||||
- Hand-craft elaborate prompt templates
|
||||
- Hard-code document-retrieval logic for every task
|
||||
- Juggle tool descriptions, conversation history, and knowledge snippets inside a tiny context window
|
||||
- Repeat the whole process for each new scenario
|
||||
|
||||
This pattern is called Context Engineering. It is deeply tied to expert know-how, almost impossible to scale, and prohibitively expensive to maintain. When an enterprise needs to keep dozens of distinct agents alive, the artisanal workshop model collapses under its own weight.
|
||||
|
||||
The mission of an Agent Context Engine is to turn Context Engineering from an “art” into an industrial-grade science.
|
||||
|
||||
## Deconstructing the Agent Context Engine
|
||||
So, what exactly is an Agent Context Engine? It is a unified, intelligent, and automated platform responsible for the end-to-end process of assembling the optimal context for an LLM or Agent at the moment of inference. It moves from artisanal crafting to industrialized production.
|
||||
At its core, an Agent Context Engine is built on a triumvirate of next-generation retrieval capabilities, seamlessly integrated into a single service layer:
|
||||
|
||||
1. The Knowledge Core (Advanced RAG): This is the evolution of traditional RAG. It moves beyond simple chunk-and-embed to intelligently process static, private enterprise knowledge. Techniques like TreeRAG (building LLM-generated document outlines for "locate-then-expand" retrieval) and GraphRAG (extracting entity networks to find semantically distant connections) work to close the "semantic gap." The engine’s Ingestion Pipeline acts as the ETL for unstructured data, parsing multi-format documents and using LLMs to enrich content with summaries, metadata, and structure before indexing.
|
||||
|
||||
2. The Memory Layer: An Agent’s intelligence is defined by its ability to learn from interaction. The Memory Layer is a specialized retrieval system for dynamic, episodic data: conversation history, user preferences, and the agent’s own internal state (e.g., "waiting for human input"). It manages the lifecycle of this data—storing raw dialogue, triggering summarization into semantic memory, and retrieving relevant past interactions to provide continuity and personalization. Technologically, it is a close sibling to RAG, but focused on a temporal stream of data.
|
||||
|
||||
3. The Tool Orchestrator: As MCP (Model Context Protocol) enables the connection of hundreds of internal services as tools, a new problem arises: tool selection. The Context Engine solves this with Tool Retrieval. Instead of dumping all tool descriptions into the prompt, it maintains an index of tools and—critically—an index of Playbooks or Guidelines (best practices on when and how to use tools). For a given task, it retrieves only the most relevant tools and instructions, transforming the LLM’s job from "searching a haystack" to "following a recipe."
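As a rough sketch of the tool-retrieval idea (an illustration, not RAGFlow's actual implementation), assuming a hypothetical `embed()` that turns a piece of text into a vector: tool descriptions are indexed once, and only the few most relevant ones are injected into the prompt for a given task.

```python
import numpy as np

def select_tools(task: str, tool_descriptions: dict[str, str], embed, k: int = 3) -> list[str]:
    """Return the names of the k tools whose descriptions best match the task."""
    names = list(tool_descriptions)
    # Embed and L2-normalize every tool description (in practice this index is built once and cached).
    mat = np.array([embed(tool_descriptions[n]) for n in names], dtype=float)
    mat /= np.linalg.norm(mat, axis=1, keepdims=True)
    # Embed the task and rank tools by cosine similarity.
    query = np.array(embed(task), dtype=float)
    query /= np.linalg.norm(query)
    order = np.argsort(-(mat @ query))[:k]
    return [names[i] for i in order]
```

Only the selected tools' descriptions, plus any matching playbook snippets, then go into the agent's context.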
|
||||
|
||||
## Why do we need a dedicated engine? The case for a unified substrate
|
||||
|
||||
The necessity of an Agent Context Engine becomes clear when we examine the alternative: siloed, manually wired components.
|
||||
|
||||
- The Data Silo Problem: Knowledge, memory, and tools reside in separate systems, requiring complex integration for each new agent.
|
||||
- The Assembly Line Bottleneck: Developers spend more time on context plumbing than on agent logic, slowing innovation to a crawl.
|
||||
- The "Context Ownership" Dilemma: In manually engineered systems, context logic is buried in code, owned by developers, and opaque to business users. An Engine makes context a configurable, observable, and customer-owned asset.
|
||||
|
||||
The shift from Context Engineering to a Context Platform/Engine marks the maturation of enterprise AI, as summarized in the table below:
|
||||
|
||||
| Dimension | Context engineering (present) | Context engine/platform (future) |
|
||||
| ------------------- | -------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------- |
|
||||
| Context creation | Manual, artisanal work by developers and prompt engineers. | Automated, driven by intelligent ingestion pipelines and configurable rules. |
|
||||
| Context delivery | Hard-coded prompts and static retrieval logic embedded in agent workflows. | Dynamic, real-time retrieval and assembly based on the agent's live state and intent. |
|
||||
| Context maintenance | A development and operational burden, logic locked in code. | A manageable platform function, with visibility and control returned to the business. |
|
||||
|
||||
|
||||
## RAGFlow: A resolute march toward the context engine of Agents
|
||||
|
||||
This is the future RAGFlow is forging.
|
||||
|
||||
We left behind the label of “yet another RAG system” long ago. From DeepDoc—our deeply-optimized, multimodal document parser—to the bleeding-edge architectures that bridge semantic chasms in complex RAG scenarios, all the way to a full-blown, enterprise-grade ingestion pipeline, every evolutionary step RAGFlow takes is a deliberate stride toward the ultimate form: an Agentic Context Engine.
|
||||
|
||||
We believe tomorrow’s enterprise AI advantage will hinge not on who owns the largest model, but on who can feed that model the highest-quality, most real-time, and most relevant context. An Agentic Context Engine is the critical infrastructure that turns this vision into reality.
|
||||
|
||||
In the paradigm shift from “hand-crafted prompts” to “intelligent context,” RAGFlow is determined to be the most steadfast propeller and enabler. We invite every developer, enterprise, and researcher who cares about the future of AI agents to follow RAGFlow’s journey—so together we can witness and build the cornerstone of the next-generation AI stack.
|
||||
docs/basics/rag.md (new file): 107 lines
@ -0,0 +1,107 @@
|
||||
---
|
||||
sidebar_position: 1
|
||||
slug: /what_is_rag
|
||||
---
|
||||
|
||||
# What is Retrieval-Augmented Generation (RAG)?
|
||||
|
||||
Since large language models (LLMs) became the focus of technology, their ability to handle general knowledge has been astonishing. However, when questions shift to internal corporate documents, proprietary knowledge bases, or real-time data, the limitations of LLMs become glaringly apparent: they cannot access private information outside their training data. Retrieval-Augmented Generation (RAG) was born precisely to address this core need. Before an LLM generates an answer, it first retrieves the most relevant context from an external knowledge base and inputs it as "reference material" to the LLM, thereby guiding it to produce accurate answers. In short, RAG elevates LLMs from "relying on memory" to "having evidence to rely on," significantly improving their accuracy and trustworthiness in specialized fields and real-time information queries.
|
||||
|
||||
## Why is RAG important?
|
||||
|
||||
Although LLMs excel in language understanding and generation, they have inherent limitations:
|
||||
|
||||
- Static Knowledge: The model's knowledge is based on a data snapshot from its training time and cannot be automatically updated, making it difficult to perceive the latest information.
|
||||
- Blind Spot to External Data: They cannot directly access corporate private documents, real-time information streams, or domain-specific content.
|
||||
- Hallucination Risk: When lacking accurate evidence, they may still fabricate plausible-sounding but false answers to maintain conversational fluency.
|
||||
|
||||
The introduction of RAG provides LLMs with real-time, credible "factual grounding." Its core mechanism is divided into two stages:
|
||||
|
||||
- Retrieval Stage: Based on the user's question, quickly retrieve the most relevant documents or data fragments from an external knowledge base.
|
||||
- Generation Stage: The LLM organizes and generates the final answer by incorporating the retrieved information as context, combined with its own linguistic capabilities.
|
||||
|
||||
This upgrades LLMs from "speaking from memory" to "speaking with documentation," significantly enhancing reliability in professional and enterprise-level applications.
|
||||
|
||||
## How does RAG work?
|
||||
|
||||
Retrieval-Augmented Generation enables LLMs to generate higher-quality responses by leveraging real-time, external, or private data sources through the introduction of an information retrieval mechanism. Its workflow can be divided into the following key steps:
|
||||
|
||||
### Data processing and vectorization
|
||||
|
||||
The knowledge required by RAG comes from unstructured data in various formats, such as documents, database records, or API return content. This data typically needs to be chunked, then transformed into vectors via an embedding model, and stored in a vector database.
|
||||
|
||||
Why is chunking needed? Indexing entire documents directly faces the following problems:
|
||||
|
||||
- Decreased Retrieval Precision: Vectorizing long documents leads to semantic "averaging," losing details.
|
||||
- Context Length Limitation: LLMs have a finite context window, requiring filtering of the most relevant parts for input.
|
||||
- Cost and Efficiency: Embedding computation and retrieval costs are higher for long texts.
|
||||
|
||||
Therefore, an intelligent chunking strategy is key to balancing information integrity, retrieval granularity, and computational efficiency.
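As a minimal sketch of this ingestion step (illustrative only, assuming a hypothetical `embed()` that wraps whatever embedding model is configured; the fixed-size, overlapping splitter stands in for the smarter, structure-aware chunkers used in practice):

```python
from typing import Callable

def chunk_text(text: str, size: int = 500, overlap: int = 50) -> list[str]:
    """Naive fixed-size chunking with overlap; real pipelines split on document structure."""
    step = size - overlap
    return [text[start:start + size] for start in range(0, len(text), step)]

def ingest(doc: str, embed: Callable[[str], list[float]]) -> list[tuple[list[float], str]]:
    """Produce (vector, chunk) pairs ready to be written into a vector index."""
    return [(embed(chunk), chunk) for chunk in chunk_text(doc)]
```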
|
||||
|
||||
### Retrieve relevant information
|
||||
|
||||
The user's query is also converted into a vector to perform semantic relevance searches (e.g., calculating cosine similarity) in the vector database, matching and recalling the most relevant text fragments.
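A bare-bones illustration of this matching step, with the chunk vectors simply held in a NumPy matrix rather than a real vector database:

```python
import numpy as np

def top_k(query_vec: np.ndarray, chunk_vecs: np.ndarray, k: int = 5) -> np.ndarray:
    """Return the indices of the k chunks most cosine-similar to the query vector."""
    q = query_vec / np.linalg.norm(query_vec)
    m = chunk_vecs / np.linalg.norm(chunk_vecs, axis=1, keepdims=True)
    return np.argsort(-(m @ q))[:k]
```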
|
||||
|
||||
### Context construction and answer generation
|
||||
|
||||
The retrieved relevant content is added to the LLM's context as factual grounding, and the LLM finally generates the answer. Therefore, RAG can be seen as Context Engineering 1.0 for automated context construction.
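The final assembly step is then little more than prompt construction; `call_llm` below is a stand-in for whatever chat-completion client is actually configured:

```python
def answer(question: str, retrieved_chunks: list[str], call_llm) -> str:
    """Ground the LLM on retrieved chunks and generate the final answer."""
    context = "\n\n".join(retrieved_chunks)
    prompt = (
        "Answer the question using only the context below. "
        "If the context is insufficient, say so.\n\n"
        f"Context:\n{context}\n\nQuestion: {question}"
    )
    return call_llm(prompt)
```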
|
||||
|
||||
## Deep dive into existing RAG architecture: beyond vector retrieval
|
||||
|
||||
An industrial-grade RAG system is far from being as simple as "vector search + LLM"; its complexity and challenges are primarily embedded in the retrieval process.
|
||||
|
||||
### Data complexity: multimodal document processing
|
||||
|
||||
Core Challenge: Corporate knowledge mostly exists in the form of multimodal documents containing text, charts, tables, and formulas. Simple OCR extraction loses a large amount of semantic information.
|
||||
|
||||
Advanced Practice: Leading solutions, such as RAGFlow, tend to use Visual Language Models (VLM) or specialized parsing models like DeepDoc to "translate" multimodal documents into unimodal text rich in structural and semantic information. Converting multimodal information into high-quality unimodal text has become standard practice for advanced RAG.
|
||||
|
||||
### The complexity of chunking: the trade-off between precision and context
|
||||
|
||||
A simple "chunk-embed-retrieve" pipeline has an inherent contradiction:
|
||||
- Semantic Matching requires small text chunks to ensure clear semantic focus.
|
||||
- Context Understanding requires large text chunks to ensure complete and coherent information.
|
||||
|
||||
This forces system design into a difficult trade-off between "precise but fragmented" and "complete but vague."
|
||||
|
||||
Advanced Practice: Leading solutions, such as RAGFlow, employ semantic enhancement techniques like constructing semantic tables of contents and knowledge graphs. These not only address semantic fragmentation caused by physical chunking but also enable the discovery of relevant content across documents based on entity-relationship networks.
|
||||
|
||||
### Why is a vector database insufficient for serving RAG?
|
||||
|
||||
Vector databases excel at semantic similarity search, but RAG requires precise and reliable answers, demanding more capabilities from the retrieval system:
|
||||
- Hybrid Search: Relying solely on vector retrieval may miss exact keyword matches (e.g., product codes, regulation numbers). Hybrid search, combining vector retrieval with keyword retrieval (BM25), ensures both semantic breadth and keyword precision.
|
||||
- Tensor or Multi-Vector Representation: To support cross-modal data, employing tensor or multi-vector representation has become an important trend.
|
||||
- Metadata Filtering: Filtering based on attributes like date, department, and type is a rigid requirement in business scenarios.
|
||||
|
||||
Therefore, the retrieval layer of RAG is a composite system based on vector search but must integrate capabilities like full-text search, re-ranking, and metadata filtering.
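To make the hybrid-search point concrete, one simple fusion scheme is a weighted sum of min-max-normalized dense and BM25 scores (reciprocal rank fusion is another common choice); this is an illustration, not RAGFlow's exact formula:

```python
def hybrid_scores(dense: dict[str, float], bm25: dict[str, float], alpha: float = 0.5) -> dict[str, float]:
    """Blend normalized vector-similarity and keyword (BM25) scores per document id."""
    def norm(scores: dict[str, float]) -> dict[str, float]:
        if not scores:
            return {}
        lo, hi = min(scores.values()), max(scores.values())
        span = (hi - lo) or 1.0
        return {doc: (s - lo) / span for doc, s in scores.items()}

    d, s = norm(dense), norm(bm25)
    return {doc: alpha * d.get(doc, 0.0) + (1 - alpha) * s.get(doc, 0.0) for doc in set(d) | set(s)}
```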
|
||||
|
||||
## RAG and memory: Retrieval from the same source but different streams
|
||||
|
||||
Within the agent framework, the essence of the memory mechanism is the same as RAG: both retrieve relevant information from storage based on current needs. The key difference lies in the data source:
|
||||
- RAG: Targets pre-existing static or dynamic private data provided by the user in advance (e.g., documents, databases).
|
||||
- Memory: Targets dynamic data generated or perceived by the agent in real-time during interaction (e.g., conversation history, environmental state, tool execution results).
|
||||
They are highly consistent at the technical base (e.g., vector retrieval, keyword matching) and can be seen as the same retrieval capability applied in different scenarios ("existing knowledge" vs. "interaction memory"). A complete agent system often includes both a RAG module for inherent knowledge and a Memory module for interaction history.
|
||||
|
||||
## RAG applications
|
||||
|
||||
RAG has demonstrated clear value in several typical scenarios:
|
||||
|
||||
1. Enterprise Knowledge Q&A and Internal Search
|
||||
By vectorizing corporate private data and combining it with an LLM, RAG can directly return natural language answers based on authoritative sources, rather than document lists. While meeting intelligent Q&A needs, it inherently aligns with corporate requirements for data security, access control, and compliance.
|
||||
2. Complex Document Understanding and Professional Q&A
|
||||
For structurally complex documents like contracts and regulations, the value of RAG lies in its ability to generate accurate, verifiable answers while maintaining context integrity. Its system accuracy largely depends on text chunking and semantic understanding strategies.
|
||||
3. Dynamic Knowledge Fusion and Decision Support
|
||||
In business scenarios requiring the synthesis of information from multiple sources, RAG evolves into a knowledge orchestration and reasoning support system for business decisions. Through a multi-path recall mechanism, it fuses knowledge from different systems and formats, maintaining factual consistency and logical controllability during the generation phase.
|
||||
|
||||
## The future of RAG
|
||||
|
||||
The evolution of RAG is unfolding along several clear paths:
|
||||
|
||||
1. RAG as the data foundation for Agents
|
||||
RAG and agents have an architecture vs. scenario relationship. For agents to achieve autonomous and reliable decision-making and execution, they must rely on accurate and timely knowledge. RAG provides them with a standardized capability to access private domain knowledge and is an inevitable choice for building knowledge-aware agents.
|
||||
2. Advanced RAG: Using LLMs to optimize retrieval itself
|
||||
The core feature of next-generation RAG is fully utilizing the reasoning capabilities of LLMs to optimize the retrieval process, such as rewriting queries, summarizing or fusing results, or implementing intelligent routing. Empowering every aspect of retrieval with LLMs is key to breaking through current performance bottlenecks.
|
||||
3. Towards context engineering 2.0
|
||||
Current RAG can be viewed as Context Engineering 1.0, whose core is assembling static knowledge context for single Q&A tasks. The forthcoming Context Engineering 2.0 will extend with RAG technology at its core, becoming a system that automatically and dynamically assembles comprehensive context for agents. The context fused by this system will come not only from documents but also include interaction memory, available tools/skills, and real-time environmental information. This marks the transition of agent development from a "handicraft workshop" model to the industrial starting point of automated context engineering.
|
||||
|
||||
The essence of RAG is to build a dedicated, efficient, and trustworthy external data interface for large language models; its core is Retrieval, not Generation. Starting from the practical need to solve private data access, its technical depth is reflected in the optimization of retrieval for complex unstructured data. With its deep integration into agent architectures and its development towards automated context engineering, RAG is evolving from a technology that improves Q&A quality into the core infrastructure for building the next generation of trustworthy, controllable, and scalable intelligent applications.
|
||||
@ -13,61 +13,58 @@ A complete list of models supported by RAGFlow, which will continue to expand.
|
||||
<APITable>
|
||||
```
|
||||
|
||||
| Provider | Chat | Embedding | Rerank | Img2txt | Speech2txt | TTS |
|
||||
| --------------------- | ------------------ | ------------------ | ------------------ | ------------------ | ------------------ | ------------------ |
|
||||
| Anthropic | :heavy_check_mark: | | | | | |
|
||||
| Azure-OpenAI | :heavy_check_mark: | :heavy_check_mark: | | :heavy_check_mark: | :heavy_check_mark: | |
|
||||
| BAAI | | :heavy_check_mark: | :heavy_check_mark: | | | |
|
||||
| BaiChuan | :heavy_check_mark: | :heavy_check_mark: | | | | |
|
||||
| BaiduYiyan | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | | |
|
||||
| Bedrock | :heavy_check_mark: | :heavy_check_mark: | | | | |
|
||||
| Cohere | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | | |
|
||||
| DeepSeek | :heavy_check_mark: | | | | | |
|
||||
| FastEmbed | | :heavy_check_mark: | | | | |
|
||||
| Fish Audio | | | | | | :heavy_check_mark: |
|
||||
| Gemini | :heavy_check_mark: | :heavy_check_mark: | | :heavy_check_mark: | | |
|
||||
| Google Cloud | :heavy_check_mark: | | | | | |
|
||||
| GPUStack | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | | :heavy_check_mark: | :heavy_check_mark: |
|
||||
| Groq | :heavy_check_mark: | | | | | |
|
||||
| HuggingFace | :heavy_check_mark: | :heavy_check_mark: | | | | |
|
||||
| Jina | | :heavy_check_mark: | :heavy_check_mark: | | | |
|
||||
| LeptonAI | :heavy_check_mark: | | | | | |
|
||||
| LocalAI | :heavy_check_mark: | :heavy_check_mark: | | :heavy_check_mark: | | |
|
||||
| LM-Studio | :heavy_check_mark: | :heavy_check_mark: | | :heavy_check_mark: | | |
|
||||
| MiniMax | :heavy_check_mark: | | | | | |
|
||||
| Mistral | :heavy_check_mark: | :heavy_check_mark: | | | | |
|
||||
| ModelScope | :heavy_check_mark: | | | | | |
|
||||
| Moonshot | :heavy_check_mark: | | | :heavy_check_mark: | | |
|
||||
| Novita AI | :heavy_check_mark: | :heavy_check_mark: | | | | |
|
||||
| NVIDIA | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | | |
|
||||
| Ollama | :heavy_check_mark: | :heavy_check_mark: | | :heavy_check_mark: | | |
|
||||
| OpenAI | :heavy_check_mark: | :heavy_check_mark: | | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: |
|
||||
| OpenAI-API-Compatible | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | | |
|
||||
| OpenRouter | :heavy_check_mark: | | | :heavy_check_mark: | | |
|
||||
| PerfXCloud | :heavy_check_mark: | :heavy_check_mark: | | | | |
|
||||
| Replicate | :heavy_check_mark: | :heavy_check_mark: | | | | |
|
||||
| PPIO | :heavy_check_mark: | | | | | |
|
||||
| SILICONFLOW | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | | |
|
||||
| StepFun | :heavy_check_mark: | | | | | |
|
||||
| Tencent Hunyuan | :heavy_check_mark: | | | | | |
| Tencent Cloud | | | | | :heavy_check_mark: | |
| TogetherAI | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | | |
| Tongyi-Qianwen | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: |
| Upstage | :heavy_check_mark: | :heavy_check_mark: | | | | |
| VLLM | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | | |
| VolcEngine | :heavy_check_mark: | | | | | |
| Voyage AI | | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | | |
| Xinference | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: |
| XunFei Spark | :heavy_check_mark: | | | | | :heavy_check_mark: |
| xAI | :heavy_check_mark: | | | :heavy_check_mark: | | |
| Youdao | | :heavy_check_mark: | :heavy_check_mark: | | | |
| ZHIPU-AI | :heavy_check_mark: | :heavy_check_mark: | | :heavy_check_mark: | | |
| 01.AI | :heavy_check_mark: | | | | | |
| DeepInfra | :heavy_check_mark: | :heavy_check_mark: | | | :heavy_check_mark: | :heavy_check_mark: |
| 302.AI | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | | |
| CometAPI | :heavy_check_mark: | :heavy_check_mark: | | | | |
| DeerAPI | :heavy_check_mark: | :heavy_check_mark: | | :heavy_check_mark: | | :heavy_check_mark: |
| Jiekou.AI | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | | | |

| Provider | LLM | Image2Text | Speech2text | TTS | Embedding | Rerank | OCR |
| --------------------- | ------------------ | ------------------ | ------------------ | ------------------ | ------------------ | ------------------ | ------------------ |
| Anthropic | :heavy_check_mark: | | | | | | |
| Azure-OpenAI | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | | :heavy_check_mark: | | |
| BaiChuan | :heavy_check_mark: | | | | :heavy_check_mark: | | |
| BaiduYiyan | :heavy_check_mark: | :heavy_check_mark: | | | :heavy_check_mark: | :heavy_check_mark: | |
| Bedrock | :heavy_check_mark: | | | | :heavy_check_mark: | | |
| Cohere | :heavy_check_mark: | :heavy_check_mark: | | | :heavy_check_mark: | :heavy_check_mark: | |
| DeepSeek | :heavy_check_mark: | | | | | | |
| Fish Audio | | | | :heavy_check_mark: | | | |
| Gemini | :heavy_check_mark: | :heavy_check_mark: | | | :heavy_check_mark: | | |
| Google Cloud | :heavy_check_mark: | | | | | | |
| GPUStack | :heavy_check_mark: | | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | |
| Groq | :heavy_check_mark: | | | | | | |
| HuggingFace | :heavy_check_mark: | | | | :heavy_check_mark: | | |
| Jina | | | | | :heavy_check_mark: | :heavy_check_mark: | |
| LocalAI | :heavy_check_mark: | :heavy_check_mark: | | | :heavy_check_mark: | | |
| LongCat | :heavy_check_mark: | | | | | | |
| LM-Studio | :heavy_check_mark: | :heavy_check_mark: | | | :heavy_check_mark: | | |
| MiniMax | :heavy_check_mark: | | | | | | |
| MinerU | | | | | | | :heavy_check_mark: |
| Mistral | :heavy_check_mark: | | | | :heavy_check_mark: | | |
| ModelScope | :heavy_check_mark: | | | | | | |
| Moonshot | :heavy_check_mark: | :heavy_check_mark: | | | | | |
| NovitaAI | :heavy_check_mark: | | | | :heavy_check_mark: | | |
| NVIDIA | :heavy_check_mark: | :heavy_check_mark: | | | :heavy_check_mark: | :heavy_check_mark: | |
| Ollama | :heavy_check_mark: | :heavy_check_mark: | | | :heavy_check_mark: | | |
| OpenAI | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | | |
| OpenAI-API-Compatible | :heavy_check_mark: | :heavy_check_mark: | | | :heavy_check_mark: | :heavy_check_mark: | |
| OpenRouter | :heavy_check_mark: | :heavy_check_mark: | | | | | |
| Replicate | :heavy_check_mark: | | | | :heavy_check_mark: | | |
| PPIO | :heavy_check_mark: | | | | | | |
| SILICONFLOW | :heavy_check_mark: | :heavy_check_mark: | | | :heavy_check_mark: | :heavy_check_mark: | |
| StepFun | :heavy_check_mark: | | | | | | |
| Tencent Hunyuan | :heavy_check_mark: | | | | | | |
| Tencent Cloud | | | :heavy_check_mark: | | | | |
| TogetherAI | :heavy_check_mark: | :heavy_check_mark: | | | :heavy_check_mark: | :heavy_check_mark: | |
| TokenPony | :heavy_check_mark: | | | | | | |
| Tongyi-Qianwen | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | |
| Upstage | :heavy_check_mark: | | | | :heavy_check_mark: | | |
| VLLM | :heavy_check_mark: | :heavy_check_mark: | | | :heavy_check_mark: | :heavy_check_mark: | |
| VolcEngine | :heavy_check_mark: | | | | | | |
| Voyage AI | | :heavy_check_mark: | | | :heavy_check_mark: | :heavy_check_mark: | |
| Xinference | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | |
| XunFei Spark | :heavy_check_mark: | | | :heavy_check_mark: | | | |
| xAI | :heavy_check_mark: | :heavy_check_mark: | | | | | |
| ZHIPU-AI | :heavy_check_mark: | :heavy_check_mark: | | | :heavy_check_mark: | | |
| DeepInfra | :heavy_check_mark: | | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | | |
| 302.AI | :heavy_check_mark: | :heavy_check_mark: | | | :heavy_check_mark: | :heavy_check_mark: | |
| CometAPI | :heavy_check_mark: | | | | :heavy_check_mark: | | |
| DeerAPI | :heavy_check_mark: | :heavy_check_mark: | | :heavy_check_mark: | :heavy_check_mark: | | |
| Jiekou.AI | :heavy_check_mark: | | | | :heavy_check_mark: | :heavy_check_mark: | |

```mdx-code-block
</APITable>
```
@ -40,7 +40,7 @@ from deepdoc.parser.docling_parser import DoclingParser
from deepdoc.parser.tcadp_parser import TCADPParser
from common.parser_config_utils import normalize_layout_recognizer
from rag.nlp import concat_img, find_codec, naive_merge, naive_merge_with_images, naive_merge_docx, rag_tokenizer, \
    tokenize_chunks, tokenize_chunks_with_images, tokenize_table, attach_media_context
    tokenize_chunks, tokenize_chunks_with_images, tokenize_table, attach_media_context, append_context2table_image4pdf


def by_deepdoc(filename, binary=None, from_page=0, to_page=100000, lang="Chinese", callback=None, pdf_cls=None,
@ -487,7 +487,7 @@ class Pdf(PdfParser):
        tbls = self._extract_table_figure(True, zoomin, True, True)
        self._naive_vertical_merge()
        self._concat_downward()
        self._final_reading_order_merge()
        # self._final_reading_order_merge()
        # self._filter_forpages()
        logging.info("layouts cost: {}s".format(timer() - first_start))
        return [(b["text"], self._line_tag(b, zoomin)) for b in self.boxes], tbls
@ -776,6 +776,9 @@ def chunk(filename, binary=None, from_page=0, to_page=100000, lang="Chinese", ca
    if not sections and not tables:
        return []

    if table_context_size or image_context_size:
        tables = append_context2table_image4pdf(sections, tables, image_context_size)

    if name in ["tcadp", "docling", "mineru"]:
        parser_config["chunk_token_num"] = 0

@ -1006,8 +1009,8 @@ def chunk(filename, binary=None, from_page=0, to_page=100000, lang="Chinese", ca
        res.extend(embed_res)
    if url_res:
        res.extend(url_res)
    if table_context_size or image_context_size:
        attach_media_context(res, table_context_size, image_context_size)
    #if table_context_size or image_context_size:
    #    attach_media_context(res, table_context_size, image_context_size)
    return res

@ -16,7 +16,7 @@

import logging
import random
from collections import Counter
from collections import Counter, defaultdict

from common.token_utils import num_tokens_from_string
import re
@ -667,6 +667,94 @@ def attach_media_context(chunks, table_context_size=0, image_context_size=0):
    return chunks


def append_context2table_image4pdf(sections: list, tabls: list, table_context_size=0):
    from deepdoc.parser import PdfParser
    if table_context_size <=0:
        return tabls

    page_bucket = defaultdict(list)
    for i, (txt, poss) in enumerate(sections):
        poss = PdfParser.extract_positions(poss)
        for page, left, right, top, bottom in poss:
            page = page[0]
            page_bucket[page].append(((left, top, right, bottom), txt))

    def upper_context(page, i):
        txt = ""
        if page not in page_bucket:
            i = -1
        while num_tokens_from_string(txt) < table_context_size:
            if i < 0:
                page -= 1
                if page < 0 or page not in page_bucket:
                    break
                i = len(page_bucket[page]) -1
            blks = page_bucket[page]
            (_, _, _, _), cnt = blks[i]
            txts = re.split(r"([。!??;!\n]|\. )", cnt, flags=re.DOTALL)[::-1]
            for j in range(0, len(txts), 2):
                txt = (txts[j+1] if j+1<len(txts) else "") + txts[j] + txt
                if num_tokens_from_string(txt) > table_context_size:
                    break
            i -= 1
        return txt

    def lower_context(page, i):
        txt = ""
        if page not in page_bucket:
            return txt
        while num_tokens_from_string(txt) < table_context_size:
            if i >= len(page_bucket[page]):
                page += 1
                if page not in page_bucket:
                    break
                i = 0
            blks = page_bucket[page]
            (_, _, _, _), cnt = blks[i]
            txts = re.split(r"([。!??;!\n]|\. )", cnt, flags=re.DOTALL)
            for j in range(0, len(txts), 2):
                txt += txts[j] + (txts[j+1] if j+1<len(txts) else "")
                if num_tokens_from_string(txt) > table_context_size:
                    break
            i += 1
        return txt

    res = []
    for (img, tb), poss in tabls:
        page, left, top, right, bott = poss[0]
        _page, _left, _top, _right, _bott = poss[-1]
        if isinstance(tb, list):
            tb = "\n".join(tb)

        i = 0
        blks = page_bucket.get(page, [])
        _tb = tb
        while i < len(blks):
            if i + 1 >= len(blks):
                if _page > page:
                    page += 1
                    i = 0
                    blks = page_bucket.get(page, [])
                    continue
                tb = upper_context(page, i) + tb + lower_context(page+1, 0)
                break
            (_, t, r, b), txt = blks[i]
            if b > top:
                break
            (_, _t, _r, _b), _txt = blks[i+1]
            if _t < _bott:
                i += 1
                continue

            tb = upper_context(page, i) + tb + lower_context(page, i)
            break

        if _tb == tb:
            tb = upper_context(page, -1) + tb + lower_context(page+1, 0)
        res.append(((img, tb), poss))
    return res


def add_positions(d, poss):
    if not poss:
        return

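The new `append_context2table_image4pdf` helper attaches nearby body text to each extracted PDF table or figure: it buckets text blocks by page, then walks sentence by sentence above and below the table until the `table_context_size` token budget is spent. The sketch below illustrates only that sentence-budget accumulation step; it is not the project code, and `token_count` is a hypothetical whitespace-based stand-in for `num_tokens_from_string`.

```python
# Simplified sketch (not the project code) of the sentence-budget accumulation
# performed by upper_context()/lower_context(): keep appending sentences from
# successive text blocks until the token budget is exceeded.
import re

def token_count(text: str) -> int:
    return len(text.split())

def take_context(blocks, budget):
    ctx = ""
    for block in blocks:
        # Split on sentence delimiters, keeping them via the capturing group.
        parts = re.split(r"([。!??;!\n]|\. )", block, flags=re.DOTALL)
        for i in range(0, len(parts), 2):
            ctx += parts[i] + (parts[i + 1] if i + 1 < len(parts) else "")
            if token_count(ctx) > budget:
                return ctx
    return ctx

print(take_context(["First sentence. Second sentence.", "Third one."], budget=4))
```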
@ -729,6 +729,8 @@ TOC_FROM_TEXT_USER = load_prompt("toc_from_text_user")

# Generate TOC from text chunks with text llms
async def gen_toc_from_text(txt_info: dict, chat_mdl, callback=None):
    if callback:
        callback(msg="")
    try:
        ans = await gen_json(
            PROMPT_JINJA_ENV.from_string(TOC_FROM_TEXT_SYSTEM).render(),
@ -738,8 +740,6 @@ async def gen_toc_from_text(txt_info: dict, chat_mdl, callback=None):
            gen_conf={"temperature": 0.0, "top_p": 0.9}
        )
        txt_info["toc"] = ans if ans and not isinstance(ans, str) else []
        if callback:
            callback(msg="")
    except Exception as e:
        logging.exception(e)

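`gen_toc_from_text` keeps the model answer only when it is structured data rather than a plain string, otherwise it falls back to an empty TOC. Assuming `gen_json` returns either parsed JSON or the raw text when parsing fails, the same keep-or-fallback rule looks like this in isolation:

```python
# Illustration of the keep-or-fallback rule, assuming the upstream call either
# returns parsed JSON (a list) or the raw model text when parsing fails.
import json

def toc_or_empty(raw_answer: str) -> list:
    try:
        parsed = json.loads(raw_answer)
    except json.JSONDecodeError:
        return []                      # model did not return valid JSON
    return parsed if isinstance(parsed, list) else []

print(toc_or_empty('[{"title": "Introduction", "page": 1}]'))  # -> parsed TOC
print(toc_or_empty("sorry, I cannot do that"))                 # -> []
```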
@ -39,16 +39,17 @@ from api.db.services.knowledgebase_service import KnowledgebaseService
from common import settings
from common.config_utils import show_configs
from common.data_source import (
    BlobStorageConnector,
    NotionConnector,
    DiscordConnector,
    GoogleDriveConnector,
    MoodleConnector,
    JiraConnector,
    DropboxConnector,
    WebDAVConnector,
    AirtableConnector,
    BlobStorageConnector,
    NotionConnector,
    DiscordConnector,
    GoogleDriveConnector,
    MoodleConnector,
    JiraConnector,
    DropboxConnector,
    WebDAVConnector,
    AirtableConnector,
    AsanaConnector,
    ImapConnector
)
from common.constants import FileSource, TaskStatus
from common.data_source.config import INDEX_BATCH_SIZE
@ -915,6 +916,70 @@ class Github(SyncBase):

        return async_wrapper()


class IMAP(SyncBase):
    SOURCE_NAME: str = FileSource.IMAP

    async def _generate(self, task):
        from common.data_source.config import DocumentSource
        from common.data_source.interfaces import StaticCredentialsProvider
        self.connector = ImapConnector(
            host=self.conf.get("imap_host"),
            port=self.conf.get("imap_port"),
            mailboxes=self.conf.get("imap_mailbox"),
        )
        credentials_provider = StaticCredentialsProvider(tenant_id=task["tenant_id"], connector_name=DocumentSource.IMAP, credential_json=self.conf["credentials"])
        self.connector.set_credentials_provider(credentials_provider)
        end_time = datetime.now(timezone.utc).timestamp()
        if task["reindex"] == "1" or not task["poll_range_start"]:
            start_time = end_time - self.conf.get("poll_range",30) * 24 * 60 * 60
            begin_info = "totally"
        else:
            start_time = task["poll_range_start"].timestamp()
            begin_info = f"from {task['poll_range_start']}"
        raw_batch_size = self.conf.get("sync_batch_size") or self.conf.get("batch_size") or INDEX_BATCH_SIZE
        try:
            batch_size = int(raw_batch_size)
        except (TypeError, ValueError):
            batch_size = INDEX_BATCH_SIZE
        if batch_size <= 0:
            batch_size = INDEX_BATCH_SIZE

        def document_batches():
            checkpoint = self.connector.build_dummy_checkpoint()
            pending_docs = []
            iterations = 0
            iteration_limit = 100_000
            while checkpoint.has_more:
                wrapper = CheckpointOutputWrapper()
                doc_generator = wrapper(self.connector.load_from_checkpoint(start_time, end_time, checkpoint))
                for document, failure, next_checkpoint in doc_generator:
                    if failure is not None:
                        logging.warning("IMAP connector failure: %s", getattr(failure, "failure_message", failure))
                        continue
                    if document is not None:
                        pending_docs.append(document)
                        if len(pending_docs) >= batch_size:
                            yield pending_docs
                            pending_docs = []
                    if next_checkpoint is not None:
                        checkpoint = next_checkpoint

                iterations += 1
                if iterations > iteration_limit:
                    raise RuntimeError("Too many iterations while loading IMAP documents.")

            if pending_docs:
                yield pending_docs

        logging.info(
            "Connect to IMAP: host(%s) port(%s) user(%s) folder(%s) %s",
            self.conf["imap_host"],
            self.conf["imap_port"],
            self.conf["credentials"]["imap_username"],
            self.conf["imap_mailbox"],
            begin_info
        )
        return document_batches()


class Gitlab(SyncBase):
@ -977,6 +1042,7 @@ func_factory = {
    FileSource.BOX: BOX,
    FileSource.AIRTABLE: Airtable,
    FileSource.ASANA: Asana,
    FileSource.IMAP: IMAP,
    FileSource.GITHUB: Github,
    FileSource.GITLAB: Gitlab,
}

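The new `IMAP` sync class drains the connector checkpoint by checkpoint and regroups the resulting documents into fixed-size batches before returning them to the caller. A minimal sketch of that regrouping pattern, detached from the connector and checkpoint classes and assuming the document stream is just a plain iterable:

```python
# Minimal sketch of the batch-regrouping done by document_batches(): the real
# generator pulls documents out of CheckpointOutputWrapper; here the stream is
# an ordinary iterable so the pattern can be run on its own.
from typing import Iterable, Iterator, List

def regroup(documents: Iterable[dict], batch_size: int) -> Iterator[List[dict]]:
    pending: List[dict] = []
    for doc in documents:
        pending.append(doc)
        if len(pending) >= batch_size:
            yield pending
            pending = []
    if pending:            # flush the final, possibly short, batch
        yield pending

# Five documents with batch_size=2 -> batches of sizes 2, 2 and 1.
for batch in regroup(({"id": i} for i in range(5)), batch_size=2):
    print(len(batch))
```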
@ -332,6 +332,9 @@ async def build_chunks(task, progress_callback):
    async def doc_keyword_extraction(chat_mdl, d, topn):
        cached = get_llm_cache(chat_mdl.llm_name, d["content_with_weight"], "keywords", {"topn": topn})
        if not cached:
            if has_canceled(task["id"]):
                progress_callback(-1, msg="Task has been canceled.")
                return
            async with chat_limiter:
                cached = await keyword_extraction(chat_mdl, d["content_with_weight"], topn)
            set_llm_cache(chat_mdl.llm_name, d["content_with_weight"], cached, "keywords", {"topn": topn})
@ -362,6 +365,9 @@ async def build_chunks(task, progress_callback):
    async def doc_question_proposal(chat_mdl, d, topn):
        cached = get_llm_cache(chat_mdl.llm_name, d["content_with_weight"], "question", {"topn": topn})
        if not cached:
            if has_canceled(task["id"]):
                progress_callback(-1, msg="Task has been canceled.")
                return
            async with chat_limiter:
                cached = await question_proposal(chat_mdl, d["content_with_weight"], topn)
            set_llm_cache(chat_mdl.llm_name, d["content_with_weight"], cached, "question", {"topn": topn})
@ -392,6 +398,9 @@ async def build_chunks(task, progress_callback):
        cached = get_llm_cache(chat_mdl.llm_name, d["content_with_weight"], "metadata",
                               task["parser_config"]["metadata"])
        if not cached:
            if has_canceled(task["id"]):
                progress_callback(-1, msg="Task has been canceled.")
                return
            async with chat_limiter:
                cached = await gen_metadata(chat_mdl,
                                            metadata_schema(task["parser_config"]["metadata"]),
@ -457,6 +466,9 @@ async def build_chunks(task, progress_callback):
    async def doc_content_tagging(chat_mdl, d, topn_tags):
        cached = get_llm_cache(chat_mdl.llm_name, d["content_with_weight"], all_tags, {"topn": topn_tags})
        if not cached:
            if has_canceled(task["id"]):
                progress_callback(-1, msg="Task has been canceled.")
                return
            picked_examples = random.choices(examples, k=2) if len(examples) > 2 else examples
            if not picked_examples:
                picked_examples.append({"content": "This is an example", TAG_FLD: {'example': 1}})
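Each enrichment coroutine in `build_chunks` now follows the same guard order: look up the LLM cache, and only on a cache miss check `has_canceled` before spending an LLM call, then write the result back. A hedged, self-contained sketch of that order (the cache, the cancellation set, and `fake_llm` below are illustrative stand-ins, not the project helpers):

```python
# Hedged sketch of the guard order added above: consult a cache, bail out if the
# task was canceled, otherwise compute once and remember the answer.
import asyncio

CACHE: dict = {}
CANCELED: set = set()

async def fake_llm(text: str) -> str:
    await asyncio.sleep(0)  # pretend to call a model
    return text.upper()

async def enrich(task_id: str, text: str):
    cached = CACHE.get(text)              # 1. reuse a previous answer if any
    if cached is None:
        if task_id in CANCELED:           # 2. give up before paying for an LLM call
            print("Task has been canceled.")
            return None
        cached = await fake_llm(text)     # 3. compute once...
        CACHE[text] = cached              # 4. ...and cache it for the next document
    return cached

print(asyncio.run(enrich("task-1", "hello world")))
```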
|
||||
7
web/src/assets/svg/data-source/imap.svg
Normal file
7
web/src/assets/svg/data-source/imap.svg
Normal file
@ -0,0 +1,7 @@
|
||||
<svg stroke="currentColor" fill="none" stroke-width="2" viewBox="0 0 24 24"
|
||||
stroke-linecap="round" stroke-linejoin="round"
|
||||
class="text-text-04" height="32" width="32"
|
||||
xmlns="http://www.w3.org/2000/svg">
|
||||
<path d="M4 4h16c1.1 0 2 .9 2 2v12c0 1.1-.9 2-2 2H4c-1.1 0-2-.9-2-2V6c0-1.1.9-2 2-2z"></path>
|
||||
<polyline points="22,6 12,13 2,6"></polyline>
|
||||
</svg>
|
||||
|
After Width: | Height: | Size: 360 B |
@ -1,3 +1,4 @@
|
||||
import { useIsDarkTheme } from '@/components/theme-provider';
|
||||
import { useSetModalState, useTranslate } from '@/hooks/common-hooks';
|
||||
import { LangfuseCard } from '@/pages/user-setting/setting-model/langfuse';
|
||||
import apiDoc from '@parent/docs/references/http_api_reference.md';
|
||||
@ -28,6 +29,8 @@ const ApiContent = ({
|
||||
|
||||
const { handlePreview } = usePreviewChat(idKey);
|
||||
|
||||
const isDarkTheme = useIsDarkTheme();
|
||||
|
||||
return (
|
||||
<div className="pb-2">
|
||||
<Flex vertical gap={'middle'}>
|
||||
@ -47,7 +50,10 @@ const ApiContent = ({
|
||||
<div style={{ position: 'relative' }}>
|
||||
<MarkdownToc content={apiDoc} />
|
||||
</div>
|
||||
<MarkdownPreview source={apiDoc}></MarkdownPreview>
|
||||
<MarkdownPreview
|
||||
source={apiDoc}
|
||||
wrapperElement={{ 'data-color-mode': isDarkTheme ? 'dark' : 'light' }}
|
||||
></MarkdownPreview>
|
||||
</Flex>
|
||||
{apiKeyVisible && (
|
||||
<ChatApiKeyModal
|
||||
|
||||
@ -1,79 +1,72 @@
|
||||
import { DocumentParserType } from '@/constants/knowledge';
|
||||
import { useTranslate } from '@/hooks/common-hooks';
|
||||
import { useFetchKnowledgeList } from '@/hooks/use-knowledge-request';
|
||||
import { IKnowledge } from '@/interfaces/database/knowledge';
|
||||
import { useBuildQueryVariableOptions } from '@/pages/agent/hooks/use-get-begin-query';
|
||||
import { UserOutlined } from '@ant-design/icons';
|
||||
import { Avatar as AntAvatar, Form, Select, Space } from 'antd';
|
||||
import { toLower } from 'lodash';
|
||||
import { useMemo } from 'react';
|
||||
import { useEffect, useMemo, useState } from 'react';
|
||||
import { useFormContext } from 'react-hook-form';
|
||||
import { useTranslation } from 'react-i18next';
|
||||
import { RAGFlowAvatar } from './ragflow-avatar';
|
||||
import { FormControl, FormField, FormItem, FormLabel } from './ui/form';
|
||||
import { MultiSelect } from './ui/multi-select';
|
||||
|
||||
interface KnowledgeBaseItemProps {
|
||||
label?: string;
|
||||
tooltipText?: string;
|
||||
name?: string;
|
||||
required?: boolean;
|
||||
onChange?(): void;
|
||||
}
|
||||
|
||||
const KnowledgeBaseItem = ({
|
||||
label,
|
||||
tooltipText,
|
||||
name,
|
||||
required = true,
|
||||
onChange,
|
||||
}: KnowledgeBaseItemProps) => {
|
||||
const { t } = useTranslate('chat');
|
||||
|
||||
const { list: knowledgeList } = useFetchKnowledgeList(true);
|
||||
|
||||
const filteredKnowledgeList = knowledgeList.filter(
|
||||
(x) => x.parser_id !== DocumentParserType.Tag,
|
||||
);
|
||||
|
||||
const knowledgeOptions = filteredKnowledgeList.map((x) => ({
|
||||
label: (
|
||||
<Space>
|
||||
<AntAvatar size={20} icon={<UserOutlined />} src={x.avatar} />
|
||||
{x.name}
|
||||
</Space>
|
||||
),
|
||||
value: x.id,
|
||||
}));
|
||||
|
||||
return (
|
||||
<Form.Item
|
||||
label={label || t('knowledgeBases')}
|
||||
name={name || 'kb_ids'}
|
||||
tooltip={tooltipText || t('knowledgeBasesTip')}
|
||||
rules={[
|
||||
{
|
||||
required,
|
||||
message: t('knowledgeBasesMessage'),
|
||||
type: 'array',
|
||||
},
|
||||
]}
|
||||
>
|
||||
<Select
|
||||
mode="multiple"
|
||||
options={knowledgeOptions}
|
||||
placeholder={t('knowledgeBasesMessage')}
|
||||
onChange={onChange}
|
||||
></Select>
|
||||
</Form.Item>
|
||||
);
|
||||
};
|
||||
|
||||
export default KnowledgeBaseItem;
|
||||
import { MultiSelect, MultiSelectOptionType } from './ui/multi-select';
|
||||
|
||||
function buildQueryVariableOptionsByShowVariable(showVariable?: boolean) {
|
||||
return showVariable ? useBuildQueryVariableOptions : () => [];
|
||||
}
|
||||
|
||||
export function useDisableDifferenceEmbeddingDataset() {
|
||||
const [datasetOptions, setDatasetOptions] = useState<MultiSelectOptionType[]>(
|
||||
[],
|
||||
);
|
||||
const [datasetSelectEmbedId, setDatasetSelectEmbedId] = useState('');
|
||||
const { list: datasetListOrigin } = useFetchKnowledgeList(true);
|
||||
|
||||
useEffect(() => {
|
||||
const datasetListMap = datasetListOrigin
|
||||
.filter((x) => x.parser_id !== DocumentParserType.Tag)
|
||||
.map((item: IKnowledge) => {
|
||||
return {
|
||||
label: item.name,
|
||||
icon: () => (
|
||||
<RAGFlowAvatar
|
||||
className="size-4"
|
||||
avatar={item.avatar}
|
||||
name={item.name}
|
||||
/>
|
||||
),
|
||||
suffix: (
|
||||
<div className="text-xs px-4 p-1 bg-bg-card text-text-secondary rounded-lg border border-bg-card">
|
||||
{item.embd_id}
|
||||
</div>
|
||||
),
|
||||
value: item.id,
|
||||
disabled:
|
||||
item.embd_id !== datasetSelectEmbedId &&
|
||||
datasetSelectEmbedId !== '',
|
||||
};
|
||||
});
|
||||
setDatasetOptions(datasetListMap);
|
||||
}, [datasetListOrigin, datasetSelectEmbedId]);
|
||||
|
||||
const handleDatasetSelectChange = (
|
||||
value: string[],
|
||||
onChange: (value: string[]) => void,
|
||||
) => {
|
||||
if (value.length) {
|
||||
const data = datasetListOrigin?.find((item) => item.id === value[0]);
|
||||
setDatasetSelectEmbedId(data?.embd_id ?? '');
|
||||
} else {
|
||||
setDatasetSelectEmbedId('');
|
||||
}
|
||||
onChange?.(value);
|
||||
};
|
||||
|
||||
return {
|
||||
datasetOptions,
|
||||
handleDatasetSelectChange,
|
||||
};
|
||||
}
|
||||
|
||||
export function KnowledgeBaseFormField({
|
||||
showVariable = false,
|
||||
}: {
|
||||
@ -82,22 +75,12 @@ export function KnowledgeBaseFormField({
|
||||
const form = useFormContext();
|
||||
const { t } = useTranslation();
|
||||
|
||||
const { list: knowledgeList } = useFetchKnowledgeList(true);
|
||||
|
||||
const filteredKnowledgeList = knowledgeList.filter(
|
||||
(x) => x.parser_id !== DocumentParserType.Tag,
|
||||
);
|
||||
const { datasetOptions, handleDatasetSelectChange } =
|
||||
useDisableDifferenceEmbeddingDataset();
|
||||
|
||||
const nextOptions = buildQueryVariableOptionsByShowVariable(showVariable)();
|
||||
|
||||
const knowledgeOptions = filteredKnowledgeList.map((x) => ({
|
||||
label: x.name,
|
||||
value: x.id,
|
||||
icon: () => (
|
||||
<RAGFlowAvatar className="size-4 mr-2" avatar={x.avatar} name={x.name} />
|
||||
),
|
||||
}));
|
||||
|
||||
const knowledgeOptions = datasetOptions;
|
||||
const options = useMemo(() => {
|
||||
if (showVariable) {
|
||||
return [
|
||||
@ -140,11 +123,14 @@ export function KnowledgeBaseFormField({
|
||||
<FormControl>
|
||||
<MultiSelect
|
||||
options={options}
|
||||
onValueChange={field.onChange}
|
||||
onValueChange={(value) => {
|
||||
handleDatasetSelectChange(value, field.onChange);
|
||||
}}
|
||||
placeholder={t('chat.knowledgeBasesMessage')}
|
||||
variant="inverted"
|
||||
maxCount={100}
|
||||
defaultValue={field.value}
|
||||
showSelectAll={false}
|
||||
{...field}
|
||||
/>
|
||||
</FormControl>
|
||||
|
||||
@ -109,6 +109,19 @@ export const SelectWithSearch = forwardRef<
|
||||
}
|
||||
}, [options, value]);
|
||||
|
||||
const showSearch = useMemo(() => {
|
||||
if (Array.isArray(options) && options.length > 5) {
|
||||
return true;
|
||||
}
|
||||
if (Array.isArray(options)) {
|
||||
const optionsNum = options.reduce((acc, option) => {
|
||||
return acc + (option?.options?.length || 0);
|
||||
}, 0);
|
||||
return optionsNum > 5;
|
||||
}
|
||||
return false;
|
||||
}, [options]);
|
||||
|
||||
const handleSelect = useCallback(
|
||||
(val: string) => {
|
||||
setValue(val);
|
||||
@ -179,7 +192,7 @@ export const SelectWithSearch = forwardRef<
|
||||
align="start"
|
||||
>
|
||||
<Command className="p-5">
|
||||
{options && options.length > 5 && (
|
||||
{showSearch && (
|
||||
<CommandInput
|
||||
placeholder={t('common.search') + '...'}
|
||||
className=" placeholder:text-text-disabled"
|
||||
|
||||
@ -1,35 +1,19 @@
|
||||
import { toast } from 'sonner';
|
||||
import { ExternalToast, toast } from 'sonner';
|
||||
|
||||
const duration = { duration: 2500 };
|
||||
const configuration: ExternalToast = { duration: 2500, position: 'top-center' };
|
||||
|
||||
const message = {
|
||||
success: (msg: string) => {
|
||||
toast.success(msg, {
|
||||
position: 'top-center',
|
||||
closeButton: false,
|
||||
...duration,
|
||||
});
|
||||
toast.success(msg, configuration);
|
||||
},
|
||||
error: (msg: string) => {
|
||||
toast.error(msg, {
|
||||
position: 'top-center',
|
||||
closeButton: false,
|
||||
...duration,
|
||||
});
|
||||
toast.error(msg, configuration);
|
||||
},
|
||||
warning: (msg: string) => {
|
||||
toast.warning(msg, {
|
||||
position: 'top-center',
|
||||
closeButton: false,
|
||||
...duration,
|
||||
});
|
||||
toast.warning(msg, configuration);
|
||||
},
|
||||
info: (msg: string) => {
|
||||
toast.info(msg, {
|
||||
position: 'top-center',
|
||||
closeButton: false,
|
||||
...duration,
|
||||
});
|
||||
toast.info(msg, configuration);
|
||||
},
|
||||
};
|
||||
export default message;
|
||||
|
||||
@ -116,7 +116,7 @@ export interface ITenantInfo {
  tts_id: string;
}

export type ChunkDocType = 'image' | 'table';
export type ChunkDocType = 'image' | 'table' | 'text';

export interface IChunk {
  available_int: number; // Whether to enable, 0: not enabled, 1: enabled

@ -147,6 +147,8 @@ Procedural Memory: Learned skills, habits, and automated procedures.`,
|
||||
action: 'Action',
|
||||
},
|
||||
config: {
|
||||
memorySizeTooltip: `Accounts for each message's content + its embedding vector (≈ Content + Dimensions × 8 Bytes).
|
||||
Example: A 1 KB message with 1024-dim embedding uses ~9 KB. The 5 MB default limit holds ~500 such messages.`,
|
||||
avatar: 'Avatar',
|
||||
description: 'Description',
|
||||
memorySize: 'Memory size',
|
||||
@ -939,6 +941,8 @@ Example: Virtual Hosted Style`,
|
||||
'Connect GitLab to sync repositories, issues, merge requests, and related documentation.',
|
||||
asanaDescription:
|
||||
'Connect to Asana and synchronize files from a specified workspace.',
|
||||
imapDescription:
|
||||
'Connect to your IMAP mailbox to sync emails for knowledge retrieval.',
|
||||
dropboxAccessTokenTip:
|
||||
'Generate a long-lived access token in the Dropbox App Console with files.metadata.read, files.content.read, and sharing.read scopes.',
|
||||
moodleDescription:
|
||||
|
||||
@ -755,6 +755,8 @@ export default {
|
||||
'Подключите GitLab для синхронизации репозиториев, задач, merge requests и связанной документации.',
|
||||
asanaDescription:
|
||||
'Подключите Asana и синхронизируйте файлы из рабочего пространства.',
|
||||
imapDescription:
|
||||
'Подключите почтовый ящик IMAP для синхронизации писем из указанных почтовых ящиков (mailboxes) с целью поиска и анализа знаний.',
|
||||
google_driveDescription:
|
||||
'Подключите ваш Google Drive через OAuth и синхронизируйте определенные папки или диски.',
|
||||
gmailDescription:
|
||||
|
||||
@ -124,7 +124,6 @@ export default {
|
||||
forgetMessageTip: '确定遗忘吗?',
|
||||
messageDescription: '记忆提取使用高级设置中的提示词和温度值进行配置。',
|
||||
copied: '已复制!',
|
||||
contentEmbed: '内容嵌入',
|
||||
content: '内容',
|
||||
delMessageWarn: `遗忘后,代理将无法检索此消息。`,
|
||||
forgetMessage: '遗忘消息',
|
||||
@ -138,6 +137,8 @@ export default {
|
||||
action: '操作',
|
||||
},
|
||||
config: {
|
||||
memorySizeTooltip: `记录每条消息的内容 + 其嵌入向量(≈ 内容 + 维度 × 8 字节)。
|
||||
例如:一条带有 1024 维嵌入的 1 KB 消息大约使用 9 KB。5 MB 的默认限制大约可容纳 500 条此类消息。`,
|
||||
avatar: '头像',
|
||||
description: '描述',
|
||||
memorySize: '记忆大小',
|
||||
@ -867,6 +868,8 @@ General:实体和关系提取提示来自 GitHub - microsoft/graphrag:基于
|
||||
gitlabDescription:
|
||||
'连接 GitLab,同步仓库、Issue、合并请求(MR)及相关文档内容。',
|
||||
asanaDescription: '连接 Asana,同步工作区中的文件。',
|
||||
imapDescription:
|
||||
'连接你的 IMAP 邮箱,同步指定mailboxes中的邮件,用于知识检索与分析',
|
||||
r2Description: '连接你的 Cloudflare R2 存储桶以导入和同步文件。',
|
||||
dropboxAccessTokenTip:
|
||||
'请在 Dropbox App Console 生成 Access Token,并勾选 files.metadata.read、files.content.read、sharing.read 等必要权限。',
|
||||
|
||||
@ -1,16 +1,18 @@
|
||||
import { IBeginNode } from '@/interfaces/database/flow';
|
||||
import { BaseNode } from '@/interfaces/database/flow';
|
||||
import { cn } from '@/lib/utils';
|
||||
import { NodeProps, Position } from '@xyflow/react';
|
||||
import get from 'lodash/get';
|
||||
import { memo } from 'react';
|
||||
import { useTranslation } from 'react-i18next';
|
||||
import {
|
||||
AgentDialogueMode,
|
||||
BeginQueryType,
|
||||
BeginQueryTypeIconMap,
|
||||
NodeHandleId,
|
||||
Operator,
|
||||
} from '../../constant';
|
||||
import { BeginQuery } from '../../interface';
|
||||
import { BeginFormSchemaType } from '../../form/begin-form/schema';
|
||||
import { useBuildWebhookUrl } from '../../hooks/use-build-webhook-url';
|
||||
import OperatorIcon from '../../operator-icon';
|
||||
import { LabelCard } from './card';
|
||||
import { CommonHandle } from './handle';
|
||||
@ -18,10 +20,19 @@ import { RightHandleStyle } from './handle-icon';
|
||||
import styles from './index.less';
|
||||
import { NodeWrapper } from './node-wrapper';
|
||||
|
||||
// TODO: do not allow other nodes to connect to this node
|
||||
function InnerBeginNode({ data, id, selected }: NodeProps<IBeginNode>) {
|
||||
function InnerBeginNode({
|
||||
data,
|
||||
id,
|
||||
selected,
|
||||
}: NodeProps<BaseNode<BeginFormSchemaType>>) {
|
||||
const { t } = useTranslation();
|
||||
const inputs: Record<string, BeginQuery> = get(data, 'form.inputs', {});
|
||||
const inputs = get(data, 'form.inputs', {});
|
||||
|
||||
const mode = data.form?.mode;
|
||||
|
||||
const isWebhookMode = mode === AgentDialogueMode.Webhook;
|
||||
|
||||
const url = useBuildWebhookUrl();
|
||||
|
||||
return (
|
||||
<NodeWrapper selected={selected} id={id}>
|
||||
@ -40,23 +51,38 @@ function InnerBeginNode({ data, id, selected }: NodeProps<IBeginNode>) {
|
||||
{t(`flow.begin`)}
|
||||
</div>
|
||||
</section>
|
||||
<section className={cn(styles.generateParameters, 'flex gap-2 flex-col')}>
|
||||
{Object.entries(inputs).map(([key, val], idx) => {
|
||||
const Icon = BeginQueryTypeIconMap[val.type as BeginQueryType];
|
||||
return (
|
||||
<LabelCard key={idx} className={cn('flex gap-1.5 items-center')}>
|
||||
<Icon className="size-3.5" />
|
||||
<label htmlFor="" className="text-accent-primary text-sm italic">
|
||||
{key}
|
||||
</label>
|
||||
<LabelCard className="py-0.5 truncate flex-1">
|
||||
{val.name}
|
||||
<div className="text-accent-primary mt-2 p-1 bg-bg-accent w-fit rounded-sm text-xs">
|
||||
{t(`flow.${isWebhookMode ? 'webhook.name' : mode}`)}
|
||||
</div>
|
||||
{isWebhookMode ? (
|
||||
<LabelCard className="mt-2 flex gap-1 items-center">
|
||||
<span className="font-bold">URL</span>
|
||||
<span className="flex-1 truncate">{url}</span>
|
||||
</LabelCard>
|
||||
) : (
|
||||
<section
|
||||
className={cn(styles.generateParameters, 'flex gap-2 flex-col')}
|
||||
>
|
||||
{Object.entries(inputs).map(([key, val], idx) => {
|
||||
const Icon = BeginQueryTypeIconMap[val.type as BeginQueryType];
|
||||
return (
|
||||
<LabelCard key={idx} className={cn('flex gap-1.5 items-center')}>
|
||||
<Icon className="size-3.5" />
|
||||
<label
|
||||
htmlFor=""
|
||||
className="text-accent-primary text-sm italic"
|
||||
>
|
||||
{key}
|
||||
</label>
|
||||
<LabelCard className="py-0.5 truncate flex-1">
|
||||
{val.name}
|
||||
</LabelCard>
|
||||
<span className="flex-1">{val.optional ? 'Yes' : 'No'}</span>
|
||||
</LabelCard>
|
||||
<span className="flex-1">{val.optional ? 'Yes' : 'No'}</span>
|
||||
</LabelCard>
|
||||
);
|
||||
})}
|
||||
</section>
|
||||
);
|
||||
})}
|
||||
</section>
|
||||
)}
|
||||
</NodeWrapper>
|
||||
);
|
||||
}
|
||||
|
||||
@ -3,13 +3,16 @@ import { CopyToClipboardWithText } from '@/components/copy-to-clipboard';
|
||||
import NumberInput from '@/components/originui/number-input';
|
||||
import { SelectWithSearch } from '@/components/originui/select-with-search';
|
||||
import { RAGFlowFormItem } from '@/components/ragflow-form';
|
||||
import { Label } from '@/components/ui/label';
|
||||
import { MultiSelect } from '@/components/ui/multi-select';
|
||||
import { Separator } from '@/components/ui/separator';
|
||||
import { Textarea } from '@/components/ui/textarea';
|
||||
import { useBuildWebhookUrl } from '@/pages/agent/hooks/use-build-webhook-url';
|
||||
import { buildOptions } from '@/utils/form';
|
||||
import { upperFirst } from 'lodash';
|
||||
import { useCallback } from 'react';
|
||||
import { useFormContext, useWatch } from 'react-hook-form';
|
||||
import { useTranslation } from 'react-i18next';
|
||||
import { useParams } from 'umi';
|
||||
import {
|
||||
RateLimitPerList,
|
||||
WebhookMaxBodySize,
|
||||
@ -22,7 +25,10 @@ import { Auth } from './auth';
|
||||
import { WebhookRequestSchema } from './request-schema';
|
||||
import { WebhookResponse } from './response';
|
||||
|
||||
const RateLimitPerOptions = buildOptions(RateLimitPerList);
|
||||
const RateLimitPerOptions = RateLimitPerList.map((x) => ({
|
||||
value: x,
|
||||
label: upperFirst(x),
|
||||
}));
|
||||
|
||||
const RequestLimitMap = {
|
||||
[WebhookRateLimitPer.Second]: 100,
|
||||
@ -33,7 +39,6 @@ const RequestLimitMap = {
|
||||
|
||||
export function WebHook() {
|
||||
const { t } = useTranslation();
|
||||
const { id } = useParams();
|
||||
const form = useFormContext();
|
||||
|
||||
const rateLimitPer = useWatch({
|
||||
@ -45,7 +50,7 @@ export function WebHook() {
|
||||
return RequestLimitMap[rateLimitPer as keyof typeof RequestLimitMap] ?? 100;
|
||||
}, []);
|
||||
|
||||
const text = `${location.protocol}//${location.host}/api/v1/webhook/${id}`;
|
||||
const text = useBuildWebhookUrl();
|
||||
|
||||
return (
|
||||
<>
|
||||
@ -74,33 +79,36 @@ export function WebHook() {
|
||||
></SelectWithSearch>
|
||||
</RAGFlowFormItem>
|
||||
<Auth></Auth>
|
||||
<RAGFlowFormItem
|
||||
name="security.rate_limit.limit"
|
||||
label={t('flow.webhook.limit')}
|
||||
>
|
||||
<NumberInput
|
||||
max={getLimitRateLimitPerMax(rateLimitPer)}
|
||||
className="w-full"
|
||||
></NumberInput>
|
||||
</RAGFlowFormItem>
|
||||
<RAGFlowFormItem
|
||||
name="security.rate_limit.per"
|
||||
label={t('flow.webhook.per')}
|
||||
>
|
||||
{(field) => (
|
||||
<SelectWithSearch
|
||||
options={RateLimitPerOptions}
|
||||
value={field.value}
|
||||
onChange={(val) => {
|
||||
field.onChange(val);
|
||||
form.setValue(
|
||||
'security.rate_limit.limit',
|
||||
getLimitRateLimitPerMax(val),
|
||||
);
|
||||
}}
|
||||
></SelectWithSearch>
|
||||
)}
|
||||
</RAGFlowFormItem>
|
||||
<section>
|
||||
<Label>{t('flow.webhook.limit')}</Label>
|
||||
<div className="flex items-center mt-1 gap-2">
|
||||
<RAGFlowFormItem
|
||||
name="security.rate_limit.limit"
|
||||
className="flex-1"
|
||||
>
|
||||
<NumberInput
|
||||
max={getLimitRateLimitPerMax(rateLimitPer)}
|
||||
className="w-full"
|
||||
></NumberInput>
|
||||
</RAGFlowFormItem>
|
||||
<Separator className="w-2" />
|
||||
<RAGFlowFormItem name="security.rate_limit.per">
|
||||
{(field) => (
|
||||
<SelectWithSearch
|
||||
options={RateLimitPerOptions}
|
||||
value={field.value}
|
||||
onChange={(val) => {
|
||||
field.onChange(val);
|
||||
form.setValue(
|
||||
'security.rate_limit.limit',
|
||||
getLimitRateLimitPerMax(val),
|
||||
);
|
||||
}}
|
||||
></SelectWithSearch>
|
||||
)}
|
||||
</RAGFlowFormItem>
|
||||
</div>
|
||||
</section>
|
||||
<RAGFlowFormItem
|
||||
name="security.max_body_size"
|
||||
label={t('flow.webhook.maxBodySize')}
|
||||
|
||||
web/src/pages/agent/hooks/use-build-webhook-url.ts (new file, 8 lines)
@ -0,0 +1,8 @@
import { useParams } from 'umi';

export function useBuildWebhookUrl() {
  const { id } = useParams();

  const text = `${location.protocol}//${location.host}/api/v1/webhook/${id}`;
  return text;
}
@ -8,7 +8,7 @@ import {
|
||||
TooltipContent,
|
||||
TooltipTrigger,
|
||||
} from '@/components/ui/tooltip';
|
||||
import { IChunk } from '@/interfaces/database/knowledge';
|
||||
import type { ChunkDocType, IChunk } from '@/interfaces/database/knowledge';
|
||||
import { cn } from '@/lib/utils';
|
||||
import { CheckedState } from '@radix-ui/react-checkbox';
|
||||
import classNames from 'classnames';
|
||||
@ -67,6 +67,10 @@ const ChunkCard = ({
|
||||
setEnabled(available === 1);
|
||||
}, [available]);
|
||||
|
||||
const chunkType =
|
||||
((item.doc_type_kwd &&
|
||||
String(item.doc_type_kwd)?.toLowerCase()) as ChunkDocType) || 'text';
|
||||
|
||||
return (
|
||||
<Card
|
||||
className={classNames('relative flex-none', styles.chunkCard, {
|
||||
@ -81,9 +85,7 @@ const ChunkCard = ({
|
||||
bg-bg-card rounded-bl-2xl rounded-tr-lg
|
||||
border-l-0.5 border-b-0.5 border-border-button"
|
||||
>
|
||||
{t(
|
||||
`chunk.docType.${item.doc_type_kwd ? String(item.doc_type_kwd).toLowerCase() : 'text'}`,
|
||||
)}
|
||||
{t(`chunk.docType.${chunkType}`)}
|
||||
</span>
|
||||
|
||||
<div className="flex items-start justify-between gap-2">
|
||||
|
||||
@ -22,6 +22,7 @@ import { Switch } from '@/components/ui/switch';
|
||||
import { Textarea } from '@/components/ui/textarea';
|
||||
import { useFetchChunk } from '@/hooks/use-chunk-request';
|
||||
import { IModalProps } from '@/interfaces/common';
|
||||
import type { ChunkDocType } from '@/interfaces/database/knowledge';
|
||||
import React, { useCallback, useEffect, useState } from 'react';
|
||||
import { FieldValues, FormProvider, useForm } from 'react-hook-form';
|
||||
import { useTranslation } from 'react-i18next';
|
||||
@ -151,20 +152,25 @@ const ChunkCreatingModal: React.FC<IModalProps<any> & kFProps> = ({
|
||||
<FormField
|
||||
control={form.control}
|
||||
name="doc_type_kwd"
|
||||
render={({ field }) => (
|
||||
<FormItem>
|
||||
<FormLabel>{t(`chunk.type`)}</FormLabel>
|
||||
<FormControl>
|
||||
<Input
|
||||
type="text"
|
||||
value={t(
|
||||
`chunk.docType.${field.value ? String(field.value).toLowerCase() : 'text'}`,
|
||||
)}
|
||||
readOnly
|
||||
/>
|
||||
</FormControl>
|
||||
</FormItem>
|
||||
)}
|
||||
render={({ field }) => {
|
||||
const chunkType =
|
||||
((field.value &&
|
||||
String(field.value)?.toLowerCase()) as ChunkDocType) ||
|
||||
'text';
|
||||
|
||||
return (
|
||||
<FormItem>
|
||||
<FormLabel>{t(`chunk.type`)}</FormLabel>
|
||||
<FormControl>
|
||||
<Input
|
||||
type="text"
|
||||
value={t(`chunk.docType.${chunkType}`)}
|
||||
readOnly
|
||||
/>
|
||||
</FormControl>
|
||||
</FormItem>
|
||||
);
|
||||
}}
|
||||
/>
|
||||
)}
|
||||
|
||||
|
||||
@ -79,6 +79,7 @@ export default function Dataset() {
|
||||
useRowSelection();
|
||||
|
||||
const {
|
||||
chunkNum,
|
||||
list,
|
||||
visible: reparseDialogVisible,
|
||||
hideModal: hideReparseDialogModal,
|
||||
@ -218,7 +219,7 @@ export default function Dataset() {
|
||||
// hidden={isZeroChunk || isRunning}
|
||||
hidden={true}
|
||||
handleOperationIconClick={handleOperationIconClick}
|
||||
chunk_num={0}
|
||||
chunk_num={chunkNum}
|
||||
visible={reparseDialogVisible}
|
||||
hideModal={hideReparseDialogModal}
|
||||
></ReparseDialog>
|
||||
|
||||
@ -183,7 +183,7 @@ export function ParsingStatusCell({
|
||||
)}
|
||||
{reparseDialogVisible && (
|
||||
<ReparseDialog
|
||||
hidden={isZeroChunk || isRunning}
|
||||
hidden={isRunning}
|
||||
// hidden={false}
|
||||
handleOperationIconClick={handleOperationIconClick}
|
||||
chunk_num={chunk_num}
|
||||
|
||||
@ -2,12 +2,14 @@ import { ConfirmDeleteDialog } from '@/components/confirm-delete-dialog';
|
||||
import {
|
||||
DynamicForm,
|
||||
DynamicFormRef,
|
||||
FormFieldConfig,
|
||||
FormFieldType,
|
||||
} from '@/components/dynamic-form';
|
||||
import { Checkbox } from '@/components/ui/checkbox';
|
||||
import { DialogProps } from '@radix-ui/react-dialog';
|
||||
import { t } from 'i18next';
|
||||
import { memo, useCallback, useEffect, useRef } from 'react';
|
||||
import { memo, useCallback, useEffect, useRef, useState } from 'react';
|
||||
import { ControllerRenderProps } from 'react-hook-form';
|
||||
import { useTranslation } from 'react-i18next';
|
||||
|
||||
export const ReparseDialog = memo(
|
||||
({
|
||||
@ -26,18 +28,77 @@ export const ReparseDialog = memo(
|
||||
hideModal: () => void;
|
||||
hidden?: boolean;
|
||||
}) => {
|
||||
// const [formInstance, setFormInstance] = useState<DynamicFormRef | null>(
|
||||
// null,
|
||||
// );
|
||||
const [defaultValues, setDefaultValues] = useState<any>(null);
|
||||
const [fields, setFields] = useState<FormFieldConfig[]>([]);
|
||||
const { t } = useTranslation();
|
||||
const handleOperationIconClickRef = useRef(handleOperationIconClick);
|
||||
const hiddenRef = useRef(hidden);
|
||||
|
||||
// const formCallbackRef = useCallback((node: DynamicFormRef | null) => {
|
||||
// if (node) {
|
||||
// setFormInstance(node);
|
||||
// console.log('Form instance assigned:', node);
|
||||
// } else {
|
||||
// console.log('Form instance removed');
|
||||
// }
|
||||
// }, []);
|
||||
useEffect(() => {
|
||||
handleOperationIconClickRef.current = handleOperationIconClick;
|
||||
hiddenRef.current = hidden;
|
||||
});
|
||||
|
||||
useEffect(() => {
|
||||
if (hiddenRef.current) {
|
||||
handleOperationIconClickRef.current();
|
||||
}
|
||||
}, []);
|
||||
useEffect(() => {
|
||||
setDefaultValues({
|
||||
delete: chunk_num > 0,
|
||||
apply_kb: false,
|
||||
});
|
||||
const deleteField = {
|
||||
name: 'delete',
|
||||
label: '',
|
||||
type: FormFieldType.Checkbox,
|
||||
render: (fieldProps: ControllerRenderProps) => (
|
||||
<div className="flex items-center text-text-secondary p-5 border border-border-button rounded-lg">
|
||||
<Checkbox
|
||||
{...fieldProps}
|
||||
checked={fieldProps.value}
|
||||
onCheckedChange={(checked: boolean) => {
|
||||
fieldProps.onChange(checked);
|
||||
}}
|
||||
/>
|
||||
<span className="ml-2">
|
||||
{chunk_num > 0
|
||||
? t(`knowledgeDetails.redo`, {
|
||||
chunkNum: chunk_num,
|
||||
})
|
||||
: t('knowledgeDetails.redoAll')}
|
||||
</span>
|
||||
</div>
|
||||
),
|
||||
};
|
||||
const applyKBField = {
|
||||
name: 'apply_kb',
|
||||
label: '',
|
||||
type: FormFieldType.Checkbox,
|
||||
defaultValue: false,
|
||||
render: (fieldProps: ControllerRenderProps) => (
|
||||
<div className="flex items-center text-text-secondary p-5 border border-border-button rounded-lg">
|
||||
<Checkbox
|
||||
{...fieldProps}
|
||||
checked={fieldProps.value}
|
||||
onCheckedChange={(checked: boolean) => {
|
||||
fieldProps.onChange(checked);
|
||||
}}
|
||||
/>
|
||||
<span className="ml-2">
|
||||
{t('knowledgeDetails.applyAutoMetadataSettings')}
|
||||
</span>
|
||||
</div>
|
||||
),
|
||||
};
|
||||
if (chunk_num > 0) {
|
||||
setFields([deleteField, applyKBField]);
|
||||
}
|
||||
if (chunk_num <= 0) {
|
||||
setFields([applyKBField]);
|
||||
}
|
||||
}, [chunk_num, t]);
|
||||
|
||||
const formCallbackRef = useRef<DynamicFormRef>(null);
|
||||
|
||||
@ -68,12 +129,6 @@ export const ReparseDialog = memo(
|
||||
}
|
||||
}, [formCallbackRef, handleOperationIconClick]);
|
||||
|
||||
useEffect(() => {
|
||||
if (hidden) {
|
||||
handleOperationIconClick();
|
||||
}
|
||||
}, []);
|
||||
|
||||
return (
|
||||
<ConfirmDeleteDialog
|
||||
title={t(`knowledgeDetails.parseFile`)}
|
||||
@ -91,48 +146,8 @@ export const ReparseDialog = memo(
|
||||
console.log('submit', data);
|
||||
}}
|
||||
ref={formCallbackRef}
|
||||
fields={[
|
||||
{
|
||||
name: 'delete',
|
||||
label: '',
|
||||
type: FormFieldType.Checkbox,
|
||||
render: (fieldProps) => (
|
||||
<div className="flex items-center text-text-secondary p-5 border border-border-button rounded-lg">
|
||||
<Checkbox
|
||||
{...fieldProps}
|
||||
onCheckedChange={(checked: boolean) => {
|
||||
fieldProps.onChange(checked);
|
||||
}}
|
||||
/>
|
||||
<span className="ml-2">
|
||||
{chunk_num > 0
|
||||
? t(`knowledgeDetails.redo`, {
|
||||
chunkNum: chunk_num,
|
||||
})
|
||||
: t('knowledgeDetails.redoAll')}
|
||||
</span>
|
||||
</div>
|
||||
),
|
||||
},
|
||||
{
|
||||
name: 'apply_kb',
|
||||
label: '',
|
||||
type: FormFieldType.Checkbox,
|
||||
render: (fieldProps) => (
|
||||
<div className="flex items-center text-text-secondary p-5 border border-border-button rounded-lg">
|
||||
<Checkbox
|
||||
{...fieldProps}
|
||||
onCheckedChange={(checked: boolean) => {
|
||||
fieldProps.onChange(checked);
|
||||
}}
|
||||
/>
|
||||
<span className="ml-2">
|
||||
{t('knowledgeDetails.applyAutoMetadataSettings')}
|
||||
</span>
|
||||
</div>
|
||||
),
|
||||
},
|
||||
]}
|
||||
fields={fields}
|
||||
defaultValues={defaultValues}
|
||||
>
|
||||
{/* <DynamicForm.CancelButton
|
||||
handleCancel={() => handleOperationIconClick(false)}
|
||||
|
||||
@ -10,7 +10,7 @@ import {
|
||||
} from '@/hooks/use-document-request';
|
||||
import { IDocumentInfo } from '@/interfaces/database/document';
|
||||
import { Ban, CircleCheck, CircleX, Play, Trash2 } from 'lucide-react';
|
||||
import { useCallback } from 'react';
|
||||
import { useCallback, useMemo } from 'react';
|
||||
import { useTranslation } from 'react-i18next';
|
||||
import { toast } from 'sonner';
|
||||
import { DocumentType, RunningStatus } from './constant';
|
||||
@ -32,6 +32,16 @@ export function useBulkOperateDataset({
|
||||
const { setDocumentStatus } = useSetDocumentStatus();
|
||||
const { removeDocument } = useRemoveDocument();
|
||||
const { visible, showModal, hideModal } = useSetModalState();
|
||||
|
||||
const chunkNum = useMemo(() => {
|
||||
if (!documents.length) {
|
||||
return 0;
|
||||
}
|
||||
return documents.reduce((acc, cur) => {
|
||||
return acc + cur.chunk_num;
|
||||
}, 0);
|
||||
}, [documents]);
|
||||
|
||||
const runDocument = useCallback(
|
||||
async (run: number, option?: { delete: boolean; apply_kb: boolean }) => {
|
||||
const nonVirtualKeys = selectedRowKeys.filter(
|
||||
@ -132,5 +142,5 @@ export function useBulkOperateDataset({
|
||||
},
|
||||
];
|
||||
|
||||
return { list, visible, hideModal, showModal, handleRunClick };
|
||||
return { chunkNum, list, visible, hideModal, showModal, handleRunClick };
|
||||
}
|
||||
|
||||
@ -38,21 +38,27 @@ interface ProcessLogModalProps {
|
||||
}
|
||||
|
||||
const InfoItem: React.FC<{
|
||||
overflowTip?: boolean;
|
||||
label: string;
|
||||
value: string | React.ReactNode;
|
||||
className?: string;
|
||||
}> = ({ label, value, className = '' }) => {
|
||||
}> = ({ label, value, className = '', overflowTip = false }) => {
|
||||
return (
|
||||
<div className={`flex flex-col mb-4 ${className}`}>
|
||||
<span className="text-text-secondary text-sm">{label}</span>
|
||||
<Tooltip>
|
||||
<TooltipTrigger asChild>
|
||||
<span className="text-text-primary mt-1 truncate w-full">
|
||||
{value}
|
||||
</span>
|
||||
</TooltipTrigger>
|
||||
<TooltipContent>{value}</TooltipContent>
|
||||
</Tooltip>
|
||||
{overflowTip && (
|
||||
<Tooltip>
|
||||
<TooltipTrigger asChild>
|
||||
<span className="text-text-primary mt-1 truncate w-full">
|
||||
{value}
|
||||
</span>
|
||||
</TooltipTrigger>
|
||||
<TooltipContent>{value}</TooltipContent>
|
||||
</Tooltip>
|
||||
)}
|
||||
{!overflowTip && (
|
||||
<span className="text-text-primary mt-1 truncate w-full">{value}</span>
|
||||
)}
|
||||
</div>
|
||||
);
|
||||
};
|
||||
@ -139,6 +145,7 @@ const ProcessLogModal: React.FC<ProcessLogModalProps> = ({
|
||||
return (
|
||||
<div className="w-1/2" key={key}>
|
||||
<InfoItem
|
||||
overflowTip={true}
|
||||
label={t(key)}
|
||||
value={logInfo[key as keyof typeof logInfo]}
|
||||
/>
|
||||
|
||||
@ -92,6 +92,7 @@ export const MemoryModelForm = () => {
|
||||
label: t('memory.config.memorySize') + ' (Bytes)',
|
||||
type: FormFieldType.Number,
|
||||
horizontal: true,
|
||||
tooltip: t('memory.config.memorySizeTooltip'),
|
||||
// placeholder: t('memory.config.memorySizePlaceholder'),
|
||||
required: false,
|
||||
}}
|
||||
|
||||
@ -27,6 +27,7 @@ export enum DataSourceKey {
|
||||
AIRTABLE = 'airtable',
|
||||
GITLAB = 'gitlab',
|
||||
ASANA = 'asana',
|
||||
IMAP = 'imap',
|
||||
GITHUB = 'github',
|
||||
// SHAREPOINT = 'sharepoint',
|
||||
// SLACK = 'slack',
|
||||
@ -127,6 +128,11 @@ export const generateDataSourceInfo = (t: TFunction) => {
|
||||
description: t(`setting.${DataSourceKey.GITHUB}Description`),
|
||||
icon: <SvgIcon name={'data-source/github'} width={38} />,
|
||||
},
|
||||
[DataSourceKey.IMAP]: {
|
||||
name: 'IMAP',
|
||||
description: t(`setting.${DataSourceKey.IMAP}Description`),
|
||||
icon: <SvgIcon name={'data-source/imap'} width={38} />,
|
||||
},
|
||||
};
|
||||
};
|
||||
|
||||
@ -654,7 +660,7 @@ export const DataSourceFormFields = {
|
||||
{
|
||||
label: 'Access Token',
|
||||
name: 'config.credentials.airtable_access_token',
|
||||
type: FormFieldType.Text,
|
||||
type: FormFieldType.Password,
|
||||
required: true,
|
||||
},
|
||||
{
|
||||
@ -722,7 +728,7 @@ export const DataSourceFormFields = {
|
||||
{
|
||||
label: 'API Token',
|
||||
name: 'config.credentials.asana_api_token_secret',
|
||||
type: FormFieldType.Text,
|
||||
type: FormFieldType.Password,
|
||||
required: true,
|
||||
},
|
||||
{
|
||||
@ -778,6 +784,44 @@ export const DataSourceFormFields = {
|
||||
defaultValue: false,
|
||||
},
|
||||
],
|
||||
[DataSourceKey.IMAP]: [
|
||||
{
|
||||
label: 'Username',
|
||||
name: 'config.credentials.imap_username',
|
||||
type: FormFieldType.Text,
|
||||
required: true,
|
||||
},
|
||||
{
|
||||
label: 'Password',
|
||||
name: 'config.credentials.imap_password',
|
||||
type: FormFieldType.Password,
|
||||
required: true,
|
||||
},
|
||||
{
|
||||
label: 'Host',
|
||||
name: 'config.imap_host',
|
||||
type: FormFieldType.Text,
|
||||
required: true,
|
||||
},
|
||||
{
|
||||
label: 'Port',
|
||||
name: 'config.imap_port',
|
||||
type: FormFieldType.Number,
|
||||
required: true,
|
||||
},
|
||||
{
|
||||
label: 'Mailboxes',
|
||||
name: 'config.imap_mailbox',
|
||||
type: FormFieldType.Tag,
|
||||
required: false,
|
||||
},
|
||||
{
|
||||
label: 'Poll Range',
|
||||
name: 'config.poll_range',
|
||||
type: FormFieldType.Number,
|
||||
required: false,
|
||||
},
|
||||
],
|
||||
};
|
||||
|
||||
export const DataSourceFormDefaultValues = {
|
||||
@ -1017,4 +1061,19 @@ export const DataSourceFormDefaultValues = {
|
||||
},
|
||||
},
|
||||
},
|
||||
[DataSourceKey.IMAP]: {
|
||||
name: '',
|
||||
source: DataSourceKey.IMAP,
|
||||
config: {
|
||||
name: '',
|
||||
imap_host: '',
|
||||
imap_port: 993,
|
||||
imap_mailbox: [],
|
||||
poll_range: 30,
|
||||
credentials: {
|
||||
imap_username: '',
|
||||
imap_password: '',
|
||||
},
|
||||
},
|
||||
},
|
||||
};
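Taken together, the IMAP form fields and defaults above imply a saved data-source configuration shaped roughly as follows. The values are illustrative only; the key names come from the diff:

```python
# Illustrative only: an IMAP data-source configuration assembled from the field
# names introduced above. Host, mailbox names and credentials are made up.
imap_source = {
    "name": "Team mailbox",
    "source": "imap",
    "config": {
        "imap_host": "imap.example.com",       # hypothetical server
        "imap_port": 993,                      # matches the form default (IMAPS)
        "imap_mailbox": ["INBOX", "Support"],  # mailboxes/folders to sync
        "poll_range": 30,                      # days to look back on a full sync
        "credentials": {
            "imap_username": "bot@example.com",
            "imap_password": "<app-password>",
        },
    },
}
```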