diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 489daab3e..e5da3d127 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -88,7 +88,9 @@ jobs: with: context: . push: true - tags: infiniflow/ragflow:${{ env.RELEASE_TAG }} + tags: | + infiniflow/ragflow:${{ env.RELEASE_TAG }} + infiniflow/ragflow:latest-full file: Dockerfile platforms: linux/amd64 @@ -98,7 +100,9 @@ jobs: with: context: . push: true - tags: infiniflow/ragflow:${{ env.RELEASE_TAG }}-slim + tags: | + infiniflow/ragflow:${{ env.RELEASE_TAG }}-slim + infiniflow/ragflow:latest-slim file: Dockerfile build-args: LIGHTEN=1 platforms: linux/amd64 diff --git a/agent/templates/sql_assistant.json b/agent/templates/sql_assistant.json index d11de2363..27ac46ea6 100644 --- a/agent/templates/sql_assistant.json +++ b/agent/templates/sql_assistant.json @@ -83,7 +83,7 @@ }, "password": "20010812Yy!", "port": 3306, - "sql": "Agent:WickedGoatsDivide@content", + "sql": "{Agent:WickedGoatsDivide@content}", "username": "13637682833@163.com" } }, @@ -114,9 +114,7 @@ "params": { "cross_languages": [], "empty_response": "", - "kb_ids": [ - "ed31364c727211f0bdb2bafe6e7908e6" - ], + "kb_ids": [], "keywords_similarity_weight": 0.7, "outputs": { "formalized_content": { @@ -124,7 +122,7 @@ "value": "" } }, - "query": "sys.query", + "query": "{sys.query}", "rerank_id": "", "similarity_threshold": 0.2, "top_k": 1024, @@ -145,9 +143,7 @@ "params": { "cross_languages": [], "empty_response": "", - "kb_ids": [ - "0f968106727311f08357bafe6e7908e6" - ], + "kb_ids": [], "keywords_similarity_weight": 0.7, "outputs": { "formalized_content": { @@ -155,7 +151,7 @@ "value": "" } }, - "query": "sys.query", + "query": "{sys.query}", "rerank_id": "", "similarity_threshold": 0.2, "top_k": 1024, @@ -176,9 +172,7 @@ "params": { "cross_languages": [], "empty_response": "", - "kb_ids": [ - "4ad1f9d0727311f0827dbafe6e7908e6" - ], + "kb_ids": [], "keywords_similarity_weight": 0.7, "outputs": { "formalized_content": { @@ -186,7 +180,7 @@ "value": "" } }, - "query": "sys.query", + "query": "{sys.query}", "rerank_id": "", "similarity_threshold": 0.2, "top_k": 1024, @@ -347,9 +341,7 @@ "form": { "cross_languages": [], "empty_response": "", - "kb_ids": [ - "ed31364c727211f0bdb2bafe6e7908e6" - ], + "kb_ids": [], "keywords_similarity_weight": 0.7, "outputs": { "formalized_content": { @@ -357,7 +349,7 @@ "value": "" } }, - "query": "sys.query", + "query": "{sys.query}", "rerank_id": "", "similarity_threshold": 0.2, "top_k": 1024, @@ -387,9 +379,7 @@ "form": { "cross_languages": [], "empty_response": "", - "kb_ids": [ - "0f968106727311f08357bafe6e7908e6" - ], + "kb_ids": [], "keywords_similarity_weight": 0.7, "outputs": { "formalized_content": { @@ -397,7 +387,7 @@ "value": "" } }, - "query": "sys.query", + "query": "{sys.query}", "rerank_id": "", "similarity_threshold": 0.2, "top_k": 1024, @@ -427,9 +417,7 @@ "form": { "cross_languages": [], "empty_response": "", - "kb_ids": [ - "4ad1f9d0727311f0827dbafe6e7908e6" - ], + "kb_ids": [], "keywords_similarity_weight": 0.7, "outputs": { "formalized_content": { @@ -437,7 +425,7 @@ "value": "" } }, - "query": "sys.query", + "query": "{sys.query}", "rerank_id": "", "similarity_threshold": 0.2, "top_k": 1024, @@ -539,7 +527,7 @@ }, "password": "20010812Yy!", "port": 3306, - "sql": "Agent:WickedGoatsDivide@content", + "sql": "{Agent:WickedGoatsDivide@content}", "username": "13637682833@163.com" }, "label": "ExeSQL", diff --git a/agent/tools/code_exec.py b/agent/tools/code_exec.py index 
866a523ab..b94dc8d5e 100644 --- a/agent/tools/code_exec.py +++ b/agent/tools/code_exec.py @@ -157,7 +157,7 @@ class CodeExec(ToolBase, ABC): try: resp = requests.post(url=f"http://{settings.SANDBOX_HOST}:9385/run", json=code_req, timeout=os.environ.get("COMPONENT_EXEC_TIMEOUT", 10*60)) - logging.info(f"http://{settings.SANDBOX_HOST}:9385/run", code_req, resp.status_code) + logging.info(f"http://{settings.SANDBOX_HOST}:9385/run, code_req: {code_req}, resp.status_code {resp.status_code}:") if resp.status_code != 200: resp.raise_for_status() body = resp.json() diff --git a/agent/tools/exesql.py b/agent/tools/exesql.py index 317941713..c4bc4fdb4 100644 --- a/agent/tools/exesql.py +++ b/agent/tools/exesql.py @@ -53,7 +53,7 @@ class ExeSQLParam(ToolParamBase): self.max_records = 1024 def check(self): - self.check_valid_value(self.db_type, "Choose DB type", ['mysql', 'postgresql', 'mariadb', 'mssql']) + self.check_valid_value(self.db_type, "Choose DB type", ['mysql', 'postgres', 'mariadb', 'mssql']) self.check_empty(self.database, "Database name") self.check_empty(self.username, "database username") self.check_empty(self.host, "IP Address") @@ -111,7 +111,7 @@ class ExeSQL(ToolBase, ABC): if self._param.db_type in ["mysql", "mariadb"]: db = pymysql.connect(db=self._param.database, user=self._param.username, host=self._param.host, port=self._param.port, password=self._param.password) - elif self._param.db_type == 'postgresql': + elif self._param.db_type == 'postgres': db = psycopg2.connect(dbname=self._param.database, user=self._param.username, host=self._param.host, port=self._param.port, password=self._param.password) elif self._param.db_type == 'mssql': diff --git a/api/apps/HEALTHCHECK_TESTING.md b/api/apps/HEALTHCHECK_TESTING.md new file mode 100644 index 000000000..a97a03c0e --- /dev/null +++ b/api/apps/HEALTHCHECK_TESTING.md @@ -0,0 +1,105 @@ +# Health Checks and Kubernetes Probes: A Brief Guide + +This document explains what K8s probes are, how to use `/v1/system/healthz` for health checks, and what the key terms in the use case below mean. + +## What is a K8s probe +- A probe is the mechanism K8s uses to check whether a container is healthy and able to serve traffic. +- Three common kinds: + - livenessProbe: liveness probe. On failure, K8s restarts the container; used for self-healing when the app hangs or loses its connections. + - readinessProbe: readiness probe. On failure, the Endpoint is not added to the Service load balancer; used to keep traffic away while the app is not yet ready. + - startupProbe: startup probe. Gives slow-starting apps a longer initialization window, during which liveness/readiness checks are not run. +- These probes usually issue an HTTP GET against a public, lightweight health endpoint (no authentication) and judge the result by HTTP status code: 200 = pass; 5xx/timeout = fail. + +## Health endpoint in this project +- Implemented: `GET /v1/system/healthz` (no authentication required). +- Semantics: + - 200: all critical dependencies are healthy. + - 500: any critical dependency is unhealthy (currently determined by DB or Chat). + - Response body: JSON with the minimal fields `status, db, chat`, plus observability items such as `redis, doc_engine, storage`. Failed items carry `error/elapsed` under `_meta`. +- Example (DB failure): +```json +{"status":"nok","chat":"ok","db":"nok"} +``` + +## Use case background (Problem/use case) +- Current situation: Ragflow runs on K8s with an AWS RDS Postgres database; credentials are managed by Secret Manager and rotated every 7 days. After a rotation the application's connections become invalid and the Pod must be restarted manually to reconnect. +- Goal: use K8s probes to automatically detect and restart unhealthy Pods, reducing manual intervention. +- Requirement: a public health endpoint that needs no authentication, returns a non-200 status (e.g. 500) when a dependency fails, and provides JSON details. +- Now satisfied: `/v1/system/healthz` is designed exactly for this. + +## Key terms (matching the description you provided) +- Ragflow instance: the Ragflow service deployed on K8s. +- AWS RDS Postgres: the managed PostgreSQL database instance. +- Secret Manager rotation: secrets are rotated periodically (every 7 days), which invalidates old connections. +- Probes: K8s liveness/readiness probes, used to automatically restart or remove unhealthy instances. +- Public endpoint without API key: an HTTP route that requires no Authorization header, so probes can hit it directly. +- Dependencies statuses: the health of dependencies (db, chat, redis, doc_engine, storage, etc.). +- HTTP 500 with JSON: when a dependency fails, return 500 with a JSON body indicating which subsystem failed. + +## Quick test +- Healthy: +```bash +curl -i http:///v1/system/healthz +``` +- Simulate a DB failure (docker-compose example): +```bash +docker compose stop db && curl -i http:///v1/system/healthz +``` +(Expect 500, with `db:"nok"` in the JSON.) + +## More complete test checklist +### 1) Check only the HTTP status code +```bash +curl -s -o /dev/null -w 
"%{http_code}\n" http:///v1/system/healthz +``` +期望:`200` 或 `500`。 + +### 2) Windows PowerShell +```powershell +# 状态码 +(Invoke-WebRequest -Uri "http:///v1/system/healthz" -Method GET -TimeoutSec 3 -ErrorAction SilentlyContinue).StatusCode +# 完整响应 +Invoke-RestMethod -Uri "http:///v1/system/healthz" -Method GET +``` + +### 3) 通过 kubectl 端口转发本地测试 +```bash +# 前端/网关暴露端口不同环境自行调整 +kubectl port-forward deploy/ 8080:80 -n +curl -i http://127.0.0.1:8080/v1/system/healthz +``` + +### 4) 制造常见失败场景 +- DB 失败(推荐): +```bash +docker compose stop db +curl -i http:///v1/system/healthz # 预期 500 +``` +- Chat 失败(可选):将 `CHAT_CFG` 的 `factory`/`base_url` 设为无效并重启后端,再请求应为 500,且 `chat:"nok"`。 +- Redis/存储/文档引擎:停用对应服务后再次请求,可在 JSON 中看到相应字段为 `"nok"`(不影响 200/500 判定)。 + +### 5) 浏览器验证 +- 直接打开 `http:///v1/system/healthz`,在 DevTools Network 查看 200/500;页面正文就是 JSON。 +- 反向代理注意:若有自定义 500 错页,需对 `/healthz` 关闭错误页拦截(如 `proxy_intercept_errors off;`)。 + +## K8s 探针示例 +```yaml +readinessProbe: + httpGet: + path: /v1/system/healthz + port: 80 + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 2 + failureThreshold: 1 +livenessProbe: + httpGet: + path: /v1/system/healthz + port: 80 + initialDelaySeconds: 10 + periodSeconds: 10 + timeoutSeconds: 2 + failureThreshold: 3 +``` + +提示:如有反向代理(Nginx)自定义 500 错页,需对 `/healthz` 关闭错误页拦截,以便保留 JSON。 diff --git a/api/apps/canvas_app.py b/api/apps/canvas_app.py index 4ced90a3d..f39339601 100644 --- a/api/apps/canvas_app.py +++ b/api/apps/canvas_app.py @@ -28,6 +28,7 @@ from api.db import CanvasCategory, FileType from api.db.services.canvas_service import CanvasTemplateService, UserCanvasService, API4ConversationService from api.db.services.document_service import DocumentService from api.db.services.file_service import FileService +from api.db.services.task_service import queue_dataflow from api.db.services.user_service import TenantService from api.db.services.user_canvas_version import UserCanvasVersionService from api.settings import RetCode @@ -48,14 +49,6 @@ def templates(): return get_json_result(data=[c.to_dict() for c in CanvasTemplateService.query(canvas_category=CanvasCategory.Agent)]) -@manager.route('/list', methods=['GET']) # noqa: F821 -@login_required -def canvas_list(): - return get_json_result(data=sorted([c.to_dict() for c in \ - UserCanvasService.query(user_id=current_user.id, canvas_category=CanvasCategory.Agent)], key=lambda x: x["update_time"]*-1) - ) - - @manager.route('/rm', methods=['POST']) # noqa: F821 @validate_request("canvas_ids") @login_required @@ -77,9 +70,10 @@ def save(): if not isinstance(req["dsl"], str): req["dsl"] = json.dumps(req["dsl"], ensure_ascii=False) req["dsl"] = json.loads(req["dsl"]) + cate = req.get("canvas_category", CanvasCategory.Agent) if "id" not in req: req["user_id"] = current_user.id - if UserCanvasService.query(user_id=current_user.id, title=req["title"].strip(), canvas_category=CanvasCategory.Agent): + if UserCanvasService.query(user_id=current_user.id, title=req["title"].strip(), canvas_category=cate): return get_data_error_result(message=f"{req['title'].strip()} already exists.") req["id"] = get_uuid() if not UserCanvasService.save(**req): @@ -148,6 +142,14 @@ def run(): if not isinstance(cvs.dsl, str): cvs.dsl = json.dumps(cvs.dsl, ensure_ascii=False) + if cvs.canvas_category == CanvasCategory.DataFlow: + task_id = get_uuid() + flow_id = get_uuid() + ok, error_message = queue_dataflow(dsl=cvs.dsl, tenant_id=user_id, file=files[0], task_id=task_id, flow_id=flow_id, priority=0) + if not ok: + return server_error_response(error_message) 
+ return get_json_result(data={"task_id": task_id, "message_id": flow_id}) + try: canvas = Canvas(cvs.dsl, current_user.id, req["id"]) except Exception as e: @@ -332,7 +334,7 @@ def test_db_connect(): if req["db_type"] in ["mysql", "mariadb"]: db = MySQLDatabase(req["database"], user=req["username"], host=req["host"], port=req["port"], password=req["password"]) - elif req["db_type"] == 'postgresql': + elif req["db_type"] == 'postgres': db = PostgresqlDatabase(req["database"], user=req["username"], host=req["host"], port=req["port"], password=req["password"]) elif req["db_type"] == 'mssql': @@ -383,22 +385,31 @@ def getversion( version_id): return get_json_result(data=f"Error getting history file: {e}") -@manager.route('/listteam', methods=['GET']) # noqa: F821 +@manager.route('/list', methods=['GET']) # noqa: F821 @login_required def list_canvas(): keywords = request.args.get("keywords", "") page_number = int(request.args.get("page", 1)) items_per_page = int(request.args.get("page_size", 150)) orderby = request.args.get("orderby", "create_time") - desc = request.args.get("desc", True) - try: + canvas_category = request.args.get("canvas_category") + if request.args.get("desc", "true").lower() == "false": + desc = False + else: + desc = True + owner_ids = request.args.get("owner_ids", []) + if not owner_ids: tenants = TenantService.get_joined_tenants_by_user_id(current_user.id) + tenants = [m["tenant_id"] for m in tenants] canvas, total = UserCanvasService.get_by_tenant_ids( - [m["tenant_id"] for m in tenants], current_user.id, page_number, - items_per_page, orderby, desc, keywords, canvas_category=CanvasCategory.Agent) - return get_json_result(data={"canvas": canvas, "total": total}) - except Exception as e: - return server_error_response(e) + tenants, current_user.id, page_number, + items_per_page, orderby, desc, keywords, canvas_category) + else: + tenants = owner_ids + canvas, total = UserCanvasService.get_by_tenant_ids( + tenants, current_user.id, 0, + 0, orderby, desc, keywords, canvas_category) + return get_json_result(data={"canvas": canvas, "total": total}) @manager.route('/setting', methods=['POST']) # noqa: F821 diff --git a/api/apps/document_app.py b/api/apps/document_app.py index c6280c6fb..1031f1ea4 100644 --- a/api/apps/document_app.py +++ b/api/apps/document_app.py @@ -182,6 +182,7 @@ def create(): "id": get_uuid(), "kb_id": kb.id, "parser_id": kb.parser_id, + "pipeline_id": kb.pipeline_id, "parser_config": kb.parser_config, "created_by": current_user.id, "type": FileType.VIRTUAL, @@ -546,31 +547,22 @@ def get(doc_id): @manager.route("/change_parser", methods=["POST"]) # noqa: F821 @login_required -@validate_request("doc_id", "parser_id") +@validate_request("doc_id") def change_parser(): req = request.json if not DocumentService.accessible(req["doc_id"], current_user.id): return get_json_result(data=False, message="No authorization.", code=settings.RetCode.AUTHENTICATION_ERROR) - try: - e, doc = DocumentService.get_by_id(req["doc_id"]) - if not e: - return get_data_error_result(message="Document not found!") - if doc.parser_id.lower() == req["parser_id"].lower(): - if "parser_config" in req: - if req["parser_config"] == doc.parser_config: - return get_json_result(data=True) - else: - return get_json_result(data=True) - if (doc.type == FileType.VISUAL and req["parser_id"] != "picture") or (re.search(r"\.(ppt|pptx|pages)$", doc.name) and req["parser_id"] != "presentation"): - return get_data_error_result(message="Not supported yet!") + e, doc = 
DocumentService.get_by_id(req["doc_id"]) + if not e: + return get_data_error_result(message="Document not found!") + def reset_doc(): + nonlocal doc e = DocumentService.update_by_id(doc.id, {"parser_id": req["parser_id"], "progress": 0, "progress_msg": "", "run": TaskStatus.UNSTART.value}) if not e: return get_data_error_result(message="Document not found!") - if "parser_config" in req: - DocumentService.update_parser_config(doc.id, req["parser_config"]) if doc.token_num > 0: e = DocumentService.increment_chunk_num(doc.id, doc.kb_id, doc.token_num * -1, doc.chunk_num * -1, doc.process_duration * -1) if not e: @@ -581,6 +573,26 @@ def change_parser(): if settings.docStoreConn.indexExist(search.index_name(tenant_id), doc.kb_id): settings.docStoreConn.delete({"doc_id": doc.id}, search.index_name(tenant_id), doc.kb_id) + try: + if req.get("pipeline_id"): + if doc.pipeline_id == req["pipeline_id"]: + return get_json_result(data=True) + DocumentService.update_by_id(doc.id, {"pipeline_id": req["pipeline_id"]}) + reset_doc() + return get_json_result(data=True) + + if doc.parser_id.lower() == req["parser_id"].lower(): + if "parser_config" in req: + if req["parser_config"] == doc.parser_config: + return get_json_result(data=True) + else: + return get_json_result(data=True) + + if (doc.type == FileType.VISUAL and req["parser_id"] != "picture") or (re.search(r"\.(ppt|pptx|pages)$", doc.name) and req["parser_id"] != "presentation"): + return get_data_error_result(message="Not supported yet!") + if "parser_config" in req: + DocumentService.update_parser_config(doc.id, req["parser_config"]) + reset_doc() return get_json_result(data=True) except Exception as e: return server_error_response(e) diff --git a/api/apps/kb_app.py b/api/apps/kb_app.py index 2e86a31bd..c9420c1ff 100644 --- a/api/apps/kb_app.py +++ b/api/apps/kb_app.py @@ -64,7 +64,7 @@ def create(): e, t = TenantService.get_by_id(current_user.id) if not e: return get_data_error_result(message="Tenant not found.") - req["embd_id"] = t.embd_id + #req["embd_id"] = t.embd_id if not KnowledgebaseService.save(**req): return get_data_error_result() return get_json_result(data={"kb_id": req["id"]}) @@ -379,3 +379,19 @@ def get_meta(): code=settings.RetCode.AUTHENTICATION_ERROR ) return get_json_result(data=DocumentService.get_meta_by_kbs(kb_ids)) + + +@manager.route("/basic_info", methods=["GET"]) # noqa: F821 +@login_required +def get_basic_info(): + kb_id = request.args.get("kb_id", "") + if not KnowledgebaseService.accessible(kb_id, current_user.id): + return get_json_result( + data=False, + message='No authorization.', + code=settings.RetCode.AUTHENTICATION_ERROR + ) + + basic_info = DocumentService.knowledgebase_basic_info(kb_id) + + return get_json_result(data=basic_info) diff --git a/api/apps/sdk/session.py b/api/apps/sdk/session.py index 80e45a778..8e4f5ee67 100644 --- a/api/apps/sdk/session.py +++ b/api/apps/sdk/session.py @@ -414,7 +414,7 @@ def agents_completion_openai_compatibility(tenant_id, agent_id): tenant_id, agent_id, question, - session_id=req.get("session_id", req.get("id", "") or req.get("metadata", {}).get("id", "")), + session_id=req.pop("session_id", req.get("id", "")) or req.get("metadata", {}).get("id", ""), stream=True, **req, ), @@ -432,7 +432,7 @@ def agents_completion_openai_compatibility(tenant_id, agent_id): tenant_id, agent_id, question, - session_id=req.get("session_id", req.get("id", "") or req.get("metadata", {}).get("id", "")), + session_id=req.pop("session_id", req.get("id", "")) or req.get("metadata", {}).get("id", 
""), stream=False, **req, ) diff --git a/api/apps/system_app.py b/api/apps/system_app.py index c4a70bcac..df17e4b57 100644 --- a/api/apps/system_app.py +++ b/api/apps/system_app.py @@ -36,6 +36,8 @@ from rag.utils.storage_factory import STORAGE_IMPL, STORAGE_IMPL_TYPE from timeit import default_timer as timer from rag.utils.redis_conn import REDIS_CONN +from flask import jsonify +from api.utils.health import run_health_checks @manager.route("/version", methods=["GET"]) # noqa: F821 @login_required @@ -169,6 +171,12 @@ def status(): return get_json_result(data=res) +@manager.route("/healthz", methods=["GET"]) # noqa: F821 +def healthz(): + result, all_ok = run_health_checks() + return jsonify(result), (200 if all_ok else 500) + + @manager.route("/new_token", methods=["POST"]) # noqa: F821 @login_required def new_token(): diff --git a/api/db/db_models.py b/api/db/db_models.py index cda279f22..98db65be3 100644 --- a/api/db/db_models.py +++ b/api/db/db_models.py @@ -646,6 +646,7 @@ class Knowledgebase(DataBaseModel): vector_similarity_weight = FloatField(default=0.3, index=True) parser_id = CharField(max_length=32, null=False, help_text="default parser ID", default=ParserType.NAIVE.value, index=True) + pipeline_id = CharField(max_length=32, null=True, help_text="Pipeline ID", index=True) parser_config = JSONField(null=False, default={"pages": [[1, 1000000]]}) pagerank = IntegerField(default=0, index=False) status = CharField(max_length=1, null=True, help_text="is it validate(0: wasted, 1: validate)", default="1", index=True) @@ -662,6 +663,7 @@ class Document(DataBaseModel): thumbnail = TextField(null=True, help_text="thumbnail base64 string") kb_id = CharField(max_length=256, null=False, index=True) parser_id = CharField(max_length=32, null=False, help_text="default parser ID", index=True) + pipeline_id = CharField(max_length=32, null=True, help_text="pipleline ID", index=True) parser_config = JSONField(null=False, default={"pages": [[1, 1000000]]}) source_type = CharField(max_length=128, null=False, default="local", help_text="where dose this document come from", index=True) type = CharField(max_length=32, null=False, help_text="file extension", index=True) @@ -1020,7 +1022,6 @@ def migrate_db(): migrate(migrator.add_column("dialog", "meta_data_filter", JSONField(null=True, default={}))) except Exception: pass - try: migrate(migrator.alter_column_type("canvas_template", "title", JSONField(null=True, default=dict, help_text="Canvas title"))) except Exception: @@ -1037,4 +1038,12 @@ def migrate_db(): migrate(migrator.add_column("canvas_template", "canvas_category", CharField(max_length=32, null=False, default="agent_canvas", help_text="agent_canvas|dataflow_canvas", index=True))) except Exception: pass + try: + migrate(migrator.add_column("knowledgebase", "pipeline_id", CharField(max_length=32, null=True, help_text="default parser ID", index=True))) + except Exception: + pass + try: + migrate(migrator.add_column("document", "pipeline_id", CharField(max_length=32, null=True, help_text="default parser ID", index=True))) + except Exception: + pass logging.disable(logging.NOTSET) diff --git a/api/db/services/canvas_service.py b/api/db/services/canvas_service.py index ddb00ac11..4766ca821 100644 --- a/api/db/services/canvas_service.py +++ b/api/db/services/canvas_service.py @@ -95,7 +95,7 @@ class UserCanvasService(CommonService): @DB.connection_context() def get_by_tenant_ids(cls, joined_tenant_ids, user_id, page_number, items_per_page, - orderby, desc, keywords, 
canvas_category=CanvasCategory.Agent, + orderby, desc, keywords, canvas_category=None ): fields = [ cls.model.id, @@ -122,7 +122,8 @@ class UserCanvasService(CommonService): TenantPermission.TEAM.value)) | ( cls.model.user_id == user_id)) ) - agents = agents.where(cls.model.canvas_category == canvas_category) + if canvas_category: + agents = agents.where(cls.model.canvas_category == canvas_category) if desc: agents = agents.order_by(cls.model.getter_by(orderby).desc()) else: diff --git a/api/db/services/document_service.py b/api/db/services/document_service.py index d58bfb322..23eef474f 100644 --- a/api/db/services/document_service.py +++ b/api/db/services/document_service.py @@ -24,7 +24,7 @@ from io import BytesIO import trio import xxhash -from peewee import fn +from peewee import fn, Case from api import settings from api.constants import IMG_BASE64_PREFIX, FILE_NAME_LEN_LIMIT @@ -674,6 +674,53 @@ class DocumentService(CommonService): return False + @classmethod + @DB.connection_context() + def knowledgebase_basic_info(cls, kb_id: str) -> dict[str, int]: + # cancelled: run == "2" but progress can vary + cancelled = ( + cls.model.select(fn.COUNT(1)) + .where((cls.model.kb_id == kb_id) & (cls.model.run == TaskStatus.CANCEL)) + .scalar() + ) + + row = ( + cls.model.select( + # finished: progress == 1 + fn.COALESCE(fn.SUM(Case(None, [(cls.model.progress == 1, 1)], 0)), 0).alias("finished"), + + # failed: progress == -1 + fn.COALESCE(fn.SUM(Case(None, [(cls.model.progress == -1, 1)], 0)), 0).alias("failed"), + + # processing: 0 <= progress < 1 + fn.COALESCE( + fn.SUM( + Case( + None, + [ + (((cls.model.progress == 0) | ((cls.model.progress > 0) & (cls.model.progress < 1))), 1), + ], + 0, + ) + ), + 0, + ).alias("processing"), + ) + .where( + (cls.model.kb_id == kb_id) + & ((cls.model.run.is_null(True)) | (cls.model.run != TaskStatus.CANCEL)) + ) + .dicts() + .get() + ) + + return { + "processing": int(row["processing"]), + "finished": int(row["finished"]), + "failed": int(row["failed"]), + "cancelled": int(cancelled), + } + def queue_raptor_o_graphrag_tasks(doc, ty, priority): chunking_config = DocumentService.get_chunking_config(doc["id"]) hasher = xxhash.xxh64() @@ -702,6 +749,8 @@ def queue_raptor_o_graphrag_tasks(doc, ty, priority): def get_queue_length(priority): group_info = REDIS_CONN.queue_info(get_svr_queue_name(priority), SVR_CONSUMER_GROUP_NAME) + if not group_info: + return 0 return int(group_info.get("lag", 0) or 0) @@ -847,3 +896,4 @@ def doc_upload_and_parse(conversation_id, file_objs, user_id): doc_id, kb.id, token_counts[doc_id], chunk_counts[doc_id], 0) return [d["id"] for d, _ in files] + diff --git a/api/db/services/file_service.py b/api/db/services/file_service.py index 24f923d38..68d720aa4 100644 --- a/api/db/services/file_service.py +++ b/api/db/services/file_service.py @@ -440,6 +440,7 @@ class FileService(CommonService): "id": doc_id, "kb_id": kb.id, "parser_id": self.get_parser(filetype, filename, kb.parser_id), + "pipeline_id": kb.pipeline_id, "parser_config": kb.parser_config, "created_by": user_id, "type": filetype, diff --git a/api/db/services/task_service.py b/api/db/services/task_service.py index 46087f8ba..61d13419a 100644 --- a/api/db/services/task_service.py +++ b/api/db/services/task_service.py @@ -472,19 +472,19 @@ def has_canceled(task_id): return False -def queue_dataflow(dsl:str, tenant_id:str, doc_id:str, task_id:str, flow_id:str, priority: int, callback=None) -> tuple[bool, str]: +def queue_dataflow(dsl:str, tenant_id:str, task_id:str, 
flow_id:str=None, doc_id:str=None, file:dict=None, priority: int=0, callback=None) -> tuple[bool, str]: """ Returns a tuple (success: bool, error_message: str). """ _ = callback task = dict( - id=get_uuid() if not task_id else task_id, - doc_id=doc_id, - from_page=0, - to_page=100000000, - task_type="dataflow", - priority=priority, + id=get_uuid() if not task_id else task_id, + doc_id=doc_id, + from_page=0, + to_page=100000000, + task_type="dataflow", + priority=priority, ) TaskService.model.delete().where(TaskService.model.id == task["id"]).execute() @@ -499,6 +499,7 @@ def queue_dataflow(dsl:str, tenant_id:str, doc_id:str, task_id:str, flow_id:str, task["task_type"] = "dataflow" task["dsl"] = dsl task["dataflow_id"] = get_uuid() if not flow_id else flow_id + task["file"] = file if not REDIS_CONN.queue_product( get_svr_queue_name(priority), message=task diff --git a/api/utils/base64_image.py b/api/utils/base64_image.py index d9dd4cde9..ed01d2708 100644 --- a/api/utils/base64_image.py +++ b/api/utils/base64_image.py @@ -1,3 +1,51 @@ import base64 +from functools import partial +from io import BytesIO + +from PIL import Image + test_image_base64 = "iVBORw0KGgoAAAANSUhEUgAAAGQAAABkCAIAAAD/gAIDAAAA6ElEQVR4nO3QwQ3AIBDAsIP9d25XIC+EZE8QZc18w5l9O+AlZgVmBWYFZgVmBWYFZgVmBWYFZgVmBWYFZgVmBWYFZgVmBWYFZgVmBWYFZgVmBWYFZgVmBWYFZgVmBWYFZgVmBWYFZgVmBWYFZgVmBWYFZgVmBWYFZgVmBWYFZgVmBWYFZgVmBWYFZgVmBWYFZgVmBWYFZgVmBWYFZgVmBWYFZgVmBWYFZgVmBWYFZgVmBWYFZgVmBWYFZgVmBWYFZgVmBWYFZgVmBWYFZgVmBWYFZgVmBWYFZgVmBWYFZgVmBWYFZgVmBT+IYAHHLHkdEgAAAABJRU5ErkJggg==" -test_image = base64.b64decode(test_image_base64) \ No newline at end of file +test_image = base64.b64decode(test_image_base64) + +async def image2id(d: dict, storage_put_func: partial, bucket:str, objname:str): + import logging + from io import BytesIO + import trio + from rag.svr.task_executor import minio_limiter + if not d.get("image"): + return + + with BytesIO() as output_buffer: + if isinstance(d["image"], bytes): + output_buffer.write(d["image"]) + output_buffer.seek(0) + else: + # If the image is in RGBA mode, convert it to RGB mode before saving it in JPEG format. 
+ if d["image"].mode in ("RGBA", "P"): + converted_image = d["image"].convert("RGB") + d["image"] = converted_image + try: + d["image"].save(output_buffer, format='JPEG') + except OSError as e: + logging.warning( + "Saving image exception, ignore: {}".format(str(e))) + + async with minio_limiter: + await trio.to_thread.run_sync(lambda: storage_put_func(bucket=bucket, fnm=objname, binary=output_buffer.getvalue())) + d["img_id"] = f"{bucket}-{objname}" + if not isinstance(d["image"], bytes): + d["image"].close() + del d["image"] # Remove image reference + + +def id2image(image_id:str|None, storage_get_func: partial): + if not image_id: + return + arr = image_id.split("-") + if len(arr) != 2: + return + bkt, nm = image_id.split("-") + blob = storage_get_func(bucket=bkt, filename=nm) + if not blob: + return + return Image.open(BytesIO(blob)) \ No newline at end of file diff --git a/api/utils/health.py b/api/utils/health.py new file mode 100644 index 000000000..394154b9a --- /dev/null +++ b/api/utils/health.py @@ -0,0 +1,104 @@ +from timeit import default_timer as timer + +from api import settings +from api.db.db_models import DB +from rag.utils.redis_conn import REDIS_CONN +from rag.utils.storage_factory import STORAGE_IMPL + + +def _ok_nok(ok: bool) -> str: + return "ok" if ok else "nok" + + +def check_db() -> tuple[bool, dict]: + st = timer() + try: + # lightweight probe; works for MySQL/Postgres + DB.execute_sql("SELECT 1") + return True, {"elapsed": f"{(timer() - st) * 1000.0:.1f}"} + except Exception as e: + return False, {"elapsed": f"{(timer() - st) * 1000.0:.1f}", "error": str(e)} + + +def check_redis() -> tuple[bool, dict]: + st = timer() + try: + ok = bool(REDIS_CONN.health()) + return ok, {"elapsed": f"{(timer() - st) * 1000.0:.1f}"} + except Exception as e: + return False, {"elapsed": f"{(timer() - st) * 1000.0:.1f}", "error": str(e)} + + +def check_doc_engine() -> tuple[bool, dict]: + st = timer() + try: + meta = settings.docStoreConn.health() + # treat any successful call as ok + return True, {"elapsed": f"{(timer() - st) * 1000.0:.1f}", **(meta or {})} + except Exception as e: + return False, {"elapsed": f"{(timer() - st) * 1000.0:.1f}", "error": str(e)} + + +def check_storage() -> tuple[bool, dict]: + st = timer() + try: + STORAGE_IMPL.health() + return True, {"elapsed": f"{(timer() - st) * 1000.0:.1f}"} + except Exception as e: + return False, {"elapsed": f"{(timer() - st) * 1000.0:.1f}", "error": str(e)} + + +def check_chat() -> tuple[bool, dict]: + st = timer() + try: + cfg = getattr(settings, "CHAT_CFG", None) + ok = bool(cfg and cfg.get("factory")) + return ok, {"elapsed": f"{(timer() - st) * 1000.0:.1f}"} + except Exception as e: + return False, {"elapsed": f"{(timer() - st) * 1000.0:.1f}", "error": str(e)} + + +def run_health_checks() -> tuple[dict, bool]: + result: dict[str, str | dict] = {} + + db_ok, db_meta = check_db() + chat_ok, chat_meta = check_chat() + + result["db"] = _ok_nok(db_ok) + if not db_ok: + result.setdefault("_meta", {})["db"] = db_meta + + result["chat"] = _ok_nok(chat_ok) + if not chat_ok: + result.setdefault("_meta", {})["chat"] = chat_meta + + # Optional probes (do not change minimal contract but exposed for observability) + try: + redis_ok, redis_meta = check_redis() + result["redis"] = _ok_nok(redis_ok) + if not redis_ok: + result.setdefault("_meta", {})["redis"] = redis_meta + except Exception: + result["redis"] = "nok" + + try: + doc_ok, doc_meta = check_doc_engine() + result["doc_engine"] = _ok_nok(doc_ok) + if not doc_ok: + 
result.setdefault("_meta", {})["doc_engine"] = doc_meta + except Exception: + result["doc_engine"] = "nok" + + try: + sto_ok, sto_meta = check_storage() + result["storage"] = _ok_nok(sto_ok) + if not sto_ok: + result.setdefault("_meta", {})["storage"] = sto_meta + except Exception: + result["storage"] = "nok" + + all_ok = (result.get("db") == "ok") and (result.get("chat") == "ok") + result["status"] = "ok" if all_ok else "nok" + return result, all_ok + + diff --git a/conf/llm_factories.json b/conf/llm_factories.json index 476307206..686e97373 100644 --- a/conf/llm_factories.json +++ b/conf/llm_factories.json @@ -219,6 +219,70 @@ } ] }, + { + "name": "TokenPony", + "logo": "", + "tags": "LLM", + "status": "1", + "llm": [ + { + "llm_name": "qwen3-8b", + "tags": "LLM,CHAT,131k", + "max_tokens": 131000, + "model_type": "chat", + "is_tools": true + }, + { + "llm_name": "deepseek-v3-0324", + "tags": "LLM,CHAT,128k", + "max_tokens": 128000, + "model_type": "chat", + "is_tools": true + }, + { + "llm_name": "qwen3-32b", + "tags": "LLM,CHAT,131k", + "max_tokens": 131000, + "model_type": "chat", + "is_tools": true + }, + { + "llm_name": "kimi-k2-instruct", + "tags": "LLM,CHAT,128K", + "max_tokens": 128000, + "model_type": "chat", + "is_tools": true + }, + { + "llm_name": "deepseek-r1-0528", + "tags": "LLM,CHAT,164k", + "max_tokens": 164000, + "model_type": "chat", + "is_tools": true + }, + { + "llm_name": "qwen3-coder-480b", + "tags": "LLM,CHAT,1024k", + "max_tokens": 1024000, + "model_type": "chat", + "is_tools": true + }, + { + "llm_name": "glm-4.5", + "tags": "LLM,CHAT,131K", + "max_tokens": 131000, + "model_type": "chat", + "is_tools": true + }, + { + "llm_name": "deepseek-v3.1", + "tags": "LLM,CHAT,128k", + "max_tokens": 128000, + "model_type": "chat", + "is_tools": true + } + ] + }, { "name": "Tongyi-Qianwen", "logo": "", @@ -625,7 +689,7 @@ }, { "llm_name": "glm-4", - "tags":"LLM,CHAT,128K", + "tags": "LLM,CHAT,128K", "max_tokens": 128000, "model_type": "chat", "is_tools": true @@ -4477,6 +4541,273 @@ } ] }, + { + "name": "CometAPI", + "logo": "", + "tags": "LLM,TEXT EMBEDDING,IMAGE2TEXT", + "status": "1", + "llm": [ + { + "llm_name": "gpt-5-chat-latest", + "tags": "LLM,CHAT,400k", + "max_tokens": 400000, + "model_type": "chat", + "is_tools": true + }, + { + "llm_name": "chatgpt-4o-latest", + "tags": "LLM,CHAT,128k", + "max_tokens": 128000, + "model_type": "chat", + "is_tools": true + }, + { + "llm_name": "gpt-5-mini", + "tags": "LLM,CHAT,400k", + "max_tokens": 400000, + "model_type": "chat", + "is_tools": true + }, + { + "llm_name": "gpt-5-nano", + "tags": "LLM,CHAT,400k", + "max_tokens": 400000, + "model_type": "chat", + "is_tools": true + }, + { + "llm_name": "gpt-5", + "tags": "LLM,CHAT,400k", + "max_tokens": 400000, + "model_type": "chat", + "is_tools": true + }, + { + "llm_name": "gpt-4.1-mini", + "tags": "LLM,CHAT,1M", + "max_tokens": 1047576, + "model_type": "chat", + "is_tools": true + }, + { + "llm_name": "gpt-4.1-nano", + "tags": "LLM,CHAT,1M", + "max_tokens": 1047576, + "model_type": "chat", + "is_tools": true + }, + { + "llm_name": "gpt-4.1", + "tags": "LLM,CHAT,1M", + "max_tokens": 1047576, + "model_type": "chat", + "is_tools": true + }, + { + "llm_name": "gpt-4o-mini", + "tags": "LLM,CHAT,128k", + "max_tokens": 128000, + "model_type": "chat", + "is_tools": true + }, + { + "llm_name": "o4-mini-2025-04-16", + "tags": "LLM,CHAT,200k", + "max_tokens": 200000, + "model_type": "chat", + "is_tools": true + }, + { + "llm_name": "o3-pro-2025-06-10", + "tags": "LLM,CHAT,200k", + 
"max_tokens": 200000, + "model_type": "chat", + "is_tools": true + }, + { + "llm_name": "claude-opus-4-1-20250805", + "tags": "LLM,CHAT,200k,IMAGE2TEXT", + "max_tokens": 200000, + "model_type": "image2text", + "is_tools": true + }, + { + "llm_name": "claude-opus-4-1-20250805-thinking", + "tags": "LLM,CHAT,200k,IMAGE2TEXT", + "max_tokens": 200000, + "model_type": "image2text", + "is_tools": true + }, + { + "llm_name": "claude-sonnet-4-20250514", + "tags": "LLM,CHAT,200k,IMAGE2TEXT", + "max_tokens": 200000, + "model_type": "image2text", + "is_tools": true + }, + { + "llm_name": "claude-sonnet-4-20250514-thinking", + "tags": "LLM,CHAT,200k,IMAGE2TEXT", + "max_tokens": 200000, + "model_type": "image2text", + "is_tools": true + }, + { + "llm_name": "claude-3-7-sonnet-latest", + "tags": "LLM,CHAT,200k", + "max_tokens": 200000, + "model_type": "chat", + "is_tools": true + }, + { + "llm_name": "claude-3-5-haiku-latest", + "tags": "LLM,CHAT,200k", + "max_tokens": 200000, + "model_type": "chat", + "is_tools": true + }, + { + "llm_name": "gemini-2.5-pro", + "tags": "LLM,CHAT,1M,IMAGE2TEXT", + "max_tokens": 1000000, + "model_type": "image2text", + "is_tools": true + }, + { + "llm_name": "gemini-2.5-flash", + "tags": "LLM,CHAT,1M,IMAGE2TEXT", + "max_tokens": 1000000, + "model_type": "image2text", + "is_tools": true + }, + { + "llm_name": "gemini-2.5-flash-lite", + "tags": "LLM,CHAT,1M,IMAGE2TEXT", + "max_tokens": 1000000, + "model_type": "image2text", + "is_tools": true + }, + { + "llm_name": "gemini-2.0-flash", + "tags": "LLM,CHAT,1M,IMAGE2TEXT", + "max_tokens": 1000000, + "model_type": "image2text", + "is_tools": true + }, + { + "llm_name": "grok-4-0709", + "tags": "LLM,CHAT,131k", + "max_tokens": 131072, + "model_type": "chat", + "is_tools": true + }, + { + "llm_name": "grok-3", + "tags": "LLM,CHAT,131k", + "max_tokens": 131072, + "model_type": "chat", + "is_tools": true + }, + { + "llm_name": "grok-3-mini", + "tags": "LLM,CHAT,131k", + "max_tokens": 131072, + "model_type": "chat", + "is_tools": true + }, + { + "llm_name": "grok-2-image-1212", + "tags": "LLM,CHAT,32k,IMAGE2TEXT", + "max_tokens": 32768, + "model_type": "image2text", + "is_tools": true + }, + { + "llm_name": "deepseek-v3.1", + "tags": "LLM,CHAT,64k", + "max_tokens": 64000, + "model_type": "chat", + "is_tools": true + }, + { + "llm_name": "deepseek-v3", + "tags": "LLM,CHAT,64k", + "max_tokens": 64000, + "model_type": "chat", + "is_tools": true + }, + { + "llm_name": "deepseek-r1-0528", + "tags": "LLM,CHAT,164k", + "max_tokens": 164000, + "model_type": "chat", + "is_tools": true + }, + { + "llm_name": "deepseek-chat", + "tags": "LLM,CHAT,32k", + "max_tokens": 32000, + "model_type": "chat", + "is_tools": true + }, + { + "llm_name": "deepseek-reasoner", + "tags": "LLM,CHAT,64k", + "max_tokens": 64000, + "model_type": "chat", + "is_tools": true + }, + { + "llm_name": "qwen3-30b-a3b", + "tags": "LLM,CHAT,128k", + "max_tokens": 128000, + "model_type": "chat", + "is_tools": true + }, + { + "llm_name": "qwen3-coder-plus-2025-07-22", + "tags": "LLM,CHAT,128k", + "max_tokens": 128000, + "model_type": "chat", + "is_tools": true + }, + { + "llm_name": "text-embedding-ada-002", + "tags": "TEXT EMBEDDING,8K", + "max_tokens": 8191, + "model_type": "embedding", + "is_tools": false + }, + { + "llm_name": "text-embedding-3-small", + "tags": "TEXT EMBEDDING,8K", + "max_tokens": 8191, + "model_type": "embedding", + "is_tools": false + }, + { + "llm_name": "text-embedding-3-large", + "tags": "TEXT EMBEDDING,8K", + "max_tokens": 8191, + "model_type": 
"embedding", + "is_tools": false + }, + { + "llm_name": "whisper-1", + "tags": "SPEECH2TEXT", + "max_tokens": 26214400, + "model_type": "speech2text", + "is_tools": false + }, + { + "llm_name": "tts-1", + "tags": "TTS", + "max_tokens": 2048, + "model_type": "tts", + "is_tools": false + } + ] + }, { "name": "Meituan", "logo": "", @@ -4493,4 +4824,4 @@ ] } ] -} +} \ No newline at end of file diff --git a/deepdoc/parser/html_parser.py b/deepdoc/parser/html_parser.py index 71bbb706a..44ff10389 100644 --- a/deepdoc/parser/html_parser.py +++ b/deepdoc/parser/html_parser.py @@ -37,7 +37,7 @@ TITLE_TAGS = {"h1": "#", "h2": "##", "h3": "###", "h4": "#####", "h5": "#####", class RAGFlowHtmlParser: - def __call__(self, fnm, binary=None, chunk_token_num=None): + def __call__(self, fnm, binary=None, chunk_token_num=512): if binary: encoding = find_codec(binary) txt = binary.decode(encoding, errors="ignore") diff --git a/deepdoc/parser/pdf_parser.py b/deepdoc/parser/pdf_parser.py index c2c6fcafe..c6c47c593 100644 --- a/deepdoc/parser/pdf_parser.py +++ b/deepdoc/parser/pdf_parser.py @@ -34,7 +34,7 @@ from pypdf import PdfReader as pdf2_read from api import settings from api.utils.file_utils import get_project_base_directory -from deepdoc.vision import OCR, LayoutRecognizer, Recognizer, TableStructureRecognizer +from deepdoc.vision import OCR, AscendLayoutRecognizer, LayoutRecognizer, Recognizer, TableStructureRecognizer from rag.app.picture import vision_llm_chunk as picture_vision_llm_chunk from rag.nlp import rag_tokenizer from rag.prompts import vision_llm_describe_prompt @@ -64,33 +64,38 @@ class RAGFlowPdfParser: if PARALLEL_DEVICES > 1: self.parallel_limiter = [trio.CapacityLimiter(1) for _ in range(PARALLEL_DEVICES)] + layout_recognizer_type = os.getenv("LAYOUT_RECOGNIZER_TYPE", "onnx").lower() + if layout_recognizer_type not in ["onnx", "ascend"]: + raise RuntimeError("Unsupported layout recognizer type.") + if hasattr(self, "model_speciess"): - self.layouter = LayoutRecognizer("layout." + self.model_speciess) + recognizer_domain = "layout." 
+ self.model_speciess else: - self.layouter = LayoutRecognizer("layout") + recognizer_domain = "layout" + + if layout_recognizer_type == "ascend": + logging.debug("Using Ascend LayoutRecognizer") + self.layouter = AscendLayoutRecognizer(recognizer_domain) + else: # onnx + logging.debug("Using Onnx LayoutRecognizer") + self.layouter = LayoutRecognizer(recognizer_domain) self.tbl_det = TableStructureRecognizer() self.updown_cnt_mdl = xgb.Booster() if not settings.LIGHTEN: try: import torch.cuda + if torch.cuda.is_available(): self.updown_cnt_mdl.set_param({"device": "cuda"}) except Exception: logging.exception("RAGFlowPdfParser __init__") try: - model_dir = os.path.join( - get_project_base_directory(), - "rag/res/deepdoc") - self.updown_cnt_mdl.load_model(os.path.join( - model_dir, "updown_concat_xgb.model")) + model_dir = os.path.join(get_project_base_directory(), "rag/res/deepdoc") + self.updown_cnt_mdl.load_model(os.path.join(model_dir, "updown_concat_xgb.model")) except Exception: - model_dir = snapshot_download( - repo_id="InfiniFlow/text_concat_xgb_v1.0", - local_dir=os.path.join(get_project_base_directory(), "rag/res/deepdoc"), - local_dir_use_symlinks=False) - self.updown_cnt_mdl.load_model(os.path.join( - model_dir, "updown_concat_xgb.model")) + model_dir = snapshot_download(repo_id="InfiniFlow/text_concat_xgb_v1.0", local_dir=os.path.join(get_project_base_directory(), "rag/res/deepdoc"), local_dir_use_symlinks=False) + self.updown_cnt_mdl.load_model(os.path.join(model_dir, "updown_concat_xgb.model")) self.page_from = 0 self.column_num = 1 @@ -102,13 +107,10 @@ class RAGFlowPdfParser: return c["bottom"] - c["top"] def _x_dis(self, a, b): - return min(abs(a["x1"] - b["x0"]), abs(a["x0"] - b["x1"]), - abs(a["x0"] + a["x1"] - b["x0"] - b["x1"]) / 2) + return min(abs(a["x1"] - b["x0"]), abs(a["x0"] - b["x1"]), abs(a["x0"] + a["x1"] - b["x0"] - b["x1"]) / 2) - def _y_dis( - self, a, b): - return ( - b["top"] + b["bottom"] - a["top"] - a["bottom"]) / 2 + def _y_dis(self, a, b): + return (b["top"] + b["bottom"] - a["top"] - a["bottom"]) / 2 def _match_proj(self, b): proj_patt = [ @@ -130,10 +132,7 @@ class RAGFlowPdfParser: LEN = 6 tks_down = rag_tokenizer.tokenize(down["text"][:LEN]).split() tks_up = rag_tokenizer.tokenize(up["text"][-LEN:]).split() - tks_all = up["text"][-LEN:].strip() \ - + (" " if re.match(r"[a-zA-Z0-9]+", - up["text"][-1] + down["text"][0]) else "") \ - + down["text"][:LEN].strip() + tks_all = up["text"][-LEN:].strip() + (" " if re.match(r"[a-zA-Z0-9]+", up["text"][-1] + down["text"][0]) else "") + down["text"][:LEN].strip() tks_all = rag_tokenizer.tokenize(tks_all).split() fea = [ up.get("R", -1) == down.get("R", -1), @@ -144,39 +143,30 @@ class RAGFlowPdfParser: down["layout_type"] == "text", up["layout_type"] == "table", down["layout_type"] == "table", - True if re.search( - r"([。?!;!?;+))]|[a-z]\.)$", - up["text"]) else False, + True if re.search(r"([。?!;!?;+))]|[a-z]\.)$", up["text"]) else False, True if re.search(r"[,:‘“、0-9(+-]$", up["text"]) else False, - True if re.search( - r"(^.?[/,?;:\],。;:’”?!》】)-])", - down["text"]) else False, + True if re.search(r"(^.?[/,?;:\],。;:’”?!》】)-])", down["text"]) else False, True if re.match(r"[\((][^\(\)()]+[)\)]$", up["text"]) else False, True if re.search(r"[,,][^。.]+$", up["text"]) else False, True if re.search(r"[,,][^。.]+$", up["text"]) else False, - True if re.search(r"[\((][^\))]+$", up["text"]) - and re.search(r"[\))]", down["text"]) else False, + True if re.search(r"[\((][^\))]+$", up["text"]) and 
re.search(r"[\))]", down["text"]) else False, self._match_proj(down), True if re.match(r"[A-Z]", down["text"]) else False, True if re.match(r"[A-Z]", up["text"][-1]) else False, True if re.match(r"[a-z0-9]", up["text"][-1]) else False, True if re.match(r"[0-9.%,-]+$", down["text"]) else False, - up["text"].strip()[-2:] == down["text"].strip()[-2:] if len(up["text"].strip() - ) > 1 and len( - down["text"].strip()) > 1 else False, + up["text"].strip()[-2:] == down["text"].strip()[-2:] if len(up["text"].strip()) > 1 and len(down["text"].strip()) > 1 else False, up["x0"] > down["x1"], - abs(self.__height(up) - self.__height(down)) / min(self.__height(up), - self.__height(down)), + abs(self.__height(up) - self.__height(down)) / min(self.__height(up), self.__height(down)), self._x_dis(up, down) / max(w, 0.000001), - (len(up["text"]) - len(down["text"])) / - max(len(up["text"]), len(down["text"])), + (len(up["text"]) - len(down["text"])) / max(len(up["text"]), len(down["text"])), len(tks_all) - len(tks_up) - len(tks_down), len(tks_down) - len(tks_up), tks_down[-1] == tks_up[-1] if tks_down and tks_up else False, max(down["in_row"], up["in_row"]), abs(down["in_row"] - up["in_row"]), len(tks_down) == 1 and rag_tokenizer.tag(tks_down[0]).find("n") >= 0, - len(tks_up) == 1 and rag_tokenizer.tag(tks_up[0]).find("n") >= 0 + len(tks_up) == 1 and rag_tokenizer.tag(tks_up[0]).find("n") >= 0, ] return fea @@ -187,9 +177,7 @@ class RAGFlowPdfParser: for i in range(len(arr) - 1): for j in range(i, -1, -1): # restore the order using th - if abs(arr[j + 1]["x0"] - arr[j]["x0"]) < threshold \ - and arr[j + 1]["top"] < arr[j]["top"] \ - and arr[j + 1]["page_number"] == arr[j]["page_number"]: + if abs(arr[j + 1]["x0"] - arr[j]["x0"]) < threshold and arr[j + 1]["top"] < arr[j]["top"] and arr[j + 1]["page_number"] == arr[j]["page_number"]: tmp = arr[j] arr[j] = arr[j + 1] arr[j + 1] = tmp @@ -197,8 +185,7 @@ class RAGFlowPdfParser: def _has_color(self, o): if o.get("ncs", "") == "DeviceGray": - if o["stroking_color"] and o["stroking_color"][0] == 1 and o["non_stroking_color"] and \ - o["non_stroking_color"][0] == 1: + if o["stroking_color"] and o["stroking_color"][0] == 1 and o["non_stroking_color"] and o["non_stroking_color"][0] == 1: if re.match(r"[a-zT_\[\]\(\)-]+", o.get("text", "")): return False return True @@ -216,8 +203,7 @@ class RAGFlowPdfParser: if not tbls: continue for tb in tbls: # for table - left, top, right, bott = tb["x0"] - MARGIN, tb["top"] - MARGIN, \ - tb["x1"] + MARGIN, tb["bottom"] + MARGIN + left, top, right, bott = tb["x0"] - MARGIN, tb["top"] - MARGIN, tb["x1"] + MARGIN, tb["bottom"] + MARGIN left *= ZM top *= ZM right *= ZM @@ -232,14 +218,13 @@ class RAGFlowPdfParser: tbcnt = np.cumsum(tbcnt) for i in range(len(tbcnt) - 1): # for page pg = [] - for j, tb_items in enumerate( - recos[tbcnt[i]: tbcnt[i + 1]]): # for table - poss = pos[tbcnt[i]: tbcnt[i + 1]] + for j, tb_items in enumerate(recos[tbcnt[i] : tbcnt[i + 1]]): # for table + poss = pos[tbcnt[i] : tbcnt[i + 1]] for it in tb_items: # for table components - it["x0"] = (it["x0"] + poss[j][0]) - it["x1"] = (it["x1"] + poss[j][0]) - it["top"] = (it["top"] + poss[j][1]) - it["bottom"] = (it["bottom"] + poss[j][1]) + it["x0"] = it["x0"] + poss[j][0] + it["x1"] = it["x1"] + poss[j][0] + it["top"] = it["top"] + poss[j][1] + it["bottom"] = it["bottom"] + poss[j][1] for n in ["x0", "x1", "top", "bottom"]: it[n] /= ZM it["top"] += self.page_cum_height[i] @@ -250,8 +235,7 @@ class RAGFlowPdfParser: self.tb_cpns.extend(pg) def gather(kwd, 
fzy=10, ption=0.6): - eles = Recognizer.sort_Y_firstly( - [r for r in self.tb_cpns if re.match(kwd, r["label"])], fzy) + eles = Recognizer.sort_Y_firstly([r for r in self.tb_cpns if re.match(kwd, r["label"])], fzy) eles = Recognizer.layouts_cleanup(self.boxes, eles, 5, ption) return Recognizer.sort_Y_firstly(eles, 0) @@ -259,8 +243,7 @@ class RAGFlowPdfParser: headers = gather(r".*header$") rows = gather(r".* (row|header)") spans = gather(r".*spanning") - clmns = sorted([r for r in self.tb_cpns if re.match( - r"table column$", r["label"])], key=lambda x: (x["pn"], x["layoutno"], x["x0"])) + clmns = sorted([r for r in self.tb_cpns if re.match(r"table column$", r["label"])], key=lambda x: (x["pn"], x["layoutno"], x["x0"])) clmns = Recognizer.layouts_cleanup(self.boxes, clmns, 5, 0.5) for b in self.boxes: if b.get("layout_type", "") != "table": @@ -271,8 +254,7 @@ class RAGFlowPdfParser: b["R_top"] = rows[ii]["top"] b["R_bott"] = rows[ii]["bottom"] - ii = Recognizer.find_overlapped_with_threshold( - b, headers, thr=0.3) + ii = Recognizer.find_overlapped_with_threshold(b, headers, thr=0.3) if ii is not None: b["H_top"] = headers[ii]["top"] b["H_bott"] = headers[ii]["bottom"] @@ -305,12 +287,12 @@ class RAGFlowPdfParser: return bxs = [(line[0], line[1][0]) for line in bxs] bxs = Recognizer.sort_Y_firstly( - [{"x0": b[0][0] / ZM, "x1": b[1][0] / ZM, - "top": b[0][1] / ZM, "text": "", "txt": t, - "bottom": b[-1][1] / ZM, - "chars": [], - "page_number": pagenum} for b, t in bxs if b[0][0] <= b[1][0] and b[0][1] <= b[-1][1]], - self.mean_height[pagenum-1] / 3 + [ + {"x0": b[0][0] / ZM, "x1": b[1][0] / ZM, "top": b[0][1] / ZM, "text": "", "txt": t, "bottom": b[-1][1] / ZM, "chars": [], "page_number": pagenum} + for b, t in bxs + if b[0][0] <= b[1][0] and b[0][1] <= b[-1][1] + ], + self.mean_height[pagenum - 1] / 3, ) # merge chars in the same rect @@ -321,7 +303,7 @@ class RAGFlowPdfParser: continue ch = c["bottom"] - c["top"] bh = bxs[ii]["bottom"] - bxs[ii]["top"] - if abs(ch - bh) / max(ch, bh) >= 0.7 and c["text"] != ' ': + if abs(ch - bh) / max(ch, bh) >= 0.7 and c["text"] != " ": self.lefted_chars.append(c) continue bxs[ii]["chars"].append(c) @@ -345,8 +327,7 @@ class RAGFlowPdfParser: img_np = np.array(img) for b in bxs: if not b["text"]: - left, right, top, bott = b["x0"] * ZM, b["x1"] * \ - ZM, b["top"] * ZM, b["bottom"] * ZM + left, right, top, bott = b["x0"] * ZM, b["x1"] * ZM, b["top"] * ZM, b["bottom"] * ZM b["box_image"] = self.ocr.get_rotate_crop_image(img_np, np.array([[left, top], [right, top], [right, bott], [left, bott]], dtype=np.float32)) boxes_to_reg.append(b) del b["txt"] @@ -356,21 +337,17 @@ class RAGFlowPdfParser: del boxes_to_reg[i]["box_image"] logging.info(f"__ocr recognize {len(bxs)} boxes cost {timer() - start}s") bxs = [b for b in bxs if b["text"]] - if self.mean_height[pagenum-1] == 0: - self.mean_height[pagenum-1] = np.median([b["bottom"] - b["top"] - for b in bxs]) + if self.mean_height[pagenum - 1] == 0: + self.mean_height[pagenum - 1] = np.median([b["bottom"] - b["top"] for b in bxs]) self.boxes.append(bxs) def _layouts_rec(self, ZM, drop=True): assert len(self.page_images) == len(self.boxes) - self.boxes, self.page_layout = self.layouter( - self.page_images, self.boxes, ZM, drop=drop) + self.boxes, self.page_layout = self.layouter(self.page_images, self.boxes, ZM, drop=drop) # cumlative Y for i in range(len(self.boxes)): - self.boxes[i]["top"] += \ - self.page_cum_height[self.boxes[i]["page_number"] - 1] - self.boxes[i]["bottom"] += \ - 
self.page_cum_height[self.boxes[i]["page_number"] - 1] + self.boxes[i]["top"] += self.page_cum_height[self.boxes[i]["page_number"] - 1] + self.boxes[i]["bottom"] += self.page_cum_height[self.boxes[i]["page_number"] - 1] def _text_merge(self): # merge adjusted boxes @@ -390,12 +367,10 @@ class RAGFlowPdfParser: while i < len(bxs) - 1: b = bxs[i] b_ = bxs[i + 1] - if b.get("layoutno", "0") != b_.get("layoutno", "1") or b.get("layout_type", "") in ["table", "figure", - "equation"]: + if b.get("layoutno", "0") != b_.get("layoutno", "1") or b.get("layout_type", "") in ["table", "figure", "equation"]: i += 1 continue - if abs(self._y_dis(b, b_) - ) < self.mean_height[bxs[i]["page_number"] - 1] / 3: + if abs(self._y_dis(b, b_)) < self.mean_height[bxs[i]["page_number"] - 1] / 3: # merge bxs[i]["x1"] = b_["x1"] bxs[i]["top"] = (b["top"] + b_["top"]) / 2 @@ -408,16 +383,14 @@ class RAGFlowPdfParser: dis_thr = 1 dis = b["x1"] - b_["x0"] - if b.get("layout_type", "") != "text" or b_.get( - "layout_type", "") != "text": + if b.get("layout_type", "") != "text" or b_.get("layout_type", "") != "text": if end_with(b, ",") or start_with(b_, "(,"): dis_thr = -8 else: i += 1 continue - if abs(self._y_dis(b, b_)) < self.mean_height[bxs[i]["page_number"] - 1] / 5 \ - and dis >= dis_thr and b["x1"] < b_["x1"]: + if abs(self._y_dis(b, b_)) < self.mean_height[bxs[i]["page_number"] - 1] / 5 and dis >= dis_thr and b["x1"] < b_["x1"]: # merge bxs[i]["x1"] = b_["x1"] bxs[i]["top"] = (b["top"] + b_["top"]) / 2 @@ -429,23 +402,22 @@ class RAGFlowPdfParser: self.boxes = bxs def _naive_vertical_merge(self, zoomin=3): - bxs = Recognizer.sort_Y_firstly( - self.boxes, np.median( - self.mean_height) / 3) + import math + bxs = Recognizer.sort_Y_firstly(self.boxes, np.median(self.mean_height) / 3) column_width = np.median([b["x1"] - b["x0"] for b in self.boxes]) + if not column_width or math.isnan(column_width): + column_width = self.mean_width[0] self.column_num = int(self.page_images[0].size[0] / zoomin / column_width) if column_width < self.page_images[0].size[0] / zoomin / self.column_num: - logging.info("Multi-column................... {} {}".format(column_width, - self.page_images[0].size[0] / zoomin / self.column_num)) + logging.info("Multi-column................... 
{} {}".format(column_width, self.page_images[0].size[0] / zoomin / self.column_num)) self.boxes = self.sort_X_by_page(self.boxes, column_width / self.column_num) i = 0 while i + 1 < len(bxs): b = bxs[i] b_ = bxs[i + 1] - if b["page_number"] < b_["page_number"] and re.match( - r"[0-9 •一—-]+$", b["text"]): + if b["page_number"] < b_["page_number"] and re.match(r"[0-9 •一—-]+$", b["text"]): bxs.pop(i) continue if not b["text"].strip(): @@ -453,8 +425,7 @@ class RAGFlowPdfParser: continue concatting_feats = [ b["text"].strip()[-1] in ",;:'\",、‘“;:-", - len(b["text"].strip()) > 1 and b["text"].strip( - )[-2] in ",;:'\",‘“、;:", + len(b["text"].strip()) > 1 and b["text"].strip()[-2] in ",;:'\",‘“、;:", b_["text"].strip() and b_["text"].strip()[0] in "。;?!?”)),,、:", ] # features for not concating @@ -462,21 +433,20 @@ class RAGFlowPdfParser: b.get("layoutno", 0) != b_.get("layoutno", 0), b["text"].strip()[-1] in "。?!?", self.is_english and b["text"].strip()[-1] in ".!?", - b["page_number"] == b_["page_number"] and b_["top"] - - b["bottom"] > self.mean_height[b["page_number"] - 1] * 1.5, - b["page_number"] < b_["page_number"] and abs( - b["x0"] - b_["x0"]) > self.mean_width[b["page_number"] - 1] * 4, + b["page_number"] == b_["page_number"] and b_["top"] - b["bottom"] > self.mean_height[b["page_number"] - 1] * 1.5, + b["page_number"] < b_["page_number"] and abs(b["x0"] - b_["x0"]) > self.mean_width[b["page_number"] - 1] * 4, ] # split features - detach_feats = [b["x1"] < b_["x0"], - b["x0"] > b_["x1"]] + detach_feats = [b["x1"] < b_["x0"], b["x0"] > b_["x1"]] if (any(feats) and not any(concatting_feats)) or any(detach_feats): - logging.debug("{} {} {} {}".format( - b["text"], - b_["text"], - any(feats), - any(concatting_feats), - )) + logging.debug( + "{} {} {} {}".format( + b["text"], + b_["text"], + any(feats), + any(concatting_feats), + ) + ) i += 1 continue # merge up and down @@ -529,14 +499,11 @@ class RAGFlowPdfParser: if not concat_between_pages and down["page_number"] > up["page_number"]: break - if up.get("R", "") != down.get( - "R", "") and up["text"][-1] != ",": + if up.get("R", "") != down.get("R", "") and up["text"][-1] != ",": i += 1 continue - if re.match(r"[0-9]{2,3}/[0-9]{3}$", up["text"]) \ - or re.match(r"[0-9]{2,3}/[0-9]{3}$", down["text"]) \ - or not down["text"].strip(): + if re.match(r"[0-9]{2,3}/[0-9]{3}$", up["text"]) or re.match(r"[0-9]{2,3}/[0-9]{3}$", down["text"]) or not down["text"].strip(): i += 1 continue @@ -544,14 +511,12 @@ class RAGFlowPdfParser: i += 1 continue - if up["x1"] < down["x0"] - 10 * \ - mw or up["x0"] > down["x1"] + 10 * mw: + if up["x1"] < down["x0"] - 10 * mw or up["x0"] > down["x1"] + 10 * mw: i += 1 continue if i - dp < 5 and up.get("layout_type") == "text": - if up.get("layoutno", "1") == down.get( - "layoutno", "2"): + if up.get("layoutno", "1") == down.get("layoutno", "2"): dfs(down, i + 1) boxes.pop(i) return @@ -559,8 +524,7 @@ class RAGFlowPdfParser: continue fea = self._updown_concat_features(up, down) - if self.updown_cnt_mdl.predict( - xgb.DMatrix([fea]))[0] <= 0.5: + if self.updown_cnt_mdl.predict(xgb.DMatrix([fea]))[0] <= 0.5: i += 1 continue dfs(down, i + 1) @@ -584,16 +548,14 @@ class RAGFlowPdfParser: c["text"] = c["text"].strip() if not c["text"]: continue - if t["text"] and re.match( - r"[0-9\.a-zA-Z]+$", t["text"][-1] + c["text"][-1]): + if t["text"] and re.match(r"[0-9\.a-zA-Z]+$", t["text"][-1] + c["text"][-1]): t["text"] += " " t["text"] += c["text"] t["x0"] = min(t["x0"], c["x0"]) t["x1"] = max(t["x1"], c["x1"]) 
t["page_number"] = min(t["page_number"], c["page_number"]) t["bottom"] = c["bottom"] - if not t["layout_type"] \ - and c["layout_type"]: + if not t["layout_type"] and c["layout_type"]: t["layout_type"] = c["layout_type"] boxes.append(t) @@ -605,25 +567,20 @@ class RAGFlowPdfParser: findit = False i = 0 while i < len(self.boxes): - if not re.match(r"(contents|目录|目次|table of contents|致谢|acknowledge)$", - re.sub(r"( | |\u3000)+", "", self.boxes[i]["text"].lower())): + if not re.match(r"(contents|目录|目次|table of contents|致谢|acknowledge)$", re.sub(r"( | |\u3000)+", "", self.boxes[i]["text"].lower())): i += 1 continue findit = True - eng = re.match( - r"[0-9a-zA-Z :'.-]{5,}", - self.boxes[i]["text"].strip()) + eng = re.match(r"[0-9a-zA-Z :'.-]{5,}", self.boxes[i]["text"].strip()) self.boxes.pop(i) if i >= len(self.boxes): break - prefix = self.boxes[i]["text"].strip()[:3] if not eng else " ".join( - self.boxes[i]["text"].strip().split()[:2]) + prefix = self.boxes[i]["text"].strip()[:3] if not eng else " ".join(self.boxes[i]["text"].strip().split()[:2]) while not prefix: self.boxes.pop(i) if i >= len(self.boxes): break - prefix = self.boxes[i]["text"].strip()[:3] if not eng else " ".join( - self.boxes[i]["text"].strip().split()[:2]) + prefix = self.boxes[i]["text"].strip()[:3] if not eng else " ".join(self.boxes[i]["text"].strip().split()[:2]) self.boxes.pop(i) if i >= len(self.boxes) or not prefix: break @@ -662,10 +619,12 @@ class RAGFlowPdfParser: self.boxes.pop(i + 1) continue - if b["text"].strip()[0] != b_["text"].strip()[0] \ - or b["text"].strip()[0].lower() in set("qwertyuopasdfghjklzxcvbnm") \ - or rag_tokenizer.is_chinese(b["text"].strip()[0]) \ - or b["top"] > b_["bottom"]: + if ( + b["text"].strip()[0] != b_["text"].strip()[0] + or b["text"].strip()[0].lower() in set("qwertyuopasdfghjklzxcvbnm") + or rag_tokenizer.is_chinese(b["text"].strip()[0]) + or b["top"] > b_["bottom"] + ): i += 1 continue b_["text"] = b["text"] + "\n" + b_["text"] @@ -685,12 +644,8 @@ class RAGFlowPdfParser: if "layoutno" not in self.boxes[i]: i += 1 continue - lout_no = str(self.boxes[i]["page_number"]) + \ - "-" + str(self.boxes[i]["layoutno"]) - if TableStructureRecognizer.is_caption(self.boxes[i]) or self.boxes[i]["layout_type"] in ["table caption", - "title", - "figure caption", - "reference"]: + lout_no = str(self.boxes[i]["page_number"]) + "-" + str(self.boxes[i]["layoutno"]) + if TableStructureRecognizer.is_caption(self.boxes[i]) or self.boxes[i]["layout_type"] in ["table caption", "title", "figure caption", "reference"]: nomerge_lout_no.append(lst_lout_no) if self.boxes[i]["layout_type"] == "table": if re.match(r"(数据|资料|图表)*来源[:: ]", self.boxes[i]["text"]): @@ -716,8 +671,7 @@ class RAGFlowPdfParser: # merge table on different pages nomerge_lout_no = set(nomerge_lout_no) - tbls = sorted([(k, bxs) for k, bxs in tables.items()], - key=lambda x: (x[1][0]["top"], x[1][0]["x0"])) + tbls = sorted([(k, bxs) for k, bxs in tables.items()], key=lambda x: (x[1][0]["top"], x[1][0]["x0"])) i = len(tbls) - 1 while i - 1 >= 0: @@ -758,9 +712,7 @@ class RAGFlowPdfParser: if b.get("layout_type", "").find("caption") >= 0: continue y_dis = self._y_dis(c, b) - x_dis = self._x_dis( - c, b) if not x_overlapped( - c, b) else 0 + x_dis = self._x_dis(c, b) if not x_overlapped(c, b) else 0 dis = y_dis * y_dis + x_dis * x_dis if dis < minv: mink = k @@ -774,18 +726,10 @@ class RAGFlowPdfParser: # continue if tv < fv and tk: tables[tk].insert(0, c) - logging.debug( - "TABLE:" + - self.boxes[i]["text"] + - "; Cap: " + - tk) + 
logging.debug("TABLE:" + self.boxes[i]["text"] + "; Cap: " + tk) elif fk: figures[fk].insert(0, c) - logging.debug( - "FIGURE:" + - self.boxes[i]["text"] + - "; Cap: " + - tk) + logging.debug("FIGURE:" + self.boxes[i]["text"] + "; Cap: " + tk) self.boxes.pop(i) def cropout(bxs, ltype, poss): @@ -794,29 +738,19 @@ class RAGFlowPdfParser: if len(pn) < 2: pn = list(pn)[0] ht = self.page_cum_height[pn] - b = { - "x0": np.min([b["x0"] for b in bxs]), - "top": np.min([b["top"] for b in bxs]) - ht, - "x1": np.max([b["x1"] for b in bxs]), - "bottom": np.max([b["bottom"] for b in bxs]) - ht - } + b = {"x0": np.min([b["x0"] for b in bxs]), "top": np.min([b["top"] for b in bxs]) - ht, "x1": np.max([b["x1"] for b in bxs]), "bottom": np.max([b["bottom"] for b in bxs]) - ht} louts = [layout for layout in self.page_layout[pn] if layout["type"] == ltype] ii = Recognizer.find_overlapped(b, louts, naive=True) if ii is not None: b = louts[ii] else: - logging.warning( - f"Missing layout match: {pn + 1},%s" % - (bxs[0].get( - "layoutno", ""))) + logging.warning(f"Missing layout match: {pn + 1},%s" % (bxs[0].get("layoutno", ""))) left, top, right, bott = b["x0"], b["top"], b["x1"], b["bottom"] if right < left: right = left + 1 poss.append((pn + self.page_from, left, right, top, bott)) - return self.page_images[pn] \ - .crop((left * ZM, top * ZM, - right * ZM, bott * ZM)) + return self.page_images[pn].crop((left * ZM, top * ZM, right * ZM, bott * ZM)) pn = {} for b in bxs: p = b["page_number"] - 1 @@ -825,10 +759,7 @@ class RAGFlowPdfParser: pn[p].append(b) pn = sorted(pn.items(), key=lambda x: x[0]) imgs = [cropout(arr, ltype, poss) for p, arr in pn] - pic = Image.new("RGB", - (int(np.max([i.size[0] for i in imgs])), - int(np.sum([m.size[1] for m in imgs]))), - (245, 245, 245)) + pic = Image.new("RGB", (int(np.max([i.size[0] for i in imgs])), int(np.sum([m.size[1] for m in imgs]))), (245, 245, 245)) height = 0 for img in imgs: pic.paste(img, (0, int(height))) @@ -848,30 +779,20 @@ class RAGFlowPdfParser: poss = [] if separate_tables_figures: - figure_results.append( - (cropout( - bxs, - "figure", poss), - [txt])) + figure_results.append((cropout(bxs, "figure", poss), [txt])) figure_positions.append(poss) else: - res.append( - (cropout( - bxs, - "figure", poss), - [txt])) + res.append((cropout(bxs, "figure", poss), [txt])) positions.append(poss) for k, bxs in tables.items(): if not bxs: continue - bxs = Recognizer.sort_Y_firstly(bxs, np.mean( - [(b["bottom"] - b["top"]) / 2 for b in bxs])) + bxs = Recognizer.sort_Y_firstly(bxs, np.mean([(b["bottom"] - b["top"]) / 2 for b in bxs])) poss = [] - res.append((cropout(bxs, "table", poss), - self.tbl_det.construct_table(bxs, html=return_html, is_english=self.is_english))) + res.append((cropout(bxs, "table", poss), self.tbl_det.construct_table(bxs, html=return_html, is_english=self.is_english))) positions.append(poss) if separate_tables_figures: @@ -905,7 +826,7 @@ class RAGFlowPdfParser: (r"[0-9]+)", 10), (r"[\((][0-9]+[)\)]", 11), (r"[零一二三四五六七八九十百]+是", 12), - (r"[⚫•➢✓]", 12) + (r"[⚫•➢✓]", 12), ]: if re.match(p, line): return j @@ -924,12 +845,9 @@ class RAGFlowPdfParser: if pn[-1] - 1 >= page_images_cnt: return "" - return "@@{}\t{:.1f}\t{:.1f}\t{:.1f}\t{:.1f}##" \ - .format("-".join([str(p) for p in pn]), - bx["x0"], bx["x1"], top, bott) + return "@@{}\t{:.1f}\t{:.1f}\t{:.1f}\t{:.1f}##".format("-".join([str(p) for p in pn]), bx["x0"], bx["x1"], top, bott) def __filterout_scraps(self, boxes, ZM): - def width(b): return b["x1"] - b["x0"] @@ -939,8 +857,7 @@ class 
RAGFlowPdfParser: def usefull(b): if b.get("layout_type"): return True - if width( - b) > self.page_images[b["page_number"] - 1].size[0] / ZM / 3: + if width(b) > self.page_images[b["page_number"] - 1].size[0] / ZM / 3: return True if b["bottom"] - b["top"] > self.mean_height[b["page_number"] - 1]: return True @@ -952,31 +869,23 @@ class RAGFlowPdfParser: widths = [] pw = self.page_images[boxes[0]["page_number"] - 1].size[0] / ZM mh = self.mean_height[boxes[0]["page_number"] - 1] - mj = self.proj_match( - boxes[0]["text"]) or boxes[0].get( - "layout_type", - "") == "title" + mj = self.proj_match(boxes[0]["text"]) or boxes[0].get("layout_type", "") == "title" def dfs(line, st): nonlocal mh, pw, lines, widths lines.append(line) widths.append(width(line)) - mmj = self.proj_match( - line["text"]) or line.get( - "layout_type", - "") == "title" + mmj = self.proj_match(line["text"]) or line.get("layout_type", "") == "title" for i in range(st + 1, min(st + 20, len(boxes))): if (boxes[i]["page_number"] - line["page_number"]) > 0: break - if not mmj and self._y_dis( - line, boxes[i]) >= 3 * mh and height(line) < 1.5 * mh: + if not mmj and self._y_dis(line, boxes[i]) >= 3 * mh and height(line) < 1.5 * mh: break if not usefull(boxes[i]): continue - if mmj or \ - (self._x_dis(boxes[i], line) < pw / 10): \ - # and abs(width(boxes[i])-width_mean)/max(width(boxes[i]),width_mean)<0.5): + if mmj or (self._x_dis(boxes[i], line) < pw / 10): + # and abs(width(boxes[i])-width_mean)/max(width(boxes[i]),width_mean)<0.5): # concat following dfs(boxes[i], i) boxes.pop(i) @@ -992,11 +901,9 @@ class RAGFlowPdfParser: boxes.pop(0) mw = np.mean(widths) if mj or mw / pw >= 0.35 or mw > 200: - res.append( - "\n".join([c["text"] + self._line_tag(c, ZM) for c in lines])) + res.append("\n".join([c["text"] + self._line_tag(c, ZM) for c in lines])) else: - logging.debug("REMOVED: " + - "<<".join([c["text"] for c in lines])) + logging.debug("REMOVED: " + "<<".join([c["text"] for c in lines])) return "\n\n".join(res) @@ -1004,16 +911,14 @@ class RAGFlowPdfParser: def total_page_number(fnm, binary=None): try: with sys.modules[LOCK_KEY_pdfplumber]: - pdf = pdfplumber.open( - fnm) if not binary else pdfplumber.open(BytesIO(binary)) + pdf = pdfplumber.open(fnm) if not binary else pdfplumber.open(BytesIO(binary)) total_page = len(pdf.pages) pdf.close() return total_page except Exception: logging.exception("total_page_number") - def __images__(self, fnm, zoomin=3, page_from=0, - page_to=299, callback=None): + def __images__(self, fnm, zoomin=3, page_from=0, page_to=299, callback=None): self.lefted_chars = [] self.mean_height = [] self.mean_width = [] @@ -1025,10 +930,9 @@ class RAGFlowPdfParser: start = timer() try: with sys.modules[LOCK_KEY_pdfplumber]: - with (pdfplumber.open(fnm) if isinstance(fnm, str) else pdfplumber.open(BytesIO(fnm))) as pdf: + with pdfplumber.open(fnm) if isinstance(fnm, str) else pdfplumber.open(BytesIO(fnm)) as pdf: self.pdf = pdf - self.page_images = [p.to_image(resolution=72 * zoomin, antialias=True).annotated for i, p in - enumerate(self.pdf.pages[page_from:page_to])] + self.page_images = [p.to_image(resolution=72 * zoomin, antialias=True).annotated for i, p in enumerate(self.pdf.pages[page_from:page_to])] try: self.page_chars = [[c for c in page.dedupe_chars().chars if self._has_color(c)] for page in self.pdf.pages[page_from:page_to]] @@ -1044,11 +948,11 @@ class RAGFlowPdfParser: self.outlines = [] try: - with (pdf2_read(fnm if isinstance(fnm, str) - else BytesIO(fnm))) as pdf: + with pdf2_read(fnm if 
isinstance(fnm, str) else BytesIO(fnm)) as pdf: self.pdf = pdf outlines = self.pdf.outline + def dfs(arr, depth): for a in arr: if isinstance(a, dict): @@ -1065,11 +969,11 @@ class RAGFlowPdfParser: logging.warning("Miss outlines") logging.debug("Images converted.") - self.is_english = [re.search(r"[a-zA-Z0-9,/¸;:'\[\]\(\)!@#$%^&*\"?<>._-]{30,}", "".join( - random.choices([c["text"] for c in self.page_chars[i]], k=min(100, len(self.page_chars[i]))))) for i in - range(len(self.page_chars))] - if sum([1 if e else 0 for e in self.is_english]) > len( - self.page_images) / 2: + self.is_english = [ + re.search(r"[a-zA-Z0-9,/¸;:'\[\]\(\)!@#$%^&*\"?<>._-]{30,}", "".join(random.choices([c["text"] for c in self.page_chars[i]], k=min(100, len(self.page_chars[i]))))) + for i in range(len(self.page_chars)) + ] + if sum([1 if e else 0 for e in self.is_english]) > len(self.page_images) / 2: self.is_english = True else: self.is_english = False @@ -1077,10 +981,12 @@ class RAGFlowPdfParser: async def __img_ocr(i, id, img, chars, limiter): j = 0 while j + 1 < len(chars): - if chars[j]["text"] and chars[j + 1]["text"] \ - and re.match(r"[0-9a-zA-Z,.:;!%]+", chars[j]["text"] + chars[j + 1]["text"]) \ - and chars[j + 1]["x0"] - chars[j]["x1"] >= min(chars[j + 1]["width"], - chars[j]["width"]) / 2: + if ( + chars[j]["text"] + and chars[j + 1]["text"] + and re.match(r"[0-9a-zA-Z,.:;!%]+", chars[j]["text"] + chars[j + 1]["text"]) + and chars[j + 1]["x0"] - chars[j]["x1"] >= min(chars[j + 1]["width"], chars[j]["width"]) / 2 + ): chars[j]["text"] += " " j += 1 @@ -1096,12 +1002,8 @@ class RAGFlowPdfParser: async def __img_ocr_launcher(): def __ocr_preprocess(): chars = self.page_chars[i] if not self.is_english else [] - self.mean_height.append( - np.median(sorted([c["height"] for c in chars])) if chars else 0 - ) - self.mean_width.append( - np.median(sorted([c["width"] for c in chars])) if chars else 8 - ) + self.mean_height.append(np.median(sorted([c["height"] for c in chars])) if chars else 0) + self.mean_width.append(np.median(sorted([c["width"] for c in chars])) if chars else 8) self.page_cum_height.append(img.size[1] / zoomin) return chars @@ -1110,8 +1012,7 @@ class RAGFlowPdfParser: for i, img in enumerate(self.page_images): chars = __ocr_preprocess() - nursery.start_soon(__img_ocr, i, i % PARALLEL_DEVICES, img, chars, - self.parallel_limiter[i % PARALLEL_DEVICES]) + nursery.start_soon(__img_ocr, i, i % PARALLEL_DEVICES, img, chars, self.parallel_limiter[i % PARALLEL_DEVICES]) await trio.sleep(0.1) else: for i, img in enumerate(self.page_images): @@ -1124,11 +1025,9 @@ class RAGFlowPdfParser: logging.info(f"__images__ {len(self.page_images)} pages cost {timer() - start}s") - if not self.is_english and not any( - [c for c in self.page_chars]) and self.boxes: + if not self.is_english and not any([c for c in self.page_chars]) and self.boxes: bxes = [b for bxs in self.boxes for b in bxs] - self.is_english = re.search(r"[\na-zA-Z0-9,/¸;:'\[\]\(\)!@#$%^&*\"?<>._-]{30,}", - "".join([b["text"] for b in random.choices(bxes, k=min(30, len(bxes)))])) + self.is_english = re.search(r"[\na-zA-Z0-9,/¸;:'\[\]\(\)!@#$%^&*\"?<>._-]{30,}", "".join([b["text"] for b in random.choices(bxes, k=min(30, len(bxes)))])) logging.debug("Is it English:", self.is_english) @@ -1144,8 +1043,7 @@ class RAGFlowPdfParser: self._text_merge() self._concat_downward() self._filter_forpages() - tbls = self._extract_table_figure( - need_image, zoomin, return_html, False) + tbls = self._extract_table_figure(need_image, zoomin, return_html, False) 
return self.__filterout_scraps(deepcopy(self.boxes), zoomin), tbls def parse_into_bboxes(self, fnm, callback=None, zoomin=3): @@ -1179,9 +1077,8 @@ class RAGFlowPdfParser: import math pn1, left1, right1, top1, bottom1 = rect1 pn2, left2, right2, top2, bottom2 = rect2 - if (right1 >= left2 and right2 >= left1 and - bottom1 >= top2 and bottom2 >= top1): - return 0 + (pn1-pn2)*10000 + if right1 >= left2 and right2 >= left1 and bottom1 >= top2 and bottom2 >= top1: + return 0 if right1 < left2: dx = left2 - right1 elif right2 < left1: @@ -1194,17 +1091,20 @@ class RAGFlowPdfParser: dy = top1 - bottom2 else: dy = 0 - return math.sqrt(dx*dx + dy*dy) + (pn1-pn2)*10000 + return math.sqrt(dx*dx + dy*dy)# + (pn2-pn1)*10000 for (img, txt), poss in tbls_or_figs: bboxes = [(i, (b["page_number"], b["x0"], b["x1"], b["top"], b["bottom"])) for i, b in enumerate(self.boxes)] - dists = [(min_rectangle_distance((pn, left, right, top, bott), rect),i) for i, rect in bboxes for pn, left, right, top, bott in poss] + dists = [(min_rectangle_distance((pn, left, right, top+self.page_cum_height[pn], bott+self.page_cum_height[pn]), rect),i) for i, rect in bboxes for pn, left, right, top, bott in poss] min_i = np.argmin(dists, axis=0)[0] min_i, rect = bboxes[dists[min_i][-1]] if isinstance(txt, list): txt = "\n".join(txt) + pn, left, right, top, bott = poss[0] + if self.boxes[min_i]["bottom"] < top+self.page_cum_height[pn]: + min_i += 1 self.boxes.insert(min_i, { - "page_number": rect[0], "x0": rect[1], "x1": rect[2], "top": rect[3], "bottom": rect[4], "layout_type": layout_type, "text": txt, "image": img + "page_number": pn+1, "x0": left, "x1": right, "top": top+self.page_cum_height[pn], "bottom": bott+self.page_cum_height[pn], "layout_type": layout_type, "text": txt, "image": img }) for b in self.boxes: @@ -1225,12 +1125,9 @@ class RAGFlowPdfParser: def extract_positions(txt): poss = [] for tag in re.findall(r"@@[0-9-]+\t[0-9.\t]+##", txt): - pn, left, right, top, bottom = tag.strip( - "#").strip("@").split("\t") - left, right, top, bottom = float(left), float( - right), float(top), float(bottom) - poss.append(([int(p) - 1 for p in pn.split("-")], - left, right, top, bottom)) + pn, left, right, top, bottom = tag.strip("#").strip("@").split("\t") + left, right, top, bottom = float(left), float(right), float(top), float(bottom) + poss.append(([int(p) - 1 for p in pn.split("-")], left, right, top, bottom)) return poss def crop(self, text, ZM=3, need_position=False): @@ -1241,15 +1138,12 @@ class RAGFlowPdfParser: return None, None return - max_width = max( - np.max([right - left for (_, left, right, _, _) in poss]), 6) + max_width = max(np.max([right - left for (_, left, right, _, _) in poss]), 6) GAP = 6 pos = poss[0] - poss.insert(0, ([pos[0][0]], pos[1], pos[2], max( - 0, pos[3] - 120), max(pos[3] - GAP, 0))) + poss.insert(0, ([pos[0][0]], pos[1], pos[2], max(0, pos[3] - 120), max(pos[3] - GAP, 0))) pos = poss[-1] - poss.append(([pos[0][-1]], pos[1], pos[2], min(self.page_images[pos[0][-1]].size[1] / ZM, pos[4] + GAP), - min(self.page_images[pos[0][-1]].size[1] / ZM, pos[4] + 120))) + poss.append(([pos[0][-1]], pos[1], pos[2], min(self.page_images[pos[0][-1]].size[1] / ZM, pos[4] + GAP), min(self.page_images[pos[0][-1]].size[1] / ZM, pos[4] + 120))) positions = [] for ii, (pns, left, right, top, bottom) in enumerate(poss): @@ -1257,28 +1151,14 @@ class RAGFlowPdfParser: bottom *= ZM for pn in pns[1:]: bottom += self.page_images[pn - 1].size[1] - imgs.append( - self.page_images[pns[0]].crop((left * ZM, top * ZM, - 
right * - ZM, min( - bottom, self.page_images[pns[0]].size[1]) - )) - ) + imgs.append(self.page_images[pns[0]].crop((left * ZM, top * ZM, right * ZM, min(bottom, self.page_images[pns[0]].size[1])))) if 0 < ii < len(poss) - 1: - positions.append((pns[0] + self.page_from, left, right, top, min( - bottom, self.page_images[pns[0]].size[1]) / ZM)) + positions.append((pns[0] + self.page_from, left, right, top, min(bottom, self.page_images[pns[0]].size[1]) / ZM)) bottom -= self.page_images[pns[0]].size[1] for pn in pns[1:]: - imgs.append( - self.page_images[pn].crop((left * ZM, 0, - right * ZM, - min(bottom, - self.page_images[pn].size[1]) - )) - ) + imgs.append(self.page_images[pn].crop((left * ZM, 0, right * ZM, min(bottom, self.page_images[pn].size[1])))) if 0 < ii < len(poss) - 1: - positions.append((pn + self.page_from, left, right, 0, min( - bottom, self.page_images[pn].size[1]) / ZM)) + positions.append((pn + self.page_from, left, right, 0, min(bottom, self.page_images[pn].size[1]) / ZM)) bottom -= self.page_images[pn].size[1] if not imgs: @@ -1290,14 +1170,12 @@ class RAGFlowPdfParser: height += img.size[1] + GAP height = int(height) width = int(np.max([i.size[0] for i in imgs])) - pic = Image.new("RGB", - (width, height), - (245, 245, 245)) + pic = Image.new("RGB", (width, height), (245, 245, 245)) height = 0 for ii, img in enumerate(imgs): if ii == 0 or ii + 1 == len(imgs): - img = img.convert('RGBA') - overlay = Image.new('RGBA', img.size, (0, 0, 0, 0)) + img = img.convert("RGBA") + overlay = Image.new("RGBA", img.size, (0, 0, 0, 0)) overlay.putalpha(128) img = Image.alpha_composite(img, overlay).convert("RGB") pic.paste(img, (0, int(height))) @@ -1312,14 +1190,12 @@ class RAGFlowPdfParser: pn = bx["page_number"] top = bx["top"] - self.page_cum_height[pn - 1] bott = bx["bottom"] - self.page_cum_height[pn - 1] - poss.append((pn, bx["x0"], bx["x1"], top, min( - bott, self.page_images[pn - 1].size[1] / ZM))) + poss.append((pn, bx["x0"], bx["x1"], top, min(bott, self.page_images[pn - 1].size[1] / ZM))) while bott * ZM > self.page_images[pn - 1].size[1]: bott -= self.page_images[pn - 1].size[1] / ZM top = 0 pn += 1 - poss.append((pn, bx["x0"], bx["x1"], top, min( - bott, self.page_images[pn - 1].size[1] / ZM))) + poss.append((pn, bx["x0"], bx["x1"], top, min(bott, self.page_images[pn - 1].size[1] / ZM))) return poss @@ -1328,9 +1204,7 @@ class PlainParser: self.outlines = [] lines = [] try: - self.pdf = pdf2_read( - filename if isinstance( - filename, str) else BytesIO(filename)) + self.pdf = pdf2_read(filename if isinstance(filename, str) else BytesIO(filename)) for page in self.pdf.pages[from_page:to_page]: lines.extend([t for t in page.extract_text().split("\n")]) @@ -1367,10 +1241,8 @@ class VisionParser(RAGFlowPdfParser): def __images__(self, fnm, zoomin=3, page_from=0, page_to=299, callback=None): try: with sys.modules[LOCK_KEY_pdfplumber]: - self.pdf = pdfplumber.open(fnm) if isinstance( - fnm, str) else pdfplumber.open(BytesIO(fnm)) - self.page_images = [p.to_image(resolution=72 * zoomin).annotated for i, p in - enumerate(self.pdf.pages[page_from:page_to])] + self.pdf = pdfplumber.open(fnm) if isinstance(fnm, str) else pdfplumber.open(BytesIO(fnm)) + self.page_images = [p.to_image(resolution=72 * zoomin).annotated for i, p in enumerate(self.pdf.pages[page_from:page_to])] self.total_page = len(self.pdf.pages) except Exception: self.page_images = None @@ -1397,15 +1269,15 @@ class VisionParser(RAGFlowPdfParser): text = picture_vision_llm_chunk( binary=img_binary, 
vision_model=self.vision_model, - prompt=vision_llm_describe_prompt(page=pdf_page_num+1), + prompt=vision_llm_describe_prompt(page=pdf_page_num + 1), callback=callback, ) if kwargs.get("callback"): - kwargs["callback"](idx*1./len(self.page_images), f"Processed: {idx+1}/{len(self.page_images)}") + kwargs["callback"](idx * 1.0 / len(self.page_images), f"Processed: {idx + 1}/{len(self.page_images)}") if text: width, height = self.page_images[idx].size - all_docs.append((text, f"{pdf_page_num+1} 0 {width/zoomin} 0 {height/zoomin}")) + all_docs.append((text, f"{pdf_page_num + 1} 0 {width / zoomin} 0 {height / zoomin}")) return all_docs, [] diff --git a/deepdoc/vision/__init__.py b/deepdoc/vision/__init__.py index a60e872b1..63396c2a0 100644 --- a/deepdoc/vision/__init__.py +++ b/deepdoc/vision/__init__.py @@ -16,24 +16,28 @@ import io import sys import threading + import pdfplumber from .ocr import OCR from .recognizer import Recognizer +from .layout_recognizer import AscendLayoutRecognizer from .layout_recognizer import LayoutRecognizer4YOLOv10 as LayoutRecognizer from .table_structure_recognizer import TableStructureRecognizer - LOCK_KEY_pdfplumber = "global_shared_lock_pdfplumber" if LOCK_KEY_pdfplumber not in sys.modules: sys.modules[LOCK_KEY_pdfplumber] = threading.Lock() def init_in_out(args): - from PIL import Image import os import traceback + + from PIL import Image + from api.utils.file_utils import traversal_files + images = [] outputs = [] @@ -44,8 +48,7 @@ def init_in_out(args): nonlocal outputs, images with sys.modules[LOCK_KEY_pdfplumber]: pdf = pdfplumber.open(fnm) - images = [p.to_image(resolution=72 * zoomin).annotated for i, p in - enumerate(pdf.pages)] + images = [p.to_image(resolution=72 * zoomin).annotated for i, p in enumerate(pdf.pages)] for i, page in enumerate(images): outputs.append(os.path.split(fnm)[-1] + f"_{i}.jpg") @@ -57,10 +60,10 @@ def init_in_out(args): pdf_pages(fnm) return try: - fp = open(fnm, 'rb') + fp = open(fnm, "rb") binary = fp.read() fp.close() - images.append(Image.open(io.BytesIO(binary)).convert('RGB')) + images.append(Image.open(io.BytesIO(binary)).convert("RGB")) outputs.append(os.path.split(fnm)[-1]) except Exception: traceback.print_exc() @@ -81,6 +84,7 @@ __all__ = [ "OCR", "Recognizer", "LayoutRecognizer", + "AscendLayoutRecognizer", "TableStructureRecognizer", "init_in_out", ] diff --git a/deepdoc/vision/layout_recognizer.py b/deepdoc/vision/layout_recognizer.py index 46be451c6..9cd0b5a5d 100644 --- a/deepdoc/vision/layout_recognizer.py +++ b/deepdoc/vision/layout_recognizer.py @@ -14,6 +14,8 @@ # limitations under the License. 
# +import logging +import math import os import re from collections import Counter @@ -45,28 +47,22 @@ class LayoutRecognizer(Recognizer): def __init__(self, domain): try: - model_dir = os.path.join( - get_project_base_directory(), - "rag/res/deepdoc") + model_dir = os.path.join(get_project_base_directory(), "rag/res/deepdoc") super().__init__(self.labels, domain, model_dir) except Exception: - model_dir = snapshot_download(repo_id="InfiniFlow/deepdoc", - local_dir=os.path.join(get_project_base_directory(), "rag/res/deepdoc"), - local_dir_use_symlinks=False) + model_dir = snapshot_download(repo_id="InfiniFlow/deepdoc", local_dir=os.path.join(get_project_base_directory(), "rag/res/deepdoc"), local_dir_use_symlinks=False) super().__init__(self.labels, domain, model_dir) self.garbage_layouts = ["footer", "header", "reference"] self.client = None if os.environ.get("TENSORRT_DLA_SVR"): from deepdoc.vision.dla_cli import DLAClient + self.client = DLAClient(os.environ["TENSORRT_DLA_SVR"]) def __call__(self, image_list, ocr_res, scale_factor=3, thr=0.2, batch_size=16, drop=True): def __is_garbage(b): - patt = [r"^•+$", "^[0-9]{1,2} / ?[0-9]{1,2}$", - r"^[0-9]{1,2} of [0-9]{1,2}$", "^http://[^ ]{12,}", - "\\(cid *: *[0-9]+ *\\)" - ] + patt = [r"^•+$", "^[0-9]{1,2} / ?[0-9]{1,2}$", r"^[0-9]{1,2} of [0-9]{1,2}$", "^http://[^ ]{12,}", "\\(cid *: *[0-9]+ *\\)"] return any([re.search(p, b["text"]) for p in patt]) if self.client: @@ -82,18 +78,23 @@ class LayoutRecognizer(Recognizer): page_layout = [] for pn, lts in enumerate(layouts): bxs = ocr_res[pn] - lts = [{"type": b["type"], + lts = [ + { + "type": b["type"], "score": float(b["score"]), - "x0": b["bbox"][0] / scale_factor, "x1": b["bbox"][2] / scale_factor, - "top": b["bbox"][1] / scale_factor, "bottom": b["bbox"][-1] / scale_factor, + "x0": b["bbox"][0] / scale_factor, + "x1": b["bbox"][2] / scale_factor, + "top": b["bbox"][1] / scale_factor, + "bottom": b["bbox"][-1] / scale_factor, "page_number": pn, - } for b in lts if float(b["score"]) >= 0.4 or b["type"] not in self.garbage_layouts] - lts = self.sort_Y_firstly(lts, np.mean( - [lt["bottom"] - lt["top"] for lt in lts]) / 2) + } + for b in lts + if float(b["score"]) >= 0.4 or b["type"] not in self.garbage_layouts + ] + lts = self.sort_Y_firstly(lts, np.mean([lt["bottom"] - lt["top"] for lt in lts]) / 2) lts = self.layouts_cleanup(bxs, lts) page_layout.append(lts) - # Tag layout type, layouts are ready def findLayout(ty): nonlocal bxs, lts, self lts_ = [lt for lt in lts if lt["type"] == ty] @@ -106,21 +107,17 @@ class LayoutRecognizer(Recognizer): bxs.pop(i) continue - ii = self.find_overlapped_with_threshold(bxs[i], lts_, - thr=0.4) - if ii is None: # belong to nothing + ii = self.find_overlapped_with_threshold(bxs[i], lts_, thr=0.4) + if ii is None: bxs[i]["layout_type"] = "" i += 1 continue lts_[ii]["visited"] = True keep_feats = [ - lts_[ - ii]["type"] == "footer" and bxs[i]["bottom"] < image_list[pn].size[1] * 0.9 / scale_factor, - lts_[ - ii]["type"] == "header" and bxs[i]["top"] > image_list[pn].size[1] * 0.1 / scale_factor, + lts_[ii]["type"] == "footer" and bxs[i]["bottom"] < image_list[pn].size[1] * 0.9 / scale_factor, + lts_[ii]["type"] == "header" and bxs[i]["top"] > image_list[pn].size[1] * 0.1 / scale_factor, ] - if drop and lts_[ - ii]["type"] in self.garbage_layouts and not any(keep_feats): + if drop and lts_[ii]["type"] in self.garbage_layouts and not any(keep_feats): if lts_[ii]["type"] not in garbages: garbages[lts_[ii]["type"]] = [] 
garbages[lts_[ii]["type"]].append(bxs[i]["text"]) @@ -128,17 +125,14 @@ class LayoutRecognizer(Recognizer): continue bxs[i]["layoutno"] = f"{ty}-{ii}" - bxs[i]["layout_type"] = lts_[ii]["type"] if lts_[ - ii]["type"] != "equation" else "figure" + bxs[i]["layout_type"] = lts_[ii]["type"] if lts_[ii]["type"] != "equation" else "figure" i += 1 - for lt in ["footer", "header", "reference", "figure caption", - "table caption", "title", "table", "text", "figure", "equation"]: + for lt in ["footer", "header", "reference", "figure caption", "table caption", "title", "table", "text", "figure", "equation"]: findLayout(lt) # add box to figure layouts which has not text box - for i, lt in enumerate( - [lt for lt in lts if lt["type"] in ["figure", "equation"]]): + for i, lt in enumerate([lt for lt in lts if lt["type"] in ["figure", "equation"]]): if lt.get("visited"): continue lt = deepcopy(lt) @@ -206,13 +200,11 @@ class LayoutRecognizer4YOLOv10(LayoutRecognizer): img = cv2.resize(img, new_unpad, interpolation=cv2.INTER_LINEAR) top, bottom = int(round(dh - 0.1)) if self.center else 0, int(round(dh + 0.1)) left, right = int(round(dw - 0.1)) if self.center else 0, int(round(dw + 0.1)) - img = cv2.copyMakeBorder( - img, top, bottom, left, right, cv2.BORDER_CONSTANT, value=(114, 114, 114) - ) # add border + img = cv2.copyMakeBorder(img, top, bottom, left, right, cv2.BORDER_CONSTANT, value=(114, 114, 114)) # add border img /= 255.0 img = img.transpose(2, 0, 1) img = img[np.newaxis, :, :, :].astype(np.float32) - inputs.append({self.input_names[0]: img, "scale_factor": [shape[1]/ww, shape[0]/hh, dw, dh]}) + inputs.append({self.input_names[0]: img, "scale_factor": [shape[1] / ww, shape[0] / hh, dw, dh]}) return inputs @@ -230,8 +222,7 @@ class LayoutRecognizer4YOLOv10(LayoutRecognizer): boxes[:, 2] -= inputs["scale_factor"][2] boxes[:, 1] -= inputs["scale_factor"][3] boxes[:, 3] -= inputs["scale_factor"][3] - input_shape = np.array([inputs["scale_factor"][0], inputs["scale_factor"][1], inputs["scale_factor"][0], - inputs["scale_factor"][1]]) + input_shape = np.array([inputs["scale_factor"][0], inputs["scale_factor"][1], inputs["scale_factor"][0], inputs["scale_factor"][1]]) boxes = np.multiply(boxes, input_shape, dtype=np.float32) unique_class_ids = np.unique(class_ids) @@ -243,8 +234,223 @@ class LayoutRecognizer4YOLOv10(LayoutRecognizer): class_keep_boxes = nms(class_boxes, class_scores, 0.45) indices.extend(class_indices[class_keep_boxes]) - return [{ - "type": self.label_list[class_ids[i]].lower(), - "bbox": [float(t) for t in boxes[i].tolist()], - "score": float(scores[i]) - } for i in indices] + return [{"type": self.label_list[class_ids[i]].lower(), "bbox": [float(t) for t in boxes[i].tolist()], "score": float(scores[i])} for i in indices] + + +class AscendLayoutRecognizer(Recognizer): + labels = [ + "title", + "Text", + "Reference", + "Figure", + "Figure caption", + "Table", + "Table caption", + "Table caption", + "Equation", + "Figure caption", + ] + + def __init__(self, domain): + from ais_bench.infer.interface import InferSession + + model_dir = os.path.join(get_project_base_directory(), "rag/res/deepdoc") + model_file_path = os.path.join(model_dir, domain + ".om") + + if not os.path.exists(model_file_path): + raise ValueError(f"Model file not found: {model_file_path}") + + device_id = int(os.getenv("ASCEND_LAYOUT_RECOGNIZER_DEVICE_ID", 0)) + self.session = InferSession(device_id=device_id, model_path=model_file_path) + self.input_shape = self.session.get_inputs()[0].shape[2:4] # H,W + 
self.garbage_layouts = ["footer", "header", "reference"] + + def preprocess(self, image_list): + inputs = [] + H, W = self.input_shape + for img in image_list: + h, w = img.shape[:2] + img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB).astype(np.float32) + + r = min(H / h, W / w) + new_unpad = (int(round(w * r)), int(round(h * r))) + dw, dh = (W - new_unpad[0]) / 2.0, (H - new_unpad[1]) / 2.0 + + img = cv2.resize(img, new_unpad, interpolation=cv2.INTER_LINEAR) + top, bottom = int(round(dh - 0.1)), int(round(dh + 0.1)) + left, right = int(round(dw - 0.1)), int(round(dw + 0.1)) + img = cv2.copyMakeBorder(img, top, bottom, left, right, cv2.BORDER_CONSTANT, value=(114, 114, 114)) + + img /= 255.0 + img = img.transpose(2, 0, 1)[np.newaxis, :, :, :].astype(np.float32) + + inputs.append( + { + "image": img, + "scale_factor": [w / new_unpad[0], h / new_unpad[1]], + "pad": [dw, dh], + "orig_shape": [h, w], + } + ) + return inputs + + def postprocess(self, boxes, inputs, thr=0.25): + arr = np.squeeze(boxes) + if arr.ndim == 1: + arr = arr.reshape(1, -1) + + results = [] + if arr.shape[1] == 6: + # [x1,y1,x2,y2,score,cls] + m = arr[:, 4] >= thr + arr = arr[m] + if arr.size == 0: + return [] + xyxy = arr[:, :4].astype(np.float32) + scores = arr[:, 4].astype(np.float32) + cls_ids = arr[:, 5].astype(np.int32) + + if "pad" in inputs: + dw, dh = inputs["pad"] + sx, sy = inputs["scale_factor"] + xyxy[:, [0, 2]] -= dw + xyxy[:, [1, 3]] -= dh + xyxy *= np.array([sx, sy, sx, sy], dtype=np.float32) + else: + # backup + sx, sy = inputs["scale_factor"] + xyxy *= np.array([sx, sy, sx, sy], dtype=np.float32) + + keep_indices = [] + for c in np.unique(cls_ids): + idx = np.where(cls_ids == c)[0] + k = nms(xyxy[idx], scores[idx], 0.45) + keep_indices.extend(idx[k]) + + for i in keep_indices: + cid = int(cls_ids[i]) + if 0 <= cid < len(self.labels): + results.append({"type": self.labels[cid].lower(), "bbox": [float(t) for t in xyxy[i].tolist()], "score": float(scores[i])}) + return results + + raise ValueError(f"Unexpected output shape: {arr.shape}") + + def __call__(self, image_list, ocr_res, scale_factor=3, thr=0.2, batch_size=16, drop=True): + import re + from collections import Counter + + assert len(image_list) == len(ocr_res) + + images = [np.array(im) if not isinstance(im, np.ndarray) else im for im in image_list] + layouts_all_pages = [] # list of list[{"type","score","bbox":[x1,y1,x2,y2]}] + + conf_thr = max(thr, 0.08) + + batch_loop_cnt = math.ceil(float(len(images)) / batch_size) + for bi in range(batch_loop_cnt): + s = bi * batch_size + e = min((bi + 1) * batch_size, len(images)) + batch_images = images[s:e] + + inputs_list = self.preprocess(batch_images) + logging.debug("preprocess done") + + for ins in inputs_list: + feeds = [ins["image"]] + out_list = self.session.infer(feeds=feeds, mode="static") + + for out in out_list: + lts = self.postprocess(out, ins, conf_thr) + + page_lts = [] + for b in lts: + if float(b["score"]) >= 0.4 or b["type"] not in self.garbage_layouts: + x0, y0, x1, y1 = b["bbox"] + page_lts.append( + { + "type": b["type"], + "score": float(b["score"]), + "x0": float(x0) / scale_factor, + "x1": float(x1) / scale_factor, + "top": float(y0) / scale_factor, + "bottom": float(y1) / scale_factor, + "page_number": len(layouts_all_pages), + } + ) + layouts_all_pages.append(page_lts) + + def _is_garbage_text(box): + patt = [r"^•+$", r"^[0-9]{1,2} / ?[0-9]{1,2}$", r"^[0-9]{1,2} of [0-9]{1,2}$", r"^http://[^ ]{12,}", r"\(cid *: *[0-9]+ *\)"] + return any(re.search(p, box.get("text", "")) for p in patt) 
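The letterbox preprocessing above keeps the page's aspect ratio and pads it to the model's fixed input size, so raw detections have to be shifted back by the padding and rescaled before they can be compared with OCR boxes. Below is a minimal, illustrative sketch of that inverse mapping on a toy detection; the function name, the pad/scale values, and the page size are assumptions for the example, not values from this codebase.

```python
import numpy as np

def unletterbox(xyxy: np.ndarray, pad: tuple[float, float], scale: tuple[float, float]) -> np.ndarray:
    """Map boxes from padded model space back to original image pixels."""
    dw, dh = pad    # horizontal / vertical padding added by letterboxing
    sx, sy = scale  # original_size / resized_size per axis
    out = xyxy.astype(np.float32).copy()
    out[:, [0, 2]] -= dw  # undo horizontal padding
    out[:, [1, 3]] -= dh  # undo vertical padding
    return out * np.array([sx, sy, sx, sy], dtype=np.float32)

# Toy case: a 2000x1000 page letterboxed into a 1024x1024 input
# (resize ratio 0.512, so 256 px of padding above and below).
boxes = np.array([[100.0, 300.0, 500.0, 400.0]])
print(unletterbox(boxes, pad=(0.0, 256.0), scale=(2000 / 1024, 1000 / 512)))
```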
+ + boxes_out = [] + page_layout = [] + garbages = {} + + for pn, lts in enumerate(layouts_all_pages): + if lts: + avg_h = np.mean([lt["bottom"] - lt["top"] for lt in lts]) + lts = self.sort_Y_firstly(lts, avg_h / 2 if avg_h > 0 else 0) + + bxs = ocr_res[pn] + lts = self.layouts_cleanup(bxs, lts) + page_layout.append(lts) + + def _tag_layout(ty): + nonlocal bxs, lts + lts_of_ty = [lt for lt in lts if lt["type"] == ty] + i = 0 + while i < len(bxs): + if bxs[i].get("layout_type"): + i += 1 + continue + if _is_garbage_text(bxs[i]): + bxs.pop(i) + continue + + ii = self.find_overlapped_with_threshold(bxs[i], lts_of_ty, thr=0.4) + if ii is None: + bxs[i]["layout_type"] = "" + i += 1 + continue + + lts_of_ty[ii]["visited"] = True + + keep_feats = [ + lts_of_ty[ii]["type"] == "footer" and bxs[i]["bottom"] < image_list[pn].shape[0] * 0.9 / scale_factor, + lts_of_ty[ii]["type"] == "header" and bxs[i]["top"] > image_list[pn].shape[0] * 0.1 / scale_factor, + ] + if drop and lts_of_ty[ii]["type"] in self.garbage_layouts and not any(keep_feats): + garbages.setdefault(lts_of_ty[ii]["type"], []).append(bxs[i].get("text", "")) + bxs.pop(i) + continue + + bxs[i]["layoutno"] = f"{ty}-{ii}" + bxs[i]["layout_type"] = lts_of_ty[ii]["type"] if lts_of_ty[ii]["type"] != "equation" else "figure" + i += 1 + + for ty in ["footer", "header", "reference", "figure caption", "table caption", "title", "table", "text", "figure", "equation"]: + _tag_layout(ty) + + figs = [lt for lt in lts if lt["type"] in ["figure", "equation"]] + for i, lt in enumerate(figs): + if lt.get("visited"): + continue + lt = deepcopy(lt) + lt.pop("type", None) + lt["text"] = "" + lt["layout_type"] = "figure" + lt["layoutno"] = f"figure-{i}" + bxs.append(lt) + + boxes_out.extend(bxs) + + garbag_set = set() + for k, lst in garbages.items(): + cnt = Counter(lst) + for g, c in cnt.items(): + if c > 1: + garbag_set.add(g) + + ocr_res_new = [b for b in boxes_out if b["text"].strip() not in garbag_set] + return ocr_res_new, page_layout diff --git a/deepdoc/vision/ocr.py b/deepdoc/vision/ocr.py index e9e594274..d9f472aa1 100644 --- a/deepdoc/vision/ocr.py +++ b/deepdoc/vision/ocr.py @@ -13,7 +13,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
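Both layout recognizers close by dropping OCR text that was tagged as header, footer, or reference material and that repeats across pages. A toy illustration of that frequency-based filter follows; the sample strings are invented, and the heuristic (anything seen more than once is page furniture) mirrors the counter logic above only in spirit.

```python
from collections import Counter

ocr_texts = [
    "Annual Report 2024",                # running header, appears on every page
    "Revenue grew 12% year over year.",  # body text
    "Annual Report 2024",
    "Page 3 of 12",                      # one-off footer string in this sample
    "Net income was stable.",
]
garbage_candidates = ["Annual Report 2024", "Page 3 of 12", "Annual Report 2024"]

# Treat a candidate as garbage only if it occurs more than once: repeated strings
# are almost certainly running headers or footers rather than body text.
garbage = {text for text, count in Counter(garbage_candidates).items() if count > 1}

body = [t for t in ocr_texts if t.strip() not in garbage]
print(body)  # the repeated header is dropped; the one-off footer survives
```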
# - +import gc import logging import copy import time @@ -348,6 +348,13 @@ class TextRecognizer: return img + def close(self): + # close session and release manually + logging.info('Close TextRecognizer.') + if hasattr(self, "predictor"): + del self.predictor + gc.collect() + def __call__(self, img_list): img_num = len(img_list) # Calculate the aspect ratio of all text bars @@ -395,6 +402,9 @@ class TextRecognizer: return rec_res, time.time() - st + def __del__(self): + self.close() + class TextDetector: def __init__(self, model_dir, device_id: int | None = None): @@ -479,6 +489,12 @@ class TextDetector: dt_boxes = np.array(dt_boxes_new) return dt_boxes + def close(self): + logging.info("Close TextDetector.") + if hasattr(self, "predictor"): + del self.predictor + gc.collect() + def __call__(self, img): ori_im = img.copy() data = {'image': img} @@ -508,6 +524,9 @@ class TextDetector: return dt_boxes, time.time() - st + def __del__(self): + self.close() + class OCR: def __init__(self, model_dir=None): diff --git a/deepdoc/vision/recognizer.py b/deepdoc/vision/recognizer.py index 9fa82d7f5..65995a579 100644 --- a/deepdoc/vision/recognizer.py +++ b/deepdoc/vision/recognizer.py @@ -13,7 +13,7 @@ # See the License for the specific language governing permissions and # limitations under the License. # - +import gc import logging import os import math @@ -406,6 +406,12 @@ class Recognizer: "score": float(scores[i]) } for i in indices] + def close(self): + logging.info("Close recognizer.") + if hasattr(self, "ort_sess"): + del self.ort_sess + gc.collect() + def __call__(self, image_list, thr=0.7, batch_size=16): res = [] images = [] @@ -430,5 +436,7 @@ class Recognizer: return res + def __del__(self): + self.close() diff --git a/deepdoc/vision/table_structure_recognizer.py b/deepdoc/vision/table_structure_recognizer.py index 26b182e10..7f4736c69 100644 --- a/deepdoc/vision/table_structure_recognizer.py +++ b/deepdoc/vision/table_structure_recognizer.py @@ -23,6 +23,7 @@ from huggingface_hub import snapshot_download from api.utils.file_utils import get_project_base_directory from rag.nlp import rag_tokenizer + from .recognizer import Recognizer @@ -38,31 +39,49 @@ class TableStructureRecognizer(Recognizer): def __init__(self): try: - super().__init__(self.labels, "tsr", os.path.join( - get_project_base_directory(), - "rag/res/deepdoc")) + super().__init__(self.labels, "tsr", os.path.join(get_project_base_directory(), "rag/res/deepdoc")) except Exception: - super().__init__(self.labels, "tsr", snapshot_download(repo_id="InfiniFlow/deepdoc", - local_dir=os.path.join(get_project_base_directory(), "rag/res/deepdoc"), - local_dir_use_symlinks=False)) + super().__init__( + self.labels, + "tsr", + snapshot_download( + repo_id="InfiniFlow/deepdoc", + local_dir=os.path.join(get_project_base_directory(), "rag/res/deepdoc"), + local_dir_use_symlinks=False, + ), + ) def __call__(self, images, thr=0.2): - tbls = super().__call__(images, thr) + table_structure_recognizer_type = os.getenv("TABLE_STRUCTURE_RECOGNIZER_TYPE", "onnx").lower() + if table_structure_recognizer_type not in ["onnx", "ascend"]: + raise RuntimeError("Unsupported table structure recognizer type.") + + if table_structure_recognizer_type == "onnx": + logging.debug("Using Onnx table structure recognizer", flush=True) + tbls = super().__call__(images, thr) + else: # ascend + logging.debug("Using Ascend table structure recognizer", flush=True) + tbls = self._run_ascend_tsr(images, thr) + res = [] # align left&right for rows, align top&bottom for 
columns for tbl in tbls: - lts = [{"label": b["type"], + lts = [ + { + "label": b["type"], "score": b["score"], - "x0": b["bbox"][0], "x1": b["bbox"][2], - "top": b["bbox"][1], "bottom": b["bbox"][-1] - } for b in tbl] + "x0": b["bbox"][0], + "x1": b["bbox"][2], + "top": b["bbox"][1], + "bottom": b["bbox"][-1], + } + for b in tbl + ] if not lts: continue - left = [b["x0"] for b in lts if b["label"].find( - "row") > 0 or b["label"].find("header") > 0] - right = [b["x1"] for b in lts if b["label"].find( - "row") > 0 or b["label"].find("header") > 0] + left = [b["x0"] for b in lts if b["label"].find("row") > 0 or b["label"].find("header") > 0] + right = [b["x1"] for b in lts if b["label"].find("row") > 0 or b["label"].find("header") > 0] if not left: continue left = np.mean(left) if len(left) > 4 else np.min(left) @@ -93,11 +112,8 @@ class TableStructureRecognizer(Recognizer): @staticmethod def is_caption(bx): - patt = [ - r"[图表]+[ 0-9::]{2,}" - ] - if any([re.match(p, bx["text"].strip()) for p in patt]) \ - or bx.get("layout_type", "").find("caption") >= 0: + patt = [r"[图表]+[ 0-9::]{2,}"] + if any([re.match(p, bx["text"].strip()) for p in patt]) or bx.get("layout_type", "").find("caption") >= 0: return True return False @@ -115,7 +131,7 @@ class TableStructureRecognizer(Recognizer): (r"^[0-9A-Z/\._~-]+$", "Ca"), (r"^[A-Z]*[a-z' -]+$", "En"), (r"^[0-9.,+-]+[0-9A-Za-z/$¥%<>()()' -]+$", "NE"), - (r"^.{1}$", "Sg") + (r"^.{1}$", "Sg"), ] for p, n in patt: if re.search(p, b["text"].strip()): @@ -156,21 +172,19 @@ class TableStructureRecognizer(Recognizer): rowh = [b["R_bott"] - b["R_top"] for b in boxes if "R" in b] rowh = np.min(rowh) if rowh else 0 boxes = Recognizer.sort_R_firstly(boxes, rowh / 2) - #for b in boxes:print(b) + # for b in boxes:print(b) boxes[0]["rn"] = 0 rows = [[boxes[0]]] btm = boxes[0]["bottom"] for b in boxes[1:]: b["rn"] = len(rows) - 1 lst_r = rows[-1] - if lst_r[-1].get("R", "") != b.get("R", "") \ - or (b["top"] >= btm - 3 and lst_r[-1].get("R", "-1") != b.get("R", "-2") - ): # new row + if lst_r[-1].get("R", "") != b.get("R", "") or (b["top"] >= btm - 3 and lst_r[-1].get("R", "-1") != b.get("R", "-2")): # new row btm = b["bottom"] b["rn"] += 1 rows.append([b]) continue - btm = (btm + b["bottom"]) / 2. + btm = (btm + b["bottom"]) / 2.0 rows[-1].append(b) colwm = [b["C_right"] - b["C_left"] for b in boxes if "C" in b] @@ -186,14 +200,14 @@ class TableStructureRecognizer(Recognizer): for b in boxes[1:]: b["cn"] = len(cols) - 1 lst_c = cols[-1] - if (int(b.get("C", "1")) - int(lst_c[-1].get("C", "1")) == 1 and b["page_number"] == lst_c[-1][ - "page_number"]) \ - or (b["x0"] >= right and lst_c[-1].get("C", "-1") != b.get("C", "-2")): # new col + if (int(b.get("C", "1")) - int(lst_c[-1].get("C", "1")) == 1 and b["page_number"] == lst_c[-1]["page_number"]) or ( + b["x0"] >= right and lst_c[-1].get("C", "-1") != b.get("C", "-2") + ): # new col right = b["x1"] b["cn"] += 1 cols.append([b]) continue - right = (right + b["x1"]) / 2. 
+ right = (right + b["x1"]) / 2.0 cols[-1].append(b) tbl = [[[] for _ in range(len(cols))] for _ in range(len(rows))] @@ -214,10 +228,8 @@ class TableStructureRecognizer(Recognizer): if e > 1: j += 1 continue - f = (j > 0 and tbl[ii][j - 1] and tbl[ii] - [j - 1][0].get("text")) or j == 0 - ff = (j + 1 < len(tbl[ii]) and tbl[ii][j + 1] and tbl[ii] - [j + 1][0].get("text")) or j + 1 >= len(tbl[ii]) + f = (j > 0 and tbl[ii][j - 1] and tbl[ii][j - 1][0].get("text")) or j == 0 + ff = (j + 1 < len(tbl[ii]) and tbl[ii][j + 1] and tbl[ii][j + 1][0].get("text")) or j + 1 >= len(tbl[ii]) if f and ff: j += 1 continue @@ -228,13 +240,11 @@ class TableStructureRecognizer(Recognizer): if j > 0 and not f: for i in range(len(tbl)): if tbl[i][j - 1]: - left = min(left, np.min( - [bx["x0"] - a["x1"] for a in tbl[i][j - 1]])) + left = min(left, np.min([bx["x0"] - a["x1"] for a in tbl[i][j - 1]])) if j + 1 < len(tbl[0]) and not ff: for i in range(len(tbl)): if tbl[i][j + 1]: - right = min(right, np.min( - [a["x0"] - bx["x1"] for a in tbl[i][j + 1]])) + right = min(right, np.min([a["x0"] - bx["x1"] for a in tbl[i][j + 1]])) assert left < 100000 or right < 100000 if left < right: for jj in range(j, len(tbl[0])): @@ -260,8 +270,7 @@ class TableStructureRecognizer(Recognizer): for i in range(len(tbl)): tbl[i].pop(j) cols.pop(j) - assert len(cols) == len(tbl[0]), "Column NO. miss matched: %d vs %d" % ( - len(cols), len(tbl[0])) + assert len(cols) == len(tbl[0]), "Column NO. miss matched: %d vs %d" % (len(cols), len(tbl[0])) if len(cols) >= 4: # remove single in row @@ -277,10 +286,8 @@ class TableStructureRecognizer(Recognizer): if e > 1: i += 1 continue - f = (i > 0 and tbl[i - 1][jj] and tbl[i - 1] - [jj][0].get("text")) or i == 0 - ff = (i + 1 < len(tbl) and tbl[i + 1][jj] and tbl[i + 1] - [jj][0].get("text")) or i + 1 >= len(tbl) + f = (i > 0 and tbl[i - 1][jj] and tbl[i - 1][jj][0].get("text")) or i == 0 + ff = (i + 1 < len(tbl) and tbl[i + 1][jj] and tbl[i + 1][jj][0].get("text")) or i + 1 >= len(tbl) if f and ff: i += 1 continue @@ -292,13 +299,11 @@ class TableStructureRecognizer(Recognizer): if i > 0 and not f: for j in range(len(tbl[i - 1])): if tbl[i - 1][j]: - up = min(up, np.min( - [bx["top"] - a["bottom"] for a in tbl[i - 1][j]])) + up = min(up, np.min([bx["top"] - a["bottom"] for a in tbl[i - 1][j]])) if i + 1 < len(tbl) and not ff: for j in range(len(tbl[i + 1])): if tbl[i + 1][j]: - down = min(down, np.min( - [a["top"] - bx["bottom"] for a in tbl[i + 1][j]])) + down = min(down, np.min([a["top"] - bx["bottom"] for a in tbl[i + 1][j]])) assert up < 100000 or down < 100000 if up < down: for ii in range(i, len(tbl)): @@ -333,22 +338,15 @@ class TableStructureRecognizer(Recognizer): cnt += 1 if max_type == "Nu" and arr[0]["btype"] == "Nu": continue - if any([a.get("H") for a in arr]) \ - or (max_type == "Nu" and arr[0]["btype"] != "Nu"): + if any([a.get("H") for a in arr]) or (max_type == "Nu" and arr[0]["btype"] != "Nu"): h += 1 if h / cnt > 0.5: hdset.add(i) if html: - return TableStructureRecognizer.__html_table(cap, hdset, - TableStructureRecognizer.__cal_spans(boxes, rows, - cols, tbl, True) - ) + return TableStructureRecognizer.__html_table(cap, hdset, TableStructureRecognizer.__cal_spans(boxes, rows, cols, tbl, True)) - return TableStructureRecognizer.__desc_table(cap, hdset, - TableStructureRecognizer.__cal_spans(boxes, rows, cols, tbl, - False), - is_english) + return TableStructureRecognizer.__desc_table(cap, hdset, TableStructureRecognizer.__cal_spans(boxes, rows, cols, tbl, False), 
is_english) @staticmethod def __html_table(cap, hdset, tbl): @@ -367,10 +365,8 @@ class TableStructureRecognizer(Recognizer): continue txt = "" if arr: - h = min(np.min([c["bottom"] - c["top"] - for c in arr]) / 2, 10) - txt = " ".join([c["text"] - for c in Recognizer.sort_Y_firstly(arr, h)]) + h = min(np.min([c["bottom"] - c["top"] for c in arr]) / 2, 10) + txt = " ".join([c["text"] for c in Recognizer.sort_Y_firstly(arr, h)]) txts.append(txt) sp = "" if arr[0].get("colspan"): @@ -436,15 +432,11 @@ class TableStructureRecognizer(Recognizer): if headers[j][k].find(headers[j - 1][k]) >= 0: continue if len(headers[j][k]) > len(headers[j - 1][k]): - headers[j][k] += (de if headers[j][k] - else "") + headers[j - 1][k] + headers[j][k] += (de if headers[j][k] else "") + headers[j - 1][k] else: - headers[j][k] = headers[j - 1][k] \ - + (de if headers[j - 1][k] else "") \ - + headers[j][k] + headers[j][k] = headers[j - 1][k] + (de if headers[j - 1][k] else "") + headers[j][k] - logging.debug( - f">>>>>>>>>>>>>>>>>{cap}:SIZE:{rowno}X{clmno} Header: {hdr_rowno}") + logging.debug(f">>>>>>>>>>>>>>>>>{cap}:SIZE:{rowno}X{clmno} Header: {hdr_rowno}") row_txt = [] for i in range(rowno): if i in hdr_rowno: @@ -503,14 +495,10 @@ class TableStructureRecognizer(Recognizer): @staticmethod def __cal_spans(boxes, rows, cols, tbl, html=True): # caculate span - clft = [np.mean([c.get("C_left", c["x0"]) for c in cln]) - for cln in cols] - crgt = [np.mean([c.get("C_right", c["x1"]) for c in cln]) - for cln in cols] - rtop = [np.mean([c.get("R_top", c["top"]) for c in row]) - for row in rows] - rbtm = [np.mean([c.get("R_btm", c["bottom"]) - for c in row]) for row in rows] + clft = [np.mean([c.get("C_left", c["x0"]) for c in cln]) for cln in cols] + crgt = [np.mean([c.get("C_right", c["x1"]) for c in cln]) for cln in cols] + rtop = [np.mean([c.get("R_top", c["top"]) for c in row]) for row in rows] + rbtm = [np.mean([c.get("R_btm", c["bottom"]) for c in row]) for row in rows] for b in boxes: if "SP" not in b: continue @@ -585,3 +573,40 @@ class TableStructureRecognizer(Recognizer): tbl[rowspan[0]][colspan[0]] = arr return tbl + + def _run_ascend_tsr(self, image_list, thr=0.2, batch_size=16): + import math + + from ais_bench.infer.interface import InferSession + + model_dir = os.path.join(get_project_base_directory(), "rag/res/deepdoc") + model_file_path = os.path.join(model_dir, "tsr.om") + + if not os.path.exists(model_file_path): + raise ValueError(f"Model file not found: {model_file_path}") + + device_id = int(os.getenv("ASCEND_LAYOUT_RECOGNIZER_DEVICE_ID", 0)) + session = InferSession(device_id=device_id, model_path=model_file_path) + + images = [np.array(im) if not isinstance(im, np.ndarray) else im for im in image_list] + results = [] + + conf_thr = max(thr, 0.08) + + batch_loop_cnt = math.ceil(float(len(images)) / batch_size) + for bi in range(batch_loop_cnt): + s = bi * batch_size + e = min((bi + 1) * batch_size, len(images)) + batch_images = images[s:e] + + inputs_list = self.preprocess(batch_images) + for ins in inputs_list: + feeds = [] + if "image" in ins: + feeds.append(ins["image"]) + else: + feeds.append(ins[self.input_names[0]]) + output_list = session.infer(feeds=feeds, mode="static") + bb = self.postprocess(output_list, ins, conf_thr) + results.append(bb) + return results diff --git a/docs/guides/agent/agent_component_reference/agent.mdx b/docs/guides/agent/agent_component_reference/agent.mdx index f31cb3c6c..d02617a73 100644 --- a/docs/guides/agent/agent_component_reference/agent.mdx +++ 
b/docs/guides/agent/agent_component_reference/agent.mdx @@ -26,6 +26,84 @@ An **Agent** component is essential when you need the LLM to assist with summari 2. If your Agent involves dataset retrieval, ensure you [have properly configured your target knowledge base(s)](../../dataset/configure_knowledge_base.md). +## Quickstart + +### 1. Click on an **Agent** component to show its configuration panel + +The corresponding configuration panel appears to the right of the canvas. Use this panel to define and fine-tune the **Agent** component's behavior. + +### 2. Select your model + +Click **Model**, and select a chat model from the dropdown menu. + +:::tip NOTE +If no model appears, check if you have added a chat model on the **Model providers** page. +::: + +### 3. Update system prompt (Optional) + +The system prompt typically defines your model's role. You can either keep the system prompt as is or customize it to override the default. + + +### 4. Update user prompt + +The user prompt typically defines your model's task. You will find the `sys.query` variable auto-populated. Type `/` or click **(x)** to view or add variables. + +In this quickstart, we assume your **Agent** component is used standalone (without tools or sub-Agents beneath), so you may also need to specify retrieved chunks using the `formalized_content` variable: + +![](https://raw.githubusercontent.com/infiniflow/ragflow-docs/main/images/standalone_user_prompt_variable.jpg) + +### 5. Skip Tools and Agent + +The **+ Add tools** and **+ Add agent** sections are used *only* when you need to configure your **Agent** component as a planner (with tools or sub-Agents beneath). In this quickstart, we assume your **Agent** component is used standalone (without tools or sub-Agents beneath). + +### 6. Choose the next component + +When necessary, click the **+** button on the **Agent** component to choose the next component in the workflow from the dropdown list. + +## Connect to an MCP server as a client + +:::danger IMPORTANT +In this section, we assume your **Agent** will be configured as a planner, with a Tavily tool beneath it. +::: + +### 1. Navigate to the MCP configuration page + +![](https://raw.githubusercontent.com/infiniflow/ragflow-docs/main/images/mcp_page.jpg) + +### 2. Configure your Tavily MCP server + +Update your MCP server's name, URL (including the API key), server type, and other necessary settings. When configured correctly, the available tools will be displayed. + +![](https://raw.githubusercontent.com/infiniflow/ragflow-docs/main/images/edit_mcp_server.jpg) + +### 3. Navigate to your Agent's editing page + +### 4. Connect to your MCP server + +1. Click **+ Add tools**: + +![](https://raw.githubusercontent.com/infiniflow/ragflow-docs/main/images/add_tools.jpg) + +2. Click **MCP** to show the available MCP servers. + +3. Select your MCP server: + + *The target MCP server appears below your Agent component, and your Agent will autonomously decide when to invoke the available tools it offers.* + +![](https://raw.githubusercontent.com/infiniflow/ragflow-docs/main/images/choose_tavily_mcp_server.jpg) + +### 5. Update system prompt to specify trigger conditions (Optional) + +To ensure reliable tool calls, you may specify within the system prompt which tasks should trigger each tool call. + +### 6.
View the available tools of your MCP server + +On the canvas, click the newly-populated Tavily server to view and select its available tools: + +![](https://raw.githubusercontent.com/infiniflow/ragflow-docs/main/images/tavily_mcp_server.jpg) + + ## Configurations ### Model @@ -69,7 +147,7 @@ An **Agent** component relies on keys (variables) to specify its data inputs. It #### Advanced usage -From v0.20.5 onwards, four framework-level prompt blocks are available in the **System prompt** field. Type `/` or click **(x)** to view them; they appear under the **Framework** entry in the dropdown menu. +From v0.20.5 onwards, four framework-level prompt blocks are available in the **System prompt** field, enabling you to customize and *override* prompts at the framework level. Type `/` or click **(x)** to view them; they appear under the **Framework** entry in the dropdown menu. - `task_analysis` prompt block - This block is responsible for analyzing tasks — either a user task or a task assigned by the lead Agent when the **Agent** component is acting as a Sub-Agent. @@ -100,6 +178,12 @@ From v0.20.5 onwards, four framework-level prompt blocks are available in the ** - `citation_guidelines` prompt block - Reference design: [citation_prompt.md](https://github.com/infiniflow/ragflow/blob/main/rag/prompts/citation_prompt.md) +*The screenshots below show the framework prompt blocks available to an **Agent** component, both as a standalone and as a planner (with a Tavily tool below):* + +![standalone](https://raw.githubusercontent.com/infiniflow/ragflow-docs/main/images/standalone_agent_framework_block.jpg) + +![planner](https://raw.githubusercontent.com/infiniflow/ragflow-docs/main/images/planner_agent_framework_blocks.jpg) + ### User prompt The user-defined prompt. Defaults to `sys.query`, the user query. As a general rule, when using the **Agent** component as a standalone module (not as a planner), you usually need to specify the corresponding **Retrieval** component’s output variable (`formalized_content`) here as part of the input to the LLM. @@ -129,7 +213,7 @@ Defines the maximum number of attempts the agent will make to retry a failed tas The waiting period in seconds that the agent observes before retrying a failed task, helping to prevent immediate repeated attempts and allowing system conditions to improve. Defaults to 1 second. -### Max rounds +### Max reflection rounds Defines the maximum number of reflection rounds of the selected chat model. Defaults to 1 round. diff --git a/docs/guides/agent/agent_component_reference/execute_sql.md b/docs/guides/agent/agent_component_reference/execute_sql.md new file mode 100644 index 000000000..f74bc32ad --- /dev/null +++ b/docs/guides/agent/agent_component_reference/execute_sql.md @@ -0,0 +1,79 @@ +--- +sidebar_position: 25 +slug: /execute_sql +--- + +# Execute SQL tool + +A tool that executes SQL queries on a specified relational database. + +--- + +The **Execute SQL** tool enables you to connect to a relational database and run SQL queries, whether entered directly or generated by the system’s Text2SQL capability via an **Agent** component. + +## Prerequisites + +- A database instance properly configured and running. +- The database must be one of the following types: + - MySQL + - PostgreSQL + - MariaDB + - Microsoft SQL Server + +## Examples + +You can pair an **Agent** component with the **Execute SQL** tool, with the **Agent** generating SQL statements and the **Execute SQL** tool handling database connection and query execution.
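For orientation only, the sketch below approximates what this pairing amounts to at runtime: take a SQL string (typed in directly or produced by an upstream **Agent**), run it against the configured database, and cap the number of returned rows. It assumes a MySQL-compatible database and the `pymysql` driver; the connection details and function name are placeholders, not the tool's actual implementation.

```python
import pymysql

def run_sql(statement: str, max_records: int = 1024) -> list[dict]:
    """Execute one SQL statement and return at most max_records rows as dicts."""
    conn = pymysql.connect(
        host="127.0.0.1",   # Host: IP address of the database server (placeholder)
        port=3306,          # Port: the port the server listens on
        user="reader",      # Username with access privileges (placeholder)
        password="***",     # Password for the database user
        database="sales",   # Database name (placeholder)
        cursorclass=pymysql.cursors.DictCursor,
    )
    try:
        with conn.cursor() as cur:
            cur.execute(statement)
            return cur.fetchmany(max_records)
    finally:
        conn.close()

# e.g. run a statement that an upstream Agent component generated:
rows = run_sql("SELECT * FROM my_table LIMIT 10")
```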
An example of this setup can be found in the **SQL Assistant** Agent template shown below: + +![](https://raw.githubusercontent.com/infiniflow/ragflow-docs/main/images/exeSQL.jpg) + +## Configurations + +### SQL statement + +This text input field allows you to write static SQL queries, such as `SELECT * FROM my_table`, and dynamic SQL queries using variables. + +:::tip NOTE +Click **(x)** or type `/` to insert variables. +::: + +For dynamic SQL queries, you can include variables in your SQL queries, such as `SELECT * FROM /sys.query`; if an **Agent** component is paired with the **Execute SQL** tool to generate SQL tasks (see the [Examples](#examples) section), you can directly insert that **Agent**'s output, `content`, into this field. + +### Database type + +The supported database type. Currently the following database types are available: + +- MySQL +- PostgreSQL +- MariaDB +- Microsoft SQL Server (mssql) + +### Database + +The name of the database to connect to. + +### Username + +The username with access privileges to the database. + +### Host + +The IP address of the database server. + +### Port + +The port number on which the database server is listening. + +### Password + +The password for the database user. + +### Max records + +The maximum number of records returned by the SQL query to control response size and improve efficiency. Defaults to `1024`. + +### Output + +The **Execute SQL** tool provides two output variables: + +- `formalized_content`: A string. If you reference this variable in a **Message** component, the returned records are displayed as a table. +- `json`: An object array. If you reference this variable in a **Message** component, the returned records will be presented as key-value pairs. \ No newline at end of file diff --git a/docs/guides/chat/start_chat.md b/docs/guides/chat/start_chat.md index c56b0ff6b..abe7f8a8f 100644 --- a/docs/guides/chat/start_chat.md +++ b/docs/guides/chat/start_chat.md @@ -106,7 +106,7 @@ RAGFlow offers HTTP and Python APIs for you to integrate RAGFlow's capabilities You can use iframe to embed the created chat assistant into a third-party webpage: -1. Before proceeding, you must [acquire an API key](../models/llm_api_key_setup.md); otherwise, an error message would appear. +1. Before proceeding, you must [acquire an API key](../../develop/acquire_ragflow_api_key.md); otherwise, an error message would appear. 2. Hover over an intended chat assistant **>** **Edit** to show the **iframe** window: ![chat-embed](https://raw.githubusercontent.com/infiniflow/ragflow-docs/main/images/embed_chat_into_webpage.jpg) diff --git a/docs/guides/models/deploy_local_llm.mdx b/docs/guides/models/deploy_local_llm.mdx index af75a1324..918e9503c 100644 --- a/docs/guides/models/deploy_local_llm.mdx +++ b/docs/guides/models/deploy_local_llm.mdx @@ -91,7 +91,7 @@ In RAGFlow, click on your logo on the top right of the page **>** **Model provid In the popup window, complete basic settings for Ollama: 1. Ensure that your model name and type match those pulled at step 1 (Deploy Ollama using Docker). For example, (`llama3.2` and `chat`) or (`bge-m3` and `embedding`). -2. In Ollama base URL, put the URL you found in step 2 followed by `/v1`, i.e. `http://host.docker.internal:11434/v1`, `http://localhost:11434/v1` or `http://${IP_OF_OLLAMA_MACHINE}:11434/v1`. +2. Enter the Ollama base URL, e.g. `http://host.docker.internal:11434`, `http://localhost:11434` or `http://${IP_OF_OLLAMA_MACHINE}:11434`. 3.
OPTIONAL: Switch on the toggle under **Does it support Vision?** if your model includes an image-to-text model. diff --git a/docs/references/http_api_reference.md b/docs/references/http_api_reference.md index 791701ebf..b112e8618 100644 --- a/docs/references/http_api_reference.md +++ b/docs/references/http_api_reference.md @@ -1856,7 +1856,7 @@ curl --request POST \ - `false`: Disable highlighting of matched terms (default). - `"cross_languages"`: (*Body parameter*) `list[string]` The languages that should be translated into, in order to achieve keywords retrievals in different languages. -- `"metadata_condition"`: (*Body parameter*), `object` +- `"metadata_condition"`: (*Body parameter*), `object` The metadata condition for filtering chunks. #### Response diff --git a/docs/references/python_api_reference.md b/docs/references/python_api_reference.md index 1d788f6ab..79c62424b 100644 --- a/docs/references/python_api_reference.md +++ b/docs/references/python_api_reference.md @@ -977,7 +977,7 @@ The languages that should be translated into, in order to achieve keywords retri ##### metadata_condition: `dict` -filter condition for meta_fields +filter condition for `meta_fields`. #### Returns diff --git a/docs/references/supported_models.mdx b/docs/references/supported_models.mdx index f106ef850..8302bd308 100644 --- a/docs/references/supported_models.mdx +++ b/docs/references/supported_models.mdx @@ -65,6 +65,7 @@ A complete list of models supported by RAGFlow, which will continue to expand. | 01.AI | :heavy_check_mark: | | | | | | | DeepInfra | :heavy_check_mark: | :heavy_check_mark: | | | :heavy_check_mark: | :heavy_check_mark: | | 302.AI | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | | | +| CometAPI | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | | | ```mdx-code-block diff --git a/docs/release_notes.md b/docs/release_notes.md index 550bae5dd..ad9f291df 100644 --- a/docs/release_notes.md +++ b/docs/release_notes.md @@ -28,11 +28,11 @@ Released on September 10, 2025. ### Improvements -- Agent Performance Optimized: Improved planning and reflection speed for simple tasks; optimized concurrent tool calls for parallelizable scenarios, significantly reducing overall response time. -- Agent Prompt Framework exposed: Developers can now customize and override framework-level prompts in the system prompt section, enhancing flexibility and control. -- Execute SQL Component Enhanced: Replaced the original variable reference component with a text input field, allowing free-form SQL writing with variable support. -- Chat: Re-enabled Reasoning and Cross-language search. -- Retrieval API Enhanced: Added metadata filtering support to the [Retrieve chunks](https://ragflow.io/docs/dev/http_api_reference#retrieve-chunks) method. +- Agent: + - Agent Performance Optimized: Improves planning and reflection speed for simple tasks; optimizes concurrent tool calls for parallelizable scenarios, significantly reducing overall response time. + - Four framework-level prompt blocks are available in the **System prompt** section, enabling customization and overriding of prompts at the framework level, thereby enhancing flexibility and control. See [here](./guides/agent/agent_component_reference/agent.mdx#system-prompt). + - **Execute SQL** component enhanced: Replaces the original variable reference component with a text input field, allowing users to write free-form SQL queries and reference variables. 
+- Chat: Re-enables **Reasoning** and **Cross-language search**. ### Added models @@ -44,8 +44,22 @@ Released on September 10, 2025. ### Fixed issues - Dataset: Deleted files remained searchable. -- Chat: Unable to chat with an Ollama model. -- Agent: Resolved issues including cite toggle failure, task mode requiring dialogue triggers, repeated answers in multi-turn dialogues, and duplicate summarization of parallel execution results. +- Chat: Unable to chat with an Ollama model. +- Agent: + - A **Cite** toggle failure. + - An Agent in task mode still required a dialogue to trigger. + - Repeated answers in multi-turn dialogues. + - Duplicate summarization of parallel execution results. + +### API changes + +#### HTTP APIs + +- Adds a body parameter `"metadata_condition"` to the [Retrieve chunks](./references/http_api_reference.md#retrieve-chunks) method, enabling metadata-based chunk filtering during retrieval. [#9877](https://github.com/infiniflow/ragflow/pull/9877) + +#### Python APIs + +- Adds a parameter `metadata_condition` to the [Retrieve chunks](./references/python_api_reference.md#retrieve-chunks) method, enabling metadata-based chunk filtering during retrieval. [#9877](https://github.com/infiniflow/ragflow/pull/9877) ## v0.20.4 diff --git a/rag/app/naive.py b/rag/app/naive.py index 1e110929c..9265ae776 100644 --- a/rag/app/naive.py +++ b/rag/app/naive.py @@ -507,16 +507,29 @@ def chunk(filename, binary=None, from_page=0, to_page=100000, markdown_parser = Markdown(int(parser_config.get("chunk_token_num", 128))) sections, tables = markdown_parser(filename, binary, separate_tables=False) - # Process images for each section - section_images = [] - for section_text, _ in sections: - images = markdown_parser.get_pictures(section_text) if section_text else None - if images: - # If multiple images found, combine them using concat_img - combined_image = reduce(concat_img, images) if len(images) > 1 else images[0] - section_images.append(combined_image) - else: - section_images.append(None) + try: + vision_model = LLMBundle(kwargs["tenant_id"], LLMType.IMAGE2TEXT) + callback(0.2, "Visual model detected. Attempting to enhance figure extraction...") + except Exception: + vision_model = None + + if vision_model: + # Process images for each section + section_images = [] + for idx, (section_text, _) in enumerate(sections): + images = markdown_parser.get_pictures(section_text) if section_text else None + + if images: + # If multiple images found, combine them using concat_img + combined_image = reduce(concat_img, images) if len(images) > 1 else images[0] + section_images.append(combined_image) + markdown_vision_parser = VisionFigureParser(vision_model=vision_model, figures_data= [((combined_image, ["markdown image"]), [(0, 0, 0, 0, 0)])], **kwargs) + boosted_figures = markdown_vision_parser(callback=callback) + sections[idx] = (section_text + "\n\n" + "\n\n".join([fig[0][1] for fig in boosted_figures]), sections[idx][1]) + else: + section_images.append(None) + else: + logging.warning("No visual model detected. Skipping figure parsing enhancement.") res = tokenize_table(tables, doc, is_english) callback(0.8, "Finish parsing.") diff --git a/rag/flow/base.py b/rag/flow/base.py index 89b37b501..e229b9fc0 100644 --- a/rag/flow/base.py +++ b/rag/flow/base.py @@ -13,7 +13,6 @@ # See the License for the specific language governing permissions and # limitations under the License. 
# -import logging import os import time from functools import partial @@ -44,17 +43,17 @@ class ProcessBase(ComponentBase): self.set_output("_created_time", time.perf_counter()) for k, v in kwargs.items(): self.set_output(k, v) - try: - with trio.fail_after(self._param.timeout): - await self._invoke(**kwargs) - self.callback(1, "Done") - except Exception as e: - if self.get_exception_default_value(): - self.set_exception_default_value() - else: - self.set_output("_ERROR", str(e)) - logging.exception(e) - self.callback(-1, str(e)) + #try: + with trio.fail_after(self._param.timeout): + await self._invoke(**kwargs) + self.callback(1, "Done") + #except Exception as e: + # if self.get_exception_default_value(): + # self.set_exception_default_value() + # else: + # self.set_output("_ERROR", str(e)) + # logging.exception(e) + # self.callback(-1, str(e)) self.set_output("_elapsed_time", time.perf_counter() - self.output("_created_time")) return self.output() diff --git a/rag/flow/chunker/chunker.py b/rag/flow/chunker/chunker.py index a8281c306..2cbbf95cf 100644 --- a/rag/flow/chunker/chunker.py +++ b/rag/flow/chunker/chunker.py @@ -12,18 +12,19 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. +import json import random - import trio - from api.db import LLMType from api.db.services.llm_service import LLMBundle from deepdoc.parser.pdf_parser import RAGFlowPdfParser from graphrag.utils import chat_limiter, get_llm_cache, set_llm_cache from rag.flow.base import ProcessBase, ProcessParamBase from rag.flow.chunker.schema import ChunkerFromUpstream -from rag.nlp import naive_merge, naive_merge_with_images -from rag.prompts.prompts import keyword_extraction, question_proposal +from rag.nlp import naive_merge, naive_merge_with_images, concat_img +from rag.prompts.prompts import keyword_extraction, question_proposal, detect_table_of_contents, \ + table_of_contents_index, toc_transformer +from rag.utils import num_tokens_from_string class ChunkerParam(ProcessParamBase): @@ -43,6 +44,7 @@ class ChunkerParam(ProcessParamBase): "paper", "laws", "presentation", + "toc" # table of contents # Other # "Tag" # TODO: Other method ] @@ -54,7 +56,7 @@ class ChunkerParam(ProcessParamBase): self.auto_keywords = 0 self.auto_questions = 0 self.tag_sets = [] - self.llm_setting = {"llm_name": "", "lang": "Chinese"} + self.llm_setting = {"llm_id": "", "lang": "Chinese"} def check(self): self.check_valid_value(self.method.lower(), "Chunk method abnormal.", self.method_options) @@ -142,6 +144,91 @@ class Chunker(ProcessBase): def _one(self, from_upstream: ChunkerFromUpstream): pass + def _toc(self, from_upstream: ChunkerFromUpstream): + self.callback(random.randint(1, 5) / 100.0, "Start to chunk via `ToC`.") + if from_upstream.output_format in ["markdown", "text", "html"]: + return + + # json + sections, section_images, page_1024, tc_arr = [], [], [""], [0] + for o in from_upstream.json_result or []: + txt = o.get("text", "") + tc = num_tokens_from_string(txt) + page_1024[-1] += "\n" + txt + tc_arr[-1] += tc + if tc_arr[-1] > 1024: + page_1024.append("") + tc_arr.append(0) + sections.append((o.get("text", ""), o.get("position_tag", ""))) + section_images.append(o.get("image")) + print(len(sections), o) + + llm_setting = self._param.llm_setting + chat_mdl = LLMBundle(self._canvas._tenant_id, LLMType.CHAT, llm_name=llm_setting["llm_id"], lang=llm_setting["lang"]) + self.callback(random.randint(5, 15) 
/ 100.0, "Start to detect table of contents...") + toc_secs = detect_table_of_contents(page_1024, chat_mdl) + if toc_secs: + self.callback(random.randint(25, 35) / 100.0, "Start to extract table of contents...") + toc_arr = toc_transformer(toc_secs, chat_mdl) + toc_arr = [it for it in toc_arr if it.get("structure")] + print(json.dumps(toc_arr, ensure_ascii=False, indent=2), flush=True) + self.callback(random.randint(35, 75) / 100.0, "Start to link table of contents...") + toc_arr = table_of_contents_index(toc_arr, [t for t,_ in sections], chat_mdl) + for i in range(len(toc_arr)-1): + if not toc_arr[i].get("indices"): + continue + + for j in range(i+1, len(toc_arr)): + if toc_arr[j].get("indices"): + if toc_arr[j]["indices"][0] - toc_arr[i]["indices"][-1] > 1: + toc_arr[i]["indices"].extend([x for x in range(toc_arr[i]["indices"][-1]+1, toc_arr[j]["indices"][0])]) + break + # put all sections ahead of toc_arr[0] into it + # for i in range(len(toc_arr)): + # if toc_arr[i].get("indices") and toc_arr[i]["indices"][0]: + # toc_arr[i]["indices"] = [x for x in range(toc_arr[i]["indices"][-1]+1)] + # break + # put all sections after toc_arr[-1] into it + for i in range(len(toc_arr)-1, -1, -1): + if toc_arr[i].get("indices") and toc_arr[i]["indices"][-1]: + toc_arr[i]["indices"] = [x for x in range(toc_arr[i]["indices"][0], len(sections))] + break + print(">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n", json.dumps(toc_arr, ensure_ascii=False, indent=2), flush=True) + + chunks, images = [], [] + for it in toc_arr: + if not it.get("indices"): + continue + txt = "" + img = None + for i in it["indices"]: + idx = i + txt += "\n" + sections[idx][0] + "\t" + sections[idx][1] + if img and section_images[idx]: + img = concat_img(img, section_images[idx]) + elif section_images[idx]: + img = section_images[idx] + + it["indices"] = [] + if not txt: + continue + it["indices"] = [len(chunks)] + print(it, "KKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKK\n", txt) + chunks.append(txt) + images.append(img) + self.callback(1, "Done") + return [ + { + "text": RAGFlowPdfParser.remove_tag(c), + "image": img, + "positions": RAGFlowPdfParser.extract_positions(c), + } + for c, img in zip(chunks, images) + ] + + self.callback(message="No table of contents detected.") + + async def _invoke(self, **kwargs): function_map = { "general": self._general, @@ -154,6 +241,7 @@ class Chunker(ProcessBase): "laws": self._laws, "presentation": self._presentation, "one": self._one, + "toc": self._toc, } try: @@ -167,7 +255,7 @@ class Chunker(ProcessBase): async def auto_keywords(): nonlocal chunks, llm_setting - chat_mdl = LLMBundle(self._canvas._tenant_id, LLMType.CHAT, llm_name=llm_setting["llm_name"], lang=llm_setting["lang"]) + chat_mdl = LLMBundle(self._canvas._tenant_id, LLMType.CHAT, llm_name=llm_setting["llm_id"], lang=llm_setting["lang"]) async def doc_keyword_extraction(chat_mdl, ck, topn): cached = get_llm_cache(chat_mdl.llm_name, ck["text"], "keywords", {"topn": topn}) @@ -184,7 +272,7 @@ class Chunker(ProcessBase): async def auto_questions(): nonlocal chunks, llm_setting - chat_mdl = LLMBundle(self._canvas._tenant_id, LLMType.CHAT, llm_name=llm_setting["llm_name"], lang=llm_setting["lang"]) + chat_mdl = LLMBundle(self._canvas._tenant_id, LLMType.CHAT, llm_name=llm_setting["llm_id"], lang=llm_setting["lang"]) async def doc_question_proposal(chat_mdl, d, topn): cached = get_llm_cache(chat_mdl.llm_name, ck["text"], "question", {"topn": topn}) diff --git a/rag/flow/chunker/schema.py b/rag/flow/chunker/schema.py index bfeff447d..7f956b00b 
100644 --- a/rag/flow/chunker/schema.py +++ b/rag/flow/chunker/schema.py @@ -22,7 +22,7 @@ class ChunkerFromUpstream(BaseModel): elapsed_time: float | None = Field(default=None, alias="_elapsed_time") name: str - blob: bytes + file: dict | None = Field(default=None) output_format: Literal["json", "markdown", "text", "html"] | None = Field(default=None) diff --git a/rag/flow/file.py b/rag/flow/file.py index 584b0ff9c..75ec211eb 100644 --- a/rag/flow/file.py +++ b/rag/flow/file.py @@ -14,10 +14,7 @@ # limitations under the License. # from api.db.services.document_service import DocumentService -from api.db.services.file2document_service import File2DocumentService -from api.db.services.file_service import FileService from rag.flow.base import ProcessBase, ProcessParamBase -from rag.utils.storage_factory import STORAGE_IMPL class FileParam(ProcessParamBase): @@ -41,10 +38,13 @@ class File(ProcessBase): self.set_output("_ERROR", f"Document({self._canvas._doc_id}) not found!") return - b, n = File2DocumentService.get_storage_address(doc_id=self._canvas._doc_id) - self.set_output("blob", STORAGE_IMPL.get(b, n)) + #b, n = File2DocumentService.get_storage_address(doc_id=self._canvas._doc_id) + #self.set_output("blob", STORAGE_IMPL.get(b, n)) self.set_output("name", doc.name) else: file = kwargs.get("file") self.set_output("name", file["name"]) - self.set_output("blob", FileService.get_blob(file["created_by"], file["id"])) + self.set_output("file", file) + #self.set_output("blob", FileService.get_blob(file["created_by"], file["id"])) + + self.callback(1, "File fetched.") diff --git a/rag/flow/hierarchical_merger/__init__.py b/rag/flow/hierarchical_merger/__init__.py new file mode 100644 index 000000000..b4663378e --- /dev/null +++ b/rag/flow/hierarchical_merger/__init__.py @@ -0,0 +1,15 @@ +# +# Copyright 2025 The InfiniFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + diff --git a/rag/flow/hierarchical_merger/hierarchical_merger.py b/rag/flow/hierarchical_merger/hierarchical_merger.py new file mode 100644 index 000000000..ee85c581c --- /dev/null +++ b/rag/flow/hierarchical_merger/hierarchical_merger.py @@ -0,0 +1,178 @@ +# +# Copyright 2025 The InfiniFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+import json +import random +import re +from copy import deepcopy +from functools import partial + +import trio + +from api.utils import get_uuid +from api.utils.base64_image import id2image, image2id +from deepdoc.parser.pdf_parser import RAGFlowPdfParser +from rag.flow.base import ProcessBase, ProcessParamBase +from rag.flow.hierarchical_merger.schema import HierarchicalMergerFromUpstream +from rag.nlp import concat_img +from rag.utils.storage_factory import STORAGE_IMPL + + +class HierarchicalMergerParam(ProcessParamBase): + def __init__(self): + super().__init__() + self.levels = [] + self.hierarchy = None + + def check(self): + self.check_empty(self.levels, "Hierarchical setups.") + self.check_empty(self.hierarchy, "Hierarchy number.") + + def get_input_form(self) -> dict[str, dict]: + return {} + + +class HierarchicalMerger(ProcessBase): + component_name = "HierarchicalMerger" + + async def _invoke(self, **kwargs): + try: + from_upstream = HierarchicalMergerFromUpstream.model_validate(kwargs) + except Exception as e: + self.set_output("_ERROR", f"Input error: {str(e)}") + return + + self.callback(random.randint(1, 5) / 100.0, "Start to merge hierarchically.") + if from_upstream.output_format in ["markdown", "text", "html"]: + if from_upstream.output_format == "markdown": + payload = from_upstream.markdown_result + elif from_upstream.output_format == "text": + payload = from_upstream.text_result + else: # == "html" + payload = from_upstream.html_result + + if not payload: + payload = "" + + lines = [ln for ln in payload.split("\n") if ln] + else: + lines = [o.get("text", "") for o in from_upstream.json_result] + sections, section_images = [], [] + for o in from_upstream.json_result or []: + sections.append((o.get("text", ""), o.get("position_tag", ""))) + section_images.append(o.get("img_id")) + + matches = [] + for txt in lines: + good = False + for lvl, regs in enumerate(self._param.levels): + for reg in regs: + if re.search(reg, txt): + matches.append(lvl) + good = True + break + if good: + break + if not good: + matches.append(len(self._param.levels)) + assert len(matches) == len(lines), f"{len(matches)} vs. 
{len(lines)}" + + root = { + "level": -1, + "index": -1, + "texts": [], + "children": [] + } + for i, m in enumerate(matches): + if m == 0: + root["children"].append({ + "level": m, + "index": i, + "texts": [], + "children": [] + }) + elif m == len(self._param.levels): + def dfs(b): + if not b["children"]: + b["texts"].append(i) + else: + dfs(b["children"][-1]) + dfs(root) + else: + def dfs(b): + nonlocal m, i + if not b["children"] or m == b["level"] + 1: + b["children"].append({ + "level": m, + "index": i, + "texts": [], + "children": [] + }) + return + dfs(b["children"][-1]) + + dfs(root) + + all_pathes = [] + def dfs(n, path, depth): + nonlocal all_pathes + if depth < self._param.hierarchy: + path = deepcopy(path) + + for nn in n["children"]: + path.extend([nn["index"], *nn["texts"]]) + dfs(nn, path, depth+1) + + if depth == self._param.hierarchy: + all_pathes.append(path) + + for i in range(len(lines)): + print(i, lines[i]) + dfs(root, [], 0) + print("sSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS", json.dumps(root, ensure_ascii=False, indent=2)) + + if from_upstream.output_format in ["markdown", "text", "html"]: + cks = [] + for path in all_pathes: + txt = "" + for i in path: + txt += lines[i] + "\n" + cks.append(txt) + + self.set_output("chunks", [{"text": c} for c in cks if c]) + else: + cks = [] + images = [] + for path in all_pathes: + txt = "" + img = None + for i in path: + txt += lines[i] + "\n" + img = concat_img(img, id2image(section_images[i], partial(STORAGE_IMPL.get))) + cks.append(txt) + images.append(img) + + cks = [ + { + "text": RAGFlowPdfParser.remove_tag(c), + "image": img, + "positions": RAGFlowPdfParser.extract_positions(c), + } + for c, img in zip(cks, images) + ] + async with trio.open_nursery() as nursery: + for d in cks: + nursery.start_soon(image2id, d, partial(STORAGE_IMPL.put), "_image_temps", get_uuid()) + + self.set_output("chunks", cks) + self.callback(1, "Done.") diff --git a/rag/flow/hierarchical_merger/schema.py b/rag/flow/hierarchical_merger/schema.py new file mode 100644 index 000000000..e45610fe5 --- /dev/null +++ b/rag/flow/hierarchical_merger/schema.py @@ -0,0 +1,37 @@ +# +# Copyright 2025 The InfiniFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License.
+from typing import Any, Literal + +from pydantic import BaseModel, ConfigDict, Field + + +class HierarchicalMergerFromUpstream(BaseModel): + created_time: float | None = Field(default=None, alias="_created_time") + elapsed_time: float | None = Field(default=None, alias="_elapsed_time") + + name: str + file: dict | None = Field(default=None) + chunks: list[dict[str, Any]] | None = Field(default=None) + + output_format: Literal["json", "markdown", "text", "html"] | None = Field(default=None) + json_result: list[dict[str, Any]] | None = Field(default=None, alias="json") + markdown_result: str | None = Field(default=None, alias="markdown") + text_result: str | None = Field(default=None, alias="text") + html_result: list[str] | None = Field(default=None, alias="html") + + model_config = ConfigDict(populate_by_name=True, extra="forbid") + + # def to_dict(self, *, exclude_none: bool = True) -> dict: + # return self.model_dump(by_alias=True, exclude_none=exclude_none) diff --git a/rag/flow/parser/parser.py b/rag/flow/parser/parser.py index 26f5021f1..b6747fe30 100644 --- a/rag/flow/parser/parser.py +++ b/rag/flow/parser/parser.py @@ -12,18 +12,27 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. +import io import logging import random +from functools import partial import trio +import numpy as np +from PIL import Image from api.db import LLMType +from api.db.services.file2document_service import File2DocumentService +from api.db.services.file_service import FileService from api.db.services.llm_service import LLMBundle +from api.utils import get_uuid +from api.utils.base64_image import image2id from deepdoc.parser import ExcelParser from deepdoc.parser.pdf_parser import PlainParser, RAGFlowPdfParser, VisionParser from rag.flow.base import ProcessBase, ProcessParamBase from rag.flow.parser.schema import ParserFromUpstream from rag.llm.cv_model import Base as VLM +from rag.utils.storage_factory import STORAGE_IMPL class ParserParam(ProcessParamBase): @@ -43,17 +52,24 @@ class ParserParam(ProcessParamBase): "json", ], "ppt": [], - "image": [], + "image": [ + "text" + ], "email": [], - "text": [], - "audio": [], + "text": [ + "text", + "json" + ], + "audio": [ + "json" + ], "video": [], } self.setups = { "pdf": { "parse_method": "deepdoc", # deepdoc/plain_text/vlm - "vlm_name": "", + "llm_id": "", "lang": "Chinese", "suffix": [ "pdf", @@ -76,16 +92,46 @@ class ParserParam(ProcessParamBase): "output_format": "json", }, "markdown": { - "suffix": ["md", "markdown"], + "suffix": ["md", "markdown", "mdx"], "output_format": "json", }, "ppt": {}, "image": { "parse_method": "ocr", + "llm_id": "", + "lang": "Chinese", + "suffix": ["jpg", "jpeg", "png", "gif"], + "output_format": "json", + }, + "email": { + "fields": [] + }, + "text": { + "suffix": [ + "txt" + ], + "output_format": "json", + }, + "audio": { + "suffix":[ + "da", + "wave", + "wav", + "mp3", + "aac", + "flac", + "ogg", + "aiff", + "au", + "midi", + "wma", + "realaudio", + "vqf", + "oggvorbis", + "ape" + ], + "output_format": "json", }, - "email": {}, - "text": {}, - "audio": {}, "video": {}, } @@ -96,7 +142,7 @@ class ParserParam(ProcessParamBase): self.check_valid_value(pdf_parse_method.lower(), "Parse method abnormal.", ["deepdoc", "plain_text", "vlm"]) if pdf_parse_method not in ["deepdoc", "plain_text"]: - self.check_empty(pdf_config.get("vlm_name"), "VLM") + self.check_empty(pdf_config.get("llm_id"), "VLM") 
pdf_language = pdf_config.get("lang", "") self.check_empty(pdf_language, "Language") @@ -117,7 +163,23 @@ class ParserParam(ProcessParamBase): image_config = self.setups.get("image", "") if image_config: image_parse_method = image_config.get("parse_method", "") - self.check_valid_value(image_parse_method.lower(), "Parse method abnormal.", ["ocr"]) + self.check_valid_value(image_parse_method.lower(), "Parse method abnormal.", ["ocr", "vlm"]) + if image_parse_method not in ["ocr"]: + self.check_empty(image_config.get("llm_id"), "VLM") + + image_language = image_config.get("lang", "") + self.check_empty(image_language, "Language") + + text_config = self.setups.get("text", "") + if text_config: + text_output_format = text_config.get("output_format", "") + self.check_valid_value(text_output_format, "Text output format abnormal.", self.allowed_output_format["text"]) + + audio_config = self.setups.get("audio", "") + if audio_config: + self.check_empty(audio_config.get("llm_id"), "VLM") + audio_language = audio_config.get("lang", "") + self.check_empty(audio_language, "Language") def get_input_form(self) -> dict[str, dict]: return {} @@ -126,10 +188,8 @@ class ParserParam(ProcessParamBase): class Parser(ProcessBase): component_name = "Parser" - def _pdf(self, from_upstream: ParserFromUpstream): + def _pdf(self, name, blob): self.callback(random.randint(1, 5) / 100.0, "Start to work on a PDF.") - - blob = from_upstream.blob conf = self._param.setups["pdf"] self.set_output("output_format", conf["output_format"]) @@ -139,8 +199,8 @@ class Parser(ProcessBase): lines, _ = PlainParser()(blob) bboxes = [{"text": t} for t, _ in lines] else: - assert conf.get("vlm_name") - vision_model = LLMBundle(self._canvas._tenant_id, LLMType.IMAGE2TEXT, llm_name=conf.get("vlm_name"), lang=self._param.setups["pdf"].get("lang")) + assert conf.get("llm_id") + vision_model = LLMBundle(self._canvas._tenant_id, LLMType.IMAGE2TEXT, llm_name=conf.get("llm_id"), lang=self._param.setups["pdf"].get("lang")) lines, _ = VisionParser(vision_model=vision_model)(blob, callback=self.callback) bboxes = [] for t, poss in lines: @@ -149,6 +209,7 @@ class Parser(ProcessBase): if conf.get("output_format") == "json": self.set_output("json", bboxes) + if conf.get("output_format") == "markdown": mkdn = "" for b in bboxes: @@ -160,14 +221,10 @@ class Parser(ProcessBase): mkdn += b.get("text", "") + "\n" self.set_output("markdown", mkdn) - def _spreadsheet(self, from_upstream: ParserFromUpstream): + def _spreadsheet(self, name, blob): self.callback(random.randint(1, 5) / 100.0, "Start to work on a Spreadsheet.") - - blob = from_upstream.blob conf = self._param.setups["spreadsheet"] self.set_output("output_format", conf["output_format"]) - - print("spreadsheet {conf=}", flush=True) spreadsheet_parser = ExcelParser() if conf.get("output_format") == "html": html = spreadsheet_parser.html(blob, 1000000000) @@ -177,19 +234,13 @@ class Parser(ProcessBase): elif conf.get("output_format") == "markdown": self.set_output("markdown", spreadsheet_parser.markdown(blob)) - def _word(self, from_upstream: ParserFromUpstream): + def _word(self, name, blob): from tika import parser as word_parser self.callback(random.randint(1, 5) / 100.0, "Start to work on a Word Processor Document") - - blob = from_upstream.blob - name = from_upstream.name conf = self._param.setups["word"] self.set_output("output_format", conf["output_format"]) - - print("word {conf=}", flush=True) doc_parsed = word_parser.from_buffer(blob) - sections = [] if doc_parsed.get("content"): 
sections = doc_parsed["content"].split("\n") @@ -202,26 +253,18 @@ class Parser(ProcessBase): if conf.get("output_format") == "json": self.set_output("json", sections) - def _markdown(self, from_upstream: ParserFromUpstream): + def _markdown(self, name, blob): from functools import reduce - from rag.app.naive import Markdown as naive_markdown_parser from rag.nlp import concat_img - self.callback(random.randint(1, 5) / 100.0, "Start to work on a Word Processor Document") - - blob = from_upstream.blob - name = from_upstream.name + self.callback(random.randint(1, 5) / 100.0, "Start to work on a markdown.") conf = self._param.setups["markdown"] self.set_output("output_format", conf["output_format"]) - print("markdown {conf=}", flush=True) - markdown_parser = naive_markdown_parser() sections, tables = markdown_parser(name, blob, separate_tables=False) - # json - assert conf.get("output_format") == "json", "have to be json for doc" if conf.get("output_format") == "json": json_results = [] @@ -239,14 +282,86 @@ class Parser(ProcessBase): json_results.append(json_result) self.set_output("json", json_results) + else: + self.set_output("text", "\n".join([section_text for section_text, _ in sections])) + def _text(self, name, blob): + from deepdoc.parser.utils import get_text + + self.callback(random.randint(1, 5) / 100.0, "Start to work on a text.") + conf = self._param.setups["text"] + self.set_output("output_format", conf["output_format"]) + + # parse binary to text + text_content = get_text(name, binary=blob) + + if conf.get("output_format") == "json": + result = [{"text": text_content}] + self.set_output("json", result) + else: + result = text_content + self.set_output("text", result) + + def _image(self, from_upstream: ParserFromUpstream): + from deepdoc.vision import OCR + + self.callback(random.randint(1, 5) / 100.0, "Start to work on an image.") + + blob = from_upstream.blob + conf = self._param.setups["image"] + self.set_output("output_format", conf["output_format"]) + + img = Image.open(io.BytesIO(blob)).convert("RGB") + lang = conf["lang"] + + if conf["parse_method"] == "ocr": + # use ocr, recognize chars only + ocr = OCR() + bxs = ocr(np.array(img)) # return boxes and recognize result + txt = "\n".join([t[0] for _, t in bxs if t[0]]) + + else: + # use VLM to describe the picture + cv_model = LLMBundle(self._canvas.get_tenant_id(), LLMType.IMAGE2TEXT, llm_name=conf["llm_id"],lang=lang) + img_binary = io.BytesIO() + img.save(img_binary, format="JPEG") + img_binary.seek(0) + txt = cv_model.describe(img_binary.read()) + + self.set_output("text", txt) + + def _audio(self, from_upstream: ParserFromUpstream): + import os + import tempfile + + self.callback(random.randint(1, 5) / 100.0, "Start to work on an audio.") + + blob = from_upstream.blob + name = from_upstream.name + conf = self._param.setups["audio"] + self.set_output("output_format", conf["output_format"]) + + lang = conf["lang"] + _, ext = os.path.splitext(name) + with tempfile.NamedTemporaryFile(suffix=ext) as tmpf: + tmpf.write(blob) + tmpf.flush() + tmp_path = os.path.abspath(tmpf.name) + + seq2txt_mdl = LLMBundle(self._canvas.get_tenant_id(), LLMType.SPEECH2TEXT, lang=lang) + txt = seq2txt_mdl.transcription(tmp_path) + + self.set_output("text", txt) async def _invoke(self, **kwargs): function_map = { "pdf": self._pdf, "markdown": self._markdown, "spreadsheet": self._spreadsheet, - "word": self._word + "word": self._word, + "text": self._text, + "image": self._image, + "audio": self._audio, } try: from_upstream = 
ParserFromUpstream.model_validate(kwargs) @@ -254,8 +369,20 @@ class Parser(ProcessBase): self.set_output("_ERROR", f"Input error: {str(e)}") return + name = from_upstream.name + if self._canvas._doc_id: + b, n = File2DocumentService.get_storage_address(doc_id=self._canvas._doc_id) + blob = STORAGE_IMPL.get(b, n) + else: + blob = FileService.get_blob(from_upstream.file["created_by"], from_upstream.file["id"]) + for p_type, conf in self._param.setups.items(): if from_upstream.name.split(".")[-1].lower() not in conf.get("suffix", []): continue - await trio.to_thread.run_sync(function_map[p_type], from_upstream) + await trio.to_thread.run_sync(function_map[p_type], name, blob) break + + outs = self.output() + async with trio.open_nursery() as nursery: + for d in outs.get("json", []): + nursery.start_soon(image2id, d, partial(STORAGE_IMPL.put), "_image_temps", get_uuid()) diff --git a/rag/flow/parser/schema.py b/rag/flow/parser/schema.py index 37292e058..f43661762 100644 --- a/rag/flow/parser/schema.py +++ b/rag/flow/parser/schema.py @@ -20,6 +20,5 @@ class ParserFromUpstream(BaseModel): elapsed_time: float | None = Field(default=None, alias="_elapsed_time") name: str - blob: bytes - + file: dict | None = Field(default=None) model_config = ConfigDict(populate_by_name=True, extra="forbid") diff --git a/rag/flow/pipeline.py b/rag/flow/pipeline.py index 9f88d29ea..2cd9cee3f 100644 --- a/rag/flow/pipeline.py +++ b/rag/flow/pipeline.py @@ -48,7 +48,24 @@ class Pipeline(Graph): obj.append({"component_name": component_name, "trace": [{"progress": progress, "message": message, "datetime": datetime.datetime.now().strftime("%H:%M:%S")}]}) else: obj = [{"component_name": component_name, "trace": [{"progress": progress, "message": message, "datetime": datetime.datetime.now().strftime("%H:%M:%S")}]}] - REDIS_CONN.set_obj(log_key, obj, 60 * 10) + REDIS_CONN.set_obj(log_key, obj, 60 * 30) + if self._doc_id: + percentage = 1./len(self.components.items()) + msg = "" + finished = 0. + for o in obj: + if o['component_name'] == "END": + continue + msg += f"\n[{o['component_name']}]:\n" + for t in o["trace"]: + msg += "%s: %s\n"%(t["datetime"], t["message"]) + if t["progress"] < 0: + finished = -1 + break + if finished < 0: + break + finished += o["trace"][-1]["progress"] * percentage + DocumentService.update_by_id(self._doc_id, {"progress": finished, "progress_msg": msg}) except Exception as e: logging.exception(e) @@ -108,5 +125,11 @@ class Pipeline(Graph): idx += 1 self.path.extend(cpn_obj.get_downstream()) + self.callback("END", 1, json.dumps(self.get_component_obj(self.path[-1]).output(), ensure_ascii=False)) + if self._doc_id: - DocumentService.update_by_id(self._doc_id, {"progress": 1 if not self.error else -1, "progress_msg": "Pipeline finished...\n" + self.error, "process_duration": time.perf_counter() - st}) + DocumentService.update_by_id(self._doc_id,{ + "progress": 1 if not self.error else -1, + "progress_msg": "Pipeline finished...\n" + self.error, + "process_duration": time.perf_counter() - st + }) diff --git a/rag/flow/splitter/__init__.py b/rag/flow/splitter/__init__.py new file mode 100644 index 000000000..b4663378e --- /dev/null +++ b/rag/flow/splitter/__init__.py @@ -0,0 +1,15 @@ +# +# Copyright 2025 The InfiniFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + diff --git a/rag/flow/splitter/schema.py b/rag/flow/splitter/schema.py new file mode 100644 index 000000000..cf097d792 --- /dev/null +++ b/rag/flow/splitter/schema.py @@ -0,0 +1,38 @@ +# +# Copyright 2025 The InfiniFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from typing import Any, Literal + +from pydantic import BaseModel, ConfigDict, Field + + +class SplitterFromUpstream(BaseModel): + created_time: float | None = Field(default=None, alias="_created_time") + elapsed_time: float | None = Field(default=None, alias="_elapsed_time") + + name: str + file: dict | None = Field(default=None) + chunks: list[dict[str, Any]] | None = Field(default=None) + + output_format: Literal["json", "markdown", "text", "html"] | None = Field(default=None) + + json_result: list[dict[str, Any]] | None = Field(default=None, alias="json") + markdown_result: str | None = Field(default=None, alias="markdown") + text_result: str | None = Field(default=None, alias="text") + html_result: list[str] | None = Field(default=None, alias="html") + + model_config = ConfigDict(populate_by_name=True, extra="forbid") + + # def to_dict(self, *, exclude_none: bool = True) -> dict: + # return self.model_dump(by_alias=True, exclude_none=exclude_none) diff --git a/rag/flow/splitter/splitter.py b/rag/flow/splitter/splitter.py new file mode 100644 index 000000000..7cf902a05 --- /dev/null +++ b/rag/flow/splitter/splitter.py @@ -0,0 +1,112 @@ +# +# Copyright 2025 The InfiniFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+import json +import random +from functools import partial + +import trio + +from api.utils import get_uuid +from api.utils.base64_image import id2image, image2id +from deepdoc.parser.pdf_parser import RAGFlowPdfParser +from rag.flow.base import ProcessBase, ProcessParamBase +from rag.flow.splitter.schema import SplitterFromUpstream +from rag.nlp import naive_merge, naive_merge_with_images +from rag.utils.storage_factory import STORAGE_IMPL + + +class SplitterParam(ProcessParamBase): + def __init__(self): + super().__init__() + self.chunk_token_size = 512 + self.delimiters = ["\n"] + self.overlapped_percent = 0 + + def check(self): + self.check_empty(self.delimiters, "Delimiters.") + self.check_positive_integer(self.chunk_token_size, "Chunk token size.") + self.check_decimal_float(self.overlapped_percent, "Overlapped percentage: [0, 1)") + + def get_input_form(self) -> dict[str, dict]: + return {} + + +class Splitter(ProcessBase): + component_name = "Splitter" + + async def _invoke(self, **kwargs): + try: + from_upstream = SplitterFromUpstream.model_validate(kwargs) + except Exception as e: + self.set_output("_ERROR", f"Input error: {str(e)}") + return + + deli = "" + for d in self._param.delimiters: + if len(d) > 1: + deli += f"`{d}`" + else: + deli += d + + self.callback(random.randint(1, 5) / 100.0, "Start to split into chunks.") + if from_upstream.output_format in ["markdown", "text", "html"]: + if from_upstream.output_format == "markdown": + payload = from_upstream.markdown_result + elif from_upstream.output_format == "text": + payload = from_upstream.text_result + else: # == "html" + payload = from_upstream.html_result + + if not payload: + payload = "" + + cks = naive_merge( + payload, + self._param.chunk_token_size, + deli, + self._param.overlapped_percent, + ) + self.set_output("chunks", [{"text": c} for c in cks]) + + self.callback(1, "Done.") + return + + # json + sections, section_images = [], [] + for o in from_upstream.json_result or []: + sections.append((o.get("text", ""), o.get("position_tag", ""))) + section_images.append(id2image(o.get("img_id"), partial(STORAGE_IMPL.get))) + + chunks, images = naive_merge_with_images( + sections, + section_images, + self._param.chunk_token_size, + deli, + self._param.overlapped_percent, + ) + cks = [ + { + "text": RAGFlowPdfParser.remove_tag(c), + "image": img, + "positions": RAGFlowPdfParser.extract_positions(c), + } + for c, img in zip(chunks, images) + ] + async with trio.open_nursery() as nursery: + for d in cks: + nursery.start_soon(image2id, d, partial(STORAGE_IMPL.put), "_image_temps", get_uuid()) + print("SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS\n", json.dumps(cks, ensure_ascii=False, indent=2)) + self.set_output("chunks", cks) + self.callback(1, "Done.") diff --git a/rag/flow/tests/dsl_examples/general_pdf_all.json b/rag/flow/tests/dsl_examples/general_pdf_all.json index 42eae3f61..c7bda516d 100644 --- a/rag/flow/tests/dsl_examples/general_pdf_all.json +++ b/rag/flow/tests/dsl_examples/general_pdf_all.json @@ -44,20 +44,58 @@ "markdown" ], "output_format": "json" + }, + "text": { + "suffix": ["txt"], + "output_format": "json" + }, + "image": { + "parse_method": "vlm", + "llm_id":"glm-4.5v", + "lang": "Chinese", + "suffix": [ + "jpg", + "jpeg", + "png", + "gif" + ], + "output_format": "text" + }, + "audio": { + "suffix": [ + "da", + "wave", + "wav", + "mp3", + "aac", + "flac", + "ogg", + "aiff", + "au", + "midi", + "wma", + "realaudio", + "vqf", + "oggvorbis", + "ape" + ], + "lang": "Chinese", + "llm_id": 
"SenseVoiceSmall", + "output_format": "json" } } - } } }, - "downstream": ["Chunker:0"], + "downstream": ["Splitter:0"], "upstream": ["Begin"] }, - "Chunker:0": { + "Splitter:0": { "obj": { - "component_name": "Chunker", + "component_name": "Splitter", "params": { - "method": "general", - "auto_keywords": 5 + "chunk_token_size": 512, + "delimiters": ["\n"], + "overlapped_percent": 0 } }, "downstream": ["Tokenizer:0"], diff --git a/rag/flow/tests/dsl_examples/hierarchical_merger.json b/rag/flow/tests/dsl_examples/hierarchical_merger.json new file mode 100644 index 000000000..98df8a937 --- /dev/null +++ b/rag/flow/tests/dsl_examples/hierarchical_merger.json @@ -0,0 +1,84 @@ +{ + "components": { + "File": { + "obj":{ + "component_name": "File", + "params": { + } + }, + "downstream": ["Parser:0"], + "upstream": [] + }, + "Parser:0": { + "obj": { + "component_name": "Parser", + "params": { + "setups": { + "pdf": { + "parse_method": "deepdoc", + "vlm_name": "", + "lang": "Chinese", + "suffix": [ + "pdf" + ], + "output_format": "json" + }, + "spreadsheet": { + "suffix": [ + "xls", + "xlsx", + "csv" + ], + "output_format": "html" + }, + "word": { + "suffix": [ + "doc", + "docx" + ], + "output_format": "json" + }, + "markdown": { + "suffix": [ + "md", + "markdown" + ], + "output_format": "text" + }, + "text": { + "suffix": ["txt"], + "output_format": "json" + } + } + } + }, + "downstream": ["Splitter:0"], + "upstream": ["File"] + }, + "Splitter:0": { + "obj": { + "component_name": "Splitter", + "params": { + "chunk_token_size": 512, + "delimiters": ["\r\n"], + "overlapped_percent": 0 + } + }, + "downstream": ["HierarchicalMerger:0"], + "upstream": ["Parser:0"] + }, + "HierarchicalMerger:0": { + "obj": { + "component_name": "HierarchicalMerger", + "params": { + "levels": [["^#[^#]"], ["^##[^#]"], ["^###[^#]"], ["^####[^#]"]], + "hierarchy": 2 + } + }, + "downstream": [], + "upstream": ["Splitter:0"] + } + }, + "path": [] +} + diff --git a/rag/flow/tokenizer/schema.py b/rag/flow/tokenizer/schema.py index d58725171..7ba0c30a6 100644 --- a/rag/flow/tokenizer/schema.py +++ b/rag/flow/tokenizer/schema.py @@ -22,7 +22,7 @@ class TokenizerFromUpstream(BaseModel): elapsed_time: float | None = Field(default=None, alias="_elapsed_time") name: str = "" - blob: bytes + file: dict | None = Field(default=None) output_format: Literal["json", "markdown", "text", "html"] | None = Field(default=None) diff --git a/rag/flow/tokenizer/tokenizer.py b/rag/flow/tokenizer/tokenizer.py index 5b43a9d82..ed30925fd 100644 --- a/rag/flow/tokenizer/tokenizer.py +++ b/rag/flow/tokenizer/tokenizer.py @@ -37,6 +37,7 @@ class TokenizerParam(ProcessParamBase): super().__init__() self.search_method = ["full_text", "embedding"] self.filename_embd_weight = 0.1 + self.fields = ["text"] def check(self): for v in self.search_method: @@ -61,10 +62,14 @@ class Tokenizer(ProcessBase): embedding_model = LLMBundle(self._canvas._tenant_id, LLMType.EMBEDDING, llm_name=embedding_id) texts = [] for c in chunks: - if c.get("questions"): - texts.append("\n".join(c["questions"])) - else: - texts.append(re.sub(r"]{0,12})?>", " ", c["text"])) + txt = "" + for f in self._param.fields: + f = c.get(f) + if isinstance(f, str): + txt += f + elif isinstance(f, list): + txt += "\n".join(f) + texts.append(re.sub(r"]{0,12})?>", " ", txt)) vts, c = embedding_model.encode([name]) token_count += c tts = np.concatenate([vts[0] for _ in range(len(texts))], axis=0) diff --git a/rag/llm/__init__.py b/rag/llm/__init__.py index 14e9a8a19..d91f57736 100644 --- 
a/rag/llm/__init__.py +++ b/rag/llm/__init__.py @@ -37,6 +37,18 @@ class SupportedLiteLLMProvider(StrEnum): TogetherAI = "TogetherAI" Anthropic = "Anthropic" Ollama = "Ollama" + Meituan = "Meituan" + CometAPI = "CometAPI" + SILICONFLOW = "SILICONFLOW" + OpenRouter = "OpenRouter" + StepFun = "StepFun" + PPIO = "PPIO" + PerfXCloud = "PerfXCloud" + Upstage = "Upstage" + NovitaAI = "NovitaAI" + Lingyi_AI = "01.AI" + GiteeAI = "GiteeAI" + AI_302 = "302.AI" FACTORY_DEFAULT_BASE_URL = { @@ -44,6 +56,18 @@ FACTORY_DEFAULT_BASE_URL = { SupportedLiteLLMProvider.Dashscope: "https://dashscope.aliyuncs.com/compatible-mode/v1", SupportedLiteLLMProvider.Moonshot: "https://api.moonshot.cn/v1", SupportedLiteLLMProvider.Ollama: "", + SupportedLiteLLMProvider.Meituan: "https://api.longcat.chat/openai", + SupportedLiteLLMProvider.CometAPI: "https://api.cometapi.com/v1", + SupportedLiteLLMProvider.SILICONFLOW: "https://api.siliconflow.cn/v1", + SupportedLiteLLMProvider.OpenRouter: "https://openrouter.ai/api/v1", + SupportedLiteLLMProvider.StepFun: "https://api.stepfun.com/v1", + SupportedLiteLLMProvider.PPIO: "https://api.ppinfra.com/v3/openai", + SupportedLiteLLMProvider.PerfXCloud: "https://cloud.perfxlab.cn/v1", + SupportedLiteLLMProvider.Upstage: "https://api.upstage.ai/v1/solar", + SupportedLiteLLMProvider.NovitaAI: "https://api.novita.ai/v3/openai", + SupportedLiteLLMProvider.Lingyi_AI: "https://api.lingyiwanwu.com/v1", + SupportedLiteLLMProvider.GiteeAI: "https://ai.gitee.com/v1/", + SupportedLiteLLMProvider.AI_302: "https://api.302.ai/v1", } @@ -62,6 +86,18 @@ LITELLM_PROVIDER_PREFIX = { SupportedLiteLLMProvider.TogetherAI: "together_ai/", SupportedLiteLLMProvider.Anthropic: "", # don't need a prefix SupportedLiteLLMProvider.Ollama: "ollama_chat/", + SupportedLiteLLMProvider.Meituan: "openai/", + SupportedLiteLLMProvider.CometAPI: "openai/", + SupportedLiteLLMProvider.SILICONFLOW: "openai/", + SupportedLiteLLMProvider.OpenRouter: "openai/", + SupportedLiteLLMProvider.StepFun: "openai/", + SupportedLiteLLMProvider.PPIO: "openai/", + SupportedLiteLLMProvider.PerfXCloud: "openai/", + SupportedLiteLLMProvider.Upstage: "openai/", + SupportedLiteLLMProvider.NovitaAI: "openai/", + SupportedLiteLLMProvider.Lingyi_AI: "openai/", + SupportedLiteLLMProvider.GiteeAI: "openai/", + SupportedLiteLLMProvider.AI_302: "openai/", } ChatModel = globals().get("ChatModel", {}) diff --git a/rag/llm/chat_model.py b/rag/llm/chat_model.py index bb04b520c..b43277fc0 100644 --- a/rag/llm/chat_model.py +++ b/rag/llm/chat_model.py @@ -895,25 +895,6 @@ class MistralChat(Base): yield total_tokens -## openrouter -class OpenRouterChat(Base): - _FACTORY_NAME = "OpenRouter" - - def __init__(self, key, model_name, base_url="https://openrouter.ai/api/v1", **kwargs): - if not base_url: - base_url = "https://openrouter.ai/api/v1" - super().__init__(key, model_name, base_url, **kwargs) - - -class StepFunChat(Base): - _FACTORY_NAME = "StepFun" - - def __init__(self, key, model_name, base_url="https://api.stepfun.com/v1", **kwargs): - if not base_url: - base_url = "https://api.stepfun.com/v1" - super().__init__(key, model_name, base_url, **kwargs) - - class LmStudioChat(Base): _FACTORY_NAME = "LM-Studio" @@ -936,15 +917,6 @@ class OpenAI_APIChat(Base): super().__init__(key, model_name, base_url, **kwargs) -class PPIOChat(Base): - _FACTORY_NAME = "PPIO" - - def __init__(self, key, model_name, base_url="https://api.ppinfra.com/v3/openai", **kwargs): - if not base_url: - base_url = "https://api.ppinfra.com/v3/openai" - super().__init__(key, 
model_name, base_url, **kwargs) - - class LeptonAIChat(Base): _FACTORY_NAME = "LeptonAI" @@ -954,60 +926,6 @@ class LeptonAIChat(Base): super().__init__(key, model_name, base_url, **kwargs) -class PerfXCloudChat(Base): - _FACTORY_NAME = "PerfXCloud" - - def __init__(self, key, model_name, base_url="https://cloud.perfxlab.cn/v1", **kwargs): - if not base_url: - base_url = "https://cloud.perfxlab.cn/v1" - super().__init__(key, model_name, base_url, **kwargs) - - -class UpstageChat(Base): - _FACTORY_NAME = "Upstage" - - def __init__(self, key, model_name, base_url="https://api.upstage.ai/v1/solar", **kwargs): - if not base_url: - base_url = "https://api.upstage.ai/v1/solar" - super().__init__(key, model_name, base_url, **kwargs) - - -class NovitaAIChat(Base): - _FACTORY_NAME = "NovitaAI" - - def __init__(self, key, model_name, base_url="https://api.novita.ai/v3/openai", **kwargs): - if not base_url: - base_url = "https://api.novita.ai/v3/openai" - super().__init__(key, model_name, base_url, **kwargs) - - -class SILICONFLOWChat(Base): - _FACTORY_NAME = "SILICONFLOW" - - def __init__(self, key, model_name, base_url="https://api.siliconflow.cn/v1", **kwargs): - if not base_url: - base_url = "https://api.siliconflow.cn/v1" - super().__init__(key, model_name, base_url, **kwargs) - - -class YiChat(Base): - _FACTORY_NAME = "01.AI" - - def __init__(self, key, model_name, base_url="https://api.lingyiwanwu.com/v1", **kwargs): - if not base_url: - base_url = "https://api.lingyiwanwu.com/v1" - super().__init__(key, model_name, base_url, **kwargs) - - -class GiteeChat(Base): - _FACTORY_NAME = "GiteeAI" - - def __init__(self, key, model_name, base_url="https://ai.gitee.com/v1/", **kwargs): - if not base_url: - base_url = "https://ai.gitee.com/v1/" - super().__init__(key, model_name, base_url, **kwargs) - - class ReplicateChat(Base): _FACTORY_NAME = "Replicate" @@ -1347,26 +1265,46 @@ class GPUStackChat(Base): super().__init__(key, model_name, base_url, **kwargs) -class Ai302Chat(Base): - _FACTORY_NAME = "302.AI" +class TokenPonyChat(Base): + _FACTORY_NAME = "TokenPony" - def __init__(self, key, model_name, base_url="https://api.302.ai/v1", **kwargs): + def __init__(self, key, model_name, base_url="https://ragflow.vip-api.tokenpony.cn/v1", **kwargs): if not base_url: - base_url = "https://api.302.ai/v1" - super().__init__(key, model_name, base_url, **kwargs) - - -class MeituanChat(Base): - _FACTORY_NAME = "Meituan" - - def __init__(self, key, model_name, base_url="https://api.longcat.chat/openai", **kwargs): - if not base_url: - base_url = "https://api.longcat.chat/openai" - super().__init__(key, model_name, base_url, **kwargs) + base_url = "https://ragflow.vip-api.tokenpony.cn/v1" class LiteLLMBase(ABC): - _FACTORY_NAME = ["Tongyi-Qianwen", "Bedrock", "Moonshot", "xAI", "DeepInfra", "Groq", "Cohere", "Gemini", "DeepSeek", "NVIDIA", "TogetherAI", "Anthropic", "Ollama"] + _FACTORY_NAME = [ + "Tongyi-Qianwen", + "Bedrock", + "Moonshot", + "xAI", + "DeepInfra", + "Groq", + "Cohere", + "Gemini", + "DeepSeek", + "NVIDIA", + "TogetherAI", + "Anthropic", + "Ollama", + "Meituan", + "CometAPI", + "SILICONFLOW", + "OpenRouter", + "StepFun", + "PPIO", + "PerfXCloud", + "Upstage", + "NovitaAI", + "01.AI", + "GiteeAI", + "302.AI", + ] + + import litellm + + litellm._turn_on_debug() def __init__(self, key, model_name, base_url=None, **kwargs): self.timeout = int(os.environ.get("LM_TIMEOUT_SECONDS", 600)) @@ -1374,7 +1312,7 @@ class LiteLLMBase(ABC): self.prefix = LITELLM_PROVIDER_PREFIX.get(self.provider, "") 
self.model_name = f"{self.prefix}{model_name}" self.api_key = key - self.base_url = (base_url or FACTORY_DEFAULT_BASE_URL.get(self.provider, "")).rstrip('/') + self.base_url = (base_url or FACTORY_DEFAULT_BASE_URL.get(self.provider, "")).rstrip("/") # Configure retry parameters self.max_retries = kwargs.get("max_retries", int(os.environ.get("LLM_MAX_RETRIES", 5))) self.base_delay = kwargs.get("retry_interval", float(os.environ.get("LLM_BASE_DELAY", 2.0))) diff --git a/rag/llm/embedding_model.py b/rag/llm/embedding_model.py index d39e0f0cc..aae5b6902 100644 --- a/rag/llm/embedding_model.py +++ b/rag/llm/embedding_model.py @@ -86,9 +86,10 @@ class DefaultEmbedding(Base): with DefaultEmbedding._model_lock: import torch from FlagEmbedding import FlagModel + if "CUDA_VISIBLE_DEVICES" in os.environ: input_cuda_visible_devices = os.environ["CUDA_VISIBLE_DEVICES"] - os.environ["CUDA_VISIBLE_DEVICES"] = "0" # handle some issues with multiple GPUs when initializing the model + os.environ["CUDA_VISIBLE_DEVICES"] = "0" # handle some issues with multiple GPUs when initializing the model if not DefaultEmbedding._model or model_name != DefaultEmbedding._model_name: try: @@ -145,7 +146,7 @@ class OpenAIEmbed(Base): ress = [] total_tokens = 0 for i in range(0, len(texts), batch_size): - res = self.client.embeddings.create(input=texts[i : i + batch_size], model=self.model_name, encoding_format="float") + res = self.client.embeddings.create(input=texts[i : i + batch_size], model=self.model_name, encoding_format="float", extra_body={"drop_params": True}) try: ress.extend([d.embedding for d in res.data]) total_tokens += self.total_token_count(res) @@ -154,7 +155,7 @@ class OpenAIEmbed(Base): return np.array(ress), total_tokens def encode_queries(self, text): - res = self.client.embeddings.create(input=[truncate(text, 8191)], model=self.model_name, encoding_format="float") + res = self.client.embeddings.create(input=[truncate(text, 8191)], model=self.model_name, encoding_format="float",extra_body={"drop_params": True}) return np.array(res.data[0].embedding), self.total_token_count(res) @@ -472,6 +473,7 @@ class MistralEmbed(Base): def encode(self, texts: list): import time import random + texts = [truncate(t, 8196) for t in texts] batch_size = 16 ress = [] @@ -495,6 +497,7 @@ class MistralEmbed(Base): def encode_queries(self, text): import time import random + retry_max = 5 while retry_max > 0: try: @@ -659,7 +662,7 @@ class OpenAI_APIEmbed(OpenAIEmbed): def __init__(self, key, model_name, base_url): if not base_url: raise ValueError("url cannot be None") - base_url = urljoin(base_url, "v1") + #base_url = urljoin(base_url, "v1") self.client = OpenAI(api_key=key, base_url=base_url) self.model_name = model_name.split("___")[0] @@ -751,7 +754,11 @@ class SILICONFLOWEmbed(Base): token_count = 0 for i in range(0, len(texts), batch_size): texts_batch = texts[i : i + batch_size] - texts_batch = [" " if not text.strip() else text for text in texts_batch] + if self.model_name in ["BAAI/bge-large-zh-v1.5", "BAAI/bge-large-en-v1.5"]: + # limit 512, 340 is almost safe + texts_batch = [" " if not text.strip() else truncate(text, 340) for text in texts_batch] + else: + texts_batch = [" " if not text.strip() else text for text in texts_batch] payload = { "model": self.model_name, @@ -938,6 +945,7 @@ class GiteeEmbed(SILICONFLOWEmbed): base_url = "https://ai.gitee.com/v1/embeddings" super().__init__(key, model_name, base_url) + class DeepInfraEmbed(OpenAIEmbed): _FACTORY_NAME = "DeepInfra" @@ -954,3 +962,12 @@ class 
Ai302Embed(Base): if not base_url: base_url = "https://api.302.ai/v1/embeddings" super().__init__(key, model_name, base_url) + + +class CometEmbed(OpenAIEmbed): + _FACTORY_NAME = "CometAPI" + + def __init__(self, key, model_name, base_url="https://api.cometapi.com/v1"): + if not base_url: + base_url = "https://api.cometapi.com/v1" + super().__init__(key, model_name, base_url) diff --git a/rag/llm/sequence2txt_model.py b/rag/llm/sequence2txt_model.py index 95203cace..b2d1a5aaa 100644 --- a/rag/llm/sequence2txt_model.py +++ b/rag/llm/sequence2txt_model.py @@ -218,7 +218,7 @@ class GPUStackSeq2txt(Base): class GiteeSeq2txt(Base): _FACTORY_NAME = "GiteeAI" - def __init__(self, key, model_name="whisper-1", base_url="https://ai.gitee.com/v1/"): + def __init__(self, key, model_name="whisper-1", base_url="https://ai.gitee.com/v1/", **kwargs): if not base_url: base_url = "https://ai.gitee.com/v1/" self.client = OpenAI(api_key=key, base_url=base_url) @@ -234,3 +234,13 @@ class DeepInfraSeq2txt(Base): self.client = OpenAI(api_key=key, base_url=base_url) self.model_name = model_name + + +class CometSeq2txt(Base): + _FACTORY_NAME = "CometAPI" + + def __init__(self, key, model_name="whisper-1", base_url="https://api.cometapi.com/v1", **kwargs): + if not base_url: + base_url = "https://api.cometapi.com/v1" + self.client = OpenAI(api_key=key, base_url=base_url) + self.model_name = model_name diff --git a/rag/llm/tts_model.py b/rag/llm/tts_model.py index 9520cbbbf..e55d11141 100644 --- a/rag/llm/tts_model.py +++ b/rag/llm/tts_model.py @@ -394,3 +394,11 @@ class DeepInfraTTS(OpenAITTS): if not base_url: base_url = "https://api.deepinfra.com/v1/openai" super().__init__(key, model_name, base_url, **kwargs) + +class CometAPITTS(OpenAITTS): + _FACTORY_NAME = "CometAPI" + + def __init__(self, key, model_name, base_url="https://api.cometapi.com/v1", **kwargs): + if not base_url: + base_url = "https://api.cometapi.com/v1" + super().__init__(key, model_name, base_url, **kwargs) diff --git a/rag/prompts/prompts.py b/rag/prompts/prompts.py index 13ea801b0..cc23da6ba 100644 --- a/rag/prompts/prompts.py +++ b/rag/prompts/prompts.py @@ -436,4 +436,217 @@ def gen_meta_filter(chat_mdl, meta_data:dict, query: str) -> list: return ans except Exception: logging.exception(f"Loading json failure: {ans}") - return [] \ No newline at end of file + return [] + + +def gen_json(system_prompt:str, user_prompt:str, chat_mdl): + _, msg = message_fit_in(form_message(system_prompt, user_prompt), chat_mdl.max_length) + ans = chat_mdl.chat(msg[0]["content"], msg[1:]) + ans = re.sub(r"(^.*|```json\n|```\n*$)", "", ans, flags=re.DOTALL) + try: + return json_repair.loads(ans) + except Exception: + logging.exception(f"Loading json failure: {ans}") + + +TOC_DETECTION = load_prompt("toc_detection") +def detect_table_of_contents(page_1024:list[str], chat_mdl): + toc_secs = [] + for i, sec in enumerate(page_1024[:22]): + ans = gen_json(PROMPT_JINJA_ENV.from_string(TOC_DETECTION).render(page_txt=sec), "Only JSON please.", chat_mdl) + if toc_secs and not ans["exists"]: + break + toc_secs.append(sec) + return toc_secs + + +TOC_EXTRACTION = load_prompt("toc_extraction") +TOC_EXTRACTION_CONTINUE = load_prompt("toc_extraction_continue") +def extract_table_of_contents(toc_pages, chat_mdl): + if not toc_pages: + return [] + + return gen_json(PROMPT_JINJA_ENV.from_string(TOC_EXTRACTION).render(toc_page="\n".join(toc_pages)), "Only JSON please.", chat_mdl) + + +def toc_index_extractor(toc:list[dict], content:str, chat_mdl): + tob_extractor_prompt = """ + 
You are given a table of contents in a json format and several pages of a document, your job is to add the physical_index to the table of contents in the json format. + + The provided pages contains tags like and to indicate the physical location of the page X. + + The structure variable is the numeric system which represents the index of the hierarchy section in the table of contents. For example, the first section has structure index 1, the first subsection has structure index 1.1, the second subsection has structure index 1.2, etc. + + The response should be in the following JSON format: + [ + { + "structure": (string), + "title": , + "physical_index": "<physical_index_X>" (keep the format) + }, + ... + ] + + Only add the physical_index to the sections that are in the provided pages. + If the title of the section are not in the provided pages, do not add the physical_index to it. + Directly return the final JSON structure. Do not output anything else.""" + + prompt = tob_extractor_prompt + '\nTable of contents:\n' + json.dumps(toc, ensure_ascii=False, indent=2) + '\nDocument pages:\n' + content + return gen_json(prompt, "Only JSON please.", chat_mdl) + + +TOC_INDEX = load_prompt("toc_index") +def table_of_contents_index(toc_arr: list[dict], sections: list[str], chat_mdl): + if not toc_arr or not sections: + return [] + + toc_map = {} + for i, it in enumerate(toc_arr): + k1 = (it["structure"]+it["title"]).replace(" ", "") + k2 = it["title"].strip() + if k1 not in toc_map: + toc_map[k1] = [] + if k2 not in toc_map: + toc_map[k2] = [] + toc_map[k1].append(i) + toc_map[k2].append(i) + + for it in toc_arr: + it["indices"] = [] + for i, sec in enumerate(sections): + sec = sec.strip() + if sec.replace(" ", "") in toc_map: + for j in toc_map[sec.replace(" ", "")]: + toc_arr[j]["indices"].append(i) + + all_pathes = [] + def dfs(start, path): + nonlocal all_pathes + if start >= len(toc_arr): + if path: + all_pathes.append(path) + return + if not toc_arr[start]["indices"]: + dfs(start+1, path) + return + added = False + for j in toc_arr[start]["indices"]: + if path and j < path[-1][0]: + continue + _path = deepcopy(path) + _path.append((j, start)) + added = True + dfs(start+1, _path) + if not added and path: + all_pathes.append(path) + + dfs(0, []) + path = max(all_pathes, key=lambda x:len(x)) + for it in toc_arr: + it["indices"] = [] + for j, i in path: + toc_arr[i]["indices"] = [j] + print(json.dumps(toc_arr, ensure_ascii=False, indent=2)) + + i = 0 + while i < len(toc_arr): + it = toc_arr[i] + if it["indices"]: + i += 1 + continue + + if i>0 and toc_arr[i-1]["indices"]: + st_i = toc_arr[i-1]["indices"][-1] + else: + st_i = 0 + e = i + 1 + while e <len(toc_arr) and not toc_arr[e]["indices"]: + e += 1 + if e >= len(toc_arr): + e = len(sections) + else: + e = toc_arr[e]["indices"][0] + + for j in range(st_i, min(e+1, len(sections))): + ans = gen_json(PROMPT_JINJA_ENV.from_string(TOC_INDEX).render( + structure=it["structure"], + title=it["title"], + text=sections[j]), "Only JSON please.", chat_mdl) + if ans["exist"] == "yes": + it["indices"].append(j) + break + + i += 1 + + return toc_arr + + +def check_if_toc_transformation_is_complete(content, toc, chat_mdl): + prompt = """ + You are given a raw table of contents and a table of contents. + Your job is to check if the table of contents is complete. + + Reply format: + {{ + "thinking": <why do you think the cleaned table of contents is complete or not> + "completed": "yes" or "no" + }} + Directly return the final JSON structure. 
Do not output anything else.""" + + prompt = prompt + '\n Raw Table of contents:\n' + content + '\n Cleaned Table of contents:\n' + toc + response = gen_json(prompt, "Only JSON please.", chat_mdl) + return response['completed'] + + +def toc_transformer(toc_pages, chat_mdl): + init_prompt = """ + You are given a table of contents, You job is to transform the whole table of content into a JSON format included table_of_contents. + + The `structure` is the numeric system which represents the index of the hierarchy section in the table of contents. For example, the first section has structure index 1, the first subsection has structure index 1.1, the second subsection has structure index 1.2, etc. + The `title` is a short phrase or a several-words term. + + The response should be in the following JSON format: + [ + { + "structure": <structure index, "x.x.x" or None> (string), + "title": <title of the section> + }, + ... + ], + You should transform the full table of contents in one go. + Directly return the final JSON structure, do not output anything else. """ + + toc_content = "\n".join(toc_pages) + prompt = init_prompt + '\n Given table of contents\n:' + toc_content + def clean_toc(arr): + for a in arr: + a["title"] = re.sub(r"[.·….]{2,}", "", a["title"]) + last_complete = gen_json(prompt, "Only JSON please.", chat_mdl) + if_complete = check_if_toc_transformation_is_complete(toc_content, json.dumps(last_complete, ensure_ascii=False, indent=2), chat_mdl) + clean_toc(last_complete) + if if_complete == "yes": + return last_complete + + while not (if_complete == "yes"): + prompt = f""" + Your task is to continue the table of contents json structure, directly output the remaining part of the json structure. + The response should be in the following JSON format: + + The raw table of contents json structure is: + {toc_content} + + The incomplete transformed table of contents json structure is: + {json.dumps(last_complete[-24:], ensure_ascii=False, indent=2)} + + Please continue the json structure, directly output the remaining part of the json structure.""" + new_complete = gen_json(prompt, "Only JSON please.", chat_mdl) + if not new_complete or str(last_complete).find(str(new_complete)) >= 0: + break + clean_toc(new_complete) + last_complete.extend(new_complete) + if_complete = check_if_toc_transformation_is_complete(toc_content, json.dumps(last_complete, ensure_ascii=False, indent=2), chat_mdl) + + return last_complete + + + diff --git a/rag/prompts/toc_detection.md b/rag/prompts/toc_detection.md new file mode 100644 index 000000000..29e068a7a --- /dev/null +++ b/rag/prompts/toc_detection.md @@ -0,0 +1,29 @@ +You are an AI assistant designed to analyze text content and detect whether a table of contents (TOC) list exists on the given page. Follow these steps: + +1. **Analyze the Input**: Carefully review the provided text content. +2. **Identify Key Features**: Look for common indicators of a TOC, such as: + - Section titles or headings paired with page numbers. + - Patterns like repeated formatting (e.g., bold/italicized text, dots/dashes between titles and numbers). + - Phrases like "Table of Contents," "Contents," or similar headings. + - Logical grouping of topics/subtopics with sequential page references. +3. **Discern Negative Features**: + - The text contains no numbers, or the numbers present are clearly not page references (e.g., dates, statistical figures, phone numbers, version numbers). 
+ - The text consists of full, descriptive sentences and paragraphs that form a narrative, present arguments, or explain concepts, rather than succinctly listing topics. + - Contains citations with authors, publication years, journal titles, and page ranges (e.g., "Smith, J. (2020). Journal Title, 10(2), 45-67."). + - Lists keywords or terms followed by multiple page numbers, often in alphabetical order. + - Comprises terms followed by their definitions or explanations. + - Labeled with headers like "Appendix A," "Appendix B," etc. + - Contains expressive language thanking individuals or organizations for their support or contributions. +4. **Evaluate Evidence**: Weigh the presence/absence of these features to determine if the content resembles a TOC. +5. **Output Format**: Provide your response in the following JSON structure: + ```json + { + "reasoning": "Step-by-step explanation of your analysis based on the features identified." , + "exists": true/false + } + ``` +6. **DO NOT** output anything else except JSON structure. + +**Input text Content ( Text-Only Extraction ):** +{{ page_txt }} + diff --git a/rag/prompts/toc_extraction.md b/rag/prompts/toc_extraction.md new file mode 100644 index 000000000..02e1d031f --- /dev/null +++ b/rag/prompts/toc_extraction.md @@ -0,0 +1,53 @@ +You are an expert parser and data formatter. Your task is to analyze the provided table of contents (TOC) text and convert it into a valid JSON array of objects. + +**Instructions:** +1. Analyze each line of the input TOC. +2. For each line, extract the following three pieces of information: + * `structure`: The hierarchical index/numbering (e.g., "1", "2.1", "3.2.5", "A.1"). If a line has no visible numbering or structure indicator (like a main "Chapter" title), use `null`. + * `title`: The textual title of the section or chapter. This should be the main descriptive text, clean and without the page number. +3. Output **only** a valid JSON array. Do not include any other text, explanations, or markdown code block fences (like ```json) in your response. + +**JSON Format:** +The output must be a list of objects following this exact schema: +```json +[ + { + "structure": <structure index, "x.x.x" or None> (string), + "title": <title of the section> + }, + ... +] +``` + +**Input Example:** +``` +Contents +1 Introduction to the System ... 1 +1.1 Overview .... 2 +1.2 Key Features .... 5 +2 Installation Guide ....8 +2.1 Prerequisites ........ 9 +2.2 Step-by-Step Process ........ 12 +Appendix A: Specifications ..... 45 +References ... 47 +``` + +**Expected Output For The Example:** +```json +[ + {"structure": null, "title": "Contents"}, + {"structure": "1", "title": "Introduction to the System"}, + {"structure": "1.1", "title": "Overview"}, + {"structure": "1.2", "title": "Key Features"}, + {"structure": "2", "title": "Installation Guide"}, + {"structure": "2.1", "title": "Prerequisites"}, + {"structure": "2.2", "title": "Step-by-Step Process"}, + {"structure": "A", "title": "Specifications"}, + {"structure": null, "title": "References"} +] +``` + +**Now, process the following TOC input:** +``` +{{ toc_page }} +``` \ No newline at end of file diff --git a/rag/prompts/toc_extraction_continue.md b/rag/prompts/toc_extraction_continue.md new file mode 100644 index 000000000..433ac68ad --- /dev/null +++ b/rag/prompts/toc_extraction_continue.md @@ -0,0 +1,60 @@ +You are an expert parser and data formatter, currently in the process of building a JSON array from a multi-page table of contents (TOC). 
Your task is to analyze the new page of content and **append** the new entries to the existing JSON array. + +**Instructions:** +1. You will be given two inputs: + * `current_page_text`: The text content from the new page of the TOC. + * `existing_json`: The valid JSON array you have generated from the previous pages. +2. Analyze each line of the `current_page_text` input. +3. For each new line, extract the following three pieces of information: + * `structure`: The hierarchical index/numbering (e.g., "1", "2.1", "3.2.5"). Use `null` if none exists. + * `title`: The clean textual title of the section or chapter. + * `page`: The page number on which the section starts. Extract only the number. Use `null` if not present. +4. **Append these new entries** to the `existing_json` array. Do not modify, reorder, or delete any of the existing entries. +5. Output **only** the complete, updated JSON array. Do not include any other text, explanations, or markdown code block fences (like ```json). + +**JSON Format:** +The output must be a valid JSON array following this schema: +```json +[ + { + "structure": <string or null>, + "title": <string>, + "page": <number or null> + }, + ... +] +``` + +**Input Example:** +`current_page_text`: +``` +3.2 Advanced Configuration ........... 25 +3.3 Troubleshooting .................. 28 +4 User Management .................... 30 +``` + +`existing_json`: +```json +[ + {"structure": "1", "title": "Introduction", "page": 1}, + {"structure": "2", "title": "Installation", "page": 5}, + {"structure": "3", "title": "Configuration", "page": 12}, + {"structure": "3.1", "title": "Basic Setup", "page": 15} +] +``` + +**Expected Output For The Example:** +```json +[ + {"structure": "3.2", "title": "Advanced Configuration", "page": 25}, + {"structure": "3.3", "title": "Troubleshooting", "page": 28}, + {"structure": "4", "title": "User Management", "page": 30} +] +``` + +**Now, process the following inputs:** +`current_page_text`: +{{ toc_page }} + +`existing_json`: +{{ toc_json }} \ No newline at end of file diff --git a/rag/prompts/toc_index.md b/rag/prompts/toc_index.md new file mode 100644 index 000000000..860356d50 --- /dev/null +++ b/rag/prompts/toc_index.md @@ -0,0 +1,20 @@ +You are an expert analyst tasked with matching text content to the title. + +**Instructions:** +1. Analyze the given title with its numeric structure index and the provided text. +2. Determine whether the title is mentioned as a section tile in the given text. +3. Provide a concise, step-by-step reasoning for your decision. +4. Output **only** the complete JSON object. Do not include any other text, explanations, or markdown code block fences (like ```json). 
+ +**Output Format:** +Your output must be a valid JSON object with the following keys: +{ +"reasoning": "Step-by-step explanation of your analysis.", +"exist": "<yes or no>", +} + +** The title: ** +{{ structure }} {{ title }} + +** Given text: ** +{{ text }} \ No newline at end of file diff --git a/rag/svr/task_executor.py b/rag/svr/task_executor.py index 84c73d2b6..51c9c6e8e 100644 --- a/rag/svr/task_executor.py +++ b/rag/svr/task_executor.py @@ -23,6 +23,7 @@ import time from api.utils import get_uuid from api.utils.api_utils import timeout +from api.utils.base64_image import image2id from api.utils.log_utils import init_root_logger, get_project_base_directory from graphrag.general.index import run_graphrag from graphrag.utils import get_llm_cache, set_llm_cache, get_tags_from_cache, set_tags_to_cache @@ -37,7 +38,6 @@ import xxhash import copy import re from functools import partial -from io import BytesIO from multiprocessing.context import TimeoutError from timeit import default_timer as timer import tracemalloc @@ -301,30 +301,8 @@ async def build_chunks(task, progress_callback): d["img_id"] = "" docs.append(d) return - - with BytesIO() as output_buffer: - if isinstance(d["image"], bytes): - output_buffer.write(d["image"]) - output_buffer.seek(0) - else: - # If the image is in RGBA mode, convert it to RGB mode before saving it in JPEG format. - if d["image"].mode in ("RGBA", "P"): - converted_image = d["image"].convert("RGB") - #d["image"].close() # Close original image - d["image"] = converted_image - try: - d["image"].save(output_buffer, format='JPEG') - except OSError as e: - logging.warning( - "Saving image of chunk {}/{}/{} got exception, ignore: {}".format(task["location"], task["name"], d["id"], str(e))) - - async with minio_limiter: - await trio.to_thread.run_sync(lambda: STORAGE_IMPL.put(task["kb_id"], d["id"], output_buffer.getvalue())) - d["img_id"] = "{}-{}".format(task["kb_id"], d["id"]) - if not isinstance(d["image"], bytes): - d["image"].close() - del d["image"] # Remove image reference - docs.append(d) + await image2id(d, partial(STORAGE_IMPL.put), task["kb_id"], d["id"]) + docs.append(d) except Exception: logging.exception( "Saving image of chunk {}/{}/{} got exception".format(task["location"], task["name"], d["id"])) diff --git a/sandbox/sandbox_base_image/nodejs/package-lock.json b/sandbox/sandbox_base_image/nodejs/package-lock.json index 6aa834100..d59ae603d 100644 --- a/sandbox/sandbox_base_image/nodejs/package-lock.json +++ b/sandbox/sandbox_base_image/nodejs/package-lock.json @@ -14,24 +14,24 @@ }, "node_modules/asynckit": { "version": "0.4.0", - "resolved": "https://registry.npmmirror.com/asynckit/-/asynckit-0.4.0.tgz", + "resolved": "https://registry.npmjs.org/asynckit/-/asynckit-0.4.0.tgz", "integrity": "sha512-Oei9OH4tRh0YqU3GxhX79dM/mwVgvbZJaSNaRk+bshkj0S5cfHcgYakreBjrHwatXKbz+IoIdYLxrKim2MjW0Q==", "license": "MIT" }, "node_modules/axios": { - "version": "1.9.0", - "resolved": "https://registry.npmmirror.com/axios/-/axios-1.9.0.tgz", - "integrity": "sha512-re4CqKTJaURpzbLHtIi6XpDv20/CnpXOtjRY5/CU32L8gU8ek9UIivcfvSWvmKEngmVbrUtPpdDwWDWL7DNHvg==", + "version": "1.12.0", + "resolved": "https://registry.npmjs.org/axios/-/axios-1.12.0.tgz", + "integrity": "sha512-oXTDccv8PcfjZmPGlWsPSwtOJCZ/b6W5jAMCNcfwJbCzDckwG0jrYJFaWH1yvivfCXjVzV/SPDEhMB3Q+DSurg==", "license": "MIT", "dependencies": { "follow-redirects": "^1.15.6", - "form-data": "^4.0.0", + "form-data": "^4.0.4", "proxy-from-env": "^1.1.0" } }, "node_modules/call-bind-apply-helpers": { "version": "1.0.2", 
- "resolved": "https://registry.npmmirror.com/call-bind-apply-helpers/-/call-bind-apply-helpers-1.0.2.tgz", + "resolved": "https://registry.npmjs.org/call-bind-apply-helpers/-/call-bind-apply-helpers-1.0.2.tgz", "integrity": "sha512-Sp1ablJ0ivDkSzjcaJdxEunN5/XvksFJ2sMBFfq6x0ryhQV/2b/KwFe21cMpmHtPOSij8K99/wSfoEuTObmuMQ==", "license": "MIT", "dependencies": { @@ -44,7 +44,7 @@ }, "node_modules/combined-stream": { "version": "1.0.8", - "resolved": "https://registry.npmmirror.com/combined-stream/-/combined-stream-1.0.8.tgz", + "resolved": "https://registry.npmjs.org/combined-stream/-/combined-stream-1.0.8.tgz", "integrity": "sha512-FQN4MRfuJeHf7cBbBMJFXhKSDq+2kAArBlmRBvcvFE5BB1HZKXtSFASDhdlz9zOYwxh8lDdnvmMOe/+5cdoEdg==", "license": "MIT", "dependencies": { @@ -56,7 +56,7 @@ }, "node_modules/delayed-stream": { "version": "1.0.0", - "resolved": "https://registry.npmmirror.com/delayed-stream/-/delayed-stream-1.0.0.tgz", + "resolved": "https://registry.npmjs.org/delayed-stream/-/delayed-stream-1.0.0.tgz", "integrity": "sha512-ZySD7Nf91aLB0RxL4KGrKHBXl7Eds1DAmEdcoVawXnLD7SDhpNgtuII2aAkg7a7QS41jxPSZ17p4VdGnMHk3MQ==", "license": "MIT", "engines": { @@ -65,7 +65,7 @@ }, "node_modules/dunder-proto": { "version": "1.0.1", - "resolved": "https://registry.npmmirror.com/dunder-proto/-/dunder-proto-1.0.1.tgz", + "resolved": "https://registry.npmjs.org/dunder-proto/-/dunder-proto-1.0.1.tgz", "integrity": "sha512-KIN/nDJBQRcXw0MLVhZE9iQHmG68qAVIBg9CqmUYjmQIhgij9U5MFvrqkUL5FbtyyzZuOeOt0zdeRe4UY7ct+A==", "license": "MIT", "dependencies": { @@ -79,7 +79,7 @@ }, "node_modules/es-define-property": { "version": "1.0.1", - "resolved": "https://registry.npmmirror.com/es-define-property/-/es-define-property-1.0.1.tgz", + "resolved": "https://registry.npmjs.org/es-define-property/-/es-define-property-1.0.1.tgz", "integrity": "sha512-e3nRfgfUZ4rNGL232gUgX06QNyyez04KdjFrF+LTRoOXmrOgFKDg4BCdsjW8EnT69eqdYGmRpJwiPVYNrCaW3g==", "license": "MIT", "engines": { @@ -88,7 +88,7 @@ }, "node_modules/es-errors": { "version": "1.3.0", - "resolved": "https://registry.npmmirror.com/es-errors/-/es-errors-1.3.0.tgz", + "resolved": "https://registry.npmjs.org/es-errors/-/es-errors-1.3.0.tgz", "integrity": "sha512-Zf5H2Kxt2xjTvbJvP2ZWLEICxA6j+hAmMzIlypy4xcBg1vKVnx89Wy0GbS+kf5cwCVFFzdCFh2XSCFNULS6csw==", "license": "MIT", "engines": { @@ -97,7 +97,7 @@ }, "node_modules/es-object-atoms": { "version": "1.1.1", - "resolved": "https://registry.npmmirror.com/es-object-atoms/-/es-object-atoms-1.1.1.tgz", + "resolved": "https://registry.npmjs.org/es-object-atoms/-/es-object-atoms-1.1.1.tgz", "integrity": "sha512-FGgH2h8zKNim9ljj7dankFPcICIK9Cp5bm+c2gQSYePhpaG5+esrLODihIorn+Pe6FGJzWhXQotPv73jTaldXA==", "license": "MIT", "dependencies": { @@ -109,7 +109,7 @@ }, "node_modules/es-set-tostringtag": { "version": "2.1.0", - "resolved": "https://registry.npmmirror.com/es-set-tostringtag/-/es-set-tostringtag-2.1.0.tgz", + "resolved": "https://registry.npmjs.org/es-set-tostringtag/-/es-set-tostringtag-2.1.0.tgz", "integrity": "sha512-j6vWzfrGVfyXxge+O0x5sh6cvxAog0a/4Rdd2K36zCMV5eJ+/+tOAngRO8cODMNWbVRdVlmGZQL2YS3yR8bIUA==", "license": "MIT", "dependencies": { @@ -143,14 +143,15 @@ } }, "node_modules/form-data": { - "version": "4.0.2", - "resolved": "https://registry.npmmirror.com/form-data/-/form-data-4.0.2.tgz", - "integrity": "sha512-hGfm/slu0ZabnNt4oaRZ6uREyfCj6P4fT/n6A1rGV+Z0VdGXjfOhVUpkn6qVQONHGIFwmveGXyDs75+nr6FM8w==", + "version": "4.0.4", + "resolved": "https://registry.npmjs.org/form-data/-/form-data-4.0.4.tgz", + "integrity": 
"sha512-KrGhL9Q4zjj0kiUt5OO4Mr/A/jlI2jDYs5eHBpYHPcBEVSiipAvn2Ko2HnPe20rmcuuvMHNdZFp+4IlGTMF0Ow==", "license": "MIT", "dependencies": { "asynckit": "^0.4.0", "combined-stream": "^1.0.8", "es-set-tostringtag": "^2.1.0", + "hasown": "^2.0.2", "mime-types": "^2.1.12" }, "engines": { @@ -159,7 +160,7 @@ }, "node_modules/function-bind": { "version": "1.1.2", - "resolved": "https://registry.npmmirror.com/function-bind/-/function-bind-1.1.2.tgz", + "resolved": "https://registry.npmjs.org/function-bind/-/function-bind-1.1.2.tgz", "integrity": "sha512-7XHNxH7qX9xG5mIwxkhumTox/MIRNcOgDrxWsMt2pAr23WHp6MrRlN7FBSFpCpr+oVO0F744iUgR82nJMfG2SA==", "license": "MIT", "funding": { @@ -168,7 +169,7 @@ }, "node_modules/get-intrinsic": { "version": "1.3.0", - "resolved": "https://registry.npmmirror.com/get-intrinsic/-/get-intrinsic-1.3.0.tgz", + "resolved": "https://registry.npmjs.org/get-intrinsic/-/get-intrinsic-1.3.0.tgz", "integrity": "sha512-9fSjSaos/fRIVIp+xSJlE6lfwhES7LNtKaCBIamHsjr2na1BiABJPo0mOjjz8GJDURarmCPGqaiVg5mfjb98CQ==", "license": "MIT", "dependencies": { @@ -192,7 +193,7 @@ }, "node_modules/get-proto": { "version": "1.0.1", - "resolved": "https://registry.npmmirror.com/get-proto/-/get-proto-1.0.1.tgz", + "resolved": "https://registry.npmjs.org/get-proto/-/get-proto-1.0.1.tgz", "integrity": "sha512-sTSfBjoXBp89JvIKIefqw7U2CCebsc74kiY6awiGogKtoSGbgjYE/G/+l9sF3MWFPNc9IcoOC4ODfKHfxFmp0g==", "license": "MIT", "dependencies": { @@ -205,7 +206,7 @@ }, "node_modules/gopd": { "version": "1.2.0", - "resolved": "https://registry.npmmirror.com/gopd/-/gopd-1.2.0.tgz", + "resolved": "https://registry.npmjs.org/gopd/-/gopd-1.2.0.tgz", "integrity": "sha512-ZUKRh6/kUFoAiTAtTYPZJ3hw9wNxx+BIBOijnlG9PnrJsCcSjs1wyyD6vJpaYtgnzDrKYRSqf3OO6Rfa93xsRg==", "license": "MIT", "engines": { @@ -217,7 +218,7 @@ }, "node_modules/has-symbols": { "version": "1.1.0", - "resolved": "https://registry.npmmirror.com/has-symbols/-/has-symbols-1.1.0.tgz", + "resolved": "https://registry.npmjs.org/has-symbols/-/has-symbols-1.1.0.tgz", "integrity": "sha512-1cDNdwJ2Jaohmb3sg4OmKaMBwuC48sYni5HUw2DvsC8LjGTLK9h+eb1X6RyuOHe4hT0ULCW68iomhjUoKUqlPQ==", "license": "MIT", "engines": { @@ -229,7 +230,7 @@ }, "node_modules/has-tostringtag": { "version": "1.0.2", - "resolved": "https://registry.npmmirror.com/has-tostringtag/-/has-tostringtag-1.0.2.tgz", + "resolved": "https://registry.npmjs.org/has-tostringtag/-/has-tostringtag-1.0.2.tgz", "integrity": "sha512-NqADB8VjPFLM2V0VvHUewwwsw0ZWBaIdgo+ieHtK3hasLz4qeCRjYcqfB6AQrBggRKppKF8L52/VqdVsO47Dlw==", "license": "MIT", "dependencies": { @@ -244,7 +245,7 @@ }, "node_modules/hasown": { "version": "2.0.2", - "resolved": "https://registry.npmmirror.com/hasown/-/hasown-2.0.2.tgz", + "resolved": "https://registry.npmjs.org/hasown/-/hasown-2.0.2.tgz", "integrity": "sha512-0hJU9SCPvmMzIBdZFqNPXWa6dqh7WdH0cII9y+CyS8rG3nL48Bclra9HmKhVVUHyPWNH5Y7xDwAB7bfgSjkUMQ==", "license": "MIT", "dependencies": { @@ -256,7 +257,7 @@ }, "node_modules/math-intrinsics": { "version": "1.1.0", - "resolved": "https://registry.npmmirror.com/math-intrinsics/-/math-intrinsics-1.1.0.tgz", + "resolved": "https://registry.npmjs.org/math-intrinsics/-/math-intrinsics-1.1.0.tgz", "integrity": "sha512-/IXtbwEk5HTPyEwyKX6hGkYXxM9nbj64B+ilVJnC/R6B0pH5G4V3b0pVbL7DBj4tkhBAppbQUlf6F6Xl9LHu1g==", "license": "MIT", "engines": { @@ -265,7 +266,7 @@ }, "node_modules/mime-db": { "version": "1.52.0", - "resolved": "https://registry.npmmirror.com/mime-db/-/mime-db-1.52.0.tgz", + "resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.52.0.tgz", 
"integrity": "sha512-sPU4uV7dYlvtWJxwwxHD0PuihVNiE7TyAbQ5SWxDCB9mUYvOgroQOwYQQOKPJ8CIbE+1ETVlOoK1UC2nU3gYvg==", "license": "MIT", "engines": { @@ -274,7 +275,7 @@ }, "node_modules/mime-types": { "version": "2.1.35", - "resolved": "https://registry.npmmirror.com/mime-types/-/mime-types-2.1.35.tgz", + "resolved": "https://registry.npmjs.org/mime-types/-/mime-types-2.1.35.tgz", "integrity": "sha512-ZDY+bPm5zTTF+YpCrAU9nK0UgICYPT0QtT1NZWFv4s++TNkcgVaT0g6+4R2uI4MjQjzysHB1zxuWL50hzaeXiw==", "license": "MIT", "dependencies": { diff --git a/web/src/assets/svg/llm/cometapi.svg b/web/src/assets/svg/llm/cometapi.svg new file mode 100644 index 000000000..8d9811864 --- /dev/null +++ b/web/src/assets/svg/llm/cometapi.svg @@ -0,0 +1,6 @@ +<?xml version="1.0" encoding="UTF-8"?> +<!-- Generated by Pixelmator Pro 3.5.5 --> +<svg width="512" height="512" viewBox="0 0 512 512" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink"> + <image id="image1" x="42" y="15" width="429" height="482" xlink:href="data:image/png;base64, iVBORw0KGgoAAAANSUhEUgAAAa0AAAHiCAYAAABbWJOOAAAAAXNSR0IArs4c6QAAAERlWElmTU0AKgAAAAgAAYdpAAQAAAABAAAAGgAAAAAAA6ABAAMAAAABAAEAAKACAAQAAAABAAABraADAAQAAAABAAAB4gAAAACzb9T2AABAAElEQVR4Aey92Y8dV5avt+MMmSdnJpPzPEksUUNJVaWqWz3cqoYBA9dovxguw0Bf4PrNj/4Tino34Id+MdowbgOGB4gPhoGC+7F1G+7bt7tLVV0TqyRRFCXOTI45Z54h/H078lAkRVEczokTJxlbCp4xT+xYsff+7bXWb62VhLKVEiglsPUl8H5aDeEfRsLonumwHraFMLI9JOloTy88ba+HSnU5JM17oZ3eDeHDpfCTn3RCkqQ9PU/5Yy+1BJKX+urLiy8lMCwSOH26En7000qYv1QP9dZoqIyPhla7EZqro6FWqQAWtZB2RgJPQrNdDdUR3gOUOmkjXmLaroSkUgtJGAdEJkKaTAFa9Z5efpI2Q6eyFkJ7JXTCcqhUVkLKf7Vqm761QqXD5zwmvG5XmiG0NkLotEOn1gqhuhFaa+thLFkP42PN0F5shaVftkrQ6+kd2hI/VoLWlriN5UVsAQkkIUUhee895uRPQzh1hsefZJe18wOeH6mF1dZIaI5OhnZthu9uBxzmQrvF80o9VKvjvDcdKskIAIEG5WOYBaBmsx9J+Y20AqgBXIBVGvyc1z1sCVCVJC1+txlByuecjD5tAFarnG2JPqzwHq8Tnif3AC20s7ASWpVF/vZOqHKkaythpLFMz1bC6h1+41Q79vIn/PJ7PPspjzauKP6+D2V7aSQQb/tLc7XlhZYSKKIEUgDlZx+OheaeMZZjjnSMbmZHJzSiGa9aAWTQrJKAllSZZrGe4Tvb+O4UhxrTGIv+JKAwwiOgxGMIfseD5lQXuBIOwCsBKlKf97AloK7/CV5fPnqCJqddjYCVpmhiFTUswQugwliZBt7bBLE0XQCMV9EKMTOirXX8rLbKta/zO6thlGOdY3VpLeyZXQv3djfBdoDN85ZmSIW91VtvB+1Wl1Z5faUEnk8CSTgNYPwIoJg/Wwk7x9E5RithtYams4ivaQRzXgc/U7oNDQTtqAIYtWcxr6FFJQBUe4qFvsFfY+6LYIaJDzNfipkvdAC5pMqCDVhhCsQYx+9gKkwyU2Dge4NvakoAFXCjlqVJMCSAEeCDjRCAbaKNAUwJ5sQUwOJ7CWCVomkJZgkaWgSzzj2e3+Ev7vLZvVBrL4bOyEqo8VsNTIo3L7XDwel2mF/phLM3OiH8uBO1shLMEPPWaQzwspUSKCXQRwkAWKeTsP1cPSxsQ/sZaYTbdXxS+KVqmOiSUYEIsKntDElnF4Czm77sArh4HnYARHMoRpj40L46fB/nD0cFcMNnxRLuo1vPBFDsqEHFFwnvxSd8MviW0McUwOXi6RU6GF1L0cqSBGDhtS3l6uJ7KY9RY1J7AuDSRT7DjBhucZHzfHYDgOMIN/mLO6GOibHdXAobmB1ntq+G2wBYCw3s1E5BcSOcOSNgZuZFnpRt+CXgwC5bKYFSAr2SgKY+23sRNELY+2E1vDpXDXfGpvHrwNhrAUjJDhZfDvxSlbAdsJnjLzTtcUQTH48VtChMgSFVU9JUqAlQwHpJ5qyABoBF7QytK0HrSlP8XNGMiD8M82Ii0SNdAsTuIdvbGah1rvP6OkB/A4LH3bB2YyU03lrDhPiIP6w0JSLboWwvyQQYyntTdnrYJPD++9Vw7Z1a2DZVC42RWqjVamH97mgYGUWTSneGtLmfhfgooHUIzeggl7efpXQfjx69JUUMm+yet78JJsWAHyxNrgFkF/mZ84DZp4jz89DuXAvNjTthMlkI7ZFWaK23whhmxNW9raiBnYGOz4143lOXfzcYCZSgNRi5l2fdihL4f+anwvra7mjaSzu7oZzvxny3C/1IrQrfFKSIFPJE4nMo51G76mpWL4sG1fMbrxlRXxlaGKbEhPiwtEKMWKpJEVIHJsQULSxB+/IIaGDtmo/zYScmxD+T4Vi2YZJACVrDdLfKvhZDAn8L0eHjqyNhsjkaGuPGSo3jrRkLbUx+ak8JmlOS7Mu0qHQP7+mrMl5qlPdh9nU4IlFCc59+5dK3jBCes6kpQbWX0CGLMOUxgdiBPysVzII+sTt8foXHK7y+ynGZv7gS6hWIHZ0l7t9aaDZWwga0/IkjshGl7pcaGIIqYitBq4h3pexTcSWgz+rMPL6m9RlYaztCK92LRgVIAVRpkESxkwPihEcCI1DtCs0qQd/KCAnMuU2/l7NPgnjZXlwCgoxxbrFFwPG14AMhAzAKlTt8pAbGsUnq6KTzgBb+L4Cs07kEaF0NjdHF8JMDsBcjSST7ufLfQkmgnDCFuh1lZ4ojAYDlNIBy6qxaEHrUbCNsrBEjVZ0Ec7Zz7ACs1KL0Sx1gkTuABjXH4wSHPizp55uMObQrY6NAseJc30v
RE1GMbBxoYNLoA9k2QgojEVOiBI4kghemQjWv9AvMuhf5LizF9r0wUoNODyNxqbYWVi9thKs/a8MClRhStgFLoJxEA74B5ekLJYEsK8WZM5Vw7CeVcP4SZIqEmKjmVGhW5qKPKoQ9gNJuFje0KmnpnT0AGH4s3o8+Kkx90s1LDapQN/bhziRQ4DEjxowcAX9Xqra1aT5Mb6ATX+MeXyMj1jxMxFsQOBZCs7Yedh5ohh9jOuSP+dyf7Kp2D/98+aqvEihBq6/iLX98aCQQzX6A1Z1jlTB2YCTU7o2F2tQ4yRsAqyog1TkMOJ0Ajl7lmgAqqOqCVII2lRLYayYKkurxWLIAi3/TBRsPSRgeaF9m3iADRyD+K0m+4ONzbEhgIiafc1+vMgbuYT5cDmv19dC4hd/rlH8neJXAhSDybCVo5Snt8lzFlEAWW4Wv6hJJaFcmQmV0J478vSxW+8lKIU0dEyCmwKQCNb0D0YIsFWmKX0uzH7BVtq0ggQzEzL4RkptsUtS2rqBXAViAVpujhhkxtK6G1uhNHvnegY3w36i1lS1PCZQTLk9pl+cqngSihgVEXTtXQ3+aDiMjOyFXvIb29CaL1rcBrBMsYgBYDPDVv9UN8HXulPOneHf0xXpkZg5JGEmKL4yUU6kJfvVzBTSv8Fs++wXf+DiMVa+G1fZyOPu/roef/rTUuF5M6s/01+WkeyZxlV/eMhKQtr5xfTTcXtoeGvVdBKIeRJMy4BdChVpW1LTQrFJirACzBIq6WdGz9EiKoZw7SmHrtczcJ3ClAJfJfs2BmJg2KsXPFS4xFiRsXOJzHquXwmj9aljfWAhnYR2e5hMQb+uJpThXVE684tyLsif9lEBXo5rEyLN0vkGCWupJSUkfBaTax9hVn2RhOs4jwBVNf6ZP8jC+SjNg6atCCC9Vi3kSI/GC2K9UGrzgBfMQ8oagFcKnvP9JqHKkKbR5KfXQ65cr62W8V/9GSgla/ZNt+ctFkYCA9QFmvYtoVvWNGQjs+Ko4OqRSSkmpFMIhFqHDPO5lYSIPIFpYBCkSvZZU9aLcxcH0IwMuzm3cloFg5kOM4HWLMXMlal0h/ZznF6nWrPZ1OYxVroX1xYWw81SZcaMPd60ErT4ItfzJAkigq1ktX6iTOImsFbWJUEtnidoxWwUMwBTtKhzByQ4TEBNgSKk7ZUql6Lsq50UBbmGBu0Dcl6VUiPXq0uYjgKUXeO8cgcoXwmjtCtuk26HTWAjrO9bDWViKP2XElWzDF76t5eR8YRGWP1A8CaBZvY8utROz3uo1YqzIrp62d7GYoFFVjtHfUywfmALRsJJYJDErSV+8Cyl7VHwJ6L9qckjWkCr/CY+fwDrVdHiRUXg5NBfNxgHA/a4dfkKS3hK4EMfztxK0nl925V8WTQJqV2x9w19RDmTPJEA0ZwLbAwQFH+ddGIHpKT43yzrlQCRX6Lui/Dze9KJdStmfoZIApA1ivSyfIjilqQD2Ka/PcvyGjdLHobN2NaxNUSaFGK+zxHid5i9KwsZz3eQStJ5LbOUfFVIClgaZ+deNcO/etlAZkwF4gEPN6gQLBCbBjtqVGS26xIpy/BfyRg59pwxUJsNG+imPf4CVCmi1P4v5DSv1mwQo3w2N3WtljNfz3WcdzmUrJTD8Eog+rLPVsNQiUe3oqwDT9yBavMMjmpW1rKLPyjL1Vs8t0ywN/x0v7hWkcYy5OYKhyqapk/4gmguT5B/o9G/CTPXjMH1BOr1H2Z5RAuVO8xkFVn69QBJISZn01xdGwkRrKlQa20gnByuweoQevor571ssGsfRrnax0yXRbWryWjKt42XAHV6gqyi7svUkoJ/LHIX4uiiTYoByrOeV/h626ke8L0UexiFZNsLEPAUBVtG6TCVVtqeQQDl5n0JI5VcKKAGDg89/NBamJihjT2mQekpuwMopmMmvAkr6rUxqK32dWCsCg8s4qwLexC3epVguhVFokLLxXVlew4uMyQu89zGbqY/IBEacVzIfg5N3niMh74/5bhmc/KSRUZoHnySd8rNiSiCaAs/NAlh7sLAcIhM7GpV+q1TAgiGYWNNqjAPtylgrtKuylRLIWwKO0ywbvOus4RQkVWYTFTdTsZo1jNYWlQKS8yhlX4Tld26EMxHcSq3rCfeq1LSeIJzyo4JIoMsKPENtq7W5emhQMViQqmAGTMkTGNJX2MkeBaxYAFLK2VfQriIjsBzfz3ELFZprrY+ivbau+wdPfF6255YAmTVgGVpNOVD+JJgSCrNh0vl9qOLr6rSuk//yXti7txl+hs/rdFmM8lFJl5P6UYmUr4snAX1XZ0m/9LvzlLS3CnBle6jVvsvE/y509ncAq6N0Gq2rbC8qARcEAavGP9X4iHMGlIqsAZ7oqGmXqPWiYt78+1iQEoo85IwQfkmg+z+zJfgkZpOvry+FXSdWGeEGMpcSf0DijtGylRIotgT+/WeNMNGAbNHaS5DwmzCxvsdE1yR4mF2rvqtpnmsOLNsLSkCgqleSMMETj/FaElog1RqAtdxKwyqPayVqvaCUN/88ZpSPgckGH19nLGMmDL8DvP4FX9fHobZ+KawcWiip8Q+LuwSth+VRviqSBCRbzM83Qnt5R6jW94ZKW5/Vv2Jy/2sMV5upl/QT8M5L2LzorlYEtkTNqMobPq8BPAIQDxll8v5zv8dn2P146ytNDWuEzyZrlTDFD/nYBKgEq8VmJ6wAWB6Pa3yFqi54Z2KKvsyM2OK5X/ezNs99bD7w3M/9G99/uVtqNnmqKMsuTNS6fkGaw9+HRuNiuHV1MZx/a7VMA5WNkJKI8XLPlGJf/d0L5GSvHgi1utksXmdSv8EyjIZFQcaQolnBCnTldZF8CZuXPsI/M/UkTHFMgERTgo2veZwAdNSWxni/weMoYDSu9sQhGNV1WD3S/E1Bzc/qm4/3wYgnAoxA82jznVU+XOK4A7gJUILVAmqaGpqa2ipvLHPc2+A9HlfanXAPBPPzdV5renx5G1WvuXVcP4QiUoulsfDo8bC29mFoTH0UDl/4PJz5Z9NFvfSxXY7RspUSKIYEJFx88AEBwidHwurqDvxWMAHTkxmVndIhKRktEjSsFL+WCkTGE9hyY9gLiuCBmiQojQIygk6D16O+B6D43hjPNd/NbGpEPn9QQxK0xvjeuH/H4wiP8TXPJ/lMrerRFt/hH/GMrz9ExBC8BJbHYBb7CUyIAM8SIHU3glb2XbUzAcrPBC61tAXfA6h8vsj3BbpoeuT1Ot/Z4FjnRP6Nr33kT1itM03t0T5vkdeKUPGSDipZY6xfx1f7GTfh11z2b5H6H9jAYUJcvBvWT66En/Ddl9TXVWpaW2TED/1lnIZs8YFr5OuNsEEappGJ10Kzhf8qeZ1ry5LbhsocK6a+KwFrS7QubAgQ3efRfMcLgUpw2Y7aMwtSecygIc3w2mMbr6f5XM1JUBLQGkgwAtzmawFupIJWxe/xVhjZfN/v+rqXTbBZ61TDCkDUBbcu8PiZZkEf1bgEMBSuCEjLfH8R4LrLG3f5kkAm8Kmx3fY9DoFtje/zwB
Q9+mJT1uDKzpBU97CaaRY0z+DAmxoWcXsx52DOucXitTshpfKa8ujGps9nK/i0HryxkkvMSfjJzdVNEyyuTRahCUBLzUuShvnsSvx6UGrFeq624+HYzHt8Okfc2LH/Yengn5yb49LRqVmQoTow1lgJWjnf+FBpjoa0shMtywrFmAY7BhPn3YuvnM+AQcuSeAzEPMgc1Jd1n/KOxpX3ovAVofT4Da8v0uDJksHlRe3KrOGWb9+Pf2s7id0y4Orxicuf64kEhImMKGSslESanvzsU/2Ip2qyw/G8+rbyPHe3g26oXBvc4DZilvv2QFauErS6dySvx9X2aKiqYVX2s13ahzm4MZA7n9f1luf5igT0b11ZWI+7ZVlgLkR/cnSaWK6JMBX9XIPfxHyl0+Ub8X4Z+D6PtmzpnA13Hjk1NSszYXheGYQCWN5N7cokBNLdt2HW1o2QEKvVd5fWIxdagtYjAunry59D2bxwYQpNC9AizyDJ1Nlv36/I2ddzlz9eGAlEjZIFSDNorbIaF8NR0uMIVUe2N8IcGpcVcstWLAlokXPDIXAYM5VnRW01rfvsQbW8/DErjk8tMtTADGOoXDmNUHIPxqoX1NJig7/erJaglde8kP3yv//jWGjsnwGwdpFLy9RNFH3Mv9hjXpdcnufxEnDxMw2PhJPLMApdCDUTCmYeYcdYpMTrO2AzW7aCSEDg0HxtcK8MV1l8eTXHDKeMhJ5BZcRwLKpt6fPWROjrHIanJ6KeVmoV4wlOXoJWXoOO8xCVt2OGMiRoWTWo7paRjiQMQx/K9pJJwOVOh77Z7H38mKBjfQb68fR7rTbHw66JeiztHgsOvmTyKerler+sRi145alpKQ9NhJEAAngJYoNoDNFIwHDRygGwvEROQ+5BlDs2+lNw/svcg0oll/bBBwTGHSFtU9WaWZoFye6el4adyxWWJ3lGCahUNeEwtzYSSpisR/ASwFwQpcibZHc/+5oJ4hG62fddNMo2GAkIFGrH92C4qh3nnZWii1M+dp8PQhJurrLA4lzObmmSOgeaVmpFjFLTykXsnmTqx0m4e3VX6LQOcgOgvJu1OLezlycqqARcCM3edm/VzPZrMYVVlqpn02/BAmHmDAOSMzeXzu+CXswW75b3aQF/1m1AKyNDlBM4h1vuaK+Ckvi0KiTObZWglYPQOQX+rPOXqL6Z7gesjvPGbvR7dg3l6pOP/It/Fs0+681AHBc5n8Iq+Qqz5Ki3qH78g0NTEbi2NWoRuEy1VQJX/vfUwF61rJsQaBbWW5EYkX8vyjOWRIx+jwGLPe79sBqaI40wug2Ke8w1mGla/T53+ftDJQEDRzU7rUMNu0VmeIthWsXZwGNp8Qe2jYYZchWOYy6sb7INh+oCh7izasQy9gx+N0Bcv6Mm3LL1WQIKPiR68Vps9JuhUuuUoNVPmZ8+XQmnztbC+ImJsLIMW7BNyqZ0BgULNZcUWqWi1U/pD+1vu6M3O8j80kb47dXMx/XmnvEYx/Uqfq6DM6NhxyQU+XL89P0eu2RKgLBSsCnGjNO6gynXCsJ5+7T6frFFPEEiYJH4rpMuMuBvYSncKEGrnzfq1E9ZVs6OhJVFwCocRehWKR7nuYBVLjn9lP0Q/7aLpLt6/SZS4A0qtVzL0jrMNRZNj2OUY5slk4YkjegUL0dTX+64G31jiCVgGFd3R38Wz03JlXdpmW7x0IyU05fLfeKPCuAZeYjE1jwRTfredCQmyQbHClmEFtn4t0rQ6qfU73xYCY2J0dDo7Kbmx0nEb4JcqJuRNVguM/2U/RD/dnd3b1xOmwUyy3qfkQBuYZq6YUYGfCqndo3HashmJogU5HJE9fyuu4FoAlCaaj+/sxYfY4wWi7aAlldTq7akTYP8oKMG9hoklXOLAM4/a5hFVzm0jvZdBHIHQ4d8UQAXpw3VTmke7Ot9PzhTCUvk0q8me9mivBEDio3NYu9Q6lmPSJ456MSMyTgj0UAROTH7Pi0e6UhxXrpIuJt14ZQAIB1ek1QkaahxYaaSybZ7sh4kaURqPAubMixbbySgZqFZ8NriRvj9jdXoz9K/mPeo9I6aNWUc/6ZVAdS28m7Kwo2UgLUcNU160G9BmCcqKnUJBloOIrpLTatfd94MGD+7ClWzPc6qQ2xWOIXQ1bQoG81nBWuODBdHF8p+j8PHXboCMZGRk9GksZb2LpuAld0P0wbFAoAsmJI1JARoqjIP3qnd4+EgxQN2T2XUeAtMlibD3oweWZ1rOLQELcvKSMKwCKP3Jc9mbJQalqQcta1BaFpCtQHVq9iuVzjM6tJ3MURBC1w8IfY+1EZL0OrbwDvDGtwko3ujTq7B9l6WHqjuMaAY0GJBznvUf8OFquo3OfShuKPKu0XQwsY1wsR0N1kjsdkANpN5X/Yznc/7IgFgo70RzVRXSAF1nmrIdzBdvbl3PLLZdpJFQ19XAxkK/CX0P5OIH/qy66TzwSwYVxeohXY9Ay39WTks1w/1xfs4gqblhsRKDIPY1LlkCVRraFtryMU1YwBLRVkE8qGR0csXa9cboZFuwymxN1Qq5BoM2ziqgBVH8dYS7dQL7ObvMSF9nncTx/XNTBJB66Lb1Rby7kfRz6eJ0AVDdqFmKgkaZtVQE/j8znr4Fn6uEzsaUfMax4w0iB150WX4tP1DxGhZmGYxxSrfz5DvPUIQYob1/KdI3OsKVm7mnC95Ny/ZQ+1zUIDlNZfmQaXQj9aAJVitmGPwID+vWRBf1gBG+lNe2zLq/h12lLdZBFX9826as4w9miIOybpSY9HERS+cnMUVW95iiueLiwdjaZUFdZXF9DqstnPza+H311fD/JFmDHo1g0ZlEse9jwhxEIvcQITTw5Na2l5TrH5DQctUW07hQQ3HiFMCVg+vcRh/qgStft216up4SOpzbEv2c4rZYRhp2WLYL4F88+86GV1c4wIbX3zz37zM33ABVUz6GhZZXC+yqP7TxRADky/cXg+v7R4LJ4nrmsNkqAZrJnm/X7ank4ABxFco2Pnx/Cqg1RwsYHHjRtjI6dMa5RgEEePppNb/b5Wg1XMZQ7I4/R511KukaersRJc/wClme36aHv+gC2DMIM3vDmonqdlDE6G2+0gk6PE1bsWfi/eKf6Rh3+pkWtYtUkFdRTO4SQooNYXDs42MYaivi4Wvjt+wlO/XjwZl6nzQFHiFjcAngNYN5Bll/fV/1tdP3GxoMh8jLi/6KwdQG8Lr1yxIuCBH5vvOxbeXGm6fkN8M2nszLSnvPR9pp9nM/vmfV8OFyhRDX7MgoJVsGxwUPN0VahB0QHowHnNvalf6X9xJTlFlzpgUGVNlezoJRF8XK21zBZMhAGb5DP1dtzAdHptrcIxFX9ceku/OAV4SXkrxPl62ApbyvEtIgfXOzt0CtGANDrJ5r0apFqyf0jkyCE0rAhb/LOr7hrXVRFB9XyriKQCr0FmFG7AY2ovtUtPq9Ug0bdPFOWjuFnhMd4cEEkZKHZiCr7+Mw7h7ks6rAzrvFrUsgGoCn9aXRIy8ezG85/OWudimLLZZja4srktavAuvT
EOfHwfADs2ORhlrMnQBLMHr4ftuheBFyBfX0a4u3l2PJkKzkAyyadjVz2ssnuxaw0Lybm5m1bIWWSAErliwtN9rhWmcknQjdJJljoVQm2mWoNX7Oz8SRkZnQqdpzaydEbwsQ1LwZlqWFQDLI+/idorGSVnDLOhCum2Tsq0Jq2zPJgGBS1ry/8/em/7YcWZnnifukvtOJsnkToqSqqiqsl3S2K6eGZcKaBhjd43dbrSEQRsGyl+qgP4nTP0ZY8xgDAzmi4iZD42aNqbRbqnKVa4qURQpSkzuZJJMMpkLc99u3iXm97yRl0pRSTK3GxH33veVgnfNGxHnXZ73nPOcc0poWTINiqQxjj9mkkex4ARc8tWcALiGiOvKZ/OOjeZkjbi9xKPqxAKsUQBLoKX0TZJlkk1ToR0tS0SlDvkntcuLucl3usbYUnCx1ol4Gid0iXLJhlEJF62j6NM47b3gW9pttUBcVmYIYUdpm+qApSkb9SI7KB2iVGtIxjktNCnZSLoM5j3r5kHNS11DXNNj78dC8r8oP6V8M3enQ3sKYI1CLBiZXrXfO9xpbxGULHKGtC2ZY7UQOl9XnB2fvIieXYHGmeQlYL+OH+sGxwNAS4DltIpn34z3ibpDc0GMUIGWtC0xbeNs0rJ0IJ5456NOGJAJo1Ipkex9zZ5mfHDx3nd8J8UdS4fgGVMzK8BESAYM1dNKedPGSRqWwAvmu9utu5iQmK5bk7IapzXQno+i/r2mtWvpa84rpqtYjjQs+btU7kQ+L2V3kAZ2dD2bhsyyMj2JpdZsDTGxLqJBEPsmzVThA7enVqG5r7lYLckxieYAi+6QOVCbCx1RRoz4rka3LktMVcOSJh9bUxonXIwgV9EquYLlC56IsffCD9txGB4iC8ZBfluswboxwbrRwYDUrlK1gtjUOTJEHIirHb52+z3k0BNFW6YQbx7cu9FJlzp/lxLuKnv8E3xc18jw8LueBfvByW77/pEuex16/AHyGDYlaDHutWlTBWmZBYfHlxwBQ1R3F0y8d12x5V/SvIssEErhFFHd9ZjLRLF3W/6hXX5RY0eAJV+WEhBoUxtj4+xoWoEVrZxB7S160Nob4aNJfXie/dBRtKriAGWhjxE8M4Sg+9GnKUMSx7K/+zvR4NSAlIlwFnZQP9eNpS42M6HEJKuHcqx1t4pFmCGwM+MAVGwu33YnAW2QHcuQhUckYj0Xw1AdLJr8fRbr1wba7ATHATYOMkVlUIHVL/UxgncuH8XTC8yVqukqx+jsmvMBKkFxnIrF1+5AcufQBk5asMyC2tjJKqH342qaebLALLMuKFFurD5vl+U9LGAgXbR8ftYyrZ6IsScdr149/xbLe0uX5YIooDiAhBEEPXzC+/XRtIESe3ABE8kMC1sH4NHmZkh81y/tSmZC+bWkdbXn2WCFFbfAxncVjXsmYb+Lx0OmJZBLoKVURROLUWyXTGMyH76+v90RNcTmdP4ut1iKLtNYTYAkechsOgVwDwNY0kCVAWMJENNnSTbNB4GVrA+dEDDi9mXp3pWDUeTJJdaGRQ6GSzzNZXhnoBrswUy4xMnnrfOkB609kf55aVlvtcBJ3Qe95hD7IAgYAFYlbHM2roQH/lbvUQuaSBgyAyil034mS5jTCI1vqdKZFK/V05pzLELZ8JXdnEAN3/ZQAlqMEauLyyuus+OWqNEl85j8XMoEcQp6/Ik+VUleT8KLaarRchmqkKMCswVSynxx9cmy3SEJsVI3aQOXZHs2F9jAHezOMyeq8YvxXpU2s1oXFhkworrLvxVLizK7K6i4wBK0am2/XLX33vPmwT0R/uB7gc1cb7U16maFgcqQkBwXwEKnTc62sP070xiJ4jAiTUuDU8MzXtiKQKsXc0hEDMg6ivb278b/xask4NYe+lxmwuqxwsIkv9comSCUgPetQx0OvI4BXs+IGthwZcatd59jVcOa5n5vTa3Y5ceL7lEApli3JBmD6jsF18vQITPtITYOEatWmSTjbQwRR3F3Pi1tIOMCLfhgrKeYBq2Aeley9993cQd1QxKIt5u2ebbJYbykB1qtsnqE2a+0Tb0c+Lfqq2lHpQEZ+bQih2tMe6pngpKtXuS1vvbIjt+BtpdETMqzC2qSJ1qgFcelGC4x5m6ibdyFGi/6t96XNnKkF62rM+e0YKUSwlpVt03jWvcsOrvIFjILXni46O5Z/j1p90k3p2kxIWR1UK20bh6VySRu1JIkVENrHpmoCkSMRAzSkISroHfBUd7XO8SD1l6MzMGOjE2udlDw8QQj6jhHH+pJ6178dJy/oQ2UnKxiCi2up2mJW83SRBVI7evI2yC7yy5nEgHFfItFAtpVS+taYTEXGWFlbd5pXMNU2nnzQJSA9w38XVpEtbHQCqqNhvqtnpo8JYuYFQRYise6/GjJHQLpWIkGrxAaREEXuxglPY6IGHHLWuuCgokXWBMi9iBvxNNWOc0ctNdFBpmeu+ZBqyqJHT0qOS7z9dFtsmBYF4I9jDmQwGKj8KPl3Wze0e8m80caitpFiSGkwcn/DrPivBotgIpJcaC1zmBLImVNnPecpnNpDAi4FPKwxgKuCsnTFJmUyUy+LpkO5fd6jZpd0rzkZ6kGJ8tcqP5Le5OGJc1xDNq//Fifjy054JJJNE1NslSspMyDCkWIcnK6QjOxXqacBCqD4+rtAVwxmgfRssIFTISL3PAzt7YHrd10vwDrLHFY2TwZ3VV+xNXPEs29ndlbd8YT7ahEbRV7cJrBqedaxOJsWvOcpoUZymlajjFVBythnEKK+VwyoY3NR5qJ0hpdebxsZ8mmcRZ/13c4BF77qIEmc2ESFXW3Kw6Z/pQBX9T2395fsH8ZmXfpmrb7O7X+vka9iC/y7x4h0XEvbNpEzIMsApGmhXmQXW18oBVSDyazzDZqhfXUg9aeDLgfQ2e/8aATugWMQTvFjmAfOwPyDAJYYgfHveLv8qbEpJJpcJZd6CS+DVUwjv0W2FnKya/dpRz/OpSPUEGVCvIUsPoWrwTk3lkull1xyRUepX0tM0bEsFNevuOQNA73tuDvyjvw6oXtpv7SgpuW7YbGjcyeSob7AILJtYkVuzi66NiCqkMmX16amjZuis/qA6gGmAMCLmm0ej9OmUpuKFkRaLEeLCK/+HxaAqpwmUVo1TLZZ2n2vaa1m5F6d5RVtavTlhePshM4g7OQtE0ZfFkhrMHd/HAyf8va5IBKgcUCLe2u+N+Zi2SqiKPpNFFsihLnRlWMxZrShNWik3TcTBwySNs5JPMKa7oCbWU2XGTVkrbyFG3l/syqHUXTUuZ4ZZA/RWCyjqjPsmhe0hbk8+K/mMbQRvlpGsrcqQ2PS2GFafMGgPWbkQVnFrwDa1DmzzUhc4qaTOJdBNjLLKhK3trEScuSPONsZRYyWVwUoyW3gR5jTOMUgZbKkpRJ47TePGhVJbGTx1a2QasF6mZlj5K26Q1mJazB+s3n6HwZDNIlJrC0rTkOMQlzefxMLDps8mJpWtw0aRVMqQkrbUuTVotO0jTkWASQ4pMIwFC2XK0paVsTEBnk
G3IlPHgU81AFKA9B1BhE85LWJTKNchpKS4g7zksbnYIAFkag/HKq6Hzx0aJ9OrrgNC7lX0w068Umfa1pJk1V1Q6UF1IarAqjyvQad405WVu0Dig+S5tYHmK0dsAcdEQM6mhV1p45HD1obTJotvRWCAnj/Gjesrku2C2H8WedYEunull158uq3m91V1olY0zzREHG7cp1xl3FFZejSSvgknalCbsfJqEWP+2In43c6kX7x1gloI2NjMbKokHiDBolUEAx0eJlLhRoiSovk6EOxXcpMFbEGoUvKNmrNj8CLz06Y5d73LvbkElL4Ipy5SjtMmfe45pEurgC6UKmwds8n+WaZe7U99PWlMpMmzXJMAItyQtBxdwEVE9ZA1T0cRmBSq7xiSvANBjOcMtz1ta2Ur11D1pVSWz38TyeLGttsfJaN6NJhR4xERqEDBEw4uvW7V72Vr6vujkFBucklGCZCfuomKrUSihcsTZN3MF1QoYm8MPMM19srNfhT/ZyCaywqK0WI3ahStMLmGQifAOKvMqfiGmozBrSmpWaqxWtQaYupYeS6VAOYP2/Vy3yXUUMQWlS8ll9hv/qUw4RL/SetC/NUoFw6hqyUMFHyetEf5sDLV46kI/7WmVpGV+V5SURH/cSHfSUBJgzoCWEjKh50KpKYjuP0rJ+PtaKrtzDRnMfo38A1QAzYagyJAyv+m7aecoMIBOhtC3GrHUlMLlVWvww/pLDvYUo+3jcBv367sbYrr66+DvfFyhQYncuc6E0MGVMP/AobwfxzRyCATfEIbbhENrXAQXMYgJ+RjDYA+ASCIlUIYLICP42lReRhqVD2tZTaesAVhq1q40d1oqVYQDtVMmLFdCtumcJKFouNmscecrPLZJWzG0FEw9JcqlYXPoqHsGD1k564QP2hWeDTraIAyRypAyJSpCEHfzUHky7nVzQ3v6NJjRYhVmAmkscitEo5TVg4709ZV04xOKmhU70d72WSSntC87e9kZ9/Zr6RunApiFpKPGuCk5Ka+hGwzpMP8pc+AYlUE7ta420CLQJUbkFXPJjyu+lzPLan8gc5kbcZsNO50E0YrwqY718nVWiyNh8wQHWF2PLLtOFiBeq4KximPqegC2tTcAkGXSyYZOmdQRWpqwMzp8V8/yTjJRvcMJpWtHmNR7ZBfQoZwqhumdsHhPPkh0/+8wz4EFrJ6P3h4hyEqZgxY4xjs7wEyr2uNnU2smvJ/43Iv/KAft4pWKPOERzLVXiVyC1iMmcpMmrXafIGHJQKzA0xetO4v2X9AWobwReKqqoSRG6jPJFp4EJOB4DKp8/jhIiK75L2R7Uv1qcxRhVuiJlNheQVX1fz9+TFk8BkHIELpDoVxrUJGQLaViTi2sOpFQzTAHRisla4ntpByzdo8zwuvd+5CDZyJerzZojYCSwwiiru9O0kJ9SOdW+OcAqcsPEZhmZMACubH7N3lYhyKh50KpKYjuPyjWY6SFVU3CEGXkS4fZt58/T/l0tOMrq/ASzwGMowqK6xkhzfSYe7S41YcWiEvVXGpec/fKfuMXw2Tf9k7RJQMClnYUelRZJliXlPJXJUIQa+bO0AdGirP4VaEWbk8jvpdg8Ld4aA0pl9HzTGBUIqXyISCDyU0mbEkNQxAuZCBUULcapzIEyWcajJTx/pdt7rfEu0opMqRrzrhwJQKbxHhdmqc8kKz2KNSi/9hwy1Ea29s310ioXMM0VzHPTKzAHRfl5dnIPWjvpha6zgRXGeglewTQYHEOeSpDbME0mF2V7H2cxEHCJAs/Yjb0xV52JSKmCxEBTLJDqPYlmLROUb/UhAXWVxpQIEqtE28x/tWl2VHixCqVFKzGsUhVpoZaW1QZZQybCzVhzjh3I70rrFjiJCai8gTJLCqhkKpSmV2+jRKEBChdQ7JseZVqNO0xAQnObAvpsng3rFIuB8g5qTaixPNVhFQdUZpNw2mbpxFWbZRFSba315kGrKomtPv6vn+Zt9VEXMxDyhalu1j7+lCwYjdO0yGgXO8/+RpR3ETKWMA2sYSLMuUUk3nvVrlsTWIUJ5di/PiGiNe3ZMI73evzZ9k4CClausBqKQi+NKb+AXwstTAt1FJe0uYahrndgyDjVWNXvSKNSDaxqEHq9DQ9t0hTTdoTYLG3QNOYli7gbIrQVZCotS/Nf2d1FwojF2hIETG0KPuKdQEeH7p5ZsRYuYkPzoLVBGFt6eqirzYqh2IL7ES6AVUHLCuquDMnL7lWTXQO3zD9KkjnlgCuKiO9ixL56bOEAAEAASURBVGy2833Z7+32M5mJpGmdJuPCtYkWR5MOK4Hbve/2t/3fJysBAY4OKic1dWMv6AgY8ucd7ml1mpbMgyJlxNk09wVOsq6MoeCIiCX3wBrvqZtq35R/H+JFAGgF2WnYA6tW6Xnmz9L5N7EW1/6y6voMyx0EEAfEZJHNPQhgDQJYWDDq+p5ecvHacY2xA5apUJHxjN/YWzugpewKJ9l9ijqtkuPagGqi++Yl0AgS0Eaw6uMTY1aHfHxYB2NvAqgZ5vqdRYgtaMEyC8YDWM4EiHkwWOKMY6yxAq0V61LQzVctAZF8dfK6fBagWVUqJ1FfD9OTgBbE94gkVZe386qLrrIIBVoyEyZRa0i7Tfk4xC5TtuszaFxyVitI1QPXq3rQf14PEhBrUJrVUSjuIhwpQa5AbJ30H9styOTKNHdmwTuwMTXvlXswHtDi5KKahpVl2DdoWvlpK1BH6+GcB60djwAFFedzvewATvIb0rRkGmxo4Ic8GGlabLcUYJiEpqWJm4NCJt+WHNTfG+p07CoBWdymyh2PHf+HXgIvkYAyhBzDl6VgYoGWxro2a0lsylR6RL4saVrj8jfiJhDxJYbG9j/AUKz6WcEYJpUZa8+uWv/bXwMt79PaUk8AVh+iqJ5Hq8qjXYXZYzAHD/KnPRwNDVqRphWZB2cZyEloWpq4Svcjltlx0tr88YkexxabIB5nEZv71wzeW+pP/yUvgXRJQFqVUjYp/ZVMgxrrcQOWkEH+LOUblB/79kZNq9biAhc5BfFZzp81x6sZW14mdRPxEcN//zXEbOgFd8/kLPPf0ghe0UfdmAb3o74OgVUUfXSswYb2rMg0oAE8yfGUGJso0/PXxtCeifllPySNSjntDvXk7TtDHW5XqvgeTXaxzHzzEqg3CWjUVse1TN/akCljiIKtNdbjbgpLUHymfNeKzXqEmSVyCcTh0wqhtbt82PNWCUR1XyATxqq9d7Zk5859bcGJXzJx98RenO/vLxIBaW2WXQOwgkHsrrAGQ7K7m1iDDb1iauclBqEyPSteQw7aBHKQuV2nMmQoS4ASsGpyi5whM4re981LoN4kIMDS2BVgKcWVMroPQXNXWqskxjSEQccU1FwXAUN5R6uJBb6GGrUQtDMLQrowI6iYo9yybLlWsggoQwYr7YbmQWuDMF74tP9gDpJgtxUDCBiUITGyuYehkp43/GopB2wR4NLu6wkDWRkyBFxfG0UvFNzefVDdlTqGFbEsx/BtyZQi57VMKb55CdSbBJTpQwHUx2D
FniUbvvIMKo2Vy36fgPVAWpbmtua4QEuugBjnORkDSNsUBuMWlKdYddbcLnmTTvWzfROhfOMtFXuslCFdZE/Du6B2VtDBUbd1s75xfy95Q4NWMVsLbMOewCQaZUBrJ5ZE0zyuMgmlab0+2OYmurIp+OYlUG8S0FjWhkubr+9CLnIEDMZyUgQM+a+fAlaa45rrmvcxNuUbXIDk9gTgmiDfYJFFZ9Mr8KC1lV7J5nOQL3rRrt5AUz2Ok7CLP2sqEovSuIwxmB8sE3DIwEb5SqyJTXgK2vvvHe5yJd7l2/LNS6DeJCBwUsqqN6k79s6xLgdaYsQm1QRa8mU9gPvwGNDaHDJqdnUqljfHwjIKYI1BwFiz1q9T3atnTk5C1StI86Mo7h99lLPCJMUdy/vwmp7AIijWIGmbmkPTqnbPEgNaJoNR8rupOKRMhjIdJoFd0rjkBxD9XYwr+QMUiCmzim9eAmmXAFjlwEqB8mf2tdtraFoay7IYKF4r7qY5LFOgrCma4wKsKcArJpr7+u0G+K9IkGuZUbIbPzFyNz1Pda/KxYNWVRKbPX78cdYW8YwGeQo8VsgzaEc59qPGtrJcN5VNSqzBKqNIj/zvdmJJaVzyBah8g8yEJwEuTfokd6mbDR//npfAZhIQAUObrNMA1ttHu+wkoNVDouAW4jr0WdxNm8+INaiNqUyDIlxFmfnju5ZwDbPgPLvgUVuVpnV0zd53NPhvXIK3q3xDJBvfOJkjXTSmwPAAAj3EIymcMi2osFQIZ3AltWJvvMSYnkd02KiasXZitygFcYSJNoBNPo+5Lu65pskt08oJnNjfwryi7N6q1aS6SbrWJuqamEaAP81eSECKlCwC+wGtUwNRoLzyakrD0mdxzyPdk8JaBFTyZYmEIbZwfCQMBRNXVlhfp9G0Ji0L1f3Y0YK9K8D6KrP7Rtl7TWujNJ5//qiEltXdbWHLIUfAMKoVV1mDMh02UdNujLEMi5D0y4DW9YWSi9/Se0mZCFXGQYyrM1Dg3zzQAZOQUg6AqAAticnfRMPB3+oOJaC8mSq/onpZImCoivN+UpLJvyXQirtp7iozvvKLypel2KxFnFm8FU8LKbJGVA2rCIzBcIIU/Uv2I4BsQymS5y/Eg9bzEtn4urezxTJlgogrx3n7GEcbwkxgaG28qOSey8Yt35YG9pU5iu6xOwPDNtfhY7hMF7dFrJYow2ISnmDnqtgt7WSTMLPEcMv+FHUuAZUeURZ3AZY0LW26ulrjz35RFaPmtEz/0rLuQ7LS3I41DjMIqbBGXFYQwBqsjFu2bbV6bS969KD1IslIkyqH7dTnIKA4PIn6egLBtvG8aWWGUuUGuMDq+jzFGLF/qwS36hclYY7LsDVtxbc10A5w4ds6i7b1GqzCZ9Vem3Z78aJB7d9PSgLa6qpGmDZVMmefPdju8gw68kVCBCIpU4peUb0sgZYOl2tQZpU4mtOmRMAInnI6yBcAV45cg69o3qe1mYAEWB+TZ3DNOsgsqCwYaFphpGkh2c3+pBne01gWLVY1dpTSUrEcKhTXyaRTbkAisGMVg86mFE6a+EfYvSrWRaXW700XnE1ednmFQScBqLEKwp8s1RLQOJXmLwuAmK7fOdThgom10VKi3CSsAoIlzQtpWYq7fMS8kdlfz8UMjqUpdtmlbgqmcF+Nwewas0Leg9aOhH9eSXDvdlqmpY9dAKBV1mMHvyXGYLwr845uoDZ/pKHsEmriOp0hu8o9Emre4pA9fpBk991MwCRaG76t/Z05+zY7WIHW9UmywTDxVMRyhV2kiBm+eQkkJQFpWcolOMgYPd7f4oqZHoHt2odvKylvgzagmsvyZSmbu4gY2oDqvdimS1hh95th+0vtrEzuAQvJQyv1K5XTS5vXtDYVzzDg1EYwMTT3IAMJI4Dy3vh5BjcVxXNvakDLpCBn7T1s4NfmSzYAaHQCWN0JjSYxr5yvAP+AfFu/f7iTfJsk/xRgLfMY50R8Tl7+ZXNLQIClTZ3SM32bVE3fwoStYqaivLflAa2ExCMlh+nhfFg3HWhhNWFOC8xi2eIFMoHYMiecYX0d53HK/urILOLgql7eElpmXn5RiX/a3k9AMUlxg5BCj2WqFAfKgJHU+EpcHBsvQANaJjcBlzStfQDWSXaQB9pCO0j0WhJNC4Omv+K0lHT0fzzda2v42mahwC+ze1xD4/LaVhI909zn1LiU6U+WgMNsqP6HUz32/SNddrQv8ruCZYm1qqb1EMag/NPStJQcNxbA0l3rREGG5LiYBIPMlGXCRdbZLVUZ8qC12bCZW8pZe8c+AOswwiXXIL4t355JwA14BKN0TvcZ9Br4itk60FqxNmZiElH92lJEO9qcvQ4F/tFch40vFm0V0KoQtyizoW9eAnFKQOzWDrSp42hW3znYaW/hy9LzXuZKElncde/CCvl65wEoZb14CPniESZCJcRW8ccYm042wbwdAayeYMtf2uq5m5YJ90IBiYSRgepegTVoBBRTwonvkrbJt6oENNo0vjXQH7NDUz5C58QFxKSBJdG0aRUpQ6VKRMp4fX87Pq4Ox9DahyYo5pZ2vr55CcQhAWlR7fixVErnW8Ri/R4m6zOMSaVu6kTzSqoGnOat4rKmASyZ9x+IMcgcXsQyERsBQ6ZBZ7AJx3Ggjdgaj4Wtg5bXtL42ggGsfxhptU58WEEW0DJ8WjaAiBMyfH3t4lL1QqNuDV+RgGsETevwEgGT7CA7majybyXRBEoCpzaKxhzDHPNddrYLTM4l2I5T2O1XKGIpU6FvXgK1lIAASyV0BggaVoqm75Oq6fePdEIWorgjgBVoA1XLC3jJbysua5E5MLpSsc9n12wU4FLOQW02Y1K0ZIPkbKK6G76s4IHl8wQVt3hN6yX99uKPtJ51lrotmyMpbigNC+agAWCBamf59pwENMiX2aHdx7d1l0OmwiVeJwkLWjAcm1HJSNe1LWld8ikofkuLRVILxnPi8y8bVALKeqGEzsf6WlzsoCjuSoxbTYib1PjTvMQqSJqmEA2rZJ/NFB3VXcHELvVZrfvDaVgq6hgU0LAWOCZBcOpnFefs9GBhq6f3mlZVUlFapsBy7f2YBo8Tm4UvS2QMTIPRZ9Vv+scNElDcliLp97UW7VhH1r7bS+kxdnNJmT+ql9bLoiGtaxEtS4SMJwtFih1UbB77va5PC0eS4Fq9Tv/YWBIQhV3xWCpOqs3SHxzpsDcxD2rTlBRYVSXMsHcZbGZlHWGTeWGmYDPMCeUe1Gc1b+4cocgWKwhDFYpJ3bQ2TUGSJXsnUGaMLTUPWhvFdB5RZsJ+epBA4mAfjxAwvCdko4ief64RWA1QfAh43VsqWz/OZ5kKE7ISukvUAtHKjvcwSfqVKUOEjCWZCjkEZMX13eXz9+NfewnsRgLd5L4UQL11qNP+wJkFu5yZMGnA0j0JrJT1YpgN3B1AaxaNC3eWA6w4MItL4GwOsO6zY7wMtZLH/KJ998iWWIO6BzUPWpEcon/lwZoqD0BUheZu+wAsT8DYKJ9NnmuHRk0Bt2PThLiN72iQDBXK/p
5lx5nkZM0RbTcACUOpnaRXPZlfw8eFGZMdptO44qT4biI7/1ZjSEBjXNR2ZYU5QMZ2maW/N9QBYxCzIM8VWJx0EyhJq1KYyjAU9xE2l6K4x2IW1M3LNBiua1lh8JD6hJ9akBu1jnDZ7r46Nmuj/JKX5sarSfL5B4i1+6JES4LcjZpWkheV/nNrMsi3pcDEUXxaivm4A9Vc2peCF2MxO7xATILMNlYSMbhew6egOJk/IYbrKLFcCkZW7kKvR79AeP7tLUtAJkHlwFQFbZkC/5AqxGILit6uSgTysSbZND9L/DO9Tpq6SYUG0dwFWFJ9NIdr3lwImEyAgQgXyoAxjM40YRP5gg1v7xK8pqXe+vDDrLWPtdpwX5e1B9TOMogYlV5WtJaad2aDnEDO3Ent5DARHlmM6vJIT1VqJ+UkTGLeCpDk1+pA6wuCnIuT0QSepBaYmgBVzMKCyCOxzNwG6Wx/G88kIA1LgCVm4EmytgusdCiLu7JeaPwl3RR/NQNgVc33sohI6xJgxdgwyBBArNgsCx9DwBi1YmnezlLs8W9Ycc9t/Uo8aIlk8Y+3c7aa6bKOVjEGD5K+STFa1M4idVPyY27rvZngN+XM1cTQhBCTUGYIKi5woNFgpgM2Erk6AVcL5hkFc7bm2pwZZ3xhjYreoRW43rEFNMLVGM0kiUjBn7QWEhBgSYuq1nVTtovfP9zl4gOHSIyblkrarvQIm0mRLzQvx9GylIYt5lYAqEjZRGXisPwI5/Kk/cGZVTtrL62dtdk1etCSWfBshtWsMMi+/FvYWo+wLe9EWKK5J7PSbtZTKX9PGgybN5vlH8VtfTqz5jJjdK8HUmYBrqSE6c7LAgM/xO2I/7vjpJLkTV2zFp4wLLh4LldiJeVy9peXDgm4zRAboU5VGIB48R2IFz98rQ8fVpvthzmoOC2NraSaIEnWA/ZmLnP7l5jtr0HAeAjVXXFaYtDG2KDrBvMoAY848Q3L5B/YQLFgV60MaG27MY2bvP3HtzA6D3VbOX8C5uD3Ee5bLGjH6PEOnnv5bGN4aB5oKogKtAoiKJHuEIlCOzCfVMuJb+Pn9vSrWj6qO2P5s2S20ZoiFuEqu86ICh9d/56e2P9Yw0lA4wa8MoVViCkoc+Afnei2H3Acgoih8SUNLEHMcoAlZWoRx7KyuP/qKRjhCBiAFh/EqGhVl4RHDIQbbBU/wcF20z4/PWH/keXiJRWKXzRwvKY1PBjYd9vwXZXIfJF5jd4WhxCzYJJD7kXdle73NTrl21L1U+U1c3FbfXnrzRPsxizPpUCkMhMq47YK8WlxqTK7nMmwgp0/YfJIunvYX50koA1QNqNSI3l7C4bgn77R5xLhHiK8Qv4ttyFKWFTyV8lkP0FqtXtYPhxjkMcpLCHSvmJsXIoIGGRzD+0h8a/XrWftkZ0L5Mfa0ZU0N2hF/qwsgTud8LMHUV3PsB1XFgwRMDyzcgcjW5NFCTnFHJT9/BJR98xjF2w8ROyWJnySTbgp8FR2gsM9LDpkK1BqJ13z8PiyjUyvPovhiteCkqRU/Lm3KgEBkupgHYMZ+HtDXfYOTEGVGzlYBSzGVtJjXPei8az0TNKuPp8tutygSpIbo4YViRRDhgWVKcDqEYJ5ZGHrtC0urm5V3pt9r7lB62MVdYTyk13qQTgHEOoJdgXEaeHPCiBo7GgfsJmYm+s9iU2H8prJt9WDptWDjX8fLL48k5p5n3hT1oLetpydhuWl7B3SuHRdi2iIyqChYGSRNfwQSLyrEr8ADVeGiDMvdyp4GJKFSBd/eKzb5RVUORxtgjSO9L0km8YreOUsHqrC8MXcmn3OMcm4VvaaWJtbQUVxD0cR3n0sWZgIYRAOn1XJhR1fTHOD1uQwK1Z/PwIcYlgq32A3omwFsDI8Jjz8Yh1eNTnZNKaI22hbRxaiYON+Jvx+ji4AIukm/xapCF2OOE1yxWytAFR6XxrXYwKRVc5EDmuvcSXdW8mdX4uAtCuZ/ZS7UoHqMgm+DWCdRUs/RgViAVbSPqyqhBR7JZ+VWLwiXtwgJkvlR3hwKZyq36v5owArVH2scI619TaM7Dug+kPLz63auSEZZHbcmhu02vvbrFTcT6bho+xPoLsHZHMPszvfA+y4HxryD8VSUjlvAZe0LB1UEiLgV+ZCpJ3gXWtH7DQsyCIKPpavaw1zihYfaVxuEeL6lskMX2ARUCXkHW8NE7xPf+qdS0BjRONAJUYUhyUNS9navzsU1cY6hoYl/2gLgKYxk3TT5ovphlYF+YI59/lc0fmzpHGJGCVAi6VFGpYAS4HEE+z87qKC3rdK67gVj2w5Me6LrrW5QWttlViszMF1syCgBWAlupS+qJvq832ZI6RtybfVy8TuA7R6182ErYCEJnrSc12LUgc7aBfLxfMeTIYlJrd215riImjMrBQpw8LoiGnO12dvN95VV83GynRxCi+CwErhEmfRtI4AYNKwVGokLU2gtMTGSxtFaVkXMc2LFKXNY6zhHG6eVMpMoFlk85jHu5bLPbTS/qdkv9h1NVYW6SZtImFcnx9i+/EmQW/fZ0l6gyVU/qz0jMIG6RrtANVkeusjWKqPiS7QEiFCu9nEG9fg6PBck3xbYhUKyARomuzStESN35VNI/Gb9BewVQnQ7W7TonEghuBr5A/8wcke+9M3+12aJrEEBVjSzjVu0tA0xUS0eEQGXJEvLkCAugBoKeBfpIzY9luisAfOLEgm9+Aax2XW1is8jtnVnjk7p0v5YFcia05NS4B18WLOwgNdbPVV7HEIUaoMiQesXQ2nb/4xaz0pY+C6Mo5bMxSLbCuSUFeFInPWAnilYdJr2dHaI8DKd6qIZIfLZtCGdiiNK9IHV52Pa6VEaRPe8VrXN/u6Ud6pat+KuVIRR9XD+iM0rD8mDktApc/TMG6r8hYgaWMoM6CqLFzDgXWL2Cz5tWJvmhjs9fhnmXPfxVl8k4t7YpniIjT3Pdn3NSdoffxx1h4dbbfWsAcBK5u7NKwuDq1Qvu2hBDShZLbQLvA+xIZPZyMCxFB7RMhI2wDUYiSTz1EK+OmxuzW3XnF23m5PrkDQYOcqc8seysj/VLokoCwXRyFYfI+g4d/n+D5+LCVcrma5cPuYFF2yNobKLyg/lmpkXZheI8/grq1wO7lDIZaqwK7xxxAw7Bbxr7esVJiwpce7orlvvJi0rRkbr612zyfP5K2lvI8MGOQZzJC+KUTjIqOqB62ayFzbKzmCq6URlET3Ncwu2h0ewZHdxu5VVPg0NF2FdtJdmAfzBJBq45hzZqCIaXh9PGvjZLGfEy0e/4HXuNLQa7u7Bg099bnqr/VDyjlBDNa38Vt9T4QLHpWaySW/5YspGabPbpjh6VKnqRLxVYgX1+ZLrirxHGMz/ubMggKnJ6ypABbkiyAzYbmOJcu9G2Wp3oOLak7QypdaKT52CIfFYYQrIobqZnnT4B4MqBf9hMgNhG3ZE2zuMhP+hrQyIe/JtzUIQSMHkKUDtqKdiwArmw8dY0yJT1U+XdVoe9iFf/lkye7NsIckW7xKPnjgelGvp/99a
U0Kd5BWPUAfV8HqD9Cw5Ms6hsbdBzlHDME0Apa0rHFisKRdXQG07jAmn4otqA9ib6HUuwXW1Husppd4JANGFjLGkTV7f+9cws0JWkUjPsuUGFfFHlWKBKq7b7WUgBb2EoIWu+kJezFF6Su1kxiFIYUaD7IitLN4aBFJQ9MCpcz08nNpl93CLlyX5oKS0Q73d63YvaerrszJLL6DIuVNpDn6Vh8SUF9Kg26HGNRLDIaS3p4iAkaxV0rx9QaANcgmRQHo8mOlLasbRYcdME1TWucGTMHPZteceVB+LaVvSmAsii24glinENZNJsslJvMTJtCq/akAC4LGHrXmA61zBA7bfTSrcAjBomk5IoYHrT0aUC/6GY1YAZfMhKLB38TEpkwZB1kwWkCqVo48BA2ZCVOCW25nHTneoxyFmaDNETREix/oyLsd+K2pFVcJeYHdbgFAdtRi7nPPZuiLBOrf37EERGUXYMkEvI8Nk+jrorELsFRpWPFXQ5AwVHJEoRBpas/mEZqUC96HcHEV0BJjUMSLWbEFmWixjj8XSIwfKwjJ5A5LUKbBMDts2fJTGxnl/UN7ejnNBVpiDZ4fzlmuuxN2gEDrEL0LEcPXzYprYmoHuMI/iiW5ieN4gBQz0rg6tYhgghERgodUNWldWuikcYnqrEBT1UvSo7Ik6JqfKJ4L57eKSnqTYaq67xsXI1OfzLzKF6jCjd+i2vD3j3a5OCxR3EV1l0adJoZg9SacxYJ/lFdQMVifYRK8jNVCxVfnACxtCjXHYm06H0YUdnkUeLR7FmTvWOnIiJXOl+2n71XsZ3t7Nc0DWgIssQbtTBd2KkgYFcAqQ3XiUP6s5o1X29vx9Mpfc+Obf+QmVjE6mQm7ML11sUgozZPAQeCVtqYrkrNeFHjtzvUY+boi09Lwk2WT1nWPhLtLaJKqhqymRca3ZCXwbDTxRHrTfrRkkSukWX2bZLcyB0q7Ooh2JXNhRGlP9ppfdHb5hpWmSYClAGL5hr8EuKRhKfFz7IAVXWiFdXQCJeAm4HWJQf/Q3g/EIKxJax7QOn8+Y0uDOesL+9GyYA3aAIIWzV0Z3X1LQAJiExbKRZctQ+VLqqbCdipGggmpMRNuFI0WQPm5DnRJ88o6M9Jp8tEpFZRATIGcUzjD5wDkNYBL5sJ1/Nr4M/55TBJQf2ks5dgYSXsS4UL9pezsir36DgxBJU2WGTjtTdwKlf5R2Z+7mAWVCFcJqR+K4ZRc08nJ5K4M7uvBxJUcJsLateYBrZnTUNTwrBZFvghfQ8j7AK62VK6MtevvVP2ydBGZM+4y6XKwn9hA4kRuJfCY1EosIuBYKk00EqI8b1IIFcd1pCegACAO0p5Wt3u/QTzXLY4HxMxMLRddgclUCb6JLkZgJO1JWtRJqOxKxfQG5kAB11HIFzIHgmepb9KgltdNgl+gWf3LdBSPNS9GRpItsKgisQVQ3AkmDopPsPcr52DNWvOAVqE3Y20lijuWj7LknGY5lKbVVjPJ+h9+pQScfZ5vqWxCdZ+rGK4+0OoYmks/u2K4GY4QUf38lT8a0xfk5xJ0gVksespB1+ZifFTJ9iB7owMshoOdANdswcbwd6kysrLIl8Qy5C+92bB2HSWNSoHAKiOivIHyPQqw3sQU+Da+K9HY9/Ge/JHSmtPGDHxeMhor0uDHCBdR1vbPMKkLuO7iE5bmlVDjxAokDp9yXGc9xTSYuW8dnTO2OLDrpLgvu6fmAa1DbSx/pTxb9yMI+DSCFgHDg9bLRkeNP9N004RcZuJNQmBQ5Z1uAEtswn/FZy34jnIsLHI4pg20JBoBl3xwivPhMlkAKS7JMOtisVR+OqUAugst/vbTFbsxQTaNuQL1RqM8hokadHTxDdrUJ8obOcjYOQ5QnYTGfgqt6gQ+KwUNq096ATJlbpeGlUayxcau0RzRWBE43cYkeAlquw4RL2bRsuTjSqCJnkiOQReXNc5MuMJO7Ia1B6N2YGDF/lxBxrVrjQ9ajoDBujfzqMMqGUyCAYzB8BCrIP6sMJ/O5bB2HZ7GX5bpA2XLUXiVM00RUWIUahN5qtOgxmfca9aYVLZ1pcv5RboC/HGYC8VA025eZqkhKiTL5zUy3WITi9TpQuta4Fh2zvMoLVQqb6wOLkqEGBEn5E+UZtVNCIU2DEdhd8oEeAqQElgd6JJ2FbE/FXOnv4u05XTepKBI88L5SPH9Ki2TfFhfQG0f4fks7wmwksEsQCmoEJMVjDNVMQuGN0iJ99CyHbP2du0znDU+aAmVHo7jKCn3sjcQWCkDBqZBsmCELnVTOkdtE12VJqhiS8Ar51QugFZiEGpR0YKEPRf/EWmVeM3L1Dbt2mWaaiGvoqjxyrAgsBJwyXdyvG/VmQsfsfjIZDjl0kGVHU1eC5TkoCwhWoj03LdvSkDdr3Ehk542Ma0CK3xWAiTJ+SgalSoJ6xBYyVx7CMASmMkUWC9N40H5BBfxY41gBryESVD1sW4ySZ7I1Mwc0XcSaBqdpGQKZumEG5z/mmXDO5Zbm7DFf4L2/n7Nr6rxQetjxnZ/pd0K5BgMKqcRMmZB+bJwRIBaCXS6P+UmEtAEVMaMBdgY2kH+DkezIvtlGgzDFmcy7AHI0hR8vMltPHurmhroAAup/ConMFN9D1ahzIW3plbtJkQN0eNVIfkp7yswWWCtzBolHvnftw0ScEDFa4GVyBXOb4XGpIwV+wEsxVuJYCE2oOjrAqouHKLSwCLNasOPpfypul6ApbgrxTNexBz43yYwMwNY8msJsJIZH8rSHirzxSKb/kc8/x3PP7di5oktLC/b3xCTxZu1Fm/jg9YXt7EXdHawVR/E7irQ6gOwKEuizBi+pUkCAi6RoZQVXtH9uUyRpLWBe08z4fWunPVi0FUdrrQ3XaFMUDpYV60H+JX2Jee/iAHSBB7h49Ih4FIS3on1Q8l4lzH/+PaVBCRPZbEQAEl7VYqlIcyAqiZ8mIzsx9Co5MPS5qAaBC4tvQ6Gylc3yTPNAaFCtXiq0jP9FmbtVQgY06RoWlIYBd/RfIi/OR/WMoD1mHPf4CK+YBtxF313zo59r4CwY7msxgctETAyhj+rcgChngKsAK2Q+xZmxSLj+MdWHZ9RPaJJOYfGJXbUAigmDURtX4uo8JAzpH7VYZPzXznu5OP63lCHLeLIE1jdh2F4bXzZrhKgrKMCaKvwpMyEGqPrD003Wt3WhH9kdhVpQtR1pV4SmUKJbVU25HWChPVaCY2VdikCqgis3N/X2ThxdT3w70rDuowZ+eck6rwJYCkprvxbArVoNiRxYwoYDmUWhNoefok6eMPW2h7b3wwCZPEAlu66sUHL5Rmcgea+3Icl8Ci9/S3uWbWzvGlQvZ/mxswUOUOVV0XOYD1y7ft9FfsuC7/MhFgLmStpvomvX5uuNcs2FQWALROpq8h4yU4Kc1fkA5P2pRpOT9C8JrjnaWK8pik5ofRQei4CRxEwkyba6C3KNqIcj1GeR/msFEYgQsVBQF9aljYA8hUKsPR9AZZjA2pc1JmA1KNK/zWBNqXNmoKGL3I8oO9lJtRn
CQIWlyc/ls2who7gZvmcx8vW3j5tmaniXucWfFXXNTZo/eFtgolbO+lxqhJnDmEePM5orqZtqrdx/aq+bKjPNYmr5AyZCrUDlcKlTutlgRpgxy0fl+jxAoN66czoWllc2TYFQbTQtsN6Ux0ngdY8SD3NQiWShsyGOsY4ZEZUfkOXIgr/l3bd8n1pMVPGDQFZPSXrFXBXmX8Cm43PxZeQH9CZ/tbNf9XnyvkoM6vYmS7OCnaO6mDp7/Wb9di0B1njH6ViUiFHmQQvAFhKgjuODytZk6CTKNMvmGeSRWZBy17lim9Zprxgw2ehT8XbGhu0FjNtFHrs56D8SLAflbYH8WrPXqfDO97BkfTZtLMUcGkyy2Sm1/Jn4dpA28pDhycrFwBWL+SM5+Xp4ru4n1w2dKatPhhugwDRoe6KLfSW7Bg+GqWE0jFBnrkJJeVF85LGpTRRszwXyC0T4yYfmA4HYloFU9wE3PJNiaIu8BEIKYGte0QG3S6Lfs5R1+W7kjYlP6B8WdK8RMRQvTP+d0DFtqWuNO5q16iX1FXacCifoAKHRbpQbaxrPNdmTYDFHoUtW6JNWtYYF3uD3Ra5BYM71to2aRdvrNm5odgvrbFBqzVPNnfASlT3EPByZsFEO9+ffJsS0IwQk4qCrBauVACsotuVMsfdRD7BItbL1rwDravediLuevlHJkP9L20Bt41blOWfiWKO8g6MFNclgoa0MB2TAjIOMQ+VbUOfVbNuKHGqy3nIYigtTIBfzYGo55IpD26xFMjp+W6aNBxduwKtBUj8v/4aUFl/X4/SqKRFCXSkJQmonPkPrWoAUNLzfSSzHWAzIpNfDwAmH5a+K3nIBKjcgTIB8lN13bQBE0tWfBuZ/wRQMglKyxJgjfFaQJYCwFIl4qd0LPFYwZdkFLrKznHcVo4V7O+GKnYuPl9WtcMbG7TKmS70qkErYRrEqpT4fqUqdf+4LQnILKhKrMUKmayZ7U/RLJTeifXLLfhBB/wl7G1ayOp5MdM6rEVfKYiUHkqaR7UJXETOiICraJOK1yFQeRIz4jyAJa1L/q9FfCLSuFZZ7VZZDPVYRHZrPOrvJUP9lhZNfSZzY6h1h/932qQ1tcpUy2PkV0KT4h6UIUSP1bRKIlIIfDrpPAGWii+qLpk0KAViS5tyGhWgJbDS3wmgJJNGahK1AEuZYObpowdktxhGk/5EJkFisfRa5XtkAk60RXWyFriGUaxUStU0bKttt2zowJz9qLZZL152340LWsqE8f+MUYakRDBxMMR2Etagb/UqAU1fmVGWIGcUCyFOajQudqnMefxdLS5jhrJo5Nnn1zNwvah/pMWIOp9FA9GCfrCrYmdKbY5ZqXyGAihl2JDZsGo6FIgJzFTjy2lqPFf+wyqQzQJ2Yi9K01JQ805bJwCkkvQCWWlCdANaEqZbaUq8J3NfFaD2A1AHMPl1YBoUwAnUWvmDjeAmsIrArzEBS5swAdYE/qr7aM2itH+KhnWLvtB7Lg5rN7uInXbkV3/HFSqvIDucwB6wq/mUy/mSck73HWBNmsyFibXGBK0PP8zaP4xQzS2DPyuLabBCheLAa1qJDbO9ObGWVUdAAKg02fVa/ixtSGX2Os6COMBi2MZiqN15I23QdT8CLpnh2ohVk465scn8J41qCVASlV5gFQEYMV9FTE1oYPMCrQ0al8yJU2hsIrnsArMcACnIV2AqjUvXWPVTCdAEXNKsZPJzJkA0KgFTMzbE70DpMeY/ZbdQAccLVAMQ6UJMWQFW0onb6ReM8aRpCoI5tjNoWJXP0LJuWakwbu9abPFYLxofjQla9h62ouudluncj8DJAhQe4UDTas6J8qLOr9f3BVZLTG7lY1MMl+pyTbEo/8mg2be6KdAIcEnj0kLfLE1mtDyA0QNgyASnFFIRwOPPApEEatVMG86vhRBlJlzDZCh57qZhFXSa0TPtiB9z7wFMAjCRZ/SZI1A4LWo3Z6vvv1WWF5m3r+O3+jUFHD+ahBXK5kG5BEU64uOkmwbEGpNnivXyJjuaS5gGLzGoRm1xYY73E7/CBgWtu+3W3kGhR4o9BgF5BgPyDDZ4TFrSQz3m84NV7EpV1qSCbwttgdeaTcok8G2Aa7/8JizeLSyYrJcN3wTQYAeRHdH9KpMIYWDO7Ce5SJNyeQ31/Nnrr97jrR03pVaqnl/7QonbHevvV5+rH9w16stN1CRv+WTFglUewaqGdQX/lSoQK7+gKO+uj5KVC4lwAzSpcJJBAvEi/DUd+aVVshPsfJaSoLdvJo4GBa1MO9YTmQRVoVisQc1fzRffGkQCmuCa6AIvmVq0KMh5Lce2nNyv4cw/QkSeYrpaWS2bwRoVgQMd/DVQaC6ASNPwFlhpMyXtqpqWSZnaL+GPvQbx4gHjtpqtHathsi0iXVAHK5xjzXzIlQ8zjn5HwZ071ksWjLmDBZiCiV+mhNSYoJXNwCcLTzB7h7hHmQW5Tw0h3xpJAurRKjmjhB9HzEItEAIz+QcWy+T4ayfXH76WHqWI981LIEYJyDwrH5XMf6p/9QWAJe1K/itpWHPr9bBSsDLJJChAUhXiB4AVGS+Cz+Bi3MK++9Ra9hfsPfd5jNJ78akaC7TEGASh7P8e7eLfY+jborqv5xp8sRD8J/UrAU14LQ7SuEqhtKyomqtiXJY4Vqj7uUZNLmWfUPYMF4jMKPEQVr99nuYr13iUz5A9E/kzI1/rNUDqSw6VFlGKJgGWqhlok5V4izQsZbVY5lDGi2uYBz/DCYpZcBm/1skVexfASoEvqyqrxgEtAdYHrEU/VuhOpZudwjG0K5iDxGcFZHX3mla1zxvuUVNfoKVd7Uq57JKNKmBTyXZFi9dnrQwBZc/AauiIARKCB66GGwqJ35AAS2NuAcBS0tu7xGj8jvgrhWhcxSQ4x4eyBqSmRTwcJcIlr2AwwuPnRIR/al2lm/ZnZ0XISNHFRlJrHNASYJ0dztmNrk6yC/ajZZG6SVqWcg2GX+cHp2bE+AuplQTGIWiUMcfMlsqOZfhgJWfv9LfYGejWAq6qxlWr8/vfbU4JCLCm1tmBl4m9ugil/Q7UdqddrftbUyQZAZK0rGnWygusoL+hLsoFYiQmbO1SyYLXUwdYkl3jgNbQxayt9bZZSxmwykLCyAxAvegEvFxUi27Wt+aRgAgZy+x2pXHJCS6aMZtglxbnCH4uETQ6RL+GtCBWm29eAjuVgBQnmfpcwDC+1RFCMZSS6QLa1Wc8avzps3Q1V9ARiA0msGdSgThzgeuD3p6/aX2L8/bn78PNTWdrHNA6fBjnRaWHulnfYtfwJtpVL48t6RS7v6o4JMCMdH4t5XVbxocwg7lQdOM/VpVbCkqeQOWCHW+tX2PbxXFl/hyNIgFBkQBLmr0ytH+Jdi+ihcbZQ8gXYgemwnf1DYHjALZglZ2cCBe/JLLuItmDRnCuLNriGWlfqW2NA1pzSzlr64SAUTkNWJ3CFotfCy3L76JTO/hqfWHSrEhSZGTHcWXsVytFQCyq/Krd7xSHtK4DZGvog13ogmNrfVH+9+teAgIqKU7
LpM+aZyM07nxXJfIHlggajsyBYgxKy18F0KSJpahpLwe1XUlwIV6EwSWrwBYM8vctuzZjdnQtTUzBzeTWIKAFCWP/aI6MoF1WDE6gZR3nEIOwQe5vs67z771KAtXFJXKOK44rcBoXFhyX401+hm9TUPDN7iiuiyxQztclc6H2On6/8yoJN9/nAiDFAZIi0I2hh6TGEjPwGmUIbqz7riYZYGKupiTDxcZOChnU2sfN8uY9Hj8Hfq+Aqrehtk/Z4OnVJBPhbrzQlz2v/0X93LmMYwyOZjrZTuPHCkRzJ6FP2Ia2lXWOjJdJwH/W8BKobnS12LAZdrRjsbvkMJ8hL9/MWh6mYcUOKus4vq5ujjb8XT60q+GHxrZuUONonnGjtGGTHCOYA1VVW7FXVSq70osp0F1jTZp+ulogWvsMlPa7aFiXYQn+BjI7gNU5YzP9isVKrR9roxzrn1X3dx9l7d7tDjJwAlbhGRyK/z2Pp7hJWIME5/gN88b+burnWkNk1pHJRvR4pc9h3XELkeJm9J6Sx0b58rDyO1WLFEUaRF7tarqxo/Ei4NGYYHgARmYP0c5vQmOvxl19jpYlk6Del8lZuTA1xvS3KWq6HKVoIj1TOMLzKwDWRdbKzwheHLfJB4v208OwBdNHb99MhvWvaY2M4MtqlVnwABh1hCG2jxslI4bSYLtg483u27/X5BIQQCleZq5YJGM8i9Bcyb7Xl7fvc+j9skpo4Ovif+a1BlMEXk0utqa6fQGWAEhmPjDJ+a9UoFGMQAUKiyWoFGIaSyk0BW7sK64+XOGNRzCqr7As/gZ2CMA1d8f+6ixgRfXhn9UHYOmm6h+0OqGA5UowBSk/IgKGWVekYXnAUgf7trkE3NaTf6R1lTENiuEVzkYZ4x+wGL1OzScdSgM1hNmwFx6q6PG+NY8E3KYGbXwcB9YtwEqFGm9jDhRYyR8qM6G09RRqVs93krKzXwes0K6ojZWxYTJJj9m/ewuKUtqUwucv/Zuv6x+0iivEYWX7LEP5kbAi1iBJezxgfbOr/TubSUC7aeUq1E55eSnKYnBvKQt9uewWpt/rzQNseTtCfHq3ChaidakMSJWssdlv+vfqUwLVjYzSgmkTI3PfY2yCAiwVavwVpUQEVPKHwnAHrCJmoP4uVU2pmWTTDJyPCqZgMMorFXK8QCFHkS9GrXRSFYlTd+lbkWP9g1a+JWeZYg8dIwKG8g12AFx+S7yV3vff+WrWMn0VnKIgUJU7qZDDUGmgtKO+CjPsREfW3iCoS/FdPYCXqiTL9+UHWmMMIvHAUbhdXNUT2H/StlVodITNix7v8SgauzRzAZrTrhgzKVz1lZhJt7PK4FTG9ts8kgA3/B3HTSu1jFtXYcX+rftOXXZefYPWh1cx2hQwB+YH2D0M4tPCnxWqDIlfS+pyOCZ30Vp8pHXpWHKJd8U0VCaNinOyjyxnHWNMhI2DOLr24ejqouiiwEulT5SMV9qXb/UjATH86F5HnlgEhVTvaoxNikCqagZUYPoE5kFVDVDfp1a7isSuYSxK+xIMwXEMTg8Y0crYfoUd1jWrtDyxpasLNvxu0f68fnxYz4+o+gUtJcg9P9NumRWVHoHibgKsHjrMBxQ/38v+9ZYl4GY9/xRZoBRro/IR44XAJT9VMLL8GyfRuo5SGfgw/q5B7IX7WgKXFkoFJ32rDwmon2XiUwDwFJqV2H/KYPGAQ6Cl4wl9rU1LnfituCOXmgkNK5wAqK6xA7uMI/ZzsgTdtnzvY2udXLb3f6TA4rpu9Ut5V62JgW8dop9ew7H4HTqM1E1kdmfDS4/41aOuh2V6Ll6LG9Ygxyh02Q9Y4OTr0M5bC5526CssfvKJgW8ut6H+RvjlKvqm51aa+krUJyU6Uky/eTYj6sNHBAYrC/swtPVLJLZVctsveK4Et8rQrv5V3/J19sJpboHo7GhYFHAMYAi68iL4r7JZWIJrd6EQTVjr/LItkgT3/Pl038oWxFy/mtbZs7LH9JOCRASMI3Sacg3WLwhvobP8V+KXgAMsAEm78nkCkbWYTfFCDvoHsAqPsDM/isYln9dhXh9QgDKmw15nOlSsF36v9S2U30nF239anWXuVZMpUP7Kp/SdNGYFlqsPpV3Jb3WPgL0HgNj0moAtYgRGf5n6f7nDcN0kGDzk+S3WwcushBeJkP/C5jIL9pOjBQZh3YNVtSfqF7QG3wts6hEZMGANGqAVGmQM37wEaisBgZhMRsqgocWuM1d0dbqOi6gBRf47vTlX/kQgtk9xXpmQIbrOONSlSQOr7SX6X0cCWqGVvguccpqyNCxlsbgBbV3JbG+RkFL9J6LNBEAmoOJjB27q47poUQFHrtoVcBznrskhiIYls2Ape98ePpq1sbelhdXLHW1J7PUJWh9+yGowiu8qkB9LWpaKPULI8M1LoPYSECVaAS5aEVn/IjMhpA2Zk5R37nZHpH0pvks+L2lez9JDKVAZ1cuTNmrXT2L3CaRET9cGQ4fMgVFRRrKvA1QRWCmNl7KiRNT12l1RTX6ZWjtuGC4DVLd5TgxWSPLb3FVosCPWlZ21999xw7QmZ0/wR+sPtETAGEb5fTjajj9rP/tWpW8iGwbxWVVbQIIC9aduLgloJ68FUgSNWZxadxbLkDLWbH8LpkPMhqc7RZVXoHLOvVb15DaAiweG7LoD1psPdzRopD5oyrtHnkuzktoh4oTMgLfpC+UG1KHEtpFpl2S29Bd7iyg+jz+oG81KUpLWFK1za9z4Eh78J7yGIWj/GfC6Z8vzT6zyZMmykw0JWBJBvYFWYB/QNacedZNZ8DDMmP2MOJkFobn76sTqUN/ilYAWTC16ciqUWQiVRaFANnnt3pVcVcUntbO/Oq8SKJHWJbr8gJ6DXP3yf/Eo2ryqKXsNbGv9J5krKFx10hSaIK1qGq1JYCWtSn4r0dfH2EwIrKbpB1Ha5dfiwUXdOpBTB9ZTC/FfWbDIejcGS/A+YT5fMPQArRK+rPK0VdqWrBPAeu894XdDtvoCLe0wPviYyM5TPZbNHYfKeYDO66YTdR/eVdCQQzT9N6V1TxqXYni0vZX5cLUMcPGmFlOZopQ5voe08QIqZZMfQt0SZV4mROU4VLaNTsgbos3z1KWM4qUzJep1sw5ugZPkKgafyBQOcHguE+ASfijJV8QYAdMjHcharwVg85AqtHGoJkPWhkIMwjpdzblr6mA5hmDwhOeqNnyVm/nUKtk7tlYYs5Yzq/YThmBwSt9t2FZfoKVu+PG7gY087oMx+BodeBB1Wb4sprVvXgLpkIAAzNXwYpGVz2tyTZkzyi75rsBJfi4FKItteAjwqjIO+wE0aV3doFUVxDqzgBlAFmWcT8f9xXkV2gDI/KrQApFfRFfXZkDP5Y9SjJUymAi0pFWJxi4z7SLfUxhCtS+0ivMSpaRuW4VBRJHG4AF3cJMbucL2iOS3XTxOTtttEt/+He82GOlis96qpw1cYB+GaFljrbZa/AEd+Bd04B8wDF/nEd9W3Zk6N+sP/14DS0DAo8wZHTyRVtUFgCmrhg6RNfZzSOvSEQ
Uty5wYZd/gLQZ4BF7KfajfkilRB//z+JV/TK/T3sAiB+zSehyw8PjsPT7Te9KwpCXJrCftaRyAUnaKp7yWuU+mQH2m78iPpUcd0qikidU5SKkLkYILGEZ7skVeXePxSwdYYZkEuJW7tpSZsL89taovN0urH03r3Dnm4nDW5rta8WDto+NOY9OFgGEQMLym1SwDtp7vUwuxtAYdT9d0J7CR+Re8QcMKyKwRmQtlMpT5cHAdwOQLi1JGRTkPq6mjeNuVTcmBXCho1roBuNIsJ8TgTH1Y9p4BDPjjgMb5BJGP6lKtADxVBqDTpDD9PQK0xNBUnJV8V5KlfqcBW5RDMJAPi0rDoXxYwRcMGBLfVi5bce2h/Yc3pxrwvl95S/UDWj/8oUyALbaY77ZCiTyDBBUH1k9nwiLkP80E37wE6kwCbtjyjyvPXoEswAqsQNc2gKgVdUoEDR37QCiZEmVWlE9MmpjMiWIjdvI9mRVF6uArTutKsxikAWHNc6AjjUn+J0ek4N5l8hMYyeQ3zpdkDhR5YhX/lSoCK0OFAE2alA5+pkGb07DY2gRkY6+Qkon4KwsvQra4boXsQ8uXlhr0xl95W/UDWnYyZ/MdnVZZI3WTHWS3Qc7BjAArS4fWg0XklZ3hv9CcEhBwSVuQ/2ZZuy9Gc3VAaz+m1k9+w8h0iBkR4JIpUYDVsw5aInkIuGR2FODx1JE4ZEYUuSPPextNiooV40+cuVK/Xz2fnm+3gStOc3KP3IMUHwET2OKApepb0ue6R2lTyqAv057ASo/uuR7da2mikSlQ2hZvIxX+WL+7/o8eo3f0RkM13RaFRcIFNubkEAzv0juf0UECrVtWyY9Z/jfz9v77EnNTtvoBrZVSixWW+iyXPQFYHaS3WuhQpW3azXxryk73N51OCTxbiL+2GkcvSIkHAJQwi0GPJ5ODNCoBkQ5wyoGP/GURiYNHEEkamgBMGpnYi22glr4D3jktroc/7MOvJn/YbiaRgEU+JZEfZK4TMCkPozSjqs9J2pKyTug7eq9q1nMaE59FmlMEcnquEiD6e2lSPG2ihoblaO3K0h5+wfL2EcewBdl7linMWWWRpLeOzt5UUtk4AOoHtBbxZGWVa9Bewzd5hI5s40YEWr55CTS8BKSthKzga5VKpDEBM9KcwBvn0K0WppS/q0r0EDhJy1L9rw4ASoDlgE4gx/dkVhTIOcjaBWoJZCJQinxRAixRy/W+iBHOvMdzZ94DiARw0iw3plmS2lAt+8FXHVjx1UbVpjYbrxIBGdjDp3ToY3r2KpnalaH9CgGAo9bRNm33l9ZsGpYgzvzNfqBZ3qsD0CIDxrkPmHEdbVYqUjcLAoYFQxzU0oJN6JuXQBNIQAs5YUe06npVffz6zVdNfo5tCGDJDBgRN6LYL70W2IFjzzQx3tpVk9kv0pQiU57AphpbVS2a6LQvlmV9T+ZBfcc3JwEkQZZ2C0lqa9O8cxc4v8p+/BLL2zDwf5eY1Dn7i/0wBJUAqLkBSxJLP2idoyvPvpeD5t7FbNvPqxPMW8yDIeZBNK1dTjg3bPw/XgINIgEBSMUBQ7S+SYvSzk7ApKny7NE9jz7Tre9mGgl/pCaE0gb16J5/9eg+W/88+h4vfEMCjmwhwFpCeNNoVnfooM/ZURAwXITSnn9odnKRtHUlD1ZfDZj0g9bZ4ZzlD3dYaa6fThbFHRKG9XILClvZzVz7Sgr+mZdAg0hAgPFMi9ELByEvujn3hRd96N+vrQTA71D5A1dYxaiBBWBlRGcvfQlEXccBOW6loxAutB/w2tXGrkg/aK3uy1t5uduyebSsMtncXWZ3n9F9Yy/6514CXgL1JAHtFvBNQRYN7CmgdJNHSorYRxRtvNOs8Vdb7cD0g1ZbsZXcWoPsQI7RsRAwQpLj+uYl4CXgJVCvElCl4ZDAYEdnhyFonwFYBA6XH1iuo2njr7bam+kHrXLYZhmCiTMAVkhmdygZW705/z0vAS8BL4FUSCAq2IiGFSwDVnOsZbe4riuWqXxiWSoMW9s9OwLZ4ueOOJmKS07rRaQftNrKLVbODpCC/xAc2YiAkVZp+uvyEvAS8BJ4XgJRvh5xUCBUhNDZXYb2y2hWX0C4gCGYe2Kl/cv2Nv6rd7z/6nnxPf86vaClYo//MNJq5VwP2pXqZg3CUSJtkwgYnn/xfEf6114CXgKpk4B8VwQLE38VkOHClOHCJb1FswopK1K+Y20tjy2zsmx/Ltq7b1uRQHpB6zwGwUxe1YgHOGANBjwG1M7yAcVb6Vj/HS8BL4EEJRCZAwVEa2y6ZwGsR5Yh7iqsqGgjDMGADBd94/Zv+uc9nX17/ZRe0FJG976efiIqyTUYyCzYu+7P8mrW9vrYf9tLwEsgXgkoYA12YIj/KpD/aoSN9zWe/45A4Vu2ukrC23DWZp7AHnzHxx1ss29SDFo9WSsHmARDMQYPcUBzD3zapm12sP+6l4CXQMwSUMXPMFxhvZrgeIB2dZGN92WyCqNprY3bWjBr+U+L9rP3vUlwB12TXtBqbctZYRUtK3OUTkfTCjrQtHzzEvAS8BJIswTWtaxgno32A5wcl1jD/tHylc+d72rxUsn+VglvT3kNa4e9mF7QqixmLZNTReIhdi3Uz8qQINf38w772f+Zl4CXQDwSmGJz/ZCl6jqpjGEHlq9YNhyxu6Vl+2cA67wrKeIXsl30RfpAS6zBizAE7452wrgBrOwAtRP6AC4fVLyLjvZ/6iXgJVATCQiARGcvsqkuQBa7hykQdmDwKcQLCBe5G1Y5tEgq3KKdf13f9YC1y26/E1RwAABAAElEQVRIF2idI2v7eXINWgdxWcEJOl5pm6LqxBYo16BvXgJeAl4CaZGAAEgJb/FfGf4rCBcBAcNh+Utg7BpL1kM7fWSW+CsyYLj4Kw9Ye9Bz6QKts3R5e3+OCnGkbaq8hj0YX1YIzR3A4p89uF//E14CXgJeAnshAQEQ5UJskaXpCY930KHQsMjJHmRuA1OPrT0zS7AwGphveymBdIHWzMWM9Z/O29qCMrmf4UZhD1oHh69QvJe97n/LS8BLYDcSEGAp+/oibgsB1lU0qc8tm7nIJvu+tbU/scWBJfuffcDwboT8or9NF2gd681YsUCdrIqIFycxDSqwuI0B8aLr9+97CXgJeAnEKYFqwLBqYI1w4mv4rn5NdWE0rI4RwnQW7ED/iv2Z83PFeV1Nc670gJYIGP/neM56yrAEMwfQtE7RC/JttTrDoLcGN82g9DfqJZBKCURZLta4NioME39lmAMzARnagwusWfetPDBn7wFWvrpwTbsvPaCltE3dK3krtcIaVAaM4CT5Bge4e0DM+7OQgW9eAl4CSUogFJkiXITVfI/L+CUb608cpb2r+6m1Dyzbj5zJ0G+va9xHqsSdfJOW1T5G3azWXlL1H2bnogS5XUBVniMd15i8lPwVeAl4CcQugUB0dmlXTwEsyBbhb9hDf2Tl0u/I0n7D2lvGrf3Ksr0rFqHP0B5H96RH0ypW2sncvw81+wQ7GKoUO8YggOU3LnEMBH8OLwEvgW9IA
MAKSXgrwoWNsoGmBlbwKwDrc1f/Kp+fsR9TA8uGQm8S/IbsavZGOkDrA4bD2UynZbKHuNM32bAcYLBwbZ6BUbOe9z/sJeAl8CoJLPCFSYx+91mhrmP9ofYVfqxyecSKmWkb3F/gtQesV0lxjz9PHrRkGvz7i6RsGiQeS4lxs2fAKuK0QmjufOabl4CXgJdAfBIgd2BIscZAgETBxuAO6xDlRIKrvH8dduCYrWan7ScHqZHlzYHxdctXZ0oetBgV9sbbWZseAbQyxGcRVGz4tHxs1le95J95CXgJ1F4CETtQPixKitgUx01WJyoMVy5Aa79uE5MP7Kdvi/LutSuEkFRLnuRwnjyDhVFlcO8DrPBpCbDCTgSia/OaVlIjw5/XS6B5JCDHeQXzXwltagkN6j5rEISLzC95719YhojFap+wn71T5DNPaU94XKRA05pssZVSD+q3AokFWmhcxGb55iXgJeAlUHsJAFhiCIpw4TQsZbj4ktJ9v7IKiW+zLbdt8uCM/cynY6p9V2ztDMmDVle53Qq5A5AvlLqpn8tWyibfvAS8BLwE4pBABFgKGA7sAdrVP7N5vgyB/RqEiwncFQs2Jjq7b2mRQPKgtVjowl58GIEcc6zB0FcnTsvg8NfhJdDAEpBJELo6aZcspAZWcI9NM5nZ7be8vonTAo0rWLbhowU75wkXaRoHyfu0WrIEEQfHGDAnMRGK8p48kKaph/y1eAl4CdRAAs4kuIBW9ZAfv4zv6mOO/2LF8IpVSqPWf2vOhv93AZaIGT5YtAY9sNOfTA4gVDvrh9ZiU2MQMEqHoF1Ac7dedjcAqR8jO+1Q/3deAl4Cr5BA4LQrMlxk7jqtyqCzZ4Nh3Fqit8+igK3YL35RsXPn/EL0ClEm8XFyoPVjfFdjC10MEvILEkwsIkZgnex8ktf+kugJf04vAS+BGkrAaVbSmkqsNyrYeJvHL9ksX7VSmcDh7AM7fWrCFWzkAws8YNWwM3b108mB1thY3hYK+62FjO4m5qABYGhevNjVHfk/9hLwEvAS+LoE0JgcnX2VrOxzfEQ6JruAW+KyreG/6ul+YAcGVgAsBRV77errskvdq+S0mvliq+XyJMe1Ixz72PGI5u4BK3VDxF+Ql0BdS0AgVGGNUfzVAyw6v7BK5Z9Yb35F0car1loetU8GFl2FYQ9YddHRyWla+UoruQaPYVc+iqQUo+Vjs+piyPiL9BKoGwkIsBQQrJRMmATxW1XCfySq5gvrWntg/RPL9g4Bw77VlQSSAy1rxRRYHGIPNMSg6kfHkmnQNy8BLwEvgd1LIGRlccHC8l+R4SITXkLLumw5SBeraxNWrKza3bvycflWZxKIF7SUHFfFHm20xXKr/VbODvFqEJnJnxXvtdRZR/nL9RLwEtiqBMhuYcESfqxxzIIQLipkuMjgwypds47suPUTfzV5qWTvvedBa6siTdH34gWKKmBlyTEY4M9SFgwLoLyH7QyunPdopWhk+EvxEqhPCWAShNIekKE9UGZ2SokElc+tVLxtrWuP7f7iKklvIVyc8oSL+uzf2LWbrHVU2qwQyCx4isFFsUfR3Cn4GPgyJHU6hvxlewkkKwERKMJQqZbIcBEuolWR3QJWYGifs7Zco/bVLUqKUP/qzRX7C1IyecJFsv21y7PHq2nZaNYKYRsDSYzB0wCWYrSoWOzzDe6yH/2fewk0qwQEWMq8vkKewGk2wqO8/gJhAFjEYLVkRuzo4Sf2c8Dqr112i2aVU8Pcd3yUd/mzWtsAyUCmQHINBicALLK7W75hpOlvxEvASyBOCcjER7CwAVjkD1T8lSslEn7E4y8tLFy33rVJ+/kHZTvHSuNbQ0ggTk0rsHKhxcIMCXJDNK3wJMBFGZIKoOXDsxpiNPmb8BKITwKAUCCT4CyuBREuMAdmvrBM5SJrCsHDy6NmZ1fsR+47fO1cfFfmz1RTCcQDWtKyPoYnmCU2S9pVRRndMye5M/xZGa7Bb4Jq2sv+x70EGk0CGAUBK2KwbJxbu8LxK2w2n2EivGHFk2RnRwPz2lWj9bq7n3hAS6zB/GwXAEVS3NJxdkj70LQ6uAKZBuMzUTZkF/qb8hJoGgmozD3ZLcIVACtKeBuGJLqVD4vA4ZXMY+strdi/VTom7YR9SqZGHBnxgNYMwLR/iVisMqAVHEfL6megKQOGAMvbBhtxZPl78hLYKwmI7VcJASyyW1i46rSr0O6BSZ9RXfgqr29Yvv2xTRdn7X85QwViD1Z7Jfo0/k48oHUMcFrGl2WVQQCL2lmVHgZaRgp+GoXir8lLwEsgRRIIBVhKxRTOsdl9yqpBsDAZ2ivBF1Yujljn/ke2MrdkYxRs9ICVoo6rzaXEAVqBTV3JWdsAQKVg4hDzoHV7wKpNh/pf9RJoMAmgXQWrrBczAJLo7GhY9in1r760cumhdbY9tez4gr13RjkGfYaLBuv8zW6n9qB17lxgbV05MipDwKBuVgRaStvkm5eAl4CXwMslEJo0rBm+9IDHa4DV52TTuWBtpdvWcnXJ/uzPPFi9XIIN92ntzXOfhnm7O0LpkcyfsBP6n9gx/SVSFGj5BLkNN5z8DXkJ7IkExA3EJEiWi9Ae4vq+CWBdckeQuUxNrGkrHUG7cjFaMh16+vGeiL0+fqT2mtbdux3ETuwza6M6MT4tC3oRTbY+xOOv0kvASyBmCQiuMPORjim0SQAJokV4heefAV5X7d8fIwbLt2aWQO1By6zXKrljZDkkoJjkuCoYUHv9rpn71N+7l0C9SkBaEwHDxF+FwSOe/8ZpV5XyVcu1jVhfdqpeb8xf995JoPag1dLSa6WKGINH2Cn1RYDltfm960L/S14C9S4BxV5VlMhW5URmscY8ZJ24ZmH5AsQLkt3afWvJEZe1n4S4vjW7BGoY2EsWDGXCKIaUHjGqE5PZPTSVIal3PUtBi9ja2Q26rNKaaLa4fqzwuBaZN/jXNy8BL4FXSUCmQM0l5lD4hFmD/8p+DWj9gqAYTIK529bVOmGFj5ZJyaQ8g741uQRqo2lViz1+PJInLgugyhxCzvizwm6nadX1cu4m2DL3Mcc9ETdS4RFKriLwQ6NOGJnrAx5DstcHgU8G3OQTzN/+KyWgciJzzJkxDvxVMATDLOmYSncpbq4UTcu2MlTwBRtfKcem+UJtQEviGwSwJkvdlmsfsLIjYEjjoiwJQ7P+mmBWMSCkhwlmuYfJyISBGcOwvYeVed7jaeY1Jt0bPMvyHC0W5qRvXgJeAptJQMxA5Q58yuMojzfY6JHwVgHDlXtMtUkbOr7sSoqc8/FXmwmwWd+rDWi5CsW9pGmaHWBB34+ar7pZ5B40pW6qvyYdytwEW+LZE9hMt4GwL7mnYbTI62SWeWrl9hCo+iPulVLfpuKWHXzXx6PVX2/7K669BDSfNE8o2EjAsJHeNggvAFaXrLIybDMrBfsZ1YV5s/aX4s9QbxKoBWgFlHfM2OhKj5Xyr7F7goCxXjcrqEclS10aKp+ZmEvXASOKyxErks2OWKn8xHp6pgl2LNgUZVdyWUokVCb4DnTd0Nvf
6202+OuNQwIAkXzCbp5c54QXmS9XLMhet1xxzPonVu29d8v2Mw9YcXRGPZ5j70GLNGH2DyNZ29fWg4r/OtrGURZ85Rrc+3PVVuLoVwBPkCGfmTMH3gW8foeGdcEyxctWan1qw0PL9nfSwnoy9p+mMlZclTOZ3SNR/CGcJ9+8BLwEqhKQ1rROXpI/OMB3xXwKKhfc87niY/vJa8y1M/qeb14CL5RAbdiD+1szVqgQRBycYdFH0zIArK5ASxMHCm5mBhC6w/Nfci//L5PsI8iPxIysTdvMg4LJ1q5o/PN8I58J0LwA5lAHPi3+881LwEsACTiflMqFkD9QZAv7L/iu/jOm9Y/JH3iD46l1nlQ6Jg9Yfry8UgJ7q/2cO5exv7+Yte7DHdYa9qOVHMEsvR+to4PBWidZMBQzInOgyRx4D5C6wWT6jHl3lSLLI7a2NGuDb67YXwBqP1uXL7nr7fFi1vKB7hNflmMN1mZD8Mou9V/wEkiNBCLtKgjlC4bAFIywobtB5fILVgpvOhN7OViwmZll+6kjOqXmwv2FpFcCewta9sOMtR/Ft0MW9wqpmwIjdVPYy6IvAkZ9LOIKcrRAcVd3sbuTnDNL+e4Qe3tpxMrHZuyvRch4bkd482Jg/UdyfAeyCYSTAMKJaBm+eQk0qwSiEBDlv1kGrKCuu+zsX/D8CyuVvrRc+bGVFmdt+CwVhtkoVjeAzSovf99blsDeAskPT+ZsPxWJc6VDlg2HWPzxZT0DrHSby1y+MyXgDKCzswuUgziTZUdol/EbP7CeYM6V8H4esCTq/vaAuj55zIOwJWFKqipzWFfm0C0PGP9FL4GXSsBVDEZrCk3ZK6Y57mJtYfNnvyHDxSe4ta5Ye+uozWXnHWA5n/BLf9F/6CXwNQnsraY135W3bKUbd9BRypAc40yifCub+96C49duYdcvMGGw0wvWM1xE9XoIbgzQsCjhnVlFw1pdsR+9JYru5q2rJbCVXN4qRbTLkMBimQnl20o3Tm9+M/5dL4EdS0BzCUsFc0n+KyNgWLFXhnaVJUSkWLlva+Uxqwwt2d/qe7Rz7l//j5fAliWwt6C1NN1qra0kxQ1OQ2I4wcItjSPtZjImmqOnLyO1aUqoXGKi/RNmwRtWKI7bfzi9wPt85yWtPYcRZKXFghz+uwAPV9jJ497K9iWn9x95CaRDAmz+ovgrpTZTwUbir7K/sHL5C+vK3LNifsX++pSvLpyOzqrbq9jbhTUIW6AnkBQ3OMU6f5Klvh3JpBm0NMnIKO1YTXe4bjSs8Ddoizcx9U1Y/vgS7+k7L2+tgJZV0ChDUlUFAq5O/sBnw3i51PynjSUBbf7wBeO/CjAJGsUaM5jWK7ADc22PbP/ogv32bSXFffV8aiy5+LvZYwnsDWi5XIPnM1bpaKd2Vj8gcJyFG6q7y7+HafDlisoe39NWf04XVeQQ6WKUSUY0fvbn1lJmwnWO2yQZpX/qisy95PdI/nvug8BmO/KWL7RbsYRpULkHA8A6TLNJ9CX35D/yEtiWBDSPZOpTAD7B9bADQ2IZFdMYlq+QOnDR9p1YtbcPlu2d5whM2zqN/7KXQCSBvQGtv79Itaw/6rJCRprGYXZafQxeFu5AgJVGx0400YJgggl2j8l20SqZT5l7dy3b/dSWHxVsbL92hS9H2w/ZS67+VZsVlgatnDvK/eLPkw/PAVYa79uPey+BvZRAyPxR5pdJ4q4e8BwfcOZLpjw5BMuYA3NzNni2aO8q5vEVc2kvr8r/VkNLYG9Aq/9gzpbLmAUrBxm8FHskbRO5jdK5eMs8ESrLxQyT7Da9SwoZAKtCHFapY8Ls0eozGu6rur7rNvLr6KBe2EH8z8f53Sjzh9JVKTOIb14CjSYBR2UXACk0RAxBAvADu8N4x3+VucJ+7bqVV0ZIyTRjw28SgM8sw8nbaGLw95OcBPYGtFrbclZcArQyBxm8h7kdfDpkhUhfY/KETDYCGkMmWoDdvRICWDiKV0oP7SdHSMG0b+s293JnFgNjh7WsHeJ3T/C7sCVhDcpc6puXQCNKIBQAMYdCW2H+TGFruMlzZWcnJye+4Fz+gU3NTNtPSXj7lx6sGnEIJH1Pu/e7aIEukyw2kyMDBhqHEZ/lTINJ39o3zq/YkRLXNu0AywJVRb2IKeMqGaaeWOeyamRtb0dYyWehmnSw2zwE+AFa1AtLN/HkG0Lxb3gJbEkCUbAwYBVS6DREuwpH0LauMO5/zaz5LZFZlBRpe2iFtXkHWN4cuCWx+i9tXwK717RUhqS10mqlDOVH7BADeIgdWNv2L6Xmf6E8gUpke59J97mV0bDCDDb4wohLI/Ozd0TK2F7LLqNhwhQMQsyDAeZBp2mlUcPc3n35b3sJPC8BbfmiRNCkYwqpZKDNHuZAq3xGGP2I9bWR9WJw1VcXfl5w/vVeS2D3mpbz6xBMG6rQowAL4BJrMF1NGhTBwSExV4Eytf9/xI98Rg60hzb8YMntDLd7vdIwl1tymBZFbwesn2lau98IbPda/Pe9BGorAQGWCBdUGA7uYwqknEj4jySJ/k8WtlyxXG7MJgdXHOGittfhf91LYA9SDc2SASNfPsBAlrahNEbk3yOwNj1eHZk0FG/1iP6+zWSjFlbuumUL43bs5JK9f4zcZ9scCQKsn4+1U7CunwmLH4/YrCiTfZ773q6RcZsn91/3EohbAuE8mz1ISsFt5hKEC2rKFdGyypUHNny4SHkezw6Mu0ua+Hy717Ra8GVlctDcAS0lx1VQbSY1kBVpWAH5BIPgSzSt/wqt/aq1kemiiA/r5y6+ZCfdHxDT1W2Z9gNMZNUL6+dQMHHGGVF28ov+b7wE0icBUdpFTAKw8FlZ5Zds0P6rtWT+m2XaRsyOrrHhA7DSd+H+ihpXArs3ZVVYsDMwBkOXDQL/Dv/JmJCOpvxnjwErdocBcVjBZSu3jllnYcX+Ddmld6oTfQw4FUrEopXElISAEfRxDm0A0nLf6ZC+v4p6lAAbPcJCwvX8gRl7yHwe5kZgCMoHHNxlbzZjg4NrmAMFaDTPEozk4P+NQwI7By2ZyD4gG4RYg2GFulnKuQfVPRWAtZ60M0rYeR1BfgbL6Qo64G0rz8zbu2cVvb8+4XYg5pXbWXaaABU+PBMBw2mYHrB2IEr/JymRgNh+oQsuhJDk4hipLgzBwjCnB3aV+XKdef7Q2tem7M9fh0HITPfNSyABCewMtARY50nbNPRjzGEV/DkuZdMgwxhSQhrWboo4RvkE77r4kYpdsmz+pvUcmrBfsDvcDWCdQ6OaGs9hYuy3bGYIMDzGOXqY8Al0nz+ll8AeSUCAFbi0ZkoQrZIi95hDV60cfGL53G3K9Ixa4cC8/ZkV+MwP9j0Su/+Z7UtgZ6DlkOkHLdY/T65BNKyQOCUj56AlSnXXRNIhltM0wHSTR2i5HC121wrZqT2h454dzlnXQIctlfdhJoF8YvLliUHom5dAfUogCIhfBIzCygLzBsKSYrAwpStLTIZ5FGYmbXp53n7i8gvu3EJRn9LxV50yCWyfiCEt6yKZ23sIq21vY+E
OVJ0YQkKG1E1U7E2uAViUGBFT0PmxSM9kmASD7HWby4/Z+weUGHf3rTXbasvLfZZTNnfdtyEDl81+97/tf8FLIAkJCLDMZjlGAa1rzOVPCLj/pWU7f2sLlTtW+uVT+9tTq7uyUCRxX/6cDSmBnWhagQ2Pt1gvyXErmW+zcCuYOAVZzQVYAWmY0KpUdTi0X8JrumkrhTE7eVKTcm9aqauX0iWnqBkGCaOyTsBIg0l0b27P/0pTSmCKDd4tp12FAZu9DBaKtTHqYM1a54Wivfee166aclik86a3D1rngYUsaZvKRjBx9i1eYRoklVEIoy6RJqYTyTuDgMBHVyn1EpPvE2i5V22xNGH3T85TJXXvbPBhuRev2CkAEsYk2eyZ4ZzXNy+BepKA5oMS3mqTR4YLqgoHskxQADVLaZEgP2JHhigngqk9eH/v5k49Schfa2olsH3QEkdwppi3sJVn4VvcmXINUl4+AykjifFN8s4gwCQYjHE917iOj8nV+5nlobbnn6zauV2wBDfrtlwO0KqcRMtC0woUl5YQWG92cf49L4EtSEDRVy5DO3T2AN9VFsuEUVKkGN4nJdtT69y/AmD5gOEtiNJ/JX4JbA+0xJybHCWINtfJUq0KvScwKyiwtgXAin/xdrEkAdH69gjA0m6RuljhTSsQPHxyaMU+GdBucvdNfjzVDDt5tMUW12BLhpRfcVkwRMDwetbuJex/ofYSAKi0gXPZYSjLQwqzTED8FbkDNWeM9EymWnL9xDB6wKp9d/gz7FQC2wOaoYtZy5daSV1E3Sgt2ipDQoxSqMKHsS/eougqM7ui9amLRWkEIydaZu2B3fo/5qiSWtwzLUtJgfuPt2JuFEMS8kWASZSAYmmYPNmp8P3feQnEJAHFYIlVu8JcZb7gvwrsE+bwvzCyP7WW7E2bmnxiw/0L9n6gGEZpYr55CaRSAtsDrcOHya1XJn2RaO4kyA0N85ijuSuzeZyLN5PKTcIpJuMddorU8uFYLcIUzM7buXN76zhWUuBWaO3l0jFMgypyCWOQ1wE5FuO971QOIn9RqZYAmzsHRFgkKsq/qdyBYgf+M8meP7Wu7ts2VXpq/Xcp2OjBKtU96S/OSWB75sF8uY1EmQP4cI/x14cY/K1MiCyLeNyARdR+sAxw3MMseAUQvQSb744tTE/b2M/3xiS4cYAs9pJPcYnMH5k3uF9MogJrR+/fHuhv/E3/3EuglhJQirLId6UYLAArGOV0JIpWsUbl4SzftApEpX/dz2cyIJzy2lUt+8P/9p5JYHugtbTaYZmW/bDnTnMFKkMSb4LYaCKiRQWrANYs579GPrQLVinAGGx7ajupifVqUQLIYksSTJzNvMUCwL0HFHukQrFvXgJplMBGwHKEiwqUdsI/stmPCde4RComKgyfXtqTYPs03r+/poaWwPYW3rCtHa1KZkEt3Kohpb+PS8tSvmkouA6wbnANv4UpyGF3rDy3YINntl/E8VVdKw3y44+zNg1YV1ohntibnE9aZmQaTIQt+aqL9p83vQQ0U0IjGNgozGgP2GBdYq5cseLasBWLjy2XWbVf8x3fvATqUAKZLV2zFu+PBFCZLsxwWrzx64g9iKaFrWFLv7H7LxUBrAXO+YjjC2jtv2QCXuV6HtsMhIx3XdzJ7s+y8RfYl9ratzGBtvaRyR5zKHkGjfsOrA3A9hWKN8rKP0+JBMi7GRVrHGWMwg4MtbH7Z3LgXiDQ/o7Z02n75P8SScmbA1PSY/4ytieBrWlaontbf4f1afEmQa6KPToiAtNBHL542jKTkFiszKfgJI5kgiArMKHajiza2P9GTMk7ez8JF8dasPsrmFhZ7I9yzmqqKoF9XPcdj3T9WRpDAkEAnR3tSjGLYtQGRTQsXne1TNq315bs7A+wVvwrr2U1Rm835V1sTdNqJz6pK7cPwsNBJgJHiE/H0dzjEBo7x3COiUggpBLghtTFqgxbLj9u3W0LZJ1m13iuNpNwvtiK/jbAAnCS857i3kVxj5t4EoeM/TnqVwKiW2j8K7fmKM+uM08+4/nv2NzxWLluPX1PbHlxyc5TQ46BzOGbl0DdSmBrmlZXqc3WsgdYvInLWk/b5Bbvmt+34kuwzWfGmYg30XYuw3q6ZMXgnoUHZ+2vnI+rNoAlTaotbKOk+H7IHgKsk9wtORZ53zcvgXRIAAByteMAo1BkizvMkSuEo8AOzFLdoGXE/vKY/FpftXNfPfXPvATqUQJbA62QxTpHFgiz4+zTDjMx4sjmrh1hgXNFzCc5kyshiXAzI+g6M/aeK0FSm12jI2BwlslRCBeKR1OhRwgYIb4sMSZ98xJIUgLP2IFUF1bAcKBSPJlrxBBeYLwKsG5ZJTdGOZGFJC/Tn9tLoBYS2NoCXEbj+P/bO/Mfua4rv5/73quq3ri11FRzE5s0LVmUrcSQEGDyQywCAQY2bI/hAYmJJxnMIIAEDDD+F9T6FzJAgPiXzC8ZBCKMABMhniSORXkRJUoUKVLq5t4Lm2w2e99re+/mc251i6K5qJeq6lfV9xLFqu6uesv3vbrnnnO+53sEIoISMIwlPFiP0CArSCOI4BrCgoQDVdQziK5Kx8EJOXWILyqSNLWq3NcGl3MT9AqzqF5ozyxCopYWJIbex97TqsV96Le5HgQ0IOgaNqqEmdWCYQ2Z/4Fb86yUo8vS1jYo8rsp105kPdv17/UINAACazNaQaJFxKi6Jxgup7mnsk01Hq7VyD12guQMdF0TX5XLV++Qw1J2VI3HyyH5O7oRc86RUvsT8loo2WtI1A+PwNYjEGO2tN3OOPdlH+Sof5Kw/K4ULQzB0rCc+/sFOXWq+kX2W3/e/gg8AvKU8CBU9ncIhUV38DbMPnJJqLprUa3zstZm7NYPsIb79MvGClLu8YX8mC8nTMGwX3J0Hu49UZZe/lLr0bonkvIiFPfwMCGXI+6868eSrPXZ+e03KgJKuDDuuzGOVwVDkAiENjpNMFyZVroaTC/KzyBbmN5a5XkbFTl/3E2EwJONj5qPe2juSbAXNYiD5JPwtCiqRbSMRy3ICOxR9QTtMtu/y4PYvCFGby+y/0EZWKxPfF7zWQbWYBIiimsPcxz66ODhhyKg+RRcbh66uNDVPnJaQmsYl1vRn/X3+h4/qooA+Ssj+h3QcOAXfC8+AOb3IAqhIRgOScj346RnB1YVcr+xVCLwFE+L4z3SHspyDGvQkS8QKIOIUBuDxWYxWK4vlkzywxewBs8SEuxDK+2OxHMLMuW+kPyppsPI3/8qK90v7kCyiT5hySGOSQup1Vj7USEAqMFCCRzFBQtJxlr1ijX/SMG1qCdODZ+BwOJLA6p8wygh6RaPK+Dcz7qhHyrSAHnXe/LywWU5XlMmbZVPxW/OI7BxBJ5stLQdxyKaezvxOKwcZHKiTitQJYiN7+1xn1xlQqnBsgZ6rr1FDouiSDTSbPm2zLbNyl8dV4HcKu/4cQfDLrr72tFoQ+2DJo8VAgaTsNWc3uM+sB1+ByiQXqxTWlAvWL2qOa4VJBm36p8EG65PQosareGjqzP1CCsGbN
uCVoUbQ+93ravCe7XUYJmrPNN+hwVdYm/A54VFa6eldHQBg1U7UlIVTsRvwiNQTQQeb7Q0RHa6L5Rca47Jh7by9nkMSSev1dOq7nCmiO7DVplQiTamoyAServs/kJmdyzKkBIv6mKwjPQyDWfadkqMd2UMNWlKPlEVjG089zqmmnrBom3ZJ3geBZP73AT3uWbDyGhBBkhKXDdKA1z+D7JKQEkErVt4Ud2bZdtsTVHXLKoaLCIPZoCfzvPzOXKs1yQujsinL0xVrV/ctoHVn2gzIPCo0VKD9faZUI4dbJE21+CR+iwkjKx2KHbtOKp53mqyqMUKFtj+IK/xrkpn8OhuyMzkorTvKMlbfF17+UutR+/bRr73ViBTGWULvsjuMFpOtqnWe07b9tWz0glTvatKHVDFs7oJHqz2TT9/G8Zg4RUnCxitPCEqQoYhpQEaKjQUYDvFFPW6Hr2/0na26TueB+FXIfQn5gqH+FvC5IQDywPSSjeDCdQvevle+OER2IYIPG5SMXL8WIbwww4pJXsrIR9Cg8bldapJ+dYvHV9QPKyKCC61WMFFipgvyaHJafnJq5rjqtcXE1fqewHFxBkyMbRcsS9wXPs4vu1FwFC8rcZ/E3owqZo++UWDcbIWYoy2Zrdf4FH1Sz68Iz/br7nHB9fnl+N5SfI56NdH+YxeU518/VgfAmDGQsFSnxigAqO5q4Ru3JL8lo7hI9JyZMbJlmmNoh8egW2KwKNG6xfnQ8Q1WyTcqUW13yD0owSMDAaFpwdzVBXwYmN0VHVsKPMhz39gwvtMyrl5eZdGjq/VQAD3SQfdi3d5eDCStqRFFpGqMuYYgS31FLaX0arkK3UhoYxAjBLMTcM1CaiT0/xiHI5JubAgUV5zWw/fDHE+gMkWgR0F2NrRGUz9WC8Cmpsif5UMslBAAQZmYLlE08bsXemIl5zB+mPc17sH/36PQIMj8KjROrQrkOUMckVoDVr7Daam3ZwjE1G1JyGzzPaH2PZlVu8XWZdfl1JmTP7dPphpvfVbSWo49Fc3MjJBMfECTMkwC9XdtV3pwLvkvLfN0AXEqrEa5vUNHsrivIIBv0Hd2jQ/M6Eej5HQevT6hGFEWJUCbELKoixCXeV4uwUWTx/Ou9UFgIXO7vQDb5EKvMxr8rrhZUlKt2VxfFmus5D7/luEbesWfXj6cfu/egS2CIFHjdZtaO57ii14PXhawVEmInJZQTWNlq7QWVGi3C6WLyeK1En5c8kFIyLT5ET2PToh1hKct5lZv7s7Ky1FPMqY0BbhwQr5AjIBwcLmHyvXA1agteSpYKkZcouGxH+CRFBSvC9L+Rk59ErhKZ1u4YDGWUK7O7BVzwCZLnQ8CePr7x2lW2hdGyFB8lfG5a/OOc/WQGnfVZyQ6QuL8u9P6XeC3G7v12/Rv8Mj0OQIPGy03qGxYWlYmV+7+AJBRJBvMIkxAdlqeRwrFF5loDE5WjTTQhrV2ewdnLl56TtUzzxW5dIeP20k/jetRCoxVsF3OW9EgQM8BkJd/NDk139lhU9q38LTFHMLY9XPpNknhfw1aW9nYTGxJF3zRXndTa6PwlFhmpILLLfjjO9hO2q08LaMN1qPovWV32heig4Gjh1I6YAaLItnq5EHUxiQ0o4x2XOoIP/2aMVgfeWT/qVHYDsj8LDRWhzMyLNRmyxbapPwOBKo7oGTbiL0s4nw4IOiVMJPZgYvqx+jQIKZNgrlYACS2aR0PpuvO4W3EhqMSHZ3SCHaT07mO5znQc4dBpxpTi/rwbXQBYSSLVhAEJrVpH9I4apE1yF0jkjQMiYdV0vy+usUDj8lJHXmDDnQV1pkeZHSAO09Jmq4PN0dMB4ZD0KBq4s3Lc4ewL4TBlRJpuCqROUhCUqT8tO9hGL98Ah4BP4YgYeNVjvCuKV4J2oQWp/UjcEir0WYB2GjP/7gun7WIIiqKGgbcNeozn7KViFelK9L3DEpff95SXqJ19d7nNHwXyfnvEBoMDjI+R7nvJU1qKHB5vMUVgu59VpYwoGGWisJaBpIyxclWwg9y8LMqOzct+w8K3NIr8jTr8vywVDipR3OWCVGvSyMl8Xob/Ke0T0323DfA2oSjRZo2ylO7xa4k89FkikyN2R5mojDdxZRElSj5odHwCPwGAS+arQQ3gnbJA6exQs6yOqPidxW/v70aesxm33oV3xaNQXNFKvKQf5C+EObOTJZxq0z0tWFh6XEi96HPlSXH8bHWySz2Il2m0o1aT2aTrqEBl0ua3OGui4nsI6dPPCwlFI9hYEe5tOf8JrrEfWLDe9Ka0QFUOcSuSvNs6xtLOzKSLSAgojWaQkFxmqwmj6sujZsVt/lPCz9ElFKIF/WJBIe53tg0NgM5apkMGLjxQV5w8kxbe4bt7pf/+wRaEIEHhgtpTv/cgTGHAw6YYltKCbeRETwAVbOYGmo4y5f0CtQMFBuL/fLqaMjD96zRa+iuTZJsniV5jBHcBDjCuutHr3C6ny+LrzHCl/Dgao8YpRSLdDY5f9J1lB7xbXp6CvKCVT01z2Q+grCLjJeaDWK3jvqpfqxisBqSFD7XyVmnntsFLw/Z130EQtDBKHjW/KTgzRxXAnBvrn6Qf/sEfAIPA6BB0ZL/2pi2F/SzYseVsya16rCCDSHhYICFN7AfEQo5Jxk9sBSS8EoRyrsSl0WhBNDLss6DysFB1blQ3AFwy4kdZcJk7wVUlmqnp+QQym0j0vXnqK8v1HWZjmLd64hVbxz7p2Kp1XlE2jgzSWsBo0WDCMubOxNSD4fsnC7RN6qX2KKtrtcmLaBT9AfukegvghUjFYvNTWn+yLJ7NxJQlhDPDAHqVva+HCxED6ueaw7GAO6DodINBGtz+Vuy72b+Y1vugqfVAKG5rOm79DoUY108jxbxUtoIqP1Zf5K6+GQW3IdoM01VvoUqxoKhhNCUm2TsnNPJX91Yp24Kob/MIgChqO3q5cFjsocNNl1bqlZ367sQGSw1LsiNC6I3BpCsWIxWuYmhdp3pKu8JOMf854jPhzYrHeBP6+qI1AxWvuYrDu6WyS/qErd5LQorq30ztrMDikSRovOkmw2KkVT/kyiZEB+fIAJtGtrv6SqYL9rDDHgL5XJMdJIVTUTTdsSbjKEZi1K4MYMcR0+4/S0AzS0atQtdh8Zg2zBNdqAJJAaLMWwvQUCRkmL0BFVhrRjoLqrIoaGmv3Q7sKExe1t7qtrYPIxxuoSJR43ZTGekvaeefCnXtEbLH+reATWg0DFaO0Zz8nC1DMStjzrCBiWfkgbqs1ytSe6wkTmx6gKuK4uCUNB581kRqSQkE9Zid2v5yir/t6RrMySuwss5ItAH1qLhgoIO2qK+VbbiCAHpIQL4yZM+pK5Qm6o7eFtiWj3Mk6OZeNnq0iFZP+6pExdm1HJK/XMbZbJuTlLBdZ2D3L3uO8AxgrslSmrhCNJPgcb2JnZW5QGzEh7Me9URVLxXVjbifl3eQTSgkDFaMlsq2QgJFj6SCVaZ4MGn1l3nRJfWCVd2AInN8mECZ2XVgoxHlaGcEgxmZBTh1SzbuuHagwua
XNHJlyBNWhc+xE8hK0/tE0eAQsGrqALSTl1i1u8VnULdOy4Bm3hqCx0zxKkKm+qJu4MV/YoCu5DMX2z7JGKl+VILCv30ybPojE/rmLDSmSha4Go2O0gC0BYmYTGyyHhccgvXVcn5MTrEGLSsHBrTJD9UXsEKpNMBkJCEuFxUJtlVAFjIwZLJ0ut8HfFwxoS/JTX77HSHJT52XFpeUXzW1s/ensDKRTxJLNHmFyQqVKquxYTN/xQk6veE9dA7mFICEnFWn/1CQqK5FJa5+X2UF7e6C5zbTZuniu5rEjaI6S+FDv0KY0LJzcDhhu9CcCTMgFXf2VYsKHXKJAtEsSGtWB4cWFQ/sWLeXm1xxusjSLsP+cRWEGgYrTigEkcMkKAdJN2KHb1WesqU9IVvhqscT47gCH4AG/tY8gX12l0OyO3XlmWXn671UMn3POo2A9GHW7CVYNltNFj1fuE1ftMtWwVYyQqaqulBZ+zmCccFbFwCG5JYXrCSWT1diWyKUo1+L1NWPD4rpxkZmmWGR7mmiOqbLvAcbsaLe07pgYLwovc5pkaRAyWoZWL0tkLeF3/8Vvz9b4h/P48As2KgBotrFNAfZY9xJcOQgJSPFgbjM86zlm/tORQNIZvmSxt8mspxJ/JgWNL8voB/dt6NraO/a7jrWqwTmGqTtKNOUM4UOuKxDHelNrf2Iw3o8XALo84yrnopHmWcNQFKWWvSl8XaiP7dVGx+aFX8R9o4ZILdZHzLFT3wyx0juJjs9DBn9t+QxEhJGh0waYLBq1D/Gc8rCsyWxiWPeU5+dnxkvzl9gPGn7FHoFYIRPJfB2DRob1n8LCsCp2qIoRrnrW2fRqjcfw5HjfYzkU+SvEwuZOpuMBvEjmxts3U/F3KdjsJASPoOIzG4EscJ/VZrjOxTrZBzfdfmx1UPCzk8Zk01VgRjmKVL+UbrDswYONF6a0iU9MxBtGmLMfdeHEvsU8tzM6y7FG3fF2ueW3gqOtWNcxKjtYihYW6iIbDDfjHlHXYgN/tWEK+Mx0LtrrC4nfmEagtAtCWqbMJIF84NXfX9DC3Mgl93Z51wtQ81gzPWjz8BeEQlWjqw4ZNydSxEsl+VqIp8LL0TI6qYSpSCGsPM6m8yHEz8brzVbZbo0246jlBeLFaf1VRGhH5kLM4iwd0AXbggBRY+fedVi+gSl4unmrHjUhaww5IHfu58t9m20p3Vy+1UY0+h77uwX3v1NlVlmyYT3PfU3sV2t+A/0dck2EJD03Lnn0FPPvqeLjrPkT/AY9A8yIQsSJHCUJ7Z7liYqV9Bxihr5vEKwbLEBpxLCnUFWL6L8Uohdulu7I7u7gpdlot8L48GEp7gqcVHGKyPcb54iVY1Rn8unOtxdFsZptqhEoc/zye7SQTJosEioWT+BMo/DcJTY1KZi4vPz5GDVZvlQwWe+wFJ9UZzBa0FktDya9gIDH8rpi40TDcOP565xtHtsC7spBdAureqH2Lky8k2zIu4/+zJKNvxKm7/zd+xv6THoFUIUDLEWjfgVHqMnkK2qRr7udJ40uVBcdS05Ag0jRO/BYNNfM5HLzbkj08Lwu6wk/J0PNR9YvxEQ17dmKkD2BcDzHhUEgNAePJZ5uSE/jqYbjcFQZLw4E0aBSrLE3CsclFzmuIdf2kTI8tSd+rsfygWh7Wyv6/N5iVeyGKKeqhBtRm2aPcLzBNob7z4qtH2VSvNR+r/7DQ4L0E3rO8vMZzPxhQfxVfc/VX8fSkLH9SkDfeSPhb9RYLTQWmPxmPwOYRiJhu8DychFEbX0KM1lM26r68qrJgFpmnmDTVs0LtwiDRFKK6ML08J39jtE4rTQMPYTQrmWinlMvIDVGfFeAhWPqEmYYiD3BlkAXibHgepQaIliJ0fRaUFkzYL50HFl3B8JuHIGXUYNwNqOVDBd9qMTZMU8e8dASWRgyvrh2gim6jLsK0ZINclR3k3id3S7G2Gq24dFtO9cCa9cMj4BGoBwLkIgiT2YScFl5H8lQ1Aw0alng/eRQaBwYUrdrgnwhLfSyFZZhSGKy/7klHLdZXkfsFFPcS7MhiEWMVHmfS0aLinbylMXIx6t26Vb5i72SxyGGZy0ya5K/KvyFSeE3Ks/MyfrrkVBa+eu7Veq3eahuSV4EucJJvrhisHJtvZi8L3J3EVZHzZZGGwVLcjfm//BrfncVCzgyIHMPz8sMj4BGoFwJ4VqgaCG3SVcboyUXF6mMRHtHQiArgssq0JJ1DmjlKOCmygI4a1N40hkUOvRrI8rDWZSmdXxlvaOTpuRIy5Ace6R4VDUFaijiGoIYD8W4Txf0zKS/flL779CPbSEuRNZ62C6+eCSU50sm90oPB/yZeHuUCboGTfvzWeJqPvM2VEeBdWcRujaVYW9mx2iwzQuUlvg3JdlwuaTmBM2yPfNz/wiPgEagNArpSVqHTitFyrTkeMw+pyXKkC5UGQl0hMP+bOeszWSBEePy5orx6YHMqC7U5t8pW49GQyRZKf6h1Wd/iPJh8tbllozQqVPVZ593e5Ng/wNP5g4StV+gPfI+FArTq41Z6awig0tzlWIZu1uQDk8MsXY6BH8op/GvmUZFkwsOCDWgguhh534Vk55MRGJTg3kU5gTdYzXwL+HNLJwLktPA8RI2WkhIME9RDQym7y3xhofeaEZ4vM4FqWPAahmtclkcwWM+ltxblf9mclG7vRuCDSZZ+Wcp6i53OYNgAU67S1fNMmhMsGTBY5izh24tShiHYsYx3+zx/08VEjZP+raM5KZa68Ow0H7jKMtXQYLMOcoIaUSBvqOougrpIAskoNFckzNyTZGlR+gbL8tZB/W744RHwCNQZAYyWeh6s2yvUZQ2Z6WB1D8fOJZ/tOO+5xd8xWCoNRF1KBqXwzNysvPFqej0sDlSmh9ukNYeHUCQ0CAFDmW9GNDSoxjmtngJ+LWQXpbQLbUW0aFuLVrUOS8o3ZXZyVGZlSd48XB+GZilp5dYAP20/gldubQfHkkktehzcuseXrNgVkpGxI2zjirvfk+CyRMiRleMxadm7KEPkFnt78G5rvFhY90n4D3gEtgcCSsRoZTJCFYMchQYB+abygCFoNTRCexFdaZK/suUPpVy6LEl5SHYuzco5iodrvcrf7DUI0Bg0ZXJ2EAgqXoLS+lUBI60GS88YwoVra8FCgdo3MYQEMVglpIGSXaMyOr/oFgv8si6jjC5lgihujJq7yl6p0Q8aJbS6RoRQD+Re1+7CygK8xuMTHtqs8ZzrMBzSVufowXl6FmjBPB6WN1hrRNa/zSNQdQSU4p7H+BDucfVZRfagHpbWo/AF1hWnPYdjconk801pDcaZTmflJCzBEymO51dqzYz8j9u7CKfBGpTnOadnOVdyWSkeBpULS2gqMAMsGigaxrMNKSuI+bm19b7rMqyki94an8Mqfr+6kaEHWqeUk2Pkc6jNchjqAifNRn894OgCTcsIyFEFhASTmxgk6q+09pDCYckMSr44I8l7S/LaSW+s1oOsf69HoEYIYLS0sy3TeUUlXNecmitRpYVBcleESJIPIIf3IzI7
KeeOVFaaf1Ojo6nWZt/mjI73EfrcsQeDtdI3y3UmrtYearOdhMkzgGAhtLYIVMOxzNo+GKQP2VRtdvjErdKZGPxkZzv0biXqfINjojaLOi3nqepc3/BDT6KyQHPdCSBYuN5jtHOxLNLC5I78+fMzDX+W/gQ8Ak2GABN7oiEoJiPtiWTwsDBOmvgPQgwWoZJy65gEqFUvfEjy+VhtmWrVAlcNluyE5o48lZjDPNTT6qzW5qu8nVULkGCw8LAICdqEsFRM4Wo0KB15rRGq7ziPf5dpQ0EkOUJY8AVHwjCa92T5Ytchplzfo17P3hRzcoJoCJpgiNeVEHhi+yWilUuQTElpCe/LD4+ARyBtCDC5G/JVsAcDVTtIFjFaSDNhtMrRTTm1fzhtB7ym48ntzUlhvkuCTDf5GFVz19Agk3CKxpfJf5fDWuD4lHQB0UX7kAVQrItDcupIvT2sCkC3RiIJy+QDc9RkGWrbrOYFOzhG7pcGH1pvaFTdAqFnbZapIViLFJaJ/iBt5rac3zclvZwpN02Dn6k/fI9AUyLA5GR+j7HKMrm3oG5RZrWflySYlzyPRh3lRRhumSNMTweZfvCwLOSLlKVhNCDrSBdmDvyvYxxQCKe9hYSohod3ZDae2zL4i3mEcbM7gexljus74IcX7liXW3ZIVdrxiqqLzHButBBRrza+AO6EY2kvMlFE6NkbrCph7TfjEagJApGU5sfo2EGLkihgkreSKSUylSvJUH99KNW1OK1SvAOashbBEhZ0oU9lDKZlqLli8rTzGARqsKi7MuRQ1MMK8W5LhVFJonlp/1hZhPUd77xDycOfIG8VP0OI7BDkhB6OcT8YtnOcDexlOaFhvZ91IXCfhyqLgLnrTnBDovJdaSktSPkC7zniPaz63nV+bx6BdSEQyamXEWBtkqGsttOnA8KC5F9gvFmKibW5ZUUjLy0nCQtNw1OwMzV3aOnFpC3ak6ifwoNJyfWsUKtP6fvqPF4OpYMygTxNHpPwKMcIicXhR22bwaA15Hyu4UBlZS5yLqhbKKWdnmM26pO4eAOPfFyen5yX16g5lG825AnW+Sbxu/MIbCkCDbx6/iPclCCgFO3oXxMa1EmX0KCxXXgxeAkQC9Ix4aohIm/Iat/CEFR5IKsq7SiMdLQgkTVVkO+T4/rBFh1svgx+8R7Cwz2Ey14mZKxqKRgs8FPZpsab0pUdqEQWLd/QZplf8BrSRfi5ZGALxuRvO/J5efdd3vda450dJ+OHR2C7IdA8Rksp2m0dbYj4HpASjEEj9AiD8eZ6ZmHQtmpKcoQLTeqjIWhgZ1ot2CaHIkIuRVuLhFclyd2T73fCVuvU92zNkfaCUctITuIy+avgCHgd5/g6eWhoFfy25rDY90aGHuwKnR2yhXq0ro2OZZFAGUeI4O18y5y0HMqzSEjkB70NdXIbAcR/xiPQLAg0j9Fq3RNRSEwreMKCYmmfYXTCbeW1Kn1s3XCEC6seFooLMASNDDBN4l2Rw8qjMJJtmZSTXVrgvZVHKfJDvKkh08akrt7VUcghsAbNLl6jmtJQo5IzFAhFgVXNTAwW3QiCgDAsHtbu7hGkbxN5i6ux1Zg3FKz+YD0C6UCgeYzW8nJGWmj0WDIouQcvMOnuZlKCVLClgwnUIiisIUFaXFgKtS0GyyaXXdPMZAc0d1Ty0zB53rq3W8LSQTy/I0zyKOJrzzGVvGog9YsKjjGYk5+ytBJRgWfyhYGBKUiH4XBhUk7s42+MXve//88j4BFoMAQa32g58oV6A4OtUlLWWwQBIzmKISC3tZVUd5W5SighUAIATTOtpWhVfutW/AXyKWPFJfm759NhsPSmDfIQVkLCqgYChgutqpeasjqBJ3678FLB22pnZ7MA5iwQMFbW/IHnS1yGYZTZJ0Te3wJyyxOP2f/BI+AR2AACjW+0zpwJJfdtDFYLxdGlA2BAMbEWSlN3ZmG8bdm0m0BZ1xoseo4ZBFit/T0CrBABCmMyaZfkd9Crf54itprNdSEScQxDhQIG3awbx2Dpba/5qzzHPQl5hLY5iN2GwecYL9TZS/ckm5uTt17H4z2xtSFYPVI/PAIegU0h0PhGa7Anks7lHeRiugm5PQ8jTAthYQwaJKq2xGQRfjIFjkH1G+/wDOkChqDmsaLshJRpOTJ1sCSnkcRKw/hP13PyTBa1kLJqNB7mWA9w3IQGG2G4+ivKBygWtitKLgLZQuT3yDGNiMxPSNy2KMv70uPRNgKs/hg9AilGoPGNVnt7JMtLuyQy+/BkejBTSh7IMvFuEYEAg2UQIU6SAWJufXh9n7pQ1WJ8yxVsuxX/FpMuVm9IDa3+Iz3HMkVyWDnyWTTKdGr4tp3jXn1Xip81d0XXARHqr+x1PKtP8bkuIT91We6OLNDCZZXgko4FQoqR9IfmEWgUBLZoYq8SPDrpRkXtuEwjS1VwIJcl2qSw7rkYZaxpiIpCbe14iyp+EHzC648kppC1WL4t7T2EC8mppMkYqBp+xkLAaHmR4yKfJdS1wbg0Wkic4uHKCKyqW4xwrBU5JiF/VU4+hDHYJ3uW5+g7ph6YGitvsFJ8Kf2heQTWi0Bje1pnoGmXyV1FTqpJvYQewkTa6LHebgIJfkNOhS7P4mqCPiUcyIofT2sREkDP0IKcOKpGLT1DDf4Zsj9TiCUn9iVe0XpEVD2E3mr8lM5RobNXGJksDshZCRqCYskVYrxKBpHnwzAyV0bv6gv/7BHwCDQLAo1ttMbPQsnuxrOitsgGeFpymNecU908rcok6hoJoiUoMATFfEI90Pu0c7lJ88ZRPKyivN6TRtaakcHBSHYFz1CQ/RKQqdHf5bys9BUSr3pMKsmkDME8BpZmjclZfr7AouWWRBGajfs0l+iHR8Aj0MQINK7RclT3QXJZUTfXZz/hQYqJXQfmOl4uiABGWWuoXIgZYDJFMT84j4cFa611Qk7tXa7jwax9V++9F8n/GcvJzgxeVryfY8dgoTFoKS5Op5elRh/9QEKCrv+VuUoOi/ArDEFTGpCgMCk/+haKIn54BDwCzY5A4xotzcf8y6CSy4oDJl0thq3nUOaaqFGaYt8aproAEeR30pK7IhPzk5UcVj2PZz37gnE5C+MyzNC6Bcalk7wCP1eMvVUElscef8XDqnTTniFPOIzh+hAv69fkrobp+TYmnaV5Gb9EvtAPj4BHYDsg0LhG6zhTbRJ0MYn14CUQFpTddbtgrpFgQodnmgaqari1l+n0TA4rMyDLyaxrK3IylSHBCkQziOC2UNdWLn8HQ0VdFhR3o4xL/qmZSM8gFOgEb29zXDddnlBoKWIETzZDE8fygoxD3FNGjgAAEB1JREFUcEkz1unB0h+JR6ApEGhEo2XkHbyBXWMtMpfsheasuRgtKKazbl1m3AI0hXkMFmEpbSRIXVCg6uGEBKPpKbk8mpfeU+kiXazeqo58QTH2VIY8IEr41iKKa2BcUtemwrj69zQMtyjQvJV2F7Z0F4bQIsnnEuHNxuCeabknmZmifP+Y5rfSmC9MA4r+GDwCTYlA4xmt3l5I2aM5FJo
0nLWX1fc+Vt4UFFvNx9RjoHJhh5hIL7FPpIJ4ZIJBKewbk5/uS+TPvlUXy7mhE1X1kGInJQLoMtpAi4lf5ByUNdgChmmhuauQLeE+xG7R5gLnfp7BGqOVkMuapwbur5+FcMElT4Nm44YuhP+QR8AjsFEEGsxo4QkcP21kPr9DcmX6ZQW0H4GAYQWDhQIGL2owdKPa07mER7XM8032dYHfseq3/RLnB6Vl54yccjmuGuy+ipscP5bhmHdJLtwncaw6gxBYoLxTrcVeto7mrsZHvStUbTkO8oR2lp+v8hqihcP6umTCYSmYGRk66NUtqnhL+E15BBoNgcYyWmo+fnE0kA68rFiLYW03kxoKGBY1d1qQVH/oHvXBROmUF+7yoyqHf8TPfRLl8Lieh7nWAAbLYVMkb5XpxGBh8E0PXqIWE2tocGuHmixB3cL1G6NBpoFkIeY8awU6DJcu4QPekz87QqjQD4+AR2C7I9BYRuvtt410/mWAb7AHD+sFJjnkhyy6gxis2tQTq8Gi1YWgvmAHcEbeZ1+fUtf0ueTz4xLNz8nJ5xonp1I2hAHtc+QBafJoNZeFknsaBv3GjEBZN+Sv1FDZczhd5ApzA9IRTEj2OV9/lYbL5I/BI5ACBBrIaClJ4Ewg3bsy6NHSgkQbFQpFxdKBwap+aMuRAVSWyWjjxqvs5xKTKerhmWvSsnhHxueX5c3XGoRqDXb/5XwkrVEHUkcQMKC5i+BtOfWLrbwNy+CLPiBityrJlCTXWYx8ynF9KuVwUHYtTsqffnNVjmkrj9Pv2yPgEUgJAo1jtHqZyv7VwVBmplsk24bckOlhsqOFhiuIxaCpU1S1oeEqza8gCWSusOn3qBE6J3FwU5YWZ2R8dllG300nQ/BxEPTioe752xx9pXah1KGsQa3PUgKLSjZt3bAUDBvq3Cx6gUposeEnGKubsARHJF9ekquwA6t8YbfuZP2ePQIegWogUH0PpRpH9bhtdN7ISL61Q6I2QoIwBkV18gwtSLTRY5Wo2hU2mk6Uqhx+ne2TV5EPUN24JJloUIq5GckW885g9b5VVSvJfmo3vvdWIJk8pJUiYUHp4fHMisHaiuuv4VRVX7/Ps7ZtoVGjNmvEYCUotbdk7klHftEVZ79V3ZVI7QD2W/YIeATqhUDjeFoaFiwvaREsdVmBGi0IGErVVkXyTRstNUDQAZx3Rf6KxL+hYaOVz1xtUBBepy/iGOW3BTn1zIqH1cvbGmGAjWo02gPdeIs9HLEqYGghdr2vvVLZyQ9aZK90UUCvMVffBjvQ2qvoBw6K6bgvpbtL8oOXNSToh0fAI+AReASBek9cjxzA2n9RgCEYIeiaHMaYaGhLSQTqZW3eW1D2mqFVu1NqNyoVpCFBhG+hXEfhFeqDpuWH6h3w24YaGKxejnr6uax0ugLsIxiOA3g05AHrfipl0Kt4WImlpYgWZGthtrkorcFdCaNpye4uyOu7Gyfs2lD3gj9Yj0BzINA4Rqs9h59T2IUBWaW6U2C8aZq7ztww16yy09TDghAAO7DCXiN0FdyW6fFJOfQKuZdGU15YMViHB7PSXsZIZQkPYrgEFRFj6skapMYND9bQsVkgXGj9lXqxrjkmSvi2MEJ34QX5/nNq0BqHidkc339/Fh6BhkOgAYwWk6+alv8+2EKBqVLde5jcIBMoiUBZg5vyGJgkbZFNoB5u7vK4yZ7OSlD6rbQlY3JuaEl6T8Bwa8ChsJzGnykFrRK1as+sbs6Vh9nDc0udzoijcAZrkf3ex3ghfWXPs/A4J+XSBZmenGwcBmadEPO78Qh4BJ6KQPqNVi8T7/nzobR2tdEFGO0ezcvo5EuGiR+eenZf+0dTUWkPzA0m1M/wQj6QMIDeHozLQj7vOg1/7TZS+wZDMUAkCzFSVwGdiQNtQaJ5wHqqX1AwHCyA7TWuGeoWqIiIMgWLN6W1lRq3V2N5M7X4+QPzCHgEUohA+o3WD9FDuLWnjf/xEAwUd0dz3wGW6mVBdd/QIG/iDNYoXtYgxuoiW/lE4vI5VNpnZHGF0t7b27jhKvWyZDxDrug5zu0lHioqTCE2SG4cNzbztUN9PM1LaY5wnP2N8BqyBbmrQBcG/Hz06IS8y3t+5MOB4OGHR8AjsA4E0m+0JseyErbulqT4HJMuEzCqDqIT76aG5rDu4QH0sc1P8UTOS6lwTeKFSv7qdSZU89qm4o6bOrpqfHj6fCAHujKyjL5gIN/GeBzAOGPsa1CI/fDxYrAsHqyZJVd4EdOJuoXpk9AMSCm649qJvCZaVNzY+D58zv4nj4BHoE4IpN9ozZZyEsSdzLXqMezloWHBjYwKIUBkEsYak6c2biRk5dTaC4QHyWHdmirIf1CD1eAT6jvvYNRbVQl/hxjCg1YOYaARxtXOzuqB1WTgWVES4AqyXcnAoFO3sFDao+S2JKUJWQznK80xGxzfmsDnN+oR8AisBYH0Gy2BNRjPPyNh2A3dHQac0ZzMeoYS2qG0q/qCCwnCWGPln8RfYLyuSBjfkB1t9+RPuyELMHrd/439373vRtLd0SqlWEkXauiVhKFelhr8ahst9Zg0jIp2IAoiNhkCV/KClmaNoXpYN8TMz8ie0WX5aYOSWjgxPzwCHoF0IJB+oxUu0P8pgv3GxGvpn4VMORPwWtHjjRS0GuqDBLkgwZuS4BPnAcTJVdTO70qubVJmm0yQtT3OSqa0U8ouNAh5BSV36wzWJokrj4Vd81doNGqzRoPBCuh7hcFSD0tK45LNzUnHi0V5/UV9nx8eAY+AR2BTCNRiEtvUAT30Ye2ka7KEuSiNFdTJ1WuwydrCgxWiPBNlskz6ZMzlr0TOMKGeJdR4UcLyLcmH4/Kj/UsN0QvrIWCe9gOY7QhbpFRGqsn0gJ2GVTH8rqatel5WJYS66mFRLIyxUkFhm5yly8gFyS0OOkr7+V/k5YQuHHxI8GlXzf/NI+ARWBsC6fe0ApuDh0ZOCxKGEdiDawhvqcFy/1TlQuaZTId4/ggx1n9mUh2We/lJmfpvJWlkduCTrq86oaepzZIArJJvOtwsNHdD7xa7Zg/1SVuv/F4NkP6DVcGDomxkmEQ+hPDxoZjydfnzb4x7I1WByv/vEfAIVBeB9BotJRP84/BOyQYI4wa0h09gDa6xaZZ1kkwLmDeMlblG3RVUa5hsgb2NXOGcdH9Ylp/3VmkGr+4FqcrWIjQZEy3Etj1sj/CgjQivBmsw92vZPYSWhMUALUUEZQvR3JVcJEd4VTLxsISoW3ivai04+vd4BDwCG0AgvUZrmg7Fz9KhOIE5KBah3K8lYKgR0nCVtmOf5jOjKC8woVIfpBNrEA5I6+KELBwsy8mT+r7mM1oaTv0FfbP2drdRKkV4kIJiS31bUAVRYddfjO7CVgi3Kr5CDRah1jDQLs6os7ciMtw1LQunG1NBhBPywyPgEUg/AunNaRV2BUyQrUy47UyKhLvM1xlYrQ+CwWZGgB0igPwG3+K3qGicp+3FLZmZm5JzhARPYtia1RM4jwLGHgqxyxYV9wBjr2FVCwlDvaxNprOUfWlhB6
p6iKFdS2B+ybbflzIszFJ4RxYLs1RkleSUWxCk/873R+gR8Ag0JALpNVo7ImbZYiuhqHaQbWWCfJzRwltyoUDEWGWUiVnlgi7gYbH6D85KbC9LS9sA6vBTcvTFZZfDalaDpbdf3yUKsXOqFlIxWKJq7o5tqfmsDVotxdfOYrC0tg31e7QDrUGfMfgddPY+zNSoLObB/2BRelXhwhMu9FL44RHwCNQGgccZgtrsab1bbcdoaTGsSVoxQhTFPqLoruE9+jNpuMpO8t5BXmuRMCoX0i/FzFUJl6al8OxCU3tXX8U13J1Fn5buxOplJSp71YKR2eg1Bl8lXKi6BcXCFX1GVWi/zGLiCxlr/UJeOFSUnzRBMfZXMfSvPQIegVQjsNEJrT4nVY5DyBMhE2UImQJPQe2U+09lgMhdWeqD0LcTc5MHk2l8WYLMDSmW70knMkLPHC3Kq00cDvzjq9Cao1FmnvyfhgfRGTSEBdc/8Ja0GaYpsZ0lvCo8WKEYW/OC9BeL7Q2JimOy5yi9r7YRtuvH0X/CI+ARqAEC6TVaXWUrYwmGKZtntV/Ec9BclEKglovcFd6VlWEmVAyW7Wee7XcK7aVoTP5i3yLvdRauBpild5NhMYKETijVQMTQ9iPaIHNdUUE1WLogYDEgM+Cu6hawL4UFQXBFiqWbCPCOyU9fUu/LD4+AR8AjUHcE0mu0xpcSad05zcqexoGw1axr1KhyTFCuVd2C7sJiPiAceKFirMJpuZtflKlDeAjb0GDprVMOlNYe4SGpakgEbuuyWOCprEqo7JY6KxYDxvwa1ZALUgyGJWNnJOxZwOfy7MC6f039Dj0CHoFVBNJrtOR4LIUbsxK0DDB5fsxKn/qfhBW/TqpmDA9gEEN2VaJwUGTxvkwv5OXnr6qXsP08rNWrWSxFkCNUskl1BvG21hUenGIBcB+sh/jsdYzWFbJVlySTBefMtEx3Feh9tb3xXcXZP3sEPAJbhkB6jZZS03+F2sJCmRBVtojS+y0ihNDfMV6hmWNinpEoMyuFiUU5eVzDVYm8uY0Nlt5CLWFIlZqGBWFdEh5UFYzHjxVFC9dVGEPk6q+GMVZ4r/S8suSuEm2GmZ+RYguhwq6ivOHxfTyU/rceAY9APRFIr9HSEF8vdOt9o3PSFRASNFPoBUZSQonB8ChnC9IyW5LCcQ1XNW/t1bruhhiNv0g9UWVUapuQx3udVj0mWIHGTPKe+7wfdqAyL3mY8g1+NyRL7XcRD4HufiF2xdjbNeS6Lvz9mz0CHoFaI5Beo6VnrnU/77xTkJHvxpJrX5D9HG5puixlQoeuSHjv4yflWqOW1u0HIcSVRL1QjJGZwHDNcaiKER4XorXKCjTOYNGGJVDVEGStLOFXVypwm8/eISQ4JrnStPzVSquWtJ6rPy6PgEdgWyKQbqOll+TUKSZbplJm4soV2u8NVQWIR//P4YHG5QmKqUcIpe7HUB3kTTAvtdOzxQOjps0KzMqAujZqr2IlW5AXDEv99L6alEIWI0eR8I/xXP3wCHgEPAIpRCD9RutL0LZ5vupLHJ7yohRSHhArmeIWOa0OyCo7qK/ay7O2c4GBiV5gkIzyt7uYpbv8/h767xMSR9NSHCWc+CdFOaUGy2P9FJT9nzwCHoEtRKCBjNYWotQouz5/lWLqF2cxVDRkpMDYmm6Ks1VAGO3GeIxnJVvgXZUHJdtyR/btm5XXKCL2wyPgEfAINAgC/x8pa4f6RX1i0QAAAABJRU5ErkJggg=="/> + <image id="-" x="231" y="228" width="50" height="56" visibility="hidden" xlink:href="data:image/png;base64, 
iVBORw0KGgoAAAANSUhEUgAAADIAAAA4CAYAAAC/pKvXAAAAAXNSR0IArs4c6QAAAERlWElmTU0AKgAAAAgAAYdpAAQAAAABAAAAGgAAAAAAA6ABAAMAAAABAAEAAKACAAQAAAABAAAAMqADAAQAAAABAAAAOAAAAAA/e7cKAAAONklEQVRoBcVaCXRU1Rn+3iyZ7CsBskISthIVAS24gVsVFdkJUK0eC4prrVuxVI96il1E6Xa0YltRUFspBLXCUU9V3EUsR8GwSEJiEkgCZN8mmZn3+v1v5s28mUwmGUD9z5m599373//+//2X+9/7noLvAjRNwet1cfBoGXB6UmBHHBTVpk+tWlyw2DrR425FSk8jrhzVCyhatGwp0Q6ICn+tZkdmzViolgugeCZBw0jAkgmoKWTW7qPVA2gtfG4g/+XQLJ/BZnkPs7MPQ1HUwc737QgiGthcPZ6MPEjmLyYzyVAQw/oA8wnjWg+x2jjmn7Ba12BOTs1ghBmA8GBImHB0AarGQrEuJ9PL2RNn6tWrNoUSWcBFV6DRgNz861U1Wl0opv58mIKthsOyCTNzj3BhwmMR9dQJsrEsBrbkhVDVu0l2AmlbDdZiyPRZaTH6ryjRhnRKEs82DxG6KMFRpwdfd7ixo6kXB9pdumDGWJaC9jY02yoszP7A1B5UPTWClGkxOFB7F1TtAVJPNGaIsyqYlxOPe8ckoyjByoX1aoKFH2SJRTMu/nW4NXxCYX67vw2fslSlwwtSER/6Ceblvx1OMycvyOd06KraW8nOI5yMTgzEU4ApGQ6sHJeMC4c4YItyFjGzF2u68KfydnzRIgIJVQGlAxbtRuTnbcZZisvb5v33q9/cOOj6xo1WtKbcTCF+zTG6EOkxVtwzNhmrT0/Bacl20IKCoJdcNne7Ud/hQnuPR2fSTsEt9B0DZMzpHDst0wEn3b+szWX4kASMiWjr2I2X11Qa+FIGRptbB1v/d80VVPdLRE+VIVZysHZiGq7LT4DdbD/sO9TYg3U7G/Da3ibUtvTA5VFpIUCs3YpxmfFYcMYQLJ44BJkJ3u1F6Ak4KfhD+9rwh4PtcIlqvA5fhnTHObhoaIcX62QE2XwsC+h+niQupZErKeR8VXEqbitK8K+Oi0zsqe/C05/U4eUvjqPN6TbmDVvmpjqwfOpwXDt5KEakOfx0mnpV3LenFeuqOqgQH2jaeu45N6Mkr1taTlwjpbU3M0KtIY04iUq3FyXi0dNSEOuzJZnwuZ1H8di7tThwrJuy+llAHIUemxmHYUkx1IyGqmYnKpucutNLWD47LwkPXZaPS8ekwOozudpuDxbvaMRH1KwPuqidazE/9xWKwY3rROBjLQ5Hqr8igUIZPiE1BqVTM1BoMou1Oxrw8y0VcLoDm7MwWXJmJu6/OBeF6bHcS7yTOxmtdtR0YOW2KuyqbdcFGpEWi/VLxmBaYbKfw/JON6a80wDRkBeUzbC6l2FuQUuIJfvHRK4cqVliCCErsbwg0S+Eh3a8aXcj7nk1WIh8msqzi0ZjA5k7fXg8EriXOCiJ/FJirbhsdAq2Lh2P287Lhjj/N9TSz145hGr6kwEF8TYsHZnIKGisv3Yuei3F0h+9IOvrEyjETQbxsYwuS/LijUcy0IMn3juMTv+qQWds1YwRWERt+CzPj2+uDEu0Y9WMfCybkqXj7a7rwO2lFehyeTVA+TArOw5ZsX62hxFxutDwt5gJRqwn9kxk0jfOwLmRK5RqN1YIeH1fM3bWtBvdern4zKG4ZtJQxAgnA0BKrA1rZhWgeFiCbmJvHGjGVkY6A85MsWNMkpFvCv/aVURUohPkXc3GSHEpNUKtAEk0i5lZscYckD3i2c/qIeZlgJjOvRfmRNSEgWuUDu6gSyZl6uHcTZqvlTX5NZzAvkuGBuakw0/Alvb06ASpq8yg9OdxQj3Yj02yYajkHT74iqF2b4MeDY0mxFIQMZloQPQ2MScByQ6rHm5313ficKvXV6TvnIwYMzmqrnlcgAtzV391hy2flCcb3SPpfBJ6DXinvEXf6IxnKWX3/rKu09w0qHoyTSyBggjU0OHrmUwaMI6m5fd3b2NxdIIoykzuPGkGwQxqw2z2+xq6jC5/KYnfraXl+PJIpyln8nf3W1FpnkbS2E1nl7TGgCSaVzI1HQBLkfkp0B6utu2gAyoWmbsc1EZAH0BrPzt3xXEnbtpUju0VrYGd2UwoTL25y412pvcC4nNOX+SSZ1m8RHMmqmlJgxfEFX85N9AxQsgAORAF3JrO7wjOkww8KT+rbsec5/biqY/rmACaR5mxvHXp/rCqDR29XkFkdxdfM0D22E4GAT8oWlug198aprL2czvcnhvItVkBOM69wkzv9Gz/USQMEfoLV/iOLYcwZ91+vH1Q/MnEjGlERaNTz80MeRPpK+mmrKGNmUCryzzWUt7/EpoIIyNTNDHF3CT1SqYMohU5fwhcxHRC0hA32/oDybm27mukz3TginHpuGnqMEzITtDHyZiDNMNlGw+imju7ATkpMUGRT06R5twNHk/ZYAQhl8qF1Ea6Qdgo97e7mfdo3BC9LcLQuGHx+GqAKCUrLZHomU/rsP5/RzGtIBkXj07FsU4XNXFMT/ONOaQsZkqTx8zYgB3NvDEKQDfSkvcObFrbGpNoUOfzFxS8hU4njbX0SGDfkEh845ThiDHZc2C+8DWny4O3vm7G/Vsr8cT22j5CyIY697QMPWMWCm00qXePBrTFpp24NK19YEF6O3Kpx6mh/mGwtfZQB+3VyEaBq8en46zcyL5ijB2olL1CzO9K/gzY09aLvTwx+oDRRtvGTUUdWBDVRt9QRhgjQ8sKHllf4PnagIJ0B+6clsNDysmdooVeYUYcnl88GvFyf0SQ2PCfOicO+8Iym3ghYdkufZEF0XMr9Vrieb1ZRoSAuPU/Kjv16xyjaz5N4VFmu0OjTE2M8VL+gL729PwiJDPFN2A3NbH+m04qQW+RGLqd55ED8hRZkMba8RwluVVEkMuBvx7q9N9HWUn1jvOz8Mc5RRg1pM8dXURaYk5n5SVi7YJRmF6o32fo+PVMdZbvakZdQBt0FMsLmDOyVRAC4oaSl1vDfa0r2Hx+aFfos6h8F69tiqgBuf0QZuRWRA5QYt8Hj3fTib33VN7FDKUg+EBavB13XJCNl64ZiwKeIOUyQ0Duu+78sgVv1PsCCy8pqYJNmJf7GCfTSXox+9IFNldlMWV/j12jw3WHa5MLucfPkFuU+KAUQraVj7hTy9mijBny0fZeSP4kHEhyKPvEGVkJmMVAISbl41+fooVR6lFe2D1V0a7fSrKRJqV9ihj7LFydfdzgo39BSmvmMWtbJ3MZyIMp5Tp0eWEiHuDlnLFRGuMkNensUdHCnKyHqyw8xTMopEqm63NoA1fK8k4P7tvdgjd5NOgOZAEH6RvLMD//fTNueEEkQexyPEFE3iD27+hmQua6QrP6Ie96n56UhmKeWezmJTYjhqmLeJItvNXQg7t2N6OCd8J+4DpQ+BuwIH+TYVJGX3hBXinPgydmCxfMf/YwBkR
TDmGONJdn7Kuz4lBM38mJ897/htIQ5nvIvDC9q8WFTYe78N8Gp2FK7NVfN3zF69JfYW6+vm+E0ggvSGnlDKjWUiJHF3JCqQsL/KXSbPLjbBhF7cg1qtQlDZe+RvqKCLCn1YVveOaoYfreqZudj5g4NvA+PO5fYGHBzlBN+LD6MZtN1c8R4XoD6Xssxa62I97+Y1yZdSwSH333kY2Vw7kGV0Ua9J30KTjOPWw1VGXWQEIIP32zX7uNL2v40lIU+r2Bxldv1ruhtW5BSXEgK43AT7AgG2vi4MYSGlx434lA6BR1Sfa5n78SviHaCyV30MsZbFo2dTI3m1GniKnoyPCugaF1C38lWDCirD+n7o9oQCMbNSvUmulE1N91hAyQA4CcR4IFD0E6icdu3iis5hRPYt6wiE7d3xwmxmpTuArnEjH0Nq2MOcNctlf0R+Qk2+u5PLdhXt5DmDf8KAP2oM3JPG9AEKt1JB38bHMniX4Kt+06qJ4GtofTVDB6dE/MmRTuC5ZlSMvbEN3QvtgBQeCeQfsc4kdRsIv15dj/zBcU6ALWo8q5/HTCV2R/+Bfs9kV06m24SDHlIeEHDNTqFUSilaZdR2QjWh2mun+JBbl7MOF6CqBf3QdO/wNRjdTPuzaa6lNISbwFs4ZXRuvU/ZH2CmLRppHZMV4kfheiaSvhynubbfwkwTaWJhfpcCWfXOiHm/4m0dsl1VBwEBbLnUhzr8CP0gceE5FgcKcFD78rkesG/kQbfC+HFRiS/xJKFO81H7QFbBsaPEywdafcy8o1DJx/69Pft2EXj3FLsSfn77ioIOgapC9q9C3M4vILyKhEK97ba39Burreb7OvNgxDb89S9hsmZ8zgYct2OuqtcOdUwFpzfYRMQOh+AFv89ZidecQgcKpLCyz2yWSCfqBtgOJa41+thzUL3M5byHBayKTybvtpaB5+TpH7Nez1TPWVS0JwvI+a5EtYDWfv/G9TCJnMRrNIg6Y+iVjlccwc1exnaHJdLnpoNkHATyig/Q72rj9j9rh2yC3L8eoHaWaBl4gB/CYeum/lqWorFuYH7osC/ae0RkHyngHTxKDooX+uVM3bdyXbbzKKwvfG6m+gdqzB7GLvnWXTkQsZ3ab7cXTWGEoVjf7Ak9ycnH3U1gltcNFKaQs4tWnoS9Wp/JRnBqOXd6UVVJGhR6Dmb/Djv8m3ux09i6B53yf6RrvI9ws8BP0e8wv1+yYT1W+1Gsi1zNM4LHlk6DzvSiu1TLHuRHq2bFy+SEbkzq5R/IbqMtaMTbWVGnwQnt71KCk6paHVzFp/9fCCWHA5jU12eX5j6PopFhR82MdEVJu8hsvzmVUF+1dgT+4WPCzn6+8ejNUMnllVL6BTv8K1nouFhR/0EUK+0ZIzgwZ5UbEVmvU6fhPyvQkhzPejEddKfoBUh7k5TcES+p7aj/HTHUYwVV0BrfdFlIw4odQ7LO0TbPw/q0HxBkS2mJQAAAAASUVORK5CYII="/> +</svg> diff --git a/web/src/assets/svg/llm/token-pony.svg b/web/src/assets/svg/llm/token-pony.svg new file mode 100644 index 000000000..a504e2224 --- /dev/null +++ b/web/src/assets/svg/llm/token-pony.svg @@ -0,0 +1,8 @@ +<?xml version="1.0" encoding="UTF-8"?> +<svg width="512px" height="512px" viewBox="0 0 512 512" version="1.1" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink"> + <title>logo2 + + + + + \ No newline at end of file diff --git a/web/src/components/embed-dialog/index.tsx b/web/src/components/embed-dialog/index.tsx index 48abfb0d0..9aa389565 100644 --- a/web/src/components/embed-dialog/index.tsx +++ b/web/src/components/embed-dialog/index.tsx @@ -139,7 +139,7 @@ function EmbedDialog({
-            Embed code
+            {t('embedCode', { keyPrefix: 'search' })}
             {text}
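(Note on the embed-dialog hunk above: the hardcoded "Embed code" title is replaced by a scoped translation lookup. A minimal sketch of the pattern, assuming a react-i18next `t` obtained from `useTranslation()`, where `keyPrefix: 'search'` makes the call resolve the same entry as `t('search.embedCode')`; the component below is illustrative, not the actual EmbedDialog.)

```tsx
import { useTranslation } from 'react-i18next';

// Illustrative sketch of the scoped-lookup pattern used in the hunk above.
function EmbedDialogTitle() {
  const { t } = useTranslation();
  // With keyPrefix, 'embedCode' is resolved under the 'search' section of the
  // locale files, so each language supplies its own title string.
  return <h2>{t('embedCode', { keyPrefix: 'search' })}</h2>;
}
```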
diff --git a/web/src/constants/llm.ts b/web/src/constants/llm.ts index 2fde6aced..a3909611a 100644 --- a/web/src/constants/llm.ts +++ b/web/src/constants/llm.ts @@ -54,7 +54,9 @@ export enum LLMFactory { DeepInfra = 'DeepInfra', Grok = 'Grok', XAI = 'xAI', + TokenPony = 'TokenPony', Meituan = 'Meituan', + CometAPI = 'CometAPI', } // Please lowercase the file name @@ -114,5 +116,7 @@ export const IconMap = { [LLMFactory.DeepInfra]: 'deepinfra', [LLMFactory.Grok]: 'grok', [LLMFactory.XAI]: 'xai', + [LLMFactory.TokenPony]: 'token-pony', [LLMFactory.Meituan]: 'longcat', + [LLMFactory.CometAPI]: 'cometapi', }; diff --git a/web/src/hooks/llm-hooks.tsx b/web/src/hooks/llm-hooks.tsx index 635519d8c..974bcae94 100644 --- a/web/src/hooks/llm-hooks.tsx +++ b/web/src/hooks/llm-hooks.tsx @@ -136,6 +136,7 @@ export const useSelectLlmOptionsByModelType = () => { }; }; +// Merge different types of models from the same manufacturer under one manufacturer export const useComposeLlmOptionsByModelTypes = ( modelTypes: LlmModelType[], ) => { @@ -155,7 +156,12 @@ export const useComposeLlmOptionsByModelTypes = ( options.forEach((x) => { const item = pre.find((y) => y.label === x.label); if (item) { - item.options.push(...x.options); + x.options.forEach((y) => { + // A model that is both an image2text and speech2text model + if (!item.options.some((z) => z.value === y.value)) { + item.options.push(y); + } + }); } else { pre.push(x); } diff --git a/web/src/locales/zh.ts b/web/src/locales/zh.ts index 7bcd21b84..0e37060e6 100644 --- a/web/src/locales/zh.ts +++ b/web/src/locales/zh.ts @@ -155,7 +155,7 @@ export default { similarityThreshold: '相似度阈值', similarityThresholdTip: '我们使用混合相似度得分来评估两行文本之间的距离。 它是加权关键词相似度和向量余弦相似度。 如果查询和块之间的相似度小于此阈值,则该块将被过滤掉。默认设置为 0.2,也就是说文本块的混合相似度得分至少 20 才会被召回。', - vectorSimilarityWeight: '相似度相似度权重', + vectorSimilarityWeight: '向量相似度权重', vectorSimilarityWeightTip: '我们使用混合相似性评分来评估两行文本之间的距离。它是加权关键字相似性和矢量余弦相似性或rerank得分(0〜1)。两个权重的总和为1.0。', keywordSimilarityWeight: '关键词相似度权重', @@ -633,6 +633,8 @@ General:实体和关系提取提示来自 GitHub - microsoft/graphrag:基于 }, cancel: '取消', chatSetting: '聊天设置', + avatarHidden: '隐藏头像', + locale: '地区', }, setting: { profile: '概要', diff --git a/web/src/pages/agent/chat/box.tsx b/web/src/pages/agent/chat/box.tsx index 8404ee556..f57ed4282 100644 --- a/web/src/pages/agent/chat/box.tsx +++ b/web/src/pages/agent/chat/box.tsx @@ -62,7 +62,7 @@ function AgentChatBox() { return ( <> -
+
{/* */} diff --git a/web/src/pages/agent/chat/chat-sheet.tsx b/web/src/pages/agent/chat/chat-sheet.tsx index afe93de85..b994c6cd3 100644 --- a/web/src/pages/agent/chat/chat-sheet.tsx +++ b/web/src/pages/agent/chat/chat-sheet.tsx @@ -9,7 +9,7 @@ export function ChatSheet({ hideModal }: IModalProps) { return ( e.preventDefault()} > diff --git a/web/src/pages/agent/form/agent-form/index.tsx b/web/src/pages/agent/form/agent-form/index.tsx index 65fb364b7..f38fbf17f 100644 --- a/web/src/pages/agent/form/agent-form/index.tsx +++ b/web/src/pages/agent/form/agent-form/index.tsx @@ -145,7 +145,7 @@ function AgentForm({ node }: INextOperatorForm) { @@ -166,7 +166,7 @@ function AgentForm({ node }: INextOperatorForm) {
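(Note on the `useComposeLlmOptionsByModelTypes` hunk earlier in this diff: merging model lists of several types under one factory now skips options whose `value` is already present, so a model registered as both image2text and speech2text is listed once. A minimal standalone sketch of that merge step; the `LlmOption`/`FactoryGroup` shapes and the sample values are assumptions for illustration, not the hook's real types.)

```ts
type LlmOption = { label: string; value: string };
type FactoryGroup = { label: string; options: LlmOption[] };

// Merge per-model-type groups into one group per factory, skipping options
// whose value already exists (e.g. a model listed under two model types).
function mergeFactoryGroups(groups: FactoryGroup[]): FactoryGroup[] {
  return groups.reduce<FactoryGroup[]>((pre, x) => {
    const item = pre.find((y) => y.label === x.label);
    if (item) {
      x.options.forEach((y) => {
        if (!item.options.some((z) => z.value === y.value)) {
          item.options.push(y);
        }
      });
    } else {
      pre.push({ label: x.label, options: [...x.options] });
    }
    return pre;
  }, []);
}

// Example (hypothetical values): the duplicate option is kept only once.
const merged = mergeFactoryGroups([
  { label: 'OpenAI', options: [{ label: 'gpt-4o', value: 'gpt-4o@OpenAI' }] },
  { label: 'OpenAI', options: [{ label: 'gpt-4o', value: 'gpt-4o@OpenAI' }] },
]);
console.log(merged[0].options.length); // 1
```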
diff --git a/web/src/pages/agent/options.ts b/web/src/pages/agent/options.ts index 4ad5a6457..63dd07a65 100644 --- a/web/src/pages/agent/options.ts +++ b/web/src/pages/agent/options.ts @@ -2133,7 +2133,7 @@ export const QWeatherTimePeriodOptions = [ '30d', ]; -export const ExeSQLOptions = ['mysql', 'postgresql', 'mariadb', 'mssql'].map( +export const ExeSQLOptions = ['mysql', 'postgres', 'mariadb', 'mssql'].map( (x) => ({ label: upperFirst(x), value: x, diff --git a/web/src/pages/data-flow/options.ts b/web/src/pages/data-flow/options.ts index 4ad5a6457..63dd07a65 100644 --- a/web/src/pages/data-flow/options.ts +++ b/web/src/pages/data-flow/options.ts @@ -2133,7 +2133,7 @@ export const QWeatherTimePeriodOptions = [ '30d', ]; -export const ExeSQLOptions = ['mysql', 'postgresql', 'mariadb', 'mssql'].map( +export const ExeSQLOptions = ['mysql', 'postgres', 'mariadb', 'mssql'].map( (x) => ({ label: upperFirst(x), value: x, diff --git a/web/src/pages/dataset/sidebar/index.tsx b/web/src/pages/dataset/sidebar/index.tsx index dbced0871..46daf52e2 100644 --- a/web/src/pages/dataset/sidebar/index.tsx +++ b/web/src/pages/dataset/sidebar/index.tsx @@ -9,13 +9,7 @@ import { cn, formatBytes } from '@/lib/utils'; import { Routes } from '@/routes'; import { formatPureDate } from '@/utils/date'; import { isEmpty } from 'lodash'; -import { - Banknote, - Database, - DatabaseZap, - FileSearch2, - GitGraph, -} from 'lucide-react'; +import { Banknote, Database, FileSearch2, GitGraph } from 'lucide-react'; import { useMemo } from 'react'; import { useTranslation } from 'react-i18next'; import { useHandleMenuClick } from './hooks'; @@ -34,11 +28,11 @@ export function SideBar({ refreshCount }: PropType) { const items = useMemo(() => { const list = [ - { - icon: DatabaseZap, - label: t(`knowledgeDetails.overview`), - key: Routes.DataSetOverview, - }, + // { + // icon: DatabaseZap, + // label: t(`knowledgeDetails.overview`), + // key: Routes.DataSetOverview, + // }, { icon: Database, label: t(`knowledgeDetails.dataset`), diff --git a/web/src/pages/datasets/dataset-creating-dialog.tsx b/web/src/pages/datasets/dataset-creating-dialog.tsx index 904e25270..1f01924e5 100644 --- a/web/src/pages/datasets/dataset-creating-dialog.tsx +++ b/web/src/pages/datasets/dataset-creating-dialog.tsx @@ -19,9 +19,10 @@ import { Input } from '@/components/ui/input'; import { useNavigatePage } from '@/hooks/logic-hooks/navigate-hooks'; import { IModalProps } from '@/interfaces/common'; import { zodResolver } from '@hookform/resolvers/zod'; -import { useForm, useWatch } from 'react-hook-form'; +import { useForm } from 'react-hook-form'; import { useTranslation } from 'react-i18next'; import { z } from 'zod'; + import { ChunkMethodItem, EmbeddingModelItem, @@ -89,6 +90,7 @@ export function InputForm({ onOk }: IModalProps) { console.log('submit', data); onOk?.(data); } + const parseType = useWatch({ control: form.control, name: 'parseType', @@ -121,6 +123,7 @@ export function InputForm({ onOk }: IModalProps) { )} /> + {parseType === 1 && ( diff --git a/web/src/pages/datasets/dataset-dataflow-creating-dialog.tsx b/web/src/pages/datasets/dataset-dataflow-creating-dialog.tsx new file mode 100644 index 000000000..5bf28758b --- /dev/null +++ b/web/src/pages/datasets/dataset-dataflow-creating-dialog.tsx @@ -0,0 +1,123 @@ +import { ButtonLoading } from '@/components/ui/button'; +import { + Dialog, + DialogContent, + DialogFooter, + DialogHeader, + DialogTitle, +} from '@/components/ui/dialog'; +import { + Form, + FormControl, + FormField, + 
FormItem, + FormLabel, + FormMessage, +} from '@/components/ui/form'; +import { Input } from '@/components/ui/input'; +import { IModalProps } from '@/interfaces/common'; +import { zodResolver } from '@hookform/resolvers/zod'; +import { useForm, useWatch } from 'react-hook-form'; +import { useTranslation } from 'react-i18next'; +import { z } from 'zod'; +import { + DataExtractKnowledgeItem, + DataFlowItem, + EmbeddingModelItem, + ParseTypeItem, + TeamItem, +} from '../dataset/dataset-setting/configuration/common-item'; + +const FormId = 'dataset-creating-form'; + +export function InputForm({ onOk }: IModalProps) { + const { t } = useTranslation(); + + const FormSchema = z.object({ + name: z + .string() + .min(1, { + message: t('knowledgeList.namePlaceholder'), + }) + .trim(), + parseType: z.number().optional(), + }); + + const form = useForm>({ + resolver: zodResolver(FormSchema), + defaultValues: { + name: '', + parseType: 1, + }, + }); + + function onSubmit(data: z.infer) { + onOk?.(data.name); + } + const parseType = useWatch({ + control: form.control, + name: 'parseType', + }); + return ( +
+ + ( + + + * + {t('knowledgeList.name')} + + + + + + + )} + /> + + + {parseType === 2 && ( + <> + + + + + )} + + + ); +} + +export function DatasetCreatingDialog({ + hideModal, + onOk, + loading, +}: IModalProps) { + const { t } = useTranslation(); + + return ( + + + + {t('knowledgeList.createKnowledgeBase')} + + + + + {t('common.save')} + + + + + ); +} diff --git a/web/src/pages/flow/constant.tsx b/web/src/pages/flow/constant.tsx index 79a58baa0..b4ec32902 100644 --- a/web/src/pages/flow/constant.tsx +++ b/web/src/pages/flow/constant.tsx @@ -2911,7 +2911,7 @@ export const QWeatherTimePeriodOptions = [ '30d', ]; -export const ExeSQLOptions = ['mysql', 'postgresql', 'mariadb', 'mssql'].map( +export const ExeSQLOptions = ['mysql', 'postgres', 'mariadb', 'mssql'].map( (x) => ({ label: upperFirst(x), value: x, diff --git a/web/src/pages/user-setting/setting-model/ollama-modal/index.tsx b/web/src/pages/user-setting/setting-model/ollama-modal/index.tsx index 988c51d54..dd86b5796 100644 --- a/web/src/pages/user-setting/setting-model/ollama-modal/index.tsx +++ b/web/src/pages/user-setting/setting-model/ollama-modal/index.tsx @@ -37,6 +37,7 @@ const llmFactoryToUrlMap = { 'https://huggingface.co/docs/text-embeddings-inference/quick_tour', [LLMFactory.GPUStack]: 'https://docs.gpustack.ai/latest/quickstart', [LLMFactory.VLLM]: 'https://docs.vllm.ai/en/latest/', + [LLMFactory.TokenPony]: 'https://docs.tokenpony.cn/#/', }; type LlmFactory = keyof typeof llmFactoryToUrlMap; diff --git a/web/src/pages/user-setting/setting-model/system-model-setting-modal/index.tsx b/web/src/pages/user-setting/setting-model/system-model-setting-modal/index.tsx index f67755dbe..c3b7ffc06 100644 --- a/web/src/pages/user-setting/setting-model/system-model-setting-modal/index.tsx +++ b/web/src/pages/user-setting/setting-model/system-model-setting-modal/index.tsx @@ -1,7 +1,10 @@ import { IModalManagerChildrenProps } from '@/components/modal-manager'; import { LlmModelType } from '@/constants/knowledge'; import { useTranslate } from '@/hooks/common-hooks'; -import { ISystemModelSettingSavingParams } from '@/hooks/llm-hooks'; +import { + ISystemModelSettingSavingParams, + useComposeLlmOptionsByModelTypes, +} from '@/hooks/llm-hooks'; import { Form, Modal, Select } from 'antd'; import { useEffect } from 'react'; import { useFetchSystemModelSettingOnMount } from '../hooks'; @@ -43,6 +46,11 @@ const SystemModelSettingModal = ({ const onFormLayoutChange = () => {}; + const modelOptions = useComposeLlmOptionsByModelTypes([ + LlmModelType.Chat, + LlmModelType.Image2text, + ]); + return ( - {