From f1ca359c33b312e34c192ba957e122e2c5b03e39 Mon Sep 17 00:00:00 2001 From: Oleg Korshul Date: Tue, 11 Feb 2025 15:46:49 +0300 Subject: [PATCH] Full worked version --- .../content/ai/scripts/engine/engine.js | 83 ++++++++------- .../ai/scripts/engine/local_storage.js | 2 +- .../ai/scripts/engine/providers/base.js | 4 +- .../engine/providers/internal/anthropic.js | 8 +- .../providers/internal/google-gemini.js | 14 +++ .../engine/providers/internal/ollama.js | 20 +--- .../ai/scripts/engine/providers/provider.js | 100 ++++++++++++++++++ 7 files changed, 172 insertions(+), 59 deletions(-) diff --git a/sdkjs-plugins/content/ai/scripts/engine/engine.js b/sdkjs-plugins/content/ai/scripts/engine/engine.js index 4bf8d73e..bced277a 100644 --- a/sdkjs-plugins/content/ai/scripts/engine/engine.js +++ b/sdkjs-plugins/content/ai/scripts/engine/engine.js @@ -117,6 +117,12 @@ return provider.getRequestHeaderOptions(); }; + AI._getModelsSync = function(_provider) { + let provider = _provider.createInstance ? _provider : AI.Storage.getProvider(_provider.name); + if (!provider) provider = new AI.Provider(); + return provider.getModels(); + }; + AI._extendBody = function(_provider, body) { let provider = _provider.createInstance ? _provider : AI.Storage.getProvider(_provider.name); if (!provider) provider = new AI.Provider(); @@ -137,7 +143,7 @@ let provider = _provider.createInstance ? 
_provider : AI.Storage.getProvider(_provider.name); if (!provider) provider = new AI.Provider(_provider.name, _provider.url, _provider.key); - if (_provider.key && !provider.key) + if (_provider.key) provider.key = _provider.key; let url = provider.url; @@ -158,12 +164,8 @@ { AI.TmpProviderForModels = null; return new Promise(function (resolve, reject) { - let headers = AI._getHeaders(provider); - requestWrapper({ - url : AI._getEndpointUrl(provider, AI.Endpoints.Types.v1.Models), - headers : headers, - method : "GET" - }).then(function(data) { + + function resolveRequest(data) { if (data.error) resolve({ error : 1, @@ -201,6 +203,25 @@ models : AI.TmpProviderForModels.modelsUI }); } + } + + let syncModels = AI._getModelsSync(provider); + if (Array.isArray(syncModels)) + { + resolveRequest({ + error : 0, + data : syncModels + }); + return; + } + + let headers = AI._getHeaders(provider); + requestWrapper({ + url : AI._getEndpointUrl(provider, AI.Endpoints.Types.v1.Models), + headers : headers, + method : "GET" + }).then(function(data) { + resolveRequest(data); }); }); }; @@ -221,7 +242,9 @@ if (provider) { for (let i = 0, len = provider.models.length; i < len; i++) { - if (model.id === provider.models[i].id) { + if (model.id === provider.models[i].id || + model.id === provider.models[i].name) + { this.model = provider.models[i]; } } @@ -363,40 +386,27 @@ AI.Endpoints.Types.v1.Chat_Completions; objRequest.url = AI._getEndpointUrl(provider, endpointType, this.model); - objRequest.body = { - model : this.modelUI.id - }; - - objRequest.isUseProxy = AI._extendBody(provider, objRequest.body); - + let requestBody = {}; let processResult = function(data) { - let arrResult = data.data.choices || data.data.content; - if (!arrResult) + let result = provider.getChatCompletionsResult(data, this.model); + if (result.content.length === 0) return ""; - let choice = arrResult[0]; - if (!choice) - return ""; - let text = ""; - if (choice.message) - text = choice.message.content; - if 
(choice.text) - text = choice.text; - - let i = 0; let trimStartCh = "\n".charCodeAt(0); - while (text.charCodeAt(i) === trimStartCh) - i++; - if (i > 0) - text = text.substring(i); - return text; + return result.content[0]; }; if (1 === messages.length) { if (!isUseCompletionsInsteadChat) { - objRequest.body.messages = isMessages ? messages[0] : [{role:"user",content:messages[0]}]; + if (isMessages) + requestBody.messages = messages[0]; + else + requestBody.messages = [{role:"user",content:messages[0]}]; + objRequest.body = provider.getChatCompletions(requestBody, this.model); } else { - objRequest.body.prompt = messages[0]; + objRequest.body = provider.getCompletions({ text : messages[0] }); } + objRequest.isUseProxy = AI._extendBody(provider, objRequest.body); + let result = await requestWrapper(objRequest); if (result.error) { throw { @@ -434,16 +444,17 @@ return footer; } - let isBadAI = false; for (let i = 0, len = messages.length; i < len; i++) { let message = getHeader(i + 1, len) + messages[i] + getFooter(i + 1, len); if (!isUseCompletionsInsteadChat) { - objRequest.body.messages = [{role:"user",content:message}]; + objRequest.body = provider.getChatCompletions({ messages : [{role:"user",content:message}] }); } else { - objRequest.body.prompt = message; + objRequest.body = provider.getCompletions( { text : message }); } + objRequest.isUseProxy = AI._extendBody(provider, objRequest.body); + let result = await requestWrapper(objRequest); if (result.error) { throw { diff --git a/sdkjs-plugins/content/ai/scripts/engine/local_storage.js b/sdkjs-plugins/content/ai/scripts/engine/local_storage.js index ec763bc3..c9cac0e3 100644 --- a/sdkjs-plugins/content/ai/scripts/engine/local_storage.js +++ b/sdkjs-plugins/content/ai/scripts/engine/local_storage.js @@ -78,7 +78,7 @@ for (let i in obj.providers) { let pr = obj.providers[i]; - AI.Providers[i] = AI.createProviderInstance(pr.name, pr.url, pr.key); + AI.Providers[i] = AI.createProviderInstance(pr.name, pr.url, 
pr.key, pr.addon); AI.Providers[i].models = pr.models || []; if (fixVersion2) { diff --git a/sdkjs-plugins/content/ai/scripts/engine/providers/base.js b/sdkjs-plugins/content/ai/scripts/engine/providers/base.js index e5466d74..aa45492e 100644 --- a/sdkjs-plugins/content/ai/scripts/engine/providers/base.js +++ b/sdkjs-plugins/content/ai/scripts/engine/providers/base.js @@ -116,10 +116,10 @@ AI.CapabilitiesUI.All = capabilitiesAll; AI.InternalProviders = []; - AI.createProviderInstance = function(name, url, key) { + AI.createProviderInstance = function(name, url, key, addon) { for (let i = 0, len = window.AI.InternalProviders.length; i < len; i++) { if (name === AI.InternalProviders[i].name) - return AI.InternalProviders[i].createInstance(name, url, key, AI.InternalProviders[i].addon); + return AI.InternalProviders[i].createInstance(name, url, key, addon || AI.InternalProviders[i].addon); } return new Provider(name, url, key); }; diff --git a/sdkjs-plugins/content/ai/scripts/engine/providers/internal/anthropic.js b/sdkjs-plugins/content/ai/scripts/engine/providers/internal/anthropic.js index 473f933a..8d7f4339 100644 --- a/sdkjs-plugins/content/ai/scripts/engine/providers/internal/anthropic.js +++ b/sdkjs-plugins/content/ai/scripts/engine/providers/internal/anthropic.js @@ -29,7 +29,13 @@ class Provider extends AI.Provider { getEndpointUrl(endpoint, model) { if (AI.Endpoints.Types.v1.Chat_Completions === endpoint) return "/messages"; - return super.getEndpointUrl(); + return super.getEndpointUrl(endpoint, model); + } + + getRequestBodyOptions() { + return { + max_tokens : 4096 + }; } getRequestHeaderOptions() { diff --git a/sdkjs-plugins/content/ai/scripts/engine/providers/internal/google-gemini.js b/sdkjs-plugins/content/ai/scripts/engine/providers/internal/google-gemini.js index 68eac057..04ac4c63 100644 --- a/sdkjs-plugins/content/ai/scripts/engine/providers/internal/google-gemini.js +++ b/sdkjs-plugins/content/ai/scripts/engine/providers/internal/google-gemini.js 
@@ -73,4 +73,18 @@ class Provider extends AI.Provider { return headers; } + getChatCompletions(message, model) { + let body = { contents : [] }; + for (let i = 0, len = message.messages.length; i < len; i++) { + let rec = { + role : message.messages[i].role, + parts : [ { text : message.messages[i].content } ] + }; + if (rec.role === "assistant") + rec.role = "model"; + body.contents.push(rec); + } + return body; + } + } diff --git a/sdkjs-plugins/content/ai/scripts/engine/providers/internal/ollama.js b/sdkjs-plugins/content/ai/scripts/engine/providers/internal/ollama.js index 643a15f8..6951c28b 100644 --- a/sdkjs-plugins/content/ai/scripts/engine/providers/internal/ollama.js +++ b/sdkjs-plugins/content/ai/scripts/engine/providers/internal/ollama.js @@ -3,25 +3,7 @@ class Provider extends AI.Provider { constructor() { - super("Ollama", "http://localhost:11434/api", "", ""); + super("Ollama", "http://localhost:11434", "", "v1"); } - getEndpointUrl(endpoint, model) { - let Types = AI.Endpoints.Types; - switch (endpoint) - { - case Types.v1.Models: - return "/tags"; - - case Types.v1.Chat_Completions: - return "/chat"; - case Types.v1.Completions: - return "/generate"; - - default: - break; - } - - return super.getEndpointUrl(); - } } diff --git a/sdkjs-plugins/content/ai/scripts/engine/providers/provider.js b/sdkjs-plugins/content/ai/scripts/engine/providers/provider.js index e9a5ede2..f78c85e1 100644 --- a/sdkjs-plugins/content/ai/scripts/engine/providers/provider.js +++ b/sdkjs-plugins/content/ai/scripts/engine/providers/provider.js @@ -20,10 +20,22 @@ this.modelsUI = []; } + /** + * If you add an implementation here, then no request will be made to the service. + * @returns {Object[] | undefined} + */ + getModels() { + return undefined; + } + /** * Correct received (*models* endpoint) model object. 
 */
	correctModelInfo(model) {
+		if (undefined === model.id && model.name) {
+			model.id = model.name;
+			return;
+		}
 		model.name = model.id;
 	}
 
@@ -136,6 +148,94 @@
 		return false;
 	}
 
+	/**
+	 * Get request body object by message.
+	 * @param {Object} message
+	 * *message* is in following format:
+	 * {
+	 *    messages: [
+	 *       { role: "developer", content: "You are a helpful assistant." },
+	 *       { role: "system", content: "You are a helpful assistant." },
+	 *       { role: "user", content: "Hello" },
+	 *       { role: "assistant", content: "Hey!" },
+	 *       { role: "user", content: "Hello" },
+	 *       { role: "assistant", content: "Hey again!" }
+	 *    ]
+	 * }
+	 */
+	getChatCompletions(message, model) {
+		return {
+			model : model.id,
+			messages : message.messages
+		}
+	}
+
+	/**
+	 * Get request body object by message.
+	 * @param {Object} message
+	 * *message* is in following format:
+	 * {
+	 *    text: "Please, calculate 2+2."
+	 * }
+	 */
+	getCompletions(message, model) {
+		return {
+			model : model.id,
+			prompt : message.text
+		}
+	}
+
+	/**
+	 * Convert *getChatCompletions* and *getCompletions* answer to result simple message.
+	 * @returns {Object} result
+	 * *result* is in following format:
+	 * {
+	 *    content: ["Hello", "Hi"]
+	 * }
+	 */
+	getChatCompletionsResult(message, model) {
+		let result = {
+			content : []
+		};
+
+		let arrResult = message.data.choices || message.data.content || message.data.candidates;
+		if (!arrResult)
+			return result;
+
+		let choice = arrResult[0];
+		if (!choice)
+			return result;
+
+		if (choice.message && choice.message.content)
+			result.content.push(choice.message.content);
+		if (choice.text)
+			result.content.push(choice.text);
+		if (choice.content) {
+			if (typeof(choice.content) === "string")
+				result.content.push(choice.content);
+			else if (Array.isArray(choice.content.parts)) {
+				for (let i = 0, len = choice.content.parts.length; i < len; i++) {
+					result.content.push(choice.content.parts[i].text);
+				}
+			}
+		}
+
+		let trimArray = ["\n".charCodeAt(0)];
+		for (let i = 0, len = result.content.length; i < len; i++) {
+			let iEnd = result.content[i].length - 1;
+			let iStart = 0;
+			while (iStart < iEnd && trimArray.includes(result.content[i].charCodeAt(iStart)))
+				iStart++;
+			while (iEnd > iStart && trimArray.includes(result.content[i].charCodeAt(iEnd)))
+				iEnd--;
+
+			if (iEnd > iStart && ((0 !== iStart) || ((result.content[i].length - 1) !== iEnd)))
+				result.content[i] = result.content[i].substring(iStart, iEnd + 1);
+		}
+
+		return result;
+	}
+
 	/**
 	 * ========================================================================================
 	 * The following are methods for internal work. There is no need to overload these methods.