diff --git a/Common/config/default.json b/Common/config/default.json index cfdd1336..f5b0f00d 100644 --- a/Common/config/default.json +++ b/Common/config/default.json @@ -23,7 +23,7 @@ }, { "enable": true, - "name": "Anthropic Claude", + "name": "Anthropic", "url": "https://api.anthropic.com", "key": "", "models": [] @@ -55,6 +55,17 @@ "url": "https://api.aleph-alpha.com", "key": "", "models": [] + }, + { + "enable": true, + "name": "Stability AI", + "url": "https://api.stability.ai", + "key": "", + "models": [ + "Stable Diffusion", + "Stable Image Core", + "Stable Image Ultra" + ] } ], "actions": { @@ -83,7 +94,7 @@ "capabilities": 1 } }, - "timeout": 30000, + "timeout": "30s", "allowedCorsOrigins": [ "https://onlyoffice.github.io" ] diff --git a/DocService/sources/DocsCoServer.js b/DocService/sources/DocsCoServer.js index 4991b746..8d7caa8a 100644 --- a/DocService/sources/DocsCoServer.js +++ b/DocService/sources/DocsCoServer.js @@ -103,6 +103,7 @@ const queueService = require('./../../Common/sources/taskqueueRabbitMQ'); const operationContext = require('./../../Common/sources/operationContext'); const tenantManager = require('./../../Common/sources/tenantManager'); const { notificationTypes, ...notificationService } = require('../../Common/sources/notificationService'); +const aiProxyHandler = require('./ai/aiProxyHandler'); const cfgEditorDataStorage = config.get('services.CoAuthoring.server.editorDataStorage'); const cfgEditorStatStorage = config.get('services.CoAuthoring.server.editorStatStorage'); @@ -3438,9 +3439,10 @@ exports.install = function(server, callbackFunction) { } let [licenseInfo] = yield tenantManager.getTenantLicense(ctx); - + let pluginSettings = yield aiProxyHandler.getPluginSettings(ctx); sendData(ctx, conn, { - type: 'license', license: { + type: 'license', + license: { type: licenseInfo.type, light: false,//todo remove in sdk mode: licenseInfo.mode, @@ -3453,7 +3455,8 @@ exports.install = function(server, callbackFunction) { branding: 
licenseInfo.branding, customization: licenseInfo.customization, advancedApi: licenseInfo.advancedApi - } + }, + aiPluginSettings: pluginSettings }); ctx.logger.info('_checkLicense end'); } catch (err) { diff --git a/DocService/sources/ai/aiEngineWrapper.js b/DocService/sources/ai/aiEngineWrapper.js new file mode 100644 index 00000000..988732c0 --- /dev/null +++ b/DocService/sources/ai/aiEngineWrapper.js @@ -0,0 +1,157 @@ +/* + * (c) Copyright Ascensio System SIA 2010-2024 + * + * This program is a free software product. You can redistribute it and/or + * modify it under the terms of the GNU Affero General Public License (AGPL) + * version 3 as published by the Free Software Foundation. In accordance with + * Section 7(a) of the GNU AGPL its Section 15 shall be amended to the effect + * that Ascensio System SIA expressly excludes the warranty of non-infringement + * of any third-party rights. + * + * This program is distributed WITHOUT ANY WARRANTY; without even the implied + * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. For + * details, see the GNU AGPL at: http://www.gnu.org/licenses/agpl-3.0.html + * + * You can contact Ascensio System SIA at 20A-6 Ernesta Birznieka-Upish + * street, Riga, Latvia, EU, LV-1050. + * + * The interactive user interfaces in modified source and object code versions + * of the Program must display Appropriate Legal Notices, as required under + * Section 5 of the GNU AGPL version 3. + * + * Pursuant to Section 7(b) of the License you must retain the original Product + * logo when distributing the program. Pursuant to Section 7(e) we decline to + * grant you any rights under trademark law for use of our trademarks. + * + * All the Product's GUI elements, including illustrations and icon sets, as + * well as technical writing content are licensed under the terms of the + * Creative Commons Attribution-ShareAlike 4.0 International. 
See the License + * terms at http://creativecommons.org/licenses/by-sa/4.0/legalcode + * + */ + +'use strict'; + +const { buffer } = require('node:stream/consumers'); +const config = require('config'); +const utils = require('../../../Common/sources/utils'); +const operationContext = require('../../../Common/sources/operationContext'); +const fs = require('fs'); +const path = require('path'); +const vm = require('vm'); + +// Configuration constants +const cfgAiApiTimeout = config.get('ai-api.timeout'); + +function setCtx(ctx) { + sandbox.ctx = ctx; + sandbox.console = ctx.logger; +} + +// Set up the environment for the client-side engine.js +const sandbox = { + ctx: null, + window: {AI: {}}, + + /** + * Implementation of fetch that delegates to utils.httpRequest + * + * @param {string} url - The URL to fetch + * @param {Object} options - Fetch options (method, headers, body) + * @returns {Promise} - A promise that resolves to a response-like object + */ + fetch: function(url, options = {}) { + const ctx = sandbox.ctx; + const method = options.method || 'GET'; + + // Configure timeout options for the request + const timeoutOptions = { + connectionAndInactivity: cfgAiApiTimeout, + wholeCycle: cfgAiApiTimeout + }; + return utils.httpRequest( + sandbox.ctx, + method, + url, + options.headers || {}, + options.body || null, + timeoutOptions, + 10 * 1024 * 1024, + false + ) + .then(async (result) => { + const responseBuffer = await buffer(result.stream); + const text = responseBuffer.toString('utf8'); + + return { + status: result.response.status, + statusText: result.response.statusText, + ok: result.response.status >= 200 && result.response.status < 300, + headers: result.response.headers, + text: () => Promise.resolve(text), + json: () => Promise.resolve(JSON.parse(text)), + arrayBuffer: () => Promise.resolve(responseBuffer.buffer) + }; + }); + } +}; + +// Initialize minimal AI object with required functionality +sandbox.AI = sandbox.window.AI; 
+setCtx(operationContext.global); + +/** + * Simple loadInternalProviders implementation + */ +function loadInternalProviders() { + // Add simple provider loading logic + const enginePath = path.join(__dirname, 'engine', 'providers', 'internal'); + + try { + // Read providers directory + const files = fs.readdirSync(enginePath); + + // Load each provider + for (const file of files) { + if (file.endsWith('.js')) { + const providerPath = path.join(enginePath, file); + const providerCode = fs.readFileSync(providerPath, 'utf8'); + + try { + sandbox.ctx.logger.debug(`Loading provider ${file}:`); + let content = "(function(){\n" + providerCode + "\nreturn new Provider();})();"; + // Execute provider code in sandbox + let provider = vm.runInNewContext(content, sandbox, { + filename: file, + timeout: 5000 + }); + sandbox.AI.InternalProviders.push(provider); + } catch (error) { + sandbox.ctx.logger.error(`Error loading provider ${file}:`, error); + } + } + } + + sandbox.AI.onLoadInternalProviders(); + } catch (error) { + sandbox.ctx.logger.error('Error loading internal providers:', error); + } +} + +// Load engine.js +let engineCode = ''; +engineCode += fs.readFileSync(path.join(__dirname, 'engine', 'storage.js'), 'utf8'); +engineCode += fs.readFileSync(path.join(__dirname, 'engine', 'local_storage.js'), 'utf8'); +engineCode += fs.readFileSync(path.join(__dirname, 'engine', 'providers', 'base.js'), 'utf8'); +engineCode += fs.readFileSync(path.join(__dirname, 'engine', 'providers', 'provider.js'), 'utf8'); +engineCode += fs.readFileSync(path.join(__dirname, 'engine', 'engine.js'), 'utf8'); +vm.runInNewContext(engineCode, sandbox); + +sandbox.AI.loadInternalProviders = loadInternalProviders; +loadInternalProviders(); + + + + +exports.setCtx = setCtx; +exports.AI = sandbox.AI; diff --git a/DocService/sources/ai/aiProxyHandler.js b/DocService/sources/ai/aiProxyHandler.js new file mode 100644 index 00000000..038fd233 --- /dev/null +++ b/DocService/sources/ai/aiProxyHandler.js @@ 
-0,0 +1,368 @@ +/* + * (c) Copyright Ascensio System SIA 2010-2024 + * + * This program is a free software product. You can redistribute it and/or + * modify it under the terms of the GNU Affero General Public License (AGPL) + * version 3 as published by the Free Software Foundation. In accordance with + * Section 7(a) of the GNU AGPL its Section 15 shall be amended to the effect + * that Ascensio System SIA expressly excludes the warranty of non-infringement + * of any third-party rights. + * + * This program is distributed WITHOUT ANY WARRANTY; without even the implied + * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. For + * details, see the GNU AGPL at: http://www.gnu.org/licenses/agpl-3.0.html + * + * You can contact Ascensio System SIA at 20A-6 Ernesta Birznieka-Upish + * street, Riga, Latvia, EU, LV-1050. + * + * The interactive user interfaces in modified source and object code versions + * of the Program must display Appropriate Legal Notices, as required under + * Section 5 of the GNU AGPL version 3. + * + * Pursuant to Section 7(b) of the License you must retain the original Product + * logo when distributing the program. Pursuant to Section 7(e) we decline to + * grant you any rights under trademark law for use of our trademarks. + * + * All the Product's GUI elements, including illustrations and icon sets, as + * well as technical writing content are licensed under the terms of the + * Creative Commons Attribution-ShareAlike 4.0 International. 
See the License + * terms at http://creativecommons.org/licenses/by-sa/4.0/legalcode + * + */ + +'use strict'; + +const { pipeline } = require('stream/promises'); +const { buffer } = require('node:stream/consumers'); +const config = require('config'); +const utils = require('./../../../Common/sources/utils'); +const operationContext = require('./../../../Common/sources/operationContext'); +const commonDefines = require('./../../../Common/sources/commondefines'); +const docsCoServer = require('./../DocsCoServer'); + +// Import the new aiEngineWrapper module +const aiEngineWrapper = require('./aiEngineWrapper'); + +const cfgAiApiAllowedOrigins = config.get('ai-api.allowedCorsOrigins'); +const cfgAiApiTimeout = config.get('ai-api.timeout'); +const cfgTokenEnableBrowser = config.get('services.CoAuthoring.token.enable.browser'); + +/** + * Helper function to set CORS headers if the request origin is allowed + * + * @param {object} req - Express request object + * @param {object} res - Express response object + * @param {object} ctx - Operation context for logging + * @param {boolean} handleOptions - Whether to handle OPTIONS requests (default: true) + * @returns {boolean} - True if this was an OPTIONS request that was handled + */ +function handleCorsHeaders(req, res, ctx, handleOptions = true) { + const requestOrigin = req.headers.origin; + + // If no origin in request or allowed origins list is empty, do nothing + if (!requestOrigin || cfgAiApiAllowedOrigins.length === 0) { + return false; + } + + // If the origin is in our allowed list + if (cfgAiApiAllowedOrigins.includes(requestOrigin)) { + res.setHeader('Access-Control-Allow-Origin', requestOrigin); + res.setHeader('Access-Control-Allow-Credentials', 'true'); + res.setHeader('Vary', 'Origin'); // Important when using dynamic origin + + // If debug logging is available + if (ctx && ctx.logger) { + ctx.logger.debug('CORS headers set for origin: %s (matched allowed list)', requestOrigin); + } + + // Handle preflight 
OPTIONS requests if requested + if (handleOptions && req.method === 'OPTIONS') { + res.setHeader('Access-Control-Allow-Methods', 'DELETE, GET, HEAD, OPTIONS, PATCH, POST, PUT'); + // Allow all headers with wildcard + res.setHeader('Access-Control-Allow-Headers', '*'); + + // For preflight request, we should also set non-CORS headers to match the API + res.setHeader('Allow', 'OPTIONS, HEAD, GET, POST, PUT, DELETE, PATCH'); + res.setHeader('Content-Length', '0'); + res.setHeader('Content-Type', 'text/html; charset=utf-8'); + + // Return 204 which is standard for OPTIONS preflight + res.sendStatus(204); // No Content response for OPTIONS + return true; // Signal that we handled an OPTIONS request + } + } + + return false; // Not an OPTIONS request or origin not allowed +} + +/** + * Makes an HTTP request to an AI API endpoint using the provided request and response objects + * + * @param {object} req - Express request object + * @param {object} res - Express response object + * @returns {Promise} - Promise resolving when the request is complete + */ +async function proxyRequest(req, res) { + // Create operation context for logging + const ctx = new operationContext.Context(); + ctx.initFromRequest(req); + + try { + ctx.logger.info('Start proxyRequest'); + const tenTokenEnableBrowser = ctx.getCfg('services.CoAuthoring.token.enable.browser', cfgTokenEnableBrowser); + + if (tenTokenEnableBrowser) { + let checkJwtRes = await docsCoServer.checkJwtHeader(ctx, req, 'Authorization', 'Bearer ', commonDefines.c_oAscSecretType.Session); + if (checkJwtRes.err) { + ctx.logger.error('checkJwtHeader error: %s', checkJwtRes.err); + res.sendStatus(403); + return; + } + } + + // 1. 
Handle CORS preflight (OPTIONS) requests if necessary + if (handleCorsHeaders(req, res, ctx) === true) { + return; // OPTIONS request handled, stop further processing + } + + let body = JSON.parse(req.body); + + // Configure timeout options for the request + const timeoutOptions = { + connectionAndInactivity: cfgAiApiTimeout, + wholeCycle: cfgAiApiTimeout + }; + + // Get request size limit if configured + const sizeLimit = 10 * 1024 * 1024; // Default to 10MB + + // Create a copy of the headers from the request + const headers = { ...body.headers }; + + // Get API key based on the target URL + const aiApi = config.get('ai-api'); + let apiKey; + + // Determine which API key to use based on the target URL + if (body.target) { + // Find the provider that matches the target URL + const matchedProvider = aiApi.providers.find(provider => + body.target.includes(provider.url)); + + if (matchedProvider) { + apiKey = matchedProvider.key; + } + } + + // Add authorization header if API key is available + if (apiKey) { + if (headers['x-api-key']) { + headers['x-api-key'] = apiKey; + } else if (body.target.includes('key=')) { + body.target = body.target.replace('key=', `key=${apiKey}&`); + } else { + headers['Authorization'] = `Bearer ${apiKey}`; + } + } else { + throw new Error('No API key found for the target URL'); + } + + // Create request parameters object + const requestParams = { + method: body.method, + uri: body.target, + headers, + body: body.data, + timeout: timeoutOptions, + limit: sizeLimit, + filterPrivate: false + }; + + // Create a safe copy for logging without sensitive info + const safeLogParams = { ...requestParams }; + // if (safeLogParams.headers) { + // safeLogParams.headers = { ...safeLogParams.headers }; + // if (safeLogParams.headers.Authorization) { + // safeLogParams.headers.Authorization = '[REDACTED]'; + // } + // } + + // Log the sanitized request parameters + ctx.logger.debug(`Proxying request: %j`, safeLogParams); + + // Use utils.httpRequest to 
make the request + const result = await utils.httpRequest( + ctx, // Operation context + requestParams.method, // HTTP method + requestParams.uri, // Target URL + requestParams.headers, // Request headers + requestParams.body, // Request body + requestParams.timeout, // Timeout configuration + requestParams.limit, // Size limit + requestParams.filterPrivate // Filter private requests + ); + + // Set the response headers to match the target response + res.set(result.response.headers); + + // Use pipeline to pipe the response data to the client + await pipeline(result.stream, res); + + } catch (error) { + ctx.logger.error(`AI API request error: %s`, error); + if(error.response){ + // Set the response headers to match the target response + res.set(error.response.headers); + + // Use pipeline to pipe the response data to the client + await pipeline(error.response.data, res); + } else { + res.status(500).json({ + "error": { + "message": "AI API request error", + "code": "500" + } + }); + } + } finally { + ctx.logger.info('End proxyRequest'); + } +} + +/** + * Process AI actions from configuration + * + * @param {Object} ctx - Operation context + * @param {Object} actions - The actions from configuration + * @returns {Object} Processed actions object + */ +function processActions(ctx, actions) { + const logger = ctx.logger; + + if (!actions || typeof actions !== 'object') { + return {}; + } + + try { + const processedActions = Object.entries(actions).reduce((acc, [key, value]) => { + if (value) { + acc[key] = { + name: value.name || key, + icon: value.icon || '', + model: value.model || '', + capabilities: Array.isArray(value.capabilities) ? 
value.capabilities : [] + }; + } + return acc; + }, {}); + + logger.info(`Processed ${Object.keys(processedActions).length} AI actions`); + return processedActions; + } catch (error) { + logger.error('Error processing AI actions:', error); + return {}; + } +} + +/** + * Process a single AI provider and its models + * + * @param {Object} ctx - Operation context + * @param {Object} provider - Provider configuration + * @param {boolean} includeDisabled - Whether to include disabled models + * @returns {Promise} Processed provider with models or null if provider is invalid + */ +async function processProvider(ctx, provider, includeDisabled) { + const logger = ctx.logger; + + if (!provider.url || !provider.key) { + return null; + } + let engineModels = []; + try { + if (provider.url && provider.key) { + aiEngineWrapper.setCtx(ctx); + // logger.info("processProvider %j", AI.Providers); + aiEngineWrapper.AI.Providers[provider.name].key = provider.key; + // Call getModels from engine.js + const result = await aiEngineWrapper.AI.getModels(provider); + logger.info(`Got ${JSON.stringify(result)} from AI.getModels for ${provider.name}`); + // Process result + if (!result.error && Array.isArray(result.models)) { + engineModels = result.models; + } + } + } catch (error) { + logger.error(`Error processing provider ${provider.name}:`, error); + } + // Return provider with any models we were able to get from config + return { + name: provider.name, + url: provider.url, + key: "", + models: engineModels + }; +} + +/** + * Retrieves all AI models from the configuration and dynamically from providers + * + * @param {Object} ctx - Operation context + * @param {boolean} [includeDisabled=false] - Whether to include disabled providers in the result + * @returns {Promise} Object containing providers and their models along with action configurations + */ +async function getPluginSettings(ctx, includeDisabled = false) { + const logger = ctx.logger; + logger.info('Starting 
getPluginSettings'); + const result = { + actions: {}, + providers: {}, + models: [] + }; + try { + // Get AI API configuration + const aiApi = config.get('ai-api'); + // Process providers and their models if configuration exists + if (aiApi?.providers && Array.isArray(aiApi.providers)) { + // Create an array of promises for each provider + const providerPromises = aiApi.providers + .filter(provider => includeDisabled || provider.enable !== false || !provider.key || !provider.url) + .map(provider => processProvider(ctx, provider, includeDisabled)); + + try { + let providers = await Promise.allSettled(providerPromises); + providers = providers.filter(provider => provider.status === 'fulfilled' && provider.value && provider.value.name && provider.value.models?.length > 0); + + const providerCount = providers.length; + let totalModels = 0; + // Convert providers array to object by provider name + result.providers = {}; + for(let i = 0; i < providers.length; i++) { + const provider = providers[i].value; + totalModels += provider.models.length; + result.providers[provider.name] = provider + result.models.push(...provider.models); + } + + logger.info(`Successfully processed ${providerCount} providers with a total of ${totalModels} models`); + } catch (error) { + logger.error('Error resolving provider promises:', error); + } + } + + // Process AI actions + if (aiApi?.actions && typeof aiApi.actions === 'object') { + result.actions = processActions(ctx, aiApi.actions); + } + + logger.info('Completed getPluginSettings successfully'); + } catch (error) { + logger.error('Error retrieving AI models from config:', error); + } + return result; +} + +module.exports = { + proxyRequest, + getPluginSettings +}; diff --git a/DocService/sources/ai/engine/buttons.js b/DocService/sources/ai/engine/buttons.js new file mode 100644 index 00000000..dbfcbd0c --- /dev/null +++ b/DocService/sources/ai/engine/buttons.js @@ -0,0 +1,359 @@ +(function(window, undefined) +{ + function 
generateGuid() + { + if (!window.crypto || !window.crypto.getRandomValues) + { + function s4() { + return Math.floor((1 + Math.random()) * 0x10000).toString(16).substring(1); + } + return s4() + s4() + '-' + s4() + '-' + s4() + '-' + s4() + '-' + s4() + s4() + s4(); + } + + var array = new Uint16Array(8); + window.crypto.getRandomValues(array); + var index = 0; + function s4() { + var value = 0x10000 + array[index++]; + return value.toString(16).substring(1); + } + return s4() + s4() + '-' + s4() + '-' + s4() + '-' + s4() + '-' + s4() + s4() + s4(); + } + + function translateItem(text) { + return window.Asc.plugin.tr(text); + }; + + window.Asc = window.Asc || {}; + var Asc = window.Asc; + + Asc.Buttons = {}; + Asc.Buttons.ButtonsContextMenu = []; + Asc.Buttons.ButtonsToolbar = []; + + Asc.Buttons.registerContextMenu = function() + { + window.Asc.plugin.attachEvent("onContextMenuShow", function(options) { + if (!options) + return; + + let items = { + guid: window.Asc.plugin.guid, + }; + for (let i = 0, len = Asc.Buttons.ButtonsContextMenu.length; i < len; i++) + { + let button = Asc.Buttons.ButtonsContextMenu[i]; + if (button.parent === null) + { + button.onContextMenuShow(options, items); + } + } + + if (items.items) + window.Asc.plugin.executeMethod("AddContextMenuItem", [items]); + }); + }; + + Asc.Buttons.registerToolbarMenu = function() + { + let items = { + guid : window.Asc.plugin.guid, + tabs : [] + }; + + for (let i = 0, len = Asc.Buttons.ButtonsToolbar.length; i < len; i++) + { + let button = Asc.Buttons.ButtonsToolbar[i]; + if (button.parent === null) + { + button.toToolbar(items); + } + + if (!!button.menu) { + for (item of button.menu) { + if (!!item.onclick) { + window.Asc.plugin.attachToolbarMenuClickEvent(item.id, item.onclick); + } + } + } + } + + if (items.tabs.length > 0) + window.Asc.plugin.executeMethod("AddToolbarMenuItem", [items]); + }; + + Asc.Buttons.updateToolbarMenu = function(id, name, buttons) + { + let buttonMainToolbar = new 
Asc.ButtonToolbar(null, id); + buttonMainToolbar.text = name; + + let items = { + guid : window.Asc.plugin.guid, + tabs : [] + }; + + buttonMainToolbar.childs = buttons; + for (let i = 0, len = buttons.length; i < len; i++) + buttons[i].parent = buttonMainToolbar; + + buttonMainToolbar.toToolbar(items); + + if (items.tabs.length > 0) + window.Asc.plugin.executeMethod("UpdateToolbarMenuItem", [items]); + }; + + var ToolbarButtonType = { + Button : "button", + BigButton : "big-button" + }; + + var ItemType = { + None : 0, + ContextMenu : 1, + Toolbar : 2 + }; + + function Button(parent, id) + { + this.itemType = ItemType.None; + this.editors = ["word", "cell", "slide"]; + + this.id = (id === undefined) ? generateGuid() : id; + + this.icons = null; + + this.text = ""; + this.hint = null; + this.data = ""; + + this.separator = false; + this.lockInViewMode = true; + this.enableToggle = false; + this.disabled = false; + this.removed = false; + + this.parent = parent ? parent : null; + this.childs = null; + + if (this.parent) + { + if (!this.parent.childs) + this.parent.childs = []; + this.parent.childs.push(this); + } + } + + Button.prototype.toItem = function() + { + let item = { + id : this.id, + text : translateItem(this.text) + }; + + if (this.hint !== null) + item.hint = translateItem(this.hint === "" ? 
this.hint : this.text); + + if (this.separator) + item.separator = true; + + if (this.data) + item.data = this.data; + + if (this.lockInViewMode) + item.lockInViewMode = true; + + if (this.enableToggle) + item.enableToggle = true; + + if (this.disabled) + item.disabled = true; + else + item.disabled = false; + + if (this.removed) + item.removed = true; + + if (this.icons) + item.icons = this.icons; + + if (this.itemType === ItemType.Toolbar) + item.type = this.type; + + if (this.menu) + item.items = this.menu.map(function(menuItem) { + menuItem.text = translateItem(menuItem.text); + return menuItem; + }); + + if (this.split) + item.split = true; + + return item; + }; + + Button.prototype.attachOnClick = function(handler) + { + }; + + Button.prototype.onClick = function() + { + console.log("BUTTON: " + this.text); + }; + + function ButtonContextMenu(parent, id) + { + Button.call(this, parent, id); + + this.itemType = ItemType.ContextMenu; + this.showOnOptionsType = []; + + Asc.Buttons.ButtonsContextMenu.push(this); + } + + ButtonContextMenu.prototype = Object.create(Button.prototype); + ButtonContextMenu.prototype.constructor = ButtonContextMenu; + + ButtonContextMenu.prototype.copy = function() + { + let ret = new ButtonContextMenu(this.parent, this.id); + ret.editors = this.editors; + + ret.separator = this.separator; + ret.lockInViewMode = this.lockInViewMode; + ret.enableToggle = this.enableToggle; + ret.disabled = this.disabled; + ret.showOnOptionsType = this.showOnOptionsType.slice(); + + return ret; + }; + + ButtonContextMenu.prototype.addCheckers = function() + { + let len = arguments.length; + this.showOnOptionsType = new Array(len); + for (let i = 0; i < len; i++) + this.showOnOptionsType[i] = arguments[i]; + }; + + ButtonContextMenu.prototype.attachOnClick = function(handler) + { + window.Asc.plugin.attachContextMenuClickEvent(this.id, handler); + }; + + ButtonContextMenu.prototype.onContextMenuShowAnalyze = function(options, parent) + { + return false; + 
}; + + ButtonContextMenu.prototype.onContextMenuShowExtendItem = function(options, item) + { + }; + + ButtonContextMenu.prototype.onContextMenuShow = function(options, parent) + { + if (this.onContextMenuShowAnalyze(options, parent)) + return; + + let isSupport = false; + for (let i = 0, len = this.editors.length; i < len; i++) + { + if (Asc.plugin.info.editorType === this.editors[i]) + { + isSupport = true; + break; + } + } + + if (!isSupport) + return; + + for (let i = 0, len = this.showOnOptionsType.length; i < len; i++) + { + if (options.type === this.showOnOptionsType[i] || this.showOnOptionsType[i] === "All") + { + if (!parent.items) + parent.items = []; + + let curItem = this.toItem(); + this.onContextMenuShowExtendItem(options, curItem); + + if (this.childs) + { + for (let j = 0, childsLen = this.childs.length; j < childsLen; j++) + { + this.childs[j].onContextMenuShow(options, curItem); + } + } + + parent.items.push(curItem); + return; + } + } + }; + + function ButtonToolbar(parent, id) + { + Button.call(this, parent, id); + + this.itemType = ItemType.Toolbar; + this.type = ToolbarButtonType.BigButton; + this.tab = ""; + + Asc.Buttons.ButtonsToolbar.push(this); + } + + ButtonToolbar.prototype = Object.create(Button.prototype); + ButtonToolbar.prototype.constructor = ButtonToolbar; + + ButtonToolbar.prototype.attachOnClick = function(handler) + { + window.Asc.plugin.attachToolbarMenuClickEvent(this.id, handler); + }; + + ButtonToolbar.prototype.toItem = function(items) + { + let item = Button.prototype.toItem.call(this); + item.type = this.type; + return item; + }; + + ButtonToolbar.prototype.toToolbar = function(items) + { + let currentItem = null; + if (this.parent === null) + { + let tab = { + id : this.id, + text : translateItem(this.text), + items : [] + }; + if (this.hint !== null) + tab.hint = translateItem(this.hint === "" ? 
this.hint : this.text); + + items.tabs.push(tab); + + currentItem = tab; + } + else + { + currentItem = this.toItem(); + + if (!items.items) + items.items = []; + + items.items.push(currentItem); + } + + if (this.childs) + { + for (let j = 0, childsLen = this.childs.length; j < childsLen; j++) + { + this.childs[j].toToolbar(currentItem); + } + } + }; + + Asc.ToolbarButtonType = ToolbarButtonType; + Asc.ButtonContextMenu = ButtonContextMenu; + Asc.ButtonToolbar = ButtonToolbar; +})(window); diff --git a/DocService/sources/ai/engine/engine.js b/DocService/sources/ai/engine/engine.js new file mode 100644 index 00000000..e8c5402d --- /dev/null +++ b/DocService/sources/ai/engine/engine.js @@ -0,0 +1,528 @@ +(function(window, undefined) +{ + window.AI = window.AI || {}; + var AI = window.AI; + + if (!AI.isLocalDesktop) + return; + + window.fetch = function(url, obj) { + function TextResponse(text, isOk) { + if (isOk) + this.textResponse = text; + else + this.message = text; + + this.text = function() { return new Promise(function(resolve) { + resolve(text) + })}; + this.json = function() { return new Promise(function(resolve, reject) { + try { + resolve(JSON.parse(text)); + } catch (error) { + reject(error); + } + })}; + this.ok = isOk; + }; + + return new Promise(function (resolve, reject) { + var xhr = new XMLHttpRequest(); + xhr.open(obj.method, url, true); + + for (let h in obj.headers) + if (obj.headers.hasOwnProperty(h)) + xhr.setRequestHeader(h, obj.headers[h]); + + xhr.onload = function() { + if (this.status == 200 || this.status == 0) + resolve(new TextResponse(this.response, true)); + else + resolve(new TextResponse(this.response, false)); + }; + xhr.onerror = function() { + reject(new TextResponse(this.response || "Failed to fetch.", false)); + }; + + xhr.send(obj.body); + }); + }; +})(window); + +(function(window, undefined) +{ + async function requestWrapper(message) { + return new Promise(function (resolve, reject) { + if (AI.isLocalDesktop && 
(AI.isLocalUrl(message.url) || message.isUseProxy)) { + window.AscSimpleRequest.createRequest({ + url: message.url, + method: message.method, + headers: message.headers, + body: message.isBlob ? message.body : (message.body ? JSON.stringify(message.body) : ""), + complete: function(e, status) { + let data = JSON.parse(e.responseText); + resolve({error: 0, data: data.data ? data.data : data}); + }, + error: function(e, status, error) { + if ( e.statusCode == -102 ) e.statusCode = 404; + resolve({error: e.statusCode, message: "Internal error"}); + } + }); + } else { + let request = { + method: message.method, + headers: message.headers + }; + if (request.method != "GET") { + request.body = message.isBlob ? message.body : (message.body ? JSON.stringify(message.body) : ""); + + if (message.isUseProxy) { + request = { + "method" : request.method, + "body" : JSON.stringify({ + "target" : message.url, + "method" : request.method, + "headers" : request.headers, + "data" : request.body + }) + } + if (proxyUrlParam){ + message.url = proxyUrlParam; + request["headers"] = { + "Authorization" : "Bearer " + Asc.plugin.info.jwt, + } + } else { + message.url = AI.PROXY_URL; + } + + } + } + + fetch(message.url, request) + .then(function(response) { + return response.json() + }) + .then(function(data) { + if (data.error) + resolve({error: 1, message: data.error.message ? data.error.message : ""}); + else + resolve({error: 0, data: data.data ? data.data : data}); + }) + .catch(function(error) { + resolve({error: 1, message: error.message ? error.message : ""}); + }); + } + }); + } + + AI.TmpProviderForModels = null; + + AI.PROXY_URL = "http://localhost:8000/ai-proxy"; + const proxyUrlParam = "http://localhost:8000/ai-proxy"; + + AI._getHeaders = function(_provider) { + let provider = _provider.createInstance ? 
_provider : AI.Storage.getProvider(_provider.name); + if (!provider) provider = new AI.Provider(); + return provider.getRequestHeaderOptions(); + }; + + AI._getModelsSync = function(_provider) { + let provider = _provider.createInstance ? _provider : AI.Storage.getProvider(_provider.name); + if (!provider) provider = new AI.Provider(); + return provider.getModels(); + }; + + AI._extendBody = function(_provider, body) { + let provider = _provider.createInstance ? _provider : AI.Storage.getProvider(_provider.name); + if (!provider) provider = new AI.Provider(); + let bodyPr = provider.getRequestBodyOptions(); + + if (provider.isUseProxy()) + bodyPr.target = provider.url; + + for (let i in bodyPr) { + if (!body[i]) + body[i] = bodyPr[i]; + } + + return provider.isUseProxy(); + }; + + AI._getEndpointUrl = function(_provider, endpoint, model) { + let provider = _provider.createInstance ? _provider : AI.Storage.getProvider(_provider.name); + if (!provider) provider = new AI.Provider(_provider.name, _provider.url, _provider.key); + + if (_provider.key) + provider.key = _provider.key; + + let url = provider.url; + if (url.endsWith("/")) + url = url.substring(0, url.length - 1); + if ("" !== provider.addon) + { + let plus = "/" + provider.addon; + let pos = url.lastIndexOf(plus); + if (pos === -1 || pos !== (url.length - plus.length)) + url += plus; + } + + return url + provider.getEndpointUrl(endpoint, model); + }; + + AI.getModels = async function(provider) + { + AI.TmpProviderForModels = null; + return new Promise(function (resolve, reject) { + + function resolveRequest(data) { + if (data.error) + resolve({ + error : 1, + message : data.message, + models : [] + }); + else { + AI.TmpProviderForModels = AI.createProviderInstance(provider.name, provider.url, provider.key); + let models = data.data; + if (data.data.models) + models = data.data.models; + for (let i = 0, len = models.length; i < len; i++) + { + let model = models[i]; + 
AI.TmpProviderForModels.correctModelInfo(model); + + if (!model.id) + continue; + + model.endpoints = []; + model.options = {}; + + if (AI.TmpProviderForModels.checkExcludeModel(model)) + continue; + + let modelUI = new AI.UI.Model(model.name, model.id, + provider.name, AI.TmpProviderForModels.checkModelCapability(model)); + AI.TmpProviderForModels.models.push(model); + AI.TmpProviderForModels.modelsUI.push(modelUI); + } + + resolve({ + error : 0, + message : "", + models : AI.TmpProviderForModels.modelsUI + }); + } + } + + let syncModels = AI._getModelsSync(provider); + if (Array.isArray(syncModels)) + { + resolveRequest({ + error : 0, + data : syncModels + }); + return; + } + + let headers = AI._getHeaders(provider); + requestWrapper({ + url : AI._getEndpointUrl(provider, AI.Endpoints.Types.v1.Models), + headers : headers, + method : "GET" + }).then(function(data) { + resolveRequest(data); + }); + }); + }; + + AI.Request = function(model) { + this.modelUI = model; + this.model = null; + this.errorHandler = null; + + if ("" !== model.provider) { + let provider = null; + for (let i in AI.Providers) { + if (model.provider === AI.Providers[i].name) { + provider = AI.Providers[i]; + break; + } + } + + if (provider) { + for (let i = 0, len = provider.models.length; i < len; i++) { + if (model.id === provider.models[i].id || + model.id === provider.models[i].name) + { + this.model = provider.models[i]; + } + } + } + } + }; + + AI.Request.create = function(action) { + let model = AI.Storage.getModelById(AI.Actions[action].model); + if (!model) { + onOpenSettingsModal(); + return null; + } + return new AI.Request(model); + }; + + AI.Request.prototype.setErrorHandler = function(callback) { + this.errorHandler = callback; + }; + + AI.Request.prototype.chatRequest = async function(content, block) { + return await this._wrapRequest(this._chatRequest, content, block !== false); + }; + + AI.Request.prototype._wrapRequest = async function(func, data, block) { + if (block) + 
await Asc.Editor.callMethod("StartAction", ["Block", "AI (" + this.modelUI.name + ")"]); + let result = undefined; + try { + result = await func.call(this, data); + } catch (err) { + if (err.error) { + if (block) + await Asc.Editor.callMethod("EndAction", ["Block", "AI (" + this.modelUI.name + ")"]); + if (this.errorHandler) + this.errorHandler(err); + else { + if (true) { + await Asc.Library.SendError(err.message, -1); + } else { + // since 8.3.0!!! + await Asc.Editor.callMethod("ShowError", [err.message, -1]); + } + } + return; + } + } + if (block) + await Asc.Editor.callMethod("EndAction", ["Block", "AI (" + this.modelUI.name + ")"]); + return result; + }; + + AI.Request.prototype._chatRequest = async function(content) { + let provider = null; + if (this.modelUI) + provider = AI.Storage.getProvider(this.modelUI.provider); + + if (!provider) { + throw { + error : 1, + message : "Please select the correct model for action." + }; + return; + } + + let isUseCompletionsInsteadChat = false; + if (this.model) { + let isFoundChatCompletions = false; + let isFoundCompletions = false; + for (let i = 0, len = this.model.endpoints.length; i < len; i++) { + if (this.model.endpoints[i] === AI.Endpoints.Types.v1.Chat_Completions) { + isFoundChatCompletions = true; + break; + } + if (this.model.endpoints[i] === AI.Endpoints.Types.v1.Completions) { + isFoundCompletions = true; + break; + } + } + + if (isFoundCompletions && !isFoundChatCompletions) + isUseCompletionsInsteadChat = true; + } + + let isNoSplit = false; + let max_input_tokens = AI.InputMaxTokens["32k"]; + if (this.model && this.model.options && undefined !== this.model.options.max_input_tokens) + max_input_tokens = this.model.options.max_input_tokens; + + let header_footer_overhead = 500; + // for test chunks: + if (false) { + max_input_tokens = 50; + let header_footer_overhead = 0; + } + + if (max_input_tokens < header_footer_overhead) + max_input_tokens = header_footer_overhead + 1000; + + let headers = 
AI._getHeaders(provider);
+
+	let isMessages = Array.isArray(content);
+
+	if (isUseCompletionsInsteadChat && isMessages) {
+		content = content[content.length - 1].content;
+		isMessages = false;
+	}
+
+	if (isMessages)
+		isNoSplit = true;
+
+	let input_len = content.length;
+	let input_tokens = isMessages ? 0 : Asc.OpenAIEncode(content).length;
+
+	let messages = [];
+	if (input_tokens < max_input_tokens || isNoSplit) {
+		messages.push(content);
+	} else {
+		let chunkLen = (((max_input_tokens - header_footer_overhead) / input_tokens) * input_len) >> 0;
+		let currentLen = 0;
+		while (currentLen != input_len) {
+			let endSymbol = currentLen + chunkLen;
+			if (endSymbol >= input_len)
+				endSymbol = undefined;
+			messages.push(content.substring(currentLen, endSymbol));
+			if (undefined === endSymbol)
+				currentLen = input_len;
+			else
+				currentLen = endSymbol;
+		}
+	}
+
+	let objRequest = {
+		headers : headers,
+		method : "POST"
+	};
+
+	let endpointType = isUseCompletionsInsteadChat ? AI.Endpoints.Types.v1.Completions :
+		AI.Endpoints.Types.v1.Chat_Completions;
+	objRequest.url = AI._getEndpointUrl(provider, endpointType, this.model);
+
+	let requestBody = {};
+	let processResult = function(data) {
+		let result = provider.getChatCompletionsResult(data, this.model);
+		if (result.content.length === 0)
+			return "";
+
+		if (0 === result.content[0].indexOf("<think>")) {
+			let end = result.content[0].indexOf("</think>");
+			if (end !== -1)
+				result.content[0] = result.content[0].substring(end + 8);
+		}
+
+		return result.content[0];
+	};
+
+	if (1 === messages.length) {
+		if (!isUseCompletionsInsteadChat) {
+			if (isMessages)
+				requestBody.messages = messages[0];
+			else
+				requestBody.messages = [{role:"user",content:messages[0]}];
+			objRequest.body = provider.getChatCompletions(requestBody, this.model);
+		} else {
+			objRequest.body = provider.getCompletions({ text : messages[0] });
+		}
+
+		objRequest.isUseProxy = AI._extendBody(provider, objRequest.body);
+		if (proxyUrlParam) {
+
objRequest.body.target = provider.url; + objRequest.isUseProxy = true; + } + + + let result = await requestWrapper(objRequest); + if (result.error) { + throw { + error : result.error, + message : result.message + }; + return; + } else { + return processResult(result); + } + + } else { + + let lastFooterForOldModels = ""; + let indexTask = content.indexOf(": \""); + if (-1 != indexTask && indexTask < 100) { + lastFooterForOldModels = content.substring(0, indexTask); + } + + function getHeader(part, partsCount) { + let header = "[START PART " + part + "/" + partsCount + "]\n"; + if (part != partsCount) { + header = "Do not answer yet. This is just another part of the text I want to send you. Just receive and acknowledge as \"Part " + part + "/" + partsCount + " received\" and wait for the next part.\n" + header; + } + return header; + } + + function getFooter(part, partsCount) { + let footer = "\n[END PART " + part + "/" + partsCount + "]\n"; + if (part != partsCount) { + footer += "Remember not answering yet. Just acknowledge you received this part with the message \"Part " + part + "/" + partsCount + " received\" and wait for the next part."; + } else { + footer += "ALL PARTS SENT. Now you can continue processing the request." 
+ lastFooterForOldModels; + } + return footer; + } + + for (let i = 0, len = messages.length; i < len; i++) { + + let message = getHeader(i + 1, len) + messages[i] + getFooter(i + 1, len); + if (!isUseCompletionsInsteadChat) { + objRequest.body = provider.getChatCompletions({ messages : [{role:"user",content:message}] }); + } else { + objRequest.body = provider.getCompletions( { text : message }); + } + + objRequest.isUseProxy = AI._extendBody(provider, objRequest.body); + if (proxyUrlParam) { + objRequest.body.target = provider.url; + objRequest.isUseProxy = true; + } + + let result = await requestWrapper(objRequest); + if (result.error) { + throw { + error : result.error, + message : result.message + }; + return; + } else if (i === (len - 1)) { + return processResult(result); + } + + } + } + }; + + function normalizeImageSize(size) { + let width = 0, height = 0; + if (size.width > 750 || size.height > 750) + width = height = 1024; + else if (size.width > 375 || size.height > 350) + width = height = 512; + else + width = height = 256; + + return {width: width, height: height, str: width + 'x' + height} + }; + + async function getImageBlob(base64) + { + return new Promise(function(resolve) { + const image = new Image(); + image.onload = function() { + const img_size = {width: image.width, height: image.height}; + const canvas_size = normalizeImageSize(img_size); + const draw_size = canvas_size.width > image.width ? 
img_size : canvas_size;
+			let canvas = document.createElement('canvas');
+			canvas.width = canvas_size.width;
+			canvas.height = canvas_size.height;
+			canvas.getContext('2d').drawImage(image, 0, 0, draw_size.width, draw_size.height*image.height/image.width);
+			canvas.toBlob(function(blob) {resolve({blob: blob, size: canvas_size, image_size :img_size})}, 'image/png');
+		};
+		image.src = base64;
+	});
+	}
+
+})(window);
diff --git a/DocService/sources/ai/engine/library.js b/DocService/sources/ai/engine/library.js
new file mode 100644
index 00000000..ee0c003c
--- /dev/null
+++ b/DocService/sources/ai/engine/library.js
@@ -0,0 +1,512 @@
+(function(exports, undefined)
+{
+	let Editor = {};
+
+	Editor.callMethod = async function(name, args)
+	{
+		return new Promise(resolve => (function(){
+			Asc.plugin.executeMethod(name, args || [], function(returnValue){
+				resolve(returnValue);
+			});
+		})());
+	};
+
+	Editor.callCommand = async function(func)
+	{
+		return new Promise(resolve => (function(){
+			Asc.plugin.callCommand(func, false, true, function(returnValue){
+				resolve(returnValue);
+			});
+		})());
+	};
+
+	Editor.pause = async function(msec)
+	{
+		return new Promise(resolve => (function(){
+			setTimeout(function(){
+				resolve();
+			}, msec);
+		})());
+	};
+
+	Editor.getType = function() {
+		if (Asc.plugin.info.editorSubType === "pdf")
+			return "pdf";
+		return window.Asc.plugin.info.editorType;
+	};
+
+	exports.Asc = exports.Asc || {};
+	exports.Asc.Editor = Editor;
+
+	function Library() {
+		this.version = 0;
+	}
+
+	exports.Asc.PluginsMD = {
+		latex: function(md) {
+			// Inline: $...$
+			md.inline.ruler.after("escape", "latex_inline", function(state, silent) {
+				let start = state.pos;
+				if (state.src[start] !== '$')
+					return false;
+				if (state.src[start + 1] === '$')
+					return false;
+
+				let content = "";
+				let end = start + 1;
+				while ((end = state.src.indexOf('$', end)) !== -1) {
+					if (state.src.charCodeAt(end - 1) === 92/*\\*/) {
+						end++;
+						continue;
+					}
+					content =
state.src.slice(start + 1, end); + content = content.trim(); + break; + } + + if (!content) + return false; + + if (!silent) { + let token = state.push("latex_inline", "span", 0); + token.content = content; + token.attrs = [["class", "oo-latex-inline"]]; + } + + state.pos = end + 1; + return true; + }); + md.renderer.rules.latex_inline = function(tokens, idx) { + return `${tokens[idx].content}`; + }; + + // Block: $$...$$ + md.block.ruler.before("fence", "latex_block", function(state, startLine, endLine, silent) { + let startPos = state.bMarks[startLine] + state.tShift[startLine]; + let maxPos = state.eMarks[startLine]; + let line = state.src.slice(startPos, maxPos).trim(); + + if (!line.startsWith("$$")) + return false; + if (silent) + return true; + + let content = ""; + let found = false; + + for (let i = startLine + 1; i < endLine; i++) { + let pos = state.bMarks[i] + state.tShift[i]; + let max = state.eMarks[i]; + let nextLine = state.src.slice(pos, max).trim(); + + if (nextLine === "$$") { + found = true; + state.line = i + 1; + break; + } + + content += nextLine + "\n"; + } + + if (!found) return false; + + const token = state.push("latex_block", "span", 0); + token.block = true; + token.content = content.trim(); + token.attrs = [["class", "oo-latex"]]; + token.map = [startLine, state.line]; + + return true; + }); + md.renderer.rules.latex_block = function(tokens, idx) { + return `${tokens[idx].content}\n`; + }; + } + }; + + function decodeHtmlText(text) { + return text + .replace(/"/g, '"') + .replace(/'/g, "'") + .replace(/&/g, '&') + .replace(/</g, '<') + .replace(/>/g, '>') + .replace(/ /g, ' '); + } + + Library.prototype.GetEditorVersion = async function() + { + if (this.version !== 0) + return this.version; + + let version = await Editor.callMethod("GetVersion"); + if ("develop" == version) + version = "99.99.99"; + + let arrVer = version.split("."); + while (3 > arrVer.length) + arrVer.push("0"); + + this.version = 1000000 * parseInt(arrVer[0]) + 1000 
* parseInt(arrVer[1]) + parseInt(arrVer[2]); + return this.version; + }; + + Library.prototype.GetCurrentWord = async function() + { + return await Editor.callMethod("GetCurrentWord"); + }; + + Library.prototype.GetSelectedText = async function() + { + let result = await Editor.callMethod("GetSelectedText"); + if (result !== "") + return result; + + return this.GetSelectedContent("text"); + }; + + Library.prototype.GetSelectedContent = async function(type) { + return await Editor.callMethod("GetSelectedContent", [{ type : type }]); + }; + + Library.prototype.GetSelectedImage = async function(type) { + let res = await Editor.callMethod("GetSelectedContent", [{ type : "html" }]); + let index1 = res.indexOf("src=\"data:image/"); + if (-1 === index1) + return ""; + index1 += 5; + let index2 = res.indexOf("\"", index1); + if (-1 === index2) + return ""; + return res.substring(index1, index2); + }; + + Library.prototype.ReplaceTextSmart = async function(text) + { + return await Editor.callMethod("ReplaceTextSmart", [text]); + }; + + Library.prototype.InsertAsText = async function(text) + { + Asc.scope.data = (text || "").split("\n\n"); + return await Editor.callCommand(function() { + let oDocument = Api.GetDocument(); + for (let ind = 0; ind < Asc.scope.data.length; ind++) { + let text = Asc.scope.data[ind]; + if (text.length) { + let oParagraph = Api.CreateParagraph(); + oParagraph.AddText(text); + oDocument.Push(oParagraph); + } + } + }); + }; + + Library.prototype.InsertAsMD = async function(data, plugins) + { + let htmlContent = Asc.Library.ConvertMdToHTML(data, plugins) + return await Asc.Library.InsertAsHTML(htmlContent); + }; + + Library.prototype.ConvertMdToHTML = function(data, plugins) + { + let c = window.markdownit(); + if (plugins) { + for (let i = 0, len = plugins.length; i < len; i++) + c.use(plugins[i]); + } + return c.render(this.getMarkdownResult(data)); + }; + + Library.prototype.InsertAsHTML = async function(data) + { + switch (Asc.Editor.getType()) { 
+ case "word": { + if (true) { + await Editor.callCommand(function() { + let document = Api.GetDocument(); + document.RemoveSelection(); + }, false); + } else { + await Editor.callCommand(function() { + let doc = Api.GetDocument(); + let paras = doc.GetAllParagraphs(); + if (paras.length) + { + let lastPara = paras[paras.length - 1]; + let lastElement = lastPara.GetElement(lastPara.GetElementsCount() - 1); + if (lastElement && lastElement.MoveCursorToPos) + { + lastElement.MoveCursorToPos(100000); + } + } + }); + } + } + default: + break; + } + return await Editor.callMethod("PasteHtml", [data]); + }; + + Library.prototype.InsertAsComment = async function(text) + { + return await Editor.callMethod("AddComment", [{ + UserName : "AI", + Text : decodeHtmlText(text), + Time: Date.now(), + Solver: false + }]); + }; + + Library.prototype.InsertAsHyperlink = async function(content, hint) + { + let text = content; + start = text.indexOf('htt'); + end = text.indexOf(' ', start); + if (end == -1) + end = text.length; + + Asc.scope.link = text.slice(start, end); + return await Editor.callCommand(function(){ + let oDocument = Api.GetDocument(); + let oRange = oDocument.GetRangeBySelect(); + oRange.AddHyperlink(Asc.scope.link, "Meaning of the word"); + }); + }; + + Library.prototype.InsertAsReview = async function(content, isHtml) + { + let isTrackRevisions = await Editor.callCommand(function(){ + let res = Api.asc_GetLocalTrackRevisions(); + Api.asc_SetLocalTrackRevisions(true); + return res; + }); + + Asc.scope.localTrackRevisions = isTrackRevisions; + + await Editor.callMethod(isHtml ? 
"PasteHtml" : "PasteText", [content.trim()]); + + if (true !== isTrackRevisions) + { + await Editor.callCommand(function(){ + Api.asc_SetLocalTrackRevisions(Asc.scope.localTrackRevisions); + }); + } + }; + + Library.prototype.PasteText = async function(text) + { + return await Editor.callMethod("PasteText", [text]); + }; + + Library.prototype.SendError = async function(text, errorLevel) + { + Asc.scope.errorText = text; + Asc.scope.errorLevel = errorLevel; + return await Editor.callCommand(function(){ + Api.sendEvent("asc_onError", Asc.scope.errorText, Asc.scope.errorLevel); + }); + }; + + Library.prototype.GetLocalImagePath = async function(url) { + return await Editor.callMethod("getLocalImagePath", [url]); + }; + + Library.prototype.AddGeneratedImage = async function(base64) { + let editorVersion = await Asc.Library.GetEditorVersion(); + + if (Asc.Editor.getType() === "pdf") { + return await Editor.callMethod("PasteHtml", [""]); + } + + if (editorVersion >= 9000000) { + let urlLocal = await this.GetLocalImagePath(base64); + if (urlLocal.error === true) + return; + + Asc.scope.url = urlLocal.url; + } else { + Asc.scope.url = url; + } + + switch (window.Asc.plugin.info.editorType) { + case "word": { + return await Editor.callCommand(function() { + let document = Api.GetDocument(); + let paragraph = Api.CreateParagraph(); + let drawing = Api.CreateImage(Asc.scope.url, 100 * 36000, 100 * 36000); + paragraph.AddDrawing(drawing); + document.RemoveSelection(); + document.InsertContent([paragraph], true); + }, false); + } + case "cell": { + return await Editor.callCommand(function() { + let worksheet = Api.GetActiveSheet(); + worksheet.AddImage(Asc.scope.url, 100 * 36000, 100 * 36000, 0, 2 * 36000, 2, 3 * 36000); + }, false); + } + case "slide": { + return await Editor.callCommand(function() { + let presentation = Api.GetPresentation(); + let slide = presentation.GetCurrentSlide(); + let image = Api.CreateImage(Asc.scope.url, 150 * 36000, 150 * 36000); + 
slide.AddObject(image); + }, false); + } + default: + break; + } + }; + + Library.prototype.AddOleObject = async function(imageUrl, data) { + switch (window.Asc.plugin.info.editorType) { + case "word": { + await Editor.callCommand(function(){ + let document = Api.GetDocument(); + document.RemoveSelection(); + }); + break; + } + default: + break; + } + + let W = 100; + let H = 100; + + let info = window.Asc.plugin.info; + var obj = { + guid : info.guid, + widthPix : info.mmToPx * W, + heightPix : info.mmToPx * H, + width : W, + height : H, + imgSrc : imageUrl, + data : data + }; + + return await Editor.callMethod("AddOleObject", [obj]); + }; + + Library.prototype.trimResult = function(data, posStart, isSpaces, extraCharacters) { + let pos = posStart || 0; + if (-1 != pos) { + let trimC = ["\"", "'", "\n", "\r", "`"]; + if (true === isSpaces) + trimC.push(" "); + while (pos < data.length && trimC.includes(data[pos])) + pos++; + + let posEnd = data.length - 1; + while (posEnd > 0 && trimC.includes(data[posEnd])) + posEnd--; + + if (posEnd > pos) + return data.substring(pos, posEnd + 1); + } + return data; + }; + + Library.prototype.getTranslateResult = function(data, dataSrc) { + data = this.trimResult(data, 0, true); + let trimC = ["\"", "'", "\n", "\r", " "]; + if (dataSrc.length > 0 && trimC.includes(dataSrc[0])) { + data = dataSrc[0] + data; + } + if (dataSrc.length > 1 && trimC.includes(dataSrc[dataSrc.length - 1])) { + data = data + dataSrc[dataSrc.length - 1]; + } + return data; + }; + + Library.prototype.getMarkdownResult = function(data) { + let markdownEscape = data.indexOf("```md"); + if (-1 !== markdownEscape && markdownEscape < 5) + data = data.substring(markdownEscape + 5); + return this.trimResult(data); + }; + + exports.Asc = exports.Asc || {}; + exports.Asc.Library = new Library(); + + exports.Asc.Prompts = { + getFixAndSpellPrompt(content) { + let prompt = `I want you to act as an editor and proofreader. 
\ +I will provide you with some text that needs to be checked for spelling and grammar errors. \ +Your task is to carefully review the text and correct any mistakes, \ +ensuring that the corrected text is free of errors and maintains the original meaning. \ +Only return the corrected text. \ +Here is the text that needs revision: \"${content}\"`; + return prompt; + }, + getSummarizationPrompt(content, language) { + let prompt = "Summarize the following text. "; + if (language) { + prompt += "and translate the result to " + language; + prompt += "Return only the resulting translated text."; + } else { + prompt += "Return only the resulting text."; + } + prompt += "Text: \"\"\"\n"; + prompt += content; + prompt += "\n\"\"\""; + return prompt; + }, + getTranslatePrompt(content, language) { + let prompt = "Translate the following text to " + language; + prompt += ". Return only the resulting text."; + prompt += "Text: \"\"\"\n"; + prompt += content; + prompt += "\n\"\"\""; + return prompt; + }, + getExplainPrompt(content) { + let prompt = "Explain what the following text means. Return only the resulting text."; + prompt += "Text: \"\"\"\n"; + prompt += content; + prompt += "\n\"\"\""; + return prompt; + }, + getTextLongerPrompt(content) { + let prompt = "Make the following text longer. Return only the resulting text."; + prompt += "Text: \"\"\"\n"; + prompt += content; + prompt += "\n\"\"\""; + return prompt; + }, + getTextShorterPrompt(content) { + let prompt = "Make the following text simpler. Return only the resulting text."; + prompt += "Text: \"\"\"\n"; + prompt += content; + prompt += "\n\"\"\""; + return prompt; + }, + getTextRewritePrompt(content) { + let prompt = "Rewrite the following text differently. 
Return only the resulting text."; + prompt += "Text: \"\"\"\n"; + prompt += content; + prompt += "\n\"\"\""; + return prompt; + }, + getTextKeywordsPrompt(content) { + let prompt = `Get Key words from this text: "${content}"`; + return prompt; + }, + getExplainAsLinkPrompt(content) { + let prompt = "Give a link to the explanation of the following text. Return only the resulting link."; + prompt += "Text: \"\"\"\n"; + prompt += content; + prompt += "\n\"\"\""; + return prompt; + }, + getImageDescription() { + return "Describe in detail everything you see in this image. Mention the objects, their appearance, colors, arrangement, background, and any noticeable actions or interactions. Be as specific and accurate as possible. Avoid making assumptions about things that are not clearly visible." + }, + getImagePromptOCR() { + return "Extract all text from this image as accurately as possible. Preserve original reading order and formatting if possible. Recognize tables and images if possible. Do not add or remove any content. Output recognized objects in md format if possible. 
If not, return plain text."; + } + }; + +})(window); diff --git a/DocService/sources/ai/engine/local_storage.js b/DocService/sources/ai/engine/local_storage.js new file mode 100644 index 00000000..6c81b3e0 --- /dev/null +++ b/DocService/sources/ai/engine/local_storage.js @@ -0,0 +1,224 @@ +(function(exports, undefined) +{ + exports.AI = exports.AI || {}; + var AI = exports.AI; + + AI.DEFAULT_SERVER_SETTINGS = null; + + var localStorageKey = "onlyoffice_ai_plugin_storage_key"; + + AI.Providers = {}; + + AI.serializeProviders = function() { + let result = []; + for (let i in AI.Providers) { + if (AI.Providers[i].name) { + result.push({ + name : AI.Providers[i].name, + url : AI.Providers[i].url, + key : AI.Providers[i].key, + models : AI.Providers[i].models + }); + } + } + return result; + }; + + AI.Models = []; + + AI.Storage.save = function() { + try { + let obj = { + version : AI.Storage.Version, + providers : {}, + models : AI.Models, + customProviders : AI.InternalCustomProvidersSources + }; + + for (let pr in AI.Providers) + { + obj.providers[pr] = {}; + obj.providers[pr].name = AI.Providers[pr].name; + obj.providers[pr].url = AI.Providers[pr].url; + obj.providers[pr].key = AI.Providers[pr].key; + obj.providers[pr].models = AI.Providers[pr].models; + } + + window.localStorage.setItem(localStorageKey, JSON.stringify(obj)); + + if (this.onChangeStorage) + this.onChangeStorage(); + return true; + } + catch (e) { + } + return false; + }; + + AI.Storage.load = function() { + let obj = null; + try { + obj = JSON.parse(window.localStorage.getItem(localStorageKey)); + } catch (e) { + obj = AI.DEFAULT_SERVER_SETTINGS; + + if (obj) { + AI.DEFAULT_SERVER_SETTINGS.version = AI.Storage.Version; + } + } + + if (obj) { + let fixVersion2 = false; + switch (obj.version) + { + case undefined: + case 1: + obj = null; + break; + case 2: + // redesign provider url: add /v1 + fixVersion2 = true; + break; + case 3: + default: + break; + } + + if (obj) { + let oldProviders = 
AI.Providers; + AI.Providers = {}; + + AI.InternalCustomProvidersSources = obj.customProviders || {}; + AI.loadCustomProviders(); + + for (let i = 0, len = AI.InternalCustomProviders.length; i < len; i++) { + let pr = AI.InternalCustomProviders[i]; + oldProviders[pr.name] = pr; + } + + for (let i = 0, len = AI.InternalCustomProviders.length; i < len; i++) { + if (AI.InternalCustomProviders[i].name === name) { + AI.InternalCustomProviders.splice(i, 1); + break; + } + } + + for (let i in obj.providers) { + let pr = obj.providers[i]; + AI.Providers[i] = AI.createProviderInstance(pr.name, pr.url, pr.key, pr.addon); + AI.Providers[i].models = pr.models || []; + + if (fixVersion2) { + if (!AI.isInternalProvider(pr.name)) + AI.Providers[i].addon = "v1"; + } + } + + for (let pr in oldProviders) + { + if (!AI.Providers[pr]) + AI.Providers[pr] = oldProviders[pr]; + } + + AI.Models = obj.models; + } + + return true; + } + return false; + }; + + AI.Storage.addModel = function(model) { + + if (AI.Providers[model.provider.name]) { + AI.Providers[model.provider.name].name = model.provider.name; + AI.Providers[model.provider.name].url = model.provider.url; + AI.Providers[model.provider.name].key = model.provider.key; + } else { + AI.Providers[model.provider.name] = + AI.createProviderInstance(model.provider.name, model.provider.url, model.provider.key); + } + + if (AI.TmpProviderForModels && + model.provider.name === AI.TmpProviderForModels.name && + AI.TmpProviderForModels.models.length > 0) { + AI.Providers[model.provider.name].models = AI.TmpProviderForModels.models; + } + + let isFoundModel = false; + for (let i = 0, len = AI.Models.length; i < len; i++) + { + if (AI.Models[i].id === model.id) + { + AI.Models[i].provider = model.provider.name; + AI.Models[i].name = model.name; + AI.Models[i].capabilities = model.capabilities; + isFoundModel = true; + } + } + + if (!isFoundModel) + AI.Models.push(new AI.UI.Model(model.name, model.id, model.provider.name, + model.capabilities 
=== undefined ? AI.CapabilitiesUI.All : model.capabilities)); + + this.save(); + }; + + AI.Storage.removeModel = function(modelId) { + for (let i = 0, len = AI.Models.length; i < len; i++) + { + if (AI.Models[i].id === modelId) + { + AI.Models.splice(i, 1); + this.save(); + return; + } + } + }; + + AI.Storage.getModelByName = function(name) { + for (let i in AI.Models) { + if (AI.Models[i].name === name) + return AI.Models[i]; + } + return null; + }; + + AI.Storage.getModelById = function(id) { + for (let i in AI.Models) { + if (AI.Models[i].id === id) + return AI.Models[i]; + } + return null; + }; + + AI.Storage.serializeModels = function() { + let result = []; + for (let i in AI.Models) { + if (AI.Models[i].id) { + result.push({ + name : AI.Models[i].name, + id : AI.Models[i].id, + provider : AI.Models[i].provider, + capabilities : AI.Models[i].capabilities, + }); + } + } + return result; + }; + + AI.Storage.getProvider = function(name) { + if (AI.Providers[name]) + return AI.Providers[name]; + return null; + }; + + AI.onLoadInternalProviders = function() { + for (let i = 0, len = AI.InternalProviders.length; i < len; i++) { + let pr = AI.InternalProviders[i]; + AI.Providers[pr.name] = pr; + } + AI.Storage.load(); + }; + +})(window); diff --git a/DocService/sources/ai/engine/providers/base.js b/DocService/sources/ai/engine/providers/base.js new file mode 100644 index 00000000..000166f6 --- /dev/null +++ b/DocService/sources/ai/engine/providers/base.js @@ -0,0 +1,247 @@ +"use strict"; + +(function(){ + + window.AI = window.AI || {}; + var AI = window.AI; + + // Tokens + AI.InputMaxTokens = { + "4k" : 4096, + "8k" : 8192, + "16k" : 16384, + "32k" : 32768, + "64k" : 65536, + "128k" : 131072, + "200k" : 204800, + "256k" : 262144 + }; + + let keys = []; + for (let i in AI.InputMaxTokens) + keys.push(i); + + AI.InputMaxTokens.keys = keys; + AI.InputMaxTokens.getFloor = function(value) { + let result = undefined; + for (let i = 0, len = AI.InputMaxTokens.keys.length; i 
< len; i++) { + if (AI.InputMaxTokens[AI.InputMaxTokens.keys[i]] <= value) + result = AI.InputMaxTokens[AI.InputMaxTokens.keys[i]]; + } + return result; + }; + + // UI + AI.UI = AI.UI || {}; + + AI.UI.Model = function(name, id, provider, capabilities) { + this.capabilities = capabilities || AI.CapabilitiesUI.None; + this.provider = provider || ""; + this.name = name || ""; + this.id = id || ""; + }; + + AI.UI.Provider = function(name, key, url) { + this.name = name || ""; + this.key = key || ""; + this.url = url || ""; + }; + + AI.UI.Action = function(name, icon, model) { + this.name = name || ""; + this.icon = icon || ""; + this.model = model || ""; + }; + + // Endpoints + AI.Endpoints = { + + Types : { + + Undefined : -1, + + v1 : { + + Models : 0x00, + + Chat_Completions : 0x01, + Completions : 0x02, + + Images_Generations : 0x11, + Images_Edits : 0x12, + Images_Variarions : 0x13, + + Embeddings : 0x21, + + Audio_Transcriptions : 0x31, + Audio_Translations : 0x32, + Audio_Speech : 0x33, + + Moderations : 0x41, + + Realtime : 0x51, + + Language : 0x61, + Code : 0x62, + + OCR : 0x70 + } + + } + }; + + AI.CapabilitiesUI = { + + None : 0x00, + + Chat : 0x01, + + Image : 0x02, + + Embeddings : 0x04, + + Audio : 0x08, + + Moderations : 0x10, + + Realtime : 0x20, + + Code : 0x40, + + Vision : 0x80 + + }; + + let capabilitiesAll = 0; + for (let item in AI.CapabilitiesUI) + capabilitiesAll |= AI.CapabilitiesUI[item]; + AI.CapabilitiesUI.All = capabilitiesAll; + + AI.InternalProviders = []; + AI.createProviderInstance = function(name, url, key, addon) { + for (let i = 0, len = window.AI.InternalCustomProviders.length; i < len; i++) { + if (name === AI.InternalCustomProviders[i].name) + return AI.InternalCustomProviders[i].createInstance(name, url, key, addon || AI.InternalCustomProviders[i].addon); + } + for (let i = 0, len = window.AI.InternalProviders.length; i < len; i++) { + if (name === AI.InternalProviders[i].name) + return 
AI.InternalProviders[i].createInstance(name, url, key, addon || AI.InternalProviders[i].addon); + } + return new AI.Provider(name, url, key); + }; + + AI.isInternalProvider = function(name) { + for (let i = 0, len = AI.InternalProviders.length; i < len; i++) { + if (name === AI.InternalProviders[i].name) + return true; + } + return false; + }; + + AI.loadInternalProviders = async function() { + let providersText = await AI.loadResourceAsText("./scripts/engine/providers/config.json"); + if ("" === providersText) + return; + + try { + let providers = JSON.parse(providersText); + for (let i = 0, len = providers.length; i < len; i++) { + let providerContent = await AI.loadResourceAsText("./scripts/engine/providers/internal/" + providers[i] + ".js"); + if (providerContent !== "") { + let content = "(function(){\n" + providerContent + "\nreturn new Provider();})();"; + let provider = eval(content); + + if (provider.isOnlyDesktop() && (-1 === navigator.userAgent.indexOf("AscDesktopEditor"))) + continue; + + window.AI.InternalProviders.push(provider); + } + } + } catch(err) { + } + + AI.onLoadInternalProviders(); + }; + + AI.InternalCustomProvidersSources = {}; + AI.InternalCustomProviders = []; + + AI.loadCustomProviders = function() { + + AI.InternalCustomProviders = []; + for (let name in AI.InternalCustomProvidersSources) { + AI.addCustomProvider(AI.InternalCustomProvidersSources[name], true); + } + + }; + + AI.addCustomProvider = function(providerContent, isRegister) { + + try { + let content = "(function(){\n" + providerContent + "\nreturn new Provider();})();"; + let provider = eval(content); + + if (!provider.name) + return false; + + if (provider.isOnlyDesktop() && (-1 === navigator.userAgent.indexOf("AscDesktopEditor"))) + return false; + + AI.InternalCustomProvidersSources[provider.name] = providerContent; + + for (let i = 0, len = AI.InternalCustomProviders.length; i < len; i++) { + if (AI.InternalCustomProviders[i].name === provider.name) { + 
AI.InternalCustomProviders.splice(i, 1); + break; + } + } + + AI.InternalCustomProviders.push(provider); + + if (!isRegister) + { + AI.Storage.save(); + AI.Storage.load(); + } + + return true; + + } catch(err) { + } + + return false; + + }; + + AI.removeCustomProvider = function(name) { + + if (AI.InternalCustomProvidersSources[name]) + delete AI.InternalCustomProvidersSources[name]; + + for (let i = 0, len = AI.InternalCustomProviders.length; i < len; i++) { + if (AI.InternalCustomProviders[i].name === name) { + AI.InternalCustomProviders.splice(i, 1); + + if (!AI.isInternalProvider(name) && AI.Providers[name]) { + delete AI.Providers[name]; + } + + AI.Storage.save(); + AI.Storage.load(); + break; + } + } + + }; + + AI.getCustomProviders = function() { + + let names = []; + for (let i = 0, len = AI.InternalCustomProviders.length; i < len; i++) { + names.push(AI.InternalCustomProviders[i].name); + } + return names; + + }; + +})(); diff --git a/DocService/sources/ai/engine/providers/config.json b/DocService/sources/ai/engine/providers/config.json new file mode 100644 index 00000000..7a8b519e --- /dev/null +++ b/DocService/sources/ai/engine/providers/config.json @@ -0,0 +1,13 @@ +[ + "openai", + "anthropic", + "google-gemini", + "deepseek", + "together.ai", + "groq", + "ollama", + "mistral", + "gpt4all", + "xAI", + "stabilityai" +] diff --git a/DocService/sources/ai/engine/providers/internal/anthropic-ai-models.txt b/DocService/sources/ai/engine/providers/internal/anthropic-ai-models.txt new file mode 100644 index 00000000..52cec3d6 --- /dev/null +++ b/DocService/sources/ai/engine/providers/internal/anthropic-ai-models.txt @@ -0,0 +1,38 @@ +[ + { + "type": "model", + "id": "claude-3-7-sonnet-20250219", + "display_name": "Claude 3.7 Sonnet", + "created_at": "2025-02-24T00:00:00Z" + }, + { + "type": "model", + "id": "claude-3-5-sonnet-20241022", + "display_name": "Claude 3.5 Sonnet (New)", + "created_at": "2024-10-22T00:00:00Z" + }, + { + "type": "model", + "id": 
"claude-3-5-haiku-20241022", + "display_name": "Claude 3.5 Haiku", + "created_at": "2024-10-22T00:00:00Z" + }, + { + "type": "model", + "id": "claude-3-5-sonnet-20240620", + "display_name": "Claude 3.5 Sonnet (Old)", + "created_at": "2024-06-20T00:00:00Z" + }, + { + "type": "model", + "id": "claude-3-haiku-20240307", + "display_name": "Claude 3 Haiku", + "created_at": "2024-03-07T00:00:00Z" + }, + { + "type": "model", + "id": "claude-3-opus-20240229", + "display_name": "Claude 3 Opus", + "created_at": "2024-02-29T00:00:00Z" + } +] \ No newline at end of file diff --git a/DocService/sources/ai/engine/providers/internal/anthropic.js b/DocService/sources/ai/engine/providers/internal/anthropic.js new file mode 100644 index 00000000..5218e3de --- /dev/null +++ b/DocService/sources/ai/engine/providers/internal/anthropic.js @@ -0,0 +1,100 @@ +"use strict"; + +class Provider extends AI.Provider { + + constructor() { + super("Anthropic", "https://api.anthropic.com", "", "v1"); + } + + checkModelCapability = function(model) { + if (0 == model.id.indexOf("claude-2")) + { + model.options.max_input_tokens = AI.InputMaxTokens["100k"]; + model.endpoints.push(AI.Endpoints.Types.v1.Chat_Completions); + return AI.CapabilitiesUI.Chat; + } + + if (0 == model.id.indexOf("claude-3-5-haiku")) + { + model.options.max_input_tokens = AI.InputMaxTokens["200k"]; + model.endpoints.push(AI.Endpoints.Types.v1.Chat_Completions); + return AI.CapabilitiesUI.Chat; + } + + model.options.max_input_tokens = AI.InputMaxTokens["200k"]; + model.endpoints.push(AI.Endpoints.Types.v1.Chat_Completions); + return AI.CapabilitiesUI.Chat | AI.CapabilitiesUI.Vision; + } + + getEndpointUrl(endpoint, model) { + switch (endpoint) + { + case AI.Endpoints.Types.v1.Chat_Completions: + case AI.Endpoints.Types.v1.Images_Generations: + case AI.Endpoints.Types.v1.Images_Edits: + case AI.Endpoints.Types.v1.Images_Variarions: + { + return "/messages"; + } + default: + break; + } + return super.getEndpointUrl(endpoint, 
model); + } + + getRequestBodyOptions() { + return { + max_tokens : 4096 + }; + } + + getRequestHeaderOptions() { + let headers = { + "Content-Type" : "application/json", + "anthropic-version" : "2023-06-01", + "anthropic-dangerous-direct-browser-access": "true" + }; + if (this.key) + headers["x-api-key"] = this.key; + return headers; + } + + getChatCompletions(message, model) { + let systemPrompt = this.getSystemMessage(message, true); + let result = super.getChatCompletions(message, model); + if (systemPrompt !== "") { + result.system = systemPrompt; + } + return result; + } + + getImageGeneration(message, model) { + return this.getImageGenerationWithChat(message, model, "Image must be in svg format. "); + } + + async getImageVision(message, model) { + return { + model : model.id, + messages : [ + { + role: "user", + content: [ + { + type: "text", + text: message.prompt + }, + { + type: "image", + source: { + type: "base64", + media_type: AI.ImageEngine.getMimeTypeFromBase64(message.image), + data: AI.ImageEngine.getContentFromBase64(message.image) + } + } + ] + } + ] + } + } + +} diff --git a/DocService/sources/ai/engine/providers/internal/deepseek-ai-models.txt b/DocService/sources/ai/engine/providers/internal/deepseek-ai-models.txt new file mode 100644 index 00000000..be9ac271 --- /dev/null +++ b/DocService/sources/ai/engine/providers/internal/deepseek-ai-models.txt @@ -0,0 +1,15 @@ +{ + "object": "list", + "data": [ + { + "id": "deepseek-chat", + "object": "model", + "owned_by": "deepseek" + }, + { + "id": "deepseek-reasoner", + "object": "model", + "owned_by": "deepseek" + } + ] +} \ No newline at end of file diff --git a/DocService/sources/ai/engine/providers/internal/deepseek.js b/DocService/sources/ai/engine/providers/internal/deepseek.js new file mode 100644 index 00000000..fdcca102 --- /dev/null +++ b/DocService/sources/ai/engine/providers/internal/deepseek.js @@ -0,0 +1,9 @@ +"use strict"; + +class Provider extends AI.Provider { + + constructor() { + 
super("Deepseek", "https://api.deepseek.com", "", ""); + } + +} diff --git a/DocService/sources/ai/engine/providers/internal/google-gemini.js b/DocService/sources/ai/engine/providers/internal/google-gemini.js new file mode 100644 index 00000000..e595df11 --- /dev/null +++ b/DocService/sources/ai/engine/providers/internal/google-gemini.js @@ -0,0 +1,140 @@ +"use strict"; + +class Provider extends AI.Provider { + + constructor() { + super("Google-Gemini", "https://generativelanguage.googleapis.com", "", "v1beta"); + } + + correctModelInfo(model) { + model.id = model.name; + let index = model.name.indexOf("models/"); + if (index === 0) + model.name = model.name.substring(7); + } + + checkExcludeModel(model) { + if (model.id === "models/chat-bison-001" || + model.id === "models/text-bison-001") + return true; + + if (-1 !== model.id.indexOf("gemini-1.0")) + return true; + + return false; + } + + checkModelCapability(model) { + if (model.inputTokenLimit) + model.options.max_input_tokens = model.inputTokenLimit; + + if (Array.isArray(model.supportedGenerationMethods) && + model.supportedGenerationMethods.includes("generateContent")) + { + model.endpoints.push(AI.Endpoints.Types.v1.Chat_Completions); + let caps = AI.CapabilitiesUI.Chat; + if (-1 !== model.id.indexOf("vision")) + caps |= AI.CapabilitiesUI.Vision; + + return AI.CapabilitiesUI.Chat | AI.CapabilitiesUI.Vision; + } + + if (Array.isArray(model.supportedGenerationMethods) && + model.supportedGenerationMethods.includes("embedContent")) + { + model.endpoints.push(AI.Endpoints.Types.v1.Embeddings); + return AI.CapabilitiesUI.Embeddings; + } + + return AI.CapabilitiesUI.All; + } + + getEndpointUrl(endpoint, model) { + let Types = AI.Endpoints.Types; + let url = ""; + switch (endpoint) + { + case Types.v1.Models: + url = "/models"; + break; + default: + let addon = ":generateContent"; + if (endpoint === Types.v1.Images_Generations) { + if (-1 != model.id.indexOf("imagen-3")) + addon = ":predict"; + } + url = "/" + 
model.id + addon; + break; + } + if (this.key) + url += "?key=" + this.key; + return url; + } + + getRequestHeaderOptions() { + let headers = { + "Content-Type" : "application/json" + }; + return headers; + } + + getChatCompletions(message, model) { + let body = { contents : [] }; + for (let i = 0, len = message.messages.length; i < len; i++) { + let rec = { + role : message.messages[i].role, + parts : [ { text : message.messages[i].content } ] + }; + if (rec.role === "assistant") + rec.role = "model"; + else if (rec.role === "system") { + body.system_instruction = rec; + continue; + } + body.contents.push(rec); + } + return body; + } + + getImageGeneration(message, model) { + if (-1 != model.id.indexOf("flash")) { + let result = this.getImageGenerationWithChat(message, model); + result.generationConfig = {"responseModalities":["TEXT","IMAGE"]}; + return result; + } + if (-1 != model.id.indexOf("imagen-3")) { + return { + instances: [ + { + prompt: message.prompt + } + ], + parameters: { + "sampleCount": 1 + } + }; + } + + return {}; + } + + async getImageVision(message, model) { + return { + contents : [ + { + role: "user", + parts: [ + { text: message.prompt }, + { + inline_data: { + mime_type: AI.ImageEngine.getMimeTypeFromBase64(message.image), + data: AI.ImageEngine.getContentFromBase64(message.image) + } + } + ] + } + ] + } + } + +} diff --git a/DocService/sources/ai/engine/providers/internal/gpt4all.js b/DocService/sources/ai/engine/providers/internal/gpt4all.js new file mode 100644 index 00000000..b8fe6a6c --- /dev/null +++ b/DocService/sources/ai/engine/providers/internal/gpt4all.js @@ -0,0 +1,19 @@ +"use strict"; + +class Provider extends AI.Provider { + + constructor() { + super("GPT4All", "http://localhost:4891", "", "v1"); + } + + getRequestBodyOptions() { + return { + max_tokens : 4096 + }; + } + + isOnlyDesktop() { + return true; + } + +} diff --git a/DocService/sources/ai/engine/providers/internal/groq.js 
b/DocService/sources/ai/engine/providers/internal/groq.js new file mode 100644 index 00000000..3674a9cb --- /dev/null +++ b/DocService/sources/ai/engine/providers/internal/groq.js @@ -0,0 +1,29 @@ +"use strict"; + +class Provider extends AI.Provider { + + constructor() { + super("Groq", "https://api.groq.com/openai", "", "v1"); + } + + checkModelCapability = function(model) { + if (model.context_length) + model.options.max_input_tokens = AI.InputMaxTokens.getFloor(model.context_length); + + if (-1 !== model.id.toLowerCase().indexOf("vision")) { + model.endpoints.push(AI.Endpoints.Types.v1.Chat_Completions); + model.endpoints.push(AI.Endpoints.Types.v1.Vision); + return AI.CapabilitiesUI.Chat | AI.CapabilitiesUI.Vision; + } + + if (-1 !== model.id.toLowerCase().indexOf("whisper")) { + model.endpoints.push(AI.Endpoints.Types.v1.Audio_Transcriptions); + model.endpoints.push(AI.Endpoints.Types.v1.Audio_Translations); + return AI.CapabilitiesUI.Audio; + } + + model.endpoints.push(AI.Endpoints.Types.v1.Chat_Completions); + return AI.CapabilitiesUI.Chat; + } + +} diff --git a/DocService/sources/ai/engine/providers/internal/mistral.js b/DocService/sources/ai/engine/providers/internal/mistral.js new file mode 100644 index 00000000..c517c377 --- /dev/null +++ b/DocService/sources/ai/engine/providers/internal/mistral.js @@ -0,0 +1,115 @@ +"use strict"; + +class Provider extends AI.Provider { + + constructor() { + super("Mistral", "https://api.mistral.ai", "", "v1"); + } + + checkModelCapability = function(model) { + if (-1 !== model.id.indexOf("mistral-embed")) + { + model.options.max_input_tokens = AI.InputMaxTokens["8k"]; + model.endpoints.push(AI.Endpoints.Types.v1.Embeddings); + return AI.CapabilitiesUI.Embeddings; + } + if (-1 !== model.id.indexOf("mistral-moderation")) + { + model.options.max_input_tokens = AI.InputMaxTokens["8k"]; + model.endpoints.push(AI.Endpoints.Types.v1.Moderations); + return AI.CapabilitiesUI.Moderations; + } + if (-1 !== 
model.id.indexOf("pixtral")) + { + model.options.max_input_tokens = AI.InputMaxTokens["128k"]; + model.endpoints.push(AI.Endpoints.Types.v1.Images_Generations); + model.endpoints.push(AI.Endpoints.Types.v1.Images_Edits); + model.endpoints.push(AI.Endpoints.Types.v1.Images_Variarions); + return AI.CapabilitiesUI.Image; + } + if (-1 !== model.id.indexOf("mistral-small")) + { + model.options.max_input_tokens = AI.InputMaxTokens["32k"]; + model.endpoints.push(AI.Endpoints.Types.v1.Chat_Completions); + return AI.CapabilitiesUI.Chat; + } + if (-1 !== model.id.indexOf("mistral-medium")) + { + model.options.max_input_tokens = AI.InputMaxTokens["32k"]; + model.endpoints.push(AI.Endpoints.Types.v1.Chat_Completions); + return AI.CapabilitiesUI.Chat; + } + if (-1 !== model.id.indexOf("codestral")) + { + model.options.max_input_tokens = AI.InputMaxTokens["256k"]; + model.endpoints.push(AI.Endpoints.Types.v1.Code); + return AI.CapabilitiesUI.Code | AI.CapabilitiesUI.Chat; + } + + model.options.max_input_tokens = AI.InputMaxTokens["128k"]; + model.endpoints.push(AI.Endpoints.Types.v1.Chat_Completions); + + let capUI = AI.CapabilitiesUI.Chat; + if (model.capabilities && model.capabilities.vision) + capUI = AI.CapabilitiesUI.Vision; + return capUI; + } + + getEndpointUrl(endpoint, model) { + let Types = AI.Endpoints.Types; + let url = ""; + switch (endpoint) + { + case Types.v1.OCR: + url = "/ocr"; + break; + default: + break; + } + if (!url) + return super.getEndpointUrl(endpoint, model); + return url; + } + + async getImageOCR(message, model) { + let result = { + model: model.id, + document: { + type: "image_url", + image_url: message.image + } + }; + //result.output_format = "markdown"; + result.include_image_base64 = true; + return result; + } + + getImageOCRResult(messageInput, model) { + let message = messageInput.data ? 
messageInput.data : messageInput; + let images = []; + let markdownContent = ""; + if (!message.pages) + return markdownContent; + + for (let i = 0, len = message.pages.length; i < len; i++) { + let page = message.pages[i]; + + let images = page.images; + let md = page.markdown; + + for (let j = 0, imagesCount = images.length; j < imagesCount; j++) { + let src = "](" + images[j].id + ")"; + let dst = "](" + images[j].image_base64 + ")"; + + src = src.replace(/[.*+?^${}()|[\]\\]/g, '\\$&'); + md = md.replace(new RegExp(src, "g"), dst); + } + + markdownContent += md; + markdownContent += "\n\n"; + } + + return markdownContent; + } + +} diff --git a/DocService/sources/ai/engine/providers/internal/ollama.js b/DocService/sources/ai/engine/providers/internal/ollama.js new file mode 100644 index 00000000..93e44a4f --- /dev/null +++ b/DocService/sources/ai/engine/providers/internal/ollama.js @@ -0,0 +1,22 @@ +"use strict"; + +class Provider extends AI.Provider { + + constructor() { + super("Ollama", "http://localhost:11434", "", "v1"); + } + + getImageGeneration(message, model) { + let result = super.getImageGeneration(message, model); + result.options = {}; + if (result.width) + result.options.width = result.width; + if (result.height) + result.options.height = result.height; + delete result.width; + delete result.height; + delete result.n; + return result; + } + +} diff --git a/DocService/sources/ai/engine/providers/internal/open-ai-models.txt b/DocService/sources/ai/engine/providers/internal/open-ai-models.txt new file mode 100644 index 00000000..fced5b13 --- /dev/null +++ b/DocService/sources/ai/engine/providers/internal/open-ai-models.txt @@ -0,0 +1,431 @@ +{ + "object": "list", + "data": [ + { + "id": "gpt-4o-audio-preview-2024-12-17", + "object": "model", + "created": 1734034239, + "owned_by": "system" + }, + { + "id": "dall-e-3", + "object": "model", + "created": 1698785189, + "owned_by": "system" + }, + { + "id": "dall-e-2", + "object": "model", + "created": 
1698798177, + "owned_by": "system" + }, + { + "id": "gpt-4o-audio-preview-2024-10-01", + "object": "model", + "created": 1727389042, + "owned_by": "system" + }, + { + "id": "text-embedding-3-small", + "object": "model", + "created": 1705948997, + "owned_by": "system" + }, + { + "id": "o4-mini", + "object": "model", + "created": 1744225351, + "owned_by": "system" + }, + { + "id": "gpt-4.1-nano", + "object": "model", + "created": 1744321707, + "owned_by": "system" + }, + { + "id": "gpt-4.1-nano-2025-04-14", + "object": "model", + "created": 1744321025, + "owned_by": "system" + }, + { + "id": "gpt-4o-realtime-preview-2024-10-01", + "object": "model", + "created": 1727131766, + "owned_by": "system" + }, + { + "id": "o4-mini-2025-04-16", + "object": "model", + "created": 1744133506, + "owned_by": "system" + }, + { + "id": "gpt-4o-realtime-preview", + "object": "model", + "created": 1727659998, + "owned_by": "system" + }, + { + "id": "babbage-002", + "object": "model", + "created": 1692634615, + "owned_by": "system" + }, + { + "id": "gpt-4", + "object": "model", + "created": 1687882411, + "owned_by": "openai" + }, + { + "id": "text-embedding-ada-002", + "object": "model", + "created": 1671217299, + "owned_by": "openai-internal" + }, + { + "id": "text-embedding-3-large", + "object": "model", + "created": 1705953180, + "owned_by": "system" + }, + { + "id": "gpt-4o-mini-audio-preview", + "object": "model", + "created": 1734387424, + "owned_by": "system" + }, + { + "id": "gpt-4o-audio-preview", + "object": "model", + "created": 1727460443, + "owned_by": "system" + }, + { + "id": "o1-preview-2024-09-12", + "object": "model", + "created": 1725648865, + "owned_by": "system" + }, + { + "id": "gpt-4o-mini-realtime-preview", + "object": "model", + "created": 1734387380, + "owned_by": "system" + }, + { + "id": "gpt-4.1-mini", + "object": "model", + "created": 1744318173, + "owned_by": "system" + }, + { + "id": "gpt-4o-mini-realtime-preview-2024-12-17", + "object": "model", + 
"created": 1734112601, + "owned_by": "system" + }, + { + "id": "gpt-3.5-turbo-instruct-0914", + "object": "model", + "created": 1694122472, + "owned_by": "system" + }, + { + "id": "gpt-4o-mini-search-preview", + "object": "model", + "created": 1741391161, + "owned_by": "system" + }, + { + "id": "gpt-4.1-mini-2025-04-14", + "object": "model", + "created": 1744317547, + "owned_by": "system" + }, + { + "id": "chatgpt-4o-latest", + "object": "model", + "created": 1723515131, + "owned_by": "system" + }, + { + "id": "davinci-002", + "object": "model", + "created": 1692634301, + "owned_by": "system" + }, + { + "id": "gpt-3.5-turbo-1106", + "object": "model", + "created": 1698959748, + "owned_by": "system" + }, + { + "id": "gpt-4o-search-preview", + "object": "model", + "created": 1741388720, + "owned_by": "system" + }, + { + "id": "gpt-4-turbo", + "object": "model", + "created": 1712361441, + "owned_by": "system" + }, + { + "id": "gpt-4o-realtime-preview-2024-12-17", + "object": "model", + "created": 1733945430, + "owned_by": "system" + }, + { + "id": "gpt-3.5-turbo-instruct", + "object": "model", + "created": 1692901427, + "owned_by": "system" + }, + { + "id": "gpt-3.5-turbo", + "object": "model", + "created": 1677610602, + "owned_by": "openai" + }, + { + "id": "gpt-4-turbo-preview", + "object": "model", + "created": 1706037777, + "owned_by": "system" + }, + { + "id": "gpt-4o-mini-search-preview-2025-03-11", + "object": "model", + "created": 1741390858, + "owned_by": "system" + }, + { + "id": "gpt-4-0125-preview", + "object": "model", + "created": 1706037612, + "owned_by": "system" + }, + { + "id": "gpt-4o-2024-11-20", + "object": "model", + "created": 1739331543, + "owned_by": "system" + }, + { + "id": "whisper-1", + "object": "model", + "created": 1677532384, + "owned_by": "openai-internal" + }, + { + "id": "gpt-4o-2024-05-13", + "object": "model", + "created": 1715368132, + "owned_by": "system" + }, + { + "id": "gpt-4-turbo-2024-04-09", + "object": "model", + 
"created": 1712601677, + "owned_by": "system" + }, + { + "id": "gpt-3.5-turbo-16k", + "object": "model", + "created": 1683758102, + "owned_by": "openai-internal" + }, + { + "id": "o1-preview", + "object": "model", + "created": 1725648897, + "owned_by": "system" + }, + { + "id": "gpt-4-0613", + "object": "model", + "created": 1686588896, + "owned_by": "openai" + }, + { + "id": "gpt-4.5-preview", + "object": "model", + "created": 1740623059, + "owned_by": "system" + }, + { + "id": "gpt-4.5-preview-2025-02-27", + "object": "model", + "created": 1740623304, + "owned_by": "system" + }, + { + "id": "gpt-4o-search-preview-2025-03-11", + "object": "model", + "created": 1741388170, + "owned_by": "system" + }, + { + "id": "omni-moderation-2024-09-26", + "object": "model", + "created": 1732734466, + "owned_by": "system" + }, + { + "id": "o3-mini-2025-01-31", + "object": "model", + "created": 1738010200, + "owned_by": "system" + }, + { + "id": "o3-mini", + "object": "model", + "created": 1737146383, + "owned_by": "system" + }, + { + "id": "tts-1-hd", + "object": "model", + "created": 1699046015, + "owned_by": "system" + }, + { + "id": "gpt-4o", + "object": "model", + "created": 1715367049, + "owned_by": "system" + }, + { + "id": "tts-1-hd-1106", + "object": "model", + "created": 1699053533, + "owned_by": "system" + }, + { + "id": "gpt-4o-mini", + "object": "model", + "created": 1721172741, + "owned_by": "system" + }, + { + "id": "gpt-4o-2024-08-06", + "object": "model", + "created": 1722814719, + "owned_by": "system" + }, + { + "id": "gpt-4.1", + "object": "model", + "created": 1744316542, + "owned_by": "system" + }, + { + "id": "gpt-4o-transcribe", + "object": "model", + "created": 1742068463, + "owned_by": "system" + }, + { + "id": "gpt-4.1-2025-04-14", + "object": "model", + "created": 1744315746, + "owned_by": "system" + }, + { + "id": "o1-2024-12-17", + "object": "model", + "created": 1734326976, + "owned_by": "system" + }, + { + "id": "gpt-4o-mini-2024-07-18", + 
"object": "model", + "created": 1721172717, + "owned_by": "system" + }, + { + "id": "gpt-4o-mini-transcribe", + "object": "model", + "created": 1742068596, + "owned_by": "system" + }, + { + "id": "o1-mini", + "object": "model", + "created": 1725649008, + "owned_by": "system" + }, + { + "id": "gpt-4o-mini-audio-preview-2024-12-17", + "object": "model", + "created": 1734115920, + "owned_by": "system" + }, + { + "id": "gpt-3.5-turbo-0125", + "object": "model", + "created": 1706048358, + "owned_by": "system" + }, + { + "id": "o1-mini-2024-09-12", + "object": "model", + "created": 1725648979, + "owned_by": "system" + }, + { + "id": "tts-1", + "object": "model", + "created": 1681940951, + "owned_by": "openai-internal" + }, + { + "id": "gpt-4-1106-preview", + "object": "model", + "created": 1698957206, + "owned_by": "system" + }, + { + "id": "gpt-4o-mini-tts", + "object": "model", + "created": 1742403959, + "owned_by": "system" + }, + { + "id": "tts-1-1106", + "object": "model", + "created": 1699053241, + "owned_by": "system" + }, + { + "id": "o1", + "object": "model", + "created": 1734375816, + "owned_by": "system" + }, + { + "id": "o1-pro", + "object": "model", + "created": 1742251791, + "owned_by": "system" + }, + { + "id": "o1-pro-2025-03-19", + "object": "model", + "created": 1742251504, + "owned_by": "system" + }, + { + "id": "omni-moderation-latest", + "object": "model", + "created": 1731689265, + "owned_by": "system" + } + ] +} \ No newline at end of file diff --git a/DocService/sources/ai/engine/providers/internal/openai.js b/DocService/sources/ai/engine/providers/internal/openai.js new file mode 100644 index 00000000..46e2b9b1 --- /dev/null +++ b/DocService/sources/ai/engine/providers/internal/openai.js @@ -0,0 +1,89 @@ +"use strict"; + +class Provider extends AI.Provider { + + constructor() { + super("OpenAI", "https://api.openai.com", "", "v1"); + } + + checkExcludeModel(model) { + if (-1 !== model.id.indexOf("babbage-002") || + -1 !== 
model.id.indexOf("davinci-002")) + return true; + return false; + } + + checkModelCapability(model) { + if (-1 !== model.id.indexOf("whisper-1")) + { + model.endpoints.push(AI.Endpoints.Types.v1.Audio_Transcriptions); + model.endpoints.push(AI.Endpoints.Types.v1.Audio_Translations); + return AI.CapabilitiesUI.Audio; + } + if (-1 !== model.id.indexOf("tts-1")) + { + model.endpoints.push(AI.Endpoints.Types.v1.Audio_Speech); + return AI.CapabilitiesUI.Audio; + } + if (-1 !== model.id.indexOf("babbage-002") || + -1 !== model.id.indexOf("davinci-002")) + { + model.options.max_input_tokens = AI.InputMaxTokens["16k"]; + model.endpoints.push(AI.Endpoints.Types.v1.Completions); + return AI.CapabilitiesUI.Chat; + } + if (-1 !== model.id.indexOf("embedding")) + { + model.endpoints.push(AI.Endpoints.Types.v1.Embeddings); + return AI.CapabilitiesUI.Embeddings; + } + if (-1 !== model.id.indexOf("moderation")) + { + model.endpoints.push(AI.Endpoints.Types.v1.Moderations); + return AI.CapabilitiesUI.Moderations; + } + if (-1 !== model.id.indexOf("realtime")) + { + model.endpoints.push(AI.Endpoints.Types.v1.Realtime); + return AI.CapabilitiesUI.Realtime; + } + if ("dall-e-2" === model.id) + { + model.endpoints.push(AI.Endpoints.Types.v1.Images_Generations); + model.endpoints.push(AI.Endpoints.Types.v1.Images_Edits); + model.endpoints.push(AI.Endpoints.Types.v1.Images_Variarions); + return AI.CapabilitiesUI.Image; + } + if ("dall-e-3" === model.id) + { + model.endpoints.push(AI.Endpoints.Types.v1.Images_Generations); + return AI.CapabilitiesUI.Image; + } + + if (0 === model.id.indexOf("gpt-4o") || + 0 === model.id.indexOf("o1-") || + 0 === model.id.indexOf("gpt-4-turbo")) + model.options.max_input_tokens = AI.InputMaxTokens["128k"]; + else if (0 === model.id.indexOf("gpt-4")) + model.options.max_input_tokens = AI.InputMaxTokens["8k"]; + else if (-1 != model.id.indexOf("gpt-3.5-turbo-instruct")) { + model.options.max_input_tokens = AI.InputMaxTokens["4k"]; + 
model.endpoints.push(AI.Endpoints.Types.v1.Completions); + return AI.CapabilitiesUI.Chat; + } + else if (0 === model.id.indexOf("gpt-3.5-turbo")) + model.options.max_input_tokens = AI.InputMaxTokens["16k"]; + + model.endpoints.push(AI.Endpoints.Types.v1.Chat_Completions); + return AI.CapabilitiesUI.Chat | AI.CapabilitiesUI.Vision; + }; + + getImageGeneration(message, model) { + let result = super.getImageGeneration(message, model); + result.size = result.width + "x" + result.height; + delete result.width; + delete result.height; + return result; + } + +} diff --git a/DocService/sources/ai/engine/providers/internal/proxy.js b/DocService/sources/ai/engine/providers/internal/proxy.js new file mode 100644 index 00000000..cc11101a --- /dev/null +++ b/DocService/sources/ai/engine/providers/internal/proxy.js @@ -0,0 +1,62 @@ +"use strict"; + +class Provider extends AI.Provider { + + constructor() { + super("Proxy", "http://localhost:8000", "", "ai-proxy"); + } + + checkModelCapability = function(model) { + if (model.context_length) + model.options.max_input_tokens = AI.InputMaxTokens.getFloor(model.context_length); + + if ("chat" === model.type) { + model.endpoints.push(AI.Endpoints.Types.v1.Chat_Completions); + let result = AI.CapabilitiesUI.Chat; + + if (-1 !== model.id.toLowerCase().indexOf("vision")) { + model.endpoints.push(AI.Endpoints.Types.v1.Vision); + result |= AI.CapabilitiesUI.Vision; + } + return result; + } + + if ("image" === model.type) { + model.endpoints.push(AI.Endpoints.Types.v1.Images_Generations); + model.endpoints.push(AI.Endpoints.Types.v1.Images_Edits); + model.endpoints.push(AI.Endpoints.Types.v1.Images_Variarions); + return AI.CapabilitiesUI.Image; + } + + if ("moderation" === model.type) { + model.endpoints.push(AI.Endpoints.Types.v1.Moderations); + return AI.CapabilitiesUI.Moderations; + } + + if ("embedding" === model.type) { + model.endpoints.push(AI.Endpoints.Types.v1.Embeddings); + return AI.CapabilitiesUI.Embeddings; + } + + if 
("language" === model.type) { + model.endpoints.push(AI.Endpoints.Types.v1.Language); + return AI.CapabilitiesUI.Language; + } + + if ("code" === model.type) { + model.endpoints.push(AI.Endpoints.Types.v1.Code); + return AI.CapabilitiesUI.Code | AI.CapabilitiesUI.Chat; + } + + if ("rerank" === model.type) { + return AI.CapabilitiesUI.None; + } + + model.endpoints.push(AI.Endpoints.Types.v1.Chat_Completions); + return AI.CapabilitiesUI.Chat; + } + + isUseProxy() { + return true; + } +} diff --git a/DocService/sources/ai/engine/providers/internal/stabilityai.js b/DocService/sources/ai/engine/providers/internal/stabilityai.js new file mode 100644 index 00000000..9ba7fba4 --- /dev/null +++ b/DocService/sources/ai/engine/providers/internal/stabilityai.js @@ -0,0 +1,64 @@ +"use strict"; + +class Provider extends AI.Provider { + + constructor() { + super("Stability AI", "https://api.stability.ai", "", ""); + } + + getModels() { + return [ + { + id: "Stable Diffusion" + }, + { + id: "Stable Image Core" + }, + { + id: "Stable Image Ultra" + } + ]; + } + + checkModelCapability(model) { + model.endpoints.push(AI.Endpoints.Types.v1.Images_Generations); + return AI.CapabilitiesUI.Image; + }; + + getImageGeneration(message, model) { + let formData = new FormData(); + formData.append("prompt", message.prompt); + formData.append("output_format", "png"); + return formData; + } + + getEndpointUrl(endpoint, model) { + let Types = AI.Endpoints.Types; + let url = ""; + switch (endpoint) + { + case Types.v1.Images_Generations: + if (model.id === "Stable Diffusion") + return "/v2beta/stable-image/generate/sd3"; + if (model.id === "Stable Image Core") + return "/v2beta/stable-image/generate/core"; + if (model.id === "Stable Image Ultra") + return "/v2beta/stable-image/generate/ultra"; + break; + default: + break; + } + + return super.getEndpointUrl(endpoint, model); + } + + getRequestHeaderOptions() { + let headers = { + "Accept": "application/json" + }; + if (this.key) + 
headers["Authorization"] = "Bearer " + this.key; + return headers; + } + +} diff --git a/DocService/sources/ai/engine/providers/internal/together-ai-models.txt b/DocService/sources/ai/engine/providers/internal/together-ai-models.txt new file mode 100644 index 00000000..cc21f144 --- /dev/null +++ b/DocService/sources/ai/engine/providers/internal/together-ai-models.txt @@ -0,0 +1,2083 @@ +[ + { + "id": "WhereIsAI/UAE-Large-V1", + "object": "model", + "created": 1703216381, + "type": "embedding", + "running": false, + "display_name": "UAE-Large-V1", + "organization": "WhereIsAI", + "link": "https://huggingface.co/bert-base-uncased", + "license": "apache-2.0", + "config": { + "chat_template": null, + "stop": [], + "bos_token": null, + "eos_token": null + }, + "pricing": { + "hourly": 0, + "input": 0.016, + "output": 0.016, + "base": 0, + "finetune": 0 + } + }, + { + "id": "meta-llama/Llama-4-Maverick-17B-128E-Instruct-FP8", + "object": "model", + "created": 1743878353, + "type": "chat", + "running": false, + "display_name": "Llama 4 Maverick Instruct (17Bx128E)", + "organization": "Meta", + "link": "https://huggingface.co/meta-llama/Llama-4-Maverick-17B-128E-Instruct-FP8", + "license": "llama4", + "context_length": 1048576, + "config": { + "chat_template": "{{- bos_token }}\n{%- if custom_tools is defined %}\n {%- set tools = custom_tools %}\n{%- endif %}\n{%- if not tools_in_user_message is defined %}\n {%- set tools_in_user_message = true %}\n{%- endif %}\n{%- if not date_string is defined %}\n {%- if strftime_now is defined %}\n {%- set date_string = strftime_now(\"%d %b %Y\") %}\n {%- else %}\n {%- set date_string = \"26 Jul 2024\" %}\n {%- endif %}\n{%- endif %}\n{%- if not tools is defined %}\n {%- set tools = none %}\n{%- endif %}\n\n{#- This block extracts the system message, so we can slot it into the right place. 
#}\n{%- if messages[0]['role'] == 'system' %}\n {%- if messages[0]['content'] is string %}\n {%- set system_message = messages[0]['content']|trim %}\n {%- else %}\n {#- FIXME: The processor requires an array, always. #}\n {%- set system_message = messages[0]['content'][0]['text']|trim %}\n {%- endif %}\n {%- set messages = messages[1:] %}\n {%- set user_supplied_system_message = true %}\n{%- else %}\n {%- set system_message = \"\" %}\n {%- set user_supplied_system_message = false %}\n{%- endif %}\n\n{#- System message if the user supplied one #}\n{%- if user_supplied_system_message %}\n {{- \"<|header_start|>system<|header_end|>\\n\\n\" }}\n {%- if tools is not none %}\n {{- \"Environment: ipython\\n\" }}\n {%- endif %}\n {%- if tools is not none and not tools_in_user_message %}\n {{- \"You have access to the following functions. To call a function, please respond with JSON for a function call.\" }}\n {{- 'Respond in the format {\"name\": function name, \"parameters\": dictionary of argument name and its value}.' 
}}\n {{- \"Do not use variables.\\n\\n\" }}\n {%- for t in tools %}\n {{- t | tojson(indent=4) }}\n {{- \"\\n\\n\" }}\n {%- endfor %}\n {%- endif %}\n {{- system_message }}\n {{- \"<|eot|>\" }}\n{%- endif %}\n\n{#- Custom tools are passed in a user message with some extra guidance #}\n{%- if tools_in_user_message and not tools is none %}\n {#- Extract the first user message so we can plug it in here #}\n {%- if messages | length != 0 %}\n {%- set first_content = messages[0]['content'] %}\n {%- set first_user_message = \"\" %}\n {%- if first_content is string %}\n {%- set first_user_message = first_content %}\n {%- else %}\n {%- for content in first_content %}\n {%- if content['type'] == 'image' %}\n {%- set first_user_message = first_user_message + '<|image|>' %}\n {%- elif content['type'] == 'text' %}\n {%- set first_user_message = first_user_message + content['text'] %}\n {%- endif %}\n {%- endfor %}\n {%- endif %}\n {%- set first_user_message = first_user_message | trim %}\n {%- set messages = messages[1:] %}\n {%- else %}\n {{- raise_exception(\"Cannot put tools in the first user message when there's no first user message!\") }}\n{%- endif %}\n {{- '<|header_start|>user<|header_end|>\\n\\n' -}}\n {{- \"Given the following functions, please respond with a JSON for a function call \" }}\n {{- \"with its proper arguments that best answers the given prompt.\\n\\n\" }}\n {{- 'Respond in the format {\"name\": function name, \"parameters\": dictionary of argument name and its value}.' 
}}\n {{- \"Do not use variables.\\n\\n\" }}\n {%- for t in tools %}\n {{- t | tojson(indent=4) }}\n {{- \"\\n\\n\" }}\n {%- endfor %}\n {{- first_user_message + \"<|eot|>\"}}\n{%- endif %}\n\n{%- for message in messages %}\n {%- if not (message.role == 'ipython' or message.role == 'tool' or 'tool_calls' in message) %}\n {{- '<|header_start|>' + message['role'] + '<|header_end|>\\n\\n' }}\n {%- if message['content'] is string %}\n {{- message['content'] }}\n {%- else %}\n {%- for content in message['content'] %}\n {%- if content['type'] == 'image' %}\n {{- '<|image|>' }}\n {%- elif content['type'] == 'text' %}\n {{- content['text'] }}\n {%- endif %}\n {%- endfor %}\n {%- endif %}\n {{- \"<|eot|>\" }}\n {%- elif 'tool_calls' in message and message.tool_calls|length > 0 %}\n {{- '<|header_start|>assistant<|header_end|>\\n\\n' -}}\n {{- '<|python_start|>' }}\n {%- if message['content'] is string %}\n {{- message['content'] }}\n {%- else %}\n {%- for content in message['content'] %}\n {%- if content['type'] == 'image' %}\n {{- '<|image|>' }}\n {%- elif content['type'] == 'text' %}\n {{- content['text'] }}\n {%- endif %}\n {%- endfor %}\n {%- endif %}\n {{- '<|python_end|>' }}\n {%- for tool_call in message.tool_calls %}\n {{- '{\"name\": \"' + tool_call.function.name + '\", ' }}\n {{- '\"parameters\": ' }}\n {{- tool_call.function.arguments | tojson }}\n {{- \"}\" }}\n {%- endfor %}\n {{- \"<|eot|>\" }}\n {%- elif message.role == \"tool\" or message.role == \"ipython\" %}\n {{- \"<|header_start|>ipython<|header_end|>\\n\\n\" }}\n {%- if message.content is mapping or message.content is iterable %}\n {{- message.content | tojson }}\n {%- else %}\n {{- message.content }}\n {%- endif %}\n {{- \"<|eot|>\" }}\n {%- endif %}\n{%- endfor %}\n{%- if add_generation_prompt %}\n {{- '<|header_start|>assistant<|header_end|>\\n\\n' }}\n{%- endif %}", + "stop": [ + "<|eot|>", + "<|eom|>" + ], + "bos_token": "<|begin_of_text|>", + "eos_token": "<|eot|>" + }, + "pricing": { + "hourly": 
0, + "input": 0.27, + "output": 0.85, + "base": 0, + "finetune": 0 + } + }, + { + "id": "togethercomputer/m2-bert-80M-32k-retrieval", + "object": "model", + "created": 1699120644, + "type": "embedding", + "running": false, + "display_name": "M2-BERT-Retrieval-32k", + "organization": "Together", + "link": "https://huggingface.co/togethercomputer/m2-bert-80M-32k-retrieval", + "license": "apache-2.0", + "context_length": 32768, + "config": { + "chat_template": null, + "stop": [], + "bos_token": null, + "eos_token": null + }, + "pricing": { + "hourly": 0, + "input": 0.008, + "output": 0.008, + "base": 0, + "finetune": 0 + } + }, + { + "id": "google/gemma-2-9b-it", + "object": "model", + "created": 1708648606, + "type": "chat", + "running": false, + "display_name": "Gemma-2 Instruct (9B)", + "organization": "google", + "link": "https://huggingface.co/google/gemma-2-9b-it", + "license": "gemma", + "context_length": 8192, + "config": { + "chat_template": "{{ bos_token }}{% for message in messages %}{% if (message['role'] == 'assistant') %}{% set role = 'model' %}{% else %}{% set role = message['role'] %}{% endif %}{{ '' + role + '\n' + message['content'] | trim + '\n' }}{% endfor %}{% if add_generation_prompt %}{{'model\n'}}{% endif %}", + "stop": [ + "", + "" + ], + "bos_token": "", + "eos_token": null + }, + "pricing": { + "hourly": 0, + "input": 0.3, + "output": 0.3, + "base": 0, + "finetune": 0 + } + }, + { + "id": "cartesia/sonic", + "object": "model", + "created": 0, + "type": "audio", + "running": false, + "display_name": "Cartesia Sonic", + "organization": "Together", + "link": "https://www.cartesia.ai", + "context_length": 0, + "config": { + "chat_template": null, + "stop": [], + "bos_token": null, + "eos_token": null + }, + "pricing": { + "hourly": 0, + "input": 65, + "output": 0, + "base": 0, + "finetune": 0 + } + }, + { + "id": "BAAI/bge-large-en-v1.5", + "object": "model", + "created": 1700837688, + "type": "embedding", + "running": false, + "display_name": 
"BAAI-Bge-Large-1p5", + "organization": "BAAI", + "license": "MIT", + "config": { + "chat_template": null, + "stop": [], + "bos_token": null, + "eos_token": null + }, + "pricing": { + "hourly": 0, + "input": 0.016, + "output": 0.016, + "base": 0, + "finetune": 0 + } + }, + { + "id": "NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO", + "object": "model", + "created": 1705292440, + "type": "chat", + "running": false, + "display_name": "Nous Hermes 2 Mixtral 8X7B Dpo", + "organization": "Nousresearch", + "link": "https://huggingface.co/api/models/NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO", + "license": "apache-2.0", + "context_length": 32768, + "config": { + "chat_template": "{{bos_token}}{% for message in messages %}{{'<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>' + '\n'}}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>assistant\n' }}{% endif %}", + "stop": [ + "<|im_end|>" + ], + "bos_token": "", + "eos_token": "<|im_end|>" + }, + "pricing": { + "hourly": 0, + "input": 0.6, + "output": 0.6, + "base": 0, + "finetune": 0 + } + }, + { + "id": "meta-llama/Llama-2-13b-chat-hf", + "object": "model", + "created": 1689720415, + "type": "chat", + "running": false, + "display_name": "LLaMA-2 Chat (13B)", + "organization": "Meta", + "link": "https://huggingface.co/meta-llama/Llama-2-13b-chat-hf", + "license": "LLaMA license Agreement (Meta)", + "context_length": 4096, + "config": { + "chat_template": "{% if messages[0]['role'] == 'system' %}{% set loop_messages = messages[1:] %}{% set system_message = messages[0]['content'] %}{% else %}{% set loop_messages = messages %}{% set system_message = false %}{% endif %}{% for message in loop_messages %}{% if loop.index0 == 0 and system_message != false %}{% set content = '<>\\n' + system_message + '\\n<>\\n\\n' + message['content'] %}{% else %}{% set content = message['content'] %}{% endif %}{% if message['role'] == 'user' or message['role'] == 'tool' %}{{ bos_token + '[INST] ' + content + ' 
[/INST]' }}{% elif message['role'] == 'system' %}{{ '<>\\n' + content + '\\n<>\\n\\n' }}{% elif message['role'] == 'assistant' %}{{ ' ' + content + ' ' + eos_token }}{% endif %}{% endfor %}", + "stop": [ + "[/INST]", + "" + ], + "bos_token": "", + "eos_token": "" + }, + "pricing": { + "hourly": 0, + "input": 0.22, + "output": 0.22, + "base": 0, + "finetune": 0 + } + }, + { + "id": "black-forest-labs/FLUX.1-schnell-Free", + "object": "model", + "created": 0, + "type": "image", + "running": false, + "display_name": "FLUX.1 [schnell] Free", + "organization": "Black Forest Labs", + "link": "https://huggingface.co/black-forest-labs/FLUX.1-schnell", + "config": { + "chat_template": null, + "stop": [], + "bos_token": null, + "eos_token": null + }, + "pricing": { + "hourly": 0, + "input": 0, + "output": 0, + "base": 0, + "finetune": 0 + } + }, + { + "id": "black-forest-labs/FLUX.1.1-pro", + "object": "model", + "created": 0, + "type": "image", + "running": false, + "display_name": "FLUX1.1 [pro]", + "organization": "Black Forest Labs", + "link": "https://huggingface.co/black-forest-labs/FLUX.1-schnell", + "config": { + "chat_template": null, + "stop": [], + "bos_token": null, + "eos_token": null + }, + "pricing": { + "hourly": 0, + "input": 0, + "output": 0, + "base": 0, + "finetune": 0 + } + }, + { + "id": "Qwen/Qwen2.5-7B-Instruct-Turbo", + "object": "model", + "created": 1728671048, + "type": "chat", + "running": false, + "display_name": "Qwen2.5 7B Instruct Turbo", + "organization": "Qwen", + "link": "https://huggingface.co/Qwen/Qwen2.5-7B-Instruct", + "license": "Qwen", + "context_length": 32768, + "config": { + "chat_template": "{%- if tools %}\n {{- '<|im_start|>system\\n' }}\n {%- if messages[0]['role'] == 'system' %}\n {{- messages[0]['content'] }}\n {%- else %}\n {{- 'You are Qwen, created by Alibaba Cloud. You are a helpful assistant.' 
}}\n {%- endif %}\n {{- \"\\n\\n# Tools\\n\\nYou may call one or more functions to assist with the user query.\\n\\nYou are provided with function signatures within XML tags:\\n\" }}\n {%- for tool in tools %}\n {{- \"\\n\" }}\n {{- tool | tojson }}\n {%- endfor %}\n {{- \"\\n\\n\\nFor each function call, return a json object with function name and arguments within XML tags:\\n\\n{\\\"name\\\": , \\\"arguments\\\": }\\n<|im_end|>\\n\" }}\n{%- else %}\n {%- if messages[0]['role'] == 'system' %}\n {{- '<|im_start|>system\\n' + messages[0]['content'] + '<|im_end|>\\n' }}\n {%- else %}\n {{- '<|im_start|>system\\nYou are Qwen, created by Alibaba Cloud. You are a helpful assistant.<|im_end|>\\n' }}\n {%- endif %}\n{%- endif %}\n{%- for message in messages %}\n {%- if (message.role == \"user\") or (message.role == \"system\" and not loop.first) or (message.role == \"assistant\" and not message.tool_calls) %}\n {{- '<|im_start|>' + message.role + '\\n' + message.content + '<|im_end|>' + '\\n' }}\n {%- elif message.role == \"assistant\" %}\n {{- '<|im_start|>' + message.role }}\n {%- if message.content %}\n {{- '\\n' + message.content }}\n {%- endif %}\n {%- for tool_call in message.tool_calls %}\n {%- if tool_call.function is defined %}\n {%- set tool_call = tool_call.function %}\n {%- endif %}\n {{- '\\n\\n{\"name\": \"' }}\n {{- tool_call.name }}\n {{- '\", \"arguments\": ' }}\n {{- tool_call.arguments | tojson }}\n {{- '}\\n' }}\n {%- endfor %}\n {{- '<|im_end|>\\n' }}\n {%- elif message.role == \"tool\" %}\n {%- if (loop.index0 == 0) or (messages[loop.index0 - 1].role != \"tool\") %}\n {{- '<|im_start|>user' }}\n {%- endif %}\n {{- '\\n\\n' }}\n {{- message.content }}\n {{- '\\n' }}\n {%- if loop.last or (messages[loop.index0 + 1].role != \"tool\") %}\n {{- '<|im_end|>\\n' }}\n {%- endif %}\n {%- endif %}\n{%- endfor %}\n{%- if add_generation_prompt %}\n {{- '<|im_start|>assistant\\n' }}\n{%- endif %}\n", + "stop": [ + "<|im_end|>" + ], + "bos_token": "<|endoftext|>", 
+ "eos_token": "<|im_end|>" + }, + "pricing": { + "hourly": 0, + "input": 0.3, + "output": 0.3, + "base": 0, + "finetune": 0 + } + }, + { + "id": "deepseek-ai/DeepSeek-R1-Distill-Llama-70B-free", + "object": "model", + "created": 1738187359, + "type": "chat", + "running": false, + "display_name": "DeepSeek R1 Distill Llama 70B Free", + "organization": "DeepSeek", + "link": "https://huggingface.co/deepseek-ai/DeepSeek-R1-Distill-Llama-70B", + "license": "mit", + "context_length": 8192, + "config": { + "chat_template": "{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{% set system_prompt='' %}{% set is_tool = false %}{%- for message in messages %}{%- if message['role'] == 'system' %}{% set system_prompt = message['content'] %}{%- endif %}{%- endfor %}{{bos_token}}{{system_prompt}}{%- for message in messages %}{%- if message['role'] == 'user' %}{%- set is_tool = false -%}{{'<|User|>' + message['content']}}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is none %}{%- set is_tool = false -%}{%- set is_output_first = true -%}{%- set is_first = false -%}{%- for tool in message['tool_calls']%}{%- if is_first %}{{'<|Assistant|><|tool▁calls▁begin|><|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{%- set is_first = true -%}{%- else %}{{'\\n' + '<|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{{'<|tool▁calls▁end|><|end▁of▁sentence|>'}}{%- endif %}{%- endfor %}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is not none %}{%- if is_tool %}{{'<|tool▁outputs▁end|>' + message['content'] + '<|end▁of▁sentence|>'}}{%- set is_tool = false -%}{%- else %}{% set content = message['content'] %}{% if '' in content %}{% set parts = content 
| split('') %}{% set content = parts[parts.length-1] %}{% endif %}{{'<|Assistant|>' + content + '<|end▁of▁sentence|>'}}{%- endif %}{%- endif %}{%- if message['role'] == 'tool' %}{%- set is_tool = true -%}{%- if is_output_first %}{{'<|tool▁outputs▁begin|><|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- set is_output_first = false %}{%- else %}{{'\\n<|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- endif %}{%- endif %}{%- endfor -%}{% if is_tool %}{{'<|tool▁outputs▁end|>'}}{% endif %}{% if add_generation_prompt and not is_tool %}{{'<|Assistant|>'}}{% endif %}", + "stop": [ + "<|end▁of▁sentence|>" + ], + "bos_token": "<|begin▁of▁sentence|>", + "eos_token": "<|end▁of▁sentence|>" + }, + "pricing": { + "hourly": 0, + "input": 0, + "output": 0, + "base": 0, + "finetune": 0 + } + }, + { + "id": "meta-llama-llama-2-70b-hf", + "object": "model", + "created": 1736983494, + "type": "language", + "running": false, + "display_name": "LLaMA-2 (70B)", + "link": "https://huggingface.co/api/models/meta-llama/Llama-2-70b-hf", + "license": "llama2", + "context_length": 4096, + "config": { + "chat_template": null, + "stop": [ + "" + ], + "bos_token": "", + "eos_token": "" + }, + "pricing": { + "hourly": 0, + "input": 0.9, + "output": 0.9, + "base": 0, + "finetune": 0 + } + }, + { + "id": "BAAI/bge-base-en-v1.5", + "object": "model", + "created": 1700837779, + "type": "embedding", + "running": false, + "display_name": "BAAI-Bge-Base-1.5", + "organization": "BAAI", + "link": "https://huggingface.co/api/models/BAAI/bge-base-en-v1.5", + "license": "MIT", + "context_length": 512, + "config": { + "chat_template": null, + "stop": [], + "bos_token": "[PAD]", + "eos_token": null + }, + "pricing": { + "hourly": 0, + "input": 0.008, + "output": 0.008, + "base": 0, + "finetune": 0 + } + }, + { + "id": "Gryphe/MythoMax-L2-13b", + "object": "model", + "created": 1693943905, + "type": "chat", + "running": false, + "display_name": "MythoMax-L2 (13B)", + 
"organization": "Gryphe", + "license": "other", + "context_length": 4096, + "config": { + "chat_template": "{% for message in messages %}{% if message['role'] == 'user' %}{{ '### Instruction:\n' + message['content'] + '\n' }}{% else %}{{ '### Response:\n' + message['content'] + '\n' }}{% endif %}{% endfor %}{{ '### Response:' }}", + "stop": [ + "" + ], + "bos_token": null, + "eos_token": null + }, + "pricing": { + "hourly": 0, + "input": 0.3, + "output": 0.3, + "base": 0, + "finetune": 0 + } + }, + { + "id": "google/gemma-2-27b-it", + "object": "model", + "created": 1708648606, + "type": "chat", + "running": false, + "display_name": "Gemma-2 Instruct (27B)", + "organization": "Google", + "link": "https://huggingface.co/google/gemma-2b-it", + "license": "gemma-terms-of-use", + "context_length": 8192, + "config": { + "chat_template": "{{ bos_token }}{% for message in messages %}{% if (message['role'] == 'assistant') %}{% set role = 'model' %}{% else %}{% set role = message['role'] %}{% endif %}{{ '' + role + '\n' + message['content'] | trim + '\n' }}{% endfor %}{% if add_generation_prompt %}{{'model\n'}}{% endif %}", + "stop": [ + "", + "" + ], + "bos_token": "", + "eos_token": "" + }, + "pricing": { + "hourly": 0, + "input": 0.8, + "output": 0.8, + "base": 0, + "finetune": 0 + } + }, + { + "id": "Qwen/Qwen2-VL-72B-Instruct", + "object": "model", + "created": 1736448718, + "type": "chat", + "running": false, + "display_name": "Qwen2-VL (72B) Instruct", + "organization": "Qwen", + "link": "https://huggingface.co/Qwen/Qwen2-VL-72B-Instruct", + "license": "tongyi-qianwen", + "context_length": 32768, + "config": { + "chat_template": "{% set image_count = namespace(value=0) %}{% set video_count = namespace(value=0) %}{% for message in messages %}{% if loop.first and message['role'] != 'system' %}<|im_start|>system\nYou are a helpful assistant.<|im_end|>\n{% endif %}<|im_start|>{{ message['role'] }}\n{% if message['content'] is string %}{{ message['content'] 
}}<|im_end|>\n{% else %}{% for content in message['content'] %}{% if content['type'] == 'image' or 'image' in content or 'image_url' in content %}{% set image_count.value = image_count.value + 1 %}{% if add_vision_id %}Picture {{ image_count.value }}: {% endif %}<|vision_start|><|image_pad|><|vision_end|>{% elif content['type'] == 'video' or 'video' in content %}{% set video_count.value = video_count.value + 1 %}{% if add_vision_id %}Video {{ video_count.value }}: {% endif %}<|vision_start|><|video_pad|><|vision_end|>{% elif 'text' in content %}{{ content['text'] }}{% endif %}{% endfor %}<|im_end|>\n{% endif %}{% endfor %}{% if add_generation_prompt %}<|im_start|>assistant\n{% endif %}", + "stop": [ + "<|im_end|>", + "<|endoftext|>" + ], + "bos_token": null, + "eos_token": "<|im_end|>" + }, + "pricing": { + "hourly": 0, + "input": 1.2, + "output": 1.2, + "base": 0, + "finetune": 0 + } + }, + { + "id": "meta-llama/LlamaGuard-2-8b", + "object": "model", + "created": 1713429380, + "type": "moderation", + "running": false, + "display_name": "Meta Llama Guard 2 8B", + "organization": "Meta", + "link": null, + "license": "Llama-3 (Other)", + "context_length": 8192, + "config": { + "chat_template": "{% if messages.length % 2 == 0 %}{% set final_role = 'Agent' %}{% else %}{% set final_role = 'User' %}{% endif %}[INST] Task: Check if there is unsafe content in '{{ final_role }}' messages in conversations according our safety policy with the below categories.\n\n\nS1: Violent Crimes.\nS2: Non-Violent Crimes.\nS3: Sex Crimes.\nS4: Child Exploitation.\nS5: Specialized Advice.\nS6: Privacy.\nS7: Intellectual Property.\nS8: Indiscriminate Weapons.\nS9: Hate.\nS10: Self-Harm.\nS11: Sexual Content.\n\n\n\n\n{% for message in messages %}{% if message['role'] == 'user' and loop.index0 % 2 == 0 %}User: {{ message['content'].trim() }}{% elif message['role'] == 'assistant' and loop.index0 % 2 != 0 %}Agent: {{ message['content'].trim() }}{% else %}{{ raise_exception('Conversation roles 
must alternate user/assistant/user/assistant/...') }}{% endif %}{% if loop.last %}{% set role = message['role'] %}{% endif %}{{ '\n\n' }}{% endfor %}\n\nProvide your safety assessment for ONLY THE LAST {{ role }} message in the above conversation:\n - First line must read 'safe' or 'unsafe'.\n - If unsafe, a second line must include a comma-separated list of violated categories. [/INST]", + "stop": [], + "bos_token": null, + "eos_token": null + }, + "pricing": { + "hourly": 0, + "input": 0.2, + "output": 0.2, + "base": 0, + "finetune": 0 + } + }, + { + "id": "cartesia/sonic-2", + "object": "model", + "created": 0, + "type": "audio", + "running": false, + "display_name": "Cartesia Sonic 2", + "organization": "Together", + "link": "https://www.cartesia.ai", + "context_length": 0, + "config": { + "chat_template": null, + "stop": [], + "bos_token": null, + "eos_token": null + }, + "pricing": { + "hourly": 0, + "input": 65, + "output": 0, + "base": 0, + "finetune": 0 + } + }, + { + "id": "togethercomputer/m2-bert-80M-8k-retrieval", + "object": "model", + "created": 1699120644, + "type": "embedding", + "running": false, + "display_name": "M2-BERT-Retrieval-8k", + "organization": "Together", + "link": "https://huggingface.co/togethercomputer/m2-bert-80M-8k-retrieval", + "license": "apache-2.0", + "context_length": 8192, + "config": { + "chat_template": null, + "stop": [], + "bos_token": null, + "eos_token": null + }, + "pricing": { + "hourly": 0, + "input": 0.008, + "output": 0.008, + "base": 0, + "finetune": 0 + } + }, + { + "id": "meta-llama/Llama-3.3-70B-Instruct-Turbo-Free", + "object": "model", + "created": 1733967427, + "type": "chat", + "running": false, + "display_name": "Meta Llama 3.3 70B Instruct Turbo Free", + "organization": "Meta", + "link": "https://huggingface.co/meta-llama/Llama-3.3-70B-Instruct", + "license": "Llama-3.3 (Other)", + "context_length": 131072, + "config": { + "chat_template": "{{- bos_token }}\n{%- if custom_tools is defined %}\n {%- set 
tools = custom_tools %}\n{%- endif %}\n{%- if not tools_in_user_message is defined %}\n {%- set tools_in_user_message = true %}\n{%- endif %}\n{%- if not date_string is defined %}\n {%- set date_string = \"26 Jul 2024\" %}\n{%- endif %}\n{%- if not tools is defined %}\n {%- set tools = none %}\n{%- endif %}\n\n{#- This block extracts the system message, so we can slot it into the right place. #}\n{%- if messages[0]['role'] == 'system' %}\n {%- set system_message = messages[0]['content']|trim %}\n {%- set messages = messages[1:] %}\n{%- else %}\n {%- set system_message = \"\" %}\n{%- endif %}\n\n{#- System message + builtin tools #}\n{{- \"<|start_header_id|>system<|end_header_id|>\\n\\n\" }}\n{%- if builtin_tools is defined or tools is not none %}\n {{- \"Environment: ipython\\n\" }}\n{%- endif %}\n{%- if builtin_tools is defined %}\n {{- \"Tools: \" + builtin_tools | reject('equalto', 'code_interpreter') | join(\", \") + \"\\n\\n\"}}\n{%- endif %}\n{{- \"Cutting Knowledge Date: December 2023\\n\" }}\n{{- \"Today Date: \" + date_string + \"\\n\\n\" }}\n{%- if tools is not none and not tools_in_user_message %}\n {{- \"You have access to the following functions. To call a function, please respond with JSON for a function call.\" }}\n {{- 'Respond in the format {\"name\": function name, \"parameters\": dictionary of argument name and its value}.' 
}}\n {{- \"Do not use variables.\\n\\n\" }}\n {%- for t in tools %}\n {{- t | tojson(indent=4) }}\n {{- \"\\n\\n\" }}\n {%- endfor %}\n{%- endif %}\n{{- system_message }}\n{{- \"<|eot_id|>\" }}\n\n{#- Custom tools are passed in a user message with some extra guidance #}\n{%- if tools_in_user_message and not tools is none %}\n {#- Extract the first user message so we can plug it in here #}\n {%- if messages | length != 0 %}\n {%- set first_user_message = messages[0]['content']|trim %}\n {%- set messages = messages[1:] %}\n {%- else %}\n {{- raise_exception(\"Cannot put tools in the first user message when there's no first user message!\") }}\n{%- endif %}\n {{- '<|start_header_id|>user<|end_header_id|>\\n\\n' -}}\n {{- \"Given the following functions, please respond with a JSON for a function call \" }}\n {{- \"with its proper arguments that best answers the given prompt.\\n\\n\" }}\n {{- 'Respond in the format {\"name\": function name, \"parameters\": dictionary of argument name and its value}.' 
}}\n {{- \"Do not use variables.\\n\\n\" }}\n {%- for t in tools %}\n {{- t | tojson(indent=4) }}\n {{- \"\\n\\n\" }}\n {%- endfor %}\n {{- first_user_message + \"<|eot_id|>\"}}\n{%- endif %}\n\n{%- for message in messages %}\n {%- if not (message.role == 'ipython' or message.role == 'tool' or 'tool_calls' in message) %}\n {{- '<|start_header_id|>' + message['role'] + '<|end_header_id|>\\n\\n'+ message['content'] | trim + '<|eot_id|>' }}\n {%- elif 'tool_calls' in message %}\n {%- if not message.tool_calls|length == 1 %}\n {{- raise_exception(\"This model only supports single tool-calls at once!\") }}\n {%- endif %}\n {%- set tool_call = message.tool_calls[0].function %}\n {%- if builtin_tools is defined and tool_call.name in builtin_tools %}\n {{- '<|start_header_id|>assistant<|end_header_id|>\\n\\n' -}}\n {{- \"<|python_tag|>\" + tool_call.name + \".call(\" }}\n {%- for arg_name, arg_val in tool_call.arguments | items %}\n {{- arg_name + '=\"' + arg_val + '\"' }}\n {%- if not loop.last %}\n {{- \", \" }}\n {%- endif %}\n {%- endfor %}\n {{- \")\" }}\n {%- else %}\n {{- '<|start_header_id|>assistant<|end_header_id|>\\n\\n' -}}\n {{- '{\"name\": \"' + tool_call.name + '\", ' }}\n {{- '\"parameters\": ' }}\n {{- tool_call.arguments | tojson }}\n {{- \"}\" }}\n {%- endif %}\n {%- if builtin_tools is defined %}\n {#- This means we're in ipython mode #}\n {{- \"<|eom_id|>\" }}\n {%- else %}\n {{- \"<|eot_id|>\" }}\n {%- endif %}\n {%- elif message.role == \"tool\" or message.role == \"ipython\" %}\n {{- \"<|start_header_id|>ipython<|end_header_id|>\\n\\n\" }}\n {%- if message.content is mapping or message.content is iterable %}\n {{- message.content | tojson }}\n {%- else %}\n {{- message.content }}\n {%- endif %}\n {{- \"<|eot_id|>\" }}\n {%- endif %}\n{%- endfor %}\n{%- if add_generation_prompt %}\n {{- '<|start_header_id|>assistant<|end_header_id|>\\n\\n' }}\n{%- endif %}\n", + "stop": [ + "<|eot_id|>", + "<|eom_id|>" + ], + "bos_token": "<|begin_of_text|>", + 
"eos_token": "<|eot_id|>" + }, + "pricing": { + "hourly": 0, + "input": 0, + "output": 0, + "base": 0, + "finetune": 0 + } + }, + { + "id": "togethercomputer/MoA-1", + "object": "model", + "created": 1733856970, + "type": "chat", + "running": false, + "display_name": "Together AI MoA-1", + "organization": "Together AI", + "link": "https://github.com/togethercomputer/MoA", + "context_length": 32768, + "config": { + "chat_template": null, + "stop": [], + "bos_token": null, + "eos_token": null + }, + "pricing": { + "hourly": 0, + "input": 0, + "output": 0, + "base": 0, + "finetune": 0 + } + }, + { + "id": "meta-llama/Meta-Llama-3-70B-Instruct-Turbo", + "object": "model", + "created": 0, + "type": "chat", + "running": false, + "display_name": "Meta Llama 3 70B Instruct Turbo", + "organization": "Meta", + "link": "https://huggingface.co/meta-llama/Meta-Llama-3-70B-Instruct", + "license": "Llama-3 (Other)", + "context_length": 8192, + "config": { + "chat_template": "{% set loop_messages = messages %}{% for message in loop_messages %}{% set content = '<|start_header_id|>' + message['role'] + '<|end_header_id|>\n\n'+ message['content'] | trim + '<|eot_id|>' %}{% if loop.index0 == 0 %}{% set content = bos_token + content %}{% endif %}{{ content }}{% endfor %}{{ '<|start_header_id|>assistant<|end_header_id|>\n\n' }}", + "stop": [ + "<|eot_id|>" + ], + "bos_token": "<|begin_of_text|>", + "eos_token": "<|end_of_text|>" + }, + "pricing": { + "hourly": 0, + "input": 0.88, + "output": 0.88, + "base": 0, + "finetune": 0 + } + }, + { + "id": "mistralai/Mistral-7B-Instruct-v0.2", + "object": "model", + "created": 1702325373, + "type": "chat", + "running": false, + "display_name": "Mistral (7B) Instruct v0.2", + "organization": "mistralai", + "license": "apache-2.0", + "context_length": 32768, + "config": { + "chat_template": "{% if messages[0]['role'] == 'system' %}{% set loop_messages = messages[1:] %}{% set system_message = messages[0]['content'] %}{% else %}{% set loop_messages = 
messages %}{% set system_message = false %}{% endif %}{% for message in loop_messages %}{% if loop.index0 == 0 and system_message != false %}{% set content = '<>\\n' + system_message + '\\n<>\\n\\n' + message['content'] %}{% else %}{% set content = message['content'] %}{% endif %}{% if message['role'] == 'user' or message['role'] == 'tool' %}{{ bos_token + '[INST] ' + content + ' [/INST]' }}{% elif message['role'] == 'system' %}{{ '<>\\n' + content + '\\n<>\\n\\n' }}{% elif message['role'] == 'assistant' %}{{ ' ' + content + ' ' + eos_token }}{% endif %}{% endfor %}", + "stop": [ + "[/INST]", + "" + ], + "bos_token": "", + "eos_token": "" + }, + "pricing": { + "hourly": 0, + "input": 0.2, + "output": 0.2, + "base": 0, + "finetune": 0 + } + }, + { + "id": "togethercomputer/m2-bert-80M-2k-retrieval", + "object": "model", + "created": 1699985626, + "type": "embedding", + "running": false, + "display_name": "M2-BERT-Retrieval-2K", + "organization": "Together", + "license": "Apache-2", + "config": { + "chat_template": null, + "stop": [], + "bos_token": null, + "eos_token": null + }, + "pricing": { + "hourly": 0, + "input": 0.008, + "output": 0.008, + "base": 0, + "finetune": 0 + } + }, + { + "id": "google/gemma-2b-it", + "object": "model", + "created": 1708648606, + "type": "chat", + "running": false, + "display_name": "Gemma Instruct (2B)", + "organization": "Google", + "link": "https://huggingface.co/google/gemma-2b-it", + "license": "gemma-terms-of-use", + "context_length": 8192, + "config": { + "chat_template": "{{ bos_token }}{% if (message['role'] == 'assistant') %}{% set role = 'model' %}{% else %}{% set role = message['role'] %}{% endif %}{% for message in messages %}{{'' + role + '\n' + message['content'] + '' + '\n'}}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>model\n' }}{% endif %}", + "stop": [ + "", + "" + ], + "bos_token": "", + "eos_token": null + }, + "pricing": { + "hourly": 0, + "input": 0.1, + "output": 0.1, + "base": 0, + "finetune": 0 
+ } + }, + { + "id": "black-forest-labs/FLUX.1-pro", + "object": "model", + "created": 0, + "type": "image", + "running": false, + "display_name": "FLUX.1 [pro]", + "organization": "Black Forest Labs", + "link": "https://huggingface.co/black-forest-labs/FLUX.1-schnell", + "config": { + "chat_template": null, + "stop": [], + "bos_token": null, + "eos_token": null + }, + "pricing": { + "hourly": 0, + "input": 0, + "output": 0, + "base": 0, + "finetune": 0 + } + }, + { + "id": "mistralai/Mistral-Small-24B-Instruct-2501", + "object": "model", + "created": 1738246136, + "type": "chat", + "running": false, + "display_name": "Mistral Small (24B) Instruct 25.01", + "organization": "mistralai", + "link": "https://huggingface.co/mistralai/Mistral-Small-Instruct-2501", + "license": "apache-2.0", + "context_length": 32768, + "config": { + "chat_template": "{%- if messages[0][\"role\"] == \"system\" %}{%- set system_message = messages[0][\"content\"] %}{%- set loop_messages = messages[1:] %}{%- else %}{%- set today = strftime_now(\"%Y-%m-%d\") %}{%- set system_message = \"You are Mistral Small 3, a Large Language Model (LLM) created by Mistral AI, a French startup headquartered in Paris.\\nYour knowledge base was last updated on 2023-10-01. The current date is \" + today + \".\\n\\nWhen you're not sure about some information, you say that you don't have the information and don't make up anything.\\nIf the user's question is not clear, ambiguous, or does not provide enough context for you to accurately answer the question, you do not try to answer it right away and you rather ask the user to clarify their request (e.g. 
\\\"What are some good restaurants around me?\\\" => \\\"Where are you?\\\" or \\\"When is the next flight to Tokyo\\\" => \\\"Where do you travel from?\\\")\" %}{%- set loop_messages = messages %}{%- endif %}{%- if not tools is defined %}{%- set tools = none %}{%- elif tools is not none %}{%- set parallel_tool_prompt = \"You are a helpful assistant that can call tools. If you call one or more tools, format them in a single JSON array or objects, where each object is a tool call, not as separate objects outside of an array or multiple arrays. Use the format [{\\\"name\\\": tool call name, \\\"arguments\\\": tool call arguments}, additional tool calls] if you call more than one tool. If you call tools, do not attempt to interpret them or otherwise provide a response until you receive a tool call result that you can interpret for the user.\" %}{%- if system_message is defined %}{%- set system_message = parallel_tool_prompt + \"\\n\\n\" + system_message %}{%- else %}{%- set system_message = parallel_tool_prompt %}{%- endif %}\n{%- endif %}{%- set user_messages = loop_messages | selectattr(\"role\", \"equalto\", \"user\") | list %}{%- for message in loop_messages | rejectattr(\"role\", \"equalto\", \"tool\") | rejectattr(\"role\", \"equalto\", \"tool_results\") | selectattr(\"tool_calls\", \"undefined\") %}{%- if (message[\"role\"] == \"user\") != (loop.index0 % 2 == 0) %}{{- raise_exception(\"After the optional system message, conversation roles must alternate user/assistant/user/assistant/...\") }}{%- endif %}{%- endfor %}{{- bos_token }}{%- for message in loop_messages %}{%- if message[\"role\"] == \"user\" %}{%- if tools is not none and (message == user_messages[user_messages.length-1]) %}{{- \"[AVAILABLE_TOOLS] [\" }}{%- for tool in tools %}{%- set tool = tool.function %}{{- '{\"type\": \"function\", \"function\": {' }}{%- for key, val in tool.items() if key != \"return\" %}{%- if val is string %}{{- '\"' + key + '\": \"' + val + '\"' }}{%- else %}{{- '\"' + key + 
'\": ' + val|tojson }}{%- endif %}{%- if not loop.last %}{{- \", \" }}{%- endif %}{%- endfor %}{{- \"}}\" }}{%- if not loop.last %}{{- \", \" }}{%- else %}{{- \"]\" }}{%- endif %}{%- endfor %}{{- \"[/AVAILABLE_TOOLS]\" }}{%- endif %}{%- if loop.last and system_message is defined %}{{- \"[SYSTEM_PROMPT]\" + system_message + \"[/SYSTEM_PROMPT][INST]\" + message[\"content\"] + \"[/INST]\" }}{%- else %}{{- \"[INST]\" + message[\"content\"] + \"[/INST]\" }}{%- endif %}{%- elif message[\"role\"] == \"tool_calls\" or message.tool_calls is defined %}{%- if message.tool_calls is defined %}{%- set tool_calls = message.tool_calls %}{%- else %}{%- set tool_calls = message.content %}{%- endif %}{{- \"[TOOL_CALLS] [\" }}{%- for tool_call in tool_calls %}{%- set out = tool_call.function|tojson %}{{- out }}{%- if not tool_call.id is defined or tool_call.id|length < 9 %}{{- raise_exception(\"Tool call IDs should be alphanumeric strings with length >= 9! (1)\" + tool_call.id) }}{%- endif %}{{- ', \"id\": \"' + tool_call.id + '\"}' }}{%- if not loop.last %}{{- \", \" }}{%- else %}{{- \"]\" + eos_token }}{%- endif %}{%- endfor %}{%- elif message[\"role\"] == \"assistant\" %}{{- \" \" + message[\"content\"] + eos_token }}{%- elif message[\"role\"] == \"tool_results\" or message[\"role\"] == \"tool\" %}{%- if message.content is defined and message.content.content is defined %}{%- set content = message.content.content %}{%- else %}{%- set content = message.content %}{%- endif %}{{- '[TOOL_RESULTS] {\"content\": ' + content|string + \", \" }}{%- if not message.tool_call_id is defined or message.tool_call_id|length < 9 %}{{- raise_exception(\"Tool call IDs should be alphanumeric strings with length >= 9! 
(2)\" + message.tool_call_id) }}{%- endif %}{{- '\"call_id\": \"' + message.tool_call_id + '\"}[/TOOL_RESULTS]' }}{%- else %}{{- raise_exception(\"Only user and assistant roles are supported, with the exception of an initial optional system message!\") }}{%- endif %}\n{%- endfor %}", + "stop": [ + "[/INST]", + "" + ], + "bos_token": null, + "eos_token": null + }, + "pricing": { + "hourly": 0, + "input": 0.8, + "output": 0.8, + "base": 0, + "finetune": 0 + } + }, + { + "id": "Gryphe/MythoMax-L2-13b-Lite", + "object": "model", + "created": 1693943905, + "type": "chat", + "running": false, + "display_name": "Gryphe MythoMax L2 Lite (13B)", + "organization": "Gryphe", + "license": "other", + "context_length": 4096, + "config": { + "chat_template": "{% for message in messages %}{% if message['role'] == 'user' %}{{ '### Instruction:\n' + message['content'] + '\n' }}{% else %}{{ '### Response:\n' + message['content'] + '\n' }}{% endif %}{% endfor %}{{ '### Response:' }}", + "stop": [ + "" + ], + "bos_token": null, + "eos_token": null + }, + "pricing": { + "hourly": 0, + "input": 0.1, + "output": 0.1, + "base": 0, + "finetune": 0 + } + }, + { + "id": "scb10x/scb10x-llama3-1-typhoon2-70b-instruct", + "object": "model", + "created": 1743036812, + "type": "chat", + "running": false, + "display_name": "Typhoon 2 70B Instruct", + "organization": "SCB10X", + "link": "https://huggingface.co/api/models/scb10x/llama3.1-typhoon2-70b-instruct", + "license": "llama3.1", + "context_length": 8192, + "config": { + "chat_template": "{{- bos_token }}\n{%- if custom_tools is defined %}\n {%- set tools = custom_tools %}\n{%- endif %}\n{%- if not date_string is defined %}\n {%- set date_string = \"26 Jul 2024\" %}\n{%- endif %}\n{%- if not tools is defined %}\n {%- set tools = none %}\n{%- endif %}\n\n{#- This block extracts the system message, so we can slot it into the right place. 
#}\n{%- if messages[0]['role'] == 'system' %}\n {%- set system_message = messages[0]['content']|trim %}\n {%- set messages = messages[1:] %}\n{%- else %}\n {%- set system_message = \"\" %}\n{%- endif %}\n\n{#- System message + builtin tools #}\n{{- \"<|start_header_id|>system<|end_header_id|>\\n\\n\" }}\n{{- \"Cutting Knowledge Date: December 2023\\n\" }}\n{{- \"Today Date: \" + date_string + \"\\n\\n\" }}\n{{- system_message }}\n{%- if tools is not none %}\n {{- \"\\n\" }}\n {{- \"You are given a question and a set of possible functions. Based on the question, you will need to make one or more function/tool calls to achieve the purpose.\" }}\n {{- \"If none of the function can be used, point it out. If the given question lacks the parameters required by the function, also point it out.\" }}\n {{- \"You should only return the function call in tools call sections.\" }}\n {{- \"If you decide to invoke any of the function(s), you MUST put it in the format of [Function(arguments1={{params_name1: params_value1,params_name2: params_value2, ...}}, name1=function_name1), Function(arguments2={{params}}, name2=function_name2) , ...]\"}}\n {{- \"You SHOULD NOT include any other text in the response.\\nHere is a list of functions in JSON format that you can invoke.\\n\" }}\n {%- for t in tools %}\n {{- t | tojson(indent=4) }}\n {{- \"\\n\\n\" }}\n {%- endfor %}\n{%- endif %}\n{{- \"<|eot_id|>\" }}\n\n\n{%- for message in messages %}\n {%- if not (message.role == 'tool') %}\n {{- '<|start_header_id|>' + message['role'] + '<|end_header_id|>\\n\\n'+ message['content'] | trim + '<|eot_id|>' }}\n {%- elif message.role == \"tool\" %}\n {{- \"<|start_header_id|>tool<|end_header_id|>\\n\\n\" }}\n {%- if message.content is mapping or message.content is iterable %}\n {{- message.content | tojson }}\n {%- else %}\n {{- message.content }}\n {%- endif %}\n {{- \"<|eot_id|>\" }}\n {%- endif %}\n{%- endfor %}\n{%- if add_generation_prompt %}\n {{- 
'<|start_header_id|>assistant<|end_header_id|>\\n\\n' }}\n{%- endif %}", + "stop": [ + "<|eot_id|>" + ], + "bos_token": "<|begin_of_text|>", + "eos_token": "<|eot_id|>" + }, + "pricing": { + "hourly": 0, + "input": 0.88, + "output": 0.88, + "base": 0, + "finetune": 0 + } + }, + { + "id": "black-forest-labs/FLUX.1-redux", + "object": "model", + "created": 1732148338, + "type": "image", + "running": false, + "display_name": "FLUX.1 Redux [dev]", + "organization": "Black Forest Labs", + "link": "https://huggingface.co/black-forest-labs/FLUX.1-Redux-dev", + "config": { + "chat_template": null, + "stop": [], + "bos_token": null, + "eos_token": null + }, + "pricing": { + "hourly": 0, + "input": 0, + "output": 0, + "base": 0, + "finetune": 0 + } + }, + { + "id": "meta-llama/Meta-Llama-Guard-3-8B", + "object": "model", + "created": 1713429380, + "type": "moderation", + "running": false, + "display_name": "Meta Llama Guard 3 8B", + "organization": "Meta", + "link": null, + "license": "llama", + "context_length": 8192, + "config": { + "chat_template": "{% if messages.length % 2 == 0 %}{% set final_role = 'Agent' %}{% else %}{% set final_role = 'User' %}{% endif %}<|begin_of_text|><|start_header_id|>user<|end_header_id|>\n\nTask: Check if there is unsafe content in '{{ final_role }}' messages in conversations according our safety policy with the below categories.\n\n\nS1: Violent Crimes.\nS2: Non-Violent Crimes.\nS3: Sex Crimes.\nS4: Child Exploitation.\nS5: Defamation.\nS6: Specialized Advice.\nS7: Privacy.\nS8: Intellectual Property.\nS9: Indiscriminate Weapons.\nS10: Hate.\nS11: Self-Harm.\nS12: Sexual Content.\nS13: Elections.\nS14: Code Interpreter Abuse.\n\n\n\n\n{% for message in messages %}{% if message['role'] == 'user' and loop.index0 % 2 == 0 %}User: {{ message['content'].trim() }}{% elif message['role'] == 'assistant' and loop.index0 % 2 != 0 %}Agent: {{ message['content'].trim() }}{% endif %}{% if loop.last %}{% set role = message['role'] %}{% endif %}{{ '\n\n' 
}}{% endfor %}\n\nProvide your safety assessment for {{ role }} in the above conversation:\n - First line must read 'safe' or 'unsafe'.\n - If unsafe, a second line must include a comma-separated list of violated categories.<|eot_id|><|start_header_id|>assistant<|end_header_id|>\n\n", + "stop": [], + "bos_token": null, + "eos_token": null + }, + "pricing": { + "hourly": 0, + "input": 0.2, + "output": 0.2, + "base": 0, + "finetune": 0 + } + }, + { + "id": "black-forest-labs/FLUX.1-depth", + "object": "model", + "created": 1732141533, + "type": "image", + "running": false, + "display_name": "FLUX.1 Depth [dev]", + "organization": "Black Forest Labs", + "link": "https://huggingface.co/black-forest-labs/FLUX.1-Depth-dev", + "config": { + "chat_template": null, + "stop": [], + "bos_token": null, + "eos_token": null + }, + "pricing": { + "hourly": 0, + "input": 0, + "output": 0, + "base": 0, + "finetune": 0 + } + }, + { + "id": "black-forest-labs/FLUX.1-canny", + "object": "model", + "created": 1732144835, + "type": "image", + "running": false, + "display_name": "FLUX.1 Canny [dev]", + "organization": "Black Forest Labs", + "link": "https://huggingface.co/black-forest-labs/FLUX.1-Canny-dev", + "config": { + "chat_template": null, + "stop": [], + "bos_token": null, + "eos_token": null + }, + "pricing": { + "hourly": 0, + "input": 0, + "output": 0, + "base": 0, + "finetune": 0 + } + }, + { + "id": "meta-llama/Llama-3-8b-chat-hf", + "object": "model", + "created": 1713420479, + "type": "chat", + "running": false, + "display_name": "Meta Llama 3 8B Instruct Reference", + "organization": "Meta", + "link": "https://huggingface.co/meta-llama/Meta-Llama-3-8B-Instruct", + "license": "Llama-3 (Other)", + "context_length": 8192, + "config": { + "chat_template": "{% set loop_messages = messages %}{% for message in loop_messages %}{% set content = '<|start_header_id|>' + message['role'] + '<|end_header_id|>\n\n'+ message['content'] | trim + '<|eot_id|>' %}{% if loop.index0 == 0 %}{% 
set content = bos_token + content %}{% endif %}{{ content }}{% endfor %}{{ '<|start_header_id|>assistant<|end_header_id|>\n\n' }}", + "stop": [ + "<|eot_id|>" + ], + "bos_token": "<|begin_of_text|>", + "eos_token": "<|end_of_text|>" + }, + "pricing": { + "hourly": 0, + "input": 0.2, + "output": 0.2, + "base": 0, + "finetune": 0 + } + }, + { + "id": "arcee-ai/caller", + "object": "model", + "created": 1743531347, + "type": "chat", + "running": false, + "display_name": "Arcee AI Caller", + "organization": "Arcee AI", + "link": "https://huggingface.co/api/models/togethercomputer/arcee-ai-caller", + "context_length": 32768, + "config": { + "chat_template": null, + "stop": [ + "<|im_end|>" + ], + "bos_token": "<|endoftext|>", + "eos_token": "<|im_end|>" + }, + "pricing": { + "hourly": 0, + "input": 0.55, + "output": 0.85, + "base": 0, + "finetune": 0 + } + }, + { + "id": "togethercomputer/MoA-1-Turbo", + "object": "model", + "created": 0, + "type": "chat", + "running": false, + "display_name": "Together AI MoA-1-Turbo", + "organization": "Together AI", + "link": "https://github.com/togethercomputer/MoA", + "license": null, + "context_length": 32768, + "config": { + "chat_template": null, + "stop": [], + "bos_token": null, + "eos_token": null + }, + "pricing": { + "hourly": 0, + "input": 0, + "output": 0, + "base": 0, + "finetune": 0 + } + }, + { + "id": "mistralai/Mistral-7B-Instruct-v0.1", + "object": "model", + "created": 1695860851, + "type": "chat", + "running": false, + "display_name": "Mistral (7B) Instruct", + "organization": "mistralai", + "link": "https://huggingface.co/api/models/mistralai/Mistral-7B-Instruct-v0.1", + "license": "apache-2.0", + "context_length": 32768, + "config": { + "chat_template": "{% if messages[0]['role'] == 'system' %}{% set loop_messages = messages[1:] %}{% set system_message = messages[0]['content'] %}{% else %}{% set loop_messages = messages %}{% set system_message = false %}{% endif %}{% for message in loop_messages %}{% if 
loop.index0 == 0 and system_message != false %}{% set content = '<>\\n' + system_message + '\\n<>\\n\\n' + message['content'] %}{% else %}{% set content = message['content'] %}{% endif %}{% if message['role'] == 'user' or message['role'] == 'tool' %}{{ bos_token + '[INST] ' + content + ' [/INST]' }}{% elif message['role'] == 'system' %}{{ '<>\\n' + content + '\\n<>\\n\\n' }}{% elif message['role'] == 'assistant' %}{{ ' ' + content + ' ' + eos_token }}{% endif %}{% endfor %}", + "stop": [ + "" + ], + "bos_token": "", + "eos_token": "" + }, + "pricing": { + "hourly": 0, + "input": 0.2, + "output": 0.2, + "base": 0, + "finetune": 0 + } + }, + { + "id": "scb10x/scb10x-llama3-1-typhoon2-8b-instruct", + "object": "model", + "created": 1743028923, + "type": "chat", + "running": false, + "display_name": "Typhoon 2 8B Instruct", + "organization": "SCB10X", + "link": "https://huggingface.co/api/models/scb10x/llama3.1-typhoon2-8b-instruct", + "license": "llama3.1", + "context_length": 8192, + "config": { + "chat_template": "{{- bos_token }}\n{%- if custom_tools is defined %}\n {%- set tools = custom_tools %}\n{%- endif %}\n{%- if not date_string is defined %}\n {%- set date_string = \"26 Jul 2024\" %}\n{%- endif %}\n{%- if not tools is defined %}\n {%- set tools = none %}\n{%- endif %}\n\n{#- This block extracts the system message, so we can slot it into the right place. #}\n{%- if messages[0]['role'] == 'system' %}\n {%- set system_message = messages[0]['content']|trim %}\n {%- set messages = messages[1:] %}\n{%- else %}\n {%- set system_message = \"\" %}\n{%- endif %}\n\n{#- System message + builtin tools #}\n{{- \"<|start_header_id|>system<|end_header_id|>\\n\\n\" }}\n{{- \"Cutting Knowledge Date: December 2023\\n\" }}\n{{- \"Today Date: \" + date_string + \"\\n\\n\" }}\n{{- system_message }}\n{%- if tools is not none %}\n {{- \"\\n\" }}\n {{- \"You are given a question and a set of possible functions. 
Based on the question, you will need to make one or more function/tool calls to achieve the purpose.\" }}\n {{- \"If none of the function can be used, point it out. If the given question lacks the parameters required by the function, also point it out.\" }}\n {{- \"You should only return the function call in tools call sections.\" }}\n {{- \"If you decide to invoke any of the function(s), you MUST put it in the format of [Function(arguments1={{params_name1: params_value1,params_name2: params_value2, ...}}, name1=function_name1), Function(arguments2={{params}}, name2=function_name2) , ...]\"}}\n {{- \"You SHOULD NOT include any other text in the response.\\nHere is a list of functions in JSON format that you can invoke.\\n\" }}\n {%- for t in tools %}\n {{- t | tojson(indent=4) }}\n {{- \"\\n\\n\" }}\n {%- endfor %}\n{%- endif %}\n{{- \"<|eot_id|>\" }}\n\n\n{%- for message in messages %}\n {%- if not (message.role == 'tool') %}\n {{- '<|start_header_id|>' + message['role'] + '<|end_header_id|>\\n\\n'+ message['content'] | trim + '<|eot_id|>' }}\n {%- elif message.role == \"tool\" %}\n {{- \"<|start_header_id|>tool<|end_header_id|>\\n\\n\" }}\n {%- if message.content is mapping or message.content is iterable %}\n {{- message.content | tojson }}\n {%- else %}\n {{- message.content }}\n {%- endif %}\n {{- \"<|eot_id|>\" }}\n {%- endif %}\n{%- endfor %}\n{%- if add_generation_prompt %}\n {{- '<|start_header_id|>assistant<|end_header_id|>\\n\\n' }}\n{%- endif %}", + "stop": [ + "<|eot_id|>" + ], + "bos_token": "<|begin_of_text|>", + "eos_token": "<|eot_id|>" + }, + "pricing": { + "hourly": 0, + "input": 0.18000000000000002, + "output": 0.18000000000000002, + "base": 0, + "finetune": 0 + } + }, + { + "id": "mistralai/Mixtral-8x7B-v0.1", + "object": "model", + "created": 1744319758, + "type": "language", + "running": false, + "display_name": "Mixtral-8x7B v0.1", + "organization": "mistralai", + "link": "https://huggingface.co/mistralai/Mixtral-8x7B-v0.1", + "license": 
"apache-2.0", + "context_length": 32768, + "config": { + "chat_template": null, + "stop": [], + "bos_token": null, + "eos_token": null + }, + "pricing": { + "hourly": 0, + "input": 0.6, + "output": 0.6, + "base": 0, + "finetune": 0 + } + }, + { + "id": "black-forest-labs/FLUX.1-dev-lora", + "object": "model", + "created": 1736906515, + "type": "image", + "running": false, + "display_name": "FLUX.1 [dev] LoRA", + "organization": "Black Forest Labs", + "link": "https://huggingface.co/black-forest-labs/FLUX.1-dev", + "config": { + "chat_template": null, + "stop": [], + "bos_token": null, + "eos_token": null + }, + "pricing": { + "hourly": 0, + "input": 0, + "output": 0, + "base": 0, + "finetune": 0 + } + }, + { + "id": "deepseek-ai/DeepSeek-R1", + "object": "model", + "created": 1737396322, + "type": "chat", + "running": false, + "display_name": "DeepSeek R1", + "organization": "DeepSeek", + "link": "https://huggingface.co/deepseek-ai/DeepSeek-R1", + "context_length": 163840, + "config": { + "chat_template": "{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{% set system_prompt='' %}{% set is_tool = false %}{%- for message in messages %}{%- if message['role'] == 'system' %}{% set system_prompt = message['content'] %}{%- endif %}{%- endfor %}{{bos_token}}{{system_prompt}}{%- for message in messages %}{%- if message['role'] == 'user' %}{%- set is_tool = false -%}{{'<|User|>' + message['content']}}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is none %}{%- set is_tool = false -%}{%- set is_output_first = true -%}{%- set is_first = false -%}{%- for tool in message['tool_calls']%}{%- if is_first %}{{'<|Assistant|><|tool▁calls▁begin|><|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{%- set is_first = true -%}{%- else %}{{'\\n' + '<|tool▁call▁begin|>' + tool['type'] + 
'<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{{'<|tool▁calls▁end|><|end▁of▁sentence|>'}}{%- endif %}{%- endfor %}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is not none %}{%- if is_tool %}{{'<|tool▁outputs▁end|>' + message['content'] + '<|end▁of▁sentence|>'}}{%- set is_tool = false -%}{%- else %}{% set content = message['content'] %}{% if '' in content %}{% set parts = content | split('') %}{% set content = parts[parts.length-1] %}{% endif %}{{'<|Assistant|>' + content + '<|end▁of▁sentence|>'}}{%- endif %}{%- endif %}{%- if message['role'] == 'tool' %}{%- set is_tool = true -%}{%- if is_output_first %}{{'<|tool▁outputs▁begin|><|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- set is_output_first = false %}{%- else %}{{'\\n<|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- endif %}{%- endif %}{%- endfor -%}{% if is_tool %}{{'<|tool▁outputs▁end|>'}}{% endif %}{% if add_generation_prompt and not is_tool %}{{'<|Assistant|>'}}{% endif %}", + "stop": [ + "<|end▁of▁sentence|>" + ], + "bos_token": "<|begin▁of▁sentence|>", + "eos_token": "<|end▁of▁sentence|>", + "max_output_length": 32768 + }, + "pricing": { + "hourly": 0, + "input": 3, + "output": 7, + "base": 0, + "finetune": 0 + } + }, + { + "id": "arcee-ai/arcee-blitz", + "object": "model", + "created": 1743449087, + "type": "chat", + "running": false, + "display_name": "Arcee AI Blitz", + "organization": "Arcee AI", + "link": "https://huggingface.co/api/models/togethercomputer/Arcee-Blitz", + "license": "apache-2.0", + "context_length": 32768, + "config": { + "chat_template": "{%- set today = strftime_now(\"%Y-%m-%d\") %}\n{%- set default_system_message = \"You are Arcee Blitz, a Large Language Model (LLM) created by Arcee AI.\\nYour knowledge base was last updated on 2024-10-01. 
The current date is \" + today + \".\\n\\nWhen you're not sure about some information, you say that you don't have the information and don't make up anything.\\nIf the user's question is not clear, ambiguous, or does not provide enough context for you to accurately answer the question, you do not try to answer it right away and you rather ask the user to clarify their request (e.g. \\\"What are some good restaurants around me?\\\" => \\\"Where are you?\\\" or \\\"When is the next flight to Tokyo\\\" => \\\"Where do you travel from?\\\")\" %}\n\n{{- bos_token }}\n\n{%- if messages[0]['role'] == 'system' %}\n {%- set system_message = messages[0]['content'] %}\n {%- set loop_messages = messages[1:] %}\n{%- else %}\n {%- set system_message = default_system_message %}\n {%- set loop_messages = messages %}\n{%- endif %}\n{{- '[SYSTEM_PROMPT]' + system_message + '[/SYSTEM_PROMPT]' }}\n\n{%- for message in loop_messages %}\n {%- if message['role'] == 'user' %}\n {{- '[INST]' + message['content'] + '[/INST]' }}\n {%- elif message['role'] == 'system' %}\n {{- '[SYSTEM_PROMPT]' + message['content'] + '[/SYSTEM_PROMPT]' }}\n {%- elif message['role'] == 'assistant' %}\n {{- message['content'] + eos_token }}\n {%- else %}\n {{- raise_exception('Only user, system and assistant roles are supported!') }}\n {%- endif %}\n{%- endfor %}", + "stop": [ + "" + ], + "bos_token": "", + "eos_token": "" + }, + "pricing": { + "hourly": 0, + "input": 0.45, + "output": 0.75, + "base": 0, + "finetune": 0 + } + }, + { + "id": "deepseek-ai/DeepSeek-V3-p-dp", + "object": "model", + "created": 1740979667, + "type": "chat", + "running": false, + "display_name": "DeepSeek V3", + "organization": "DeepSeek", + "link": "https://huggingface.co/deepseek-ai/DeepSeek-V3", + "context_length": 131072, + "config": { + "chat_template": "{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{% set is_first = false %}{% set is_tool = false %}{% set is_output_first = true 
%}{% set system_prompt='' %}{% set is_first_sp = true %}{%- for message in messages %}{%- if message['role'] == 'system' %}{%- if is_first_sp %}{% set system_prompt = system_prompt + message['content'] %}{% set is_first_sp = false %}{%- else %}{% set system_prompt = system_prompt + '\n\n' + message['content'] %}{%- endif %}{%- endif %}{%- endfor %}{% if tools %}{% set system_prompt = system_prompt + '\n\nYou can access the following functions. Use them if required -\n' + (tools | tojson) + '\n' %}{% endif %}{{bos_token}}{{system_prompt}}{%- for message in messages %}{%- if message['role'] == 'user' %}{%- set is_tool = false -%}{{'<|User|>' + message['content']}}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is none %}{%- set is_tool = false -%}{%- for tool in message['tool_calls']%}{%- if not is_first %}{{'<|Assistant|><|tool▁calls▁begin|><|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\n' + '```json' + '\n' + tool['function']['arguments'] + '\n' + '```' + '<|tool▁call▁end|>'}}{%- set is_first = true -%}{%- else %}{{'\n' + '<|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\n' + '```json' + '\n' + tool['function']['arguments'] + '\n' + '```' + '<|tool▁call▁end|>'}}{{'<|tool▁calls▁end|><|end▁of▁sentence|>'}}{%- endif %}{%- endfor %}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is not none %}{%- if is_tool %}{{'<|tool▁outputs▁end|>' + message['content'] + '<|end▁of▁sentence|>'}}{%- set is_tool = false -%}{%- else %}{{'<|Assistant|>' + message['content'] + '<|end▁of▁sentence|>'}}{%- endif %}{%- endif %}{%- if message['role'] == 'tool' %}{%- set is_tool = true -%}{%- if is_output_first %}{{'<|tool▁outputs▁begin|><|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- set is_output_first = false %}{%- else %}{{'\n<|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- endif %}{%- endif %}{%- endfor -%}{% if is_tool 
%}{{'<|tool▁outputs▁end|>'}}{% endif %}{% if add_generation_prompt and not is_tool %}{{'<|Assistant|>'}}{% endif %}", + "stop": [ + "<|end▁of▁sentence|>" + ], + "bos_token": "<|begin▁of▁sentence|>", + "eos_token": "<|end▁of▁sentence|>", + "max_output_length": 32768 + }, + "pricing": { + "hourly": 0, + "input": 1.25, + "output": 1.25, + "base": 0, + "finetune": 0 + } + }, + { + "id": "deepseek-ai/DeepSeek-R1-Distill-Qwen-14B", + "object": "model", + "created": 1738182549, + "type": "chat", + "running": false, + "display_name": "DeepSeek R1 Distill Qwen 14B", + "organization": "DeepSeek", + "link": "https://huggingface.co/api/models/deepseek-ai/DeepSeek-R1-Distill-Qwen-14B", + "license": "mit", + "context_length": 131072, + "config": { + "chat_template": "{% if not add_generation_prompt %}{% set add_generation_prompt = false %}{% endif %}{# Initialize variables since Nunjucks doesn't support namespace #}{% set is_first = false %}{% set is_tool = false %}{% set is_output_first = true %}{% set system_prompt = '' %}{# Get system prompt #}{% for message in messages %}{% if message.role == 'system' %}{% set system_prompt = message.content %}{% endif %}{% endfor %}{{bos_token}}{{system_prompt}}{% for message in messages %}{% if message.role == 'user' %}{% set is_tool = false %}<|User|>{{message.content}}{% endif %}{% if message.role == 'assistant' and not message.content %}{% set is_tool = false %}{% for tool in message.tool_calls %}{% if not is_first %}<|Assistant|><|tool▁calls▁begin|><|tool▁call▁begin|>{{tool.type}}<|tool▁sep|>{{tool.function.name}}```json{{tool.function.arguments}}```<|tool▁call▁end|>{% set is_first = true %}{% else %}<|tool▁call▁begin|>{{tool.type}}<|tool▁sep|>{{tool.function.name}}```json{{tool.function.arguments}}```<|tool▁call▁end|><|tool▁calls▁end|><|end▁of▁sentence|>{% endif %}{% endfor %}{% endif %}{% if message.role == 'assistant' and message.content %}{% if is_tool %}<|tool▁outputs▁end|>{{message.content}}<|end▁of▁sentence|>{% set is_tool = 
false %}{% else %}{% set content = message.content %}{% if '' in content %}{% set parts = content | split('') %}{% set content = parts[parts.length-1] %}{% endif %}<|Assistant|>{{content}}<|end▁of▁sentence|>{% endif %}{% endif %}{% if message.role == 'tool' %}{% set is_tool = true %}{% if is_output_first %}<|tool▁outputs▁begin|><|tool▁output▁begin|>{{message.content}}<|tool▁output▁end|>{% set is_output_first = false %}{% else %}<|tool▁output▁begin|>{{message.content}}<|tool▁output▁end|>{% endif %}{% endif %}{% endfor %}{% if is_tool %}<|tool▁outputs▁end|>{% endif %}{% if add_generation_prompt and not is_tool %}<|Assistant|>{% endif %}", + "stop": [ + "<|end▁of▁sentence|>" + ], + "bos_token": "<|begin▁of▁sentence|>", + "eos_token": "<|end▁of▁sentence|>", + "max_output_length": 32768 + }, + "pricing": { + "hourly": 0, + "input": 1.6, + "output": 1.6, + "base": 0, + "finetune": 0 + } + }, + { + "id": "deepseek-ai/DeepSeek-V3", + "object": "model", + "created": 1735450433, + "type": "chat", + "running": false, + "display_name": "DeepSeek V3-0324", + "organization": "DeepSeek", + "link": "https://huggingface.co/deepseek-ai/DeepSeek-V3", + "context_length": 131072, + "config": { + "chat_template": "{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{% set ns = namespace(is_first=false, is_tool=false, is_output_first=true, system_prompt='', is_first_sp=true) %}{%- for message in messages %}{%- if message['role'] == 'system' %}{%- if ns.is_first_sp %}{% set ns.system_prompt = ns.system_prompt + message['content'] %}{% set ns.is_first_sp = false %}{%- else %}{% set ns.system_prompt = ns.system_prompt + '\n\n' + message['content'] %}{%- endif %}{%- endif %}{%- endfor %}{{bos_token}}{{ns.system_prompt}}{%- for message in messages %}{%- if message['role'] == 'user' %}{%- set ns.is_tool = false -%}{{'<|User|>' + message['content']}}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is none %}{%- set ns.is_tool = 
false -%}{%- for tool in message['tool_calls']%}{%- if not ns.is_first %}{{'<|Assistant|><|tool▁calls▁begin|><|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\n' + '```json' + '\n' + tool['function']['arguments'] + '\n' + '```' + '<|tool▁call▁end|>'}}{%- set ns.is_first = true -%}{%- else %}{{'\n' + '<|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\n' + '```json' + '\n' + tool['function']['arguments'] + '\n' + '```' + '<|tool▁call▁end|>'}}{{'<|tool▁calls▁end|><|end▁of▁sentence|>'}}{%- endif %}{%- endfor %}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is not none %}{%- if ns.is_tool %}{{'<|tool▁outputs▁end|>' + message['content'] + '<|end▁of▁sentence|>'}}{%- set ns.is_tool = false -%}{%- else %}{{'<|Assistant|>' + message['content'] + '<|end▁of▁sentence|>'}}{%- endif %}{%- endif %}{%- if message['role'] == 'tool' %}{%- set ns.is_tool = true -%}{%- if ns.is_output_first %}{{'<|tool▁outputs▁begin|><|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- set ns.is_output_first = false %}{%- else %}{{'\n<|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- endif %}{%- endif %}{%- endfor -%}{% if ns.is_tool %}{{'<|tool▁outputs▁end|>'}}{% endif %}{% if add_generation_prompt and not ns.is_tool %}{{'<|Assistant|>'}}{% endif %}", + "stop": [ + "<|end▁of▁sentence|>" + ], + "bos_token": "<|begin▁of▁sentence|>", + "eos_token": "<|end▁of▁sentence|>", + "max_output_length": 12288 + }, + "pricing": { + "hourly": 0, + "input": 1.25, + "output": 1.25, + "base": 0, + "finetune": 0 + } + }, + { + "id": "Qwen/Qwen2.5-VL-72B-Instruct", + "object": "model", + "created": 1742408085, + "type": "chat", + "running": false, + "display_name": "Qwen2.5-VL (72B) Instruct", + "organization": "Qwen", + "link": "https://huggingface.co/api/models/Qwen/Qwen2.5-VL-72B-Instruct", + "license": "tongyi-qianwen", + "context_length": 32768, + "config": { + "chat_template": "{% 
set image_count = namespace(value=0) %}{% set video_count = namespace(value=0) %}{% for message in messages %}{% if loop.first and message['role'] != 'system' %}<|im_start|>system\nYou are a helpful assistant.<|im_end|>\n{% endif %}<|im_start|>{{ message['role'] }}\n{% if message['content'] is string %}{{ message['content'] }}<|im_end|>\n{% else %}{% for content in message['content'] %}{% if content['type'] == 'image' or 'image' in content or 'image_url' in content %}{% set image_count.value = image_count.value + 1 %}{% if add_vision_id %}Picture {{ image_count.value }}: {% endif %}<|vision_start|><|image_pad|><|vision_end|>{% elif content['type'] == 'video' or 'video' in content %}{% set video_count.value = video_count.value + 1 %}{% if add_vision_id %}Video {{ video_count.value }}: {% endif %}<|vision_start|><|video_pad|><|vision_end|>{% elif 'text' in content %}{{ content['text'] }}{% endif %}{% endfor %}<|im_end|>\n{% endif %}{% endfor %}{% if add_generation_prompt %}<|im_start|>assistant\n{% endif %}", + "stop": [ + "<|im_end|>", + "<|endoftext|>" + ], + "bos_token": null, + "eos_token": "<|im_end|>" + }, + "pricing": { + "hourly": 0, + "input": 1.95, + "output": 8, + "base": 0, + "finetune": 0 + } + }, + { + "id": "Qwen/Qwen2.5-Coder-32B-Instruct", + "object": "model", + "created": 1731556615, + "type": "chat", + "running": false, + "display_name": "Qwen 2.5 Coder 32B Instruct", + "organization": "Qwen", + "link": "https://huggingface.co/Qwen/Qwen2.5-Coder-32B-Instruct", + "license": "Qwen", + "context_length": 16384, + "config": { + "chat_template": "{%- if tools %}\n {{- '<|im_start|>system\\n' }}\n {%- if messages[0]['role'] == 'system' %}\n {{- messages[0]['content'] }}\n {%- else %}\n {{- 'You are Qwen, created by Alibaba Cloud. You are a helpful assistant.' 
}}\n {%- endif %}\n {{- \"\\n\\n# Tools\\n\\nYou may call one or more functions to assist with the user query.\\n\\nYou are provided with function signatures within XML tags:\\n\" }}\n {%- for tool in tools %}\n {{- \"\\n\" }}\n {{- tool | tojson }}\n {%- endfor %}\n {{- \"\\n\\n\\nFor each function call, return a json object with function name and arguments within XML tags:\\n\\n{\\\"name\\\": , \\\"arguments\\\": }\\n<|im_end|>\\n\" }}\n{%- else %}\n {%- if messages[0]['role'] == 'system' %}\n {{- '<|im_start|>system\\n' + messages[0]['content'] + '<|im_end|>\\n' }}\n {%- else %}\n {{- '<|im_start|>system\\nYou are Qwen, created by Alibaba Cloud. You are a helpful assistant.<|im_end|>\\n' }}\n {%- endif %}\n{%- endif %}\n{%- for message in messages %}\n {%- if (message.role == \"user\") or (message.role == \"system\" and not loop.first) or (message.role == \"assistant\" and not message.tool_calls) %}\n {{- '<|im_start|>' + message.role + '\\n' + message.content + '<|im_end|>' + '\\n' }}\n {%- elif message.role == \"assistant\" %}\n {{- '<|im_start|>' + message.role }}\n {%- if message.content %}\n {{- '\\n' + message.content }}\n {%- endif %}\n {%- for tool_call in message.tool_calls %}\n {%- if tool_call.function is defined %}\n {%- set tool_call = tool_call.function %}\n {%- endif %}\n {{- '\\n\\n{\"name\": \"' }}\n {{- tool_call.name }}\n {{- '\", \"arguments\": ' }}\n {{- tool_call.arguments | tojson }}\n {{- '}\\n' }}\n {%- endfor %}\n {{- '<|im_end|>\\n' }}\n {%- elif message.role == \"tool\" %}\n {%- if (loop.index0 == 0) or (messages[loop.index0 - 1].role != \"tool\") %}\n {{- '<|im_start|>user' }}\n {%- endif %}\n {{- '\\n\\n' }}\n {{- message.content }}\n {{- '\\n' }}\n {%- if loop.last or (messages[loop.index0 + 1].role != \"tool\") %}\n {{- '<|im_end|>\\n' }}\n {%- endif %}\n {%- endif %}\n{%- endfor %}\n{%- if add_generation_prompt %}\n {{- '<|im_start|>assistant\\n' }}\n{%- endif %}\n", + "stop": [ + "<|im_end|>" + ], + "bos_token": "<|endoftext|>", 
+ "eos_token": "<|im_end|>" + }, + "pricing": { + "hourly": 0, + "input": 0.8, + "output": 0.8, + "base": 0, + "finetune": 0 + } + }, + { + "id": "meta-llama/Llama-3.3-70B-Instruct-Turbo", + "object": "model", + "created": 1733466629, + "type": "chat", + "running": false, + "display_name": "Meta Llama 3.3 70B Instruct Turbo", + "organization": "Meta", + "link": "https://huggingface.co/meta-llama/Llama-3.3-70B-Instruct", + "license": "Llama-3.3 (Other)", + "context_length": 131072, + "config": { + "chat_template": "{{- bos_token }}\n{%- if custom_tools is defined %}\n {%- set tools = custom_tools %}\n{%- endif %}\n{%- if not tools_in_user_message is defined %}\n {%- set tools_in_user_message = true %}\n{%- endif %}\n{%- if not date_string is defined %}\n {%- set date_string = \"26 Jul 2024\" %}\n{%- endif %}\n{%- if not tools is defined %}\n {%- set tools = none %}\n{%- endif %}\n\n{#- This block extracts the system message, so we can slot it into the right place. #}\n{%- if messages[0]['role'] == 'system' %}\n {%- set system_message = messages[0]['content']|trim %}\n {%- set messages = messages[1:] %}\n{%- else %}\n {%- set system_message = \"\" %}\n{%- endif %}\n\n{#- System message + builtin tools #}\n{{- \"<|start_header_id|>system<|end_header_id|>\\n\\n\" }}\n{%- if builtin_tools is defined or tools is not none %}\n {{- \"Environment: ipython\\n\" }}\n{%- endif %}\n{%- if builtin_tools is defined %}\n {{- \"Tools: \" + builtin_tools | reject('equalto', 'code_interpreter') | join(\", \") + \"\\n\\n\"}}\n{%- endif %}\n{{- \"Cutting Knowledge Date: December 2023\\n\" }}\n{{- \"Today Date: \" + date_string + \"\\n\\n\" }}\n{%- if tools is not none and not tools_in_user_message %}\n {{- \"You have access to the following functions. To call a function, please respond with JSON for a function call.\" }}\n {{- 'Respond in the format {\"name\": function name, \"parameters\": dictionary of argument name and its value}.' 
}}\n {{- \"Do not use variables.\\n\\n\" }}\n {%- for t in tools %}\n {{- t | tojson(indent=4) }}\n {{- \"\\n\\n\" }}\n {%- endfor %}\n{%- endif %}\n{{- system_message }}\n{{- \"<|eot_id|>\" }}\n\n{#- Custom tools are passed in a user message with some extra guidance #}\n{%- if tools_in_user_message and not tools is none %}\n {#- Extract the first user message so we can plug it in here #}\n {%- if messages | length != 0 %}\n {%- set first_user_message = messages[0]['content']|trim %}\n {%- set messages = messages[1:] %}\n {%- else %}\n {{- raise_exception(\"Cannot put tools in the first user message when there's no first user message!\") }}\n{%- endif %}\n {{- '<|start_header_id|>user<|end_header_id|>\\n\\n' -}}\n {{- \"Given the following functions, please respond with a JSON for a function call \" }}\n {{- \"with its proper arguments that best answers the given prompt.\\n\\n\" }}\n {{- 'Respond in the format {\"name\": function name, \"parameters\": dictionary of argument name and its value}.' 
}}\n {{- \"Do not use variables.\\n\\n\" }}\n {%- for t in tools %}\n {{- t | tojson(indent=4) }}\n {{- \"\\n\\n\" }}\n {%- endfor %}\n {{- first_user_message + \"<|eot_id|>\"}}\n{%- endif %}\n\n{%- for message in messages %}\n {%- if not (message.role == 'ipython' or message.role == 'tool' or 'tool_calls' in message) %}\n {{- '<|start_header_id|>' + message['role'] + '<|end_header_id|>\\n\\n'+ message['content'] | trim + '<|eot_id|>' }}\n {%- elif 'tool_calls' in message %}\n {%- if not message.tool_calls|length == 1 %}\n {{- raise_exception(\"This model only supports single tool-calls at once!\") }}\n {%- endif %}\n {%- set tool_call = message.tool_calls[0].function %}\n {%- if builtin_tools is defined and tool_call.name in builtin_tools %}\n {{- '<|start_header_id|>assistant<|end_header_id|>\\n\\n' -}}\n {{- \"<|python_tag|>\" + tool_call.name + \".call(\" }}\n {%- for arg_name, arg_val in tool_call.arguments | items %}\n {{- arg_name + '=\"' + arg_val + '\"' }}\n {%- if not loop.last %}\n {{- \", \" }}\n {%- endif %}\n {%- endfor %}\n {{- \")\" }}\n {%- else %}\n {{- '<|start_header_id|>assistant<|end_header_id|>\\n\\n' -}}\n {{- '{\"name\": \"' + tool_call.name + '\", ' }}\n {{- '\"parameters\": ' }}\n {{- tool_call.arguments | tojson }}\n {{- \"}\" }}\n {%- endif %}\n {%- if builtin_tools is defined %}\n {#- This means we're in ipython mode #}\n {{- \"<|eom_id|>\" }}\n {%- else %}\n {{- \"<|eot_id|>\" }}\n {%- endif %}\n {%- elif message.role == \"tool\" or message.role == \"ipython\" %}\n {{- \"<|start_header_id|>ipython<|end_header_id|>\\n\\n\" }}\n {%- if message.content is mapping or message.content is iterable %}\n {{- message.content | tojson }}\n {%- else %}\n {{- message.content }}\n {%- endif %}\n {{- \"<|eot_id|>\" }}\n {%- endif %}\n{%- endfor %}\n{%- if add_generation_prompt %}\n {{- '<|start_header_id|>assistant<|end_header_id|>\\n\\n' }}\n{%- endif %}\n", + "stop": [ + "<|eot_id|>", + "<|eom_id|>" + ], + "bos_token": "<|begin_of_text|>", + 
"eos_token": "<|eot_id|>" + }, + "pricing": { + "hourly": 0, + "input": 0.88, + "output": 0.88, + "base": 0, + "finetune": 0 + } + }, + { + "id": "Qwen/Qwen2-72B-Instruct", + "object": "model", + "created": 1744167438, + "type": "chat", + "running": false, + "display_name": "Qwen 2 Instruct (72B)", + "organization": "Qwen", + "link": "https://huggingface.co/Qwen/Qwen2-72B-Instruct", + "license": "tongyi-qianwen", + "context_length": 32768, + "config": { + "chat_template": "{% for message in messages %}{% if loop.first and messages[0]['role'] != 'system' %}{{ '<|im_start|>system\nYou are a helpful assistant.<|im_end|>\n' }}{% endif %}{{'<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>' + '\n'}}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>assistant\n' }}{% endif %}", + "stop": [ + "<|im_start|>", + "<|im_end|>" + ], + "bos_token": null, + "eos_token": null + }, + "pricing": { + "hourly": 0, + "input": 0.9, + "output": 0.9, + "base": 0, + "finetune": 0 + } + }, + { + "id": "microsoft/WizardLM-2-8x22B", + "object": "model", + "created": 1713206398, + "type": "chat", + "running": false, + "display_name": "WizardLM-2 (8x22B)", + "organization": "microsoft", + "link": "https://huggingface.co/microsoft/WizardLM-2-8x22B", + "license": "apache-2.0", + "context_length": 65536, + "config": { + "chat_template": "{% for message in messages %}{{message['role'].toLocaleUpperCase() + ': ' + message['content'] + '\n'}}{% endfor %}{{ 'ASSISTANT: ' }}", + "stop": [ + "" + ], + "bos_token": "", + "eos_token": "" + }, + "pricing": { + "hourly": 0, + "input": 1.2, + "output": 1.2, + "base": 0, + "finetune": 0 + } + }, + { + "id": "deepseek-ai/DeepSeek-R1-Distill-Llama-70B", + "object": "model", + "created": 1738048961, + "type": "chat", + "running": false, + "display_name": "DeepSeek R1 Distill Llama 70B", + "organization": "DeepSeek", + "link": "https://huggingface.co/deepseek-ai/DeepSeek-R1-Distill-Llama-70B", + "license": "mit", + 
"context_length": 131072, + "config": { + "chat_template": "{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{% set system_prompt='' %}{% set is_tool = false %}{%- for message in messages %}{%- if message['role'] == 'system' %}{% set system_prompt = message['content'] %}{%- endif %}{%- endfor %}{{bos_token}}{{system_prompt}}{%- for message in messages %}{%- if message['role'] == 'user' %}{%- set is_tool = false -%}{{'<|User|>' + message['content']}}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is none %}{%- set is_tool = false -%}{%- set is_output_first = true -%}{%- set is_first = false -%}{%- for tool in message['tool_calls']%}{%- if is_first %}{{'<|Assistant|><|tool▁calls▁begin|><|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{%- set is_first = true -%}{%- else %}{{'\\n' + '<|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{{'<|tool▁calls▁end|><|end▁of▁sentence|>'}}{%- endif %}{%- endfor %}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is not none %}{%- if is_tool %}{{'<|tool▁outputs▁end|>' + message['content'] + '<|end▁of▁sentence|>'}}{%- set is_tool = false -%}{%- else %}{% set content = message['content'] %}{% if '' in content %}{% set parts = content | split('') %}{% set content = parts[parts.length-1] %}{% endif %}{{'<|Assistant|>' + content + '<|end▁of▁sentence|>'}}{%- endif %}{%- endif %}{%- if message['role'] == 'tool' %}{%- set is_tool = true -%}{%- if is_output_first %}{{'<|tool▁outputs▁begin|><|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- set is_output_first = false %}{%- else %}{{'\\n<|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- endif %}{%- endif 
%}{%- endfor -%}{% if is_tool %}{{'<|tool▁outputs▁end|>'}}{% endif %}{% if add_generation_prompt and not is_tool %}{{'<|Assistant|>'}}{% endif %}", + "stop": [ + "<|end▁of▁sentence|>" + ], + "bos_token": "<|begin▁of▁sentence|>", + "eos_token": "<|end▁of▁sentence|>", + "max_output_length": 32768 + }, + "pricing": { + "hourly": 0, + "input": 2, + "output": 2, + "base": 0, + "finetune": 0 + } + }, + { + "id": "meta-llama/Llama-3.2-11B-Vision-Instruct-Turbo", + "object": "model", + "created": 1727218691, + "type": "chat", + "running": false, + "display_name": "Meta Llama 3.2 11B Vision Instruct Turbo", + "organization": "Meta", + "link": "https://huggingface.co/meta-llama/Meta-Llama-3.1-405B-Instruct", + "license": "llama", + "context_length": 131072, + "config": { + "chat_template": "{% for message in messages %}\n{% if loop.index0 == 0 %}{{ bos_token }}{% endif %}\n{{ '<|start_header_id|>' + message['role'] + '<|end_header_id|>\\n\\n' }}\n{% if message['content'] is string %}\n{{ message['content'] }}\n{% else %}\n{% for content in message['content'] | sort(attribute=\"type\") %}\n{% if content['type'] == 'image' %}\n{{ '<|image|>' }}\n{% elif content['type'] == 'text' %}\n{{ content['text'] }}\n{% endif %}\n{% endfor %}\n{% endif %}\n{{ '<|eot_id|>' }}\n{% endfor %}\n{% if add_generation_prompt %}\n{{ '<|start_header_id|>assistant<|end_header_id|>\\n\\n' }}\n{% endif %}", + "stop": [ + "<|eot_id|>", + "<|eom_id|>" + ], + "bos_token": "<|begin_of_text|>", + "eos_token": "<|eot_id|>" + }, + "pricing": { + "hourly": 0, + "input": 0.18000000000000002, + "output": 0.18000000000000002, + "base": 0, + "finetune": 0 + } + }, + { + "id": "deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B", + "object": "model", + "created": 1738185844, + "type": "chat", + "running": false, + "display_name": "DeepSeek R1 Distill Qwen 1.5B", + "organization": "DeepSeek", + "link": "https://huggingface.co/deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B", + "license": "mit", + "context_length": 131072, + 
"config": { + "chat_template": "{% if not add_generation_prompt %}{% set add_generation_prompt = false %}{% endif %}{# Initialize variables since Nunjucks doesn't support namespace #}{% set is_first = false %}{% set is_tool = false %}{% set is_output_first = true %}{% set system_prompt = '' %}{# Get system prompt #}{% for message in messages %}{% if message.role == 'system' %}{% set system_prompt = message.content %}{% endif %}{% endfor %}{{bos_token}}{{system_prompt}}{% for message in messages %}{% if message.role == 'user' %}{% set is_tool = false %}<|User|>{{message.content}}{% endif %}{% if message.role == 'assistant' and not message.content %}{% set is_tool = false %}{% for tool in message.tool_calls %}{% if not is_first %}<|Assistant|><|tool▁calls▁begin|><|tool▁call▁begin|>{{tool.type}}<|tool▁sep|>{{tool.function.name}}```json{{tool.function.arguments}}```<|tool▁call▁end|>{% set is_first = true %}{% else %}<|tool▁call▁begin|>{{tool.type}}<|tool▁sep|>{{tool.function.name}}```json{{tool.function.arguments}}```<|tool▁call▁end|><|tool▁calls▁end|><|end▁of▁sentence|>{% endif %}{% endfor %}{% endif %}{% if message.role == 'assistant' and message.content %}{% if is_tool %}<|tool▁outputs▁end|>{{message.content}}<|end▁of▁sentence|>{% set is_tool = false %}{% else %}{% set content = message.content %}{% if '' in content %}{% set parts = content | split('') %}{% set content = parts[parts.length-1] %}{% endif %}<|Assistant|>{{content}}<|end▁of▁sentence|>{% endif %}{% endif %}{% if message.role == 'tool' %}{% set is_tool = true %}{% if is_output_first %}<|tool▁outputs▁begin|><|tool▁output▁begin|>{{message.content}}<|tool▁output▁end|>{% set is_output_first = false %}{% else %}<|tool▁output▁begin|>{{message.content}}<|tool▁output▁end|>{% endif %}{% endif %}{% endfor %}{% if is_tool %}<|tool▁outputs▁end|>{% endif %}{% if add_generation_prompt and not is_tool %}<|Assistant|>{% endif %}", + "stop": [ + "<|end▁of▁sentence|>" + ], + "bos_token": "<|begin▁of▁sentence|>", + 
"eos_token": "<|end▁of▁sentence|>" + }, + "pricing": { + "hourly": 0, + "input": 0.18000000000000002, + "output": 0.18000000000000002, + "base": 0, + "finetune": 0 + } + }, + { + "id": "meta-llama/Llama-3.2-90B-Vision-Instruct-Turbo", + "object": "model", + "created": 1727227657, + "type": "chat", + "running": false, + "display_name": "Meta Llama 3.2 90B Vision Instruct Turbo", + "organization": "Meta", + "link": "https://huggingface.co/meta-llama/Llama-3.2-90B-Vision-Instruct", + "license": "llama", + "context_length": 131072, + "config": { + "chat_template": "{% for message in messages %}\n{% if loop.index0 == 0 %}{{ bos_token }}{% endif %}\n{{ '<|start_header_id|>' + message['role'] + '<|end_header_id|>\\n\\n' }}\n{% if message['content'] is string %}\n{{ message['content'] }}\n{% else %}\n{% for content in message['content'] | sort(attribute=\"type\") %}\n{% if content['type'] == 'image' %}\n{{ '<|image|>' }}\n{% elif content['type'] == 'text' %}\n{{ content['text'] }}\n{% endif %}\n{% endfor %}\n{% endif %}\n{{ '<|eot_id|>' }}\n{% endfor %}\n{% if add_generation_prompt %}\n{{ '<|start_header_id|>assistant<|end_header_id|>\\n\\n' }}\n{% endif %}", + "stop": [ + "<|eot_id|>", + "<|eom_id|>" + ], + "bos_token": "<|begin_of_text|>", + "eos_token": "<|eot_id|>" + }, + "pricing": { + "hourly": 0, + "input": 1.2, + "output": 1.2, + "base": 0, + "finetune": 0 + } + }, + { + "id": "meta-llama/Meta-Llama-3-8B-Instruct-Lite", + "object": "model", + "created": 0, + "type": "chat", + "running": false, + "display_name": "Meta Llama 3 8B Instruct Lite", + "organization": "Meta", + "link": "https://huggingface.co/meta-llama/Meta-Llama-3-8B-Instruct", + "license": "Llama-3 (Other)", + "context_length": 8192, + "config": { + "chat_template": "{% set loop_messages = messages %}{% for message in loop_messages %}{% set content = '<|start_header_id|>' + message['role'] + '<|end_header_id|>\n\n'+ message['content'] | trim + '<|eot_id|>' %}{% if loop.index0 == 0 %}{% set content = 
bos_token + content %}{% endif %}{{ content }}{% endfor %}{{ '<|start_header_id|>assistant<|end_header_id|>\n\n' }}", + "stop": [ + "<|eot_id|>" + ], + "bos_token": "<|begin_of_text|>", + "eos_token": "<|end_of_text|>", + "max_output_length": 8192 + }, + "pricing": { + "hourly": 0, + "input": 0.1, + "output": 0.1, + "base": 0, + "finetune": 0 + } + }, + { + "id": "black-forest-labs/FLUX.1-schnell", + "object": "model", + "created": 0, + "type": "image", + "running": false, + "display_name": "FLUX.1 Schnell", + "organization": "Black Forest Labs", + "link": "https://huggingface.co/black-forest-labs/FLUX.1-schnell", + "config": { + "chat_template": null, + "stop": [], + "bos_token": null, + "eos_token": null + }, + "pricing": { + "hourly": 0, + "input": 0, + "output": 0, + "base": 0, + "finetune": 0 + } + }, + { + "id": "mistralai/Mixtral-8x7B-Instruct-v0.1", + "object": "model", + "created": 1702342468, + "type": "chat", + "running": false, + "display_name": "Mixtral-8x7B Instruct v0.1", + "organization": "mistralai", + "link": "https://huggingface.co/mistralai/Mixtral-8x7B-Instruct-v0.1", + "license": "apache-2.0", + "context_length": 32768, + "config": { + "chat_template": "{% if messages[0]['role'] == 'system' %}{% set loop_messages = messages[1:] %}{% set system_message = messages[0]['content'] %}{% else %}{% set loop_messages = messages %}{% set system_message = false %}{% endif %}{% for message in loop_messages %}{% if loop.index0 == 0 and system_message != false %}{% set content = '<>\\n' + system_message + '\\n<>\\n\\n' + message['content'] %}{% else %}{% set content = message['content'] %}{% endif %}{% if message['role'] == 'user' or message['role'] == 'tool' %}{{ bos_token + '[INST] ' + content + ' [/INST]' }}{% elif message['role'] == 'system' %}{{ '<>\\n' + content + '\\n<>\\n\\n' }}{% elif message['role'] == 'assistant' %}{{ ' ' + content + ' ' + eos_token }}{% endif %}{% endfor %}", + "stop": [ + "[/INST]", + "" + ], + "bos_token": "", + "eos_token": 
"" + }, + "pricing": { + "hourly": 0, + "input": 0.6, + "output": 0.6, + "base": 0, + "finetune": 0 + } + }, + { + "id": "meta-llama/Llama-3-70b-chat-hf", + "object": "model", + "created": 1713429236, + "type": "chat", + "running": false, + "display_name": "Meta Llama 3 70B Instruct Reference", + "organization": "Meta", + "link": "https://huggingface.co/meta-llama/Meta-Llama-3-70B-Instruct", + "license": "Llama-3 (Other)", + "context_length": 8192, + "config": { + "chat_template": "{% set loop_messages = messages %}{% for message in loop_messages %}{% set content = '<|start_header_id|>' + message['role'] + '<|end_header_id|>\n\n'+ message['content'] | trim + '<|eot_id|>' %}{% if loop.index0 == 0 %}{% set content = bos_token + content %}{% endif %}{{ content }}{% endfor %}{{ '<|start_header_id|>assistant<|end_header_id|>\n\n' }}", + "stop": [ + "<|eot_id|>" + ], + "bos_token": "<|begin_of_text|>", + "eos_token": "<|end_of_text|>" + }, + "pricing": { + "hourly": 0, + "input": 0.88, + "output": 0.88, + "base": 0, + "finetune": 0 + } + }, + { + "id": "mistralai/Mistral-7B-Instruct-v0.3", + "object": "model", + "created": 1716406261, + "type": "chat", + "running": false, + "display_name": "Mistral (7B) Instruct v0.3", + "organization": "mistralai", + "link": "https://huggingface.co/api/models/mistralai/Mistral-7B-Instruct-v0.3", + "license": "apache-2.0", + "context_length": 32768, + "config": { + "chat_template": "{% if messages[0]['role'] == 'system' %}{% set loop_messages = messages[1:] %}{% set system_message = messages[0]['content'] %}{% else %}{% set loop_messages = messages %}{% set system_message = false %}{% endif %}{% for message in loop_messages %}{% if loop.index0 == 0 and system_message != false %}{% set content = '<>\\n' + system_message + '\\n<>\\n\\n' + message['content'] %}{% else %}{% set content = message['content'] %}{% endif %}{% if message['role'] == 'user' or message['role'] == 'tool' %}{{ bos_token + '[INST] ' + content + ' [/INST]' }}{% elif 
message['role'] == 'system' %}{{ '<>\\n' + content + '\\n<>\\n\\n' }}{% elif message['role'] == 'assistant' %}{{ ' ' + content + ' ' + eos_token }}{% endif %}{% endfor %}", + "stop": [ + "" + ], + "bos_token": "", + "eos_token": "" + }, + "pricing": { + "hourly": 0, + "input": 0.2, + "output": 0.2, + "base": 0, + "finetune": 0 + } + }, + { + "id": "Salesforce/Llama-Rank-V1", + "object": "model", + "created": 1723745254, + "type": "rerank", + "running": false, + "display_name": "Salesforce Llama Rank V1 (8B)", + "organization": "salesforce", + "license": "llama3", + "context_length": 8192, + "config": { + "chat_template": "{% set loop_messages = messages %}{% for message in loop_messages %}{% set content = '<|start_header_id|>' + message['role'] + '<|end_header_id|>\n\n'+ message['content'] | trim + '<|eot_id|>' %}{% if loop.index0 == 0 %}{% set content = bos_token + content %}{% endif %}{{ content }}{% endfor %}{% if add_generation_prompt %}{{ '<|start_header_id|>assistant<|end_header_id|>\n\nAfter carefully reading the query, document, and guidelines, I have determined that the relevance score is: ' }}{% endif %}", + "stop": [ + "<|eot_id|>" + ], + "bos_token": "<|begin_of_text|>", + "eos_token": "<|eot_id|>" + }, + "pricing": { + "hourly": 0, + "input": 0.1, + "output": 0.1, + "base": 0, + "finetune": 0 + } + }, + { + "id": "nvidia/Llama-3.1-Nemotron-70B-Instruct-HF", + "object": "model", + "created": 1731110984, + "type": "chat", + "running": false, + "display_name": "Llama 3.1 Nemotron 70B Instruct HF", + "organization": "nvidia", + "link": "https://huggingface.co/nvidia/Llama-3.1-Nemotron-70B-Instruct-HF", + "license": "llama3.1", + "context_length": 32768, + "config": { + "chat_template": "{{- bos_token }}\n{%- if custom_tools is defined %}\n {%- set tools = custom_tools %}\n{%- endif %}\n{%- if not tools_in_user_message is defined %}\n {%- set tools_in_user_message = true %}\n{%- endif %}\n{%- if not date_string is defined %}\n {%- set date_string = \"26 Jul 
2024\" %}\n{%- endif %}\n{%- if not tools is defined %}\n {%- set tools = none %}\n{%- endif %}\n\n{#- This block extracts the system message, so we can slot it into the right place. #}\n{%- if messages[0]['role'] == 'system' %}\n {%- set system_message = messages[0]['content']|trim %}\n {%- set messages = messages[1:] %}\n{%- else %}\n {%- set system_message = \"\" %}\n{%- endif %}\n\n{#- System message + builtin tools #}\n{{- \"<|start_header_id|>system<|end_header_id|>\\n\\n\" }}\n{%- if builtin_tools is defined or tools is not none %}\n {{- \"Environment: ipython\\n\" }}\n{%- endif %}\n{%- if builtin_tools is defined %}\n {{- \"Tools: \" + builtin_tools | reject('equalto', 'code_interpreter') | join(\", \") + \"\\n\\n\"}}\n{%- endif %}\n\n{%- if tools is not none and not tools_in_user_message %}\n {{- \"You have access to the following functions. To call a function, please respond with JSON for a function call.\" }}\n {{- 'Respond in the format {\"name\": function name, \"parameters\": dictionary of argument name and its value}.' 
}}\n {{- \"Do not use variables.\\n\\n\" }}\n {%- for t in tools %}\n {{- t | tojson(indent=4) }}\n {{- \"\\n\\n\" }}\n {%- endfor %}\n{%- endif %}\n{{- system_message }}\n{{- \"<|eot_id|>\" }}\n\n{#- Custom tools are passed in a user message with some extra guidance #}\n{%- if tools_in_user_message and not tools is none %}\n {#- Extract the first user message so we can plug it in here #}\n {%- if messages | length != 0 %}\n {%- set first_user_message = messages[0]['content']|trim %}\n {%- set messages = messages[1:] %}\n {%- else %}\n {{- raise_exception(\"Cannot put tools in the first user message when there's no first user message!\") }}\n{%- endif %}\n {{- '<|start_header_id|>user<|end_header_id|>\\n\\n' -}}\n {{- \"Given the following functions, please respond with a JSON for a function call \" }}\n {{- \"with its proper arguments that best answers the given prompt.\\n\\n\" }}\n {{- 'Respond in the format {\"name\": function name, \"parameters\": dictionary of argument name and its value}.' 
}}\n {{- \"Do not use variables.\\n\\n\" }}\n {%- for t in tools %}\n {{- t | tojson(indent=4) }}\n {{- \"\\n\\n\" }}\n {%- endfor %}\n {{- first_user_message + \"<|eot_id|>\"}}\n{%- endif %}\n\n{%- for message in messages %}\n {%- if not (message.role == 'ipython' or message.role == 'tool' or 'tool_calls' in message) %}\n {{- '<|start_header_id|>' + message['role'] + '<|end_header_id|>\\n\\n'+ message['content'] | trim + '<|eot_id|>' }}\n {%- elif 'tool_calls' in message %}\n {%- if not message.tool_calls|length == 1 %}\n {{- raise_exception(\"This model only supports single tool-calls at once!\") }}\n {%- endif %}\n {%- set tool_call = message.tool_calls[0].function %}\n {%- if builtin_tools is defined and tool_call.name in builtin_tools %}\n {{- '<|start_header_id|>assistant<|end_header_id|>\\n\\n' -}}\n {{- \"<|python_tag|>\" + tool_call.name + \".call(\" }}\n {%- for arg_name, arg_val in tool_call.arguments | items %}\n {{- arg_name + '=\"' + arg_val + '\"' }}\n {%- if not loop.last %}\n {{- \", \" }}\n {%- endif %}\n {%- endfor %}\n {{- \")\" }}\n {%- else %}\n {{- '<|start_header_id|>assistant<|end_header_id|>\\n\\n' -}}\n {{- '{\"name\": \"' + tool_call.name + '\", ' }}\n {{- '\"parameters\": ' }}\n {{- tool_call.arguments | tojson }}\n {{- \"}\" }}\n {%- endif %}\n {%- if builtin_tools is defined %}\n {#- This means we're in ipython mode #}\n {{- \"<|eom_id|>\" }}\n {%- else %}\n {{- \"<|eot_id|>\" }}\n {%- endif %}\n {%- elif message.role == \"tool\" or message.role == \"ipython\" %}\n {{- \"<|start_header_id|>ipython<|end_header_id|>\\n\\n\" }}\n {%- if message.content is mapping or message.content is iterable %}\n {{- message.content | tojson }}\n {%- else %}\n {{- message.content }}\n {%- endif %}\n {{- \"<|eot_id|>\" }}\n {%- endif %}\n{%- endfor %}\n{%- if add_generation_prompt %}\n {{- '<|start_header_id|>assistant<|end_header_id|>\\n\\n' }}\n{%- endif %}\n", + "stop": [ + "<|eot_id|>", + "<|eom_id|>" + ], + "bos_token": "<|begin_of_text|>", + 
"eos_token": "<|eot_id|>" + }, + "pricing": { + "hourly": 0, + "input": 0.88, + "output": 0.88, + "base": 0, + "finetune": 0 + } + }, + { + "id": "meta-llama/Llama-Vision-Free", + "object": "model", + "created": 1727236346, + "type": "chat", + "running": false, + "display_name": "Meta Llama Vision Free", + "organization": "Meta", + "link": "https://huggingface.co/meta-llama/Llama-3.2-11B-Vision-Instruct", + "license": "llama", + "context_length": 131072, + "config": { + "chat_template": "{% for message in messages %}\n{% if loop.index0 == 0 %}{{ bos_token }}{% endif %}\n{{ '<|start_header_id|>' + message['role'] + '<|end_header_id|>\\n\\n' }}\n{% if message['content'] is string %}\n{{ message['content'] }}\n{% else %}\n{% for content in message['content'] | sort(attribute=\"type\") %}\n{% if content['type'] == 'image' %}\n{{ '<|image|>' }}\n{% elif content['type'] == 'text' %}\n{{ content['text'] }}\n{% endif %}\n{% endfor %}\n{% endif %}\n{{ '<|eot_id|>' }}\n{% endfor %}\n{% if add_generation_prompt %}\n{{ '<|start_header_id|>assistant<|end_header_id|>\\n\\n' }}\n{% endif %}", + "stop": [ + "<|eot_id|>", + "<|eom_id|>" + ], + "bos_token": "<|begin_of_text|>", + "eos_token": "<|eot_id|>" + }, + "pricing": { + "hourly": 0, + "input": 0, + "output": 0, + "base": 0, + "finetune": 0 + } + }, + { + "id": "meta-llama/Llama-Guard-3-11B-Vision-Turbo", + "object": "model", + "created": 1727242489, + "type": "moderation", + "running": false, + "display_name": "Meta Llama Guard 3 11B Vision Turbo", + "organization": "Meta", + "link": "https://huggingface.co/meta-llama/Llama-Guard-3-11B-Vision", + "license": "llama", + "context_length": 131072, + "config": { + "chat_template": "{%- if messages|length % 2 == 0 -%}\n {%- set role = 'Agent' -%}\n{%- else -%}\n {%- set role = 'User' -%}\n{%- endif -%}\n{%- if categories is not defined -%}\n {%- \n set categories = ({\n \"S1\": \"Violent Crimes.\",\n \"S2\": \"Non-Violent Crimes.\",\n \"S3\": \"Sex Crimes.\",\n \"S4\": \"Child 
Exploitation.\",\n \"S5\": \"Defamation.\",\n \"S6\": \"Specialized Advice.\",\n \"S7\": \"Privacy.\",\n \"S8\": \"Intellectual Property.\",\n \"S9\": \"Indiscriminate Weapons.\",\n \"S10\": \"Hate.\",\n \"S11\": \"Self-Harm.\",\n \"S12\": \"Sexual Content.\",\n \"S13\": \"Elections.\",\n })\n -%}\n{%- endif -%}\n\n{{- \"<|begin_of_text|><|start_header_id|>user<|end_header_id|>\\n\\n\" -}}\n{%- set ns = namespace(contains_image=false) -%}\n{%- for message in messages -%}\n {%- for content in message['content'] | selectattr('type', 'equalto', 'image') -%}\n {%- set ns.contains_image = true -%}\n {%- endfor -%}\n{%- endfor -%}\n{%- if ns.contains_image -%}\n{{- \"<|image|> \"-}}\n{%- endif -%}\n{{- \"Task: Check if there is unsafe content in '\" + role + \"' messages in conversations according our safety policy with the below categories.\\n\\n\" -}}\n{{- \"\\n\"-}}\n\n{%- for key in categories -%}\n {%- if key not in excluded_category_keys -%}\n {{-key + \": \" + categories[key] + \"\\n\" -}}\n {%- endif -%}\n{%- endfor -%}\n\n{{- \"\\n\\n\"-}}\n{{- \"\\n\\n\"-}}\n{%- for message in messages -%}\n {%- if (message['role'] == 'user') != (loop.index0 % 2 == 0) -%}\n {{ raise_exception('Conversation roles must alternate user/assistant/user/assistant/...')-}}\n {%- endif -%}\n {%- set content = message['content'] -%}\n {%- if message['role'] == 'user' -%}\n {%- set role = 'User' -%}\n {%- elif message['role'] == 'assistant' -%}\n {%- set role = 'Agent' -%}\n {%- endif -%}\n {%- for content in message['content'] | selectattr('type', 'equalto', 'text') -%}\n{{- role + ': ' + content['text'] | trim + '\\n\\n'-}}\n {%- endfor -%}\n{%- endfor -%}\n{{- \"\\n\\n\"-}}\n{{- \"Provide your safety assessment for ONLY THE LAST \" + role + \" message in the above conversation:\\n\"-}}\n{{- \" - First line must read 'safe' or 'unsafe'.\\n\"-}}\n{{- \" - If unsafe, a second line must include a comma-separated list of violated categories. 
<|eot_id|><|start_header_id|>assistant<|end_header_id|>\"-}}", + "stop": [ + "<|eot_id|>", + "<|eom_id|>" + ], + "bos_token": "<|begin_of_text|>", + "eos_token": "<|eot_id|>" + }, + "pricing": { + "hourly": 0, + "input": 0.18000000000000002, + "output": 0.18000000000000002, + "base": 0, + "finetune": 0 + } + }, + { + "id": "meta-llama/Llama-3.2-3B-Instruct-Turbo", + "object": "model", + "created": 1727229064, + "type": "chat", + "running": false, + "display_name": "Meta Llama 3.2 3B Instruct Turbo", + "organization": "Meta", + "link": "https://huggingface.co/meta-llama/Llama-3.2-3B-Instruct", + "license": "Llama-3.1 (Other)", + "context_length": 131072, + "config": { + "chat_template": "{{- bos_token }}\n{%- if custom_tools is defined %}\n {%- set tools = custom_tools %}\n{%- endif %}\n{%- if not tools_in_user_message is defined %}\n {%- set tools_in_user_message = true %}\n{%- endif %}\n{%- if not date_string is defined %}\n {%- if strftime_now is defined %}\n {%- set date_string = strftime_now(\"%d %b %Y\") %}\n {%- else %}\n {%- set date_string = \"26 Jul 2024\" %}\n {%- endif %}\n{%- endif %}\n{%- if not tools is defined %}\n {%- set tools = none %}\n{%- endif %}\n\n{#- This block extracts the system message, so we can slot it into the right place. #}\n{%- if messages[0]['role'] == 'system' %}\n {%- set system_message = messages[0]['content']|trim %}\n {%- set messages = messages[1:] %}\n{%- else %}\n {%- set system_message = \"\" %}\n{%- endif %}\n\n{#- System message #}\n{{- \"<|start_header_id|>system<|end_header_id|>\\n\\n\" }}\n{%- if tools is not none %}\n {{- \"Environment: ipython\\n\" }}\n{%- endif %}\n{{- \"Cutting Knowledge Date: December 2023\\n\" }}\n{{- \"Today Date: \" + date_string + \"\\n\\n\" }}\n{%- if tools is not none and not tools_in_user_message %}\n {{- \"You have access to the following functions. 
To call a function, please respond with JSON for a function call.\" }}\n {{- 'Respond in the format {\"name\": function name, \"parameters\": dictionary of argument name and its value}.' }}\n {{- \"Do not use variables.\\n\\n\" }}\n {%- for t in tools %}\n {{- t | tojson(indent=4) }}\n {{- \"\\n\\n\" }}\n {%- endfor %}\n{%- endif %}\n{{- system_message }}\n{{- \"<|eot_id|>\" }}\n\n{#- Custom tools are passed in a user message with some extra guidance #}\n{%- if tools_in_user_message and not tools is none %}\n {#- Extract the first user message so we can plug it in here #}\n {%- if messages | length != 0 %}\n {%- set first_user_message = messages[0]['content']|trim %}\n {%- set messages = messages[1:] %}\n {%- else %}\n {{- raise_exception(\"Cannot put tools in the first user message when there's no first user message!\") }}\n{%- endif %}\n {{- '<|start_header_id|>user<|end_header_id|>\\n\\n' -}}\n {{- \"Given the following functions, please respond with a JSON for a function call \" }}\n {{- \"with its proper arguments that best answers the given prompt.\\n\\n\" }}\n {{- 'Respond in the format {\"name\": function name, \"parameters\": dictionary of argument name and its value}.' 
}}\n {{- \"Do not use variables.\\n\\n\" }}\n {%- for t in tools %}\n {{- t | tojson(indent=4) }}\n {{- \"\\n\\n\" }}\n {%- endfor %}\n {{- first_user_message + \"<|eot_id|>\"}}\n{%- endif %}\n\n{%- for message in messages %}\n {%- if not (message.role == 'ipython' or message.role == 'tool' or 'tool_calls' in message) %}\n {{- '<|start_header_id|>' + message['role'] + '<|end_header_id|>\\n\\n'+ message['content'] | trim + '<|eot_id|>' }}\n {%- elif 'tool_calls' in message %}\n {%- if not message.tool_calls|length == 1 %}\n {{- raise_exception(\"This model only supports single tool-calls at once!\") }}\n {%- endif %}\n {%- set tool_call = message.tool_calls[0].function %}\n {{- '<|start_header_id|>assistant<|end_header_id|>\\n\\n' -}}\n {{- '{\"name\": \"' + tool_call.name + '\", ' }}\n {{- '\"parameters\": ' }}\n {{- tool_call.arguments | tojson }}\n {{- \"}\" }}\n {{- \"<|eot_id|>\" }}\n {%- elif message.role == \"tool\" or message.role == \"ipython\" %}\n {{- \"<|start_header_id|>ipython<|end_header_id|>\\n\\n\" }}\n {%- if message.content is mapping or message.content is iterable %}\n {{- message.content | tojson }}\n {%- else %}\n {{- message.content }}\n {%- endif %}\n {{- \"<|eot_id|>\" }}\n {%- endif %}\n{%- endfor %}\n{%- if add_generation_prompt %}\n {{- '<|start_header_id|>assistant<|end_header_id|>\\n\\n' }}\n{%- endif %}\n", + "stop": [ + "<|eot_id|>", + "<|eom_id|>" + ], + "bos_token": "<|begin_of_text|>", + "eos_token": "<|eot_id|>" + }, + "pricing": { + "hourly": 0, + "input": 0.060000000000000005, + "output": 0.060000000000000005, + "base": 0, + "finetune": 0 + } + }, + { + "id": "black-forest-labs/FLUX.1-dev", + "object": "model", + "created": 1732138026, + "type": "image", + "running": false, + "display_name": "FLUX.1 [dev]", + "organization": "Black Forest Labs", + "link": "https://huggingface.co/black-forest-labs/FLUX.1-dev", + "config": { + "chat_template": null, + "stop": [], + "bos_token": null, + "eos_token": null + }, + "pricing": { + 
"hourly": 0, + "input": 0, + "output": 0, + "base": 0, + "finetune": 0 + } + }, + { + "id": "Qwen/Qwen2.5-72B-Instruct-Turbo", + "object": "model", + "created": 1728633510, + "type": "chat", + "running": false, + "display_name": "Qwen2.5 72B Instruct Turbo", + "organization": "Qwen", + "link": "https://huggingface.co/Qwen/Qwen2.5-72B-Instruct", + "license": "Qwen", + "context_length": 131072, + "config": { + "chat_template": "{%- if tools %}\n {{- '<|im_start|>system\\n' }}\n {%- if messages[0]['role'] == 'system' %}\n {{- messages[0]['content'] }}\n {%- else %}\n {{- 'You are Qwen, created by Alibaba Cloud. You are a helpful assistant.' }}\n {%- endif %}\n {{- \"\\n\\n# Tools\\n\\nYou may call one or more functions to assist with the user query.\\n\\nYou are provided with function signatures within XML tags:\\n\" }}\n {%- for tool in tools %}\n {{- \"\\n\" }}\n {{- tool | tojson }}\n {%- endfor %}\n {{- \"\\n\\n\\nFor each function call, return a json object with function name and arguments within XML tags:\\n\\n{\\\"name\\\": , \\\"arguments\\\": }\\n<|im_end|>\\n\" }}\n{%- else %}\n {%- if messages[0]['role'] == 'system' %}\n {{- '<|im_start|>system\\n' + messages[0]['content'] + '<|im_end|>\\n' }}\n {%- else %}\n {{- '<|im_start|>system\\nYou are Qwen, created by Alibaba Cloud. 
You are a helpful assistant.<|im_end|>\\n' }}\n {%- endif %}\n{%- endif %}\n{%- for message in messages %}\n {%- if (message.role == \"user\") or (message.role == \"system\" and not loop.first) or (message.role == \"assistant\" and not message.tool_calls) %}\n {{- '<|im_start|>' + message.role + '\\n' + message.content + '<|im_end|>' + '\\n' }}\n {%- elif message.role == \"assistant\" %}\n {{- '<|im_start|>' + message.role }}\n {%- if message.content %}\n {{- '\\n' + message.content }}\n {%- endif %}\n {%- for tool_call in message.tool_calls %}\n {%- if tool_call.function is defined %}\n {%- set tool_call = tool_call.function %}\n {%- endif %}\n {{- '\\n\\n{\"name\": \"' }}\n {{- tool_call.name }}\n {{- '\", \"arguments\": ' }}\n {{- tool_call.arguments | tojson }}\n {{- '}\\n' }}\n {%- endfor %}\n {{- '<|im_end|>\\n' }}\n {%- elif message.role == \"tool\" %}\n {%- if (loop.index0 == 0) or (messages[loop.index0 - 1].role != \"tool\") %}\n {{- '<|im_start|>user' }}\n {%- endif %}\n {{- '\\n\\n' }}\n {{- message.content }}\n {{- '\\n' }}\n {%- if loop.last or (messages[loop.index0 + 1].role != \"tool\") %}\n {{- '<|im_end|>\\n' }}\n {%- endif %}\n {%- endif %}\n{%- endfor %}\n{%- if add_generation_prompt %}\n {{- '<|im_start|>assistant\\n' }}\n{%- endif %}\n", + "stop": [ + "<|im_end|>" + ], + "bos_token": "<|endoftext|>", + "eos_token": "<|im_end|>" + }, + "pricing": { + "hourly": 0, + "input": 1.2, + "output": 1.2, + "base": 0, + "finetune": 0 + } + }, + { + "id": "arcee-ai/virtuoso-medium-v2", + "object": "model", + "created": 1743430178, + "type": "chat", + "running": false, + "display_name": "Arcee AI Virtuoso-Medium", + "organization": "Arcee AI", + "link": "https://huggingface.co/api/models/togethercomputer/Virtuoso-Medium-v2", + "license": "apache-2.0", + "context_length": 131072, + "config": { + "chat_template": "{%- if tools %}\n {{- '<|im_start|>system\\n' }}\n {%- if messages[0]['role'] == 'system' %}\n {{- messages[0]['content'] }}\n {%- else %}\n {{- 
'You are Virtuoso Medium, created by Arcee AI. You are a helpful assistant.' }}\n {%- endif %}\n {{- \"\\n\\n# Tools\\n\\nYou may call one or more functions to assist with the user query.\\n\\nYou are provided with function signatures within XML tags:\\n\" }}\n {%- for tool in tools %}\n {{- \"\\n\" }}\n {{- tool | tojson }}\n {%- endfor %}\n {{- \"\\n\\n\\nFor each function call, return a json object with function name and arguments within XML tags:\\n\\n{\\\"name\\\": , \\\"arguments\\\": }\\n<|im_end|>\\n\" }}\n{%- else %}\n {%- if messages[0]['role'] == 'system' %}\n {{- '<|im_start|>system\\n' + messages[0]['content'] + '<|im_end|>\\n' }}\n {%- else %}\n {{- '<|im_start|>system\\nYou are Qwen, created by Alibaba Cloud. You are a helpful assistant.<|im_end|>\\n' }}\n {%- endif %}\n{%- endif %}\n{%- for message in messages %}\n {%- if (message.role == \"user\") or (message.role == \"system\" and not loop.first) or (message.role == \"assistant\" and not message.tool_calls) %}\n {{- '<|im_start|>' + message.role + '\\n' + message.content + '<|im_end|>' + '\\n' }}\n {%- elif message.role == \"assistant\" %}\n {{- '<|im_start|>' + message.role }}\n {%- if message.content %}\n {{- '\\n' + message.content }}\n {%- endif %}\n {%- for tool_call in message.tool_calls %}\n {%- if tool_call.function is defined %}\n {%- set tool_call = tool_call.function %}\n {%- endif %}\n {{- '\\n\\n{\"name\": \"' }}\n {{- tool_call.name }}\n {{- '\", \"arguments\": ' }}\n {{- tool_call.arguments | tojson }}\n {{- '}\\n' }}\n {%- endfor %}\n {{- '<|im_end|>\\n' }}\n {%- elif message.role == \"tool\" %}\n {%- if (loop.index0 == 0) or (messages[loop.index0 - 1].role != \"tool\") %}\n {{- '<|im_start|>user' }}\n {%- endif %}\n {{- '\\n\\n' }}\n {{- message.content }}\n {{- '\\n' }}\n {%- if loop.last or (messages[loop.index0 + 1].role != \"tool\") %}\n {{- '<|im_end|>\\n' }}\n {%- endif %}\n {%- endif %}\n{%- endfor %}\n{%- if add_generation_prompt %}\n {{- '<|im_start|>assistant\\n' }}\n{%- 
endif %}\n", + "stop": [ + "<|im_end|>" + ], + "bos_token": "<|endoftext|>", + "eos_token": "<|im_end|>" + }, + "pricing": { + "hourly": 0, + "input": 0.5, + "output": 0.8, + "base": 0, + "finetune": 0 + } + }, + { + "id": "arcee-ai/virtuoso-large", + "object": "model", + "created": 1743515466, + "type": "chat", + "running": false, + "display_name": "Arcee AI Virtuoso-Large", + "organization": "Arcee AI", + "link": "https://huggingface.co/api/models/togethercomputer/arcee-ai-Virtuoso-Large", + "context_length": 131072, + "config": { + "chat_template": "{%- if tools %}\n {{- '<|im_start|>system\\n' }}\n {%- if messages[0]['role'] == 'system' %}\n {{- messages[0]['content'] }}\n {%- else %}\n {{- 'You are Virtuoso Large, created by Arcee AI. You are a helpful assistant.' }}\n {%- endif %}\n {{- \"\\n\\n# Tools\\n\\nYou may call one or more functions to assist with the user query.\\n\\nYou are provided with function signatures within XML tags:\\n\" }}\n {%- for tool in tools %}\n {{- \"\\n\" }}\n {{- tool | tojson }}\n {%- endfor %}\n {{- \"\\n\\n\\nFor each function call, return a json object with function name and arguments within XML tags:\\n\\n{\\\"name\\\": , \\\"arguments\\\": }\\n<|im_end|>\\n\" }}\n{%- else %}\n {%- if messages[0]['role'] == 'system' %}\n {{- '<|im_start|>system\\n' + messages[0]['content'] + '<|im_end|>\\n' }}\n {%- else %}\n {{- '<|im_start|>system\\nYou are Qwen, created by Alibaba Cloud. 
You are a helpful assistant.<|im_end|>\\n' }}\n {%- endif %}\n{%- endif %}\n{%- for message in messages %}\n {%- if (message.role == \"user\") or (message.role == \"system\" and not loop.first) or (message.role == \"assistant\" and not message.tool_calls) %}\n {{- '<|im_start|>' + message.role + '\\n' + message.content + '<|im_end|>' + '\\n' }}\n {%- elif message.role == \"assistant\" %}\n {{- '<|im_start|>' + message.role }}\n {%- if message.content %}\n {{- '\\n' + message.content }}\n {%- endif %}\n {%- for tool_call in message.tool_calls %}\n {%- if tool_call.function is defined %}\n {%- set tool_call = tool_call.function %}\n {%- endif %}\n {{- '\\n\\n{\"name\": \"' }}\n {{- tool_call.name }}\n {{- '\", \"arguments\": ' }}\n {{- tool_call.arguments | tojson }}\n {{- '}\\n' }}\n {%- endfor %}\n {{- '<|im_end|>\\n' }}\n {%- elif message.role == \"tool\" %}\n {%- if (loop.index0 == 0) or (messages[loop.index0 - 1].role != \"tool\") %}\n {{- '<|im_start|>user' }}\n {%- endif %}\n {{- '\\n\\n' }}\n {{- message.content }}\n {{- '\\n' }}\n {%- if loop.last or (messages[loop.index0 + 1].role != \"tool\") %}\n {{- '<|im_end|>\\n' }}\n {%- endif %}\n {%- endif %}\n{%- endfor %}\n{%- if add_generation_prompt %}\n {{- '<|im_start|>assistant\\n' }}\n{%- endif %}\n", + "stop": [ + "<|im_end|>" + ], + "bos_token": "<|endoftext|>", + "eos_token": "<|im_end|>" + }, + "pricing": { + "hourly": 0, + "input": 0.75, + "output": 1.2, + "base": 0, + "finetune": 0 + } + }, + { + "id": "arcee-ai/maestro-reasoning", + "object": "model", + "created": 1743527998, + "type": "chat", + "running": false, + "display_name": "Arcee AI Maestro", + "organization": "Arcee AI", + "link": "https://huggingface.co/api/models/togethercomputer/arcee-ai-maestro-32b-01", + "context_length": 131072, + "config": { + "chat_template": "{%- if tools %}\n {{- '<|im_start|>system\\n' }}\n {%- if messages[0]['role'] == 'system' %}\n {{- messages[0]['content'] }}\n {%- else %}\n {{- '' }}\n {%- endif %}\n {{- 
\"\\n\\n# Tools\\n\\nYou may call one or more functions to assist with the user query.\\n\\nYou are provided with function signatures within XML tags:\\n\" }}\n {%- for tool in tools %}\n {{- \"\\n\" }}\n {{- tool | tojson }}\n {%- endfor %}\n {{- \"\\n\\n\\nFor each function call, return a json object with function name and arguments within XML tags:\\n\\n{\\\"name\\\": , \\\"arguments\\\": }\\n<|im_end|>\\n\" }}\n{%- else %}\n {%- if messages[0]['role'] == 'system' %}\n {{- '<|im_start|>system\\n' + messages[0]['content'] + '<|im_end|>\\n' }}\n {%- endif %}\n{%- endif %}\n{%- for message in messages %}\n {%- if (message.role == \"user\") or (message.role == \"system\" and not loop.first) %}\n {{- '<|im_start|>' + message.role + '\\n' + message.content + '<|im_end|>' + '\\n' }}\n {%- elif message.role == \"assistant\" and not message.tool_calls %}\n {%- set content = message.content %}\n {%- if not loop.last %}\n {%- set content = message.content.split('')[-1].lstrip('\\n') %}\n {%- endif %}\n {{- '<|im_start|>' + message.role + '\\n' + content + '<|im_end|>' + '\\n' }}\n {%- elif message.role == \"assistant\" %}\n {%- set content = message.content %}\n {%- if not loop.last %}\n {%- set content = message.content.split('')[-1].lstrip('\\n') %}\n {%- endif %}\n {{- '<|im_start|>' + message.role }}\n {%- if message.content %}\n {{- '\\n' + content }}\n {%- endif %}\n {%- for tool_call in message.tool_calls %}\n {%- if tool_call.function is defined %}\n {%- set tool_call = tool_call.function %}\n {%- endif %}\n {{- '\\n\\n{\"name\": \"' }}\n {{- tool_call.name }}\n {{- '\", \"arguments\": ' }}\n {{- tool_call.arguments | tojson }}\n {{- '}\\n' }}\n {%- endfor %}\n {{- '<|im_end|>\\n' }}\n {%- elif message.role == \"tool\" %}\n {%- if (loop.index0 == 0) or (messages[loop.index0 - 1].role != \"tool\") %}\n {{- '<|im_start|>user' }}\n {%- endif %}\n {{- '\\n\\n' }}\n {{- message.content }}\n {{- '\\n' }}\n {%- if loop.last or (messages[loop.index0 + 1].role != \"tool\") 
%}\n {{- '<|im_end|>\\n' }}\n {%- endif %}\n {%- endif %}\n{%- endfor %}\n{%- if add_generation_prompt %}\n {{- '<|im_start|>assistant\\n\\n' }}\n{%- endif %}\n", + "stop": [ + "<|im_end|>" + ], + "bos_token": "<|endoftext|>", + "eos_token": "<|im_end|>" + }, + "pricing": { + "hourly": 0, + "input": 0.9, + "output": 3.3, + "base": 0, + "finetune": 0 + } + }, + { + "id": "arcee-ai/coder-large", + "object": "model", + "created": 1743543441, + "type": "chat", + "running": false, + "display_name": "Arcee AI Coder-Large", + "organization": "Arcee AI", + "link": "https://huggingface.co/api/models/togethercomputer/arcee-ai-coder-large", + "context_length": 32768, + "config": { + "chat_template": "{%- if tools %}\n {{- '<|im_start|>system\\n' }}\n {%- if messages[0]['role'] == 'system' %}\n {{- messages[0]['content'] }}\n {%- else %}\n {{- 'You are Coder Large, created by Arcee AI. You are a helpful coding assistant.' }}\n {%- endif %}\n {{- \"\\n\\n# Tools\\n\\nYou may call one or more functions to assist with the user query.\\n\\nYou are provided with function signatures within XML tags:\\n\" }}\n {%- for tool in tools %}\n {{- \"\\n\" }}\n {{- tool | tojson }}\n {%- endfor %}\n {{- \"\\n\\n\\nFor each function call, return a json object with function name and arguments within XML tags:\\n\\n{\\\"name\\\": , \\\"arguments\\\": }\\n<|im_end|>\\n\" }}\n{%- else %}\n {%- if messages[0]['role'] == 'system' %}\n {{- '<|im_start|>system\\n' + messages[0]['content'] + '<|im_end|>\\n' }}\n {%- else %}\n {{- '<|im_start|>system\\nYou are Qwen, created by Alibaba Cloud. 
You are a helpful assistant.<|im_end|>\\n' }}\n {%- endif %}\n{%- endif %}\n{%- for message in messages %}\n {%- if (message.role == \"user\") or (message.role == \"system\" and not loop.first) or (message.role == \"assistant\" and not message.tool_calls) %}\n {{- '<|im_start|>' + message.role + '\\n' + message.content + '<|im_end|>' + '\\n' }}\n {%- elif message.role == \"assistant\" %}\n {{- '<|im_start|>' + message.role }}\n {%- if message.content %}\n {{- '\\n' + message.content }}\n {%- endif %}\n {%- for tool_call in message.tool_calls %}\n {%- if tool_call.function is defined %}\n {%- set tool_call = tool_call.function %}\n {%- endif %}\n {{- '\\n\\n{\"name\": \"' }}\n {{- tool_call.name }}\n {{- '\", \"arguments\": ' }}\n {{- tool_call.arguments | tojson }}\n {{- '}\\n' }}\n {%- endfor %}\n {{- '<|im_end|>\\n' }}\n {%- elif message.role == \"tool\" %}\n {%- if (loop.index0 == 0) or (messages[loop.index0 - 1].role != \"tool\") %}\n {{- '<|im_start|>user' }}\n {%- endif %}\n {{- '\\n\\n' }}\n {{- message.content }}\n {{- '\\n' }}\n {%- if loop.last or (messages[loop.index0 + 1].role != \"tool\") %}\n {{- '<|im_end|>\\n' }}\n {%- endif %}\n {%- endif %}\n{%- endfor %}\n{%- if add_generation_prompt %}\n {{- '<|im_start|>assistant\\n' }}\n{%- endif %}\n", + "stop": [ + "<|im_end|>" + ], + "bos_token": "<|endoftext|>", + "eos_token": "<|im_end|>" + }, + "pricing": { + "hourly": 0, + "input": 0.5, + "output": 0.8, + "base": 0, + "finetune": 0 + } + }, + { + "id": "arcee_ai/arcee-spotlight", + "object": "model", + "created": 1743530644, + "type": "chat", + "running": false, + "display_name": "Arcee AI Spotlight", + "organization": "Arcee AI", + "link": "https://huggingface.co/api/models/togethercomputer/arcee-ai-spotlight-export", + "context_length": 131072, + "config": { + "chat_template": "{%- if tools %}\n {{- '<|im_start|>system\\n' }}\n {%- if messages[0]['role'] == 'system' %}\n {{- messages[0]['content'] }}\n {%- else %}\n {{- 'You are a helpful assistant.' 
}}\n {%- endif %}\n {{- \"\\n\\n# Tools\\n\\nYou may call one or more functions to assist with the user query.\\n\\nYou are provided with function signatures within XML tags:\\n\" }}\n {%- for tool in tools %}\n {{- \"\\n\" }}\n {{- tool | tojson }}\n {%- endfor %}\n {{- \"\\n\\n\\nFor each function call, return a json object with function name and arguments within XML tags:\\n\\n{\\\"name\\\": , \\\"arguments\\\": }\\n<|im_end|>\\n\" }}\n{%- else %}\n {%- if messages[0]['role'] == 'system' %}\n {{- '<|im_start|>system\\n' + messages[0]['content'] + '<|im_end|>\\n' }}\n {%- else %}\n {{- '<|im_start|>system\\nYou are a helpful assistant.<|im_end|>\\n' }}\n {%- endif %}\n{%- endif %}\n{%- for message in messages %}\n {%- if (message.role == \"user\") or (message.role == \"system\" and not loop.first) or (message.role == \"assistant\" and not message.tool_calls) %}\n {{- '<|im_start|>' + message.role + '\\n' + message.content + '<|im_end|>' + '\\n' }}\n {%- elif message.role == \"assistant\" %}\n {{- '<|im_start|>' + message.role }}\n {%- if message.content %}\n {{- '\\n' + message.content }}\n {%- endif %}\n {%- for tool_call in message.tool_calls %}\n {%- if tool_call.function is defined %}\n {%- set tool_call = tool_call.function %}\n {%- endif %}\n {{- '\\n\\n{\"name\": \"' }}\n {{- tool_call.name }}\n {{- '\", \"arguments\": ' }}\n {{- tool_call.arguments | tojson }}\n {{- '}\\n' }}\n {%- endfor %}\n {{- '<|im_end|>\\n' }}\n {%- elif message.role == \"tool\" %}\n {%- if (loop.index0 == 0) or (messages[loop.index0 - 1].role != \"tool\") %}\n {{- '<|im_start|>user' }}\n {%- endif %}\n {{- '\\n\\n' }}\n {{- message.content }}\n {{- '\\n' }}\n {%- if loop.last or (messages[loop.index0 + 1].role != \"tool\") %}\n {{- '<|im_end|>\\n' }}\n {%- endif %}\n {%- endif %}\n{%- endfor %}\n{%- if add_generation_prompt %}\n {{- '<|im_start|>assistant\\n' }}\n{%- endif %}\n", + "stop": [ + "<|im_end|>" + ], + "bos_token": "<|endoftext|>", + "eos_token": "<|im_end|>" + }, + 
"pricing": { + "hourly": 0, + "input": 0.18000000000000002, + "output": 0.18000000000000002, + "base": 0, + "finetune": 0 + } + }, + { + "id": "Qwen/QwQ-32B", + "object": "model", + "created": 1741207789, + "type": "chat", + "running": false, + "display_name": "Qwen QwQ-32B", + "organization": "Qwen", + "link": "https://huggingface.co/Qwen/QwQ-32B", + "license": "Qwen", + "context_length": 131072, + "config": { + "chat_template": "{%- if tools %}\n {{- '<|im_start|>system\\n' }}\n {%- if messages[0]['role'] == 'system' %}\n {{- messages[0]['content'] }}\n {%- else %}\n {{- 'You are a helpful and harmless assistant. You are Qwen developed by Alibaba. You should think step-by-step.' }}\n {%- endif %}\n {{- \"\\n\\n# Tools\\n\\nYou may call one or more functions to assist with the user query.\\n\\nYou are provided with function signatures within XML tags:\\n\" }}\n {%- for tool in tools %}\n {{- \"\\n\" }}\n {{- tool | tojson }}\n {%- endfor %}\n {{- \"\\n\\n\\nFor each function call, return a json object with function name and arguments within XML tags:\\n\\n{\\\"name\\\": , \\\"arguments\\\": }\\n<|im_end|>\\n\" }}\n{%- else %}\n {%- if messages[0]['role'] == 'system' %}\n {{- '<|im_start|>system\\n' + messages[0]['content'] + '<|im_end|>\\n' }}\n {%- else %}\n {{- '<|im_start|>system\\nYou are a helpful assistant.<|im_end|>\\n' }}\n {%- endif %}\n{%- endif %}\n{%- for message in messages %}\n {%- if (message.role == \"user\") or (message.role == \"system\" and not loop.first) or (message.role == \"assistant\" and not message.tool_calls) %}\n {{- '<|im_start|>' + message.role + '\\n' + message.content + '<|im_end|>' + '\\n' }}\n {%- elif message.role == \"assistant\" %}\n {{- '<|im_start|>' + message.role }}\n {%- if message.content %}\n {{- '\\n' + message.content }}\n {%- endif %}\n {%- for tool_call in message.tool_calls %}\n {%- if tool_call.function is defined %}\n {%- set tool_call = tool_call.function %}\n {%- endif %}\n {{- '\\n\\n{\"name\": \"' }}\n {{- 
tool_call.name }}\n {{- '\", \"arguments\": ' }}\n {{- tool_call.arguments | tojson }}\n {{- '}\\n' }}\n {%- endfor %}\n {{- '<|im_end|>\\n' }}\n {%- elif message.role == \"tool\" %}\n {%- if (loop.index0 == 0) or (messages[loop.index0 - 1].role != \"tool\") %}\n {{- '<|im_start|>user' }}\n {%- endif %}\n {{- '\\n\\n' }}\n {{- message.content }}\n {{- '\\n' }}\n {%- if loop.last or (messages[loop.index0 + 1].role != \"tool\") %}\n {{- '<|im_end|>\\n' }}\n {%- endif %}\n {%- endif %}\n{%- endfor %}\n{%- if add_generation_prompt %}\n {{- '<|im_start|>assistant\\n' }}\n{%- endif %}\n", + "stop": [ + "<|im_end|>", + "<|endoftext|>" + ], + "bos_token": null, + "eos_token": "<|im_end|>", + "max_output_length": 32768 + }, + "pricing": { + "hourly": 0, + "input": 1.2, + "output": 1.2, + "base": 0, + "finetune": 0 + } + }, + { + "id": "meta-llama/Llama-4-Scout-17B-16E-Instruct", + "object": "model", + "created": 1743878170, + "type": "chat", + "running": false, + "display_name": "Llama 4 Scout Instruct (17Bx16E)", + "organization": "Meta", + "link": "https://huggingface.co/meta-llama/Llama-4-Scout-17B-16E-Instruct", + "license": "llama4", + "context_length": 1048576, + "config": { + "chat_template": "{{- bos_token }}\n{%- if custom_tools is defined %}\n {%- set tools = custom_tools %}\n{%- endif %}\n{%- if not tools_in_user_message is defined %}\n {%- set tools_in_user_message = true %}\n{%- endif %}\n{%- if not date_string is defined %}\n {%- if strftime_now is defined %}\n {%- set date_string = strftime_now(\"%d %b %Y\") %}\n {%- else %}\n {%- set date_string = \"26 Jul 2024\" %}\n {%- endif %}\n{%- endif %}\n{%- if not tools is defined %}\n {%- set tools = none %}\n{%- endif %}\n\n{#- This block extracts the system message, so we can slot it into the right place. 
#}\n{%- if messages[0]['role'] == 'system' %}\n {%- if messages[0]['content'] is string %}\n {%- set system_message = messages[0]['content']|trim %}\n {%- else %}\n {#- FIXME: The processor requires an array, always. #}\n {%- set system_message = messages[0]['content'][0]['text']|trim %}\n {%- endif %}\n {%- set messages = messages[1:] %}\n {%- set user_supplied_system_message = true %}\n{%- else %}\n {%- set system_message = \"\" %}\n {%- set user_supplied_system_message = false %}\n{%- endif %}\n\n{#- System message if the user supplied one #}\n{%- if user_supplied_system_message %}\n {{- \"<|header_start|>system<|header_end|>\\n\\n\" }}\n {%- if tools is not none %}\n {{- \"Environment: ipython\\n\" }}\n {%- endif %}\n {%- if tools is not none and not tools_in_user_message %}\n {{- \"You have access to the following functions. To call a function, please respond with JSON for a function call.\" }}\n {{- 'Respond in the format {\"name\": function name, \"parameters\": dictionary of argument name and its value}.' 
}}\n {{- \"Do not use variables.\\n\\n\" }}\n {%- for t in tools %}\n {{- t | tojson(indent=4) }}\n {{- \"\\n\\n\" }}\n {%- endfor %}\n {%- endif %}\n {{- system_message }}\n {{- \"<|eot|>\" }}\n{%- endif %}\n\n{#- Custom tools are passed in a user message with some extra guidance #}\n{%- if tools_in_user_message and not tools is none %}\n {#- Extract the first user message so we can plug it in here #}\n {%- if messages | length != 0 %}\n {%- set first_content = messages[0]['content'] %}\n {%- set first_user_message = \"\" %}\n {%- if first_content is string %}\n {%- set first_user_message = first_content %}\n {%- else %}\n {%- for content in first_content %}\n {%- if content['type'] == 'image' %}\n {%- set first_user_message = first_user_message + '<|image|>' %}\n {%- elif content['type'] == 'text' %}\n {%- set first_user_message = first_user_message + content['text'] %}\n {%- endif %}\n {%- endfor %}\n {%- endif %}\n {%- set first_user_message = first_user_message | trim %}\n {%- set messages = messages[1:] %}\n {%- else %}\n {{- raise_exception(\"Cannot put tools in the first user message when there's no first user message!\") }}\n{%- endif %}\n {{- '<|header_start|>user<|header_end|>\\n\\n' -}}\n {{- \"Given the following functions, please respond with a JSON for a function call \" }}\n {{- \"with its proper arguments that best answers the given prompt.\\n\\n\" }}\n {{- 'Respond in the format {\"name\": function name, \"parameters\": dictionary of argument name and its value}.' 
}}\n {{- \"Do not use variables.\\n\\n\" }}\n {%- for t in tools %}\n {{- t | tojson(indent=4) }}\n {{- \"\\n\\n\" }}\n {%- endfor %}\n {{- first_user_message + \"<|eot|>\"}}\n{%- endif %}\n\n{%- for message in messages %}\n {%- if not (message.role == 'ipython' or message.role == 'tool' or 'tool_calls' in message) %}\n {{- '<|header_start|>' + message['role'] + '<|header_end|>\\n\\n' }}\n {%- if message['content'] is string %}\n {{- message['content'] }}\n {%- else %}\n {%- for content in message['content'] %}\n {%- if content['type'] == 'image' %}\n {{- '<|image|>' }}\n {%- elif content['type'] == 'text' %}\n {{- content['text'] }}\n {%- endif %}\n {%- endfor %}\n {%- endif %}\n {{- \"<|eot|>\" }}\n {%- elif 'tool_calls' in message and message.tool_calls|length > 0 %}\n {{- '<|header_start|>assistant<|header_end|>\\n\\n' -}}\n {{- '<|python_start|>' }}\n {%- if message['content'] is string %}\n {{- message['content'] }}\n {%- else %}\n {%- for content in message['content'] %}\n {%- if content['type'] == 'image' %}\n {{- '<|image|>' }}\n {%- elif content['type'] == 'text' %}\n {{- content['text'] }}\n {%- endif %}\n {%- endfor %}\n {%- endif %}\n {{- '<|python_end|>' }}\n {%- for tool_call in message.tool_calls %}\n {{- '{\"name\": \"' + tool_call.function.name + '\", ' }}\n {{- '\"parameters\": ' }}\n {{- tool_call.function.arguments | tojson }}\n {{- \"}\" }}\n {%- endfor %}\n {{- \"<|eot|>\" }}\n {%- elif message.role == \"tool\" or message.role == \"ipython\" %}\n {{- \"<|header_start|>ipython<|header_end|>\\n\\n\" }}\n {%- if message.content is mapping or message.content is iterable %}\n {{- message.content | tojson }}\n {%- else %}\n {{- message.content }}\n {%- endif %}\n {{- \"<|eot|>\" }}\n {%- endif %}\n{%- endfor %}\n{%- if add_generation_prompt %}\n {{- '<|header_start|>assistant<|header_end|>\\n\\n' }}\n{%- endif %}", + "stop": [ + "<|eot|>", + "<|eom|>" + ], + "bos_token": "<|begin_of_text|>", + "eos_token": "<|eot|>" + }, + "pricing": { + "hourly": 
0, + "input": 0.18000000000000002, + "output": 0.5900000000000001, + "base": 0, + "finetune": 0 + } + }, + { + "id": "meta-llama/Llama-2-70b-hf", + "object": "model", + "created": 1689720415, + "type": "language", + "running": false, + "display_name": "LLaMA-2 (70B)", + "organization": "Meta", + "link": "https://huggingface.co/api/models/meta-llama/Llama-2-70b-hf", + "license": "llama2", + "context_length": 4096, + "config": { + "chat_template": null, + "stop": [ + "" + ], + "bos_token": "", + "eos_token": "" + }, + "pricing": { + "hourly": 0, + "input": 0.9, + "output": 0.9, + "base": 0, + "finetune": 0 + } + }, + { + "id": "meta-llama/Meta-Llama-3.1-405B-Instruct-Turbo", + "object": "model", + "created": 1721698359, + "type": "chat", + "running": false, + "display_name": "Meta Llama 3.1 405B Instruct Turbo", + "organization": "Meta", + "link": "https://huggingface.co/meta-llama/Meta-Llama-3.1-405B-Instruct", + "license": "llama", + "context_length": 130815, + "config": { + "chat_template": "{{- bos_token }}\n{%- if custom_tools is defined %}\n {%- set tools = custom_tools %}\n{%- endif %}\n{%- if not tools_in_user_message is defined %}\n {%- set tools_in_user_message = true %}\n{%- endif %}\n{%- if not date_string is defined %}\n {%- set date_string = \"26 Jul 2024\" %}\n{%- endif %}\n{%- if not tools is defined %}\n {%- set tools = none %}\n{%- endif %}\n\n{#- This block extracts the system message, so we can slot it into the right place. 
#}\n{%- if messages[0]['role'] == 'system' %}\n {%- set system_message = messages[0]['content']|trim %}\n {%- set messages = messages[1:] %}\n{%- else %}\n {%- set system_message = \"\" %}\n{%- endif %}\n\n{#- System message + builtin tools #}\n{{- \"<|start_header_id|>system<|end_header_id|>\\n\\n\" }}\n{%- if builtin_tools is defined or tools is not none %}\n {{- \"Environment: ipython\\n\" }}\n{%- endif %}\n{%- if builtin_tools is defined %}\n {{- \"Tools: \" + builtin_tools | reject('equalto', 'code_interpreter') | join(\", \") + \"\\n\\n\"}}\n{%- endif %}\n{{- \"Cutting Knowledge Date: December 2023\\n\" }}\n{{- \"Today Date: \" + date_string + \"\\n\\n\" }}\n{%- if tools is not none and not tools_in_user_message %}\n {{- \"You have access to the following functions. To call a function, please respond with JSON for a function call.\" }}\n {{- 'Respond in the format {\"name\": function name, \"parameters\": dictionary of argument name and its value}.' }}\n {{- \"Do not use variables.\\n\\n\" }}\n {%- for t in tools %}\n {{- t | tojson(indent=4) }}\n {{- \"\\n\\n\" }}\n {%- endfor %}\n{%- endif %}\n{{- system_message }}\n{{- \"<|eot_id|>\" }}\n\n{#- Custom tools are passed in a user message with some extra guidance #}\n{%- if tools_in_user_message and not tools is none %}\n {#- Extract the first user message so we can plug it in here #}\n {%- if messages | length != 0 %}\n {%- set first_user_message = messages[0]['content']|trim %}\n {%- set messages = messages[1:] %}\n {%- else %}\n {{- raise_exception(\"Cannot put tools in the first user message when there's no first user message!\") }}\n{%- endif %}\n {{- '<|start_header_id|>user<|end_header_id|>\\n\\n' -}}\n {{- \"Given the following functions, please respond with a JSON for a function call \" }}\n {{- \"with its proper arguments that best answers the given prompt.\\n\\n\" }}\n {{- 'Respond in the format {\"name\": function name, \"parameters\": dictionary of argument name and its value}.' 
}}\n {{- \"Do not use variables.\\n\\n\" }}\n {%- for t in tools %}\n {{- t | tojson(indent=4) }}\n {{- \"\\n\\n\" }}\n {%- endfor %}\n {{- first_user_message + \"<|eot_id|>\"}}\n{%- endif %}\n\n{%- for message in messages %}\n {%- if not (message.role == 'ipython' or message.role == 'tool' or 'tool_calls' in message) %}\n {{- '<|start_header_id|>' + message['role'] + '<|end_header_id|>\\n\\n'+ message['content'] | trim + '<|eot_id|>' }}\n {%- elif 'tool_calls' in message %}\n {%- if not message.tool_calls|length == 1 %}\n {{- raise_exception(\"This model only supports single tool-calls at once!\") }}\n {%- endif %}\n {%- set tool_call = message.tool_calls[0].function %}\n {%- if builtin_tools is defined and tool_call.name in builtin_tools %}\n {{- '<|start_header_id|>assistant<|end_header_id|>\\n\\n' -}}\n {{- \"<|python_tag|>\" + tool_call.name + \".call(\" }}\n {%- for arg_name, arg_val in tool_call.arguments | items %}\n {{- arg_name + '=\"' + arg_val + '\"' }}\n {%- if not loop.last %}\n {{- \", \" }}\n {%- endif %}\n {%- endfor %}\n {{- \")\" }}\n {%- else %}\n {{- '<|start_header_id|>assistant<|end_header_id|>\\n\\n' -}}\n {{- '{\"name\": \"' + tool_call.name + '\", ' }}\n {{- '\"parameters\": ' }}\n {{- tool_call.arguments | tojson }}\n {{- \"}\" }}\n {%- endif %}\n {%- if builtin_tools is defined %}\n {#- This means we're in ipython mode #}\n {{- \"<|eom_id|>\" }}\n {%- else %}\n {{- \"<|eot_id|>\" }}\n {%- endif %}\n {%- elif message.role == \"tool\" or message.role == \"ipython\" %}\n {{- \"<|start_header_id|>ipython<|end_header_id|>\\n\\n\" }}\n {%- if message.content is mapping or message.content is iterable %}\n {{- message.content | tojson }}\n {%- else %}\n {{- message.content }}\n {%- endif %}\n {{- \"<|eot_id|>\" }}\n {%- endif %}\n{%- endfor %}\n{%- if add_generation_prompt %}\n {{- '<|start_header_id|>assistant<|end_header_id|>\\n\\n' }}\n{%- endif %}\n", + "stop": [ + "<|eot_id|>", + "<|eom_id|>" + ], + "bos_token": "<|begin_of_text|>", + 
"eos_token": "<|eot_id|>" + }, + "pricing": { + "hourly": 0, + "input": 3.5, + "output": 3.5, + "base": 0, + "finetune": 0 + } + }, + { + "id": "meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo", + "object": "model", + "created": 1721603683, + "type": "chat", + "running": false, + "display_name": "Meta Llama 3.1 70B Instruct Turbo", + "organization": "Meta", + "link": "https://huggingface.co/meta-llama/Meta-Llama-3.1-70B-Instruct", + "license": "Llama-3.1 (Other)", + "context_length": 131072, + "config": { + "chat_template": "{{- bos_token }}\n{%- if custom_tools is defined %}\n {%- set tools = custom_tools %}\n{%- endif %}\n{%- if not tools_in_user_message is defined %}\n {%- set tools_in_user_message = true %}\n{%- endif %}\n{%- if not date_string is defined %}\n {%- set date_string = \"26 Jul 2024\" %}\n{%- endif %}\n{%- if not tools is defined %}\n {%- set tools = none %}\n{%- endif %}\n\n{#- This block extracts the system message, so we can slot it into the right place. #}\n{%- if messages[0]['role'] == 'system' %}\n {%- set system_message = messages[0]['content']|trim %}\n {%- set messages = messages[1:] %}\n{%- else %}\n {%- set system_message = \"\" %}\n{%- endif %}\n\n{#- System message + builtin tools #}\n{{- \"<|start_header_id|>system<|end_header_id|>\\n\\n\" }}\n{%- if builtin_tools is defined or tools is not none %}\n {{- \"Environment: ipython\\n\" }}\n{%- endif %}\n{%- if builtin_tools is defined %}\n {{- \"Tools: \" + builtin_tools | reject('equalto', 'code_interpreter') | join(\", \") + \"\\n\\n\"}}\n{%- endif %}\n{{- \"Cutting Knowledge Date: December 2023\\n\" }}\n{{- \"Today Date: \" + date_string + \"\\n\\n\" }}\n{%- if tools is not none and not tools_in_user_message %}\n {{- \"You have access to the following functions. To call a function, please respond with JSON for a function call.\" }}\n {{- 'Respond in the format {\"name\": function name, \"parameters\": dictionary of argument name and its value}.' 
}}\n {{- \"Do not use variables.\\n\\n\" }}\n {%- for t in tools %}\n {{- t | tojson(indent=4) }}\n {{- \"\\n\\n\" }}\n {%- endfor %}\n{%- endif %}\n{{- system_message }}\n{{- \"<|eot_id|>\" }}\n\n{#- Custom tools are passed in a user message with some extra guidance #}\n{%- if tools_in_user_message and not tools is none %}\n {#- Extract the first user message so we can plug it in here #}\n {%- if messages | length != 0 %}\n {%- set first_user_message = messages[0]['content']|trim %}\n {%- set messages = messages[1:] %}\n {%- else %}\n {{- raise_exception(\"Cannot put tools in the first user message when there's no first user message!\") }}\n{%- endif %}\n {{- '<|start_header_id|>user<|end_header_id|>\\n\\n' -}}\n {{- \"Given the following functions, please respond with a JSON for a function call \" }}\n {{- \"with its proper arguments that best answers the given prompt.\\n\\n\" }}\n {{- 'Respond in the format {\"name\": function name, \"parameters\": dictionary of argument name and its value}.' 
}}\n {{- \"Do not use variables.\\n\\n\" }}\n {%- for t in tools %}\n {{- t | tojson(indent=4) }}\n {{- \"\\n\\n\" }}\n {%- endfor %}\n {{- first_user_message + \"<|eot_id|>\"}}\n{%- endif %}\n\n{%- for message in messages %}\n {%- if not (message.role == 'ipython' or message.role == 'tool' or 'tool_calls' in message) %}\n {{- '<|start_header_id|>' + message['role'] + '<|end_header_id|>\\n\\n'+ message['content'] | trim + '<|eot_id|>' }}\n {%- elif 'tool_calls' in message %}\n {%- if not message.tool_calls|length == 1 %}\n {{- raise_exception(\"This model only supports single tool-calls at once!\") }}\n {%- endif %}\n {%- set tool_call = message.tool_calls[0].function %}\n {%- if builtin_tools is defined and tool_call.name in builtin_tools %}\n {{- '<|start_header_id|>assistant<|end_header_id|>\\n\\n' -}}\n {{- \"<|python_tag|>\" + tool_call.name + \".call(\" }}\n {%- for arg_name, arg_val in tool_call.arguments | items %}\n {{- arg_name + '=\"' + arg_val + '\"' }}\n {%- if not loop.last %}\n {{- \", \" }}\n {%- endif %}\n {%- endfor %}\n {{- \")\" }}\n {%- else %}\n {{- '<|start_header_id|>assistant<|end_header_id|>\\n\\n' -}}\n {{- '{\"name\": \"' + tool_call.name + '\", ' }}\n {{- '\"parameters\": ' }}\n {{- tool_call.arguments | tojson }}\n {{- \"}\" }}\n {%- endif %}\n {%- if builtin_tools is defined %}\n {#- This means we're in ipython mode #}\n {{- \"<|eom_id|>\" }}\n {%- else %}\n {{- \"<|eot_id|>\" }}\n {%- endif %}\n {%- elif message.role == \"tool\" or message.role == \"ipython\" %}\n {{- \"<|start_header_id|>ipython<|end_header_id|>\\n\\n\" }}\n {%- if message.content is mapping or message.content is iterable %}\n {{- message.content | tojson }}\n {%- else %}\n {{- message.content }}\n {%- endif %}\n {{- \"<|eot_id|>\" }}\n {%- endif %}\n{%- endfor %}\n{%- if add_generation_prompt %}\n {{- '<|start_header_id|>assistant<|end_header_id|>\\n\\n' }}\n{%- endif %}\n", + "stop": [ + "<|eot_id|>", + "<|eom_id|>" + ], + "bos_token": "<|begin_of_text|>", + 
"eos_token": "<|eot_id|>" + }, + "pricing": { + "hourly": 0, + "input": 0.88, + "output": 0.88, + "base": 0, + "finetune": 0 + } + }, + { + "id": "meta-llama/Meta-Llama-3-8B-Instruct-Turbo", + "object": "model", + "created": 0, + "type": "chat", + "running": false, + "display_name": "Meta Llama 3 8B Instruct Turbo", + "organization": "Meta", + "link": "https://huggingface.co/meta-llama/Meta-Llama-3-8B-Instruct", + "license": "Llama-3 (Other)", + "context_length": 8192, + "config": { + "chat_template": "{% set loop_messages = messages %}{% for message in loop_messages %}{% set content = '<|start_header_id|>' + message['role'] + '<|end_header_id|>\n\n'+ message['content'] | trim + '<|eot_id|>' %}{% if loop.index0 == 0 %}{% set content = bos_token + content %}{% endif %}{{ content }}{% endfor %}{{ '<|start_header_id|>assistant<|end_header_id|>\n\n' }}", + "stop": [ + "<|eot_id|>" + ], + "bos_token": "<|begin_of_text|>", + "eos_token": "<|end_of_text|>" + }, + "pricing": { + "hourly": 0, + "input": 0.18000000000000002, + "output": 0.18000000000000002, + "base": 0, + "finetune": 0 + } + }, + { + "id": "meta-llama/Meta-Llama-3.1-8B-Instruct-Turbo", + "object": "model", + "created": 1741298134, + "type": "chat", + "running": false, + "display_name": "Meta Llama 3.1 8B Instruct Turbo", + "organization": "Meta", + "link": "https://huggingface.co/meta-llama/Meta-Llama-3.1-8B-Instruct", + "license": "Llama-3.1 (Other)", + "context_length": 131072, + "config": { + "chat_template": "{{- bos_token }}\n{%- if custom_tools is defined %}\n {%- set tools = custom_tools %}\n{%- endif %}\n{%- if not tools_in_user_message is defined %}\n {%- set tools_in_user_message = true %}\n{%- endif %}\n{%- if not date_string is defined %}\n {%- set date_string = \"26 Jul 2024\" %}\n{%- endif %}\n{%- if not tools is defined %}\n {%- set tools = none %}\n{%- endif %}\n\n{#- This block extracts the system message, so we can slot it into the right place. 
#}\n{%- if messages[0]['role'] == 'system' %}\n {%- set system_message = messages[0]['content']|trim %}\n {%- set messages = messages[1:] %}\n{%- else %}\n {%- set system_message = \"\" %}\n{%- endif %}\n\n{#- System message + builtin tools #}\n{{- \"<|start_header_id|>system<|end_header_id|>\\n\\n\" }}\n{%- if builtin_tools is defined or tools is not none %}\n {{- \"Environment: ipython\\n\" }}\n{%- endif %}\n{%- if builtin_tools is defined %}\n {{- \"Tools: \" + builtin_tools | reject('equalto', 'code_interpreter') | join(\", \") + \"\\n\\n\"}}\n{%- endif %}\n{{- \"Cutting Knowledge Date: December 2023\\n\" }}\n{{- \"Today Date: \" + date_string + \"\\n\\n\" }}\n{%- if tools is not none and not tools_in_user_message %}\n {{- \"You have access to the following functions. To call a function, please respond with JSON for a function call.\" }}\n {{- 'Respond in the format {\"name\": function name, \"parameters\": dictionary of argument name and its value}.' }}\n {{- \"Do not use variables.\\n\\n\" }}\n {%- for t in tools %}\n {{- t | tojson(indent=4) }}\n {{- \"\\n\\n\" }}\n {%- endfor %}\n{%- endif %}\n{{- system_message }}\n{{- \"<|eot_id|>\" }}\n\n{#- Custom tools are passed in a user message with some extra guidance #}\n{%- if tools_in_user_message and not tools is none %}\n {#- Extract the first user message so we can plug it in here #}\n {%- if messages | length != 0 %}\n {%- set first_user_message = messages[0]['content']|trim %}\n {%- set messages = messages[1:] %}\n {%- else %}\n {{- raise_exception(\"Cannot put tools in the first user message when there's no first user message!\") }}\n{%- endif %}\n {{- '<|start_header_id|>user<|end_header_id|>\\n\\n' -}}\n {{- \"Given the following functions, please respond with a JSON for a function call \" }}\n {{- \"with its proper arguments that best answers the given prompt.\\n\\n\" }}\n {{- 'Respond in the format {\"name\": function name, \"parameters\": dictionary of argument name and its value}.' 
}}\n {{- \"Do not use variables.\\n\\n\" }}\n {%- for t in tools %}\n {{- t | tojson(indent=4) }}\n {{- \"\\n\\n\" }}\n {%- endfor %}\n {{- first_user_message + \"<|eot_id|>\"}}\n{%- endif %}\n\n{%- for message in messages %}\n {%- if not (message.role == 'ipython' or message.role == 'tool' or 'tool_calls' in message) %}\n {{- '<|start_header_id|>' + message['role'] + '<|end_header_id|>\\n\\n'+ message['content'] | trim + '<|eot_id|>' }}\n {%- elif 'tool_calls' in message %}\n {%- if not message.tool_calls|length == 1 %}\n {{- raise_exception(\"This model only supports single tool-calls at once!\") }}\n {%- endif %}\n {%- set tool_call = message.tool_calls[0].function %}\n {%- if builtin_tools is defined and tool_call.name in builtin_tools %}\n {{- '<|start_header_id|>assistant<|end_header_id|>\\n\\n' -}}\n {{- \"<|python_tag|>\" + tool_call.name + \".call(\" }}\n {%- for arg_name, arg_val in tool_call.arguments | items %}\n {{- arg_name + '=\"' + arg_val + '\"' }}\n {%- if not loop.last %}\n {{- \", \" }}\n {%- endif %}\n {%- endfor %}\n {{- \")\" }}\n {%- else %}\n {{- '<|start_header_id|>assistant<|end_header_id|>\\n\\n' -}}\n {{- '{\"name\": \"' + tool_call.name + '\", ' }}\n {{- '\"parameters\": ' }}\n {{- tool_call.arguments | tojson }}\n {{- \"}\" }}\n {%- endif %}\n {%- if builtin_tools is defined %}\n {#- This means we're in ipython mode #}\n {{- \"<|eom_id|>\" }}\n {%- else %}\n {{- \"<|eot_id|>\" }}\n {%- endif %}\n {%- elif message.role == \"tool\" or message.role == \"ipython\" %}\n {{- \"<|start_header_id|>ipython<|end_header_id|>\\n\\n\" }}\n {%- if message.content is mapping or message.content is iterable %}\n {{- message.content | tojson }}\n {%- else %}\n {{- message.content }}\n {%- endif %}\n {{- \"<|eot_id|>\" }}\n {%- endif %}\n{%- endfor %}\n{%- if add_generation_prompt %}\n {{- '<|start_header_id|>assistant<|end_header_id|>\\n\\n' }}\n{%- endif %}\n", + "stop": [ + "<|eot_id|>", + "<|eom_id|>" + ], + "bos_token": "<|begin_of_text|>", + 
"eos_token": "<|eot_id|>" + }, + "pricing": { + "hourly": 0, + "input": 0.18000000000000002, + "output": 0.18000000000000002, + "base": 0, + "finetune": 0 + } + } +] \ No newline at end of file diff --git a/DocService/sources/ai/engine/providers/internal/together.ai.js b/DocService/sources/ai/engine/providers/internal/together.ai.js new file mode 100644 index 00000000..cb341a11 --- /dev/null +++ b/DocService/sources/ai/engine/providers/internal/together.ai.js @@ -0,0 +1,63 @@ +"use strict"; + +class Provider extends AI.Provider { + + constructor() { + super("Together AI", "https://api.together.xyz", "", "v1"); + } + + checkModelCapability = function(model) { + if (model.context_length) + model.options.max_input_tokens = AI.InputMaxTokens.getFloor(model.context_length); + + if ("chat" === model.type) { + model.endpoints.push(AI.Endpoints.Types.v1.Chat_Completions); + let result = AI.CapabilitiesUI.Chat; + + if (-1 !== model.id.toLowerCase().indexOf("vision")) { + model.endpoints.push(AI.Endpoints.Types.v1.Vision); + result |= AI.CapabilitiesUI.Vision; + } + return result; + } + + if ("image" === model.type) { + model.endpoints.push(AI.Endpoints.Types.v1.Images_Generations); + model.endpoints.push(AI.Endpoints.Types.v1.Images_Edits); + model.endpoints.push(AI.Endpoints.Types.v1.Images_Variarions); + return AI.CapabilitiesUI.Image; + } + + if ("moderation" === model.type) { + model.endpoints.push(AI.Endpoints.Types.v1.Moderations); + return AI.CapabilitiesUI.Moderations; + } + + if ("embedding" === model.type) { + model.endpoints.push(AI.Endpoints.Types.v1.Embeddings); + return AI.CapabilitiesUI.Embeddings; + } + + if ("language" === model.type) { + model.endpoints.push(AI.Endpoints.Types.v1.Language); + return AI.CapabilitiesUI.Language; + } + + if ("code" === model.type) { + model.endpoints.push(AI.Endpoints.Types.v1.Code); + return AI.CapabilitiesUI.Code | AI.CapabilitiesUI.Chat; + } + + if ("rerank" === model.type) { + return AI.CapabilitiesUI.None; + } + + 
model.endpoints.push(AI.Endpoints.Types.v1.Chat_Completions);
+    return AI.CapabilitiesUI.Chat;
+  }
+
+  // All Together AI requests are routed through the document server proxy.
+  isUseProxy() {
+    return true;
+  }
+
+}
diff --git a/DocService/sources/ai/engine/providers/internal/xAI.js b/DocService/sources/ai/engine/providers/internal/xAI.js
new file mode 100644
index 00000000..5c877d10
--- /dev/null
+++ b/DocService/sources/ai/engine/providers/internal/xAI.js
@@ -0,0 +1,34 @@
+"use strict";
+
+// xAI (Grok) provider: OpenAI-compatible API at api.x.ai/v1.
+class Provider extends AI.Provider {
+
+  constructor() {
+    super("xAI", "https://api.x.ai", "", "v1");
+  }
+
+  // Hide beta models from the model list shown to the user.
+  checkExcludeModel(model) {
+    if (-1 !== model.id.indexOf("-beta"))
+      return true;
+    return false;
+  }
+
+  // Infer capabilities and endpoints from the model id.
+  checkModelCapability = function(model) {
+    if (-1 != model.id.indexOf("vision"))
+    {
+      model.options.max_input_tokens = AI.InputMaxTokens["32k"];
+      model.endpoints.push(AI.Endpoints.Types.v1.Chat_Completions);
+      return AI.CapabilitiesUI.Chat | AI.CapabilitiesUI.Vision;
+    }
+
+    if (-1 != model.id.indexOf("image"))
+    {
+      // fix: was push(Types.v1.Image_Generation | Types.v1.Images_Edits) —
+      // a bitwise OR of two endpoint ids is not itself a valid endpoint id
+      // (the getEndpointUrl switch in provider.js matches no case for it),
+      // and "Image_Generation" is not the enum name used elsewhere
+      // ("Images_Generations"). Push each endpoint separately.
+      model.endpoints.push(AI.Endpoints.Types.v1.Images_Generations);
+      model.endpoints.push(AI.Endpoints.Types.v1.Images_Edits);
+      return AI.CapabilitiesUI.Image;
+    }
+
+    model.options.max_input_tokens = AI.InputMaxTokens["128k"];
+    model.endpoints.push(AI.Endpoints.Types.v1.Chat_Completions);
+    return AI.CapabilitiesUI.Chat;
+  }
+
+}
diff --git a/DocService/sources/ai/engine/providers/preinstall-example.json b/DocService/sources/ai/engine/providers/preinstall-example.json
new file mode 100644
index 00000000..43b8278a
--- /dev/null
+++ b/DocService/sources/ai/engine/providers/preinstall-example.json
@@ -0,0 +1,163 @@
+{
+  "actions": {
+    "Chat": {
+      "name": "Ask AI",
+      "icon": "ask-ai",
+      "model": "llama-3.2-90b-vision-preview",
+      "capabilities": 1
+    },
+    "Summarization": {
+      "name": "Summarization",
+      "icon": "summarization",
+      "model": "llama3.2:latest",
+      "capabilities": 1
+    },
+    "Translation": {
+      "name": "Translation",
+      "icon": "translation",
+      "model": "gemini-1.5-pro-latest",
+      "capabilities": 1
+    },
+    "TextAnalyze": {
+      "name": "Text analysis",
+ "icon": "", + "model": "claude-3-sonnet-20240229", + "capabilities": 1 + } + }, + + "providers": { + "OpenAI": { + "name": "OpenAI", + "url": "https://api.openai.com", + "key": "OPEN-AI-KEY", + "models": [ + { + "id": "chatgpt-4o-latest", + "object": "model", + "created": 1723515131, + "owned_by": "system", + "name": "chatgpt-4o-latest", + "endpoints": [ + 1 + ], + "options": {} + }, + { + "id": "gpt-4o", + "object": "model", + "created": 1715367049, + "owned_by": "system", + "name": "gpt-4o", + "endpoints": [ + 1 + ], + "options": { + "max_input_tokens": 131072 + } + } + ] + }, + "Together AI": { + "name": "Together AI", + "url": "https://api.together.xyz", + "key": "", + "models": [] + }, + "Mistral": { + "name": "Mistral", + "url": "https://api.mistral.ai", + "key": "", + "models": [] + }, + "Deepseek": { + "name": "Deepseek", + "url": "https://api.deepseek.com", + "key": "", + "models": [ + { + "id": "deepseek-chat", + "object": "model", + "owned_by": "deepseek", + "name": "deepseek-chat", + "endpoints": [], + "options": {} + }, + { + "id": "deepseek-reasoner", + "object": "model", + "owned_by": "deepseek", + "name": "deepseek-reasoner", + "endpoints": [], + "options": {} + } + ] + }, + "Ollama": { + "name": "Ollama", + "url": "http://localhost:11434", + "key": "", + "models": [ + { + "id": "llama3.2:latest", + "object": "model", + "created": 1739120925, + "owned_by": "library", + "name": "llama3.2:latest", + "endpoints": [], + "options": {} + } + ] + } + }, + "models": [ + { + "capabilities": 129, + "provider": "Groq", + "name": "Groq [llama-3.2-90b-vision-preview]", + "id": "llama-3.2-90b-vision-preview" + }, + { + "capabilities": 1, + "provider": "Together AI", + "name": "Together AI [mistralai/Mistral-7B-v0.1]", + "id": "mistralai/Mistral-7B-v0.1" + }, + { + "capabilities": 1, + "provider": "Together AI", + "name": "Together AI [deepseek-ai/DeepSeek-V3]", + "id": "deepseek-ai/DeepSeek-V3" + }, + { + "capabilities": 129, + "provider": "OpenAI", + "name": 
"OpenAI [chatgpt-4o-latest]", + "id": "chatgpt-4o-latest" + }, + { + "capabilities": 129, + "provider": "Anthropic", + "name": "Anthropic [claude-3-sonnet-20240229]", + "id": "claude-3-sonnet-20240229" + }, + { + "capabilities": 129, + "provider": "Google-Gemini", + "name": "Google-Gemini [gemini-1.5-pro-latest]", + "id": "gemini-1.5-pro-latest" + }, + { + "capabilities": 255, + "provider": "Ollama", + "name": "Ollama [llama3.2:latest]", + "id": "llama3.2:latest" + }, + { + "capabilities": 255, + "provider": "Deepseek", + "name": "Deepseek [deepseek-chat]", + "id": "deepseek-chat" + } + ] +} \ No newline at end of file diff --git a/DocService/sources/ai/engine/providers/provider.js b/DocService/sources/ai/engine/providers/provider.js new file mode 100644 index 00000000..303362c9 --- /dev/null +++ b/DocService/sources/ai/engine/providers/provider.js @@ -0,0 +1,532 @@ +"use strict"; + +(async function(){ + + class Provider { + /** + * Provider base class. + * @param {string} name Provider name. + * @param {string} url Url to service. + * @param {string} key Key for service. This is an optional field. Some providers may require a key for access. + * @param {string} addon Addon for url. For example: v1 for many providers. + */ + constructor(name, url, key, addon) { + this.name = name || ""; + this.url = url || ""; + this.key = key || ""; + this.addon = addon || ""; + + this.models = []; + this.modelsUI = []; + } + + /** + * If you add an implementation here, then no request will be made to the service. + * @returns {Object[] | undefined} + */ + getModels() { + return undefined; + } + + /** + * Correct received (*models* endpoint) model object. + */ + correctModelInfo(model) { + if (undefined === model.id && model.name) { + model.id = model.name; + return; + } + model.name = model.id; + } + + /** + * Return *true* if you do not want to work with a specific model (model.id). + * The model will not be presented in the combo box with the list of models. 
+     * @returns {boolean}
+     */
+    checkExcludeModel(model) {
+      return false;
+    }
+
+    /**
+     * Return enumeration with capabilities for this model (model.id). (Some providers do not get the information for this functionalities).
+     * Example: AI.CapabilitiesUI.Chat | AI.CapabilitiesUI.Image;
+     * @returns {number}
+     */
+    checkModelCapability(model) {
+      return AI.CapabilitiesUI.All;
+    }
+
+    /**
+     * Url for a specific endpoint.
+     * @returns {string}
+     */
+    getEndpointUrl(endpoint, model) {
+      let Types = AI.Endpoints.Types;
+      switch (endpoint)
+      {
+      case Types.v1.Models:
+        return "/models";
+
+      case Types.v1.Chat_Completions:
+        return "/chat/completions";
+      case Types.v1.Completions:
+        return "/completions";
+
+      case Types.v1.Images_Generations:
+        return "/images/generations";
+      case Types.v1.Images_Edits:
+        return "/images/edits";
+      case Types.v1.Images_Variarions:
+        return "/images/variations";
+
+      case Types.v1.Embeddings:
+        return "/embeddings";
+
+      case Types.v1.Audio_Transcriptions:
+        return "/audio/transcriptions";
+      case Types.v1.Audio_Translations:
+        return "/audio/translations";
+      case Types.v1.Audio_Speech:
+        return "/audio/speech";
+
+      case Types.v1.Moderations:
+        return "/moderations";
+
+      case Types.v1.Language: // language/code models share the plain completions endpoint
+        return "/completions";
+      case Types.v1.Code:
+        return "/completions";
+
+      case Types.v1.Realtime:
+        return "/realtime";
+
+      case Types.v1.OCR: // OCR is driven through the vision-capable chat endpoint
+        return "/chat/completions";
+
+      default:
+        break;
+      }
+
+      return "";
+    }
+
+    /**
+     * An object-addition to the model. It is used, among other things, to configure the model parameters.
+     * Don't override this method unless you know what you're doing.
+     * @returns {Object}
+     */
+    getRequestBodyOptions() {
+      return {};
+    }
+
+    /**
+     * The returned object is an enumeration of all the headers for the requests.
+     * @returns {Object}
+     */
+    getRequestHeaderOptions() {
+      let headers = {
+        "Content-Type" : "application/json"
+      };
+      if (this.key) // key is optional: no Authorization header when absent
+        headers["Authorization"] = "Bearer " + this.key;
+      return headers;
+    }
+
+    /**
+     * This method returns whether a proxy server needs to be used to work with this provider.
+     * Don't override this method unless you know what you're doing.
+     * @returns {boolean}
+     */
+    isUseProxy() {
+      return false;
+    }
+
+    /**
+     * This method returns whether this provider is only supported in the desktop application.
+     * Don't override this method unless you know what you're doing.
+     * @returns {boolean}
+     */
+    isOnlyDesktop() {
+      return false;
+    }
+
+    /**
+     * Get request body object by message.
+     * @param {Object} message
+     * *message* is in following format:
+     * {
+     *   messages: [
+     *     { role: "developer", content: "You are a helpful assistant." },
+     *     { role: "system", content: "You are a helpful assistant." },
+     *     { role: "user", content: "Hello" },
+     *     { role: "assistant", content: "Hey!" },
+     *     { role: "user", content: "Hello" },
+     *     { role: "assistant", content: "Hey again!" }
+     *   ]
+     * }
+     */
+    getChatCompletions(message, model) {
+      return {
+        model : model.id,
+        messages : message.messages
+      }
+    }
+
+    /**
+     * Get request body object by message.
+     * @param {Object} message
+     * *message* is in following format:
+     * {
+     *   text: "Please, calculate 2+2."
+     * }
+     */
+    getCompletions(message, model) {
+      return {
+        model : model.id,
+        prompt : message.text
+      }
+    }
+
+    /**
+     * Convert *getChatCompletions* and *getCompletions* answer to result simple message.
+     * @returns {Object} result
+     * *result* is in following format:
+     * {
+     *   content: ["Hello", "Hi"]
+     * }
+     */
+    getChatCompletionsResult(message, model) {
+      let result = {
+        content : []
+      };
+
+      // choices / content / candidates — presumably OpenAI-, Anthropic- and
+      // Gemini-style response shapes respectively; verify per provider.
+      let arrResult = message.data.choices || message.data.content || message.data.candidates;
+      if (!arrResult)
+        return result;
+
+      let choice = arrResult[0];
+      if (!choice)
+        return result;
+
+      if (choice.message && choice.message.content)
+        result.content.push(choice.message.content);
+      if (choice.text)
+        result.content.push(choice.text);
+      if (choice.content) {
+        if (typeof(choice.content) === "string")
+          result.content.push(choice.content);
+        else if (Array.isArray(choice.content.parts)) {
+          for (let i = 0, len = choice.content.parts.length; i < len; i++) {
+            result.content.push(choice.content.parts[i].text);
+          }
+        }
+      }
+
+      let trimArray = ["\n".charCodeAt(0)]; // char codes stripped from both ends
+      for (let i = 0, len = result.content.length; i < len; i++) {
+        let iEnd = result.content[i].length - 1;
+        let iStart = 0;
+        while (iStart < iEnd && trimArray.includes(result.content[i].charCodeAt(iStart)))
+          iStart++;
+        while (iEnd > iStart && trimArray.includes(result.content[i].charCodeAt(iEnd)))
+          iEnd--;
+
+        if (iEnd > iStart && ((0 !== iStart) || ((result.content[i].length - 1) !== iEnd)))
+          result.content[i] = result.content[i].substring(iStart, iEnd + 1);
+      }
+
+      return result;
+    }
+
+    /**
+     * Get available sizes for input images.
+     * @returns {Array.<Object>} sizes
+     */
+    getImageSizesInput(model) {
+      return [
+        { w: 256, h: 256 },
+        { w: 512, h: 512 },
+        { w: 1024, h: 1024 }
+      ];
+    }
+
+    /**
+     * Get available sizes for output images.
+     * @returns {Array.<Object>} sizes
+     */
+    getImageSizesOutput(model) {
+      return [
+        { w: 256, h: 256 },
+        { w: 512, h: 512 },
+        { w: 1024, h: 1024 }
+      ];
+    }
+
+    /**
+     * Get request body object by message.
+     * @param {Object} message
+     * *message* is in following format:
+     * {
+     *   prompt: "",
+     *   width:1024,
+     *   height:1024,
+     *   background: "transparent",
+     *   quality: "high"
+     * }
+     */
+    getImageGeneration(message, model) {
+      let sizes = this.getImageSizesOutput(model);
+      let index = sizes.length - 1; // default to the largest supported size
+
+      return {
+        model : model.id,
+        width : message.width || sizes[index].w,
+        height : message.height || sizes[index].h, // fix: was message.width — height was never honored
+        n : 1,
+        response_format : "b64_json",
+        prompt : message.prompt
+      };
+    }
+
+    /**
+     * Convert *getImageGeneration* answer to result base64 image.
+     * Probes the many response shapes used by different providers until an
+     * image url / base64 payload is found.
+     * @returns {String} Image in base64 format
+     */
+    async getImageGenerationResult(message, model) {
+      let imageUrl = "";
+      // Read a property either from the message root or from message.data.
+      let getProp = function(name) {
+        if (message[name])
+          return message[name];
+        if (message.data && message.data[name])
+          return message.data[name];
+        return undefined;
+      };
+
+      if (!imageUrl) {
+        let data = getProp("data");
+        if (data && data[0] && data[0].b64_json)
+          imageUrl = data[0].b64_json;
+      }
+
+      if (!imageUrl) {
+        let artifacts = getProp("artifacts");
+        if (artifacts && artifacts[0] && artifacts[0].base64)
+          imageUrl = artifacts[0].base64;
+      }
+
+      if (!imageUrl) {
+        let result = getProp("result");
+        if (result && result.imageUrl)
+          imageUrl = result.imageUrl;
+      }
+
+      if (!imageUrl) {
+        let generations = getProp("generations");
+        if (generations && generations[0] && generations[0].url)
+          imageUrl = generations[0].url;
+      }
+
+      if (!imageUrl) {
+        let candidates = getProp("candidates");
+        if (candidates && candidates[0] && candidates[0].content)
+          imageUrl = candidates[0].content;
+      }
+
+      if (!imageUrl) {
+        let image = getProp("image");
+        if (image)
+          imageUrl = image;
+      }
+
+      if (!imageUrl) {
+        let response = getProp("response");
+        if (response) {
+          // embedded data-url inside a textual response
+          let matches = response.match(/data:image\/[^;]+;base64,([^"'\s]+)/);
+          if (matches && matches[1])
+            imageUrl = matches[1];
+        }
+      }
+
+      if (!imageUrl) {
+        let content = getProp("content");
+        if (content) {
+          for (let i = 0, len = content.length; i < len; i++) {
+            if (content[i].type === 'text') {
+              // NOTE(review): the regex literal was lost in transit; reconstructed
+              // to extract an inline <svg>…</svg> document — verify against upstream.
+              let svgMatch = content[i].text.match(/<svg[\s\S]*?<\/svg>/);
+              if (svgMatch) {
+                imageUrl = svgMatch[0];
+                break;
+              }
+            }
+          }
+        }
+
+        if (imageUrl) {
+          imageUrl = "data:image/svg+xml;base64," + btoa(imageUrl);
+        }
+      }
+
+      if (!imageUrl)
+        return "";
+
+      return await AI.ImageEngine.getBase64FromUrl(imageUrl);
+    }
+
+    /**
+     * Get request body object by message.
+     * @param {Object} message
+     * *message* is in following format:
+     * {
+     *   image: "base64...",
+     *   prompt: "text"
+     * }
+     */
+    async getImageVision(message, model) {
+      return {
+        model : model.id,
+        messages : [
+          {
+            role: "user",
+            content: [
+              {
+                type: "text",
+                text: message.prompt
+              },
+              {
+                type: "image_url",
+                image_url: {
+                  url: message.image
+                }
+              }
+            ]
+          }
+        ]
+      }
+    }
+
+    getImageVisionResult(message, model) {
+      let result = this.getChatCompletionsResult(message, model);
+
+      if (result.content.length === 0)
+        return "";
+
+      // Strip a leading <think>…</think> block emitted by reasoning models.
+      // NOTE(review): the tag literals were lost in transit; reconstructed from
+      // the surviving `end + 8` offset ("</think>".length === 8) — verify upstream.
+      if (0 === result.content[0].indexOf("<think>")) {
+        let end = result.content[0].indexOf("</think>");
+        if (end !== -1)
+          result.content[0] = result.content[0].substring(end + "</think>".length);
+      }
+
+      return result.content[0];
+
+    }
+
+    /**
+     * Get request body object by message.
+     * @param {Object} message
+     * *message* is in following format:
+     * {
+     *   image: "base64..."
+     * }
+     */
+    async getImageOCR(message, model) {
+      return await this.getImageVision({
+        image : message.image,
+        prompt : Asc.Prompts.getImagePromptOCR()
+      }, model);
+    }
+
+    getImageOCRResult(message, model) {
+      return this.getImageVisionResult(message, model);
+    }
+
+    /**
+     * ========================================================================================
+     * The following are methods for internal work. There is no need to overload these methods.
+     * ========================================================================================
+     */
+    createInstance(name, url, key, addon) {
+      //let inst = Object.create(Object.getPrototypeOf(this));
+      let inst = new this.constructor();
+      inst.name = name;
+      inst.url = url;
+      inst.key = key;
+      inst.addon = addon || "";
+      return inst;
+    }
+
+    // Build the UI model list (this.modelsUI) from the raw model list.
+    checkModelsUI() {
+      for (let i = 0, len = this.models.length; i < len; i++) {
+        let model = this.models[i];
+        let modelUI = new window.AI.UI.Model(model.name, model.id, model.provider);
+        modelUI.capabilities = this.checkModelCapability(model);
+        this.modelsUI.push(modelUI);
+      }
+    }
+
+    // Return the content of the first "system" message. Duplicate system
+    // messages are always removed; the first one is removed too when
+    // isRemove === true. Mutates message.messages in place.
+    getSystemMessage(message, isRemove) {
+      let messages = message.messages;
+      let isFound = false;
+      if (!messages)
+        return "";
+      let result = "";
+      for (let i = 0; i < messages.length; ++i) {
+        if (messages[i].role === "system") {
+          if (isFound) {
+            messages.splice(i, 1);
+            --i; // fix: step back so the element shifted into slot i is not skipped
+          } else {
+            isFound = true;
+            result = messages[i].content;
+            if (isRemove === true) {
+              messages.splice(i, 1);
+              --i; // fix: same index correction after in-place removal
+            }
+          }
+        }
+      }
+      return result;
+    }
+
+    // Emulate an image-generation request through a plain chat endpoint.
+    getImageGenerationWithChat(message, model, addon) {
+      let prompt = "Please generate image. ";
+      if (addon)
+        prompt += addon;
+      // TODO: sizes
+      prompt += "Here is the description for the image content:\"";
+      prompt += message.prompt;
+      prompt += "\"";
+
+      let data = {
+        messages : [
+          {
+            role: "user",
+            content: prompt
+          }
+        ]
+      };
+
+      return this.getChatCompletions(data, model);
+    }
+
+    // Forward a vision-style prompt through a plain chat endpoint.
+    getImageVisionWithChat(message, model) {
+      // fix: the original built an unused `prompt` string and referenced an
+      // undefined `addon` variable (copy/paste from getImageGenerationWithChat),
+      // which threw a ReferenceError whenever this method was called.
+      let data = {
+        messages : [
+          {
+            role: "user",
+            content: message.prompt
+          }
+        ]
+      };
+
+      return this.getChatCompletions(data, model);
+    }
+
+  }
+
+  window.AI.Provider = Provider;
+  await AI.loadInternalProviders();
+
+})();
diff --git a/DocService/sources/ai/engine/register.js b/DocService/sources/ai/engine/register.js
new file mode 100644
index 00000000..9753cb41
--- /dev/null
+++ b/DocService/sources/ai/engine/register.js
@@ -0,0 +1,661 @@
+function registerButtons(window, undefined)
+{
+  function getToolBarButtonIcons(icon) {
+    return "resources/icons/%theme-type%(light|dark)/big/" + icon + "%scale%(default).png";
+  }
+
+  function getContextMenuButtonIcons(icon) {
+    return "resources/icons/%theme-type%(light|dark)/" + icon + "%scale%(default).png";
+  }
+
+  // register contextmenu buttons
+  let buttonMain = new Asc.ButtonContextMenu();
+  buttonMain.text = "AI";
+  buttonMain.icons = getContextMenuButtonIcons("general-ai");
+  buttonMain.addCheckers("All");
+
+  function chatWindowShow(attachedText)
+  {
+    if (window.chatWindow) {
+      window.chatWindow.activate();
+      return;
+    }
+
+    let requestEngine = AI.Request.create(AI.ActionType.Chat);
+    if (!requestEngine)
+      return;
+
+    let variation = {
+      url : "chat.html",
+      description : window.Asc.plugin.tr("Chatbot"),
+      isVisual : true,
+      buttons : [],
+      icons: "resources/icons/%theme-name%(theme-default|theme-system|theme-classic-light)/%theme-type%(light|dark)/ask-ai%state%(normal|active)%scale%(default).png",
+      isModal : false,
+      isCanDocked: true,
+      type: window.localStorage.getItem("onlyoffice_ai_chat_placement") || "window",
+      EditorsSupport : ["word", "slide", "cell", "pdf"],
+      size : [ 400, 400 ]
+    };
+
+    let hasOpenedOnce = false;
+
+    var chatWindow = new window.Asc.PluginWindow();
+    chatWindow.attachEvent("onWindowReady", function() {
+      Asc.Editor.callMethod("ResizeWindow", [chatWindow.id, [400, 400], [400, 400], [0, 0]]);
+      if(!hasOpenedOnce && attachedText &&
attachedText.trim()) { + chatWindow.command("onAttachedText", attachedText); + } + hasOpenedOnce = true; + }); + chatWindow.attachEvent("onChatMessage", async function(message) { + let requestEngine = AI.Request.create(AI.ActionType.Chat); + if (!requestEngine) + return; + + let result = await requestEngine.chatRequest(message); + if (!result) result = ""; + + //result = result.replace(/\n\n/g, '\n'); + chatWindow.command("onChatReply", result); + }); + chatWindow.attachEvent("onChatReplace", async function(data) { + switch (data.type) { + case "review": { + if (Asc.plugin.info.editorType === "word") + await Asc.Library.InsertAsReview(data.data, true); + else + await Asc.Library.InsertAsComment(data.data); + break; + } + case "comment": { + await Asc.Library.InsertAsComment(data.data); + break; + } + case "insert": { + await Asc.Library.InsertAsHTML(data.data); + break; + } + case "replace": { + await Asc.Library.ReplaceTextSmart([data.data]); + break; + } + } + }); + chatWindow.attachEvent("onDockedChanged", async function(type) { + window.localStorage.setItem("onlyoffice_ai_chat_placement", type); + + async function waitSaveSettings() + { + return new Promise(resolve => (function(){ + chatWindow.attachEvent("onUpdateState", function(type) { + resolve(); + }); + chatWindow.command("onUpdateState"); + })()); + }; + + await waitSaveSettings(); + Asc.Editor.callMethod("OnWindowDockChangedCallback", [chatWindow.id]); + }); + chatWindow.show(variation); + + window.chatWindow = chatWindow; + } + + // Submenu summarize: + if (Asc.Editor.getType() !== "pdf") + { + let button = new Asc.ButtonContextMenu(buttonMain); + button.text = "Summarization"; + button.icons = getContextMenuButtonIcons("summarization"); + button.editors = ["word"]; + button.addCheckers("Selection"); + button.attachOnClick(async function(data){ + let requestEngine = AI.Request.create(AI.ActionType.Summarization); + if (!requestEngine) + return; + + let content = await Asc.Library.GetSelectedText(); + 
let prompt = Asc.Prompts.getSummarizationPrompt(content); + let result = await requestEngine.chatRequest(prompt); + if (!result) return; + + result = "Summary:\n\n" + result; + await Asc.Library.InsertAsText(result); + }); + } + + // Submenu Text Analysis + if (true) + { + let button1 = new Asc.ButtonContextMenu(buttonMain); + button1.text = "Text analysis"; + button1.icons = getContextMenuButtonIcons("text-analysis-ai"); + button1.editors = ["word"]; + button1.addCheckers("Target", "Selection"); + + let button2 = new Asc.ButtonContextMenu(button1); + button2.text = "Rewrite differently"; + button2.editors = ["word"]; + button2.addCheckers("Selection"); + button2.attachOnClick(async function(){ + let requestEngine = AI.Request.create(AI.ActionType.TextAnalyze); + if (!requestEngine) + return; + + let content = await Asc.Library.GetSelectedText(); + let prompt = Asc.Prompts.getTextRewritePrompt(content); + let result = await requestEngine.chatRequest(prompt); + if (!result) return; + + result = result.replace(/\n\n/g, '\n'); + await Asc.Library.PasteText(result); + }); + + let button3 = new Asc.ButtonContextMenu(button1); + button3.text = "Make longer"; + button3.editors = ["word"]; + button3.addCheckers("Selection"); + button3.attachOnClick(async function(data){ + let requestEngine = AI.Request.create(AI.ActionType.TextAnalyze); + if (!requestEngine) + return; + + let content = await Asc.Library.GetSelectedText(); + let prompt = Asc.Prompts.getTextLongerPrompt(content); + let result = await requestEngine.chatRequest(prompt); + if (!result) return; + + result = result.replace(/\n\n/g, '\n'); + await Asc.Library.PasteText(result); + }); + + let button4 = new Asc.ButtonContextMenu(button1); + button4.text = "Make shorter"; + button4.editors = ["word"]; + button4.addCheckers("Selection"); + button4.attachOnClick(async function(data){ + let requestEngine = AI.Request.create(AI.ActionType.TextAnalyze); + if (!requestEngine) + return; + + let content = await 
Asc.Library.GetSelectedText(); + let prompt = Asc.Prompts.getTextShorterPrompt(content); + let result = await requestEngine.chatRequest(prompt); + if (!result) return; + + result = result.replace(/\n\n/g, '\n'); + await Asc.Library.PasteText(result); + }); + + let button5 = new Asc.ButtonContextMenu(button1); + button5.text = "Explain text in comment"; + button5.separator = true; + button5.editors = ["word"]; + button5.addCheckers("Target", "Selection"); + button5.attachOnClick(async function(){ + let requestEngine = AI.Request.create(AI.ActionType.TextAnalyze); + if (!requestEngine) + return; + + let content = await Asc.Library.GetSelectedText(); + if (!content) + content = await Asc.Library.GetCurrentWord(); + + if (!content) + return; + + let prompt = Asc.Prompts.getExplainPrompt(content); + let result = await requestEngine.chatRequest(prompt); + if (!result) return; + + result = result.replace(/\n\n/g, '\n'); + await Asc.Library.InsertAsComment(result); + }); + + let button6 = new Asc.ButtonContextMenu(button1); + button6.text = "Explain text in hyperlink"; + button6.separator = true; + button6.editors = ["word"]; + button6.addCheckers("Selection"); + button6.attachOnClick(async function(){ + let requestEngine = AI.Request.create(AI.ActionType.TextAnalyze); + if (!requestEngine) + return; + + let content = await Asc.Library.GetSelectedText(); + let prompt = Asc.Prompts.getExplainAsLinkPrompt(content); + let result = await requestEngine.chatRequest(prompt); + if (!result) return; + + result = result.replace(/\n\n/g, '\n'); + await Asc.Library.InsertAsHyperlink(result); + }); + + let button7 = new Asc.ButtonContextMenu(button1); + button7.text = "Fix spelling & grammar"; + button7.separator = true; + button7.editors = ["word"]; + button7.addCheckers("Selection"); + button7.attachOnClick(async function(){ + let requestEngine = AI.Request.create(AI.ActionType.TextAnalyze); + if (!requestEngine) + return; + + let content = await Asc.Library.GetSelectedText(); + let 
prompt = Asc.Prompts.getFixAndSpellPrompt(content); + + let result = await requestEngine.chatRequest(prompt); + if (!result) return; + + if (result !== 'The text is correct, there are no errors in it.') + await Asc.Library.ReplaceTextSmart([result]); + else + console.log('The text is correct, there are no errors in it.'); + }); + + let button8 = new Asc.ButtonContextMenu(button1); + button8.text = "Keywords"; + button8.editors = ["word"]; + button8.addCheckers("Selection"); + button8.attachOnClick(async function(){ + let requestEngine = AI.Request.create(AI.ActionType.TextAnalyze); + if (!requestEngine) + return; + + let content = await Asc.Library.GetSelectedText(); + let prompt = Asc.Prompts.getTextKeywordsPrompt(content); + let result = await requestEngine.chatRequest(prompt); + if (!result) return; + + await Asc.Library.InsertAsText(result); + }); + } + + // Submenu Translate + if (true) + { + let button1 = new Asc.ButtonContextMenu(buttonMain); + button1.text = "Translate"; + button1.icons = getContextMenuButtonIcons("translation"); + button1.editors = ["word", "slide", "cell"]; + button1.addCheckers("Selection"); + + let button2 = new Asc.ButtonContextMenu(button1); + button2.text = "English"; + button2.editors = ["word", "slide", "cell"]; + button2.addCheckers("Selection"); + button2.data = "English"; + button2.attachOnClick(async function(data){ + let requestEngine = AI.Request.create(AI.ActionType.Translation); + if (!requestEngine) + return; + + let lang = data; + let content = await Asc.Library.GetSelectedText(); + if (!content) + return; + + let prompt = Asc.Prompts.getTranslatePrompt(content, lang); + let result = await requestEngine.chatRequest(prompt); + if (!result) return; + + result = Asc.Library.getTranslateResult(result, content); + + await Asc.Library.PasteText(result); + }); + + let button3 = button2.copy(); + button3.text = "French"; + button3.data = "French"; + + let button4 = button2.copy(); + button4.text = "German"; + button4.data = 
"German"; + + let button5 = button2.copy(); + button5.text = "Chinese"; + button5.data = "Chinese"; + + let button6 = button2.copy(); + button6.text = "Japanese"; + button6.data = "Japanese"; + + let button7 = button2.copy(); + button7.text = "Russian"; + button7.data = "Russian"; + + let button8 = button2.copy(); + button8.text = "Korean"; + button8.data = "Korean"; + + let button9 = button2.copy(); + button9.text = "Spanish"; + button9.data = "Spanish"; + + let button10 = button2.copy(); + button10.text = "Italian"; + button10.data = "Italian"; + } + + if (true) + { + let button1 = new Asc.ButtonContextMenu(buttonMain); + button1.text = "Show hyperlink content"; + button1.addCheckers("Hyperlink"); + + button1.onContextMenuShowExtendItem = function(options, item) + { + item.data = options.value; + }; + + button1.attachOnClick(function(data){ + let variation = { + url : "hyperlink.html", + description : window.Asc.plugin.tr("Hyperlink"), + isVisual : true, + buttons : [], + isModal : false, + EditorsSupport : ["word", "slide", "cell", "pdf"], + size : [ 1000, 1000 ] + }; + + var linkWindow = new window.Asc.PluginWindow(); + linkWindow.attachEvent("onGetLink", async function(){ + let link = data; + if (!link) + link = await Asc.Library.GetSelectedText(); + link = link.replace(/\n/g, ''); + link = link.replace(/\r/g, ''); + linkWindow.command("onSetLink", link); + }); + linkWindow.show(variation); + }); + } + + if (true) + { + let buttonImages = new Asc.ButtonContextMenu(buttonMain); + buttonImages.text = "Image"; + buttonImages.icons = getContextMenuButtonIcons("image-ai"); + buttonImages.addCheckers("Selection", "Image", "OleObject"); + + let buttonGen = new Asc.ButtonContextMenu(buttonImages); + buttonGen.text = "Text to Image"; + buttonGen.addCheckers("Selection"); + buttonGen.attachOnClick(async function(){ + let requestEngine = AI.Request.create(AI.ActionType.ImageGeneration); + if (!requestEngine) + return; + + let content = await 
Asc.Library.GetSelectedText(); + if (!content) + return; + + let result = await requestEngine.imageGenerationRequest(content); + if (!result) return; + + if (Asc.plugin.info.editorSubType === "pdf") + return await Asc.Library.AddGeneratedImage(result); + await Asc.Library.AddOleObject(result, content); + }); + + let buttonOCR = new Asc.ButtonContextMenu(buttonImages); + buttonOCR.text = "OCR"; + buttonOCR.addCheckers("Image", "OleObject"); + buttonOCR.attachOnClick(async function(){ + let requestEngine = AI.Request.create(AI.ActionType.OCR); + if (!requestEngine) + return; + + let content = await Asc.Library.GetSelectedImage(); + if (!content) + return; + + let result = await requestEngine.imageOCRRequest(content); + if (!result) return; + + await Asc.Library.InsertAsMD(result, [Asc.PluginsMD.latex]); + }); + + let buttonExplainImage = new Asc.ButtonContextMenu(buttonImages); + buttonExplainImage.text = "Image to Text"; + buttonExplainImage.addCheckers("Image", "OleObject"); + buttonExplainImage.attachOnClick(async function(){ + let requestEngine = AI.Request.create(AI.ActionType.Vision); + if (!requestEngine) + return; + + let content = await Asc.Library.GetSelectedImage(); + if (!content) + return; + + let result = await requestEngine.imageVisionRequest({ + prompt : Asc.Prompts.getImageDescription(), + image : content + }); + if (!result) return; + + await Asc.Library.InsertAsMD(result); + }); + } + + if (true) + { + let button1 = new Asc.ButtonContextMenu(buttonMain); + button1.text = "Chatbot"; + button1.separator = true; + button1.icons = getContextMenuButtonIcons("ask-ai"); + button1.addCheckers("All"); + button1.attachOnClick(async function(){ + let selectedText = await Asc.Library.GetSelectedText(); + chatWindowShow(selectedText); + }); + } + + if (false) + { + let button1 = new Asc.ButtonContextMenu(buttonMain); + button1.text = "Settings"; + button1.separator = true; + button1.addCheckers("All"); + button1.attachOnClick(function(){ + 
onOpenSettingsModal(); + }); + } + + // register toolbar buttons + let buttonMainToolbar = new Asc.ButtonToolbar(); + buttonMainToolbar.text = "AI"; + + window.buttonMainToolbar = buttonMainToolbar; + window.getToolBarButtonIcons = getToolBarButtonIcons; + + if (true) + { + let button1 = new Asc.ButtonToolbar(buttonMainToolbar); + button1.text = "Settings"; + button1.icons = getToolBarButtonIcons("settings"); + button1.attachOnClick(function(data){ + onOpenSettingsModal(); + }); + } + + if (true) + { + let button1 = new Asc.ButtonToolbar(buttonMainToolbar); + button1.separator = true; + button1.text = "Chatbot"; + button1.icons = getToolBarButtonIcons("ask-ai"); + button1.attachOnClick(function(data){ + chatWindowShow(); + }); + + if (Asc.Editor.getType() !== "pdf") { + let button2 = new Asc.ButtonToolbar(buttonMainToolbar); + button2.text = "Summarization"; + button2.icons = getToolBarButtonIcons("summarization"); + button2.attachOnClick(async function(data){ + let requestEngine = AI.Request.create(AI.ActionType.Summarization); + if (!requestEngine) + return; + + onOpenSummarizationModal(); + }); + } + + /* + // TODO: + let button3 = new Asc.ButtonToolbar(buttonMainToolbar); + button3.text = "Text to image"; + button3.icons = getToolBarButtonIcons("text-to-image"); + button3.attachOnClick(function(data){ + console.log(data); + }); + */ + + let button4 = new Asc.ButtonToolbar(buttonMainToolbar); + button4.text = "Translation"; + button4.icons = getToolBarButtonIcons("translation"); + button4.menu = [{ + text:'Settings', + id:'t10n-settings', + onclick: () => { + onTranslateSettingsModal(); + }}]; + button4.split = true; + button4.attachOnClick(async function(){ + let requestEngine = AI.Request.create(AI.ActionType.Translation); + if (!requestEngine) + return; + + const ls_lang_key = "onlyoffice_ai_plugin_translate_lang"; + const currLang = window.localStorage.getItem(ls_lang_key); + + let lang = !!currLang ? 
currLang : "english"; + let content = await Asc.Library.GetSelectedText(); + if (!content) + return; + + let prompt = Asc.Prompts.getTranslatePrompt(content, lang); + let result = await requestEngine.chatRequest(prompt); + if (!result) return; + + result = Asc.Library.getTranslateResult(result, content); + await Asc.Library.PasteText(result); + }); + } + + // register actions + window.AI = window.AI || {}; + var AI = window.AI; + + AI.ActionType = { + Chat : "Chat", + Summarization : "Summarization", + Translation : "Translation", + TextAnalyze : "TextAnalyze", + ImageGeneration : "ImageGeneration", + OCR : "OCR", + Vision : "Vision" + }; + + AI.Actions = {}; + + function ActionUI(name, icon, modelId, capabilities) { + this.name = name || ""; + this.icon = icon || ""; + this.model = modelId || ""; + this.capabilities = (capabilities === undefined) ? AI.CapabilitiesUI.Chat : capabilities; + } + + AI.Actions[AI.ActionType.Chat] = new ActionUI("Chatbot", "ask-ai"); + AI.Actions[AI.ActionType.Summarization] = new ActionUI("Summarization", "summarization"); + AI.Actions[AI.ActionType.Translation] = new ActionUI("Translation", "translation"); + AI.Actions[AI.ActionType.TextAnalyze] = new ActionUI("Text analysis", "text-analysis-ai"); + AI.Actions[AI.ActionType.ImageGeneration] = new ActionUI("Image generation", "image-ai", "", AI.CapabilitiesUI.Image); + AI.Actions[AI.ActionType.OCR] = new ActionUI("OCR", "text-analysis-ai", "", AI.CapabilitiesUI.Vision); + AI.Actions[AI.ActionType.Vision] = new ActionUI("Vision", "vision-ai", "", AI.CapabilitiesUI.Vision); + + AI.ActionsGetKeys = function() + { + return [ + AI.ActionType.Chat, + AI.ActionType.Summarization, + AI.ActionType.Translation, + AI.ActionType.TextAnalyze, + AI.ActionType.ImageGeneration, + AI.ActionType.OCR, + AI.ActionType.Vision + ]; + }; + + AI.ActionsGetSorted = function() + { + let keys = AI.ActionsGetKeys(); + let count = keys.length; + let actions = new Array(count); + for (let i = 0; i < count; i++) + { 
+ let src = AI.Actions[keys[i]]; + actions[i] = { + id : keys[i], + name : Asc.plugin.tr(src.name), + icon : src.icon, + model : src.model, + capabilities : src.capabilities + } + } + return actions; + }; + + var actions_key = "onlyoffice_ai_actions_key"; + AI.ActionsSave = function() + { + try + { + window.localStorage.setItem(actions_key, JSON.stringify(AI.Actions)); + return true; + } + catch (e) + { + } + return false; + }; + + AI.ActionsLoad = function() + { + let obj = null; + try + { + obj = JSON.parse(window.localStorage.getItem(actions_key)); + } + catch (e) + { + obj = (AI.DEFAULT_SERVER_SETTINGS && AI.DEFAULT_SERVER_SETTINGS.actions) ? AI.DEFAULT_SERVER_SETTINGS.actions : null; + } + + if (obj) + { + for (let i in obj) + { + if (AI.Actions[i] && obj[i].model) + AI.Actions[i].model = obj[i].model; + } + return true; + } + return false; + }; + + AI.ActionsChange = function(id, model) + { + if (AI.Actions[id]) + { + AI.Actions[id].model = model; + AI.ActionsSave(); + } + }; + + AI.ActionsLoad(); +} diff --git a/DocService/sources/ai/engine/storage.js b/DocService/sources/ai/engine/storage.js new file mode 100644 index 00000000..c0c02048 --- /dev/null +++ b/DocService/sources/ai/engine/storage.js @@ -0,0 +1,71 @@ +(function(exports, undefined) +{ + exports.AI = exports.AI || {}; + var AI = exports.AI; + AI.UI = AI.UI || {}; + AI.Storage = AI.Storage || {}; + AI.Storage.Version = 3; + + AI.isLocalDesktop = (function(){ + if (window.navigator && window.navigator.userAgent.toLowerCase().indexOf("ascdesktopeditor") < 0) + return false; + if (window.location && window.location.protocol == "file:") + return true; + if (window.document && window.document.currentScript && 0 == window.document.currentScript.src.indexOf("file:///")) + return true; + return false; + })(); + + AI.isLocalUrl = function(url) { + let filter = ["localhost", "127.0.0.1"]; + for (let i = 0, len = filter.length; i < len; i++) { + let pos = url.indexOf(filter[i]); + if (pos >= 0 && pos < 10) + 
return true; + } + return false; + }; + + AI.getDesktopLocalVersion = function() { + let ret = 99 * 1000000 + 99 * 1000 + 99; + if (!AI.isLocalDesktop) + return ret; + let pos = window.navigator.userAgent.indexOf("AscDesktopEditor/"); + let pos2 = window.navigator.userAgent.indexOf(" ", pos); + if (pos === -1 || pos2 === -1) + return ret; + try { + let tokens = window.navigator.userAgent.substring(pos + 17, pos2).split("."); + return parseInt(tokens[0]) * 1000000 + parseInt(tokens[1]) * 1000 + parseInt(tokens[2]); + } catch (e) { + } + + return ret; + }; + + AI.loadResourceAsText = async function(url) { + return new Promise(resolve => (function(){ + try { + var xhr = new XMLHttpRequest(); + if (xhr) { + xhr.open('GET', url, true); + xhr.onload = function () { + var status = xhr.status; + if (status == 200 || location.href.indexOf("file:") == 0) { + resolve(xhr.responseText); + } else { + resolve(""); + } + }; + xhr.onerror = function() { + resolve(""); + } + xhr.send(''); + } + } catch (e) { + resolve(""); + } + })()); + }; + +})(window); diff --git a/DocService/sources/aiProxyHandler.js b/DocService/sources/aiProxyHandler.js deleted file mode 100644 index 5d377648..00000000 --- a/DocService/sources/aiProxyHandler.js +++ /dev/null @@ -1,188 +0,0 @@ -/* - * (c) Copyright Ascensio System SIA 2010-2024 - * - * This program is a free software product. You can redistribute it and/or - * modify it under the terms of the GNU Affero General Public License (AGPL) - * version 3 as published by the Free Software Foundation. In accordance with - * Section 7(a) of the GNU AGPL its Section 15 shall be amended to the effect - * that Ascensio System SIA expressly excludes the warranty of non-infringement - * of any third-party rights. - * - * This program is distributed WITHOUT ANY WARRANTY; without even the implied - * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
For - * details, see the GNU AGPL at: http://www.gnu.org/licenses/agpl-3.0.html - * - * You can contact Ascensio System SIA at 20A-6 Ernesta Birznieka-Upish - * street, Riga, Latvia, EU, LV-1050. - * - * The interactive user interfaces in modified source and object code versions - * of the Program must display Appropriate Legal Notices, as required under - * Section 5 of the GNU AGPL version 3. - * - * Pursuant to Section 7(b) of the License you must retain the original Product - * logo when distributing the program. Pursuant to Section 7(e) we decline to - * grant you any rights under trademark law for use of our trademarks. - * - * All the Product's GUI elements, including illustrations and icon sets, as - * well as technical writing content are licensed under the terms of the - * Creative Commons Attribution-ShareAlike 4.0 International. See the License - * terms at http://creativecommons.org/licenses/by-sa/4.0/legalcode - * - */ - -'use strict'; - -const { pipeline } = require('stream/promises'); -const config = require('config'); -const utils = require('../../Common/sources/utils'); -const operationContext = require('./../../Common/sources/operationContext'); - -/** - * Helper function to set CORS headers if the request origin is allowed - * - * @param {object} req - Express request object - * @param {object} res - Express response object - * @param {object} ctx - Operation context for logging - * @param {boolean} handleOptions - Whether to handle OPTIONS requests (default: true) - * @returns {boolean} - True if this was an OPTIONS request that was handled - */ -function handleCorsHeaders(req, res, ctx, handleOptions = true) { - const requestOrigin = req.headers.origin; - - // If no origin in request or allowed origins list is empty, do nothing - if (!requestOrigin || cfgAiApiAllowedOrigins.length === 0) { - return false; - } - - // If the origin is in our allowed list - if (cfgAiApiAllowedOrigins.includes(requestOrigin)) { - 
res.setHeader('Access-Control-Allow-Origin', requestOrigin); - res.setHeader('Access-Control-Allow-Credentials', 'true'); - res.setHeader('Vary', 'Origin'); // Important when using dynamic origin - - // If debug logging is available - if (ctx && ctx.logger) { - ctx.logger.debug('CORS headers set for origin: %s (matched allowed list)', requestOrigin); - } - - // Handle preflight OPTIONS requests if requested - if (handleOptions && req.method === 'OPTIONS') { - res.setHeader('Access-Control-Allow-Methods', 'DELETE, GET, HEAD, OPTIONS, PATCH, POST, PUT'); - // Allow all headers with wildcard - res.setHeader('Access-Control-Allow-Headers', '*'); - - // For preflight request, we should also set non-CORS headers to match the API - res.setHeader('Allow', 'OPTIONS, HEAD, GET, POST, PUT, DELETE, PATCH'); - res.setHeader('Content-Length', '0'); - res.setHeader('Content-Type', 'text/html; charset=utf-8'); - - // Return 204 which is standard for OPTIONS preflight - res.sendStatus(204); // No Content response for OPTIONS - return true; // Signal that we handled an OPTIONS request - } - } - - return false; // Not an OPTIONS request or origin not allowed -} - - -/** - * Makes an HTTP request to an AI API endpoint using the provided request and response objects - * - * @param {object} req - Express request object - * @param {object} res - Express response object - * @returns {Promise} - Promise resolving when the request is complete - */ -async function proxyRequest(req, res) { - // Create operation context for logging - const ctx = new operationContext.Context(); - ctx.initFromRequest(req); - - try { - ctx.logger.info('Start proxyRequest'); - // 1. 
Handle CORS preflight (OPTIONS) requests if necessary - if (handleCorsHeaders(req, res, ctx) === true) { - return; // OPTIONS request handled, stop further processing - } - - let body = JSON.parse(req.body); - - // Configure timeout options for the request - const timeoutOptions = { - connectionAndInactivity: cfgAiApiTimeout || '30s', - wholeCycle: cfgAiApiTimeout || '30s' - }; - - // Get request size limit if configured - const sizeLimit = 10 * 1024 * 1024; // Default to 10MB - - // Create a copy of the headers from the request - const headers = { ...body.headers }; - - // Get API key from environment or configuration - const aiApi = config.get('ai-api'); - const apiKey = aiApi.providers[0].key; - - // Add authorization header if API key is available - if (apiKey) { - headers['Authorization'] = `Bearer ${apiKey}`; - } - - // Create request parameters object - const requestParams = { - method: body.method, - uri: body.target, - headers, - body: body.data, - timeout: timeoutOptions, - limit: sizeLimit, - filterPrivate: false - }; - - // Create a safe copy for logging without sensitive info - const safeLogParams = { ...requestParams }; - if (safeLogParams.headers) { - safeLogParams.headers = { ...safeLogParams.headers }; - if (safeLogParams.headers.Authorization) { - safeLogParams.headers.Authorization = '[REDACTED]'; - } - } - - // Log the sanitized request parameters - ctx.logger.debug(`Proxying request: %j`, safeLogParams); - - // Use utils.httpRequest to make the request - const result = await utils.httpRequest( - ctx, // Operation context - requestParams.method, // HTTP method - requestParams.uri, // Target URL - requestParams.headers, // Request headers - requestParams.body, // Request body - requestParams.timeout, // Timeout configuration - requestParams.limit, // Size limit - requestParams.filterPrivate // Filter private requests - ); - - // Set the response headers to match the target response - res.set(result.response.headers); - - // Use pipeline to pipe 
the response data to the client - await pipeline(result.stream, res); - - } catch (error) { - ctx.logger.error(`AI API request error: %s`, error.stack); - res.status(200).json({ - "error": { - "message": "AI API request error", - "code": "500" - } - }); - } finally { - ctx.logger.info('End proxyRequest'); - } -} - - -module.exports = { - proxyRequest -}; diff --git a/DocService/sources/server.js b/DocService/sources/server.js index 1171baf8..29464219 100644 --- a/DocService/sources/server.js +++ b/DocService/sources/server.js @@ -59,7 +59,7 @@ const operationContext = require('./../../Common/sources/operationContext'); const tenantManager = require('./../../Common/sources/tenantManager'); const staticRouter = require('./routes/static'); const ms = require('ms'); -const aiProxyHandler = require('./aiProxyHandler'); +const aiProxyHandler = require('./ai/aiProxyHandler'); const cfgWopiEnable = config.get('wopi.enable'); const cfgWopiDummyEnable = config.get('wopi.dummy.enable');