[feature] Move storage-related files into "storage" folder; For bug 73502

This commit is contained in:
Pavel Ostrovskij
2025-04-03 17:30:58 +03:00
committed by Sergey Konovalov
parent e5f3f1fcf9
commit 9557fe7d43
17 changed files with 5117 additions and 5115 deletions

View File

@@ -5,9 +5,9 @@ const { BlobServiceClient, StorageSharedKeyCredential, generateBlobSASQueryParam
const mime = require('mime');
const config = require('config');
const { Readable } = require('stream');
const utils = require('./utils');
const utils = require('../utils');
const ms = require('ms');
const commonDefines = require('./../../Common/sources/commondefines');
const commonDefines = require('../commondefines');
const cfgExpSessionAbsolute = ms(config.get('services.CoAuthoring.expire.sessionabsolute'));
const MAX_DELETE_OBJECTS = 1000;

View File

@@ -1,215 +1,215 @@
/*
* (c) Copyright Ascensio System SIA 2010-2024
*
* This program is a free software product. You can redistribute it and/or
* modify it under the terms of the GNU Affero General Public License (AGPL)
* version 3 as published by the Free Software Foundation. In accordance with
* Section 7(a) of the GNU AGPL its Section 15 shall be amended to the effect
* that Ascensio System SIA expressly excludes the warranty of non-infringement
* of any third-party rights.
*
* This program is distributed WITHOUT ANY WARRANTY; without even the implied
* warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. For
* details, see the GNU AGPL at: http://www.gnu.org/licenses/agpl-3.0.html
*
* You can contact Ascensio System SIA at 20A-6 Ernesta Birznieka-Upish
* street, Riga, Latvia, EU, LV-1050.
*
* The interactive user interfaces in modified source and object code versions
* of the Program must display Appropriate Legal Notices, as required under
* Section 5 of the GNU AGPL version 3.
*
* Pursuant to Section 7(b) of the License you must retain the original Product
* logo when distributing the program. Pursuant to Section 7(e) we decline to
* grant you any rights under trademark law for use of our trademarks.
*
* All the Product's GUI elements, including illustrations and icon sets, as
* well as technical writing content are licensed under the terms of the
* Creative Commons Attribution-ShareAlike 4.0 International. See the License
* terms at http://creativecommons.org/licenses/by-sa/4.0/legalcode
*
*/
'use strict';
const os = require('os');
const cluster = require('cluster');
var config = require('config');
var utils = require('./utils');
const cfgCacheStorage = config.get('storage');
const cfgPersistentStorage = utils.deepMergeObjects({}, cfgCacheStorage, config.get('persistentStorage'));
const cacheStorage = require('./' + cfgCacheStorage.name);
const persistentStorage = require('./' + cfgPersistentStorage.name);
const tenantManager = require('./tenantManager');
const HEALTH_CHECK_KEY_MAX = 10000;
function getStoragePath(ctx, strPath, opt_specialDir) {
opt_specialDir = opt_specialDir || cfgCacheStorage.cacheFolderName;
return opt_specialDir + '/' + tenantManager.getTenantPathPrefix(ctx) + strPath.replace(/\\/g, '/');
}
function getStorage(opt_specialDir) {
return opt_specialDir ? persistentStorage : cacheStorage;
}
function getStorageCfg(ctx, opt_specialDir) {
return opt_specialDir ? cfgPersistentStorage : cfgCacheStorage;
}
function canCopyBetweenStorage(storageCfgSrc, storageCfgDst) {
return storageCfgSrc.name === storageCfgDst.name && storageCfgSrc.endpoint === storageCfgDst.endpoint;
}
function isDifferentPersistentStorage() {
return !canCopyBetweenStorage(cfgCacheStorage, cfgPersistentStorage);
}
async function headObject(ctx, strPath, opt_specialDir) {
let storage = getStorage(opt_specialDir);
let storageCfg = getStorageCfg(ctx, opt_specialDir);
return await storage.headObject(storageCfg, getStoragePath(ctx, strPath, opt_specialDir));
}
async function getObject(ctx, strPath, opt_specialDir) {
let storage = getStorage(opt_specialDir);
let storageCfg = getStorageCfg(ctx, opt_specialDir);
return await storage.getObject(storageCfg, getStoragePath(ctx, strPath, opt_specialDir));
}
async function createReadStream(ctx, strPath, opt_specialDir) {
let storage = getStorage(opt_specialDir);
let storageCfg = getStorageCfg(ctx, opt_specialDir);
return await storage.createReadStream(storageCfg, getStoragePath(ctx, strPath, opt_specialDir));
}
async function putObject(ctx, strPath, buffer, contentLength, opt_specialDir) {
let storage = getStorage(opt_specialDir);
let storageCfg = getStorageCfg(ctx, opt_specialDir);
return await storage.putObject(storageCfg, getStoragePath(ctx, strPath, opt_specialDir), buffer, contentLength);
}
async function uploadObject(ctx, strPath, filePath, opt_specialDir) {
let storage = getStorage(opt_specialDir);
let storageCfg = getStorageCfg(ctx, opt_specialDir);
return await storage.uploadObject(storageCfg, getStoragePath(ctx, strPath, opt_specialDir), filePath);
}
async function copyObject(ctx, sourceKey, destinationKey, opt_specialDirSrc, opt_specialDirDst) {
let storageSrc = getStorage(opt_specialDirSrc);
let storagePathSrc = getStoragePath(ctx, sourceKey, opt_specialDirSrc);
let storagePathDst = getStoragePath(ctx, destinationKey, opt_specialDirDst);
let storageCfgSrc = getStorageCfg(ctx, opt_specialDirSrc);
let storageCfgDst = getStorageCfg(ctx, opt_specialDirDst);
if (canCopyBetweenStorage(storageCfgSrc, storageCfgDst)){
return await storageSrc.copyObject(storageCfgSrc, storageCfgDst, storagePathSrc, storagePathDst);
} else {
let storageDst = getStorage(opt_specialDirDst);
//todo stream
let buffer = await storageSrc.getObject(storageCfgSrc, storagePathSrc);
return await storageDst.putObject(storageCfgDst, storagePathDst, buffer, buffer.length);
}
}
async function copyPath(ctx, sourcePath, destinationPath, opt_specialDirSrc, opt_specialDirDst) {
let list = await listObjects(ctx, sourcePath, opt_specialDirSrc);
await Promise.all(list.map(function(curValue) {
return copyObject(ctx, curValue, destinationPath + '/' + getRelativePath(sourcePath, curValue), opt_specialDirSrc, opt_specialDirDst);
}));
}
async function listObjects(ctx, strPath, opt_specialDir) {
let storage = getStorage(opt_specialDir);
let storageCfg = getStorageCfg(ctx, opt_specialDir);
let prefix = getStoragePath(ctx, "", opt_specialDir);
try {
let list = await storage.listObjects(storageCfg, getStoragePath(ctx, strPath, opt_specialDir));
return list.map((currentValue) => {
return currentValue.substring(prefix.length);
});
} catch (e) {
ctx.logger.error('storage.listObjects: %s', e.stack);
return [];
}
}
async function deleteObject(ctx, strPath, opt_specialDir) {
let storage = getStorage(opt_specialDir);
let storageCfg = getStorageCfg(ctx, opt_specialDir);
return await storage.deleteObject(storageCfg, getStoragePath(ctx, strPath, opt_specialDir));
}
async function deletePath(ctx, strPath, opt_specialDir) {
let storage = getStorage(opt_specialDir);
let storageCfg = getStorageCfg(ctx, opt_specialDir);
return await storage.deletePath(storageCfg, getStoragePath(ctx, strPath, opt_specialDir));
}
async function getSignedUrl(ctx, baseUrl, strPath, urlType, optFilename, opt_creationDate, opt_specialDir) {
let storage = getStorage(opt_specialDir);
let storageCfg = getStorageCfg(ctx, opt_specialDir);
return await storage.getSignedUrl(ctx, storageCfg, baseUrl, getStoragePath(ctx, strPath, opt_specialDir), urlType, optFilename, opt_creationDate);
}
/**
 * Build a map of {relative path -> signed URL} for every object stored
 * under strPath.
 * @param {object} ctx - request context (tenant, logger).
 * @param {string} baseUrl - base URL used to build the signed links.
 * @param {string} strPath - storage path whose children are listed.
 * @param {number} urlType - commonDefines.c_oAscUrlTypes value.
 * @param {number} [opt_creationDate] - anchor timestamp for URL expiry.
 * @param {string} [opt_specialDir] - selects the persistent storage.
 * @returns {Promise<Object<string,string>>} relative path -> signed URL.
 */
async function getSignedUrls(ctx, baseUrl, strPath, urlType, opt_creationDate, opt_specialDir) {
  let storagePathSrc = getStoragePath(ctx, strPath, opt_specialDir);
  let storage = getStorage(opt_specialDir);
  let storageCfg = getStorageCfg(ctx, opt_specialDir);
  //fix: listObjects takes (storageCfg, strPath); a stray third storageCfg argument was passed
  let list = await storage.listObjects(storageCfg, storagePathSrc);
  let urls = await Promise.all(list.map(function(curValue) {
    return storage.getSignedUrl(ctx, storageCfg, baseUrl, curValue, urlType, undefined, opt_creationDate);
  }));
  let outputMap = {};
  for (let i = 0; i < list.length && i < urls.length; ++i) {
    outputMap[getRelativePath(storagePathSrc, list[i])] = urls[i];
  }
  return outputMap;
}
async function getSignedUrlsArrayByArray(ctx, baseUrl, list, urlType, opt_specialDir) {
return await Promise.all(list.map(function (curValue) {
let storage = getStorage(opt_specialDir);
let storageCfg = getStorageCfg(ctx, opt_specialDir);
let storagePathSrc = getStoragePath(ctx, curValue, opt_specialDir);
return storage.getSignedUrl(ctx, storageCfg, baseUrl, storagePathSrc, urlType, undefined);
}));
}
async function getSignedUrlsByArray(ctx, baseUrl, list, optPath, urlType, opt_specialDir) {
let urls = await getSignedUrlsArrayByArray(ctx, baseUrl, list, urlType, opt_specialDir);
var outputMap = {};
for (var i = 0; i < list.length && i < urls.length; ++i) {
if (optPath) {
let storagePathSrc = getStoragePath(ctx, optPath, opt_specialDir);
outputMap[getRelativePath(storagePathSrc, list[i])] = urls[i];
} else {
outputMap[list[i]] = urls[i];
}
}
return outputMap;
}
function getRelativePath(strBase, strPath) {
return strPath.substring(strBase.length + 1);
}
async function healthCheck(ctx, opt_specialDir) {
const clusterId = cluster.isWorker ? cluster.worker.id : '';
const tempName = 'hc_' + os.hostname() + '_' + clusterId + '_' + Math.round(Math.random() * HEALTH_CHECK_KEY_MAX);
const tempBuffer = Buffer.from([1, 2, 3, 4, 5]);
try {
//It's proper to putObject one tempName
await putObject(ctx, tempName, tempBuffer, tempBuffer.length, opt_specialDir);
//try to prevent case, when another process can remove same tempName
await deleteObject(ctx, tempName, opt_specialDir);
} catch (err) {
ctx.logger.warn('healthCheck storage(%s) error %s', opt_specialDir, err.stack);
}
}
function needServeStatic(opt_specialDir) {
let storage = getStorage(opt_specialDir);
return storage.needServeStatic();
}
module.exports = {
headObject,
getObject,
createReadStream,
putObject,
uploadObject,
copyObject,
copyPath,
listObjects,
deleteObject,
deletePath,
getSignedUrl,
getSignedUrls,
getSignedUrlsArrayByArray,
getSignedUrlsByArray,
getRelativePath,
isDifferentPersistentStorage,
healthCheck,
needServeStatic
};
/*
* (c) Copyright Ascensio System SIA 2010-2024
*
* This program is a free software product. You can redistribute it and/or
* modify it under the terms of the GNU Affero General Public License (AGPL)
* version 3 as published by the Free Software Foundation. In accordance with
* Section 7(a) of the GNU AGPL its Section 15 shall be amended to the effect
* that Ascensio System SIA expressly excludes the warranty of non-infringement
* of any third-party rights.
*
* This program is distributed WITHOUT ANY WARRANTY; without even the implied
* warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. For
* details, see the GNU AGPL at: http://www.gnu.org/licenses/agpl-3.0.html
*
* You can contact Ascensio System SIA at 20A-6 Ernesta Birznieka-Upish
* street, Riga, Latvia, EU, LV-1050.
*
* The interactive user interfaces in modified source and object code versions
* of the Program must display Appropriate Legal Notices, as required under
* Section 5 of the GNU AGPL version 3.
*
* Pursuant to Section 7(b) of the License you must retain the original Product
* logo when distributing the program. Pursuant to Section 7(e) we decline to
* grant you any rights under trademark law for use of our trademarks.
*
* All the Product's GUI elements, including illustrations and icon sets, as
* well as technical writing content are licensed under the terms of the
* Creative Commons Attribution-ShareAlike 4.0 International. See the License
* terms at http://creativecommons.org/licenses/by-sa/4.0/legalcode
*
*/
'use strict';
const os = require('os');
const cluster = require('cluster');
var config = require('config');
var utils = require('../utils');
const cfgCacheStorage = config.get('storage');
const cfgPersistentStorage = utils.deepMergeObjects({}, cfgCacheStorage, config.get('persistentStorage'));
const cacheStorage = require('./' + cfgCacheStorage.name);
const persistentStorage = require('./' + cfgPersistentStorage.name);
const tenantManager = require('../tenantManager');
const HEALTH_CHECK_KEY_MAX = 10000;
/**
 * Build the backend key for strPath: special dir (or the default cache
 * folder) + tenant prefix + the path with backslashes normalized to '/'.
 */
function getStoragePath(ctx, strPath, opt_specialDir) {
  const specialDir = opt_specialDir || cfgCacheStorage.cacheFolderName;
  const normalized = strPath.replace(/\\/g, '/');
  return specialDir + '/' + tenantManager.getTenantPathPrefix(ctx) + normalized;
}
/**
 * Select the storage backend: persistent when a special dir is requested,
 * otherwise the cache backend.
 */
function getStorage(opt_specialDir) {
  if (opt_specialDir) {
    return persistentStorage;
  }
  return cacheStorage;
}
/**
 * Select the configuration matching the backend chosen by getStorage.
 * NOTE(review): ctx is accepted for interface symmetry but unused here.
 */
function getStorageCfg(ctx, opt_specialDir) {
  if (opt_specialDir) {
    return cfgPersistentStorage;
  }
  return cfgCacheStorage;
}
/**
 * A server-side copy is possible only between storages with the same
 * driver name and endpoint.
 */
function canCopyBetweenStorage(storageCfgSrc, storageCfgDst) {
  const sameDriver = storageCfgSrc.name === storageCfgDst.name;
  const sameEndpoint = storageCfgSrc.endpoint === storageCfgDst.endpoint;
  return sameDriver && sameEndpoint;
}
/**
 * True when cache and persistent storage are distinct backends.
 */
function isDifferentPersistentStorage() {
  return !canCopyBetweenStorage(cfgCacheStorage, cfgPersistentStorage);
}
/**
 * Thin wrappers: each resolves the backend, its config and the
 * tenant-aware key, then delegates to the selected storage driver.
 */
async function headObject(ctx, strPath, opt_specialDir) {
  const storage = getStorage(opt_specialDir);
  const storageCfg = getStorageCfg(ctx, opt_specialDir);
  const key = getStoragePath(ctx, strPath, opt_specialDir);
  return storage.headObject(storageCfg, key);
}
async function getObject(ctx, strPath, opt_specialDir) {
  const storage = getStorage(opt_specialDir);
  const storageCfg = getStorageCfg(ctx, opt_specialDir);
  const key = getStoragePath(ctx, strPath, opt_specialDir);
  return storage.getObject(storageCfg, key);
}
async function createReadStream(ctx, strPath, opt_specialDir) {
  const storage = getStorage(opt_specialDir);
  const storageCfg = getStorageCfg(ctx, opt_specialDir);
  const key = getStoragePath(ctx, strPath, opt_specialDir);
  return storage.createReadStream(storageCfg, key);
}
async function putObject(ctx, strPath, buffer, contentLength, opt_specialDir) {
  const storage = getStorage(opt_specialDir);
  const storageCfg = getStorageCfg(ctx, opt_specialDir);
  const key = getStoragePath(ctx, strPath, opt_specialDir);
  return storage.putObject(storageCfg, key, buffer, contentLength);
}
async function uploadObject(ctx, strPath, filePath, opt_specialDir) {
  const storage = getStorage(opt_specialDir);
  const storageCfg = getStorageCfg(ctx, opt_specialDir);
  const key = getStoragePath(ctx, strPath, opt_specialDir);
  return storage.uploadObject(storageCfg, key, filePath);
}
/**
 * Copy a single object between (possibly different) storages.
 * Uses the driver's native copy when source and destination share the
 * same driver name and endpoint; otherwise falls back to a full
 * download + upload through memory.
 * @param {object} ctx - request context (tenant, logger).
 * @param {string} sourceKey - source path relative to the storage root.
 * @param {string} destinationKey - destination path relative to the storage root.
 * @param {string} [opt_specialDirSrc] - persistent-storage selector for the source.
 * @param {string} [opt_specialDirDst] - persistent-storage selector for the destination.
 */
async function copyObject(ctx, sourceKey, destinationKey, opt_specialDirSrc, opt_specialDirDst) {
  let storageSrc = getStorage(opt_specialDirSrc);
  let storagePathSrc = getStoragePath(ctx, sourceKey, opt_specialDirSrc);
  let storagePathDst = getStoragePath(ctx, destinationKey, opt_specialDirDst);
  let storageCfgSrc = getStorageCfg(ctx, opt_specialDirSrc);
  let storageCfgDst = getStorageCfg(ctx, opt_specialDirDst);
  if (canCopyBetweenStorage(storageCfgSrc, storageCfgDst)){
    // Same backend: delegate to the driver's server-side copy.
    return await storageSrc.copyObject(storageCfgSrc, storageCfgDst, storagePathSrc, storagePathDst);
  } else {
    let storageDst = getStorage(opt_specialDirDst);
    //todo stream
    // Cross-backend fallback: buffers the whole object in memory.
    let buffer = await storageSrc.getObject(storageCfgSrc, storagePathSrc);
    return await storageDst.putObject(storageCfgDst, storagePathDst, buffer, buffer.length);
  }
}
/**
 * Copy every object under sourcePath to destinationPath, preserving the
 * relative layout; individual copies run concurrently.
 */
async function copyPath(ctx, sourcePath, destinationPath, opt_specialDirSrc, opt_specialDirDst) {
  const keys = await listObjects(ctx, sourcePath, opt_specialDirSrc);
  const tasks = keys.map((key) => {
    const relative = getRelativePath(sourcePath, key);
    return copyObject(ctx, key, destinationPath + '/' + relative, opt_specialDirSrc, opt_specialDirDst);
  });
  await Promise.all(tasks);
}
/**
 * List object keys under strPath, returned relative to the tenant prefix.
 * Errors are logged and reported as an empty listing.
 */
async function listObjects(ctx, strPath, opt_specialDir) {
  const storage = getStorage(opt_specialDir);
  const storageCfg = getStorageCfg(ctx, opt_specialDir);
  const tenantPrefix = getStoragePath(ctx, "", opt_specialDir);
  try {
    const keys = await storage.listObjects(storageCfg, getStoragePath(ctx, strPath, opt_specialDir));
    return keys.map((key) => key.substring(tenantPrefix.length));
  } catch (e) {
    ctx.logger.error('storage.listObjects: %s', e.stack);
    return [];
  }
}
/**
 * Delete a single object from the selected storage.
 */
async function deleteObject(ctx, strPath, opt_specialDir) {
  const storage = getStorage(opt_specialDir);
  const storageCfg = getStorageCfg(ctx, opt_specialDir);
  const key = getStoragePath(ctx, strPath, opt_specialDir);
  return storage.deleteObject(storageCfg, key);
}
/**
 * Recursively delete everything under strPath in the selected storage.
 */
async function deletePath(ctx, strPath, opt_specialDir) {
  const storage = getStorage(opt_specialDir);
  const storageCfg = getStorageCfg(ctx, opt_specialDir);
  const key = getStoragePath(ctx, strPath, opt_specialDir);
  return storage.deletePath(storageCfg, key);
}
/**
 * Build a signed download URL for a single object in the selected storage.
 */
async function getSignedUrl(ctx, baseUrl, strPath, urlType, optFilename, opt_creationDate, opt_specialDir) {
  const storage = getStorage(opt_specialDir);
  const storageCfg = getStorageCfg(ctx, opt_specialDir);
  const key = getStoragePath(ctx, strPath, opt_specialDir);
  return storage.getSignedUrl(ctx, storageCfg, baseUrl, key, urlType, optFilename, opt_creationDate);
}
/**
 * Build a map of {relative path -> signed URL} for every object stored
 * under strPath.
 * @param {object} ctx - request context (tenant, logger).
 * @param {string} baseUrl - base URL used to build the signed links.
 * @param {string} strPath - storage path whose children are listed.
 * @param {number} urlType - commonDefines.c_oAscUrlTypes value.
 * @param {number} [opt_creationDate] - anchor timestamp for URL expiry.
 * @param {string} [opt_specialDir] - selects the persistent storage.
 * @returns {Promise<Object<string,string>>} relative path -> signed URL.
 */
async function getSignedUrls(ctx, baseUrl, strPath, urlType, opt_creationDate, opt_specialDir) {
  let storagePathSrc = getStoragePath(ctx, strPath, opt_specialDir);
  let storage = getStorage(opt_specialDir);
  let storageCfg = getStorageCfg(ctx, opt_specialDir);
  //fix: listObjects takes (storageCfg, strPath); a stray third storageCfg argument was passed
  let list = await storage.listObjects(storageCfg, storagePathSrc);
  let urls = await Promise.all(list.map(function(curValue) {
    return storage.getSignedUrl(ctx, storageCfg, baseUrl, curValue, urlType, undefined, opt_creationDate);
  }));
  let outputMap = {};
  for (let i = 0; i < list.length && i < urls.length; ++i) {
    outputMap[getRelativePath(storagePathSrc, list[i])] = urls[i];
  }
  return outputMap;
}
/**
 * Build signed URLs for an explicit list of storage paths, returned in
 * the same order as the input list.
 * @param {object} ctx - request context (tenant, logger).
 * @param {string} baseUrl - base URL used to build the signed links.
 * @param {string[]} list - storage paths relative to the tenant prefix.
 * @param {number} urlType - commonDefines.c_oAscUrlTypes value.
 * @param {string} [opt_specialDir] - selects the persistent storage.
 * @returns {Promise<string[]>} signed URL per input path.
 */
async function getSignedUrlsArrayByArray(ctx, baseUrl, list, urlType, opt_specialDir) {
  //hoist loop-invariant backend/config lookups out of the per-item callback
  const storage = getStorage(opt_specialDir);
  const storageCfg = getStorageCfg(ctx, opt_specialDir);
  return await Promise.all(list.map(function (curValue) {
    let storagePathSrc = getStoragePath(ctx, curValue, opt_specialDir);
    return storage.getSignedUrl(ctx, storageCfg, baseUrl, storagePathSrc, urlType, undefined);
  }));
}
/**
 * Build a map keyed by storage path (or by path relative to optPath when
 * given) to signed URLs, for an explicit list of paths.
 * @param {object} ctx - request context (tenant, logger).
 * @param {string} baseUrl - base URL used to build the signed links.
 * @param {string[]} list - storage paths relative to the tenant prefix.
 * @param {string} [optPath] - base path; keys become relative to it.
 * @param {number} urlType - commonDefines.c_oAscUrlTypes value.
 * @param {string} [opt_specialDir] - selects the persistent storage.
 * @returns {Promise<Object<string,string>>} path -> signed URL.
 */
async function getSignedUrlsByArray(ctx, baseUrl, list, optPath, urlType, opt_specialDir) {
  let urls = await getSignedUrlsArrayByArray(ctx, baseUrl, list, urlType, opt_specialDir);
  //hoist loop-invariant base-path computation out of the loop
  const basePath = optPath ? getStoragePath(ctx, optPath, opt_specialDir) : undefined;
  let outputMap = {};
  for (let i = 0; i < list.length && i < urls.length; ++i) {
    if (basePath) {
      outputMap[getRelativePath(basePath, list[i])] = urls[i];
    } else {
      outputMap[list[i]] = urls[i];
    }
  }
  return outputMap;
}
/**
 * Strip strBase plus the following '/' separator from strPath.
 */
function getRelativePath(strBase, strPath) {
  const prefixLength = strBase.length + 1;
  return strPath.substring(prefixLength);
}
/**
 * Best-effort storage health probe: writes a small unique object and
 * deletes it again. Failures are logged as warnings, never thrown.
 * @param {object} ctx - request context (tenant, logger).
 * @param {string} [opt_specialDir] - selects the persistent storage.
 */
async function healthCheck(ctx, opt_specialDir) {
  // Unique key per host/worker/random to avoid collisions between probes.
  const clusterId = cluster.isWorker ? cluster.worker.id : '';
  const tempName = 'hc_' + os.hostname() + '_' + clusterId + '_' + Math.round(Math.random() * HEALTH_CHECK_KEY_MAX);
  const tempBuffer = Buffer.from([1, 2, 3, 4, 5]);
  try {
    //It's proper to putObject one tempName
    await putObject(ctx, tempName, tempBuffer, tempBuffer.length, opt_specialDir);
    //try to prevent case, when another process can remove same tempName
    await deleteObject(ctx, tempName, opt_specialDir);
  } catch (err) {
    // Health check is advisory: report, do not propagate.
    ctx.logger.warn('healthCheck storage(%s) error %s', opt_specialDir, err.stack);
  }
}
/**
 * Ask the selected backend whether its files must be served statically
 * by this process.
 */
function needServeStatic(opt_specialDir) {
  return getStorage(opt_specialDir).needServeStatic();
}
module.exports = {
headObject,
getObject,
createReadStream,
putObject,
uploadObject,
copyObject,
copyPath,
listObjects,
deleteObject,
deletePath,
getSignedUrl,
getSignedUrls,
getSignedUrlsArrayByArray,
getSignedUrlsByArray,
getRelativePath,
isDifferentPersistentStorage,
healthCheck,
needServeStatic
};

View File

@@ -1,183 +1,183 @@
/*
* (c) Copyright Ascensio System SIA 2010-2024
*
* This program is a free software product. You can redistribute it and/or
* modify it under the terms of the GNU Affero General Public License (AGPL)
* version 3 as published by the Free Software Foundation. In accordance with
* Section 7(a) of the GNU AGPL its Section 15 shall be amended to the effect
* that Ascensio System SIA expressly excludes the warranty of non-infringement
* of any third-party rights.
*
* This program is distributed WITHOUT ANY WARRANTY; without even the implied
* warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. For
* details, see the GNU AGPL at: http://www.gnu.org/licenses/agpl-3.0.html
*
* You can contact Ascensio System SIA at 20A-6 Ernesta Birznieka-Upish
* street, Riga, Latvia, EU, LV-1050.
*
* The interactive user interfaces in modified source and object code versions
* of the Program must display Appropriate Legal Notices, as required under
* Section 5 of the GNU AGPL version 3.
*
* Pursuant to Section 7(b) of the License you must retain the original Product
* logo when distributing the program. Pursuant to Section 7(e) we decline to
* grant you any rights under trademark law for use of our trademarks.
*
* All the Product's GUI elements, including illustrations and icon sets, as
* well as technical writing content are licensed under the terms of the
* Creative Commons Attribution-ShareAlike 4.0 International. See the License
* terms at http://creativecommons.org/licenses/by-sa/4.0/legalcode
*
*/
'use strict';
const { cp, rm, mkdir } = require('fs/promises');
const { stat, readFile, writeFile } = require('fs/promises');
var path = require('path');
var utils = require("./utils");
var crypto = require('crypto');
const ms = require('ms');
const config = require('config');
const commonDefines = require('./../../Common/sources/commondefines');
const constants = require('./../../Common/sources/constants');
const cfgExpSessionAbsolute = ms(config.get('services.CoAuthoring.expire.sessionabsolute'));
//Stubs are needed until integrators pass these parameters to all requests
let shardKeyCached;
let wopiSrcCached;
function getFilePath(storageCfg, strPath) {
const storageFolderPath = storageCfg.fs.folderPath;
return path.join(storageFolderPath, strPath);
}
function getOutputPath(strPath) {
return strPath.replace(/\\/g, '/');
}
async function headObject(storageCfg, strPath) {
let fsPath = getFilePath(storageCfg, strPath);
let stats = await stat(fsPath);
return {ContentLength: stats.size};
}
async function getObject(storageCfg, strPath) {
let fsPath = getFilePath(storageCfg, strPath);
return await readFile(fsPath);
}
async function createReadStream(storageCfg, strPath) {
let fsPath = getFilePath(storageCfg, strPath);
let stats = await stat(fsPath);
let contentLength = stats.size;
let readStream = await utils.promiseCreateReadStream(fsPath);
return {
contentLength: contentLength,
readStream: readStream
};
}
async function putObject(storageCfg, strPath, buffer, contentLength) {
var fsPath = getFilePath(storageCfg, strPath);
await mkdir(path.dirname(fsPath), {recursive: true});
if (Buffer.isBuffer(buffer)) {
await writeFile(fsPath, buffer);
} else {
let writable = await utils.promiseCreateWriteStream(fsPath);
await utils.pipeStreams(buffer, writable, true);
}
}
async function uploadObject(storageCfg, strPath, filePath) {
let fsPath = getFilePath(storageCfg, strPath);
await cp(filePath, fsPath, {force: true, recursive: true});
}
async function copyObject(storageCfgSrc, storageCfgDst, sourceKey, destinationKey) {
let fsPathSource = getFilePath(storageCfgSrc, sourceKey);
let fsPathDestination = getFilePath(storageCfgDst, destinationKey);
await cp(fsPathSource, fsPathDestination, {force: true, recursive: true});
}
async function listObjects(storageCfg, strPath) {
const storageFolderPath = storageCfg.fs.folderPath;
let fsPath = getFilePath(storageCfg, strPath);
let values = await utils.listObjects(fsPath);
return values.map(function(curvalue) {
return getOutputPath(curvalue.substring(storageFolderPath.length + 1));
});
}
async function deleteObject(storageCfg, strPath) {
const fsPath = getFilePath(storageCfg, strPath);
return rm(fsPath, {force: true, recursive: true});
}
async function deletePath(storageCfg, strPath) {
const fsPath = getFilePath(storageCfg, strPath);
return rm(fsPath, {force: true, recursive: true, maxRetries: 3});
}
async function getSignedUrl(ctx, storageCfg, baseUrl, strPath, urlType, optFilename, opt_creationDate) {
const storageSecretString = storageCfg.fs.secretString;
const storageUrlExpires = storageCfg.fs.urlExpires;
const bucketName = storageCfg.bucketName;
const storageFolderName = storageCfg.storageFolderName;
//replace '/' with %2f before encodeURIComponent becase nginx determine %2f as '/' and get wrong system path
const userFriendlyName = optFilename ? encodeURIComponent(optFilename.replace(/\//g, "%2f")) : path.basename(strPath);
var uri = '/' + bucketName + '/' + storageFolderName + '/' + strPath + '/' + userFriendlyName;
//RFC 1123 does not allow underscores https://stackoverflow.com/questions/2180465/can-domain-name-subdomains-have-an-underscore-in-it
var url = utils.checkBaseUrl(ctx, baseUrl, storageCfg).replace(/_/g, "%5f");
url += uri;
var date = Date.now();
let creationDate = opt_creationDate || date;
let expiredAfter = (commonDefines.c_oAscUrlTypes.Session === urlType ? (cfgExpSessionAbsolute / 1000) : storageUrlExpires) || 31536000;
//todo creationDate can be greater because mysql CURRENT_TIMESTAMP uses local time, not UTC
var expires = creationDate + Math.ceil(Math.abs(date - creationDate) / expiredAfter) * expiredAfter;
expires = Math.ceil(expires / 1000);
expires += expiredAfter;
var md5 = crypto.createHash('md5').update(expires + decodeURIComponent(uri) + storageSecretString).digest("base64");
md5 = md5.replace(/\+/g, "-").replace(/\//g, "_").replace(/=/g, "");
url += '?md5=' + encodeURIComponent(md5);
url += '&expires=' + encodeURIComponent(expires);
if (ctx.shardKey) {
shardKeyCached = ctx.shardKey;
url += `&${constants.SHARD_KEY_API_NAME}=${encodeURIComponent(ctx.shardKey)}`;
} else if (ctx.wopiSrc) {
wopiSrcCached = ctx.wopiSrc;
url += `&${constants.SHARD_KEY_WOPI_NAME}=${encodeURIComponent(ctx.wopiSrc)}`;
} else if (process.env.DEFAULT_SHARD_KEY) {
//Set DEFAULT_SHARD_KEY from environment as shardkey in case of integrator did not pass this param
url += `&${constants.SHARD_KEY_API_NAME}=${encodeURIComponent(process.env.DEFAULT_SHARD_KEY)}`;
} else if (shardKeyCached) {
//Add stubs for shardkey params until integrators pass these parameters to all requests
url += `&${constants.SHARD_KEY_API_NAME}=${encodeURIComponent(shardKeyCached)}`;
} else if (wopiSrcCached) {
url += `&${constants.SHARD_KEY_WOPI_NAME}=${encodeURIComponent(wopiSrcCached)}`;
}
url += '&filename=' + userFriendlyName;
return url;
}
function needServeStatic() {
return true;
}
module.exports = {
headObject,
getObject,
createReadStream,
putObject,
uploadObject,
copyObject,
listObjects,
deleteObject,
deletePath,
getSignedUrl,
needServeStatic
};
/*
* (c) Copyright Ascensio System SIA 2010-2024
*
* This program is a free software product. You can redistribute it and/or
* modify it under the terms of the GNU Affero General Public License (AGPL)
* version 3 as published by the Free Software Foundation. In accordance with
* Section 7(a) of the GNU AGPL its Section 15 shall be amended to the effect
* that Ascensio System SIA expressly excludes the warranty of non-infringement
* of any third-party rights.
*
* This program is distributed WITHOUT ANY WARRANTY; without even the implied
* warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. For
* details, see the GNU AGPL at: http://www.gnu.org/licenses/agpl-3.0.html
*
* You can contact Ascensio System SIA at 20A-6 Ernesta Birznieka-Upish
* street, Riga, Latvia, EU, LV-1050.
*
* The interactive user interfaces in modified source and object code versions
* of the Program must display Appropriate Legal Notices, as required under
* Section 5 of the GNU AGPL version 3.
*
* Pursuant to Section 7(b) of the License you must retain the original Product
* logo when distributing the program. Pursuant to Section 7(e) we decline to
* grant you any rights under trademark law for use of our trademarks.
*
* All the Product's GUI elements, including illustrations and icon sets, as
* well as technical writing content are licensed under the terms of the
* Creative Commons Attribution-ShareAlike 4.0 International. See the License
* terms at http://creativecommons.org/licenses/by-sa/4.0/legalcode
*
*/
'use strict';
const { cp, rm, mkdir } = require('fs/promises');
const { stat, readFile, writeFile } = require('fs/promises');
var path = require('path');
var utils = require("../utils");
var crypto = require('crypto');
const ms = require('ms');
const config = require('config');
const commonDefines = require('../commondefines');
const constants = require('../constants');
const cfgExpSessionAbsolute = ms(config.get('services.CoAuthoring.expire.sessionabsolute'));
//Stubs are needed until integrators pass these parameters to all requests
let shardKeyCached;
let wopiSrcCached;
/**
 * Resolve a storage key to an absolute file path under the configured
 * storage folder.
 */
function getFilePath(storageCfg, strPath) {
  return path.join(storageCfg.fs.folderPath, strPath);
}
/**
 * Normalize a file-system path back to the '/'-separated storage key form.
 */
function getOutputPath(strPath) {
  return strPath.replace(/\\/g, '/');
}
/**
 * Report an object's size in an S3-like shape ({ContentLength}).
 */
async function headObject(storageCfg, strPath) {
  const stats = await stat(getFilePath(storageCfg, strPath));
  return {ContentLength: stats.size};
}
/**
 * Read a whole object into a Buffer.
 */
async function getObject(storageCfg, strPath) {
  return readFile(getFilePath(storageCfg, strPath));
}
/**
 * Open a read stream over an object, together with its byte length.
 */
async function createReadStream(storageCfg, strPath) {
  const fsPath = getFilePath(storageCfg, strPath);
  const stats = await stat(fsPath);
  const readStream = await utils.promiseCreateReadStream(fsPath);
  return {
    contentLength: stats.size,
    readStream: readStream
  };
}
/**
 * Write an object to disk, creating parent directories as needed.
 * Accepts either a Buffer (written directly) or a readable stream
 * (piped to the destination file).
 * @param {object} storageCfg - storage config with fs.folderPath.
 * @param {string} strPath - object key relative to the storage root.
 * @param {Buffer|stream.Readable} buffer - object content.
 * @param {number} contentLength - byte length; unused for the fs driver.
 */
async function putObject(storageCfg, strPath, buffer, contentLength) {
  var fsPath = getFilePath(storageCfg, strPath);
  await mkdir(path.dirname(fsPath), {recursive: true});
  if (Buffer.isBuffer(buffer)) {
    await writeFile(fsPath, buffer);
  } else {
    // Non-Buffer input is treated as a readable stream and piped.
    let writable = await utils.promiseCreateWriteStream(fsPath);
    await utils.pipeStreams(buffer, writable, true);
  }
}
/**
 * Copy a local file (or directory tree) into the storage under strPath.
 */
async function uploadObject(storageCfg, strPath, filePath) {
  const destination = getFilePath(storageCfg, strPath);
  await cp(filePath, destination, {force: true, recursive: true});
}
/**
 * Copy an object (or tree) between two fs-backed storages.
 */
async function copyObject(storageCfgSrc, storageCfgDst, sourceKey, destinationKey) {
  const from = getFilePath(storageCfgSrc, sourceKey);
  const to = getFilePath(storageCfgDst, destinationKey);
  await cp(from, to, {force: true, recursive: true});
}
/**
 * Recursively list files under strPath, returning '/'-separated keys
 * relative to the storage root folder.
 */
async function listObjects(storageCfg, strPath) {
  const rootPrefixLength = storageCfg.fs.folderPath.length + 1;
  const fsPath = getFilePath(storageCfg, strPath);
  const found = await utils.listObjects(fsPath);
  return found.map((item) => getOutputPath(item.substring(rootPrefixLength)));
}
/**
 * Remove a single object (force: missing paths are not an error).
 */
async function deleteObject(storageCfg, strPath) {
  return rm(getFilePath(storageCfg, strPath), {force: true, recursive: true});
}
/**
 * Remove a whole subtree, retrying transient failures up to 3 times.
 */
async function deletePath(storageCfg, strPath) {
  return rm(getFilePath(storageCfg, strPath), {force: true, recursive: true, maxRetries: 3});
}
/**
 * Build a signed (md5 + expiry) download URL for an object served by
 * nginx from the local fs storage, appending shard-routing parameters.
 * @param {object} ctx - request context; may carry shardKey/wopiSrc.
 * @param {object} storageCfg - storage config (fs.secretString, fs.urlExpires, bucketName, storageFolderName).
 * @param {string} baseUrl - base URL of the document server.
 * @param {string} strPath - object key relative to the storage root.
 * @param {number} urlType - commonDefines.c_oAscUrlTypes value; Session URLs expire with the session.
 * @param {string} [optFilename] - user-visible file name for the link.
 * @param {number} [opt_creationDate] - anchor timestamp (ms) for expiry buckets.
 * @returns {Promise<string>} the signed URL.
 */
async function getSignedUrl(ctx, storageCfg, baseUrl, strPath, urlType, optFilename, opt_creationDate) {
  const storageSecretString = storageCfg.fs.secretString;
  const storageUrlExpires = storageCfg.fs.urlExpires;
  const bucketName = storageCfg.bucketName;
  const storageFolderName = storageCfg.storageFolderName;
  //replace '/' with %2f before encodeURIComponent because nginx determines %2f as '/' and gets a wrong system path
  const userFriendlyName = optFilename ? encodeURIComponent(optFilename.replace(/\//g, "%2f")) : path.basename(strPath);
  var uri = '/' + bucketName + '/' + storageFolderName + '/' + strPath + '/' + userFriendlyName;
  //RFC 1123 does not allow underscores https://stackoverflow.com/questions/2180465/can-domain-name-subdomains-have-an-underscore-in-it
  var url = utils.checkBaseUrl(ctx, baseUrl, storageCfg).replace(/_/g, "%5f");
  url += uri;
  var date = Date.now();
  let creationDate = opt_creationDate || date;
  // Session URLs live as long as the session; otherwise use the configured TTL, defaulting to one year.
  let expiredAfter = (commonDefines.c_oAscUrlTypes.Session === urlType ? (cfgExpSessionAbsolute / 1000) : storageUrlExpires) || 31536000;
  //todo creationDate can be greater because mysql CURRENT_TIMESTAMP uses local time, not UTC
  // Bucket the expiry so URLs generated close in time sign identically.
  var expires = creationDate + Math.ceil(Math.abs(date - creationDate) / expiredAfter) * expiredAfter;
  expires = Math.ceil(expires / 1000);
  expires += expiredAfter;
  // nginx secure_link-style signature: md5(expires + uri + secret), base64url-encoded.
  var md5 = crypto.createHash('md5').update(expires + decodeURIComponent(uri) + storageSecretString).digest("base64");
  md5 = md5.replace(/\+/g, "-").replace(/\//g, "_").replace(/=/g, "");
  url += '?md5=' + encodeURIComponent(md5);
  url += '&expires=' + encodeURIComponent(expires);
  if (ctx.shardKey) {
    shardKeyCached = ctx.shardKey;
    url += `&${constants.SHARD_KEY_API_NAME}=${encodeURIComponent(ctx.shardKey)}`;
  } else if (ctx.wopiSrc) {
    wopiSrcCached = ctx.wopiSrc;
    url += `&${constants.SHARD_KEY_WOPI_NAME}=${encodeURIComponent(ctx.wopiSrc)}`;
  } else if (process.env.DEFAULT_SHARD_KEY) {
    //Set DEFAULT_SHARD_KEY from environment as shardkey in case of integrator did not pass this param
    url += `&${constants.SHARD_KEY_API_NAME}=${encodeURIComponent(process.env.DEFAULT_SHARD_KEY)}`;
  } else if (shardKeyCached) {
    //Add stubs for shardkey params until integrators pass these parameters to all requests
    url += `&${constants.SHARD_KEY_API_NAME}=${encodeURIComponent(shardKeyCached)}`;
  } else if (wopiSrcCached) {
    url += `&${constants.SHARD_KEY_WOPI_NAME}=${encodeURIComponent(wopiSrcCached)}`;
  }
  url += '&filename=' + userFriendlyName;
  return url;
}
/**
 * The file-system backend relies on the web server to serve files statically.
 * @returns {boolean} Always true for this backend.
 */
function needServeStatic() {
  const servedByWebServer = true;
  return servedByWebServer;
}
// Public interface of the file-system storage backend; all storage backends
// in this folder export the same set of names.
module.exports = {
  headObject,
  getObject,
  createReadStream,
  putObject,
  uploadObject,
  copyObject,
  listObjects,
  deleteObject,
  deletePath,
  getSignedUrl,
  needServeStatic
};

View File

@ -1,266 +1,266 @@
/*
* (c) Copyright Ascensio System SIA 2010-2024
*
* This program is a free software product. You can redistribute it and/or
* modify it under the terms of the GNU Affero General Public License (AGPL)
* version 3 as published by the Free Software Foundation. In accordance with
* Section 7(a) of the GNU AGPL its Section 15 shall be amended to the effect
* that Ascensio System SIA expressly excludes the warranty of non-infringement
* of any third-party rights.
*
* This program is distributed WITHOUT ANY WARRANTY; without even the implied
* warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. For
* details, see the GNU AGPL at: http://www.gnu.org/licenses/agpl-3.0.html
*
* You can contact Ascensio System SIA at 20A-6 Ernesta Birznieka-Upish
* street, Riga, Latvia, EU, LV-1050.
*
* The interactive user interfaces in modified source and object code versions
* of the Program must display Appropriate Legal Notices, as required under
* Section 5 of the GNU AGPL version 3.
*
* Pursuant to Section 7(b) of the License you must retain the original Product
* logo when distributing the program. Pursuant to Section 7(e) we decline to
* grant you any rights under trademark law for use of our trademarks.
*
* All the Product's GUI elements, including illustrations and icon sets, as
* well as technical writing content are licensed under the terms of the
* Creative Commons Attribution-ShareAlike 4.0 International. See the License
* terms at http://creativecommons.org/licenses/by-sa/4.0/legalcode
*
*/
'use strict';
const fs = require('fs');
const url = require('url');
const { Agent } = require('https');
const path = require('path');
const { S3Client, ListObjectsCommand, HeadObjectCommand} = require("@aws-sdk/client-s3");
const { GetObjectCommand, PutObjectCommand, CopyObjectCommand} = require("@aws-sdk/client-s3");
const { DeleteObjectsCommand, DeleteObjectCommand } = require("@aws-sdk/client-s3");
const { getSignedUrl } = require("@aws-sdk/s3-request-presigner");
const { NodeHttpHandler } = require("@aws-sdk/node-http-handler");
const mime = require('mime');
const config = require('config');
const utils = require('./utils');
const ms = require('ms');
const commonDefines = require('./../../Common/sources/commondefines');
const cfgExpSessionAbsolute = ms(config.get('services.CoAuthoring.expire.sessionabsolute'));
const cfgRequestDefaults = config.get('services.CoAuthoring.requestDefaults');
//This operation enables you to delete multiple objects from a bucket using a single HTTP request. You may specify up to 1000 keys.
const MAX_DELETE_OBJECTS = 1000;
let clients = {};
/**
 * Returns a (cached) AWS SDK v3 S3 client for the given storage configuration.
 * Clients are memoized in the module-level `clients` map keyed by the JSON of
 * their config, so repeated calls with identical settings reuse one client.
 * @param {Object} storageCfg - Storage configuration (region, endpoint, credentials, TLS/path-style flags).
 * @returns {S3Client} A configured client.
 */
function getS3Client(storageCfg) {
  /**
   * Don't hard-code your credentials!
   * Export the following environment variables instead:
   *
   * export AWS_ACCESS_KEY_ID='AKID'
   * export AWS_SECRET_ACCESS_KEY='SECRET'
   */
  let configS3 = {
    region: storageCfg.region,
    endpoint: storageCfg.endpoint
  };
  if (storageCfg.accessKeyId && storageCfg.secretAccessKey) {
    configS3.credentials = {
      accessKeyId: storageCfg.accessKeyId,
      secretAccessKey: storageCfg.secretAccessKey
    }
  }
  if (configS3.endpoint) {
    // Custom endpoints (S3-compatible servers) need explicit TLS and
    // path-style addressing settings.
    configS3.tls = storageCfg.sslEnabled;
    configS3.forcePathStyle = storageCfg.s3ForcePathStyle;
  }
  //todo dedicated options?
  // NOTE(review): a new Agent is constructed on every call even when a cached
  // client is returned; the unused agent is collectable, but this work could
  // be skipped — confirm before reordering around the JSON cache key below.
  const agent = new Agent(cfgRequestDefaults);
  configS3.requestHandler = new NodeHttpHandler({
    httpAgent: agent,
    httpsAgent: agent
  });
  let configJson = JSON.stringify(configS3);
  let client = clients[configJson];
  if (!client) {
    client = new S3Client(configS3);
    clients[configJson] = client;
  }
  return client;
}
/**
 * Builds the S3 object key for a storage-relative path.
 * @param {Object} storageCfg - Storage configuration (uses storageFolderName).
 * @param {string} strPath - Storage-relative path.
 * @returns {string} Object key prefixed with the storage folder name.
 */
function getFilePath(storageCfg, strPath) {
  return `${storageCfg.storageFolderName}/${strPath}`;
}
/**
 * Appends storage-relative keys from one S3 ListObjects page to outputArray.
 * Strips the leading "<storageFolderName>/" prefix from each S3 key.
 * @param {Object} storageCfg - Storage configuration (uses storageFolderName).
 * @param {Array<{Key: string}>|undefined} inputArray - Contents array from the
 *   S3 response; may be absent for an empty page.
 * @param {string[]} outputArray - Accumulator mutated in place.
 */
function joinListObjects(storageCfg, inputArray, outputArray) {
  if (!inputArray) {
    return;
  }
  // Compute the prefix length once instead of re-concatenating the folder
  // name for every element.
  const prefixLength = storageCfg.storageFolderName.length + 1;
  for (const element of inputArray) {
    outputArray.push(element.Key.substring(prefixLength));
  }
}
/**
 * Recursively pages through S3 ListObjects results, accumulating
 * storage-relative keys into `output`.
 * @param {Object} storageCfg - Storage configuration.
 * @param {string[]} output - Accumulator mutated in place.
 * @param {Object} params - ListObjectsCommand input; `Marker` is advanced per page.
 * @returns {Promise<string[]>} The `output` array once the listing is exhausted.
 */
async function listObjectsExec(storageCfg, output, params) {
  const data = await getS3Client(storageCfg).send(new ListObjectsCommand(params));
  joinListObjects(storageCfg, data.Contents, output);
  // Continue while the listing is truncated; the next page starts after
  // NextMarker or, when absent, after the last key of the current page.
  if (data.IsTruncated && (data.NextMarker || (data.Contents && data.Contents.length > 0))) {
    params.Marker = data.NextMarker || data.Contents[data.Contents.length - 1].Key;
    return await listObjectsExec(storageCfg, output, params);
  } else {
    return output;
  }
}
/**
 * Deletes a batch of keys (at most MAX_DELETE_OBJECTS) in one DeleteObjects call.
 * Quiet mode is used, so the response only reports keys that failed to delete.
 * @param {Object} storageCfg - Storage configuration.
 * @param {Array<{Key: string}>} aKeys - Object identifiers to delete.
 */
async function deleteObjectsHelp(storageCfg, aKeys) {
  const command = new DeleteObjectsCommand({
    Bucket: storageCfg.bucketName,
    Delete: {
      Quiet: true,
      Objects: aKeys
    }
  });
  await getS3Client(storageCfg).send(command);
}
/**
 * Fetches object metadata without downloading the body.
 * @param {Object} storageCfg - Storage configuration.
 * @param {string} strPath - Storage-relative path of the object.
 * @returns {Promise<{ContentLength: number}>} Size of the stored object in bytes.
 */
async function headObject(storageCfg, strPath) {
  const command = new HeadObjectCommand({
    Bucket: storageCfg.bucketName,
    Key: getFilePath(storageCfg, strPath)
  });
  const response = await getS3Client(storageCfg).send(command);
  return {ContentLength: response.ContentLength};
}
/**
 * Downloads a stored object fully into memory.
 * @param {Object} storageCfg - Storage configuration.
 * @param {string} strPath - Storage-relative path of the object.
 * @returns {Promise<Buffer>} The object body as a Buffer.
 */
async function getObject(storageCfg, strPath) {
  const command = new GetObjectCommand({
    Bucket: storageCfg.bucketName,
    Key: getFilePath(storageCfg, strPath)
  });
  const response = await getS3Client(storageCfg).send(command);
  return await utils.stream2Buffer(response.Body);
}
/**
 * Opens a streaming download of a stored object.
 * @param {Object} storageCfg - Storage configuration.
 * @param {string} strPath - Storage-relative path of the object.
 * @returns {Promise<{contentLength: number, readStream: Object}>} Body stream plus its length.
 */
async function createReadStream(storageCfg, strPath) {
  const command = new GetObjectCommand({
    Bucket: storageCfg.bucketName,
    Key: getFilePath(storageCfg, strPath)
  });
  const response = await getS3Client(storageCfg).send(command);
  return {
    contentLength: response.ContentLength,
    readStream: response.Body
  };
}
/**
 * Uploads an in-memory body as an object; content type is derived from the
 * path extension.
 * @param {Object} storageCfg - Storage configuration.
 * @param {string} strPath - Storage-relative destination path.
 * @param {Buffer|Object} buffer - Object body.
 * @param {number} contentLength - Body length in bytes.
 */
async function putObject(storageCfg, strPath, buffer, contentLength) {
  // TODO: consider setting Expires.
  const command = new PutObjectCommand({
    Bucket: storageCfg.bucketName,
    Key: getFilePath(storageCfg, strPath),
    Body: buffer,
    ContentLength: contentLength,
    ContentType: mime.getType(strPath)
  });
  await getS3Client(storageCfg).send(command);
}
/**
 * Streams a local file into storage.
 * @param {Object} storageCfg - Storage configuration.
 * @param {string} strPath - Storage-relative destination path.
 * @param {string} filePath - Path of the local file to upload.
 */
async function uploadObject(storageCfg, strPath, filePath) {
  // TODO: consider setting Expires.
  const fileStream = fs.createReadStream(filePath);
  const command = new PutObjectCommand({
    Bucket: storageCfg.bucketName,
    Key: getFilePath(storageCfg, strPath),
    Body: fileStream,
    ContentType: mime.getType(strPath)
  });
  await getS3Client(storageCfg).send(command);
}
/**
 * Server-side copy of an object between storage configurations; the data does
 * not pass through this process.
 * @param {Object} storageCfgSrc - Source storage configuration.
 * @param {Object} storageCfgDst - Destination storage configuration.
 * @param {string} sourceKey - Storage-relative source path.
 * @param {string} destinationKey - Storage-relative destination path.
 */
async function copyObject(storageCfgSrc, storageCfgDst, sourceKey, destinationKey) {
  //todo source bucket
  const copySource = `/${storageCfgSrc.bucketName}/${getFilePath(storageCfgSrc, sourceKey)}`;
  const command = new CopyObjectCommand({
    Bucket: storageCfgDst.bucketName,
    Key: getFilePath(storageCfgDst, destinationKey),
    CopySource: copySource
  });
  await getS3Client(storageCfgDst).send(command);
}
/**
 * Lists all storage-relative keys under a prefix, following S3 pagination.
 * @param {Object} storageCfg - Storage configuration.
 * @param {string} strPath - Storage-relative path prefix.
 * @returns {Promise<string[]>} All matching keys.
 */
async function listObjects(storageCfg, strPath) {
  const result = [];
  await listObjectsExec(storageCfg, result, {
    Bucket: storageCfg.bucketName,
    Prefix: getFilePath(storageCfg, strPath)
  });
  return result;
}
/**
 * Deletes a single object.
 * @param {Object} storageCfg - Storage configuration.
 * @param {string} strPath - Storage-relative path of the object to delete.
 */
async function deleteObject(storageCfg, strPath) {
  const command = new DeleteObjectCommand({
    Bucket: storageCfg.bucketName,
    Key: getFilePath(storageCfg, strPath)
  });
  await getS3Client(storageCfg).send(command);
}
/**
 * Deletes many objects, batching requests at the S3 limit of
 * MAX_DELETE_OBJECTS keys per DeleteObjects call.
 * @param {Object} storageCfg - Storage configuration.
 * @param {string[]} strPaths - Storage-relative paths to delete.
 */
async function deleteObjects(storageCfg, strPaths) {
  const identifiers = strPaths.map((currentPath) => ({Key: getFilePath(storageCfg, currentPath)}));
  for (let offset = 0; offset < identifiers.length; offset += MAX_DELETE_OBJECTS) {
    await deleteObjectsHelp(storageCfg, identifiers.slice(offset, offset + MAX_DELETE_OBJECTS));
  }
}
/**
 * Deletes every object stored under a path prefix.
 * @param {Object} storageCfg - Storage configuration.
 * @param {string} strPath - Storage-relative path prefix.
 */
async function deletePath(storageCfg, strPath) {
  const keys = await listObjects(storageCfg, strPath);
  await deleteObjects(storageCfg, keys);
}
/**
 * Builds an AWS SigV4 presigned GET URL for a stored object.
 * @param {Object} ctx - Operation context (unused here; kept for interface parity with other backends).
 * @param {Object} storageCfg - Storage configuration (fs.urlExpires sets the URL lifetime).
 * @param {string} baseUrl - Unused for S3; presigned URLs are absolute.
 * @param {string} strPath - Storage-relative path of the object.
 * @param {number} urlType - One of commonDefines.c_oAscUrlTypes.
 * @param {string} [optFilename] - User-visible file name for Content-Disposition.
 * @param {number} [opt_creationDate] - Unused for S3.
 * @returns {Promise<string>} The presigned URL.
 */
async function getSignedUrlWrapper(ctx, storageCfg, baseUrl, strPath, urlType, optFilename, opt_creationDate) {
  const storageUrlExpires = storageCfg.fs.urlExpires;
  // Session URLs live for the absolute session lifetime; otherwise use the
  // configured expiration, defaulting to one year.
  let expires = (commonDefines.c_oAscUrlTypes.Session === urlType ? cfgExpSessionAbsolute / 1000 : storageUrlExpires) || 31536000;
  // Signature version 4 presigned URLs must have an expiration date less than one week in the future
  expires = Math.min(expires, 604800);
  let userFriendlyName = optFilename ? optFilename.replace(/\//g, "%2f") : path.basename(strPath);
  let contentDisposition = utils.getContentDisposition(userFriendlyName, null, null);
  const input = {
    Bucket: storageCfg.bucketName,
    Key: getFilePath(storageCfg, strPath),
    ResponseContentDisposition: contentDisposition
  };
  const command = new GetObjectCommand(input);
  //default Expires 900 seconds
  let options = {
    expiresIn: expires
  };
  return await getSignedUrl(getS3Client(storageCfg), command, options);
  //extra query params cause SignatureDoesNotMatch
  //https://stackoverflow.com/questions/55503009/amazon-s3-signature-does-not-match-when-extra-query-params-ga-added-in-url
  // return utils.changeOnlyOfficeUrl(url, strPath, optFilename);
}
/**
 * S3 serves files via presigned URLs, so no static serving is required.
 * @returns {boolean} Always false for this backend.
 */
function needServeStatic() {
  const servedByWebServer = false;
  return servedByWebServer;
}
// Public interface of the S3 storage backend; getSignedUrlWrapper is exported
// under the common getSignedUrl name so all backends share one contract.
module.exports = {
  headObject,
  getObject,
  createReadStream,
  putObject,
  uploadObject,
  copyObject,
  listObjects,
  deleteObject,
  deletePath,
  getSignedUrl: getSignedUrlWrapper,
  needServeStatic
};
/*
* (c) Copyright Ascensio System SIA 2010-2024
*
* This program is a free software product. You can redistribute it and/or
* modify it under the terms of the GNU Affero General Public License (AGPL)
* version 3 as published by the Free Software Foundation. In accordance with
* Section 7(a) of the GNU AGPL its Section 15 shall be amended to the effect
* that Ascensio System SIA expressly excludes the warranty of non-infringement
* of any third-party rights.
*
* This program is distributed WITHOUT ANY WARRANTY; without even the implied
* warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. For
* details, see the GNU AGPL at: http://www.gnu.org/licenses/agpl-3.0.html
*
* You can contact Ascensio System SIA at 20A-6 Ernesta Birznieka-Upish
* street, Riga, Latvia, EU, LV-1050.
*
* The interactive user interfaces in modified source and object code versions
* of the Program must display Appropriate Legal Notices, as required under
* Section 5 of the GNU AGPL version 3.
*
* Pursuant to Section 7(b) of the License you must retain the original Product
* logo when distributing the program. Pursuant to Section 7(e) we decline to
* grant you any rights under trademark law for use of our trademarks.
*
* All the Product's GUI elements, including illustrations and icon sets, as
* well as technical writing content are licensed under the terms of the
* Creative Commons Attribution-ShareAlike 4.0 International. See the License
* terms at http://creativecommons.org/licenses/by-sa/4.0/legalcode
*
*/
'use strict';
const fs = require('fs');
const url = require('url');
const { Agent } = require('https');
const path = require('path');
const { S3Client, ListObjectsCommand, HeadObjectCommand} = require("@aws-sdk/client-s3");
const { GetObjectCommand, PutObjectCommand, CopyObjectCommand} = require("@aws-sdk/client-s3");
const { DeleteObjectsCommand, DeleteObjectCommand } = require("@aws-sdk/client-s3");
const { getSignedUrl } = require("@aws-sdk/s3-request-presigner");
const { NodeHttpHandler } = require("@aws-sdk/node-http-handler");
const mime = require('mime');
const config = require('config');
const utils = require('../utils');
const ms = require('ms');
const commonDefines = require('../commondefines');
const cfgExpSessionAbsolute = ms(config.get('services.CoAuthoring.expire.sessionabsolute'));
const cfgRequestDefaults = config.get('services.CoAuthoring.requestDefaults');
//This operation enables you to delete multiple objects from a bucket using a single HTTP request. You may specify up to 1000 keys.
const MAX_DELETE_OBJECTS = 1000;
let clients = {};
/**
 * Returns a (cached) AWS SDK v3 S3 client for the given storage configuration.
 * Clients are memoized in the module-level `clients` map keyed by the JSON of
 * their config, so repeated calls with identical settings reuse one client.
 * @param {Object} storageCfg - Storage configuration (region, endpoint, credentials, TLS/path-style flags).
 * @returns {S3Client} A configured client.
 */
function getS3Client(storageCfg) {
  /**
   * Don't hard-code your credentials!
   * Export the following environment variables instead:
   *
   * export AWS_ACCESS_KEY_ID='AKID'
   * export AWS_SECRET_ACCESS_KEY='SECRET'
   */
  let configS3 = {
    region: storageCfg.region,
    endpoint: storageCfg.endpoint
  };
  if (storageCfg.accessKeyId && storageCfg.secretAccessKey) {
    configS3.credentials = {
      accessKeyId: storageCfg.accessKeyId,
      secretAccessKey: storageCfg.secretAccessKey
    }
  }
  if (configS3.endpoint) {
    // Custom endpoints (S3-compatible servers) need explicit TLS and
    // path-style addressing settings.
    configS3.tls = storageCfg.sslEnabled;
    configS3.forcePathStyle = storageCfg.s3ForcePathStyle;
  }
  //todo dedicated options?
  // NOTE(review): a new Agent is constructed on every call even when a cached
  // client is returned; the unused agent is collectable, but this work could
  // be skipped — confirm before reordering around the JSON cache key below.
  const agent = new Agent(cfgRequestDefaults);
  configS3.requestHandler = new NodeHttpHandler({
    httpAgent: agent,
    httpsAgent: agent
  });
  let configJson = JSON.stringify(configS3);
  let client = clients[configJson];
  if (!client) {
    client = new S3Client(configS3);
    clients[configJson] = client;
  }
  return client;
}
/**
 * Builds the S3 object key for a storage-relative path.
 * @param {Object} storageCfg - Storage configuration (uses storageFolderName).
 * @param {string} strPath - Storage-relative path.
 * @returns {string} Object key prefixed with the storage folder name.
 */
function getFilePath(storageCfg, strPath) {
  return `${storageCfg.storageFolderName}/${strPath}`;
}
/**
 * Appends storage-relative keys from one S3 ListObjects page to outputArray.
 * Strips the leading "<storageFolderName>/" prefix from each S3 key.
 * @param {Object} storageCfg - Storage configuration (uses storageFolderName).
 * @param {Array<{Key: string}>|undefined} inputArray - Contents array from the
 *   S3 response; may be absent for an empty page.
 * @param {string[]} outputArray - Accumulator mutated in place.
 */
function joinListObjects(storageCfg, inputArray, outputArray) {
  if (!inputArray) {
    return;
  }
  // Compute the prefix length once instead of re-concatenating the folder
  // name for every element.
  const prefixLength = storageCfg.storageFolderName.length + 1;
  for (const element of inputArray) {
    outputArray.push(element.Key.substring(prefixLength));
  }
}
/**
 * Recursively pages through S3 ListObjects results, accumulating
 * storage-relative keys into `output`.
 * @param {Object} storageCfg - Storage configuration.
 * @param {string[]} output - Accumulator mutated in place.
 * @param {Object} params - ListObjectsCommand input; `Marker` is advanced per page.
 * @returns {Promise<string[]>} The `output` array once the listing is exhausted.
 */
async function listObjectsExec(storageCfg, output, params) {
  const data = await getS3Client(storageCfg).send(new ListObjectsCommand(params));
  joinListObjects(storageCfg, data.Contents, output);
  // Continue while the listing is truncated; the next page starts after
  // NextMarker or, when absent, after the last key of the current page.
  if (data.IsTruncated && (data.NextMarker || (data.Contents && data.Contents.length > 0))) {
    params.Marker = data.NextMarker || data.Contents[data.Contents.length - 1].Key;
    return await listObjectsExec(storageCfg, output, params);
  } else {
    return output;
  }
}
/**
 * Deletes a batch of keys (at most MAX_DELETE_OBJECTS) in one DeleteObjects call.
 * Quiet mode is used, so the response only reports keys that failed to delete.
 * @param {Object} storageCfg - Storage configuration.
 * @param {Array<{Key: string}>} aKeys - Object identifiers to delete.
 */
async function deleteObjectsHelp(storageCfg, aKeys) {
  const command = new DeleteObjectsCommand({
    Bucket: storageCfg.bucketName,
    Delete: {
      Quiet: true,
      Objects: aKeys
    }
  });
  await getS3Client(storageCfg).send(command);
}
/**
 * Fetches object metadata without downloading the body.
 * @param {Object} storageCfg - Storage configuration.
 * @param {string} strPath - Storage-relative path of the object.
 * @returns {Promise<{ContentLength: number}>} Size of the stored object in bytes.
 */
async function headObject(storageCfg, strPath) {
  const command = new HeadObjectCommand({
    Bucket: storageCfg.bucketName,
    Key: getFilePath(storageCfg, strPath)
  });
  const response = await getS3Client(storageCfg).send(command);
  return {ContentLength: response.ContentLength};
}
/**
 * Downloads a stored object fully into memory.
 * @param {Object} storageCfg - Storage configuration.
 * @param {string} strPath - Storage-relative path of the object.
 * @returns {Promise<Buffer>} The object body as a Buffer.
 */
async function getObject(storageCfg, strPath) {
  const command = new GetObjectCommand({
    Bucket: storageCfg.bucketName,
    Key: getFilePath(storageCfg, strPath)
  });
  const response = await getS3Client(storageCfg).send(command);
  return await utils.stream2Buffer(response.Body);
}
/**
 * Opens a streaming download of a stored object.
 * @param {Object} storageCfg - Storage configuration.
 * @param {string} strPath - Storage-relative path of the object.
 * @returns {Promise<{contentLength: number, readStream: Object}>} Body stream plus its length.
 */
async function createReadStream(storageCfg, strPath) {
  const command = new GetObjectCommand({
    Bucket: storageCfg.bucketName,
    Key: getFilePath(storageCfg, strPath)
  });
  const response = await getS3Client(storageCfg).send(command);
  return {
    contentLength: response.ContentLength,
    readStream: response.Body
  };
}
/**
 * Uploads an in-memory body as an object; content type is derived from the
 * path extension.
 * @param {Object} storageCfg - Storage configuration.
 * @param {string} strPath - Storage-relative destination path.
 * @param {Buffer|Object} buffer - Object body.
 * @param {number} contentLength - Body length in bytes.
 */
async function putObject(storageCfg, strPath, buffer, contentLength) {
  // TODO: consider setting Expires.
  const command = new PutObjectCommand({
    Bucket: storageCfg.bucketName,
    Key: getFilePath(storageCfg, strPath),
    Body: buffer,
    ContentLength: contentLength,
    ContentType: mime.getType(strPath)
  });
  await getS3Client(storageCfg).send(command);
}
/**
 * Streams a local file into storage.
 * @param {Object} storageCfg - Storage configuration.
 * @param {string} strPath - Storage-relative destination path.
 * @param {string} filePath - Path of the local file to upload.
 */
async function uploadObject(storageCfg, strPath, filePath) {
  // TODO: consider setting Expires.
  const fileStream = fs.createReadStream(filePath);
  const command = new PutObjectCommand({
    Bucket: storageCfg.bucketName,
    Key: getFilePath(storageCfg, strPath),
    Body: fileStream,
    ContentType: mime.getType(strPath)
  });
  await getS3Client(storageCfg).send(command);
}
/**
 * Server-side copy of an object between storage configurations; the data does
 * not pass through this process.
 * @param {Object} storageCfgSrc - Source storage configuration.
 * @param {Object} storageCfgDst - Destination storage configuration.
 * @param {string} sourceKey - Storage-relative source path.
 * @param {string} destinationKey - Storage-relative destination path.
 */
async function copyObject(storageCfgSrc, storageCfgDst, sourceKey, destinationKey) {
  //todo source bucket
  const copySource = `/${storageCfgSrc.bucketName}/${getFilePath(storageCfgSrc, sourceKey)}`;
  const command = new CopyObjectCommand({
    Bucket: storageCfgDst.bucketName,
    Key: getFilePath(storageCfgDst, destinationKey),
    CopySource: copySource
  });
  await getS3Client(storageCfgDst).send(command);
}
/**
 * Lists all storage-relative keys under a prefix, following S3 pagination.
 * @param {Object} storageCfg - Storage configuration.
 * @param {string} strPath - Storage-relative path prefix.
 * @returns {Promise<string[]>} All matching keys.
 */
async function listObjects(storageCfg, strPath) {
  const result = [];
  await listObjectsExec(storageCfg, result, {
    Bucket: storageCfg.bucketName,
    Prefix: getFilePath(storageCfg, strPath)
  });
  return result;
}
/**
 * Deletes a single object.
 * @param {Object} storageCfg - Storage configuration.
 * @param {string} strPath - Storage-relative path of the object to delete.
 */
async function deleteObject(storageCfg, strPath) {
  const command = new DeleteObjectCommand({
    Bucket: storageCfg.bucketName,
    Key: getFilePath(storageCfg, strPath)
  });
  await getS3Client(storageCfg).send(command);
}
/**
 * Deletes many objects, batching requests at the S3 limit of
 * MAX_DELETE_OBJECTS keys per DeleteObjects call.
 * @param {Object} storageCfg - Storage configuration.
 * @param {string[]} strPaths - Storage-relative paths to delete.
 */
async function deleteObjects(storageCfg, strPaths) {
  const identifiers = strPaths.map((currentPath) => ({Key: getFilePath(storageCfg, currentPath)}));
  for (let offset = 0; offset < identifiers.length; offset += MAX_DELETE_OBJECTS) {
    await deleteObjectsHelp(storageCfg, identifiers.slice(offset, offset + MAX_DELETE_OBJECTS));
  }
}
/**
 * Deletes every object stored under a path prefix.
 * @param {Object} storageCfg - Storage configuration.
 * @param {string} strPath - Storage-relative path prefix.
 */
async function deletePath(storageCfg, strPath) {
  const keys = await listObjects(storageCfg, strPath);
  await deleteObjects(storageCfg, keys);
}
/**
 * Builds an AWS SigV4 presigned GET URL for a stored object.
 * @param {Object} ctx - Operation context (unused here; kept for interface parity with other backends).
 * @param {Object} storageCfg - Storage configuration (fs.urlExpires sets the URL lifetime).
 * @param {string} baseUrl - Unused for S3; presigned URLs are absolute.
 * @param {string} strPath - Storage-relative path of the object.
 * @param {number} urlType - One of commonDefines.c_oAscUrlTypes.
 * @param {string} [optFilename] - User-visible file name for Content-Disposition.
 * @param {number} [opt_creationDate] - Unused for S3.
 * @returns {Promise<string>} The presigned URL.
 */
async function getSignedUrlWrapper(ctx, storageCfg, baseUrl, strPath, urlType, optFilename, opt_creationDate) {
  const storageUrlExpires = storageCfg.fs.urlExpires;
  // Session URLs live for the absolute session lifetime; otherwise use the
  // configured expiration, defaulting to one year.
  let expires = (commonDefines.c_oAscUrlTypes.Session === urlType ? cfgExpSessionAbsolute / 1000 : storageUrlExpires) || 31536000;
  // Signature version 4 presigned URLs must have an expiration date less than one week in the future
  expires = Math.min(expires, 604800);
  let userFriendlyName = optFilename ? optFilename.replace(/\//g, "%2f") : path.basename(strPath);
  let contentDisposition = utils.getContentDisposition(userFriendlyName, null, null);
  const input = {
    Bucket: storageCfg.bucketName,
    Key: getFilePath(storageCfg, strPath),
    ResponseContentDisposition: contentDisposition
  };
  const command = new GetObjectCommand(input);
  //default Expires 900 seconds
  let options = {
    expiresIn: expires
  };
  return await getSignedUrl(getS3Client(storageCfg), command, options);
  //extra query params cause SignatureDoesNotMatch
  //https://stackoverflow.com/questions/55503009/amazon-s3-signature-does-not-match-when-extra-query-params-ga-added-in-url
  // return utils.changeOnlyOfficeUrl(url, strPath, optFilename);
}
/**
 * S3 serves files via presigned URLs, so no static serving is required.
 * @returns {boolean} Always false for this backend.
 */
function needServeStatic() {
  const servedByWebServer = false;
  return servedByWebServer;
}
// Public interface of the S3 storage backend; getSignedUrlWrapper is exported
// under the common getSignedUrl name so all backends share one contract.
module.exports = {
  headObject,
  getObject,
  createReadStream,
  putObject,
  uploadObject,
  copyObject,
  listObjects,
  deleteObject,
  deletePath,
  getSignedUrl: getSignedUrlWrapper,
  needServeStatic
};

View File

@ -51,8 +51,9 @@
"./sources/editorDataMemory.js",
"./sources/editorDataRedis.js",
"./sources/pubsubRabbitMQ.js",
"../Common/sources/storage-fs.js",
"../Common/sources/storage-s3.js"
"../Common/sources/storage/storage-fs.js",
"../Common/sources/storage/storage-s3.js",
"../Common/sources/storage/storage-az.js"
]
}
}

File diff suppressed because it is too large Load Diff

View File

@ -47,7 +47,7 @@ var logger = require('./../../Common/sources/logger');
var utils = require('./../../Common/sources/utils');
var constants = require('./../../Common/sources/constants');
var commonDefines = require('./../../Common/sources/commondefines');
var storage = require('./../../Common/sources/storage-base');
var storage = require('./../../Common/sources/storage/storage-base');
var formatChecker = require('./../../Common/sources/formatchecker');
var statsDClient = require('./../../Common/sources/statsdclient');
var operationContext = require('./../../Common/sources/operationContext');

View File

@ -38,7 +38,7 @@ var pubsubService = require('./pubsubRabbitMQ');
var commonDefines = require('./../../Common/sources/commondefines');
var constants = require('./../../Common/sources/constants');
var utils = require('./../../Common/sources/utils');
const storage = require('./../../Common/sources/storage-base');
const storage = require('./../../Common/sources/storage/storage-base');
const queueService = require('./../../Common/sources/taskqueueRabbitMQ');
const operationContext = require('./../../Common/sources/operationContext');
const sqlBase = require('./databaseConnectors/baseConnector');

View File

@ -43,10 +43,10 @@ var commonDefines = require('./../../Common/sources/commondefines');
var docsCoServer = require('./DocsCoServer');
var canvasService = require('./canvasservice');
var wopiClient = require('./wopiClient');
var storage = require('./../../Common/sources/storage-base');
var storage = require('./../../Common/sources/storage/storage-base');
var formatChecker = require('./../../Common/sources/formatchecker');
var statsDClient = require('./../../Common/sources/statsdclient');
var storageBase = require('./../../Common/sources/storage-base');
var storageBase = require('./../../Common/sources/storage/storage-base');
var operationContext = require('./../../Common/sources/operationContext');
const sqlBase = require('./databaseConnectors/baseConnector');
const utilsDocService = require("./utilsDocService");

View File

@ -38,7 +38,7 @@ const utilsDocService = require('./utilsDocService');
var docsCoServer = require('./DocsCoServer');
var utils = require('./../../Common/sources/utils');
var constants = require('./../../Common/sources/constants');
var storageBase = require('./../../Common/sources/storage-base');
var storageBase = require('./../../Common/sources/storage/storage-base');
var formatChecker = require('./../../Common/sources/formatchecker');
const commonDefines = require('./../../Common/sources/commondefines');
const operationContext = require('./../../Common/sources/operationContext');

View File

@ -39,7 +39,7 @@ var ms = require('ms');
var taskResult = require('./taskresult');
var docsCoServer = require('./DocsCoServer');
var canvasService = require('./canvasservice');
var storage = require('./../../Common/sources/storage-base');
var storage = require('./../../Common/sources/storage/storage-base');
var utils = require('./../../Common/sources/utils');
var logger = require('./../../Common/sources/logger');
var constants = require('./../../Common/sources/constants');

View File

@ -35,7 +35,7 @@ const express = require('express');
const config = require("config");
const operationContext = require('./../../../Common/sources/operationContext');
const utils = require('./../../../Common/sources/utils');
const storage = require('./../../../Common/sources/storage-base');
const storage = require('./../../../Common/sources/storage/storage-base');
const urlModule = require("url");
const path = require("path");
const mime = require("mime");

View File

@ -13,8 +13,9 @@
},
"pkg": {
"scripts": [
"../Common/sources/storage-fs.js",
"../Common/sources/storage-s3.js",
"../Common/sources/storage/storage-fs.js",
"../Common/sources/storage/storage-s3.js",
"../Common/sources/storage/storage-az.js",
"../DocService/sources/editorDataMemory.js",
"../DocService/sources/editorDataRedis.js"
]

View File

@ -43,7 +43,7 @@ const lcid = require('lcid');
const ms = require('ms');
var commonDefines = require('./../../Common/sources/commondefines');
var storage = require('./../../Common/sources/storage-base');
var storage = require('./../../Common/sources/storage/storage-base');
var utils = require('./../../Common/sources/utils');
var constants = require('./../../Common/sources/constants');
var baseConnector = require('../../DocService/sources/databaseConnectors/baseConnector');

View File

@ -34,7 +34,7 @@ const { describe, test, expect, afterAll, beforeAll } = require('@jest/globals')
const http = require('http');
const { signToken } = require('../../../DocService/sources/DocsCoServer');
const storage = require('../../../Common/sources/storage-base');
const storage = require('../../../Common/sources/storage/storage-base');
const constants = require('../../../Common/sources/commondefines');
const operationContext = require('../../../Common/sources/operationContext');
const utils = require("../../../Common/sources/utils");

View File

@ -49,7 +49,7 @@ const { cp } = require('fs/promises');
const operationContext = require('../../../Common/sources/operationContext');
const tenantManager = require('../../../Common/sources/tenantManager');
const storage = require('../../../Common/sources/storage-base');
const storage = require('../../../Common/sources/storage/storage-base');
const utils = require('../../../Common/sources/utils');
const commonDefines = require("../../../Common/sources/commondefines");
const config = require('../../../Common/node_modules/config');

View File

@ -40,7 +40,7 @@ const {
const co = require('co');
const taskResult = require('./../../DocService/sources/taskresult');
const storage = require('./../../Common/sources/storage-base');
const storage = require('./../../Common/sources/storage/storage-base');
const storageFs = require('./../../Common/sources/storage-fs');
const operationContext = require('./../../Common/sources/operationContext');
const utils = require('./../../Common/sources/utils');