mirror of https://github.com/ONLYOFFICE/server.git
[storage] Migrate from s3 aws-sdk v2 to v3
Common/npm-shrinkwrap.json (generated, 1299 lines changed): file diff suppressed because it is too large.
@@ -4,8 +4,9 @@
   "homepage": "https://www.onlyoffice.com",
   "private": true,
   "dependencies": {
+    "@aws-sdk/client-s3": "^3.370.0",
+    "@aws-sdk/s3-request-presigner": "^3.370.0",
     "amqplib": "^0.8.0",
-    "aws-sdk": "^2.1074.0",
     "co": "^4.6.0",
     "config": "^2.0.1",
     "content-disposition": "^0.5.3",
@@ -34,7 +34,10 @@
 var fs = require('fs');
 var url = require('url');
 var path = require('path');
-var AWS = require('aws-sdk');
+const { S3Client, ListObjectsCommand, HeadObjectCommand} = require("@aws-sdk/client-s3");
+const { GetObjectCommand, PutObjectCommand, CopyObjectCommand} = require("@aws-sdk/client-s3");
+const { DeleteObjectsCommand, DeleteObjectCommand } = require("@aws-sdk/client-s3");
+const { getSignedUrl } = require("@aws-sdk/s3-request-presigner");
 var mime = require('mime');
 var utils = require('./utils');
 const ms = require('ms');
@@ -64,23 +67,18 @@ const cfgExpSessionAbsolute = ms(config.get('services.CoAuthoring.expire.session
 var configS3 = {
   region: cfgRegion,
   endpoint: cfgEndpoint,
-  accessKeyId: cfgAccessKeyId,
-  secretAccessKey: cfgSecretAccessKey
+  credentials : {
+    accessKeyId: cfgAccessKeyId,
+    secretAccessKey: cfgSecretAccessKey
+  }
 };
-if (configS3.endpoint) {
-  configS3.sslEnabled = cfgSslEnabled;
-  configS3.s3ForcePathStyle = cfgS3ForcePathStyle;
-}
-AWS.config.update(configS3);
-var s3Client = new AWS.S3();
-if (configS3.endpoint) {
-  s3Client.endpoint = new AWS.Endpoint(configS3.endpoint);
-}
+var cfgEndpointParsed = null;
+if (cfgEndpoint) {
+  cfgEndpointParsed = url.parse(cfgEndpoint);
+}
+const client = new S3Client(configS3);

 //This operation enables you to delete multiple objects from a bucket using a single HTTP request. You may specify up to 1000 keys.
 var MAX_DELETE_OBJECTS = 1000;
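For reference, a minimal sketch (not part of the commit) of how storage settings map onto a v3 client: in v3 the access keys must be nested under credentials, and path-style addressing is controlled by the forcePathStyle client option rather than the v2 s3ForcePathStyle/sslEnabled flags dropped above. The cfg* names are the ones defined earlier in storage-s3.js.

// Hypothetical illustration, assuming the cfg* values defined earlier in this file.
const { S3Client } = require("@aws-sdk/client-s3");

const illustrativeClient = new S3Client({
  region: cfgRegion,
  endpoint: cfgEndpoint,               // e.g. an S3-compatible endpoint such as MinIO
  credentials: {
    accessKeyId: cfgAccessKeyId,
    secretAccessKey: cfgSecretAccessKey
  },
  forcePathStyle: cfgS3ForcePathStyle  // v3 counterpart of the v2 s3ForcePathStyle option
});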
@@ -89,137 +87,114 @@ function getFilePath(strPath) {
   return cfgStorageFolderName + '/' + strPath;
 }
 function joinListObjects(inputArray, outputArray) {
   if (!inputArray) {
     return;
   }
   var length = inputArray.length;
   for (var i = 0; i < length; i++) {
     outputArray.push(inputArray[i].Key.substring((cfgStorageFolderName + '/').length));
   }
 }
-function listObjectsExec(output, params, resolve, reject) {
-  s3Client.listObjects(params, function(err, data) {
-    if (err) {
-      reject(err);
-    } else {
-      joinListObjects(data.Contents, output);
-      if (data.IsTruncated && (data.NextMarker || data.Contents.length > 0)) {
-        params.Marker = data.NextMarker || data.Contents[data.Contents.length - 1].Key;
-        listObjectsExec(output, params, resolve, reject);
-      } else {
-        resolve(output);
-      }
-    }
-  });
-}
+async function listObjectsExec(output, params) {
+  const data = await client.send(new ListObjectsCommand(params));
+  joinListObjects(data.Contents, output);
+  if (data.IsTruncated && (data.NextMarker || (data.Contents && data.Contents.length > 0))) {
+    params.Marker = data.NextMarker || data.Contents[data.Contents.length - 1].Key;
+    return await listObjectsExec(output, params);
+  } else {
+    return output;
+  }
+}
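The rewritten listObjectsExec keeps the v2 pagination scheme: ListObjects pages via Marker/NextMarker and the function recurses until IsTruncated is false. As a side note, and not something this commit uses, the v3 SDK also ships ListObjectsV2Command, where pagination is driven by a continuation token; a rough sketch under that assumption:

// Hypothetical alternative, not used by the commit: ListObjectsV2 pagination with ContinuationToken.
const { ListObjectsV2Command } = require("@aws-sdk/client-s3");

async function listAllKeys(client, bucket, prefix) {
  const keys = [];
  let token = undefined;
  do {
    const data = await client.send(new ListObjectsV2Command({
      Bucket: bucket,
      Prefix: prefix,
      ContinuationToken: token
    }));
    (data.Contents || []).forEach(obj => keys.push(obj.Key));
    token = data.IsTruncated ? data.NextContinuationToken : undefined;
  } while (token);
  return keys;
}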
 function mapDeleteObjects(currentValue) {
   return {Key: currentValue};
 }
-function deleteObjectsHelp(aKeys) {
-  return new Promise(function(resolve, reject) {
-    //By default, the operation uses verbose mode in which the response includes the result of deletion of each key in your request.
-    //In quiet mode the response includes only keys where the delete operation encountered an error.
-    var params = {Bucket: cfgBucketName, Delete: {Objects: aKeys, Quiet: true}};
-    s3Client.deleteObjects(params, function(err, data) {
-      if (err) {
-        reject(err);
-      } else {
-        resolve(data);
-      }
-    });
-  });
-};
+async function deleteObjectsHelp(aKeys) {
+  //By default, the operation uses verbose mode in which the response includes the result of deletion of each key in your request.
+  //In quiet mode the response includes only keys where the delete operation encountered an error.
+  const input = {
+    Bucket: cfgBucketName,
+    Delete: {
+      Objects: aKeys,
+      Quiet: true
+    }
+  };
+  const command = new DeleteObjectsCommand(input);
+  return await client.send(command);
+}

-exports.headObject = function(strPath) {
-  return new Promise(function(resolve, reject) {
-    var params = {Bucket: cfgBucketName, Key: getFilePath(strPath)};
-    s3Client.headObject(params, function(err, data) {
-      if (err) {
-        reject(err);
-      } else {
-        resolve(data);
-      }
-    });
-  });
+exports.headObject = async function(strPath) {
+  const input = {
+    Bucket: cfgBucketName,
+    Key: getFilePath(strPath)
+  };
+  const command = new HeadObjectCommand(input);
+  return await client.send(command);
 };
-exports.getObject = function(strPath) {
-  return new Promise(function(resolve, reject) {
-    var params = {Bucket: cfgBucketName, Key: getFilePath(strPath)};
-    s3Client.getObject(params, function(err, data) {
-      if (err) {
-        reject(err);
-      } else {
-        resolve(data.Body);
-      }
-    });
-  });
+exports.getObject = async function(strPath) {
+  const input = {
+    Bucket: cfgBucketName,
+    Key: getFilePath(strPath)
+  };
+  const command = new GetObjectCommand(input);
+  const output = await client.send(command);
+
+  return await utils.stream2Buffer(output.Body);
 };
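Unlike the v2 getObject, the v3 GetObjectCommand does not return a Buffer: output.Body is a readable stream that has to be consumed, which is why getObject now runs it through utils.stream2Buffer. A minimal sketch of what such a helper can look like (the project's own utils.stream2Buffer may differ):

// Illustrative helper, assumed to behave like utils.stream2Buffer: drain a readable stream into a Buffer.
function streamToBuffer(stream) {
  return new Promise((resolve, reject) => {
    const chunks = [];
    stream.on('data', (chunk) => chunks.push(chunk));
    stream.on('error', reject);
    stream.on('end', () => resolve(Buffer.concat(chunks)));
  });
}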
-exports.createReadStream = function(strPath) {
-  return new Promise(function(resolve, reject) {
-    var params = {Bucket: cfgBucketName, Key: getFilePath(strPath)};
-    s3Client.getObject(params)
-      .on('error', (err) => {
-        reject(err);
-      })
-      .on('httpHeaders', function(statusCode, headers, resp, statusMessage) {
-        //retries are possible
-        if (statusCode < 300) {
-          let responseObject = {
-            contentLength: headers['content-length'],
-            readStream: this.response.httpResponse.createUnbufferedStream()
-          };
-          resolve(responseObject);
-        }
-      }).send();
-  });
+exports.createReadStream = async function(strPath) {
+  const input = {
+    Bucket: cfgBucketName,
+    Key: getFilePath(strPath)
+  };
+  const command = new GetObjectCommand(input);
+  const output = await client.send(command);
+  return {
+    contentLength: output.ContentLength,
+    readStream: output.Body
+  };
 };
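Because the v3 GetObject output already exposes Body as a Node.js Readable, createReadStream no longer needs the v2 httpHeaders/createUnbufferedStream dance; the returned stream can be piped directly. A hedged consumer sketch (not part of the diff), assuming a caller that has the { contentLength, readStream } object returned above:

// Hypothetical consumer: save the returned stream to a local file.
const fs = require('fs');
const { pipeline } = require('stream/promises');

async function saveToFile(readStreamResult, localPath) {
  // readStreamResult is the { contentLength, readStream } object returned by createReadStream
  await pipeline(readStreamResult.readStream, fs.createWriteStream(localPath));
}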
-exports.putObject = function(strPath, buffer, contentLength) {
-  return new Promise(function(resolve, reject) {
-    //todo consider Expires
-    var params = {Bucket: cfgBucketName, Key: getFilePath(strPath), Body: buffer,
-      ContentLength: contentLength, ContentType: mime.getType(strPath)};
-    s3Client.putObject(params, function(err, data) {
-      if (err) {
-        reject(err);
-      } else {
-        resolve(data);
-      }
-    });
-  });
+exports.putObject = async function(strPath, buffer, contentLength) {
+  //todo consider Expires
+  const input = {
+    Bucket: cfgBucketName,
+    Key: getFilePath(strPath),
+    Body: buffer,
+    ContentLength: contentLength,
+    ContentType: mime.getType(strPath)
+  };
+  const command = new PutObjectCommand(input);
+  return await client.send(command);
 };
-exports.uploadObject = function(strPath, filePath) {
-  return new Promise(function(resolve, reject) {
-    fs.readFile(filePath, (err, data) => {
-      if (err) {
-        reject(err);
-      } else {
-        resolve(data);
-      }
-    });
-  }).then(function(data) {
-    return exports.putObject(strPath, data, data.length);
-  });
+exports.uploadObject = async function(strPath, filePath) {
+  const file = fs.createReadStream(filePath);
+  //todo consider Expires
+  const input = {
+    Bucket: cfgBucketName,
+    Key: getFilePath(strPath),
+    Body: file,
+    ContentType: mime.getType(strPath)
+  };
+  const command = new PutObjectCommand(input);
+  return await client.send(command);
 };
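The new uploadObject hands fs.createReadStream(filePath) to PutObjectCommand without a ContentLength. Streams of unknown length are a known rough edge of PutObject in the v3 SDK; if that becomes a problem, the Upload helper from @aws-sdk/lib-storage (not used in this commit) accepts a stream and performs a multipart upload. A sketch under that assumption:

// Hypothetical alternative, not part of the diff: multipart-capable upload for a stream of unknown size.
const fs = require('fs');
const { Upload } = require("@aws-sdk/lib-storage");

async function uploadStream(client, bucket, key, filePath) {
  const upload = new Upload({
    client,
    params: { Bucket: bucket, Key: key, Body: fs.createReadStream(filePath) }
  });
  return await upload.done();
}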
 exports.copyObject = function(sourceKey, destinationKey) {
-  return exports.getObject(sourceKey).then(function(data) {
-    return exports.putObject(destinationKey, data, data.length);
-  });
+  //todo source bucket
+  const input = {
+    Bucket: cfgBucketName,
+    Key: getFilePath(destinationKey),
+    CopySource: `/${cfgBucketName}/${getFilePath(sourceKey)}`
+  };
+  const command = new CopyObjectCommand(input);
+  return client.send(command);
 };
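copyObject now issues a server-side CopyObjectCommand instead of downloading and re-uploading the object. CopySource takes the "/source-bucket/source-key" form, and the //todo notes that the source bucket is currently assumed to be the same cfgBucketName. A hedged sketch of what a cross-bucket variant could look like (sourceBucket/destBucket are illustrative parameters, not from the commit):

// Hypothetical cross-bucket copy, not part of the diff.
const { CopyObjectCommand } = require("@aws-sdk/client-s3");

async function copyBetweenBuckets(client, sourceBucket, sourceKey, destBucket, destKey) {
  const command = new CopyObjectCommand({
    Bucket: destBucket,
    Key: destKey,
    CopySource: encodeURI(`/${sourceBucket}/${sourceKey}`)
  });
  return await client.send(command);
}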
-exports.listObjects = function(strPath) {
-  return new Promise(function(resolve, reject) {
-    var params = {Bucket: cfgBucketName, Prefix: getFilePath(strPath)};
-    var output = [];
-    listObjectsExec(output, params, resolve, reject);
-  });
+exports.listObjects = async function(strPath) {
+  var params = {Bucket: cfgBucketName, Prefix: getFilePath(strPath)};
+  var output = [];
+  return await listObjectsExec(output, params);
 };
 exports.deleteObject = function(strPath) {
-  return new Promise(function(resolve, reject) {
-    var params = {Bucket: cfgBucketName, Key: getFilePath(strPath)};
-    s3Client.deleteObject(params, function(err, data) {
-      if (err) {
-        reject(err);
-      } else {
-        resolve(data);
-      }
-    });
-  });
+  const input = {
+    Bucket: cfgBucketName,
+    Key: getFilePath(strPath)
+  };
+  const command = new DeleteObjectCommand(input);
+  return client.send(command);
 };
 exports.deleteObjects = function(strPaths) {
   var aKeys = strPaths.map(function (currentValue) {
@@ -231,21 +206,25 @@ exports.deleteObjects = function(strPaths) {
   }
   return Promise.all(deletePromises);
 };
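The unchanged deleteObjects (context above) maps paths to {Key} objects and resolves Promise.all over per-batch calls, since DeleteObjects accepts at most MAX_DELETE_OBJECTS (1000) keys per request. The batching itself sits outside the hunk; a minimal sketch of that chunking pattern, assuming deleteObjectsHelp and MAX_DELETE_OBJECTS as defined earlier:

// Illustrative chunking, mirroring the existing pattern: at most 1000 keys per DeleteObjects call.
function deleteInBatches(aKeys) {
  const deletePromises = [];
  for (let i = 0; i < aKeys.length; i += MAX_DELETE_OBJECTS) {
    deletePromises.push(deleteObjectsHelp(aKeys.slice(i, i + MAX_DELETE_OBJECTS)));
  }
  return Promise.all(deletePromises);
}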
-exports.getSignedUrl = function(baseUrl, strPath, urlType, optFilename, opt_creationDate) {
-  return new Promise(function(resolve, reject) {
-    var expires = (commonDefines.c_oAscUrlTypes.Session === urlType ? cfgExpSessionAbsolute / 1000 : cfgStorageUrlExpires) || 31536000;
-    var userFriendlyName = optFilename ? optFilename.replace(/\//g, "%2f") : path.basename(strPath);
-    var contentDisposition = utils.getContentDisposition(userFriendlyName, null, null);
-    //default Expires 900 seconds
-    var params = {
-      Bucket: cfgBucketName, Key: getFilePath(strPath), ResponseContentDisposition: contentDisposition, Expires: expires
-    };
-    s3Client.getSignedUrl('getObject', params, function(err, data) {
-      if (err) {
-        reject(err);
-      } else {
-        resolve(utils.changeOnlyOfficeUrl(data, strPath, optFilename));
-      }
-    });
-  });
+exports.getSignedUrl = async function (baseUrl, strPath, urlType, optFilename, opt_creationDate) {
+  var expires = (commonDefines.c_oAscUrlTypes.Session === urlType ? cfgExpSessionAbsolute / 1000 : cfgStorageUrlExpires) || 31536000;
+  // Signature version 4 presigned URLs must have an expiration date less than one week in the future
+  expires = Math.min(expires, 604800);
+  var userFriendlyName = optFilename ? optFilename.replace(/\//g, "%2f") : path.basename(strPath);
+  var contentDisposition = utils.getContentDisposition(userFriendlyName, null, null);
+
+  const input = {
+    Bucket: cfgBucketName,
+    Key: getFilePath(strPath),
+    ResponseContentDisposition: contentDisposition
+  };
+  const command = new GetObjectCommand(input);
+  //default Expires 900 seconds
+  var options = {
+    expiresIn: expires
+  };
+  return await getSignedUrl(client, command, options);
+  //extra query params cause SignatureDoesNotMatch
+  //https://stackoverflow.com/questions/55503009/amazon-s3-signature-does-not-match-when-extra-query-params-ga-added-in-url
+  // return utils.changeOnlyOfficeUrl(url, strPath, optFilename);
 };
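The presigner call above follows the v3 pattern: getSignedUrl(client, command, { expiresIn }) from @aws-sdk/s3-request-presigner resolves to the signed URL, and SigV4 caps expiresIn at 604800 seconds (7 days), which is why the expiry is clamped. A short standalone usage sketch with an assumed bucket and key, not taken from the commit:

// Hypothetical usage; 'my-bucket' and 'folder/document.docx' are assumptions for illustration.
const { S3Client, GetObjectCommand } = require("@aws-sdk/client-s3");
const { getSignedUrl } = require("@aws-sdk/s3-request-presigner");

async function presignDownload(client, bucket, key) {
  const command = new GetObjectCommand({ Bucket: bucket, Key: key });
  // expiresIn is in seconds; anything above 604800 (7 days) is rejected by SigV4.
  return await getSignedUrl(client, command, { expiresIn: 3600 });
}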
@@ -69,6 +69,6 @@
   "scripts": {
     "unit tests": "cd ./DocService && jest unit --config=../tests/jest.config.js",
     "integration tests": "cd ./DocService && jest integration --config=../tests/jest.config.js",
-    "tests": "cd ./DocService && jest --config=../tests/jest.config.js"
+    "tests": "cd ./DocService && jest --inject-globals=false --config=../tests/jest.config.js"
   }
 }
@@ -14,6 +14,9 @@ const cfgTokenAlgorithm = config.get('services.CoAuthoring.token.session.algorit
 const cfgSecretOutbox = config.get('services.CoAuthoring.secret.outbox');
 const cfgTokenOutboxExpires = config.get('services.CoAuthoring.token.outbox.expires');
 const cfgTokenEnableRequestOutbox = config.get('services.CoAuthoring.token.enable.request.outbox');
+const cfgStorageName = config.get('storage.name');
+const cfgEndpoint = config.get('storage.endpoint');
+const cfgBucketName = config.get('storage.bucketName');
 const ctx = new operationContext.Context();
 const testFilesNames = {
   get: 'DocService-DocsCoServer-forgottenFilesCommands-getForgotten-integration-test',
@@ -142,7 +145,16 @@ describe('Command service', function () {
 describe('getForgotten', function () {
   const createExpected = ({ key, error }) => {
     const validKey = typeof key === 'string' && error === 0
-    const urlPattern = 'http://localhost:8000/cache/files/forgotten/--key--/output.docx/output.docx';
+    let urlPattern;
+    if ("storage-fs" === cfgStorageName) {
+      urlPattern = 'http://localhost:8000/cache/files/forgotten/--key--/output.docx/output.docx';
+    } else {
+      let host = cfgEndpoint.slice(0, "https://".length) + cfgBucketName + "." + cfgEndpoint.slice("https://".length);
+      if (host[host.length - 1] === '/') {
+        host = host.slice(0, -1);
+      }
+      urlPattern = host + '/files/forgotten/--key--/output.docx';
+    }

     const expected = { key, error };
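The test derives the expected S3 URL by splicing the bucket name into the endpoint host, i.e. virtual-hosted-style addressing. A worked example of that string manipulation, with assumed values rather than the real config:

// Worked example of the host construction above ("https://s3.example.com" and "cache" are assumptions).
const endpoint = "https://s3.example.com";
const bucket = "cache";
const host = endpoint.slice(0, "https://".length) + bucket + "." + endpoint.slice("https://".length);
// host === "https://cache.s3.example.com"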
tests/integration/storage.tests.js (new file, 154 lines)
@@ -0,0 +1,154 @@
+const {jest, describe, test, expect} = require('@jest/globals');
+const http = require('http');
+const https = require('https');
+const fs = require('fs');
+
+const operationContext = require('../../Common/sources/operationContext');
+const storage = require('../../Common/sources/storage-base');
+const utils = require('../../Common/sources/utils');
+const commonDefines = require("../../Common/sources/commondefines");
+const config = require('../../Common/node_modules/config');
+
+const cfgStorageName = config.get('storage.name');
+
+const ctx = operationContext.global;
+const rand = Math.floor(Math.random() * 1000000);
+const testDir = "DocService-DocsCoServer-storage-" + rand;
+const baseUrl = "http://localhost:8000";
+const urlType = commonDefines.c_oAscUrlTypes.Session;
+let testFile1 = testDir + "/test1.txt";
+let testFile2 = testDir + "/test2.txt";
+let testFile3 = testDir + "/test3.txt";
+let testFileData1 = "test1";
+let testFileData2 = "test2";
+let testFileData3 = testFileData2;
+
+console.debug(`testDir: ${testDir}`)
+
+function request(url) {
+  return new Promise(resolve => {
+    let module = url.startsWith('https') ? https : http;
+    module.get(url, response => {
+      let data = '';
+      response.on('data', _data => (data += _data));
+      response.on('end', () => resolve(data));
+    });
+  });
+}
+function runTestForDir(specialDir) {
+  test("start listObjects", async () => {
+    let list = await storage.listObjects(ctx, testDir, specialDir);
+    expect(list).toEqual([]);
+  });
+  test("putObject", async () => {
+    let buffer = Buffer.from(testFileData1);
+    await storage.putObject(ctx, testFile1, buffer, buffer.length, specialDir);
+    let list = await storage.listObjects(ctx, testDir, specialDir);
+    expect(list.sort()).toEqual([testFile1].sort());
+  });
+  if ("storage-fs" === cfgStorageName) {
+    test("todo UploadObject in fs", async () => {
+      let buffer = Buffer.from(testFileData2);
+      await storage.putObject(ctx, testFile2, buffer, buffer.length, specialDir);
+      let list = await storage.listObjects(ctx, testDir, specialDir);
+      expect(list.sort()).toEqual([testFile1, testFile2].sort());
+    });
+  } else {
+    test("uploadObject", async () => {
+      const spy = jest.spyOn(fs, 'createReadStream').mockReturnValue(testFileData2);
+      await storage.uploadObject(ctx, testFile2, "createReadStream.txt", specialDir);
+      let list = await storage.listObjects(ctx, testDir, specialDir);
+      expect(spy).toHaveBeenCalled();
+      expect(list.sort()).toEqual([testFile1, testFile2].sort());
+    });
+  }
+  test("copyObject", async () => {
+    await storage.copyObject(ctx, testFile2, testFile3, specialDir, specialDir);
+    // let buffer = Buffer.from(testFileData3);
+    // await storage.putObject(ctx, testFile3, buffer, buffer.length, specialDir);
+    let list = await storage.listObjects(ctx, testDir, specialDir);
+    expect(list.sort()).toEqual([testFile1, testFile2, testFile3].sort());
+  });
+  test("headObject", async () => {
+    let output;
+    output = await storage.headObject(ctx, testFile1, specialDir);
+    expect(output).toHaveProperty("ContentLength", testFileData1.length);
+
+    output = await storage.headObject(ctx, testFile2, specialDir);
+    expect(output).toHaveProperty("ContentLength", testFileData2.length);
+
+    output = await storage.headObject(ctx, testFile3, specialDir);
+    expect(output).toHaveProperty("ContentLength", testFileData3.length);
+  });
+  test("getObject", async () => {
+    let output;
+    output = await storage.getObject(ctx, testFile1, specialDir);
+    expect(output.toString("utf8")).toEqual(testFileData1);
+
+    output = await storage.getObject(ctx, testFile2, specialDir);
+    expect(output.toString("utf8")).toEqual(testFileData2);
+
+    output = await storage.getObject(ctx, testFile3, specialDir);
+    expect(output.toString("utf8")).toEqual(testFileData3);
+  });
+  test("createReadStream", async () => {
+    let output, outputText;
+
+    output = await storage.createReadStream(ctx, testFile1, specialDir);
+    await utils.sleep(100);
+    outputText = await utils.stream2Buffer(output.readStream);
+    await utils.sleep(100);
+    expect(outputText.toString("utf8")).toEqual(testFileData1);
+
+    output = await storage.createReadStream(ctx, testFile2, specialDir);
+    outputText = await utils.stream2Buffer(output.readStream);
+    expect(outputText.toString("utf8")).toEqual(testFileData2);
+
+    output = await storage.createReadStream(ctx, testFile3, specialDir);
+    outputText = await utils.stream2Buffer(output.readStream);
+    expect(outputText.toString("utf8")).toEqual(testFileData3);
+  });
+  test("getSignedUrl", async () => {
+    let url, data;
+    url = await storage.getSignedUrl(ctx, baseUrl, testFile1, urlType, undefined, undefined, specialDir);
+    data = await request(url);
+    expect(data).toEqual(testFileData1);
+
+    url = await storage.getSignedUrl(ctx, baseUrl, testFile2, urlType, undefined, undefined, specialDir);
+    data = await request(url);
+    expect(data).toEqual(testFileData2);
+
+    url = await storage.getSignedUrl(ctx, baseUrl, testFile3, urlType, undefined, undefined, specialDir);
+    data = await request(url);
+    expect(data).toEqual(testFileData3);
+  });
+  test("deleteObject", async () => {
+    let list;
+    list = await storage.listObjects(ctx, testDir, specialDir);
+    expect(list.sort()).toEqual([testFile1, testFile2, testFile3].sort());
+
+    await storage.deleteObject(ctx, testFile1, specialDir);
+
+    list = await storage.listObjects(ctx, testDir, specialDir);
+    expect(list.sort()).toEqual([testFile2, testFile3].sort());
+  });
+  test("deleteObjects", async () => {
+    let list;
+    list = await storage.listObjects(ctx, testDir, specialDir);
+    expect(list.sort()).toEqual([testFile2, testFile3].sort());
+
+    await storage.deleteObjects(ctx, list, specialDir);
+
+    list = await storage.listObjects(ctx, testDir, specialDir);
+    expect(list.sort()).toEqual([].sort());
+  });
+}
+
+// Assumed, that server is already up.
+describe('storage common dir', function () {
+  runTestForDir("");
+});
+
+describe('storage forgotten dir', function () {
+  runTestForDir("forgotten");
+});