Mirror of https://github.com/ONLYOFFICE/server.git (synced 2026-02-10 18:05:07 +08:00)
[feature] Refactor uploadObject; For bug 73502
@@ -4,6 +4,7 @@
- @aws-sdk/client-s3 3.637.0 ([Apache-2.0](https://raw.githubusercontent.com/aws/aws-sdk-js-v3/main/LICENSE))
- @aws-sdk/node-http-handler 3.374.0 ([Apache-2.0](https://raw.githubusercontent.com/aws/aws-sdk-js-v3/main/LICENSE))
- @aws-sdk/s3-request-presigner 3.370.0 ([Apache-2.0](https://raw.githubusercontent.com/aws/aws-sdk-js-v3/main/LICENSE))
- @azure/storage-blob 12.27.0 ([MIT](https://raw.githubusercontent.com/Azure/azure-sdk-for-js/refs/heads/main/sdk/storage/storage-blob/LICENSE))
- amqplib 0.8.0 ([MIT](https://raw.githubusercontent.com/amqp-node/amqplib/main/LICENSE))
- co 4.6.0 ([MIT](https://raw.githubusercontent.com/tj/co/master/LICENSE))
- config 2.0.1 ([MIT](https://raw.githubusercontent.com/node-config/node-config/master/LICENSE))

@@ -1,6 +1,5 @@
'use strict';
const fs = require('fs');
const url = require('url');
const path = require('path');
const { BlobServiceClient, StorageSharedKeyCredential, generateBlobSASQueryParameters, BlobSASPermissions } = require('@azure/storage-blob');
const mime = require('mime');
@@ -12,8 +11,14 @@ const commonDefines = require('./../../Common/sources/commondefines');

const cfgExpSessionAbsolute = ms(config.get('services.CoAuthoring.expire.sessionabsolute'));
const MAX_DELETE_OBJECTS = 1000;
let blobServiceClients = {};
const blobServiceClients = {};

/**
 * Gets or creates a BlobServiceClient for the given storage configuration.
 *
 * @param {Object} storageCfg - configuration object from default.json
 * @returns {BlobServiceClient} The Azure Blob Service client
 */
function getBlobServiceClient(storageCfg) {
  const configKey = `${storageCfg.accessKeyId}_${storageCfg.bucketName}`;
  if (!blobServiceClients[configKey]) {
@@ -29,19 +34,39 @@ function getBlobServiceClient(storageCfg) {
  return blobServiceClients[configKey];
}
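For orientation (not part of the diff): the body of getBlobServiceClient between these two hunks is elided. A minimal sketch of such a cached-client getter with @azure/storage-blob, assuming shared-key auth and the requires shown above; the storageCfg.secretAccessKey and storageCfg.endpoint field names are assumptions, not values taken from this commit:

// Sketch only -- not the repository's code. Clients are cached per access key + container.
function getBlobServiceClientSketch(storageCfg) {
  const configKey = `${storageCfg.accessKeyId}_${storageCfg.bucketName}`;
  if (!blobServiceClients[configKey]) {
    // Assumed config fields: secretAccessKey (account key) and endpoint (account URL).
    const credential = new StorageSharedKeyCredential(storageCfg.accessKeyId, storageCfg.secretAccessKey);
    blobServiceClients[configKey] = new BlobServiceClient(storageCfg.endpoint, credential);
  }
  return blobServiceClients[configKey];
}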

/**
 * Gets a ContainerClient for the specified storage configuration.
 *
 * @param {Object} storageCfg - configuration object from default.json
 * @returns {ContainerClient} The Azure Container client
 */
function getContainerClient(storageCfg) {
  const blobServiceClient = getBlobServiceClient(storageCfg);
  return blobServiceClient.getContainerClient(storageCfg.bucketName);
}

/**
 * Gets a BlockBlobClient for the specified storage configuration and blob name.
 *
 * @param {Object} storageCfg - configuration object from default.json
 * @param {string} blobName - The name of the blob
 * @returns {BlockBlobClient} The Azure Block Blob client
 */
function getBlobClient(storageCfg, blobName) {
  const containerClient = getContainerClient(storageCfg);
  return containerClient.getBlockBlobClient(blobName);
}

/**
 * Constructs a full file path by combining the storage folder name and the path.
 *
 * @param {Object} storageCfg - configuration object from default.json
 * @param {string} strPath - The relative path of the file
 * @returns {string} The full file path
 */
function getFilePath(storageCfg, strPath) {
  const storageFolderName = storageCfg.storageFolderName;
  return `${storageFolderName}/${strPath}`
  return `${storageFolderName}/${strPath}`;
}
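Taken together, these helpers resolve a relative storage path to a BlockBlobClient. A short usage sketch; the config values below are illustrative, not taken from this repository's default.json:

// Illustrative values only.
const exampleCfg = {accessKeyId: 'myaccount', bucketName: 'mycontainer', storageFolderName: 'files'};
// Resolves to the block blob "files/doc123/Editor.bin" inside container "mycontainer".
const exampleBlobClient = getBlobClient(exampleCfg, getFilePath(exampleCfg, 'doc123/Editor.bin'));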

async function listObjectsExec(storageCfg, prefix, output = []) {
@@ -107,30 +132,19 @@ async function putObject(storageCfg, strPath, buffer, contentLength) {

async function uploadObject(storageCfg, strPath, filePath) {
  const blockBlobClient = getBlobClient(storageCfg, getFilePath(storageCfg, strPath));
  const input = fs.createReadStream(filePath);
  const uploadStream = input instanceof Readable ? input : new Readable({
    read() {
      this.push(input);
      this.push(null);
    }
  });

  await new Promise((resolve, reject) => {
    uploadStream.on('error', reject);
    blockBlobClient.uploadStream(
      uploadStream,
      undefined,
      undefined,
      {
        blobHTTPHeaders: {
          contentType: mime.getType(strPath),
          contentDisposition: utils.getContentDisposition(path.basename(strPath))
        }
  const uploadStream = fs.createReadStream(filePath);

  await blockBlobClient.uploadStream(
    uploadStream,
    undefined,
    undefined,
    {
      blobHTTPHeaders: {
        contentType: mime.getType(strPath),
        contentDisposition: utils.getContentDisposition(path.basename(strPath))
      }
    )
      .then(resolve)
      .catch(reject);
  });
}
  );
}
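The refactored uploadObject drops the hand-rolled Promise wrapper and awaits uploadStream directly; as the new tests further down verify, read-stream failures (including ENOENT for a missing source file) surface as a rejection of that awaited call. A minimal caller sketch under that behavior; the paths and logging below are illustrative only:

// Sketch: stream errors now propagate through the awaited promise.
try {
  await uploadObject(storageCfg, 'doc123/Editor.bin', '/tmp/Editor.bin');
} catch (err) {
  // e.g. ENOENT if /tmp/Editor.bin does not exist, or a mid-read stream error
  console.error('uploadObject failed:', err);
}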

async function copyObject(storageCfgSrc, storageCfgDst, sourceKey, destinationKey) {
@@ -188,6 +202,11 @@ async function getSignedUrlWrapper(ctx, storageCfg, baseUrl, strPath, urlType, o
  return await blobClient.generateSasUrl(sasOptions);
}
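The construction of sasOptions sits outside this hunk; a hedged sketch of the shape generateSasUrl typically accepts, assuming the requires shown above (the urlExpires name and values here are assumptions, not code from this commit):

// Assumed shape only; the real sasOptions construction is not shown in this diff.
const urlExpires = 300; // assumed lifetime in seconds
const sasOptions = {
  permissions: BlobSASPermissions.parse('r'), // read-only access
  expiresOn: new Date(Date.now() + urlExpires * 1000),
  contentDisposition: utils.getContentDisposition(path.basename(strPath))
};
// generateSasUrl resolves to a full, signed blob URL.
const signedUrl = await blobClient.generateSasUrl(sasOptions);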

/**
 * Determines whether static routes are needed for cacheFolder
 *
 * @returns {boolean} Always returns false for Azure Blob Storage
 */
function needServeStatic() {
  return false;
}

@@ -119,7 +119,7 @@ function runTestForDir(ctx, isMultitenantMode, specialDir) {
    });
  } else {
    test("uploadObject", async () => {
      const spy = jest.spyOn(fs, 'createReadStream').mockReturnValue(testFileData3);
      const spy = jest.spyOn(fs, 'createReadStream').mockReturnValue(Readable.from(testFileData3));
      let res = await storage.uploadObject(ctx, testFile3, "createReadStream.txt", specialDir);
      expect(res).toEqual(undefined);
      let list = await storage.listObjects(ctx, testDir, specialDir);
@@ -127,6 +127,31 @@ function runTestForDir(ctx, isMultitenantMode, specialDir) {
      expect(list.sort()).toEqual([testFile1, testFile2, testFile3].sort());
      spy.mockRestore();
    });

    test("uploadObject - stream error handling", async () => {
      const streamErrorMessage = "Test stream error";
      const mockStream = new Readable({
        read() {
          this.emit('error', new Error(streamErrorMessage));
        }
      });

      const spy = jest.spyOn(fs, 'createReadStream').mockReturnValue(mockStream);
      // Verify that the uploadObject function rejects when the stream emits an error
      await expect(storage.uploadObject(ctx, "test-error-file.txt", "nonexistent.txt", specialDir))
        .rejects.toThrow(streamErrorMessage);

      spy.mockRestore();
    });

    test("uploadObject - non-existent file handling", async () => {
      const nonExistentFile = 'definitely-does-not-exist-' + Date.now() + '.txt';
      // Verify the file actually doesn't exist
      expect(fs.existsSync(nonExistentFile)).toBe(false);
      // Verify that uploadObject properly handles and propagates the error
      await expect(storage.uploadObject(ctx, "test-error-file.txt", nonExistentFile, specialDir))
        .rejects.toThrow(/ENOENT/);
    });
  }
  test("copyObject", async () => {
    let res = await storage.copyObject(ctx, testFile3, testFile4, specialDir, specialDir);