Merge pull request 'feature/az-command-options-4' (#52) from feature/az-command-options-4 into release/v9.1.0

Reviewed-on: https://git.onlyoffice.com/ONLYOFFICE/server/pulls/52
This commit is contained in: release/v9.1.0
Sergey Konovalov
2025-08-11 13:19:19 +00:00
4 changed files with 190 additions and 61 deletions

View File

@@ -7,25 +7,70 @@ on:
- 'tests/integration/withServerInstance/storage.tests.js'
- 'Common/sources/storage/**'
- 'DocService/sources/routes/static.js'
- '.github/workflows/azureStorageTests.yml'
jobs:
azure-storage-tests:
name: Azure Storage Tests
runs-on: ubuntu-latest
env:
AZURITE_CONTAINER: azurite-${{ github.run_id }}-${{ github.run_attempt }}
steps:
- name: Check out repository code
uses: actions/checkout@v3
- - name: Run Azurite docker container
+ - name: Pre-run cleanup
run: |
- docker run --name azurite \
+ docker rm -f "$AZURITE_CONTAINER" 2>/dev/null || true
- name: Setup and start Azurite
run: |
# Detect network and set network arguments
JOB_NET=$(docker inspect -f '{{range $k,$v := .NetworkSettings.Networks}}{{printf "%s\n" $k}}{{end}}' "$(hostname)" 2>/dev/null | head -n1 || true)
if [ -n "$JOB_NET" ]; then
NETWORK_ARGS="--network $JOB_NET"
else
NETWORK_ARGS=""
fi
# Start Azurite container
docker run --name "$AZURITE_CONTAINER" \
$NETWORK_ARGS \
-p 10000:10000 \
-p 10001:10001 \
-p 10002:10002 \
-d mcr.microsoft.com/azure-storage/azurite \
azurite-blob --blobHost 0.0.0.0 --loose
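# --blobHost 0.0.0.0 listens on all interfaces; --loose relaxes Azurite's strict API validation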
# Set host based on network configuration
if [ -n "$JOB_NET" ]; then
HOST=$(docker inspect -f '{{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}}' "$AZURITE_CONTAINER")
else
HOST=127.0.0.1
fi
# Wait for Azurite to be ready
echo "Waiting for Azurite at $HOST:10000..."
for i in $(seq 1 15); do
if curl -sS "http://$HOST:10000/" >/dev/null 2>&1; then
echo "Azurite ready"
break
fi
sleep 1
done
# Verify Azurite is running
if ! curl -sS "http://$HOST:10000/" >/dev/null 2>&1; then
echo "Azurite failed to start"
docker logs "$AZURITE_CONTAINER" || true
exit 1
fi
# Export host for subsequent steps
echo "AZURITE_HOST=$HOST" >> "$GITHUB_ENV"
- name: Caching dependencies
uses: actions/setup-node@v3
with:
@@ -42,18 +87,15 @@ jobs:
npm --prefix Common ci
npm --prefix DocService ci
- - name: Setup Azure storage test environment
+ - name: Setup Azure storage environment
run: |
- # Wait for Azurite to be ready
- sleep 15
- # Create Azure storage configuration
- cat > Common/config/local.json << 'EOF'
+ # Create minimal Azure storage configuration
+ cat > Common/config/local.json << EOF
{
"storage": {
"name": "storage-az",
"region": "",
"endpoint": "http://127.0.0.1:10000/devstoreaccount1",
"endpoint": "http://${AZURITE_HOST:-127.0.0.1}:10000/devstoreaccount1",
"bucketName": "test-container",
"storageFolderName": "files",
"cacheFolderName": "data",
@@ -62,6 +104,18 @@ jobs:
},
"persistentStorage": {
"storageFolderName": "files/persistent"
},
"commandOptions": {
"az": {
"uploadData": {},
"uploadStream": {},
"download": {},
"syncCopyFromURL": {},
"listBlobsFlat": {
"maxPageSize": 1000
},
"deleteBlob": {}
}
}
}
EOF
@@ -76,35 +130,18 @@ jobs:
# Run Node.js script from Common directory where Azure dependencies are installed
cd Common
node -e "
- const { BlobServiceClient, StorageSharedKeyCredential } = require('@azure/storage-blob');
- async function setupContainer() {
- try {
- const accountName = 'devstoreaccount1';
- const accountKey = 'Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw==';
- const endpoint = 'http://127.0.0.1:10000/devstoreaccount1';
- const credential = new StorageSharedKeyCredential(accountName, accountKey);
- const blobServiceClient = new BlobServiceClient(endpoint, credential);
- const containerClient = blobServiceClient.getContainerClient('test-container');
- console.log('Creating container...');
- await containerClient.createIfNotExists();
- console.log('Container created successfully');
- // Upload a test file if needed
- const blockBlobClient = containerClient.getBlockBlobClient('testfile.txt');
- await blockBlobClient.upload('Test content', Buffer.byteLength('Test content'));
- console.log('Test file uploaded');
- } catch (error) {
- console.error('Error setting up Azure storage:', error);
- process.exit(1);
- }
- }
- setupContainer();
+ (async () => {
+ const { BlobServiceClient, StorageSharedKeyCredential } = require('@azure/storage-blob');
+ const endpoint = 'http://' + (process.env.AZURITE_HOST || '127.0.0.1') + ':10000/devstoreaccount1';
+ const client = new BlobServiceClient(endpoint, new StorageSharedKeyCredential('devstoreaccount1', 'Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw=='));
+ await client.getContainerClient('test-container').createIfNotExists();
+ console.log('Azure environment ready');
+ })().catch(console.error);
"
- name: Run storage tests
run: npm run storage-tests
- name: Final cleanup
if: always()
run: docker rm -f "$AZURITE_CONTAINER" || true
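For local debugging of this job, the readiness probe can be reproduced outside CI. A minimal Node sketch of the same polling loop the curl step performs (assuming Node 18+ for the global fetch; AZURITE_HOST is the variable the workflow exports above):

// Poll Azurite the way the workflow's curl loop does. Any HTTP response,
// even an error status, proves the listener is up, since fetch only rejects
// on network errors such as connection refused.
const host = process.env.AZURITE_HOST || '127.0.0.1';

async function waitForAzurite(retries = 15) {
  for (let i = 0; i < retries; i++) {
    try {
      await fetch(`http://${host}:10000/`);
      console.log('Azurite ready');
      return;
    } catch {
      await new Promise(resolve => setTimeout(resolve, 1000));
    }
  }
  throw new Error('Azurite failed to start');
}

waitForAzurite().catch(err => { console.error(err); process.exit(1); });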

View File

@@ -7,22 +7,69 @@ on:
- 'tests/integration/withServerInstance/storage.tests.js'
- 'Common/sources/storage/**'
- 'DocService/sources/routes/static.js'
- '.github/workflows/s3storageTests.yml'
jobs:
storage-tests:
name: Storage Tests
runs-on: ubuntu-latest
env:
MINIO_CONTAINER: minio-${{ github.run_id }}-${{ github.run_attempt }}
steps:
- - name: Run MinIO docker container
+ - name: Pre-run cleanup
run: |
- docker run --name minio \
+ docker rm -f "$MINIO_CONTAINER" 2>/dev/null || true
# Remove legacy container name if it exists (for self-hosted runners or retries)
docker rm -f minio 2>/dev/null || true
- name: Setup and start MinIO
run: |
# Detect network and set network arguments
JOB_NET=$(docker inspect -f '{{range $k,$v := .NetworkSettings.Networks}}{{printf "%s\n" $k}}{{end}}' "$(hostname)" 2>/dev/null | head -n1 || true)
if [ -n "$JOB_NET" ]; then
NETWORK_ARGS="--network $JOB_NET"
else
NETWORK_ARGS=""
fi
# Start MinIO container
docker run --name "$MINIO_CONTAINER" \
$NETWORK_ARGS \
-p 9000:9000 \
-p 9001:9001 \
-e "MINIO_ROOT_USER=minioadmin" \
-e "MINIO_ROOT_PASSWORD=minioadmin" \
-d minio/minio server /data --console-address ":9001"
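# 'server /data' keeps objects under /data inside the container; the admin console binds to :9001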
# Set host based on network configuration
if [ -n "$JOB_NET" ]; then
HOST=$(docker inspect -f '{{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}}' "$MINIO_CONTAINER")
else
HOST=127.0.0.1
fi
# Wait for MinIO to be ready
echo "Waiting for MinIO at $HOST:9000..."
for i in $(seq 1 15); do
if curl -sS "http://$HOST:9000/minio/health/ready" >/dev/null 2>&1; then
echo "MinIO ready"
break
fi
sleep 1
done
# Verify MinIO is running
if ! curl -sS "http://$HOST:9000/minio/health/ready" >/dev/null 2>&1; then
echo "MinIO failed to start"
docker logs "$MINIO_CONTAINER" || true
exit 1
fi
# Export host for subsequent steps
echo "MINIO_HOST=$HOST" >> "$GITHUB_ENV"
- name: Check out repository code
uses: actions/checkout@v3
@@ -44,11 +91,12 @@ jobs:
- name: Creating storage configuration
run: |
- echo '{
+ cat > Common/config/local.json << EOF
+ {
"storage": {
"name": "storage-s3",
"region": "us-east-1",
"endpoint": "http://localhost:9000",
"endpoint": "http://${MINIO_HOST:-127.0.0.1}:9000",
"bucketName": "cache",
"storageFolderName": "files",
"commandOptions": {
@@ -66,12 +114,17 @@ jobs:
"persistentStorage": {
"storageFolderName": "files/persistent"
}
- }' >> Common/config/local.json
+ }
+ EOF
- name: Create MinIO buckets
run: |
- docker exec minio mc alias set myminio http://localhost:9000 minioadmin minioadmin
- docker exec minio mc mb myminio/cache
+ docker exec "$MINIO_CONTAINER" mc alias set myminio http://localhost:9000 minioadmin minioadmin
+ docker exec "$MINIO_CONTAINER" mc mb myminio/cache
- name: Run storage tests
run: npm run storage-tests
- name: Final cleanup
if: always()
run: docker rm -f "$MINIO_CONTAINER" || true
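The bucket-creation step could also be done from Node instead of the mc CLI. A sketch assuming @aws-sdk/client-s3 is installed (not something this workflow itself does; the endpoint, credentials, and bucket name mirror the MinIO settings above):

const { S3Client, CreateBucketCommand } = require('@aws-sdk/client-s3');

const client = new S3Client({
  region: 'us-east-1',
  endpoint: `http://${process.env.MINIO_HOST || '127.0.0.1'}:9000`,
  // path-style URLs are what a bare MinIO endpoint expects
  forcePathStyle: true,
  credentials: { accessKeyId: 'minioadmin', secretAccessKey: 'minioadmin' }
});

client.send(new CreateBucketCommand({ Bucket: 'cache' }))
  .then(() => console.log('Bucket "cache" created'))
  .catch(console.error);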

View File

@@ -136,6 +136,16 @@
"MaxKeys": 1000
},
"deleteObject": {}
},
"az": {
"uploadData": {},
"uploadStream": {},
"download": {},
"syncCopyFromURL": {},
"listBlobsFlat": {
"maxPageSize": 1000
},
"deleteBlob": {}
}
},
"urlExpires": 604800,

View File

@@ -73,12 +73,31 @@ function getFilePath(storageCfg, strPath) {
return `${storageFolderName}/${strPath}`;
}
/**
* @param {Object} baseOptions - Base options object
* @param {Object} storageCfg - Storage configuration
* @param {string} commandType - uploadData, uploadStream, download, etc.
* @returns {Object|undefined} Merged options or undefined if empty
*/
function applyCommandOptions(baseOptions, storageCfg, commandType) {
if (storageCfg.commandOptions.az && storageCfg.commandOptions.az[commandType]) {
const configOptions = storageCfg.commandOptions.az[commandType];
if (configOptions && Object.keys(configOptions).length > 0) {
return {...baseOptions, ...configOptions};
}
}
return Object.keys(baseOptions).length > 0 ? baseOptions : undefined;
}
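// Example (hypothetical values): with baseOptions = {} and
// commandOptions.az.download = {maxRetryRequests: 3}, this returns
// {maxRetryRequests: 3}; config options win on key collisions because they
// are spread last. With an empty config entry and empty baseOptions it
// returns undefined, so the SDK call keeps its own defaults.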
async function listObjectsExec(storageCfg, prefix, output = []) {
const containerClient = getContainerClient(storageCfg);
const storageFolderName = storageCfg.storageFolderName;
const prefixWithFolder = storageFolderName ? `${storageFolderName}/${prefix}` : prefix;
- for await (const blob of containerClient.listBlobsFlat({prefix: prefixWithFolder})) {
+ const baseOptions = {prefix: prefixWithFolder};
+ const listOptions = applyCommandOptions(baseOptions, storageCfg, 'listBlobsFlat');
+ for await (const blob of containerClient.listBlobsFlat(listOptions)) {
const relativePath = storageFolderName ?
blob.name.substring(storageFolderName.length + 1) : blob.name;
output.push(relativePath);
@@ -88,8 +107,11 @@ async function listObjectsExec(storageCfg, prefix, output = []) {
async function deleteObjectsHelp(storageCfg, aKeys) {
const containerClient = getContainerClient(storageCfg);
const deleteOptions = applyCommandOptions({}, storageCfg, 'deleteBlob');
await Promise.all(
- aKeys.map(key => containerClient.deleteBlob(key.Key))
+ aKeys.map(key => {
+ return containerClient.deleteBlob(key.Key, deleteOptions);
+ })
);
}
@@ -101,13 +123,15 @@ async function headObject(storageCfg, strPath) {
async function getObject(storageCfg, strPath) {
const blobClient = getBlobClient(storageCfg, getFilePath(storageCfg, strPath));
- const response = await blobClient.download();
+ const options = applyCommandOptions({}, storageCfg, 'download');
+ const response = await blobClient.download(options);
return await utils.stream2Buffer(response.readableStreamBody);
}
async function createReadStream(storageCfg, strPath) {
const blobClient = getBlobClient(storageCfg, getFilePath(storageCfg, strPath));
- const response = await blobClient.download();
+ const options = applyCommandOptions({}, storageCfg, 'download');
+ const response = await blobClient.download(options);
return {
contentLength: response.contentLength,
readStream: response.readableStreamBody
@@ -117,17 +141,17 @@ async function createReadStream(storageCfg, strPath) {
async function putObject(storageCfg, strPath, buffer, contentLength) {
const blobClient = getBlobClient(storageCfg, getFilePath(storageCfg, strPath));
- const uploadOptions = {
+ const baseOptions = {
blobHTTPHeaders: {
contentType: mime.getType(strPath),
contentDisposition: utils.getContentDisposition(path.basename(strPath))
}
};
const uploadOptions = applyCommandOptions(baseOptions, storageCfg, 'uploadData');
if (buffer instanceof Buffer) {
// Handle Buffer upload
await blobClient.uploadData(buffer, uploadOptions);
} else if (typeof buffer.pipe === 'function') {
// Handle Stream upload
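// the two undefined positional arguments are bufferSize and maxConcurrency; SDK defaults apply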
await blobClient.uploadStream(buffer, undefined, undefined, uploadOptions);
} else {
throw new TypeError('Input must be Buffer or Readable stream');
@@ -138,16 +162,19 @@ async function uploadObject(storageCfg, strPath, filePath) {
const blockBlobClient = getBlobClient(storageCfg, getFilePath(storageCfg, strPath));
const uploadStream = fs.createReadStream(filePath);
const uploadOptions = {
blobHTTPHeaders: {
contentType: mime.getType(strPath),
contentDisposition: utils.getContentDisposition(path.basename(strPath))
}
};
const finalOptions = applyCommandOptions(uploadOptions, storageCfg, 'uploadStream');
await blockBlobClient.uploadStream(
uploadStream,
undefined,
undefined,
- {
- blobHTTPHeaders: {
- contentType: mime.getType(strPath),
- contentDisposition: utils.getContentDisposition(path.basename(strPath))
- }
- }
+ finalOptions
);
}
@@ -162,7 +189,8 @@ async function copyObject(storageCfgSrc, storageCfgDst, sourceKey, destinationKe
expiresOn: new Date(Date.now() + 3600 * 1000)
}, new StorageSharedKeyCredential(storageCfgSrc.accessKeyId, storageCfgSrc.secretAccessKey)).toString();
- await destBlobClient.syncCopyFromURL(`${sourceBlobClient.url}?${sasToken}`);
+ const copyOptions = applyCommandOptions({}, storageCfgDst, 'syncCopyFromURL');
+ await destBlobClient.syncCopyFromURL(`${sourceBlobClient.url}?${sasToken}`, copyOptions);
}
async function listObjects(storageCfg, strPath) {
@@ -171,7 +199,8 @@ async function listObjects(storageCfg, strPath) {
async function deleteObject(storageCfg, strPath) {
const blobClient = getBlobClient(storageCfg, getFilePath(storageCfg, strPath));
- await blobClient.delete();
+ const options = applyCommandOptions({}, storageCfg, 'deleteBlob');
+ await blobClient.delete(options);
}
async function deleteObjects(storageCfg, strPaths) {