Merge remote-tracking branch 'remotes/origin/release/v9.1.0' into feature/linter-mysql

# Conflicts:
#	.github/workflows/azureStorageTests.yml
#	.github/workflows/s3storageTests.yml
#	Common/config/default.json
#	Common/config/log4js/development.json
#	Common/config/log4js/production.json
#	Common/sources/constants.js
#	Common/sources/logger.js
#	Common/sources/moduleReloader.js
#	Common/sources/operationContext.js
#	Common/sources/storage/storage-az.js
#	Common/sources/storage/storage-fs.js
#	Common/sources/utils.js
#	DocService/sources/DocsCoServer.js
#	DocService/sources/ai/aiProxyHandler.js
#	DocService/sources/canvasservice.js
#	DocService/sources/converterservice.js
#	DocService/sources/databaseConnectors/oracleConnector.js
#	DocService/sources/fileuploaderservice.js
#	DocService/sources/wopiClient.js
#	DocService/sources/wopiUtils.js
#	FileConverter/sources/converter.js
This commit is contained in:
Sergey Konovalov
2025-08-31 02:22:56 +03:00
31 changed files with 1113 additions and 903 deletions

View File

@@ -7,25 +7,70 @@ on:
- 'tests/integration/withServerInstance/storage.tests.js'
- 'Common/sources/storage/**'
- 'DocService/sources/routes/static.js'
- '.github/workflows/azureStorageTests.yml'
jobs:
azure-storage-tests:
name: Azure Storage Tests
runs-on: ubuntu-latest
env:
AZURITE_CONTAINER: azurite-${{ github.run_id }}-${{ github.run_attempt }}
steps:
- name: Check out repository code
uses: actions/checkout@v3
- name: Run Azurite docker container
- name: Pre-run cleanup
run: |
docker run --name azurite \
docker rm -f "$AZURITE_CONTAINER" 2>/dev/null || true
- name: Setup and start Azurite
run: |
# Detect network and set network arguments
JOB_NET=$(docker inspect -f '{{range $k,$v := .NetworkSettings.Networks}}{{printf "%s\n" $k}}{{end}}' "$(hostname)" 2>/dev/null | head -n1 || true)
if [ -n "$JOB_NET" ]; then
NETWORK_ARGS="--network $JOB_NET"
else
NETWORK_ARGS=""
fi
# Start Azurite container
docker run --name "$AZURITE_CONTAINER" \
$NETWORK_ARGS \
-p 10000:10000 \
-p 10001:10001 \
-p 10002:10002 \
-d mcr.microsoft.com/azure-storage/azurite \
azurite-blob --blobHost 0.0.0.0 --loose
# Set host based on network configuration
if [ -n "$JOB_NET" ]; then
HOST=$(docker inspect -f '{{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}}' "$AZURITE_CONTAINER")
else
HOST=127.0.0.1
fi
# Wait for Azurite to be ready
echo "Waiting for Azurite at $HOST:10000..."
for i in $(seq 1 15); do
if curl -sS "http://$HOST:10000/" >/dev/null 2>&1; then
echo "Azurite ready"
break
fi
sleep 1
done
# Verify Azurite is running
if ! curl -sS "http://$HOST:10000/" >/dev/null 2>&1; then
echo "Azurite failed to start"
docker logs "$AZURITE_CONTAINER" || true
exit 1
fi
# Export host for subsequent steps
echo "AZURITE_HOST=$HOST" >> "$GITHUB_ENV"
- name: Caching dependencies
uses: actions/setup-node@v3
with:
@@ -42,18 +87,15 @@ jobs:
npm --prefix Common ci
npm --prefix DocService ci
- name: Setup Azure storage test environment
- name: Setup Azure storage environment
run: |
# Wait for Azurite to be ready
sleep 15
# Create Azure storage configuration
cat > Common/config/local.json << 'EOF'
# Create minimal Azure storage configuration
cat > Common/config/local.json << EOF
{
"storage": {
"name": "storage-az",
"region": "",
"endpoint": "http://127.0.0.1:10000/devstoreaccount1",
"endpoint": "http://${AZURITE_HOST:-127.0.0.1}:10000/devstoreaccount1",
"bucketName": "test-container",
"storageFolderName": "files",
"cacheFolderName": "data",
@@ -62,6 +104,18 @@ jobs:
},
"persistentStorage": {
"storageFolderName": "files/persistent"
},
"commandOptions": {
"az": {
"uploadData": {},
"uploadStream": {},
"download": {},
"syncCopyFromURL": {},
"listBlobsFlat": {
"maxPageSize": 1000
},
"deleteBlob": {}
}
}
}
EOF
@@ -76,35 +130,18 @@ jobs:
# Run Node.js script from Common directory where Azure dependencies are installed
cd Common
node -e "
const { BlobServiceClient, StorageSharedKeyCredential } = require('@azure/storage-blob');
async function setupContainer() {
try {
const accountName = 'devstoreaccount1';
const accountKey = 'Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw==';
const endpoint = 'http://127.0.0.1:10000/devstoreaccount1';
const credential = new StorageSharedKeyCredential(accountName, accountKey);
const blobServiceClient = new BlobServiceClient(endpoint, credential);
const containerClient = blobServiceClient.getContainerClient('test-container');
console.log('Creating container...');
await containerClient.createIfNotExists();
console.log('Container created successfully');
// Upload a test file if needed
const blockBlobClient = containerClient.getBlockBlobClient('testfile.txt');
await blockBlobClient.upload('Test content', Buffer.byteLength('Test content'));
console.log('Test file uploaded');
} catch (error) {
console.error('Error setting up Azure storage:', error);
process.exit(1);
}
}
setupContainer();
(async () => {
const { BlobServiceClient, StorageSharedKeyCredential } = require('@azure/storage-blob');
const endpoint = 'http://' + (process.env.AZURITE_HOST || '127.0.0.1') + ':10000/devstoreaccount1';
const client = new BlobServiceClient(endpoint, new StorageSharedKeyCredential('devstoreaccount1', 'Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw=='));
await client.getContainerClient('test-container').createIfNotExists();
console.log('Azure environment ready');
})().catch(console.error);
"
- name: Run storage tests
run: npm run storage-tests
- name: Final cleanup
if: always()
run: docker rm -f "$AZURITE_CONTAINER" || true

View File

@@ -45,7 +45,7 @@ jobs:
- name: Creating schema
run: |
docker cp ./schema/oracle/createdb.sql oracle:/
docker exec oracle sqlplus -s onlyoffice/onlyoffice@//localhost/onlyoffice @/createdb.sql
docker exec oracle sqlplus -s onlyoffice/onlyoffice@//localhost/onlyoffice "@/createdb.sql"
- name: Run Jest
run: npm run "integration database tests"

View File

@@ -37,12 +37,13 @@ jobs:
- name: Creating service DB configuration
run: |
echo '{"services": {"CoAuthoring": {"sql": {"dbHost": "127.0.0.1"}}}}' >> Common/config/local.json
echo '{"services": {"CoAuthoring": {"sql": {"dbHost": "127.0.0.1", "pgPoolExtraOptions": {"options": "-c search_path=ci_test"}}}}}' >> Common/config/local.json
- name: Creating schema
run: |
docker exec postgres psql -d onlyoffice -U onlyoffice -c "CREATE SCHEMA IF NOT EXISTS ci_test;"
docker cp ./schema/postgresql/createdb.sql postgres:/
docker exec postgres psql -d onlyoffice -U onlyoffice -a -f /createdb.sql
docker exec postgres bash -c 'PGOPTIONS="-c search_path=ci_test" psql -d onlyoffice -U onlyoffice -a -f /createdb.sql'
- name: Run Jest
run: npm run "integration database tests"

View File

@@ -7,22 +7,69 @@ on:
- 'tests/integration/withServerInstance/storage.tests.js'
- 'Common/sources/storage/**'
- 'DocService/sources/routes/static.js'
- '.github/workflows/s3storageTests.yml'
jobs:
storage-tests:
name: Storage Tests
runs-on: ubuntu-latest
env:
MINIO_CONTAINER: minio-${{ github.run_id }}-${{ github.run_attempt }}
steps:
- name: Run MinIO docker container
- name: Pre-run cleanup
run: |
docker run --name minio \
docker rm -f "$MINIO_CONTAINER" 2>/dev/null || true
# Remove legacy container name if it exists (for self-hosted runners or retries)
docker rm -f minio 2>/dev/null || true
- name: Setup and start MinIO
run: |
# Detect network and set network arguments
JOB_NET=$(docker inspect -f '{{range $k,$v := .NetworkSettings.Networks}}{{printf "%s\n" $k}}{{end}}' "$(hostname)" 2>/dev/null | head -n1 || true)
if [ -n "$JOB_NET" ]; then
NETWORK_ARGS="--network $JOB_NET"
else
NETWORK_ARGS=""
fi
# Start MinIO container
docker run --name "$MINIO_CONTAINER" \
$NETWORK_ARGS \
-p 9000:9000 \
-p 9001:9001 \
-e "MINIO_ROOT_USER=minioadmin" \
-e "MINIO_ROOT_PASSWORD=minioadmin" \
-d minio/minio server /data --console-address ":9001"
# Set host based on network configuration
if [ -n "$JOB_NET" ]; then
HOST=$(docker inspect -f '{{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}}' "$MINIO_CONTAINER")
else
HOST=127.0.0.1
fi
# Wait for MinIO to be ready
echo "Waiting for MinIO at $HOST:9000..."
for i in $(seq 1 15); do
if curl -sS "http://$HOST:9000/minio/health/ready" >/dev/null 2>&1; then
echo "MinIO ready"
break
fi
sleep 1
done
# Verify MinIO is running
if ! curl -sS "http://$HOST:9000/minio/health/ready" >/dev/null 2>&1; then
echo "MinIO failed to start"
docker logs "$MINIO_CONTAINER" || true
exit 1
fi
# Export host for subsequent steps
echo "MINIO_HOST=$HOST" >> "$GITHUB_ENV"
- name: Check out repository code
uses: actions/checkout@v3
@@ -44,11 +91,12 @@ jobs:
- name: Creating storage configuration
run: |
echo '{
cat > Common/config/local.json << EOF
{
"storage": {
"name": "storage-s3",
"region": "us-east-1",
"endpoint": "http://localhost:9000",
"endpoint": "http://${MINIO_HOST:-127.0.0.1}:9000",
"bucketName": "cache",
"storageFolderName": "files",
"commandOptions": {
@@ -66,12 +114,17 @@ jobs:
"persistentStorage": {
"storageFolderName": "files/persistent"
}
}' >> Common/config/local.json
}
EOF
- name: Create MinIO buckets
run: |
docker exec minio mc alias set myminio http://localhost:9000 minioadmin minioadmin
docker exec minio mc mb myminio/cache
docker exec "$MINIO_CONTAINER" mc alias set myminio http://localhost:9000 minioadmin minioadmin
docker exec "$MINIO_CONTAINER" mc mb myminio/cache
- name: Run storage tests
run: npm run storage-tests
- name: Final cleanup
if: always()
run: docker rm -f "$MINIO_CONTAINER" || true