
Commit
Merge remote-tracking branch 'origin/w/8.6/improvement/CLDSRV-449/pass_overhead_fields' into w/8.7/improvement/CLDSRV-449/pass_overhead_fields
tmacro committed Oct 19, 2023
2 parents 04091dc + 8d83546 commit 0e47810
Showing 17 changed files with 319 additions and 24 deletions.
6 changes: 6 additions & 0 deletions constants.js
@@ -208,6 +208,12 @@ const constants = {
},
multiObjectDeleteConcurrency: 50,
maxScannedLifecycleListingEntries: 10000,
+ overheadField: [
+     'content-length',
+     'owner-id',
+     'versionId',
+     'isNull',
+ ],
};

module.exports = constants;
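
Every hunk below follows the same pattern: import the shared overheadField list and forward it on the params/options object handed to the metadata layer. A minimal sketch of that pattern, assuming a metadata wrapper with the putObjectMD signature used in this diff (the require paths here are illustrative, not taken from the diff):

// Sketch of the pass-through pattern used throughout this commit.
// Require paths are assumptions for illustration; see the real call
// site in the objectPutPart.js hunk below.
const { overheadField } = require('./constants');
const metadata = require('./lib/metadata/wrapper'); // assumed path

function storePartMetadata(mpuBucketName, partKey, omVal, log, cb) {
    // Forward the shared field list ('content-length', 'owner-id',
    // 'versionId', 'isNull') so the metadata backend can account for
    // these fields as storage overhead.
    const mdParams = { overheadField };
    return metadata.putObjectMD(mpuBucketName, partKey, omVal, mdParams, log, cb);
}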
4 changes: 3 additions & 1 deletion lib/api/apiUtils/object/createAndStoreObject.js
@@ -51,6 +51,7 @@ function _storeInMDandDeleteData(bucketName, dataGetInfo, cipherBundle,
* @param {(object|null)} streamingV4Params - if v4 auth, object containing
* accessKey, signatureFromRequest, region, scopeDate, timestamp, and
* credentialScope (to be used for streaming v4 auth if applicable)
+ * @param {(object|null)} overheadField - fields to be included in metadata overhead
* @param {RequestLogger} log - logger instance
* @param {string} originOp - Origin operation
* @param {function} callback - callback function
@@ -60,7 +61,7 @@ function _storeInMDandDeleteData(bucketName, dataGetInfo, cipherBundle,
*/
function createAndStoreObject(bucketName, bucketMD, objectKey, objMD, authInfo,
canonicalID, cipherBundle, request, isDeleteMarker, streamingV4Params,
- log, originOp, callback) {
+ overheadField, log, originOp, callback) {
const putVersionId = request.headers['x-scal-s3-version-id'];
const isPutVersion = putVersionId || putVersionId === '';

@@ -116,6 +117,7 @@ function createAndStoreObject(bucketName, bucketMD, objectKey, objMD, authInfo,
isDeleteMarker,
replicationInfo: getReplicationInfo(
objectKey, bucketMD, false, size, null, null, authInfo),
+ overheadField,
log,
};

2 changes: 1 addition & 1 deletion lib/api/initiateMultipartUpload.js
@@ -81,7 +81,7 @@ function initiateMultipartUpload(authInfo, request, log, callback) {
if (metaHeaders instanceof Error) {
log.debug('user metadata validation failed', {
error: metaHeaders,
- method: 'createAndStoreObject',
+ method: 'initiateMultipartUpload',
});
return process.nextTick(() => callback(metaHeaders));
}
7 changes: 5 additions & 2 deletions lib/api/multiObjectDelete.js
@@ -26,6 +26,7 @@ const requestUtils = policies.requestUtils;
const { validObjectKeys } = require('../routes/routeVeeam');
const { deleteVeeamCapabilities } = require('../routes/veeam/delete');
const { _bucketRequiresOplogUpdate } = require('./apiUtils/object/deleteObject');
+ const { overheadField } = require('../../constants');

const versionIdUtils = versioning.VersionID;
const { data } = require('../data/wrapper');
@@ -336,6 +337,7 @@ function getObjMetadataAndDelete(authInfo, canonicalID, request,
bucketName, bucket, objMD, versionId, config.nullVersionCompatMode);
const deleteInfo = {};
if (options && options.deleteData) {
+ options.overheadField = overheadField;
deleteInfo.deleted = true;
if (!_bucketRequiresOplogUpdate(bucket)) {
options.doesNotNeedOpogUpdate = true;
@@ -360,8 +362,9 @@ function getObjMetadataAndDelete(authInfo, canonicalID, request,
// This call will create a delete-marker
return createAndStoreObject(bucketName, bucket, entry.key,
objMD, authInfo, canonicalID, null, request,
- deleteInfo.newDeleteMarker, null, log, 's3:ObjectRemoved:DeleteMarkerCreated', (err, result) =>
- callback(err, objMD, deleteInfo, result.versionId));
+ deleteInfo.newDeleteMarker, null, overheadField, log,
+ 's3:ObjectRemoved:DeleteMarkerCreated', (err, result) =>
+ callback(err, objMD, deleteInfo, result.versionId));
},
], (err, objMD, deleteInfo, versionId) => {
if (err === skipError) {
4 changes: 4 additions & 0 deletions lib/api/objectCopy.js
@@ -331,6 +331,10 @@ function objectCopy(authInfo, request, sourceBucket,
dataStoreContext.metaHeaders =
storeMetadataParams.metaHeaders;
}

+ // eslint-disable-next-line no-param-reassign
+ storeMetadataParams.overheadField = constants.overheadField;

let dataLocator;
// If 0 byte object just set dataLocator to empty array
if (!sourceObjMD.location) {
4 changes: 3 additions & 1 deletion lib/api/objectDelete.js
@@ -18,6 +18,7 @@ const { _bucketRequiresOplogUpdate } = require('./apiUtils/object/deleteObject')

const versionIdUtils = versioning.VersionID;
const objectLockedError = new Error('object locked');
+ const { overheadField } = require('../../constants');

/**
* objectDeleteInternal - DELETE an object from a bucket
@@ -187,6 +188,7 @@ function objectDeleteInternal(authInfo, request, log, isExpiration, cb) {
});
}
if (delOptions && delOptions.deleteData) {
+ delOptions.overheadField = overheadField;
if (objectMD.isDeleteMarker) {
// record that we deleted a delete marker to set
// response headers accordingly
@@ -213,7 +215,7 @@
deleteInfo.newDeleteMarker = true;
return createAndStoreObject(bucketName, bucketMD,
objectKey, objectMD, authInfo, canonicalID, null, request,
- deleteInfo.newDeleteMarker, null, log, isExpiration ?
+ deleteInfo.newDeleteMarker, null, overheadField, log, isExpiration ?
's3:LifecycleExpiration:DeleteMarkerCreated' :
's3:ObjectRemoved:DeleteMarkerCreated',
(err, newDelMarkerRes) => {
4 changes: 3 additions & 1 deletion lib/api/objectPut.js
@@ -18,6 +18,8 @@ const { validatePutVersionId } = require('./apiUtils/object/coldStorage');
const { setExpirationHeaders } = require('./apiUtils/object/expirationHeaders');

const writeContinue = require('../utilities/writeContinue');
+ const { overheadField } = require('../../constants');

const versionIdUtils = versioning.VersionID;

/**
@@ -166,7 +168,7 @@ function objectPut(authInfo, request, streamingV4Params, log, callback) {
writeContinue(request, request._response);
return createAndStoreObject(bucketName,
bucket, objectKey, objMD, authInfo, canonicalID, cipherBundle,
- request, false, streamingV4Params, log, 's3:ObjectCreated:Put', next);
+ request, false, streamingV4Params, overheadField, log, 's3:ObjectCreated:Put', next);
},
], (err, storingResult) => {
if (err) {
1 change: 1 addition & 0 deletions lib/api/objectPutCopyPart.js
@@ -319,6 +319,7 @@ function objectPutCopyPart(authInfo, request, sourceBucket,
uploadId,
splitter: constants.splitter,
lastModified,
+ overheadField: constants.overheadField,
};
return services.metadataStorePart(mpuBucketName,
locations, metaStoreParams, log, err => {
3 changes: 2 additions & 1 deletion lib/api/objectPutPart.js
@@ -313,7 +313,8 @@ function objectPutPart(authInfo, request, streamingV4Params, log,
'content-md5': hexDigest,
'content-length': size,
};
- return metadata.putObjectMD(mpuBucketName, partKey, omVal, {}, log,
+ const mdParams = { overheadField: constants.overheadField };
+ return metadata.putObjectMD(mpuBucketName, partKey, omVal, mdParams, log,
err => {
if (err) {
log.error('error putting object in mpu bucket', {
1 change: 1 addition & 0 deletions lib/routes/routeBackbeat.js
@@ -550,6 +550,7 @@ function putMetadata(request, response, bucketInfo, objMd, log, callback) {
const options = {
versionId,
isNull,
+ overheadField: constants.overheadField,
};

// NOTE: When 'versioning' is set to true and no 'versionId' is specified,
16 changes: 13 additions & 3 deletions lib/services.js
@@ -110,7 +110,7 @@ const services = {
tagging, taggingCopy, replicationInfo, defaultRetention,
dataStoreName, creationTime, retentionMode, retentionDate,
legalHold, originOp, updateMicroVersionId, archive, oldReplayId,
- deleteNullKey, amzStorageClass } = params;
+ deleteNullKey, amzStorageClass, overheadField } = params;
log.trace('storing object in metadata');
assert.strictEqual(typeof bucketName, 'string');
const md = new ObjectMD();
@@ -208,6 +208,10 @@
options.deleteNullKey = deleteNullKey;
}

+ if (overheadField) {
+     options.overheadField = overheadField;
+ }

// information to store about the version and the null version id
// in the object metadata

@@ -759,7 +763,7 @@ const services = {
metadataStorePart(mpuBucketName, partLocations,
metaStoreParams, log, cb) {
assert.strictEqual(typeof mpuBucketName, 'string');
- const { partNumber, contentMD5, size, uploadId, lastModified, splitter }
+ const { partNumber, contentMD5, size, uploadId, lastModified, splitter, overheadField }
= metaStoreParams;
const dateModified = typeof lastModified === 'string' ?
lastModified : new Date().toJSON();
@@ -775,7 +779,13 @@
'content-md5': contentMD5,
'content-length': size,
};
- metadata.putObjectMD(mpuBucketName, partKey, omVal, {}, log, err => {
+
+ const params = {};
+ if (overheadField) {
+     params.overheadField = overheadField;
+ }
+
+ metadata.putObjectMD(mpuBucketName, partKey, omVal, params, log, err => {
if (err) {
log.error('error from metadata', { error: err });
return cb(err);
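
The conditional copy in both services.js hunks above is deliberate: overheadField is only set on the options object when the caller actually supplied it, so callers that omit the parameter produce exactly the same options as before this change. The same idiom as a standalone sketch (the function name here is hypothetical):

// Pass-through only when present, leaving the options object untouched
// for callers that do not opt in (same idiom as the services.js hunks above).
function buildMetadataOptions(overheadField) {
    const options = {};
    if (overheadField) {
        options.overheadField = overheadField;
    }
    return options;
}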
28 changes: 28 additions & 0 deletions tests/unit/api/multiObjectDelete.js
@@ -12,6 +12,7 @@ const log = new DummyRequestLogger();

const { metadata } = storage.metadata.inMemory.metadata;
const { ds } = storage.data.inMemory.datastore;
+ const metadataswitch = require('../metadataswitch');
const sinon = require('sinon');

const canonicalID = 'accessKey1';
@@ -42,6 +43,7 @@ describe('getObjMetadataAndDelete function for multiObjectDelete', () => {

beforeEach(done => {
cleanup();
+ sinon.spy(metadataswitch, 'deleteObjectMD');
testPutObjectRequest1 = new DummyRequest({
bucketName,
namespace,
@@ -224,6 +226,32 @@
return done();
});
});

+ it('should pass overheadField to metadata', done => {
+     getObjMetadataAndDelete(authInfo, 'foo', request, bucketName, bucket,
+         true, [], [{ key: objectKey1 }, { key: objectKey2 }], log,
+         (err, quietSetting, errorResults, numOfObjects) => {
+             assert.ifError(err);
+             assert.strictEqual(numOfObjects, 2);
+             sinon.assert.calledWith(
+                 metadataswitch.deleteObjectMD,
+                 bucketName,
+                 objectKey1,
+                 sinon.match({ overheadField: sinon.match.array }),
+                 sinon.match.any,
+                 sinon.match.any
+             );
+             sinon.assert.calledWith(
+                 metadataswitch.deleteObjectMD,
+                 bucketName,
+                 objectKey2,
+                 sinon.match({ overheadField: sinon.match.array }),
+                 sinon.match.any,
+                 sinon.match.any
+             );
+             done();
+         });
+ });
});

describe('initializeMultiObjectDeleteWithBatchingSupport', () => {
60 changes: 60 additions & 0 deletions tests/unit/api/multipartUpload.js
@@ -4,6 +4,7 @@ const assert = require('assert');
const async = require('async');
const crypto = require('crypto');
const moment = require('moment');
+ const sinon = require('sinon');
const { parseString } = require('xml2js');

const { bucketPut } = require('../../../lib/api/bucketPut');
@@ -22,6 +23,8 @@ const multipartDelete = require('../../../lib/api/multipartDelete');
const objectPutPart = require('../../../lib/api/objectPutPart');
const DummyRequest = require('../DummyRequest');
const changeObjectLock = require('../../utilities/objectLock-util');
+ const metadataswitch = require('../metadataswitch');
+

const { metadata } = storage.metadata.inMemory.metadata;
const metadataBackend = storage.metadata.inMemory.metastore;
@@ -2206,3 +2209,60 @@
});
});
});

+ describe('multipart upload overheadField', () => {
+     const any = sinon.match.any;
+
+     beforeEach(() => {
+         cleanup();
+         sinon.spy(metadataswitch, 'putObjectMD');
+     });
+
+     after(() => {
+         metadataswitch.putObjectMD.restore();
+         cleanup();
+     });
+
+     it('should pass overheadField', done => {
+         async.waterfall([
+             next => bucketPut(authInfo, bucketPutRequest, log, next),
+             (corsHeaders, next) => initiateMultipartUpload(authInfo,
+                 initiateRequest, log, next),
+             (result, corsHeaders, next) => {
+                 const mpuKeys = metadata.keyMaps.get(mpuBucket);
+                 assert.strictEqual(mpuKeys.size, 1);
+                 assert(mpuKeys.keys().next().value
+                     .startsWith(`overview${splitter}${objectKey}`));
+                 parseString(result, next);
+             },
+         ],
+         (err, json) => {
+             // Need to build request in here since do not have uploadId
+             // until here
+             assert.ifError(err);
+             const testUploadId = json.InitiateMultipartUploadResult.UploadId[0];
+             const md5Hash = crypto.createHash('md5');
+             const bufferBody = Buffer.from(postBody);
+             md5Hash.update(bufferBody);
+             const calculatedHash = md5Hash.digest('hex');
+             const partRequest = new DummyRequest({
+                 bucketName,
+                 objectKey,
+                 namespace,
+                 url: `/${objectKey}?partNumber=1&uploadId=${testUploadId}`,
+                 headers: { host: `${bucketName}.s3.amazonaws.com` },
+                 query: {
+                     partNumber: '1',
+                     uploadId: testUploadId,
+                 },
+                 calculatedHash,
+             }, postBody);
+             objectPutPart(authInfo, partRequest, undefined, log, err => {
+                 assert.ifError(err);
+                 sinon.assert.calledWith(metadataswitch.putObjectMD.lastCall,
+                     any, any, any, sinon.match({ overheadField: sinon.match.array }), any, any);
+                 done();
+             });
+         });
+     });
+ });
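
Both new test suites assert on the spied metadata call with sinon matchers instead of exact arguments. A self-contained sketch of that technique (the fakeMetadata object is hypothetical, standing in for the metadataswitch wrapper):

const sinon = require('sinon');

// Hypothetical stand-in for the metadata layer: only the call shape matters.
const fakeMetadata = {
    putObjectMD: (bucket, key, omVal, params, log, cb) => cb(null),
};

const spy = sinon.spy(fakeMetadata, 'putObjectMD');
fakeMetadata.putObjectMD('bucket', 'key', {},
    { overheadField: ['content-length'] }, null, () => {});

// sinon.match({ overheadField: sinon.match.array }) checks that the params
// argument has an overheadField property whose value is an array, without
// constraining any other property of the object.
sinon.assert.calledWith(spy, 'bucket', 'key', sinon.match.any,
    sinon.match({ overheadField: sinon.match.array }),
    sinon.match.any, sinon.match.any);

spy.restore();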