Example #1
    def test_check_object_integrity(self, mcg_obj, awscli_pod, bucket_factory):
        """
        Test object integrity using md5sum
        """
        bucketname = bucket_factory(1)[0].name
        original_dir = "/original"
        result_dir = "/result"
        awscli_pod.exec_cmd_on_pod(command=f'mkdir {result_dir}')
        # Retrieve a list of all objects in the test-objects bucket and
        # download them to the pod
        full_object_path = f"s3://{bucketname}"
        downloaded_files = retrieve_test_objects_to_pod(
            awscli_pod, original_dir)
        # Write all downloaded objects to the new bucket
        sync_object_directory(awscli_pod, original_dir, full_object_path,
                              mcg_obj)

        # Retrieve all objects from MCG bucket to result dir in Pod
        logger.info('Downloading all objects from MCG bucket to awscli pod')
        sync_object_directory(awscli_pod, full_object_path, result_dir,
                              mcg_obj)

        # Checksum is compared between original and result object
        for obj in downloaded_files:
            assert verify_s3_object_integrity(
                original_object_path=f'{original_dir}/{obj}',
                result_object_path=f'{result_dir}/{obj}',
                awscli_pod=awscli_pod
            ), 'Checksum comparison between original and result object failed'
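
Every example in this collection delegates the actual check to verify_s3_object_integrity. Below is a minimal sketch of how such a helper can be built, assuming the pod object exposes exec_cmd_on_pod returning the command's stdout as a string (as the call sites above suggest); the upstream implementation may differ in details.

import logging

logger = logging.getLogger(__name__)


def verify_s3_object_integrity(original_object_path, result_object_path, awscli_pod):
    """
    Sketch: compare md5 digests of two files inside the awscli pod.
    Returns True when the digests match, False otherwise.
    """
    # md5sum prints "<digest>  <path>" per file; keep only the digest field
    output = awscli_pod.exec_cmd_on_pod(
        command=f"md5sum {original_object_path} {result_object_path}",
        out_yaml_format=False,
    )
    digests = [line.split()[0] for line in output.strip().splitlines()]
    if len(digests) == 2 and digests[0] == digests[1]:
        logger.info("md5 digests match")
        return True
    logger.warning("md5 digests do not match")
    return False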
Example #2
    def test_check_object_integrity(self, awscli_pod, rgw_bucket_factory):
        """
        Test object integrity using md5sum
        """
        bucketname = rgw_bucket_factory(1, "rgw-oc")[0].name
        obc_obj = OBC(bucketname)
        original_dir = "/original"
        result_dir = "/result"
        awscli_pod.exec_cmd_on_pod(command=f"mkdir {result_dir}")
        # Retrieve a list of all objects in the test-objects bucket and
        # download them to the pod
        full_object_path = f"s3://{bucketname}"
        downloaded_files = retrieve_test_objects_to_pod(awscli_pod, original_dir)
        # Write all downloaded objects to the new bucket
        sync_object_directory(awscli_pod, original_dir, full_object_path, obc_obj)

        logger.info("Downloading all objects from RGW bucket to awscli pod")
        sync_object_directory(awscli_pod, full_object_path, result_dir, obc_obj)

        # Checksum is compared between original and result object
        for obj in downloaded_files:
            assert verify_s3_object_integrity(
                original_object_path=f"{original_dir}/{obj}",
                result_object_path=f"{result_dir}/{obj}",
                awscli_pod=awscli_pod,
            ), "Checksum comparision between original and result object failed"
Example #3
    def test_check_object_integrity(
        self,
        mcg_obj,
        awscli_pod,
        bucket_factory,
        bucketclass_dict,
        test_directory_setup,
    ):
        """
        Test object integrity using md5sum
        """
        bucketname = bucket_factory(1, bucketclass=bucketclass_dict)[0].name
        original_dir = test_directory_setup.origin_dir
        result_dir = test_directory_setup.result_dir
        # Retrieve a list of all objects in the test-objects bucket and
        # download them to the pod
        full_object_path = f"s3://{bucketname}"
        downloaded_files = retrieve_test_objects_to_pod(awscli_pod, original_dir)
        # Write all downloaded objects to the new bucket
        sync_object_directory(awscli_pod, original_dir, full_object_path, mcg_obj)
        # Retrieve all objects from MCG bucket to result dir in Pod
        logger.info("Downloading all objects from MCG bucket to awscli pod")
        sync_object_directory(awscli_pod, full_object_path, result_dir, mcg_obj)

        # Checksum is compared between original and result object
        for obj in downloaded_files:
            assert verify_s3_object_integrity(
                original_object_path=f"{original_dir}/{obj}",
                result_object_path=f"{result_dir}/{obj}",
                awscli_pod=awscli_pod,
            ), "Checksum comparison between original and result object failed"
Example #4
    def test_check_object_integrity(self, rgw_endpoint, awscli_pod_session,
                                    rgw_bucket_factory, test_directory_setup):
        """
        Test object integrity using md5sum
        """
        bucketname = rgw_bucket_factory(1, "rgw-oc")[0].name
        obc_obj = OBC(bucketname)
        original_dir = AWSCLI_TEST_OBJ_DIR
        result_dir = test_directory_setup.result_dir
        full_object_path = f"s3://{bucketname}"
        downloaded_files = awscli_pod_session.exec_cmd_on_pod(
            f"ls -A1 {original_dir}").split(" ")
        # Write all downloaded objects to the new bucket
        sync_object_directory(awscli_pod_session, original_dir,
                              full_object_path, obc_obj)

        logger.info("Downloading all objects from RGW bucket to awscli pod")
        sync_object_directory(awscli_pod_session, full_object_path, result_dir,
                              obc_obj)

        # Checksum is compared between original and result object
        for obj in downloaded_files:
            assert verify_s3_object_integrity(
                original_object_path=f"{original_dir}/{obj}",
                result_object_path=f"{result_dir}/{obj}",
                awscli_pod=awscli_pod_session,
            ), "Checksum comparision between original and result object failed"
Example #5
    def test_check_multi_object_integrity(self, mcg_obj, awscli_pod,
                                          bucket_factory, amount, file_type):
        """
        Test write multiple files to bucket and check integrity
        """
        original_dir = "/original"
        result_dir = "/result"
        if file_type == 'large':
            public_bucket = PUBLIC_BUCKET
            obj_key = LARGE_FILE_KEY
        elif file_type == 'small':
            public_bucket = constants.TEST_FILES_BUCKET
            obj_key = 'random1.txt'
        elif file_type == 'large_small':
            public_bucket = PUBLIC_BUCKET
            obj_key = LARGE_FILE_KEY.rsplit('/', 1)[0]

        # Download the file to pod
        awscli_pod.exec_cmd_on_pod(
            command=f'mkdir {original_dir} {result_dir}')
        public_s3_client = retrieve_anon_s3_resource().meta.client
        download_files = []
        # Use obj_key as a prefix to download multiple files for the
        # large_small case; this also works with a single file
        for obj in public_s3_client.list_objects(
                Bucket=public_bucket, Prefix=obj_key).get('Contents'):
            # Skip any extra files when the file type is 'large'
            if file_type == 'large' and obj["Key"] != obj_key:
                continue
            logger.info(
                f'Downloading {obj["Key"]} from AWS bucket {public_bucket}')
            download_obj_cmd = f'cp s3://{public_bucket}/{obj["Key"]} {original_dir}'
            awscli_pod.exec_cmd_on_pod(
                command=craft_s3_command(download_obj_cmd),
                out_yaml_format=False)
            download_files.append(obj['Key'].split('/')[-1])

        # Write downloaded objects to the new bucket and check integrity
        bucketname = bucket_factory(1)[0].name
        base_path = f"s3://{bucketname}"
        for i in range(amount):
            full_object_path = base_path + f"/{i}/"
            sync_object_directory(awscli_pod, original_dir, full_object_path,
                                  mcg_obj)

            # Retrieve all objects from MCG bucket to result dir in Pod
            logger.info('Downloading objects from MCG bucket to awscli pod')
            sync_object_directory(awscli_pod, full_object_path, result_dir,
                                  mcg_obj)

            # Checksum is compared between original and result object
            for obj in download_files:
                assert verify_s3_object_integrity(
                    original_object_path=f'{original_dir}/{obj}',
                    result_object_path=f'{result_dir}/{obj}',
                    awscli_pod=awscli_pod
                ), ('Checksum comparison between original and result object '
                    'failed')
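
Example #5 pulls its source files from a public AWS bucket through retrieve_anon_s3_resource(). With boto3 that is typically just an unsigned client; a sketch of what the helper plausibly wraps:

import boto3
from botocore import UNSIGNED
from botocore.client import Config


def retrieve_anon_s3_resource():
    """
    Sketch: an anonymous (unsigned) boto3 S3 resource. With
    signature_version=UNSIGNED no credentials are attached, which is
    sufficient for listing and reading public buckets.
    """
    return boto3.resource("s3", config=Config(signature_version=UNSIGNED))


# Usage mirrors the example: .meta.client exposes the low-level client
# public_s3_client = retrieve_anon_s3_resource().meta.client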
Example #6
    def compare_dirs(self, awscli_pod, amount=1):
        # Checksum is compared between original and result object
        for i in range(amount):
            file_name = f"testfile{i}.txt"
            assert verify_s3_object_integrity(
                original_object_path=f'{self.MCG_NS_ORIGINAL_DIR}/{file_name}',
                result_object_path=f'{self.MCG_NS_RESULT_DIR}/{file_name}',
                awscli_pod=awscli_pod
            ), 'Checksum comparison between original and result object failed'
Example #7
    def test_multipart_upload_operations(self, rgw_endpoint,
                                         awscli_pod_session,
                                         rgw_bucket_factory,
                                         test_directory_setup):
        """
        Test multipart upload operations on a bucket and verify the integrity of the downloaded object
        """
        bucket, key, origin_dir, res_dir, object_path, parts = setup(
            awscli_pod_session, rgw_bucket_factory, test_directory_setup)
        bucketname = bucket.name
        bucket = OBC(bucketname)

        # Abort all Multipart Uploads for this Bucket (optional, for starting over)
        logger.info(f"Aborting any Multipart Upload on bucket:{bucketname}")
        abort_all_multipart_upload(bucket, bucketname, key)

        # Create & list Multipart Upload on the Bucket
        logger.info(
            f"Initiating Multipart Upload on Bucket: {bucketname} with Key {key}"
        )
        upload_id = create_multipart_upload(bucket, bucketname, key)
        logger.info(
            f"Listing the Multipart Upload: {list_multipart_upload(bucket, bucketname)}"
        )

        # Uploading individual parts to the Bucket
        logger.info(f"Uploading individual parts to the bucket {bucketname}")
        uploaded_parts = upload_parts(bucket, awscli_pod_session, bucketname,
                                      key, res_dir, upload_id, parts)

        # Listing the Uploaded parts
        logger.info(
            f"Listing the individual parts: {list_uploaded_parts(bucket, bucketname, key, upload_id)}"
        )

        # Completing the Multipart Upload
        logger.info(f"Completing the Multipart Upload on bucket: {bucketname}")
        logger.info(
            complete_multipart_upload(bucket, bucketname, key, upload_id,
                                      uploaded_parts))

        # Checksum Validation: Downloading the object after completing Multipart Upload and verifying its integrity
        logger.info(
            "Downloading the completed multipart object from the RGW bucket to the awscli pod"
        )
        sync_object_directory(awscli_pod_session, object_path, res_dir, bucket)
        assert verify_s3_object_integrity(
            original_object_path=f"{origin_dir}/{key}",
            result_object_path=f"{res_dir}/{key}",
            awscli_pod=awscli_pod_session,
        ), "Checksum comparision between original and result object failed"
Example #8
def test_fill_bucket(
    mcg_obj_session, awscli_pod_session, multiregion_mirror_setup_session
):
    """
    Test multi-region bucket creation using the S3 SDK. Fill the bucket for
    upgrade testing.
    """

    (bucket, created_backingstores) = multiregion_mirror_setup_session

    mcg_bucket_path = f"s3://{bucket.name}"

    # Download test objects from the public bucket
    awscli_pod_session.exec_cmd_on_pod(command=f"mkdir {LOCAL_TESTOBJS_DIR_PATH}")
    DOWNLOADED_OBJS = retrieve_test_objects_to_pod(
        awscli_pod_session, LOCAL_TESTOBJS_DIR_PATH
    )

    logger.info("Uploading all pod objects to MCG bucket")

    # Upload test objects to the NooBucket 3 times
    for i in range(3):
        sync_object_directory(
            awscli_pod_session,
            LOCAL_TESTOBJS_DIR_PATH,
            f"{mcg_bucket_path}/{i}/",
            mcg_obj_session,
        )

    mcg_obj_session.check_if_mirroring_is_done(bucket.name, timeout=420)
    bucket.verify_health()

    # Retrieve all objects from MCG bucket to result dir in Pod
    sync_object_directory(
        awscli_pod_session, mcg_bucket_path, LOCAL_TEMP_PATH, mcg_obj_session
    )

    bucket.verify_health()

    # Checksum is compared between original and result object
    for obj in DOWNLOADED_OBJS:
        for i in range(3):
            assert verify_s3_object_integrity(
                original_object_path=f"{LOCAL_TESTOBJS_DIR_PATH}/{obj}",
                result_object_path=f"{LOCAL_TEMP_PATH}/{i}/{obj}",
                awscli_pod=awscli_pod_session,
            ), "Checksum comparison between original and result object failed"
    bucket.verify_health()
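
sync_object_directory is the workhorse in all of these tests. A sketch of how it can be realized on top of the pod's exec_cmd_on_pod; the credential attribute names on the S3 object are hypothetical here, and the real helper may assemble the command differently:

def sync_object_directory(podobj, src, target, s3_obj=None):
    """
    Sketch: run `aws s3 sync` inside the pod to copy everything between
    a local directory and an s3:// path (either argument may be the
    s3:// side).
    """
    command = f"aws s3 sync {src} {target} --no-progress"
    if s3_obj is not None:
        # Hypothetical attribute names for the credentials and endpoint
        command = (
            f"AWS_ACCESS_KEY_ID={s3_obj.access_key_id} "
            f"AWS_SECRET_ACCESS_KEY={s3_obj.access_key} "
            f"{command} --endpoint-url {s3_obj.s3_endpoint}"
        )
    else:
        # No credentials given: fall back to anonymous access
        command = f"{command} --no-sign-request"
    podobj.exec_cmd_on_pod(command=f'sh -c "{command}"', out_yaml_format=False)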
Example #9
    def compare_dirs(self, awscli_pod, amount=1):
        # Checksum is compared between original and result object
        result = True
        for i in range(amount):
            file_name = f"testfile{i}.txt"
            original_object_path = f"{self.MCG_NS_ORIGINAL_DIR}/{file_name}"
            result_object_path = f"{self.MCG_NS_RESULT_DIR}/{file_name}"
            if not verify_s3_object_integrity(
                original_object_path=original_object_path,
                result_object_path=result_object_path,
                awscli_pod=awscli_pod,
            ):
                logger.warning(f"Checksum comparison between original object "
                               f"{original_object_path} and result object "
                               f"{result_object_path} failed")
                result = False
        return result
Example #10
    def test_multipart_upload_operations(self, mcg_obj, awscli_pod,
                                         bucket_factory):
        """
        Test multipart upload operations on a bucket and verify the integrity of the downloaded object
        """
        bucket, key, origin_dir, res_dir, object_path, parts = setup(
            awscli_pod, bucket_factory)

        # Abort all Multipart Uploads for this Bucket (optional, for starting over)
        logger.info(f'Aborting any Multipart Upload on bucket: {bucket}')
        abort_all_multipart_upload(mcg_obj, bucket, key)

        # Create & list Multipart Upload on the Bucket
        logger.info(
            f'Initiating Multipart Upload on Bucket: {bucket} with Key {key}')
        upload_id = create_multipart_upload(mcg_obj, bucket, key)
        logger.info(
            f'Listing the Multipart Upload: {list_multipart_upload(mcg_obj, bucket)}'
        )

        # Uploading individual parts to the Bucket
        logger.info(f'Uploading individual parts to the bucket {bucket}')
        uploaded_parts = upload_parts(mcg_obj, awscli_pod, bucket, key,
                                      res_dir, upload_id, parts)

        # Listing the Uploaded parts
        logger.info(
            f'Listing the individual parts: {list_uploaded_parts(mcg_obj, bucket, key, upload_id)}'
        )

        # Completing the Multipart Upload
        logger.info(f'Completing the Multipart Upload on bucket: {bucket}')
        logger.info(
            complete_multipart_upload(mcg_obj, bucket, key, upload_id,
                                      uploaded_parts))

        # Checksum Validation: Downloading the object after completing Multipart Upload and verifying its integrity
        logger.info(
            'Downloading the completed multipart object from MCG bucket to awscli pod'
        )
        sync_object_directory(awscli_pod, object_path, res_dir, mcg_obj)
        assert verify_s3_object_integrity(
            original_object_path=f'{origin_dir}/{key}',
            result_object_path=f'{res_dir}/{key}',
            awscli_pod=awscli_pod
        ), 'Checksum comparison between original and result object failed'
Example #11
def test_fill_bucket(mcg_obj_session, awscli_pod_session,
                     multiregion_mirror_setup_session):
    """
    Test multi-region bucket creation using the S3 SDK. Fill the bucket for
    upgrade testing.
    """

    (bucket, created_backingstores) = multiregion_mirror_setup_session

    mcg_bucket_path = f's3://{bucket.name}'

    # Download test objects from the public bucket
    awscli_pod_session.exec_cmd_on_pod(
        command=f'mkdir {LOCAL_TESTOBJS_DIR_PATH}')
    DOWNLOADED_OBJS = retrieve_test_objects_to_pod(awscli_pod_session,
                                                   LOCAL_TESTOBJS_DIR_PATH)

    logger.info('Uploading all pod objects to MCG bucket')

    # Upload test objects to the NooBucket 3 times
    for i in range(3):
        sync_object_directory(awscli_pod_session, LOCAL_TESTOBJS_DIR_PATH,
                              f'{mcg_bucket_path}/{i}/', mcg_obj_session)

    mcg_obj_session.check_if_mirroring_is_done(bucket.name)
    assert bucket.status == constants.STATUS_BOUND

    # Retrieve all objects from MCG bucket to result dir in Pod
    sync_object_directory(awscli_pod_session, mcg_bucket_path, LOCAL_TEMP_PATH,
                          mcg_obj_session)

    assert bucket.status == constants.STATUS_BOUND

    # Checksum is compared between original and result object
    for obj in DOWNLOADED_OBJS:
        for i in range(3):
            assert verify_s3_object_integrity(
                original_object_path=f'{LOCAL_TESTOBJS_DIR_PATH}/{obj}',
                result_object_path=f'{LOCAL_TEMP_PATH}/{i}/{obj}',
                awscli_pod=awscli_pod_session
            ), 'Checksum comparison between original and result object failed'
    assert bucket.status == constants.STATUS_BOUND
Example #12
def test_noobaa_postupgrade(mcg_obj_session, awscli_pod_session,
                            multiregion_mirror_setup_session):
    """
    Check bucket data and remove resources created in 'test_fill_bucket'.
    """

    (bucket, created_backingstores) = multiregion_mirror_setup_session
    backingstore1 = created_backingstores[0]
    backingstore2 = created_backingstores[1]
    mcg_bucket_path = f"s3://{bucket.name}"

    # Checksum is compared between original and result object
    for obj in DOWNLOADED_OBJS:
        assert verify_s3_object_integrity(
            original_object_path=f"{LOCAL_TESTOBJS_DIR_PATH}/{obj}",
            result_object_path=f"{LOCAL_TEMP_PATH}/{obj}",
            awscli_pod=awscli_pod_session,
        ), "Checksum comparision between original and result object failed"

    bucket.verify_health()

    # Clean up the temp dir
    awscli_pod_session.exec_cmd_on_pod(
        command=f'sh -c "rm -rf {LOCAL_TEMP_PATH}/*"')

    mcg_obj_session.check_backingstore_state("backing-store-" +
                                             backingstore1.name,
                                             BS_OPTIMAL,
                                             timeout=360)
    mcg_obj_session.check_backingstore_state("backing-store-" +
                                             backingstore2.name,
                                             BS_OPTIMAL,
                                             timeout=360)

    bucket.verify_health()

    # Verify integrity of A
    # Retrieve all objects from MCG bucket to result dir in Pod
    sync_object_directory(awscli_pod_session, mcg_bucket_path, LOCAL_TEMP_PATH,
                          mcg_obj_session)
    bucket.verify_health()
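
check_backingstore_state(..., BS_OPTIMAL, timeout=360) is a wait-until-state call. The underlying pattern is ordinary polling with a deadline; a generic sketch (names here are illustrative, not the upstream API):

import time


def wait_for_state(get_state, desired_state, timeout=360, sleep=10):
    """
    Sketch: poll a zero-argument callable until it returns the desired
    state or the timeout expires.
    """
    deadline = time.time() + timeout
    while time.time() < deadline:
        if get_state() == desired_state:
            return True
        time.sleep(sleep)
    raise TimeoutError(f"State did not reach {desired_state} in {timeout}s")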
Example #13
def test_noobaa_postupgrade(mcg_obj_session, awscli_pod_session,
                            multiregion_mirror_setup_session):
    """
    Check bucket data and remove resources created in 'test_fill_bucket'.
    """

    (bucket, created_backingstores) = multiregion_mirror_setup_session
    backingstore1 = created_backingstores[0]
    backingstore2 = created_backingstores[1]
    mcg_bucket_path = f's3://{bucket.name}'

    # Checksum is compared between original and result object
    for obj in DOWNLOADED_OBJS:
        assert verify_s3_object_integrity(
            original_object_path=f'{LOCAL_TESTOBJS_DIR_PATH}/{obj}',
            result_object_path=f'{LOCAL_TEMP_PATH}/{obj}',
            awscli_pod=awscli_pod_session
        ), 'Checksum comparison between original and result object failed'

    assert bucket.status == constants.STATUS_BOUND

    # Clean up the temp dir
    awscli_pod_session.exec_cmd_on_pod(
        command=f'sh -c "rm -rf {LOCAL_TEMP_PATH}/*"')

    mcg_obj_session.check_backingstore_state('backing-store-' +
                                             backingstore1['name'],
                                             BS_OPTIMAL,
                                             timeout=360)
    mcg_obj_session.check_backingstore_state('backing-store-' +
                                             backingstore2['name'],
                                             BS_OPTIMAL,
                                             timeout=360)

    assert bucket.status == constants.STATUS_BOUND

    # Verify integrity of A
    # Retrieve all objects from MCG bucket to result dir in Pod
    sync_object_directory(awscli_pod_session, mcg_bucket_path, LOCAL_TEMP_PATH,
                          mcg_obj_session)
    assert bucket.status == constants.STATUS_BOUND
Example #14
    def test_multiregion_mirror(
        self,
        cld_mgr,
        mcg_obj,
        awscli_pod_session,
        multiregion_mirror_setup,
        test_directory_setup,
    ):
        """
        Test multi-region bucket creation using the S3 SDK
        """

        bucket, backingstores = multiregion_mirror_setup
        backingstore1 = backingstores[0]
        backingstore2 = backingstores[1]

        bucket_name = bucket.name
        aws_client = cld_mgr.aws_client

        local_testobjs_dir_path = AWSCLI_TEST_OBJ_DIR
        downloaded_objs = awscli_pod_session.exec_cmd_on_pod(
            f"ls -A1 {local_testobjs_dir_path}").split(" ")

        logger.info("Uploading all pod objects to MCG bucket")
        local_temp_path = test_directory_setup.result_dir
        mcg_bucket_path = f"s3://{bucket_name}"

        # Upload test objects to the NooBucket
        sync_object_directory(awscli_pod_session, local_testobjs_dir_path,
                              mcg_bucket_path, mcg_obj)

        mcg_obj.check_if_mirroring_is_done(bucket_name)

        # Bring bucket A down
        aws_client.toggle_aws_bucket_readwrite(backingstore1.uls_name)
        mcg_obj.check_backingstore_state("backing-store-" + backingstore1.name,
                                         BS_AUTH_FAILED)

        # Verify integrity of B
        # Retrieve all objects from MCG bucket to result dir in Pod
        sync_object_directory(awscli_pod_session, mcg_bucket_path,
                              local_temp_path, mcg_obj)

        # Checksum is compared between original and result object
        for obj in downloaded_objs:
            assert verify_s3_object_integrity(
                original_object_path=f"{local_testobjs_dir_path}/{obj}",
                result_object_path=f"{local_temp_path}/{obj}",
                awscli_pod=awscli_pod_session,
            ), "Checksum comparision between original and result object failed"

        # Clean up the temp dir
        awscli_pod_session.exec_cmd_on_pod(
            command=f'sh -c "rm -rf {local_temp_path}/*"')

        # Bring B down, bring A up
        logger.info("Blocking bucket B")
        aws_client.toggle_aws_bucket_readwrite(backingstore2.uls_name)
        logger.info("Freeing bucket A")
        aws_client.toggle_aws_bucket_readwrite(backingstore1.uls_name,
                                               block=False)
        mcg_obj.check_backingstore_state("backing-store-" + backingstore1.name,
                                         BS_OPTIMAL)
        mcg_obj.check_backingstore_state("backing-store-" + backingstore2.name,
                                         BS_AUTH_FAILED)

        # Verify integrity of A
        # Retrieve all objects from MCG bucket to result dir in Pod
        sync_object_directory(awscli_pod_session, mcg_bucket_path,
                              local_temp_path, mcg_obj)

        # Checksum is compared between original and result object
        for obj in downloaded_objs:
            assert verify_s3_object_integrity(
                original_object_path=f"{local_testobjs_dir_path}/{obj}",
                result_object_path=f"{local_temp_path}/{obj}",
                awscli_pod=awscli_pod_session,
            ), "Checksum comparision between original and result object failed"
        # Bring B up
        aws_client.toggle_aws_bucket_readwrite(backingstore2.uls_name,
                                               block=False)
        mcg_obj.check_backingstore_state("backing-store-" + backingstore2.name,
                                         BS_OPTIMAL)
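
toggle_aws_bucket_readwrite simulates a backing-store outage. One plausible realization, sketched with boto3, is attaching and removing a deny-all bucket policy; the upstream helper may block access differently:

import json


def toggle_aws_bucket_readwrite(s3_client, bucket_name, block=True):
    """
    Sketch: block or unblock S3 access to a bucket via a deny-all
    bucket policy.
    """
    if block:
        policy = {
            "Version": "2012-10-17",
            "Statement": [{
                "Effect": "Deny",
                "Principal": "*",
                "Action": ["s3:GetObject", "s3:PutObject", "s3:ListBucket"],
                "Resource": [
                    f"arn:aws:s3:::{bucket_name}",
                    f"arn:aws:s3:::{bucket_name}/*",
                ],
            }],
        }
        s3_client.put_bucket_policy(Bucket=bucket_name, Policy=json.dumps(policy))
    else:
        s3_client.delete_bucket_policy(Bucket=bucket_name)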
Example #15
    def test_mcg_namespace_mpu_crd(self, mcg_obj, awscli_pod, bucket_factory,
                                   bucketclass_dict):
        """
        Test multipart upload S3 operations on namespace buckets (created by CRDs)
        Validates create, upload, upload part copy, and list parts operations

        """
        ns_buc = bucket_factory(
            amount=1,
            interface=bucketclass_dict["interface"],
            bucketclass=bucketclass_dict,
        )[0]

        ns_bucket = ns_buc.name

        object_path = f"s3://{ns_bucket}"

        logger.info(
            f"Setting up test files for mpu and aborting any mpu on bucket: {ns_bucket}"
        )
        mpu_key, origin_dir, res_dir, parts = multipart_setup(awscli_pod)
        bucket_utils.abort_all_multipart_upload(mcg_obj, ns_bucket, COPY_OBJ)

        # Initiate mpu, Upload part copy, List and Abort operations
        logger.info(
            f"Put object on bucket: {ns_bucket} to create a copy source")
        assert bucket_utils.s3_put_object(s3_obj=mcg_obj,
                                          bucketname=ns_bucket,
                                          object_key=ROOT_OBJ,
                                          data=OBJ_DATA), "Failed: PutObject"
        logger.info(
            f"Initiating mpu on bucket: {ns_bucket} with key {COPY_OBJ}")
        part_copy_id = bucket_utils.create_multipart_upload(
            mcg_obj, ns_bucket, COPY_OBJ)
        list_mpu_res = bucket_utils.list_multipart_upload(s3_obj=mcg_obj,
                                                          bucketname=ns_bucket)
        if (constants.AZURE_PLATFORM not in
                bucketclass_dict["namespace_policy_dict"]["namespacestore_dict"]):
            logger.info(f"Listing in-progress mpu: {list_mpu_res}")
            assert (part_copy_id == list_mpu_res["Uploads"][0]["UploadId"]
                    ), "Invalid UploadId"

        logger.info(f"Uploading a part copy to: {ns_bucket}")
        assert bucket_utils.s3_upload_part_copy(
            s3_obj=mcg_obj,
            bucketname=ns_bucket,
            copy_source=f"/{ns_bucket}/{ROOT_OBJ}",
            object_key=COPY_OBJ,
            part_number=1,
            upload_id=part_copy_id,
        ), "Failed: upload part copy"

        logger.info(
            f"Aborting initiated multipart upload with id: {part_copy_id}")
        assert bucket_utils.abort_multipart(mcg_obj, ns_bucket, COPY_OBJ,
                                            part_copy_id), "Abort failed"

        # Initiate mpu, Upload part, List parts operations
        logger.info(
            f"Initiating Multipart Upload on Bucket: {ns_bucket} with Key: {mpu_key}"
        )
        mp_upload_id = bucket_utils.create_multipart_upload(
            mcg_obj, ns_bucket, mpu_key)

        list_mpu_res = bucket_utils.list_multipart_upload(s3_obj=mcg_obj,
                                                          bucketname=ns_bucket)
        if (constants.AZURE_PLATFORM not in
                bucketclass_dict["namespace_policy_dict"]["namespacestore_dict"]):
            logger.info(f"Listing multipart upload: {list_mpu_res}")
            assert (mp_upload_id == list_mpu_res["Uploads"][0]["UploadId"]
                    ), "Invalid UploadId"

        logger.info(f"Uploading individual parts to the bucket: {ns_bucket}")
        uploaded_parts = bucket_utils.upload_parts(
            mcg_obj=mcg_obj,
            awscli_pod=awscli_pod,
            bucketname=ns_bucket,
            object_key=mpu_key,
            body_path=res_dir,
            upload_id=mp_upload_id,
            uploaded_parts=parts,
        )
        list_parts_res = bucket_utils.list_uploaded_parts(
            s3_obj=mcg_obj,
            bucketname=ns_bucket,
            object_key=mpu_key,
            upload_id=mp_upload_id,
        )
        logger.info(f"Listing individual parts: {list_parts_res['Parts']}")
        for i, ele in enumerate(uploaded_parts):
            assert (ele["PartNumber"] == list_parts_res["Parts"][i]
                    ["PartNumber"]), "Invalid part_number"
            assert ele["ETag"] == list_parts_res["Parts"][i][
                "ETag"], "Invalid ETag"

        logger.info(f"Completing the Multipart Upload on bucket: {ns_bucket}")
        assert bucket_utils.complete_multipart_upload(
            s3_obj=mcg_obj,
            bucketname=ns_bucket,
            object_key=mpu_key,
            upload_id=mp_upload_id,
            parts=uploaded_parts,
        ), "MPU did not complete"

        # Checksum validation after completing MPU
        logger.info(
            f"Downloading the completed multipart object from {ns_bucket} to aws-cli pod"
        )
        bucket_utils.sync_object_directory(podobj=awscli_pod,
                                           src=object_path,
                                           target=res_dir,
                                           s3_obj=mcg_obj)
        assert bucket_utils.verify_s3_object_integrity(
            original_object_path=f"{origin_dir}/{mpu_key}",
            result_object_path=f"{res_dir}/{mpu_key}",
            awscli_pod=awscli_pod,
        ), "Checksum comparision between original and result object failed"
Example #16
    def test_multiregion_mirror(self, mcg_obj_with_aws, awscli_pod,
                                multiregion_mirror_setup):
        """
        Test multi-region bucket creation using the S3 SDK
        """

        bucket, backingstore1, backingstore2 = multiregion_mirror_setup
        bucket_name = bucket.name

        # Download test objects from the public bucket
        downloaded_objs = retrieve_test_objects_to_pod(awscli_pod,
                                                       '/aws/original/')

        logger.info('Uploading all pod objects to MCG bucket')
        local_testobjs_dir_path = '/aws/original'
        local_temp_path = '/aws/temp'
        mcg_bucket_path = f's3://{bucket_name}'

        sync_object_directory(awscli_pod,
                              's3://' + constants.TEST_FILES_BUCKET,
                              local_testobjs_dir_path)

        # Upload test objects to the NooBucket
        sync_object_directory(awscli_pod, local_testobjs_dir_path,
                              mcg_bucket_path, mcg_obj_with_aws)

        mcg_obj_with_aws.check_if_mirroring_is_done(bucket_name)

        # Bring bucket A down
        mcg_obj_with_aws.toggle_aws_bucket_readwrite(backingstore1['name'])
        mcg_obj_with_aws.check_backingstore_state(
            'backing-store-' + backingstore1['name'], BS_AUTH_FAILED)

        # Verify integrity of B
        # Retrieve all objects from MCG bucket to result dir in Pod
        sync_object_directory(awscli_pod, mcg_bucket_path, local_temp_path,
                              mcg_obj_with_aws)

        # Checksum is compared between original and result object
        for obj in downloaded_objs:
            assert verify_s3_object_integrity(
                original_object_path=f'{local_testobjs_dir_path}/{obj}',
                result_object_path=f'{local_temp_path}/{obj}',
                awscli_pod=awscli_pod
            ), 'Checksum comparison between original and result object failed'

        # Clean up the temp dir
        awscli_pod.exec_cmd_on_pod(
            command=f'sh -c "rm -rf {local_temp_path}/*"')

        # Bring B down, bring A up
        logger.info('Blocking bucket B')
        mcg_obj_with_aws.toggle_aws_bucket_readwrite(backingstore2['name'])
        logger.info('Freeing bucket A')
        mcg_obj_with_aws.toggle_aws_bucket_readwrite(backingstore1['name'],
                                                     block=False)
        mcg_obj_with_aws.check_backingstore_state(
            'backing-store-' + backingstore1['name'], BS_OPTIMAL)
        mcg_obj_with_aws.check_backingstore_state(
            'backing-store-' + backingstore2['name'], BS_AUTH_FAILED)

        # Verify integrity of A
        # Retrieve all objects from MCG bucket to result dir in Pod
        sync_object_directory(awscli_pod, mcg_bucket_path, local_temp_path,
                              mcg_obj_with_aws)

        # Checksum is compared between original and result object
        for obj in downloaded_objs:
            assert verify_s3_object_integrity(
                original_object_path=f'{local_testobjs_dir_path}/{obj}',
                result_object_path=f'{local_temp_path}/{obj}',
                awscli_pod=awscli_pod
            ), 'Checksum comparison between original and result object failed'
        # Bring B up
        mcg_obj_with_aws.toggle_aws_bucket_readwrite(backingstore2['name'],
                                                     block=False)
        mcg_obj_with_aws.check_backingstore_state(
            'backing-store-' + backingstore2['name'], BS_OPTIMAL)
Example #17
    def test_unidirectional_bucket_object_change_replication(
        self,
        awscli_pod_session,
        mcg_obj_session,
        bucket_factory,
        source_bucketclass,
        target_bucketclass,
        test_directory_setup,
    ):
        """
        Test unidirectional bucket replication when objects are changed

        """
        target_bucket_name = bucket_factory(
            bucketclass=target_bucketclass)[0].name

        replication_policy = ("basic-replication-rule", target_bucket_name,
                              None)
        source_bucket = bucket_factory(
            1,
            bucketclass=source_bucketclass,
            replication_policy=replication_policy)[0]
        source_bucket_name = source_bucket.name

        origin_dir = test_directory_setup.origin_dir
        target_dir = test_directory_setup.result_dir

        written_random_objects = write_random_test_objects_to_bucket(
            awscli_pod_session,
            source_bucket_name,
            origin_dir,
            amount=3,
            mcg_obj=mcg_obj_session,
        )

        listed_objects = mcg_obj_session.s3_list_all_objects_in_bucket(
            source_bucket_name)

        compare_bucket_object_list(mcg_obj_session, source_bucket_name,
                                   target_bucket_name)

        assert set(written_random_objects) == {
            obj.key
            for obj in listed_objects
        }, "Some of the uploaded objects are missing"

        sync_object_directory(
            awscli_pod_session,
            f"s3://{target_bucket_name}",
            target_dir,
            mcg_obj_session,
        )
        original_obj_sums = []
        obj_sums_after_rewrite = []
        obj_sums_after_rw_and_replication = []

        for i in range(3):
            original_obj_sums.append(
                cal_md5sum(awscli_pod_session, f"{origin_dir}/ObjKey-{i}",
                           True))
            assert verify_s3_object_integrity(
                f"{origin_dir}/ObjKey-{i}",
                f"{target_dir}/ObjKey-{i}",
                awscli_pod_session,
            ), "The uploaded and downloaded objects have different hashes"

        written_random_objects = write_random_test_objects_to_bucket(
            awscli_pod_session,
            source_bucket_name,
            origin_dir,
            amount=4,
            mcg_obj=mcg_obj_session,
        )

        compare_bucket_object_list(mcg_obj_session, source_bucket_name,
                                   target_bucket_name)

        awscli_pod_session.exec_cmd_on_pod(command=f"rm -rf {target_dir}")

        sync_object_directory(
            awscli_pod_session,
            f"s3://{target_bucket_name}",
            target_dir,
            mcg_obj_session,
        )

        for i in range(4):
            obj_sums_after_rewrite.append(
                cal_md5sum(awscli_pod_session, f"{origin_dir}/ObjKey-{i}",
                           True))
            obj_sums_after_rw_and_replication.append(
                cal_md5sum(awscli_pod_session, f"{target_dir}/ObjKey-{i}",
                           True))

        for i in range(3):
            assert (obj_sums_after_rewrite[i] ==
                    obj_sums_after_rw_and_replication[i]
                    ), "Object change was not uploaded/downloaded correctly"
            assert (original_obj_sums[i] !=
                    obj_sums_after_rw_and_replication[i]
                    ), "Object change was not replicated"