Code Example #1
    def test_check_object_integrity(self, mcg_obj, awscli_pod, bucket_factory):
        """
        Test object integrity using md5sum
        """
        bucketname = bucket_factory(1)[0].name
        original_dir = "/original"
        result_dir = "/result"
        awscli_pod.exec_cmd_on_pod(command=f'mkdir {result_dir}')
        # Retrieve a list of all objects in the test-objects bucket and
        # download them to the pod
        full_object_path = f"s3://{bucketname}"
        downloaded_files = retrieve_test_objects_to_pod(
            awscli_pod, original_dir)
        # Write all downloaded objects to the new bucket
        sync_object_directory(awscli_pod, original_dir, full_object_path,
                              mcg_obj)

        # Retrieve all objects from MCG bucket to result dir in Pod
        logger.info('Downloading all objects from MCG bucket to awscli pod')
        sync_object_directory(awscli_pod, full_object_path, result_dir,
                              mcg_obj)

        # Checksum is compared between original and result object
        for obj in downloaded_files:
            assert verify_s3_object_integrity(
                original_object_path=f'{original_dir}/{obj}',
                result_object_path=f'{result_dir}/{obj}',
                awscli_pod=awscli_pod
            ), 'Checksum comparison between original and result object failed'
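
The integrity checks in these examples all go through verify_s3_object_integrity. As a rough, hypothetical sketch (not the actual ocs-ci implementation), an md5sum-based comparison run inside the awscli pod could look like this:

def verify_md5_match(awscli_pod, original_object_path, result_object_path):
    """Hypothetical sketch: compare md5 digests of two files inside the pod."""
    digests = []
    for path in (original_object_path, result_object_path):
        # `md5sum` prints "<digest>  <path>"; keep only the digest field
        output = awscli_pod.exec_cmd_on_pod(
            command=f"md5sum {path}", out_yaml_format=False
        )
        digests.append(output.split()[0])
    return digests[0] == digests[1]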
Code Example #2
    def test_check_object_integrity(self, rgw_endpoint, awscli_pod_session,
                                    rgw_bucket_factory, test_directory_setup):
        """
        Test object integrity using md5sum
        """
        bucketname = rgw_bucket_factory(1, "rgw-oc")[0].name
        obc_obj = OBC(bucketname)
        original_dir = AWSCLI_TEST_OBJ_DIR
        result_dir = test_directory_setup.result_dir
        full_object_path = f"s3://{bucketname}"
        downloaded_files = awscli_pod_session.exec_cmd_on_pod(
            f"ls -A1 {original_dir}").split(" ")
        # Write all downloaded objects to the new bucket
        sync_object_directory(awscli_pod_session, original_dir,
                              full_object_path, obc_obj)

        logger.info("Downloading all objects from RGW bucket to awscli pod")
        sync_object_directory(awscli_pod_session, full_object_path, result_dir,
                              obc_obj)

        # Checksum is compared between original and result object
        for obj in downloaded_files:
            assert verify_s3_object_integrity(
                original_object_path=f"{original_dir}/{obj}",
                result_object_path=f"{result_dir}/{obj}",
                awscli_pod=awscli_pod_session,
            ), "Checksum comparison between original and result object failed"
Code Example #3
    def test_write_file_to_bucket(
        self,
        mcg_obj,
        awscli_pod,
        bucket_class_factory,
        bucket_factory,
        interface,
        bucketclass_dict,
    ):
        """
        Test object IO using the S3 SDK
        """
        # Retrieve a list of all objects in the test-objects bucket and
        # download them to the pod
        data_dir = "/data"
        bucketname = bucket_factory(1,
                                    interface=interface,
                                    bucketclass=bucketclass_dict)[0].name
        full_object_path = f"s3://{bucketname}"
        downloaded_files = retrieve_test_objects_to_pod(awscli_pod, data_dir)
        # Write all downloaded objects to the new bucket
        sync_object_directory(awscli_pod, data_dir, full_object_path, mcg_obj)

        assert set(downloaded_files).issubset(
            obj.key
            for obj in mcg_obj.s3_list_all_objects_in_bucket(bucketname))
Code Example #4
    def test_bucket_delete_with_objects(self, mcg_obj, interface, awscli_pod):
        """
        Negative test: deleting a bucket that has objects stored in it.
        """
        bucketname = create_unique_resource_name(
            resource_description="bucket", resource_type=interface.lower())
        try:
            bucket = BUCKET_MAP[interface.lower()](bucketname, mcg=mcg_obj)

            logger.info(f"aws s3 endpoint is {mcg_obj.s3_endpoint}")
            logger.info(f"aws region is {mcg_obj.region}")
            data_dir = "/data"
            full_object_path = f"s3://{bucketname}"
            retrieve_test_objects_to_pod(awscli_pod, data_dir)
            sync_object_directory(awscli_pod, data_dir, full_object_path,
                                  mcg_obj)

            logger.info(f"Deleting bucket: {bucketname}")
            if interface == "S3":
                try:
                    s3_del = mcg_obj.s3_resource.Bucket(bucketname).delete()
                    assert not s3_del, "Unexpectedly deleted a non-empty OBC via S3"
                except botocore.exceptions.ClientError as err:
                    assert "BucketNotEmpty" in str(
                        err), "Couldn't verify delete non-empty OBC with s3"
                    logger.info(
                        f"Delete non-empty OBC {bucketname} failed as expected"
                    )
        finally:
            bucket.delete()
Code Example #5
    def test_empty_file_integrity(self, awscli_pod, rgw_bucket_factory):
        """
        Test write empty files to bucket and check integrity
        """
        original_dir = "/data"
        result_dir = "/result"
        bucketname = rgw_bucket_factory(1, "rgw-oc")[0].name
        obc_obj = OBC(bucketname)
        full_object_path = f"s3://{bucketname}"

        # Use touch to create 100 empty files in the pod
        awscli_pod.exec_cmd_on_pod(command=f"mkdir {original_dir} {result_dir}")
        command = "for i in $(seq 1 100); do touch /data/test$i; done"
        awscli_pod.exec_sh_cmd_on_pod(command=command, sh="sh")
        # Write all empty objects to the new bucket
        sync_object_directory(awscli_pod, original_dir, full_object_path, obc_obj)

        # Retrieve all objects from RGW bucket to result dir in Pod
        logger.info("Downloading objects from RGW bucket to awscli pod")
        sync_object_directory(awscli_pod, full_object_path, result_dir, obc_obj)

        # Checksum is compared between original and result object
        original_md5 = awscli_pod.exec_cmd_on_pod(
            f'sh -c "cat {original_dir}/* | md5sum"'
        )
        result_md5 = awscli_pod.exec_cmd_on_pod(
            f'sh -c "cat {result_dir}/* | md5sum"'
        )
        assert (
            original_md5 == result_md5
        ), "Origin and result folders checksum mismatch found"
Code Example #6
    def write_files_to_pod_and_upload(
        self,
        mcg_obj,
        awscli_pod,
        bucket_to_write,
        original_folder,
        amount=1,
        s3_creds=None,
    ):
        """
        Upload files to bucket (NS or uls)
        """
        full_object_path = f"s3://{bucket_to_write}"
        object_list = []

        for i in range(amount):
            file_name = f"testfile{i}.txt"
            object_list.append(file_name)
            awscli_pod.exec_cmd_on_pod(
                f"dd if=/dev/urandom of={original_folder}/{file_name} bs=1M count=1 status=none"
            )
        if s3_creds:
            # Write data directly to target bucket from original dir
            sync_object_directory(
                awscli_pod,
                original_folder,
                full_object_path,
                signed_request_creds=s3_creds,
            )
        else:
            # Write data directly to NS bucket from original dir
            sync_object_directory(
                awscli_pod, original_folder, full_object_path, mcg_obj
            )
        return object_list
Code Example #7
    def test_s3_bucket_delete_1t_objects(self, mcg_obj, awscli_pod_session):
        """
        Test deletion of a bucket that has 1TB of objects stored in it.
        """
        bucketname = create_unique_resource_name(resource_description="bucket",
                                                 resource_type="s3")
        try:
            bucket = MCGS3Bucket(bucketname, mcg_obj)
            logger.info(f"aws s3 endpoint is {mcg_obj.s3_endpoint}")
            logger.info(f"aws region is {mcg_obj.region}")
            data_dir = AWSCLI_TEST_OBJ_DIR

            # Sync the downloaded objects dir to the new bucket under 3175
            # virtual dirs. With each dir around 315MB, 3175 dirs reach the
            # target of 1TB of data.
            logger.info("Writing objects to bucket")
            for i in range(3175):
                full_object_path = f"s3://{bucketname}/{i}/"
                sync_object_directory(awscli_pod_session, data_dir,
                                      full_object_path, mcg_obj)

            # Delete the bucket content using aws s3 rm with the --recursive
            # option. The object_versions.delete function does not work when
            # the object count exceeds 1000.
            start = timeit.default_timer()
            rm_object_recursive(awscli_pod_session, bucketname, mcg_obj)
            bucket.delete()
            stop = timeit.default_timer()
            gap = (stop - start) / 60
            assert gap <= 10, "Failed to delete s3 bucket within 10 minutes"
        finally:
            if mcg_obj.s3_verify_bucket_exists(bucketname):
                rm_object_recursive(awscli_pod_session, bucketname, mcg_obj)
                mcg_obj.s3_resource.Bucket(bucketname).delete()
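
rm_object_recursive above presumably shells out to the AWS CLI's recursive delete, which scales past the 1000-object limit of a single SDK call. A hedged sketch of that kind of call, reusing the craft_s3_command wrapper seen in other examples here:

def rm_bucket_contents_sketch(awscli_pod, bucketname):
    # Illustrative only, not the actual ocs-ci rm_object_recursive
    cmd = craft_s3_command(f"rm s3://{bucketname} --recursive")
    awscli_pod.exec_cmd_on_pod(command=cmd, out_yaml_format=False)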
Code Example #8
File: test_namespace.py  Project: romayalon/ocs-ci
    def write_files_to_pod_and_upload(self,
                                      mcg_obj,
                                      awscli_pod,
                                      bucket_to_write,
                                      amount=1,
                                      s3_creds=None):
        """
        Upload files to bucket (NS or uls)
        """
        awscli_pod.exec_cmd_on_pod(
            command=f"mkdir -p {self.MCG_NS_ORIGINAL_DIR}")
        full_object_path = f"s3://{bucket_to_write}"

        for i in range(amount):
            file_name = f"testfile{i}"
            awscli_pod.exec_cmd_on_pod(
                f"dd if=/dev/urandom of={self.MCG_NS_ORIGINAL_DIR}/{file_name}.txt bs=1M count=1 status=none"
            )
        if s3_creds:
            # Write data directly to target bucket from original dir
            sync_object_directory(
                awscli_pod,
                self.MCG_NS_ORIGINAL_DIR,
                full_object_path,
                signed_request_creds=s3_creds,
            )
        else:
            # Write data directly to NS bucket from original dir
            sync_object_directory(awscli_pod, self.MCG_NS_ORIGINAL_DIR,
                                  full_object_path, mcg_obj)
Code Example #9
    def test_check_object_integrity(self, awscli_pod, rgw_bucket_factory):
        """
        Test object integrity using md5sum
        """
        bucketname = rgw_bucket_factory(1, "rgw-oc")[0].name
        obc_obj = OBC(bucketname)
        original_dir = "/original"
        result_dir = "/result"
        awscli_pod.exec_cmd_on_pod(command=f"mkdir {result_dir}")
        # Retrieve a list of all objects in the test-objects bucket and
        # download them to the pod
        full_object_path = f"s3://{bucketname}"
        downloaded_files = retrieve_test_objects_to_pod(awscli_pod, original_dir)
        # Write all downloaded objects to the new bucket
        sync_object_directory(awscli_pod, original_dir, full_object_path, obc_obj)

        logger.info("Downloading all objects from RGW bucket to awscli pod")
        sync_object_directory(awscli_pod, full_object_path, result_dir, obc_obj)

        # Checksum is compared between original and result object
        for obj in downloaded_files:
            assert verify_s3_object_integrity(
                original_object_path=f"{original_dir}/{obj}",
                result_object_path=f"{result_dir}/{obj}",
                awscli_pod=awscli_pod,
            ), "Checksum comparison between original and result object failed"
Code Example #10
    def test_write_file_to_bucket(
        self,
        mcg_obj,
        awscli_pod_session,
        bucket_class_factory,
        bucket_factory,
        interface,
        bucketclass_dict,
    ):
        """
        Test object IO using the S3 SDK
        """
        # Retrieve a list of all objects in the test-objects bucket and
        # download them to the pod
        bucketname = bucket_factory(
            1, interface=interface, bucketclass=bucketclass_dict
        )[0].name
        full_object_path = f"s3://{bucketname}"
        downloaded_files = awscli_pod_session.exec_cmd_on_pod(
            f"ls -A1 {AWSCLI_TEST_OBJ_DIR}"
        ).split(" ")
        # Write all downloaded objects to the new bucket
        sync_object_directory(
            awscli_pod_session, AWSCLI_TEST_OBJ_DIR, full_object_path, mcg_obj
        )

        assert set(downloaded_files).issubset(
            obj.key for obj in mcg_obj.s3_list_all_objects_in_bucket(bucketname)
        )
Code Example #11
    def test_bucket_delete_with_objects(self, rgw_endpoint, rgw_bucket_factory,
                                        interface, awscli_pod_session):
        """
        Negative test: deleting a bucket that has objects stored in it.
        """
        bucket = rgw_bucket_factory(1, interface)[0]
        bucketname = bucket.name
        obc_obj = OBC(bucketname)
        try:
            data_dir = AWSCLI_TEST_OBJ_DIR
            full_object_path = f"s3://{bucketname}"
            sync_object_directory(awscli_pod_session, data_dir,
                                  full_object_path, obc_obj)

            logger.info(f"Deleting bucket: {bucketname}")
            if interface == "S3":
                try:
                    s3_del = obc_obj.s3_resource.Bucket(bucketname).delete()
                    assert (
                        not s3_del
                    ), "Unexpected issue: Successfully deleted a bucket containing objects via S3"
                except botocore.exceptions.ClientError as err:
                    assert "BucketNotEmpty" in str(
                        err), "Couldn't verify delete non-empty OBC with s3"
                    logger.info(
                        f"Delete non-empty OBC {bucketname} failed as expected"
                    )
        finally:
            bucket.delete()
Code Example #12
    def test_bucket_delete_with_objects(
        self, mcg_obj, awscli_pod, bucket_factory, interface, bucketclass_dict
    ):
        """
        Negative test: deleting a bucket that has objects stored in it.

        """
        bucket = bucket_factory(interface=interface, bucketclass=bucketclass_dict)[0]
        bucketname = bucket.name

        data_dir = "/data"
        full_object_path = f"s3://{bucketname}"
        retrieve_test_objects_to_pod(awscli_pod, data_dir)
        sync_object_directory(awscli_pod, data_dir, full_object_path, mcg_obj)

        logger.info(f"Deleting bucket: {bucketname}")
        if interface == "S3":
            try:
                s3_del = mcg_obj.s3_resource.Bucket(bucketname).delete()
                assert not s3_del, "Unexpectedly deleted a non-empty OBC via S3"
            except botocore.exceptions.ClientError as err:
                assert "BucketNotEmpty" in str(
                    err
                ), "Couldn't verify delete non-empty OBC with s3"
                logger.info(f"Delete non-empty OBC {bucketname} failed as expected")
        # Deletion verification is performed internally as part of delete()
        bucket.delete()
        if bucketclass_dict:
            bucket.bucketclass.delete()
Code Example #13
    def test_check_object_integrity(
        self,
        mcg_obj,
        awscli_pod,
        bucket_factory,
        bucketclass_dict,
        test_directory_setup,
    ):
        """
        Test object integrity using md5sum
        """
        bucketname = bucket_factory(1, bucketclass=bucketclass_dict)[0].name
        original_dir = test_directory_setup.origin_dir
        result_dir = test_directory_setup.result_dir
        # Retrieve a list of all objects in the test-objects bucket and
        # download them to the pod
        full_object_path = f"s3://{bucketname}"
        downloaded_files = retrieve_test_objects_to_pod(awscli_pod, original_dir)
        # Write all downloaded objects to the new bucket
        sync_object_directory(awscli_pod, original_dir, full_object_path, mcg_obj)
        # Retrieve all objects from MCG bucket to result dir in Pod
        logger.info("Downloading all objects from MCG bucket to awscli pod")
        sync_object_directory(awscli_pod, full_object_path, result_dir, mcg_obj)

        # Checksum is compared between original and result object
        for obj in downloaded_files:
            assert verify_s3_object_integrity(
                original_object_path=f"{original_dir}/{obj}",
                result_object_path=f"{result_dir}/{obj}",
                awscli_pod=awscli_pod,
            ), "Checksum comparison between original and result object failed"
Code Example #14
    def test_empty_file_integrity(self, mcg_obj, awscli_pod, bucket_factory):
        """
        Test write empty files to bucket and check integrity
        """
        original_dir = '/data'
        result_dir = "/result"
        bucketname = bucket_factory(1)[0].name
        full_object_path = f"s3://{bucketname}"

        # Use touch to create 100 empty files in the pod
        awscli_pod.exec_cmd_on_pod(
            command=f'mkdir {original_dir} {result_dir}')
        command = "for i in $(seq 1 100); do touch /data/test$i; done"
        awscli_pod.exec_sh_cmd_on_pod(command=command, sh='sh')
        # Write all empty objects to the new bucket
        sync_object_directory(awscli_pod, original_dir, full_object_path,
                              mcg_obj)

        # Retrieve all objects from MCG bucket to result dir in Pod
        logger.info('Downloading objects from MCG bucket to awscli pod')
        sync_object_directory(awscli_pod, full_object_path, result_dir,
                              mcg_obj)

        # Checksum is compared between original and result object
        original_md5 = awscli_pod.exec_cmd_on_pod(
            f'sh -c "cat {original_dir}/* | md5sum"')
        result_md5 = awscli_pod.exec_cmd_on_pod(
            f'sh -c "cat {result_dir}/* | md5sum"')
        assert original_md5 == result_md5
Code Example #15
    def test_check_multi_object_integrity(self, mcg_obj, awscli_pod,
                                          bucket_factory, amount, file_type):
        """
        Test write multiple files to bucket and check integrity
        """
        original_dir = "/original"
        result_dir = "/result"
        if file_type == 'large':
            public_bucket = PUBLIC_BUCKET
            obj_key = LARGE_FILE_KEY
        elif file_type == 'small':
            public_bucket = constants.TEST_FILES_BUCKET
            obj_key = 'random1.txt'
        elif file_type == 'large_small':
            public_bucket = PUBLIC_BUCKET
            obj_key = LARGE_FILE_KEY.rsplit('/', 1)[0]

        # Download the file to pod
        awscli_pod.exec_cmd_on_pod(
            command=f'mkdir {original_dir} {result_dir}')
        public_s3_client = retrieve_anon_s3_resource().meta.client
        download_files = []
        # Use obj_key as prefix to download multiple files for large_small
        # case, it also works with single file
        for obj in public_s3_client.list_objects(
                Bucket=public_bucket, Prefix=obj_key).get('Contents'):
            # Skip the extra file in large file type
            if file_type == 'large' and obj["Key"] != obj_key:
                continue
            logger.info(
                f'Downloading {obj["Key"]} from AWS bucket {public_bucket}')
            download_obj_cmd = f'cp s3://{public_bucket}/{obj["Key"]} {original_dir}'
            awscli_pod.exec_cmd_on_pod(
                command=craft_s3_command(download_obj_cmd),
                out_yaml_format=False)
            download_files.append(obj['Key'].split('/')[-1])

        # Write downloaded objects to the new bucket and check integrity
        bucketname = bucket_factory(1)[0].name
        base_path = f"s3://{bucketname}"
        for i in range(amount):
            full_object_path = base_path + f"/{i}/"
            sync_object_directory(awscli_pod, original_dir, full_object_path,
                                  mcg_obj)

            # Retrieve all objects from MCG bucket to result dir in Pod
            logger.info('Downloading objects from MCG bucket to awscli pod')
            sync_object_directory(awscli_pod, full_object_path, result_dir,
                                  mcg_obj)

            # Checksum is compared between original and result object
            for obj in download_files:
                assert verify_s3_object_integrity(
                    original_object_path=f'{original_dir}/{obj}',
                    result_object_path=f'{result_dir}/{obj}',
                    awscli_pod=awscli_pod
                ), ('Checksum comparison between original and result object '
                    'failed')
Code Example #16
    def test_write_multi_files_to_bucket(self, mcg_obj, awscli_pod,
                                         bucket_factory, amount, file_type):
        """
        Test write multiple files to bucket
        """
        data_dir = "/data"
        if file_type == "large":
            public_bucket = PUBLIC_BUCKET
            obj_key = LARGE_FILE_KEY
        elif file_type == "small":
            public_bucket = constants.TEST_FILES_BUCKET
            obj_key = "random1.txt"
        elif file_type == "large_small":
            public_bucket = PUBLIC_BUCKET
            obj_key = LARGE_FILE_KEY.rsplit("/", 1)[0]

        # Download the file to pod
        awscli_pod.exec_cmd_on_pod(command=f"mkdir {data_dir}")
        public_s3_client = retrieve_anon_s3_resource().meta.client
        download_files = []
        # Use obj_key as prefix to download multiple files for large_small
        # case, it also works with single file
        for obj in public_s3_client.list_objects(
                Bucket=public_bucket, Prefix=obj_key).get("Contents"):
            # Skip the extra file in large file type
            if file_type == "large" and obj["Key"] != obj_key:
                continue
            logger.info(
                f'Downloading {obj["Key"]} from AWS bucket {public_bucket}')
            download_obj_cmd = f'cp s3://{public_bucket}/{obj["Key"]} {data_dir}'
            awscli_pod.exec_cmd_on_pod(
                command=craft_s3_command(download_obj_cmd),
                out_yaml_format=False)
            download_files.append(obj["Key"])
        # Write all downloaded objects to the new bucket
        bucketname = bucket_factory(1)[0].name
        base_path = f"s3://{bucketname}"
        for i in range(amount):
            full_object_path = base_path + f"/{i}/"
            sync_object_directory(awscli_pod, data_dir, full_object_path,
                                  mcg_obj)

        obj_list = list(
            obj.key.split("/")[-1]
            for obj in mcg_obj.s3_list_all_objects_in_bucket(bucketname))

        # Check that the total number of copied files matches
        if file_type == "large_small":
            assert len(
                obj_list) == 2 * amount, "Total file amount does not match"
        else:
            assert len(obj_list) == amount, "Total file amount does not match"

        # Check that the deduplicated file name sets match
        test_set = set([i.split("/")[-1] for i in download_files])
        assert test_set == set(obj_list), "File name set does not match"
Code Example #17
    def test_multipart_upload_operations(self, rgw_endpoint,
                                         awscli_pod_session,
                                         rgw_bucket_factory,
                                         test_directory_setup):
        """
        Test multipart upload operations on a bucket and verify the integrity of the downloaded object
        """
        bucket, key, origin_dir, res_dir, object_path, parts = setup(
            awscli_pod_session, rgw_bucket_factory, test_directory_setup)
        bucketname = bucket.name
        bucket = OBC(bucketname)

        # Abort all Multipart Uploads for this Bucket (optional, for starting over)
        logger.info(f"Aborting any Multipart Upload on bucket:{bucketname}")
        abort_all_multipart_upload(bucket, bucketname, key)

        # Create & list Multipart Upload on the Bucket
        logger.info(
            f"Initiating Multipart Upload on Bucket: {bucketname} with Key {key}"
        )
        upload_id = create_multipart_upload(bucket, bucketname, key)
        logger.info(
            f"Listing the Multipart Upload: {list_multipart_upload(bucket, bucketname)}"
        )

        # Uploading individual parts to the Bucket
        logger.info(f"Uploading individual parts to the bucket {bucketname}")
        uploaded_parts = upload_parts(bucket, awscli_pod_session, bucketname,
                                      key, res_dir, upload_id, parts)

        # Listing the Uploaded parts
        logger.info(
            f"Listing the individual parts: {list_uploaded_parts(bucket, bucketname, key, upload_id)}"
        )

        # Completing the Multipart Upload
        logger.info(f"Completing the Multipart Upload on bucket: {bucketname}")
        logger.info(
            complete_multipart_upload(bucket, bucketname, key, upload_id,
                                      uploaded_parts))

        # Checksum Validation: Downloading the object after completing Multipart Upload and verifying its integrity
        logger.info(
            "Downloading the completed multipart object from the RGW bucket to the awscli pod"
        )
        sync_object_directory(awscli_pod_session, object_path, res_dir, bucket)
        assert verify_s3_object_integrity(
            original_object_path=f"{origin_dir}/{key}",
            result_object_path=f"{res_dir}/{key}",
            awscli_pod=awscli_pod_session,
        ), "Checksum comparison between original and result object failed"
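
The helpers used above (create_multipart_upload, upload_parts, complete_multipart_upload) wrap the standard S3 multipart API. A minimal boto3 sketch of the same flow, with the client setup and chunk_paths being placeholder assumptions rather than ocs-ci code:

import boto3


def multipart_upload_sketch(endpoint_url, access_key, secret_key,
                            bucketname, key, chunk_paths):
    # Placeholder client setup; the real tests take credentials from the OBC
    s3 = boto3.client(
        "s3",
        endpoint_url=endpoint_url,
        aws_access_key_id=access_key,
        aws_secret_access_key=secret_key,
    )
    upload_id = s3.create_multipart_upload(Bucket=bucketname, Key=key)["UploadId"]
    parts = []
    for part_number, chunk_path in enumerate(chunk_paths, start=1):
        with open(chunk_path, "rb") as part_file:
            resp = s3.upload_part(
                Bucket=bucketname, Key=key, PartNumber=part_number,
                UploadId=upload_id, Body=part_file,
            )
        # S3 requires each part's ETag to assemble the final object
        parts.append({"PartNumber": part_number, "ETag": resp["ETag"]})
    s3.complete_multipart_upload(
        Bucket=bucketname, Key=key, UploadId=upload_id,
        MultipartUpload={"Parts": parts},
    )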
Code Example #18
    def test_data_reduction_performance(self, mcg_obj, awscli_pod, bucket_factory):
        """
        Test data reduction performance
        """
        # TODO: Privatize test bucket
        download_dir = "/aws/downloaded"
        retrieve_test_objects_to_pod(awscli_pod, download_dir)
        bucket = bucket_factory(1)[0]
        bucketname = bucket.name
        full_object_path = f"s3://{bucketname}"
        sync_object_directory(awscli_pod, download_dir, full_object_path, mcg_obj)

        assert mcg_obj.check_data_reduction(
            bucketname, 100 * 1024 * 1024
        ), "Data reduction did not work as anticipated."
Code Example #19
File: test_noobaa.py  Project: yosibsh/ocs-ci
def test_fill_bucket(
    mcg_obj_session, awscli_pod_session, multiregion_mirror_setup_session
):
    """
    Test multi-region bucket creation using the S3 SDK. Fill the bucket for
    upgrade testing.
    """

    (bucket, created_backingstores) = multiregion_mirror_setup_session

    mcg_bucket_path = f"s3://{bucket.name}"

    # Download test objects from the public bucket
    awscli_pod_session.exec_cmd_on_pod(command=f"mkdir {LOCAL_TESTOBJS_DIR_PATH}")
    DOWNLOADED_OBJS = retrieve_test_objects_to_pod(
        awscli_pod_session, LOCAL_TESTOBJS_DIR_PATH
    )

    logger.info("Uploading all pod objects to MCG bucket")

    # Upload test objects to the NooBucket 3 times
    for i in range(3):
        sync_object_directory(
            awscli_pod_session,
            LOCAL_TESTOBJS_DIR_PATH,
            f"{mcg_bucket_path}/{i}/",
            mcg_obj_session,
        )

    mcg_obj_session.check_if_mirroring_is_done(bucket.name, timeout=420)
    bucket.verify_health()

    # Retrieve all objects from MCG bucket to result dir in Pod
    sync_object_directory(
        awscli_pod_session, mcg_bucket_path, LOCAL_TEMP_PATH, mcg_obj_session
    )

    bucket.verify_health()

    # Checksum is compared between original and result object
    for obj in DOWNLOADED_OBJS:
        for i in range(3):
            assert verify_s3_object_integrity(
                original_object_path=f"{LOCAL_TESTOBJS_DIR_PATH}/{obj}",
                result_object_path=f"{LOCAL_TEMP_PATH}/{i}/{obj}",
                awscli_pod=awscli_pod_session,
            ), "Checksum comparison between original and result object failed"
    bucket.verify_health()
Code Example #20
    def test_data_reduction(self, mcg_obj, awscli_pod, bucket_factory):
        """
        Test data reduction mechanics

        """
        # TODO: Privatize test bucket
        download_dir = '/aws/downloaded'
        retrieve_test_objects_to_pod(awscli_pod, download_dir)
        bucket = bucket_factory(1)[0]
        bucketname = bucket.name
        full_object_path = f"s3://{bucketname}"
        sync_object_directory(awscli_pod, download_dir, full_object_path,
                              mcg_obj)

        assert mcg_obj.check_data_reduction(bucketname), (
            'Data reduction did not work as anticipated.')
Code Example #21
    def test_multipart_upload_operations(self, mcg_obj, awscli_pod,
                                         bucket_factory):
        """
        Test multipart upload operations on a bucket and verify the integrity of the downloaded object
        """
        bucket, key, origin_dir, res_dir, object_path, parts = setup(
            awscli_pod, bucket_factory)

        # Abort all Multipart Uploads for this Bucket (optional, for starting over)
        logger.info(f'Aborting any Multipart Upload on bucket:{bucket}')
        abort_all_multipart_upload(mcg_obj, bucket, key)

        # Create & list Multipart Upload on the Bucket
        logger.info(
            f'Initiating Multipart Upload on Bucket: {bucket} with Key {key}')
        upload_id = create_multipart_upload(mcg_obj, bucket, key)
        logger.info(
            f'Listing the Multipart Upload : {list_multipart_upload(mcg_obj, bucket)}'
        )

        # Uploading individual parts to the Bucket
        logger.info(f'Uploading individual parts to the bucket {bucket}')
        uploaded_parts = upload_parts(mcg_obj, awscli_pod, bucket, key,
                                      res_dir, upload_id, parts)

        # Listing the Uploaded parts
        logger.info(
            f'Listing the individual parts : {list_uploaded_parts(mcg_obj, bucket, key, upload_id)}'
        )

        # Completing the Multipart Upload
        logger.info(f'Completing the Multipart Upload on bucket: {bucket}')
        logger.info(
            complete_multipart_upload(mcg_obj, bucket, key, upload_id,
                                      uploaded_parts))

        # Checksum Validation: Downloading the object after completing Multipart Upload and verifying its integrity
        logger.info(
            'Downloading the completed multipart object from MCG bucket to awscli pod'
        )
        sync_object_directory(awscli_pod, object_path, res_dir, mcg_obj)
        assert verify_s3_object_integrity(
            original_object_path=f'{origin_dir}/{key}',
            result_object_path=f'{res_dir}/{key}',
            awscli_pod=awscli_pod
        ), 'Checksum comparison between original and result object failed'
Code Example #22
    def download_files(
        self, mcg_obj, awscli_pod, bucket_to_read, result_folder, s3_creds=None
    ):
        """
        Download files from bucket (NS or uls)
        """
        ns_bucket_path = f"s3://{bucket_to_read}"

        if s3_creds:
            # Read data directly from target bucket (uls) to result dir
            sync_object_directory(
                awscli_pod,
                ns_bucket_path,
                result_folder,
                signed_request_creds=s3_creds,
            )
        else:
            # Read data from NS bucket to result dir
            sync_object_directory(awscli_pod, ns_bucket_path, result_folder, mcg_obj)
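
Paired with write_files_to_pod_and_upload from Code Example #6, a round trip through a namespace bucket might look like the following hypothetical test body (ns_bucket and the directory names are placeholders):

written = self.write_files_to_pod_and_upload(
    mcg_obj, awscli_pod, bucket_to_write=ns_bucket,
    original_folder="/original", amount=3,
)
self.download_files(mcg_obj, awscli_pod, bucket_to_read=ns_bucket,
                    result_folder="/result")
# Compare each uploaded object against its downloaded copy
for obj in written:
    assert verify_s3_object_integrity(
        original_object_path=f"/original/{obj}",
        result_object_path=f"/result/{obj}",
        awscli_pod=awscli_pod,
    ), "Checksum comparison between original and result object failed"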
Code Example #23
def test_fill_bucket(mcg_obj_session, awscli_pod_session,
                     multiregion_mirror_setup_session):
    """
    Test multi-region bucket creation using the S3 SDK. Fill the bucket for
    upgrade testing.
    """

    (bucket, created_backingstores) = multiregion_mirror_setup_session

    mcg_bucket_path = f's3://{bucket.name}'

    # Download test objects from the public bucket
    awscli_pod_session.exec_cmd_on_pod(
        command=f'mkdir {LOCAL_TESTOBJS_DIR_PATH}')
    DOWNLOADED_OBJS = retrieve_test_objects_to_pod(awscli_pod_session,
                                                   LOCAL_TESTOBJS_DIR_PATH)

    logger.info('Uploading all pod objects to MCG bucket')

    # Upload test objects to the NooBucket 3 times
    for i in range(3):
        sync_object_directory(awscli_pod_session, LOCAL_TESTOBJS_DIR_PATH,
                              f'{mcg_bucket_path}/{i}/', mcg_obj_session)

    mcg_obj_session.check_if_mirroring_is_done(bucket.name)
    assert bucket.status == constants.STATUS_BOUND

    # Retrieve all objects from MCG bucket to result dir in Pod
    sync_object_directory(awscli_pod_session, mcg_bucket_path, LOCAL_TEMP_PATH,
                          mcg_obj_session)

    assert bucket.status == constants.STATUS_BOUND

    # Checksum is compared between original and result object
    for obj in DOWNLOADED_OBJS:
        for i in range(3):
            assert verify_s3_object_integrity(
                original_object_path=f'{LOCAL_TESTOBJS_DIR_PATH}/{obj}',
                result_object_path=f'{LOCAL_TEMP_PATH}/{i}/{obj}',
                awscli_pod=awscli_pod_session
            ), 'Checksum comparison between original and result object failed'
    assert bucket.status == constants.STATUS_BOUND
Code Example #24
File: test_namespace.py  Project: kesavanvt/ocs-ci
    def download_files(self, mcg_obj, awscli_pod, bucket_to_read, s3_creds=None):
        """
        Download files from bucket (NS or uls)
        """
        awscli_pod.exec_cmd_on_pod(command=f"mkdir {self.MCG_NS_RESULT_DIR}")
        ns_bucket_path = f"s3://{bucket_to_read}"

        if s3_creds:
            # Read data directly from target bucket (uls) to result dir
            sync_object_directory(
                awscli_pod,
                ns_bucket_path,
                self.MCG_NS_RESULT_DIR,
                signed_request_creds=s3_creds,
            )
        else:
            # Read data from NS bucket to result dir
            sync_object_directory(
                awscli_pod, ns_bucket_path, self.MCG_NS_RESULT_DIR, mcg_obj
            )
Code Example #25
    def test_write_empty_file_to_bucket(self, mcg_obj, awscli_pod, bucket_factory):
        """
        Test write empty files to bucket
        """
        data_dir = "/data"
        bucketname = bucket_factory(1)[0].name
        full_object_path = f"s3://{bucketname}"

        # Use touch to create 100 empty files in the pod
        awscli_pod.exec_cmd_on_pod(command=f"mkdir {data_dir}")
        command = "for i in $(seq 1 100); do touch /data/test$i; done"
        awscli_pod.exec_sh_cmd_on_pod(command=command, sh="sh")
        # Write all empty objects to the new bucket
        sync_object_directory(awscli_pod, data_dir, full_object_path, mcg_obj)

        obj_set = set(
            obj.key for obj in mcg_obj.s3_list_all_objects_in_bucket(bucketname)
        )
        test_set = set("test" + str(i + 1) for i in range(100))
        assert test_set == obj_set, "File name set does not match"
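
s3_list_all_objects_in_bucket, used here and in several examples above, enumerates every key in a bucket. A hedged boto3 sketch of such a listing; the objects.all() collection pages transparently past the 1000-key limit of a single ListObjects call:

def list_all_object_keys(s3_resource, bucket_name):
    # Illustrative sketch, not the ocs-ci implementation
    return [obj.key for obj in s3_resource.Bucket(bucket_name).objects.all()]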
Code Example #26
File: test_noobaa.py  Project: sidhant-agrawal/ocs-ci
def test_noobaa_postupgrade(mcg_obj_session, awscli_pod_session,
                            multiregion_mirror_setup_session):
    """
    Check bucket data and remove resources created in 'test_fill_bucket'.
    """

    (bucket, created_backingstores) = multiregion_mirror_setup_session
    backingstore1 = created_backingstores[0]
    backingstore2 = created_backingstores[1]
    mcg_bucket_path = f"s3://{bucket.name}"

    # Checksum is compared between original and result object
    for obj in DOWNLOADED_OBJS:
        assert verify_s3_object_integrity(
            original_object_path=f"{LOCAL_TESTOBJS_DIR_PATH}/{obj}",
            result_object_path=f"{LOCAL_TEMP_PATH}/{obj}",
            awscli_pod=awscli_pod_session,
        ), "Checksum comparison between original and result object failed"

    bucket.verify_health()

    # Clean up the temp dir
    awscli_pod_session.exec_cmd_on_pod(
        command=f'sh -c "rm -rf {LOCAL_TEMP_PATH}/*"')

    mcg_obj_session.check_backingstore_state("backing-store-" +
                                             backingstore1.name,
                                             BS_OPTIMAL,
                                             timeout=360)
    mcg_obj_session.check_backingstore_state("backing-store-" +
                                             backingstore2.name,
                                             BS_OPTIMAL,
                                             timeout=360)

    bucket.verify_health()

    # Verify integrity of A
    # Retrieve all objects from MCG bucket to result dir in Pod
    sync_object_directory(awscli_pod_session, mcg_bucket_path, LOCAL_TEMP_PATH,
                          mcg_obj_session)
    bucket.verify_health()
Code Example #27
def test_noobaa_postupgrade(mcg_obj_session, awscli_pod_session,
                            multiregion_mirror_setup_session):
    """
    Check bucket data and remove resources created in 'test_fill_bucket'.
    """

    (bucket, created_backingstores) = multiregion_mirror_setup_session
    backingstore1 = created_backingstores[0]
    backingstore2 = created_backingstores[1]
    mcg_bucket_path = f's3://{bucket.name}'

    # Checksum is compared between original and result object
    for obj in DOWNLOADED_OBJS:
        assert verify_s3_object_integrity(
            original_object_path=f'{LOCAL_TESTOBJS_DIR_PATH}/{obj}',
            result_object_path=f'{LOCAL_TEMP_PATH}/{obj}',
            awscli_pod=awscli_pod_session
        ), 'Checksum comparison between original and result object failed'

    assert bucket.status == constants.STATUS_BOUND

    # Clean up the temp dir
    awscli_pod_session.exec_cmd_on_pod(
        command=f'sh -c "rm -rf {LOCAL_TEMP_PATH}/*"')

    mcg_obj_session.check_backingstore_state('backing-store-' +
                                             backingstore1['name'],
                                             BS_OPTIMAL,
                                             timeout=360)
    mcg_obj_session.check_backingstore_state('backing-store-' +
                                             backingstore2['name'],
                                             BS_OPTIMAL,
                                             timeout=360)

    assert bucket.status == constants.STATUS_BOUND

    # Verify integrity of A
    # Retrieve all objects from MCG bucket to result dir in Pod
    sync_object_directory(awscli_pod_session, mcg_bucket_path, LOCAL_TEMP_PATH,
                          mcg_obj_session)
    assert bucket.status == constants.STATUS_BOUND
Code Example #28
def write_empty_files_to_bucket(mcg_obj, awscli_pod_session, bucket_name,
                                test_directory_setup):
    """
    Write empty files to bucket and verify if they are created.

    Args:
        mcg_obj (MCG) : An MCG object containing the MCG S3 connection credentials
        awscli_pod_session : Fixture to create a new AWSCLI pod for relaying commands.
        bucket_name (str) : Name of the bucket on which files are to be written.
        test_directory_setup : Fixture to setup test DIRs.

    Raises:
        UnexpectedBehaviour : Raises an exception if files are not created.

    Returns:
        Set: A set of names of all bucket objects.

    """

    full_object_path = f"s3://{bucket_name}"
    data_dir = test_directory_setup.origin_dir

    # Use touch to create 1000 empty files in the origin dir
    command = f"for file_no in $(seq 1 1000); do touch {data_dir}/test$file_no; done"
    awscli_pod_session.exec_sh_cmd_on_pod(command=command, sh="sh")
    # Write all empty objects to the bucket
    sync_object_directory(awscli_pod_session, data_dir, full_object_path,
                          mcg_obj)

    log.info("Successfully created files.")

    obj_set = set(
        obj.key for obj in mcg_obj.s3_list_all_objects_in_bucket(bucket_name))
    test_set = set("test" + str(file_no + 1) for file_no in range(1000))

    if test_set != obj_set:
        raise ex.UnexpectedBehaviour("File name set does not match")
    log.info("File name set match")

    return obj_set
Code Example #29
    def test_mcg_data_compression(self, mcg_obj, awscli_pod, bucket_factory,
                                  bucketclass_dict):
        """
        Test data reduction mechanics

        Args:
            mcg_obj (obj): An object representing the current state of the MCG in the cluster
            awscli_pod (pod): A pod running the AWSCLI tools
            bucket_factory: Calling this fixture creates a new bucket(s)
        """
        download_dir = "/aws/compression/"
        awscli_pod.exec_cmd_on_pod(
            command=craft_s3_command(
                f"cp s3://{constants.TEST_FILES_BUCKET}/enwik8 {download_dir}"
            ),
            out_yaml_format=False,
        )
        bucketname = bucket_factory(1, bucketclass=bucketclass_dict)[0].name
        full_object_path = f"s3://{bucketname}"
        sync_object_directory(awscli_pod, download_dir, full_object_path,
                              mcg_obj)
        # For this test, enwik8 is used in conjunction with Snappy compression
        # utilized by NooBaa. Snappy consistently compresses 35MB of the file.
        assert mcg_obj.check_data_reduction(
            bucketname, 35 * 1024 * 1024
        ), "Data reduction did not work as anticipated."
Code Example #30
    def test_bucket_delete_with_objects(self, mcg_obj, awscli_pod_session,
                                        bucket_factory, interface,
                                        bucketclass_dict):
        """
        Negative test: deleting a bucket that has objects stored in it.

        """
        bucketname = bucket_factory(bucketclass=bucketclass_dict)[0].name

        data_dir = AWSCLI_TEST_OBJ_DIR
        full_object_path = f"s3://{bucketname}"
        sync_object_directory(awscli_pod_session, data_dir, full_object_path,
                              mcg_obj)

        logger.info(f"Deleting bucket: {bucketname}")
        if interface == "S3":
            try:
                s3_del = mcg_obj.s3_resource.Bucket(bucketname).delete()
                assert not s3_del, "Unexpectedly deleted a non-empty OBC via S3"
            except botocore.exceptions.ClientError as err:
                assert "BucketNotEmpty" in str(
                    err), "Couldn't verify delete non-empty OBC with s3"
                logger.info(
                    f"Delete non-empty OBC {bucketname} failed as expected")