Example #1
    def test_list_cached_objects(
        self,
        bucket_factory,
        mcg_obj,
        cld_mgr,
        awscli_pod_session,
        test_directory_setup,
        bucketclass_dict,
    ):
        """
        Test the ability to list the objects stored in a cache bucket.
        """

        # Create the cached namespace bucket on top of the namespace resource
        bucket_obj = bucket_factory(bucketclass=bucketclass_dict)[0]
        original_folder = test_directory_setup.origin_dir
        # Upload files to NS bucket
        writen_objs_names = self.write_files_to_pod_and_upload(
            mcg_obj,
            awscli_pod_session,
            bucket_to_write=bucket_obj.name,
            original_dir=original_folder,
            amount=3,
        )
        wait_for_cache(mcg_obj, bucket_obj.name, writen_objs_names)
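The `bucketclass_dict` fixture these cache tests are parametrized with is not shown here. The sketch below is an illustration only, mirroring the cache bucketclass structure used in Examples #7 and #9; the TTL, region, and namespacestore count are assumed values, not the real parametrization.

# Illustration only: a plausible cache bucketclass parametrization for these tests
bucketclass_dict = {
    "interface": "OC",
    "namespace_policy_dict": {
        "type": "Cache",
        "ttl": 3600000,  # cache TTL in milliseconds
        "namespacestore_dict": {
            "aws": [(1, "eu-central-1")],
        },
    },
    "placement_policy": {
        "tiers": [{"backingStores": [constants.DEFAULT_NOOBAA_BACKINGSTORE]}]
    },
}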
Example #2
    def test_delete_cached_object(self, bucket_factory, mcg_obj, cld_mgr,
                                  awscli_pod, bucketclass_dict):
        """
        Test the deletion of an object that is present in the cache of a cache bucket.
        """

        # Create the cached namespace bucket on top of the namespace resource
        bucket_obj = bucket_factory(bucketclass=bucketclass_dict)[0]
        # Upload files to NS bucket
        writen_objs_names = self.write_files_to_pod_and_upload(
            mcg_obj, awscli_pod, bucket_to_write=bucket_obj.name, amount=1)
        wait_for_cache(mcg_obj, bucket_obj.name, writen_objs_names)

        # Delete the object from mcg interface
        s3_delete_object(mcg_obj, bucket_obj.name, writen_objs_names[0])
        sleep(5)
        if not check_cached_objects_by_name(mcg_obj, bucket_obj.name):
            raise UnexpectedBehaviour(
                "Object was not deleted from cache properly")

        # Check deletion in the cloud provider
        aws_target_bucket = bucket_obj.bucketclass.namespacestores[0].uls_name
        aws_obj_list = [
            # Compare object keys (strings), not the boto3 ObjectSummary objects
            obj.key for obj in
            cld_mgr.aws_client.client.Bucket(aws_target_bucket).objects.all()
        ]
        if writen_objs_names[0] in aws_obj_list:
            raise UnexpectedBehaviour(
                "Object was not deleted from the AWS target bucket")
Example #3
    def test_read_non_cached_object(self, bucket_factory, mcg_obj, cld_mgr,
                                    awscli_pod, bucketclass_dict):
        """
        Test reading an object that is not present in a cache bucket.
        """

        # Create the cached namespace bucket on top of the namespace resource
        bucket_obj = bucket_factory(bucketclass=bucketclass_dict)[0]
        s3_creds = {
            "access_key_id": cld_mgr.aws_client.access_key,
            "access_key": cld_mgr.aws_client.secret_key,
            "endpoint": constants.MCG_NS_AWS_ENDPOINT,
            "region": self.DEFAULT_REGION,
        }
        aws_target_bucket = bucket_obj.bucketclass.namespacestores[0].uls_name

        # Upload files directly to AWS
        writen_objs_names = self.write_files_to_pod_and_upload(
            mcg_obj,
            awscli_pod,
            bucket_to_write=aws_target_bucket,
            amount=3,
            s3_creds=s3_creds,
        )
        if not check_cached_objects_by_name(mcg_obj, bucket_obj.name):
            raise UnexpectedBehaviour(
                "Objects were unexpectedly found in the cache before any read "
                "through the cache bucket")
        # Read files from ns bucket
        self.download_files(mcg_obj,
                            awscli_pod,
                            bucket_to_read=bucket_obj.name)
        wait_for_cache(mcg_obj, bucket_obj.name, writen_objs_names)
Example #4
    def test_read_cached_object(
        self,
        bucket_factory,
        mcg_obj,
        cld_mgr,
        awscli_pod_session,
        test_directory_setup,
        bucketclass_dict,
    ):
        """
        Test reading an object that is present in a cache bucket.
        """

        # Create the cached namespace bucket on top of the namespace resource
        bucket_obj = bucket_factory(bucketclass=bucketclass_dict)[0]
        s3_creds = {
            "access_key_id": cld_mgr.aws_client.access_key,
            "access_key": cld_mgr.aws_client.secret_key,
            "endpoint": constants.MCG_NS_AWS_ENDPOINT,
            "region": self.DEFAULT_REGION,
        }

        original_folder = test_directory_setup.origin_dir
        result_folder = test_directory_setup.result_dir

        aws_target_bucket = bucket_obj.bucketclass.namespacestores[0].uls_name
        # Upload files to NS bucket
        writen_objs_names = self.write_files_to_pod_and_upload(
            mcg_obj,
            awscli_pod_session,
            bucket_to_write=bucket_obj.name,
            original_dir=original_folder,
            amount=1,
        )
        wait_for_cache(mcg_obj, bucket_obj.name, writen_objs_names)

        # Upload files directly to AWS
        self.write_files_to_pod_and_upload(
            mcg_obj,
            awscli_pod_session,
            bucket_to_write=aws_target_bucket,
            original_dir=original_folder,
            amount=1,
            s3_creds=s3_creds,
        )
        # Read files from ns bucket
        self.download_files(
            mcg_obj,
            awscli_pod_session,
            bucket_to_read=bucket_obj.name,
            download_dir=result_folder,
        )

        # compare_dirs should return False, since we expect the cached object to be
        # returned instead of the new object currently present in the original dir
        if self.compare_dirs(awscli_pod_session,
                             origin=original_folder,
                             destination=result_folder):
            raise UnexpectedBehaviour("Cached object was not downloaded")
Example #5
    def test_list_cached_objects(self, bucket_factory, mcg_obj, cld_mgr,
                                 awscli_pod, bucketclass_dict):
        """
        Test the ability to list the objects stored in a cache bucket.
        """

        # Create the cached namespace bucket on top of the namespace resource
        bucket_obj = bucket_factory(bucketclass=bucketclass_dict)[0]
        # Upload files to NS bucket
        writen_objs_names = self.write_files_to_pod_and_upload(
            mcg_obj, awscli_pod, bucket_to_write=bucket_obj.name, amount=3)
        wait_for_cache(mcg_obj, bucket_obj.name, writen_objs_names)
Example #6
    def test_read_stale_object(self, bucket_factory, mcg_obj, cld_mgr,
                               awscli_pod, bucketclass_dict):
        """
        Test reading a stale object from a cache bucket.
        """

        # Create the cached namespace bucket on top of the namespace resource
        bucket_obj = bucket_factory(bucketclass=bucketclass_dict)[0]
        s3_creds = {
            "access_key_id": cld_mgr.aws_client.access_key,
            "access_key": cld_mgr.aws_client.secret_key,
            "endpoint": constants.MCG_NS_AWS_ENDPOINT,
            "region": self.DEFAULT_REGION,
        }
        aws_target_bucket = bucket_obj.bucketclass.namespacestores[0].uls_name
        # Upload files to NS bucket
        writen_objs_names = self.write_files_to_pod_and_upload(
            mcg_obj, awscli_pod, bucket_to_write=bucket_obj.name, amount=1)
        wait_for_cache(mcg_obj, bucket_obj.name, writen_objs_names)

        awscli_pod.exec_cmd_on_pod(
            "mv /original/testfile0.txt /original/testfile1.txt")
        # Upload files directly to AWS
        self.write_files_to_pod_and_upload(
            mcg_obj,
            awscli_pod,
            bucket_to_write=aws_target_bucket,
            amount=1,
            s3_creds=s3_creds,
        )
        awscli_pod.exec_cmd_on_pod(
            "mv /original/testfile1.txt /original/testfile0.txt")
        # using sleep and not TimeoutSampler because we need to wait throughout the whole ttl
        sleep(bucketclass_dict["namespace_policy_dict"]["ttl"] / 1000)

        # Read files from ns bucket
        self.download_files(mcg_obj,
                            awscli_pod,
                            bucket_to_read=bucket_obj.name)

        if self.compare_dirs(awscli_pod):
            raise UnexpectedBehaviour(
                "Updated file was not fetched after ttl was exceeded")
Example #7
    def test_mcg_cache_lifecycle(
        self,
        mcg_obj,
        cld_mgr,
        awscli_pod,
        bucket_factory,
        test_directory_setup,
        bucketclass_dict,
    ):
        """
        Test the MCG cache bucket lifecycle

        1. Create cache buckets on each namespace store (RGW-OBC/OBC)
        2. Verify write operations on the cache and hub bucket
        3. Verify read/list operations on the cache bucket and hub target
        4. Verify the delete operation on the buckets
        5. Delete multiple cache buckets with data still in the ns store
        6. Recreate the cache buckets on the ns store (with existing data), then read

        """
        data = "Sample string content to write to a S3 object"
        object_key = "ObjKey-" + str(uuid.uuid4().hex)
        if (constants.RGW_PLATFORM in bucketclass_dict["namespace_policy_dict"]
            ["namespacestore_dict"]):
            s3_creds = {
                "access_key_id": cld_mgr.rgw_client.access_key,
                "access_key": cld_mgr.rgw_client.secret_key,
                "endpoint": cld_mgr.rgw_client.endpoint,
            }
            logger.info("RGW obc will be created as cache bucket")
            obc_interface = "rgw-oc"
        else:
            s3_creds = {
                "access_key_id": cld_mgr.aws_client.access_key,
                "access_key": cld_mgr.aws_client.secret_key,
                "endpoint": constants.MCG_NS_AWS_ENDPOINT,
                "region": config.ENV_DATA["region"],
            }
            logger.info("Noobaa obc will be created as cache bucket")
            obc_interface = bucketclass_dict["interface"]

        # Create the namespace resource and bucket
        ns_bucket = bucket_factory(
            interface=obc_interface,
            bucketclass=bucketclass_dict,
        )[0]
        logger.info(f"Cache bucket: {ns_bucket.name} created")
        target_bucket = ns_bucket.bucketclass.namespacestores[0].uls_name

        # Write to cache
        logger.info(f"Writing object on cache bucket: {ns_bucket.name}")
        assert s3_put_object(mcg_obj, ns_bucket.name, object_key,
                             data), "Failed: PutObject"
        wait_for_cache(mcg_obj, ns_bucket.name, [object_key])

        # Write to hub and read from cache
        logger.info("Setting up test files for upload")
        setup_base_objects(awscli_pod,
                           test_directory_setup.origin_dir,
                           amount=3)
        logger.info(f"Uploading objects to ns target: {target_bucket}")
        sync_object_directory(
            awscli_pod,
            src=test_directory_setup.origin_dir,
            target=f"s3://{target_bucket}",
            signed_request_creds=s3_creds,
        )
        sync_object_directory(
            awscli_pod,
            f"s3://{ns_bucket.name}",
            test_directory_setup.result_dir,
            mcg_obj,
        )

        # Read cached object
        assert s3_get_object(mcg_obj, ns_bucket.name,
                             object_key), "Failed: GetObject"

        # Read stale object (TTL expired)
        sleep(bucketclass_dict["namespace_policy_dict"]["ttl"] / 1000)
        logger.info(f"Get object on cache bucket: {ns_bucket.name}")
        assert s3_get_object(mcg_obj, ns_bucket.name,
                             object_key), "Failed: GetObject"

        # List on cache bucket
        list_response = s3_list_objects_v1(s3_obj=mcg_obj,
                                           bucketname=ns_bucket.name)
        logger.info(f"Listed objects: {list_response}")

        # Delete object from cache bucket
        s3_delete_object(mcg_obj, ns_bucket.name, object_key)
        sleep(5)
        # Try to read the deleted object; the GET is expected to fail
        try:
            s3_get_object(mcg_obj, ns_bucket.name, object_key)
        except boto3exception.ClientError:
            logger.info("Object deleted successfully")
        else:
            raise UnexpectedBehaviour(
                "Deleted object is still readable from the cache bucket")

        # Validate deletion on the hub
        if (constants.RGW_PLATFORM in bucketclass_dict["namespace_policy_dict"]
            ["namespacestore_dict"]):
            obj_list = [
                # Compare object keys (strings), not the boto3 ObjectSummary objects
                obj.key for obj in
                cld_mgr.rgw_client.client.Bucket(target_bucket).objects.all()
            ]
        else:
            obj_list = [
                obj.key for obj in
                cld_mgr.aws_client.client.Bucket(target_bucket).objects.all()
            ]
        if object_key in obj_list:
            raise UnexpectedBehaviour(
                "Object was not deleted from the hub bucket")

        # Recreate and validate object
        assert s3_put_object(mcg_obj, ns_bucket.name, object_key,
                             data), "Failed: PutObject"
        assert s3_get_object(mcg_obj, ns_bucket.name,
                             object_key), "Failed: GetObject"

        logger.info(f"Deleting cache bucket {ns_bucket.name}")
        curr_ns_store = ns_bucket.bucketclass.namespacestores[0]
        ns_bucket.delete()
        new_bucket_class = {
            "interface": "OC",
            "namespace_policy_dict": {
                "type": "Cache",
                "ttl": 180000,
                "namespacestores": [curr_ns_store],
            },
            "placement_policy": {
                "tiers": [{
                    "backingStores": [constants.DEFAULT_NOOBAA_BACKINGSTORE]
                }]
            },
        }
        logger.info(
            f"Recreating cache bucket {ns_bucket.name} using current hub: {target_bucket}"
        )
        ns_bucket = bucket_factory(
            interface=obc_interface,
            bucketclass=new_bucket_class,
        )[0]
        logger.info(
            f"Read existing data on hub: {target_bucket} through cache bucket: {ns_bucket.name}"
        )
        assert s3_get_object(mcg_obj, ns_bucket.name,
                             object_key), "Failed: GetObject"
Example #8
    def factory(num_of_obcs=20, bulk=False, measure=True):
        """
        Args:
            num_of_obcs (int) : Number of OBCs we want to create of each type mentioned above.
                                (Total OBCs = num_of_obcs * 5)
            bulk (bool) : True for bulk operations, False otherwise.
            measure (bool) : True if we want to measure the OBC creation/deletion time, False otherwise.

        """

        # Create OBCs - backingstore-backed, namespace, and cached - and create random files
        obc_objs = list()
        obc_names = list()
        obc_params = [
            (
                "OC",
                {
                    "interface": "OC",
                    "namespace_policy_dict": {
                        "type": "Single",
                        "namespacestore_dict": {
                            "rgw": [(1, None)]
                        },
                    },
                },
            ),
            ("OC", None),
            (
                "OC",
                {
                    "interface": "OC",
                    "namespace_policy_dict": {
                        "type": "Cache",
                        "ttl": 3600,
                        "namespacestore_dict": {
                            "rgw": [(1, None)]
                        },
                    },
                    "placement_policy": {
                        "tiers": [{
                            "backingStores":
                            [constants.DEFAULT_NOOBAA_BACKINGSTORE]
                        }]
                    },
                },
            ),
        ]
        for _interface, _bucketclass in obc_params:
            if num_of_obcs > 0:
                buckets = bucket_factory(
                    amount=num_of_obcs,
                    interface=_interface,
                    bucketclass=_bucketclass,
                    verify_health=not bulk,
                )
                if bulk:
                    for bucket in buckets:
                        bucket.verify_health()
                obc_objs.extend(buckets)
                written_objs_names = write_empty_files_to_bucket(
                    mcg_obj, awscli_pod_session, buckets[0].name,
                    test_directory_setup)
                if (_bucketclass
                        and _bucketclass["namespace_policy_dict"]["type"]
                        == "Cache"):
                    wait_for_cache(mcg_obj, buckets[0].name,
                                   list(written_objs_names))

        # Create OBCs - Replica Pair, create random files and verify replication

        target_bucketclass = {
            "interface": "OC",
            "namespace_policy_dict": {
                "type": "Single",
                "namespacestore_dict": {
                    "rgw": [(1, None)]
                },
            },
        }

        source_bucketclass = {
            "interface": "OC",
            "namespace_policy_dict": {
                "type": "Single",
                "namespacestore_dict": {
                    "rgw": [(1, None)]
                },
            },
        }

        target_buckets = list()
        source_buckets = list()
        for _num in range(num_of_obcs):
            target_bucket = bucket_factory(bucketclass=target_bucketclass)[0]
            target_buckets.append(target_bucket)
            target_bucket_name = target_bucket.name

            replication_policy = ("basic-replication-rule", target_bucket_name,
                                  None)
            source_bucket = bucket_factory(
                1,
                bucketclass=source_bucketclass,
                replication_policy=replication_policy)[0]
            source_buckets.append(source_bucket)

            write_empty_files_to_bucket(mcg_obj, awscli_pod_session,
                                        source_bucket.name,
                                        test_directory_setup)
            compare_bucket_object_list(mcg_obj_session, source_bucket.name,
                                       target_bucket_name)
        obc_objs.extend(target_buckets)
        obc_objs.extend(source_buckets)

        for obc in obc_objs:
            obc_names.append(obc.name)

        if measure:
            # Measure OBC Creation Time
            scale_noobaa_lib.measure_obc_creation_time(obc_name_list=obc_names)

        # Delete OBCs
        for bucket in obc_objs:
            log.info(f"Deleting bucket: {bucket.name}")
            bucket.delete()

        if measure:
            # Measure OBC Deletion Time
            scale_noobaa_lib.measure_obc_deletion_time(obc_name_list=obc_names)
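To make the factory arguments concrete, a hypothetical call site is sketched below; the fixture name `obc_scale_factory` and the test name are assumptions, not part of the source.

def test_scale_obc_creation_deletion_time(obc_scale_factory):
    # Create 20 OBCs of each type in bulk and measure creation/deletion times
    obc_scale_factory(num_of_obcs=20, bulk=True, measure=True)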
Example #9
    def mcg_system_setup(bucket_amount=5, object_amount=10):
        """
        Create standard, bidirectionally replicated, and cache buckets, upload
        objects to them, and return the artifacts needed for later verification.
        """
        # Create standard MCG buckets
        test_buckets = bucket_factory(
            amount=bucket_amount,
            interface="CLI",
        )

        uploaded_objects_dir = test_directory_setup.origin_dir
        downloaded_obejcts_dir = test_directory_setup.result_dir

        test_buckets_pattern = "RandomObject-"
        first_bidirectional_pattern = "FirstBidi-"
        second_bidirectional_pattern = "SecondBidi-"
        cache_pattern = "Cache-"

        # Perform a round-trip object verification -
        # 1. Generate random objects in uploaded_objects_dir
        # 2. Upload the objects to the bucket
        # 3. Download the objects from the bucket
        # 4. Compare the object checksums in downloaded_obejcts_dir
        # with the ones in uploaded_objects_dir
        for count, bucket in enumerate(test_buckets):
            assert random_object_round_trip_verification(
                io_pod=awscli_pod_session,
                bucket_name=bucket.name,
                upload_dir=uploaded_objects_dir + f"Bucket{count}",
                download_dir=downloaded_obejcts_dir + f"Bucket{count}",
                amount=object_amount,
                pattern=test_buckets_pattern,
                mcg_obj=mcg_obj_session,
            ), "Some or all written objects were not found in the list of downloaded objects"

        # E2E TODO: Create an RGW Kafka notification and verify the objects are notified to Kafka

        # Create two MCG buckets with a bidirectional replication policy
        bucketclass = {
            "interface": "OC",
            "backingstore_dict": {
                "aws": [(1, "eu-central-1")]
            },
        }
        first_bidi_bucket_name = bucket_factory(
            bucketclass=bucketclass)[0].name
        replication_policy = ("basic-replication-rule", first_bidi_bucket_name,
                              None)
        second_bidi_bucket_name = bucket_factory(
            1, bucketclass=bucketclass,
            replication_policy=replication_policy)[0].name
        patch_replication_policy_to_bucket(first_bidi_bucket_name,
                                           "basic-replication-rule-2",
                                           second_bidi_bucket_name)

        bidi_uploaded_objs_dir_1 = uploaded_objects_dir + "/bidi_1"
        bidi_uploaded_objs_dir_2 = uploaded_objects_dir + "/bidi_2"
        bidi_downloaded_objs_dir_1 = downloaded_obejcts_dir + "/bidi_1"
        bidi_downloaded_objs_dir_2 = downloaded_obejcts_dir + "/bidi_2"

        # Verify replication is working as expected by performing a two-way round-trip object verification
        random_object_round_trip_verification(
            io_pod=awscli_pod_session,
            bucket_name=first_bidi_bucket_name,
            upload_dir=bidi_uploaded_objs_dir_1,
            download_dir=bidi_downloaded_objs_dir_1,
            amount=object_amount,
            pattern=first_bidirectional_pattern,
            wait_for_replication=True,
            second_bucket_name=second_bidi_bucket_name,
            mcg_obj=mcg_obj_session,
        )

        random_object_round_trip_verification(
            io_pod=awscli_pod_session,
            bucket_name=second_bidi_bucket_name,
            upload_dir=bidi_uploaded_objs_dir_2,
            download_dir=bidi_downloaded_objs_dir_2,
            amount=object_amount,
            pattern=second_bidirectional_pattern,
            wait_for_replication=True,
            second_bucket_name=first_bidi_bucket_name,
            mcg_obj=mcg_obj_session,
        )

        # Create a cache bucket
        cache_bucketclass = {
            "interface": "OC",
            "namespace_policy_dict": {
                "type": "Cache",
                "ttl": 3600000,
                "namespacestore_dict": {
                    "aws": [(1, "eu-central-1")],
                },
            },
            "placement_policy": {
                "tiers": [{
                    "backingStores": [constants.DEFAULT_NOOBAA_BACKINGSTORE]
                }]
            },
        }
        cache_bucket = bucket_factory(bucketclass=cache_bucketclass)[0]

        cache_uploaded_objs_dir = uploaded_objects_dir + "/cache"
        cache_uploaded_objs_dir_2 = uploaded_objects_dir + "/cache_2"
        cache_downloaded_objs_dir = downloaded_obejcts_dir + "/cache"
        underlying_bucket_name = cache_bucket.bucketclass.namespacestores[
            0].uls_name

        # Upload a random object to the bucket
        objs_written_to_cache_bucket = write_random_test_objects_to_bucket(
            awscli_pod_session,
            cache_bucket.name,
            cache_uploaded_objs_dir,
            pattern=cache_pattern,
            mcg_obj=mcg_obj_session,
        )
        wait_for_cache(mcg_obj_session, cache_bucket.name,
                       objs_written_to_cache_bucket)
        # Write a random, larger object directly to the underlying storage of the bucket
        write_random_test_objects_to_bucket(
            awscli_pod_session,
            underlying_bucket_name,
            cache_uploaded_objs_dir_2,
            pattern=cache_pattern,
            s3_creds=cld_mgr.aws_client.nss_creds,
        )
        # Download the object from the cache bucket
        sync_object_directory(
            awscli_pod_session,
            f"s3://{cache_bucket.name}",
            cache_downloaded_objs_dir,
            mcg_obj_session,
        )
        # Make sure the cached object was returned, and not the one that was written to the underlying storage
        assert compare_directory(
            awscli_pod_session,
            cache_uploaded_objs_dir,
            cache_downloaded_objs_dir,
            amount=1,
            pattern=cache_pattern,
        ), "The uploaded and downloaded cached objects have different checksums"
        assert (
            compare_directory(
                awscli_pod_session,
                cache_uploaded_objs_dir_2,
                cache_downloaded_objs_dir,
                amount=1,
                pattern=cache_pattern,
            ) is False
        ), "The cached object was replaced by the new one before the TTL has expired"
        return {
            "test_buckets": test_buckets,
            "test_buckets_upload_dir": uploaded_objects_dir,
            "object_amount": object_amount,
            "test_buckets_pattern": test_buckets_pattern,
            "first_bidi_bucket_name": first_bidi_bucket_name,
            "bidi_downloaded_objs_dir_2": bidi_downloaded_objs_dir_2,
            "first_bidirectional_pattern": first_bidirectional_pattern,
            "second_bidi_bucket_name": second_bidi_bucket_name,
            "second_bidirectional_pattern": second_bidirectional_pattern,
            "cache_bucket_name": cache_bucket.name,
            "cache_pattern": cache_pattern,
            "cache_downloaded_objs_dir": cache_downloaded_objs_dir,
        }
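The returned dictionary is presumably consumed by a later verification stage; a hypothetical caller (an assumption, not shown in the source) might use it as follows:

# Hypothetical consumer of the artifacts returned by mcg_system_setup
setup = mcg_system_setup(bucket_amount=5, object_amount=10)
logger.info(f"Cache bucket under test: {setup['cache_bucket_name']}")
logger.info(
    f"Bidirectional pair: {setup['first_bidi_bucket_name']} <-> "
    f"{setup['second_bidi_bucket_name']}"
)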