Example #1
    def request_aws_credentials(self):
        """
        Uses a CredentialsRequest CR to create an AWS IAM that allows the program
        to interact with S3

        Returns:
            OCS: The CredentialsRequest resource
        """
        awscreds_data = templating.load_yaml(constants.MCG_AWS_CREDS_YAML)
        req_name = create_unique_resource_name("awscredreq",
                                               "credentialsrequests")
        awscreds_data["metadata"]["name"] = req_name
        awscreds_data["metadata"]["namespace"] = self.namespace
        awscreds_data["spec"]["secretRef"]["name"] = req_name
        awscreds_data["spec"]["secretRef"]["namespace"] = self.namespace

        creds_request = create_resource(**awscreds_data)
        # Allow the cloud-credential operator time to process the new request
        sleep(5)

        secret_ocp_obj = OCP(kind="secret", namespace=self.namespace)
        try:
            cred_req_secret_dict = secret_ocp_obj.get(
                resource_name=creds_request.name, retry=5)
        except CommandFailed:
            logger.error("Failed to retrieve credentials request secret")
            raise CredReqSecretNotFound(
                "Please make sure that the cluster used is an AWS cluster, "
                "or that the `platform` var in your config is correct.")

        aws_access_key_id = base64.b64decode(
            cred_req_secret_dict.get("data").get("aws_access_key_id")).decode(
                "utf-8")

        aws_secret_access_key = base64.b64decode(
            cred_req_secret_dict.get("data").get(
                "aws_secret_access_key")).decode("utf-8")

        def _check_aws_credentials():
            try:
                sts = boto3.client(
                    "sts",
                    aws_access_key_id=aws_access_key_id,
                    aws_secret_access_key=aws_secret_access_key,
                )
                sts.get_caller_identity()

                return True

            except ClientError:
                logger.info("Credentials are still not active. Retrying...")
                return False

        try:
            for api_test_result in TimeoutSampler(120, 5,
                                                  _check_aws_credentials):
                if api_test_result:
                    logger.info("AWS credentials created successfully.")
                    break

        except TimeoutExpiredError:
            logger.error("Failed to create credentials")
            assert False, "AWS credentials were not activated in time"

        return creds_request, aws_access_key_id, aws_secret_access_key
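
Once the sampler confirms the credentials are active, the returned key pair can be fed straight into other boto3 clients. A minimal usage sketch, assuming the same imports as the example above and that mcg is an instance exposing this method (the variable names are illustrative):

creds_request, access_key_id, secret_access_key = mcg.request_aws_credentials()
s3 = boto3.client(
    "s3",
    aws_access_key_id=access_key_id,
    aws_secret_access_key=secret_access_key,
)
# The new IAM credentials should now be able to enumerate S3 buckets
for bucket in s3.list_buckets().get("Buckets", []):
    logger.info("Visible bucket: %s", bucket["Name"])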
Example #2
    def oc_create_bucketclass(
        self,
        name,
        backingstores,
        placement_policy,
        namespace_policy,
        replication_policy,
    ):
        """
        Creates a new NooBaa bucket class using a template YAML

        Args:
            name (str): The name to be given to the bucket class
            backingstores (list): The backing stores to use as part of the policy
            placement_policy (str): The placement policy to be used - Mirror | Spread
            namespace_policy (dict): The namespace policy to be used
            replication_policy (dict): The replication policy dictionary

        Returns:
            OCS: The bucket class resource

        """
        bc_data = templating.load_yaml(constants.MCG_BUCKETCLASS_YAML)
        bc_data["metadata"]["name"] = name
        bc_data["metadata"]["namespace"] = self.namespace
        bc_data["spec"] = {}

        if (backingstores is not None) and (placement_policy is not None):
            bc_data["spec"]["placementPolicy"] = {"tiers": [{}]}
            tiers = bc_data["spec"]["placementPolicy"]["tiers"][0]
            tiers["backingStores"] = [
                backingstore.name for backingstore in backingstores
            ]
            tiers["placement"] = placement_policy

        # For the Single and Cache namespace policies, the write_resource
        # key is used to populate the relevant YAML field; the correct
        # field name is still written to the spec.
        if namespace_policy:
            bc_data["spec"]["namespacePolicy"] = {}
            ns_policy_type = namespace_policy["type"]
            bc_data["spec"]["namespacePolicy"]["type"] = ns_policy_type

            if ns_policy_type == constants.NAMESPACE_POLICY_TYPE_SINGLE:
                bc_data["spec"]["namespacePolicy"]["single"] = {
                    "resource": namespace_policy["write_resource"]
                }

            elif ns_policy_type == constants.NAMESPACE_POLICY_TYPE_MULTI:
                bc_data["spec"]["namespacePolicy"]["multi"] = {
                    "writeResource": namespace_policy["write_resource"],
                    "readResources": namespace_policy["read_resources"],
                }

            elif ns_policy_type == constants.NAMESPACE_POLICY_TYPE_CACHE:
                bc_data["spec"]["placementPolicy"] = placement_policy
                bc_data["spec"]["namespacePolicy"]["cache"] = namespace_policy[
                    "cache"]

        if replication_policy:
            bc_data["spec"]["replicationPolicy"] = json.dumps(replication_policy)

        return create_resource(**bc_data)
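
A call sketch for the common placement-only case, assuming mcg is an MCG wrapper instance exposing the method above and that two backingstore objects came from a backingstore factory (all names here are illustrative):

bucketclass = mcg.oc_create_bucketclass(
    name="my-spread-bucketclass",
    backingstores=[first_backingstore, second_backingstore],
    placement_policy="Spread",
    namespace_policy=None,
    replication_policy=None,
)
logger.info(f"Created bucket class {bucketclass.name}")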
Example #3
    def create_namespace_store(self, nss_name, region, cld_mgr,
                               cloud_uls_factory, platform):
        """
        Creates a new namespace store

        Args:
            nss_name (str): The name to be given to the new namespace store
            region (str): The region name to be used
            cld_mgr (CloudManager): A cloud manager instance
            cloud_uls_factory (Callable): The cloud ULS factory fixture
            platform (str): The platform resource name

        Returns:
            str: The name of the created target bucket (cloud ULS)
        """
        # Create the actual target bucket on AWS
        uls_dict = cloud_uls_factory({platform: [(1, region)]})
        target_bucket_name = list(uls_dict[platform])[0]

        nss_data = templating.load_yaml(constants.MCG_NAMESPACESTORE_YAML)
        nss_data["metadata"]["name"] = nss_name
        nss_data["metadata"]["namespace"] = config.ENV_DATA[
            "cluster_namespace"]

        NSS_MAPPING = {
            constants.AWS_PLATFORM: {
                "type": "aws-s3",
                "awsS3": {
                    "targetBucket": target_bucket_name,
                    "secret": {
                        "name": get_attr_chain(cld_mgr,
                                               "aws_client.secret.name")
                    },
                },
            },
            constants.AZURE_PLATFORM: {
                "type": "azure-blob",
                "azureBlob": {
                    "targetBlobContainer": target_bucket_name,
                    "secret": {
                        "name": get_attr_chain(cld_mgr,
                                               "azure_client.secret.name")
                    },
                },
            },
            constants.RGW_PLATFORM: {
                "type": "s3-compatible",
                "s3Compatible": {
                    "targetBucket": target_bucket_name,
                    "endpoint": get_attr_chain(cld_mgr, "rgw_client.endpoint"),
                    "signatureVersion": "v2",
                    "secret": {
                        "name": get_attr_chain(cld_mgr,
                                               "rgw_client.secret.name")
                    },
                },
            },
        }

        nss_data["spec"] = NSS_MAPPING[platform]
        create_resource(**nss_data)
        return target_bucket_name
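
A usage sketch, assuming this method lives on the same MCG wrapper and that the cld_mgr and cloud_uls_factory fixtures from the signature are in scope (the store name and region are illustrative):

target_bucket = mcg.create_namespace_store(
    nss_name="my-aws-namespacestore",
    region="us-east-2",
    cld_mgr=cld_mgr,
    cloud_uls_factory=cloud_uls_factory,
    platform=constants.AWS_PLATFORM,
)
logger.info(f"Namespace store is backed by ULS {target_bucket}")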
Example #4
def oc_create_namespacestore(
    nss_name,
    platform,
    mcg_obj,
    uls_name=None,
    cld_mgr=None,
    nss_tup=None,
    nsfs_pvc_name=None,
):
    """
    Create a namespacestore CR using a template YAML and OC

    Args:
        nss_name (str): Name of the namespacestore
        platform (str): Platform to create the namespacestore on
        mcg_obj (MCG): A redundant MCG object, used for uniformity between OC and CLI calls
        uls_name (str): Name of the ULS bucket to use for the namespacestore
        cld_mgr (CloudManager): CloudManager object used for supplying the needed connection credentials
        nss_tup (tuple): A tuple containing the NSFS namespacestore details, in this order:
            pvc_name (str): Name of the PVC that will host the namespace filesystem
            pvc_size (int): Size in Gi of the PVC that will host the namespace filesystem
            sub_path (str): The path to a sub directory inside the PVC FS which the NSS will use as the root directory
            fs_backend (str): The file system backend type - CEPH_FS | GPFS | NFSv4. Defaults to None.
        nsfs_pvc_name (str): Name of a pre-existing PVC to host the namespace filesystem

    """
    nss_data = templating.load_yaml(constants.MCG_NAMESPACESTORE_YAML)
    nss_data["metadata"]["name"] = nss_name
    nss_data["metadata"]["namespace"] = config.ENV_DATA["cluster_namespace"]

    NSS_MAPPING = {
        constants.AWS_PLATFORM: lambda: {
            "type": "aws-s3",
            "awsS3": {
                "targetBucket": uls_name,
                "secret": {
                    "name": get_attr_chain(cld_mgr, "aws_client.secret.name"),
                    "namespace": nss_data["metadata"]["namespace"],
                },
            },
        },
        constants.AZURE_PLATFORM: lambda: {
            "type": "azure-blob",
            "azureBlob": {
                "targetBlobContainer": uls_name,
                "secret": {
                    "name": get_attr_chain(cld_mgr, "azure_client.secret.name"),
                    "namespace": nss_data["metadata"]["namespace"],
                },
            },
        },
        constants.RGW_PLATFORM: lambda: {
            "type": "s3-compatible",
            "s3Compatible": {
                "targetBucket": uls_name,
                "endpoint": get_attr_chain(cld_mgr, "rgw_client.endpoint"),
                "signatureVersion": "v2",
                "secret": {
                    "name": get_attr_chain(cld_mgr, "rgw_client.secret.name"),
                    "namespace": nss_data["metadata"]["namespace"],
                },
            },
        },
        constants.NAMESPACE_FILESYSTEM: lambda: {
            "type": "nsfs",
            "nsfs": {
                "pvcName": uls_name,
                "subPath": nss_tup[2] if nss_tup[2] else "",
            },
        },
    }

    nss_data["spec"] = NSS_MAPPING[platform.lower()]()

    if (
        platform.lower() == constants.NAMESPACE_FILESYSTEM
        and len(nss_tup) == 4
        and nss_tup[3]
    ):
        # The mapping values are lambdas, so the optional filesystem backend
        # must be patched onto the rendered spec dict, not the mapping itself
        nss_data["spec"]["nsfs"]["fsBackend"] = nss_tup[3]
    create_resource(**nss_data)
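
Each NSS_MAPPING value is a lambda, presumably so that only the selected platform's get_attr_chain lookups are evaluated; building every branch eagerly would dereference cloud clients (for example rgw_client) that may not exist on the current cluster. A call sketch for the NSFS branch, assuming nss_tup follows the (pvc_name, pvc_size, sub_path, fs_backend) order documented above (all names are illustrative):

oc_create_namespacestore(
    nss_name="my-nsfs-namespacestore",
    platform=constants.NAMESPACE_FILESYSTEM,
    mcg_obj=mcg,
    uls_name="nsfs-host-pvc",  # used as the pvcName in the rendered spec
    nss_tup=("nsfs-host-pvc", 20, "nsfs-root", "CEPH_FS"),
)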
Example #5
    def test_duplicate_noobaa_secrets(
        self,
        backingstore_factory,
        cloud_uls_factory,
        mcg_obj,
        teardown_factory,
        cld_mgr,
    ):
        """
        Objective of this test:
            * Create a secret with the same credentials and check whether the
              duplicate is allowed when a backingstore is created
        """
        # create secret with the same credentials to check if duplicates are allowed
        first_bs_obj = backingstore_factory(
            method="oc", uls_dict={"aws": [(1, constants.AWS_REGION)]}
        )[0]
        aws_secret_obj = cld_mgr.aws_client.create_s3_secret(
            cld_mgr.aws_client.secret_prefix, cld_mgr.aws_client.data_prefix
        )
        logger.info(f"New secret created: {aws_secret_obj.name}")
        teardown_factory(aws_secret_obj)

        cloud = "aws"
        uls_tup = (1, constants.AWS_REGION)
        uls_name = list(cloud_uls_factory({cloud: [uls_tup]})["aws"])[0]
        logger.info(f"ULS dict: {type(uls_name)}")
        second_bs_name = create_unique_resource_name(
            resource_description="backingstore",
            resource_type=cloud.lower(),
        )
        bs_data = templating.load_yaml(constants.MCG_BACKINGSTORE_YAML)
        bs_data["metadata"]["name"] = second_bs_name
        bs_data["metadata"]["namespace"] = config.ENV_DATA["cluster_namespace"]
        bs_data["spec"] = {
            "type": "aws-s3",
            "awsS3": {
                "targetBucket": uls_name,
                "region": constants.AWS_REGION,
                "secret": {
                    "name": aws_secret_obj.name,
                    "namespace": bs_data["metadata"]["namespace"],
                },
            },
        }
        second_bs_obj = create_resource(**bs_data)
        teardown_factory(second_bs_obj)

        # Check if the duplicate secrets are allowed
        first_bs_dict = OCP(
            namespace=config.ENV_DATA["cluster_namespace"], kind="backingstore"
        ).get(resource_name=first_bs_obj.name)
        second_bs_dict = OCP(
            namespace=config.ENV_DATA["cluster_namespace"], kind="backingstore"
        ).get(resource_name=second_bs_name)
        assert (
            first_bs_dict["spec"]["awsS3"]["secret"]["name"]
            == second_bs_dict["spec"]["awsS3"]["secret"]["name"]
        ), "Backingstores are not referring to the same secrets when secrets with duplicate credentials are created!!"
        logger.info(
            "Duplicate secrets are not allowed; only the first secret is referenced"
        )
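
The two OCP lookups could be folded into a small helper; a sketch under the same imports, reusing the names from the test above:

def get_backingstore_secret_name(bs_name):
    # Fetch a backingstore CR and return the name of the secret it refers to
    bs_dict = OCP(
        namespace=config.ENV_DATA["cluster_namespace"], kind="backingstore"
    ).get(resource_name=bs_name)
    return bs_dict["spec"]["awsS3"]["secret"]["name"]

assert get_backingstore_secret_name(first_bs_obj.name) == get_backingstore_secret_name(
    second_bs_name
), "Backingstores do not refer to the same secret"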