Example #1
    def __init__(self):
        cloud_map = {
            "AWS": S3Client,
            "GCP": GoogleClient,
            "AZURE": AzureClient,
            "IBMCOS": S3Client,
            "RGW": S3Client,
        }
        try:
            logger.info(
                "Trying to load credentials from ocs-ci-data. "
                "This flow is only relevant when running under OCS-QE environments."
            )
            cred_dict = update_config_from_s3().get("AUTH")
        except (AttributeError, EndpointConnectionError):
            logger.warning(
                "Failed to load credentials from ocs-ci-data.\n"
                "Your local AWS credentials might be misconfigured.\n"
                "Trying to load credentials from local auth.yaml instead"
            )
            cred_dict = load_auth_config().get("AUTH", {})

        if not cred_dict:
            logger.warning(
                "Local auth.yaml not found, or failed to load. "
                "All cloud clients will be instantiated as None."
            )
            # Guard against a missing "AUTH" section so the loop below is safe
            cred_dict = {}

        # Instantiate all needed cloud clients as None by default
        for cloud_name in constants.CLOUD_MNGR_PLATFORMS:
            setattr(self, f"{cloud_name.lower()}_client", None)

        # Override the None clients with actual ones if credentials were found
        for cloud_name in cred_dict:
            if cloud_name in cloud_map:
                # If all the values of the client are filled in auth.yaml,
                # instantiate an actual client
                if not any(
                    value is None for value in cred_dict[cloud_name].values()
                ):
                    setattr(
                        self,
                        f"{cloud_name.lower()}_client",
                        cloud_map[cloud_name](auth_dict=cred_dict[cloud_name]),
                    )

        try:
            rgw_conn = RGW()
            endpoint, access_key, secret_key = rgw_conn.get_credentials()
            cred_dict["RGW"] = {
                "SECRET_PREFIX": "RGW",
                "DATA_PREFIX": "AWS",
                "ENDPOINT": endpoint,
                "RGW_ACCESS_KEY_ID": access_key,
                "RGW_SECRET_ACCESS_KEY": secret_key,
            }
            setattr(self, "rgw_client", cloud_map["RGW"](auth_dict=cred_dict["RGW"]))
        except CommandFailed:
            setattr(self, "rgw_client", None)
Example #2
 def __init__(self):
     cloud_map = {
         'AWS': S3Client,
         'GOOGLE': GoogleClient,
         'AZURE': AzureClient,
         # TODO: Implement - 'IBMCOS': S3Client
     }
     try:
         logger.info('Trying to load credentials from ocs-ci-data')
         cred_dict = update_config_from_s3().get('AUTH')
     except AttributeError:
         logger.warning('Failed to load credentials from ocs-ci-data. '
                        'Loading from local auth.yaml')
         cred_dict = load_auth_config().get('AUTH', {})
     for cloud_name in cred_dict:
         if cloud_name in cloud_map:
             try:
                 setattr(
                     self, f'{cloud_name.lower()}_client',
                     cloud_map[cloud_name](auth_dict=cred_dict[cloud_name]))
             except DefaultCredentialsError:
                 setattr(self, f'{cloud_name.lower()}_client', None)
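
Both variants leave one attribute per cloud on the manager object, and each attribute may legitimately be None. A minimal usage sketch, assuming the enclosing class is named CloudManager (the class name is not visible in the snippets above):

# Hypothetical usage; CloudManager is an assumed name for the class
# that owns the __init__ shown above.
cloud_manager = CloudManager()

if cloud_manager.aws_client is None:
    # Credentials were missing or incomplete, so skip AWS-backed steps
    logger.warning("No AWS client available, skipping AWS-specific setup")
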
Example #3
# Mark the test class with the marker below to ignore the leftover check
ignore_leftovers = pytest.mark.ignore_leftovers

# Mark the test class with the marker below to ignore leftovers of resources
# that carry the specified app labels
ignore_leftover_label = pytest.mark.ignore_leftover_label

# Testing marker: if you want to run a test that is still under development,
# mark it with @run_this and run pytest -m run_this
run_this = pytest.mark.run_this

# Skipif marks
skipif_aws_creds_are_missing = pytest.mark.skipif(
    (load_auth_config().get('AUTH', {}).get('AWS', {}).get('AWS_ACCESS_KEY_ID')
     is None and 'AWS_ACCESS_KEY_ID' not in os.environ
     and update_config_from_s3() is None),
    reason=("AWS credentials weren't found in the local auth.yaml "
            "and couldn't be fetched from the cloud"))

google_api_required = pytest.mark.skipif(
    not os.path.exists(os.path.expanduser(config.RUN['google_api_secret'])),
    reason="Google API credentials don't exist")

aws_platform_required = pytest.mark.skipif(
    config.ENV_DATA['platform'].lower() != 'aws',
    reason="Test runs ONLY on AWS deployed cluster")

azure_platform_required = pytest.mark.skipif(
    config.ENV_DATA['platform'].lower() != 'azure',
    reason="Test runs ONLY on Azure deployed cluster")
Example #4
    def test_noobaa_secret_deletion_method2(self, teardown_factory, mcg_obj, cleanup):
        """
            Objectives of this test are:
            1) create first backingstore using CLI passing credentials, which creates secret as well
            2) create second backingstore using CLI passing credentials, which recognizes the duplicates
               and uses the secret created above
            3) Modify the existing secret credentials and see if the owning BS/NS gets reconciled
            4) delete the first backingstore and make sure secret is not deleted
            5) check the ownerReference and see if it is removed after the above backingstore deletion
            6) delete the second backingstore and make sure secret is now deleted

        """

        # create ULS
        try:
            logger.info(
                "Trying to load credentials from ocs-ci-data. "
                "This flow is only relevant when running under OCS-QE environments."
            )
            secret_dict = update_config_from_s3().get("AUTH")
        except (AttributeError, EndpointConnectionError):
            logger.warning(
                "Failed to load credentials from ocs-ci-data.\n"
                "Your local AWS credentials might be misconfigured.\n"
                "Trying to load credentials from local auth.yaml instead"
            )
            secret_dict = load_auth_config().get("AUTH", {})
        access_key = secret_dict["AWS"]["AWS_ACCESS_KEY_ID"]
        secret_key = secret_dict["AWS"]["AWS_SECRET_ACCESS_KEY"]
        first_uls_name = create_unique_resource_name(
            resource_description="uls", resource_type="aws"
        )
        client = boto3.resource(
            "s3",
            verify=True,
            endpoint_url="https://s3.amazonaws.com",
            aws_access_key_id=access_key,
            aws_secret_access_key=secret_key,
        )
        client.create_bucket(
            Bucket=first_uls_name,
            CreateBucketConfiguration={"LocationConstraint": "eu-central-1"},
        )
        first_bs_name = create_unique_resource_name(
            resource_description="backingstore", resource_type="aws"
        )
        create_aws_bs_using_cli(
            mcg_obj=mcg_obj,
            backingstore_name=first_bs_name,
            access_key=access_key,
            secret_key=secret_key,
            uls_name=first_uls_name,
            region="eu-central-1",
        )
        mcg_obj.check_backingstore_state(
            backingstore_name=first_bs_name, desired_state=constants.BS_OPTIMAL
        )
        first_bs_obj = BackingStore(
            name=first_bs_name,
            method="cli",
            type="cloud",
            uls_name=first_uls_name,
            mcg_obj=mcg_obj,
        )
        cleanup(first_bs_obj)

        # create second backingstore using CLI and pass the secret credentials
        second_uls_name = create_unique_resource_name(
            resource_description="uls", resource_type="aws"
        )
        client.create_bucket(
            Bucket=second_uls_name,
            CreateBucketConfiguration={"LocationConstraint": "eu-central-1"},
        )
        second_bs_name = create_unique_resource_name(
            resource_description="backingstore", resource_type="aws"
        )
        create_aws_bs_using_cli(
            mcg_obj=mcg_obj,
            backingstore_name=second_bs_name,
            access_key=access_key,
            secret_key=secret_key,
            uls_name=second_uls_name,
            region="eu-central-1",
        )
        mcg_obj.check_backingstore_state(
            backingstore_name=second_bs_name, desired_state=constants.BS_OPTIMAL
        )
        second_bs_obj = BackingStore(
            name=second_bs_name,
            method="cli",
            type="cloud",
            uls_name=second_uls_name,
            mcg_obj=mcg_obj,
        )
        cleanup(second_bs_obj)

        # Modify the secret credentials to a wrong value and check that the
        # backingstores get reconciled (drop back to the Creating phase)
        secret_name = OCP(
            namespace=config.ENV_DATA["cluster_namespace"], kind="backingstore"
        ).get(resource_name=second_bs_name)["spec"]["awsS3"]["secret"]["name"]

        wrong_access_key_patch = {
            "data": {"AWS_ACCESS_KEY_ID": "d3JvbmdhY2Nlc3NrZXk="}
        }  # Invalid access key (base64 for "wrongaccesskey")
        OCP(namespace=config.ENV_DATA["cluster_namespace"], kind="secret").patch(
            resource_name=secret_name,
            params=json.dumps(wrong_access_key_patch),
            format_type="merge",
        )
        logger.info("Patched wrong access key!")
        assert OCP(
            namespace=config.ENV_DATA["cluster_namespace"], kind="backingstore"
        ).wait_for_resource(
            resource_name=second_bs_name,
            condition="Creating",
            column="PHASE",
        ), "Backingstores are not getting reconciled after changing linked secret credentials!"
        logger.info("Backingstores getting reconciled!")

        # delete first backingstore
        first_bs_obj.delete()
        logger.info(f"First backingstore {first_bs_name} deleted!")
        assert (
            OCP(namespace=config.ENV_DATA["cluster_namespace"], kind="secret").get(
                resource_name=secret_name, dont_raise=True
            )
            is not None
        ), "[Not expected] Secret got deleted along when first backingstore deleted!!"
        logger.info("Secret exists after the first backingstore deletion!")

        # check for the owner reference
        secret_owner_ref = OCP(
            namespace=config.ENV_DATA["cluster_namespace"], kind="secret"
        ).get(resource_name=secret_name)["metadata"]["ownerReferences"]
        for owner in secret_owner_ref:
            assert owner["name"] != first_bs_name, (
                f"Owner reference for {first_bs_name} still exists in the secret {secret_name} "
                f"even after backingstore {first_bs_name} got deleted!"
            )
        logger.info(
            f"Owner reference for first backingstore {first_bs_name} is deleted in {secret_name} !!"
        )

        # delete second backingstore
        second_bs_obj.delete()
        logger.info(f"Second backingstore {second_bs_name} deleted!")
        assert (
            OCP(namespace=config.ENV_DATA["cluster_namespace"], kind="secret").get(
                resource_name=secret_name, dont_raise=True
            )
            is None
        ), "[Not expected] Secret still exists even after all backingstores linked are deleted!"
        logger.info(
            "Secret got deleted after the all the linked backingstores are deleted!"
        )
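
The hard-coded patch value above works because Kubernetes stores secret "data" fields as base64 text; "d3JvbmdhY2Nlc3NrZXk=" is simply "wrongaccesskey" encoded. A short sketch of building the same merge patch programmatically (variable names are illustrative):

import base64
import json

# Secret "data" values must be base64-encoded strings
wrong_key = base64.b64encode(b"wrongaccesskey").decode()
wrong_access_key_patch = {"data": {"AWS_ACCESS_KEY_ID": wrong_key}}

# json.dumps(...) yields the string handed to OCP(...).patch(params=...)
print(json.dumps(wrong_access_key_patch))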