Example #1
 def __init__(self, *args, **kwargs):
     super().__init__(*args, **kwargs)
     obc_data = templating.load_yaml(constants.MCG_OBC_YAML)
     if self.name is None:
         self.name = create_unique_resource_name('oc', 'obc')
     obc_data['metadata']['name'] = self.name
     obc_data['spec']['bucketName'] = self.name
     obc_data['metadata']['namespace'] = self.mcg.namespace
     create_resource(**obc_data)
Example #2
 def __init__(self, *args, **kwargs):
     super().__init__(*args, **kwargs)
     obc_data = templating.load_yaml(constants.MCG_OBC_YAML)
     if self.name is None:
         self.name = create_unique_resource_name('oc', 'obc')
     obc_data['metadata']['name'] = self.name
     obc_data['spec']['bucketName'] = self.name
     obc_data['spec']['storageClassName'] = constants.INDEPENDENT_DEFAULT_STORAGECLASS_RGW
     obc_data['metadata']['namespace'] = self.namespace
     create_resource(**obc_data)
Example #3
 def __init__(self, *args, **kwargs):
     super().__init__(*args, **kwargs)
     obc_data = templating.load_yaml(constants.MCG_OBC_YAML)
     if self.name is None:
         self.name = create_unique_resource_name('oc', 'obc')
     obc_data['metadata']['name'] = self.name
     obc_data['spec']['bucketName'] = self.name
     obc_data['spec']['storageClassName'] = self.namespace + '.noobaa.io'
     obc_data['metadata']['namespace'] = self.namespace
     if 'bucketclass' in kwargs:
         obc_data.setdefault('spec', {}).setdefault(
             'additionalConfig', {}).setdefault('bucketclass',
                                                kwargs['bucketclass'])
     create_resource(**obc_data)
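The nested setdefault chain above only attaches a bucketclass when the caller passes one; a minimal, standard-library-only illustration of how that chain builds the nested keys (the bucketclass name is a placeholder):

obc_data = {'metadata': {}, 'spec': {}}
obc_data.setdefault('spec', {}).setdefault(
    'additionalConfig', {}).setdefault('bucketclass', 'my-bucketclass')
# obc_data['spec'] == {'additionalConfig': {'bucketclass': 'my-bucketclass'}}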
Example #4
def main(url, token):

    run_id = ''.join(
        random.choice(string.ascii_lowercase + string.digits)
        for _ in range(8))
    info('Starting test run {}..'.format(run_id))

    all_containers_ready = False

    with Action('Waiting for all containers to be ready..') as act:
        for i in range(60):
            containers = get_containers(url, token)
            ready = True
            for name in EXPECTED_CONTAINERS:
                if not containers.get(name, {}).get('ready'):
                    info('{} is not ready yet (restarts: {})'.format(
                        name,
                        containers.get(name, {}).get('restart_count')))
                    ready = False

            if ready:
                all_containers_ready = True
                break

            time.sleep(5)
            act.progress()

    if not all_containers_ready:
        fatal_error('Not all containers are ready')

    manifest = '''
apiVersion: v1
kind: Namespace
metadata:
    name: e2e
'''
    try:
        create_resource(manifest, url + '/api/v1/namespaces', token)
    except requests.exceptions.HTTPError as e:
        # it's ok if the namespace is already there (409 Conflict)
        if e.response.status_code != 409:
            raise

    for entry in os.listdir('tests'):
        if entry.startswith('test_'):
            module_name = entry.split('.')[0]
            module = importlib.import_module('tests.{}'.format(module_name))
            func = getattr(module, module_name)
            info('Running {}..'.format(module_name))
            func(run_id, url, token)
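The discovery loop above imports every tests/test_*.py module and then calls a function whose name matches the module name; a minimal sketch of a test module that satisfies that convention (the module name and body are hypothetical):

# tests/test_smoke.py (hypothetical); the function name must equal the module
# name so that getattr(module, module_name) resolves it
def test_smoke(run_id, url, token):
    # the runner passes the generated run_id plus the cluster URL and token
    assert run_id and url and token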
Example #5
 def __init__(self, *args, **kwargs):
     super().__init__(*args, **kwargs)
     obc_data = templating.load_yaml(constants.MCG_OBC_YAML)
     if self.name is None:
         self.name = create_unique_resource_name('oc', 'obc')
     obc_data['metadata']['name'] = self.name
     obc_data['spec']['bucketName'] = self.name
     if storagecluster_independent_check():
         obc_data['spec'][
             'storageClassName'] = constants.DEFAULT_EXTERNAL_MODE_STORAGECLASS_RGW
     else:
         obc_data['spec'][
             'storageClassName'] = constants.DEFAULT_STORAGECLASS_RGW
     obc_data['metadata']['namespace'] = self.namespace
     create_resource(**obc_data)
Example #6
 def __init__(self,
              key_id=None,
              access_key=None,
              endpoint="https://s3.amazonaws.com",
              verify=True,
              *args,
              **kwargs):
     super().__init__(*args, **kwargs)
     if key_id and access_key:
         self.client = boto3.resource('s3',
                                      verify=verify,
                                      endpoint_url=endpoint,
                                      aws_access_key_id=key_id,
                                      aws_secret_access_key=access_key)
         self.access_key = key_id
         self.secret_key = access_key
     else:
         self.client = boto3.resource('s3', endpoint_url=endpoint)
         # create a secret for the underlying storage to use
         session = boto3.Session()
         # Retrieving the credentials of the existing session
         credentials = session.get_credentials().get_frozen_credentials()
         self.access_key = credentials.access_key
         self.secret_key = credentials.secret_key
     bs_secret_data = templating.load_yaml(
         constants.MCG_BACKINGSTORE_SECRET_YAML)
     bs_secret_data['metadata']['name'] += '-client-secret'
     bs_secret_data['metadata']['namespace'] = config.ENV_DATA[
         'cluster_namespace']
     bs_secret_data['data']['AWS_ACCESS_KEY_ID'] = base64.urlsafe_b64encode(
         self.access_key.encode('UTF-8')).decode('ascii')
     bs_secret_data['data'][
         'AWS_SECRET_ACCESS_KEY'] = base64.urlsafe_b64encode(
             self.secret_key.encode('UTF-8')).decode('ascii')
     self.secret = create_resource(**bs_secret_data)
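Both credential values above are stored URL-safe base64 encoded, since Kubernetes Secret data fields expect base64 strings; a standard-library-only illustration of the encode step (the key value is a placeholder, not a real credential):

import base64

access_key = 'AKIAEXAMPLEKEY'  # placeholder value
encoded = base64.urlsafe_b64encode(access_key.encode('UTF-8')).decode('ascii')
# `encoded` can now be placed into the Secret's data field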
Example #7
    def factory(interface=constants.CEPHBLOCKPOOL,
                secret=None,
                custom_data=None):
        """
        Args:
            interface (str): CephBlockPool or CephFileSystem. This decides
                whether an RBD-based or CephFS resource is created.
                RBD is default.
            secret (object): An OCS instance for the secret.
            custom_data (dict): If provided then storageclass object is created
                by using these data. Parameters `block_pool` and `secret`
                are not used but references are set if provided.

        Returns:
            object: helpers.create_storage_class instance with links to
                block_pool and secret.
        """
        if custom_data:
            sc_obj = helpers.create_resource(**custom_data, wait=False)
        else:
            secret = secret or secret_factory(interface=interface)
            ceph_pool = ceph_pool_factory(interface)
            interface_name = ceph_pool.name

            sc_obj = helpers.create_storage_class(
                interface_type=interface,
                interface_name=interface_name,
                secret_name=secret.name)
            assert sc_obj, f"Failed to create {interface} storage class"
            sc_obj.ceph_pool = ceph_pool
            sc_obj.secret = secret

        instances.append(sc_obj)
        return sc_obj
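A sketch of how a test might consume this factory, assuming it is exposed as a pytest fixture named storageclass_factory and that constants is importable as in the example:

def test_cephfs_storageclass(storageclass_factory):
    # default interface is CephBlockPool (RBD); request a CephFS-backed class
    sc_obj = storageclass_factory(interface=constants.CEPHFILESYSTEM)
    assert sc_obj.name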
Example #8
    def request_aws_credentials(self):
        """
        Uses a CredentialsRequest CR to create an AWS IAM user that allows the
        program to interact with S3

        Returns:
            tuple: The CredentialsRequest resource, the AWS access key ID and
                the AWS secret access key
        """
        awscreds_data = templating.load_yaml(constants.MCG_AWS_CREDS_YAML)
        req_name = create_unique_resource_name('awscredreq',
                                               'credentialsrequests')
        awscreds_data['metadata']['name'] = req_name
        awscreds_data['metadata']['namespace'] = self.namespace
        awscreds_data['spec']['secretRef']['name'] = req_name
        awscreds_data['spec']['secretRef']['namespace'] = self.namespace

        creds_request = create_resource(**awscreds_data)
        sleep(5)

        secret_ocp_obj = OCP(kind='secret', namespace=self.namespace)
        cred_req_secret_dict = secret_ocp_obj.get(creds_request.name)

        aws_access_key_id = base64.b64decode(
            cred_req_secret_dict.get('data').get('aws_access_key_id')).decode(
                'utf-8')

        aws_access_key = base64.b64decode(
            cred_req_secret_dict.get('data').get(
                'aws_secret_access_key')).decode('utf-8')

        def _check_aws_credentials():
            try:
                s3_res = boto3.resource(
                    's3',
                    verify=False,
                    endpoint_url="https://s3.amazonaws.com",
                    aws_access_key_id=aws_access_key_id,
                    aws_secret_access_key=aws_access_key)
                test_bucket = s3_res.create_bucket(
                    Bucket=create_unique_resource_name('cred-verify',
                                                       's3-bucket'))
                test_bucket.delete()
                return True

            except ClientError:
                logger.info('Credentials are still not active. Retrying...')
                return False

        try:
            for api_test_result in TimeoutSampler(40, 5,
                                                  _check_aws_credentials):
                if api_test_result:
                    logger.info('AWS credentials created successfully.')
                    break

        except TimeoutExpiredError:
            logger.error('Failed to create credentials')
            assert False

        return creds_request, aws_access_key_id, aws_access_key
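The retry loop above leans on the project's TimeoutSampler helper to poll _check_aws_credentials every 5 seconds for up to 40 seconds; a rough standard-library sketch of the same poll-until-true pattern, shown only to illustrate the control flow:

import time

def poll_until_true(check, timeout=40, interval=5):
    # call check() until it returns True or the timeout elapses
    deadline = time.time() + timeout
    while time.time() < deadline:
        if check():
            return True
        time.sleep(interval)
    raise TimeoutError(f'condition not met within {timeout} seconds')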
Example #9
def create_instance_in_clusterlogging(sc_name=None):
    """
    Create a ClusterLogging instance that provisions PVCs and the
    Elasticsearch, Curator, Fluentd and Kibana pods, and check that all
    the pods and PVCs are created

    Args:
        sc_name (str): Storage class name to create PVCs

    Returns:
        dict: Contains all detailed information of the
            instance such as pods that got created, its resources and limits
            values, storage class and size details etc.

    """
    inst_data = templating.load_yaml(constants.CL_INSTANCE_YAML)
    inst_data['spec']['logStore']['elasticsearch']['storage']['storageClassName'] = sc_name
    inst_data['spec']['logStore']['elasticsearch']['storage']['size'] = "200Gi"
    node_count = inst_data['spec']['logStore']['elasticsearch']['nodeCount']
    helpers.create_resource(wait=False, **inst_data)
    oc = ocp.OCP('v1', 'ClusterLogging', 'openshift-logging')
    logging_instance = oc.get(resource_name='instance', out_yaml_format=True)
    if logging_instance:
        logger.info("Successfully created instance for cluster-logging")
        logger.debug(logging_instance)
    else:
        logger.error("Instance for clusterlogging is not created properly")

    pod_obj = ocp.OCP(
        kind=constants.POD, namespace='openshift-logging'
    )
    pod_status = pod_obj.wait_for_resource(
        condition=constants.STATUS_RUNNING, resource_count=11, timeout=200,
        sleep=5
    )
    assert pod_status, "Pods are not in Running state."
    logger.info("All pods are in Running state")
    pvc_obj = ocp.OCP(
        kind=constants.PVC, namespace='openshift-logging'
    )
    pvc_status = pvc_obj.wait_for_resource(
        condition=constants.STATUS_BOUND, resource_count=node_count,
        timeout=150, sleep=5
    )
    assert pvc_status, "PVCs are not in bound state."
    logger.info("PVCs are Bound")
    return logging_instance
Example #10
def oc_create_aws_backingstore(cld_mgr, backingstore_name, uls_name, region):
    """
    Create a new backingstore with AWS underlying storage using the oc create command

    Args:
        cld_mgr (CloudManager): holds secret for backingstore creation
        backingstore_name (str): backingstore name
        uls_name (str): underlying storage name
        region (str): the region in which to create the backingstore (should match the ULS region)

    """
    bs_data = templating.load_yaml(constants.MCG_BACKINGSTORE_YAML)
    bs_data['metadata']['name'] = backingstore_name
    bs_data['metadata']['namespace'] = config.ENV_DATA['cluster_namespace']
    bs_data['spec']['awsS3']['secret']['name'] = cld_mgr.aws_client.secret.name
    bs_data['spec']['awsS3']['targetBucket'] = uls_name
    bs_data['spec']['awsS3']['region'] = region
    create_resource(**bs_data)
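A hypothetical invocation, assuming a CloudManager instance cld_mgr whose aws_client already holds the backingstore secret, and an existing underlying S3 bucket:

oc_create_aws_backingstore(
    cld_mgr=cld_mgr,                          # assumed CloudManager instance
    backingstore_name='aws-backingstore-1',   # hypothetical backingstore name
    uls_name='uls-bucket-1',                  # hypothetical existing S3 bucket
    region='us-east-2',                       # must match the ULS region
)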
Example #11
def oc_create_pv_backingstore(backingstore_name, vol_num, size, storage_class):
    """
    Create a new backingstore with PV underlying storage using the oc create command

    Args:
        backingstore_name (str): backingstore name
        vol_num (int): number of pv volumes
        size (int): size of each volume in GiB
        storage_class (str): which storage class to use

    """
    bs_data = templating.load_yaml(constants.PV_BACKINGSTORE_YAML)
    bs_data['metadata']['name'] = backingstore_name
    bs_data['metadata']['namespace'] = config.ENV_DATA['cluster_namespace']
    bs_data['spec']['pvPool']['resources']['requests']['storage'] = str(size) + 'Gi'
    bs_data['spec']['pvPool']['numVolumes'] = vol_num
    bs_data['spec']['pvPool']['storageClass'] = storage_class
    create_resource(**bs_data)
    wait_for_pv_backingstore(backingstore_name, config.ENV_DATA['cluster_namespace'])
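A hypothetical call that requests two 50 GiB PV-pool volumes from an RBD storage class; the helper then blocks in wait_for_pv_backingstore until the backingstore is ready:

oc_create_pv_backingstore(
    backingstore_name='pv-backingstore-1',        # hypothetical name
    vol_num=2,                                    # two pvPool volumes
    size=50,                                      # 50Gi per volume
    storage_class='ocs-storagecluster-ceph-rbd',  # hypothetical RBD class name
)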
Example #12
def test_fixture(request):
    """
    Setup and teardown
    * The setup will deploy openshift-logging in the cluster
    * The teardown will uninstall cluster-logging from the cluster
    """

    def finalizer():
        teardown()

    request.addfinalizer(finalizer)

    # Deploys the elasticsearch operator on the project openshift-operators-redhat
    ocp_logging_obj.create_namespace(yaml_file=constants.EO_NAMESPACE_YAML)
    assert ocp_logging_obj.create_elasticsearch_operator_group(
        yaml_file=constants.EO_OG_YAML,
        resource_name='openshift-operators-redhat'
    )
    assert ocp_logging_obj.set_rbac(
        yaml_file=constants.EO_RBAC_YAML, resource_name='prometheus-k8s'
    )
    logging_version = config.ENV_DATA['logging_version']
    subscription_yaml = templating.load_yaml(constants.EO_SUB_YAML)
    subscription_yaml['spec']['channel'] = logging_version
    helpers.create_resource(**subscription_yaml)
    assert ocp_logging_obj.get_elasticsearch_subscription()

    # Deploys cluster-logging operator on the project openshift-logging
    ocp_logging_obj.create_namespace(yaml_file=constants.CL_NAMESPACE_YAML)
    assert ocp_logging_obj.create_clusterlogging_operator_group(
        yaml_file=constants.CL_OG_YAML
    )
    cl_subscription = templating.load_yaml(constants.CL_SUB_YAML)
    cl_subscription['spec']['channel'] = logging_version
    helpers.create_resource(**cl_subscription)
    assert ocp_logging_obj.get_clusterlogging_subscription()
    cluster_logging_operator = OCP(
        kind=constants.POD, namespace=constants.OPENSHIFT_LOGGING_NAMESPACE
    )
    logger.info(f"The cluster-logging-operator {cluster_logging_operator.get()}")

    create_instance()
Example #13
    def create_aws_secret(self):
        bs_secret_data = templating.load_yaml(
            constants.MCG_BACKINGSTORE_SECRET_YAML)
        bs_secret_data['metadata']['name'] = 'cldmgr-aws-secret'
        bs_secret_data['metadata']['namespace'] = config.ENV_DATA[
            'cluster_namespace']
        bs_secret_data['data']['AWS_ACCESS_KEY_ID'] = base64.urlsafe_b64encode(
            self.access_key.encode('UTF-8')).decode('ascii')
        bs_secret_data['data'][
            'AWS_SECRET_ACCESS_KEY'] = base64.urlsafe_b64encode(
                self.secret_key.encode('UTF-8')).decode('ascii')

        return create_resource(**bs_secret_data)
Example #14
    def factory(
        interface=constants.CEPHBLOCKPOOL,
        pvc=None,
        custom_data=None,
        status=constants.STATUS_RUNNING,
        pod_dict_path=None,
        raw_block_pv=False
    ):
        """
        Args:
            interface (str): CephBlockPool or CephFileSystem. This decides
                whether an RBD-based or CephFS resource is created.
                RBD is default.
            pvc (PVC object): ocs_ci.ocs.resources.pvc.PVC instance kind.
            custom_data (dict): If provided then Pod object is created
                by using these data. Parameter `pvc` is not used but reference
                is set if provided.
            status (str): If provided then factory waits for object to reach
                desired state.
            pod_dict_path (str): YAML path for the pod.
            raw_block_pv (bool): True for creating raw block pv based pod,
                False otherwise.

        Returns:
            object: helpers.create_pod instance.
        """
        if custom_data:
            pod_obj = helpers.create_resource(**custom_data)
        else:
            pvc = pvc or pvc_factory(interface=interface)

            pod_obj = helpers.create_pod(
                pvc_name=pvc.name,
                namespace=pvc.namespace,
                interface_type=interface,
                pod_dict_path=pod_dict_path,
                raw_block_pv=raw_block_pv
            )
            assert pod_obj, "Failed to create pod"
        instances.append(pod_obj)
        if status:
            helpers.wait_for_resource_state(pod_obj, status)
            pod_obj.reload()
        pod_obj.pvc = pvc

        return pod_obj
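A sketch of consuming this factory from a test, assuming it is exposed as a pytest fixture named pod_factory; when no PVC is passed, one is created implicitly through pvc_factory:

def test_rbd_pod(pod_factory):
    # default interface is CephBlockPool, so this yields an RBD-backed pod
    pod_obj = pod_factory(status=constants.STATUS_RUNNING)
    assert pod_obj.pvc is not None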
Example #15
    def factory(
        interface=constants.CEPHBLOCKPOOL,
        pvc=None,
        service_account=None,
        size=None,
        custom_data=None,
        node_name=None,
        replica_count=1,
    ):
        """
        Args:
            interface (str): CephBlockPool or CephFileSystem. This decides
                whether an RBD-based or CephFS resource is created.
                RBD is default.
            pvc (PVC object): ocs_ci.ocs.resources.pvc.PVC instance kind.
            service_account (str): service account name for dc_pods
            size (int): The requested size for the PVC
            custom_data (dict): If provided then Pod object is created
                by using these data. Parameter `pvc` is not used but reference
                is set if provided.
            node_name (str): The name of specific node to schedule the pod
            replica_count (int): Replica count for deployment config
        """
        if custom_data:
            dc_pod_obj = helpers.create_resource(**custom_data)
        else:

            pvc = pvc or pvc_factory(interface=interface, size=size)
            sa_obj = service_account_factory(project=pvc.project,
                                             service_account=service_account)
            dc_pod_obj = helpers.create_pod(interface_type=interface,
                                            pvc_name=pvc.name,
                                            do_reload=False,
                                            namespace=pvc.namespace,
                                            sa_name=sa_obj.name,
                                            dc_deployment=True,
                                            replica_count=replica_count,
                                            node_name=node_name)
        instances.append(dc_pod_obj)
        log.info(dc_pod_obj.name)
        helpers.wait_for_resource_state(dc_pod_obj,
                                        constants.STATUS_RUNNING,
                                        timeout=180)
        dc_pod_obj.pvc = pvc
        return dc_pod_obj
Example #16
    def factory(
        interface=constants.CEPHBLOCKPOOL,
        secret=None,
        custom_data=None,
        sc_name=None,
        reclaim_policy=constants.RECLAIM_POLICY_DELETE
    ):
        """
        Args:
            interface (str): CephBlockPool or CephFileSystem. This decides
                whether an RBD-based or CephFS resource is created.
                RBD is default.
            secret (object): An OCS instance for the secret.
            custom_data (dict): If provided then storageclass object is created
                by using these data. Parameters `block_pool` and `secret`
                are not used but references are set if provided.
            sc_name (str): Name of the storage class

        Returns:
            object: helpers.create_storage_class instance with links to
                block_pool and secret.
        """
        if custom_data:
            sc_obj = helpers.create_resource(**custom_data)
        else:
            secret = secret or secret_factory(interface=interface)
            ceph_pool = ceph_pool_factory(interface)
            if interface == constants.CEPHBLOCKPOOL:
                interface_name = ceph_pool.name
            elif interface == constants.CEPHFILESYSTEM:
                interface_name = helpers.get_cephfs_data_pool_name()

            sc_obj = helpers.create_storage_class(
                interface_type=interface,
                interface_name=interface_name,
                secret_name=secret.name,
                sc_name=sc_name,
                reclaim_policy=reclaim_policy
            )
            assert sc_obj, f"Failed to create {interface} storage class"
            sc_obj.ceph_pool = ceph_pool
            sc_obj.secret = secret

        instances.append(sc_obj)
        return sc_obj
Example #17
    def oc_create_bucketclass(self, name, backingstores, placement):
        """
        Creates a new NooBaa bucket class
        Args:
            name: The name to be given to the bucket class
            backingstores: The backing stores to use as part of the policy
            placement: The placement policy to be used - Mirror | Spread

        Returns:
            OCS: The bucket class resource

        """
        bc_data = templating.load_yaml(constants.MCG_BUCKETCLASS_YAML)
        bc_data['metadata']['name'] = name
        bc_data['metadata']['namespace'] = self.namespace
        tiers = bc_data['spec']['placementPolicy']['tiers'][0]
        tiers['backingStores'] = backingstores
        tiers['placement'] = placement
        return create_resource(**bc_data)
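A hypothetical call, assuming mcg_obj is an instance of the class defining this method and both backingstores already exist:

bucketclass = mcg_obj.oc_create_bucketclass(
    name='mirrored-bucketclass',                         # hypothetical name
    backingstores=['backingstore-1', 'backingstore-2'],  # hypothetical stores
    placement='Mirror',                                  # Mirror | Spread
)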
Example #18
def create_configmap_cluster_monitoring_pod(sc_name=None,
                                            telemeter_server_url=None):
    """
    Create a configmap named cluster-monitoring-config based on the arguments.

    Args:
        sc_name (str): Name of the storage class which will be used for
            persistent storage needs of OCP Prometheus and Alert Manager.
            If not defined, the related options won't be present in the
            monitoring config map and the default (non persistent) storage
            will be used for OCP Prometheus and Alert Manager.
        telemeter_server_url (str): URL of Telemeter server where telemeter
            client (running in the cluster) will send it's telemetry data. If
            not defined, related option won't be present in the monitoring
            config map and the default (production) telemeter server will
            receive the metrics data.
    """
    logger.info("Creating configmap cluster-monitoring-config")
    config_map = templating.load_yaml(
        constants.CONFIGURE_PVC_ON_MONITORING_POD)
    config = yaml.safe_load(config_map['data']['config.yaml'])
    if sc_name is not None:
        logger.info(
            f"Setting {sc_name} as storage backed for Prometheus and Alertmanager"
        )
        config['prometheusK8s']['volumeClaimTemplate']['spec'][
            'storageClassName'] = sc_name
        config['alertmanagerMain']['volumeClaimTemplate']['spec'][
            'storageClassName'] = sc_name
    else:
        del config['prometheusK8s']
        del config['alertmanagerMain']
    if telemeter_server_url is not None:
        logger.info(f"Setting {telemeter_server_url} as telemeter server url")
        config['telemeterClient'] = {}
        config['telemeterClient']['telemeterServerURL'] = telemeter_server_url
    config = yaml.dump(config)
    config_map['data']['config.yaml'] = config
    assert helpers.create_resource(**config_map)
    ocp = OCP('v1', 'ConfigMap', defaults.OCS_MONITORING_NAMESPACE)
    assert ocp.get(resource_name='cluster-monitoring-config')
    logger.info("Successfully created configmap cluster-monitoring-config")
Example #19
    def create_aws_backingstore_secret(self, name):
        """
        Creates a secret for NooBaa's backingstore
        Args:
            name: The name to be given to the secret

        Returns:
            OCS: The secret resource

        """
        bs_secret_data = templating.load_yaml(
            constants.MCG_BACKINGSTORE_SECRET_YAML)
        bs_secret_data['metadata']['name'] += f'-{name}'
        bs_secret_data['metadata']['namespace'] = self.namespace
        bs_secret_data['data']['AWS_ACCESS_KEY_ID'] = base64.urlsafe_b64encode(
            self.aws_access_key_id.encode('UTF-8')).decode('ascii')
        bs_secret_data['data'][
            'AWS_SECRET_ACCESS_KEY'] = base64.urlsafe_b64encode(
                self.aws_access_key.encode('UTF-8')).decode('ascii')
        return create_resource(**bs_secret_data)
Example #20
    def oc_create_aws_backingstore(self, name, targetbucket, secretname, region):
        """
        Creates a new NooBaa backing store
        Args:
            name: The name to be given to the backing store
            targetbucket: The S3 target bucket to connect to
            secretname: The secret to use for authentication
            region: The target bucket's region

        Returns:
            OCS: The backingstore resource

        """
        bs_data = templating.load_yaml(constants.MCG_BACKINGSTORE_YAML)
        bs_data['metadata']['name'] += f'-{name}'
        bs_data['metadata']['namespace'] = self.namespace
        bs_data['spec']['awsS3']['secret']['name'] = secretname
        bs_data['spec']['awsS3']['targetBucket'] = targetbucket
        bs_data['spec']['awsS3']['region'] = region
        return create_resource(**bs_data)
Example #21
def create_configmap_cluster_monitoring_pod(sc_name):
    """
    Create a configmap named cluster-monitoring-config
    and configure PVCs for the monitoring pods

    Args:
        sc_name (str): Name of the storage class
    """
    logger.info("Creating configmap cluster-monitoring-config")
    config_map = templating.load_yaml_to_dict(
        constants.CONFIGURE_PVC_ON_MONITORING_POD
    )
    config = yaml.safe_load(config_map['data']['config.yaml'])
    config['prometheusK8s']['volumeClaimTemplate']['spec']['storageClassName'] = sc_name
    config['alertmanagerMain']['volumeClaimTemplate']['spec']['storageClassName'] = sc_name
    config = yaml.dump(config)
    config_map['data']['config.yaml'] = config
    assert helpers.create_resource(**config_map, wait=False)
    ocp = OCP('v1', 'ConfigMap', 'openshift-monitoring')
    assert ocp.get(resource_name='cluster-monitoring-config')
    logger.info("Successfully created configmap cluster-monitoring-config")
Example #22
    def request_aws_credentials(self):
        """
        Uses a CredentialsRequest CR to create an AWS IAM user that allows the
        program to interact with S3

        Returns:
            tuple: The CredentialsRequest resource, the AWS access key ID and
                the AWS secret access key
        """
        awscreds_data = templating.load_yaml(constants.MCG_AWS_CREDS_YAML)
        req_name = create_unique_resource_name('awscredreq',
                                               'credentialsrequests')
        awscreds_data['metadata']['name'] = req_name
        awscreds_data['metadata']['namespace'] = self.namespace
        awscreds_data['spec']['secretRef']['name'] = req_name
        awscreds_data['spec']['secretRef']['namespace'] = self.namespace

        creds_request = create_resource(**awscreds_data)
        sleep(5)

        secret_ocp_obj = OCP(kind='secret', namespace=self.namespace)
        try:
            cred_req_secret_dict = secret_ocp_obj.get(
                resource_name=creds_request.name, retry=5)
        except CommandFailed:
            logger.error('Failed to retrieve credentials request secret')
            raise CredReqSecretNotFound(
                'Please make sure that the cluster used is an AWS cluster, '
                'or that the `platform` var in your config is correct.')

        aws_access_key_id = base64.b64decode(
            cred_req_secret_dict.get('data').get('aws_access_key_id')).decode(
                'utf-8')

        aws_access_key = base64.b64decode(
            cred_req_secret_dict.get('data').get(
                'aws_secret_access_key')).decode('utf-8')

        def _check_aws_credentials():
            try:
                sts = boto3.client('sts',
                                   aws_access_key_id=aws_access_key_id,
                                   aws_secret_access_key=aws_access_key)
                sts.get_caller_identity()

                return True

            except ClientError:
                logger.info('Credentials are still not active. Retrying...')
                return False

        try:
            for api_test_result in TimeoutSampler(120, 5,
                                                  _check_aws_credentials):
                if api_test_result:
                    logger.info('AWS credentials created successfully.')
                    break

        except TimeoutExpiredError:
            logger.error('Failed to create credentials')
            assert False

        return creds_request, aws_access_key_id, aws_access_key