def create_pvc_and_verify_pvc_exists(
    sc_name, cbp_name, desired_status=constants.STATUS_BOUND, wait=True
):
    """
    Create a PVC, verify that it reaches the desired (Bound) state
    and that the corresponding image exists on the Ceph backend
    """

    pvc_data = templating.load_yaml_to_dict(constants.CSI_PVC_YAML)
    pvc_data['metadata']['name'] = helpers.create_unique_resource_name(
        'test', 'pvc'
    )
    pvc_data['spec']['storageClassName'] = sc_name
    pvc_data['spec']['resources']['requests']['storage'] = "10Gi"
    pvc_obj = pvc.PVC(**pvc_data)
    pvc_obj.create()
    if wait:
        assert pvc_obj.ocp.wait_for_resource(
            condition=desired_status, resource_name=pvc_obj.name
        ), (
            f"{pvc_obj.kind} {pvc_obj.name} failed to reach "
            f"status {desired_status}"
        )
    pvc_obj.reload()

    # ToDo: Add validation to check pv exists on backend
    # Commenting the below code: https://bugzilla.redhat.com/show_bug.cgi?id=1723656
    # Validate pv is created on ceph
    # logger.info(f"Verifying pv exists on backend")
    # ct_pod = pod.get_ceph_tools_pod()
    # pv_list = ct_pod.exec_ceph_cmd(
    #     ceph_cmd=f"rbd ls -p {cbp_name}", format='json'
    # )
    # _rc = pvc_obj.backed_pv in pv_list
    # assert _rc, f"pv doesn't exist on backend"
    # logger.info(f"pv {pvc_obj.backed_pv} exists on backend")
    return pvc_obj
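
A minimal call sketch for the helper above; sc_obj and cbp_obj are assumed to be a previously created storage class and Ceph block pool (as in the later examples), and the logging line is illustrative only:

pvc_obj = create_pvc_and_verify_pvc_exists(
    sc_name=sc_obj.name,
    cbp_name=cbp_obj.name,
)
logger.info(f"PVC {pvc_obj.name} reached status {pvc_obj.status}")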
Example 2
    def _factory(job_name=None,
                 bucket=None,
                 project=None,
                 custom_options=None):
        """
        Args:
            job_name (str): Name of the job
            bucket (obj): MCG bucket with S3 interface
            project (obj): OCP object representing OCP project which will be
                used for the job
            mcg_obj (obj): instance of MCG class
            resource_path (str): path to the directory where the resources
                should be created
            custom_options (dict): Dictionary of lists containing tuples with
                additional configuration for fio in format:
                {'section': [('option', 'value'),...],...}
                e.g.
                {'global':[('name','bucketname')],'create':[('time_based','1'),('runtime','48h')]}
                These values are added to the config and can overwrite
                already existing values

        Returns:
            obj: The created MCG workload Job resource

        """
        job_name = job_name or create_unique_resource_name(
            resource_description='mcg-io', resource_type='job')
        bucket = bucket or bucket_factory()
        project = project or project_factory()
        job = create_workload_job(job_name, bucket, project, mcg_obj,
                                  resource_path, custom_options)
        instances.append(job)
        return job
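
The custom_options format documented above is easiest to see in an actual call. A minimal usage sketch, assuming the factory is exposed as a pytest fixture; the fixture name mcg_job_factory and the test body are assumptions, only the argument format follows the docstring:

def test_mcg_workload_io(mcg_job_factory):
    # Override fio options per section, using the documented
    # {'section': [('option', 'value'), ...], ...} format
    custom_options = {
        'global': [('name', 'bucketname')],
        'create': [('time_based', '1'), ('runtime', '48h')],
    }
    job = mcg_job_factory(custom_options=custom_options)
    assert job is not None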
Example 3
def create_pvc_clone(sc_name,
                     parent_pvc,
                     clone_yaml,
                     pvc_name=None,
                     do_reload=True,
                     storage_size=None):
    """
    Create a cloned PVC from an existing PVC

    Args:
        sc_name (str): The name of storage class (same for both parent and cloned pvc).
        parent_pvc (str): Name of the parent PVC whose clone is to be created.
        clone_yaml (str): Path to the PVC clone yaml template.
        pvc_name (str): The name of the PVC being created.
        do_reload (bool): True to reload the PVC after its creation, False otherwise.
        storage_size (str): Size of the clone; if not passed, the default "storage" value from pvc-clone.yaml will be used.

    Returns:
        PVC: PVC instance

    """
    pvc_data = templating.load_yaml(clone_yaml)
    pvc_data['metadata']['name'] = (pvc_name if pvc_name else
                                    helpers.create_unique_resource_name(
                                        'cloned', 'pvc'))
    pvc_data['spec']['storageClassName'] = sc_name
    pvc_data['spec']['dataSource']['name'] = parent_pvc
    if storage_size:
        pvc_data['spec']['resources']['requests']['storage'] = storage_size
    ocs_obj = PVC(**pvc_data)
    created_pvc = ocs_obj.create(do_reload=do_reload)
    assert created_pvc, f"Failed to create resource {pvc_name}"
    return ocs_obj
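
A minimal usage sketch for the clone helper; sc_obj and parent_pvc_obj are assumed to be an existing storage class and a Bound parent PVC, and the clone yaml constant name is an assumption:

cloned_pvc_obj = create_pvc_clone(
    sc_name=sc_obj.name,
    parent_pvc=parent_pvc_obj.name,
    clone_yaml=constants.CSI_RBD_PVC_CLONE_YAML,  # assumed clone yaml template
    storage_size='5Gi',
)
helpers.wait_for_resource_state(cloned_pvc_obj, constants.STATUS_BOUND)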
Example 4
def create_pvc_and_verify_pvc_exists(sc_name,
                                     cbp_name,
                                     desired_status=constants.STATUS_BOUND,
                                     wait=True):
    """
    Create a PVC, verify that it reaches the desired (Bound) state
    and that the corresponding image exists on the Ceph backend
    """

    pvc_data = defaults.CSI_PVC_DICT.copy()
    pvc_data['metadata']['name'] = helpers.create_unique_resource_name(
        'test', 'pvc')
    pvc_data['spec']['storageClassName'] = sc_name
    pvc_data['spec']['resources']['requests']['storage'] = "10Gi"
    pvc_obj = pvc.PVC(**pvc_data)
    pvc_obj.create()
    if wait:
        assert pvc_obj.ocp.wait_for_resource(
            condition=desired_status, resource_name=pvc_obj.name
        ), (
            f"{pvc_obj.kind} {pvc_obj.name} failed to reach "
            f"status {desired_status}"
        )
    pvc_obj.reload()

    # Validate pv is created on ceph
    logger.info(f"Verifying pv exists on backend")
    ct_pod = pod.get_ceph_tools_pod()
    pv_list = ct_pod.exec_ceph_cmd(ceph_cmd=f"rbd ls -p {cbp_name}",
                                   format='json')
    _rc = pvc_obj.backed_pv in pv_list
    assert _rc, f"pv doesn't exist on backend"
    logger.info(f"pv {pvc_obj.backed_pv} exists on backend")
    return pvc_obj
Example 5
    def create_new_aws_connection(self, conn_name=None):
        """
        Creates a new NooBaa connection to an AWS backend

        Args:
            conn_name (str): The connection name to be used

        Returns:
            bool: True if the connection was created successfully

        """
        if conn_name is None:
            conn_name = create_unique_resource_name('awsconnection', 'mcgconn')

        params = {
            "auth_method": "AWS_V4",
            "endpoint": "https://s3.amazonaws.com",
            "endpoint_type": "AWS",
            "identity": self.aws_access_key_id,
            "name": conn_name,
            "secret": self.aws_access_key
        }

        try:
            for resp in TimeoutSampler(30, 3, self.send_rpc_query,
                                       'account_api',
                                       'add_external_connection', params):
                if 'error' not in resp.text:
                    logger.info(f'Connection {conn_name} created successfully')
                    return True
                else:
                    logger.info('AWS IAM did not yet propagate')
        except TimeoutExpiredError:
            logger.error(f'Could not create connection {conn_name}')
            assert False
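
Several helpers in this collection rely on the same TimeoutSampler retry pattern used above: the sampler repeatedly calls the supplied function with the given arguments and yields each result until the timeout expires, at which point it raises TimeoutExpiredError. A generic sketch of the pattern; the function name and timings are illustrative only:

def wait_until_ready(check_ready):
    """Poll check_ready() every 3 seconds for up to 30 seconds."""
    try:
        for result in TimeoutSampler(30, 3, check_ready):
            if result:
                logger.info('Condition met')
                return True
            logger.info('Condition not met yet, retrying...')
    except TimeoutExpiredError:
        logger.error('Condition was not met before the timeout expired')
        return False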
Example 6
def create_buckets(request, noobaa_obj):
    """
    Creates a bucket for the test and verifies that it was deleted during teardown
    """
    created_buckets = []

    def verify_bucket():
        """
        Verifies that the created buckets no longer exist after deletion

        """
        for bucket in created_buckets:
            logger.info(f"Verifying whether bucket: {bucket.name} exists"
                        f" after deletion")
            assert noobaa_obj.s3_verify_bucket_exists(bucket) is False

    request.addfinalizer(verify_bucket)

    bucket_name = create_unique_resource_name(
        resource_description='bucket', resource_type='s3'
    )
    logger.info(f'Creating bucket: {bucket_name}')
    created_buckets.append(
        noobaa_obj.s3_create_bucket(bucketname=bucket_name)
    )

    return created_buckets
Example 7
    def test_write_file_to_bucket(self, noobaa_obj, awscli_pod, created_buckets, uploaded_objects):
        """
        Test object IO using the S3 SDK
        """
        # Retrieve a list of all objects in the test-objects bucket and download them to the pod
        downloaded_files = []
        public_s3 = boto3.resource('s3', region_name=noobaa_obj.region)
        for obj in public_s3.Bucket(constants.TEST_FILES_BUCKET).objects.all():
            # Download test object(s)
            logger.info(f'Downloading {obj.key}')
            awscli_pod.exec_cmd_on_pod(
                command=f'wget https://{constants.TEST_FILES_BUCKET}.s3.{noobaa_obj.region}.amazonaws.com/{obj.key}'
            )
            downloaded_files.append(obj.key)

        bucketname = create_unique_resource_name(self.__class__.__name__.lower(), 's3-bucket')
        logger.info(f'Creating the test bucket - {bucketname}')
        created_buckets.append(noobaa_obj.s3_create_bucket(bucketname=bucketname))

        # Write all downloaded objects to the new bucket
        logger.info(f'Writing objects to bucket')
        for obj_name in downloaded_files:
            full_object_path = f"s3://{bucketname}/{obj_name}"
            copycommand = f"cp {obj_name} {full_object_path}"
            assert 'Completed' in awscli_pod.exec_cmd_on_pod(
                command=craft_s3_command(noobaa_obj, copycommand), out_yaml_format=False,
                secrets=[noobaa_obj.access_key_id, noobaa_obj.access_key, noobaa_obj.endpoint]
            )
            uploaded_objects.append(full_object_path)
Example 8
    def test_verify_all_fields_in_sc_yaml_with_oc_describe(self, interface):
        """
        Test function to create RBD and CephFS SC, and match with oc describe sc
        output
        """
        log.info(f"Creating a {interface} storage class")
        self.sc_data = templating.load_yaml(
            getattr(constants, f"CSI_{interface}_STORAGECLASS_YAML"))
        self.sc_data['metadata']['name'] = (
            helpers.create_unique_resource_name('test',
                                                f'csi-{interface.lower()}'))
        global SC_OBJ
        SC_OBJ = OCS(**self.sc_data)
        assert SC_OBJ.create()
        log.info(
            f"{interface}Storage class: {SC_OBJ.name} created successfully")
        log.info(self.sc_data)

        # Get oc describe sc output
        describe_out = SC_OBJ.get("sc")
        log.info(describe_out)

        # Confirm that sc yaml details matches oc describe sc output
        value = {
            k: describe_out[k]
            for k in set(describe_out) - set(self.sc_data)
        }
        assert len(value) == 1 and value['volumeBindingMode'] == 'Immediate', (
            "OC describe sc output didn't match storage class yaml")
        log.info("OC describe sc output matches storage class yaml")
        # Delete Storage Class
        log.info(f"Deleting Storageclass: {SC_OBJ.name}")
        assert SC_OBJ.delete()
        log.info(f"Storage Class: {SC_OBJ.name} deleted successfully")
        del SC_OBJ
Example 9
    def request_aws_credentials(self):
        """
        Uses a CredentialsRequest CR to create an AWS IAM user that allows the
        program to interact with S3

        Returns:
            tuple: The CredentialsRequest resource, the AWS access key ID
                and the AWS secret access key
        """
        awscreds_data = templating.load_yaml(constants.MCG_AWS_CREDS_YAML)
        req_name = create_unique_resource_name('awscredreq',
                                               'credentialsrequests')
        awscreds_data['metadata']['name'] = req_name
        awscreds_data['metadata']['namespace'] = self.namespace
        awscreds_data['spec']['secretRef']['name'] = req_name
        awscreds_data['spec']['secretRef']['namespace'] = self.namespace

        creds_request = create_resource(**awscreds_data)
        sleep(5)

        secret_ocp_obj = OCP(kind='secret', namespace=self.namespace)
        cred_req_secret_dict = secret_ocp_obj.get(creds_request.name)

        aws_access_key_id = base64.b64decode(
            cred_req_secret_dict.get('data').get('aws_access_key_id')).decode(
                'utf-8')

        aws_access_key = base64.b64decode(
            cred_req_secret_dict.get('data').get(
                'aws_secret_access_key')).decode('utf-8')

        def _check_aws_credentials():
            try:
                s3_res = boto3.resource(
                    's3',
                    verify=False,
                    endpoint_url="https://s3.amazonaws.com",
                    aws_access_key_id=aws_access_key_id,
                    aws_secret_access_key=aws_access_key)
                test_bucket = s3_res.create_bucket(
                    Bucket=create_unique_resource_name('cred-verify',
                                                       's3-bucket'))
                test_bucket.delete()
                return True

            except ClientError:
                logger.info('Credentials are still not active. Retrying...')
                return False

        try:
            for api_test_result in TimeoutSampler(40, 5,
                                                  _check_aws_credentials):
                if api_test_result:
                    logger.info('AWS credentials created successfully.')
                    break

        except TimeoutExpiredError:
            logger.error('Failed to create credentials')
            assert False

        return creds_request, aws_access_key_id, aws_access_key
Example 10
    def _create_buckets(amount=1, interface='S3', *args, **kwargs):
        """
        Creates buckets; all buckets created through this factory are deleted at the end of the test

        Args:
            amount (int): The amount of buckets to create
            interface (str): The interface to use for creation of buckets. S3 | OC | CLI

        Returns:
            list: A list of s3.Bucket objects, containing all the created buckets

        """
        if interface.lower() not in bucketMap:
            raise RuntimeError(
                f'Invalid interface type received: {interface}. '
                f'available types: {", ".join(bucketMap.keys())}'
            )
        for i in range(amount):
            bucket_name = create_unique_resource_name(
                resource_description='bucket', resource_type=interface.lower()
            )
            created_buckets.append(
                bucketMap[interface.lower()](mcg_obj, bucket_name, *args, **kwargs)
            )
        return created_buckets
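
A minimal usage sketch for this factory; bucket_factory is assumed to be the pytest fixture that exposes _create_buckets (as consumed in the multiregion example below), and the test body is illustrative only:

def test_bucket_creation(bucket_factory):
    # Create three buckets through the OC (ObjectBucketClaim) interface
    buckets = bucket_factory(amount=3, interface='OC')
    for bucket in buckets:
        logger.info(f'Created bucket: {bucket.name}')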
Example 11
def setup(self):
    """
    Create new project
    Create PVCs
    """
    # Create new project
    self.namespace = create_unique_resource_name('test', 'namespace')
    self.project_obj = ocp.OCP(kind='Project', namespace=self.namespace)
    assert self.project_obj.new_project(
        self.namespace), (f'Failed to create new project {self.namespace}')

    # Parameters for PVC yaml as dict
    pvc_data = load_yaml_to_dict(constants.CSI_PVC_YAML)
    pvc_data['metadata']['namespace'] = self.namespace
    pvc_data['spec']['storageClassName'] = self.sc_obj.name
    pvc_data['metadata']['name'] = self.pvc_base_name

    # Create 100 PVCs
    pvc_objs = create_multiple_pvc(self.number_of_pvc, pvc_data)
    log.info(f'Created initial {self.number_of_pvc} PVCs')
    self.pvc_objs_initial = pvc_objs[:]

    # Verify PVCs are Bound and fetch PV names
    for pvc in self.pvc_objs_initial:
        pvc.reload()
        assert pvc.status == constants.STATUS_BOUND, (
            f'PVC {pvc.name} is not Bound')
        self.initial_pvs.append(pvc.backed_pv)
    log.info(f'Initial {self.number_of_pvc} PVCs are in Bound state')
Example 12
    def test_bucket_delete_with_objects(self, mcg_obj, interface, awscli_pod):
        """
        Negative test: deleting a bucket that still has objects stored in it should fail.
        """
        bucket_map = {'s3': S3Bucket, 'oc': OCBucket, 'cli': CLIBucket}
        bucketname = create_unique_resource_name(
            resource_description='bucket', resource_type=interface.lower())
        try:
            bucket = bucket_map[interface.lower()](mcg_obj, bucketname)

            logger.info(f"aws s3 endpoint is {mcg_obj.s3_endpoint}")
            logger.info(f"aws region is {mcg_obj.region}")
            data_dir = '/data'
            full_object_path = f"s3://{bucketname}"
            helpers.retrieve_test_objects_to_pod(awscli_pod, data_dir)
            helpers.sync_object_directory(awscli_pod, data_dir,
                                          full_object_path, mcg_obj)

            logger.info(f"Deleting bucket: {bucketname}")
            if interface == "S3":
                try:
                    s3_del = mcg_obj.s3_resource.Bucket(bucketname).delete()
                    assert not s3_del, (
                        "Unexpected s3 delete non-empty OBC succeed")
                except botocore.exceptions.ClientError as err:
                    assert "BucketNotEmpty" in str(err), (
                        "Couldn't verify delete non-empty OBC with s3")
                    logger.info(
                        f"Delete non-empty OBC {bucketname} failed as expected"
                    )
        finally:
            bucket.delete()
Example 13
 def test_reclaim_policy_retain(self):
     """
     Test to validate storage class with reclaim policy "Retain"
     """
     pvc_count = len(list_ceph_images(pool_name=self.cbp_obj.name))
     pvc_obj = helpers.create_pvc(
         sc_name=self.sc_obj_retain.name,
         pvc_name=helpers.create_unique_resource_name('retain', 'pvc')
     )
     helpers.wait_for_resource_state(pvc_obj, constants.STATUS_BOUND)
     pvc_obj.reload()
     pv_name = pvc_obj.get()['spec']['volumeName']
     pv_namespace = pvc_obj.get()['metadata']['namespace']
     pv_obj = ocp.OCP(kind='PersistentVolume', namespace=pv_namespace)
     assert pvc_obj.delete()
     pvc_obj.ocp.wait_for_delete(resource_name=pvc_obj.name)
     assert pv_obj.get(pv_name).get('status').get('phase') == 'Released', (
         f"Status of PV {pv_obj.get(pv_name)} is not 'Released'"
     )
     log.info("Status of PV is Released")
     assert pvc_count + 1 == len(list_ceph_images(pool_name=self.cbp_obj.name))
     assert pv_obj.delete(resource_name=pv_name)
     assert pv_obj.wait_for_delete(pv_name, 60), (
         f"PV {pv_name} is not deleted"
     )
Example 14
def multiregion_mirror_setup(mcg_obj, multiregion_resources, bucket_factory):
    # Setup
    # Todo:
    #  add region and amount parametrization - note that `us-east-1` will cause an error
    #  as it is the default region. If usage of `us-east-1` needs to be tested, keep the 'region' field out.
    aws_buckets, backingstore_secrets, backingstore_objects, bucketclasses = multiregion_resources
    # Define backing stores
    backingstore1 = {
        'name': create_unique_resource_name(
            resource_description='testbs', resource_type='s3bucket'
        ),
        'region': f'us-west-{randrange(1, 3)}'
    }
    backingstore2 = {
        'name': create_unique_resource_name(
            resource_description='testbs', resource_type='s3bucket'
        ),
        'region': 'us-east-2'
    }
    # Create target buckets for them
    mcg_obj.create_new_backingstore_aws_bucket(backingstore1)
    mcg_obj.create_new_backingstore_aws_bucket(backingstore2)
    aws_buckets.extend((backingstore1['name'], backingstore2['name']))
    # Create a backing store secret
    backingstore_secret = mcg_obj.create_aws_backingstore_secret(
        backingstore1['name'] + 'secret')
    backingstore_secrets.append(backingstore_secret)
    # Create AWS-backed backing stores on NooBaa
    backingstore_obj_1 = mcg_obj.oc_create_aws_backingstore(
        backingstore1['name'], backingstore1['name'], backingstore_secret.name,
        backingstore1['region'])
    backingstore_obj_2 = mcg_obj.oc_create_aws_backingstore(
        backingstore2['name'], backingstore2['name'], backingstore_secret.name,
        backingstore2['region'])
    backingstore_objects.extend((backingstore_obj_1, backingstore_obj_2))
    # Create a new mirror bucketclass that'll use all the backing stores we created
    bucketclass = mcg_obj.oc_create_bucketclass(
        create_unique_resource_name(resource_description='testbc',
                                    resource_type='bucketclass'),
        [backingstore.name for backingstore in backingstore_objects], 'Mirror')
    bucketclasses.append(bucketclass)
    # Create a NooBucket that'll use the bucket class in order to test the mirroring policy
    bucket_name = bucket_factory(1, 'OC', bucketclass=bucketclass.name)[0].name

    return bucket_name, backingstore1, backingstore2
Example 15
    def test_s3_bucket_creation(self, noobaa_obj, created_buckets):
        """
        Test bucket creation using the S3 SDK
        """

        bucketname = create_unique_resource_name(self.__class__.__name__.lower(), 's3-bucket')
        logger.info(f'Creating new bucket - {bucketname}')
        created_buckets.append(noobaa_obj.s3_create_bucket(bucketname=bucketname))
Example 16
 def __init__(self, *args, **kwargs):
     super().__init__(*args, **kwargs)
     obc_data = templating.load_yaml(constants.MCG_OBC_YAML)
     if self.name is None:
         self.name = create_unique_resource_name('oc', 'obc')
     obc_data['metadata']['name'] = self.name
     obc_data['spec']['bucketName'] = self.name
     obc_data['metadata']['namespace'] = self.mcg.namespace
     create_resource(**obc_data)
Example 17
 def __init__(self, *args, **kwargs):
     super().__init__(*args, **kwargs)
     obc_data = templating.load_yaml(constants.MCG_OBC_YAML)
     if self.name is None:
         self.name = create_unique_resource_name('oc', 'obc')
     obc_data['metadata']['name'] = self.name
     obc_data['spec']['bucketName'] = self.name
     obc_data['spec']['storageClassName'] = constants.INDEPENDENT_DEFAULT_STORAGECLASS_RGW
     obc_data['metadata']['namespace'] = self.namespace
     create_resource(**obc_data)
Example 18
def create_pod(request):
    """
    Create a pod
    """
    class_instance = request.node.cls

    pod_data = templating.load_yaml_to_dict(constants.CSI_RBD_POD_YAML)
    pod_data['metadata']['name'] = helpers.create_unique_resource_name(
        'test', 'pod')
    pod_data['metadata']['namespace'] = defaults.ROOK_CLUSTER_NAMESPACE
    pod_data['spec']['volumes'][0]['persistentVolumeClaim'][
        'claimName'] = class_instance.pvc_obj.name
    class_instance.pod_obj = helpers.create_pod(**pod_data)
Example 19
 def test_basics_rbd(self, test_fixture_rbd):
     """
     Testing basics: secret creation,
     storage class creation, pvc and pod with rbd
     """
     global RBD_PVC_OBJ, RBD_POD_OBJ
     log.info('creating pvc for RBD ')
     pvc_name = helpers.create_unique_resource_name('test-rbd', 'pvc')
     RBD_PVC_OBJ = helpers.create_pvc(sc_name=RBD_SC_OBJ.name,
                                      pvc_name=pvc_name)
     if RBD_PVC_OBJ.backed_pv is None:
         RBD_PVC_OBJ.reload()
     RBD_POD_OBJ = helpers.create_pod(
         interface_type=constants.CEPHBLOCKPOOL, pvc_name=RBD_PVC_OBJ.name)
Example 20
 def test_basics_cephfs(self, test_fixture_cephfs):
     """
     Testing basics: secret creation,
      storage class creation, pvc and pod with cephfs
     """
     global CEPHFS_PVC_OBJ, CEPHFS_POD_OBJ
     log.info('creating pvc for CephFS ')
     pvc_name = helpers.create_unique_resource_name('test-cephfs', 'pvc')
     CEPHFS_PVC_OBJ = helpers.create_pvc(sc_name=CEPHFS_SC_OBJ.name,
                                         pvc_name=pvc_name)
     log.info('creating cephfs pod')
     CEPHFS_POD_OBJ = helpers.create_pod(
         interface_type=constants.CEPHFILESYSTEM,
         pvc_name=CEPHFS_PVC_OBJ.name)
Example 21
def setup(self):
    """
    Setting up the environment for the test
    """
    # Create a storage class
    log.info("Creating a Storage Class")
    self.sc_data = templating.load_yaml(constants.CSI_RBD_STORAGECLASS_YAML)
    self.sc_data['metadata']['name'] = helpers.create_unique_resource_name(
        'test', 'csi-rbd')
    global SC_OBJ
    SC_OBJ = OCS(**self.sc_data)
    assert SC_OBJ.create()
    log.info(f"Storage class: {SC_OBJ.name} created successfully")
    log.debug(self.sc_data)
Example 22
 def __init__(self, *args, **kwargs):
     super().__init__(*args, **kwargs)
     obc_data = templating.load_yaml(constants.MCG_OBC_YAML)
     if self.name is None:
         self.name = create_unique_resource_name('oc', 'obc')
     obc_data['metadata']['name'] = self.name
     obc_data['spec']['bucketName'] = self.name
     obc_data['spec']['storageClassName'] = self.namespace + '.noobaa.io'
     obc_data['metadata']['namespace'] = self.namespace
     if 'bucketclass' in kwargs:
         obc_data.setdefault('spec', {}).setdefault(
             'additionalConfig', {}).setdefault('bucketclass',
                                                kwargs['bucketclass'])
     create_resource(**obc_data)
Example 23
def setup_fs(self):
    """
    Setting up the environment for the test
    """
    global CEPH_OBJ
    self.fs_data = defaults.CEPHFILESYSTEM_DICT.copy()
    self.fs_data['metadata']['name'] = helpers.create_unique_resource_name(
        'test', 'cephfs')
    self.fs_data['metadata']['namespace'] = ENV_DATA['cluster_namespace']
    CEPH_OBJ = OCS(**self.fs_data)
    CEPH_OBJ.create()
    assert POD.wait_for_resource(condition='Running',
                                 selector='app=rook-ceph-mds')
    pods = POD.get(selector='app=rook-ceph-mds')['items']
    assert len(pods) == 2
Example 24
 def __init__(self, *args, **kwargs):
     super().__init__(*args, **kwargs)
     obc_data = templating.load_yaml(constants.MCG_OBC_YAML)
     if self.name is None:
         self.name = create_unique_resource_name('oc', 'obc')
     obc_data['metadata']['name'] = self.name
     obc_data['spec']['bucketName'] = self.name
     if storagecluster_independent_check():
         obc_data['spec'][
             'storageClassName'] = constants.DEFAULT_EXTERNAL_MODE_STORAGECLASS_RGW
     else:
         obc_data['spec'][
             'storageClassName'] = constants.DEFAULT_STORAGECLASS_RGW
     obc_data['metadata']['namespace'] = self.namespace
     create_resource(**obc_data)
Example 25
    def test_storageclass_invalid(self, invalid_storageclass):
        """
        Test that a Persistent Volume Claim cannot be created from a
        misconfigured CephFS Storage Class.
        """
        pvc_data = templating.load_yaml_to_dict(constants.CSI_PVC_YAML)
        pvc_name = helpers.create_unique_resource_name('test', 'pvc')
        pvc_data['metadata']['name'] = pvc_name
        pvc_data['metadata']['namespace'] = defaults.ROOK_CLUSTER_NAMESPACE
        pvc_data['spec']['storageClassName'] = invalid_storageclass[
            'metadata']['name']
        logger.info(
            f"Create PVC {pvc_name} "
            f"with storageClassName "
            f"{invalid_storageclass['metadata']['name']}"
        )
        pvc = PVC(**pvc_data)
        pvc.create()

        pvc_status = pvc.status
        logger.debug(f"Status of PVC {pvc_name} after creation: {pvc_status}")
        assert pvc_status == constants.STATUS_PENDING

        logger.info(
            f"Waiting for status '{constants.STATUS_BOUND}' "
            f"for 60 seconds (it shouldn't change)"
        )
        with pytest.raises(TimeoutExpiredError):
            # raising TimeoutExpiredError is expected behavior
            pvc_status_changed = pvc.ocp.wait_for_resource(
                resource_name=pvc_name,
                condition=constants.STATUS_BOUND,
                timeout=60,
                sleep=20
            )
            logger.debug('Check that PVC status did not change')
            assert not pvc_status_changed

        pvc_status = pvc.status
        logger.info(f"Status of PVC {pvc_name} after 60 seconds: {pvc_status}")
        assert_msg = (
            f"PVC {pvc_name} hasn't reached status "
            f"{constants.STATUS_PENDING}"
        )
        assert pvc_status == constants.STATUS_PENDING, assert_msg

        logger.info(f"Deleting PVC {pvc_name}")
        pvc.delete()
Example 26
        def _check_aws_credentials():
            try:
                s3_res = boto3.resource(
                    's3', verify=False, endpoint_url="https://s3.amazonaws.com",
                    aws_access_key_id=aws_access_key_id,
                    aws_secret_access_key=aws_access_key
                )
                test_bucket = s3_res.create_bucket(
                    Bucket=create_unique_resource_name('cred-verify', 's3-bucket')
                )
                test_bucket.delete()
                return True

            except ClientError:
                logger.info('Credentials are still not active. Retrying...')
                return False
Example 27
    def _create_buckets(amount=1):
        """
        Creates buckets; all buckets created through this factory are deleted at the end of the test

        Args:
            amount (int): The amount of buckets to create

        Returns:
            list: A list of s3.Bucket objects, containing all the created buckets

        """
        for i in range(amount):
            bucket_name = create_unique_resource_name(
                resource_description='bucket', resource_type='s3')
            logger.info(f'Creating bucket: {bucket_name}')
            created_bucket_names.append(
                mcg_obj.s3_create_bucket(bucketname=bucket_name))
        return created_bucket_names
Example 28
 def test_reclaim_policy_delete(self):
     """
     Test to validate storage class with reclaim policy "Delete"
     """
     pvc_obj = helpers.create_pvc(
         sc_name=self.sc_obj_delete.name,
         pvc_name=helpers.create_unique_resource_name('delete', 'pvc'))
     pv_name = pvc_obj.get()['spec']['volumeName']
     pv_namespace = pvc_obj.get()['metadata']['namespace']
     pv_obj = ocp.OCP(kind='PersistentVolume', namespace=pv_namespace)
     assert pvc_obj.delete()
     pvc_obj.ocp.wait_for_delete(resource_name=pvc_obj.name)
     try:
         pv_obj.get(pv_name)
     except CommandFailed as ex:
         assert f'persistentvolumes "{pv_name}" not found' in str(ex),\
             "pv exists"
         log.info("Underlying PV is deleted")
Example 29
 def test_reclaim_policy_delete(self):
     """
     Test to validate storage class with reclaim policy "Delete"
     """
     pvc_obj = helpers.create_pvc(
         sc_name=self.sc_obj_delete.name,
         pvc_name=helpers.create_unique_resource_name('delete', 'pvc')
     )
     helpers.wait_for_resource_state(pvc_obj, constants.STATUS_BOUND)
     pvc_obj.reload()
     pv_name = pvc_obj.get()['spec']['volumeName']
     pv_namespace = pvc_obj.get()['metadata']['namespace']
     pv_obj = ocp.OCP(kind='PersistentVolume', namespace=pv_namespace)
     assert pvc_obj.delete()
     pvc_obj.ocp.wait_for_delete(resource_name=pvc_obj.name)
     assert pv_obj.wait_for_delete(pv_name, 60), (
         f"PV {pv_name} is not deleted"
     )
Example 30
    def create_new_backingstore_aws_bucket(self, backingstore_info):
        """
        Creates an S3 target bucket for NooBaa to use as a backing store

        Args:
            backingstore_info (dict): A dictionary containing the backing
                store information to be used in its creation.

        """
        if backingstore_info.get('name') is None:
            backingstore_info['name'] = create_unique_resource_name('backingstorebucket', 'awsbucket')

        if backingstore_info.get('region') is None:
            self.aws_s3_resource.create_bucket(Bucket=backingstore_info['name'])
        else:
            self.aws_s3_resource.create_bucket(
                Bucket=backingstore_info['name'],
                CreateBucketConfiguration={
                    'LocationConstraint': backingstore_info['region']
                }
            )
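
A minimal call sketch; mcg_obj is assumed to be an MCG instance (as in the earlier examples) and the region value is a placeholder. Omitting the 'region' key creates the bucket in the default region, per the branch above:

backingstore_info = {
    'name': create_unique_resource_name(
        resource_description='testbs', resource_type='s3bucket'
    ),
    'region': 'us-west-2',
}
mcg_obj.create_new_backingstore_aws_bucket(backingstore_info)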