def test_basics_rbd(self, test_fixture):
     """
     Testing basics: secret creation,
     storage class creation and pvc with cephfs

     NOTE(review): despite the ``rbd`` suffix in the test name, the body
     exercises the CephFS secret/storage-class/PVC flow -- confirm intent.
     """
     # Build the CephFS secret from the template: drop the per-user
     # credentials and inject the admin ones from the ceph tools pod
     self.cephfs_secret = templating.load_yaml_to_dict(
         constants.CSI_CEPHFS_SECRET_YAML)
     del self.cephfs_secret['data']['userID']
     del self.cephfs_secret['data']['userKey']
     self.cephfs_secret['data']['adminKey'] = (
         get_admin_key_from_ceph_tools())
     self.cephfs_secret['data']['adminID'] = constants.ADMIN_BASE64
     logging.info(self.cephfs_secret)
     secret = OCS(**self.cephfs_secret)
     secret.create()
     # Storage class pointed at the cluster monitors and the data pool
     # of the filesystem created by the fixture
     self.cephfs_sc = templating.load_yaml_to_dict(
         constants.CSI_CEPHFS_STORAGECLASS_YAML)
     self.cephfs_sc['parameters']['monitors'] = self.mons
     self.cephfs_sc['parameters']['pool'] = (
         f"{self.fs_data['metadata']['name']}-data0")
     storage_class = OCS(**self.cephfs_sc)
     storage_class.create()
     # Create a PVC from the template and verify it binds
     self.cephfs_pvc = templating.load_yaml_to_dict(
         constants.CSI_CEPHFS_PVC_YAML)
     pvc = PVC(**self.cephfs_pvc)
     pvc.create()
     log.info(pvc.status)
     assert 'Bound' in pvc.status
     # Teardown in reverse order of creation
     pvc.delete()
     storage_class.delete()
     secret.delete()
# Example #2
def create_secret(interface_type):
    """
    Create a secret

    Args:
        interface_type (str): The type of the interface
            (e.g. CephBlockPool, CephFileSystem)

    Returns:
        OCS: An OCS instance for the secret

    Raises:
        ValueError: If an unsupported interface type is given
    """
    if interface_type == constants.CEPHBLOCKPOOL:
        secret_data = templating.load_yaml_to_dict(
            constants.CSI_RBD_SECRET_YAML)
        secret_data['stringData']['userID'] = constants.ADMIN_USER
        secret_data['stringData']['userKey'] = get_admin_key()
        interface = constants.RBD_INTERFACE
    elif interface_type == constants.CEPHFILESYSTEM:
        secret_data = templating.load_yaml_to_dict(
            constants.CSI_CEPHFS_SECRET_YAML)
        # CephFS CSI uses admin credentials instead of the per-user
        # ones shipped in the template
        del secret_data['stringData']['userID']
        del secret_data['stringData']['userKey']
        secret_data['stringData']['adminID'] = constants.ADMIN_USER
        secret_data['stringData']['adminKey'] = get_admin_key()
        interface = constants.CEPHFS_INTERFACE
    else:
        # Fail fast: previously an unknown type fell through and raised
        # a cryptic NameError on `interface` below
        raise ValueError(
            f"Unsupported interface type: {interface_type}"
        )
    secret_data['metadata']['name'] = create_unique_resource_name(
        f'test-{interface}', 'secret')
    secret_data['metadata']['namespace'] = defaults.ROOK_CLUSTER_NAMESPACE

    return create_resource(**secret_data, wait=False)
 def test_basics_cephfs(self):
     """
     Testing basics: secret creation,
      storage class creation  and pvc with rbd

     NOTE(review): despite the ``cephfs`` suffix in the test name, the
     body exercises the RBD secret/storage-class/PVC flow -- confirm.
     """
     # RBD secret: drop the template's kubernetes key and inject the
     # admin key fetched from the ceph tools pod
     self.rbd_secret = templating.load_yaml_to_dict(
         constants.CSI_RBD_SECRET_YAML)
     del self.rbd_secret['data']['kubernetes']
     self.rbd_secret['data']['admin'] = get_admin_key_from_ceph_tools()
     logging.info(self.rbd_secret)
     secret = OCS(**self.rbd_secret)
     secret.create()
     # Storage class pointed at the cluster monitors; the template's
     # userid parameter is removed
     self.rbd_sc = templating.load_yaml_to_dict(
         constants.CSI_RBD_STORAGECLASS_YAML)
     self.rbd_sc['parameters']['monitors'] = self.mons
     del self.rbd_sc['parameters']['userid']
     storage_class = OCS(**self.rbd_sc)
     storage_class.create()
     # Create a PVC from the template and verify it binds
     self.rbd_pvc = templating.load_yaml_to_dict(constants.CSI_RBD_PVC_YAML)
     pvc = PVC(**self.rbd_pvc)
     pvc.create()
     assert 'Bound' in pvc.status
     # Teardown in reverse order of creation
     pvc.delete()
     storage_class.delete()
     secret.delete()
# Example #4
 def setup_postgresql(self):
     """
     Deploy postgres sql server

     Loads the Service, ConfigMap and StatefulSet from the project
     YAML templates, creates them, and waits for the postgres pod to
     reach Running. Any creation/wait failure is logged and re-raised.
     """
     try:
         pgsql_service = templating.load_yaml_to_dict(
             constants.PGSQL_SERVICE_YAML
         )
         pgsql_cmap = templating.load_yaml_to_dict(
             constants.PGSQL_CONFIGMAP_YAML
         )
         pgsql_sset = templating.load_yaml_to_dict(
             constants.PGSQL_STATEFULSET_YAML
         )
         self.pgsql_service = OCS(**pgsql_service)
         self.pgsql_service.create()
         self.pgsql_cmap = OCS(**pgsql_cmap)
         self.pgsql_cmap.create()
         self.pgsql_sset = OCS(**pgsql_sset)
         self.pgsql_sset.create()
         # Postgres pods carry the app=postgres label
         self.pod_obj.wait_for_resource(
             condition='Running',
             selector='app=postgres',
             timeout=120
         )
     except (CommandFailed, CalledProcessError) as cf:
         log.error('Failed during setup of PostgreSQL server')
         raise cf
     # Flag consumed elsewhere to know the server is up
     self.pgsql_is_setup = True
# Example #5
def test_yaml_to_dict():
    """Sanity-check load_yaml_to_dict against known YAML fixtures."""
    fs_dict = templating.load_yaml_to_dict(constants.CEPHFILESYSTEM_YAML)
    assert fs_dict['apiVersion'] == 'ceph.rook.io/v1'
    fs_dict = templating.load_yaml_to_dict(constants.CEPHFILESYSTEM_YAML)
    assert fs_dict['spec']['metadataPool']['replicated']['size'] == 3
    pvc_dict = templating.load_yaml_to_dict(constants.CSI_PVC_YAML)
    assert pvc_dict['spec']['accessModes'] == ['ReadWriteOnce']
# Example #6
# File: pod.py Project: jhutar/ocs-ci
    def run_io(self,
               storage_type,
               size,
               io_direction='rw',
               rw_ratio=75,
               jobs=1,
               runtime=60,
               depth=4,
               fio_filename=None):
        """
        Execute FIO on a pod
        This operation will run in background and will store the results in
        'self.thread.result()'.
        In order to wait for the output and not continue with the test until
        FIO is done, call self.thread.result() right after calling run_io.
        See tests/manage/test_pvc_deletion_during_io.py::test_run_io
        for usage of FIO

        Args:
            storage_type (str): 'fs' or 'block'
            size (str): Size in MB, e.g. '200M'
            io_direction (str): Determines the operation:
                'ro', 'wo', 'rw' (default: 'rw')
            rw_ratio (int): Determines the reads and writes using a
                <rw_ratio>%/100-<rw_ratio>%
                (e.g. the default is 75 which means it is 75%/25% which
                equivalent to 3 reads are performed for every 1 write)
            jobs (int): Number of jobs to execute FIO
            runtime (int): Number of seconds IO should run for
            depth (int): IO depth
            fio_filename(str): Name of fio file created on app pod's mount point
        """
        name = 'test_workload'
        spec = self.pod_data.get('spec')
        # Mount path of the pod's first volume -- FIO writes there
        path = (
            spec.get('containers')[0].get('volumeMounts')[0].get('mountPath'))
        work_load = 'fio'
        # few io parameters for Fio

        wl = workload.WorkLoad(name, path, work_load, storage_type, self, jobs)
        assert wl.setup(), "Setup up for FIO failed"
        # 'rw' workloads take a read/write mix; others use plain params
        if io_direction == 'rw':
            io_params = templating.load_yaml_to_dict(
                constants.FIO_IO_RW_PARAMS_YAML)
            io_params['rwmixread'] = rw_ratio
        else:
            io_params = templating.load_yaml_to_dict(
                constants.FIO_IO_PARAMS_YAML)
        io_params['runtime'] = runtime
        io_params['size'] = size
        if fio_filename:
            io_params['filename'] = fio_filename
        io_params['iodepth'] = depth

        # Runs in the background; result retrieved via self.fio_thread
        self.fio_thread = wl.run(**io_params)
# Example #7
    def run_io(self,
               storage_type,
               size,
               io_direction='rw',
               rw_ratio=75,
               jobs=1,
               runtime=60,
               depth=4,
               fio_filename=None):
        """
        Execute FIO on a pod
        This operation will run in background and will store the results in
        'self.thread.result()'.
        In order to wait for the output and not continue with the test until
        FIO is done, call self.thread.result() right after calling run_io.
        See tests/manage/test_pvc_deletion_during_io.py::test_run_io
        for usage of FIO

        Args:
            storage_type (str): 'fs' or 'block'
            size (str): Size in MB, e.g. '200M'
            io_direction (str): Determines the operation:
                'ro', 'wo', 'rw' (default: 'rw')
            rw_ratio (int): Determines the reads and writes using a
                <rw_ratio>%/100-<rw_ratio>%
                (e.g. the default is 75 which means it is 75%/25% which
                equivalent to 3 reads are performed for every 1 write)
            jobs (int): Number of jobs to execute FIO
            runtime (int): Number of seconds IO should run for
            depth (int): IO depth
            fio_filename(str): Name of fio file created on app pod's mount point
        """
        # Lazily set up the workload object the first time run_io is called
        if not self.wl_setup_done:
            self.workload_setup(storage_type=storage_type, jobs=jobs)

        # 'rw' workloads take a read/write mix; others use plain params
        if io_direction == 'rw':
            self.io_params = templating.load_yaml_to_dict(
                constants.FIO_IO_RW_PARAMS_YAML)
            self.io_params['rwmixread'] = rw_ratio
        else:
            self.io_params = templating.load_yaml_to_dict(
                constants.FIO_IO_PARAMS_YAML)
        self.io_params['runtime'] = runtime
        self.io_params['size'] = size
        if fio_filename:
            self.io_params['filename'] = fio_filename
        self.io_params['iodepth'] = depth
        # Runs in the background; result retrieved via self.fio_thread
        self.fio_thread = self.wl_obj.run(**self.io_params)
# Example #8
def create_multiple_pvc(number_of_pvc=1, pvc_data=None):
    """
    Create one or more PVC

    Args:
        number_of_pvc (int): Number of PVCs to be created
        pvc_data (dict): Parameters for PVC yaml; used as a template and
            NOT mutated by this function

    Returns:
         list: List of PVC objects
    """
    import copy

    if pvc_data is None:
        pvc_data = templating.load_yaml_to_dict(constants.CSI_PVC_YAML)
    pvc_objs = []
    pvc_base_name = pvc_data['metadata']['name']

    for count in range(1, number_of_pvc + 1):
        # Deep-copy per PVC: previously every PVC object shared the same
        # nested metadata/spec dicts (so later mutations leaked into all
        # of them) and the caller's pvc_data was mutated as a side effect.
        pvc_data_copy = copy.deepcopy(pvc_data)
        if number_of_pvc != 1:
            pvc_data_copy['metadata']['name'] = f'{pvc_base_name}{count}'
        pvc_name = pvc_data_copy['metadata']['name']
        log.info(f'Creating Persistent Volume Claim {pvc_name}')
        pvc_obj = PVC(**pvc_data_copy)
        pvc_obj.create()
        pvc_objs.append(pvc_obj)
        log.info(f'Created Persistent Volume Claim {pvc_name}')
    return pvc_objs
# Example #9
def create_pvc_invalid_size(pvcsize):
    """
    Creates a pvc with an user provided data

    Args:
        pvcsize (str): Size of the pvc to be created

    Returns:
        None

    Raises:
        AssertionError: If PVC creation fails for a reason other than
            the expected invalid-size validation error
    """
    pvc_data = templating.load_yaml_to_dict(constants.CSI_PVC_YAML)
    pvc_data['metadata']['name'] = "auto"
    pvc_data['spec']['resources']['requests']['storage'] = pvcsize
    pvc_data['spec']['storageClassName'] = SC_OBJ.name
    pvc_obj = PVC(**pvc_data)
    log.info(f"Creating a PVC with size {pvcsize}")
    try:
        pvc_obj.create()
    except CommandFailed as ex:
        # API-server validation message for malformed resource quantities
        error = ("quantities must match the regular expression '^([+-]?[0-9.]"
                 "+)([eEinumkKMGTP]*[-+]?[0-9]*)$'")
        if error in str(ex):
            log.info(f"PVC creation failed with error \n {ex} \n as "
                     "invalid pvc size is provided. EXPECTED")
        else:
            # Was `assert ("...")` -- a non-empty string is always truthy,
            # so unexpected failures were silently swallowed. Fail loudly.
            raise AssertionError(
                "PVC creation with invalid size succeeded : NOT expected"
            )
def setup(self):
    """
    Create new project
    Create PVCs
    """
    # Create new project
    self.namespace = create_unique_resource_name('test', 'namespace')
    self.project_obj = ocp.OCP(kind='Project', namespace=self.namespace)
    assert self.project_obj.new_project(self.namespace), (
        f'Failed to create new project {self.namespace}'
    )

    # Build the PVC parameters dict once and reuse it for every PVC
    pvc_data = load_yaml_to_dict(constants.CSI_PVC_YAML)
    pvc_data['metadata']['name'] = self.pvc_base_name
    pvc_data['metadata']['namespace'] = self.namespace
    pvc_data['spec']['storageClassName'] = self.sc_obj.name

    # Create the initial batch of PVCs
    created_pvcs = create_multiple_pvc(self.number_of_pvc, pvc_data)
    log.info(f'Created initial {self.number_of_pvc} PVCs')
    self.pvc_objs_initial = list(created_pvcs)

    # Every PVC must be Bound; remember each backing PV for later checks
    for pvc in self.pvc_objs_initial:
        pvc.reload()
        assert pvc.status == constants.STATUS_BOUND, (
            f'PVC {pvc.name} is not Bound')
        self.initial_pvs.append(pvc.backed_pv)
    log.info(f'Initial {self.number_of_pvc} PVCs are in Bound state')
def create_pvc_and_verify_pvc_exists(
    sc_name, cbp_name, desired_status=constants.STATUS_BOUND, wait=True
):
    """
    Create pvc, verify pvc is bound in state and
    pvc exists on ceph side

    Args:
        sc_name (str): Storage class name the PVC is created from
        cbp_name (str): Ceph block pool backing the storage class
            (currently unused -- kept for the commented-out backend check)
        desired_status (str): Status to wait for when ``wait`` is True
        wait (bool): Whether to wait for ``desired_status``

    Returns:
        PVC: The created (and reloaded) PVC object
    """

    pvc_data = templating.load_yaml_to_dict(constants.CSI_PVC_YAML)
    pvc_data['metadata']['name'] = helpers.create_unique_resource_name(
        'test', 'pvc'
    )
    pvc_data['spec']['storageClassName'] = sc_name
    pvc_data['spec']['resources']['requests']['storage'] = "10Gi"
    pvc_obj = pvc.PVC(**pvc_data)
    pvc_obj.create()
    if wait:
        # The status part used to be a dangling f-string statement after
        # the assert, so it never appeared in the failure message.
        assert pvc_obj.ocp.wait_for_resource(
            condition=desired_status, resource_name=pvc_obj.name
        ), (
            f"{pvc_obj.kind} {pvc_obj.name} failed to reach "
            f"status {desired_status}"
        )
    pvc_obj.reload()

    # ToDo: Add validation to check pv exists on backend
    # Commenting the below code: https://bugzilla.redhat.com/show_bug.cgi?id=1723656
    # Validate pv is created on ceph
    # logger.info(f"Verifying pv exists on backend")
    # ct_pod = pod.get_ceph_tools_pod()
    # pv_list = ct_pod.exec_ceph_cmd(
    #     ceph_cmd=f"rbd ls -p {cbp_name}", format='json'
    # )
    # _rc = pvc_obj.backed_pv in pv_list
    # assert _rc, f"pv doesn't exist on backend"
    # logger.info(f"pv {pvc_obj.backed_pv} exists on backend")
    return pvc_obj
 def test_pvc_delete_and_verify_size_is_returned_to_backend_pool(self):
     """
     Test case to verify after delete pvc size returned to backend pools
     """
     used_before_creating_pvc = check_ceph_used_space()
     logger.info(f"Used before creating pvc {used_before_creating_pvc}")
     pvc_obj = create_pvc_and_verify_pvc_exists(
         self.sc_obj.name, self.cbp_obj.name
     )
     # Attach a pod to the PVC and fill it with IO
     pod_data = templating.load_yaml_to_dict(constants.CSI_RBD_POD_YAML)
     pod_data['spec']['volumes'][0]['persistentVolumeClaim'][
         'claimName'
     ] = pvc_obj.name
     pod_obj = helpers.create_pod(**pod_data)
     used_percentage = pod.run_io_and_verify_mount_point(pod_obj)
     # NOTE(review): this is a lexicographic *string* comparison (e.g.
     # '95%' > '90%' works, but '100%' < '90%') -- assumes the helper
     # returns a two-digit percentage string; confirm and consider
     # comparing numerically instead.
     assert used_percentage > '90%', "I/O's didn't run completely"
     used_after_creating_pvc = check_ceph_used_space()
     logger.info(f"Used after creating pvc {used_after_creating_pvc}")
     assert used_before_creating_pvc < used_after_creating_pvc
     # Delete the pod and PVC, then confirm the space was reclaimed
     pod_obj.delete()
     pvc_obj.delete()
     verify_pv_not_exists(pvc_obj.backed_pv, self.cbp_obj.name)
     used_after_deleting_pvc = check_ceph_used_space()
     logger.info(f"Used after deleting pvc {used_after_deleting_pvc}")
     assert used_after_deleting_pvc < used_after_creating_pvc
     # Usage should return to (approximately) the pre-PVC baseline
     assert (abs(
         used_after_deleting_pvc - used_before_creating_pvc) < 0.2)
# Example #13
def create_cephfilesystem():
    """
    Function for deploying CephFileSystem (MDS)

    Returns:
        bool: True if CephFileSystem creates successful
    """
    global CEPHFS_OBJ

    fs_data = templating.load_yaml_to_dict(constants.CEPHFILESYSTEM_YAML)
    fs_data['metadata']['name'] = create_unique_resource_name(
        'test', 'cephfs'
    )
    fs_data['metadata']['namespace'] = config.ENV_DATA['cluster_namespace']
    CEPHFS_OBJ = OCS(**fs_data)
    CEPHFS_OBJ.create()

    # Wait for the MDS pods spawned by the filesystem to reach Running
    all_pods = pod.get_all_pods(namespace=defaults.ROOK_CLUSTER_NAMESPACE)
    for pod_obj in all_pods:
        if 'rook-ceph-mds' in pod_obj.labels.values():
            assert pod_obj.ocp.wait_for_resource(
                condition=constants.STATUS_RUNNING,
                selector='app=rook-ceph-mds'
            )
    assert validate_cephfilesystem(fs_name=fs_data['metadata']['name'])
    return True
# Example #14
    def test_fio_with_block_storage(self):
        """
        Run an FIO workload against the test pod and wait for completion.
        """
        # Resolve the mount path of the first volume in the pod spec
        containers = self.pod_obj.data.get('spec').get('containers')
        mount_path = containers[0].get('volumeMounts')[0].get('mountPath')

        # FIO parameters
        fio_runtime = 10
        fio_size = '200M'

        wl = workload.WorkLoad(
            'test_workload', mount_path, 'fio', 'fs', self.pod_obj)
        assert wl.setup()

        io_params = templating.load_yaml_to_dict(constants.FIO_IO_PARAMS_YAML)
        io_params['runtime'] = fio_runtime
        io_params['size'] = fio_size

        future_result = wl.run(**io_params)

        # Poll until the background FIO job completes (up to 20 minutes)
        sample = TimeoutSampler(timeout=1200,
                                sleep=3,
                                func=future_result.done)
        assert sample.wait_for_func_status(result=True)

        try:
            logger.info(future_result.result())
        except exceptions.CommandFailed:
            logger.exception(f"FIO failed")
            raise
        except Exception:
            logger.exception(f"Found Exception")
            raise
# Example #15
def create_pvc_invalid_name(pvcname):
    """
    Creates a pvc with an user provided data

    Args:
        pvcname (str): Name of the pvc to be created

    Returns:
        None

    Raises:
        AssertionError: If PVC creation fails for a reason other than
            the expected invalid-name validation error
    """
    pvc_data = templating.load_yaml_to_dict(constants.CSI_PVC_YAML)
    pvc_data['metadata']['name'] = pvcname
    pvc_data['spec']['storageClassName'] = SC_OBJ.name
    pvc_obj = PVC(**pvc_data)
    log.info(f"Creating a pvc with name {pvcname}")
    try:
        pvc_obj.create()
    except CommandFailed as ex:
        # API-server validation message for malformed resource names
        error = ("subdomain must consist of lower case alphanumeric "
                 "characters, '-' or '.', and must start and end with "
                 "an alphanumeric character")
        if error in str(ex):
            log.info(f"PVC creation failed with error \n {ex} \n as "
                     "invalid pvc name is provided. EXPECTED")
        else:
            # Was `assert ("...")` -- a non-empty string is always truthy,
            # so unexpected failures were silently swallowed. Fail loudly.
            raise AssertionError(
                "PVC creation with invalid name succeeded : NOT expected"
            )
    def test_multiple_pvc_concurrent_creation_deletion(self):
        """
        To exercise resource creation and deletion

        Deletes the initial PVCs asynchronously while creating a new
        batch, then verifies the new batch is Bound, the old batch (and
        its PVs) are gone.
        """
        # Start deleting 100 PVCs
        command = (f'for i in `seq 1 {self.number_of_pvc}`;do oc delete pvc '
                   f'{self.pvc_base_name}$i -n {self.namespace};done')
        proc = run_async(command)
        assert proc, (
            f'Failed to execute command for deleting {self.number_of_pvc} PVCs'
        )

        # Create 100 new PVCs
        # Parameters for PVC yaml as dict
        pvc_data = load_yaml_to_dict(constants.CSI_PVC_YAML)
        pvc_data['metadata']['namespace'] = self.namespace
        pvc_data['spec']['storageClassName'] = self.sc_obj.name
        pvc_data['metadata']['name'] = self.pvc_base_name_new

        # Create 100 PVCs
        pvc_objs = create_multiple_pvc(self.number_of_pvc, pvc_data)

        log.info(f'Created {self.number_of_pvc} new PVCs.')
        self.pvc_objs_new = pvc_objs[:]

        # Verify PVCs are Bound
        for pvc in self.pvc_objs_new:
            pvc.reload()
            assert pvc.status == constants.STATUS_BOUND, (
                f'PVC {pvc.name} is not Bound')
        log.info('Verified: Newly created PVCs are in Bound state.')

        # Verify command to delete PVCs
        ret, out, err = proc.async_communicate()
        log.info(
            f'Return values of command: {command}.\nretcode:{ret}\nstdout:'
            f'{out}\nstderr:{err}')
        assert not ret, 'Deletion of PVCs failed'

        # Verify PVCs are deleted
        for pvc in self.pvc_objs_initial:
            try:
                pvc.get()
            except exceptions.CommandFailed as exp:
                assert "not found" in str(exp), (
                    f'Failed to fetch details of PVC {pvc.name}')
                log.info(f'Expected: PVC {pvc.name} does not exists ')
            else:
                # Was `return False`: pytest ignores return values, so a
                # still-existing PVC silently PASSED the test. Fail loudly.
                raise AssertionError(
                    f'PVC {pvc.name} still exists after deletion'
                )
        log.info(f'Successfully deleted initial {self.number_of_pvc} PVCs')

        # Verify PVs using ceph toolbox. PVs should be deleted because
        # reclaimPolicy is Delete
        ceph_cmd = f'rbd ls -p {self.cbp_obj.name}'
        ct_pod = get_ceph_tools_pod()
        final_pv_list = ct_pod.exec_ceph_cmd(ceph_cmd=ceph_cmd, format='json')
        assert not any(pv in final_pv_list for pv in self.initial_pvs), (
            'PVs associated with deleted PVCs still exists')
        log.info('Verified: PVs associated with deleted PVCs are also deleted')
# Example #17
def create_pod(interface_type=None,
               pvc_name=None,
               desired_status=constants.STATUS_RUNNING,
               wait=True,
               namespace=defaults.ROOK_CLUSTER_NAMESPACE,
               node_name=None,
               pod_dict_path=None):
    """
    Create a pod

    Args:
        interface_type (str): The interface type (CephFS, RBD, etc.)
        pvc_name (str): The PVC that should be attached to the newly created pod
        desired_status (str): The status of the pod to wait for
        wait (bool): True for waiting for the pod to reach the desired
            status, False otherwise
        namespace (str): The namespace for the new resource creation
        node_name (str): The name of specific node to schedule the pod
        pod_dict_path (str): YAML path for the pod

    Returns:
        Pod: A Pod instance

    Raises:
        AssertionError: In case of any failure
    """
    # Pick the pod template by interface; an explicit pod_dict_path wins.
    # Note: anything other than CEPHBLOCKPOOL (including None) falls
    # through to the CephFS template.
    if interface_type == constants.CEPHBLOCKPOOL:
        pod_dict = pod_dict_path if pod_dict_path else constants.CSI_RBD_POD_YAML
        interface = constants.RBD_INTERFACE
    else:
        pod_dict = pod_dict_path if pod_dict_path else constants.CSI_CEPHFS_POD_YAML
        interface = constants.CEPHFS_INTERFACE

    pod_data = templating.load_yaml_to_dict(pod_dict)
    pod_data['metadata']['name'] = create_unique_resource_name(
        f'test-{interface}', 'pod')
    pod_data['metadata']['namespace'] = namespace
    if pvc_name:
        # Attach the given PVC to the pod's first volume
        pod_data['spec']['volumes'][0]['persistentVolumeClaim'][
            'claimName'] = pvc_name

    if node_name:
        pod_data['spec']['nodeName'] = node_name
    else:
        # Strip any nodeName shipped in the template so the scheduler
        # is free to place the pod
        if 'nodeName' in pod_data.get('spec'):
            del pod_data['spec']['nodeName']

    pod_obj = pod.Pod(**pod_data)
    pod_name = pod_data.get('metadata').get('name')
    created_resource = pod_obj.create(do_reload=wait)
    assert created_resource, (f"Failed to create resource {pod_name}")
    if wait:
        assert wait_for_resource_state(resource=pod_obj,
                                       state=desired_status,
                                       timeout=120)

    return pod_obj
# Example #18
def create_pod(request):
    """
    Create a pod
    """
    class_instance = request.node.cls

    pod_dict = templating.load_yaml_to_dict(constants.CSI_RBD_POD_YAML)
    metadata = pod_dict['metadata']
    metadata['name'] = helpers.create_unique_resource_name('test', 'pod')
    metadata['namespace'] = defaults.ROOK_CLUSTER_NAMESPACE
    # Attach the class-level PVC to the pod's first volume
    volume_claim = pod_dict['spec']['volumes'][0]['persistentVolumeClaim']
    volume_claim['claimName'] = class_instance.pvc_obj.name
    class_instance.pod_obj = helpers.create_pod(**pod_dict)
# Example #19
def setup(self):
    """
    Create the RBD storage class used by the tests in this module.

    The resulting OCS object is published through the module-level
    ``SC_OBJ``; the raw dict is kept on ``self.sc_data``.
    """
    global SC_OBJ

    log.info("Creating a Storage Class")
    sc_dict = templating.load_yaml_to_dict(
        constants.CSI_RBD_STORAGECLASS_YAML)
    sc_dict['metadata']['name'] = helpers.create_unique_resource_name(
        'test', 'csi-rbd')
    self.sc_data = sc_dict

    SC_OBJ = OCS(**sc_dict)
    assert SC_OBJ.create()
    log.info(f"Storage class: {SC_OBJ.name} created successfully")
    log.debug(sc_dict)
    def test_storageclass_invalid(self, invalid_storageclass):
        """
        Test that Persistent Volume Claim can not be created from misconfigured
        CephFS Storage Class.
        """
        # PVC pointed at the (deliberately broken) storage class
        pvc_data = templating.load_yaml_to_dict(constants.CSI_PVC_YAML)
        pvc_name = helpers.create_unique_resource_name('test', 'pvc')
        pvc_data['metadata']['name'] = pvc_name
        pvc_data['metadata']['namespace'] = defaults.ROOK_CLUSTER_NAMESPACE
        pvc_data['spec']['storageClassName'] = invalid_storageclass[
            'metadata']['name']
        logger.info(
            f"Create PVC {pvc_name} "
            f"with storageClassName "
            f"{invalid_storageclass['metadata']['name']}"
        )
        pvc = PVC(**pvc_data)
        pvc.create()

        pvc_status = pvc.status
        logger.debug(f"Status of PVC {pvc_name} after creation: {pvc_status}")
        assert pvc_status == constants.STATUS_PENDING

        logger.info(
            f"Waiting for status '{constants.STATUS_BOUND}' "
            f"for 60 seconds (it shouldn't change)"
        )
        with pytest.raises(TimeoutExpiredError):
            # raising TimeoutExpiredError is expected behavior
            # NOTE(review): if wait_for_resource raises as expected, the
            # debug log and assert below never execute (dead code inside
            # the `with` block) -- confirm this is intentional.
            pvc_status_changed = pvc.ocp.wait_for_resource(
                resource_name=pvc_name,
                condition=constants.STATUS_BOUND,
                timeout=60,
                sleep=20
            )
            logger.debug('Check that PVC status did not changed')
            assert not pvc_status_changed

        pvc_status = pvc.status
        logger.info(f"Status of PVC {pvc_name} after 60 seconds: {pvc_status}")
        assert_msg = (
            f"PVC {pvc_name} hasn't reached status "
            f"{constants.STATUS_PENDING}"
        )
        assert pvc_status == constants.STATUS_PENDING, assert_msg

        logger.info(f"Deleting PVC {pvc_name}")
        pvc.delete()
# Example #21
    def test_sql_workload_simple(self, ripsaw):
        """
        This is a basic pgsql workload

        Deploys postgres via the ripsaw fixture, runs a pgbench
        benchmark against it, validates the parsed latency output, and
        cleans up the benchmark resource.
        """
        # Deployment postgres
        log.info("Deploying postgres database")
        ripsaw.apply_crd('resources/crds/' 'ripsaw_v1alpha1_ripsaw_crd.yaml')
        ripsaw.setup_postgresql()
        run_cmd('bin/oc wait --for condition=ready pod '
                '-l app=postgres '
                '--timeout=120s')

        # Create pgbench benchmark
        log.info("Create resource file for pgbench workload")
        pg_data = templating.load_yaml_to_dict(constants.PGSQL_BENCHMARK_YAML)
        pg_obj = OCS(**pg_data)
        pg_obj.create()
        # Wait for pgbench pod to be created
        log.info("waiting for pgbench benchmark to create, "
                 f"PGbench pod name: {pg_obj.name} ")
        wait_time = 30
        log.info(f"Waiting {wait_time} seconds...")
        time.sleep(wait_time)

        # Resolve the pgbench client pod name, then wait for the job
        pgbench_pod = run_cmd('bin/oc get pods -l '
                              'app=pgbench-client -o name')
        pgbench_pod = pgbench_pod.split('/')[1]
        run_cmd('bin/oc wait --for condition=Initialized '
                f'pods/{pgbench_pod} '
                '--timeout=60s')
        run_cmd('bin/oc wait --for condition=Complete jobs '
                '-l app=pgbench-client '
                '--timeout=300s')

        # Running pgbench and parsing logs
        output = run_cmd(f'bin/oc logs {pgbench_pod}')
        pg_output = utils.parse_pgsql_logs(output)
        log.info("*******PGBench output log*********\n" f"{pg_output}")
        # Every parsed entry must carry a latency_avg, otherwise the
        # benchmark did not actually run
        for data in pg_output:
            latency_avg = data['latency_avg']
            if not latency_avg:
                raise UnexpectedBehaviour("PGBench failed to run, "
                                          "no data found on latency_avg")
        log.info("PGBench has completed successfully")

        # Clean up pgbench benchmark
        log.info("Deleting PG bench benchmark:")
        pg_obj.delete()
def setup_fs(self):
    """
    Create a CephFileSystem for the test and wait for its MDS pods.

    The OCS object is published through the module-level ``CEPH_OBJ``;
    the raw dict is kept on ``self.fs_data``.
    """
    global CEPH_OBJ

    fs_dict = templating.load_yaml_to_dict(constants.CEPHFILESYSTEM_YAML)
    fs_dict['metadata']['name'] = helpers.create_unique_resource_name(
        'test', 'cephfs')
    fs_dict['metadata']['namespace'] = config.ENV_DATA[
        'cluster_namespace']
    self.fs_data = fs_dict

    CEPH_OBJ = OCS(**fs_dict)
    CEPH_OBJ.create()
    # Both MDS replicas must come up
    assert POD.wait_for_resource(condition='Running',
                                 selector='app=rook-ceph-mds')
    mds_pods = POD.get(selector='app=rook-ceph-mds')['items']
    assert len(mds_pods) == 2
# Example #23
def create_serviceaccount(namespace):
    """
    Create a Serviceaccount

    Args:
        namespace (str): The namespace for the serviceaccount creation

    Returns:
        OCS: An OCS instance for the service_account
    """

    sa_dict = templating.load_yaml_to_dict(constants.SERVICE_ACCOUNT_YAML)
    metadata = sa_dict['metadata']
    metadata['name'] = create_unique_resource_name('sa', 'serviceaccount')
    metadata['namespace'] = namespace
    return create_resource(**sa_dict)
def create_instance_in_clusterlogging(sc_name=None):
    """
    Creation of instance for clusterlogging that creates PVCs,
    ElasticSearch, curator fluentd and kibana pods and checks for all
    the pods and PVCs

    Args:
        sc_name (str): Storage class name to create PVCs

    Returns:
        dict: Contains all detailed information of the
            instance such as pods that got created, its resources and limits
            values, storage class and size details etc.

    """
    # Elasticsearch storage is pinned to the given SC with a fixed 200Gi
    inst_data = templating.load_yaml_to_dict(constants.CL_INSTANCE_YAML)
    inst_data['spec']['logStore']['elasticsearch']['storage'][
        'storageClassName'] = sc_name
    inst_data['spec']['logStore']['elasticsearch']['storage']['size'] = "200Gi"
    node_count = inst_data['spec']['logStore']['elasticsearch']['nodeCount']
    helpers.create_resource(wait=False, **inst_data)
    oc = ocp.OCP('v1', 'ClusterLogging', 'openshift-logging')
    logging_instance = oc.get(resource_name='instance', out_yaml_format='True')
    if logging_instance:
        logger.info("Successfully created instance for cluster-logging")
        logger.debug(logging_instance)
    else:
        logger.error("Instance for clusterlogging is not created properly")

    # NOTE(review): resource_count=11 presumably matches the expected
    # ES/curator/fluentd/kibana pod total for this deployment -- confirm.
    pod_obj = ocp.OCP(kind=constants.POD, namespace='openshift-logging')
    pod_status = pod_obj.wait_for_resource(condition=constants.STATUS_RUNNING,
                                           resource_count=11,
                                           timeout=200,
                                           sleep=5)
    assert pod_status, "Pods are not in Running state."
    logger.info("All pods are in Running state")
    # One PVC per elasticsearch node is expected to bind
    pvc_obj = ocp.OCP(kind=constants.PVC, namespace='openshift-logging')
    pvc_status = pvc_obj.wait_for_resource(condition=constants.STATUS_BOUND,
                                           resource_count=node_count,
                                           timeout=150,
                                           sleep=5)
    assert pvc_status, "PVCs are not in bound state."
    logger.info("PVCs are Bound")
    return logging_instance
# Example #25
def create_ceph_block_pool(pool_name=None):
    """
    Create a Ceph block pool

    Args:
        pool_name (str): The pool name to create

    Returns:
        OCS: An OCS instance for the Ceph block pool
    """
    pool_dict = templating.load_yaml_to_dict(constants.CEPHBLOCKPOOL_YAML)
    if pool_name:
        pool_dict['metadata']['name'] = pool_name
    else:
        pool_dict['metadata']['name'] = create_unique_resource_name(
            'test', 'cbp')
    pool_dict['metadata']['namespace'] = defaults.ROOK_CLUSTER_NAMESPACE
    cbp_obj = create_resource(**pool_dict, wait=False)
    cbp_obj.reload()

    # The pool must be visible on the ceph side before returning
    assert verify_block_pool_exists(cbp_obj.name), (
        f"Block pool {cbp_obj.name} does not exist"
    )
    return cbp_obj
# Example #26
def create_ceph_file_system(pool_name=None):
    """
    Create a Ceph file system

    Args:
        pool_name (str): The pool name to create

    Returns:
        OCS: An OCS instance for the Ceph file system
    """
    # Use distinct names for the template dict and the created object
    # (the original reused one variable for both)
    fs_dict = templating.load_yaml_to_dict(constants.CEPHFILESYSTEM_YAML)
    if pool_name:
        fs_dict['metadata']['name'] = pool_name
    else:
        fs_dict['metadata']['name'] = create_unique_resource_name(
            'test', 'cfs')
    fs_dict['metadata']['namespace'] = defaults.ROOK_CLUSTER_NAMESPACE
    cfs_obj = create_resource(**fs_dict)
    cfs_obj.reload()

    assert validate_cephfilesystem(cfs_obj.name), (
        f"File system {cfs_obj.name} does not exist"
    )
    return cfs_obj
def create_storageclass(sc_name, expect_fail=False):
    """
    Function to create a storage class and check for
    duplicate storage class name

    Args:
        sc_name (str): name of the storageclass to be created
        expect_fail (bool): To catch the incorrect scenario if
            two SCs are indeed created with same name

    Returns:
        None

    """
    # Build the storage class manifest from the RBD template
    sc_data = templating.load_yaml_to_dict(constants.CSI_RBD_STORAGECLASS_YAML)
    sc_data['metadata']['name'] = sc_name
    sc_data['parameters']['clusterID'] = defaults.ROOK_CLUSTER_NAMESPACE

    global SC_OBJ
    SC_OBJ = OCS(**sc_data)

    try:
        SC_OBJ.create()
    except CommandFailed as ecf:
        # Duplicate-name creation must be rejected by the API server
        assert "AlreadyExists" in str(ecf)
        log.info(
            f"Cannot create two StorageClasses with same name !"
            f" Error message:  \n"
            f"{ecf}"
        )
    else:
        # Creation succeeded; fail the test if a failure was expected
        assert not expect_fail, (
            "SC creation with same name passed. Expected to fail !"
        )
        log.info(
            f"Storage class: {SC_OBJ.name} created successfully !"
        )
        log.debug(sc_data)
Example #28
0
    def test_smallfile_workload(self, ripsaw):
        """
        Run SmallFile Workload
        """
        log.info("Apply Operator CRD")
        ripsaw.apply_crd('resources/crds/ripsaw_v1alpha1_ripsaw_crd.yaml')

        log.info("Running SmallFile bench")
        sf_data = templating.load_yaml_to_dict(
            constants.SMALLFILE_BENCHMARK_YAML)
        sf_obj = OCS(**sf_data)
        sf_obj.create()

        # Benchmark client pods take a while to appear; poll until the
        # first smallfile client pod name becomes available.
        small_file_client_pod = None
        for pod_names in TimeoutSampler(40, 3, get_pod_name_by_pattern,
                                        'smallfile-client', 'my-ripsaw'):
            try:
                if pod_names[0] is not None:
                    small_file_client_pod = pod_names[0]
                    break
            except IndexError:
                log.info("Bench pod not ready yet")

        pod_ocp = OCP(kind='pod', namespace='my-ripsaw')
        log.info("Waiting for SmallFile benchmark to Run")
        assert pod_ocp.wait_for_resource(condition=constants.STATUS_RUNNING,
                                         resource_name=small_file_client_pod,
                                         sleep=30,
                                         timeout=600)

        # Tail the client pod logs until the benchmark reports completion,
        # giving up after the timeout elapses.
        timeout = 900
        start_time = time.time()
        while True:
            pod_logs = pod_ocp.exec_oc_cmd(f'logs {small_file_client_pod}',
                                           out_yaml_format=False)
            if "RUN STATUS DONE" in pod_logs:
                log.info("SmallFile Benchmark Completed Successfully")
                break
            if time.time() - start_time > timeout:
                raise TimeoutError(
                    "Timed out waiting for benchmark to complete")
            time.sleep(30)
Example #29
0
def create_configmap_cluster_monitoring_pod(sc_name):
    """
    Create a configmap named cluster-monitoring-config
    and configure pvc on monitoring pod

    Args:
        sc_name (str): Name of the storage class
    """
    logger.info("Creating configmap cluster-monitoring-config")
    config_map = templating.load_yaml_to_dict(
        constants.CONFIGURE_PVC_ON_MONITORING_POD
    )
    # The monitoring configuration is itself a YAML document embedded in
    # the configmap data: parse it, point both prometheus and alertmanager
    # volume claim templates at the requested storage class, serialize back.
    monitoring_config = yaml.safe_load(config_map['data']['config.yaml'])
    for component in ('prometheusK8s', 'alertmanagerMain'):
        claim_spec = monitoring_config[component]['volumeClaimTemplate']['spec']
        claim_spec['storageClassName'] = sc_name
    config_map['data']['config.yaml'] = yaml.dump(monitoring_config)

    assert helpers.create_resource(**config_map, wait=False)
    # Verify the configmap landed in the monitoring namespace
    ocp = OCP('v1', 'ConfigMap', 'openshift-monitoring')
    assert ocp.get(resource_name='cluster-monitoring-config')
    logger.info("Successfully created configmap cluster-monitoring-config")
Example #30
0
    def test_verify_all_fields_in_sc_yaml_with_oc_describe(self, interface):
        """
        Test function to create RBD and CephFS SC, and match with oc describe sc
        output
        """
        log.info(f"Creating a {interface} storage class")
        self.sc_data = templating.load_yaml_to_dict(
            getattr(constants, f"CSI_{interface}_STORAGECLASS_YAML")
        )
        self.sc_data['metadata']['name'] = helpers.create_unique_resource_name(
            'test', f'csi-{interface.lower()}'
        )
        global SC_OBJ
        SC_OBJ = OCS(**self.sc_data)
        assert SC_OBJ.create()
        log.info(
            f"{interface}Storage class: {SC_OBJ.name} created successfully"
        )
        log.info(self.sc_data)

        # Fetch the live storage class object from the cluster
        describe_out = SC_OBJ.get("sc")
        log.info(describe_out)

        # The only key the cluster may add on top of the yaml is
        # volumeBindingMode with its default value of Immediate.
        extra_keys = set(describe_out) - set(self.sc_data)
        extras = {key: describe_out[key] for key in extra_keys}
        assert len(extras) == 1 and extras['volumeBindingMode'] == 'Immediate', (
            "OC describe sc output didn't match storage class yaml"
        )
        log.info("OC describe sc output matches storage class yaml")
        # Delete Storage Class
        log.info(f"Deleting Storageclass: {SC_OBJ.name}")
        assert SC_OBJ.delete()
        log.info(f"Storage Class: {SC_OBJ.name} deleted successfully")
        del SC_OBJ