Example #1
def create_pods(request):
    """
    Create multiple pods
    """
    class_instance = request.node.cls

    def finalizer():
        """
        Delete multiple pods
        """
        if hasattr(class_instance, 'pod_objs'):
            for pod in class_instance.pod_objs:
                pod.delete()

    request.addfinalizer(finalizer)

    class_instance.pod_objs = list()
    for pvc_obj in class_instance.pvc_objs:
        class_instance.pod_objs.append(
            helpers.create_pod(interface_type=class_instance.interface,
                               pvc_name=pvc_obj.name,
                               do_reload=False,
                               namespace=class_instance.namespace))

    for pod in class_instance.pod_objs:
        helpers.wait_for_resource_state(pod, constants.STATUS_RUNNING)
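
These fixtures communicate through attributes on the requesting test class via `request.node.cls`. A minimal usage sketch with a hypothetical test class; the attribute names mirror what `create_pods` and the `create_pvcs` fixture shown further below expect:

import pytest

@pytest.mark.usefixtures("create_pvcs", "create_pods")
class TestPodsOnPvcs:
    # Hypothetical values; create_pods reads these via class_instance.<attr>
    interface = 'CephBlockPool'
    namespace = 'my-test-namespace'
    num_of_pvcs = 3
    pvc_size = 5

    def test_all_pods_running(self):
        # create_pvcs filled pvc_objs, create_pods filled pod_objs and
        # registered finalizers that clean both up after the test
        assert len(self.pod_objs) == len(self.pvc_objs)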
Example #2
def change_registry_backend_to_ocs():
    """
    Function to deploy registry with OCS backend.

    Raises:
        AssertionError: When failure in change of registry backend to OCS

    """
    pvc_obj = helpers.create_pvc(
        sc_name=constants.DEFAULT_SC_CEPHFS,
        pvc_name='registry-cephfs-rwx-pvc',
        namespace=constants.OPENSHIFT_IMAGE_REGISTRY_NAMESPACE,
        size='100Gi',
        access_mode=constants.ACCESS_MODE_RWX)
    helpers.wait_for_resource_state(pvc_obj, 'Bound')
    ocp_obj = ocp.OCP(kind=constants.CONFIG,
                      namespace=constants.OPENSHIFT_IMAGE_REGISTRY_NAMESPACE)
    param_cmd = f'[{{"op": "add", "path": "/spec/storage", "value": {{"pvc": {{"claim": "{pvc_obj.name}"}}}}}}]'
    assert ocp_obj.patch(
        resource_name=constants.IMAGE_REGISTRY_RESOURCE_NAME, params=param_cmd
    ), "Failed to change the registry storage backend to OCS"

    # Validate registry pod status
    validate_registry_pod_status()

    # Validate pvc mount in the registry pod
    validate_pvc_mount_on_registry_pod()
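
For reference, the `param_cmd` string built above is a JSON Patch document. Decoding it (with an illustrative claim name in place of the generated PVC name) shows the structure applied to the image registry config:

import json

param_cmd = (
    '[{"op": "add", "path": "/spec/storage",'
    ' "value": {"pvc": {"claim": "registry-cephfs-rwx-pvc"}}}]'
)
print(json.dumps(json.loads(param_cmd), indent=2))
# [
#   {
#     "op": "add",
#     "path": "/spec/storage",
#     "value": {"pvc": {"claim": "registry-cephfs-rwx-pvc"}}
#   }
# ]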
Example #3
    def create_pvc_delete(self, multi_pvc_factory, project=None):
        """
        Creates and deletes all types of PVCs

        """
        # Create rbd pvcs
        pvc_objs_rbd = create_pvcs(multi_pvc_factory=multi_pvc_factory,
                                   interface='CephBlockPool',
                                   project=project,
                                   status="",
                                   storageclass=None)

        # Create cephfs pvcs
        pvc_objs_cephfs = create_pvcs(multi_pvc_factory=multi_pvc_factory,
                                      interface='CephFileSystem',
                                      project=project,
                                      status="",
                                      storageclass=None)

        all_pvc_to_delete = pvc_objs_rbd + pvc_objs_cephfs

        # Check pvc status
        for pvc_obj in all_pvc_to_delete:
            helpers.wait_for_resource_state(resource=pvc_obj,
                                            state=constants.STATUS_BOUND,
                                            timeout=300)

        # Start deleting PVC
        delete_pvcs(all_pvc_to_delete)

        # Check PVCs are deleted
        for pvc_obj in all_pvc_to_delete:
            pvc_obj.ocp.wait_for_delete(resource_name=pvc_obj.name)

        logger.info("All PVCs are deleted as expected")
Example #4
    def test_pvc_delete_and_verify_size_is_returned_to_backend_pool(self):
        """
        Test case to verify that after deleting a PVC its size is
        returned to the backend pool
        """
        used_before_creating_pvc = check_ceph_used_space()
        logger.info(f"Used space before creating PVC: {used_before_creating_pvc}")
        pvc_obj = create_pvc_and_verify_pvc_exists(
            self.sc_obj.name, self.cbp_obj.name
        )
        pod_obj = helpers.create_pod(
            interface_type=constants.CEPHBLOCKPOOL, pvc_name=pvc_obj.name
        )
        helpers.wait_for_resource_state(pod_obj, constants.STATUS_RUNNING)
        pod_obj.reload()
        # used_percentage is a string such as '92%'
        used_percentage = pod.run_io_and_verify_mount_point(pod_obj)
        assert used_percentage > '90%', "I/O did not run to completion"
        used_after_creating_pvc = check_ceph_used_space()
        logger.info(f"Used space after creating PVC: {used_after_creating_pvc}")
        assert used_before_creating_pvc < used_after_creating_pvc
        pod_obj.delete()
        pvc_obj.delete()
        verify_pv_not_exists(pvc_obj, self.cbp_obj.name)
        used_after_deleting_pvc = check_ceph_used_space()
        logger.info(f"Used space after deleting PVC: {used_after_deleting_pvc}")
        assert used_after_deleting_pvc < used_after_creating_pvc
        assert abs(used_after_deleting_pvc - used_before_creating_pvc) < 0.2
Example #5
def create_dc_pods(request):
    """
    Create multiple deploymentconfig pods
    """
    class_instance = request.node.cls

    def finalizer():
        """
        Delete multiple dc pods
        """
        if hasattr(class_instance, 'dc_pod_objs'):
            for pod in class_instance.dc_pod_objs:
                helpers.delete_deploymentconfig(pod_obj=pod)

    request.addfinalizer(finalizer)

    class_instance.dc_pod_objs = [
        helpers.create_pod(interface_type=class_instance.interface,
                           pvc_name=pvc_obj.name,
                           do_reload=False,
                           namespace=class_instance.namespace,
                           sa_name=class_instance.sa_obj.name,
                           dc_deployment=True,
                           replica_count=class_instance.replica_count)
        for pvc_obj in class_instance.pvc_objs
    ]

    for pod in class_instance.dc_pod_objs:
        helpers.wait_for_resource_state(pod,
                                        constants.STATUS_RUNNING,
                                        timeout=180)
Example #6
    def setup_base(
        self, interface, multi_pvc_factory, pod_factory
    ):
        """
        Create PVCs and pods
        """
        pvc_objs = multi_pvc_factory(
            interface=interface,
            project=None,
            storageclass=None,
            size=self.pvc_size,
            access_mode=constants.ACCESS_MODE_RWO,
            status=constants.STATUS_BOUND,
            num_of_pvc=self.num_of_pvcs,
            wait_each=False
        )

        pod_objs = []
        for pvc_obj in pvc_objs:
            pod_obj = pod_factory(pvc=pvc_obj, status="")
            pod_objs.append(pod_obj)
        for pod_obj in pod_objs:
            wait_for_resource_state(
                resource=pod_obj, state=constants.STATUS_RUNNING
            )
            pod_obj.reload()

        return pvc_objs, pod_objs
Example #7
    def create_pvc_and_deploymentconfig_pod(self, request, pvc_factory):
        """
        Create a PVC and a DeploymentConfig pod using it
        """
        def finalizer():
            helpers.delete_deploymentconfig_pods(pod_obj)

        request.addfinalizer(finalizer)

        # Create pvc
        pvc_obj = pvc_factory()

        # Create service_account to get privilege for deployment pods
        sa_name = helpers.create_serviceaccount(pvc_obj.project.namespace)

        helpers.add_scc_policy(sa_name=sa_name.name, namespace=pvc_obj.project.namespace)

        pod_obj = helpers.create_pod(
            interface_type=constants.CEPHBLOCKPOOL,
            pvc_name=pvc_obj.name,
            namespace=pvc_obj.project.namespace,
            sa_name=sa_name.name,
            dc_deployment=True
        )
        helpers.wait_for_resource_state(resource=pod_obj, state=constants.STATUS_RUNNING)
        return pod_obj, pvc_obj
Example #8
def wait_for_storage_pods(timeout=200):
    """
    Check all OCS pods status, they should be in Running or Completed state

    Args:
        timeout (int): Number of seconds to wait for pods to get into correct
            state

    """
    all_pod_obj = get_all_pods(namespace=defaults.ROOK_CLUSTER_NAMESPACE)
    for pod_obj in all_pod_obj:
        state = constants.STATUS_RUNNING
        if any(i in pod_obj.name for i in ['-1-deploy', 'ocs-deviceset']):
            state = constants.STATUS_COMPLETED

        try:
            helpers.wait_for_resource_state(resource=pod_obj,
                                            state=state,
                                            timeout=timeout)
        except ResourceWrongStatusException:
            # 'rook-ceph-crashcollector' on the failed node gets stuck in
            # Pending state. BZ 1810014 tracks it.
            # As a workaround, skip the health check for the
            # 'rook-ceph-crashcollector' pod and delete its deployment so
            # that the pod disappears. Revert this workaround once the BZ
            # is fixed.
            if 'rook-ceph-crashcollector' in pod_obj.name:
                ocp_obj = ocp.OCP(namespace=defaults.ROOK_CLUSTER_NAMESPACE)
                pod_name = pod_obj.name
                deployment_name = '-'.join(pod_name.split("-")[:-2])
                command = f"delete deployment {deployment_name}"
                ocp_obj.exec_oc_cmd(command=command)
                logger.info(f"Deleted deployment for pod {pod_obj.name}")
            else:
                raise
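
The deployment name above is recovered by dropping the last two dash-separated fields of the pod name (the ReplicaSet hash and the pod suffix). A worked example with a hypothetical pod name:

pod_name = 'rook-ceph-crashcollector-worker-0-6b8d7c9f4d-x2kqp'
deployment_name = '-'.join(pod_name.split('-')[:-2])
print(deployment_name)  # rook-ceph-crashcollector-worker-0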
Example #9
    def wait_for_build_to_complete(self, timeout=900):
        """
        Wait for build status to reach complete state

        Args:
            timeout (int): Time  in seconds to wait

        """
        log.info(f"Waiting for the build to reach {JENKINS_BUILD_COMPLETE} state")
        for project in self.projects:
            jenkins_builds = self.get_builds_obj(namespace=project)
            for jenkins_build in jenkins_builds:
                if (jenkins_build.name, project) not in self.build_completed:
                    try:
                        wait_for_resource_state(
                            resource=jenkins_build, state=JENKINS_BUILD_COMPLETE, timeout=timeout
                        )
                        self.get_build_duration_time(
                            namespace=project, build_name=jenkins_build.name
                        )
                    except ResourceWrongStatusException:
                        ocp_obj = OCP(namespace=project, kind='build')
                        output = ocp_obj.describe(resource_name=jenkins_build.name)
                        error_msg = (
                            f'{jenkins_build.name} did not reach '
                            f'{JENKINS_BUILD_COMPLETE} state after {timeout} sec\n'
                            f'oc describe output of {jenkins_build.name}:\n{output}'
                        )
                        log.error(error_msg)
                        self.print_completed_builds_results()
                        raise UnexpectedBehaviour(error_msg)
Example #10
    def wait_for_build_status(self, status, timeout=900):
        """
        Wait for build status to reach running/completed

        Args:
            status (str): status to reach Running or Completed
            timeout (int): Time in seconds to wait

        """
        log.info(f"Waiting for the build to reach {status} state")
        for project in self.projects:
            jenkins_builds = self.get_builds_obj(namespace=project)
            for jenkins_build in jenkins_builds:
                if (jenkins_build.name, project) not in self.build_completed:
                    try:
                        wait_for_resource_state(resource=jenkins_build,
                                                state=status,
                                                timeout=timeout)
                        self.build_completed.append(
                            (jenkins_build.name, project))
                    except ResourceWrongStatusException:
                        ocp_obj = OCP(namespace=project, kind='build')
                        output = ocp_obj.describe(
                            resource_name=jenkins_build.name)
                        error_msg = (
                            f'{jenkins_build.name} did not reach '
                            f'{status} state after {timeout} sec\n'
                            f'oc describe output of {jenkins_build.name}:\n{output}'
                        )
                        log.error(error_msg)
                        self.get_builds_logs()
                        raise UnexpectedBehaviour(error_msg)
Example #11
    def test_pvc_deletion_measurement_performance(self, teardown_factory,
                                                  pvc_size):
        """
        Measure PVC deletion time and verify it is within the supported limit
        """
        logging.info('Start creating new PVC')

        pvc_obj = helpers.create_pvc(sc_name=self.sc_obj.name, size=pvc_size)
        helpers.wait_for_resource_state(pvc_obj, constants.STATUS_BOUND)
        pvc_obj.reload()
        pv_name = pvc_obj.backed_pv
        pvc_reclaim_policy = pvc_obj.reclaim_policy
        teardown_factory(pvc_obj)
        pvc_obj.delete()
        logging.info('Start deletion of PVC')
        pvc_obj.ocp.wait_for_delete(pvc_obj.name)
        if pvc_reclaim_policy == constants.RECLAIM_POLICY_DELETE:
            helpers.validate_pv_delete(pvc_obj.backed_pv)
        delete_time = helpers.measure_pvc_deletion_time(
            self.interface, pv_name)
        # Deletion time for a CephFS PVC is a little over 3 seconds
        accepted_deletion_time = 4 if self.interface == constants.CEPHFILESYSTEM else 3
        logging.info(f"PVC deleted in {delete_time} seconds")
        if delete_time > accepted_deletion_time:
            raise ex.PerformanceException(
                f"PVC deletion time is {delete_time} seconds, greater than the "
                f"accepted {accepted_deletion_time} seconds"
            )
        push_to_pvc_time_dashboard(self.interface, "deletion", delete_time)
Example #12
    def test_drain_mcg_pod_node(self, node_drain_teardown, pod_to_drain):
        """
        Test draining of nodes that host NooBaa resources

        """

        # Retrieve the relevant pod object
        pod_obj = pod.Pod(**pod.get_pods_having_label(
            label=self.labels_map[pod_to_drain],
            namespace=defaults.ROOK_CLUSTER_NAMESPACE)[0])
        # Retrieve the node name on which the pod resides
        node_name = pod_obj.get()['spec']['nodeName']
        # Drain the node
        drain_nodes([node_name])
        # Verify the node was drained properly
        wait_for_nodes_status([node_name],
                              status=constants.NODE_READY_SCHEDULING_DISABLED)
        # Retrieve the new pod that should've been created post-drainage
        pod_obj = pod.Pod(**pod.get_pods_having_label(
            label=self.labels_map[pod_to_drain],
            namespace=defaults.ROOK_CLUSTER_NAMESPACE)[0])
        # Verify that the new pod has reached Running status and recovered successfully
        wait_for_resource_state(pod_obj, constants.STATUS_RUNNING, timeout=120)
        # Check the NB status to verify the system is healthy
        self.cl_obj.wait_for_noobaa_health_ok()
Example #13
    def cleanup(self):
        """
        Removes resources created during test execution and verifies
        the reclaim policy is honored
        """

        pod_objs = pod.get_all_pods(namespace=self.namespace)
        if len(pod_objs) > 0:
            for pod_obj in pod_objs:
                pod_obj.delete()
                pod_obj.ocp.wait_for_delete(resource_name=pod_obj.name)

        if hasattr(self, 'pvc_obj'):
            pv_obj = self.pvc_obj.backed_pv_obj
            self.pvc_obj.delete()

            try:
                assert helpers.validate_pv_delete(pv_obj.name)

            except AssertionError:
                if self.reclaim_policy == constants.RECLAIM_POLICY_RETAIN:
                    helpers.wait_for_resource_state(pv_obj,
                                                    constants.STATUS_RELEASED)
                    # TODO: deletion of ceph rbd image, blocked by BZ#1723656
                    pv_obj.delete()

                else:
                    raise UnexpectedBehaviour(
                        f"PV {pv_obj.name} is not deleted after deleting PVC")

        if hasattr(self, 'sc_obj'):
            self.sc_obj.delete()
Example #14
def validate_pods_are_respinned_and_running_state(pod_objs_list):
    """
    Verifies the list of the pods are respinned and in running state

    Args:
        pod_objs_list (list): List of the pods obj

    Returns:
         bool : True if the pods are respinned and running, False otherwise

    Raises:
        ResourceWrongStatusException: In case the resources haven't
            reached the Running state

    """
    for pod in pod_objs_list:
        helpers.wait_for_resource_state(pod,
                                        constants.STATUS_RUNNING,
                                        timeout=180)

    for pod in pod_objs_list:
        pod_obj = pod.get()
        start_time = pod_obj['status']['startTime']
        ts = time.strptime(start_time, '%Y-%m-%dT%H:%M:%SZ')
        ts = calendar.timegm(ts)
        current_time_utc = time.time()
        sec = current_time_utc - ts
        if (sec / 3600) >= 1:
            logger.error(
                f'Pod {pod.name} is not respinned, the age of the pod is {start_time}'
            )
            return False

    return True
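
The respin check above parses the pod's `startTime` as UTC and flags pods older than one hour. The same arithmetic as a standalone snippet, with a hypothetical timestamp:

import calendar
import time

# Hypothetical startTime in the format Kubernetes reports
start_time = '2020-01-01T12:00:00Z'
ts = calendar.timegm(time.strptime(start_time, '%Y-%m-%dT%H:%M:%SZ'))
age_hours = (time.time() - ts) / 3600
print(f"Pod age: {age_hours:.1f} hours")  # respin check fails if >= 1 hour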
Example #15
def change_registry_backend_to_ocs():
    """
    Function to deploy registry with OCS backend.

    Raises:
        AssertionError: When failure in change of registry backend to OCS

    """
    sc_name = f"{constants.DEFAULT_STORAGECLASS_CEPHFS}"
    pv_obj = helpers.create_pvc(
        sc_name=sc_name,
        pvc_name='registry-cephfs-rwx-pvc',
        namespace=constants.OPENSHIFT_IMAGE_REGISTRY_NAMESPACE,
        size='100Gi',
        access_mode=constants.ACCESS_MODE_RWX)
    helpers.wait_for_resource_state(pv_obj, 'Bound')
    param_cmd = f'[{{"op": "add", "path": "/spec/storage", "value": {{"pvc": {{"claim": "{pv_obj.name}"}}}}}}]'

    run_cmd(f"oc patch {constants.IMAGE_REGISTRY_CONFIG} -p "
            f"'{param_cmd}' --type json")

    # Validate registry pod status
    retry((CommandFailed, UnexpectedBehaviour), tries=3,
          delay=15)(validate_registry_pod_status)()

    # Validate pvc mount in the registry pod
    retry((CommandFailed, UnexpectedBehaviour, AssertionError),
          tries=3,
          delay=15)(validate_pvc_mount_on_registry_pod)()
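
This variant wraps the validators with a `retry` decorator applied at call time, so each validation is re-run on the listed exceptions. A minimal sketch of such a decorator factory, assuming the actual ocs-ci implementation differs in logging and backoff details:

import functools
import time

def retry(exceptions, tries=3, delay=15):
    """Return a decorator that retries func on the given exceptions."""
    def decorator(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            for attempt in range(1, tries + 1):
                try:
                    return func(*args, **kwargs)
                except exceptions:
                    if attempt == tries:
                        raise
                    time.sleep(delay)
        return wrapper
    return decorator

# Usage mirrors the example above:
# retry((CommandFailed, UnexpectedBehaviour), tries=3, delay=15)(validate_registry_pod_status)()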
Example #16
    def test_ocs_347(self, resources):
        pod, pvc, storageclass = resources

        log.info("Creating RBD StorageClass")
        storageclass.append(
            helpers.create_storage_class(
                interface_type=constants.CEPHBLOCKPOOL,
                interface_name=self.cbp_obj.name,
                secret_name=self.rbd_secret_obj.name,
            )
        )
        log.info("Creating a PVC")
        pvc.append(helpers.create_pvc(sc_name=storageclass[0].name))
        for pvc_obj in pvc:
            helpers.wait_for_resource_state(pvc_obj, constants.STATUS_BOUND)
            pvc_obj.reload()
        log.info(
            f"Creating a pod with PVC {pvc[0].name}"
        )
        pod_obj = helpers.create_pod(
            interface_type=constants.CEPHBLOCKPOOL, pvc_name=pvc[0].name,
            pod_dict_path=constants.NGINX_POD_YAML
        )
        pod.append(pod_obj)
        helpers.wait_for_resource_state(pod_obj, constants.STATUS_RUNNING)
        pod_obj.reload()
Example #17
    def test_delete_create_pvc_same_name(self, interface, pvc_factory,
                                         teardown_factory):
        """
        Delete PVC and create a new PVC with same name
        """
        # Create a PVC
        pvc_obj1 = pvc_factory(interface=interface,
                               access_mode=constants.ACCESS_MODE_RWO,
                               status=constants.STATUS_BOUND)

        # Delete the PVC
        logger.info(f"Deleting PVC {pvc_obj1.name}")
        pvc_obj1.delete()
        pvc_obj1.ocp.wait_for_delete(pvc_obj1.name)
        logger.info(f"Deleted PVC {pvc_obj1.name}")

        # Create a new PVC with same name
        logger.info(f"Creating new PVC with same name {pvc_obj1.name}")
        pvc_obj2 = helpers.create_pvc(sc_name=pvc_obj1.storageclass.name,
                                      pvc_name=pvc_obj1.name,
                                      namespace=pvc_obj1.project.namespace,
                                      do_reload=False)

        teardown_factory(pvc_obj2)

        # Check the new PVC and PV are Bound
        helpers.wait_for_resource_state(resource=pvc_obj2,
                                        state=constants.STATUS_BOUND)
        pv_obj2 = pvc_obj2.backed_pv_obj
        helpers.wait_for_resource_state(resource=pv_obj2,
                                        state=constants.STATUS_BOUND)
Example #18
    def finalizer():
        """
        Delete the PVC
        """
        pv_objs = []

        # Get PVs from PVC instances and delete the PVCs
        for instance in instances:
            if not instance.is_deleted:
                pv_objs.append(instance.backed_pv_obj)
                instance.delete()
                instance.ocp.wait_for_delete(instance.name)

        # Wait for PVs to delete
        # If they have ReclaimPolicy set to Retain then delete them manually
        for pv_obj in pv_objs:
            if pv_obj.data.get(
                    'spec').get('persistentVolumeReclaimPolicy'
                                ) == constants.RECLAIM_POLICY_RETAIN:
                helpers.wait_for_resource_state(pv_obj,
                                                constants.STATUS_RELEASED)
                pv_obj.delete()
                pv_obj.ocp.wait_for_delete(pv_obj.name)
            else:
                pv_obj.ocp.wait_for_delete(resource_name=pv_obj.name,
                                           timeout=180)
Example #19
def create_pvcs(request):
    """
    Create multiple PVCs
    """
    class_instance = request.node.cls

    def finalizer():
        """
        Delete multiple PVCs
        """
        if hasattr(class_instance, 'pvc_objs'):
            # Record each backing PV name before deleting its PVC so that
            # every PV, not just the last one, can be validated
            backed_pv_names = []
            for pvc_obj in class_instance.pvc_objs:
                pvc_obj.reload()
                backed_pv_names.append(pvc_obj.backed_pv)
                pvc_obj.delete()
            for pvc_obj, backed_pv_name in zip(
                    class_instance.pvc_objs, backed_pv_names):
                pvc_obj.ocp.wait_for_delete(pvc_obj.name)
                helpers.validate_pv_delete(backed_pv_name)

    request.addfinalizer(finalizer)

    class_instance.pvc_objs = helpers.create_multiple_pvcs(
        sc_name=class_instance.sc_obj.name,
        number_of_pvc=class_instance.num_of_pvcs,
        size=class_instance.pvc_size,
        namespace=class_instance.namespace)
    for pvc_obj in class_instance.pvc_objs:
        helpers.wait_for_resource_state(pvc_obj, constants.STATUS_BOUND)
        pvc_obj.reload()
Example #20
    def wait_for_pgbench_status(self, status, timeout=None):
        """
        Wait for pgbench benchmark pods status to reach running/completed

        Args:
            status (str): status to reach Running or Completed
            timeout (int): Time in seconds to wait

        """
        """
        Sometimes with the default values in the benchmark yaml the pgbench pod is not
        getting completed within the specified time and the tests are failing.
        I think it is varying with the infrastructure.
        So, for now we set the timeout to 30 mins and will start monitoring each pg bench
        pods for each run.Based on the results we will define the timeout again
        """
        timeout = timeout if timeout else 1800
        # Wait for pg_bench pods to initialized and running
        log.info(f"Waiting for pgbench pods to be reach {status} state")
        pgbench_pod_objs = self.get_pgbench_pods()
        for pgbench_pod_obj in pgbench_pod_objs:
            try:
                wait_for_resource_state(resource=pgbench_pod_obj,
                                        state=status,
                                        timeout=timeout)
            except ResourceWrongStatusException:
                output = run_cmd(f'oc logs {pgbench_pod_obj.name}')
                error_msg = f'{pgbench_pod_obj.name} did not reach {status} state after {timeout} sec\n{output}'
                log.error(error_msg)
                raise UnexpectedBehaviour(error_msg)
Example #21
    def test_reclaim_policy_retain(self):
        """
        Verify that deleting a PVC with reclaim policy Retain releases,
        but does not delete, the backing PV and Ceph image
        """
        ceph_image_count = len(list_ceph_images(pool_name=self.cbp_obj.name))
        pvc_obj = helpers.create_pvc(
            sc_name=self.sc_obj_retain.name,
            pvc_name=helpers.create_unique_resource_name('retain', 'pvc')
        )
        helpers.wait_for_resource_state(pvc_obj, constants.STATUS_BOUND)
        pvc_obj.reload()
        pv_name = pvc_obj.get()['spec']['volumeName']
        pv_namespace = pvc_obj.get()['metadata']['namespace']
        pv_obj = ocp.OCP(kind='PersistentVolume', namespace=pv_namespace)
        assert pvc_obj.delete()
        pvc_obj.ocp.wait_for_delete(resource_name=pvc_obj.name)
        assert pv_obj.get(pv_name).get('status').get('phase') == 'Released', (
            f"Status of PV {pv_obj.get(pv_name)} is not 'Released'"
        )
        log.info("Status of PV is Released")
        # The Ceph image backing the released PV must still exist
        assert ceph_image_count + 1 == len(
            list_ceph_images(pool_name=self.cbp_obj.name)
        )
        assert pv_obj.delete(resource_name=pv_name)
        assert pv_obj.wait_for_delete(pv_name, 60), (
            f"PV {pv_name} is not deleted"
        )
Example #22
    def create_multiple_pvcs_statistics(self, num_of_samples, teardown_factory,
                                        pvc_size):
        """
        Creates a number (num_of_samples) of PVCs, measures the creation
        time of each PVC and returns the list of creation times.

        Args:
            num_of_samples (int): Number of sampled PVCs to create.
            teardown_factory: Fixture for tearing down resources created
                during the test.
            pvc_size: Size of the created PVCs.

        Returns:
            list: Creation times of all the created PVCs.

        """
        time_measures = []
        for i in range(num_of_samples):
            log.info(f'Start creation of PVC number {i + 1}.')

            pvc_obj = helpers.create_pvc(sc_name=self.sc_obj.name,
                                         size=pvc_size)
            helpers.wait_for_resource_state(pvc_obj, constants.STATUS_BOUND)
            pvc_obj.reload()
            teardown_factory(pvc_obj)
            create_time = helpers.measure_pvc_creation_time(
                self.interface, pvc_obj.name)
            logging.info(f"PVC created in {create_time} seconds")

            time_measures.append(create_time)
        return time_measures
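
A caller would typically summarize the returned samples. A minimal sketch using only the standard library, with illustrative values:

import statistics

# Illustrative creation times as returned by create_multiple_pvcs_statistics
time_measures = [2.8, 3.1, 2.9, 3.4]
print(f"mean={statistics.mean(time_measures):.2f}s, "
      f"stdev={statistics.stdev(time_measures):.2f}s")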
Example #23
    def wait_for_jenkins_deploy_status(self, status, timeout=600):
        """
        Wait for jenkins deploy pods status to reach running/completed

        Args:
            status (str): status to reach Running or Completed
            timeout (int): Time in seconds to wait

        """
        log.info(f"Waiting for jenkins-deploy pods to be reach {status} state")
        for project in self.projects:
            jenkins_deploy_pods = self.get_jenkins_deploy_pods(namespace=project)
            for jenkins_deploy_pod in jenkins_deploy_pods:
                try:
                    wait_for_resource_state(
                        resource=jenkins_deploy_pod, state=status, timeout=timeout
                    )
                except ResourceWrongStatusException:
                    cmd = f'logs {jenkins_deploy_pod.name}'
                    ocp_obj = OCP(namespace=project)
                    output_log = ocp_obj.exec_oc_cmd(command=cmd, out_yaml_format=False)
                    cmd = f'describe {jenkins_deploy_pod.name}'
                    output_describe = ocp_obj.exec_oc_cmd(command=cmd, out_yaml_format=False)
                    error_msg = (
                        f'{jenkins_deploy_pod.name} did not reach '
                        f'{status} state after {timeout} sec'
                        f'\noc logs output of {jenkins_deploy_pod.name}:\n{output_log}'
                        f'\noc describe output of {jenkins_deploy_pod.name}:\n{output_describe}'
                    )
                    log.error(error_msg)
                    raise UnexpectedBehaviour(error_msg)
Example #24
    def setup(self, interface, reclaim_policy, storageclass_factory,
              multi_pvc_factory, pod_factory):
        """
        Create PVCs and pods
        """
        # Create storage class
        self.sc_obj = storageclass_factory(interface=interface,
                                           reclaim_policy=reclaim_policy)

        # Create PVCs
        self.pvc_objs = multi_pvc_factory(interface=interface,
                                          project=None,
                                          storageclass=self.sc_obj,
                                          size=5,
                                          status=constants.STATUS_BOUND,
                                          num_of_pvc=self.num_of_pvc,
                                          wait_each=False)

        # Create pods
        self.pod_objs = []
        for pvc_obj in self.pvc_objs:
            self.pod_objs.append(
                pod_factory(interface=interface, pvc=pvc_obj, status=None))
        for pod in self.pod_objs:
            wait_for_resource_state(pod, constants.STATUS_RUNNING)
            pod.reload()
Example #25
    def setup(self, interface, multi_pvc_factory, pod_factory):
        """
        Create PVCs and pods
        """
        access_modes = [constants.ACCESS_MODE_RWO]
        if interface == constants.CEPHFILESYSTEM:
            access_modes.append(constants.ACCESS_MODE_RWX)
        pvc_objs = multi_pvc_factory(interface=interface,
                                     project=None,
                                     storageclass=None,
                                     size=self.pvc_size,
                                     access_modes=access_modes,
                                     status=constants.STATUS_BOUND,
                                     num_of_pvc=self.num_of_pvcs,
                                     wait_each=False)
        rwo_pvcs = [
            pvc_obj for pvc_obj in pvc_objs
            if (pvc_obj.access_mode == constants.ACCESS_MODE_RWO)
        ]
        rwx_pvcs = [
            pvc_obj for pvc_obj in pvc_objs
            if (pvc_obj.access_mode == constants.ACCESS_MODE_RWX)
        ]

        num_of_rwo_pvc = len(rwo_pvcs)
        num_of_rwx_pvc = len(rwx_pvcs)

        log.info(f"Created {num_of_rwo_pvc} RWO PVCs.")
        log.info(f"Created {num_of_rwx_pvc} RWX PVCs.")

        # Select 5 PVCs for IO pods: 3 RWO + 2 RWX when RWX PVCs exist,
        # otherwise 5 RWO
        if rwx_pvcs:
            pvc_objs_for_io_pods = rwo_pvcs[0:3] + rwx_pvcs[0:2]
            pvc_objs_new_pods = rwo_pvcs[3:] + rwx_pvcs[2:]
        else:
            pvc_objs_for_io_pods = rwo_pvcs[0:5]
            pvc_objs_new_pods = rwo_pvcs[5:]

        io_pods = []

        # Create one pod using each RWO PVC and two pods using each RWX PVC
        # for running IO
        for pvc_obj in pvc_objs_for_io_pods:
            if pvc_obj.access_mode == constants.ACCESS_MODE_RWX:
                pod_obj = pod_factory(interface=interface,
                                      pvc=pvc_obj,
                                      status="")
                io_pods.append(pod_obj)
            pod_obj = pod_factory(interface=interface, pvc=pvc_obj, status="")
            io_pods.append(pod_obj)

        # Wait for pods to be in Running state
        for pod_obj in io_pods:
            helpers.wait_for_resource_state(resource=pod_obj,
                                            state=constants.STATUS_RUNNING)
            pod_obj.reload()
        log.info(f"Created {len(io_pods)} pods for running IO.")

        return pvc_objs, io_pods, pvc_objs_new_pods, access_modes
Example #26
def validate_registry_pod_status():
    """
    Function to validate registry pod status
    """
    pod_objs = get_registry_pod_obj()
    for pod_obj in pod_objs:
        helpers.wait_for_resource_state(pod_obj,
                                        state=constants.STATUS_RUNNING)
Example #27
def create_fio_pod(project,
                   interface,
                   pvc_factory,
                   storageclass,
                   access_mode,
                   fio_job_dict,
                   fio_configmap_dict,
                   tmp_path,
                   volume_mode=None,
                   pvc_size=10):
    """
    Create pods for upgrade testing.

    Args:
        project (obj): Project in which to create resources
        interface (str): CephBlockPool or CephFileSystem
        pvc_factory (function): Function for creating PVCs
        storageclass (obj): Storageclass to use
        access_mode (str): ReadWriteOnce, ReadOnlyMany or ReadWriteMany.
            This decides the access mode to be used for the PVC
        fio_job_dict (dict): fio job dictionary to use
        fio_configmap_dict (dict): fio configmap dictionary to use
        tmp_path (obj): reference to tmp_path fixture object
        volume_mode (str): Volume mode for rbd RWO PVC
        pvc_size (int): Size of PVC in GiB

    Returns:
        Pod: Pod attached to the newly created PVC

    """
    log.info(f"Creating pod via {interface} using {access_mode}"
             f" access mode, {volume_mode} volume mode and {storageclass.name}"
             f" storageclass")
    pvc = pvc_factory(project=project,
                      storageclass=storageclass,
                      access_mode=access_mode,
                      volume_mode=volume_mode,
                      size=pvc_size,
                      status=None)
    helpers.wait_for_resource_state(pvc, constants.STATUS_BOUND, timeout=600)

    job_volume = fio_job_dict['spec']['template']['spec']['volumes'][0]
    job_volume['persistentVolumeClaim']['claimName'] = pvc.name
    fio_objs = [fio_configmap_dict, fio_job_dict]
    job_file = ObjectConfFile("fio_continuous", fio_objs, project, tmp_path)

    # deploy the Job to the cluster and start it
    job_file.create()

    ocp_pod_obj = ocp.OCP(kind=constants.POD, namespace=project.namespace)
    pods = ocp_pod_obj.get()['items']
    for pod in pods:
        pod_volume = pod['spec']['volumes'][0]
        if pod_volume['persistentVolumeClaim']['claimName'] == pvc.name:
            pod_data = pod
            break

    return Pod(**pod_data)
Example #28
    def test_rwo_dynamic_pvc(self, setup_base):
        """
        RWO Dynamic PVC creation tests with Reclaim policy set to Delete/Retain
        """

        logger.info(
            f"Creating second pod on node: {self.worker_nodes_list[1]}")

        pod_obj2 = helpers.create_pod(interface_type=self.interface_type,
                                      pvc_name=self.pvc_obj.name,
                                      do_reload=False,
                                      namespace=self.namespace,
                                      node_name=self.worker_nodes_list[1],
                                      pod_dict_path=constants.NGINX_POD_YAML)
        node_pod1 = self.pod_obj1.get().get('spec').get('nodeName')
        node_pod2 = pod_obj2.get().get('spec').get('nodeName')

        assert node_pod1 != node_pod2, 'Both pods are on the same node'

        logger.info(f"Running IO on pod {self.pod_obj1.name}")
        file_name = self.pod_obj1.name
        self.pod_obj1.run_io(storage_type=self.storage_type,
                             size=self.io_size,
                             runtime=30,
                             fio_filename=file_name)
        pod.get_fio_rw_iops(self.pod_obj1)
        md5sum_pod1_data = pod.cal_md5sum(pod_obj=self.pod_obj1,
                                          file_name=file_name)
        # Verify that second pod is still in ContainerCreating state and not able to
        # attain Running state due to expected failure
        helpers.wait_for_resource_state(
            resource=pod_obj2, state=constants.STATUS_CONTAINER_CREATING)
        self.verify_expected_failure_event(
            ocs_obj=pod_obj2, failure_str=self.expected_pod_failure)
        logger.info(f"Deleting first pod so that second pod can attach"
                    f" {self.pvc_obj.name}")
        self.pod_obj1.delete()
        self.pod_obj1.ocp.wait_for_delete(resource_name=self.pod_obj1.name)

        # Wait for second pod to be in Running state
        helpers.wait_for_resource_state(resource=pod_obj2,
                                        state=constants.STATUS_RUNNING,
                                        timeout=240)

        assert pod.verify_data_integrity(pod_obj=pod_obj2,
                                         file_name=file_name,
                                         original_md5sum=md5sum_pod1_data)

        pod_obj2.run_io(storage_type=self.storage_type,
                        size=self.io_size,
                        runtime=30,
                        fio_filename=pod_obj2.name)
        pod.get_fio_rw_iops(pod_obj2)

        # Again verify data integrity
        assert pod.verify_data_integrity(pod_obj=pod_obj2,
                                         file_name=file_name,
                                         original_md5sum=md5sum_pod1_data)
Example #29
    def test_create_multiple_sc_with_same_pool_name(self, interface_type,
                                                    resources):
        """
        This test function does the following:
        * Creates multiple storage classes with the same pool name
        * Creates PVCs using each storage class
        * Mounts each PVC to an app pod
        * Runs IO on each app pod
        """
        # Unpack resources
        pods, pvcs, storageclasses = resources

        # Create 3 Storage Classes with same pool name
        if interface_type == constants.CEPHBLOCKPOOL:
            secret = self.rbd_secret_obj.name
            interface_name = self.cbp_obj.name
        else:
            interface_type = constants.CEPHFILESYSTEM
            secret = self.cephfs_secret_obj.name
            interface_name = helpers.get_cephfs_data_pool_name()
        for i in range(3):
            log.info(f"Creating a {interface_type} storage class")
            storageclasses.append(
                helpers.create_storage_class(interface_type=interface_type,
                                             interface_name=interface_name,
                                             secret_name=secret))
            log.info(f"{interface_type}StorageClass: {storageclasses[i].name} "
                     f"created successfully")

        # Create PVCs using each SC
        for i in range(3):
            log.info(f"Creating a PVC using {storageclasses[i].name}")
            pvcs.append(helpers.create_pvc(storageclasses[i].name))
        for pvc in pvcs:
            helpers.wait_for_resource_state(pvc, constants.STATUS_BOUND)
            pvc.reload()

        # Create an app pod mounting each PVC
        for i in range(3):
            log.info(f"Creating an app pod to mount {pvcs[i].name}")
            pods.append(
                helpers.create_pod(interface_type=interface_type,
                                   pvc_name=pvcs[i].name,
                                   namespace=defaults.ROOK_CLUSTER_NAMESPACE))
            log.info(f"{pods[i].name} created successfully and "
                     f"mounted {pvcs[i].name}")
        # Wait once for all app pods instead of re-checking the whole list
        # on every iteration
        for pod in pods:
            helpers.wait_for_resource_state(pod, constants.STATUS_RUNNING)
            pod.reload()

        # Run IO on each app pod for some time
        for pod in pods:
            log.info(f"Running FIO on {pod.name}")
            pod.run_io('fs', size='2G')

        for pod in pods:
            get_fio_rw_iops(pod)
Example #30
def test_start_pre_upgrade_pod_io(pre_upgrade_pods_running_io):
    """
    Confirm that pods created before the upgrade reach Running state.
    """
    log.info("Waiting for all fio pods to come up")
    for pod in pre_upgrade_pods_running_io:
        helpers.wait_for_resource_state(pod,
                                        constants.STATUS_RUNNING,
                                        timeout=600)
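
Every example above funnels through `wait_for_resource_state`. The real ocs-ci helper is richer (it logs progress and understands more resource kinds), but a minimal sketch of the underlying polling pattern, under those assumptions, looks like this:

import time

class ResourceWrongStatusException(Exception):
    pass

def wait_for_resource_state(resource, state, timeout=60, interval=3):
    """Poll the resource until its status phase matches state, else raise."""
    deadline = time.time() + timeout
    while time.time() < deadline:
        phase = resource.get().get('status', {}).get('phase')
        if phase == state:
            return
        time.sleep(interval)
    raise ResourceWrongStatusException(
        f"{resource.name} did not reach {state} within {timeout} seconds"
    )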