Example 1
def awscli_pod(noobaa_obj, created_pods):
    """
    Creates a new AWSCLI pod for relaying commands

    Args:
        created_pods (Fixture/list): A fixture used to keep track of created pods
            and clean them up in the teardown

    Returns:
        pod: A pod running the AWS CLI
    """
    awscli_pod_obj = helpers.create_pod(namespace='noobaa',
                                        pod_dict_path=constants.AWSCLI_POD_YAML)
    helpers.wait_for_resource_state(awscli_pod_obj, constants.STATUS_RUNNING)
    created_pods.append(awscli_pod_obj)
    return awscli_pod_obj
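A minimal sketch of how a test might consume this fixture, assuming the standard pytest fixture mechanism and the exec_cmd_on_pod helper used in the later examples; the test name, endpoint URL, and assertion are hypothetical.

def test_awscli_pod_relay(awscli_pod):
    # awscli_pod is the running AWS CLI pod yielded by the fixture above;
    # commands are relayed into it with exec_cmd_on_pod (endpoint is illustrative)
    output = awscli_pod.exec_cmd_on_pod(
        command="aws s3 ls --endpoint-url http://s3.example.local",
        out_yaml_format=False
    )
    assert output is not None, "AWS CLI command returned no output"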
Example 2
    def dynamic_pvc_base(self, interface_type, reclaim_policy):
        """
        Base function for Dynamic PVC creation tests
        Fetches the worker nodes name list, creates StorageClass and PVC
        """
        self.interface_type = interface_type
        self.reclaim_policy = reclaim_policy
        self.worker_nodes_list = helpers.get_worker_nodes()

        if self.interface_type == constants.CEPHBLOCKPOOL:
            self.interface_name = self.cbp_obj.name
            self.secret_name = self.rbd_secret_obj.name

        elif self.interface_type == constants.CEPHFILESYSTEM:
            self.interface_name = helpers.get_cephfs_data_pool_name()
            self.secret_name = self.cephfs_secret_obj.name

        logger.info(
            f"Creating Storage Class with reclaimPolicy: {self.reclaim_policy}"
        )
        self.sc_obj = helpers.create_storage_class(
            interface_type=self.interface_type,
            interface_name=self.interface_name,
            secret_name=self.secret_name,
            reclaim_policy=self.reclaim_policy
        )

        logger.info(f"Creating PVC with accessModes: {self.access_mode}")
        self.pvc_obj = helpers.create_pvc(
            sc_name=self.sc_obj.name, namespace=self.namespace,
            size=self.pvc_size, access_mode=self.access_mode
        )
        helpers.wait_for_resource_state(self.pvc_obj, constants.STATUS_BOUND)
        self.pvc_obj.reload()

        logger.info(
            f"Creating first pod on node: {self.worker_nodes_list[0]}"
            f" with pvc {self.pvc_obj.name}"
        )
        self.pod_obj1 = helpers.create_pod(
            interface_type=self.interface_type, pvc_name=self.pvc_obj.name,
            namespace=self.namespace, node_name=self.worker_nodes_list[0],
            pod_dict_path=constants.NGINX_POD_YAML
        )
        helpers.wait_for_resource_state(self.pod_obj1, constants.STATUS_RUNNING)
        self.pod_obj1.reload()
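The RWO/RWX tests shown later call this setup through a setup_base fixture; a rough sketch of such a wrapper, assuming pytest's fixture decorator and a constants.RECLAIM_POLICY_DELETE constant (both the fixture name and the constant are assumptions, not shown in these examples).

    @pytest.fixture()
    def setup_base(self):
        # Hypothetical wrapper: run the base setup for RBD with the Delete
        # reclaim policy before each dynamic PVC test in this class
        self.dynamic_pvc_base(
            interface_type=constants.CEPHBLOCKPOOL,
            reclaim_policy=constants.RECLAIM_POLICY_DELETE
        )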
Example 3
    def factory(
        interface=constants.CEPHBLOCKPOOL,
        pvc=None,
        custom_data=None,
        status=constants.STATUS_RUNNING,
        pod_dict_path=None,
        raw_block_pv=False
    ):
        """
        Args:
            interface (str): CephBlockPool or CephFileSystem. This decides
                whether a RBD based or CephFS resource is created.
                RBD is default.
            pvc (PVC object): ocs_ci.ocs.resources.pvc.PVC instance kind.
            custom_data (dict): If provided then Pod object is created
                by using these data. Parameter `pvc` is not used but reference
                is set if provided.
            status (str): If provided then factory waits for object to reach
                desired state.
            pod_dict_path (str): YAML path for the pod.
            raw_block_pv (bool): True for creating raw block pv based pod,
                False otherwise.

        Returns:
            object: helpers.create_pod instance.
        """
        if custom_data:
            pod_obj = helpers.create_resource(**custom_data)
        else:
            pvc = pvc or pvc_factory(interface=interface)

            pod_obj = helpers.create_pod(
                pvc_name=pvc.name,
                namespace=pvc.namespace,
                interface_type=interface,
                pod_dict_path=pod_dict_path,
                raw_block_pv=raw_block_pv
            )
            assert pod_obj, "Failed to create PVC"
        instances.append(pod_obj)
        if status:
            helpers.wait_for_resource_state(pod_obj, status)
            pod_obj.reload()
        pod_obj.pvc = pvc

        return pod_obj
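A short sketch of how a test could consume this factory, assuming the enclosing fixture is exposed to tests as pod_factory (the fixture name is an assumption); the fio call mirrors the run_io usage elsewhere in these examples.

def test_pod_factory_io(pod_factory):
    # Create an RBD-backed pod; the backing PVC is created implicitly by
    # pvc_factory because no pvc argument is passed
    pod_obj = pod_factory(interface=constants.CEPHBLOCKPOOL)
    pod_obj.run_io(storage_type='fs', size='512M', runtime=30)
    pod.get_fio_rw_iops(pod_obj)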
Example 4
    def factory(
        interface=constants.CEPHBLOCKPOOL,
        pvc=None,
        service_account=None,
        size=None,
        custom_data=None,
        node_name=None,
        replica_count=1,
    ):
        """
        Args:
            interface (str): CephBlockPool or CephFileSystem. This decides
                whether a RBD based or CephFS resource is created.
                RBD is default.
            pvc (PVC object): ocs_ci.ocs.resources.pvc.PVC instance kind.
            service_account (str): service account name for dc_pods
            size (int): The requested size for the PVC
            custom_data (dict): If provided then Pod object is created
                by using these data. Parameter `pvc` is not used but reference
                is set if provided.
            node_name (str): The name of specific node to schedule the pod
            replica_count (int): Replica count for deployment config
        """
        if custom_data:
            dc_pod_obj = helpers.create_resource(**custom_data)
        else:

            pvc = pvc or pvc_factory(interface=interface, size=size)
            sa_obj = service_account_factory(project=pvc.project,
                                             service_account=service_account)
            dc_pod_obj = helpers.create_pod(interface_type=interface,
                                            pvc_name=pvc.name,
                                            do_reload=False,
                                            namespace=pvc.namespace,
                                            sa_name=sa_obj.name,
                                            dc_deployment=True,
                                            replica_count=replica_count,
                                            node_name=node_name)
        instances.append(dc_pod_obj)
        log.info(dc_pod_obj.name)
        helpers.wait_for_resource_state(dc_pod_obj,
                                        constants.STATUS_RUNNING,
                                        timeout=180)
        dc_pod_obj.pvc = pvc
        return dc_pod_obj
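Assuming the enclosing fixture is exposed to tests as dc_pod_factory (an assumed name), a hypothetical call could look like this; the service account and PVC are created implicitly when not supplied.

def test_dc_pod_factory_io(dc_pod_factory):
    # CephFS-backed deployment-config pod with a 5 GiB PVC (illustrative size)
    dc_pod = dc_pod_factory(interface=constants.CEPHFILESYSTEM, size=5)
    dc_pod.run_io(storage_type='fs', size='1G')
    pod.get_fio_rw_iops(dc_pod)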
Example 5
 def test_basics_rbd(self, test_fixture_rbd):
     """
     Testing basics: secret creation,
     storage class creation,pvc and pod with rbd
     """
     global RBD_PVC_OBJ, RBD_POD_OBJ
     log.info('creating pvc for RBD ')
     pvc_name = helpers.create_unique_resource_name('test-rbd', 'pvc')
     RBD_PVC_OBJ = helpers.create_pvc(sc_name=RBD_SC_OBJ.name,
                                      pvc_name=pvc_name)
     helpers.wait_for_resource_state(RBD_PVC_OBJ, constants.STATUS_BOUND)
     RBD_PVC_OBJ.reload()
     if RBD_PVC_OBJ.backed_pv is None:
         RBD_PVC_OBJ.reload()
     RBD_POD_OBJ = helpers.create_pod(
         interface_type=constants.CEPHBLOCKPOOL, pvc_name=RBD_PVC_OBJ.name)
     helpers.wait_for_resource_state(RBD_POD_OBJ, constants.STATUS_RUNNING)
     RBD_POD_OBJ.reload()
Example 6
def create_rhelpod(namespace, pod_name):
    """
    Creates the RHEL pod

    Args:
        namespace (str): Namespace to create RHEL pod
        pod_name (str): Pod name

    Returns:
        pod: Pod instance for RHEL

    """
    # importing here to avoid dependencies
    from tests import helpers
    rhelpod_obj = helpers.create_pod(namespace=namespace,
                                     pod_name=pod_name,
                                     pod_dict_path=constants.RHEL_7_7_POD_YAML)
    helpers.wait_for_resource_state(rhelpod_obj, constants.STATUS_RUNNING)
    return rhelpod_obj
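A hypothetical call site, reusing create_unique_resource_name from the other examples; the namespace string and the command are illustrative only.

pod_name = helpers.create_unique_resource_name('test-rhel', 'pod')
rhelpod_obj = create_rhelpod(namespace='rhel-pod-test', pod_name=pod_name)
# The returned pod is already Running, so commands can be executed directly
rhelpod_obj.exec_cmd_on_pod('cat /etc/redhat-release', out_yaml_format=False)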
Example 7
 def test_basics_cephfs(self, test_fixture_cephfs):
     """
     Testing basics: secret creation,
      storage class creation, pvc and pod with cephfs
     """
     global CEPHFS_PVC_OBJ, CEPHFS_POD_OBJ
     log.info('creating pvc for CephFS ')
     pvc_name = helpers.create_unique_resource_name('test-cephfs', 'pvc')
     CEPHFS_PVC_OBJ = helpers.create_pvc(sc_name=CEPHFS_SC_OBJ.name,
                                         pvc_name=pvc_name)
     helpers.wait_for_resource_state(CEPHFS_PVC_OBJ, constants.STATUS_BOUND)
     CEPHFS_PVC_OBJ.reload()
     log.info('creating cephfs pod')
     CEPHFS_POD_OBJ = helpers.create_pod(
         interface_type=constants.CEPHFILESYSTEM,
         pvc_name=CEPHFS_PVC_OBJ.name)
     helpers.wait_for_resource_state(CEPHFS_POD_OBJ,
                                     constants.STATUS_RUNNING)
     CEPHFS_POD_OBJ.reload()
Example 8
    def test_rwo_pvc_assign_pod_node(
        self, interface, pvc_factory, teardown_factory
    ):
        """
        Test assign nodeName to a pod using RWO pvc
        """
        worker_nodes_list = helpers.get_worker_nodes()

        # Create a RWO PVC
        pvc_obj = pvc_factory(
            interface=interface, access_mode=constants.ACCESS_MODE_RWO,
            status=constants.STATUS_BOUND
        )

        # Create a pod on a particular node
        selected_node = random.choice(worker_nodes_list)
        logger.info(
            f"Creating a pod on node: {selected_node} with pvc {pvc_obj.name}"
        )

        pod_obj = helpers.create_pod(
            interface_type=interface, pvc_name=pvc_obj.name,
            namespace=pvc_obj.namespace, node_name=selected_node,
            pod_dict_path=constants.NGINX_POD_YAML
        )
        teardown_factory(pod_obj)

        # Confirm that the pod is running on the selected_node
        helpers.wait_for_resource_state(
            resource=pod_obj, state=constants.STATUS_RUNNING, timeout=120
        )
        pod_obj.reload()
        assert pod.verify_node_name(pod_obj, selected_node), (
            'Pod is running on a different node than the selected node'
        )

        # Run IO
        logger.info(f"Running IO on pod {pod_obj.name}")
        pod_obj.run_io(storage_type='fs', size='512M', runtime=30)
        pod.get_fio_rw_iops(pod_obj)
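teardown_factory is consumed throughout these examples but never shown; a minimal sketch of what such a fixture could look like, assuming pytest's request.addfinalizer and the delete/wait_for_delete methods used elsewhere in these snippets.

@pytest.fixture()
def teardown_factory(request):
    instances = []

    def factory(resource_obj):
        # Register a resource (pod, PVC, storage class, ...) for deletion at teardown
        instances.append(resource_obj)

    def finalizer():
        # Delete in reverse creation order and wait for each removal
        for instance in reversed(instances):
            instance.delete()
            instance.ocp.wait_for_delete(resource_name=instance.name)

    request.addfinalizer(finalizer)
    return factory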
Example 9
 def test_pvc_delete_and_verify_size_is_returned_to_backend_pool(self):
     """
     Test case to verify after delete pvc size returned to backend pools
     """
     used_before_creating_pvc = check_ceph_used_space()
     logger.info(f"Used before creating pvc {used_before_creating_pvc}")
     pvc_obj = create_pvc_and_verify_pvc_exists(self.sc_obj.name,
                                                self.cbp_obj.name)
     pod_obj = helpers.create_pod(interface_type=constants.CEPHBLOCKPOOL,
                                  pvc_name=pvc_obj.name)
     used_percentage = pod.run_io_and_verify_mount_point(pod_obj)
     assert used_percentage > '90%', "I/O's didn't run completely"
     used_after_creating_pvc = check_ceph_used_space()
     logger.info(f"Used after creating pvc {used_after_creating_pvc}")
     assert used_before_creating_pvc < used_after_creating_pvc
     pod_obj.delete()
     pvc_obj.delete()
     verify_pv_not_exists(pvc_obj, self.cbp_obj.name)
     used_after_deleting_pvc = check_ceph_used_space()
     logger.info(f"Used after deleting pvc {used_after_deleting_pvc}")
     assert used_after_deleting_pvc < used_after_creating_pvc
     assert (abs(used_after_deleting_pvc - used_before_creating_pvc) < 0.2)
Example 10
    def test_pvc_delete_and_verify_size_is_returned_to_backend_pool(self):
        """
        Test case to verify after delete pvc size returned to backend pools
        """
        failed_to_delete = []
        ceph_obj1 = CephCluster()
        used_before_creating_pvc = ceph_obj1.check_ceph_pool_used_space(
            cbp_name=self.cbp_obj.name)
        logger.info(f"Used before creating PVC {used_before_creating_pvc}")
        pvc_obj = create_pvc_and_verify_pvc_exists(self.sc_obj.name,
                                                   self.cbp_obj.name)
        pod_obj = helpers.create_pod(interface_type=constants.CEPHBLOCKPOOL,
                                     pvc_name=pvc_obj.name)
        helpers.wait_for_resource_state(pod_obj, constants.STATUS_RUNNING)
        pod_obj.reload()
        pod.run_io_and_verify_mount_point(pod_obj, bs='10M', count='300')
        used_after_creating_pvc = ceph_obj1.check_ceph_pool_used_space(
            cbp_name=self.cbp_obj.name)
        logger.info(f"Used after creating PVC {used_after_creating_pvc}")
        assert used_before_creating_pvc < used_after_creating_pvc
        rbd_image_id = pvc_obj.image_uuid
        for resource in pod_obj, pvc_obj:
            resource.delete()
            try:
                resource.ocp.wait_for_delete(resource)
            except TimeoutError:
                failed_to_delete.append(resource)
        if failed_to_delete:
            raise UnexpectedBehaviour(
                f"Failed to delete resources: {failed_to_delete}")
        verify_pv_not_exists(pvc_obj, self.cbp_obj.name, rbd_image_id)
        ceph_obj2 = CephCluster()
        used_after_deleting_pvc = ceph_obj2.check_ceph_pool_used_space(
            cbp_name=self.cbp_obj.name)

        logger.info(f"Used after deleting PVC {used_after_deleting_pvc}")
        assert used_after_deleting_pvc < used_after_creating_pvc
        assert (abs(used_after_deleting_pvc - used_before_creating_pvc) < 0.5)
Example 11
    def test_ocs_347(self, resources):
        pod, pvc, storageclass = resources

        log.info("Creating RBD StorageClass")
        storageclass.append(
            helpers.create_storage_class(
                interface_type=constants.CEPHBLOCKPOOL,
                interface_name=self.cbp_obj.name,
                secret_name=self.rbd_secret_obj.name,
            ))
        log.info("Creating a PVC")
        pvc.append(
            helpers.create_pvc(
                sc_name=storageclass[0].name,
                wait=True,
            ))
        log.info(f"Creating a pod on with pvc {pvc[0].name}")
        pod.append(
            helpers.create_pod(interface_type=constants.CEPHBLOCKPOOL,
                               pvc_name=pvc[0].name,
                               desired_status=constants.STATUS_RUNNING,
                               wait=True,
                               pod_dict_path=constants.NGINX_POD_YAML))
Example 12
 def test_pvc_delete_and_verify_size_is_returned_to_backend_pool(self):
     """
     Test case to verify after delete pvc size returned to backend pools
     """
     used_before_creating_pvc = check_ceph_used_space()
     logger.info(f"Used before creating pvc {used_before_creating_pvc}")
     pvc_obj = create_pvc_and_verify_pvc_exists(self.sc_obj.name,
                                                self.cbp_obj.name)
     pod_data = defaults.CSI_RBD_POD_DICT.copy()
     pod_data['spec']['volumes'][0]['persistentVolumeClaim'][
         'claimName'] = pvc_obj.name
     pod_obj = helpers.create_pod(**pod_data)
     used_percentage = pod.run_io_and_verify_mount_point(pod_obj)
     assert used_percentage > '90%', "I/O's didn't run completely"
     used_after_creating_pvc = check_ceph_used_space()
     logger.info(f"Used after creating pvc {used_after_creating_pvc}")
     assert used_before_creating_pvc < used_after_creating_pvc
     pod_obj.delete()
     pvc_obj.delete()
     verify_pv_not_exists(pvc_obj.backed_pv, self.cbp_obj.name)
     used_after_deleting_pvc = check_ceph_used_space()
     logger.info(f"Used after deleting pvc {used_after_deleting_pvc}")
     assert used_after_deleting_pvc < used_after_creating_pvc
     assert (abs(used_after_deleting_pvc - used_before_creating_pvc) < 0.2)
Example 13
    def test_rwx_dynamic_pvc(self, setup_base):
        """
        RWX Dynamic PVC creation tests with Reclaim policy set to Delete/Retain
        """
        logger.info(f"CephFS RWX test")
        logger.info(
            f"Creating second pod on node: {self.worker_nodes_list[1]} "
            f"with pvc {self.pvc_obj.name}")

        pod_obj2 = helpers.create_pod(interface_type=self.interface_type,
                                      pvc_name=self.pvc_obj.name,
                                      namespace=self.namespace,
                                      node_name=self.worker_nodes_list[1],
                                      pod_dict_path=constants.NGINX_POD_YAML)
        helpers.wait_for_resource_state(pod_obj2, constants.STATUS_RUNNING)
        pod_obj2.reload()
        node_pod1 = self.pod_obj1.get().get('spec').get('nodeName')
        node_pod2 = pod_obj2.get().get('spec').get('nodeName')

        assert node_pod1 != node_pod2, 'Both pods are on the same node'

        # Run IO on both the pods
        logger.info(f"Running IO on pod {self.pod_obj1.name}")
        file_name1 = self.pod_obj1.name
        logger.info(file_name1)
        self.pod_obj1.run_io(storage_type=self.storage_type,
                             size=self.io_size,
                             runtime=30,
                             fio_filename=file_name1)

        logger.info(f"Running IO on pod {pod_obj2.name}")
        file_name2 = pod_obj2.name
        pod_obj2.run_io(storage_type=self.storage_type,
                        size=self.io_size,
                        runtime=30,
                        fio_filename=file_name2)

        # Check IO and calculate md5sum of files
        pod.get_fio_rw_iops(self.pod_obj1)
        md5sum_pod1_data = pod.cal_md5sum(pod_obj=self.pod_obj1,
                                          file_name=file_name1)

        pod.get_fio_rw_iops(pod_obj2)
        md5sum_pod2_data = pod.cal_md5sum(pod_obj=pod_obj2,
                                          file_name=file_name2)

        logger.info(f"verify data from alternate pods")

        assert pod.verify_data_integrity(pod_obj=pod_obj2,
                                         file_name=file_name1,
                                         original_md5sum=md5sum_pod1_data)

        assert pod.verify_data_integrity(pod_obj=self.pod_obj1,
                                         file_name=file_name2,
                                         original_md5sum=md5sum_pod2_data)

        # Verify that data is mutable from any pod

        logger.info(f"Perform modification of files from alternate pod")
        # Access and rename file written by pod-2 from pod-1
        file_path2 = pod.get_file_path(pod_obj2, file_name2)
        logger.info(file_path2)
        self.pod_obj1.exec_cmd_on_pod(
            command=f"bash -c \"mv {file_path2} {file_path2}-renamed\"",
            out_yaml_format=False)

        # Access and rename file written by pod-1 from pod-2
        file_path1 = pod.get_file_path(self.pod_obj1, file_name1)
        logger.info(file_path1)
        pod_obj2.exec_cmd_on_pod(
            command=f"bash -c \"mv {file_path1} {file_path1}-renamed\"",
            out_yaml_format=False)

        logger.info(f"Verify presence of renamed files from both pods")
        file_names = [f"{file_path1}-renamed", f"{file_path2}-renamed"]
        for file in file_names:
            assert pod.check_file_existence(
                self.pod_obj1, file), (f"File {file} doesn't exist")
            logger.info(f"File {file} exists in {self.pod_obj1.name} ")
            assert pod.check_file_existence(
                pod_obj2, file), (f"File {file} doesn't exist")
            logger.info(f"File {file} exists in {pod_obj2.name}")
Example 14
    def test_rwo_dynamic_pvc(self, setup_base):
        logger.info(f"Creating two pods using same PVC {self.pvc_obj.name}")
        logger.info(f"Creating first pod on node: {self.worker_nodes_list[0]}")
        pod_obj1 = helpers.create_pod(interface_type=self.interface_type,
                                      pvc_name=self.pvc_obj.name,
                                      desired_status=constants.STATUS_RUNNING,
                                      wait=True,
                                      namespace=self.namespace,
                                      node_name=self.worker_nodes_list[0],
                                      pod_dict_path=constants.NGINX_POD_YAML)
        node_pod1 = pod_obj1.get().get('spec').get('nodeName')

        logger.info(
            f"Creating second pod on node: {self.worker_nodes_list[1]}")

        pod_obj2 = helpers.create_pod(interface_type=self.interface_type,
                                      pvc_name=self.pvc_obj.name,
                                      wait=False,
                                      namespace=self.namespace,
                                      node_name=self.worker_nodes_list[1],
                                      pod_dict_path=constants.NGINX_POD_YAML)
        node_pod2 = pod_obj2.get().get('spec').get('nodeName')

        assert node_pod1 != node_pod2, 'Both pods are on the same node'

        logger.info(f"Running IO on pod {pod_obj1.name}")
        file_name = pod_obj1.name
        pod_obj1.run_io(storage_type=self.storage_type,
                        size=self.io_size,
                        runtime=30,
                        fio_filename=file_name)
        pod.get_fio_rw_iops(pod_obj1)
        md5sum_pod1_data = pod.cal_md5sum(pod_obj=pod_obj1,
                                          file_name=file_name)

        # Verify that second pod is still in Pending state and not able to
        # attain Running state due to expected failure
        assert helpers.wait_for_resource_state(resource=pod_obj2,
                                               state=constants.STATUS_PENDING)
        self.verify_expected_failure_event(
            ocs_obj=pod_obj2, failure_str=self.expected_pod_failure)

        pod_obj1.delete()
        pod_obj1.ocp.wait_for_delete(resource_name=pod_obj1.name)

        # Wait for second pod to be in Running state
        assert helpers.wait_for_resource_state(resource=pod_obj2,
                                               state=constants.STATUS_RUNNING,
                                               timeout=240)

        assert pod.verify_data_integrity(pod_obj=pod_obj2,
                                         file_name=file_name,
                                         original_md5sum=md5sum_pod1_data)

        pod_obj2.run_io(storage_type=self.storage_type,
                        size=self.io_size,
                        runtime=30,
                        fio_filename=pod_obj2.name)
        pod.get_fio_rw_iops(pod_obj2)

        # Again verify data integrity
        assert pod.verify_data_integrity(pod_obj=pod_obj2,
                                         file_name=file_name,
                                         original_md5sum=md5sum_pod1_data)

        pod_obj2.delete()
        pod_obj2.ocp.wait_for_delete(resource_name=pod_obj2.name)
Example 15
    def test_monitoring_when_one_of_the_prometheus_node_down(
            self, test_fixture):
        """
        Test case to validate when the prometheus pod is down and
        interaction with prometheus
        """
        namespace_list, pvc_objs, pod_objs, sc = test_fixture

        aws_obj = aws.AWS()

        # Get all the openshift-monitoring pods
        monitoring_pod_obj_list = pod.get_all_pods(
            namespace=defaults.OCS_MONITORING_NAMESPACE)

        # Get the worker node list
        workers = get_typed_nodes(node_type='worker')

        # Get all prometheus pods
        pod_obj_list = pod.get_all_pods(
            namespace=defaults.OCS_MONITORING_NAMESPACE,
            selector=['prometheus'])

        for pod_obj in pod_obj_list:

            # Get the node where the prometheus pod is hosted
            prometheus_pod_obj = pod_obj.get()
            prometheus_node = prometheus_pod_obj['spec']['nodeName']

            prometheus_node = [
                node for node in workers
                if node.get().get('metadata').get('name') == prometheus_node
            ]

            # Bring down the node hosting the prometheus pod (forced instance restart)
            instances = aws.get_instances_ids_and_names(prometheus_node)
            aws_obj.restart_ec2_instances(instances=instances,
                                          wait=True,
                                          force=True)

            # Validate all nodes are in READY state
            wait_for_nodes_status()

        # Check that the nodes are in Ready state and the cluster health is OK
        self.sanity_helpers.health_check()

        # Check all the monitoring pods are up
        for pod_obj in monitoring_pod_obj_list:
            wait_for_resource_state(resource=pod_obj,
                                    state=constants.STATUS_RUNNING)

        # Check for the created pvc metrics after nodes restarting
        for pvc_obj in pvc_objs:
            assert check_pvcdata_collected_on_prometheus(pvc_obj.name), (
                f"On prometheus pod for created pvc {pvc_obj.name} related data is not collected"
            )

        # Create projects after restarting nodes
        namespaces = helpers.create_multilpe_projects(number_of_project=1)
        namespace_list.extend(namespaces)

        # Create pvcs after restarting nodes
        pvcs = [
            helpers.create_pvc(sc_name=sc.name,
                               namespace=each_namespace.namespace)
            for each_namespace in namespaces
        ]
        for pvc_obj in pvcs:
            helpers.wait_for_resource_state(pvc_obj, constants.STATUS_BOUND)
            pvc_obj.reload()
        pvc_objs.extend(pvcs)

        # Create app pods after restarting nodes
        pods = [
            helpers.create_pod(interface_type=constants.CEPHBLOCKPOOL,
                               pvc_name=each_pvc.name,
                               namespace=each_pvc.namespace)
            for each_pvc in pvcs
        ]
        for pod_obj in pods:
            helpers.wait_for_resource_state(pod_obj, constants.STATUS_RUNNING)
            pod_obj.reload()
        pod_objs.extend(pods)

        # Check for the created pvc metrics on prometheus pod after restarting nodes
        for pvc_obj in pvcs:
            assert check_pvcdata_collected_on_prometheus(pvc_obj.name), (
                f"On prometheus pod for created pvc {pvc_obj.name} related data is not collected"
            )
Example 16
def run_io_in_background(request):
    """
    Run IO during the test execution
    """
    if config.RUN['cli_params'].get('io_in_bg'):
        log.info(f"Tests will be running while IO is in the background")

        g_sheet = None
        if config.RUN['google_api_secret']:
            g_sheet = GoogleSpreadSheetAPI("IO BG results", 0)
        else:
            log.warning(
                "Google API secret was not found. IO won't be reported to "
                "a Google spreadsheet")
        results = list()
        temp_file = tempfile.NamedTemporaryFile(mode='w+',
                                                prefix='test_status',
                                                delete=False)

        def get_test_status():
            with open(temp_file.name, 'r') as t_file:
                return t_file.readline()

        def set_test_status(status):
            with open(temp_file.name, 'w') as t_file:
                t_file.writelines(status)

        set_test_status('running')

        def finalizer():
            """
            Delete the resources created during setup, used for
            running IO in the test background
            """
            set_test_status('finished')
            try:
                for status in TimeoutSampler(90, 3, get_test_status):
                    if status == 'terminated':
                        break
            except TimeoutExpiredError:
                log.warning("Background IO was still in progress before IO "
                            "thread termination")
            if thread:
                thread.join()

            log.info(f"Background IO has stopped")
            for result in results:
                log.info(f"IOPs after FIO for pod {pod_obj.name}:")
                log.info(f"Read: {result[0]}")
                log.info(f"Write: {result[1]}")

            if pod_obj:
                pod_obj.delete()
                pod_obj.ocp.wait_for_delete(resource_name=pod_obj.name)
            if pvc_obj:
                pvc_obj.delete()
                pvc_obj.ocp.wait_for_delete(resource_name=pvc_obj.name)
            if sc_obj:
                sc_obj.delete()
            if cbp_obj:
                cbp_obj.delete()
            if secret_obj:
                secret_obj.delete()

        request.addfinalizer(finalizer)

        secret_obj = helpers.create_secret(
            interface_type=constants.CEPHBLOCKPOOL)
        cbp_obj = helpers.create_ceph_block_pool()
        sc_obj = helpers.create_storage_class(
            interface_type=constants.CEPHBLOCKPOOL,
            interface_name=cbp_obj.name,
            secret_name=secret_obj.name)
        pvc_obj = helpers.create_pvc(sc_name=sc_obj.name, size='2Gi')
        helpers.wait_for_resource_state(pvc_obj, constants.STATUS_BOUND)
        pvc_obj.reload()
        pod_obj = helpers.create_pod(interface_type=constants.CEPHBLOCKPOOL,
                                     pvc_name=pvc_obj.name)
        helpers.wait_for_resource_state(pod_obj, constants.STATUS_RUNNING)
        pod_obj.reload()

        def run_io_in_bg():
            """
            Run IO by executing FIO and deleting the file created for FIO on
            the pod, in a while true loop. Will be running as long as
            the test is running.
            """
            while get_test_status() == 'running':
                pod_obj.run_io('fs', '1G')
                result = pod_obj.get_fio_results()
                reads = result.get('jobs')[0].get('read').get('iops')
                writes = result.get('jobs')[0].get('write').get('iops')
                if g_sheet:
                    now = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
                    g_sheet.insert_row([now, reads, writes])

                results.append((reads, writes))

                file_path = os.path.join(
                    pod_obj.get_storage_path(storage_type='fs'),
                    pod_obj.io_params['filename'])
                pod_obj.exec_cmd_on_pod(f'rm -rf {file_path}')
            set_test_status('terminated')

        log.info(f"Start running IO in the test background")

        thread = threading.Thread(target=run_io_in_bg)
        thread.start()
Example 17
    def raw_block_pv(self):
        """
        Testing basic creation of app pod with RBD RWX raw block pv support
        """
        worker_nodes = helpers.get_worker_nodes()
        pvcs = list()
        for size in ['500Mi', '10Gi', '1Ti']:
            pvcs.append(
                helpers.create_pvc(sc_name=self.sc_obj.name,
                                   size=size,
                                   access_mode=constants.ACCESS_MODE_RWX,
                                   namespace=self.namespace,
                                   volume_mode='Block'))
        pvc_mb, pvc_gb, pvc_tb = pvcs[0], pvcs[1], pvcs[2]

        for pvc in pvcs:
            helpers.wait_for_resource_state(resource=pvc,
                                            state=constants.STATUS_BOUND,
                                            timeout=120)

        pvs = [pvc.backed_pv_obj for pvc in pvcs]

        pods = list()
        pod_dict = constants.CSI_RBD_RAW_BLOCK_POD_YAML
        for pvc in pvc_mb, pvc_gb, pvc_tb:
            for _ in range(3):
                pods.append(
                    helpers.create_pod(interface_type=constants.CEPHBLOCKPOOL,
                                       pvc_name=pvc.name,
                                       namespace=self.namespace,
                                       raw_block_pv=True,
                                       pod_dict_path=pod_dict,
                                       node_name=random.choice(worker_nodes)))

        pvc_mb_pods, pvc_gb_pods, pvc_tb_pods = pods[0:3], pods[3:6], pods[6:9]
        for pod in pods:
            helpers.wait_for_resource_state(resource=pod,
                                            state=constants.STATUS_RUNNING,
                                            timeout=120)
        storage_type = 'block'

        with ThreadPoolExecutor() as p:
            for pod in pvc_mb_pods:
                logging.info(f'running io on pod {pod.name}')
                p.submit(
                    pod.run_io,
                    storage_type=storage_type,
                    size=f'{random.randint(10,200)}M',
                )
            for pod in pvc_gb_pods:
                logging.info(f'running io on pod {pod.name}')
                p.submit(
                    pod.run_io,
                    storage_type=storage_type,
                    size=f'{random.randint(1,5)}G',
                )
            for pod in pvc_tb_pods:
                logging.info(f'running io on pod {pod.name}')
                p.submit(
                    pod.run_io,
                    storage_type=storage_type,
                    size=f'{random.randint(10,15)}G',
                )

        for pod in pods:
            get_fio_rw_iops(pod)
        return pods, pvcs, pvs
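A rough sketch of a test wrapping the helper above; the test name is hypothetical and cleanup is omitted for brevity (the teardown_factory sketch shown earlier could be reused).

    def test_raw_block_pv_basic(self):
        # Exercise the raw block helper; IO has already been run and
        # verified inside raw_block_pv()
        pods, pvcs, pvs = self.raw_block_pv()
        logging.info(
            f"Created {len(pods)} pods on {len(pvcs)} raw block PVCs "
            f"backed by {len(pvs)} PVs"
        )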
Example 18
    def test_rwo_dynamic_pvc(self, setup_base):
        """
        RWO Dynamic PVC creation tests with Reclaim policy set to Delete/Retain
        """

        logger.info(
            f"Creating second pod on node: {self.worker_nodes_list[1]}"
        )

        pod_obj2 = helpers.create_pod(
            interface_type=self.interface_type, pvc_name=self.pvc_obj.name,
            do_reload=False, namespace=self.namespace,
            node_name=self.worker_nodes_list[1],
            pod_dict_path=constants.NGINX_POD_YAML
        )
        node_pod1 = self.pod_obj1.get().get('spec').get('nodeName')
        node_pod2 = pod_obj2.get().get('spec').get('nodeName')

        assert node_pod1 != node_pod2, 'Both pods are on the same node'

        logger.info(f"Running IO on pod {self.pod_obj1.name}")
        file_name = self.pod_obj1.name
        self.pod_obj1.run_io(
            storage_type=self.storage_type, size=self.io_size, runtime=30,
            fio_filename=file_name
        )
        pod.get_fio_rw_iops(self.pod_obj1)
        md5sum_pod1_data = pod.cal_md5sum(
            pod_obj=self.pod_obj1, file_name=file_name
        )
        # Verify that second pod is still in ContainerCreating state and not able to
        # attain Running state due to expected failure
        helpers.wait_for_resource_state(
            resource=pod_obj2, state=constants.STATUS_CONTAINER_CREATING
        )
        self.verify_expected_failure_event(
            ocs_obj=pod_obj2, failure_str=self.expected_pod_failure
        )
        logger.info(
            f"Deleting first pod so that second pod can attach"
            f" {self.pvc_obj.name}"
        )
        self.pod_obj1.delete()
        self.pod_obj1.ocp.wait_for_delete(resource_name=self.pod_obj1.name)

        # Wait for second pod to be in Running state
        helpers.wait_for_resource_state(
            resource=pod_obj2, state=constants.STATUS_RUNNING, timeout=240
        )

        assert pod.verify_data_integrity(
            pod_obj=pod_obj2, file_name=file_name,
            original_md5sum=md5sum_pod1_data
        )

        pod_obj2.run_io(
            storage_type=self.storage_type, size=self.io_size, runtime=30,
            fio_filename=pod_obj2.name
        )
        pod.get_fio_rw_iops(pod_obj2)

        # Again verify data integrity
        assert pod.verify_data_integrity(
            pod_obj=pod_obj2, file_name=file_name,
            original_md5sum=md5sum_pod1_data
        )
Example 19
    def test_pvc_reattach_time_performance(self, pvc_factory,
                                           teardown_factory):
        """
        Measure the time it takes to reattach a PVC to a pod on a different
        node after populating the volume with several copies of an unpacked
        Linux kernel (each unzipped kernel is 892M and 61694 files)
        """

        kernel_url = 'https://cdn.kernel.org/pub/linux/kernel/v4.x/linux-4.19.5.tar.gz'
        download_path = 'tmp'
        # Number of times we copy the kernel
        copies = 3

        # Download a linux Kernel
        import os
        dir_path = os.path.join(os.getcwd(), download_path)
        file_path = os.path.join(dir_path, 'file.gz')
        if not os.path.exists(dir_path):
            os.makedirs(dir_path)
        urllib.request.urlretrieve(kernel_url, file_path)

        worker_nodes_list = helpers.get_worker_nodes()
        assert (len(worker_nodes_list) > 1)
        node_one = worker_nodes_list[0]
        node_two = worker_nodes_list[1]

        # Create a PVC
        accessmode = constants.ACCESS_MODE_RWX
        if self.interface == constants.CEPHBLOCKPOOL:
            accessmode = constants.ACCESS_MODE_RWO
        pvc_obj = pvc_factory(
            interface=self.interface,
            access_mode=accessmode,
            status=constants.STATUS_BOUND,
            size='15',
        )

        # Create a pod on one node
        logging.info(
            f"Creating Pod with pvc {pvc_obj.name} on node {node_one}")

        helpers.pull_images('nginx')
        pod_obj1 = helpers.create_pod(interface_type=self.interface,
                                      pvc_name=pvc_obj.name,
                                      namespace=pvc_obj.namespace,
                                      node_name=node_one,
                                      pod_dict_path=constants.NGINX_POD_YAML)

        # Confirm that pod is running on the selected_nodes
        logging.info('Checking whether pods are running on the selected nodes')
        helpers.wait_for_resource_state(resource=pod_obj1,
                                        state=constants.STATUS_RUNNING,
                                        timeout=120)

        pod_name = pod_obj1.name
        pod_path = '/var/lib/www/html'

        _ocp = OCP(namespace=pvc_obj.namespace)

        rsh_cmd = f"exec {pod_name} -- apt-get update"
        _ocp.exec_oc_cmd(rsh_cmd)
        rsh_cmd = f"exec {pod_name} -- apt-get install -y rsync"
        _ocp.exec_oc_cmd(rsh_cmd, ignore_error=True, out_yaml_format=False)

        rsh_cmd = f"rsync {dir_path} {pod_name}:{pod_path}"
        _ocp.exec_oc_cmd(rsh_cmd)

        rsh_cmd = f"exec {pod_name} -- tar xvf {pod_path}/tmp/file.gz -C /var/lib/www/html/tmp"
        _ocp.exec_oc_cmd(rsh_cmd)

        for x in range(copies):
            rsh_cmd = f"exec {pod_name} -- mkdir -p {pod_path}/folder{x}"
            _ocp.exec_oc_cmd(rsh_cmd)
            rsh_cmd = f"exec {pod_name} -- cp -r {pod_path}/tmp {pod_path}/folder{x}"
            _ocp.exec_oc_cmd(rsh_cmd)

        rsh_cmd = f"delete pod {pod_name}"
        _ocp.exec_oc_cmd(rsh_cmd)

        logging.info(
            f"Creating Pod with pvc {pvc_obj.name} on node {node_two}")

        pod_obj2 = helpers.create_pod(interface_type=self.interface,
                                      pvc_name=pvc_obj.name,
                                      namespace=pvc_obj.namespace,
                                      node_name=node_two,
                                      pod_dict_path=constants.NGINX_POD_YAML)

        start_time = time.time()

        pod_name = pod_obj2.name
        helpers.wait_for_resource_state(resource=pod_obj2,
                                        state=constants.STATUS_RUNNING,
                                        timeout=120)
        end_time = time.time()
        total_time = end_time - start_time
        if total_time > 60:
            raise ex.PerformanceException(
                f"Pod creation time is {total_time} and "
                f"greater than 60 seconds")
        logging.info(f"Pod {pod_name} creation time took {total_time} seconds")

        teardown_factory(pod_obj2)
        os.remove(file_path)
        os.rmdir(dir_path)
Example 20
    def test_rwx_pvc_assign_pod_node(self, interface, pvc_factory,
                                     teardown_factory):
        """
        Test assign nodeName to a pod using RWX pvc
        """
        worker_nodes_list = helpers.get_worker_nodes()
        if interface == constants.CEPHBLOCKPOOL:
            volume_mode = 'Block'
            storage_type = 'block'
            block_pv = True
            pod_yaml = constants.CSI_RBD_RAW_BLOCK_POD_YAML
        else:
            volume_mode = ''
            storage_type = 'fs'
            block_pv = False
            pod_yaml = ''

        # Create a RWX PVC
        pvc_obj = pvc_factory(interface=interface,
                              access_mode=constants.ACCESS_MODE_RWX,
                              status=constants.STATUS_BOUND,
                              volume_mode=volume_mode)

        # Create two pods on selected nodes
        pod_list = []
        selected_nodes = random.sample(worker_nodes_list, k=2)
        logger.info(
            f"Creating {len(selected_nodes)} pods with pvc {pvc_obj.name}")
        for node in selected_nodes:
            logger.info(f"Creating pod on node: {node}")
            pod_obj = helpers.create_pod(
                interface_type=interface,
                pvc_name=pvc_obj.name,
                namespace=pvc_obj.namespace,
                node_name=node,
                pod_dict_path=pod_yaml,
                raw_block_pv=block_pv,
            )
            pod_list.append(pod_obj)
            teardown_factory(pod_obj)

        # Confirm that both pods are running on the selected_nodes
        logger.info('Checking whether pods are running on the selected nodes')
        for index in range(0, len(selected_nodes)):
            pod_obj = pod_list[index]
            selected_node = selected_nodes[index]
            helpers.wait_for_resource_state(resource=pod_obj,
                                            state=constants.STATUS_RUNNING,
                                            timeout=120)
            pod_obj.reload()
            assert pod.verify_node_name(pod_obj, selected_node), (
                f"Pod {pod_obj.name} is running on a different node "
                f"than the selected node")

        # Run IOs on all pods. FIO Filename is kept same as pod name
        with ThreadPoolExecutor() as p:
            for pod_obj in pod_list:
                logger.info(f"Running IO on pod {pod_obj.name}")
                p.submit(pod_obj.run_io,
                         storage_type=storage_type,
                         size='512M',
                         runtime=30,
                         fio_filename=pod_obj.name)

        # Check IO from all pods
        for pod_obj in pod_list:
            pod.get_fio_rw_iops(pod_obj)
Example 21
def create_resources(resources, run_io=True):
    """
    Sanity validation - Create resources (FS and RBD) and run IO

    Args:
        resources (tuple): Lists of projects, secrets, pools,
            storageclasses, pvcs and pods
        run_io (bool): True for run IO, False otherwise

    """
    # Create resources and run IO for both FS and RBD
    # Unpack resources
    projects, secrets, pools, storageclasses, pvcs, pods = resources[:6]

    # Project
    projects.append(helpers.create_project())

    # Secrets
    secrets.append(helpers.create_secret(constants.CEPHBLOCKPOOL))
    secrets.append(helpers.create_secret(constants.CEPHFILESYSTEM))

    # Pools
    pools.append(helpers.create_ceph_block_pool())
    pools.append(helpers.get_cephfs_data_pool_name())

    # Storageclasses
    storageclasses.append(
        helpers.create_storage_class(interface_type=constants.CEPHBLOCKPOOL,
                                     interface_name=pools[0].name,
                                     secret_name=secrets[0].name))
    storageclasses.append(
        helpers.create_storage_class(interface_type=constants.CEPHFILESYSTEM,
                                     interface_name=pools[1],
                                     secret_name=secrets[1].name))

    # PVCs
    pvcs.append(
        helpers.create_pvc(sc_name=storageclasses[0].name,
                           namespace=projects[0].namespace))
    pvcs.append(
        helpers.create_pvc(sc_name=storageclasses[1].name,
                           namespace=projects[0].namespace))
    for pvc in pvcs:
        helpers.wait_for_resource_state(pvc, constants.STATUS_BOUND)
        pvc.reload()

    # Pods
    pods.append(
        helpers.create_pod(interface_type=constants.CEPHBLOCKPOOL,
                           pvc_name=pvcs[0].name,
                           namespace=projects[0].namespace))
    pods.append(
        helpers.create_pod(interface_type=constants.CEPHFILESYSTEM,
                           pvc_name=pvcs[1].name,
                           namespace=projects[0].namespace))
    for pod in pods:
        helpers.wait_for_resource_state(pod, constants.STATUS_RUNNING)
        pod.reload()

    if run_io:
        # Run IO
        for pod in pods:
            pod.run_io('fs', '1G')
        for pod in pods:
            fio_result = pod.get_fio_results()
            logger.info(f"IOPs after FIO for pod {pod.name}:")
            logger.info(
                f"Read: {fio_result.get('jobs')[0].get('read').get('iops')}")
            logger.info(
                f"Write: {fio_result.get('jobs')[0].get('write').get('iops')}")
Example 22
    def test_create_multiple_sc_with_different_pool_name(
        self, teardown_factory
    ):
        """
        This test function does the following:
        *. Creates multiple Storage Classes with different pool name
        *. Creates PVCs using each Storage Class
        *. Mount each PVC to an app pod
        *. Run IO on each app pod
        """

        # Create 3 storageclasses, each with different pool name
        cbp_list = []
        sc_list = []
        for i in range(3):
            log.info(f"Creating cephblockpool")
            cbp_obj = helpers.create_ceph_block_pool()
            log.info(
                f"{cbp_obj.name} created successfully"
            )
            log.info(
                f"Creating a RBD storage class using {cbp_obj.name}"
            )
            cbp_list.append(cbp_obj)
            sc_obj = helpers.create_storage_class(
                interface_type=constants.CEPHBLOCKPOOL,
                interface_name=cbp_obj.name,
                secret_name=self.rbd_secret_obj.name
            )

            log.info(
                f"StorageClass: {sc_obj.name} "
                f"created successfully using {cbp_obj.name}"
            )
            sc_list.append(sc_obj)
            teardown_factory(cbp_obj)
            teardown_factory(sc_obj)

        # Create PVCs using each SC
        pvc_list = []
        for i in range(3):
            log.info(f"Creating a PVC using {sc_list[i].name}")
            pvc_obj = helpers.create_pvc(sc_list[i].name)
            log.info(
                f"PVC: {pvc_obj.name} created successfully using "
                f"{sc_list[i].name}"
            )
            pvc_list.append(pvc_obj)
            teardown_factory(pvc_obj)
            helpers.wait_for_resource_state(pvc_obj, constants.STATUS_BOUND)
            pvc_obj.reload()

        # Create app pod and mount each PVC
        pod_list = []
        for i in range(3):
            log.info(f"Creating an app pod and mount {pvc_list[i].name}")
            pod_obj = helpers.create_pod(
                interface_type=constants.CEPHBLOCKPOOL,
                pvc_name=pvc_list[i].name,
            )
            log.info(
                f"{pod_obj.name} created successfully and "
                f"mounted {pvc_list[i].name}"
            )
            pod_list.append(pod_obj)
            teardown_factory(pod_obj)
            helpers.wait_for_resource_state(pod_obj, constants.STATUS_RUNNING)
            pod_obj.reload()

        # Run IO on each app pod for sometime
        for pod in pod_list:
            log.info(f"Running FIO on {pod.name}")
            pod.run_io('fs', size='2G')

        for pod in pod_list:
            get_fio_rw_iops(pod)
Example 23
    def test_create_multiple_sc_with_same_pool_name(
            self, interface_type, resources
    ):
        """
        This test function does the following:
        *. Creates multiple Storage Classes with same pool name
        *. Creates PVCs using each Storage Class
        *. Mount each PVC to an app pod
        *. Run IO on each app pod
        """
        # Unpack resources
        pods, pvcs, storageclasses = resources

        # Create 3 Storage Classes with same pool name
        if interface_type == constants.CEPHBLOCKPOOL:
            secret = self.rbd_secret_obj.name
            interface_name = self.cbp_obj.name
        else:
            interface_type = constants.CEPHFILESYSTEM
            secret = self.cephfs_secret_obj.name
            interface_name = helpers.get_cephfs_data_pool_name()
        for i in range(3):
            log.info(f"Creating a {interface_type} storage class")
            storageclasses.append(
                helpers.create_storage_class(
                    interface_type=interface_type,
                    interface_name=interface_name,
                    secret_name=secret
                )
            )
            log.info(
                f"{interface_type}StorageClass: {storageclasses[i].name} "
                f"created successfully"
            )

        # Create PVCs using each SC
        for i in range(3):
            log.info(f"Creating a PVC using {storageclasses[i].name}")
            pvcs.append(
                helpers.create_pvc(storageclasses[i].name)
            )
        for pvc in pvcs:
            helpers.wait_for_resource_state(pvc, constants.STATUS_BOUND)
            pvc.reload()

        # Create app pod and mount each PVC
        for i in range(3):
            log.info(f"Creating an app pod and mount {pvcs[i].name}")
            pods.append(
                helpers.create_pod(
                    interface_type=interface_type, pvc_name=pvcs[i].name,
                    namespace=defaults.ROOK_CLUSTER_NAMESPACE
                )
            )
            for pod in pods:
                helpers.wait_for_resource_state(pod, constants.STATUS_RUNNING)
                pod.reload()
            log.info(
                f"{pods[i].name} created successfully and "
                f"mounted {pvcs[i].name}"
            )

        # Run IO on each app pod for sometime
        for pod in pods:
            log.info(f"Running FIO on {pod.name}")
            pod.run_io('fs', size='2G')

        for pod in pods:
            get_fio_rw_iops(pod)
Example 24
    def validate_cluster(self, resources, instances):
        """
        Perform cluster validation - nodes readiness, Ceph cluster health
        check and functional resources tests
        """
        instances_names = list(instances.values())
        assert ocp.wait_for_nodes_ready(instances_names), (
            "Not all nodes reached status Ready"
        )

        ceph_cluster = CephCluster()
        assert ceph_health_check(
            namespace=config.ENV_DATA['cluster_namespace']
        )
        ceph_cluster.cluster_health_check(timeout=60)

        # Create resources and run IO for both FS and RBD
        # Unpack resources
        projects, secrets, pools, storageclasses, pvcs, pods = resources[:6]

        # Project
        projects.append(helpers.create_project())

        # Secrets
        secrets.append(helpers.create_secret(constants.CEPHBLOCKPOOL))
        secrets.append(helpers.create_secret(constants.CEPHFILESYSTEM))

        # Pools
        pools.append(helpers.create_ceph_block_pool())
        pools.append(helpers.get_cephfs_data_pool_name())

        # Storageclasses
        storageclasses.append(
            helpers.create_storage_class(
                interface_type=constants.CEPHBLOCKPOOL,
                interface_name=pools[0].name,
                secret_name=secrets[0].name
            )
        )
        storageclasses.append(
            helpers.create_storage_class(
                interface_type=constants.CEPHFILESYSTEM,
                interface_name=pools[1],
                secret_name=secrets[1].name
            )
        )

        # PVCs
        pvcs.append(helpers.create_pvc(
            sc_name=storageclasses[0].name, namespace=projects[0].namespace)
        )
        pvcs.append(helpers.create_pvc(
            sc_name=storageclasses[1].name, namespace=projects[0].namespace)
        )

        # Pods
        pods.append(
            helpers.create_pod(
                interface_type=constants.CEPHBLOCKPOOL, pvc_name=pvcs[0].name,
                namespace=projects[0].namespace
            )
        )
        pods.append(
            helpers.create_pod(
                interface_type=constants.CEPHFILESYSTEM, pvc_name=pvcs[1].name,
                namespace=projects[0].namespace
            )
        )

        # Run IO
        for pod in pods:
            pod.run_io('fs', '1G')
        for pod in pods:
            fio_result = pod.get_fio_results()
            logger.info(f"IOPs after FIO for pod {pod.name}:")
            logger.info(
                f"Read: {fio_result.get('jobs')[0].get('read').get('iops')}"
            )
            logger.info(
                f"Write: {fio_result.get('jobs')[0].get('write').get('iops')}"
            )
Example 25
    def test_pvc_snapshot(self, interface, teardown_factory):
        """
        1. Run I/O on a pod file.
        2. Calculate md5sum of the file.
        3. Take a snapshot of the PVC.
        4. Create a new PVC out of that snapshot.
        5. Attach a new pod to it.
        6. Verify that the file is present on the new pod also.
        7. Verify that the md5sum of the file on the new pod matches
           with the md5sum of the file on the original pod.

        Args:
            interface (str): The type of the interface
                (e.g. CephBlockPool, CephFileSystem)
            teardown_factory: A fixture to destroy objects
        """
        log.info(f"Running IO on pod {self.pod_obj.name}")
        file_name = self.pod_obj.name
        log.info(f"File created during IO {file_name}")
        self.pod_obj.run_io(
            storage_type='fs', size='1G', fio_filename=file_name
        )

        # Wait for fio to finish
        fio_result = self.pod_obj.get_fio_results()
        err_count = fio_result.get('jobs')[0].get('error')
        assert err_count == 0, (
            f"IO error on pod {self.pod_obj.name}. "
            f"FIO result: {fio_result}"
        )
        log.info(f"Verified IO on pod {self.pod_obj.name}.")

        # Verify presence of the file
        file_path = pod.get_file_path(self.pod_obj, file_name)
        log.info(f"Actual file path on the pod {file_path}")
        assert pod.check_file_existence(self.pod_obj, file_path), (
            f"File {file_name} doesn't exist"
        )
        log.info(f"File {file_name} exists in {self.pod_obj.name}")

        # Calculate md5sum
        orig_md5_sum = pod.cal_md5sum(self.pod_obj, file_name)
        # Take a snapshot
        snap_yaml = constants.CSI_RBD_SNAPSHOT_YAML
        if interface == constants.CEPHFILESYSTEM:
            snap_yaml = constants.CSI_CEPHFS_SNAPSHOT_YAML

        snap_name = helpers.create_unique_resource_name(
            'test', 'snapshot'
        )
        snap_obj = pvc.create_pvc_snapshot(
            self.pvc_obj.name,
            snap_yaml,
            snap_name,
            helpers.default_volumesnapshotclass(interface).name,
        )
        snap_obj.ocp.wait_for_resource(
            condition='true', resource_name=snap_obj.name,
            column=constants.STATUS_READYTOUSE, timeout=60
        )
        teardown_factory(snap_obj)

        # Same Storage class of the original PVC
        sc_name = self.pvc_obj.backed_sc

        # Size should be same as of the original PVC
        pvc_size = str(self.pvc_obj.size) + "Gi"

        # Create pvc out of the snapshot
        # Both, the snapshot and the restore PVC should be in same namespace
        restore_pvc_name = helpers.create_unique_resource_name(
            'test', 'restore-pvc'
        )
        restore_pvc_yaml = constants.CSI_RBD_PVC_RESTORE_YAML
        if interface == constants.CEPHFILESYSTEM:
            restore_pvc_yaml = constants.CSI_CEPHFS_PVC_RESTORE_YAML

        restore_pvc_obj = pvc.create_restore_pvc(
            sc_name=sc_name, snap_name=snap_obj.name,
            namespace=snap_obj.namespace, size=pvc_size,
            pvc_name=restore_pvc_name,
            restore_pvc_yaml=restore_pvc_yaml
        )
        helpers.wait_for_resource_state(
            restore_pvc_obj,
            constants.STATUS_BOUND
        )
        restore_pvc_obj.reload()
        teardown_factory(restore_pvc_obj)

        # Create and attach pod to the pvc
        restore_pod_obj = helpers.create_pod(
            interface_type=interface, pvc_name=restore_pvc_obj.name,
            namespace=snap_obj.namespace,
            pod_dict_path=constants.NGINX_POD_YAML
        )

        # Confirm that the pod is running
        helpers.wait_for_resource_state(
            resource=restore_pod_obj,
            state=constants.STATUS_RUNNING
        )
        restore_pod_obj.reload()
        teardown_factory(restore_pod_obj)

        # Verify that the file is present on the new pod
        log.info(
            f"Checking the existence of {file_name} "
            f"on restore pod {restore_pod_obj.name}"
        )
        assert pod.check_file_existence(restore_pod_obj, file_path), (
            f"File {file_name} doesn't exist"
        )
        log.info(f"File {file_name} exists in {restore_pod_obj.name}")

        # Verify that the md5sum matches
        log.info(
            f"Verifying that md5sum of {file_name} "
            f"on pod {self.pod_obj.name} matches with md5sum "
            f"of the same file on restore pod {restore_pod_obj.name}"
        )
        assert pod.verify_data_integrity(
            restore_pod_obj,
            file_name,
            orig_md5_sum
        ), 'Data integrity check failed'
        log.info("Data integrity check passed, md5sum are same")

    def test_pvc_rwx_writeable_after_pod_deletions(
        self, pvc_factory, teardown_factory
    ):
        """
        Test that an RWX PVC remains writeable after deleting all but one of
        the pods that use it.

        1. Create a new project.
        2. Create an RWX CephFS-based PVC.
        3. Attach the same PVC to multiple pods and start IO on all of them.
        4. Delete all but one pod.
        5. Verify the mount point is still writeable.
             - Start IO again on the running pod.
        6. Access the data written by the deleted pods from the running pod.

        """
        worker_nodes_list = helpers.get_worker_nodes()

        # Create a RWX PVC
        pvc_obj = pvc_factory(
            interface=constants.CEPHFILESYSTEM, access_mode=constants.ACCESS_MODE_RWX,
            size=10, status=constants.STATUS_BOUND
        )
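
        # Illustrative only: compared to a default ReadWriteOnce claim, the
        # PVC requested above mainly differs by its access mode, roughly:
        #
        #   spec:
        #     accessModes:
        #       - ReadWriteMany
        #     storageClassName: <CephFS storage class>
        #
        # ReadWriteMany is what allows the same volume to be mounted read-write
        # by pods scheduled on different worker nodes below.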
        logger.info(
            f"Creating pods on all worker nodes backed "
            f"with the same pvc {pvc_obj.name}"
        )

        pod_list = []

        for each_node in worker_nodes_list:
            pod_obj = helpers.create_pod(
                interface_type=constants.CEPHFILESYSTEM, pvc_name=pvc_obj.name,
                namespace=pvc_obj.namespace, node_name=each_node,
                pod_dict_path=constants.NGINX_POD_YAML
            )
            pod_list.append(pod_obj)
            teardown_factory(pod_obj)

        # Confirm pods are created and are running on designated nodes
        node_count = 0
        for pod_obj in pod_list:
            helpers.wait_for_resource_state(
                resource=pod_obj, state=constants.STATUS_RUNNING,
                timeout=120
            )
            pod_obj.reload()
            assert pod.verify_node_name(pod_obj, worker_nodes_list[node_count]), (
                f'Pod {pod_obj.name} is running on a different node '
                f'than the selected node'
            )
            node_count = node_count + 1

        # Run IO on all pods. The fio filename is kept the same as the pod name
        with ThreadPoolExecutor() as p:
            for pod_obj in pod_list:
                logger.info(f"Running IO on pod {pod_obj.name}")
                p.submit(
                    pod_obj.run_io, storage_type='fs', size='512M',
                    runtime=30, fio_filename=pod_obj.name
                )

        # Check IO from all pods
        for pod_obj in pod_list:
            pod.get_fio_rw_iops(pod_obj)

        # Calculate md5sum of each file
        md5sum_pod_data = []
        for pod_obj in pod_list:
            md5sum_pod_data.append(pod.cal_md5sum(
                pod_obj=pod_obj, file_name=pod_obj.name
            ))

        # Delete all but the last app pod.
        for index in range(node_count - 1):
            pod_list[index].delete()
            pod_list[index].ocp.wait_for_delete(
                resource_name=pod_list[index].name
            )

        # Verify presence of files written by each pod
        logger.info(
            f"Verifying existence of each file from surviving app pod "
            f"{pod_list[-1].name}"
        )
        for pod_obj in pod_list:
            file_path = pod.get_file_path(pod_list[-1], pod_obj.name)
            assert pod.check_file_existence(pod_list[-1], file_path), (
                f"File {pod_obj.name} doesn't exist"
            )
            logger.info(
                f"File {pod_obj.name} exists in {pod_list[-1].name}"
            )

        # From surviving pod, verify data integrity of files
        # written by deleted pods
        logger.info(f"verify all data from {pod_list[-1].name}")

        for index, pod_obj in enumerate(pod_list):
            assert pod.verify_data_integrity(
                pod_obj=pod_list[-1], file_name=pod_obj.name,
                original_md5sum=md5sum_pod_data[index]
            )

        # From surviving pod, confirm mount point is still write-able
        logger.info(f"Re-running IO on pod {pod_list[-1].name}")
        fio_new_file = f"{pod_list[-1].name}-new-file"
        pod_list[-1].run_io(
            storage_type='fs', size='512M', runtime=30,
            fio_filename=fio_new_file
        )
        pod.get_fio_rw_iops(pod_list[-1])
        file_path = pod.get_file_path(pod_list[-1], fio_new_file)
        assert pod.check_file_existence(pod_list[-1], file_path), (
            f"File {fio_new_file} doesn't exist"
        )
        logger.info(
            f"File {fio_new_file} exists in {pod_list[-1].name}"
        )
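
# The test above fans fio out to every pod with ThreadPoolExecutor and only
# learns about failures later, via get_fio_rw_iops. A minimal standalone
# sketch (not ocs_ci code) of the same fan-out with explicit error
# propagation; it only assumes pod objects exposing run_io with the keyword
# arguments used in the test.
from concurrent.futures import ThreadPoolExecutor, as_completed


def run_io_on_all_pods(pods):
    with ThreadPoolExecutor(max_workers=len(pods)) as executor:
        futures = {
            executor.submit(
                pod_obj.run_io, storage_type='fs', size='512M',
                runtime=30, fio_filename=pod_obj.name
            ): pod_obj
            for pod_obj in pods
        }
        for future in as_completed(futures):
            # result() re-raises any exception raised in the worker thread,
            # so a failed fio submission surfaces immediately.
            future.result()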
Esempio n. 27
0
    def test_pvc_to_pvc_clone(self, interface_type, teardown_factory):
        """
        Create a clone from an existing PVC and
        verify that the data is preserved in the clone.
        """
        logger.info(f"Running IO on pod {self.pod_obj.name}")
        file_name = self.pod_obj.name
        logger.info(f"File created during IO {file_name}")
        self.pod_obj.run_io(
            storage_type='fs', size='500M', fio_filename=file_name
        )

        # Wait for fio to finish
        self.pod_obj.get_fio_results()
        logger.info(f"Io completed on pod {self.pod_obj.name}.")

        # Verify presence of the file
        file_path = pod.get_file_path(self.pod_obj, file_name)
        logger.info(f"Actual file path on the pod {file_path}")
        assert pod.check_file_existence(self.pod_obj, file_path), (
            f"File {file_name} does not exist"
        )
        logger.info(f"File {file_name} exists in {self.pod_obj.name}")

        # Calculate md5sum of the file.
        orig_md5_sum = pod.cal_md5sum(self.pod_obj, file_name)

        # Create a clone of the existing pvc.
        sc_name = self.pvc_obj.backed_sc
        parent_pvc = self.pvc_obj.name
        clone_yaml = constants.CSI_RBD_PVC_CLONE_YAML
        if interface_type == constants.CEPHFILESYSTEM:
            clone_yaml = constants.CSI_CEPHFS_PVC_CLONE_YAML
        cloned_pvc_obj = pvc.create_pvc_clone(sc_name, parent_pvc, clone_yaml)
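
        # Illustrative only: a CSI PVC clone is expressed as a new PVC whose
        # dataSource references the parent claim instead of a snapshot,
        # roughly:
        #
        #   spec:
        #     storageClassName: <same storage class as the parent PVC>
        #     dataSource:
        #       name: <parent PVC name>
        #       kind: PersistentVolumeClaim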
        teardown_factory(cloned_pvc_obj)
        helpers.wait_for_resource_state(cloned_pvc_obj, constants.STATUS_BOUND)
        cloned_pvc_obj.reload()

        # Create and attach pod to the pvc
        clone_pod_obj = helpers.create_pod(
            interface_type=interface_type, pvc_name=cloned_pvc_obj.name,
            namespace=cloned_pvc_obj.namespace,
            pod_dict_path=constants.NGINX_POD_YAML
        )
        # Confirm that the pod is running
        helpers.wait_for_resource_state(
            resource=clone_pod_obj,
            state=constants.STATUS_RUNNING
        )
        clone_pod_obj.reload()
        teardown_factory(clone_pod_obj)

        # Verify file's presence on the new pod
        logger.info(
            f"Checking the existence of {file_name} on cloned pod "
            f"{clone_pod_obj.name}"
        )
        assert pod.check_file_existence(clone_pod_obj, file_path), (
            f"File {file_path} does not exist"
        )
        logger.info(f"File {file_name} exists in {clone_pod_obj.name}")

        # Verify Contents of a file in the cloned pvc
        # by validating if md5sum matches.
        logger.info(
            f"Verifying that md5sum of {file_name} "
            f"on pod {self.pod_obj.name} matches with md5sum "
            f"of the same file on cloned pod {clone_pod_obj.name}"
        )
        assert pod.verify_data_integrity(
            clone_pod_obj,
            file_name,
            orig_md5_sum
        ), 'Data integrity check failed'
        logger.info("Data integrity check passed, md5sum are same")