コード例 #1
0
    def create_resource_hsbench(self):
        """
        Set up everything the hsbench benchmark needs:
        a service account with SCC privileges, a backing RBD PVC and a
        golang workload pod deployed as a DeploymentConfig that is
        expected to reach the Completed state.
        """
        # Service account with SCC policy so the dc pod can run privileged
        sa_obj = helpers.create_serviceaccount(self.namespace)
        self.sa_name = sa_obj.name
        helpers.add_scc_policy(sa_name=self.sa_name, namespace=self.namespace)

        # Backing PVC for the workload pod
        log.info(
            f"Create Golang pod to generate S3 workload... {self.namespace}")
        self.pod_name = "hsbench-pod"
        self.pvc_obj = helpers.create_pvc(
            sc_name=constants.DEFAULT_STORAGECLASS_RBD,
            namespace=self.namespace,
            size="50Gi",
        )

        # Golang pod (dc deployment); completion signals the workload ran
        self.pod_obj = helpers.create_pod(
            constants.CEPHBLOCKPOOL,
            namespace=self.namespace,
            pod_name=self.pod_name,
            pvc_name=self.pvc_obj.name,
            sa_name=self.sa_name,
            pod_dict_path=self.pod_dic_path,
            dc_deployment=True,
            deploy_pod_status=constants.STATUS_COMPLETED,
        )
コード例 #2
0
def create_pods(request):
    """
    Attach one pod to every PVC stored on the requesting test class and
    wait until all of them reach the Running state. The pods are removed
    again by a registered finalizer.
    """
    class_instance = request.node.cls

    def finalizer():
        """
        Tear down the pods created by this fixture.
        """
        if hasattr(class_instance, "pod_objs"):
            for pod_obj in class_instance.pod_objs:
                pod_obj.delete()

    request.addfinalizer(finalizer)

    # One pod per PVC, created without reloading each resource
    class_instance.pod_objs = [
        helpers.create_pod(
            interface_type=class_instance.interface,
            pvc_name=pvc_obj.name,
            do_reload=False,
            namespace=class_instance.namespace,
        )
        for pvc_obj in class_instance.pvc_objs
    ]

    # Only wait after all create calls were issued
    for pod_obj in class_instance.pod_objs:
        helpers.wait_for_resource_state(pod_obj, constants.STATUS_RUNNING)
コード例 #3
0
def create_dc_pods(request):
    """
    Create one deploymentconfig pod per PVC stored on the requesting
    test class and wait for each to reach the Running state. The dc
    pods are cleaned up by a registered finalizer.
    """
    class_instance = request.node.cls

    def finalizer():
        """
        Remove the deploymentconfig pods created by this fixture.
        """
        if hasattr(class_instance, "dc_pod_objs"):
            for dc_pod in class_instance.dc_pod_objs:
                delete_deploymentconfig_pods(pod_obj=dc_pod)

    request.addfinalizer(finalizer)

    # One privileged dc pod per PVC, sharing the class-level service account
    class_instance.dc_pod_objs = []
    for pvc_obj in class_instance.pvc_objs:
        dc_pod = helpers.create_pod(
            interface_type=class_instance.interface,
            pvc_name=pvc_obj.name,
            do_reload=False,
            namespace=class_instance.namespace,
            sa_name=class_instance.sa_obj.name,
            dc_deployment=True,
            replica_count=class_instance.replica_count,
        )
        class_instance.dc_pod_objs.append(dc_pod)

    for dc_pod in class_instance.dc_pod_objs:
        helpers.wait_for_resource_state(
            dc_pod, constants.STATUS_RUNNING, timeout=180
        )
コード例 #4
0
    def create_pod_and_wait_for_completion(self, **kwargs):
        """
        Create a fio Job pod attached to ``self.pvc_obj`` and wait until
        it reaches the Completed state.

        Keyword Args:
            filesize (str): fio file size, default "1M". All remaining
                kwargs are forwarded to helpers.create_pod.

        Raises:
            PodNotCreated: if the pod could not be created.
        """
        # Creating pod yaml file to run as a Job, the command to run on the pod and
        # arguments to it will replace in the create_pod function
        self.create_fio_pod_yaml(pvc_size=int(self.pvc_size),
                                 filesize=kwargs.pop("filesize", "1M"))
        # Create a pod
        logger.info(f"Creating Pod with pvc {self.pvc_obj.name}")

        try:
            self.pod_object = helpers.create_pod(
                pvc_name=self.pvc_obj.name,
                namespace=self.namespace,
                interface_type=self.interface,
                pod_name="pod-pas-test",
                pod_dict_path=self.pod_yaml_file.name,
                **kwargs,
            )
        except Exception as e:
            # BUG FIX: the original logged self.pod_object.name here, but
            # self.pod_object is never assigned when create_pod raises, so
            # the handler itself blew up with AttributeError and masked the
            # real failure. Log the PVC name instead.
            logger.exception(
                f"Pod attached to PVC {self.pvc_obj.name} was not created, exception [{str(e)}]"
            )
            raise PodNotCreated("Pod attached to PVC was not created.") from e

        # The Job pod runs fio to completion; wait for Completed, not Running
        logger.info("Checking whether the pod is running")
        helpers.wait_for_resource_state(
            resource=self.pod_object,
            state=constants.STATUS_COMPLETED,
            timeout=self.timeout,
        )
コード例 #5
0
    def create_pvc_and_deploymentconfig_pod(self, request, pvc_factory):
        """
        Create a PVC and a privileged deploymentconfig pod attached to it.

        Returns:
            tuple: (pod_obj, pvc_obj) - the Running dc pod and its PVC
        """
        # BUG FIX: initialize pod_obj before registering the finalizer.
        # The original finalizer closed over a name that was only bound
        # after create_pod succeeded, so any earlier failure made the
        # teardown crash with NameError.
        pod_obj = None

        def finalizer():
            if pod_obj is not None:
                delete_deploymentconfig_pods(pod_obj)

        request.addfinalizer(finalizer)

        # Create pvc
        pvc_obj = pvc_factory()

        # Create service_account to get privilege for deployment pods
        sa_name = helpers.create_serviceaccount(pvc_obj.project.namespace)

        helpers.add_scc_policy(
            sa_name=sa_name.name, namespace=pvc_obj.project.namespace
        )

        pod_obj = helpers.create_pod(
            interface_type=constants.CEPHBLOCKPOOL,
            pvc_name=pvc_obj.name,
            namespace=pvc_obj.project.namespace,
            sa_name=sa_name.name,
            dc_deployment=True,
        )
        helpers.wait_for_resource_state(
            resource=pod_obj, state=constants.STATUS_RUNNING
        )
        return pod_obj, pvc_obj
コード例 #6
0
    def __init__(self):
        """
        Create a CephFS PVC and pod from the reference CSI CephFS pod
        yaml, pin the pod to compute-0, then populate the volume with a
        large number of test files.
        """
        # Pull the pvc/pod names out of the reference yaml
        with open(constants.CSI_CEPHFS_POD_YAML, "r") as pod_fd:
            pod_info = yaml.safe_load(pod_fd)
        volume_claim = pod_info["spec"]["volumes"][0]["persistentVolumeClaim"]
        pvc_name = volume_claim["claimName"]
        self.pod_name = pod_info["metadata"]["name"]

        # Make sure resources are cleaned up on teardown
        config.RUN["cli_params"]["teardown"] = True

        self.cephfs_pvc = helpers.create_pvc(
            sc_name=constants.DEFAULT_STORAGECLASS_CEPHFS,
            namespace=config.ENV_DATA["cluster_namespace"],
            pvc_name=pvc_name,
            size=SIZE,
        )
        helpers.wait_for_resource_state(
            self.cephfs_pvc, constants.STATUS_BOUND, timeout=300
        )
        self.cephfs_pod = helpers.create_pod(
            interface_type=constants.CEPHFILESYSTEM,
            namespace=config.ENV_DATA["cluster_namespace"],
            pvc_name=pvc_name,
            node_name="compute-0",
            pod_name=self.pod_name,
        )
        helpers.wait_for_resource_state(
            self.cephfs_pod, constants.STATUS_RUNNING, timeout=300
        )
        logging.info("pvc and cephfs pod created")

        self.ocp_obj = ocp.OCP(
            kind=constants.POD,
            namespace=config.ENV_DATA["cluster_namespace"],
        )
        # Fill the volume so the test has realistic data to work against
        self.test_file_list = add_million_files(self.pod_name, self.ocp_obj)
        logging.info("cephfs test files created")
コード例 #7
0
    def setup(self, interface, pvc_factory, service_account_factory,
              teardown_factory):
        """
        Create dc pod with replica 5
        """
        self.replica_count = 5
        pvc_obj = pvc_factory(interface=interface, size=3)
        sa_obj = service_account_factory(project=pvc_obj.project)

        # Deploy the dc pod and wait until it reports Running
        pod1 = create_pod(
            interface_type=interface,
            pvc_name=pvc_obj.name,
            namespace=pvc_obj.namespace,
            sa_name=sa_obj.name,
            dc_deployment=True,
            replica_count=self.replica_count,
            deploy_pod_status=constants.STATUS_RUNNING,
        )
        self.name = pod1.labels["name"]
        self.namespace = pod1.namespace

        # Look up the backing deploymentconfig and register it for teardown
        dc_ocp = OCP(
            kind=constants.DEPLOYMENTCONFIG,
            namespace=self.namespace,
            resource_name=self.name,
        )
        dc_info = dc_ocp.get(
            resource_name=self.name, selector=f"app={self.name}"
        )["items"][0]
        teardown_factory(OCS(**dc_info))
コード例 #8
0
    def test_ocs_347(self, resources):
        """
        Create an RBD storageclass, a PVC bound to it and a pod consuming
        that PVC; every created object is appended to the shared resources
        lists so the fixture can clean them up.
        """
        pod, pvc, storageclass = resources

        log.info("Creating RBD StorageClass")
        sc_obj = helpers.create_storage_class(
            interface_type=constants.CEPHBLOCKPOOL,
            interface_name=self.cbp_obj.name,
            secret_name=self.rbd_secret_obj.name,
        )
        storageclass.append(sc_obj)

        log.info("Creating a PVC")
        pvc.append(helpers.create_pvc(sc_name=storageclass[0].name))
        for pvc_obj in pvc:
            helpers.wait_for_resource_state(pvc_obj, constants.STATUS_BOUND)
            pvc_obj.reload()

        log.info(f"Creating a pod on with pvc {pvc[0].name}")
        pod_obj = helpers.create_pod(
            interface_type=constants.CEPHBLOCKPOOL,
            pvc_name=pvc[0].name,
            pod_dict_path=constants.NGINX_POD_YAML,
        )
        pod.append(pod_obj)
        helpers.wait_for_resource_state(pod_obj, constants.STATUS_RUNNING)
        pod_obj.reload()
コード例 #9
0
    def test_capacity_breakdown_ui(
        self, setup_ui_class, project_name, pod_name, sc_type, teardown_project_factory
    ):
        """
        Test Capacity Breakdown UI

        project_name (str): the project name
        pod_name (str): pod name
        sc_type (str): storage class [fs, block]

        """
        # Project hosting the test resources; removed on teardown
        project_obj = helpers.create_project(project_name=project_name)
        teardown_project_factory(project_obj)

        logger.info(
            f"Created new pvc sc_name={sc_type} namespace={project_name}, "
            f"size=6Gi, access_mode={constants.ACCESS_MODE_RWO}"
        )
        pvc_obj = helpers.create_pvc(
            sc_name=sc_type,
            namespace=project_name,
            size="6Gi",
            do_reload=False,
            access_mode=constants.ACCESS_MODE_RWO,
        )

        logger.info(
            f"Create new pod. Pod name={pod_name},"
            f"interface_type={constants.CEPHBLOCKPOOL}"
        )
        pod_obj = helpers.create_pod(
            pvc_name=pvc_obj.name,
            namespace=project_obj.namespace,
            interface_type=constants.CEPHBLOCKPOOL,
            pod_name=pod_name,
        )
        logger.info(f"Wait for pod {pod_name} move to Running state")
        helpers.wait_for_resource_state(
            pod_obj, state=constants.STATUS_RUNNING, timeout=300
        )

        # Generate IO so the capacity breakdown has data to display
        logger.info("Run fio workload")
        pod_obj.run_io(
            storage_type=constants.WORKLOAD_STORAGE_TYPE_FS,
            size="4GB",
        )
        fio_result = pod_obj.get_fio_results()
        logger.info("IOPs after FIO:")
        job_stats = fio_result.get("jobs")[0]
        reads = job_stats.get("read").get("iops")
        writes = job_stats.get("write").get("iops")
        logger.info(f"Read: {reads}")
        logger.info(f"Write: {writes}")

        # Verify the project and pod show up in the UI capacity breakdown
        validation_ui_obj = ValidationUI(setup_ui_class)
        assert validation_ui_obj.check_capacity_breakdown(
            project_name=project_name, pod_name=pod_name
        ), "The Project/Pod not created on Capacity Breakdown"
コード例 #10
0
def create_rbd_pod(request):
    """
    Attach a pod to the class-level RBD PVC, wait until it is Running
    and reload its resource data.
    """
    class_instance = request.node.cls
    class_instance.pod_obj = helpers.create_pod(
        interface_type=constants.CEPHBLOCKPOOL,
        pvc_name=class_instance.pvc_obj.name,
        namespace=class_instance.namespace,
    )
    helpers.wait_for_resource_state(
        class_instance.pod_obj, constants.STATUS_RUNNING
    )
    class_instance.pod_obj.reload()
コード例 #11
0
    def base_setup(
        self,
        interface_iterate,
        storageclass_factory,
        pvc_size,
    ):
        """
        A setup phase for the test - creating resources

        Args:
            interface_iterate: A fixture to iterate over ceph interfaces
            storageclass_factory: A fixture to create everything needed for a
                storageclass
            pvc_size: The size of the PVC in Gi

        """
        self.interface = interface_iterate
        self.sc_obj = storageclass_factory(self.interface)

        # Human-readable name of the storage backend under test
        sc_names = {
            constants.CEPHBLOCKPOOL: "RBD",
            constants.CEPHFILESYSTEM: "CephFS",
            constants.CEPHBLOCKPOOL_THICK: "RBD-Thick",
        }
        if self.interface in sc_names:
            self.sc = sc_names[self.interface]

        self.create_test_project()

        self.pvc_obj = helpers.create_pvc(
            sc_name=self.sc_obj.name,
            size=pvc_size + "Gi",
            namespace=self.namespace,
        )
        helpers.wait_for_resource_state(self.pvc_obj, constants.STATUS_BOUND)
        self.pvc_obj.reload()

        # Create a POD, attach it to the PVC and prepare the fio workload
        try:
            self.pod_object = helpers.create_pod(
                interface_type=self.interface,
                pvc_name=self.pvc_obj.name,
                namespace=self.namespace,
                pod_dict_path=constants.PERF_POD_YAML,
            )
            helpers.wait_for_resource_state(
                self.pod_object, constants.STATUS_RUNNING
            )
            self.pod_object.reload()
            self.pod_object.workload_setup("fs", jobs=1, fio_installed=True)
        except Exception as e:
            log.error(
                f"Pod on PVC {self.pvc_obj.name} was not created, exception {str(e)}"
            )
            raise ex.PodNotCreated("Pod on PVC was not created.")
コード例 #12
0
    def run_io(self):
        """
        Create POD(s), attach them to the PVC(s), run IO to fill 70% of
        each PVC and wait until all the I/O operations are completed.
        The POD(s) are deleted at the end.

        Return:
            bool : Running I/O success

        Raise:
            TimeoutExpiredError : if not all I/O completed within 20 Min.

        """
        # 20 Min covers up to 120 PVCs of 25GiB each running IO in parallel
        timeout = 1200

        # One pod per PVC; the pod yaml embeds the IO workload
        created_pods = []
        for pvc_obj in self.pvc_objs:
            log.info("Creating Pod and Starting IO on it")
            new_pod = helpers.create_pod(
                pvc_name=pvc_obj.name,
                namespace=self.namespace,
                interface_type=self.interface,
                pod_dict_path=self.pod_yaml_file.name,
            )
            assert new_pod, "Failed to create pod"
            created_pods.append(new_pod)

        log.info(
            "Wait for all of the POD(s) to be created, and compleat running I/O"
        )
        performance_lib.wait_for_resource_bulk_status(
            "pod",
            len(created_pods),
            self.namespace,
            constants.STATUS_COMPLETED,
            timeout,
            5,
        )
        log.info("I/O Completed on all POD(s)")

        # Tear down every pod we created, then wait for the bulk delete
        log.info("Try to delete all created PODs")
        for finished_pod in created_pods:
            finished_pod.delete(wait=False)

        log.info("Wait for all PODS(s) to be deleted")
        performance_lib.wait_for_resource_bulk_status(
            "pod", 0, self.namespace, constants.STATUS_COMPLETED, timeout, 5
        )
        log.info("All pOD(s) was deleted")
        return True
コード例 #13
0
 def test_basics_rbd(self, test_fixture_rbd):
     """
     Testing basics: secret creation,
     storage class creation,pvc and pod with rbd
     """
     global RBD_PVC_OBJ, RBD_POD_OBJ

     log.info("creating pvc for RBD ")
     RBD_PVC_OBJ = helpers.create_pvc(
         sc_name=RBD_SC_OBJ.name,
         pvc_name=helpers.create_unique_resource_name("test-rbd", "pvc"),
     )
     helpers.wait_for_resource_state(RBD_PVC_OBJ, constants.STATUS_BOUND)
     RBD_PVC_OBJ.reload()
     # Reload once more if the backing PV was not populated yet
     if RBD_PVC_OBJ.backed_pv is None:
         RBD_PVC_OBJ.reload()

     RBD_POD_OBJ = helpers.create_pod(
         interface_type=constants.CEPHBLOCKPOOL, pvc_name=RBD_PVC_OBJ.name
     )
     helpers.wait_for_resource_state(RBD_POD_OBJ, constants.STATUS_RUNNING)
     RBD_POD_OBJ.reload()
コード例 #14
0
    def run(self):
        """
        Run samples_num iterations: create a PVC, attach a pod to it and
        record both the pod start time and the CSI attach time.
        """
        for i in range(self.samples_num):

            # Create the PVC the pod will attach to
            csi_start_time = self.get_time("csi")
            log.info(f"{self.msg_prefix} Start creating PVC number {i + 1}.")
            pvc_obj = helpers.create_pvc(
                sc_name=self.sc_obj.name,
                size=self.pvc_size,
                namespace=self.namespace,
            )
            helpers.wait_for_resource_state(pvc_obj, constants.STATUS_BOUND)
            pvc_obj.reload()
            self.pvc_list.append(pvc_obj)
            log.info(
                f"{self.msg_prefix} PVC number {i + 1} was successfully created ."
            )

            # Create a POD and attach it to the PVC
            try:
                pod_obj = helpers.create_pod(
                    interface_type=self.interface,
                    pvc_name=pvc_obj.name,
                    namespace=self.namespace,
                    pod_dict_path=constants.PERF_POD_YAML,
                )
                helpers.wait_for_resource_state(
                    pod_obj, constants.STATUS_RUNNING
                )
                pod_obj.reload()
            except Exception as e:
                log.error(
                    f"Pod on PVC {pvc_obj.name} was not created, exception {str(e)}"
                )
                raise ex.PodNotCreated("Pod on PVC was not created.")
            self.pod_result_list.append(pod_obj)

            # Record the POD start time (includes attach) and the CSI time
            self.start_time_dict_list.append(helpers.pod_start_time(pod_obj))
            csi_time = performance_lib.pod_attach_csi_time(
                self.interface,
                pvc_obj.backed_pv,
                csi_start_time,
                self.namespace,
            )[0]
            self.csi_time_dict_list.append(csi_time)
コード例 #15
0
    def test_rwo_pvc_assign_pod_node(self, interface, pvc_factory,
                                     teardown_factory):
        """
        Test assign nodeName to a pod using RWO pvc
        """
        worker_nodes_list = get_worker_nodes()

        # Create a RWO PVC
        pvc_obj = pvc_factory(
            interface=interface,
            access_mode=constants.ACCESS_MODE_RWO,
            status=constants.STATUS_BOUND,
        )

        # Pin the pod explicitly on one randomly chosen worker node
        selected_node = random.choice(worker_nodes_list)
        logger.info(
            f"Creating a pod on node: {selected_node} with pvc {pvc_obj.name}")

        pod_obj = helpers.create_pod(
            interface_type=interface,
            pvc_name=pvc_obj.name,
            namespace=pvc_obj.namespace,
            node_name=selected_node,
            pod_dict_path=constants.NGINX_POD_YAML,
        )
        teardown_factory(pod_obj)

        # Confirm that the pod is running on the selected_node
        helpers.wait_for_resource_state(
            resource=pod_obj,
            state=constants.STATUS_RUNNING,
            timeout=120,
        )
        pod_obj.reload()
        assert pod.verify_node_name(
            pod_obj, selected_node
        ), "Pod is running on a different node than the selected node"

        # Run IO
        logger.info(f"Running IO on pod {pod_obj.name}")
        pod_obj.run_io(
            storage_type="fs", size="512M", runtime=30, invalidate=0
        )
        pod.get_fio_rw_iops(pod_obj)
コード例 #16
0
 def test_basics_cephfs(self, test_fixture_cephfs):
     """
     Testing basics: secret creation,
      storage class creation, pvc and pod with cephfs
     """
     global CEPHFS_PVC_OBJ, CEPHFS_POD_OBJ

     log.info("creating pvc for CephFS ")
     CEPHFS_PVC_OBJ = helpers.create_pvc(
         sc_name=CEPHFS_SC_OBJ.name,
         pvc_name=helpers.create_unique_resource_name("test-cephfs", "pvc"),
     )
     helpers.wait_for_resource_state(CEPHFS_PVC_OBJ, constants.STATUS_BOUND)
     CEPHFS_PVC_OBJ.reload()

     log.info("creating cephfs pod")
     CEPHFS_POD_OBJ = helpers.create_pod(
         interface_type=constants.CEPHFILESYSTEM,
         pvc_name=CEPHFS_PVC_OBJ.name,
     )
     helpers.wait_for_resource_state(
         CEPHFS_POD_OBJ, constants.STATUS_RUNNING
     )
     CEPHFS_POD_OBJ.reload()
コード例 #17
0
ファイル: hsbench.py プロジェクト: ypersky1980/ocs-ci
    def create_resource_hsbench(self):
        """
        Create resource for hsbench mark test:
            Create service account
            Create PVC
            Create golang pod

        """
        # hsbench needs RGW; bail out early when the cluster has none
        self.rgw_pod = pod.get_rgw_pods()
        if not self.rgw_pod:
            raise UnexpectedBehaviour(
                "This cluster doesn't have RGW pod(s) to perform hsbench")

        # Create service account with SCC privileges for the dc pod
        sa_obj = helpers.create_serviceaccount(self.namespace)
        self.sa_name = sa_obj.name
        helpers.add_scc_policy(sa_name=self.sa_name,
                               namespace=self.namespace)

        # Create test pvc+pod
        log.info(
            f"Create Golang pod to generate S3 workload... {self.namespace}"
        )
        self.pod_name = "hsbench-pod"
        self.pvc_obj = helpers.create_pvc(
            sc_name=constants.DEFAULT_STORAGECLASS_RBD,
            namespace=self.namespace,
            size="50Gi",
        )
        # Golang pod pinned to compute-0, deployed as a dc and expected
        # to reach the Completed state once the workload finished
        self.pod_obj = helpers.create_pod(
            constants.CEPHBLOCKPOOL,
            namespace=self.namespace,
            pod_name=self.pod_name,
            pvc_name=self.pvc_obj.name,
            node_name="compute-0",
            sa_name=self.sa_name,
            pod_dict_path=self.pod_dic_path,
            dc_deployment=True,
            deploy_pod_status=constants.STATUS_COMPLETED,
        )
コード例 #18
0
    def setup(self, interface, pvc_factory, service_account_factory,
              teardown_factory):
        """
        Create dc pod with replica 5

        Skips the test when pod deployment times out because all the
        replicas were scheduled on a single node (the test needs the
        pods spread over multiple nodes).
        """
        self.replica_count = 5
        pvc_obj = pvc_factory(interface=interface, size=3)
        sa_obj = service_account_factory(project=pvc_obj.project)
        try:
            pod1 = create_pod(
                interface_type=interface,
                pvc_name=pvc_obj.name,
                namespace=pvc_obj.namespace,
                sa_name=sa_obj.name,
                dc_deployment=True,
                replica_count=self.replica_count,
                deploy_pod_status=constants.STATUS_RUNNING,
            )
        except TimeoutExpiredError:
            # The test cannot be continued if all the pods are created on the same node
            pods = pod.get_all_pods(namespace=pvc_obj.namespace)
            pod_nodes = [pod.get_pod_node(pod_obj).name for pod_obj in pods]
            # BUG FIX: the original compared `set(pod_nodes) == 1` (a set
            # against an int), which is always False, so the skip below
            # could never trigger. Compare the count of distinct nodes.
            if len(set(pod_nodes)) == 1:
                pytest.skip(
                    "All pods are created on same node and reached Running state"
                )
            raise

        self.name = pod1.labels["name"]
        self.namespace = pod1.namespace

        # Fetch the deploymentconfig behind the pods and register it for
        # teardown
        dc_obj = OCP(
            kind=constants.DEPLOYMENTCONFIG,
            namespace=self.namespace,
            resource_name=self.name,
        )
        dc_info = dc_obj.get(resource_name=self.name,
                             selector=f"app={self.name}")["items"][0]

        dc_obj = OCS(**dc_info)
        teardown_factory(dc_obj)
コード例 #19
0
    def test_pvc_reattach_time_performance(self, pvc_factory, teardown_factory):
        """
        Measure the time to re-attach a populated PVC to a pod on a
        different node.

        Flow:
            1. Download a linux kernel tarball locally (892M unzipped,
               61694 files).
            2. Create a PVC (RWX, or RWO for RBD) and a pod on node_one;
               rsync the tarball in, unpack it, and copy it ``copies``
               times to fill the volume with many files.
            3. Delete the pod, recreate it on node_two and time how long
               it takes to reach the Running state.

        Raises:
            ex.PerformanceException: if the reattach took more than 60s.
        """

        kernel_url = "https://cdn.kernel.org/pub/linux/kernel/v4.x/linux-4.19.5.tar.gz"
        download_path = "tmp"
        # Number of times we copy the kernel
        copies = 3

        # Download a linux Kernel
        import os

        dir_path = os.path.join(os.getcwd(), download_path)
        file_path = os.path.join(dir_path, "file.gz")
        if not os.path.exists(dir_path):
            os.makedirs(dir_path)
        urllib.request.urlretrieve(kernel_url, file_path)

        # Two distinct workers are required: attach on one, re-attach on
        # the other
        worker_nodes_list = node.get_worker_nodes()
        assert len(worker_nodes_list) > 1
        node_one = worker_nodes_list[0]
        node_two = worker_nodes_list[1]

        # Create a PVC (RBD does not support RWX here, so fall back to RWO)
        accessmode = constants.ACCESS_MODE_RWX
        if self.interface == constants.CEPHBLOCKPOOL:
            accessmode = constants.ACCESS_MODE_RWO
        pvc_obj = pvc_factory(
            interface=self.interface,
            access_mode=accessmode,
            status=constants.STATUS_BOUND,
            size="15",
        )

        # Create a pod on one node
        logging.info(f"Creating Pod with pvc {pvc_obj.name} on node {node_one}")

        helpers.pull_images("nginx")
        pod_obj1 = helpers.create_pod(
            interface_type=self.interface,
            pvc_name=pvc_obj.name,
            namespace=pvc_obj.namespace,
            node_name=node_one,
            pod_dict_path=constants.NGINX_POD_YAML,
        )

        # Confirm that pod is running on the selected_nodes
        logging.info("Checking whether pods are running on the selected nodes")
        helpers.wait_for_resource_state(
            resource=pod_obj1, state=constants.STATUS_RUNNING, timeout=120
        )

        pod_name = pod_obj1.name
        pod_path = "/var/lib/www/html"

        _ocp = OCP(namespace=pvc_obj.namespace)

        # Install rsync inside the pod so the local tarball can be copied
        # in. NOTE(review): assumes the nginx image is Debian-based
        # (apt-get available) - confirm against the pod yaml.
        rsh_cmd = f"exec {pod_name} -- apt-get update"
        _ocp.exec_oc_cmd(rsh_cmd)
        rsh_cmd = f"exec {pod_name} -- apt-get install -y rsync"
        # ignore_error: best-effort install; rsync may already be present
        _ocp.exec_oc_cmd(rsh_cmd, ignore_error=True, out_yaml_format=False)

        # Copy the tarball into the pod and unpack it onto the PVC
        rsh_cmd = f"rsync {dir_path} {pod_name}:{pod_path}"
        _ocp.exec_oc_cmd(rsh_cmd)

        rsh_cmd = f"exec {pod_name} -- tar xvf {pod_path}/tmp/file.gz -C /var/lib/www/html/tmp"
        _ocp.exec_oc_cmd(rsh_cmd)

        # Multiply the data: one folder per copy of the unpacked kernel
        for x in range(copies):
            rsh_cmd = f"exec {pod_name} -- mkdir -p {pod_path}/folder{x}"
            _ocp.exec_oc_cmd(rsh_cmd)
            rsh_cmd = f"exec {pod_name} -- cp -r {pod_path}/tmp {pod_path}/folder{x}"
            _ocp.exec_oc_cmd(rsh_cmd)

        # Drop the first pod so the PVC can be re-attached elsewhere
        rsh_cmd = f"delete pod {pod_name}"
        _ocp.exec_oc_cmd(rsh_cmd)

        logging.info(f"Creating Pod with pvc {pvc_obj.name} on node {node_two}")

        pod_obj2 = helpers.create_pod(
            interface_type=self.interface,
            pvc_name=pvc_obj.name,
            namespace=pvc_obj.namespace,
            node_name=node_two,
            pod_dict_path=constants.NGINX_POD_YAML,
        )

        # Time from pod creation until Running = re-attach time of the
        # populated PVC
        start_time = time.time()

        pod_name = pod_obj2.name
        helpers.wait_for_resource_state(
            resource=pod_obj2, state=constants.STATUS_RUNNING, timeout=120
        )
        end_time = time.time()
        total_time = end_time - start_time
        if total_time > 60:
            raise ex.PerformanceException(
                f"Pod creation time is {total_time} and " f"greater than 60 seconds"
            )
        logging.info(f"Pod {pod_name} creation time took {total_time} seconds")

        # Clean up: second pod via factory, local download dir manually
        teardown_factory(pod_obj2)
        os.remove(file_path)
        os.rmdir(dir_path)
コード例 #20
0
    def test_pvc_rwx_writeable_after_pod_deletions(self, pvc_factory,
                                                   teardown_factory):
        """
        Test assign nodeName to a pod using RWX pvc

        1. Create a new project.
        2. Create a RWX CEPHFS based PVC
        3. Attach the same PVC to multiple PODs and start IO on all the PODs
        4. Delete all but one pod.
        5. Verify mount point is still write-able.
             - Start IO again on the Running pod.
        6. Also, access the data written by deleted pods from the Running pod

        """
        worker_nodes_list = node.get_worker_nodes()

        # Create a RWX PVC
        pvc_obj = pvc_factory(
            interface=constants.CEPHFILESYSTEM,
            access_mode=constants.ACCESS_MODE_RWX,
            size=10,
            status=constants.STATUS_BOUND,
        )
        logger.info(f"Creating pods on all worker nodes backed"
                    f"with same pvc {pvc_obj.name}")

        pod_list = []

        # One pod per worker node, all sharing the same RWX PVC
        for each_node in worker_nodes_list:
            pod_obj = helpers.create_pod(
                interface_type=constants.CEPHFILESYSTEM,
                pvc_name=pvc_obj.name,
                namespace=pvc_obj.namespace,
                node_name=each_node,
                pod_dict_path=constants.NGINX_POD_YAML,
            )
            pod_list.append(pod_obj)
            teardown_factory(pod_obj)

        # Confirm pods are created and are running on designated nodes
        # (pod_list order matches worker_nodes_list order)
        node_count = 0
        for pod_obj in pod_list:
            helpers.wait_for_resource_state(resource=pod_obj,
                                            state=constants.STATUS_RUNNING,
                                            timeout=120)
            pod_obj.reload()
            assert pod.verify_node_name(
                pod_obj, worker_nodes_list[node_count]), (
                    f"Pod {pod_obj.name} is running on a different node "
                    f"than the selected node")
            node_count = node_count + 1

        # Run IOs on all pods. FIO Filename is kept same as pod name
        # (each pod writes its own distinct file on the shared volume)
        with ThreadPoolExecutor() as p:
            for pod_obj in pod_list:
                logger.info(f"Running IO on pod {pod_obj.name}")
                p.submit(
                    pod_obj.run_io,
                    storage_type="fs",
                    size="512M",
                    runtime=30,
                    fio_filename=pod_obj.name,
                )

        # Check IO from all pods
        for pod_obj in pod_list:
            pod.get_fio_rw_iops(pod_obj)

        # Calculate md5sum of each file (reference values for the later
        # data-integrity check)
        md5sum_pod_data = []
        for pod_obj in pod_list:
            md5sum_pod_data.append(
                pod.cal_md5sum(pod_obj=pod_obj, file_name=pod_obj.name))

        # Delete all but the last app pod.
        for index in range(node_count - 1):
            pod_list[index].delete()
            pod_list[index].ocp.wait_for_delete(
                resource_name=pod_list[index].name)

        # Verify presence of files written by each pod, read through the
        # single surviving pod (pod_list[-1])
        logger.info(f"Verify existence of each file from app pod "
                    f"{pod_list[-1].name} ")
        for pod_obj in pod_list:
            file_path = pod.get_file_path(pod_list[-1], pod_obj.name)
            assert pod.check_file_existence(
                pod_list[-1], file_path), f"File {pod_obj.name} doesnt exist"
            logger.info(f"File {pod_obj.name} exists in {pod_list[-1].name}")

        # From surviving pod, verify data integrity of files
        # written by deleted pods
        logger.info(f"verify all data from {pod_list[-1].name}")

        for index, pod_obj in enumerate(pod_list):
            assert pod.verify_data_integrity(
                pod_obj=pod_list[-1],
                file_name=pod_obj.name,
                original_md5sum=md5sum_pod_data[index],
            )

        # From surviving pod, confirm mount point is still write-able
        logger.info(f"Re-running IO on pod {pod_list[-1].name}")
        fio_new_file = f"{pod_list[-1].name}-new-file"
        pod_list[-1].run_io(storage_type="fs",
                            size="512M",
                            runtime=30,
                            fio_filename=fio_new_file)
        pod.get_fio_rw_iops(pod_list[-1])
        file_path = pod.get_file_path(pod_list[-1], fio_new_file)
        assert pod.check_file_existence(
            pod_list[-1], file_path), f"File {fio_new_file} doesnt exist"
        logger.info(f"File {fio_new_file} exists in {pod_list[-1].name} ")
コード例 #21
0
    def test_pod_reattach_time_performance(
        self, storageclass_factory, copies, timeout, total_time_limit
    ):
        """
        Measure how long it takes to re-attach a pod (with an RWX PVC full of
        kernel-source files) to a different node, using nodeName scheduling.

        Each kernel (unzipped) is 892M and 61694 files.
        The test creates samples_num pvcs and pods, writes kernel files
        multiplied by number of copies, calculates average total and csi
        reattach times and standard deviation, and pushes the results to
        the Elasticsearch report server.

        Args:
            storageclass_factory: fixture creating the storage class for
                self.interface
            copies (int): number of copies of the unpacked kernel tree to
                write into each PVC
            timeout (int): seconds to wait for each pod to reach Running
            total_time_limit (int): maximum acceptable reattach time (sec);
                exceeding it raises a PerformanceException

        Raises:
            PodNotCreated: when a pod could not be created on the PVC
            ex.PerformanceException: when reattach took longer than
                total_time_limit seconds
        """
        kernel_url = "https://cdn.kernel.org/pub/linux/kernel/v4.x/linux-4.19.5.tar.gz"
        download_path = "tmp"

        # Fewer samples in dev mode to keep the run short
        samples_num = 7
        if self.dev_mode:
            samples_num = 3

        test_start_time = PASTest.get_time()
        helpers.pull_images(constants.PERF_IMAGE)
        # Download a linux Kernel

        dir_path = os.path.join(os.getcwd(), download_path)
        file_path = os.path.join(dir_path, "file.gz")
        if not os.path.exists(dir_path):
            os.makedirs(dir_path)
        urllib.request.urlretrieve(kernel_url, file_path)

        # Two distinct worker nodes are required: the pod is first created
        # on node_one and then re-attached on node_two
        worker_nodes_list = node.get_worker_nodes()
        assert len(worker_nodes_list) > 1
        node_one = worker_nodes_list[0]
        node_two = worker_nodes_list[1]

        time_measures, csi_time_measures, files_written_list, data_written_list = (
            [],
            [],
            [],
            [],
        )

        self.sc_obj = storageclass_factory(self.interface)
        for sample_index in range(1, samples_num + 1):

            # The CSI timing window opens before the PVC/pod creation
            csi_start_time = self.get_time("csi")

            logger.info(f"Start creating PVC number {sample_index}.")
            pvc_obj = helpers.create_pvc(
                sc_name=self.sc_obj.name, size="100Gi", namespace=self.namespace
            )
            helpers.wait_for_resource_state(pvc_obj, constants.STATUS_BOUND)

            # Create a pod on one node
            logger.info(f"Creating Pod with pvc {pvc_obj.name} on node {node_one}")

            pvc_obj.reload()
            self.pvc_list.append(pvc_obj)

            try:
                pod_obj1 = helpers.create_pod(
                    interface_type=self.interface,
                    pvc_name=pvc_obj.name,
                    namespace=pvc_obj.namespace,
                    node_name=node_one,
                    pod_dict_path=constants.PERF_POD_YAML,
                )
            except Exception as e:
                logger.error(
                    f"Pod on PVC {pvc_obj.name} was not created, exception {str(e)}"
                )
                raise PodNotCreated("Pod on PVC was not created.")

            # Confirm that pod is running on the selected_nodes
            logger.info("Checking whether pods are running on the selected nodes")
            helpers.wait_for_resource_state(
                resource=pod_obj1, state=constants.STATUS_RUNNING, timeout=timeout
            )

            pod_name = pod_obj1.name
            pod_path = "/mnt"

            _ocp = OCP(namespace=pvc_obj.namespace)

            # Copy the downloaded archive into the pod and unpack it there
            rsh_cmd = f"rsync {dir_path} {pod_name}:{pod_path}"
            _ocp.exec_oc_cmd(rsh_cmd)

            rsh_cmd = (
                f"exec {pod_name} -- tar xvf {pod_path}/tmp/file.gz -C {pod_path}/tmp"
            )
            _ocp.exec_oc_cmd(rsh_cmd)

            # Multiply the dataset: one extra copy of the tree per `copies`
            for x in range(copies):
                rsh_cmd = f"exec {pod_name} -- mkdir -p {pod_path}/folder{x}"
                _ocp.exec_oc_cmd(rsh_cmd)
                rsh_cmd = (
                    f"exec {pod_name} -- cp -r {pod_path}/tmp {pod_path}/folder{x}"
                )
                _ocp.exec_oc_cmd(rsh_cmd)
                rsh_cmd = f"exec {pod_name} -- sync"
                _ocp.exec_oc_cmd(rsh_cmd)

            logger.info("Getting the amount of data written to the PVC")
            rsh_cmd = f"exec {pod_name} -- df -h {pod_path}"
            # NOTE(review): parsing assumes the `df -h` layout where the 4th
            # field from the end is 'Used' with a one-char unit suffix —
            # confirm against the container image's df output
            data_written_str = _ocp.exec_oc_cmd(rsh_cmd).split()[-4]
            logger.info(f"The amount of written data is {data_written_str}")
            data_written = float(data_written_str[:-1])

            rsh_cmd = f"exec {pod_name} -- find {pod_path} -type f"
            files_written = len(_ocp.exec_oc_cmd(rsh_cmd).split())
            logger.info(
                f"For {self.interface} - The number of files written to the pod is {files_written}"
            )
            files_written_list.append(files_written)
            data_written_list.append(data_written)

            logger.info("Deleting the pod")
            rsh_cmd = f"delete pod {pod_name}"
            _ocp.exec_oc_cmd(rsh_cmd)

            # Re-attach the same PVC to a pod on the second node and time it
            logger.info(f"Creating Pod with pvc {pvc_obj.name} on node {node_two}")

            try:
                pod_obj2 = helpers.create_pod(
                    interface_type=self.interface,
                    pvc_name=pvc_obj.name,
                    namespace=pvc_obj.namespace,
                    node_name=node_two,
                    pod_dict_path=constants.PERF_POD_YAML,
                )
            except Exception as e:
                logger.error(
                    f"Pod on PVC {pvc_obj.name} was not created, exception {str(e)}"
                )
                raise PodNotCreated("Pod on PVC was not created.")

            start_time = time.time()

            pod_name = pod_obj2.name
            helpers.wait_for_resource_state(
                resource=pod_obj2, state=constants.STATUS_RUNNING, timeout=timeout
            )
            end_time = time.time()
            total_time = end_time - start_time
            # Fail fast if the reattach exceeded the allowed limit
            if total_time > total_time_limit:
                logger.error(
                    f"Pod creation time is {total_time} and greater than {total_time_limit} seconds"
                )
                raise ex.PerformanceException(
                    f"Pod creation time is {total_time} and greater than {total_time_limit} seconds"
                )

            csi_time = performance_lib.pod_attach_csi_time(
                self.interface, pvc_obj.backed_pv, csi_start_time, pvc_obj.namespace
            )[0]
            csi_time_measures.append(csi_time)
            logger.info(
                f"PVC #{pvc_obj.name} pod {pod_name} creation time took {total_time} seconds, "
                f"csi time is {csi_time} seconds"
            )
            time_measures.append(total_time)

            logger.info("Deleting the pod")
            rsh_cmd = f"delete pod {pod_name}"
            _ocp.exec_oc_cmd(rsh_cmd)
            # teardown_factory(pod_obj2)

        # Aggregate statistics over all samples
        average = statistics.mean(time_measures)
        logger.info(
            f"The average time of {self.interface} pod creation on {samples_num} PVCs is {average} seconds"
        )

        st_deviation = statistics.stdev(time_measures)
        logger.info(
            f"The standard deviation of {self.interface} pod creation time on {samples_num} PVCs is {st_deviation}"
        )

        csi_average = statistics.mean(csi_time_measures)
        logger.info(
            f"The average csi time of {self.interface} pod creation on {samples_num} PVCs is {csi_average} seconds"
        )

        csi_st_deviation = statistics.stdev(csi_time_measures)
        logger.info(
            f"The standard deviation of {self.interface} csi pod creation time on {samples_num} PVCs "
            f"is {csi_st_deviation}"
        )

        files_written_average = statistics.mean(files_written_list)
        data_written_average = statistics.mean(data_written_list)

        # Clean up the locally downloaded kernel archive
        os.remove(file_path)
        os.rmdir(dir_path)

        # Produce ES report

        # Collecting environment information
        self.get_env_info()

        # Initialize the results doc file.
        full_results = self.init_full_results(
            ResultsAnalyse(
                self.uuid,
                self.crd_data,
                self.full_log_path,
                "pod_reattach_time_fullres",
            )
        )

        full_results.add_key("storageclass", self.sc)
        full_results.add_key("pod_reattach_time", time_measures)
        full_results.add_key("copies_number", copies)
        full_results.add_key("files_number_average", files_written_average)
        full_results.add_key("data_average", data_written_average)
        full_results.add_key("pod_reattach_time_average", average)
        full_results.add_key("pod_reattach_standard_deviation", st_deviation)
        full_results.add_key("pod_csi_reattach_time_average", csi_average)
        full_results.add_key("pod_csi_reattach_standard_deviation", csi_st_deviation)

        test_end_time = PASTest.get_time()

        # Add the test time to the ES report
        full_results.add_key(
            "test_time", {"start": test_start_time, "end": test_end_time}
        )

        # Write the test results into the ES server
        if full_results.es_write():
            res_link = full_results.results_link()
            logger.info(f"The Result can be found at : {res_link}")

            # Create text file with results of all subtest (4 - according to the parameters)
            self.results_path = get_full_test_logs_path(
                cname=self, fname="test_pod_reattach_time_performance"
            )
            self.write_result_to_file(res_link)
コード例 #22
0
    def test_pvc_to_pvc_clone(self, interface_type, teardown_factory):
        """
        Clone an existing PVC and verify that the data written to the
        parent PVC is preserved and readable from the clone.
        """
        logger.info(f"Running IO on pod {self.pod_obj.name}")
        io_file = self.pod_obj.name
        logger.info(f"File created during IO {io_file}")
        self.pod_obj.run_io(storage_type="fs", size="500M", fio_filename=io_file)

        # Block until fio reports completion
        self.pod_obj.get_fio_results()
        logger.info(f"Io completed on pod {self.pod_obj.name}.")

        # The file written by fio must exist on the parent pod
        io_file_path = pod.get_file_path(self.pod_obj, io_file)
        logger.info(f"Actual file path on the pod {io_file_path}")
        assert pod.check_file_existence(
            self.pod_obj, io_file_path
        ), f"File {io_file} does not exist"
        logger.info(f"File {io_file} exists in {self.pod_obj.name}")

        # Record the checksum of the file for the later integrity check
        source_md5 = pod.cal_md5sum(self.pod_obj, io_file)

        # Clone the parent PVC, picking the yaml matching the interface
        clone_yaml = (
            constants.CSI_CEPHFS_PVC_CLONE_YAML
            if interface_type == constants.CEPHFILESYSTEM
            else constants.CSI_RBD_PVC_CLONE_YAML
        )
        cloned_pvc_obj = pvc.create_pvc_clone(
            self.pvc_obj.backed_sc, self.pvc_obj.name, clone_yaml
        )
        teardown_factory(cloned_pvc_obj)
        helpers.wait_for_resource_state(cloned_pvc_obj, constants.STATUS_BOUND)
        cloned_pvc_obj.reload()

        # Attach a pod to the cloned PVC and wait for it to run
        clone_pod_obj = helpers.create_pod(
            interface_type=interface_type,
            pvc_name=cloned_pvc_obj.name,
            namespace=cloned_pvc_obj.namespace,
            pod_dict_path=constants.NGINX_POD_YAML,
        )
        helpers.wait_for_resource_state(
            resource=clone_pod_obj, state=constants.STATUS_RUNNING
        )
        clone_pod_obj.reload()
        teardown_factory(clone_pod_obj)

        # The file must also be visible from the clone
        logger.info(
            f"Checking the existence of {io_file} on cloned pod "
            f"{clone_pod_obj.name}"
        )
        assert pod.check_file_existence(
            clone_pod_obj, io_file_path
        ), f"File {io_file_path} does not exist"
        logger.info(f"File {io_file} exists in {clone_pod_obj.name}")

        # Matching md5sums prove the clone preserved the data
        logger.info(
            f"Verifying that md5sum of {io_file} "
            f"on pod {self.pod_obj.name} matches with md5sum "
            f"of the same file on restore pod {clone_pod_obj.name}"
        )
        assert pod.verify_data_integrity(
            clone_pod_obj, io_file, source_md5
        ), "Data integrity check failed"
        logger.info("Data integrity check passed, md5sum are same")

        logger.info("Run IO on new pod")
        clone_pod_obj.run_io(storage_type="fs", size="100M", runtime=10)

        # Confirm that the cloned volume is still writable
        clone_pod_obj.get_fio_results()
        logger.info(f"IO completed on pod {clone_pod_obj.name}")
コード例 #23
0
ファイル: test_pvc_ui.py プロジェクト: sidhant-agrawal/ocs-ci
    def test_create_resize_delete_pvc(
        self,
        project_factory,
        teardown_factory,
        setup_ui,
        sc_name,
        access_mode,
        pvc_size,
        vol_mode,
    ):
        """
        Test create, resize and delete pvc via UI

        Args:
            project_factory: fixture creating a test project (namespace)
            teardown_factory: fixture registering objects for cleanup
            setup_ui: UI driver fixture passed to PvcUI
            sc_name (str): storage class to create the PVC with
            access_mode (str): requested PVC access mode
            pvc_size (str): requested PVC size in GiB (numeric string)
            vol_mode (str): requested volume mode
        """
        # Creating a test project via CLI
        pro_obj = project_factory()
        project_name = pro_obj.namespace

        pvc_ui_obj = PvcUI(setup_ui)

        # Creating PVC via UI
        pvc_name = create_unique_resource_name("test", "pvc")
        pvc_ui_obj.create_pvc_ui(
            project_name, sc_name, pvc_name, access_mode, pvc_size, vol_mode
        )

        # Cross-check the UI-created PVC's attributes via CLI objects
        pvc_objs = get_all_pvc_objs(namespace=project_name)
        pvc = [pvc_obj for pvc_obj in pvc_objs if pvc_obj.name == pvc_name]

        assert pvc[0].size == int(pvc_size), (
            f"size error| expected size:{pvc_size} \n "
            f"actual size:{str(pvc[0].size)}"
        )

        assert pvc[0].get_pvc_access_mode == access_mode, (
            f"access mode error| expected access mode:{access_mode} "
            f"\n actual access mode:{pvc[0].get_pvc_access_mode}"
        )

        assert pvc[0].backed_sc == sc_name, (
            f"storage class error| expected storage class:{sc_name} "
            f"\n actual storage class:{pvc[0].backed_sc}"
        )

        assert pvc[0].get_pvc_vol_mode == vol_mode, (
            f"volume mode error| expected volume mode:{vol_mode} "
            f"\n actual volume mode:{pvc[0].get_pvc_vol_mode}"
        )

        # Verifying PVC via UI
        logger.info("Verifying PVC Details via UI")
        pvc_ui_obj.verify_pvc_ui(
            pvc_size=pvc_size,
            access_mode=access_mode,
            vol_mode=vol_mode,
            sc_name=sc_name,
            pvc_name=pvc_name,
            project_name=project_name,
        )
        logger.info("PVC Details Verified via UI..!!")

        # Creating Pod via CLI
        logger.info("Creating Pod")
        if sc_name in (constants.DEFAULT_STORAGECLASS_RBD,):
            interface_type = constants.CEPHBLOCKPOOL
        else:
            interface_type = constants.CEPHFILESYSTEM

        new_pod = helpers.create_pod(
            interface_type=interface_type,
            pvc_name=pvc_name,
            namespace=project_name,
            raw_block_pv=vol_mode == constants.VOLUME_MODE_BLOCK,
        )

        logger.info(f"Waiting for Pod: state= {constants.STATUS_RUNNING}")
        wait_for_resource_state(resource=new_pod, state=constants.STATUS_RUNNING)

        # Calling the Teardown Factory Method to make sure Pod is deleted
        teardown_factory(new_pod)

        # Expanding the PVC
        logger.info("Pvc Resizing")
        new_size = int(pvc_size) + 3
        pvc_ui_obj.pvc_resize_ui(
            pvc_name=pvc_name, new_size=new_size, project_name=project_name
        )

        # Sanity check: always true by construction (new_size = size + 3)
        assert new_size > int(
            pvc_size
        ), f"New size of the PVC cannot be less than existing size: new size is {new_size})"

        ocp_version = get_ocp_version()
        self.pvc_loc = locators[ocp_version]["pvc"]

        # Verifying PVC expansion
        logger.info("Verifying PVC resize")
        expected_capacity = f"{new_size} GiB"
        pvc_resize = pvc_ui_obj.verify_pvc_resize_ui(
            project_name=project_name,
            pvc_name=pvc_name,
            expected_capacity=expected_capacity,
        )

        assert pvc_resize, "PVC resize failed"
        logger.info(
            "Pvc resize verified..!!"
            f"New Capacity after PVC resize is {expected_capacity}"
        )

        # Running FIO
        logger.info("Execute FIO on a Pod")
        if vol_mode == constants.VOLUME_MODE_BLOCK:
            storage_type = constants.WORKLOAD_STORAGE_TYPE_BLOCK
        else:
            storage_type = constants.WORKLOAD_STORAGE_TYPE_FS

        new_pod.run_io(storage_type, size=(new_size - 1), invalidate=0, rate="1000m")

        get_fio_rw_iops(new_pod)
        logger.info("FIO execution on Pod successfully completed..!!")

        # Checking if the Pod is deleted or not
        new_pod.delete(wait=True)
        new_pod.ocp.wait_for_delete(resource_name=new_pod.name)

        # Deleting the PVC via UI
        logger.info(f"Delete {pvc_name} pvc")
        pvc_ui_obj.delete_pvc_ui(pvc_name, project_name)

        pvc[0].ocp.wait_for_delete(pvc_name, timeout=120)

        # Bug fix: the original ended with `assert f"PVC ... does not deleted"`,
        # which asserts a non-empty string literal — always truthy, so the
        # check could never fail. Assert explicitly that the PVC is gone.
        pvc_objs = get_all_pvc_objs(namespace=project_name)
        pvcs = [pvc_obj for pvc_obj in pvc_objs if pvc_obj.name == pvc_name]
        assert not pvcs, f"PVC {pvc_name} was not deleted"
コード例 #24
0
    def raw_block_pv(self):
        """
        Testing basic creation of app pod with RBD RWX raw block pv support

        Creates three block-mode RWX PVCs (Mi-, Gi- and Ti-sized), attaches
        three pods to each, runs fio concurrently on all pods and validates
        the IO results.

        Returns:
            tuple: (pods, pvcs, pvs) — the created pod objects, PVC objects
                and their backing PV objects

        """
        worker_nodes = node.get_worker_nodes()
        pvcs = list()
        size_mb = "500Mi"
        size_gb = "10Gi"
        # Managed-service platforms get a scaled-down "TB" PVC instead of 1Ti
        if config.ENV_DATA["platform"].lower(
        ) in constants.MANAGED_SERVICE_PLATFORMS:
            size_tb = str(convert_device_size("50Gi", "TB")) + "Ti"
        else:
            size_tb = "1Ti"
        # One block-mode RWX PVC per size
        for size in [size_mb, size_gb, size_tb]:
            pvcs.append(
                helpers.create_pvc(
                    sc_name=self.sc_obj.name,
                    size=size,
                    access_mode=constants.ACCESS_MODE_RWX,
                    namespace=self.namespace,
                    volume_mode="Block",
                ))
        pvc_mb, pvc_gb, pvc_tb = pvcs[0], pvcs[1], pvcs[2]

        for pvc in pvcs:
            helpers.wait_for_resource_state(resource=pvc,
                                            state=constants.STATUS_BOUND,
                                            timeout=120)

        pvs = [pvc.backed_pv_obj for pvc in pvcs]

        # Attach 3 pods to each PVC, spread randomly across worker nodes
        pods = list()
        pod_dict = constants.CSI_RBD_RAW_BLOCK_POD_YAML
        for pvc in pvc_mb, pvc_gb, pvc_tb:
            for _ in range(3):
                pods.append(
                    helpers.create_pod(
                        interface_type=constants.CEPHBLOCKPOOL,
                        pvc_name=pvc.name,
                        namespace=self.namespace,
                        raw_block_pv=True,
                        pod_dict_path=pod_dict,
                        node_name=random.choice(worker_nodes),
                    ))

        # Pods were appended in PVC order, so slice them back per PVC
        pvc_mb_pods, pvc_gb_pods, pvc_tb_pods = pods[0:3], pods[3:6], pods[6:9]
        for pod in pods:
            helpers.wait_for_resource_state(resource=pod,
                                            state=constants.STATUS_RUNNING,
                                            timeout=120)
        storage_type = "block"

        # Run fio concurrently on all pods; the random IO size scales with
        # the backing PVC's size
        with ThreadPoolExecutor() as p:
            for pod in pvc_mb_pods:
                log.info(f"running io on pod {pod.name}")
                p.submit(
                    pod.run_io,
                    storage_type=storage_type,
                    size=f"{random.randint(10,200)}M",
                    invalidate=0,
                )
            for pod in pvc_gb_pods:
                log.info(f"running io on pod {pod.name}")
                p.submit(
                    pod.run_io,
                    storage_type=storage_type,
                    size=f"{random.randint(1,5)}G",
                    invalidate=0,
                )
            for pod in pvc_tb_pods:
                log.info(f"running io on pod {pod.name}")
                p.submit(
                    pod.run_io,
                    storage_type=storage_type,
                    size=f"{random.randint(10,15)}G",
                    invalidate=0,
                )

        for pod in pods:
            get_fio_rw_iops(pod)
        return pods, pvcs, pvs
コード例 #25
0
    def test_rwx_pvc_assign_pod_node(self, interface, pvc_factory, teardown_factory):
        """
        Test assign nodeName to a pod using RWX pvc
        """
        worker_nodes_list = get_worker_nodes()
        if interface == constants.CEPHBLOCKPOOL:
            volume_mode, storage_type = "Block", "block"
            block_pv, pod_yaml = True, constants.CSI_RBD_RAW_BLOCK_POD_YAML
        else:
            volume_mode, storage_type = "", "fs"
            block_pv, pod_yaml = False, ""

        # Create a RWX PVC
        pvc_obj = pvc_factory(
            interface=interface,
            access_mode=constants.ACCESS_MODE_RWX,
            status=constants.STATUS_BOUND,
            volume_mode=volume_mode,
        )

        # Create one pod on each of two randomly chosen worker nodes
        selected_nodes = random.sample(worker_nodes_list, k=2)
        pod_list = []
        logger.info(f"Creating {len(selected_nodes)} pods with pvc {pvc_obj.name}")
        for target_node in selected_nodes:
            logger.info(f"Creating pod on node: {target_node}")
            created_pod = helpers.create_pod(
                interface_type=interface,
                pvc_name=pvc_obj.name,
                namespace=pvc_obj.namespace,
                node_name=target_node,
                pod_dict_path=pod_yaml,
                raw_block_pv=block_pv,
            )
            pod_list.append(created_pod)
            teardown_factory(created_pod)

        # Confirm that both pods are running on the selected_nodes
        logger.info("Checking whether pods are running on the selected nodes")
        for pod_obj, selected_node in zip(pod_list, selected_nodes):
            helpers.wait_for_resource_state(
                resource=pod_obj, state=constants.STATUS_RUNNING, timeout=120
            )
            pod_obj.reload()
            assert pod.verify_node_name(pod_obj, selected_node), (
                f"Pod {pod_obj.name} is running on a different node "
                f"than the selected node"
            )

        # Run IOs on all pods. FIO Filename is kept same as pod name
        with ThreadPoolExecutor() as executor:
            for pod_obj in pod_list:
                logger.info(f"Running IO on pod {pod_obj.name}")
                executor.submit(
                    pod_obj.run_io,
                    storage_type=storage_type,
                    size="512M",
                    runtime=30,
                    fio_filename=pod_obj.name,
                )

        # Check IO from all pods
        for pod_obj in pod_list:
            pod.get_fio_rw_iops(pod_obj)
コード例 #26
0
    def test_create_multiple_sc_with_same_pool_name(self, interface_type,
                                                    resources):
        """
        Create multiple Storage Classes sharing one pool and exercise them.

        Steps:
        *. Creates multiple Storage Classes with same pool name
        *. Creates PVCs using each Storage Class
        *. Mount each PVC to an app pod
        *. Run IO on each app pod

        Args:
            interface_type (str): constants.CEPHBLOCKPOOL or
                constants.CEPHFILESYSTEM
            resources (tuple): (pods, pvcs, storageclasses) lists that
                collect the created objects for the fixture's teardown
        """
        # Unpack resources
        pods, pvcs, storageclasses = resources

        # Create 3 Storage Classes with same pool name
        if interface_type == constants.CEPHBLOCKPOOL:
            secret = self.rbd_secret_obj.name
            interface_name = self.cbp_obj.name
        else:
            interface_type = constants.CEPHFILESYSTEM
            secret = self.cephfs_secret_obj.name
            interface_name = helpers.get_cephfs_data_pool_name()
        for i in range(3):
            log.info(f"Creating a {interface_type} storage class")
            storageclasses.append(
                helpers.create_storage_class(
                    interface_type=interface_type,
                    interface_name=interface_name,
                    secret_name=secret,
                ))
            log.info(f"{interface_type}StorageClass: {storageclasses[i].name} "
                     f"created successfully")

        # Create PVCs using each SC
        for i in range(3):
            log.info(f"Creating a PVC using {storageclasses[i].name}")
            pvcs.append(helpers.create_pvc(storageclasses[i].name))
        for pvc in pvcs:
            helpers.wait_for_resource_state(pvc, constants.STATUS_BOUND)
            pvc.reload()

        # Create app pod and mount each PVC
        for i in range(3):
            log.info(f"Creating an app pod and mount {pvcs[i].name}")
            pod_obj = helpers.create_pod(
                interface_type=interface_type,
                pvc_name=pvcs[i].name,
                namespace=defaults.ROOK_CLUSTER_NAMESPACE,
            )
            pods.append(pod_obj)
            # Fix: wait only for the pod just created. The original nested a
            # `for pod in pods:` wait loop inside the creation loop, which
            # re-waited and re-reloaded every already-running pod after each
            # creation (O(n^2) redundant waits).
            helpers.wait_for_resource_state(pod_obj, constants.STATUS_RUNNING)
            pod_obj.reload()
            log.info(f"{pods[i].name} created successfully and "
                     f"mounted {pvcs[i].name}")

        # Run IO on each app pod for sometime
        for pod in pods:
            log.info(f"Running FIO on {pod.name}")
            pod.run_io("fs", size="2G")

        for pod in pods:
            get_fio_rw_iops(pod)
    def test_create_multiple_sc_with_different_pool_name(
            self, teardown_factory):
        """
        This test function does below,
        *. Creates multiple Storage Classes with different pool name
        *. Creates PVCs using each Storage Class
        *. Mount each PVC to an app pod
        *. Run IO on each app pod
        """

        # Create 2 storageclasses, each with different pool name
        cbp_list = []
        sc_list = []
        for _ in range(2):
            log.info("Creating cephblockpool")
            block_pool = helpers.create_ceph_block_pool()
            log.info(f"{block_pool.name} created successfully")
            log.info(f"Creating a RBD storage class using {block_pool.name}")
            cbp_list.append(block_pool)
            storage_class = helpers.create_storage_class(
                interface_type=constants.CEPHBLOCKPOOL,
                interface_name=block_pool.name,
                secret_name=self.rbd_secret_obj.name,
            )

            log.info(f"StorageClass: {storage_class.name} "
                     f"created successfully using {block_pool.name}")
            sc_list.append(storage_class)
            teardown_factory(block_pool)
            teardown_factory(storage_class)

        # Create one PVC per storage class and wait for binding
        pvc_list = []
        for storage_class in sc_list:
            log.info(f"Creating a PVC using {storage_class.name}")
            claim = helpers.create_pvc(storage_class.name)
            log.info(f"PVC: {claim.name} created successfully using "
                     f"{storage_class.name}")
            pvc_list.append(claim)
            teardown_factory(claim)
            helpers.wait_for_resource_state(claim, constants.STATUS_BOUND)
            claim.reload()

        # Attach an app pod to each PVC and wait for it to run
        pod_list = []
        for claim in pvc_list:
            log.info(f"Creating an app pod and mount {claim.name}")
            app_pod = helpers.create_pod(
                interface_type=constants.CEPHBLOCKPOOL,
                pvc_name=claim.name,
            )
            log.info(f"{app_pod.name} created successfully and "
                     f"mounted {claim.name}")
            pod_list.append(app_pod)
            teardown_factory(app_pod)
            helpers.wait_for_resource_state(app_pod, constants.STATUS_RUNNING)
            app_pod.reload()

        # Run IO on each app pod for sometime
        for app_pod in pod_list:
            log.info(f"Running FIO on {app_pod.name}")
            app_pod.run_io("fs", size="2G")

        for app_pod in pod_list:
            get_fio_rw_iops(app_pod)
コード例 #28
0
    def test_pvc_snapshot_performance(self, teardown_factory, pvc_size):
        """
        1. Run I/O on a pod file.
        2. Calculate md5sum of the file.
        3. Take a snapshot of the PVC and measure the time of creation.
        4. Restore From the snapshot and measure the time
        5. Attach a new pod to it.
        6. Verify that the file is present on the new pod also.
        7. Verify that the md5sum of the file on the new pod matches
           with the md5sum of the file on the original pod.

        This scenario run 3 times and report all results
        Args:
            teardown_factory: A fixture to destroy objects
            pvc_size: the size of the PVC to be tested - parametrize

        """

        # Getting the total Storage capacity
        ceph_cluster = CephCluster()
        ceph_capacity = ceph_cluster.get_ceph_capacity()

        log.info(f"Total capacity size is : {ceph_capacity}")
        log.info(f"PVC Size is : {pvc_size}")
        log.info(f"Needed capacity is {int(int(pvc_size) * 5)}")
        if int(ceph_capacity) < int(pvc_size) * 5:
            log.error(
                f"PVC size is {pvc_size}GiB and it is too large for this system"
                f" which have only {ceph_capacity}GiB")
            return
        # Calculating the file size as 25% of the PVC size
        # in the end the PVC will be 75% full
        filesize = self.pvc_obj.size * 0.25
        # Change the file size to MB and from int to str
        file_size = f"{int(filesize * 1024)}M"

        all_results = []

        for test_num in range(self.tests_numbers):
            test_results = {
                "test_num": test_num + 1,
                "dataset": (test_num + 1) * filesize * 1024,  # size in MiB
                "create": {
                    "time": None,
                    "speed": None
                },
                "restore": {
                    "time": None,
                    "speed": None
                },
            }
            log.info(f"Starting test phase number {test_num}")
            # Step 1. Run I/O on a pod file.
            file_name = f"{self.pod_obj.name}-{test_num}"
            log.info(f"Starting IO on the POD {self.pod_obj.name}")
            # Going to run only write IO to fill the PVC for the snapshot
            self.pod_obj.fillup_fs(size=file_size, fio_filename=file_name)

            # Wait for fio to finish
            fio_result = self.pod_obj.get_fio_results()
            err_count = fio_result.get("jobs")[0].get("error")
            assert (
                err_count == 0
            ), f"IO error on pod {self.pod_obj.name}. FIO result: {fio_result}"
            log.info("IO on the PVC Finished")

            # Verify presence of the file
            file_path = pod.get_file_path(self.pod_obj, file_name)
            log.info(f"Actual file path on the pod {file_path}")
            assert pod.check_file_existence(
                self.pod_obj, file_path), f"File {file_name} doesn't exist"
            log.info(f"File {file_name} exists in {self.pod_obj.name}")

            # Step 2. Calculate md5sum of the file.
            orig_md5_sum = pod.cal_md5sum(self.pod_obj, file_name)

            # Step 3. Take a snapshot of the PVC and measure the time of creation.
            snap_name = self.pvc_obj.name.replace("pvc-test",
                                                  f"snapshot-test{test_num}")
            log.info(f"Taking snapshot of the PVC {snap_name}")

            test_results["create"]["time"] = self.measure_create_snapshot_time(
                pvc_name=self.pvc_obj.name,
                snap_name=snap_name,
                interface=self.interface,
            )
            test_results["create"]["speed"] = int(
                test_results["dataset"] / test_results["create"]["time"])
            log.info(
                f' Test {test_num} dataset is {test_results["dataset"]} MiB')
            log.info(
                f'Snapshot creation time is : {test_results["create"]["time"]} sec.'
            )
            log.info(
                f'Snapshot speed is : {test_results["create"]["speed"]} MB/sec'
            )

            # Step 4. Restore the PVC from the snapshot and measure the time
            # Same Storage class of the original PVC
            sc_name = self.pvc_obj.backed_sc

            # Size should be same as of the original PVC
            pvc_size = str(self.pvc_obj.size) + "Gi"

            # Create pvc out of the snapshot
            # Both, the snapshot and the restore PVC should be in same namespace

            log.info("Restoring from the Snapshot")
            restore_pvc_name = self.pvc_obj.name.replace(
                "pvc-test", f"restore-pvc{test_num}")
            restore_pvc_yaml = constants.CSI_RBD_PVC_RESTORE_YAML
            if self.interface == constants.CEPHFILESYSTEM:
                restore_pvc_yaml = constants.CSI_CEPHFS_PVC_RESTORE_YAML

            log.info("Resorting the PVC from Snapshot")
            restore_pvc_obj = pvc.create_restore_pvc(
                sc_name=sc_name,
                snap_name=self.snap_obj.name,
                namespace=self.snap_obj.namespace,
                size=pvc_size,
                pvc_name=restore_pvc_name,
                restore_pvc_yaml=restore_pvc_yaml,
            )
            helpers.wait_for_resource_state(
                restore_pvc_obj,
                constants.STATUS_BOUND,
                timeout=3600  # setting this to 60 Min.
                # since it can be take long time to restore, and we want it to finished.
            )
            teardown_factory(restore_pvc_obj)
            restore_pvc_obj.reload()
            log.info("PVC was restored from the snapshot")
            test_results["restore"][
                "time"] = helpers.measure_pvc_creation_time(
                    self.interface, restore_pvc_obj.name)
            test_results["restore"]["speed"] = int(
                test_results["dataset"] / test_results["restore"]["time"])
            log.info(
                f'Snapshot restore time is : {test_results["restore"]["time"]}'
            )
            log.info(
                f'restore sped is : {test_results["restore"]["speed"]} MB/sec')

            # Step 5. Attach a new pod to the restored PVC
            restore_pod_obj = helpers.create_pod(
                interface_type=self.interface,
                pvc_name=restore_pvc_obj.name,
                namespace=self.snap_obj.namespace,
                pod_dict_path=constants.NGINX_POD_YAML,
            )

            # Confirm that the pod is running
            helpers.wait_for_resource_state(resource=restore_pod_obj,
                                            state=constants.STATUS_RUNNING)
            teardown_factory(restore_pod_obj)
            restore_pod_obj.reload()

            # Step 6. Verify that the file is present on the new pod also.
            log.info(f"Checking the existence of {file_name} "
                     f"on restore pod {restore_pod_obj.name}")
            assert pod.check_file_existence(
                restore_pod_obj, file_path), f"File {file_name} doesn't exist"
            log.info(f"File {file_name} exists in {restore_pod_obj.name}")

            # Step 7. Verify that the md5sum matches
            log.info(f"Verifying that md5sum of {file_name} "
                     f"on pod {self.pod_obj.name} matches with md5sum "
                     f"of the same file on restore pod {restore_pod_obj.name}")
            assert pod.verify_data_integrity(
                restore_pod_obj, file_name,
                orig_md5_sum), "Data integrity check failed"
            log.info("Data integrity check passed, md5sum are same")

            all_results.append(test_results)

        # logging the test summery, all info in one place for easy log reading
        c_speed, c_runtime, r_speed, r_runtime = (0 for i in range(4))
        log.info("Test summery :")
        for tst in all_results:
            c_speed += tst["create"]["speed"]
            c_runtime += tst["create"]["time"]
            r_speed += tst["restore"]["speed"]
            r_runtime += tst["restore"]["time"]
            log.info(
                f"Test {tst['test_num']} results : dataset is {tst['dataset']} MiB. "
                f"Take snapshot time is {tst['create']['time']} "
                f"at {tst['create']['speed']} MiB/Sec "
                f"Restore from snapshot time is {tst['restore']['time']} "
                f"at {tst['restore']['speed']} MiB/Sec ")
        log.info(
            f" Average snapshot creation time is {c_runtime / self.tests_numbers} sec."
        )
        log.info(
            f" Average snapshot creation speed is {c_speed / self.tests_numbers} MiB/sec"
        )
        log.info(
            f" Average snapshot restore time is {r_runtime / self.tests_numbers} sec."
        )
        log.info(
            f" Average snapshot restore speed is {r_speed / self.tests_numbers} MiB/sec"
        )
コード例 #29
0
    def test_pvc_snapshot_performance(self, pvc_size):
        """
        Measure PVC snapshot creation and restore performance and report the
        averaged results of all runs to the Elasticsearch (ES) server.

        Test flow:
        1. Run I/O on a pod file
        2. Calculate md5sum of the file
        3. Take a snapshot of the PVC and measure the total snapshot creation
           time and the CSI snapshot creation time
        4. Restore from the snapshot and measure the time
        5. Attach a new pod to the restored PVC
        6. Verify that the file is present on the new pod also
        7. Verify that the md5sum of the file on the new pod matches
           with the md5sum of the file on the original pod

        This scenario run 3 times and report all the average results of the 3 runs
        and will send them to the ES
        Args:
            pvc_size: the size of the PVC to be tested - parametrize

        """

        # Getting the total Storage capacity
        ceph_cluster = CephCluster()
        ceph_capacity = ceph_cluster.get_ceph_capacity()

        log.info(f"Total capacity size is : {ceph_capacity}")
        log.info(f"PVC Size is : {pvc_size}")
        log.info(f"Needed capacity is {int(int(pvc_size) * 5)}")
        # The test needs roughly 5x the PVC size of free capacity (original
        # data + snapshots + restored PVCs); bail out early (without failing)
        # when the cluster is too small.
        if int(ceph_capacity) < int(pvc_size) * 5:
            log.error(
                f"PVC size is {pvc_size}GiB and it is too large for this system"
                f" which have only {ceph_capacity}GiB")
            return
        # Calculating the file size as 25% of the PVC size
        # in the end the PVC will be 75% full
        filesize = self.pvc_obj.size * 0.25
        # Change the file size to MB and from int to str
        file_size = f"{int(filesize * 1024)}M"

        all_results = []

        self.results_path = get_full_test_logs_path(cname=self)
        # NOTE(review): logs self.full_log_path while self.results_path was
        # just assigned above — confirm both attributes are meant to coexist.
        log.info(f"Logs file path name is : {self.full_log_path}")

        # Produce ES report
        # Collecting environment information
        self.get_env_info()

        # Initialize the results doc file.
        self.full_results = self.init_full_results(
            ResultsAnalyse(
                self.uuid,
                self.crd_data,
                self.full_log_path,
                "pvc_snapshot_perf",
            ))
        # NOTE(review): assumes pvc_size is a str — an int parametrization
        # would raise TypeError on the "+" below; verify the parametrize list.
        self.full_results.add_key("pvc_size", pvc_size + " GiB")
        self.full_results.add_key("interface", self.sc)
        # Per-iteration measurements are accumulated in these lists and the
        # averages are attached as separate keys at the end of the test.
        self.full_results.all_results["creation_time"] = []
        self.full_results.all_results["csi_creation_time"] = []
        self.full_results.all_results["creation_speed"] = []
        self.full_results.all_results["restore_time"] = []
        self.full_results.all_results["restore_speed"] = []
        self.full_results.all_results["restore_csi_time"] = []
        for test_num in range(self.tests_numbers):
            # Results for this iteration; "dataset" grows every round because
            # each round writes one more file of `filesize` GiB into the PVC.
            test_results = {
                "test_num": test_num + 1,
                "dataset": (test_num + 1) * filesize * 1024,  # size in MiB
                "create": {
                    "time": None,
                    "csi_time": None,
                    "speed": None
                },
                "restore": {
                    "time": None,
                    "speed": None
                },
            }
            log.info(f"Starting test phase number {test_num}")
            # Step 1. Run I/O on a pod file.
            file_name = f"{self.pod_object.name}-{test_num}"
            log.info(f"Starting IO on the POD {self.pod_object.name}")
            # Going to run only write IO to fill the PVC for the snapshot
            self.pod_object.fillup_fs(size=file_size, fio_filename=file_name)

            # Wait for fio to finish
            fio_result = self.pod_object.get_fio_results()
            err_count = fio_result.get("jobs")[0].get("error")
            assert (
                err_count == 0
            ), f"IO error on pod {self.pod_object.name}. FIO result: {fio_result}"
            log.info("IO on the PVC Finished")

            # Verify presence of the file
            file_path = pod.get_file_path(self.pod_object, file_name)
            log.info(f"Actual file path on the pod {file_path}")
            assert pod.check_file_existence(
                self.pod_object, file_path), f"File {file_name} doesn't exist"
            log.info(f"File {file_name} exists in {self.pod_object.name}")

            # Step 2. Calculate md5sum of the file.
            orig_md5_sum = pod.cal_md5sum(self.pod_object, file_name)

            # Step 3. Take a snapshot of the PVC and measure the time of creation.
            snap_name = self.pvc_obj.name.replace("pvc-test",
                                                  f"snapshot-test{test_num}")
            log.info(f"Taking snapshot of the PVC {snap_name}")

            # Timestamp taken just before the snapshot so it can be used as
            # the lower bound when searching the logs for creation events.
            start_time = datetime.datetime.utcnow().strftime(
                "%Y-%m-%dT%H:%M:%SZ")

            test_results["create"]["time"] = self.measure_create_snapshot_time(
                pvc_name=self.pvc_obj.name,
                snap_name=snap_name,
                namespace=self.pod_object.namespace,
                interface=self.interface,
                start_time=start_time,
            )

            # NOTE(review): self.snap_uid is presumably populated by
            # measure_create_snapshot_time above — confirm in its definition.
            test_results["create"][
                "csi_time"] = performance_lib.measure_csi_snapshot_creation_time(
                    interface=self.interface,
                    snapshot_id=self.snap_uid,
                    start_time=start_time,
                )

            test_results["create"]["speed"] = int(
                test_results["dataset"] / test_results["create"]["time"])
            log.info(
                f' Test {test_num} dataset is {test_results["dataset"]} MiB')
            log.info(
                f"Snapshot name {snap_name} and id {self.snap_uid} creation time is"
                f' : {test_results["create"]["time"]} sec.')
            log.info(
                f"Snapshot name {snap_name} and id {self.snap_uid} csi creation time is"
                f' : {test_results["create"]["csi_time"]} sec.')
            log.info(
                f'Snapshot speed is : {test_results["create"]["speed"]} MB/sec'
            )

            # Step 4. Restore the PVC from the snapshot and measure the time
            # Same Storage class of the original PVC
            sc_name = self.pvc_obj.backed_sc

            # Size should be same as of the original PVC
            pvc_size = str(self.pvc_obj.size) + "Gi"

            # Create pvc out of the snapshot
            # Both, the snapshot and the restore PVC should be in same namespace

            log.info("Restoring from the Snapshot")
            restore_pvc_name = self.pvc_obj.name.replace(
                "pvc-test", f"restore-pvc{test_num}")
            # RBD restore YAML is the default; switch to the CephFS one when
            # the test runs against the filesystem interface.
            restore_pvc_yaml = constants.CSI_RBD_PVC_RESTORE_YAML
            if self.interface == constants.CEPHFILESYSTEM:
                restore_pvc_yaml = constants.CSI_CEPHFS_PVC_RESTORE_YAML

            # Capture the CSI-side start marker before issuing the restore so
            # the CSI restore duration can be measured from the driver logs.
            csi_start_time = self.get_time("csi")
            log.info("Restoring the PVC from Snapshot")
            restore_pvc_obj = pvc.create_restore_pvc(
                sc_name=sc_name,
                snap_name=self.snap_obj.name,
                namespace=self.snap_obj.namespace,
                size=pvc_size,
                pvc_name=restore_pvc_name,
                restore_pvc_yaml=restore_pvc_yaml,
            )
            helpers.wait_for_resource_state(
                restore_pvc_obj,
                constants.STATUS_BOUND,
                timeout=3600  # setting this to 60 Min.
                # since it can be take long time to restore, and we want it to finished.
            )
            restore_pvc_obj.reload()
            log.info("PVC was restored from the snapshot")
            test_results["restore"][
                "time"] = helpers.measure_pvc_creation_time(
                    self.interface, restore_pvc_obj.name)

            test_results["restore"]["speed"] = int(
                test_results["dataset"] / test_results["restore"]["time"])
            log.info(
                f'Snapshot restore time is : {test_results["restore"]["time"]}'
            )
            log.info(
                f'restore speed is : {test_results["restore"]["speed"]} MB/sec'
            )

            test_results["restore"][
                "csi_time"] = performance_lib.csi_pvc_time_measure(
                    self.interface, restore_pvc_obj, "create", csi_start_time)
            log.info(
                f'Snapshot csi restore time is : {test_results["restore"]["csi_time"]}'
            )

            # Step 5. Attach a new pod to the restored PVC
            restore_pod_object = helpers.create_pod(
                interface_type=self.interface,
                pvc_name=restore_pvc_obj.name,
                namespace=self.snap_obj.namespace,
            )

            # Confirm that the pod is running
            helpers.wait_for_resource_state(resource=restore_pod_object,
                                            state=constants.STATUS_RUNNING)
            restore_pod_object.reload()

            # Step 6. Verify that the file is present on the new pod also.
            log.info(f"Checking the existence of {file_name} "
                     f"on restore pod {restore_pod_object.name}")
            assert pod.check_file_existence(
                restore_pod_object,
                file_path), f"File {file_name} doesn't exist"
            log.info(f"File {file_name} exists in {restore_pod_object.name}")

            # Step 7. Verify that the md5sum matches
            log.info(
                f"Verifying that md5sum of {file_name} "
                f"on pod {self.pod_object.name} matches with md5sum "
                f"of the same file on restore pod {restore_pod_object.name}")
            assert pod.verify_data_integrity(
                restore_pod_object, file_name,
                orig_md5_sum), "Data integrity check failed"
            log.info("Data integrity check passed, md5sum are same")

            # Clean the per-iteration resources so the next round starts from
            # only the original PVC (which keeps accumulating data files).
            restore_pod_object.delete()
            restore_pvc_obj.delete()

            all_results.append(test_results)

        # clean the enviroment
        self.pod_object.delete()
        self.pvc_obj.delete()
        self.delete_test_project()

        # logging the test summary, all info in one place for easy log reading
        c_speed, c_runtime, c_csi_runtime, r_speed, r_runtime, r_csi_runtime = (
            0 for i in range(6))

        log.info("Test summary :")
        for tst in all_results:
            c_speed += tst["create"]["speed"]
            c_runtime += tst["create"]["time"]
            c_csi_runtime += tst["create"]["csi_time"]
            r_speed += tst["restore"]["speed"]
            r_runtime += tst["restore"]["time"]
            r_csi_runtime += tst["restore"]["csi_time"]

            self.full_results.all_results["creation_time"].append(
                tst["create"]["time"])
            self.full_results.all_results["csi_creation_time"].append(
                tst["create"]["csi_time"])
            self.full_results.all_results["creation_speed"].append(
                tst["create"]["speed"])
            self.full_results.all_results["restore_time"].append(
                tst["restore"]["time"])
            self.full_results.all_results["restore_speed"].append(
                tst["restore"]["speed"])
            self.full_results.all_results["restore_csi_time"].append(
                tst["restore"]["csi_time"])
            # Each loop pass overwrites this key, so it ends up holding the
            # dataset size of the last iteration (the largest one).
            self.full_results.all_results["dataset_inMiB"] = tst["dataset"]
            log.info(
                f"Test {tst['test_num']} results : dataset is {tst['dataset']} MiB. "
                f"Take snapshot time is {tst['create']['time']} "
                f"at {tst['create']['speed']} MiB/Sec "
                f"Restore from snapshot time is {tst['restore']['time']} "
                f"at {tst['restore']['speed']} MiB/Sec ")

        # Averages over all iterations — these are the headline numbers
        # reported to ES.
        avg_snap_c_time = c_runtime / self.tests_numbers
        avg_snap_csi_c_time = c_csi_runtime / self.tests_numbers
        avg_snap_c_speed = c_speed / self.tests_numbers
        avg_snap_r_time = r_runtime / self.tests_numbers
        avg_snap_r_speed = r_speed / self.tests_numbers
        avg_snap_r_csi_time = r_csi_runtime / self.tests_numbers
        log.info(f" Average snapshot creation time is {avg_snap_c_time} sec.")
        log.info(
            f" Average csi snapshot creation time is {avg_snap_csi_c_time} sec."
        )
        log.info(
            f" Average snapshot creation speed is {avg_snap_c_speed} MiB/sec")
        log.info(f" Average snapshot restore time is {avg_snap_r_time} sec.")
        log.info(
            f" Average snapshot restore speed is {avg_snap_r_speed} MiB/sec")
        log.info(
            f" Average snapshot restore csi time is {avg_snap_r_csi_time} sec."
        )

        self.full_results.add_key("avg_snap_creation_time_insecs",
                                  avg_snap_c_time)
        self.full_results.add_key("avg_snap_csi_creation_time_insecs",
                                  avg_snap_csi_c_time)
        self.full_results.add_key("avg_snap_creation_speed", avg_snap_c_speed)
        self.full_results.add_key("avg_snap_restore_time_insecs",
                                  avg_snap_r_time)
        self.full_results.add_key("avg_snap_restore_speed", avg_snap_r_speed)
        self.full_results.add_key("avg_snap_restore_csi_time_insecs",
                                  avg_snap_r_csi_time)

        # Write the test results into the ES server
        log.info("writing results to elastic search server")
        if self.full_results.es_write():
            res_link = self.full_results.results_link()

            # write the ES link to the test results in the test log.
            log.info(f"The result can be found at : {res_link}")

            self.write_result_to_file(res_link)
コード例 #30
0
    def test_pvc_snapshot(self, interface, teardown_factory):
        """
        Functional check of the PVC snapshot/restore flow:

        1. Run I/O on a pod file.
        2. Calculate md5sum of the file.
        3. Take a snapshot of the PVC.
        4. Create a new PVC out of that snapshot.
        5. Attach a new pod to it.
        6. Verify that the file is present on the new pod also.
        7. Verify that the md5sum of the file on the new pod matches
           with the md5sum of the file on the original pod.

        Args:
            interface(str): The type of the interface
            (e.g. CephBlockPool, CephFileSystem)
            teardown_factory: A fixture to destroy objects
        """
        log.info(f"Running IO on pod {self.pod_obj.name}")
        file_name = self.pod_obj.name
        log.info(f"File created during IO {file_name}")
        self.pod_obj.run_io(storage_type="fs",
                            size="1G",
                            fio_filename=file_name)

        # Wait for fio to finish
        fio_result = self.pod_obj.get_fio_results()
        err_count = fio_result.get("jobs")[0].get("error")
        assert err_count == 0, (f"IO error on pod {self.pod_obj.name}. "
                                f"FIO result: {fio_result}")
        log.info(f"Verified IO on pod {self.pod_obj.name}.")

        # Verify presence of the file
        file_path = pod.get_file_path(self.pod_obj, file_name)
        log.info(f"Actual file path on the pod {file_path}")
        assert pod.check_file_existence(
            self.pod_obj, file_path), f"File {file_name} doesn't exist"
        log.info(f"File {file_name} exists in {self.pod_obj.name}")

        # Calculate md5sum
        orig_md5_sum = pod.cal_md5sum(self.pod_obj, file_name)
        # Take a snapshot
        # RBD snapshot YAML is the default; switch to the CephFS one when
        # testing the filesystem interface.
        snap_yaml = constants.CSI_RBD_SNAPSHOT_YAML
        if interface == constants.CEPHFILESYSTEM:
            snap_yaml = constants.CSI_CEPHFS_SNAPSHOT_YAML

        snap_name = helpers.create_unique_resource_name("test", "snapshot")
        snap_obj = pvc.create_pvc_snapshot(
            self.pvc_obj.name,
            snap_yaml,
            snap_name,
            self.pvc_obj.namespace,
            helpers.default_volumesnapshotclass(interface).name,
        )
        # Wait until the snapshot reports READYTOUSE before restoring from it.
        snap_obj.ocp.wait_for_resource(
            condition="true",
            resource_name=snap_obj.name,
            column=constants.STATUS_READYTOUSE,
            timeout=60,
        )
        # Register for cleanup by the teardown fixture.
        teardown_factory(snap_obj)

        # Same Storage class of the original PVC
        sc_name = self.pvc_obj.backed_sc

        # Size should be same as of the original PVC
        pvc_size = str(self.pvc_obj.size) + "Gi"

        # Create pvc out of the snapshot
        # Both, the snapshot and the restore PVC should be in same namespace
        restore_pvc_name = helpers.create_unique_resource_name(
            "test", "restore-pvc")
        restore_pvc_yaml = constants.CSI_RBD_PVC_RESTORE_YAML
        if interface == constants.CEPHFILESYSTEM:
            restore_pvc_yaml = constants.CSI_CEPHFS_PVC_RESTORE_YAML

        restore_pvc_obj = pvc.create_restore_pvc(
            sc_name=sc_name,
            snap_name=snap_obj.name,
            namespace=snap_obj.namespace,
            size=pvc_size,
            pvc_name=restore_pvc_name,
            restore_pvc_yaml=restore_pvc_yaml,
        )
        helpers.wait_for_resource_state(restore_pvc_obj,
                                        constants.STATUS_BOUND)
        restore_pvc_obj.reload()
        teardown_factory(restore_pvc_obj)

        # Create and attach pod to the pvc
        restore_pod_obj = helpers.create_pod(
            interface_type=interface,
            pvc_name=restore_pvc_obj.name,
            namespace=snap_obj.namespace,
            pod_dict_path=constants.NGINX_POD_YAML,
        )

        # Confirm that the pod is running
        helpers.wait_for_resource_state(resource=restore_pod_obj,
                                        state=constants.STATUS_RUNNING)
        restore_pod_obj.reload()
        teardown_factory(restore_pod_obj)

        # Verify that the file is present on the new pod
        log.info(f"Checking the existence of {file_name} "
                 f"on restore pod {restore_pod_obj.name}")
        assert pod.check_file_existence(
            restore_pod_obj, file_path), f"File {file_name} doesn't exist"
        log.info(f"File {file_name} exists in {restore_pod_obj.name}")

        # Verify that the md5sum matches
        log.info(f"Verifying that md5sum of {file_name} "
                 f"on pod {self.pod_obj.name} matches with md5sum "
                 f"of the same file on restore pod {restore_pod_obj.name}")
        assert pod.verify_data_integrity(
            restore_pod_obj, file_name,
            orig_md5_sum), "Data integrity check failed"
        log.info("Data integrity check passed, md5sum are same")

        log.info("Running IO on new pod")
        # Run IO on new pod to prove the restored PVC is writable as well
        restore_pod_obj.run_io(storage_type="fs", size="1G", runtime=20)

        # Wait for fio to finish
        restore_pod_obj.get_fio_results()
        # NOTE(review): log message typo — "o new pod" should read
        # "on new pod" (runtime string, left unchanged here).
        log.info("IO finished o new pod")