Example #1
    def setup(self):
        """
        Setting up test parameters
        """
        log.info("Starting the test setup")
        super(TestBulkPodAttachPerformance, self).setup()
        self.benchmark_name = "bulk_pod_attach_time"

        self.create_test_project()
        # Pull the pod image to the worker nodes, so that image pull time
        # is not included in the total attach time
        helpers.pull_images(constants.PERF_IMAGE)

        # Initializing some parameters
        self.pvc_objs = list()
        self.pods_obj = list()
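These setup snippets call framework helpers (log, helpers.pull_images, constants.PERF_IMAGE) that are defined elsewhere in ocs-ci. As a rough, self-contained sketch of the pre-pull idea, pulling the image on every worker node ahead of time so that pull time is excluded from the measured attach time, one could shell out to oc debug (the node names, and podman being available on the host, are assumptions here, not the framework's implementation):

import subprocess

def prepull_image(image, node_names):
    # Hypothetical sketch, not helpers.pull_images itself: pull the image
    # on each node via 'oc debug', so later pod starts skip the pull.
    for name in node_names:
        subprocess.run(
            ["oc", "debug", f"node/{name}", "--",
             "chroot", "/host", "podman", "pull", image],
            check=True,
        )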
Example #2
    def setup(self):
        """
        Setting up test parameters
        """
        logger.info("Starting the test setup")
        super(TestPVCClonePerformance, self).setup()
        self.benchmark_name = "pvc_clone_permorance"
        helpers.pull_images(constants.PERF_IMAGE)

        self.create_test_project()

        # Collecting environment information
        self.get_env_info()

        self.number_of_clones = 11
        if self.dev_mode:
            self.number_of_clones = 3

        self.clones_list = []
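The clone benchmark configured here ultimately measures how long PVC operations take. A minimal stand-in for that timing idea, not the framework's helpers, would be to poll a PVC with oc until it reports Bound and return the elapsed wall-clock time (the oc CLI on PATH is an assumption):

import subprocess
import time

def seconds_until_bound(pvc_name, namespace, timeout=600, poll=5):
    # Hypothetical sketch: measure how long a PVC takes to reach Bound.
    start = time.time()
    while time.time() - start < timeout:
        phase = subprocess.check_output(
            ["oc", "get", "pvc", pvc_name, "-n", namespace,
             "-o", "jsonpath={.status.phase}"],
            text=True,
        ).strip()
        if phase == "Bound":
            return time.time() - start
        time.sleep(poll)
    raise TimeoutError(f"PVC {pvc_name} not Bound within {timeout}s")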
Example #3
    def setup(self):
        """
        Setting up test parameters
        """
        log.info("Starting the test setup")
        super(TestPodStartTime, self).setup()
        self.benchmark_name = "pvc_attach_time"

        # Pull the perf image to the nodes before the test starts
        helpers.pull_images(constants.PERF_IMAGE)

        # Run the test in its own project (namespace)
        self.create_test_project()

        # Initialize some lists used in the test.
        self.pod_result_list = []
        self.start_time_dict_list = []
        self.pvc_list = []

        # The maximum acceptable attach time in sec.
        self.acceptable_time = 30
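The attach-time benchmark relies on framework helpers to extract pod start times. A rough approximation from plain pod metadata, the gap between a pod's creationTimestamp and its first container's startedAt, could look like this (field paths follow the standard Kubernetes pod status schema; oc on PATH is assumed):

import json
import subprocess
from datetime import datetime

K8S_TS = "%Y-%m-%dT%H:%M:%SZ"

def pod_start_seconds(pod_name, namespace):
    # Hypothetical sketch: approximate attach + start time from pod status.
    out = subprocess.check_output(
        ["oc", "get", "pod", pod_name, "-n", namespace, "-o", "json"],
        text=True,
    )
    pod = json.loads(out)
    created = datetime.strptime(pod["metadata"]["creationTimestamp"], K8S_TS)
    started = datetime.strptime(
        pod["status"]["containerStatuses"][0]["state"]["running"]["startedAt"],
        K8S_TS,
    )
    return (started - created).total_seconds()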
Example #4
    def test_pod_reattach_time_performance(
        self, storageclass_factory, copies, timeout, total_time_limit
    ):
        """
        Test assigning nodeName to a pod using an RWX PVC.
        Each kernel (unzipped) is 892M and 61694 files.
        The test creates samples_num PVCs and pods, writes the kernel files
        multiplied by the number of copies, and calculates the average total and
        CSI reattach times and their standard deviations.
        """
        kernel_url = "https://cdn.kernel.org/pub/linux/kernel/v4.x/linux-4.19.5.tar.gz"
        download_path = "tmp"

        samples_num = 7
        if self.dev_mode:
            samples_num = 3

        test_start_time = PASTest.get_time()
        helpers.pull_images(constants.PERF_IMAGE)
        # Download a Linux kernel

        dir_path = os.path.join(os.getcwd(), download_path)
        file_path = os.path.join(dir_path, "file.gz")
        if not os.path.exists(dir_path):
            os.makedirs(dir_path)
        urllib.request.urlretrieve(kernel_url, file_path)

        worker_nodes_list = node.get_worker_nodes()
        assert len(worker_nodes_list) > 1
        node_one = worker_nodes_list[0]
        node_two = worker_nodes_list[1]

        time_measures, csi_time_measures, files_written_list, data_written_list = (
            [],
            [],
            [],
            [],
        )

        self.sc_obj = storageclass_factory(self.interface)
        for sample_index in range(1, samples_num + 1):

            csi_start_time = self.get_time("csi")

            logger.info(f"Start creating PVC number {sample_index}.")
            pvc_obj = helpers.create_pvc(
                sc_name=self.sc_obj.name, size="100Gi", namespace=self.namespace
            )
            helpers.wait_for_resource_state(pvc_obj, constants.STATUS_BOUND)

            # Create a pod on one node
            logger.info(f"Creating Pod with pvc {pvc_obj.name} on node {node_one}")

            pvc_obj.reload()
            self.pvc_list.append(pvc_obj)

            try:
                pod_obj1 = helpers.create_pod(
                    interface_type=self.interface,
                    pvc_name=pvc_obj.name,
                    namespace=pvc_obj.namespace,
                    node_name=node_one,
                    pod_dict_path=constants.PERF_POD_YAML,
                )
            except Exception as e:
                logger.error(
                    f"Pod on PVC {pvc_obj.name} was not created, exception {str(e)}"
                )
                raise PodNotCreated("Pod on PVC was not created.")

            # Confirm that the pod is running on the selected node
            logger.info("Checking whether the pod is running on the selected node")
            helpers.wait_for_resource_state(
                resource=pod_obj1, state=constants.STATUS_RUNNING, timeout=timeout
            )

            pod_name = pod_obj1.name
            pod_path = "/mnt"

            _ocp = OCP(namespace=pvc_obj.namespace)

            rsh_cmd = f"rsync {dir_path} {pod_name}:{pod_path}"
            _ocp.exec_oc_cmd(rsh_cmd)

            rsh_cmd = (
                f"exec {pod_name} -- tar xvf {pod_path}/tmp/file.gz -C {pod_path}/tmp"
            )
            _ocp.exec_oc_cmd(rsh_cmd)

            for x in range(copies):
                rsh_cmd = f"exec {pod_name} -- mkdir -p {pod_path}/folder{x}"
                _ocp.exec_oc_cmd(rsh_cmd)
                rsh_cmd = (
                    f"exec {pod_name} -- cp -r {pod_path}/tmp {pod_path}/folder{x}"
                )
                _ocp.exec_oc_cmd(rsh_cmd)
                rsh_cmd = f"exec {pod_name} -- sync"
                _ocp.exec_oc_cmd(rsh_cmd)

            logger.info("Getting the amount of data written to the PVC")
            rsh_cmd = f"exec {pod_name} -- df -h {pod_path}"
            data_written_str = _ocp.exec_oc_cmd(rsh_cmd).split()[-4]
            logger.info(f"The amount of written data is {data_written_str}")
            data_written = float(data_written_str[:-1])

            rsh_cmd = f"exec {pod_name} -- find {pod_path} -type f"
            files_written = len(_ocp.exec_oc_cmd(rsh_cmd).split())
            logger.info(
                f"For {self.interface} - The number of files written to the pod is {files_written}"
            )
            files_written_list.append(files_written)
            data_written_list.append(data_written)

            logger.info("Deleting the pod")
            rsh_cmd = f"delete pod {pod_name}"
            _ocp.exec_oc_cmd(rsh_cmd)

            logger.info(f"Creating Pod with pvc {pvc_obj.name} on node {node_two}")

            try:
                pod_obj2 = helpers.create_pod(
                    interface_type=self.interface,
                    pvc_name=pvc_obj.name,
                    namespace=pvc_obj.namespace,
                    node_name=node_two,
                    pod_dict_path=constants.PERF_POD_YAML,
                )
            except Exception as e:
                logger.error(
                    f"Pod on PVC {pvc_obj.name} was not created, exception {str(e)}"
                )
                raise PodNotCreated("Pod on PVC was not created.")

            start_time = time.time()

            pod_name = pod_obj2.name
            helpers.wait_for_resource_state(
                resource=pod_obj2, state=constants.STATUS_RUNNING, timeout=timeout
            )
            end_time = time.time()
            total_time = end_time - start_time
            if total_time > total_time_limit:
                logger.error(
                    f"Pod creation time is {total_time} seconds, greater than the "
                    f"limit of {total_time_limit} seconds"
                )
                raise ex.PerformanceException(
                    f"Pod creation time is {total_time} seconds, greater than the "
                    f"limit of {total_time_limit} seconds"
                )

            csi_time = performance_lib.pod_attach_csi_time(
                self.interface, pvc_obj.backed_pv, csi_start_time, pvc_obj.namespace
            )[0]
            csi_time_measures.append(csi_time)
            logger.info(
                f"PVC {pvc_obj.name}: pod {pod_name} creation took {total_time} seconds, "
                f"csi time is {csi_time} seconds"
            )
            time_measures.append(total_time)

            logger.info("Deleting the pod")
            rsh_cmd = f"delete pod {pod_name}"
            _ocp.exec_oc_cmd(rsh_cmd)
            # teardown_factory(pod_obj2)

        average = statistics.mean(time_measures)
        logger.info(
            f"The average time of {self.interface} pod creation on {samples_num} PVCs is {average} seconds"
        )

        st_deviation = statistics.stdev(time_measures)
        logger.info(
            f"The standard deviation of {self.interface} pod creation time on {samples_num} PVCs is {st_deviation}"
        )

        csi_average = statistics.mean(csi_time_measures)
        logger.info(
            f"The average csi time of {self.interface} pod creation on {samples_num} PVCs is {csi_average} seconds"
        )

        csi_st_deviation = statistics.stdev(csi_time_measures)
        logger.info(
            f"The standard deviation of {self.interface} csi pod creation time on {samples_num} PVCs "
            f"is {csi_st_deviation}"
        )

        files_written_average = statistics.mean(files_written_list)
        data_written_average = statistics.mean(data_written_list)

        os.remove(file_path)
        os.rmdir(dir_path)

        # Produce ES report

        # Collecting environment information
        self.get_env_info()

        # Initialize the results doc file.
        full_results = self.init_full_results(
            ResultsAnalyse(
                self.uuid,
                self.crd_data,
                self.full_log_path,
                "pod_reattach_time_fullres",
            )
        )

        full_results.add_key("storageclass", self.sc)
        full_results.add_key("pod_reattach_time", time_measures)
        full_results.add_key("copies_number", copies)
        full_results.add_key("files_number_average", files_written_average)
        full_results.add_key("data_average", data_written_average)
        full_results.add_key("pod_reattach_time_average", average)
        full_results.add_key("pod_reattach_standard_deviation", st_deviation)
        full_results.add_key("pod_csi_reattach_time_average", csi_average)
        full_results.add_key("pod_csi_reattach_standard_deviation", csi_st_deviation)

        test_end_time = PASTest.get_time()

        # Add the test time to the ES report
        full_results.add_key(
            "test_time", {"start": test_start_time, "end": test_end_time}
        )

        # Write the test results into the ES server
        if full_results.es_write():
            res_link = full_results.results_link()
            logger.info(f"The Result can be found at : {res_link}")

            # Create a text file with the results of all subtests (4, according to the parameters)
            self.results_path = get_full_test_logs_path(
                cname=self, fname="test_pod_reattach_time_performance"
            )
            self.write_result_to_file(res_link)
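The summary statistics above are just the sample mean and standard deviation from Python's statistics module. A tiny self-contained illustration with made-up attach times:

import statistics

samples = [12.3, 11.8, 13.1, 12.6]  # made-up attach times, in seconds
average = statistics.mean(samples)
st_deviation = statistics.stdev(samples)  # sample stdev; needs >= 2 values
print(f"average={average:.2f}s stdev={st_deviation:.2f}s")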
Example #5
    def test_pvc_reattach_time_performance(self, pvc_factory, teardown_factory):
        """
        Test assigning nodeName to a pod using an RWX PVC.
        Performance counterpart of test_multiple_pvc_creation_measurement_performance.
        Each kernel (unzipped) is 892M and 61694 files.
        """

        kernel_url = "https://cdn.kernel.org/pub/linux/kernel/v4.x/linux-4.19.5.tar.gz"
        download_path = "tmp"
        # Number of times we copy the kernel
        copies = 3

        import os

        # Download a Linux kernel
        dir_path = os.path.join(os.getcwd(), download_path)
        file_path = os.path.join(dir_path, "file.gz")
        if not os.path.exists(dir_path):
            os.makedirs(dir_path)
        urllib.request.urlretrieve(kernel_url, file_path)

        worker_nodes_list = node.get_worker_nodes()
        assert len(worker_nodes_list) > 1
        node_one = worker_nodes_list[0]
        node_two = worker_nodes_list[1]

        # Create a PVC
        accessmode = constants.ACCESS_MODE_RWX
        if self.interface == constants.CEPHBLOCKPOOL:
            accessmode = constants.ACCESS_MODE_RWO
        pvc_obj = pvc_factory(
            interface=self.interface,
            access_mode=accessmode,
            status=constants.STATUS_BOUND,
            size="15",
        )

        # Create a pod on one node
        logging.info(f"Creating Pod with pvc {pvc_obj.name} on node {node_one}")

        helpers.pull_images("nginx")
        pod_obj1 = helpers.create_pod(
            interface_type=self.interface,
            pvc_name=pvc_obj.name,
            namespace=pvc_obj.namespace,
            node_name=node_one,
            pod_dict_path=constants.NGINX_POD_YAML,
        )

        # Confirm that the pod is running on the selected node
        logging.info("Checking whether the pod is running on the selected node")
        helpers.wait_for_resource_state(
            resource=pod_obj1, state=constants.STATUS_RUNNING, timeout=120
        )

        pod_name = pod_obj1.name
        pod_path = "/var/lib/www/html"

        _ocp = OCP(namespace=pvc_obj.namespace)

        rsh_cmd = f"exec {pod_name} -- apt-get update"
        _ocp.exec_oc_cmd(rsh_cmd)
        rsh_cmd = f"exec {pod_name} -- apt-get install -y rsync"
        _ocp.exec_oc_cmd(rsh_cmd, ignore_error=True, out_yaml_format=False)

        rsh_cmd = f"rsync {dir_path} {pod_name}:{pod_path}"
        _ocp.exec_oc_cmd(rsh_cmd)

        rsh_cmd = f"exec {pod_name} -- tar xvf {pod_path}/tmp/file.gz -C /var/lib/www/html/tmp"
        _ocp.exec_oc_cmd(rsh_cmd)

        for x in range(copies):
            rsh_cmd = f"exec {pod_name} -- mkdir -p {pod_path}/folder{x}"
            _ocp.exec_oc_cmd(rsh_cmd)
            rsh_cmd = f"exec {pod_name} -- cp -r {pod_path}/tmp {pod_path}/folder{x}"
            _ocp.exec_oc_cmd(rsh_cmd)

        rsh_cmd = f"delete pod {pod_name}"
        _ocp.exec_oc_cmd(rsh_cmd)

        logging.info(f"Creating Pod with pvc {pvc_obj.name} on node {node_two}")

        pod_obj2 = helpers.create_pod(
            interface_type=self.interface,
            pvc_name=pvc_obj.name,
            namespace=pvc_obj.namespace,
            node_name=node_two,
            pod_dict_path=constants.NGINX_POD_YAML,
        )

        start_time = time.time()

        pod_name = pod_obj2.name
        helpers.wait_for_resource_state(
            resource=pod_obj2, state=constants.STATUS_RUNNING, timeout=120
        )
        end_time = time.time()
        total_time = end_time - start_time
        if total_time > 60:
            raise ex.PerformanceException(
                f"Pod creation time is {total_time} seconds, greater than 60 seconds"
            )
        logging.info(f"Pod {pod_name} creation time took {total_time} seconds")

        teardown_factory(pod_obj2)
        os.remove(file_path)
        os.rmdir(dir_path)
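Both reattach tests populate the volume the same way: rsync the downloaded kernel into the pod, then copy it a number of times. Outside the framework's OCP wrapper, the equivalent could look roughly like this (pod name, namespace, paths, and a shell inside the image are assumptions):

import subprocess

def populate_pod(pod_name, namespace, local_dir, pod_path="/mnt", copies=3):
    # Hypothetical sketch of the population step, using the oc CLI directly.
    oc = ["oc", "-n", namespace]
    subprocess.run(oc + ["rsync", local_dir, f"{pod_name}:{pod_path}"], check=True)
    for x in range(copies):
        subprocess.run(
            oc + ["exec", pod_name, "--", "sh", "-c",
                  f"mkdir -p {pod_path}/folder{x} && "
                  f"cp -r {pod_path}/tmp {pod_path}/folder{x} && sync"],
            check=True,
        )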
Example #6
    def test_pvc_clone_performance_multiple_files(
        self,
        pvc_factory,
        interface,
        copies,
        timeout,
    ):
        """
        Test assigning nodeName to a pod using an RWX PVC.
        Each kernel (unzipped) is 892M and 61694 files.
        The test creates a PVC and a pod, and writes the kernel files multiplied
        by the number of copies. It then creates a number of clone samples,
        measures creation and deletion times for each clone, and calculates the
        average creation and average deletion times.
        """
        kernel_url = "https://cdn.kernel.org/pub/linux/kernel/v4.x/linux-4.19.5.tar.gz"
        download_path = "tmp"

        test_start_time = self.get_time()
        helpers.pull_images(constants.PERF_IMAGE)
        # Download a Linux kernel

        dir_path = os.path.join(os.getcwd(), download_path)
        file_path = os.path.join(dir_path, "file.gz")
        if not os.path.exists(dir_path):
            os.makedirs(dir_path)
        urllib.request.urlretrieve(kernel_url, file_path)

        # Create a PVC
        accessmode = constants.ACCESS_MODE_RWX
        if interface == constants.CEPHBLOCKPOOL:
            accessmode = constants.ACCESS_MODE_RWO

        pvc_size = "100"
        try:
            pvc_obj = pvc_factory(
                interface=interface,
                access_mode=accessmode,
                status=constants.STATUS_BOUND,
                size=pvc_size,
            )
        except Exception as e:
            logger.error(f"The PVC sample was not created, exception {str(e)}")
            raise PVCNotCreated("PVC did not reach BOUND state.")

        # Create a pod on one node
        logger.info(f"Creating Pod with pvc {pvc_obj.name}")

        try:
            pod_obj = helpers.create_pod(
                interface_type=interface,
                pvc_name=pvc_obj.name,
                namespace=pvc_obj.namespace,
                pod_dict_path=constants.PERF_POD_YAML,
            )
        except Exception as e:
            logger.error(
                f"Pod on PVC {pvc_obj.name} was not created, exception {str(e)}"
            )
            raise PodNotCreated("Pod on PVC was not created.")

        # Confirm that the pod is running
        logger.info("Checking whether the pod is running")
        helpers.wait_for_resource_state(
            resource=pod_obj, state=constants.STATUS_RUNNING, timeout=timeout
        )

        pod_name = pod_obj.name
        pod_path = "/mnt"

        _ocp = OCP(namespace=pvc_obj.namespace)

        rsh_cmd = f"rsync {dir_path} {pod_name}:{pod_path}"
        _ocp.exec_oc_cmd(rsh_cmd)

        rsh_cmd = f"exec {pod_name} -- tar xvf {pod_path}/tmp/file.gz -C {pod_path}/tmp"
        _ocp.exec_oc_cmd(rsh_cmd)

        for x in range(copies):
            rsh_cmd = f"exec {pod_name} -- mkdir -p {pod_path}/folder{x}"
            _ocp.exec_oc_cmd(rsh_cmd)
            rsh_cmd = f"exec {pod_name} -- cp -r {pod_path}/tmp {pod_path}/folder{x}"
            _ocp.exec_oc_cmd(rsh_cmd)
            rsh_cmd = f"exec {pod_name} -- sync"
            _ocp.exec_oc_cmd(rsh_cmd)

        logger.info("Getting the amount of data written to the PVC")
        rsh_cmd = f"exec {pod_name} -- df -h {pod_path}"
        data_written = _ocp.exec_oc_cmd(rsh_cmd).split()[-4]
        logger.info(f"The amount of written data is {data_written}")

        rsh_cmd = f"exec {pod_name} -- find {pod_path} -type f"
        files_written = len(_ocp.exec_oc_cmd(rsh_cmd).split())
        logger.info(
            f"For {interface} - The number of files written to the pod is {files_written}"
        )

        # Delete the pod
        pod_obj.delete(wait=False)

        logger.info("Wait for the pod to be deleted")
        performance_lib.wait_for_resource_bulk_status(
            "pod", 0, pvc_obj.namespace, constants.STATUS_COMPLETED, timeout, 5
        )
        logger.info("The pod was deleted")

        num_of_clones = 11
        # Increase the timeout, since clone creation takes longer than pod attach
        timeout = 18000

        clone_yaml = constants.CSI_RBD_PVC_CLONE_YAML
        if interface == constants.CEPHFILESYSTEM:
            clone_yaml = constants.CSI_CEPHFS_PVC_CLONE_YAML

        clone_creation_measures = []
        csi_clone_creation_measures = []
        clone_deletion_measures = []
        csi_clone_deletion_measures = []

        # Record the start time, so that parsing the provisioning log will be faster
        start_time = datetime.datetime.utcnow().strftime("%Y-%m-%dT%H:%M:%SZ")

        for i in range(num_of_clones):
            logger.info(f"Start creation of clone number {i + 1}.")
            cloned_pvc_obj = pvc.create_pvc_clone(
                pvc_obj.backed_sc,
                pvc_obj.name,
                clone_yaml,
                pvc_obj.namespace,
                storage_size=pvc_size + "Gi",
            )
            helpers.wait_for_resource_state(
                cloned_pvc_obj, constants.STATUS_BOUND, timeout
            )

            cloned_pvc_obj.reload()
            logger.info(
                f"Clone with name {cloned_pvc_obj.name} for {pvc_size} pvc {pvc_obj.name} was created."
            )
            create_time = helpers.measure_pvc_creation_time(
                interface, cloned_pvc_obj.name
            )
            logger.info(
                f"Clone number {i+1} creation time is {create_time} secs for {pvc_size} GB pvc."
            )
            clone_creation_measures.append(create_time)
            csi_clone_creation_measures.append(
                performance_lib.csi_pvc_time_measure(
                    interface, cloned_pvc_obj, "create", start_time
                )
            )

            pvc_reclaim_policy = cloned_pvc_obj.reclaim_policy
            cloned_pvc_obj.delete()
            logger.info(
                f"Deletion of clone number {i + 1}, the clone name is {cloned_pvc_obj.name}."
            )
            cloned_pvc_obj.ocp.wait_for_delete(cloned_pvc_obj.name, timeout)
            if pvc_reclaim_policy == constants.RECLAIM_POLICY_DELETE:
                helpers.validate_pv_delete(cloned_pvc_obj.backed_pv)
            delete_time = helpers.measure_pvc_deletion_time(
                interface, cloned_pvc_obj.backed_pv
            )
            logger.info(
                f"Clone number {i + 1} deletion time is {delete_time} secs for {pvc_size} GB pvc."
            )

            clone_deletion_measures.append(delete_time)
            csi_clone_deletion_measures.append(
                performance_lib.csi_pvc_time_measure(
                    interface, cloned_pvc_obj, "delete", start_time
                )
            )

        os.remove(file_path)
        os.rmdir(dir_path)
        pvc_obj.delete()

        average_creation_time = statistics.mean(clone_creation_measures)
        logger.info(f"Average creation time is {average_creation_time} secs.")
        average_csi_creation_time = statistics.mean(csi_clone_creation_measures)
        logger.info(f"Average csi creation time is {average_csi_creation_time} secs.")

        average_deletion_time = statistics.mean(clone_deletion_measures)
        logger.info(f"Average deletion time is {average_deletion_time} secs.")
        average_csi_deletion_time = statistics.mean(csi_clone_deletion_measures)
        logger.info(f"Average csi deletion time is {average_csi_deletion_time} secs.")

        # Produce ES report

        # Collecting environment information
        self.get_env_info()
        self.results_path = get_full_test_logs_path(cname=self)
        # Initialize the results doc file.
        full_results = self.init_full_results(
            ResultsAnalyse(
                self.uuid,
                self.crd_data,
                self.full_log_path,
                "test_pvc_clone_performance_multiple_files_fullres",
            )
        )

        full_results.add_key("files_number", files_written)

        test_end_time = self.get_time()

        full_results.add_key("test_time", {
            "start": test_start_time,
            "end": test_end_time
        })

        full_results.add_key("interface", interface)
        full_results.add_key("clones_number", num_of_clones)
        full_results.add_key("pvc_size", pvc_size)
        full_results.add_key("average_clone_creation_time",
                             average_creation_time)
        full_results.add_key("average_csi_clone_creation_time",
                             average_csi_creation_time)
        full_results.add_key("average_clone_deletion_time",
                             average_deletion_time)
        full_results.add_key("average_csi_clone_deletion_time",
                             average_csi_deletion_time)

        full_results.all_results = {
            "clone_creation_time": clone_creation_measures,
            "csi_clone_creation_time": csi_clone_creation_measures,
            "clone_deletion_time": clone_deletion_measures,
            "csi_clone_deletion_time": csi_clone_deletion_measures,
        }

        # Write the test results into the ES server
        if full_results.es_write():
            res_link = full_results.results_link()
            logger.info(f"The Result can be found at : {res_link}")

            # Create a text file with the results of all subtests (4, according to the parameters)
            self.results_path = get_full_test_logs_path(
                cname=self, fname="test_pvc_clone_performance_multiple_files"
            )
            self.write_result_to_file(res_link)
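pvc.create_pvc_clone builds the clone from a YAML template (CSI_RBD_PVC_CLONE_YAML or CSI_CEPHFS_PVC_CLONE_YAML, selected above). The underlying Kubernetes mechanism is a new PVC whose dataSource references the parent PVC. A minimal sketch of that manifest, applied with oc (names, storage class, and access mode are placeholders, not the framework's template):

import json
import subprocess

def create_pvc_clone_sketch(parent_pvc, clone_name, namespace, sc_name, size="100Gi"):
    # Hypothetical sketch of CSI volume cloning: a PVC with a dataSource
    # pointing at the parent PVC. Not the framework's create_pvc_clone.
    manifest = {
        "apiVersion": "v1",
        "kind": "PersistentVolumeClaim",
        "metadata": {"name": clone_name, "namespace": namespace},
        "spec": {
            "storageClassName": sc_name,
            "accessModes": ["ReadWriteOnce"],
            "resources": {"requests": {"storage": size}},
            "dataSource": {"kind": "PersistentVolumeClaim", "name": parent_pvc},
        },
    }
    subprocess.run(
        ["oc", "apply", "-f", "-"],
        input=json.dumps(manifest), text=True, check=True,
    )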