Example #1
    def test_create_delete_pvc(
        self, setup_ui, sc_type, pvc_name, access_mode, pvc_size, vol_mode
    ):
        """
        Test create and delete pvc via UI

        """
        pvc_ui_obj = PvcUI(setup_ui)
        pvc_ui_obj.create_pvc_ui(sc_type, pvc_name, access_mode, pvc_size, vol_mode)
        time.sleep(2)

        pvc_objs = get_all_pvc_objs(namespace="openshift-storage")
        pvc = [pvc_obj for pvc_obj in pvc_objs if pvc_obj.name == pvc_name]

        assert pvc[0].size == int(pvc_size), (
            f"size error| expected size:{pvc_size} \n "
            f"actual size:{str(pvc[0].size)}"
        )

        assert pvc[0].get_pvc_access_mode == access_mode, (
            f"access mode error| expected access mode:{access_mode} "
            f"\n actual access mode:{pvc[0].get_pvc_access_mode}"
        )

        assert pvc[0].backed_sc == sc_type, (
            f"storage class error| expected storage class:{sc_type} "
            f"\n actual storage class:{pvc[0].backed_sc}"
        )

        assert pvc[0].get_pvc_vol_mode == vol_mode, (
            f"volume mode error| expected volume mode:{vol_mode} "
            f"\n actual volume mode:{pvc[0].get_pvc_vol_mode}"
        )

        logger.info("Verifying PVC Details via UI")
        pvc_ui_obj.verify_pvc_ui(
            pvc_size=pvc_size,
            access_mode=access_mode,
            vol_mode=vol_mode,
            sc_type=sc_type,
        )
        logger.info("PVC Details Verified via UI..!!")

        logger.info(f"Delete {pvc_name} pvc")
        pvc_ui_obj.delete_pvc_ui(pvc_name)
        time.sleep(5)

        pvc_objs = get_all_pvc_objs(namespace="openshift-storage")
        pvcs = [pvc_obj for pvc_obj in pvc_objs if pvc_obj.name == pvc_name]
        assert len(pvcs) == 0, f"PVC {pvc_name} was not deleted"
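
The fetch-then-filter pattern above (get_all_pvc_objs followed by a name match)
recurs throughout these examples. A minimal helper sketch built from that
pattern; the helper name is hypothetical:

def get_pvc_obj_by_name(pvc_name, namespace):
    """Return the PVC object with the given name, or None if absent."""
    # get_all_pvc_objs returns every PVC in the namespace; filter client-side
    matches = [
        pvc_obj
        for pvc_obj in get_all_pvc_objs(namespace=namespace)
        if pvc_obj.name == pvc_name
    ]
    return matches[0] if matches else None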
Example #2
def wait_for_all_resources_deletion(
    namespace, check_replication_resources_state=True, timeout=900
):
    """
    Wait for workload and replication resources to be deleted

    Args:
        namespace (str): the namespace of the workload
        check_replication_resources_state (bool): True for checking replication resources state, False otherwise
        timeout (int): time in seconds to wait for resource deletion

    """
    logger.info("Waiting for all pods to be deleted")
    all_pods = get_all_pods(namespace=namespace)
    for pod_obj in all_pods:
        pod_obj.ocp.wait_for_delete(
            resource_name=pod_obj.name, timeout=timeout, sleep=5
        )

    wait_for_replication_resources_deletion(
        namespace, timeout, check_replication_resources_state
    )

    logger.info("Waiting for all PVCs to be deleted")
    all_pvcs = get_all_pvc_objs(namespace=namespace)
    for pvc_obj in all_pvcs:
        pvc_obj.ocp.wait_for_delete(
            resource_name=pvc_obj.name, timeout=timeout, sleep=5
        )
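
A hedged usage sketch for the helper above; the namespace value is illustrative:

# Wait up to 15 minutes for a workload namespace to drain completely,
# skipping the replication-resources state check.
wait_for_all_resources_deletion(
    namespace="busybox-workloads",
    check_replication_resources_state=False,
    timeout=900,
)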
Example #3
    def test_monitoring_delete_pvc(self):
        """
        Test case to validate that deleting the PVCs and configmap, and
        recovering a node where monitoring pods run, has no functional impact

        """
        # Get 'cluster-monitoring-config' configmap
        ocp_configmap = ocp.OCP(
            namespace=constants.MONITORING_NAMESPACE, kind="configmap"
        )
        configmap_dict = ocp_configmap.get(resource_name="cluster-monitoring-config")
        dir_configmap = tempfile.mkdtemp(prefix="configmap_")
        yaml_file = f"{dir_configmap}/configmap.yaml"
        templating.dump_data_to_temp_yaml(configmap_dict, yaml_file)

        # Get prometheus and alertmanager pods
        prometheus_alertmanager_pods = pod.get_all_pods(
            namespace=defaults.OCS_MONITORING_NAMESPACE,
            selector=["prometheus", "alertmanager"],
        )

        # Get all pvc on monitoring namespace
        pvc_objs_list = pvc.get_all_pvc_objs(namespace=constants.MONITORING_NAMESPACE)

        # Delete configmap
        ocp_configmap.delete(resource_name="cluster-monitoring-config")

        # Delete all pvcs on monitoring namespace
        pvc.delete_pvcs(pvc_objs=pvc_objs_list)

        # Check all the prometheus and alertmanager pods are up
        for pod_obj in prometheus_alertmanager_pods:
            wait_for_resource_state(
                resource=pod_obj, state=constants.STATUS_RUNNING, timeout=180
            )

        # Recreate the configmap; `oc create -f` accepts a directory, and
        # dir_configmap contains only the dumped configmap.yaml
        ocp_configmap.create(yaml_file=dir_configmap)

        # Check all the PVCs are up
        for pvc_obj in pvc_objs_list:
            wait_for_resource_state(
                resource=pvc_obj, state=constants.STATUS_BOUND, timeout=180
            )

        # Check all the prometheus and alertmanager pods are up
        # and pvc are mounted on monitoring pods
        for pod_obj in prometheus_alertmanager_pods:
            wait_for_resource_state(
                resource=pod_obj, state=constants.STATUS_RUNNING, timeout=180
            )
            mount_point = pod_obj.exec_cmd_on_pod(
                command="df -kh",
                out_yaml_format=False,
            )
            assert "/dev/rbd" in mount_point, f"pvc is not mounted on pod {pod.name}"
        log.info("Verified all pvc are mounted on monitoring pods")

        # Validate the prometheus health is ok
        assert prometheus_health_check(), "Prometheus cluster health is not OK"
Example #4
    def run_fio_on_pvcs(self, pvc_dict_list, pod_factory):
        total_files_size = 0
        searched_pvc_objs = pvc.get_all_pvc_objs(namespace=self.namespace)
        logging.info(f"Found {len(searched_pvc_objs)} PVCs")
        for pvc_yaml in pvc_dict_list:
            pvc_name = pvc_yaml["metadata"]["name"]
            pvc_size = pvc_yaml["spec"]["resources"]["requests"]["storage"]
            logging.info(f"Size of pvc {pvc_name} is {pvc_size}")
            pvc_size_int = int(pvc_size[:-2])  # without "Gi"
            file_size_mb = int(pvc_size_int * 0.6) * constants.GB2MB
            total_files_size += file_size_mb
            file_size_mb_str = str(file_size_mb) + "M"
            logging.info(f"Writing file of size {file_size_mb_str}")

            # now find pvc_obj by name and create pod_obj to write to
            pvc_obj = None
            for obj in searched_pvc_objs:
                if obj.name == pvc_name:
                    pvc_obj = obj
                    searched_pvc_objs.remove(obj)
                    break
            assert pvc_obj is not None, f"Cannot find PVC with name {pvc_name}"

            logging.info(f"PVC with name {pvc_name} found")
            pod_obj = pod_factory(interface=self.interface,
                                  pvc=pvc_obj,
                                  status=constants.STATUS_RUNNING)

            performance_lib.write_fio_on_pod(pod_obj, file_size_mb_str)

        return total_files_size
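
A worked instance of the file-size arithmetic above, assuming constants.GB2MB
is 1024 (the capacity value is illustrative):

pvc_size = "5Gi"
pvc_size_int = int(pvc_size[:-2])              # 5, after stripping "Gi"
file_size_mb = int(pvc_size_int * 0.6) * 1024  # 60% of 5 GiB -> 3 * 1024 = 3072
file_size_mb_str = str(file_size_mb) + "M"     # "3072M", the size passed to fio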
Example #5
    def cleanup(self):
        """
        Function to tear down
        """
        # Delete all pods, pvcs and namespaces
        for namespace in self.namespace_list:
            delete_objs_parallel(
                obj_list=pod.get_all_pods(namespace=namespace.namespace),
                namespace=namespace.namespace,
                kind=self.kind,
            )
            delete_objs_parallel(
                obj_list=pvc.get_all_pvc_objs(namespace=namespace.namespace),
                namespace=namespace.namespace,
                kind=constants.PVC,
            )
            ocp = OCP(kind=constants.NAMESPACE)
            ocp.delete(resource_name=namespace.namespace)

        # Remove scale label from worker nodes in cleanup
        scale_workers = machine.get_labeled_nodes(constants.SCALE_LABEL)
        helpers.remove_label_from_worker_node(node_list=scale_workers,
                                              label_key="scale-label")

        # Delete machineset which will delete respective nodes too for aws-ipi platform
        if self.ms_name:
            for name in self.ms_name:
                machine.delete_custom_machineset(name)
Example #6
    def get_postgres_pvc(self):
        """
        Get all postgres pvc

        Returns:
             List: postgres pvc objects list
        """
        return get_all_pvc_objs(namespace=RIPSAW_NAMESPACE)
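
A minimal usage sketch; postgresql_obj stands in for whatever object exposes
this method:

postgres_pvcs = postgresql_obj.get_postgres_pvc()
for pvc_obj in postgres_pvcs:
    logger.info(f"Postgres PVC found: {pvc_obj.name}")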
Example #7
    def cleanup(
        self,
        kafka_namespace=constants.AMQ_NAMESPACE,
        tiller_namespace=AMQ_BENCHMARK_NAMESPACE,
    ):
        """
        Clean-up function. Deletion starts with the AMQ cluster operator,
        then the AMQ connector, persistent cluster, and bridge; at the end
        it deletes the created namespaces.

        Args:
            kafka_namespace (str): Created namespace for amq
            tiller_namespace (str): Created namespace for benchmark

        """
        if self.amq_is_setup:
            if self.messaging:
                self.consumer_pod.delete()
                self.producer_pod.delete()
                self.kafka_user.delete()
                self.kafka_topic.delete()
            if self.benchmark:
                # Delete the helm app
                try:
                    purge_cmd = f"linux-amd64/helm delete benchmark --purge --tiller-namespace {tiller_namespace}"
                    run(purge_cmd, shell=True, cwd=self.dir, check=True)
                except (CommandFailed, CalledProcessError) as cf:
                    log.error("Failed to delete help app")
                    raise cf

                # Delete the pods and namespace created
                self.sa_tiller.delete()
                self.crb_tiller.delete()
                run_cmd(f"oc delete project {tiller_namespace}")
                self.ns_obj.wait_for_delete(resource_name=tiller_namespace)

            self.kafka_persistent.delete()
            self.kafka_connect.delete()
            self.kafka_bridge.delete()
            run_cmd(f"oc delete -f {self.amq_dir}",
                    shell=True,
                    check=True,
                    cwd=self.dir)

        # Fetch the PVC list unconditionally, before the namespace is deleted,
        # so the loop below cannot hit a NameError when setup was skipped
        ocs_pvc_obj = get_all_pvc_objs(namespace=kafka_namespace)

        run_cmd(f"oc delete project {kafka_namespace}")

        self.ns_obj.wait_for_delete(resource_name=kafka_namespace, timeout=90)
        for pvc in ocs_pvc_obj:
            logging.info(pvc.name)
            validate_pv_delete(pvc.backed_pv)
        # Reset namespace to default
        switch_to_default_rook_cluster_project()
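
A hedged usage sketch; amq_obj stands in for the object carrying this method,
and both arguments are the defaults from the signature above:

amq_obj.cleanup(
    kafka_namespace=constants.AMQ_NAMESPACE,
    tiller_namespace=AMQ_BENCHMARK_NAMESPACE,
)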
Example #8
    def validate_pvc(self):
        """
        Check whether all PVCs are in bound state

        """
        ocs_pvc_obj = get_all_pvc_objs(namespace=self.namespace)

        for pvc_obj in ocs_pvc_obj:
            assert (
                pvc_obj.status == constants.STATUS_BOUND
            ), f"PVC {pvc_obj.name} is not Bound"
            logger.info(f"PVC {pvc_obj.name} is in Bound state")
Example #9
    def run_fio_on_pvcs(self, pvc_size):
        searched_pvc_objs = pvc.get_all_pvc_objs(namespace=self.namespace)
        pod_objs = pod.get_all_pods(namespace=self.namespace)
        log.info(f"Found {len(searched_pvc_objs)} PVCs")
        pvc_size_int = int(pvc_size[:-2])  # without "Gi"
        file_size_mb = int(pvc_size_int * 0.6) * constants.GB2MB
        total_files_size = file_size_mb * len(searched_pvc_objs)
        file_size_mb_str = str(file_size_mb) + "M"
        log.info(f"Writing file of size {file_size_mb_str} in each PVC")

        for objs in pod_objs:
            performance_lib.write_fio_on_pod(objs, file_size_mb_str)

        return total_files_size
Example #10
def validate_cluster_on_pvc():
    """
    Validate creation of PVCs for MON and OSD pods.
    Also validate that those PVCs are attached to the OCS pods

    Raises:
         AssertionError: If PVC is not mounted on one or more OCS pods

    """
    # Get the PVCs for selected label (MON/OSD)
    ns = config.ENV_DATA['cluster_namespace']
    ocs_pvc_obj = get_all_pvc_objs(namespace=ns)

    # Check all pvc's are in bound state

    pvc_names = []
    for pvc_obj in ocs_pvc_obj:
        if (pvc_obj.name.startswith(constants.DEFAULT_DEVICESET_PVC_NAME)
                or pvc_obj.name.startswith(constants.DEFAULT_MON_PVC_NAME)):
            assert pvc_obj.status == constants.STATUS_BOUND, (
                f"PVC {pvc_obj.name} is not Bound"
            )
            logger.info(f"PVC {pvc_obj.name} is in Bound state")
            pvc_names.append(pvc_obj.name)

    mon_pods = get_pod_name_by_pattern('rook-ceph-mon', ns)
    if not config.DEPLOYMENT.get('local_storage'):
        logger.info("Validating all mon pods have PVC")
        validate_ocs_pods_on_pvc(mon_pods, pvc_names)
    else:
        logger.debug(
            "Skipping validation if all mon pods have PVC because in LSO "
            "deployment we don't have mon pods backed by PVC"
        )
    logger.info("Validating all osd pods have PVC")
    osd_deviceset_pods = get_pod_name_by_pattern(
        'rook-ceph-osd-prepare-ocs-deviceset', ns
    )
    validate_ocs_pods_on_pvc(osd_deviceset_pods, pvc_names)
    osd_pods = get_pod_name_by_pattern('rook-ceph-osd', ns, filter='prepare')
    for ceph_pod in mon_pods + osd_pods:
        out = run_cmd(f'oc -n {ns} get pods {ceph_pod} -o yaml')
        out_yaml = yaml.safe_load(out)
        for vol in out_yaml['spec']['volumes']:
            if vol.get('persistentVolumeClaim'):
                claimName = vol.get('persistentVolumeClaim').get('claimName')
                logger.info(f"{ceph_pod} backed by pvc {claimName}")
                assert claimName in pvc_names, (
                    "Ceph Internal Volume not backed by PVC"
                )
Example #11
def wait_for_workload_resource_deletion(namespace, timeout=120):
    """
    Wait for workload resources to be deleted

    Args:
        namespace (str): the namespace of the workload
        timeout (int): time in seconds to wait for resource deletion

    """
    logger.info("Waiting for all pods to be deleted")
    all_pods = get_all_pods(namespace=namespace)
    for pod_obj in all_pods:
        pod_obj.ocp.wait_for_delete(resource_name=pod_obj.name, timeout=timeout)

    logger.info("Waiting for all PVCs to be deleted")
    all_pvcs = get_all_pvc_objs(namespace=namespace)
    for pvc_obj in all_pvcs:
        pvc_obj.ocp.wait_for_delete(resource_name=pvc_obj.name, timeout=timeout)
Example #12
def validate_cluster_on_pvc():
    """
    Validate creation of PVCs for MON and OSD pods.
    Also validate that those PVCs are attached to the OCS pods

    Raises:
         AssertionError: If PVC is not mounted on one or more OCS pods

    """
    # Get the PVCs for selected label (MON/OSD)
    ns = config.ENV_DATA['cluster_namespace']
    ocs_pvc_obj = get_all_pvc_objs(namespace=ns)

    # Check all pvc's are in bound state

    pvc_names = []
    for pvc_obj in ocs_pvc_obj:
        if (pvc_obj.name.startswith(constants.DEFAULT_DEVICESET_PVC_NAME)
                or pvc_obj.name.startswith(constants.DEFAULT_MON_PVC_NAME)):
            assert pvc_obj.status == constants.STATUS_BOUND, (
                f"PVC {pvc_obj.name} is not Bound"
            )
            logger.info(f"PVC {pvc_obj.name} is in Bound state")
            pvc_names.append(pvc_obj.name)

    mon_pods = get_pod_name_by_pattern('rook-ceph-mon', ns)
    osd_pods = get_pod_name_by_pattern('rook-ceph-osd', ns, filter='prepare')
    assert len(mon_pods) + len(osd_pods) == len(pvc_names), (
        "Not enough PVC's available for all Ceph Pods"
    )
    for ceph_pod in mon_pods + osd_pods:
        out = run_cmd(f'oc -n {ns} get pods {ceph_pod} -o yaml')
        out_yaml = yaml.safe_load(out)
        for vol in out_yaml['spec']['volumes']:
            if vol.get('persistentVolumeClaim'):
                claimName = vol.get('persistentVolumeClaim').get('claimName')
                logger.info(f"{ceph_pod} backed by pvc {claimName}")
                assert claimName in pvc_names, (
                    "Ceph Internal Volume not backed by PVC"
                )
Example #13
def get_osd_size():
    """
    Get osd size from Storage cluster

    Returns:
        int: osd size

    """
    # In the case of UI deployment of LSO cluster, the value in StorageCluster CR
    # is set to 1, so we can not take OSD size from there. For LSO we will return
    # the size from PVC.
    if config.DEPLOYMENT.get("local_storage"):
        ocs_pvc_objects = get_all_pvc_objs(
            namespace=config.ENV_DATA["cluster_namespace"])
        for pvc_obj in ocs_pvc_objects:
            if pvc_obj.name.startswith(constants.DEFAULT_DEVICESET_PVC_NAME):
                return int(pvc_obj.data["status"]["capacity"]["storage"][:-2])

    sc = get_storage_cluster()
    return int(sc.get().get("items")[0].get("spec").get("storageDeviceSets")
               [0].get("dataPVCTemplate").get("spec").get("resources").get(
                   "requests").get("storage")[:-2])
Example #14
def validate_cluster_on_pvc(label):
    """
    Validate creation of PVCs for MON and OSD pods.
    Also validate that those PVCs are attached to the OCS pods

    Args:
        label(string): Label for MON or OSD PVCs

    Raises:
         AssertionError: If PVC is not mounted on one or more OCS pods

    """
    # Get the PVCs for selected label (MON/OSD)
    ocs_pvc_obj = get_all_pvc_objs(namespace=defaults.ROOK_CLUSTER_NAMESPACE,
                                   selector=label)

    # Check all pvc's are in bound state
    for pvc_obj in ocs_pvc_obj:
        assert pvc_obj.status == constants.STATUS_BOUND, (
            f"PVC {pvc_obj.name} is not Bound")
        logger.info(f"PVC {pvc_obj.name} is in Bound state")

    # Get OCS pod names based on selected label
    if label == constants.MON_APP_LABEL:
        ocs_pod_obj = pod.get_mon_pods(
            mon_label=label, namespace=defaults.ROOK_CLUSTER_NAMESPACE)
    if label == constants.DEFAULT_DEVICESET_LABEL:
        ocs_pod_obj = pod.get_osd_pods(
            osd_label=constants.OSD_APP_LABEL,
            namespace=defaults.ROOK_CLUSTER_NAMESPACE)

    # Create a pvc list for requested label
    pvc_list = []
    for pvc_obj in ocs_pvc_obj:
        pvc_list.append(pvc_obj.name)

    # Check if PVC is mounted on designated OCS pod
    # loop over all PVCs in the pvc_list
    # Each mon and osd pod is expected to have only one Claim attached
    claim_found = True
    all_backed_by_pvc = True
    for pod_obj in ocs_pod_obj:
        pod_volumes = pod_obj.get().get('spec').get('volumes')
        claim_spec_exists = False
        # Initialize per pod so the post-loop check cannot raise a NameError
        # for pods that have no PVC-backed volume at all
        backed_by_pvc = False
        for volumes in pod_volumes:
            pvc = volumes.get('persistentVolumeClaim')
            if pvc:
                claim_name = pvc.get('claimName')
                claim_spec_exists = True
                if claim_name in pvc_list:
                    logger.info(
                        f"OCS pod {pod_obj.name} is backed by PVC {claim_name}"
                    )
                    # If backed by PVC, set backed_by_pvc = True
                    backed_by_pvc = True

                    # Check if Mon PVC is mounted as /var/lib/ceph/mon/ceph-x
                    if label == constants.MON_APP_LABEL:
                        mount_point = pod_obj.exec_cmd_on_pod(command="df -kh")
                        assert "/var/lib/ceph/mon/ceph" in mount_point, (
                            f"pvc is not mounted on pod {pod_obj.name}")
                        logger.info(f"PVC {claim_name} is mounted"
                                    f" on pod {pod_obj.name}")
        if not backed_by_pvc:
            logger.error(f"{pod_obj.name} is not backed by designated PVC ")
            all_backed_by_pvc = False

        # If no PVC is mounted, print error and continue checking other pods
        if not claim_spec_exists:
            logger.error(f"No PersistentVolumeClaim spec found in "
                         f"OCS pod {pod_obj.name}")
            claim_found = False

    # Fail the deployment even if a single OCS pod is not backed by a PVC
    assert all_backed_by_pvc, ("One or more pods are not backed by a PVC; "
                               "please check deployment logs")

    # Fail the deployment even if a single OCS pod lacks a PVC claim spec
    assert claim_found, (
        "Claim name doesn't exist in spec of one or more pods; "
        "please check deployment logs")
Example #15
    def test_bulk_clone_performance(self, tmp_path, interface_iterate):
        """
        Creates number of PVCs in a bulk using kube job
        Write 60% of PVC capacity to each one of the created PVCs
        Creates 1 clone per each PVC altogether in a bulk
        Measuring total and csi creation times for bulk of clones

        """
        self.interface = interface_iterate
        job_pod_file, job_pvc_file, job_clone_file = [None, None, None]
        log.info(f"Start creating {self.interface} {self.pvc_count} PVC")

        try:
            pvc_dict_list = scale_lib.construct_pvc_creation_yaml_bulk_for_kube_job(
                no_of_pvc=self.pvc_count,
                access_mode=Interfaces_info[self.interface]["accessmode"],
                sc_name=Interfaces_info[self.interface]["sc_name"],
                pvc_size=self.vol_size,
            )

            job_pvc_file = ObjectConfFile(
                name="job_profile_pvc",
                obj_dict_list=pvc_dict_list,
                project=self.namespace,
                tmp_path=tmp_path,
            )

            # Create kube_job
            job_pvc_file.create(namespace=self.namespace)

            # Check all the PVC reached Bound state
            performance_lib.wait_for_resource_bulk_status(
                resource="pvc",
                resource_count=self.pvc_count,
                namespace=self.namespace,
                status=constants.STATUS_BOUND,
                timeout=120,
                sleep_time=5,
            )
            log.info(
                f"All the PVCs ({self.pvc_count}) was created and are in Bound state"
            )

            # Getting the list of the PVC names
            pvc_bound_list = [
                p.name for p in pvc.get_all_pvc_objs(namespace=self.namespace)
            ]

            # Kube_job to Create pod
            log.info(
                "Attaching PODs to the PVCs and filling them with data (60%)")
            pod_dict_list = self.attach_pvcs_to_pod_dict(pvc_bound_list)
            job_pod_file = ObjectConfFile(
                name="job_profile_pod",
                obj_dict_list=pod_dict_list,
                project=self.namespace,
                tmp_path=tmp_path,
            )
            job_pod_file.create(namespace=self.namespace)

            # Check all PODs are in Completed state
            performance_lib.wait_for_resource_bulk_status(
                resource="pod",
                resource_count=self.pvc_count,
                namespace=self.namespace,
                status=constants.STATUS_COMPLETED,
                timeout=1200,
                sleep_time=30,
            )
            log.info("All the PODs completed writing data to the PVC's")

            clone_dict_list = scale_lib.construct_pvc_clone_yaml_bulk_for_kube_job(
                pvc_dict_list,
                Interfaces_info[self.interface]["clone_yaml"],
                Interfaces_info[self.interface]["sc_name"],
            )

            log.info("Created clone dict list")

            csi_bulk_start_time = self.get_time(time_format="csi")

            job_clone_file = ObjectConfFile(
                name="job_profile_clone",
                obj_dict_list=clone_dict_list,
                project=self.namespace,
                tmp_path=tmp_path,
            )

            # Create kube_job that creates clones
            job_clone_file.create(namespace=self.namespace)

            log.info("Going to check bound status for clones")
            # Check all the clones reached Bound state
            try:
                performance_lib.wait_for_resource_bulk_status(
                    resource="pvc",
                    resource_count=self.pvc_count * 2,
                    namespace=self.namespace,
                    status=constants.STATUS_BOUND,
                    timeout=1200,
                    sleep_time=30,
                )
            except Exception as ex:
                log.error("Failed to cvreate clones for PVCs")
                raise ex

            log.info(
                f"All the Clones ({self.pvc_count}) was created and are in Bound state"
            )

            all_pvc_objs = pvc.get_all_pvc_objs(namespace=self.namespace)
            clone_objs = [
                cl for cl in all_pvc_objs if re.match("clone", cl.name)
            ]
            for clone_yaml in clone_dict_list:
                name = clone_yaml["metadata"]["name"]
                size = clone_yaml["spec"]["resources"]["requests"]["storage"]
                log.info(f"Clone {name} of size {size} created")

            start_time = get_provision_time(self.interface,
                                            clone_objs,
                                            status="start")
            end_time = get_provision_time(self.interface,
                                          clone_objs,
                                          status="end")
            total_time = (end_time - start_time).total_seconds()
            speed = round(self.total_files_size / total_time, 2)

            csi_creation_time = performance_lib.csi_bulk_pvc_time_measure(
                self.interface, clone_objs, "create", csi_bulk_start_time)

            log.info(
                f"Total creation time = {total_time} secs, csi creation time = {csi_creation_time},"
                f" data size = {self.total_files_size} MB, speed = {speed} MB/sec "
                f"for {self.interface} clone in bulk of {self.pvc_count} clones."
            )

            # Produce ES report
            # Collecting environment information
            self.get_env_info()

            # Initialize the results' doc file.
            full_results = self.init_full_results(
                ResultsAnalyse(
                    self.uuid,
                    self.crd_data,
                    self.full_log_path,
                    "bulk_clone_perf_fullres",
                ))

            full_results.add_key("interface", self.interface)
            full_results.add_key("bulk_size", self.pvc_count)
            full_results.add_key("clone_size", self.vol_size)
            full_results.add_key("bulk_creation_time", total_time)
            full_results.add_key("bulk_csi_creation_time", csi_creation_time)
            full_results.add_key("data_size(MB)", self.total_files_size)
            full_results.add_key("speed", speed)
            full_results.add_key("es_results_link",
                                 full_results.results_link())

            # Write the test results into the ES server
            full_results.es_write()
            self.results_path = get_full_test_logs_path(cname=self)
            res_link = full_results.results_link()
            # write the ES link to the test results in the test log.
            log.info(f"The result can be found at : {res_link}")

            # Create text file with results of all subtest (3 - according to the parameters)
            self.write_result_to_file(res_link)

        # The finally block runs whether the try block passed or failed,
        # so the resources created above are always cleaned up.
        finally:
            # Cleanup activities
            log.info(
                "Cleanup of all the resources created during test execution")
            for object_file in [job_pod_file, job_clone_file, job_pvc_file]:
                if object_file:
                    object_file.delete(namespace=self.namespace)
                    try:
                        object_file.wait_for_delete(
                            resource_name=object_file.name,
                            namespace=self.namespace)
                    except Exception:
                        log.error(f"{object_file['name']} didnt deleted !")

            # Check ceph health status
            utils.ceph_health_check(tries=20)
Example #16
def uninstall_cluster_logging():
    """
    Function to uninstall cluster-logging from the cluster
    Deletes the project "openshift-logging" and "openshift-operators-redhat"
    """
    # Validating the pods before deleting the instance
    pod_list = get_all_pods(namespace=constants.OPENSHIFT_LOGGING_NAMESPACE)

    for pod in pod_list:
        logger.info(
            f"Pods running in the openshift-logging namespace {pod.name}")

    # Excluding cluster-logging-operator from pod_list and getting pod names
    pod_names_list = [
        pod.name for pod in pod_list
        if not pod.name.startswith("cluster-logging-operator")
    ]
    pvc_objs = get_all_pvc_objs(
        namespace=constants.OPENSHIFT_LOGGING_NAMESPACE)

    # Fetch image uuid associated with PVCs to be deleted
    pvc_uuid_map = {}
    for pvc_obj in pvc_objs:
        pvc_uuid_map[pvc_obj.name] = pvc_obj.image_uuid

    # Checking for used space
    cbp_name = default_ceph_block_pool()
    used_space_before_deletion = fetch_used_size(cbp_name)
    logger.info(
        f"Used space before deletion of cluster logging {used_space_before_deletion}"
    )

    # Deleting the clusterlogging instance
    clusterlogging_obj = ocp.OCP(
        kind=constants.CLUSTER_LOGGING,
        namespace=constants.OPENSHIFT_LOGGING_NAMESPACE)
    assert clusterlogging_obj.delete(resource_name="instance")

    check_pod_vanished(pod_names_list)
    # Collect the backing PV objects before the PVCs are deleted
    pv_objs = [pvc_obj.backed_pv_obj for pvc_obj in pvc_objs]

    assert delete_pvcs(pvc_objs=pvc_objs), "PVCs deletion failed"

    for pvc_obj in pvc_objs:
        pvc_obj.ocp.wait_for_delete(resource_name=pvc_obj.name, timeout=300)
    for pv_obj in pv_objs:
        pv_obj.ocp.wait_for_delete(resource_name=pv_obj.name, timeout=300)
    logger.info("Verified: PVCs are deleted.")
    logger.info("Verified: PV are deleted")

    for pvc_name, uuid in pvc_uuid_map.items():
        rbd = verify_volume_deleted_in_backend(
            interface=constants.CEPHBLOCKPOOL,
            image_uuid=uuid,
            pool_name=cbp_name)
        assert rbd, f"Volume associated with PVC {pvc_name} still exists " f"in backend"

    # Checking for used space after PVC deletion
    used_space_after_deletion = fetch_used_size(cbp_name, exp_val=30)
    logger.info(
        f"Used space after deletion of cluster logging {used_space_after_deletion}"
    )
    if used_space_after_deletion < used_space_before_deletion:
        logger.info("Expected: space has been reclaimed")
    else:
        logger.warning(
            "Unexpected: no space was reclaimed after deletion of the PVCs")

    # Deleting the RBAC permission set
    rbac_role = ocp.OCP(
        kind=constants.ROLE,
        namespace=constants.OPENSHIFT_OPERATORS_REDHAT_NAMESPACE)
    rbac_role.delete(yaml_file=constants.EO_RBAC_YAML)

    openshift_logging_namespace = ocp.OCP(
        kind=constants.NAMESPACES,
        namespace=constants.OPENSHIFT_LOGGING_NAMESPACE)
    openshift_operators_redhat_namespace = ocp.OCP(
        kind=constants.NAMESPACES,
        namespace=constants.OPENSHIFT_OPERATORS_REDHAT_NAMESPACE,
    )

    if openshift_operators_redhat_namespace.get():
        assert openshift_operators_redhat_namespace.delete(
            resource_name=constants.OPENSHIFT_OPERATORS_REDHAT_NAMESPACE)
        logger.info(
            "The project openshift-operators-redhat was deleted successfully")

    if openshift_logging_namespace.get():
        assert openshift_logging_namespace.delete(
            resource_name=constants.OPENSHIFT_LOGGING_NAMESPACE)
        logger.info("The namespace openshift-logging got deleted successfully")
Example #17
    def test_create_resize_delete_pvc(
        self,
        project_factory,
        teardown_factory,
        setup_ui,
        sc_name,
        access_mode,
        pvc_size,
        vol_mode,
    ):
        """
        Test create, resize and delete pvc via UI

        """
        # Creating a test project via CLI
        pro_obj = project_factory()
        project_name = pro_obj.namespace

        pvc_ui_obj = PvcUI(setup_ui)

        # Creating PVC via UI
        pvc_name = create_unique_resource_name("test", "pvc")
        pvc_ui_obj.create_pvc_ui(
            project_name, sc_name, pvc_name, access_mode, pvc_size, vol_mode
        )

        pvc_objs = get_all_pvc_objs(namespace=project_name)
        pvc = [pvc_obj for pvc_obj in pvc_objs if pvc_obj.name == pvc_name]

        assert pvc[0].size == int(pvc_size), (
            f"size error| expected size:{pvc_size} \n "
            f"actual size:{str(pvc[0].size)}"
        )

        assert pvc[0].get_pvc_access_mode == access_mode, (
            f"access mode error| expected access mode:{access_mode} "
            f"\n actual access mode:{pvc[0].get_pvc_access_mode}"
        )

        assert pvc[0].backed_sc == sc_name, (
            f"storage class error| expected storage class:{sc_name} "
            f"\n actual storage class:{pvc[0].backed_sc}"
        )

        assert pvc[0].get_pvc_vol_mode == vol_mode, (
            f"volume mode error| expected volume mode:{vol_mode} "
            f"\n actual volume mode:{pvc[0].get_pvc_vol_mode}"
        )

        # Verifying PVC via UI
        logger.info("Verifying PVC Details via UI")
        pvc_ui_obj.verify_pvc_ui(
            pvc_size=pvc_size,
            access_mode=access_mode,
            vol_mode=vol_mode,
            sc_name=sc_name,
            pvc_name=pvc_name,
            project_name=project_name,
        )
        logger.info("PVC Details Verified via UI..!!")

        # Creating Pod via CLI
        logger.info("Creating Pod")
        if sc_name in (constants.DEFAULT_STORAGECLASS_RBD,):
            interface_type = constants.CEPHBLOCKPOOL
        else:
            interface_type = constants.CEPHFILESYSTEM

        new_pod = helpers.create_pod(
            interface_type=interface_type,
            pvc_name=pvc_name,
            namespace=project_name,
            raw_block_pv=vol_mode == constants.VOLUME_MODE_BLOCK,
        )

        logger.info(f"Waiting for Pod: state= {constants.STATUS_RUNNING}")
        wait_for_resource_state(resource=new_pod, state=constants.STATUS_RUNNING)

        # Calling the Teardown Factory Method to make sure Pod is deleted
        teardown_factory(new_pod)

        # Expanding the PVC
        logger.info("Pvc Resizing")
        new_size = int(pvc_size) + 3
        pvc_ui_obj.pvc_resize_ui(
            pvc_name=pvc_name, new_size=new_size, project_name=project_name
        )

        assert new_size > int(
            pvc_size
        ), f"New size of the PVC cannot be less than existing size: new size is {new_size})"

        ocp_version = get_ocp_version()
        self.pvc_loc = locators[ocp_version]["pvc"]

        # Verifying PVC expansion
        logger.info("Verifying PVC resize")
        expected_capacity = f"{new_size} GiB"
        pvc_resize = pvc_ui_obj.verify_pvc_resize_ui(
            project_name=project_name,
            pvc_name=pvc_name,
            expected_capacity=expected_capacity,
        )

        assert pvc_resize, "PVC resize failed"
        logger.info(
            "Pvc resize verified..!!"
            f"New Capacity after PVC resize is {expected_capacity}"
        )

        # Running FIO
        logger.info("Execute FIO on a Pod")
        if vol_mode == constants.VOLUME_MODE_BLOCK:
            storage_type = constants.WORKLOAD_STORAGE_TYPE_BLOCK
        else:
            storage_type = constants.WORKLOAD_STORAGE_TYPE_FS

        new_pod.run_io(storage_type, size=(new_size - 1), invalidate=0, rate="1000m")

        get_fio_rw_iops(new_pod)
        logger.info("FIO execution on Pod successfully completed..!!")

        # Checking if the Pod is deleted or not
        new_pod.delete(wait=True)
        new_pod.ocp.wait_for_delete(resource_name=new_pod.name)

        # Deleting the PVC via UI
        logger.info(f"Delete {pvc_name} pvc")
        pvc_ui_obj.delete_pvc_ui(pvc_name, project_name)

        pvc[0].ocp.wait_for_delete(pvc_name, timeout=120)

        pvc_objs = get_all_pvc_objs(namespace=project_name)
        pvcs = [pvc_obj for pvc_obj in pvc_objs if pvc_obj.name == pvc_name]
        assert len(pvcs) == 0, f"PVC {pvc_name} was not deleted"
Example #18
    def test_bulk_clone_performance(self, namespace, tmp_path, pod_factory):
        """
        Creates number of PVCs in a bulk using kube job
        Write 60% of PVC capacity to each one of the created PVCs
        Creates 1 clone per each PVC altogether in a bulk
        Measuring time for bulk of clones creation

        """
        pvc_count = 50
        log.info(f"Start creating {self.interface} {pvc_count} PVC")
        if self.interface == constants.CEPHBLOCKPOOL:
            sc_name = constants.DEFAULT_STORAGECLASS_RBD
            clone_yaml = constants.CSI_RBD_PVC_CLONE_YAML
        elif self.interface == constants.CEPHFILESYSTEM:
            sc_name = constants.DEFAULT_STORAGECLASS_CEPHFS
            clone_yaml = constants.CSI_CEPHFS_PVC_CLONE_YAML

        pvc_dict_list = scale_lib.construct_pvc_creation_yaml_bulk_for_kube_job(
            no_of_pvc=pvc_count,
            access_mode=constants.ACCESS_MODE_RWO,
            sc_name=sc_name,
            pvc_size="5Gi",
        )

        job_pvc_file = ObjectConfFile(
            name="job_profile_pvc",
            obj_dict_list=pvc_dict_list,
            project=self.namespace,
            tmp_path=tmp_path,
        )

        # Create kube_job
        job_pvc_file.create(namespace=self.namespace)

        # Check all the PVC reached Bound state
        pvc_bound_list = scale_lib.check_all_pvc_reached_bound_state_in_kube_job(
            kube_job_obj=job_pvc_file,
            namespace=self.namespace,
            no_of_pvc=pvc_count,
        )

        logging.info(f"Number of PVCs in Bound state {len(pvc_bound_list)}")

        total_files_size = self.run_fio_on_pvcs(pvc_dict_list, pod_factory)

        clone_dict_list = scale_lib.construct_pvc_clone_yaml_bulk_for_kube_job(
            pvc_dict_list, clone_yaml, sc_name)

        logging.info("Created clone dict list")

        job_clone_file = ObjectConfFile(
            name="job_profile_clone",
            obj_dict_list=clone_dict_list,
            project=self.namespace,
            tmp_path=tmp_path,
        )

        # Create kube_job that creates clones
        job_clone_file.create(namespace=self.namespace)

        logging.info("Going to check bound status for clones")
        # Check all the clones reached Bound state
        clone_bound_list = scale_lib.check_all_pvc_reached_bound_state_in_kube_job(
            kube_job_obj=job_clone_file,
            namespace=self.namespace,
            no_of_pvc=pvc_count,
            timeout=200,
        )

        logging.info(
            f"Number of clones in Bound state {len(clone_bound_list)}")

        clone_objs = []
        all_pvc_objs = pvc.get_all_pvc_objs(namespace=self.namespace)
        for clone_yaml in clone_dict_list:
            name = clone_yaml["metadata"]["name"]
            size = clone_yaml["spec"]["resources"]["requests"]["storage"]
            logging.info(f"Clone {name} of size {size} created")
            for pvc_obj in all_pvc_objs:
                if pvc_obj.name == name:
                    clone_objs.append(pvc_obj)

        assert len(clone_bound_list) == len(
            clone_objs
        ), "Not all clones reached BOUND state, cannot measure time"
        start_time = helpers.get_provision_time(self.interface,
                                                clone_objs,
                                                status="start")
        end_time = helpers.get_provision_time(self.interface,
                                              clone_objs,
                                              status="end")
        total_time = (end_time - start_time).total_seconds()
        speed = round(total_files_size / total_time, 2)
        logging.info(
            f"Total creation time = {total_time} secs, data size = {total_files_size} MB, speed = {speed} MB/sec "
            f"for {self.interface} clone in bulk of {pvc_count} clones.")
Example #19
    def test_bulk_clone_performance(self, namespace, tmp_path):
        """
        Creates number of PVCs in a bulk using kube job
        Write 60% of PVC capacity to each one of the created PVCs
        Creates 1 clone per each PVC altogether in a bulk
        Measuring total and csi creation times for bulk of clones

        """
        pvc_count = 50
        vol_size = "5Gi"
        job_pod_file, job_pvc_file, job_clone_file = [None, None, None]
        log.info(f"Start creating {self.interface} {pvc_count} PVC")
        if self.interface == constants.CEPHBLOCKPOOL:
            sc_name = constants.DEFAULT_STORAGECLASS_RBD
            clone_yaml = constants.CSI_RBD_PVC_CLONE_YAML
        elif self.interface == constants.CEPHFILESYSTEM:
            sc_name = constants.DEFAULT_STORAGECLASS_CEPHFS
            clone_yaml = constants.CSI_CEPHFS_PVC_CLONE_YAML

        try:
            pvc_dict_list = scale_lib.construct_pvc_creation_yaml_bulk_for_kube_job(
                no_of_pvc=pvc_count,
                access_mode=constants.ACCESS_MODE_RWO,
                sc_name=sc_name,
                pvc_size=vol_size,
            )

            job_pvc_file = ObjectConfFile(
                name="job_profile_pvc",
                obj_dict_list=pvc_dict_list,
                project=self.namespace,
                tmp_path=tmp_path,
            )

            # Create kube_job
            job_pvc_file.create(namespace=self.namespace)

            # Check all the PVC reached Bound state
            pvc_bound_list = scale_lib.check_all_pvc_reached_bound_state_in_kube_job(
                kube_job_obj=job_pvc_file,
                namespace=self.namespace,
                no_of_pvc=pvc_count,
            )

            log.info(f"Number of PVCs in Bound state {len(pvc_bound_list)}")

            # Kube_job to Create pod
            pod_dict_list = scale_lib.attach_multiple_pvc_to_pod_dict(
                pvc_list=pvc_bound_list,
                namespace=self.namespace,
                pvcs_per_pod=1,
                start_io=False,
                pod_yaml=constants.NGINX_POD_YAML,
            )
            job_pod_file = ObjectConfFile(
                name="job_profile_pod",
                obj_dict_list=pod_dict_list,
                project=self.namespace,
                tmp_path=tmp_path,
            )
            job_pod_file.create(namespace=self.namespace)

            # Check all PODs in Running state
            scale_lib.check_all_pod_reached_running_state_in_kube_job(
                kube_job_obj=job_pod_file,
                namespace=self.namespace,
                no_of_pod=len(pod_dict_list),
                timeout=90,
            )
            log.info(f"Number of PODs in Running state {len(pod_dict_list)}")

            total_files_size = self.run_fio_on_pvcs(vol_size)

            clone_dict_list = scale_lib.construct_pvc_clone_yaml_bulk_for_kube_job(
                pvc_dict_list, clone_yaml, sc_name)

            log.info("Created clone dict list")

            csi_bulk_start_time = self.get_time(time_format="csi")

            job_clone_file = ObjectConfFile(
                name="job_profile_clone",
                obj_dict_list=clone_dict_list,
                project=self.namespace,
                tmp_path=tmp_path,
            )

            # Create kube_job that creates clones
            job_clone_file.create(namespace=self.namespace)

            log.info("Going to check bound status for clones")
            # Check all the clones reached Bound state
            clone_bound_list = scale_lib.check_all_pvc_reached_bound_state_in_kube_job(
                kube_job_obj=job_clone_file,
                namespace=self.namespace,
                no_of_pvc=pvc_count,
                timeout=180,
            )

            log.info(
                f"Number of clones in Bound state {len(clone_bound_list)}")

            clone_objs = []
            all_pvc_objs = pvc.get_all_pvc_objs(namespace=self.namespace)
            for clone_yaml in clone_dict_list:
                name = clone_yaml["metadata"]["name"]
                size = clone_yaml["spec"]["resources"]["requests"]["storage"]
                log.info(f"Clone {name} of size {size} created")
                for pvc_obj in all_pvc_objs:
                    if pvc_obj.name == name:
                        clone_objs.append(pvc_obj)

            assert len(clone_bound_list) == len(
                clone_objs
            ), "Not all clones reached BOUND state, cannot measure time"
            start_time = helpers.get_provision_time(self.interface,
                                                    clone_objs,
                                                    status="start")
            end_time = helpers.get_provision_time(self.interface,
                                                  clone_objs,
                                                  status="end")
            total_time = (end_time - start_time).total_seconds()
            speed = round(total_files_size / total_time, 2)

            csi_creation_time = performance_lib.csi_bulk_pvc_time_measure(
                self.interface, clone_objs, "create", csi_bulk_start_time)

            log.info(
                f"Total creation time = {total_time} secs, csi creation time = {csi_creation_time},"
                f" data size = {total_files_size} MB, speed = {speed} MB/sec "
                f"for {self.interface} clone in bulk of {pvc_count} clones.")

            # Produce ES report
            # Collecting environment information
            self.get_env_info()

            # Initialize the results doc file.
            full_results = self.init_full_results(
                ResultsAnalyse(
                    self.uuid,
                    self.crd_data,
                    self.full_log_path,
                    "bulk_clone_perf_fullres",
                ))

            full_results.add_key("interface", self.interface)
            full_results.add_key("bulk_size", pvc_count)
            full_results.add_key("clone_size", vol_size)
            full_results.add_key("bulk_creation_time", total_time)
            full_results.add_key("bulk_csi_creation_time", csi_creation_time)
            full_results.add_key("data_size(MB)", total_files_size)
            full_results.add_key("speed", speed)
            full_results.add_key("es_results_link",
                                 full_results.results_link())

            # Write the test results into the ES server
            full_results.es_write()
            self.results_path = get_full_test_logs_path(cname=self)
            res_link = full_results.results_link()
            # write the ES link to the test results in the test log.
            log.info(f"The result can be found at : {res_link}")

            # Create text file with results of all subtest (3 - according to the parameters)
            self.write_result_to_file(res_link)

        # The finally block runs whether the try block passed or failed,
        # so the resources created above are always cleaned up.
        finally:
            # Cleanup activities
            log.info(
                "Cleanup of all the resources created during test execution")
            if job_pod_file:
                job_pod_file.delete(namespace=self.namespace)
                job_pod_file.wait_for_delete(resource_name=job_pod_file.name,
                                             namespace=self.namespace)

            if job_clone_file:
                job_clone_file.delete(namespace=self.namespace)
                job_clone_file.wait_for_delete(
                    resource_name=job_clone_file.name,
                    namespace=self.namespace)

            if job_pvc_file:
                job_pvc_file.delete(namespace=self.namespace)
                job_pvc_file.wait_for_delete(resource_name=job_pvc_file.name,
                                             namespace=self.namespace)

            # Check ceph health status
            utils.ceph_health_check(tries=20)
Example #20
    def test_all_4_type_pvc_creation_deletion_scale(self, namespace, tmp_path):
        """
        Measure PVC creation time while scaling PVCs of all 4 types.
        A total of 500 PVCs per worker node will be created, split evenly
        across the 4 types (e.g. 375 of each type on a 3-worker cluster).
        Also measure PVC deletion time in the scaled environment.
        """
        scale_pvc_count = scale_lib.get_max_pvc_count()
        log.info(f"Start creating {scale_pvc_count} PVC of all 4 types")
        cephfs_sc_obj = constants.DEFAULT_STORAGECLASS_CEPHFS
        rbd_sc_obj = constants.DEFAULT_STORAGECLASS_RBD

        # Get pvc_dict_list, append all the pvc.yaml dict to pvc_dict_list
        rbd_pvc_dict_list, cephfs_pvc_dict_list = ([] for i in range(2))
        for mode in [constants.ACCESS_MODE_RWO, constants.ACCESS_MODE_RWX]:
            rbd_pvc_dict_list.extend(
                scale_lib.construct_pvc_creation_yaml_bulk_for_kube_job(
                    no_of_pvc=int(scale_pvc_count / 4),
                    access_mode=mode,
                    sc_name=rbd_sc_obj,
                ))
            cephfs_pvc_dict_list.extend(
                scale_lib.construct_pvc_creation_yaml_bulk_for_kube_job(
                    no_of_pvc=int(scale_pvc_count / 4),
                    access_mode=mode,
                    sc_name=cephfs_sc_obj,
                ))

        # There is 2 kube_job for cephfs and rbd PVCs
        job_file_rbd = ObjectConfFile(
            name="rbd_pvc_job",
            obj_dict_list=rbd_pvc_dict_list,
            project=self.namespace,
            tmp_path=tmp_path,
        )
        job_file_cephfs = ObjectConfFile(
            name="cephfs_pvc_job",
            obj_dict_list=cephfs_pvc_dict_list,
            project=self.namespace,
            tmp_path=tmp_path,
        )

        # Create kube_job
        job_file_rbd.create(namespace=self.namespace)
        job_file_cephfs.create(namespace=self.namespace)

        # Check all the PVC reached Bound state
        rbd_pvc_name = scale_lib.check_all_pvc_reached_bound_state_in_kube_job(
            kube_job_obj=job_file_rbd,
            namespace=self.namespace,
            no_of_pvc=int(scale_pvc_count / 2),
        )
        fs_pvc_name = scale_lib.check_all_pvc_reached_bound_state_in_kube_job(
            kube_job_obj=job_file_cephfs,
            namespace=self.namespace,
            no_of_pvc=int(scale_pvc_count / 2),
        )

        # Get pvc objs from namespace, which is used to identify backend pv
        rbd_pvc_obj, cephfs_pvc_obj = ([] for i in range(2))
        pvc_objs = pvc.get_all_pvc_objs(namespace=self.namespace)
        for pvc_obj in pvc_objs:
            if pvc_obj.backed_sc == constants.DEFAULT_STORAGECLASS_RBD:
                rbd_pvc_obj.append(pvc_obj)
            elif pvc_obj.backed_sc == constants.DEFAULT_STORAGECLASS_CEPHFS:
                cephfs_pvc_obj.append(pvc_obj)

        # Get PVC creation time
        fs_pvc_create_time = helpers.measure_pvc_creation_time_bulk(
            interface=constants.CEPHFS_INTERFACE, pvc_name_list=fs_pvc_name)
        rbd_pvc_create_time = helpers.measure_pvc_creation_time_bulk(
            interface=constants.CEPHBLOCKPOOL, pvc_name_list=rbd_pvc_name)
        fs_pvc_create_time.update(rbd_pvc_create_time)

        # TODO: Update below code with google API, to record value in spreadsheet
        # TODO: For now observing Google API limit to write more than 100 writes
        log_path = f"{ocsci_log_path()}/All-type-PVC"
        with open(f"{log_path}-creation-time.csv", "w") as fd:
            csv_obj = csv.writer(fd)
            for k, v in fs_pvc_create_time.items():
                csv_obj.writerow([k, v])
        log.info(f"Create data present in {log_path}-creation-time.csv file")

        # Get pv_name, require pv_name to fetch deletion time data from log
        rbd_pv_list, fs_pv_list = ([] for i in range(2))
        get_rbd_kube_job = job_file_rbd.get(namespace=self.namespace)
        for i in range(int(scale_pvc_count / 2)):
            rbd_pv_list.append(
                get_rbd_kube_job["items"][i]["spec"]["volumeName"])

        get_fs_kube_job = job_file_cephfs.get(namespace=self.namespace)
        for i in range(int(scale_pvc_count / 2)):
            fs_pv_list.append(
                get_fs_kube_job["items"][i]["spec"]["volumeName"])

        # Delete kube_job
        job_file_rbd.delete(namespace=self.namespace)
        job_file_cephfs.delete(namespace=self.namespace)

        # Adding 1min wait time for PVC deletion logs to be updated
        # Observed failure when we immediately check the logs for pvc delete time
        # https://github.com/red-hat-storage/ocs-ci/issues/3371
        time.sleep(60)

        # Get PV deletion time
        fs_pvc_deletion_time = helpers.measure_pv_deletion_time_bulk(
            interface=constants.CEPHFS_INTERFACE, pv_name_list=fs_pv_list)
        rbd_pvc_deletion_time = helpers.measure_pv_deletion_time_bulk(
            interface=constants.CEPHBLOCKPOOL, pv_name_list=rbd_pv_list)
        fs_pvc_deletion_time.update(rbd_pvc_deletion_time)

        # TODO: Update below code with google API, to record value in spreadsheet
        # TODO: For now observing Google API limit to write more than 100 writes
        with open(f"{log_path}-deletion-time.csv", "w") as fd:
            csv_obj = csv.writer(fd)
            for k, v in fs_pvc_deletion_time.items():
                csv_obj.writerow([k, v])
        log.info(f"Delete data present in {log_path}-deletion-time.csv file")
        end_time = default_timer()
        log.info(f"Elapsed time -- {end_time - self.start_time} seconds")
Example #21
    def teardown(self):
        pvc_objs = get_all_pvc_objs(namespace="openshift-storage")
        pvcs = [pvc_obj for pvc_obj in pvc_objs if "test-pvc" in pvc_obj.name]
        delete_pvcs(pvc_objs=pvcs)
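
A hedged variant of the teardown above that also waits for the deletions to
finish, reusing only calls shown in earlier examples (wait_for_delete appears
with this signature in Examples #2 and #11):

    def teardown_and_wait(self):
        pvc_objs = get_all_pvc_objs(namespace="openshift-storage")
        pvcs = [pvc_obj for pvc_obj in pvc_objs if "test-pvc" in pvc_obj.name]
        delete_pvcs(pvc_objs=pvcs)
        for pvc_obj in pvcs:
            pvc_obj.ocp.wait_for_delete(resource_name=pvc_obj.name, timeout=300)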