Example #1
    def destroy_ocs(self):
        """
        Uninstall the ODF Managed Service addon via the rosa CLI.
        """
        cluster_namespace = config.ENV_DATA["cluster_namespace"]

        # Deleting PVCs
        rbd_pvcs = [
            p for p in pvc.get_all_pvcs_in_storageclass(
                constants.CEPHBLOCKPOOL_SC)
            if not (p.data["metadata"]["namespace"] == cluster_namespace
                    and p.data["metadata"]["labels"]["app"] == "noobaa")
        ]
        pvc.delete_pvcs(rbd_pvcs)
        cephfs_pvcs = pvc.get_all_pvcs_in_storageclass(
            constants.CEPHFILESYSTEM_SC)
        pvc.delete_pvcs(cephfs_pvcs)
        rosa.delete_odf_addon(self.cluster_name)
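The `app` label lookup in the filter above raises KeyError for any PVC that carries no labels; a defensive sketch of the same predicate using dict.get (the helper name is hypothetical, not part of ocs-ci):

def is_noobaa_pvc(p, cluster_namespace):
    # Hypothetical helper: same predicate as the filter in Example #1,
    # but tolerant of PVCs that have no labels at all.
    metadata = p.data.get("metadata", {})
    labels = metadata.get("labels") or {}
    return (
        metadata.get("namespace") == cluster_namespace
        and labels.get("app") == "noobaa"
    )

rbd_pvcs = [
    p
    for p in pvc.get_all_pvcs_in_storageclass(constants.CEPHBLOCKPOOL_SC)
    if not is_noobaa_pvc(p, cluster_namespace)
]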
Example #2
def wait_for_mirroring_status_ok(replaying_images=None, timeout=300):
    """
    Wait for the mirroring status to reach health OK and the expected number of
    replaying images on each of the ODF clusters

    Args:
        replaying_images (int): Expected number of images in replaying state
        timeout (int): Time in seconds to wait for the mirroring status to reach OK

    Returns:
        bool: True if status contains expected health and states values

    Raises:
        TimeoutExpiredError: In case of unexpected mirroring status

    """
    restore_index = config.cur_index
    if not replaying_images:
        replaying_images = 0
        for cluster in get_non_acm_cluster_config():
            config.switch_ctx(cluster.MULTICLUSTER["multicluster_index"])
            replaying_images += len(
                get_all_pvcs_in_storageclass(constants.CEPHBLOCKPOOL_SC)
            )
        replaying_images -= 2  # Ignore db-noobaa-db-pg-0 PVCs

    for cluster in get_non_acm_cluster_config():
        config.switch_ctx(cluster.MULTICLUSTER["multicluster_index"])
        logger.info(
            f"Validating mirroring status on cluster {cluster.ENV_DATA['cluster_name']}"
        )
        sample = TimeoutSampler(
            timeout=timeout,
            sleep=5,
            func=check_mirroring_status_ok,
            replaying_images=replaying_images,
        )
        if not sample.wait_for_func_status(result=True):
            error_msg = (
                "The mirroring status does not have expected values within the time"
                f" limit on cluster {cluster.ENV_DATA['cluster_name']}"
            )
            logger.error(error_msg)
            raise TimeoutExpiredError(error_msg)

    config.switch_ctx(restore_index)
    return True
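A minimal usage sketch for the function above; the image count, timeout, and log text are illustrative, not taken from a real test:

try:
    # e.g. two managed clusters with five replaying images each.
    wait_for_mirroring_status_ok(replaying_images=10, timeout=600)
except TimeoutExpiredError:
    logger.error("Mirroring did not reach health OK; failing the test")
    raise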
Example #3
def uninstall_ocs():
    """
    The function uninstalls the OCS operator from an OpenShift
    cluster and removes all its settings and dependencies

    """
    ocp_obj = ocp.OCP()

    log.info("deleting volume snapshots")
    vs_ocp_obj = ocp.OCP(kind=constants.VOLUMESNAPSHOT)
    vs_list = vs_ocp_obj.get(all_namespaces=True)["items"]
    for vs in vs_list:
        vs_obj = ocp.OCP(kind=constants.VOLUMESNAPSHOT,
                         namespace=vs.get("metadata").get("namespace"))
        vs_obj.delete(resource_name=vs.get("metadata").get("name"))

    log.info("queering for OCS PVCs")
    provisioners = constants.OCS_PROVISIONERS
    sc_list = [
        sc for sc in get_all_storageclass()
        if sc.get("provisioner") in provisioners
    ]

    pvc_to_delete = []
    for sc in sc_list:
        pvc_to_delete.extend(pvc for pvc in get_all_pvcs_in_storageclass(
            sc.get("metadata").get("name")) if "noobaa" not in pvc.name)

    if config.ENV_DATA["platform"].lower() == constants.ROSA_PLATFORM:
        log.info("Deleting OCS PVCs")
        for pvc in pvc_to_delete:
            log.info(f"Deleting PVC: {pvc.name}")
            pvc.delete()
        rosa.delete_odf_addon(config.ENV_DATA["cluster_name"])
        return None
    log.info("Removing monitoring stack from OpenShift Container Storage")
    remove_monitoring_stack_from_ocs()

    log.info(
        "Removing OpenShift Container Platform registry from OpenShift Container Storage"
    )
    remove_ocp_registry_from_ocs(config.ENV_DATA["platform"])

    log.info(
        "Removing the cluster logging operator from OpenShift Container Storage"
    )
    try:
        remove_cluster_logging_operator_from_ocs()
    except CommandFailed:
        log.info("No cluster logging found")

    log.info("Deleting OCS PVCs")
    for pvc in pvc_to_delete:
        log.info(f"Deleting PVC: {pvc.name}")
        pvc.delete()

    storage_cluster = ocp.OCP(
        kind=constants.STORAGECLUSTER,
        resource_name=constants.DEFAULT_CLUSTERNAME,
        namespace="openshift-storage",
    )

    log.info("Checking for local storage")
    lso_sc = None
    if check_local_volume_local_volume_set():
        "Local volume was found. Will be removed later"
        lso_sc = (storage_cluster.get().get("spec").get("storageDeviceSets")[0]
                  .get("dataPVCTemplate").get("spec").get("storageClassName"))

    cleanup_policy = (storage_cluster.get().get("metadata").get(
        "annotations").get("uninstall.ocs.openshift.io/cleanup-policy"))

    log.info("Deleting storageCluster object")
    storage_cluster.delete(resource_name=constants.DEFAULT_CLUSTERNAME)

    if cleanup_policy == "delete":
        log.info("Cleanup policy set to delete. checking cleanup pods")
        cleanup_pods = [
            pod for pod in get_all_pods() if "cluster-cleanup-job" in pod.name
        ]
        for pod in cleanup_pods:
            while pod.get().get("status").get("phase") != "Succeeded":
                log.info(f"Waiting for cleanup pod {pod.name} to complete")
                time.sleep(30)
            log.info(f"Cleanup pod {pod.name} completed successfully")
        # No need to confirm /var/lib/rook was deleted from the nodes if all cleanup pods completed.
    else:
        log.info("Cleanup policy set to retain. skipping nodes cleanup")

    log.info("Deleting openshift-storage namespace")
    ocp_obj.delete_project(constants.OPENSHIFT_STORAGE_NAMESPACE)
    ocp_obj.wait_for_delete(constants.OPENSHIFT_STORAGE_NAMESPACE)
    switch_to_project(constants.DEFAULT_NAMESPACE)

    # step 10: TODO remove crypto from nodes.
    """for node in storage_node_list:
        log.info(f"removing encryption from {node}")
        ocp_obj.exec_oc_debug_cmd(node=node, cmd_list=[])"""

    if lso_sc is not None:
        log.info("Removing LSO")
        try:
            uninstall_lso(lso_sc)
        except Exception as e:
            log.info(f"LSO removal failed.{e}")

    log.info("deleting noobaa storage class")
    noobaa_sc = ocp.OCP(kind=constants.STORAGECLASS)
    noobaa_sc.delete(resource_name=constants.NOOBAA_SC)

    nodes = get_all_nodes()
    node_objs = get_node_objs(nodes)

    log.info("Unlabeling storage nodes")
    label_nodes(nodes=node_objs,
                label=constants.OPERATOR_NODE_LABEL[:-3] + "-")
    label_nodes(nodes=node_objs, label=constants.TOPOLOGY_ROOK_LABEL + "-")

    log.info("Removing taints from storage nodes")
    taint_nodes(nodes=nodes, taint_label=constants.OPERATOR_NODE_TAINT + "-")

    log.info("Deleting remaining OCS PVs (if there are any)")
    try:
        rbd_pv = ocp.OCP(kind=constants.PV,
                         resource_name="ocs-storagecluster-ceph-rbd")
        fs_pv = ocp.OCP(kind=constants.PV,
                        resource_name="ocs-storagecluster-cephfs")
        rbd_pv.delete()
        fs_pv.delete()
        log.info("OCS PVs deleted")
    except Exception as e:
        log.info(f"OCS PV(s) not found. {e}")

    log.info("Removing CRDs")
    crd_list = [
        "backingstores.noobaa.io",
        "bucketclasses.noobaa.io",
        "cephblockpools.ceph.rook.io",
        "cephclusters.ceph.rook.io",
        "cephfilesystems.ceph.rook.io",
        "cephnfses.ceph.rook.io",
        "cephobjectstores.ceph.rook.io",
        "cephobjectstoreusers.ceph.rook.io",
        "noobaas.noobaa.io",
        "ocsinitializations.ocs.openshift.io",
        "storageclusters.ocs.openshift.io",
        "cephclients.ceph.rook.io",
        "cephobjectrealms.ceph.rook.io",
        "cephobjectzonegroups.ceph.rook.io",
        "cephobjectzones.ceph.rook.io",
        "cephrbdmirrors.ceph.rook.io",
    ]

    for crd in crd_list:
        try:
            ocp_obj.exec_oc_cmd(f"delete crd {crd} --timeout=300m")
        except Exception:
            log.info(f"crd {crd} was not found")
Example #4
def uninstall_ocs():
    """
    The function uninstalls the OCS operator from an OpenShift
    cluster and removes all its settings and dependencies

    """
    ocp_obj = ocp.OCP()
    provisioners = constants.OCS_PROVISIONERS

    # List the storage classes
    sc_list = [
        sc for sc in get_all_storageclass()
        if sc.get('provisioner') in provisioners
    ]

    # Query for PVCs and OBCs that are using the storage class provisioners listed in the previous step.
    pvc_to_delete = []
    for sc in sc_list:
        pvc_to_delete.extend(pvc for pvc in get_all_pvcs_in_storageclass(
            sc.get('metadata').get('name')) if 'noobaa' not in pvc.name)

    log.info("Removing monitoring stack from OpenShift Container Storage")
    remove_monitoring_stack_from_ocs()

    log.info(
        "Removing OpenShift Container Platform registry from OpenShift Container Storage"
    )
    remove_ocp_registry_from_ocs(config.ENV_DATA['platform'])

    log.info(
        "Removing the cluster logging operator from OpenShift Container Storage"
    )
    try:
        remove_cluster_logging_operator_from_ocs()
    except CommandFailed:
        log.info("No cluster logging found")

    log.info("Deleting pvcs")
    for pvc in pvc_to_delete:
        log.info(f"Deleting pvc: {pvc.name}")
        pvc.delete()

    storage_cluster = ocp.OCP(kind=constants.STORAGECLUSTER,
                              resource_name=constants.DEFAULT_CLUSTERNAME,
                              namespace='openshift-storage')

    log.info("Checking for local storage")
    lso_sc = None
    if check_local_volume():
        "Local volume was found. Will be removed later"
        lso_sc = storage_cluster.get().get('spec').get('storageDeviceSets')[
            0].get('dataPVCTemplate').get('spec').get('storageClassName')

    log.info("Deleting storageCluster object")
    storage_cluster.delete(resource_name=constants.DEFAULT_CLUSTERNAME)

    log.info("Removing CRDs")
    crd_list = [
        'backingstores.noobaa.io', 'bucketclasses.noobaa.io',
        'cephblockpools.ceph.rook.io', 'cephfilesystems.ceph.rook.io',
        'cephnfses.ceph.rook.io', 'cephobjectstores.ceph.rook.io',
        'cephobjectstoreusers.ceph.rook.io', 'noobaas.noobaa.io',
        'ocsinitializations.ocs.openshift.io',
        'storageclusterinitializations.ocs.openshift.io',
        'storageclusters.ocs.openshift.io', 'cephclusters.ceph.rook.io'
    ]
    for crd in crd_list:
        ocp_obj.exec_oc_cmd(f"delete crd {crd} --timeout=300m")

    log.info("Deleting openshift-storage namespace")
    ocp_obj.delete_project('openshift-storage')
    ocp_obj.wait_for_delete('openshift-storage')
    switch_to_project("default")

    log.info("Removing rook directory from nodes")
    nodes_list = get_labeled_nodes(constants.OPERATOR_NODE_LABEL)
    for node in nodes_list:
        log.info(f"Removing rook from {node}")
        ocp_obj.exec_oc_debug_cmd(node=node, cmd_list=["rm -rf /var/lib/rook"])

    log.info("Removing LSO ")
    if lso_sc is not None:
        uninstall_lso(lso_sc)

    log.info("Deleting the storage classes that use openshift-storage provisioners")
    for storage_class in sc_list:
        log.info(
            f"Deleting storage class {storage_class.get('metadata').get('name')}"
        )
        sc_obj = ocp.OCP(kind=constants.STORAGECLASS)
        sc_obj.delete(resource_name=storage_class.get('metadata').get('name'))

    log.info("Unlabeling storage nodes")
    nodes_list = get_all_nodes()
    for node in nodes_list:
        node_obj = ocp.OCP(kind=constants.NODE, resource_name=node)
        node_obj.add_label(resource_name=node,
                           label=constants.OPERATOR_NODE_LABEL[:-3] + '-')
        node_obj.add_label(resource_name=node,
                           label=constants.TOPOLOGY_ROOK_LABEL + '-')

    log.info("OCS was removed successfully from cluster ")
Example #5
    def test_multiple_sc_comp_rep_data_deletion(self, storageclass_factory,
                                                pvc_factory, pod_factory):
        """
        This test function does below,
        *. Creates 2 Storage Class with creating new rbd pool
        *. Creates PVCs using new Storage Class
        *. Mount PVC to an app pod
        *. Run IO on an app pod
        *. Delete the pods and pvc
        *. Verify that the data is deleted

        """
        log.info("Creating storageclasses with compression and replica3")
        interface_type = constants.CEPHBLOCKPOOL
        sc_obj1 = storageclass_factory(
            interface=interface_type,
            new_rbd_pool=True,
            replica=3,
            compression="aggressive",
        )
        log.info("Creating storageclasses with compression and replica2")
        sc_obj2 = storageclass_factory(
            interface=interface_type,
            new_rbd_pool=True,
            replica=2,
            compression="aggressive",
        )

        sc_obj_list = [sc_obj1, sc_obj2]
        pod_obj_list = []
        pvc_obj_list = []

        log.info("Creating PVCs and PODs")
        for sc_obj in sc_obj_list:
            pvc_obj = pvc_factory(interface=interface_type,
                                  storageclass=sc_obj)
            pvc_obj_list.append(pvc_obj)
            pod_obj_list.append(
                pod_factory(interface=interface_type, pvc=pvc_obj))

        log.info("Running IO on pods")
        for pod_obj in pod_obj_list:
            pod_obj.run_io("fs", size="1G")

        for pod_obj in pod_obj_list:
            get_fio_rw_iops(pod_obj)

        log.info("deleting PODs and PVCs")
        delete_pods(pod_obj_list, wait=True)
        delete_pvcs(pvc_obj_list, concurrent=True)

        log.info("Wait for 15 seconds for all data to delete")
        sleep(15)
        log.info("Checking stats after deleting PODs and PVCs")
        for sc_obj in sc_obj_list:
            pvc_list = get_all_pvcs_in_storageclass(sc_obj.name)
            if len(pvc_list) == 0:
                cbp_name = sc_obj.get()["parameters"]["pool"]
                ceph_pool_byte_used = get_byte_used_by_pool(cbp_name)
                log.info(
                    f"pool {cbp_name} has {ceph_pool_byte_used} bytes used")
                if ceph_pool_byte_used > MAX_BYTES_IN_POOL_AFTER_DATA_DELETE:
                    raise PoolDataNotErased(
                        f"Pool {cbp_name} has {ceph_pool_byte_used} bytes which were not deleted"
                    )
            else:
                raise PvcNotDeleted(f"PVCs {pvc_list} were not deleted")
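The fixed 15-second sleep before the stats check can be flaky on a slow cluster; a sketch of the same verification as a poll inside the per-storage-class loop above, reusing TimeoutSampler from Example #2 (the 120-second budget is illustrative):

def pool_within_threshold(cbp_name):
    # True once the pool's used bytes fall to the post-delete threshold.
    return get_byte_used_by_pool(cbp_name) <= MAX_BYTES_IN_POOL_AFTER_DATA_DELETE

sample = TimeoutSampler(timeout=120, sleep=10, func=pool_within_threshold, cbp_name=cbp_name)
if not sample.wait_for_func_status(result=True):
    raise PoolDataNotErased(f"Pool {cbp_name} still reports data after PVC deletion")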
Example #6
    def destroy_ocs(self):
        """
        Handle OCS destruction. Remove storage classes, PVCs, Storage
        Cluster, Openshift-storage namespace, LocalVolume, unlabel
        worker-storage nodes, delete ocs CRDs, etc.
        """
        cluster_namespace = config.ENV_DATA["cluster_namespace"]

        # https://access.redhat.com/documentation/en-us/red_hat_openshift_container_storage/4.5/html/deploying_openshift_container_storage_using_bare_metal_infrastructure/assembly_uninstalling-openshift-container-storage_rhocs

        # Section 3.1 Step 1
        # Deleting PVCs
        rbd_pvcs = [
            p for p in pvc.get_all_pvcs_in_storageclass(
                constants.CEPHBLOCKPOOL_SC)
            if not (p.data["metadata"]["namespace"] == cluster_namespace
                    and p.data["metadata"]["labels"]["app"] == "noobaa")
        ]
        pvc.delete_pvcs(rbd_pvcs)
        cephfs_pvcs = pvc.get_all_pvcs_in_storageclass(
            constants.CEPHFILESYSTEM_SC)

        # Section 3.1 Step 2
        # Section 3.3 Step 1
        # Removing OpenShift Container Platform registry from OpenShift Container Storage
        registry_conf_name = "configs.imageregistry.operator.openshift.io"
        registry_conf = ocp.OCP().exec_oc_cmd(
            f"get {registry_conf_name} -o yaml")
        if registry_conf["items"][0]["spec"].get("storage", dict()).get("pvc"):
            patch = dict(spec=dict(storage=dict(emptyDir=dict(), pvc=None)))
            ocp.OCP().exec_oc_cmd(
                f"patch {registry_conf_name} cluster --type merge "
                f"-p '{json.dumps(patch)}'")
        # Section 3.3 Step 2
        pvc.delete_pvcs(cephfs_pvcs)

        # Section 3.1 Step 3
        try:
            ocp.OCP().exec_oc_cmd(
                f"delete -n {cluster_namespace} storagecluster --all --wait=true"
            )
        except (CommandFailed, subprocess.TimeoutExpired):
            pass

        # Section 3.1 Step 4
        ocp.OCP().exec_oc_cmd("project default")
        ocp.OCP().exec_oc_cmd(
            f"delete project {cluster_namespace} --wait=true --timeout=5m")
        tried = 0
        leftovers = True
        while tried < 5:
            # We need to loop here until the project can't be found
            try:
                ocp.OCP().exec_oc_cmd(
                    f"get project {cluster_namespace}",
                    out_yaml_format=False,
                )
            except CommandFailed:
                leftovers = False
                break
            time.sleep(60)
            tried += 1
        if leftovers:
            # https://access.redhat.com/documentation/en-us/red_hat_openshift_container_storage/4.5/html/troubleshooting_openshift_container_storage/troubleshooting-and-deleting-remaining-resources-during-uninstall_rhocs
            leftover_types = [
                "cephfilesystem.ceph.rook.io",
                "cephobjectstore.ceph.rook.io",
                "cephobjectstoreuser.ceph.rook.io",
                "storagecluster.ocs.openshift.io",
            ]
            patch = dict(metadata=dict(finalizers=None))
            for obj_type in leftover_types:
                try:
                    objs = ocp.OCP(kind=obj_type).get()
                except CommandFailed:
                    continue
                for obj in objs["items"]:
                    name = obj["metadata"]["name"]
                    ocp.OCP().exec_oc_cmd(
                        f"oc patch -n {cluster_namespace} {obj_type} {name} --type=merge -p '{json.dumps(patch)}'"
                    )

        # Section 3.1 Step 5
        nodes = ocp.OCP().exec_oc_cmd(
            "get node -l cluster.ocs.openshift.io/openshift-storage= -o yaml")
        for node in nodes["items"]:
            node_name = node["metadata"]["name"]
            ocp.OCP().exec_oc_cmd(
                f"debug node/{node_name} -- chroot /host rm -rfv /var/lib/rook"
            )

        # Section 3.1 Step 6
        ocp.OCP().exec_oc_cmd(
            "delete storageclass  openshift-storage.noobaa.io --wait=true --timeout=5m"
        )

        # Section 3.1 Step 7
        ocp.OCP().exec_oc_cmd(
            "label nodes  --all cluster.ocs.openshift.io/openshift-storage-")
        ocp.OCP().exec_oc_cmd("label nodes  --all topology.rook.io/rack-")

        # Section 3.1 Step 8
        pvs = ocp.OCP(kind="PersistentVolume").get()
        for pv in pvs["items"]:
            pv_name = pv["metadata"]["name"]
            if pv_name.startswith("ocs-storagecluster-ceph"):
                ocp.OCP().exec_oc_cmd(f"oc delete pv {pv_name}")

        # Section 3.1 Step 9
        # Note that the below process differs from the documentation slightly.
        # Instead of deleting all CRDs at once and calling the job done, we
        # iterate over a list of them, noting which ones don't delete fully and
        # applying the standard workaround of removing the finalizers from any
        # CRs and also the CRD. Finally, the documentation leaves out a few
        # CRDs that we've seen in deployed clusters.
        crd_types = [
            "backingstores.noobaa.io",
            "bucketclasses.noobaa.io",
            "cephblockpools.ceph.rook.io",
            "cephclients.ceph.rook.io",
            "cephclusters.ceph.rook.io",
            "cephfilesystems.ceph.rook.io",
            "cephnfses.ceph.rook.io",
            "cephobjectrealms.ceph.rook.io",  # not in doc
            "cephobjectstores.ceph.rook.io",
            "cephobjectstoreusers.ceph.rook.io",
            "cephobjectzonegroups.ceph.rook.io",  # not in doc
            "cephobjectzones.ceph.rook.io",  # not in doc
            "cephrbdmirrors.ceph.rook.io",  # not in doc
            "noobaas.noobaa.io",
            "ocsinitializations.ocs.openshift.io",
            "storageclusterinitializations.ocs.openshift.io",
            "storageclusters.ocs.openshift.io",
        ]
        # Finalizers live under metadata for both the CRs and the CRD.
        cr_patch = json.dumps(dict(metadata=dict(finalizers=None)))
        crd_patch = json.dumps(dict(metadata=dict(finalizers=None)))
        for crd_type in crd_types:
            try:
                ocp.OCP().exec_oc_cmd(
                    f"delete crd {crd_type} --wait=true --timeout=30s",
                    out_yaml_format=False,
                )
            except CommandFailed:
                pass
            crs = []
            try:
                crs = ocp.OCP(kind=crd_type).get(all_namespaces=True)["items"]
            except CommandFailed:
                continue
            for cr in crs:
                cr_md = cr["metadata"]
                ocp.OCP().exec_oc_cmd(
                    f"patch -n {cr_md['namespace']} {crd_type} {cr_md['name']} --type=merge -p '{cr_patch}'"
                )
            try:
                # Re-fetch to confirm the CRD still exists; if this fails, the
                # delete above already completed and no CRD patch is needed.
                ocp.OCP(kind=crd_type).get(all_namespaces=True)
            except CommandFailed:
                continue
            ocp.OCP().exec_oc_cmd(
                f"patch crd {crd_type} --type=merge -p '{crd_patch}'")

        # End sections from above documentation
        ocp.OCP().exec_oc_cmd(
            f"delete catalogsource {constants.OPERATOR_CATALOG_SOURCE_NAME} "
            f"-n {constants.MARKETPLACE_NAMESPACE}")

        storageclasses = ocp.OCP(kind="StorageClass").get(all_namespaces=True)
        for sc in storageclasses["items"]:
            if sc["provisioner"].startswith("openshift-storage."):
                sc_name = sc["metadata"]["name"]
                ocp.OCP().exec_oc_cmd(f"delete storageclass {sc_name}")
        volumesnapclasses = ocp.OCP(kind="VolumeSnapshotClass").get(
            all_namespaces=True)
        for vsc in volumesnapclasses["items"]:
            if vsc["driver"].startswith("openshift-storage."):
                vsc_name = vsc["metadata"]["name"]
                ocp.OCP().exec_oc_cmd(f"delete volumesnapshotclass {vsc_name}")

        self.destroy_lso()
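The finalizer workaround described in the Step 9 comment reduces to two merge patches; a condensed sketch for a single stuck CRD (the CRD name is just one entry from the crd_types list above):

crd_type = "cephclusters.ceph.rook.io"
finalizer_patch = json.dumps(dict(metadata=dict(finalizers=None)))

# Clear metadata.finalizers on each CR so Kubernetes can finish deleting it...
for cr in ocp.OCP(kind=crd_type).get(all_namespaces=True)["items"]:
    md = cr["metadata"]
    ocp.OCP().exec_oc_cmd(
        f"patch -n {md['namespace']} {crd_type} {md['name']} "
        f"--type=merge -p '{finalizer_patch}'"
    )
# ...then clear them on the CRD itself so the CRD delete can complete.
ocp.OCP().exec_oc_cmd(f"patch crd {crd_type} --type=merge -p '{finalizer_patch}'")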
Example #7
def uninstall_ocs():
    """
    The function uninstalls the OCS operator from an OpenShift
    cluster and removes all its settings and dependencies

    """
    ocp_obj = ocp.OCP()
    provisioners = constants.OCS_PROVISIONERS

    # List the storage classes backed by OCS provisioners. Filtering with a
    # comprehension avoids mutating sc_list while iterating over it, which
    # would skip elements.
    sc_list = [
        sc for sc in get_all_storageclass()
        if sc.get('provisioner') in provisioners
    ]
    sc_name_list = [sc.get('metadata').get('name') for sc in sc_list]

    # Query for PVCs and OBCs that are using the storage class provisioners
    # listed in the previous step, skipping all noobaa PVCs up front rather
    # than removing them from the list mid-iteration.
    pvc_to_delete = []
    for sc in sc_name_list:
        pvc_to_delete.extend(
            pvc for pvc in get_all_pvcs_in_storageclass(sc)
            if "noobaa" not in pvc.name
        )
    pvc_name_list = [pvc.name for pvc in pvc_to_delete]

    pods_to_delete = []
    all_pods = get_all_pods()  # default openshift-storage namespace
    all_pods.extend(get_all_pods(namespace=constants.OPENSHIFT_IMAGE_REGISTRY_NAMESPACE))
    all_pods.extend(get_all_pods(namespace=constants.OPENSHIFT_MONITORING_NAMESPACE))

    for pod_obj in all_pods:
        try:
            pvc_name = get_pvc_name(pod_obj)
        except UnavailableResourceException:
            continue
        if pvc_name in pvc_name_list:
            pods_to_delete.append(pod_obj)

    log.info("Removing monitoring stack from OpenShift Container Storage")
    remove_monitoring_stack_from_ocs()

    log.info("Removing OpenShift Container Platform registry from OpenShift Container Storage")
    remove_ocp_registry_from_ocs(config.ENV_DATA['platform'])

    log.info("Removing the cluster logging operator from OpenShift Container Storage")
    csv = ocp.OCP(
        kind=constants.CLUSTER_SERVICE_VERSION,
        namespace=constants.OPENSHIFT_LOGGING_NAMESPACE
    )
    logging_csv = csv.get().get('items')
    if logging_csv:
        clusterlogging_obj = ocp.OCP(
            kind=constants.CLUSTER_LOGGING, namespace=constants.OPENSHIFT_LOGGING_NAMESPACE
        )
        clusterlogging_obj.delete(resource_name='instance')

    log.info("deleting pvcs")
    for pvc in pvc_to_delete:
        log.info(f"deleting pvc: {pvc.name}")
        pvc.delete()

    log.info("deleting pods")
    for pod in pods_to_delete:
        log.info(f"deleting pod {pod.name}")
        pod.delete()

    log.info("removing rook directory from nodes")
    nodes_list = get_labeled_nodes(constants.OPERATOR_NODE_LABEL)
    for node in nodes_list:
        log.info(f"removing rook from {node}")
        ocp_obj.exec_oc_debug_cmd(node=node, cmd_list=["rm -rf /var/lib/rook"])

    log.info("Delete the storage classes with an openshift-storage provisioner list")
    for storage_class in sc_list:
        log.info(f"deleting storage class {storage_class.get('metadata').get('name')}")
        sc_obj = ocp.OCP(kind=constants.STORAGECLASS)
        sc_obj.delete(resource_name=storage_class.get('metadata').get('name'))

    log.info("unlabaling storage nodes")
    nodes_list = get_all_nodes()
    for node in nodes_list:
        node_obj = ocp.OCP(kind=constants.NODE, resource_name=node)
        node_obj.add_label(resource_name=node, label=constants.OPERATOR_NODE_LABEL[:-3] + '-')
        node_obj.add_label(resource_name=node, label=constants.TOPOLOGY_ROOK_LABEL + '-')

    log.info("deleting storageCluster object")
    storage_cluster = ocp.OCP(kind=constants.STORAGECLUSTER, resource_name=constants.DEFAULT_CLUSTERNAME)
    storage_cluster.delete(resource_name=constants.DEFAULT_CLUSTERNAME)

    log.info("removing CRDs")
    crd_list = ['backingstores.noobaa.io', 'bucketclasses.noobaa.io', 'cephblockpools.ceph.rook.io',
                'cephfilesystems.ceph.rook.io', 'cephnfses.ceph.rook.io',
                'cephobjectstores.ceph.rook.io', 'cephobjectstoreusers.ceph.rook.io', 'noobaas.noobaa.io',
                'ocsinitializations.ocs.openshift.io', 'storageclusterinitializations.ocs.openshift.io',
                'storageclusters.ocs.openshift.io', 'cephclusters.ceph.rook.io']
    for crd in crd_list:
        ocp_obj.exec_oc_cmd(f"delete crd {crd} --timeout=300m")

    log.info("deleting openshift-storage namespace")
    ocp_obj.delete_project('openshift-storage')
    ocp_obj.wait_for_delete('openshift-storage')
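The pod-to-PVC matching loop near the top of this example generalizes into a small helper; a sketch, assuming the same get_pvc_name and UnavailableResourceException used above:

def pods_using_pvcs(pods, pvc_names):
    """Return the pods that mount any PVC named in pvc_names."""
    matched = []
    for pod_obj in pods:
        try:
            name = get_pvc_name(pod_obj)
        except UnavailableResourceException:
            continue  # pod has no PVC attached
        if name in pvc_names:
            matched.append(pod_obj)
    return matched

pods_to_delete = pods_using_pvcs(all_pods, pvc_name_list)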