def pvc_create_verify(yml):
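    """Create a StorageClass and PVC from the given manifest, verify the
    backing volume on the 3PAR array, mount it in a pod, validate the vlun,
    multipath and lsscsi state on the node, then delete all objects and
    verify cleanup on both the cluster and the array."""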
    secret = None
    sc = None
    pvc = None
    pod = None
    try:
        """array_ip, array_uname, array_pwd = manager.read_array_prop(yml)
        hpe3par_cli = manager.get_3par_cli_client(yml)
        hpe3par_version = manager.get_array_version(hpe3par_cli)
        print("\n########################### new_method %s::%s::%s ###########################" %
              (str(yml), protocol, hpe3par_version[0:5]))"""

        sc = manager.create_sc(yml)
        pvc = manager.create_pvc(yml)

        # Fetch provisioning, compression, CPG and size from the StorageClass
        provisioning, compression, cpg_name, size = manager.get_sc_properties(
            yml)
        host_encryption = None
        host_encryption_secret_name = None
        host_encryption_secret_namespace = None
        host_SeesVLUN_set = False
        hostSeesVLUN = None

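        # Scan the StorageClass section of the manifest for host-encryption
        # and hostSeesVLUN parameters.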
        with open(yml) as f:
            elements = list(yaml.safe_load_all(f))
            for el in elements:
                # print("======== kind :: %s " % str(el.get('kind')))
                if str(el.get('kind')) == "StorageClass":
                    if 'hostEncryption' in el['parameters']:
                        host_encryption = el['parameters']['hostEncryption']
                    if 'hostEncryptionSecretName' in el['parameters']:
                        host_encryption_secret_name = el['parameters'][
                            'hostEncryptionSecretName']
                    if 'hostEncryptionSecretNamespace' in el['parameters']:
                        host_encryption_secret_namespace = el['parameters'][
                            'hostEncryptionSecretNamespace']
                    if 'hostSeesVLUN' in el['parameters']:
                        host_SeesVLUN_set = True
                        hostSeesVLUN = el['parameters']['hostSeesVLUN']

        logging.getLogger().info("Check in events if volume is created...")
        status, message = manager.check_status_from_events(
            kind='PersistentVolumeClaim',
            name=pvc.metadata.name,
            namespace=pvc.metadata.namespace,
            uid=pvc.metadata.uid)
        logging.getLogger().info("Check if test passed...")
        flag = manager.is_test_passed_with_encryption(
            status=status,
            enc_secret_name=host_encryption_secret_name,
            yml=yml)
        logging.getLogger().info("Test passed :: %s " % flag)
        assert flag is True, message

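        # The mount, I/O-path and cleanup checks below only apply once
        # provisioning has succeeded on the array.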
        if status == 'ProvisioningSucceeded':
            flag, pvc_obj = manager.check_status(
                timeout,
                pvc.metadata.name,
                kind='pvc',
                status='Bound',
                namespace=pvc.metadata.namespace)
            assert flag is True, "PVC %s status check timed out, not in Bound state yet..." % pvc_obj.metadata.name

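            # Map the bound PVC to its HPE volume CRD and to the volume name
            # used on the array.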
            pvc_crd = manager.get_pvc_crd(pvc_obj.spec.volume_name)
            volume_name = manager.get_pvc_volume(pvc_crd)
            logging.getLogger().info(globals.hpe3par_cli)
            volume = manager.get_volume_from_array(globals.hpe3par_cli,
                                                   volume_name)
            assert volume is not None, "Volume is not created on 3PAR for pvc %s " % volume_name
            logging.getLogger().info(volume)
            flag, failure_cause = manager.verify_volume_properties_3par(
                volume,
                size=size,
                provisioning=provisioning,
                compression=compression,
                cpg=cpg_name)
            assert flag is True, "Volume properties verification at array is failed for %s" % failure_cause
            pod = manager.create_pod(yml)

            flag, pod_obj = manager.check_status(
                timeout,
                pod.metadata.name,
                kind='pod',
                status='Running',
                namespace=pod.metadata.namespace)

            assert flag is True, "Pod %s status check timed out, not in Running state yet..." % pod.metadata.name

            # Verify crd for published status
            assert manager.verify_pvc_crd_published(pvc_obj.spec.volume_name) is True, \
                "PVC CRD %s Published is false after Pod is running" % pvc_obj.spec.volume_name

            hpe3par_vlun = manager.get_3par_vlun(globals.hpe3par_cli,
                                                 volume_name)
            assert manager.verify_pod_node(hpe3par_vlun, pod_obj) is True, \
                "Node for pod received from 3par and cluster do not match"

            iscsi_ips = manager.get_iscsi_ips(globals.hpe3par_cli)

            # Adding hostSeesVLUN check
            hpe3par_active_vlun = manager.get_all_active_vluns(
                globals.hpe3par_cli, volume_name)
            if host_SeesVLUN_set:
                for vlun_item in hpe3par_active_vlun:
                    if hostSeesVLUN == "true":
                        assert vlun_item[
                            'type'] == globals.HOST_TYPE, "hostSeesVLUN parameter validation failed for volume %s" % pvc_obj.spec.volume_name
                    else:
                        assert vlun_item[
                            'type'] == globals.MATCHED_SET, "hostSeesVLUN parameter validation failed for volume %s" % pvc_obj.spec.volume_name
                logging.getLogger().info(
                    "Successfully completed hostSeesVLUN parameter check")

            # Read pvc crd again after pod creation. It will have IQN and LunId.
            pvc_crd = manager.get_pvc_crd(pvc_obj.spec.volume_name)
            flag, disk_partition = manager.verify_by_path(
                iscsi_ips, pod_obj.spec.node_name, pvc_crd, hpe3par_vlun)
            assert flag is True, "partition not found"
            logging.getLogger().info("disk_partition received are %s " %
                                     disk_partition)

            flag, disk_partition_mod, partition_map = manager.verify_multipath(
                hpe3par_vlun, disk_partition)
            assert flag is True, "multipath check failed"
            """print("disk_partition after multipath check are %s " % disk_partition)
            print("disk_partition_mod after multipath check are %s " % disk_partition_mod)"""
            logging.getLogger().info(
                "disk_partition after multipath check are %s " %
                disk_partition)
            logging.getLogger().info(
                "disk_partition_mod after multipath check are %s " %
                disk_partition_mod)
            assert manager.verify_partition(
                disk_partition_mod), "partition mismatch"

            assert manager.verify_lsscsi(
                pod_obj.spec.node_name,
                disk_partition), "lsscsi verification failed"
            assert manager.delete_pod(pod.metadata.name, pod.metadata.namespace), "Pod %s is not deleted yet " % \
                                                                                  pod.metadata.name
            assert manager.check_if_deleted(timeout, pod.metadata.name, "Pod",
                                            namespace=pod.metadata.namespace) is True, \
                "Pod %s is not deleted yet " % pod.metadata.name

            flag, ip = manager.verify_deleted_partition(
                iscsi_ips, pod_obj.spec.node_name, hpe3par_vlun, pvc_crd)
            assert flag is True, "Partition(s) not cleaned after volume deletion for iscsi-ip %s " % ip

            paths = manager.verify_deleted_multipath_entries(
                pod_obj.spec.node_name, hpe3par_vlun, disk_partition)
            assert paths is None or len(
                paths) == 0, "Multipath entries are not cleaned"

            flag = manager.verify_deleted_lsscsi_entries(
                pod_obj.spec.node_name, disk_partition)
            logging.getLogger().info(
                "flag after deleted lsscsi verification is %s " % flag)
            assert flag, "lsscsi verification failed for vlun deletion"

            # Verify crd for unpublished status
            try:
                assert manager.verify_pvc_crd_published(pvc_obj.spec.volume_name) is False, \
                    "PVC CRD %s Published is true after Pod is deleted" % pvc_obj.spec.volume_name
                logging.getLogger().info(
                    "PVC CRD published is false after pod deletion.")
            except Exception as e:
                logging.getLogger().warning(
                    "Resuming test after failure of published status check for pvc crd... \n%s"
                    % e)
            assert manager.delete_pvc(pvc.metadata.name)

            assert manager.check_if_deleted(timeout, pvc.metadata.name, "PVC",
                                            namespace=pvc.metadata.namespace) is True, \
                "PVC %s is not deleted yet " % pvc.metadata.name

            assert manager.check_if_crd_deleted(pvc_obj.spec.volume_name, "hpevolumeinfos") is True, \
                "CRD %s of %s is not deleted yet. Taking longer..." % (pvc_obj.spec.volume_name, 'hpevolumeinfos')

            assert manager.verify_delete_volume_on_3par(globals.hpe3par_cli, volume_name), \
                "Volume %s from 3PAR for PVC %s is not deleted" % (volume_name, pvc.metadata.name)

            assert manager.delete_sc(sc.metadata.name) is True

            assert manager.check_if_deleted(timeout, sc.metadata.name, "SC",
                                            sc.metadata.namespace) is True, "SC %s is not deleted yet " \
                                                                            % sc.metadata.name
            """assert manager.delete_secret(secret.metadata.name, secret.metadata.namespace) is True

            assert manager.check_if_deleted(timeout, secret.metadata.name, "Secret", namespace=secret.metadata.namespace) is True, \
                "Secret %s is not deleted yet " % secret.metadata.name"""

    except Exception as e:
        logging.getLogger().error("Exception in pvc_create_verify :: %s" % e)
        raise e

    finally:
        cleanup(None, sc, pvc, pod)
Example #2
def test_publish_sanity():
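    """Sanity publish test: provision a PVC from test-publish.yml, run a pod
    against it, verify the iSCSI paths on the node, then tear everything down
    and verify cleanup."""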
    sc = None
    pvc = None
    pod = None
    try:
        yml = "%s/test-publish.yml" % globals.yaml_dir
        #array_ip, array_uname, array_pwd, protocol = manager.read_array_prop(yml)
        #hpe3par_cli = manager.get_3par_cli_client(yml)
        #hpe3par_version = manager.get_array_version(hpe3par_cli)
        #print("\n########################### test_publish::%s::%s ###########################" %
        #      (protocol, hpe3par_version[0:5]))
        """logging.error("\n########################### test_publish::%s::%s###########################" %
                      (protocol, hpe3par_version))"""
        #secret = manager.create_secret(yml)
        #step = "secret"
        sc = manager.create_sc(yml)
        #step = "sc"
        pvc = manager.create_pvc(yml)
        #step = "pvc"
        flag, pvc_obj = manager.check_status(timeout,
                                             pvc.metadata.name,
                                             kind='pvc',
                                             status='Bound',
                                             namespace=pvc.metadata.namespace)
        assert flag is True, "PVC %s status check timed out, not in Bound state yet..." % pvc_obj.metadata.name
        pvc_crd = manager.get_pvc_crd(pvc_obj.spec.volume_name)
        volume_name = manager.get_pvc_volume(pvc_crd)
        volume = manager.get_volume_from_array(globals.hpe3par_cli,
                                               volume_name)
        assert volume is not None, "Volume is not created on 3PAR for pvc %s " % volume_name

        pod = manager.create_pod(yml)

        flag, pod_obj = manager.check_status(timeout,
                                             pod.metadata.name,
                                             kind='pod',
                                             status='Running',
                                             namespace=pod.metadata.namespace)

        assert flag is True, "Pod %s status check timed out, not in Running state yet..." % pod.metadata.name

        # Verify crd for published status
        assert manager.verify_pvc_crd_published(pvc_obj.spec.volume_name) is True, \
            "PVC CRD %s Published is false after Pod is running" % pvc_obj.spec.volume_name

        hpe3par_vlun = manager.get_3par_vlun(globals.hpe3par_cli, volume_name)
        assert manager.verify_pod_node(hpe3par_vlun, pod_obj) is True, \
            "Node for pod received from 3par and cluster do not match"

        iscsi_ips = manager.get_iscsi_ips(globals.hpe3par_cli)

        # Read pvc crd again after pod creation. It will have IQN and LunId.
        pvc_crd = manager.get_pvc_crd(pvc_obj.spec.volume_name)
        flag, disk_partition = manager.verify_by_path(iscsi_ips,
                                                      pod_obj.spec.node_name,
                                                      pvc_crd, hpe3par_vlun)
        assert flag is True, "partition not found"
        logging.getLogger().info("disk_partition received are %s " %
                                 disk_partition)

        flag, disk_partition_mod, partition_map = manager.verify_multipath(
            hpe3par_vlun, disk_partition)
        assert flag is True, "multipath check failed"
        """print("disk_partition after multipath check are %s " % disk_partition)
        print("disk_partition_mod after multipath check are %s " % disk_partition_mod)"""
        logging.getLogger().info(
            "disk_partition after multipath check are %s " % disk_partition)
        logging.getLogger().info(
            "disk_partition_mod after multipath check are %s " %
            disk_partition_mod)
        assert manager.verify_partition(
            disk_partition_mod), "partition mismatch"

        assert manager.verify_lsscsi(
            pod_obj.spec.node_name,
            disk_partition), "lsscsi verification failed"

        assert manager.delete_pod(pod.metadata.name, pod.metadata.namespace), "Pod %s is not deleted yet " % \
                                                                              pod.metadata.name
        assert manager.check_if_deleted(timeout, pod.metadata.name, "Pod", namespace=pod.metadata.namespace) is True, \
            "Pod %s is not deleted yet " % pod.metadata.name

        flag, ip = manager.verify_deleted_partition(iscsi_ips,
                                                    pod_obj.spec.node_name,
                                                    hpe3par_vlun, pvc_crd)
        assert flag is True, "Partition(s) not cleaned after volume deletion for iscsi-ip %s " % ip

        paths = manager.verify_deleted_multipath_entries(
            pod_obj.spec.node_name, hpe3par_vlun, disk_partition)
        assert paths is None or len(
            paths) == 0, "Multipath entries are not cleaned"

        flag = manager.verify_deleted_lsscsi_entries(pod_obj.spec.node_name,
                                                     disk_partition)
        logging.getLogger().info(
            "flag after deleted lsscsi verification is %s " % flag)
        assert flag, "lsscsi verification failed for vlun deletion"

        # Verify crd for unpublished status
        try:
            assert manager.verify_pvc_crd_published(pvc_obj.spec.volume_name) is False, \
                "PVC CRD %s Published is true after Pod is deleted" % pvc_obj.spec.volume_name
            logging.getLogger().info(
                "PVC CRD published is false after pod deletion.")
        except Exception as e:
            logging.getLogger().warning(
                "Resuming test after failure of published status check for pvc crd... \n%s"
                % e)
        assert manager.delete_pvc(pvc.metadata.name)

        assert manager.check_if_deleted(timeout, pvc.metadata.name, "PVC", namespace=pvc.metadata.namespace) is True, \
            "PVC %s is not deleted yet " % pvc.metadata.name

        assert manager.check_if_crd_deleted(pvc_obj.spec.volume_name, "hpevolumeinfos") is True, \
            "CRD %s of %s is not deleted yet. Taking longer..." % (pvc_obj.spec.volume_name, 'hpevolumeinfos')

        assert manager.verify_delete_volume_on_3par(globals.hpe3par_cli, volume_name), \
            "Volume %s from 3PAR for PVC %s is not deleted" % (volume_name, pvc.metadata.name)

        assert manager.delete_sc(sc.metadata.name) is True

        assert manager.check_if_deleted(timeout, sc.metadata.name, "SC", sc.metadata.namespace) is True, "SC %s is not deleted yet " \
                                                                                  % sc.metadata.name
        """assert manager.delete_secret(secret.metadata.name, secret.metadata.namespace) is True

        assert manager.check_if_deleted(timeout, secret.metadata.name, "Secret", namespace=secret.metadata.namespace) is True, \
            "Secret %s is not deleted yet " % secret.metadata.name"""

    except Exception as e:
        logging.getLogger().error("Exception in test_publish_sanity :: %s" % e)
        raise e

    finally:
        cleanup(None, sc, pvc, pod)
Example #3
def run_pod_bkp(yml, hpe3par_cli, protocol):
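    """Older variant of the publish flow, apparently kept as a backup (per the
    _bkp suffix): takes the array client and protocol as arguments and only
    performs the I/O-path checks when the protocol is iscsi."""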
    secret = None
    sc = None
    pvc = None
    pod = None
    try:
        secret = manager.create_secret(yml)
        sc = manager.create_sc(yml)
        pvc = manager.create_pvc(yml)
        flag, pvc_obj = manager.check_status(timeout, pvc.metadata.name, kind='pvc', status='Bound',
                                             namespace=pvc.metadata.namespace)
        assert flag is True, "PVC %s status check timed out, not in Bound state yet..." % pvc_obj.metadata.name

        pvc_crd = manager.get_pvc_crd(pvc_obj.spec.volume_name)
        volume_name = manager.get_pvc_volume(pvc_crd)
        volume = manager.get_volume_from_array(hpe3par_cli, volume_name)
        assert volume is not None, "Volume is not created on 3PAR for pvc %s. Terminating test. " % volume_name

        pod = manager.create_pod(yml)
        flag, pod_obj = manager.check_status(timeout, pod.metadata.name, kind='pod', status='Running',
                                             namespace=pod.metadata.namespace)
        assert flag is True, "Pod %s status check timed out, not in Running state yet. Terminating test." % pod.metadata.name

        # Verify crd for published status
        assert manager.verify_pvc_crd_published(pvc_obj.spec.volume_name) is True, \
            "PVC CRD %s Published is false after Pod is running" % pvc_obj.spec.volume_name

        hpe3par_vlun = manager.get_3par_vlun(hpe3par_cli, volume_name)
        assert manager.verify_pod_node(hpe3par_vlun, pod_obj) is True, \
            "Node for pod received from 3par and cluster do not match"

        if protocol == 'iscsi':
            iscsi_ips = manager.get_iscsi_ips(hpe3par_cli)

            flag, disk_partition = manager.verify_by_path(iscsi_ips, pod_obj.spec.node_name)
            assert flag is True, "partition not found"
            print("disk_partition received are %s " % disk_partition)

            flag, disk_partition_mod = manager.verify_multipath(hpe3par_vlun, disk_partition)
            assert flag is True, "multipath check failed"
            print("disk_partition after multipath check are %s " % disk_partition)
            print("disk_partition_mod after multipath check are %s " % disk_partition_mod)
            assert manager.verify_partition(disk_partition_mod), "partition mismatch"

            assert manager.verify_lsscsi(pod_obj.spec.node_name, disk_partition), "lsscsi verification failed"

        assert manager.delete_pod(pod.metadata.name, pod.metadata.namespace), "Pod %s is not deleted yet " % \
                                                                              pod.metadata.name
        assert manager.check_if_deleted(timeout, pod.metadata.name, "Pod", namespace=pod.metadata.namespace) is True, \
            "Pod %s is not deleted yet " % pod.metadata.name

        if protocol == 'iscsi':
            flag, ip = manager.verify_deleted_partition(iscsi_ips, pod_obj.spec.node_name)
            assert flag is True, "Partition(s) not cleaned after volume deletion for iscsi-ip %s " % ip

            paths = manager.verify_deleted_multipath_entries(pod_obj.spec.node_name, hpe3par_vlun)
            assert paths is None or len(paths) == 0, "Multipath entries are not cleaned"

            flag = manager.verify_deleted_lsscsi_entries(pod_obj.spec.node_name, disk_partition)
            print("flag after deleted lsscsi verification is %s " % flag)
            assert flag, "lsscsi verification failed for vlun deletion"

        # The PVC CRD unpublished-status check is skipped in this variant.
        assert manager.delete_pvc(pvc.metadata.name)

        assert manager.check_if_deleted(timeout, pvc.metadata.name, "PVC", namespace=pvc.metadata.namespace) is True, \
            "PVC %s is not deleted yet " % pvc.metadata.name

        assert manager.check_if_crd_deleted(pvc_obj.spec.volume_name, "hpevolumeinfos") is True, \
            "CRD %s of %s is not deleted yet. Taking longer..." % (pvc_obj.spec.volume_name, 'hpevolumeinfos')

        assert manager.verify_delete_volume_on_3par(hpe3par_cli, volume_name), \
            "Volume %s from 3PAR for PVC %s is not deleted" % (volume_name, pvc.metadata.name)

        assert manager.delete_sc(sc.metadata.name) is True

        assert manager.check_if_deleted(timeout, sc.metadata.name, "SC") is True, "SC %s is not deleted yet " \
                                                                                  % sc.metadata.name

        assert manager.delete_secret(secret.metadata.name, secret.metadata.namespace) is True

        assert manager.check_if_deleted(timeout, secret.metadata.name, "Secret", namespace=secret.metadata.namespace) is True, \
            "Secret %s is not deleted yet " % secret.metadata.name

    except Exception as e:
        print("Exception in run_pod_bkp :: %s" % e)
        raise e
    finally:
        cleanup(secret, sc, pvc, pod)


def check_pod_status_vlun_crd(deploy_added=False, pod_removed=False):
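    """Wait until every replica set under test has a Running pod, then verify
    vlun placement, PVC CRD published status, by-path partitions, multipath and
    lsscsi state for each pod, populating the module-level maps that later
    cleanup verification relies on."""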
    global list_pod_name
    global map_pod_node_dist
    global map_pod_vlun
    global map_pvc_crd
    global access_protocol
    global disk_partition_map
    global list_pod_obj
    global list_replica_set
    global pod_status_check_done
    global all_pods_running_time

    try:
        iscsi_ips = manager.get_iscsi_ips(globals.hpe3par_cli)
        logging.getLogger().info("Verifying all pods in running state")
        list_pod_obj.clear()
        for node_name in map_pod_node_dist.keys():
            map_pod_node_dist[node_name].clear()

        # Check each pod for status
        pod_status_check_done = False
        thread1 = Thread(target=timer, name="timer")
        thread1.start()
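        # The timer thread (defined elsewhere in this module) presumably
        # advances all_pods_running_time until pod_status_check_done is set.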

        # Replica sets that already have a Running pod
        ready_replicas = set()
        # Replica sets derived from the deployments under test
        replica_list = set(list_replica_set)
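        # Poll until every replica set has a Running pod, with an overall
        # budget of 30 minutes of elapsed all_pods_running_time.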
        while all_pods_running_time < 30 * 60:
            # logging.getLogger().info(f"ready_deps :: {ready_replicas}")
            # logging.getLogger().info(f"replica_list :: {replica_list}")
            if ready_replicas == replica_list:  # all deps are ready
                pod_status_check_done = True
                break
            else:
                replica_list_to_be_checked = replica_list - ready_replicas
                logging.getLogger().info(
                    f"==========\nReplica sets to be checked if pods are created :: {replica_list_to_be_checked}\n"
                )
                for replica_set_name in replica_list_to_be_checked:
                    replica_has_running_pod = False
                    pods_for_dep = [
                        i for i in list_pod_name
                        if i.startswith(replica_set_name)
                    ]
                    logging.getLogger().info("%s has %s list of pods" %
                                             (replica_set_name, pods_for_dep))
                    for pod in pods_for_dep:
                        flag, pod_obj = manager.check_status(
                            5,
                            pod,
                            kind='pod',
                            status='Running',
                            namespace=globals.namespace)
                        if flag is True:
                            """if deploy_added is False and pod_removed is False:
                                previous_node = map_pod_obj[pod].spec.node_name
                                assert pod_obj.spec.node_name != node_to_reboot and , \
                                    "Pod is still mounted on previous worker node %s " % pod_obj.spec.node_name"""
                            check_mount_node = False
                            if event.startswith('drain'):
                                node_to_match = node_to_drain
                                check_mount_node = True
                            elif event.startswith('reboot'):
                                node_to_match = node_to_reboot
                                check_mount_node = True
                            if check_mount_node is True:
                                # The assertion that the pod moved off
                                # node_to_match is intentionally disabled here.
                                pass
                            replica_has_running_pod = True
                            list_pod_obj.append(pod_obj)
                            ready_replicas.add(replica_set_name)
                            map_pod_node_dist[pod_obj.spec.node_name].append(
                                pod_obj.metadata.name)
                            break
                        else:
                            replica_has_running_pod = False
                    """assert replica_has_running_pod is True, "Deployment %s does not have any pod in running state yet out of %s" % \
                                                            (replica_set_name[0:replica_set_name.index('-')],
                                                             pods_for_dep)"""
                    logging.getLogger().info(
                        "Deployment %s has pod in running state" %
                        replica_set_name[0:replica_set_name.index('-')])
        assert pod_status_check_done is True, f"All pods did not come to running in " \
                                              f"{str(datetime.timedelta(0, all_pods_running_time))}, terminating test..."
        logging.getLogger().info(
            "==================================== Time taken to all pods come to running is %s"
            % str(datetime.timedelta(0, all_pods_running_time)))
        logging.getLogger().info("Node wide distribution of pods...")
        for node, pod_list in map_pod_node_dist.items():
            logging.getLogger().info(f"{node} :: {pod_list}\n")

        logging.getLogger().info("Now verifying vlun and CRDs for each pod")
        # Verify CRD and vlun for pods
        for pod_obj in list_pod_obj:
            # logging.getLogger().info(pod_obj)
            # Verify crd for published status
            pvc_name = pod_obj.spec.volumes[
                0].persistent_volume_claim.claim_name
            logging.getLogger().info("\n\nPVC is :: %s " % pvc_name)
            volume_name = manager.hpe_read_pvc_object(
                pvc_name, globals.namespace).spec.volume_name
            logging.getLogger().info("volume_name is :: %s " % volume_name)
            assert manager.verify_pvc_crd_published(volume_name) is True, \
                "PVC CRD %s Published is false after Pod is running" % volume_name
            logging.getLogger().info("PVC CRD %s published is True" %
                                     volume_name)
            pvc_crd = manager.get_pvc_crd(volume_name)
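            # 3PAR volume names are limited to 31 characters, so truncate the
            # Kubernetes volume name to match the name created on the array.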
            volume_name = volume_name[0:31]
            hpe3par_vlun = manager.get_3par_vlun(globals.hpe3par_cli, volume_name)

            # store vlun for pod in map to be referenced during cleanup verification
            map_pod_vlun[pod_obj.metadata.name] = hpe3par_vlun
            assert manager.verify_pod_node(hpe3par_vlun, pod_obj) is True, \
                "Node for pod received from 3par and cluster do not match"
            logging.getLogger().info(
                "Node for pod received from 3par and cluster match")

            # store pvc crd to be referenced during cleanup verification
            map_pvc_crd[pod_obj.metadata.name] = pvc_crd

            flag, disk_partition = manager.verify_by_path(
                iscsi_ips, pod_obj.spec.node_name, pvc_crd, hpe3par_vlun)
            assert flag is True, "partition not found"
            logging.getLogger().info("disk_partition received are %s " %
                                     disk_partition)

            flag, disk_partition_mod, partition_map = manager.verify_multipath(
                hpe3par_vlun, disk_partition)
            assert flag is True, "multipath check failed"
            logging.getLogger().info(
                "disk_partition after multipath check are %s " %
                disk_partition)
            logging.getLogger().info(
                "disk_partition_mod after multipath check are %s " %
                disk_partition_mod)
            assert manager.verify_partition(
                disk_partition_mod), "partition mismatch"
            logging.getLogger().info(
                "Partition verification done successfully")

            assert manager.verify_lsscsi(
                pod_obj.spec.node_name,
                disk_partition), "lsscsi verification failed"
            logging.getLogger().info("lsscsi verification done successfully")
            # save disk_partition to verify cleanup after node drain
            disk_partition_map[
                pod_obj.metadata.owner_references[0].name] = disk_partition

            pod_node_name = pod_obj.spec.node_name
            logging.getLogger().info("%s is mounted on %s" %
                                     (pod_obj.metadata.name, pod_node_name))
            # The pod is already recorded in map_pod_node_dist by the status
            # check above; appending again here would duplicate the entry.

        logging.getLogger().info(
            "\n\nSuccessfully verified vlun and CRD status for each pod")
        # logging.getLogger().info("Node wide distribution of pod %s" % map_pod_node_dist)
    except Exception as e:
        logging.getLogger().error("Error in vlun verification :: %s" % e)
        raise e
    finally:
        pod_status_check_done = True