Example 1
def cleanup(secret, sc, pvc, pod):
    logging.getLogger().info("====== cleanup :START =========")
    #logging.info("====== cleanup after failure:START =========")
    logging.getLogger().info("pod :: %s, pvc :: %s, sc :: %s" % (pod, pvc, sc))
    if pod is not None and manager.check_if_deleted(
            2, pod.metadata.name, "Pod",
            namespace=pod.metadata.namespace) is False:
        manager.delete_pod(pod.metadata.name, pod.metadata.namespace)
    if pvc is not None and manager.check_if_deleted(
            2, pvc.metadata.name, "PVC",
            namespace=pvc.metadata.namespace) is False:
        manager.delete_pvc(pvc.metadata.name)
        """assert manager.check_if_crd_deleted(pvc.spec.volume_name, "hpevolumeinfos") is True, \
            "CRD %s of %s is not deleted yet. Taking longer..." % (pvc.spec.volume_name, 'hpevolumeinfos')"""
        flag = manager.check_if_crd_deleted(pvc.spec.volume_name,
                                            "hpevolumeinfos")
        if flag is False:
            logging.getLogger().error(
                "CRD %s of %s is not deleted yet. Taking longer..." %
                (pvc.spec.volume_name, 'hpevolumeinfos'))
    if sc is not None and manager.check_if_deleted(
            2, sc.metadata.name, "SC", sc.metadata.namespace) is False:
        manager.delete_sc(sc.metadata.name)
    """if secret is not None and manager.check_if_deleted(2, secret.metadata.name, "Secret", namespace=secret.metadata.namespace) is False:
        manager.delete_secret(secret.metadata.name, secret.metadata.namespace)"""
    logging.getLogger().info("====== cleanup :END =========")
def helm_uninstall(host_ip, host_name):
    try:
        '''node_obj = manager.hpe_list_node_objects()
        node_list = node_obj.items
        # Fetching master node ip
        for item in node_list:
            if item.spec.taints != "None":
                host_ip = item.status.addresses[0].address
                host_name = item.status.addresses[1].address
            break'''

        # ssh to master node and check csi driver installed
        command = "helm ls -n " + globals.namespace
        command_output = manager.get_command_output(host_name, command)
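        # A single line of output (just the header row) means no release is
        # installed in this namespace.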
        if len(command_output) == 1:
            logging.getLogger().info("Driver not installed")
            return

        # ssh to master node and execute uninstall commands
        command = "helm uninstall hpe-csi --namespace " + globals.namespace
        command_output = manager.get_command_output(host_name, command)
        logging.getLogger().info(command_output)
        assert command_output[
            0] == 'release "hpe-csi" uninstalled', "Uninstall of 'hpe-csi' driver failed"
        #sleep(30)

        # Delete crds
        crd_obj = manager.hpe_list_crds()
        for crd in crd_obj:
            manager.hpe_delete_crd(crd)
            assert manager.check_if_deleted(timeout, crd, kind='Crd', namespace=globals.namespace) is True, \
                "crd %s is not deleted yet " % crd

        # Check plugin pods have been deleted
        pod_obj = manager.hpe_list_pod_objects(globals.namespace)
        pod_list = pod_obj.items
        for item in pod_list:
            if bool(item.metadata.labels):
                if 'app' in item.metadata.labels:
                    if item.metadata.labels[
                            'app'] == 'hpe-csi-controller' or item.metadata.labels[
                                'app'] == 'hpe-csi-node' or item.metadata.labels[
                                    'app'] == 'primera3par-csp':
                        assert manager.check_if_deleted(timeout, item.metadata.name, "Pod", namespace=item.metadata.namespace) is True, \
                            "Pod %s is not deleted yet " % item.metadata.name
            else:
                continue
        logging.getLogger().info("Plugin pods deleted")

    except AssertionError as e:
        raise e

    except Exception as e:
        raise e
def cleanup(secret, sc, pvc, pod):
    logging.getLogger().info("====== cleanup :START =========")
    if pod is not None and manager.check_if_deleted(2, pod.metadata.name, "Pod", namespace=pod.metadata.namespace) is False:
        manager.delete_pod(pod.metadata.name, pod.metadata.namespace)
    if pvc is not None and manager.check_if_deleted(2, pvc.metadata.name, "PVC", namespace=pvc.metadata.namespace) is False:
        manager.delete_pvc(pvc.metadata.name)
    if sc is not None and manager.check_if_deleted(2, sc.metadata.name, "SC", None) is False:
        manager.delete_sc(sc.metadata.name)
    if secret is not None and manager.check_if_deleted(2, secret.metadata.name, "SECRET", None) is False:
        manager.delete_secret(secret.metadata.name,globals.namespace)
    logging.getLogger().info("====== cleanup :END =========")
Example 4
def cleanup(secret, sc, pvc, pod):
    print("====== cleanup :START =========")
    #logging.info("====== cleanup after failure:START =========")
    if pod is not None and manager.check_if_deleted(2, pod.metadata.name, "Pod", namespace=pod.metadata.namespace) is False:
        manager.delete_pod(pod.metadata.name, pod.metadata.namespace)
    if pvc is not None and manager.check_if_deleted(2, pvc.metadata.name, "PVC", namespace=pvc.metadata.namespace) is False:
        manager.delete_pvc(pvc.metadata.name)
    if sc is not None and manager.check_if_deleted(2, sc.metadata.name, "SC") is False:
        manager.delete_sc(sc.metadata.name)
    if secret is not None and manager.check_if_deleted(2, secret.metadata.name, "Secret", namespace=secret.metadata.namespace) is False:
        manager.delete_secret(secret.metadata.name, secret.metadata.namespace)
    print("====== cleanup :END =========")
def cleanup(sc, pvc, pod):
    logging.getLogger().info("====== cleanup :START =========")
    #logging.info("====== cleanup after failure:START =========")
    if pod is not None and manager.check_if_deleted(
            2, pod.metadata.name, "Pod",
            namespace=pod.metadata.namespace) is False:
        manager.delete_pod(pod.metadata.name, pod.metadata.namespace)
    if pvc is not None and manager.check_if_deleted(
            2, pvc.metadata.name, "PVC",
            namespace=pvc.metadata.namespace) is False:
        manager.delete_pvc(pvc.metadata.name)
    if sc is not None and manager.check_if_deleted(2, sc.metadata.name, "SC",
                                                   None) is False:
        manager.delete_sc(sc.metadata.name)
    logging.getLogger().info("====== cleanup :END =========")
def cleanup(sc, pvc, pod):
    logging.getLogger().info("====== cleanup :START =========")
    if pod is not None and manager.check_if_deleted(
            2, pod.metadata.name, "Pod",
            namespace=pod.metadata.namespace) is False:
        manager.delete_pod(pod.metadata.name, pod.metadata.namespace)
    if pvc is not None and manager.check_if_deleted(
            2, pvc.metadata.name, "PVC",
            namespace=pvc.metadata.namespace) is False:
        manager.delete_pvc(pvc.metadata.name)

    # "CRD %s of %s is not deleted yet. Taking longer..." % (pvc.spec.volume_name, 'hpevolumeinfos')
    if sc is not None and manager.check_if_deleted(2, sc.metadata.name, "SC",
                                                   None) is False:
        manager.delete_sc(sc.metadata.name)
    logging.getLogger().info("====== cleanup :END =========")
def verify_pod_deletion_bulk():
    timeout = 300
    # Verify if the pods are deleted
    for pod in list_pod_obj:
        assert manager.check_if_deleted(timeout, pod.metadata.name, "Pod", namespace=pod.metadata.namespace) is True, \
        "Pod %s is not deleted yet " % pod.metadata.name

    verify_vlun_cleanup()
Example 8
def cleanup(sc, pvc, pod):
    print("====== cleanup :START =========")
    #logging.info("====== cleanup after failure:START =========")
    if pod is not None and manager.check_if_deleted(
            2, pod.metadata.name, "Pod",
            namespace=pod.metadata.namespace) is False:
        manager.delete_pod(pod.metadata.name, pod.metadata.namespace)
    if pvc is not None and manager.check_if_deleted(
            2, pvc.metadata.name, "PVC",
            namespace=pvc.metadata.namespace) is False:
        manager.delete_pvc(pvc.metadata.name)
        assert manager.check_if_crd_deleted(pvc.spec.volume_name, "hpevolumeinfos") is True, \
            "CRD %s of %s is not deleted yet. Taking longer..." % (pvc.spec.volume_name, 'hpevolumeinfos')
    if sc is not None and manager.check_if_deleted(2, sc.metadata.name, "SC",
                                                   None) is False:
        manager.delete_sc(sc.metadata.name)
    print("====== cleanup :END =========")
def verify_pvc_deletion_bulk():
    global list_pvc_obj
    global map_pvc_volume_name
    logging.getLogger().info(map_pvc_volume_name)
    timeout = 300
    # Verify if the pods are deleted
    for pvc in list_pvc_obj:
        assert manager.check_if_deleted(timeout, pvc.metadata.name, "PVC", namespace=pvc.metadata.namespace) is True, \
            "PVC %s is not deleted yet " % pvc.metadata.name

        assert manager.check_if_crd_deleted(pvc.spec.volume_name, "hpevolumeinfos") is True, \
            "CRD %s of %s is not deleted yet. Taking longer..." % (pvc.spec.volume_name, 'hpevolumeinfos')

        assert manager.verify_delete_volume_on_3par(hpe3par_cli, map_pvc_volume_name[pvc.metadata.name]), \
            "Volume %s from 3PAR for PVC %s is not deleted" % (map_pvc_volume_name[pvc.metadata.name], pvc.metadata.name)

    logging.getLogger().info("Verified PVC deletion")
Example 10
def run_pod_bkp(yml, hpe3par_cli, protocol):
    secret = None
    sc = None
    pvc = None
    pod = None
    try:
        secret = manager.create_secret(yml)
        sc = manager.create_sc(yml)
        pvc = manager.create_pvc(yml)
        flag, pvc_obj = manager.check_status(timeout, pvc.metadata.name, kind='pvc', status='Bound',
                                             namespace=pvc.metadata.namespace)
        assert flag is True, "PVC %s status check timed out, not in Bound state yet..." % pvc_obj.metadata.name

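        # The backing volume name on the array is read from the PVC's
        # HPEVolumeInfo CRD (keyed by the PV name) and then checked on the array.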
        pvc_crd = manager.get_pvc_crd(pvc_obj.spec.volume_name)
        #print(pvc_crd)
        volume_name = manager.get_pvc_volume(pvc_crd)
        volume = manager.get_volume_from_array(hpe3par_cli, volume_name)
        assert volume is not None, "Volume is not created on 3PAR for pvc %s. Terminating test. " % volume_name

        pod = manager.create_pod(yml)
        flag, pod_obj = manager.check_status(timeout, pod.metadata.name, kind='pod', status='Running',
                                             namespace=pod.metadata.namespace)
        assert flag is True, "Pod %s status check timed out, not in Running state yet. Terminating test." % pod.metadata.name

        # Verify crd for published status
        assert manager.verify_pvc_crd_published(pvc_obj.spec.volume_name) is True, \
            "PVC CRD %s Published is false after Pod is running" % pvc_obj.spec.volume_name

        hpe3par_vlun = manager.get_3par_vlun(hpe3par_cli,volume_name)
        assert manager.verify_pod_node(hpe3par_vlun, pod_obj) is True, \
            "Node for pod received from 3par and cluster do not match"

        if protocol == 'iscsi':
            iscsi_ips = manager.get_iscsi_ips(hpe3par_cli)

            flag, disk_partition = manager.verify_by_path(iscsi_ips, pod_obj.spec.node_name)
            assert flag is True, "partition not found"
            print("disk_partition received are %s " % disk_partition)

            flag, disk_partition_mod = manager.verify_multipath(hpe3par_vlun, disk_partition)
            assert flag is True, "multipath check failed"
            print("disk_partition after multipath check are %s " % disk_partition)
            print("disk_partition_mod after multipath check are %s " % disk_partition_mod)
            assert manager.verify_partition(disk_partition_mod), "partition mismatch"

            assert manager.verify_lsscsi(pod_obj.spec.node_name, disk_partition), "lsscsi verification failed"

        assert manager.delete_pod(pod.metadata.name, pod.metadata.namespace), "Pod %s is not deleted yet " % \
                                                                              pod.metadata.name
        assert manager.check_if_deleted(timeout, pod.metadata.name, "Pod", namespace=pod.metadata.namespace) is True, \
            "Pod %s is not deleted yet " % pod.metadata.name

        if protocol == 'iscsi':
            flag, ip = manager.verify_deleted_partition(iscsi_ips, pod_obj.spec.node_name)
            assert flag is True, "Partition(s) not cleaned after volume deletion for iscsi-ip %s " % ip

            paths = manager.verify_deleted_multipath_entries(pod_obj.spec.node_name, hpe3par_vlun)
            assert paths is None or len(paths) == 0, "Multipath entries are not cleaned"

            # partitions = manager.verify_deleted_lsscsi_entries(pod_obj.spec.node_name, disk_partition)
            # assert len(partitions) == 0, "lsscsi verificatio failed for vlun deletion"
            flag = manager.verify_deleted_lsscsi_entries(pod_obj.spec.node_name, disk_partition)
            print("flag after deleted lsscsi verificatio is %s " % flag)
            assert flag, "lsscsi verification failed for vlun deletion"

        # Verify crd for unpublished status
        """try:
            assert manager.verify_pvc_crd_published(pvc_obj.spec.volume_name) is False, \
                "PVC CRD %s Published is true after Pod is deleted" % pvc_obj.spec.volume_name
            print("PVC CRD published is false after pod deletion.")
            #logging.warning("PVC CRD published is false after pod deletion.")
        except Exception as e:
            print("Resuming test after failure of publishes status check for pvc crd... \n%s" % e)
            #logging.error("Resuming test after failure of publishes status check for pvc crd... \n%s" % e)"""
        assert manager.delete_pvc(pvc.metadata.name)

        assert manager.check_if_deleted(timeout, pvc.metadata.name, "PVC", namespace=pvc.metadata.namespace) is True, \
            "PVC %s is not deleted yet " % pvc.metadata.name

        #pvc_crd = manager.get_pvc_crd(pvc_obj.spec.volume_name)
        #print("PVC crd after PVC object deletion :: %s " % pvc_crd)
        assert manager.check_if_crd_deleted(pvc_obj.spec.volume_name, "hpevolumeinfos") is True, \
            "CRD %s of %s is not deleted yet. Taking longer..." % (pvc_obj.spec.volume_name, 'hpevolumeinfos')

        assert manager.verify_delete_volume_on_3par(hpe3par_cli, volume_name), \
            "Volume %s from 3PAR for PVC %s is not deleted" % (volume_name, pvc.metadata.name)

        assert manager.delete_sc(sc.metadata.name) is True

        assert manager.check_if_deleted(timeout, sc.metadata.name, "SC") is True, "SC %s is not deleted yet " \
                                                                                  % sc.metadata.name

        assert manager.delete_secret(secret.metadata.name, secret.metadata.namespace) is True

        assert manager.check_if_deleted(timeout, secret.metadata.name, "Secret", namespace=secret.metadata.namespace) is True, \
            "Secret %s is not deleted yet " % secret.metadata.name

    except Exception as e:
        print("Exception in run_pod :: %s" % e)
        #logging.error("Exception in test_publish :: %s" % e)
        """if step == 'pvc':
            manager.delete_pvc(pvc.metadata.name)
            manager.delete_sc(sc.metadata.name)
            manager.delete_secret(secret.metadata.name, secret.metadata.namespace)
        if step == 'sc':
            manager.delete_sc(sc.metadata.name)
            manager.delete_secret(secret.metadata.name, secret.metadata.namespace)
        if step == 'secret':
            manager.delete_secret(secret.metadata.name, secret.metadata.namespace)"""
        raise e
    finally:
        cleanup(secret, sc, pvc, pod)
Example 11
def test_expand_volume():
    secret = None
    sc = None
    pvc = None
    pod = None
    try:
        yml = "YAML/test-expand_vol_pvc.yml"
        hpe3par_cli = manager.get_3par_cli_client(yml)
        array_ip, array_uname, array_pwd, protocol = manager.read_array_prop(
            yml)
        hpe3par_version = manager.get_array_version(hpe3par_cli)
        print(
            "\n########################### test_expand_volume::%s::%s ###########################"
            % (protocol, hpe3par_version[0:5]))
        """logging.info("\n########################### test_expand_volume::%s::%s###########################" %
                     (protocol, hpe3par_version))"""
        secret = manager.create_secret(yml)
        step = "secret"
        sc = manager.create_sc(yml)
        step = "sc"
        pvc = manager.create_pvc(yml)
        step = "pvc"
        flag, pvc_obj = manager.check_status(timeout,
                                             pvc.metadata.name,
                                             kind='pvc',
                                             status='Bound',
                                             namespace=pvc.metadata.namespace)
        assert flag is True, "PVC %s status check timed out, not in Bound state yet..." % pvc_obj.metadata.name

        pvc_crd = manager.get_pvc_crd(pvc_obj.spec.volume_name)
        # print(pvc_crd)
        volume_name = manager.get_pvc_volume(pvc_crd)
        print("volume_name :: %s " % volume_name)
        print(hpe3par_cli)
        volume = manager.get_volume_from_array(hpe3par_cli, volume_name)
        assert volume is not None, "Volume is not created on 3PAR for pvc %s " % volume_name
        print("Volume verification at array done successfully")
        # patch pvc for expand size
        size_in_gb = '20'
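        # The patch body mirrors the PVC spec path spec.resources.requests.storage;
        # only the new size needs to be supplied.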
        patch_json = {
            'spec': {
                'resources': {
                    'requests': {
                        'storage': size_in_gb + 'Gi'
                    }
                }
            }
        }
        mod_pvc = manager.patch_pvc(pvc.metadata.name, pvc.metadata.namespace,
                                    patch_json)
        print("Patched PVC %s" % mod_pvc)

        pod = manager.create_pod("YAML/test-expand_vol_pod.yml")

        flag, pod_obj = manager.check_status(timeout,
                                             pod.metadata.name,
                                             kind='pod',
                                             status='Running',
                                             namespace=pod.metadata.namespace)

        assert flag is True, "Pod %s status check timed out, not in Running state yet..." % pod.metadata.name

        # Now check if volume in 3par has increased size
        volume = manager.get_volume_from_array(hpe3par_cli, volume_name)
        assert volume['sizeMiB'] == int(
            size_in_gb) * 1024, "Volume on array does not have updated size"

        # Check if PVC has increased size
        mod_pvc = manager.hpe_read_pvc_object(pvc.metadata.name,
                                              pvc.metadata.namespace)
        print("\n PVC after expansion %s" % mod_pvc)
        # assert mod_pvc['spec']['resources']['requests']['storage'] == "%sGi" % size_in_gb, "PVC %s does not have updated size" % pvc.metadata.name
        assert mod_pvc.spec.resources.requests[
            'storage'] == "%sGi" % size_in_gb, "PVC %s does not have updated size" % pvc.metadata.name

        # check size of mounted vlun
        """node = pod_obj.spec.node_name
        hpe3par_vlun = manager.get_3par_vlun(hpe3par_cli,volume_name)
        vv_wwn = hpe3par_vlun['volumeWWN']

        command = "mount | grep -i 3%s" % vv_wwn"""
        assert manager.delete_pod(pod.metadata.name, pod.metadata.namespace), "Pod %s is not deleted yet " % \
                                                                              pod.metadata.name
        assert manager.check_if_deleted(timeout, pod.metadata.name, "Pod", namespace=pod.metadata.namespace) is True, \
            "Pod %s is not deleted yet " % pod.metadata.name

        assert manager.delete_pvc(pvc.metadata.name)

        assert manager.check_if_deleted(timeout, pvc.metadata.name, "PVC", namespace=pvc.metadata.namespace) is True, \
            "PVC %s is not deleted yet " % pvc.metadata.name

        assert manager.check_if_crd_deleted(pvc_obj.spec.volume_name, "hpevolumeinfos") is True, \
            "CRD %s of %s is not deleted yet. Taking longer..." % (pvc_obj.spec.volume_name, 'hpevolumeinfos')

        assert manager.verify_delete_volume_on_3par(hpe3par_cli, volume_name), \
            "Volume %s from 3PAR for PVC %s is not deleted" % (volume_name, pvc.metadata.name)

        assert manager.delete_sc(sc.metadata.name) is True

        assert manager.check_if_deleted(timeout, sc.metadata.name, "SC") is True, "SC %s is not deleted yet " \
                                                                                  % sc.metadata.name

        assert manager.delete_secret(secret.metadata.name,
                                     secret.metadata.namespace) is True

        assert manager.check_if_deleted(timeout, secret.metadata.name, "Secret",
                                        namespace=secret.metadata.namespace) is True, \
            "Secret %s is not deleted yet " % secret.metadata.name

    except Exception as e:
        print("Exception in test_expand_volume :: %s" % e)
        #logging.error("Exception in test_snapshot :: %s" % e)
        """if step == 'pvc':
                manager.delete_pvc(pvc.metadata.name)
                manager.delete_sc(sc.metadata.name)
                manager.delete_secret(secret.metadata.name, secret.metadata.namespace)
            if step == 'sc':
                manager.delete_sc(sc.metadata.name)
                manager.delete_secret(secret.metadata.name, secret.metadata.namespace)
            if step == 'secret':
                manager.delete_secret(secret.metadata.name, secret.metadata.namespace)"""
        raise e

    finally:
        hpe3par_cli.logout()
        cleanup(secret, sc, pvc, pod)
Example 12
def pvc_create_verify(yml):
    secret = None
    sc = None
    pvc = None
    pod = None
    try:
        array_ip, array_uname, array_pwd, protocol = manager.read_array_prop(
            yml)
        hpe3par_cli = manager.get_3par_cli_client(yml)
        hpe3par_version = manager.get_array_version(hpe3par_cli)
        print(
            "\n########################### new_method %s::%s::%s ###########################"
            % (str(yml), protocol, hpe3par_version[0:5]))
        #logging.info("\n########################### test_thin_absent_comp::%s::%s###########################" %
        #(protocol, hpe3par_version))
        secret = manager.create_secret(yml)
        step = "secret"
        sc = manager.create_sc(yml)
        step = "sc"
        pvc = manager.create_pvc(yml)
        print("PVC created :: %s " % pvc)
        step = "pvc"
        # Check PVC status in events
        provisioning = None
        compression = None
        size = None
        is_cpg_ssd = None
        provisioning, compression, cpg_name, size = manager.get_sc_properties(
            yml)
        print("Check if cpg is ssd")
        is_cpg_ssd = manager.check_cpg_prop_at_array(hpe3par_cli,
                                                     cpg_name,
                                                     property='ssd')
        print("Check in events if volume is created...")
        status, message = manager.check_status_from_events(
            kind='PersistentVolumeClaim',
            name=pvc.metadata.name,
            namespace=pvc.metadata.namespace,
            uid=pvc.metadata.uid)
        print("Check if test passed...")
        flag = manager.is_test_passed(array_version=hpe3par_version,
                                      status=status,
                                      is_cpg_ssd=is_cpg_ssd,
                                      provisioning=provisioning,
                                      compression=compression)
        print("Test passed :: %s " % flag)
        assert flag is True, "Volume created on %s with provisioning=%s, compression=%s" % (
            hpe3par_version, provisioning, compression)

        if status == 'ProvisioningSucceeded':
            flag, pvc_obj = manager.check_status(
                timeout,
                pvc.metadata.name,
                kind='pvc',
                status='Bound',
                namespace=pvc.metadata.namespace)
            assert flag is True, "PVC %s status check timed out, not in Bound state yet..." % pvc_obj.metadata.name

            pvc_crd = manager.get_pvc_crd(pvc_obj.spec.volume_name)
            #print(pvc_crd)
            volume_name = manager.get_pvc_volume(pvc_crd)
            print(hpe3par_cli)
            volume = manager.get_volume_from_array(hpe3par_cli, volume_name)
            assert volume is not None, "Volume is not created on 3PAR for pvc %s " % volume_name
            print(volume)
            flag, failure_cause = manager.verify_volume_properties_3par(
                volume,
                size=size,
                provisioning=provisioning,
                compression=compression,
                cpg=cpg_name)
            assert flag is True, "Volume properties verification at array is failed for %s" % failure_cause

        assert manager.delete_pvc(pvc.metadata.name)

        assert manager.check_if_deleted(timeout, pvc.metadata.name, "PVC", namespace=pvc.metadata.namespace) is True, \
            "PVC %s is not deleted yet " % pvc.metadata.name

        if status == 'ProvisioningSucceeded':
            assert manager.check_if_crd_deleted(pvc_obj.spec.volume_name, "hpevolumeinfos") is True, \
                "CRD %s of %s is not deleted yet. Taking longer..." % (pvc_obj.spec.volume_name, 'hpevolumeinfos')

            assert manager.verify_delete_volume_on_3par(hpe3par_cli, volume_name), \
                "Volume %s from 3PAR for PVC %s is not deleted" % (volume_name, pvc.metadata.name)

        assert manager.delete_sc(sc.metadata.name) is True

        assert manager.check_if_deleted(timeout, sc.metadata.name, "SC") is True, "SC %s is not deleted yet " \
                                                                                  % sc.metadata.name

        assert manager.delete_secret(secret.metadata.name,
                                     secret.metadata.namespace) is True

        assert manager.check_if_deleted(timeout, secret.metadata.name, "Secret", namespace=secret.metadata.namespace) is True, \
            "Secret %s is not deleted yet " % secret.metadata.name

    except Exception as e:
        print("Exception in pvc_create_verify :: %s" % e)
        #logging.error("Exception in test_thin_absent :: %s" % e)
        """if step == 'pvc':
            manager.delete_pvc(pvc.metadata.name)
            manager.delete_sc(sc.metadata.name)
            manager.delete_secret(secret.metadata.name, secret.metadata.namespace)
        if step == 'sc':
            manager.delete_sc(sc.metadata.name)
            manager.delete_secret(secret.metadata.name, secret.metadata.namespace)
        if step == 'secret':
            manager.delete_secret(secret.metadata.name, secret.metadata.namespace)"""
        raise e

    finally:
        hpe3par_cli.logout()
        cleanup(secret, sc, pvc, None)
Example 13
def test_snapshot():
    secret = None
    sc = None
    pvc = None
    pod = None
    try:
        base_yml = "YAML/source-pvc-snap.yml"
        hpe3par_cli = manager.get_3par_cli_client(base_yml)
        array_ip, array_uname, array_pwd, protocol = manager.read_array_prop(
            base_yml)
        hpe3par_version = manager.get_array_version(hpe3par_cli)
        print(
            "\n########################### test_snapshot::%s::%s ###########################"
            % (protocol, hpe3par_version[0:5]))
        """logging.info("\n########################### test_clone::%s::%s###########################" %
                     (protocol, hpe3par_version))"""
        secret = manager.create_secret(base_yml)
        step = "secret"
        sc = manager.create_sc(base_yml)
        step = "sc"
        pvc = manager.create_pvc(base_yml)
        step = "pvc"
        flag, pvc_obj = manager.check_status(timeout,
                                             pvc.metadata.name,
                                             kind='pvc',
                                             status='Bound',
                                             namespace=pvc.metadata.namespace)
        assert flag is True, "PVC %s status check timed out, not in Bound state yet..." % pvc_obj.metadata.name

        pvc_crd = manager.get_pvc_crd(pvc_obj.spec.volume_name)
        # print(pvc_crd)
        volume_name = manager.get_pvc_volume(pvc_crd)
        print(hpe3par_cli)
        volume = manager.get_volume_from_array(hpe3par_cli, volume_name)
        assert volume is not None, "Volume is not created on 3PAR for pvc %s " % volume_name
        print()

        assert manager.create_snapclass(
            "YAML/snapshot-class.yaml"
        ) is True, 'Snapclass ci-snapclass is not created.'

        assert manager.verify_snapclass_created(
        ) is True, 'Snapclass ci-snapclass is not found in crd list.'

        assert manager.create_snapshot(
            "YAML/snapshot.yaml"
        ) is True, 'Snapshot ci-pvc-snapshot is not created.'

        assert manager.verify_snapshot_created(
        ) is True, 'Snapshot ci-pvc-snapshot is not found in crd list.'

        flag, snap_uid = manager.verify_snapshot_ready()
        assert flag is True, "Snapshot ci-pvc-snapshot is not ready to use"

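        # Backend snapshot volumes are looked up as "snapshot-<uid>", truncated to
        # 31 characters (assumed array name-length limit).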
        snap_uid = "snapshot-" + snap_uid
        snap_volume = manager.get_volume_from_array(hpe3par_cli,
                                                    snap_uid[0:31])
        snap_volume_name = snap_volume['name']
        print("\nsnap_volume :: %s " % snap_volume)
        flag, message = manager.verify_snapshot_on_3par(
            snap_volume, volume_name)
        assert flag is True, message
        print()
        assert manager.delete_snapshot(
        ), "Snapshot ci-pvc-snapshot deletion request failed"
        #sleep(180)
        assert manager.check_if_crd_deleted('ci-pvc-snapshot', "volumesnapshots") is True, \
            "CRD %s of %s is not deleted yet. Taking longer..." % ('ci-pvc-snapshot', 'volumesnapshots')
        #assert manager.verify_snapshot_deleted() is False, 'Snapshot CRD ci-pvc-snapshot is not deleted yet.'
        #sleep(180)
        assert manager.delete_snapclass(
        ), "Snapclass ci-snapclass deletion request failed"
        assert manager.check_if_crd_deleted('ci-snapclass', "volumesnapshotclasses") is True, \
            "CRD %s of %s is not deleted yet. Taking longer..." % ('ci-snapclass', 'volumesnapshotclasses')
        #sleep(180)
        #assert manager.verify_snapclass_deleted is False, 'Snapclass CRD ci-snapclass is not deleted yet.'
        #sleep(180)
        assert manager.verify_delete_volume_on_3par(hpe3par_cli, snap_volume_name) is True, \
            "Snap Volume %s from 3PAR for PVC %s is not deleted" % (snap_volume_name, pvc.metadata.name)

        assert manager.delete_pvc(pvc.metadata.name)

        assert manager.check_if_deleted(timeout, pvc.metadata.name, "PVC", namespace=pvc.metadata.namespace) is True, \
            "PVC %s is not deleted yet " % pvc.metadata.name

        assert manager.check_if_crd_deleted(pvc_obj.spec.volume_name, "hpevolumeinfos") is True, \
            "CRD %s of %s is not deleted yet. Taking longer..." % (pvc_obj.spec.volume_name, 'hpevolumeinfos')

        assert manager.verify_delete_volume_on_3par(hpe3par_cli, volume_name), \
            "Volume %s from 3PAR for PVC %s is not deleted" % (volume_name, pvc.metadata.name)

        assert manager.delete_sc(sc.metadata.name) is True

        assert manager.check_if_deleted(timeout, sc.metadata.name, "SC") is True, "SC %s is not deleted yet " \
                                                                                  % sc.metadata.name

        assert manager.delete_secret(secret.metadata.name,
                                     secret.metadata.namespace) is True

        assert manager.check_if_deleted(timeout, secret.metadata.name, "Secret",
                                        namespace=secret.metadata.namespace) is True, \
            "Secret %s is not deleted yet " % secret.metadata.name

    except Exception as e:
        print("Exception in test_snapshot :: %s" % e)
        #logging.error("Exception in test_snapshot :: %s" % e)
        """if step == 'pvc':
            manager.delete_pvc(pvc.metadata.name)
            manager.delete_sc(sc.metadata.name)
            manager.delete_secret(secret.metadata.name, secret.metadata.namespace)
        if step == 'sc':
            manager.delete_sc(sc.metadata.name)
            manager.delete_secret(secret.metadata.name, secret.metadata.namespace)
        if step == 'secret':
            manager.delete_secret(secret.metadata.name, secret.metadata.namespace)"""
        raise e

    finally:
        hpe3par_cli.logout()
        cleanup(secret, sc, pvc, None)
        cleanup_snapshot()
Example 14
def test_clone():
    secret = None
    sc = None
    pvc = None
    pod = None
    clone_pvc = None
    try:
        base_yml = "YAML//base-pvc-clone.yml"
        clone_yml = "YAML/test-clone.yml"
        hpe3par_cli = manager.get_3par_cli_client(base_yml)
        array_ip, array_uname, array_pwd, protocol = manager.read_array_prop(
            base_yml)
        hpe3par_version = manager.get_array_version(hpe3par_cli)
        print(
            "\n########################### test_clone::%s::%s ###########################"
            % (protocol, hpe3par_version[0:5]))
        """logging.info("\n########################### test_clone::%s::%s###########################" %
                     (protocol, hpe3par_version))"""
        secret = manager.create_secret(base_yml)
        step = "secret"
        sc = manager.create_sc(base_yml)
        step = "sc"
        pvc = manager.create_pvc(base_yml)
        step = "pvc"
        flag, pvc_obj = manager.check_status(timeout,
                                             pvc.metadata.name,
                                             kind='pvc',
                                             status='Bound',
                                             namespace=pvc.metadata.namespace)
        assert flag is True, "PVC %s status check timed out, not in Bound state yet..." % pvc_obj.metadata.name

        pvc_crd = manager.get_pvc_crd(pvc_obj.spec.volume_name)
        #print(pvc_crd)
        volume_name = manager.get_pvc_volume(pvc_crd)
        print(hpe3par_cli)
        volume = manager.get_volume_from_array(hpe3par_cli, volume_name)
        assert volume is not None, "Volume is not created on 3PAR for pvc %s " % volume_name

        clone_pvc = manager.create_pvc(clone_yml)
        step = "clone_pvc"
        flag, clone_pvc_obj = manager.check_status(
            timeout,
            clone_pvc.metadata.name,
            kind='pvc',
            status='Bound',
            namespace=clone_pvc.metadata.namespace)
        assert flag is True, "PVC %s status check timed out, not in Bound state yet..." % clone_pvc_obj.metadata.name

        assert manager.verify_clone_crd_status(clone_pvc_obj.spec.volume_name) is True, \
            "Clone PVC CRD is not yet completed"
        """assert manager.verify_volume_properties(volume, provisioning='thin', compression='true') is True, \
            "tpvv no comp volume verification failed""" ""
        clone_pvc_crd = manager.get_pvc_crd(clone_pvc_obj.spec.volume_name)
        # print(pvc_crd)
        clone_volume_name = manager.get_pvc_volume(clone_pvc_crd)

        assert manager.delete_pvc(clone_pvc.metadata.name)

        assert manager.check_if_deleted(timeout, clone_pvc.metadata.name, "PVC", namespace=clone_pvc.metadata.namespace)\
               is True, "Clone PVC %s is not deleted yet " % clone_pvc.metadata.name

        assert manager.check_if_crd_deleted(clone_pvc_obj.spec.volume_name, "hpevolumeinfos") is True, \
            "Clone PVC CRD %s of %s is not deleted yet. Taking longer..." % (clone_pvc_obj.spec.volume_name, 'hpevolumeinfos')

        assert manager.verify_delete_volume_on_3par(hpe3par_cli, clone_volume_name), \
            "Volume %s from 3PAR for PVC %s is not deleted" % (clone_volume_name, clone_pvc.metadata.name)

        assert manager.delete_pvc(pvc.metadata.name)

        assert manager.check_if_deleted(timeout, pvc.metadata.name, "PVC", namespace=pvc.metadata.namespace) is True, \
            "PVC %s is not deleted yet " % pvc.metadata.name

        assert manager.check_if_crd_deleted(pvc_obj.spec.volume_name, "hpevolumeinfos") is True, \
            "CRD %s of %s is not deleted yet. Taking longer..." % (pvc_obj.spec.volume_name, 'hpevolumeinfos')

        sleep(30)
        assert manager.verify_delete_volume_on_3par(hpe3par_cli, volume_name), \
            "Volume %s from 3PAR for PVC %s is not deleted" % (volume_name, pvc.metadata.name)

        assert manager.delete_sc(sc.metadata.name) is True

        assert manager.check_if_deleted(timeout, sc.metadata.name, "SC") is True, "SC %s is not deleted yet " \
                                                                                  % sc.metadata.name

        assert manager.delete_secret(secret.metadata.name,
                                     secret.metadata.namespace) is True

        assert manager.check_if_deleted(timeout, secret.metadata.name, "Secret", namespace=secret.metadata.namespace) is True, \
            "Secret %s is not deleted yet " % secret.metadata.name

    except Exception as e:
        print("Exception in test_clone :: %s" % e)
        #logging.error("Exception in test_clone :: %s" % e)
        """if step == 'pvc':
            manager.delete_pvc(pvc.metadata.name)
            manager.delete_sc(sc.metadata.name)
            manager.delete_secret(secret.metadata.name, secret.metadata.namespace)
        if step == 'sc':
            manager.delete_sc(sc.metadata.name)
            manager.delete_secret(secret.metadata.name, secret.metadata.namespace)
        if step == 'secret':
            manager.delete_secret(secret.metadata.name, secret.metadata.namespace)"""
        raise e

    finally:
        hpe3par_cli.logout()
        cleanup(None, None, clone_pvc, None)
        cleanup(secret, sc, pvc, None)
Example 15
def create_virtual_copyOf(base_yml, snap_yml):
    #base_yml = '%s/virtual-copy/virtual-copy-base-vol.yml' % globals.yaml_dir
    #snap_yml = '%s/virtual-copy/virtual-copy-snap-vol.yml' % globals.yaml_dir
    pvc_snap = None
    sc_snap = None
    snap_pod = None
    sc = None
    pvc = None
    pod = None
    isPresent = False
    isValid = False
    try:
        with open(base_yml, "r") as ymlfile:
            elements = list(yaml.safe_load_all(ymlfile))
            for el in elements:
                if str(el.get('kind')) == "StorageClass":
                    cpg = el['parameters']['cpg']
                    snapCPG = el['parameters']['snapCpg']
                    provisioningType = el['parameters']['provisioning_type']
                    compression = el['parameters']['compression']
                    break

        #array_ip, array_uname, array_pwd, protocol = manager.read_array_prop(base_yml)
        # Creating base volume now in CSI
        sc = manager.create_sc(base_yml)
        pvc = manager.create_pvc(base_yml)
        logging.getLogger().info("Check in events if volume is created...")
        flag, base_pvc_obj = manager.check_status(
            30,
            pvc.metadata.name,
            kind='pvc',
            status='Bound',
            namespace=pvc.metadata.namespace)
        assert flag is True, "PVC %s status check timed out, not in Bound state yet..." % base_pvc_obj.metadata.name

        # Export base volume
        pod = manager.create_pod(base_yml)
        flag, base_pod_obj = manager.check_status(
            timeout,
            pod.metadata.name,
            kind='pod',
            status='Running',
            namespace=pod.metadata.namespace)
        assert flag is True, "Pod %s status check timed out, not in Running state yet..." % pod.metadata.name

        time.sleep(20)

        # Checking base volume data
        command = ['/bin/sh', '-c', 'ls -l /export']
        data = manager.hpe_connect_pod_container(pod.metadata.name, command)
        if any("mydata.txt" in x for x in data.split('\n')):
            isPresent = True
        assert isPresent is True, "File not present in base volume"

        # Creating snap volume in CSI
        sc_snap = manager.create_sc(snap_yml)
        pvc_snap = manager.create_pvc(snap_yml)
        logging.getLogger().info("Check in events if volume is created...")
        flag, snap_pvc_obj = manager.check_status(
            30,
            pvc_snap.metadata.name,
            kind='pvc',
            status='Bound',
            namespace=pvc_snap.metadata.namespace)
        assert flag is True, "Snapshot PVC %s status check timed out, not in Bound state yet..." % snap_pvc_obj.metadata.name

        #Export snapshot volume
        snap_pod = manager.create_pod(snap_yml)
        flag, snap_pod_obj = manager.check_status(
            timeout,
            snap_pod.metadata.name,
            kind='pod',
            status='Running',
            namespace=snap_pod.metadata.namespace)
        assert flag is True, "Snapshot volume pod mount %s status check timed out, not in Running state yet..." % snap_pod.metadata.name
        logging.getLogger().info("Checking snapshot volume data")
        time.sleep(20)

        # Validating data on snapshot volume
        isPresent = False
        command = ['/bin/sh', '-c', 'ls -l /export']
        snap_data = manager.hpe_connect_pod_container(snap_pod.metadata.name,
                                                      command)
        if any("mydata.txt" in x for x in snap_data.split('\n')):
            isPresent = True
        assert isPresent is True, "File on base volume not found in snap volume"

        isPresent = False
        if any("mysnapdata.txt" in x for x in snap_data.split('\n')):
            isPresent = True
        assert isPresent is True, "File not present in snap volume"
        logging.getLogger().info("snapshot volume data check successfull")
        time.sleep(10)

        #Validate snapshot pvc crd
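        # Dynamically provisioned PVs are named "pvc-<uid>", which is how the
        # HPEVolumeInfo CRD of the snapshot volume is looked up here.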
        pvc_crd = manager.get_pvc_crd("pvc-" + snap_pvc_obj.metadata.uid)
        assert pvc_crd['spec']['record'][
            'BaseSnapshotId'] == "pvc-" + snap_pvc_obj.metadata.uid, "Base snapshot id is incorrect %s" % pvc_crd[
                'spec']['record']['BaseSnapshotId']
        assert pvc_crd['spec']['record'][
            'ParentVolumeId'] == "pvc-" + base_pvc_obj.metadata.uid, "Parent id is incorrect %s" % pvc_crd[
                'spec']['record']['ParentVolumeId']
        logging.getLogger().info("Volume crd check successfull")

        #Validating base volume properties
        #hpe3par_cli = manager.get_3par_cli_client(base_yml)
        base_volume = manager.get_volume_from_array(
            globals.hpe3par_cli,
            pvc_crd['spec']['record']['ParentBackendName'])
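        # Major array version below 4 is treated as 3PAR, 4 and above as Primera;
        # each uses its own volume-property verification helper.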
        if int(globals.hpe3par_version.split(".")[0]) < 4:
            isValid = manager.verify_volume_properties_3par(
                base_volume,
                cpg=cpg,
                provisioning=provisioningType,
                compression=compression)
            assert isValid[0] is True, "Validation of base volume failed"
        else:
            isValid = manager.verify_volume_properties_primera(
                base_volume,
                cpg=cpg,
                provisioning=provisioningType,
                compression=compression)
            assert isValid is True, "Validation of base volume failed"
        logging.getLogger().info("base volume check on the array successfull")

        #Validating snapshot volume properties
        #hpe3par_cli = manager.get_3par_cli_client(snap_yml)
        snap_volume = manager.get_volume_from_array(
            globals.hpe3par_cli, pvc_crd['spec']['record']['Name'])

        isValid = manager.verify_volume_properties(
            snap_volume,
            name=pvc_crd['spec']['record']['Name'],
            copyOf=pvc_crd['spec']['record']['ParentBackendName'],
            snapCPG=snapCPG)
        assert isValid is True, "Validation of snapshot volume failed"
        logging.getLogger().info(
            "snapshot volume check on the array successful")
        time.sleep(20)

        # Deleting created resources
        manager.hpe_delete_pod_object_by_name(snap_pod.metadata.name,
                                              snap_pod.metadata.namespace)
        flag = manager.check_if_deleted(180,
                                        snap_pod.metadata.name,
                                        "Pod",
                                        namespace=snap_pod.metadata.namespace)
        assert flag is True, "POD %s delete status check timedout..." % snap_pod.metadata.name

        manager.delete_pvc(snap_pvc_obj.metadata.name)
        flag = manager.check_if_deleted(
            2,
            snap_pvc_obj.metadata.name,
            "PVC",
            namespace=snap_pvc_obj.metadata.namespace)
        assert flag is True, "PVC %s delete status check timedout..." % snap_pvc_obj.metadata.name

        manager.delete_sc(sc_snap.metadata.name)
        flag = manager.check_if_deleted(2, sc_snap.metadata.name, "SC", None)
        assert flag is True, "SC %s delete status check timedout..." % sc_snap.metadata.name

        manager.hpe_delete_pod_object_by_name(base_pod_obj.metadata.name,
                                              base_pod_obj.metadata.namespace)
        flag = manager.check_if_deleted(
            180,
            base_pod_obj.metadata.name,
            "Pod",
            namespace=base_pod_obj.metadata.namespace)
        assert flag is True, "POD %s delete status check timedout..." % base_pod_obj.metadata.name

        manager.delete_pvc(base_pvc_obj.metadata.name)
        flag = manager.check_if_deleted(
            2,
            base_pvc_obj.metadata.name,
            "PVC",
            namespace=base_pvc_obj.metadata.namespace)
        assert flag is True, "PVC %s delete status check timed out..." % base_pvc_obj.metadata.name

        manager.delete_sc(sc.metadata.name)
        flag = manager.check_if_deleted(2, sc.metadata.name, "SC", None)
        assert flag is True, "SC %s delete status check timedout..." % sc.metadata.name
        #manager.delete_secret(secret.metadata.name, secret.metadata.namespace)

    finally:
        # Now clean up sc, pvc and pod for both the snapshot and base volumes
        cleanup(sc_snap, pvc_snap, snap_pod)
        cleanup(sc, pvc, pod)
Example 16
def delete_resources(hpe3par_cli, secret, sc, pvc, pod, protocol):
    timeout = 600
    try:
        assert manager.delete_pod(pod.metadata.name, pod.metadata.namespace), "Pod %s is not deleted yet " % \
                                                                              pod.metadata.name
        assert manager.check_if_deleted(timeout, pod.metadata.name, "Pod", namespace=pod.metadata.namespace) is True, \
            "Pod %s is not deleted yet " % pod.metadata.name

        pvc_crd = manager.get_pvc_crd(pvc.spec.volume_name)
        # logging.getLogger().info(pvc_crd)
        volume_name = manager.get_pvc_volume(pvc_crd)

        if protocol == 'iscsi':
            hpe3par_vlun = manager.get_3par_vlun(hpe3par_cli, volume_name)
            iscsi_ips = manager.get_iscsi_ips(hpe3par_cli)
            flag, disk_partition = manager.verify_by_path(
                iscsi_ips, pod.spec.node_name)
            flag, ip = manager.verify_deleted_partition(
                iscsi_ips, pod.spec.node_name)
            assert flag is True, "Partition(s) not cleaned after volume deletion for iscsi-ip %s " % ip

            paths = manager.verify_deleted_multipath_entries(
                pod.spec.node_name, hpe3par_vlun)
            assert paths is None or len(
                paths) == 0, "Multipath entries are not cleaned"

            # partitions = manager.verify_deleted_lsscsi_entries(pod_obj.spec.node_name, disk_partition)
            # assert len(partitions) == 0, "lsscsi verificatio failed for vlun deletion"
            flag = manager.verify_deleted_lsscsi_entries(
                pod.spec.node_name, disk_partition)
            logging.getLogger().info(
                "flag after deleted lsscsi verification is %s " % flag)
            assert flag, "lsscsi verification failed for vlun deletion"

        # Verify crd for unpublished status
        try:
            assert manager.verify_pvc_crd_published(pvc.spec.volume_name) is False, \
                "PVC CRD %s Published is true after Pod is deleted" % pvc.spec.volume_name
            logging.getLogger().info(
                "PVC CRD published is false after pod deletion.")
            # logging.warning("PVC CRD published is false after pod deletion.")
        except Exception as e:
            logging.getLogger().info(
                "Resuming test after failure of published status check for pvc crd... \n%s"
                % e)
            # logging.error("Resuming test after failure of publishes status check for pvc crd... \n%s" % e)
        assert manager.delete_pvc(pvc.metadata.name)

        assert manager.check_if_deleted(timeout, pvc.metadata.name, "PVC", namespace=pvc.metadata.namespace) is True, \
            "PVC %s is not deleted yet " % pvc.metadata.name

        # pvc_crd = manager.get_pvc_crd(pvc_obj.spec.volume_name)
        # logging.getLogger().info("PVC crd after PVC object deletion :: %s " % pvc_crd)
        assert manager.check_if_crd_deleted(pvc.spec.volume_name, "hpevolumeinfos") is True, \
            "CRD %s of %s is not deleted yet. Taking longer..." % (pvc.spec.volume_name, 'hpevolumeinfos')
        sleep(30)
        assert manager.verify_delete_volume_on_3par(hpe3par_cli, volume_name), \
            "Volume %s from 3PAR for PVC %s is not deleted" % (volume_name, pvc.metadata.name)

        assert manager.delete_sc(sc.metadata.name) is True

        assert manager.check_if_deleted(timeout, sc.metadata.name, "SC") is True, "SC %s is not deleted yet " \
                                                                                  % sc.metadata.name

        assert manager.delete_secret(secret.metadata.name,
                                     secret.metadata.namespace) is True

        assert manager.check_if_deleted(timeout, secret.metadata.name, "Secret",
                                        namespace=secret.metadata.namespace) is True, \
            "Secret %s is not deleted yet " % secret.metadata.name
    except Exception as e:
        logging.getLogger().info("Exception in delete_resource() :: %s " % e)
def test_performance_serial():
    try:
        global node_to_drain
        global pod_status_check_done
        global node_to_reboot
        global event
        global path_base_yaml
        global hpe3par_cli
        global access_protocol

        path_base_yaml = '%s/scale/' % globals.yaml_dir
        path_pvc_yaml = path_base_yaml + 'pvc'
        path_dep_yaml = path_base_yaml + 'dep'
        path_additional_pvc_yaml = path_base_yaml + 'additional_pvc'
        path_additional_dep_yaml = path_base_yaml + 'additional_deps'
        event = 'start'
        logging.getLogger().info(
            "================================= Getting nodes in cluster =========================================="
        )
        get_nodes_info()

        # populate values from globals
        hpe3par_cli = globals.hpe3par_cli
        access_protocol = globals.access_protocol

        # Create storage class
        create_sc()

        logging.getLogger().info(
            "================================= STEP-1 Creating pvc in bulk =========================================="
        )
        create_pvc(path_pvc_yaml)

        logging.getLogger().info(
            "================================= STEP-2 Creating deployments in bulk =========================================="
        )
        create_dep(path_dep_yaml)

        # Populate list of pods from the replicaset list received from reading deployment
        populate_pod_list_from_replicaset()

        # Check if pods are in running state and verify vluns and crd status
        check_pod_status_vlun_crd(deploy_added=True)

        # drain_node('cssosbe01-196114.in.rdlabs.hpecorp.net')
        logging.getLogger().info(map_worker_nodes.keys())
        # get worker 1
        worker_1 = 0
        worker_2 = 1
        node_to_drain = list(map_worker_nodes.keys())[worker_1]
        # cordon all other nodes so that all pods will be mounted on single node only.
        if len(map_worker_nodes.keys()) > 2:
            for node in map_worker_nodes.keys():
                if node != node_to_drain and node != list(
                        map_worker_nodes.keys())[worker_2]:
                    corden_node(node)

        logging.getLogger().info(
            "================================= STEP-3 Drain worker 1 %s ======================================="
            % node_to_drain)
        # drain worker 1
        event = 'drain worker 1'
        drain_node(node_to_drain)
        # Make worker 1 schedulable
        logging.getLogger().info(
            "============================= STEP-4 uncordon worker 1 %s ===================================="
            % node_to_drain)
        #uncorden_node(node_to_drain)
        # uncordon all nodes now
        for node in map_worker_nodes.keys():
            uncorden_node(node)

        node_to_drain = list(map_worker_nodes.keys())[worker_2]
        # drain worker 2
        logging.getLogger().info(
            "================================= STEP-5 Drain worker 2 %s ======================================"
            % node_to_drain)
        event = 'drain worker 2'
        drain_node(node_to_drain)

        # Create additional pvc and deployments
        logging.getLogger().info(
            "================================= STEP-6 Creating additonal pvcs and deployments ================================"
        )

        create_pvc(path_additional_pvc_yaml)
        create_dep(path_additional_dep_yaml)
        # Populate list of pods from the replicaset list received from reading deployment
        event = 'added deployment and pvc'
        populate_pod_list_from_replicaset()

        # Check if pods are in running state and verify vluns and crd status
        check_pod_status_vlun_crd(deploy_added=True)

        # Make worker 2 schedulable
        logging.getLogger().info(
            "============================= STEP-7 uncordon worker 2 %s ===================================="
            % node_to_drain)
        uncorden_node(node_to_drain)

        # Delete some pods
        logging.getLogger().info(
            "================================= STEP-8 Delete few pods ================================"
        )
        event = 'delete pods'
        delete_random_pods()

        # Populate list of pods from the replicaset list received from reading deployment
        populate_pod_list_from_replicaset()

        # Check if pods are in running state and verify vluns and crd status
        check_pod_status_vlun_crd(pod_removed=True)

        # reboot worker 1
        node_to_reboot = list(map_worker_nodes.keys())[worker_1]
        logging.getLogger().info(
            "============================= STEP-9 reboot worker 1 %s ===================================="
            % node_to_reboot)
        event = 'reboot worker'
        reboot_flag = reboot_node(node_to_reboot)

        # wait before verifying pods
        logging.getLogger().info(
            "========= Sleeping for a min before verification...")
        sleep(60)

        if reboot_flag:
            # Populate list of pods from the replicaset list received from reading deployment
            populate_pod_list_from_replicaset()

            # Check if pods are in running state and verify vluns and crd status
            check_pod_status_vlun_crd()

    finally:
        #cleanup_deployments()
        #cleanup_PVC()
        # Delete all resources and verify if cleanup is done
        # Save vluns for each pvc to verify cleanup after pod deletion

        logging.getLogger().info(
            "================================= Deleting deployments ================================"
        )
        manager.delete_dep_bulk(path_dep_yaml, globals.namespace)
        manager.delete_dep_bulk(path_additional_dep_yaml, globals.namespace)

        # sleep for 2 mins before verification
        logging.getLogger().info(
            "Sleeping for 2 mins before verification of deleted pods...")
        sleep(120)
        verify_pod_deletion_bulk()

        logging.getLogger().info(
            "================================= Deleting PVCs ================================"
        )
        manager.delete_pvc_bulk(path_pvc_yaml, globals.namespace)
        manager.delete_pvc_bulk(path_additional_pvc_yaml, globals.namespace)
        verify_pvc_deletion_bulk()

        logging.getLogger().info(
            "================================= Deleting SC and Secret ================================"
        )
        if sc is not None and manager.check_if_deleted(
                2, sc.metadata.name, "SC",
                namespace=sc.metadata.namespace) is False:
            manager.delete_sc(sc.metadata.name)

        # uncordon all nodes
        for node in map_worker_nodes.keys():
            manager.uncorden_node(node)
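
# The drain/cordon flow above (STEP-3 through STEP-9) relies on node helpers
# (corden_node, uncorden_node, drain_node, reboot_node) defined elsewhere in
# this suite. Below is a minimal sketch of what such helpers could look like,
# assuming kubectl is reachable from the test host; the real helpers may
# instead use the Kubernetes API or ssh to the master node, as other parts of
# this suite do.
def _kubectl_sketch(args):
    # Hypothetical convenience wrapper: run kubectl and return its stdout.
    import subprocess
    return subprocess.run(["kubectl"] + list(args),
                          check=True,
                          capture_output=True,
                          text=True).stdout


def _sketch_cordon_node(node_name):
    # Mark the node unschedulable, as corden_node() is assumed to do.
    return _kubectl_sketch(["cordon", node_name])


def _sketch_uncordon_node(node_name):
    # Make the node schedulable again, as uncorden_node() is assumed to do.
    return _kubectl_sketch(["uncordon", node_name])


def _sketch_drain_node(node_name):
    # Evict pods so the deployments reschedule them on the remaining workers,
    # as drain_node() is assumed to do.
    return _kubectl_sketch(["drain", node_name, "--ignore-daemonsets",
                            "--delete-emptydir-data", "--force"])
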
def pvc_create_verify(yml):
    secret = None
    sc = None
    pvc = None
    pod = None
    try:
        """array_ip, array_uname, array_pwd = manager.read_array_prop(yml)
        hpe3par_cli = manager.get_3par_cli_client(yml)
        hpe3par_version = manager.get_array_version(hpe3par_cli)
        print("\n########################### new_method %s::%s::%s ###########################" %
              (str(yml), protocol, hpe3par_version[0:5]))"""

        sc = manager.create_sc(yml)
        pvc = manager.create_pvc(yml)

        # Check PVC status in events
        provisioning = None
        compression = None
        size = None
        is_cpg_ssd = None
        provisioning, compression, cpg_name, size = manager.get_sc_properties(
            yml)
        host_encryption = None
        host_encryption_secret_name = None
        host_encryption_secret_namespace = None
        host_SeesVLUN_set = False

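        # The loop below picks up optional StorageClass parameters from the test
        # manifest. Illustrative parameters block (the secret name and namespace
        # shown here are hypothetical, not taken from this repo):
        #
        #   parameters:
        #     hostEncryption: "true"
        #     hostEncryptionSecretName: "enc-secret"
        #     hostEncryptionSecretNamespace: "hpe-storage"
        #     hostSeesVLUN: "true"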
        with open(yml) as f:
            elements = list(yaml.safe_load_all(f))
            for el in elements:
                # print("======== kind :: %s " % str(el.get('kind')))
                if str(el.get('kind')) == "StorageClass":
                    if 'hostEncryption' in el['parameters']:
                        host_encryption = el['parameters']['hostEncryption']
                    if 'hostEncryptionSecretName' in el['parameters']:
                        host_encryption_secret_name = el['parameters'][
                            'hostEncryptionSecretName']
                    if 'hostEncryptionSecretNamespace' in el['parameters']:
                        host_encryption_secret_namespace = el['parameters'][
                            'hostEncryptionSecretNamespace']
                    if 'hostSeesVLUN' in el['parameters']:
                        host_SeesVLUN_set = True
                        hostSeesVLUN = el['parameters']['hostSeesVLUN']

        logging.getLogger().info("Check in events if volume is created...")
        status, message = manager.check_status_from_events(
            kind='PersistentVolumeClaim',
            name=pvc.metadata.name,
            namespace=pvc.metadata.namespace,
            uid=pvc.metadata.uid)
        logging.getLogger().info("Check if test passed...")
        flag = manager.is_test_passed_with_encryption(
            status=status,
            enc_secret_name=host_encryption_secret_name,
            yml=yml)
        logging.getLogger().info("Test passed :: %s " % flag)
        assert flag is True, message

        if status == 'ProvisioningSucceeded':
            flag, pvc_obj = manager.check_status(
                timeout,
                pvc.metadata.name,
                kind='pvc',
                status='Bound',
                namespace=pvc.metadata.namespace)
            assert flag is True, "PVC %s status check timed out, not in Bound state yet..." % pvc_obj.metadata.name

            pvc_crd = manager.get_pvc_crd(pvc_obj.spec.volume_name)
            volume_name = manager.get_pvc_volume(pvc_crd)
            logging.getLogger().info(globals.hpe3par_cli)
            volume = manager.get_volume_from_array(globals.hpe3par_cli,
                                                   volume_name)
            assert volume is not None, "Volume %s is not created on 3PAR for the PVC" % volume_name
            logging.getLogger().info(volume)
            flag, failure_cause = manager.verify_volume_properties_3par(
                volume,
                size=size,
                provisioning=provisioning,
                compression=compression,
                cpg=cpg_name)
            assert flag is True, "Volume properties verification at array is failed for %s" % failure_cause
            pod = manager.create_pod(yml)

            flag, pod_obj = manager.check_status(
                timeout,
                pod.metadata.name,
                kind='pod',
                status='Running',
                namespace=pod.metadata.namespace)

            assert flag is True, "Pod %s status check timed out, not in Running state yet..." % pod.metadata.name

            # Verify CRD for published status
            assert manager.verify_pvc_crd_published(pvc_obj.spec.volume_name) is True, \
                "PVC CRD %s Published is false after Pod is running" % pvc_obj.spec.volume_name

            hpe3par_vlun = manager.get_3par_vlun(globals.hpe3par_cli,
                                                 volume_name)
            assert manager.verify_pod_node(hpe3par_vlun, pod_obj) is True, \
                "Node for pod received from 3par and cluster do not match"

            iscsi_ips = manager.get_iscsi_ips(globals.hpe3par_cli)

            # Adding hostSeesVLUN check
            hpe3par_active_vlun = manager.get_all_active_vluns(
                globals.hpe3par_cli, volume_name)
            if host_SeesVLUN_set:
                for vlun_item in hpe3par_active_vlun:
                    if hostSeesVLUN == "true":
                        assert vlun_item[
                            'type'] == globals.HOST_TYPE, "hostSeesVLUN parameter validation failed for volume %s" % pvc_obj.spec.volume_name
                    else:
                        assert vlun_item[
                            'type'] == globals.MATCHED_SET, "hostSeesVLUN parameter validation failed for volume %s" % pvc_obj.spec.volume_name
                logging.getLogger().info(
                    "Successfully completed hostSeesVLUN parameter check")

            # Read pvc crd again after pod creation. It will have IQN and LunId.
            pvc_crd = manager.get_pvc_crd(pvc_obj.spec.volume_name)
            flag, disk_partition = manager.verify_by_path(
                iscsi_ips, pod_obj.spec.node_name, pvc_crd, hpe3par_vlun)
            assert flag is True, "partition not found"
            logging.getLogger().info("disk_partition received are %s " %
                                     disk_partition)

            flag, disk_partition_mod, partition_map = manager.verify_multipath(
                hpe3par_vlun, disk_partition)
            assert flag is True, "multipath check failed"
            """print("disk_partition after multipath check are %s " % disk_partition)
            print("disk_partition_mod after multipath check are %s " % disk_partition_mod)"""
            logging.getLogger().info(
                "disk_partition after multipath check are %s " %
                disk_partition)
            logging.getLogger().info(
                "disk_partition_mod after multipath check are %s " %
                disk_partition_mod)
            assert manager.verify_partition(
                disk_partition_mod), "partition mismatch"

            assert manager.verify_lsscsi(
                pod_obj.spec.node_name,
                disk_partition), "lsscsi verificatio failed"
            assert manager.delete_pod(pod.metadata.name, pod.metadata.namespace), "Pod %s is not deleted yet " % \
                                                                                  pod.metadata.name
            assert manager.check_if_deleted(timeout, pod.metadata.name, "Pod",
                                            namespace=pod.metadata.namespace) is True, \
                "Pod %s is not deleted yet " % pod.metadata.name

            flag, ip = manager.verify_deleted_partition(
                iscsi_ips, pod_obj.spec.node_name, hpe3par_vlun, pvc_crd)
            assert flag is True, "Partition(s) not cleaned after volume deletion for iscsi-ip %s " % ip

            paths = manager.verify_deleted_multipath_entries(
                pod_obj.spec.node_name, hpe3par_vlun, disk_partition)
            assert paths is None or len(
                paths) == 0, "Multipath entries are not cleaned"

            # partitions = manager.verify_deleted_lsscsi_entries(pod_obj.spec.node_name, disk_partition)
            # assert len(partitions) == 0, "lsscsi verification failed for vlun deletion"
            flag = manager.verify_deleted_lsscsi_entries(
                pod_obj.spec.node_name, disk_partition)
            # print("flag after deleted lsscsi verification is %s " % flag)
            logging.getLogger().info(
                "flag after deleted lsscsi verification is %s " % flag)
            assert flag, "lsscsi verification failed for vlun deletion"

            # Verify crd for unpublished status
            try:
                assert manager.verify_pvc_crd_published(pvc_obj.spec.volume_name) is False, \
                    "PVC CRD %s Published is true after Pod is deleted" % pvc_obj.spec.volume_name
                # print("PVC CRD published is false after pod deletion.")
                logging.getLogger().info(
                    "PVC CRD published is false after pod deletion.")
                # logging.warning("PVC CRD published is false after pod deletion.")
            except Exception as e:
                # print("Resuming test after failure of publishes status check for pvc crd... \n%s" % e)
                logging.getLogger().warning(
                    "Resuming test after failure of publishes status check for pvc crd... \n%s"
                    % e)
                # logging.error("Resuming test after failure of publishes status check for pvc crd... \n%s" % e)
            assert manager.delete_pvc(pvc.metadata.name)

            assert manager.check_if_deleted(timeout, pvc.metadata.name, "PVC",
                                            namespace=pvc.metadata.namespace) is True, \
                "PVC %s is not deleted yet " % pvc.metadata.name

            # pvc_crd = manager.get_pvc_crd(pvc_obj.spec.volume_name)
            # print("PVC crd after PVC object deletion :: %s " % pvc_crd)
            assert manager.check_if_crd_deleted(pvc_obj.spec.volume_name, "hpevolumeinfos") is True, \
                "CRD %s of %s is not deleted yet. Taking longer..." % (pvc_obj.spec.volume_name, 'hpevolumeinfos')

            assert manager.verify_delete_volume_on_3par(globals.hpe3par_cli, volume_name), \
                "Volume %s from 3PAR for PVC %s is not deleted" % (volume_name, pvc.metadata.name)

            assert manager.delete_sc(sc.metadata.name) is True

            assert manager.check_if_deleted(timeout, sc.metadata.name, "SC",
                                            sc.metadata.namespace) is True, "SC %s is not deleted yet " \
                                                                            % sc.metadata.name
            """assert manager.delete_secret(secret.metadata.name, secret.metadata.namespace) is True

            assert manager.check_if_deleted(timeout, secret.metadata.name, "Secret", namespace=secret.metadata.namespace) is True, \
                "Secret %s is not deleted yet " % secret.metadata.name"""

    except Exception as e:
        # print("Exception in test_publish :: %s" % e)
        logging.getLogger().error("Exception in test_publish :: %s" % e)
        # logging.error("Exception in test_publish :: %s" % e)
        """if step == 'pvc':
            manager.delete_pvc(pvc.metadata.name)
            manager.delete_sc(sc.metadata.name)
            manager.delete_secret(secret.metadata.name, secret.metadata.namespace)
        if step == 'sc':
            manager.delete_sc(sc.metadata.name)
            manager.delete_secret(secret.metadata.name, secret.metadata.namespace)
        if step == 'secret':
            manager.delete_secret(secret.metadata.name, secret.metadata.namespace)"""
        raise e

    finally:
        #hpe3par_cli.logout()
        cleanup(None, sc, pvc, pod)
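
# Usage sketch (the manifest file names below are hypothetical): pvc_create_verify()
# is intended to be driven by small per-manifest tests that pass it a YAML file
# under globals.yaml_dir, for example:
#
#   def test_pvc_thin_volume():
#       pvc_create_verify("%s/pvc-thin.yml" % globals.yaml_dir)
#
#   def test_pvc_host_encryption():
#       pvc_create_verify("%s/pvc-host-encryption.yml" % globals.yaml_dir)
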
Example n. 19
0
def test_publish_sanity():
    sc = None
    pvc = None
    pod = None
    try:
        yml = "%s/test-publish.yml" % globals.yaml_dir
        #array_ip, array_uname, array_pwd, protocol = manager.read_array_prop(yml)
        #hpe3par_cli = manager.get_3par_cli_client(yml)
        #hpe3par_version = manager.get_array_version(hpe3par_cli)
        #print("\n########################### test_publish::%s::%s ###########################" %
        #      (protocol, hpe3par_version[0:5]))
        """logging.error("\n########################### test_publish::%s::%s###########################" %
                      (protocol, hpe3par_version))"""
        #secret = manager.create_secret(yml)
        #step = "secret"
        sc = manager.create_sc(yml)
        #step = "sc"
        pvc = manager.create_pvc(yml)
        #step = "pvc"
        flag, pvc_obj = manager.check_status(timeout,
                                             pvc.metadata.name,
                                             kind='pvc',
                                             status='Bound',
                                             namespace=pvc.metadata.namespace)
        assert flag is True, "PVC %s status check timed out, not in Bound state yet..." % pvc_obj.metadata.name
        pvc_crd = manager.get_pvc_crd(pvc_obj.spec.volume_name)
        #print(pvc_crd)
        volume_name = manager.get_pvc_volume(pvc_crd)
        #print("hpe3par_cli object :: %s " % hpe3par_cli)
        volume = manager.get_volume_from_array(globals.hpe3par_cli,
                                               volume_name)
        assert volume is not None, "Volume %s is not created on 3PAR for the PVC" % volume_name
        """assert manager.verify_volume_properties(volume, provisioning='thin', compression='true') is True, \
            "tpvv no comp volume verification failed""" ""

        pod = manager.create_pod(yml)

        flag, pod_obj = manager.check_status(timeout,
                                             pod.metadata.name,
                                             kind='pod',
                                             status='Running',
                                             namespace=pod.metadata.namespace)

        assert flag is True, "Pod %s status check timed out, not in Running state yet..." % pod.metadata.name

        # Verify CRD for published status
        assert manager.verify_pvc_crd_published(pvc_obj.spec.volume_name) is True, \
            "PVC CRD %s Published is false after Pod is running" % pvc_obj.spec.volume_name

        hpe3par_vlun = manager.get_3par_vlun(globals.hpe3par_cli, volume_name)
        assert manager.verify_pod_node(hpe3par_vlun, pod_obj) is True, \
            "Node for pod received from 3par and cluster do not match"

        iscsi_ips = manager.get_iscsi_ips(globals.hpe3par_cli)

        # Read pvc crd again after pod creation. It will have IQN and LunId.
        pvc_crd = manager.get_pvc_crd(pvc_obj.spec.volume_name)
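        # The checks that follow validate host-side attachment on the pod's node:
        # by-path device entries for each iSCSI target IP of the vlun, the
        # multipath map built on top of those paths, the resulting partition
        # set, and finally the LUN's visibility in lsscsi output.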
        flag, disk_partition = manager.verify_by_path(iscsi_ips,
                                                      pod_obj.spec.node_name,
                                                      pvc_crd, hpe3par_vlun)
        assert flag is True, "partition not found"
        logging.getLogger().info("disk_partition received are %s " %
                                 disk_partition)

        flag, disk_partition_mod, partition_map = manager.verify_multipath(
            hpe3par_vlun, disk_partition)
        assert flag is True, "multipath check failed"
        """print("disk_partition after multipath check are %s " % disk_partition)
        print("disk_partition_mod after multipath check are %s " % disk_partition_mod)"""
        logging.getLogger().info(
            "disk_partition after multipath check are %s " % disk_partition)
        logging.getLogger().info(
            "disk_partition_mod after multipath check are %s " %
            disk_partition_mod)
        assert manager.verify_partition(
            disk_partition_mod), "partition mismatch"

        assert manager.verify_lsscsi(
            pod_obj.spec.node_name,
            disk_partition), "lsscsi verificatio failed"

        assert manager.delete_pod(pod.metadata.name, pod.metadata.namespace), "Pod %s is not deleted yet " % \
                                                                              pod.metadata.name
        assert manager.check_if_deleted(timeout, pod.metadata.name, "Pod", namespace=pod.metadata.namespace) is True, \
            "Pod %s is not deleted yet " % pod.metadata.name

        flag, ip = manager.verify_deleted_partition(iscsi_ips,
                                                    pod_obj.spec.node_name,
                                                    hpe3par_vlun, pvc_crd)
        assert flag is True, "Partition(s) not cleaned after volume deletion for iscsi-ip %s " % ip

        paths = manager.verify_deleted_multipath_entries(
            pod_obj.spec.node_name, hpe3par_vlun, disk_partition)
        assert paths is None or len(
            paths) == 0, "Multipath entries are not cleaned"

        # partitions = manager.verify_deleted_lsscsi_entries(pod_obj.spec.node_name, disk_partition)
        # assert len(partitions) == 0, "lsscsi verification failed for vlun deletion"
        flag = manager.verify_deleted_lsscsi_entries(pod_obj.spec.node_name,
                                                     disk_partition)
        #print("flag after deleted lsscsi verification is %s " % flag)
        logging.getLogger().info(
            "flag after deleted lsscsi verification is %s " % flag)
        assert flag, "lsscsi verification failed for vlun deletion"

        # Verify crd for unpublished status
        try:
            assert manager.verify_pvc_crd_published(pvc_obj.spec.volume_name) is False, \
                "PVC CRD %s Published is true after Pod is deleted" % pvc_obj.spec.volume_name
            #print("PVC CRD published is false after pod deletion.")
            logging.getLogger().info(
                "PVC CRD published is false after pod deletion.")
            #logging.warning("PVC CRD published is false after pod deletion.")
        except Exception as e:
            #print("Resuming test after failure of publishes status check for pvc crd... \n%s" % e)
            logging.getLogger().warning(
                "Resuming test after failure of publishes status check for pvc crd... \n%s"
                % e)
            #logging.error("Resuming test after failure of publishes status check for pvc crd... \n%s" % e)
        assert manager.delete_pvc(pvc.metadata.name)

        assert manager.check_if_deleted(timeout, pvc.metadata.name, "PVC", namespace=pvc.metadata.namespace) is True, \
            "PVC %s is not deleted yet " % pvc.metadata.name

        #pvc_crd = manager.get_pvc_crd(pvc_obj.spec.volume_name)
        #print("PVC crd after PVC object deletion :: %s " % pvc_crd)
        assert manager.check_if_crd_deleted(pvc_obj.spec.volume_name, "hpevolumeinfos") is True, \
            "CRD %s of %s is not deleted yet. Taking longer..." % (pvc_obj.spec.volume_name, 'hpevolumeinfos')

        assert manager.verify_delete_volume_on_3par(globals.hpe3par_cli, volume_name), \
            "Volume %s from 3PAR for PVC %s is not deleted" % (volume_name, pvc.metadata.name)

        assert manager.delete_sc(sc.metadata.name) is True

        assert manager.check_if_deleted(timeout, sc.metadata.name, "SC", sc.metadata.namespace) is True, \
            "SC %s is not deleted yet " % sc.metadata.name
        """assert manager.delete_secret(secret.metadata.name, secret.metadata.namespace) is True

        assert manager.check_if_deleted(timeout, secret.metadata.name, "Secret", namespace=secret.metadata.namespace) is True, \
            "Secret %s is not deleted yet " % secret.metadata.name"""

    except Exception as e:
        #print("Exception in test_publish :: %s" % e)
        logging.getLogger().error("Exception in test_publish :: %s" % e)
        #logging.error("Exception in test_publish :: %s" % e)
        """if step == 'pvc':
            manager.delete_pvc(pvc.metadata.name)
            manager.delete_sc(sc.metadata.name)
            manager.delete_secret(secret.metadata.name, secret.metadata.namespace)
        if step == 'sc':
            manager.delete_sc(sc.metadata.name)
            manager.delete_secret(secret.metadata.name, secret.metadata.namespace)
        if step == 'secret':
            manager.delete_secret(secret.metadata.name, secret.metadata.namespace)"""
        raise e

    finally:
        #hpe3par_cli.logout()
        cleanup(None, sc, pvc, pod)