def create_pvc(yml):
    """Bulk-create PVCs described in *yml*, wait for provisioning, and
    verify a backing volume exists on the 3PAR array for each PVC.

    Side effects (module globals):
      - extends ``list_pvc_obj`` with PVC objects that provisioned OK
      - records pvc-name -> array-volume-name in ``map_pvc_volume_name``

    Raises:
        AssertionError: if any PVC failed provisioning, or a backing
            volume is missing on the array.
        Exception: any error from ``manager`` is logged and re-raised.
    """
    global list_pvc_obj
    global hpe3par_cli
    global map_pvc_volume_name
    try:
        pvc_map = manager.create_pvc_bulk(yml)
        status_obj_map = manager.check_status_for_bulk('pvc', pvc_map)
        list_pvc_obj.extend(status_obj_map['ProvisioningSucceeded'])
        assert len(status_obj_map['ProvisioningFailed']
                   ) == 0, "Some PVC failed to Bound. Terminating test..."
        logging.getLogger().info("PVCs created successfully %s" %
                                 list(pvc_map.keys()))

        # Verify CRD and volume creation on array
        for pvc in list_pvc_obj:
            pvc_crd = manager.get_pvc_crd(pvc.spec.volume_name)
            volume_name = manager.get_pvc_volume(pvc_crd)
            volume = manager.get_volume_from_array(hpe3par_cli, volume_name)
            assert volume is not None, "Volume is not created on 3PAR for pvc %s " % volume_name
            # Save volume name for pvc for later usage
            map_pvc_volume_name[pvc.metadata.name] = volume_name
    except Exception as e:
        logging.getLogger().error("Exception in test_create_pvc :: %s" % e)
        raise e
    # CLEANUP: dropped a dead ``finally: pass`` clause.
# ---- Example #2 ----
def create_pod(yml, hpe3par_cli):
    """Create secret, storage class, PVC and pod from *yml*, verifying
    each stage before moving on to the next.

    Args:
        yml: path to the YAML file describing all four objects.
        hpe3par_cli: array client used to confirm the backing volume.

    Returns:
        A 5-tuple ``(ok, failed_kind, expected_state, failed_obj, objects)``
        where *objects* maps 'secret'/'sc'/'pvc'/'pod' to the created API
        objects (or None), so the caller can clean up on any outcome.

    ``timeout`` and ``manager`` are module-level names.  Unexpected
    exceptions are printed and re-raised.
    """
    try:
        secret = manager.create_secret(yml)
        sc = manager.create_sc(yml)
        pvc = manager.create_pvc(yml)
        flag, pvc_obj = manager.check_status(timeout, pvc.metadata.name, kind='pvc', status='Bound',
                                             namespace=pvc.metadata.namespace)
        if flag is False:
            return False, "PVC", 'Bound', pvc_obj, {'secret': secret, 'sc': sc, 'pvc': pvc_obj, 'pod': None}

        # Verify the backing volume exists on the array before creating the pod
        pvc_crd = manager.get_pvc_crd(pvc_obj.spec.volume_name)
        volume_name = manager.get_pvc_volume(pvc_crd)
        volume = manager.get_volume_from_array(hpe3par_cli, volume_name)
        if volume is None:
            return False, "3PAR_Volume", 'create', volume_name, {'secret': secret, 'sc': sc, 'pvc': pvc_obj, 'pod': None}

        pod = manager.create_pod(yml)
        flag, pod_obj = manager.check_status(timeout, pod.metadata.name, kind='pod', status='Running',
                                             namespace=pod.metadata.namespace)
        if flag is False:
            return False, 'POD', 'Running', pod_obj, {'secret': secret, 'sc': sc, 'pvc': pvc_obj, 'pod': pod_obj}

        return True, None, None, None, {'secret': secret, 'sc': sc, 'pvc': pvc_obj, 'pod': pod_obj}
    except Exception as e:
        print("Exception in create_pod :: %s" % e)
        raise e
    # BUG FIX: removed a dangling, unterminated  """finally:  fragment that
    # followed the except clause and made the module unparsable.
def test_override_compression():
    """Verify that a compression value set on the PVC overrides the
    storage-class value: the bound volume's compression must differ from
    the SC's, and the array must report ``compressionState == 2``.

    Always cleans up sc/pvc/pod in the ``finally`` block; any exception
    is logged and re-raised.
    """
    base_yml = '%s/override/override.yaml' % globals.yaml_dir
    timeout = globals.status_check_timeout
    sc = None
    pvc = None
    pod = None
    try:
        # Creating storage class and pvc
        sc = manager.create_sc(base_yml)
        provisioning, compression, cpg_name, snap_cpg, desc, accessProtocol = get_sc_properties(
            base_yml)
        logging.getLogger().info(
            "Volume properties set in SC, provisioning::%s compression::%s CPG::%s SNAP CPG::%s desc::%s Protocol::%s"
            % (provisioning, compression, cpg_name, snap_cpg, desc,
               accessProtocol))
        pvc = manager.create_pvc(base_yml)
        flag, base_pvc_obj = manager.check_status(
            30,
            pvc.metadata.name,
            kind='pvc',
            status='Bound',
            namespace=pvc.metadata.namespace)
        assert flag is True, "PVC %s status check timed out, not in Bound state yet..." % base_pvc_obj.metadata.name
        logging.getLogger().info("Pvc in bound state :: %s" %
                                 base_pvc_obj.metadata.name)

        # Get pvc crd details
        pvc_crd = manager.get_pvc_crd(base_pvc_obj.spec.volume_name)
        vol_name, vol_cpg, vol_snpCpg, vol_provType, vol_desc, vol_compr = manager.get_pvc_editable_properties(
            pvc_crd)

        # The override is proven by the volume's value differing from the SC's
        assert compression != vol_compr, "Override of compression parameter failed for %s" % vol_name
        logging.getLogger().info(
            "Overriden Volume properties, name::%s usrCPG::%s snpCPG::%s provType::%s compr::%s desc::%s"
            %
            (vol_name, vol_cpg, vol_snpCpg, vol_provType, vol_compr, vol_desc))

        # Get properties from the array
        hpe3par_volume = manager.get_volume_from_array(globals.hpe3par_cli,
                                                       vol_name)
        # BUG FIX: corrected "ovveride" typo in the assertion message.
        # NOTE(review): the original comment claimed compressionState is 1
        # when compression is enabled, yet the code asserts 2 — confirm the
        # expected array-side state value.
        assert hpe3par_volume[
            'compressionState'] == 2, "compressionState does not match override compression parameter"
        pod = manager.create_pod(base_yml)
        flag, pod_obj = manager.check_status(timeout,
                                             pod.metadata.name,
                                             kind='pod',
                                             status='Running',
                                             namespace=pod.metadata.namespace)
        assert flag is True, "Pod %s status check timed out, not in Running state yet..." % pod.metadata.name

    except Exception as e:
        logging.getLogger().error(
            "Exception in test_override_compression :: %s" % e)
        raise e

    finally:
        # Now cleanup secret, sc, pv, pvc, pod
        cleanup(sc, pvc, pod)
def verify_multiple_service_pod(yml, pod_list, pvc_list, sc_list, secret_list):
    """Create secret/SC/PVC/pod from *yml*, verify the PVC binds and its
    backing 3PAR volume exists, then verify the pod reaches Running.

    Each created object is appended to the corresponding ``*_list``
    argument as soon as it exists, so the caller can clean up even after
    a failure.  Returns the four lists on success; re-raises on error.

    ``globals``, ``manager`` and ``yaml`` are module-level names.
    """
    # Pull the array backend IP out of the Secret document in the YAML.
    HPE3PAR_IP = None
    with open(yml) as f:
        elements = list(yaml.safe_load_all(f))
        logging.getLogger().debug(elements)
        for el in elements:
            if str(el.get('kind')) == "Secret":
                HPE3PAR_IP = el['stringData']['backend']
                # BUG FIX: the break was previously outside the if, so only
                # the first document was ever inspected for the Secret.
                break
    secret = None
    sc = None
    pvc = None
    pod = None
    namespace = globals.namespace
    timeout = globals.status_check_timeout

    try:
        # Create secret, sc, pvc, pod — recording each as soon as it exists
        secret = manager.create_secret(yml, namespace)
        secret_list.append(secret)
        sc = manager.create_sc(yml)
        sc_list.append(sc)
        pvc = manager.create_pvc(yml)
        pvc_list.append(pvc)
        flag, pvc_obj = manager.check_status(timeout, pvc.metadata.name, kind='pvc', status='Bound',
                                             namespace=pvc.metadata.namespace)
        assert flag is True, "PVC %s status check timed out, not in Bound state yet..." % pvc_obj.metadata.name
        pvc_crd = manager.get_pvc_crd(pvc_obj.spec.volume_name)
        volume_name = manager.get_pvc_volume(pvc_crd)
        hpe3par_cli = manager.get_3par_cli_client(HPE3PAR_IP)
        volume = manager.get_volume_from_array(hpe3par_cli, volume_name)
        assert volume is not None, "Volume is not created on 3PAR for pvc %s " % volume_name

        pod = manager.create_pod(yml)
        pod_list.append(pod)
        flag, pod_obj = manager.check_status(timeout, pod.metadata.name, kind='pod', status='Running',
                                             namespace=pod.metadata.namespace)

        assert flag is True, "Pod %s status check timed out, not in Running state yet..." % pod.metadata.name

    except Exception as e:
        logging.getLogger().error("Exception in test_multiple_service_pod :: %s" % e)
        raise e

    # BUG FIX: the lists were previously returned from a ``finally`` block,
    # which silently discarded the exception re-raised above.
    return secret_list, sc_list, pvc_list, pod_list
def verify_new_brand(yml):
    """New-branding check: create a secret from *yml*, then SC/PVC/pod
    from the fixed ``new_branding`` YAMLs, verify the PVC binds and its
    backing volume exists on the array, and verify the pod runs.

    Objects are always cleaned up in the ``finally`` block; with no
    ``except`` clause, any failure propagates to the caller after
    cleanup runs.
    """
    # NOTE(review): declared but never assigned in this function — confirm
    # whether my_hpe3par_cli is still needed.
    global my_hpe3par_cli
    sc_yml = '%s/new_branding//sc.yaml' % globals.yaml_dir
    pvc_yml = '%s/new_branding//pvc.yaml' % globals.yaml_dir
    pod_yml = '%s/new_branding//pod.yaml' % globals.yaml_dir
    # Pull the array backend IP out of the Secret document in the YAML.
    HPE3PAR_IP = None
    with open(yml) as f:
        elements = list(yaml.safe_load_all(f))
        logging.getLogger().debug(elements)
        for el in elements:
            if str(el.get('kind')) == "Secret":
                HPE3PAR_IP = el['stringData']['backend']
                # BUG FIX: the break was previously outside the if, so only
                # the first document was ever inspected for the Secret.
                break
    secret = None
    sc = None
    pvc = None
    pod = None
    namespace = globals.namespace
    timeout = globals.status_check_timeout

    try:
        # Create secret sc, pvc, pod
        secret = manager.create_secret(yml, namespace)
        sc = manager.create_sc(sc_yml)
        pvc = manager.create_pvc(pvc_yml)
        flag, pvc_obj = manager.check_status(timeout, pvc.metadata.name, kind='pvc', status='Bound',
                                             namespace=pvc.metadata.namespace)
        assert flag is True, "PVC %s status check timed out, not in Bound state yet..." % pvc_obj.metadata.name
        pvc_crd = manager.get_pvc_crd(pvc_obj.spec.volume_name)
        volume_name = manager.get_pvc_volume(pvc_crd)
        hpe3par_cli = manager.get_3par_cli_client(HPE3PAR_IP)
        volume = manager.get_volume_from_array(hpe3par_cli, volume_name)
        assert volume is not None, "Volume is not created on 3PAR for pvc %s " % volume_name

        pod = manager.create_pod(pod_yml)
        flag, pod_obj = manager.check_status(timeout, pod.metadata.name, kind='pod', status='Running',
                                             namespace=pod.metadata.namespace)

        assert flag is True, "Pod %s status check timed out, not in Running state yet..." % pod.metadata.name

    finally:
        # Now cleanup sc, pv, pvc, pod
        cleanup(secret, sc, pvc, pod)
def check_pod_status_vlun_crd(deploy_added=False, pod_removed=False):
    """Wait (up to 30 minutes) for every replica set in ``list_replica_set``
    to have at least one Running pod, then verify vlun and HPE PVC CRD
    state for each running pod.

    Args:
        deploy_added: flag from the caller; referenced only in
            commented-out node checks in the current code.
        pod_removed: same — currently unused in live code.

    Side effects: rebuilds ``list_pod_obj`` and ``map_pod_node_dist``,
    fills ``map_pod_vlun``, ``map_pvc_crd`` and ``disk_partition_map``.
    Raises AssertionError on any verification failure.

    NOTE(review): ``event``, ``node_to_drain``, ``node_to_reboot``,
    ``timer`` and ``hpe3par_cli`` are used but neither defined here nor
    listed as globals — presumably module-level names; confirm.
    """
    global list_pod_name
    global map_pod_node_dist
    global map_pod_vlun
    global map_pvc_crd
    global access_protocol
    global disk_partition_map
    global list_pod_obj
    global list_replica_set
    global pod_status_check_done
    global all_pods_running_time

    try:
        iscsi_ips = manager.get_iscsi_ips(globals.hpe3par_cli)
        logging.getLogger().info("Verifying all pods in running state")
        # Start from a clean slate: previous pod objects / node distribution
        # are rebuilt below.
        list_pod_obj.clear()
        for node_name in map_pod_node_dist.keys():
            map_pod_node_dist[node_name].clear()

        # Check each pod for status
        pod_status_check_done = False
        # The 'timer' thread presumably advances all_pods_running_time while
        # pod_status_check_done is False — TODO confirm against its definition.
        thread1 = Thread(target=timer, name="timer")
        thread1.start()

        # List of replica sets those are ready
        ready_replicas = set()
        # Iterate through deployments to get replica set names
        replica_list = set(list_replica_set)
        # obj_list = {pod_obj for pod_obj in dep_map.values()}
        # Poll until every replica set has a Running pod or the 30-minute
        # budget is exhausted.
        while all_pods_running_time < 30 * 60:
            # logging.getLogger().info(f"ready_deps :: {ready_replicas}")
            # logging.getLogger().info(f"replica_list :: {replica_list}")
            if ready_replicas == replica_list:  # all deps are ready
                pod_status_check_done = True
                break
            else:
                # Only re-check replica sets not yet known to be ready.
                replica_list_to_be_checked = replica_list - ready_replicas
                logging.getLogger().info(
                    f"==========\nReplica sets to be checked if pods are created :: {replica_list_to_be_checked}\n"
                )
                for replica_set_name in replica_list_to_be_checked:
                    replica_has_running_pod = False
                    # Pods belong to a replica set when their name carries
                    # the replica set name as prefix.
                    pods_for_dep = [
                        i for i in list_pod_name
                        if i.startswith(replica_set_name)
                    ]
                    logging.getLogger().info("%s has %s list of pods" %
                                             (replica_set_name, pods_for_dep))
                    for pod in pods_for_dep:
                        # Short 5s status check per pod; the outer while loop
                        # provides the long-term retry.
                        flag, pod_obj = manager.check_status(
                            5,
                            pod,
                            kind='pod',
                            status='Running',
                            namespace=globals.namespace)
                        if flag is True:
                            """if deploy_added is False and pod_removed is False:
                                previous_node = map_pod_obj[pod].spec.node_name
                                assert pod_obj.spec.node_name != node_to_reboot and , \
                                    "Pod is still mounted on previous worker node %s " % pod_obj.spec.node_name"""
                            # Decide whether the pod must have moved off the
                            # drained/rebooted node (check itself is disabled
                            # below).
                            check_mount_node = False
                            if event.startswith('drain'):
                                node_to_match = node_to_drain
                                check_mount_node = True
                            elif event.startswith('reboot'):
                                node_to_match = node_to_reboot
                                check_mount_node = True
                            if check_mount_node is True:
                                """assert pod_obj.spec.node_name != node_to_match, \
                                    "Pod %s is still mounted on previous worker node %s " % (pod, pod_obj.spec.node_name)"""
                                pass
                            replica_has_running_pod = True
                            list_pod_obj.append(pod_obj)
                            ready_replicas.add(replica_set_name)
                            map_pod_node_dist[pod_obj.spec.node_name].append(
                                pod_obj.metadata.name)
                            # One Running pod is enough for this replica set.
                            break
                        else:
                            replica_has_running_pod = False
                    """assert replica_has_running_pod is True, "Deployment %s does not have any pod in running state yet out of %s" % \
                                                            (replica_set_name[0:replica_set_name.index('-')],
                                                             pods_for_dep)"""
                    logging.getLogger().info(
                        "Deployment %s has pod in running state" %
                        replica_set_name[0:replica_set_name.index('-')])
        assert pod_status_check_done is True, f"All pods did not come to running in " \
                                              f"{str(datetime.timedelta(0, all_pods_running_time))}, terminating test..."
        logging.getLogger().info(
            "==================================== Time taken to all pods come to running is %s"
            % str(datetime.timedelta(0, all_pods_running_time)))
        logging.getLogger().info("Node wide distribution of pods...")
        for node, pod_list in map_pod_node_dist.items():
            logging.getLogger().info(f"{node} :: {pod_list}\n")

        logging.getLogger().info("Now verifying vlun and CRDs for each pod")
        # Verify CRD and vlun for pods
        for pod_obj in list_pod_obj:
            # logging.getLogger().info(pod_obj)
            # Verify crd fpr published status
            pvc_name = pod_obj.spec.volumes[
                0].persistent_volume_claim.claim_name
            logging.getLogger().info("\n\nPVC is :: %s " % pvc_name)
            volume_name = manager.hpe_read_pvc_object(
                pvc_name, globals.namespace).spec.volume_name
            logging.getLogger().info("volume_name is :: %s " % volume_name)
            assert manager.verify_pvc_crd_published(volume_name) is True, \
                "PVC CRD %s Published is false after Pod is running" % volume_name
            logging.getLogger().info("PVC CRD %s published is True" %
                                     volume_name)
            pvc_crd = manager.get_pvc_crd(volume_name)
            # Array-side lookup uses a truncated name — presumably the 3PAR
            # 31-character volume-name limit; confirm.
            volume_name = volume_name[0:31]
            # NOTE(review): hpe3par_cli is not assigned in this function
            # (earlier code uses globals.hpe3par_cli) — confirm it resolves.
            hpe3par_vlun = manager.get_3par_vlun(hpe3par_cli, volume_name)

            # store vlun for pod in map to be referenced during cleanup verification
            map_pod_vlun[pod_obj.metadata.name] = hpe3par_vlun
            assert manager.verify_pod_node(hpe3par_vlun, pod_obj) is True, \
                "Node for pod received from 3par and cluster do not match"
            logging.getLogger().info(
                "Node for pod received from 3par and cluster match")

            # store pvc crd to be referenced during cleanup verification
            map_pvc_crd[pod_obj.metadata.name] = pvc_crd

            # by-path device entries on the node must match the vlun
            flag, disk_partition = manager.verify_by_path(
                iscsi_ips, pod_obj.spec.node_name, pvc_crd, hpe3par_vlun)
            assert flag is True, "partition not found"
            logging.getLogger().info("disk_partition received are %s " %
                                     disk_partition)

            flag, disk_partition_mod, partition_map = manager.verify_multipath(
                hpe3par_vlun, disk_partition)
            assert flag is True, "multipath check failed"
            logging.getLogger().info(
                "disk_partition after multipath check are %s " %
                disk_partition)
            logging.getLogger().info(
                "disk_partition_mod after multipath check are %s " %
                disk_partition_mod)
            assert manager.verify_partition(
                disk_partition_mod), "partition mismatch"
            logging.getLogger().info(
                "Partition verification done successfully")

            assert manager.verify_lsscsi(
                pod_obj.spec.node_name,
                disk_partition), "lsscsi verification failed"
            logging.getLogger().info("lsscsi verification done successfully")
            # save disk_partition to verify cleanup after node drain
            disk_partition_map[
                pod_obj.metadata.owner_references[0].name] = disk_partition

            pod_node_name = pod_obj.spec.node_name
            logging.getLogger().info("%s is mounted on %s" %
                                     (pod_obj.metadata.name, pod_node_name))
            # NOTE(review): the pod name was already appended to
            # map_pod_node_dist when found Running above — this adds it a
            # second time; confirm that duplication is intended.
            map_pod_node_dist[pod_node_name].append(pod_obj.metadata.name)

        logging.getLogger().info(
            "\n\nSuccessfully verified vlun and CRD status for each pod")
        # logging.getLogger().info("Node wide distribution of pod %s" % map_pod_node_dist)
    except Exception as e:
        logging.getLogger().error("Error in vlun verification :: %s" % e)
        raise e
    finally:
        # Ensure the timer thread's loop condition goes False on every exit.
        pod_status_check_done = True
# ---- Example #7 ----
def pvc_create_verify(yml):
    """End-to-end PVC provisioning check driven by *yml*: create secret,
    SC and PVC, validate the provisioning outcome against array version /
    CPG media / SC properties, verify the volume on the array when
    provisioning succeeds, then delete every object and verify deletion.

    ``manager``, ``timeout`` and ``cleanup`` are module-level names.
    Exceptions are printed and re-raised; the array session is always
    logged out and remaining objects cleaned up in ``finally``.
    """
    secret = None
    sc = None
    pvc = None
    hpe3par_cli = None
    try:
        array_ip, array_uname, array_pwd, protocol = manager.read_array_prop(
            yml)
        hpe3par_cli = manager.get_3par_cli_client(yml)
        hpe3par_version = manager.get_array_version(hpe3par_cli)
        print(
            "\n########################### new_method %s::%s::%s ###########################"
            % (str(yml), protocol, hpe3par_version[0:5]))
        secret = manager.create_secret(yml)
        sc = manager.create_sc(yml)
        pvc = manager.create_pvc(yml)
        print("PVC created :: %s " % pvc)
        # Check PVC status in events
        provisioning, compression, cpg_name, size = manager.get_sc_properties(
            yml)
        print("Check if cpg is ssd")
        is_cpg_ssd = manager.check_cpg_prop_at_array(hpe3par_cli,
                                                     cpg_name,
                                                     property='ssd')
        print("Check in events if volume is created...")
        status, message = manager.check_status_from_events(
            kind='PersistentVolumeClaim',
            name=pvc.metadata.name,
            namespace=pvc.metadata.namespace,
            uid=pvc.metadata.uid)
        print("Check if test passed...")
        # Whether this combination is expected to provision depends on the
        # array version, CPG media and provisioning/compression settings.
        flag = manager.is_test_passed(array_version=hpe3par_version,
                                      status=status,
                                      is_cpg_ssd=is_cpg_ssd,
                                      provisioning=provisioning,
                                      compression=compression)
        print("Test passed :: %s " % flag)
        assert flag is True, "Volume created on %s with provisioning=%s, compression=%s" % (
            hpe3par_version, provisioning, compression)

        if status == 'ProvisioningSucceeded':
            # PVC should reach Bound; then the array-side volume must exist
            # and carry the properties requested in the SC.
            flag, pvc_obj = manager.check_status(
                timeout,
                pvc.metadata.name,
                kind='pvc',
                status='Bound',
                namespace=pvc.metadata.namespace)
            assert flag is True, "PVC %s status check timed out, not in Bound state yet..." % pvc_obj.metadata.name

            pvc_crd = manager.get_pvc_crd(pvc_obj.spec.volume_name)
            volume_name = manager.get_pvc_volume(pvc_crd)
            print(hpe3par_cli)
            volume = manager.get_volume_from_array(hpe3par_cli, volume_name)
            assert volume is not None, "Volume is not created on 3PAR for pvc %s " % volume_name
            print(volume)
            flag, failure_cause = manager.verify_volume_properties_3par(
                volume,
                size=size,
                provisioning=provisioning,
                compression=compression,
                cpg=cpg_name)
            assert flag is True, "Volume properties verification at array is failed for %s" % failure_cause

        # Delete everything and verify each deletion completed.
        assert manager.delete_pvc(pvc.metadata.name)

        assert manager.check_if_deleted(timeout, pvc.metadata.name, "PVC", namespace=pvc.metadata.namespace) is True, \
            "PVC %s is not deleted yet " % pvc.metadata.name

        if status == 'ProvisioningSucceeded':
            assert manager.check_if_crd_deleted(pvc_obj.spec.volume_name, "hpevolumeinfos") is True, \
                "CRD %s of %s is not deleted yet. Taking longer..." % (pvc_obj.spec.volume_name, 'hpevolumeinfos')

            assert manager.verify_delete_volume_on_3par(hpe3par_cli, volume_name), \
                "Volume %s from 3PAR for PVC %s is not deleted" % (volume_name, pvc.metadata.name)

        assert manager.delete_sc(sc.metadata.name) is True

        assert manager.check_if_deleted(timeout, sc.metadata.name, "SC") is True, "SC %s is not deleted yet " \
                                                                                  % sc.metadata.name

        assert manager.delete_secret(secret.metadata.name,
                                     secret.metadata.namespace) is True

        assert manager.check_if_deleted(timeout, secret.metadata.name, "Secret", namespace=secret.metadata.namespace) is True, \
            "Secret %s is not deleted yet " % secret.metadata.name

    except Exception as e:
        print("Exception in pvc_create_verify :: %s" % e)
        raise e

    finally:
        # BUG FIX: logout() previously raised NameError when
        # get_3par_cli_client itself failed; the client is now
        # pre-initialised to None and guarded.
        if hpe3par_cli is not None:
            hpe3par_cli.logout()
        cleanup(secret, sc, pvc, None)
# ---- Example #8 ----
def test_expand_volume():
    """Expand a bound PVC via a patch, mount it in a pod, and verify the
    new size is reflected both on the 3PAR array (``sizeMiB``) and in
    the PVC spec; then delete every object and verify deletion.

    ``manager``, ``timeout`` and ``cleanup`` are module-level names.
    Exceptions are printed and re-raised; the array session is always
    logged out and remaining objects cleaned up in ``finally``.
    """
    secret = None
    sc = None
    pvc = None
    pod = None
    hpe3par_cli = None
    try:
        yml = "YAML/test-expand_vol_pvc.yml"
        hpe3par_cli = manager.get_3par_cli_client(yml)
        array_ip, array_uname, array_pwd, protocol = manager.read_array_prop(
            yml)
        hpe3par_version = manager.get_array_version(hpe3par_cli)
        print(
            "\n########################### test_expand_volume::%s::%s ###########################"
            % (protocol, hpe3par_version[0:5]))
        secret = manager.create_secret(yml)
        sc = manager.create_sc(yml)
        pvc = manager.create_pvc(yml)
        flag, pvc_obj = manager.check_status(timeout,
                                             pvc.metadata.name,
                                             kind='pvc',
                                             status='Bound',
                                             namespace=pvc.metadata.namespace)
        assert flag is True, "PVC %s status check timed out, not in Bound state yet..." % pvc_obj.metadata.name

        pvc_crd = manager.get_pvc_crd(pvc_obj.spec.volume_name)
        volume_name = manager.get_pvc_volume(pvc_crd)
        print("volume_name :: %s " % volume_name)
        print(hpe3par_cli)
        volume = manager.get_volume_from_array(hpe3par_cli, volume_name)
        assert volume is not None, "Volume is not created on 3PAR for pvc %s " % volume_name
        print("Volume verification at array done successfully")
        # Patch the PVC to request a larger size (volume expansion)
        size_in_gb = '20'
        patch_json = {
            'spec': {
                'resources': {
                    'requests': {
                        'storage': size_in_gb + 'Gi'
                    }
                }
            }
        }
        mod_pvc = manager.patch_pvc(pvc.metadata.name, pvc.metadata.namespace,
                                    patch_json)
        print("Patched PVC %s" % mod_pvc)

        # Mounting the PVC in a pod triggers the filesystem expansion.
        pod = manager.create_pod("YAML/test-expand_vol_pod.yml")

        flag, pod_obj = manager.check_status(timeout,
                                             pod.metadata.name,
                                             kind='pod',
                                             status='Running',
                                             namespace=pod.metadata.namespace)

        assert flag is True, "Pod %s status check timed out, not in Running state yet..." % pod.metadata.name

        # Now check if volume in 3par has increased size
        volume = manager.get_volume_from_array(hpe3par_cli, volume_name)
        assert volume['sizeMiB'] == int(
            size_in_gb) * 1024, "Volume on array does not have updated size"

        # Check if PVC has increased size
        mod_pvc = manager.hpe_read_pvc_object(pvc.metadata.name,
                                              pvc.metadata.namespace)
        print("\n PVC after expansion %s" % mod_pvc)
        assert mod_pvc.spec.resources.requests[
            'storage'] == "%sGi" % size_in_gb, "PVC %s does not have updated size" % pvc.metadata.name

        # NOTE(review): verifying the mounted vlun's size on the node is
        # not implemented yet (removed a dead sketch of that check here).

        # Delete everything and verify each deletion completed.
        assert manager.delete_pod(pod.metadata.name, pod.metadata.namespace), "Pod %s is not deleted yet " % \
                                                                              pod.metadata.name
        assert manager.check_if_deleted(timeout, pod.metadata.name, "Pod", namespace=pod.metadata.namespace) is True, \
            "Pod %s is not deleted yet " % pod.metadata.name

        assert manager.delete_pvc(pvc.metadata.name)

        assert manager.check_if_deleted(timeout, pvc.metadata.name, "PVC", namespace=pvc.metadata.namespace) is True, \
            "PVC %s is not deleted yet " % pvc.metadata.name

        assert manager.check_if_crd_deleted(pvc_obj.spec.volume_name, "hpevolumeinfos") is True, \
            "CRD %s of %s is not deleted yet. Taking longer..." % (pvc_obj.spec.volume_name, 'hpevolumeinfos')

        assert manager.verify_delete_volume_on_3par(hpe3par_cli, volume_name), \
            "Volume %s from 3PAR for PVC %s is not deleted" % (volume_name, pvc.metadata.name)

        assert manager.delete_sc(sc.metadata.name) is True

        assert manager.check_if_deleted(timeout, sc.metadata.name, "SC") is True, "SC %s is not deleted yet " \
                                                                                  % sc.metadata.name

        assert manager.delete_secret(secret.metadata.name,
                                     secret.metadata.namespace) is True

        assert manager.check_if_deleted(timeout, secret.metadata.name, "Secret",
                                        namespace=secret.metadata.namespace) is True, \
            "Secret %s is not deleted yet " % secret.metadata.name

    except Exception as e:
        print("Exception in test_expand_volume :: %s" % e)
        raise e

    finally:
        # BUG FIX: logout() previously raised NameError when
        # get_3par_cli_client itself failed; the client is now
        # pre-initialised to None and guarded.
        if hpe3par_cli is not None:
            hpe3par_cli.logout()
        cleanup(secret, sc, pvc, pod)
# ---- Example #9 ----
def test_snapshot():
    """Validate the full snapshot lifecycle for a PVC.

    Creates secret/SC/PVC, verifies the backing volume exists on the array,
    then creates a snapshot class and a snapshot, verifies the snapshot
    volume on the 3PAR array, and finally deletes every resource, checking
    that the corresponding CRDs and array volumes are removed.

    Raises:
        AssertionError: on any failed lifecycle step.
    """
    secret = None
    sc = None
    pvc = None
    # Pre-initialize so the finally block cannot raise UnboundLocalError
    # (masking the real failure) if client creation below fails.
    hpe3par_cli = None
    try:
        base_yml = "YAML/source-pvc-snap.yml"
        hpe3par_cli = manager.get_3par_cli_client(base_yml)
        array_ip, array_uname, array_pwd, protocol = manager.read_array_prop(
            base_yml)
        hpe3par_version = manager.get_array_version(hpe3par_cli)
        print(
            "\n########################### test_snapshot::%s::%s ###########################"
            % (protocol, hpe3par_version[0:5]))
        secret = manager.create_secret(base_yml)
        sc = manager.create_sc(base_yml)
        pvc = manager.create_pvc(base_yml)
        flag, pvc_obj = manager.check_status(timeout,
                                             pvc.metadata.name,
                                             kind='pvc',
                                             status='Bound',
                                             namespace=pvc.metadata.namespace)
        assert flag is True, "PVC %s status check timed out, not in Bound state yet..." % pvc_obj.metadata.name

        # Resolve the backend volume for the bound PVC and confirm it is on
        # the array before snapshotting it.
        pvc_crd = manager.get_pvc_crd(pvc_obj.spec.volume_name)
        volume_name = manager.get_pvc_volume(pvc_crd)
        print(hpe3par_cli)
        volume = manager.get_volume_from_array(hpe3par_cli, volume_name)
        assert volume is not None, "Volume is not created on 3PAR for pvc %s " % volume_name
        print()

        assert manager.create_snapclass(
            "YAML/snapshot-class.yaml"
        ) is True, 'Snapclass ci-snapclass is not created.'

        assert manager.verify_snapclass_created(
        ) is True, 'Snapclass ci-snapclass is not found in crd list.'

        assert manager.create_snapshot(
            "YAML/snapshot.yaml"
        ) is True, 'Snapshot ci-pvc-snapshot is not created.'

        assert manager.verify_snapshot_created(
        ) is True, 'Snapshot ci-pvc-snapshot is not found in crd list.'

        flag, snap_uid = manager.verify_snapshot_ready()
        assert flag is True, "Snapshot ci-pvc-snapshot is not ready to use"

        # Array volume names are truncated; look up by the first 31 chars.
        snap_uid = "snapshot-" + snap_uid
        snap_volume = manager.get_volume_from_array(hpe3par_cli,
                                                    snap_uid[0:31])
        snap_volume_name = snap_volume['name']
        print("\nsnap_volume :: %s " % snap_volume)
        flag, message = manager.verify_snapshot_on_3par(
            snap_volume, volume_name)
        assert flag is True, message
        print()

        # Tear down snapshot artifacts and verify their CRDs disappear.
        assert manager.delete_snapshot(
        ), "Snapshot ci-pvc-snapshot deletion request failed"
        assert manager.check_if_crd_deleted('ci-pvc-snapshot', "volumesnapshots") is True, \
            "CRD %s of %s is not deleted yet. Taking longer..." % ('ci-pvc-snapshot', 'volumesnapshots')
        assert manager.delete_snapclass(
        ), "Snapclass ci-snapclass deletion request failed"
        assert manager.check_if_crd_deleted('ci-snapclass', "volumesnapshotclasses") is True, \
            "CRD %s of %s is not deleted yet. Taking longer..." % ('ci-snapclass', 'volumesnapshotclasses')
        assert manager.verify_delete_volume_on_3par(hpe3par_cli, snap_volume_name) is True, \
            "Snap Volume %s from 3PAR for PVC %s is not deleted" % (snap_volume_name, pvc.metadata.name)

        # Tear down PVC/SC/secret and verify each deletion end to end.
        assert manager.delete_pvc(pvc.metadata.name)

        assert manager.check_if_deleted(timeout, pvc.metadata.name, "PVC", namespace=pvc.metadata.namespace) is True, \
            "PVC %s is not deleted yet " % pvc.metadata.name

        assert manager.check_if_crd_deleted(pvc_obj.spec.volume_name, "hpevolumeinfos") is True, \
            "CRD %s of %s is not deleted yet. Taking longer..." % (pvc_obj.spec.volume_name, 'hpevolumeinfos')

        assert manager.verify_delete_volume_on_3par(hpe3par_cli, volume_name), \
            "Volume %s from 3PAR for PVC %s is not deleted" % (volume_name, pvc.metadata.name)

        assert manager.delete_sc(sc.metadata.name) is True

        assert manager.check_if_deleted(timeout, sc.metadata.name, "SC") is True, "SC %s is not deleted yet " \
                                                                                  % sc.metadata.name

        assert manager.delete_secret(secret.metadata.name,
                                     secret.metadata.namespace) is True

        assert manager.check_if_deleted(timeout, secret.metadata.name, "Secret",
                                        namespace=secret.metadata.namespace) is True, \
            "Secret %s is not deleted yet " % secret.metadata.name

    except Exception as e:
        print("Exception in test_snapshot :: %s" % e)
        raise e

    finally:
        # Log out only if the client was actually created.
        if hpe3par_cli is not None:
            hpe3par_cli.logout()
        cleanup(secret, sc, pvc, None)
        cleanup_snapshot()
Example #10
0
def test_clone():
    """Validate the clone-PVC lifecycle.

    Creates a base secret/SC/PVC and verifies its volume on the array,
    creates a clone PVC from it and waits for the clone CRD to complete,
    then deletes clone, base PVC, SC and secret, verifying CRD and
    array-volume removal at each step.

    Raises:
        AssertionError: on any failed lifecycle step.
    """
    secret = None
    sc = None
    pvc = None
    clone_pvc = None
    # Pre-initialize so the finally block cannot raise UnboundLocalError
    # (masking the real failure) if client creation below fails.
    hpe3par_cli = None
    try:
        # Fixed doubled slash ("YAML//...") in the manifest path.
        base_yml = "YAML/base-pvc-clone.yml"
        clone_yml = "YAML/test-clone.yml"
        hpe3par_cli = manager.get_3par_cli_client(base_yml)
        array_ip, array_uname, array_pwd, protocol = manager.read_array_prop(
            base_yml)
        hpe3par_version = manager.get_array_version(hpe3par_cli)
        print(
            "\n########################### test_clone::%s::%s ###########################"
            % (protocol, hpe3par_version[0:5]))
        secret = manager.create_secret(base_yml)
        sc = manager.create_sc(base_yml)
        pvc = manager.create_pvc(base_yml)
        flag, pvc_obj = manager.check_status(timeout,
                                             pvc.metadata.name,
                                             kind='pvc',
                                             status='Bound',
                                             namespace=pvc.metadata.namespace)
        assert flag is True, "PVC %s status check timed out, not in Bound state yet..." % pvc_obj.metadata.name

        # Resolve the backend volume for the base PVC and confirm it exists.
        pvc_crd = manager.get_pvc_crd(pvc_obj.spec.volume_name)
        volume_name = manager.get_pvc_volume(pvc_crd)
        print(hpe3par_cli)
        volume = manager.get_volume_from_array(hpe3par_cli, volume_name)
        assert volume is not None, "Volume is not created on 3PAR for pvc %s " % volume_name

        # Create the clone PVC and wait for it to bind and for its CRD to
        # report the clone operation complete.
        clone_pvc = manager.create_pvc(clone_yml)
        flag, clone_pvc_obj = manager.check_status(
            timeout,
            clone_pvc.metadata.name,
            kind='pvc',
            status='Bound',
            namespace=clone_pvc.metadata.namespace)
        assert flag is True, "PVC %s status check timed out, not in Bound state yet..." % clone_pvc_obj.metadata.name

        assert manager.verify_clone_crd_status(clone_pvc_obj.spec.volume_name) is True, \
            "Clone PVC CRD is not yet completed"
        clone_pvc_crd = manager.get_pvc_crd(clone_pvc_obj.spec.volume_name)
        clone_volume_name = manager.get_pvc_volume(clone_pvc_crd)

        # Delete the clone first, then the base resources, verifying each.
        assert manager.delete_pvc(clone_pvc.metadata.name)

        assert manager.check_if_deleted(timeout, clone_pvc.metadata.name, "PVC", namespace=clone_pvc.metadata.namespace)\
               is True, "Clone PVC %s is not deleted yet " % clone_pvc.metadata.name

        assert manager.check_if_crd_deleted(clone_pvc_obj.spec.volume_name, "hpevolumeinfos") is True, \
            "Clone PVC CRD %s of %s is not deleted yet. Taking longer..." % (clone_pvc_obj.spec.volume_name, 'hpevolumeinfos')

        assert manager.verify_delete_volume_on_3par(hpe3par_cli, clone_volume_name), \
            "Volume %s from 3PAR for PVC %s is not deleted" % (clone_volume_name, clone_pvc.metadata.name)

        assert manager.delete_pvc(pvc.metadata.name)

        assert manager.check_if_deleted(timeout, pvc.metadata.name, "PVC", namespace=pvc.metadata.namespace) is True, \
            "PVC %s is not deleted yet " % pvc.metadata.name

        assert manager.check_if_crd_deleted(pvc_obj.spec.volume_name, "hpevolumeinfos") is True, \
            "CRD %s of %s is not deleted yet. Taking longer..." % (pvc_obj.spec.volume_name, 'hpevolumeinfos')

        # Give the array time to reclaim the base volume before checking.
        sleep(30)
        assert manager.verify_delete_volume_on_3par(hpe3par_cli, volume_name), \
            "Volume %s from 3PAR for PVC %s is not deleted" % (volume_name, pvc.metadata.name)

        assert manager.delete_sc(sc.metadata.name) is True

        assert manager.check_if_deleted(timeout, sc.metadata.name, "SC") is True, "SC %s is not deleted yet " \
                                                                                  % sc.metadata.name

        assert manager.delete_secret(secret.metadata.name,
                                     secret.metadata.namespace) is True

        assert manager.check_if_deleted(timeout, secret.metadata.name, "Secret", namespace=secret.metadata.namespace) is True, \
            "Secret %s is not deleted yet " % secret.metadata.name

    except Exception as e:
        print("Exception in test_clone :: %s" % e)
        raise e

    finally:
        # Log out only if the client was actually created.
        if hpe3par_cli is not None:
            hpe3par_cli.logout()
        cleanup(None, None, clone_pvc, None)
        cleanup(secret, sc, pvc, None)
Example #11
0
def create_virtual_copyOf(base_yml, snap_yml):
    """Exercise the virtualCopyOf (snapshot) volume workflow end to end.

    Creates a base SC/PVC/Pod and verifies its data, creates a snapshot
    SC/PVC/Pod, checks that base-volume data is visible in the snapshot
    volume plus the snapshot-only file, validates the snapshot PVC CRD
    linkage and the base/snapshot volume properties on the array, then
    deletes all resources and verifies each deletion.

    :param base_yml: manifest containing SC/PVC/Pod for the base volume
    :param snap_yml: manifest containing SC/PVC/Pod for the snapshot volume

    Raises:
        AssertionError: on any failed validation step.
    """
    pvc_snap = None
    sc_snap = None
    snap_pod = None
    sc = None
    pvc = None
    pod = None
    isPresent = False
    isValid = False
    try:
        # Read SC parameters for later property validation on the array.
        # NOTE(review): assumes base_yml always contains a StorageClass
        # document with these parameters; cpg/snapCPG/provisioningType/
        # compression stay unbound otherwise — confirm with the manifests.
        with open(base_yml, "r") as ymlfile:
            elements = list(yaml.safe_load_all(ymlfile))
            for el in elements:
                if str(el.get('kind')) == "StorageClass":
                    cpg = el['parameters']['cpg']
                    snapCPG = el['parameters']['snapCpg']
                    provisioningType = el['parameters']['provisioning_type']
                    compression = el['parameters']['compression']
                    break

        # Creating base volume now in CSI
        sc = manager.create_sc(base_yml)
        pvc = manager.create_pvc(base_yml)
        logging.getLogger().info("Check in events if volume is created...")
        flag, base_pvc_obj = manager.check_status(
            30,
            pvc.metadata.name,
            kind='pvc',
            status='Bound',
            namespace=pvc.metadata.namespace)
        assert flag is True, "PVC %s status check timed out, not in Bound state yet..." % base_pvc_obj.metadata.name

        # Export base volume
        pod = manager.create_pod(base_yml)
        flag, base_pod_obj = manager.check_status(
            timeout,
            pod.metadata.name,
            kind='pod',
            status='Running',
            namespace=pod.metadata.namespace)
        assert flag is True, "Pod %s status check timed out, not in Running state yet..." % pod.metadata.name

        time.sleep(20)

        # Checking base volume data
        command = ['/bin/sh', '-c', 'ls -l /export']
        data = manager.hpe_connect_pod_container(pod.metadata.name, command)
        if any("mydata.txt" in x for x in data.split('\n')):
            isPresent = True
        assert isPresent is True, "File not present in base volume"

        # Creating snap volume in CSI
        sc_snap = manager.create_sc(snap_yml)
        pvc_snap = manager.create_pvc(snap_yml)
        logging.getLogger().info("Check in events if volume is created...")
        flag, snap_pvc_obj = manager.check_status(
            30,
            pvc_snap.metadata.name,
            kind='pvc',
            status='Bound',
            namespace=pvc_snap.metadata.namespace)
        assert flag is True, "Snapshot PVC %s status check timed out, not in Bound state yet..." % snap_pvc_obj.metadata.name

        # Export snapshot volume
        snap_pod = manager.create_pod(snap_yml)
        flag, snap_pod_obj = manager.check_status(
            timeout,
            snap_pod.metadata.name,
            kind='pod',
            status='Running',
            namespace=snap_pod.metadata.namespace)
        assert flag is True, "Snapshot volume pod mount %s status check timed out, not in Running state yet..." % snap_pod.metadata.name
        logging.getLogger().info("Checking snapshot volume data")
        time.sleep(20)

        # Validating data on snapshot volume: the base file must have been
        # inherited and the snapshot-only file must also exist.
        isPresent = False
        command = ['/bin/sh', '-c', 'ls -l /export']
        snap_data = manager.hpe_connect_pod_container(snap_pod.metadata.name,
                                                      command)
        if any("mydata.txt" in x for x in snap_data.split('\n')):
            isPresent = True
        assert isPresent is True, "File on base volume not found in snap volume"

        isPresent = False
        if any("mysnapdata.txt" in x for x in snap_data.split('\n')):
            isPresent = True
        assert isPresent is True, "File not present in snap volume"
        logging.getLogger().info("snapshot volume data check successfull")
        time.sleep(10)

        # Validate snapshot pvc crd: it must link to itself as the snapshot
        # and to the base PVC as the parent.
        pvc_crd = manager.get_pvc_crd("pvc-" + snap_pvc_obj.metadata.uid)
        assert pvc_crd['spec']['record'][
            'BaseSnapshotId'] == "pvc-" + snap_pvc_obj.metadata.uid, "Base snapshot id is incorrect %s" % pvc_crd[
                'spec']['record']['BaseSnapshotId']
        assert pvc_crd['spec']['record'][
            'ParentVolumeId'] == "pvc-" + base_pvc_obj.metadata.uid, "Parent id is incorrect %s" % pvc_crd[
                'spec']['record']['ParentVolumeId']
        logging.getLogger().info("Volume crd check successfull")

        # Validating base volume properties: 3PAR (major version < 4) and
        # Primera use different verifiers with different return shapes.
        base_volume = manager.get_volume_from_array(
            globals.hpe3par_cli,
            pvc_crd['spec']['record']['ParentBackendName'])
        if int(globals.hpe3par_version.split(".")[0]) < 4:
            isValid = manager.verify_volume_properties_3par(
                base_volume,
                cpg=cpg,
                provisioning=provisioningType,
                compression=compression)
            assert isValid[0] is True, "Validation of base volume failed"
        else:
            isValid = manager.verify_volume_properties_primera(
                base_volume,
                cpg=cpg,
                provisioning=provisioningType,
                compression=compression)
            assert isValid is True, "Validation of base volume failed"
        logging.getLogger().info("base volume check on the array successfull")

        # Validating snapshot volume properties
        snap_volume = manager.get_volume_from_array(
            globals.hpe3par_cli, pvc_crd['spec']['record']['Name'])

        isValid = manager.verify_volume_properties(
            snap_volume,
            name=pvc_crd['spec']['record']['Name'],
            copyOf=pvc_crd['spec']['record']['ParentBackendName'],
            snapCPG=snapCPG)
        assert isValid is True, "Validation of snapshot volume failed"
        logging.getLogger().info(
            "snapshot volume check on the array successfull")
        time.sleep(20)

        # Deleting created resources (snapshot side first, then base).
        manager.hpe_delete_pod_object_by_name(snap_pod.metadata.name,
                                              snap_pod.metadata.namespace)
        flag = manager.check_if_deleted(180,
                                        snap_pod.metadata.name,
                                        "Pod",
                                        namespace=snap_pod.metadata.namespace)
        assert flag is True, "POD %s delete status check timedout..." % snap_pod.metadata.name

        manager.delete_pvc(snap_pvc_obj.metadata.name)
        flag = manager.check_if_deleted(
            2,
            snap_pvc_obj.metadata.name,
            "PVC",
            namespace=snap_pvc_obj.metadata.namespace)
        assert flag is True, "PVC %s delete status check timedout..." % snap_pvc_obj.metadata.name

        manager.delete_sc(sc_snap.metadata.name)
        flag = manager.check_if_deleted(2, sc_snap.metadata.name, "SC", None)
        assert flag is True, "SC %s delete status check timedout..." % sc_snap.metadata.name

        manager.hpe_delete_pod_object_by_name(base_pod_obj.metadata.name,
                                              base_pod_obj.metadata.namespace)
        flag = manager.check_if_deleted(
            180,
            base_pod_obj.metadata.name,
            "Pod",
            namespace=base_pod_obj.metadata.namespace)
        assert flag is True, "POD %s delete status check timedout..." % base_pod_obj.metadata.name

        manager.delete_pvc(base_pvc_obj.metadata.name)
        # BUGFIX: the kind here was "Pod" although a PVC deletion is being
        # verified (compare with the snapshot PVC check above).
        flag = manager.check_if_deleted(
            2,
            base_pvc_obj.metadata.name,
            "PVC",
            namespace=base_pvc_obj.metadata.namespace)
        assert flag is True, "PVC %s delete status check timedout..." % base_pvc_obj.metadata.name

        manager.delete_sc(sc.metadata.name)
        flag = manager.check_if_deleted(2, sc.metadata.name, "SC", None)
        assert flag is True, "SC %s delete status check timedout..." % sc.metadata.name

    finally:
        # Now cleanup sc, pvc, pod (no secret is created in this flow).
        # BUGFIX: cleanup takes (secret, sc, pvc, pod) everywhere else in
        # this file; the previous 3-argument calls shifted every argument.
        cleanup(None, sc_snap, pvc_snap, snap_pod)
        cleanup(None, sc, pvc, pod)
def test_chap():
    """Verify iSCSI CHAP support end to end.

    Re-installs the plugin with CHAP enabled, validates the node CRDs carry
    the configured CHAP credentials, provisions secret/SC/PVC/Pod, and then
    verifies the CHAP user/password both on the 3PAR host entry and in the
    node's iscsiadm configuration.

    Raises:
        AssertionError: on any failed validation step.
    """
    sc_yml = 'YAML_CHAP/sc_140.yaml'
    pvc_yml = 'YAML_CHAP/pvc_140.yaml'
    pod_yml = 'YAML_CHAP/pod_140.yml'
    hpe3par_cli = None
    secret = None
    sc = None
    pvc = None
    pod = None
    timeout = 900

    # Fetching chap details from install yml.
    # safe_load: never yaml.load untrusted/unspecified-Loader input.
    with open("INSTALL/values.yaml", 'r') as ymlfile:
        cfg = yaml.safe_load(ymlfile)

    chapUsr = cfg['iscsi']['chapUser']
    chapPwd = cfg['iscsi']['chapPassword']
    try:
        yml = "YAML_CHAP/sc_140.yaml"
        array_ip, array_uname, array_pwd, protocol = manager.read_array_prop(yml)
        hpe3par_cli = manager.get_3par_cli_client(yml)

        hpe3par_version = manager.get_array_version(hpe3par_cli)

        # Re-install the plugin so it comes up with CHAP enabled.
        tc.test_helm_uninstall()
        time.sleep(20)
        tc.test_helm_install()

        # Collect worker nodes (name -> first reported address).
        node_list = manager.hpe_list_node_objects()
        workers = {}
        for node in node_list.items:
            if 'worker_id' in node.metadata.labels:
                workers[node.metadata.name] = node.status.addresses[0].address

        # Validate the node CRD on EVERY worker.
        # BUGFIX: the assert was outside the loop, so only the last node's
        # result was ever checked (and it raised NameError with no workers).
        assert workers, "Crd validation failed"
        for node_name in workers:
            flag = manager.verify_node_crd_chap(node_name, chapUser=chapUsr, chapPassword=chapPwd)
            assert flag is True, "Crd validation failed"

        # Create sc, pvc, pod
        secret = manager.create_secret(sc_yml)
        sc = manager.create_sc(sc_yml)
        pvc = manager.create_pvc(pvc_yml)
        flag, pvc_obj = manager.check_status(timeout, pvc.metadata.name, kind='pvc', status='Bound',
                                             namespace=pvc.metadata.namespace)
        assert flag is True, "PVC %s status check timed out, not in Bound state yet..." % pvc_obj.metadata.name
        pvc_crd = manager.get_pvc_crd(pvc_obj.spec.volume_name)
        volume_name = manager.get_pvc_volume(pvc_crd)
        print("hpe3par_cli object :: %s " % hpe3par_cli)
        volume = manager.get_volume_from_array(hpe3par_cli, volume_name)
        assert volume is not None, "Volume is not created on 3PAR for pvc %s " % volume_name

        pod = manager.create_pod(pod_yml)
        flag, pod_obj = manager.check_status(timeout, pod.metadata.name, kind='pod', status='Running',
                                             namespace=pod.metadata.namespace)

        assert flag is True, "Pod %s status check timed out, not in Running state yet..." % pod.metadata.name

        # Validate chap details on 3par
        host_name = (pod_obj.spec.node_name).split(".")[0]
        hpe3par_host = manager.get_host_from_array(hpe3par_cli, host_name)
        flag = manager.verify_host_properties(hpe3par_host, chapUser=chapUsr, chapPassword=chapPwd)
        assert flag is True, "Verification of crd on array failed"

        # Validate chap details on host.
        command = "iscsiadm -m node -o show | grep -w node.session.auth.username"
        raw_output = manager.get_command_output(host_name, command)
        assert raw_output[0].split(",")[0].split("=")[1].strip() == chapUsr, "Chap user not as in input file %s " % raw_output[0].split(",")[0].split("=")[1]
        command = "iscsiadm -m node -o show | grep -w node.session.auth.password"
        raw_output = manager.get_command_output(host_name, command)
        assert raw_output[0].split(",")[0].split("=")[1], "Chap password on host is 'NULL' %s " % raw_output[0].split(",")[0].split("=")[1]

    finally:
        # Now cleanup secret, sc, pv, pvc, pod
        print("Inside Finally")
        cleanup(secret, sc, pvc, pod)
        # BUGFIX: guard the logout — hpe3par_cli stays None when client
        # creation fails, and None.logout() would mask the real error.
        if hpe3par_cli is not None:
            hpe3par_cli.logout()
Example #13
0
def run_pod_bkp(yml, hpe3par_cli, protocol):
    """Create secret/SC/PVC/Pod from yml, verify the published volume on the
    array and on the node (for iSCSI: by-path, multipath, lsscsi), then
    delete everything and verify cleanup on cluster, node and array.

    :param yml: manifest containing secret, SC, PVC and Pod documents
    :param hpe3par_cli: logged-in 3PAR client (caller owns login/logout)
    :param protocol: 'iscsi' enables the node-side path verification

    Raises:
        AssertionError: on any failed lifecycle or verification step.
    """
    secret = None
    sc = None
    pvc = None
    pod = None
    try:
        secret = manager.create_secret(yml)
        sc = manager.create_sc(yml)
        pvc = manager.create_pvc(yml)
        flag, pvc_obj = manager.check_status(timeout, pvc.metadata.name, kind='pvc', status='Bound',
                                             namespace=pvc.metadata.namespace)
        assert flag is True, "PVC %s status check timed out, not in Bound state yet..." % pvc_obj.metadata.name

        # Resolve the backend volume for the bound PVC and confirm it exists.
        pvc_crd = manager.get_pvc_crd(pvc_obj.spec.volume_name)
        volume_name = manager.get_pvc_volume(pvc_crd)
        volume = manager.get_volume_from_array(hpe3par_cli, volume_name)
        assert volume is not None, "Volume is not created on 3PAR for pvc %s. Terminating test. " % volume_name

        pod = manager.create_pod(yml)
        flag, pod_obj = manager.check_status(timeout, pod.metadata.name, kind='pod', status='Running',
                                             namespace=pod.metadata.namespace)
        assert flag is True, "Pod %s status check timed out, not in Running state yet. Terminating test." % pod.metadata.name

        # Verify crd for published status
        assert manager.verify_pvc_crd_published(pvc_obj.spec.volume_name) is True, \
            "PVC CRD %s Published is false after Pod is running" % pvc_obj.spec.volume_name

        hpe3par_vlun = manager.get_3par_vlun(hpe3par_cli,volume_name)
        assert manager.verify_pod_node(hpe3par_vlun, pod_obj) is True, \
            "Node for pod received from 3par and cluster do not match"

        if protocol == 'iscsi':
            # Node-side checks: by-path entries, multipath maps, lsscsi rows.
            iscsi_ips = manager.get_iscsi_ips(hpe3par_cli)

            flag, disk_partition = manager.verify_by_path(iscsi_ips, pod_obj.spec.node_name)
            assert flag is True, "partition not found"
            print("disk_partition received are %s " % disk_partition)

            flag, disk_partition_mod = manager.verify_multipath(hpe3par_vlun, disk_partition)
            assert flag is True, "multipath check failed"
            print("disk_partition after multipath check are %s " % disk_partition)
            print("disk_partition_mod after multipath check are %s " % disk_partition_mod)
            assert manager.verify_partition(disk_partition_mod), "partition mismatch"

            assert manager.verify_lsscsi(pod_obj.spec.node_name, disk_partition), "lsscsi verification failed"

        assert manager.delete_pod(pod.metadata.name, pod.metadata.namespace), "Pod %s is not deleted yet " % \
                                                                              pod.metadata.name
        assert manager.check_if_deleted(timeout, pod.metadata.name, "Pod", namespace=pod.metadata.namespace) is True, \
            "Pod %s is not deleted yet " % pod.metadata.name

        if protocol == 'iscsi':
            # After pod deletion the node must not retain any path/device
            # entries for the volume.
            flag, ip = manager.verify_deleted_partition(iscsi_ips, pod_obj.spec.node_name)
            assert flag is True, "Partition(s) not cleaned after volume deletion for iscsi-ip %s " % ip

            paths = manager.verify_deleted_multipath_entries(pod_obj.spec.node_name, hpe3par_vlun)
            assert paths is None or len(paths) == 0, "Multipath entries are not cleaned"

            flag = manager.verify_deleted_lsscsi_entries(pod_obj.spec.node_name, disk_partition)
            print("flag after deleted lsscsi verification is %s " % flag)
            assert flag, "lsscsi verification failed for vlun deletion"

        # Tear down PVC/SC/secret and verify each deletion end to end.
        assert manager.delete_pvc(pvc.metadata.name)

        assert manager.check_if_deleted(timeout, pvc.metadata.name, "PVC", namespace=pvc.metadata.namespace) is True, \
            "PVC %s is not deleted yet " % pvc.metadata.name

        assert manager.check_if_crd_deleted(pvc_obj.spec.volume_name, "hpevolumeinfos") is True, \
            "CRD %s of %s is not deleted yet. Taking longer..." % (pvc_obj.spec.volume_name, 'hpevolumeinfos')

        assert manager.verify_delete_volume_on_3par(hpe3par_cli, volume_name), \
            "Volume %s from 3PAR for PVC %s is not deleted" % (volume_name, pvc.metadata.name)

        assert manager.delete_sc(sc.metadata.name) is True

        assert manager.check_if_deleted(timeout, sc.metadata.name, "SC") is True, "SC %s is not deleted yet " \
                                                                                  % sc.metadata.name

        assert manager.delete_secret(secret.metadata.name, secret.metadata.namespace) is True

        assert manager.check_if_deleted(timeout, secret.metadata.name, "Secret", namespace=secret.metadata.namespace) is True, \
            "Secret %s is not deleted yet " % secret.metadata.name

    except Exception as e:
        print("Exception in run_pod :: %s" % e)
        raise e
    finally:
        cleanup(secret, sc, pvc, pod)
Example #14
0
def test_import_vol_as_clone_sanity():
    """Import the base volume of a clone into CSI and verify it.

    Flow:
      1. Create a volume directly on the array.
      2. Import it into CSI via SC/PVC and wait for the PVC to be Bound.
      3. Compare the imported volume's array properties with the original.
      4. Mount the imported volume in a pod and verify it.

    All created Kubernetes resources are removed in ``finally``, and the
    array-side volume is deleted as well.
    """
    yml = '%s/import_vol_as_clone/import-vol-as-clone.yml' % globals.yaml_dir
    sc = None
    pvc = None       # PVC as returned by create_pvc (before status check)
    pvc_obj = None   # PVC refreshed by check_status once available
    pod_obj = None
    vol_name = None
    try:
        yaml_values = manager.get_details_for_volume(yml)
        options = prepare_options(yaml_values, globals.hpe3par_version)
        # Create volume in array to be cloned and later imported to csi
        vol_name = yaml_values['vol_name']
        volume, exception = create_vol_in_array(globals.hpe3par_cli,
                                                options,
                                                vol_name=vol_name,
                                                size=yaml_values['size'],
                                                cpg_name=yaml_values['cpg'])
        if volume is None:
            # Array-side creation failed; nothing to import, so bail out.
            logging.getLogger().info(
                "Volume %s is not created on array as %s. Terminating test." %
                (vol_name, exception))
            return
        logging.getLogger().info(
            "Volume %s created successfully on array. Now Create clone..." %
            volume['name'])

        # Now import base of the clone to csi
        sc = manager.create_sc(yml)
        pvc = manager.create_pvc(yml)
        logging.getLogger().info("Check in events if volume is created...")
        status, message = manager.check_status_from_events(
            kind='PersistentVolumeClaim',
            name=pvc.metadata.name,
            namespace=pvc.metadata.namespace,
            uid=pvc.metadata.uid)
        assert status == 'ProvisioningSucceeded', f"{message}"
        flag, pvc_obj = manager.check_status(30,
                                             pvc.metadata.name,
                                             kind='pvc',
                                             status='Bound',
                                             namespace=pvc.metadata.namespace)
        assert flag is True, "PVC %s for base volume %s status check timed out, not in Bound state yet..." % \
                             (pvc_obj.metadata.name, vol_name)
        logging.getLogger().info(
            "\n\nBase volume (after cloning at array) has been imported successfully to CSI."
        )

        # Compare imported volume object with old volume object on array
        pvc_crd = manager.get_pvc_crd(pvc_obj.spec.volume_name)
        imported_volume_name = manager.get_pvc_volume(pvc_crd)
        assert manager.verify_clone_crd_status(pvc_obj.spec.volume_name) is True, \
            "Clone PVC CRD is not yet completed"
        csi_volume = manager.get_volume_from_array(globals.hpe3par_cli,
                                                   imported_volume_name)
        vol_has_diff, diff = compare_volumes(volume, csi_volume)
        assert vol_has_diff is False, "After import volume properties are changed. Modified properties are %s" % diff
        logging.getLogger().info(
            "\nImported volume's properties have been verified successfully, all property values retain."
        )
        pod_obj = create_verify_pod(yml, globals.hpe3par_cli, pvc_obj,
                                    imported_volume_name,
                                    globals.access_protocol)
    finally:
        # Fall back to the raw PVC object so the claim is still cleaned up
        # when a failure happens before check_status() populated pvc_obj.
        cleanup(None, sc, pvc_obj if pvc_obj is not None else pvc, pod_obj)
        # Only attempt array-side deletion once a volume name was resolved.
        if vol_name is not None:
            delete_vol_from_array(globals.hpe3par_cli, vol_name)
Example #15
0
def delete_resources(hpe3par_cli, secret, sc, pvc, pod, protocol):
    """Tear down pod, PVC, SC and secret, verifying each deletion.

    For iSCSI, additionally confirms that by-path partitions, multipath
    entries and lsscsi entries were cleaned up on the node. Any failure is
    logged and swallowed so this can serve as best-effort cleanup.
    """
    timeout = 600
    try:
        # --- Pod ---
        assert manager.delete_pod(pod.metadata.name, pod.metadata.namespace), \
            "Pod %s is not deleted yet " % pod.metadata.name
        assert manager.check_if_deleted(timeout, pod.metadata.name, "Pod",
                                        namespace=pod.metadata.namespace) is True, \
            "Pod %s is not deleted yet " % pod.metadata.name

        # Resolve the backing volume name from the PVC's CRD.
        crd = manager.get_pvc_crd(pvc.spec.volume_name)
        backing_volume = manager.get_pvc_volume(crd)

        if protocol == 'iscsi':
            vlun = manager.get_3par_vlun(hpe3par_cli, backing_volume)
            ips = manager.get_iscsi_ips(hpe3par_cli)
            # Only the partition list is needed later; the flag is unused.
            _, disk_partition = manager.verify_by_path(ips, pod.spec.node_name)
            cleaned, bad_ip = manager.verify_deleted_partition(
                ips, pod.spec.node_name)
            assert cleaned is True, \
                "Partition(s) not cleaned after volume deletion for iscsi-ip %s " % bad_ip

            leftover = manager.verify_deleted_multipath_entries(
                pod.spec.node_name, vlun)
            assert leftover is None or len(leftover) == 0, \
                "Multipath entries are not cleaned"

            lsscsi_ok = manager.verify_deleted_lsscsi_entries(
                pod.spec.node_name, disk_partition)
            logging.getLogger().info(
                "flag after deleted lsscsi verificatio is %s " % lsscsi_ok)
            assert lsscsi_ok, "lsscsi verification failed for vlun deletion"

        # The CRD's published flag should drop once the pod is gone; a
        # failure here is logged but does not abort the teardown.
        try:
            assert manager.verify_pvc_crd_published(pvc.spec.volume_name) is False, \
                "PVC CRD %s Published is true after Pod is deleted" % pvc.spec.volume_name
            logging.getLogger().info(
                "PVC CRD published is false after pod deletion.")
        except Exception as e:
            logging.getLogger().info(
                "Resuming test after failure of publishes status check for pvc crd... \n%s"
                % e)

        # --- PVC, its CRD, and the array volume ---
        assert manager.delete_pvc(pvc.metadata.name)
        assert manager.check_if_deleted(timeout, pvc.metadata.name, "PVC",
                                        namespace=pvc.metadata.namespace) is True, \
            "PVC %s is not deleted yet " % pvc.metadata.name
        assert manager.check_if_crd_deleted(pvc.spec.volume_name, "hpevolumeinfos") is True, \
            "CRD %s of %s is not deleted yet. Taking longer..." % (pvc.spec.volume_name, 'hpevolumeinfos')
        sleep(30)
        assert manager.verify_delete_volume_on_3par(hpe3par_cli, backing_volume), \
            "Volume %s from 3PAR for PVC %s is not deleted" % (backing_volume, pvc.metadata.name)

        # --- StorageClass ---
        assert manager.delete_sc(sc.metadata.name) is True
        assert manager.check_if_deleted(timeout, sc.metadata.name, "SC") is True, \
            "SC %s is not deleted yet " % sc.metadata.name

        # --- Secret ---
        assert manager.delete_secret(secret.metadata.name,
                                     secret.metadata.namespace) is True
        assert manager.check_if_deleted(timeout, secret.metadata.name, "Secret",
                                        namespace=secret.metadata.namespace) is True, \
            "Secret %s is not deleted yet " % secret.metadata.name
    except Exception as e:
        logging.getLogger().info("Exception in delete_resource() :: %s " % e)
def test_override_and_expand_volume():
    """Override the SC's CPG via PVC parameters, then expand the volume.

    Steps:
      1. Create SC and PVC; wait for Bound.
      2. Verify the bound volume's CPG differs from the SC's CPG (the
         override took effect) and that the array reports the same CPG.
      3. Patch the PVC to 30Gi, run a pod, and verify the new size on
         both the array volume and the PVC object.
    """
    base_yml = '%s/override/override.yaml' % globals.yaml_dir
    timeout = globals.status_check_timeout
    sc = None
    pvc = None
    pod = None
    try:
        # Creating storage class and pvc
        sc = manager.create_sc(base_yml)
        provisioning, compression, cpg_name, snap_cpg, desc, accessProtocol = get_sc_properties(
            base_yml)
        logging.getLogger().info(
            "Volume properties set in SC, provisioning::%s compression::%s CPG::%s SNAP CPG::%s desc::%s Protocol::%s"
            % (provisioning, compression, cpg_name, snap_cpg, desc,
               accessProtocol))
        pvc = manager.create_pvc(base_yml)
        flag, base_pvc_obj = manager.check_status(
            30,
            pvc.metadata.name,
            kind='pvc',
            status='Bound',
            namespace=pvc.metadata.namespace)
        assert flag is True, "PVC %s status check timed out, not in Bound state yet..." % base_pvc_obj.metadata.name
        logging.getLogger().info("Pvc in bound state :: %s" %
                                 base_pvc_obj.metadata.name)

        # Get pvc crd details
        pvc_crd = manager.get_pvc_crd(base_pvc_obj.spec.volume_name)
        vol_name, vol_cpg, vol_snpCpg, vol_provType, vol_desc, vol_compr = manager.get_pvc_editable_properties(
            pvc_crd)

        # If the override worked, the volume's CPG is not the SC default.
        assert cpg_name != vol_cpg, "Override failed for %s" % vol_name
        logging.getLogger().info(
            "Overriden Volume properties, name::%s usrCPG::%s snpCPG::%s provType::%s compr::%s desc::%s"
            %
            (vol_name, vol_cpg, vol_snpCpg, vol_provType, vol_compr, vol_desc))

        # Cross-check the overridden CPG against the array.
        hpe3par_volume = manager.get_volume_from_array(globals.hpe3par_cli,
                                                       vol_name)
        assert hpe3par_volume[
            'userCPG'] == vol_cpg, "userCPG does not match ovveride cpg parameter"

        # proceed to expanding pvc
        size_in_gb = '30'
        patch_json = {
            'spec': {
                'resources': {
                    'requests': {
                        'storage': size_in_gb + 'Gi'
                    }
                }
            }
        }
        mod_pvc = manager.patch_pvc(pvc.metadata.name, pvc.metadata.namespace,
                                    patch_json)
        logging.getLogger().info("Patched PVC %s" % mod_pvc)
        sleep(20)  # give the resize operation time to reach the array

        # Creating pod
        pod = manager.create_pod(base_yml)
        flag, pod_obj = manager.check_status(timeout,
                                             pod.metadata.name,
                                             kind='pod',
                                             status='Running',
                                             namespace=pod.metadata.namespace)
        assert flag is True, "Pod %s status check timed out, not in Running state yet..." % pod.metadata.name

        # Now check if volume in 3par has increased size
        volume = manager.get_volume_from_array(globals.hpe3par_cli, vol_name)
        assert volume['sizeMiB'] == int(
            size_in_gb) * 1024, "Volume on array does not have updated size"

        # Check if PVC has increased size
        mod_pvc = manager.hpe_read_pvc_object(pvc.metadata.name,
                                              pvc.metadata.namespace)
        logging.getLogger().info("PVC after expansion %s" % mod_pvc)
        assert mod_pvc.spec.resources.requests[
            'storage'] == "%sGi" % size_in_gb, "PVC %s does not have updated size" % pvc.metadata.name

    except Exception as e:
        # Log under the correct test name (previously said
        # "test_override_usrCPG", which is a different test).
        logging.getLogger().error(
            "Exception in test_override_and_expand_volume :: %s" % e)
        raise e

    finally:
        # Now cleanup secret, sc, pv, pvc, pod
        # NOTE(review): other visible call sites invoke cleanup() with four
        # arguments (secret, sc, pvc, pod) — confirm this three-argument
        # call matches the cleanup() signature in this module.
        cleanup(sc, pvc, pod)
Example #17
0
def create_import_verify_volume(yml,
                                hpe3par_cli,
                                protocol,
                                publish=True,
                                pvc_bound=True,
                                pvc_message='',
                                pod_run=True,
                                pod_message=''):
    """Create a volume on the array, import it into CSI, and verify it.

    :param yml: path to the yaml with SC/PVC definitions and volume details.
    :param hpe3par_cli: array client used for creation and verification.
    :param protocol: access protocol; read from the SC yaml when None.
    :param publish: when True, also mount the imported volume in a pod.
    :param pvc_bound: whether the PVC is expected to reach Bound.
    :param pvc_message: expected failure detail when pvc_bound is False.
    :param pod_run: whether the pod is expected to reach Running.
    :param pod_message: expected failure detail when pod_run is False.
    :return: tuple ``(volume, secret, sc, pvc_obj, pod_obj)``; all None when
        the test is skipped (full provisioning on Primera) or array-side
        volume creation fails.
    """
    secret = None  # secrets are not created here; kept in the return tuple
    sc = None      # for backward compatibility with existing callers
    pvc_obj = None
    pod_obj = None
    volume = None
    imported_volume_name = None  # populated only once the PVC binds

    hpe3par_version = manager.get_array_version(hpe3par_cli)
    yaml_values = manager.get_details_for_volume(yml)
    if yaml_values['provisioning'].lower(
    ) == 'full' and hpe3par_version[0:1] == '4':
        logging.getLogger().info(
            "Full Provisioning not supported on primera. Terminating test.")
        return None, None, None, None, None
    options = prepare_options(yaml_values, hpe3par_version)
    # Create volume in array to be imported
    logging.getLogger().info("Options to 3par cli for creating volume :: %s " %
                             options)
    volume, exception = create_vol_in_array(hpe3par_cli,
                                            options,
                                            vol_name=yaml_values['vol_name'],
                                            size=yaml_values['size'],
                                            cpg_name=yaml_values['cpg'])
    if volume is None:
        logging.getLogger().info(
            "Volume %s is not created on array as %s. Terminating test." %
            (yaml_values['vol_name'], exception))
        return None, None, None, None, None
    logging.getLogger().info(
        "Volume %s created successfully on array. Now import it to CSI..." %
        volume['name'])

    # Import volume now in CSI
    sc = manager.create_sc(yml)
    pvc = manager.create_pvc(yml)
    logging.getLogger().info("Check in events if volume is created...")
    status, message = manager.check_status_from_events(
        kind='PersistentVolumeClaim',
        name=pvc.metadata.name,
        namespace=pvc.metadata.namespace,
        uid=pvc.metadata.uid)
    if pvc_bound:
        assert status == 'ProvisioningSucceeded', f"{message}"
        flag, pvc_obj = manager.check_status(30,
                                             pvc.metadata.name,
                                             kind='pvc',
                                             status='Bound',
                                             namespace=pvc.metadata.namespace)
        assert flag is True, "PVC %s status check timed out, not in Bound state yet..." % pvc_obj.metadata.name

        # Compare imported volume object with old volume object on array.
        pvc_crd = manager.get_pvc_crd(pvc_obj.spec.volume_name)
        imported_volume_name = manager.get_pvc_volume(pvc_crd)
        assert manager.verify_clone_crd_status(pvc_obj.spec.volume_name) is True, \
            "Clone PVC CRD is not yet completed"
        # Use the client the caller supplied (a redundant duplicate lookup
        # through globals.hpe3par_cli has been removed).
        csi_volume = manager.get_volume_from_array(hpe3par_cli,
                                                   imported_volume_name)
        vol_has_diff, diff = compare_volumes(volume, csi_volume)
        assert vol_has_diff is False, "After import volume properties are changed. Modified properties are %s" % diff
        logging.getLogger().info(
            "\nImported volume's properties have been verified successfully, all property values retain."
        )
    else:
        # Import was expected to fail; keep the raw PVC for cleanup.
        pvc_obj = pvc
        assert status == 'ProvisioningFailed', "Imported volume that %s" % pvc_message
        logging.getLogger().info(
            "\n\nCould not import volume %s, as expected." % pvc_message)

    # Now publish this volume and verify vluns
    if publish is True:
        if protocol is None:  # not specified at command line
            # read it from sc yml
            protocol = manager.read_protocol(yml)

        # imported_volume_name is None when pvc_bound is False; previously
        # this path raised NameError instead.
        pod_obj = create_verify_pod(yml, hpe3par_cli, pvc_obj,
                                    imported_volume_name, protocol, pod_run,
                                    pod_message)

    return volume, secret, sc, pvc_obj, pod_obj
def pvc_create_verify(yml):
    """Provision, publish and tear down a volume described by ``yml``.

    Flow: create SC and PVC, validate provisioning through PVC events
    (honouring host-encryption parameters read from the SC yaml), verify
    the backing volume on the array, run a pod, check the node's iSCSI
    by-path/multipath/lsscsi state, then delete pod, PVC, backing volume
    and SC, verifying each deletion. Exceptions are logged and re-raised;
    the ``finally`` block cleans up whatever was created.
    """
    secret = None  # never assigned in this flow; cleanup() gets None for it
    sc = None
    pvc = None
    pod = None
    try:
        """array_ip, array_uname, array_pwd = manager.read_array_prop(yml)
        hpe3par_cli = manager.get_3par_cli_client(yml)
        hpe3par_version = manager.get_array_version(hpe3par_cli)
        print("\n########################### new_method %s::%s::%s ###########################" %
              (str(yml), protocol, hpe3par_version[0:5]))"""

        sc = manager.create_sc(yml)
        pvc = manager.create_pvc(yml)

        # Check PVC status in events
        provisioning = None
        compression = None
        size = None
        is_cpg_ssd = None
        provisioning, compression, cpg_name, size = manager.get_sc_properties(
            yml)
        host_encryption = None
        host_encryption_secret_name = None
        host_encryption_secret_namespace = None
        host_SeesVLUN_set = False

        # Read host-encryption and hostSeesVLUN settings straight from the
        # StorageClass document(s) in the yaml file.
        with open(yml) as f:
            elements = list(yaml.safe_load_all(f))
            for el in elements:
                # print("======== kind :: %s " % str(el.get('kind')))
                if str(el.get('kind')) == "StorageClass":
                    if 'hostEncryption' in el['parameters']:
                        host_encryption = el['parameters']['hostEncryption']
                    if 'hostEncryptionSecretName' in el['parameters']:
                        host_encryption_secret_name = el['parameters'][
                            'hostEncryptionSecretName']
                    if 'hostEncryptionSecretNamespace' in el['parameters']:
                        host_encryption_secret_namespace = el['parameters'][
                            'hostEncryptionSecretNamespace']
                    if 'hostSeesVLUN' in el['parameters']:
                        host_SeesVLUN_set = True
                        hostSeesVLUN = el['parameters']['hostSeesVLUN']

        logging.getLogger().info("Check in events if volume is created...")
        status, message = manager.check_status_from_events(
            kind='PersistentVolumeClaim',
            name=pvc.metadata.name,
            namespace=pvc.metadata.namespace,
            uid=pvc.metadata.uid)
        logging.getLogger().info("Check if test passed...")
        # The helper decides pass/fail from the event status combined with
        # the expected encryption secret (a failed provisioning can be the
        # expected outcome for negative encryption cases).
        flag = manager.is_test_passed_with_encryption(
            status=status,
            enc_secret_name=host_encryption_secret_name,
            yml=yml)
        logging.getLogger().info("Test passed :: %s " % flag)
        assert flag is True, message

        if status == 'ProvisioningSucceeded':
            flag, pvc_obj = manager.check_status(
                timeout,
                pvc.metadata.name,
                kind='pvc',
                status='Bound',
                namespace=pvc.metadata.namespace)
            assert flag is True, "PVC %s status check timed out, not in Bound state yet..." % pvc_obj.metadata.name

            # Verify the backing volume exists on the array with the
            # properties requested by the SC.
            pvc_crd = manager.get_pvc_crd(pvc_obj.spec.volume_name)
            volume_name = manager.get_pvc_volume(pvc_crd)
            logging.getLogger().info(globals.hpe3par_cli)
            volume = manager.get_volume_from_array(globals.hpe3par_cli,
                                                   volume_name)
            assert volume is not None, "Volume is not created on 3PAR for pvc %s " % volume_name
            logging.getLogger().info(volume)
            flag, failure_cause = manager.verify_volume_properties_3par(
                volume,
                size=size,
                provisioning=provisioning,
                compression=compression,
                cpg=cpg_name)
            assert flag is True, "Volume properties verification at array is failed for %s" % failure_cause
            pod = manager.create_pod(yml)

            flag, pod_obj = manager.check_status(
                timeout,
                pod.metadata.name,
                kind='pod',
                status='Running',
                namespace=pod.metadata.namespace)

            assert flag is True, "Pod %s status check timed out, not in Running state yet..." % pod.metadata.name

            # Verify crd for published status
            assert manager.verify_pvc_crd_published(pvc_obj.spec.volume_name) is True, \
                "PVC CRD %s Published is false after Pod is running" % pvc_obj.spec.volume_name

            hpe3par_vlun = manager.get_3par_vlun(globals.hpe3par_cli,
                                                 volume_name)
            assert manager.verify_pod_node(hpe3par_vlun, pod_obj) is True, \
                "Node for pod received from 3par and cluster do not match"

            iscsi_ips = manager.get_iscsi_ips(globals.hpe3par_cli)

            # Adding hostSeesVLUN check
            hpe3par_active_vlun = manager.get_all_active_vluns(
                globals.hpe3par_cli, volume_name)
            if host_SeesVLUN_set:
                # hostSeesVLUN="true" must yield host-type vluns; otherwise
                # matched-set vluns are expected.
                for vlun_item in hpe3par_active_vlun:
                    if hostSeesVLUN == "true":
                        assert vlun_item[
                            'type'] == globals.HOST_TYPE, "hostSeesVLUN parameter validation failed for volume %s" % pvc_obj.spec.volume_name
                    else:
                        assert vlun_item[
                            'type'] == globals.MATCHED_SET, "hostSeesVLUN parameter validation failed for volume %s" % pvc_obj.spec.volume_name
                logging.getLogger().info(
                    "Successfully completed hostSeesVLUN parameter check")

            # Read pvc crd again after pod creation. It will have IQN and LunId.
            pvc_crd = manager.get_pvc_crd(pvc_obj.spec.volume_name)
            flag, disk_partition = manager.verify_by_path(
                iscsi_ips, pod_obj.spec.node_name, pvc_crd, hpe3par_vlun)
            assert flag is True, "partition not found"
            logging.getLogger().info("disk_partition received are %s " %
                                     disk_partition)

            flag, disk_partition_mod, partition_map = manager.verify_multipath(
                hpe3par_vlun, disk_partition)
            assert flag is True, "multipath check failed"
            """print("disk_partition after multipath check are %s " % disk_partition)
            print("disk_partition_mod after multipath check are %s " % disk_partition_mod)"""
            logging.getLogger().info(
                "disk_partition after multipath check are %s " %
                disk_partition)
            logging.getLogger().info(
                "disk_partition_mod after multipath check are %s " %
                disk_partition_mod)
            assert manager.verify_partition(
                disk_partition_mod), "partition mismatch"

            assert manager.verify_lsscsi(
                pod_obj.spec.node_name,
                disk_partition), "lsscsi verificatio failed"
            # Teardown: delete the pod, then verify node-side path cleanup.
            assert manager.delete_pod(pod.metadata.name, pod.metadata.namespace), "Pod %s is not deleted yet " % \
                                                                                  pod.metadata.name
            assert manager.check_if_deleted(timeout, pod.metadata.name, "Pod",
                                            namespace=pod.metadata.namespace) is True, \
                "Pod %s is not deleted yet " % pod.metadata.name

            flag, ip = manager.verify_deleted_partition(
                iscsi_ips, pod_obj.spec.node_name, hpe3par_vlun, pvc_crd)
            assert flag is True, "Partition(s) not cleaned after volume deletion for iscsi-ip %s " % ip

            paths = manager.verify_deleted_multipath_entries(
                pod_obj.spec.node_name, hpe3par_vlun, disk_partition)
            assert paths is None or len(
                paths) == 0, "Multipath entries are not cleaned"

            # partitions = manager.verify_deleted_lsscsi_entries(pod_obj.spec.node_name, disk_partition)
            # assert len(partitions) == 0, "lsscsi verificatio failed for vlun deletion"
            flag = manager.verify_deleted_lsscsi_entries(
                pod_obj.spec.node_name, disk_partition)
            # print("flag after deleted lsscsi verificatio is %s " % flag)
            logging.getLogger().info(
                "flag after deleted lsscsi verificatio is %s " % flag)
            assert flag, "lsscsi verification failed for vlun deletion"

            # Verify crd for unpublished status; a failure here is logged
            # and the test resumes rather than aborting.
            try:
                assert manager.verify_pvc_crd_published(pvc_obj.spec.volume_name) is False, \
                    "PVC CRD %s Published is true after Pod is deleted" % pvc_obj.spec.volume_name
                # print("PVC CRD published is false after pod deletion.")
                logging.getLogger().info(
                    "PVC CRD published is false after pod deletion.")
                # logging.warning("PVC CRD published is false after pod deletion.")
            except Exception as e:
                # print("Resuming test after failure of publishes status check for pvc crd... \n%s" % e)
                logging.getLogger().warning(
                    "Resuming test after failure of publishes status check for pvc crd... \n%s"
                    % e)
                # logging.error("Resuming test after failure of publishes status check for pvc crd... \n%s" % e)
            assert manager.delete_pvc(pvc.metadata.name)

            assert manager.check_if_deleted(timeout, pvc.metadata.name, "PVC",
                                            namespace=pvc.metadata.namespace) is True, \
                "PVC %s is not deleted yet " % pvc.metadata.name

            # pvc_crd = manager.get_pvc_crd(pvc_obj.spec.volume_name)
            # print("PVC crd after PVC object deletion :: %s " % pvc_crd)
            assert manager.check_if_crd_deleted(pvc_obj.spec.volume_name, "hpevolumeinfos") is True, \
                "CRD %s of %s is not deleted yet. Taking longer..." % (pvc_obj.spec.volume_name, 'hpevolumeinfos')

            assert manager.verify_delete_volume_on_3par(globals.hpe3par_cli, volume_name), \
                "Volume %s from 3PAR for PVC %s is not deleted" % (volume_name, pvc.metadata.name)

            assert manager.delete_sc(sc.metadata.name) is True

            assert manager.check_if_deleted(timeout, sc.metadata.name, "SC",
                                            sc.metadata.namespace) is True, "SC %s is not deleted yet " \
                                                                            % sc.metadata.name
            """assert manager.delete_secret(secret.metadata.name, secret.metadata.namespace) is True

            assert manager.check_if_deleted(timeout, secret.metadata.name, "Secret", namespace=secret.metadata.namespace) is True, \
                "Secret %s is not deleted yet " % secret.metadata.name"""

    except Exception as e:
        # print("Exception in test_publish :: %s" % e)
        logging.getLogger().error("Exception in test_publish :: %s" % e)
        # logging.error("Exception in test_publish :: %s" % e)
        """if step == 'pvc':
            manager.delete_pvc(pvc.metadata.name)
            manager.delete_sc(sc.metadata.name)
            manager.delete_secret(secret.metadata.name, secret.metadata.namespace)
        if step == 'sc':
            manager.delete_sc(sc.metadata.name)
            manager.delete_secret(secret.metadata.name, secret.metadata.namespace)
        if step == 'secret':
            manager.delete_secret(secret.metadata.name, secret.metadata.namespace)"""
        raise e

    finally:
        #hpe3par_cli.logout()
        cleanup(None, sc, pvc, pod)
Example #19
0
def test_publish_sanity():
    """End-to-end publish/unpublish sanity test for an iSCSI volume.

    Flow: create SC and PVC, wait for Bound, verify the backing volume on
    the 3PAR array, run a pod against the PVC, verify the published state
    (CRD flag, vlun node, by-path partitions, multipath, lsscsi), then
    delete the pod and verify everything is cleaned up (partitions,
    multipath/lsscsi entries, CRD, array volume) before removing the PVC
    and SC.

    Raises:
        AssertionError / any manager exception: logged, then re-raised so
        the test fails; `cleanup` always runs in `finally`.
    """
    sc = None
    pvc = None
    pod = None
    try:
        yml = "%s/test-publish.yml" % globals.yaml_dir

        # Provision: StorageClass, then PVC; wait until the PVC is Bound.
        sc = manager.create_sc(yml)
        pvc = manager.create_pvc(yml)
        flag, pvc_obj = manager.check_status(timeout,
                                             pvc.metadata.name,
                                             kind='pvc',
                                             status='Bound',
                                             namespace=pvc.metadata.namespace)
        assert flag is True, "PVC %s status check timed out, not in Bound state yet..." % pvc_obj.metadata.name

        # Resolve the array-side volume name from the PVC CRD and confirm
        # the volume actually exists on the 3PAR array.
        pvc_crd = manager.get_pvc_crd(pvc_obj.spec.volume_name)
        volume_name = manager.get_pvc_volume(pvc_crd)
        volume = manager.get_volume_from_array(globals.hpe3par_cli,
                                               volume_name)
        assert volume is not None, "Volume is not created on 3PAR for pvc %s " % volume_name

        # Publish: run a pod that mounts the PVC and wait for Running.
        pod = manager.create_pod(yml)
        flag, pod_obj = manager.check_status(timeout,
                                             pod.metadata.name,
                                             kind='pod',
                                             status='Running',
                                             namespace=pod.metadata.namespace)
        assert flag is True, "Pod %s status check timed out, not in Running state yet..." % pod.metadata.name

        # Verify CRD for published status
        assert manager.verify_pvc_crd_published(pvc_obj.spec.volume_name) is True, \
            "PVC CRD %s Published is false after Pod is running" % pvc_obj.spec.volume_name

        # The vlun's host on the array must match the node the pod runs on.
        hpe3par_vlun = manager.get_3par_vlun(globals.hpe3par_cli, volume_name)
        assert manager.verify_pod_node(hpe3par_vlun, pod_obj) is True, \
            "Node for pod received from 3par and cluster do not match"

        iscsi_ips = manager.get_iscsi_ips(globals.hpe3par_cli)

        # Read pvc crd again after pod creation. It will have IQN and LunId.
        pvc_crd = manager.get_pvc_crd(pvc_obj.spec.volume_name)
        flag, disk_partition = manager.verify_by_path(iscsi_ips,
                                                      pod_obj.spec.node_name,
                                                      pvc_crd, hpe3par_vlun)
        assert flag is True, "partition not found"
        logging.getLogger().info("disk_partition received are %s " %
                                 disk_partition)

        flag, disk_partition_mod, partition_map = manager.verify_multipath(
            hpe3par_vlun, disk_partition)
        assert flag is True, "multipath check failed"
        logging.getLogger().info(
            "disk_partition after multipath check are %s " % disk_partition)
        logging.getLogger().info(
            "disk_partition_mod after multipath check are %s " %
            disk_partition_mod)
        assert manager.verify_partition(
            disk_partition_mod), "partition mismatch"

        assert manager.verify_lsscsi(
            pod_obj.spec.node_name,
            disk_partition), "lsscsi verification failed"

        # Unpublish: delete the pod and confirm it is gone.
        assert manager.delete_pod(pod.metadata.name, pod.metadata.namespace), "Pod %s is not deleted yet " % \
                                                                              pod.metadata.name
        assert manager.check_if_deleted(timeout, pod.metadata.name, "Pod", namespace=pod.metadata.namespace) is True, \
            "Pod %s is not deleted yet " % pod.metadata.name

        # After pod deletion the node-side artefacts must be cleaned up:
        # by-path partitions, multipath entries and lsscsi entries.
        flag, ip = manager.verify_deleted_partition(iscsi_ips,
                                                    pod_obj.spec.node_name,
                                                    hpe3par_vlun, pvc_crd)
        assert flag is True, "Partition(s) not cleaned after volume deletion for iscsi-ip %s " % ip

        paths = manager.verify_deleted_multipath_entries(
            pod_obj.spec.node_name, hpe3par_vlun, disk_partition)
        assert paths is None or len(
            paths) == 0, "Multipath entries are not cleaned"

        flag = manager.verify_deleted_lsscsi_entries(pod_obj.spec.node_name,
                                                     disk_partition)
        logging.getLogger().info(
            "flag after deleted lsscsi verification is %s " % flag)
        assert flag, "lsscsi verification failed for vlun deletion"

        # Verify crd for unpublished status. This check is best-effort:
        # a failure is logged as a warning and the test continues.
        try:
            assert manager.verify_pvc_crd_published(pvc_obj.spec.volume_name) is False, \
                "PVC CRD %s Published is true after Pod is deleted" % pvc_obj.spec.volume_name
            logging.getLogger().info(
                "PVC CRD published is false after pod deletion.")
        except Exception as e:
            logging.getLogger().warning(
                "Resuming test after failure of publishes status check for pvc crd... \n%s"
                % e)

        # Teardown: PVC, its CRD, the array volume, then the SC.
        assert manager.delete_pvc(pvc.metadata.name)

        assert manager.check_if_deleted(timeout, pvc.metadata.name, "PVC", namespace=pvc.metadata.namespace) is True, \
            "PVC %s is not deleted yet " % pvc.metadata.name

        assert manager.check_if_crd_deleted(pvc_obj.spec.volume_name, "hpevolumeinfos") is True, \
            "CRD %s of %s is not deleted yet. Taking longer..." % (pvc_obj.spec.volume_name, 'hpevolumeinfos')

        assert manager.verify_delete_volume_on_3par(globals.hpe3par_cli, volume_name), \
            "Volume %s from 3PAR for PVC %s is not deleted" % (volume_name, pvc.metadata.name)

        assert manager.delete_sc(sc.metadata.name) is True

        assert manager.check_if_deleted(timeout, sc.metadata.name, "SC", sc.metadata.namespace) is True, \
            "SC %s is not deleted yet " % sc.metadata.name

    except Exception as e:
        logging.getLogger().error("Exception in test_publish :: %s" % e)
        raise e

    finally:
        # cleanup tolerates None for objects that were never created.
        cleanup(None, sc, pvc, pod)