def test_helm_uninstall():
    """Uninstall the HPE CSI driver via helm and verify full cleanup.

    Steps:
      1. Locate the master node (the node carrying taints) and use its
         hostname for the remote helm commands.
      2. Return early if the driver release is not installed.
      3. Run ``helm uninstall`` and assert helm reports success.
      4. Delete every HPE CRD and wait for each deletion to complete.
      5. Assert every CSI plugin pod in kube-system is gone.

    Raises:
        AssertionError: if uninstall, CRD deletion, or pod cleanup fails
            within ``timeout``.
    """
    node_list = manager.hpe_list_node_objects().items

    # Fetch the master node's hostname. Masters are identified by having
    # taints set; workers have .spec.taints == None.
    # NOTE: the original compared taints against the *string* "None",
    # which is always true — fixed to a real truthiness check.
    host_name = None
    for item in node_list:
        if item.spec.taints:
            # addresses[1] is the node hostname (addresses[0] is the IP).
            host_name = item.status.addresses[1].address
            break

    # ssh to master node and check whether the csi driver is installed.
    command = "helm ls -n kube-system"
    command_output = manager.get_command_output(host_name, command)
    if len(command_output) == 1:
        # Only the header line came back -> no releases installed.
        print("Driver not installed")
        return

    # ssh to master node and execute the uninstall command.
    command = "helm uninstall hpe-csi --namespace kube-system"
    command_output = manager.get_command_output(host_name, command)
    print(command_output)
    assert command_output[0] == 'release "hpe-csi" uninstalled', \
        "Uninstall of 'hpe-csi' driver failed"

    # Delete the HPE CRDs and wait for each one to disappear.
    for crd in manager.hpe_list_crds():
        manager.hpe_delete_crd(crd)
        assert manager.check_if_deleted(timeout, crd, kind='Crd', namespace="Default") is True, \
            "crd %s is not deleted yet " % crd

    # Check that all plugin pods have been deleted from kube-system.
    plugin_apps = ('hpe-csi-controller', 'hpe-csi-node', 'primera3par-csp')
    pod_list = manager.hpe_list_pod_objects("kube-system").items
    for item in pod_list:
        if 'app' in item.metadata.labels and item.metadata.labels['app'] in plugin_apps:
            assert manager.check_if_deleted(timeout, item.metadata.name, "Pod",
                                            namespace=item.metadata.namespace) is True, \
                "Pod %s is not deleted yet " % item.metadata.name  # fixed: was pod.metadata.name (NameError)
    print("Plugin pods deleted")
def get_nodes_info():
    """Populate the worker-node tracking maps.

    Every schedulable, untainted node is recorded in ``map_worker_nodes``
    (name -> node object), and an empty pod list is seeded for it in
    ``map_pod_node_dist``.

    Raises:
        Exception: re-raised after logging if listing nodes fails.
    """
    global map_pod_node_dist
    try:
        for node in manager.hpe_list_node_objects().items:
            # Workers are the nodes that are schedulable and carry no taints.
            if node.spec.unschedulable is not None or node.spec.taints is not None:
                continue
            worker_name = node.metadata.name
            map_worker_nodes[worker_name] = node
            map_pod_node_dist[worker_name] = []
        logging.getLogger().info("Worker nodes in cluster are %s" % list(map_worker_nodes.keys()))
    except Exception as e:
        logging.getLogger().error("Exception in test_get_nodes_info :: %s" % e)
        raise e
def test_helm_install():
    """Install the HPE CSI driver via helm and verify the deployment.

    Steps:
      1. Locate the master node (the node carrying taints) for remote
         helm commands; return early if a release is already installed.
      2. Copy values.yaml, add/update the HPE chart repo, and record the
         chart and app versions published in the repo.
      3. Install the driver and assert the deployed chart/app versions
         match the repo versions.
      4. Wait for every plugin pod to reach Running and assert the HPE
         CRDs were created.

    Raises:
        AssertionError: on any failed install/verification step.
    """
    node_list = manager.hpe_list_node_objects().items

    # Fetch the master node's hostname. Masters carry taints.
    # NOTE: the original compared taints against the *string* "None",
    # which is always true — fixed to a real truthiness check.
    host_name = None
    for item in node_list:
        if item.spec.taints:
            # addresses[1] is the node hostname (addresses[0] is the IP).
            host_name = item.status.addresses[1].address
            break

    # ssh to master node and check whether the csi driver is already installed.
    command = "helm ls -n kube-system"
    command_output = manager.get_command_output(host_name, command)
    if len(command_output) == 1:
        # Only the header line -> nothing installed yet.
        print("Installing driver")
    else:
        print("Driver is already installed")
        return

    # Copy values.yaml locally so it can be passed to helm install.
    cmd = "cp INSTALL/values.yaml values.yaml"
    call(cmd.split(" "))

    # Add the HPE chart repository.
    command = "helm repo add hpe https://hpe-storage.github.io/co-deployments"
    command_output = manager.get_command_output(host_name, command)
    print(command_output)
    assert command_output[0] == '"hpe" has been added to your repositories', \
        "Install of HPE repo failed"

    # Refresh the repo index.
    command = "helm repo update"
    command_output = manager.get_command_output(host_name, command)
    print(command_output)
    assert command_output[1].rfind('Successfully got an update from the "hpe" chart repository') != -1, \
        "Update of HPE repo failed"

    # Record the chart/app versions published in the repo (columns 2 and 3
    # of the first data row of `helm search repo` output).
    command = "helm search repo hpe-csi-driver"
    command_output = manager.get_command_output(host_name, command)
    chart_version = command_output[1].split()[1]
    app_version = command_output[1].split()[2]

    # Install the csi driver.
    command = "helm install hpe-csi hpe/hpe-csi-driver --namespace kube-system -f values.yaml"
    command_output = manager.get_command_output(host_name, command)
    print(command_output)

    # Verify the release is deployed and versions match the repo.
    command = "helm ls -n kube-system"
    command_output = manager.get_command_output(host_name, command)
    assert command_output[1].rfind('deployed') != -1, "CSI driver not deployed"
    install_app_version = command_output[1].split()[9]
    # Chart column looks like "hpe-csi-driver-<version>"; token 3 after
    # splitting on "-" is the version.
    installed_chart = command_output[1].split()[8].split("-")[3]
    assert install_app_version == app_version, \
        "app version mismatch: repo version {0}, installed version {1}".format(app_version, install_app_version)
    assert installed_chart == chart_version, \
        "chart version mismatch: repo version {0}, installed version {1}".format(chart_version, installed_chart)

    # Check the plugin pods come up Running.
    plugin_apps = ('hpe-csi-controller', 'hpe-csi-node', 'primera3par-csp')
    pod_list = manager.hpe_list_pod_objects("kube-system").items
    for item in pod_list:
        if 'app' in item.metadata.labels and item.metadata.labels['app'] in plugin_apps:
            flag, pod_obj = manager.check_status(timeout, item.metadata.name, kind='pod',
                                                 status='Running', namespace='kube-system')
            assert flag is True, \
                "Pod %s status check timed out, not in Running state yet..." % item.metadata.name

    # Check the HPE CRDs were created.
    crd_obj = manager.hpe_list_crds()
    assert 'hpenodeinfos.storage.hpe.com' in crd_obj, "node info crd not created"
    # fixed message: originally duplicated "node info crd not created"
    assert 'hpevolumeinfos.storage.hpe.com' in crd_obj, "volume info crd not created"
def test_chap():
    """Verify iSCSI CHAP authentication end to end.

    Reads the CHAP credentials from INSTALL/values.yaml, re-installs the
    CSI driver (uninstall + install) so CHAP takes effect, then validates
    the credentials in three places: the per-node CRD, the 3PAR array
    host entry, and the iscsiadm configuration on the worker host.
    Creates a secret/sc/pvc/pod along the way and cleans them up in the
    ``finally`` block.

    Raises:
        AssertionError: if any CHAP validation or resource-status check fails.
    """
    sc_yml = 'YAML_CHAP/sc_140.yaml'
    pvc_yml = 'YAML_CHAP/pvc_140.yaml'
    pod_yml = 'YAML_CHAP/pod_140.yml'
    hpe3par_cli = None
    secret = None
    sc = None
    pvc = None
    pod = None
    timeout = 900

    # Fetch CHAP credentials from the install values file.
    # fixed: yaml.load() without a Loader is deprecated and unsafe on
    # untrusted input — use safe_load for this plain config file.
    with open("INSTALL/values.yaml", 'r') as ymlfile:
        cfg = yaml.safe_load(ymlfile)
    chapUsr = cfg['iscsi']['chapUser']
    chapPwd = cfg['iscsi']['chapPassword']

    try:
        yml = "YAML_CHAP/sc_140.yaml"
        array_ip, array_uname, array_pwd, protocol = manager.read_array_prop(yml)
        hpe3par_cli = manager.get_3par_cli_client(yml)
        hpe3par_version = manager.get_array_version(hpe3par_cli)

        # Re-install the plugin so the CHAP settings in values.yaml apply.
        tc.test_helm_uninstall()
        time.sleep(20)
        tc.test_helm_install()

        # Collect worker nodes (name -> first address).
        node_list = manager.hpe_list_node_objects()
        workers = {}
        for node in node_list.items:
            if 'worker_id' in node.metadata.labels:
                workers[node.metadata.name] = node.status.addresses[0].address

        # Validate that each node CRD carries the configured CHAP credentials.
        for node_name in workers:
            flag = manager.verify_node_crd_chap(node_name, chapUser=chapUsr, chapPassword=chapPwd)
            assert flag is True, "Crd validation failed"

        # Create secret, sc, pvc and wait for the PVC to bind.
        secret = manager.create_secret(sc_yml)
        sc = manager.create_sc(sc_yml)
        pvc = manager.create_pvc(pvc_yml)
        flag, pvc_obj = manager.check_status(timeout, pvc.metadata.name, kind='pvc',
                                             status='Bound', namespace=pvc.metadata.namespace)
        assert flag is True, \
            "PVC %s status check timed out, not in Bound state yet..." % pvc_obj.metadata.name

        # Confirm the backing volume exists on the array.
        pvc_crd = manager.get_pvc_crd(pvc_obj.spec.volume_name)
        volume_name = manager.get_pvc_volume(pvc_crd)
        print("hpe3par_cli object :: %s " % hpe3par_cli)
        volume = manager.get_volume_from_array(hpe3par_cli, volume_name)
        assert volume is not None, "Volume is not created on 3PAR for pvc %s " % volume_name

        # Create a pod and wait for it to run (this attaches the volume).
        pod = manager.create_pod(pod_yml)
        flag, pod_obj = manager.check_status(timeout, pod.metadata.name, kind='pod',
                                             status='Running', namespace=pod.metadata.namespace)
        assert flag is True, \
            "Pod %s status check timed out, not in Running state yet..." % pod.metadata.name

        # Validate CHAP details on the 3PAR array host entry.
        host_name = (pod_obj.spec.node_name).split(".")[0]
        host_ip = pod_obj.status.host_ip
        hpe3par_host = manager.get_host_from_array(hpe3par_cli, host_name)
        flag = manager.verify_host_properties(hpe3par_host, chapUser=chapUsr, chapPassword=chapPwd)
        assert flag is True, "Verification of crd on array failed"

        # Validate CHAP details in iscsiadm on the worker host.
        command = "iscsiadm -m node -o show | grep -w node.session.auth.username"
        raw_output = manager.get_command_output(host_name, command)
        assert raw_output[0].split(",")[0].split("=")[1].strip() == chapUsr, \
            "Chap user not as in input file %s " % raw_output[0].split(",")[0].split("=")[1]
        command = "iscsiadm -m node -o show | grep -w node.session.auth.password"
        raw_output = manager.get_command_output(host_name, command)
        assert raw_output[0].split(",")[0].split("=")[1], \
            "Chap password on host is 'NULL' %s " % raw_output[0].split(",")[0].split("=")[1]
    finally:
        # Cleanup secret, sc, pvc, pod regardless of outcome.
        print("Inside Finally")
        cleanup(secret, sc, pvc, pod)
        # fixed: logout() was called unconditionally; if client creation
        # failed, hpe3par_cli is None and the finally block itself raised
        # AttributeError, masking the original failure.
        if hpe3par_cli is not None:
            hpe3par_cli.logout()