def cli_pcc_down(self, *args, **kwargs):
    """
    Bring the PCC service down via the platina-cli tool on the host.

    [Args]
        (str) host_ip:
        (str) linux_password:
        (str) linux_user:
    [Returns]
        (str) OK if command successful, stderr output if there's an error
    """
    self._load_kwargs(kwargs)
    print("kwargs:-" + str(kwargs))
    banner("CLI.Pcc Down ip=%s" % self.host_ip)
    # Password is passed to platina-cli with -p; pcc_password comes from kwargs.
    down_cmd = "sudo /home/pcc/platina-cli-ws/platina-cli down -p {}".format(
        self.pcc_password)
    print("Making PCC Down ...")
    trace("Making PCC Down ...")
    print("Command:" + str(down_cmd))
    result = cli_run(self.host_ip, self.linux_user, self.linux_password,
                     down_cmd)
    print("Output:" + str(result))
    trace("Output:" + str(result))
    # Any "FAIL" marker in the command output means the shutdown failed.
    return "Error" if re.search("FAIL", str(result)) else "OK"
def cli_pull_code(self, *args, **kwargs):
    """
    Pull the PCC code on the host by running the configured version command.

    [Args]
        (str) host_ip:
        (str) linux_password:
        (str) linux_user:
        (str) pcc_version_cmd:
    [Returns]
        (str) OK if command successful, stderr output if there's an error
    """
    self._load_kwargs(kwargs)
    print("kwargs:-" + str(kwargs))
    banner("CLI.PCC Pull Code ip=%s" % self.host_ip)
    # Nothing to do when no pull command was supplied.
    if not self.pcc_version_cmd:
        print("Pull/pcc_version_cmd command is empty !!")
        return "OK"
    print("Pulling the code ....")
    trace("Pulling the code ....")
    print("Command:" + str(self.pcc_version_cmd))
    result = cli_run(self.host_ip, self.linux_user, self.linux_password,
                     self.pcc_version_cmd)
    print("Output:" + str(result))
    trace("Output:" + str(result))
    if re.search("FAIL", str(result)):
        print(str(result))
        return "Error"
    return "OK"
def alert_create_rule_raw(self, *args, **kwargs):
    """
    Create an alert rule by POSTing a YAML rule file directly to the
    platina-monitor REST endpoint via curl on the setup host.

    Returns "OK" when the serialized curl output reports statuscode 0 and
    messagetype OK, otherwise "Error".
    """
    self._load_kwargs(kwargs)
    banner("PCC.Alert Create Rule Raw")
    print("kwargs:-" + str(kwargs))
    # Fetch the PCC connection handle; propagate any lookup failure.
    conn = BuiltIn().get_variable_value("${PCC_CONN}")
    if self.templateId:
        # templateId may arrive as a string literal; parse it safely.
        self.templateId = ast.literal_eval(str(self.templateId))
    token = self.auth_data["token"]
    print("Authorization:-" + str(token))
    cmd_strct = 'cd /home/pcc/rules;curl -k -X POST --data-binary @{} -H "Authorization:Bearer {}" -H "Content-type: application/x-yaml" -H "Accept: application/x-yaml" https://127.0.0.1:9999/platina-monitor/alerts/rules'
    cmd = cmd_strct.format(self.filename, token)
    print("Command:-" + str(cmd))
    raw_output = cli_run(self.setup_ip, self.user, self.password, cmd)
    serialise_output = PccBase()._serialize_response(
        time.time(), raw_output)['Result']['stdout']
    print("Serialize Output:" + str(serialise_output))
    trace("Serialize Output:- %s " % (serialise_output))
    success = (re.search("statuscode: 0", serialise_output)
               and re.search("messagetype: OK", serialise_output))
    return "OK" if success else "Error"
def get_k8s_version(self, *args, **kwargs):
    """
    Build and return a {node_name: k8sVersion} mapping for every entry in
    the Kubernetes cluster list fetched from PCC.

    On any failure the error is traced and None is returned implicitly.
    """
    banner("Get K8s Version")
    self._load_kwargs(kwargs)
    try:
        # NOTE: kwargs is intentionally printed twice to match existing logs.
        print("Kwargs are: {}".format(kwargs))
        print("Kwargs are: {}".format(kwargs))
        conn = BuiltIn().get_variable_value("${PCC_CONN}")
        print("conn is {}".format(conn))
        k8s_list = pcc.get_kubernetes(conn)
        print("k8s_list is {}".format(k8s_list))
        versions = {}
        for entry in k8s_list["Result"]["Data"]:
            print("portus version of portus {} is {} ".format(
                entry["name"], entry["k8sVersion"]))
            versions[entry["name"]] = entry["k8sVersion"]
        print("k8s_ver_list is {}".format(versions))
        return versions
    except Exception as e:
        trace("Error in getting k8s version: {}".format(e))
def create_dummy_file_copy_to_mount_path(self, *args, **kwargs):
    """
    Create a zero-filled dummy file with dd and copy it into the mount
    folder under /mnt. Returns "OK" on success; traces the exception and
    returns None on failure.
    """
    banner("Create dummy file and copy to mount path")
    self._load_kwargs(kwargs)
    try:
        print("Kwargs are: {}".format(kwargs))
        # Step 1: create the dummy file of the requested size.
        create_cmd = "sudo dd if=/dev/zero of={} bs={} count=1".format(
            self.dummy_file_name, self.dummy_file_size)
        create_result = cli_run(cmd=create_cmd,
                                host_ip=self.hostip,
                                linux_user=self.user,
                                linux_password=self.password)
        print("cmd4: {} executed successfully and status is: {}".format(
            create_cmd, create_result))
        time.sleep(2)
        # Step 2: copy the file into the ceph mount folder.
        copy_cmd = "sudo cp /home/pcc/{} /mnt/{}".format(
            self.dummy_file_name, self.mount_folder_name)
        copy_result = cli_run(cmd=copy_cmd,
                              host_ip=self.hostip,
                              linux_user=self.user,
                              linux_password=self.password)
        print("cmd5: {} executed successfully and status is:{}".format(
            copy_cmd, copy_result))
        time.sleep(2)
        return "OK"
    except Exception as e:
        trace(
            "Error in create_dummy_file_copy_to_mount_path : {}".format(e))
def check_replicated_pool_creation(self, *args, **kwargs):
    """
    Verify that the hidden replicated pool ("<erasure_pool_name>-hs")
    exists after creating an RBD on an erasure-coded pool.

    Returns "OK" when the pool is listed by `ceph osd lspools`, "Error"
    when it is absent, and None (after tracing) on exception.
    """
    banner(
        "Check Replicated Pool Creation After Erasure Pool RBD Creation")
    self._load_kwargs(kwargs)
    print("Kwargs are: {}".format(kwargs))
    print("Username: {}".format(self.username))
    print("Password: {}".format(self.password))
    try:
        lspools_cmd = "sudo ceph osd lspools"
        check_replicated_pool = cli_run(cmd=lspools_cmd,
                                        host_ip=self.hostip,
                                        linux_user=self.username,
                                        linux_password=self.password)
        print("check_replicated_pool output: {}".format(
            check_replicated_pool))
        serialised_check_replicated_pool = self._serialize_response(
            time.time(), check_replicated_pool)
        print("serialised_check_replicated_pool is:{}".format(
            serialised_check_replicated_pool))
        stdout_text = str(serialised_check_replicated_pool['Result']
                          ['stdout']).replace('\n', '').strip()
        # Ceph names the backing replicated pool "<erasure_pool>-hs".
        expected_pool = str(self.erasure_pool_name) + "-hs"
        return "OK" if expected_pool in str(stdout_text) else "Error"
    except Exception as e:
        trace("Error in check_replicated_pool_creation: {}".format(e))
def get_ceph_state_nodes(self, *args, **kwargs):
    """
    Return the deduplicated host IPs of the nodes in a given Ceph state
    (e.g. 'mds', or any other state handled by the generic branch) for
    the cluster named self.name.

    Reads from kwargs: name, state, optionally state_status (regex
    filter applied to each mds node's state).
    Returns a list of host IPs (deduplicated via set()).
    """
    self._load_kwargs(kwargs)
    banner("PCC.Ceph Get State Nodes: {}".format(self.state))
    print("Kwargs:" + str(kwargs))
    try:
        conn = BuiltIn().get_variable_value("${PCC_CONN}")
    except Exception as e:
        raise e
    cluster_id = easy.get_ceph_cluster_id_by_name(conn, self.name)
    print("Cluster Name: {} Id: {}".format(self.name, cluster_id))
    nodes = []
    nodes_name = []
    response = pcc.get_ceph_clusters_state(conn, str(cluster_id),
                                           str(self.state))
    trace("Response:" + str(response))
    if self.state.lower() == 'mds':
        # mds responses nest the node list under a 'nodes' key.
        for val in get_response_data(response)['nodes']:
            if self.state_status:
                # Only keep nodes whose state matches the requested filter.
                if re.search(self.state_status, val['state']):
                    nodes_name.append(val['name'])
                    nodes.append(easy.get_hostip_by_name(
                        conn, val['name']))
            else:
                nodes_name.append(val['name'])
                nodes.append(easy.get_hostip_by_name(conn, val['name']))
    else:
        # Other states return a flat list keyed by 'server'.
        for data in get_response_data(response):
            print("Data:" + str(data))
            nodes_name.append(data['server'])
            nodes.append(easy.get_hostip_by_name(conn, data['server']))
    # Deduplicate; note this does not preserve the original order.
    nodes = list(set(nodes))
    print("{} Nodes Host IP's: {}".format(self.state, str(nodes)))
    print("{} Nodes Name: {}".format(self.state, str(nodes_name)))
    trace("{} Nodes: {}".format(self.state, str(nodes)))
    return nodes
def ceph_cleanup_be(self, **kwargs):
    """
    Wipe the filesystem signatures from every disk on every node in
    self.nodes_ip using `wipefs -a`. Always returns "OK".
    """
    self._load_kwargs(kwargs)
    list_disks_cmd = "sudo lsblk | grep 'disk' | awk '{print $1}'"
    for ip in self.nodes_ip:
        trace("======== cmd: {} is getting executed ========".format(
            list_disks_cmd))
        print("======== cmd: {} is getting executed ========".format(
            list_disks_cmd))
        drives_op = cli_run(ip, self.user, self.password, list_disks_cmd)
        trace("Drives_op: {}".format(str(drives_op)))
        print("Drives_op: {}".format(str(drives_op)))
        # First stdout line is skipped (command echo); remaining lines are
        # one device name each.
        serialized_drives = str(
            self._serialize_response(
                time.time(),
                drives_op)['Result']['stdout']).strip().split('\n')[1:]
        trace("serialized_drives: {}".format(serialized_drives))
        print("serialized_drives: {}".format(serialized_drives))
        for device in serialized_drives:
            wipe_cmd = "sudo wipefs -a /dev/{}".format(device.strip())
            trace("======== cmd: {} is getting executed ========".format(
                wipe_cmd))
            print("======== cmd: {} is getting executed ========".format(
                wipe_cmd))
            wipe_result = cli_run(ip, self.user, self.password, wipe_cmd)
            trace("Clean drives output:{}".format(str(wipe_result)))
            print("Clean drives output:{}".format(str(wipe_result)))
            time.sleep(5)
    return "OK"
def wait_until_cluster_ready(self, *args, **kwargs):
    """
    Poll PCC until the Ceph cluster named self.name reaches 100%
    progress or a "completed" deploy status.

    [Returns]
        (str) "OK" when ready, "Error" when the deploy status contains
        "failed", None when self.name is not set.
    [Raises]
        Exception on PCCSERVER_TIMEOUT expiry.
    """
    banner("PCC.Ceph Wait Until Cluster Ready")
    self._load_kwargs(kwargs)
    if self.name == None:
        return None
    try:
        conn = BuiltIn().get_variable_value("${PCC_CONN}")
    except Exception as e:
        raise e
    cluster_ready = False
    timeout = time.time() + PCCSERVER_TIMEOUT
    capture_data = ""
    while cluster_ready == False:
        response = pcc.get_ceph_clusters(conn)
        for data in get_response_data(response):
            if str(data['name']).lower() == str(self.name).lower():
                capture_data = data
                if data['progressPercentage'] == 100 or data[
                        'deploy_status'].lower() == "completed":
                    print("Response To Look :-" + str(data))
                    cluster_ready = True
                elif re.search("failed", str(data['deploy_status'])):
                    print("Response:-" + str(data))
                    return "Error"
        if time.time() > timeout:
            print("Response:-" + str(capture_data))
            raise Exception("[PCC.Ceph Wait Until Cluster Ready] Timeout")
        # BUG FIX: the progress trace previously used the for-loop variable
        # `data`, which is unbound when the response is empty (NameError)
        # and otherwise refers to the LAST cluster in the list rather than
        # the one we are waiting on. Use the captured matching entry.
        if capture_data:
            trace(" Waiting until cluster: %s is Ready, currently: %s" %
                  (capture_data['name'],
                   capture_data['progressPercentage']))
        time.sleep(5)
    return "OK"
def cli_pcc_set_keys(self, *args, **kwargs):
    """
    [Args]
        (str) host_ip:
        (str) linux_password:
        (str) linux_user:
        (list) nodes_ip:
    [Returns]
        (str) OK if command successful, stderr output if there's an error
    """
    self._load_kwargs(kwargs)
    print("kwargs:-" + str(kwargs))
    banner("CLI.Pcc Set Keys ip=%s" % self.host_ip)
    flag = []
    if not self.nodes_ip:
        print("Nodes Ips are missing, Please provide ...")
        return "Error"
    # BUG FIX: was eval(), which executes arbitrary code from the input.
    # ast.literal_eval parses the same list-literal strings safely and is
    # what the sibling keywords in this file already use.
    for node_ip in ast.literal_eval(str(self.nodes_ip)):
        print("Setting keys for {} ...".format(str(node_ip)))
        trace("Setting keys for {} ...".format(str(node_ip)))
        cmd = "sudo docker exec platina-executor sh -c 'ssh -o StrictHostKeyChecking=no -i /root/.ssh/id_rsa_ansible pcc@{} -t echo -e 'yes''".format(
            node_ip)
        print("Command: " + str(cmd))
        cmd_output = cli_run(self.host_ip, self.linux_user,
                             self.linux_password, cmd)
        trace(cmd_output)
        # Record nodes whose key copy failed; continue with the rest.
        if re.search("failed", str(cmd_output)):
            flag.append(node_ip)
    if flag:
        print("failed while copying the keys for {}".format(flag))
        return "Error"
    return "OK"
def validate_storage_and_cache_pool_relation(self, *args, **kwargs):
    """
    Check that the data pool references the cache pool (cachePool) and
    the cache pool references the data pool (storagePool) in the cluster
    identified by self.ceph_cluster_id.

    Returns "OK" when both links are present, otherwise a failure
    message naming the two pools.
    """
    self._load_kwargs(kwargs)
    banner("PCC.Ceph Validate storage and cache pool relation")
    try:
        conn = BuiltIn().get_variable_value("${PCC_CONN}")
    except Exception as e:
        raise e
    checks = []
    get_all_pools_response = pcc.get_ceph_pools_by_cluster_id(
        conn, str(self.ceph_cluster_id))
    trace("get_all_pools_response: {}".format(get_all_pools_response))
    for pool in get_response_data(get_all_pools_response):
        # Forward link: data pool -> cache pool.
        if pool['name'] == self.data_pool_name and \
                pool['cachePool']['name'] == self.cache_pool_name:
            checks.append("OK")
        # Reverse link: cache pool -> storage (data) pool.
        if pool['name'] == self.cache_pool_name and \
                pool['storagePool']['name'] == self.data_pool_name:
            checks.append("OK")
    trace("Validation status: {}".format(checks))
    if checks == ["OK", "OK"]:
        return "OK"
    return "Validation failed for datapool- {} and cache_pool- {}".format(
        self.data_pool_name, self.cache_pool_name)
def phone_home_encrypted_value_validation(self, *arg, **kwargs):
    """
    Grep the phone-home working directory for known plaintext secrets;
    every candidate value must appear zero times for the check to pass.

    Returns "OK" when nothing was found, otherwise a status list naming
    the unencrypted values, or an exception message on failure.
    """
    banner("PCC.PhoneHome Encrypted Values Validation")
    self._load_kwargs(kwargs)
    print("Kwargs:" + str(kwargs))
    trace("Kwargs:" + str(kwargs))
    try:
        encrypted_values = [
            "plat1na", "cals0ft", "miniominio", "aucta2018",
            "BEGIN CERTIFICATE", "BEGIN PRIVATE KEY", "Bearer"
        ]
        status = []
        for secret in encrypted_values:
            grep_cmd = 'sudo grep -rnw "/home/pcc/platina-cli-ws/phone-home/" -e "{}"| wc -l'.format(
                secret)
            grep_result = cli_run(self.host_ip, self.user, self.password,
                                  grep_cmd)
            # wc -l output: number of files/lines containing the secret.
            hit_count = int(
                self._serialize_response(
                    time.time(), grep_result)['Result']['stdout'].strip())
            status.append(
                "OK" if hit_count == 0 else "Not Encrypted:{}".format(secret))
        trace("status is :{}".format(status))
        print("status is :{}".format(status))
        if status and all(elem == "OK" for elem in status):
            return "OK"
        return "Result is :{}".format(status)
    except Exception as e:
        return "Exception occured in phone home encrypted value validation: {}".format(
            e)
def modify_ceph_pool(self, *args, **kwargs):
    """
    Update an existing Ceph pool via the PCC API.

    Builds the pool payload (id, name, size, tags, cluster id, type,
    resilience scheme, quota) from kwargs and forwards it to
    pcc.modify_ceph_pool.

    [Returns] the pcc.modify_ceph_pool response.
    [Raises] Exception if the payload cannot be built.
    """
    self._load_kwargs(kwargs)
    if self.size:
        try:
            self.size = ast.literal_eval(str(self.size))
        except ValueError:
            print("Values is None or AlphaNumeric")
    if self.tags:
        # BUG FIX: was eval(), which executes arbitrary code. literal_eval
        # parses the same list/tuple literal safely and matches how `size`
        # is handled just above.
        self.tags = ast.literal_eval(str(self.tags))
    try:
        payload = {
            "id": self.id,
            "name": self.name,
            "size": self.size,
            "tags": self.tags,
            "ceph_cluster_id": self.ceph_cluster_id,
            "type": self.pool_type,
            "resilienceScheme": self.resilienceScheme,
            "quota": self.quota,
            "quota_unit": self.quota_unit
        }
        conn = BuiltIn().get_variable_value("${PCC_CONN}")
        print(str(payload))
    except Exception as e:
        trace("[update_pool] EXCEPTION: %s" % str(e))
        raise Exception(e)
    return pcc.modify_ceph_pool(conn, payload)
def wait_until_pool_ready(self, *args, **kwargs):
    """
    Poll PCC until the pool named self.name reports deploy_status
    "completed" ("OK"), "failed" ("Error"), or the timeout string on
    PCCSERVER_TIMEOUT expiry. Returns None when self.name is unset.
    """
    banner("PCC.Ceph Wait Until Pool Ready")
    self._load_kwargs(kwargs)
    if self.name == None:
        return None
    try:
        conn = BuiltIn().get_variable_value("${PCC_CONN}")
    except Exception as e:
        raise e
    deadline = time.time() + PCCSERVER_TIMEOUT
    target = str(self.name).lower()
    while True:
        response = pcc.get_ceph_pools(conn)
        if time.time() > deadline:
            return "[PCC.Ceph Wait Until Pool Ready] Timeout"
        for pool in get_response_data(response):
            if str(pool['name']).lower() != target:
                continue
            print(str(pool))
            if pool['deploy_status'] == "completed":
                return "OK"
            if pool['deploy_status'] == "failed":
                return "Error"
            trace(
                " Waiting until pool : %s is Ready, currently: %s" %
                (pool['name'], pool['progressPercentage']))
            time.sleep(5)
def wait_until_pool_deleted(self, *args, **kwargs):
    """
    Block until the pool with self.id disappears from PCC's pool list.

    Returns "OK" once the pool is gone (after a 10s settle delay), or
    immediately when self.id is None. Raises on PCCSERVER_TIMEOUT expiry.
    """
    banner("PCC.Ceph Wait Until Pool Deleted")
    self._load_kwargs(kwargs)
    if self.id == None:
        # pool doesn't exist, nothing to wait for
        return "OK"
    try:
        conn = BuiltIn().get_variable_value("${PCC_CONN}")
    except Exception as e:
        raise e
    deadline = time.time() + PCCSERVER_TIMEOUT
    still_present = True
    while still_present:
        still_present = False
        response = pcc.get_ceph_pools(conn)
        for pool in get_response_data(response):
            if str(pool['id']) == str(self.id):
                pool_name = pool["name"]
                still_present = True
        if time.time() > deadline:
            raise Exception("[PCC.Wait Until Pool Deleted] Timeout")
        if still_present:
            trace(
                "Waiting until node: %s is deleted. Timeout in %.1f seconds."
                % (pool_name, deadline - time.time()))
            time.sleep(5)
        else:
            trace("Pool deleted!")
    # Settle delay so dependent teardown steps see a consistent state.
    time.sleep(10)
    return "OK"
def wait_until_rbd_deleted(self, *args, **kwargs):
    """
    Block until the RBD with self.id disappears from PCC's RBD list.

    Returns "OK" once gone (after a 10s settle delay), None when
    self.id is unset. Raises on PCCSERVER_TIMEOUT expiry.
    """
    banner("PCC.Ceph Wait Until Rbd Deleted")
    self._load_kwargs(kwargs)
    if self.id == None:
        return None
    try:
        conn = BuiltIn().get_variable_value("${PCC_CONN}")
    except Exception as e:
        raise e
    Id_found_in_list_of_rbd = True
    timeout = time.time() + PCCSERVER_TIMEOUT
    while Id_found_in_list_of_rbd == True:
        Id_found_in_list_of_rbd = False
        response = pcc.get_ceph_rbds(conn)
        for data in get_response_data(response):
            print(data)
            if str(data['id']) == str(self.id):
                Id_found_in_list_of_rbd = True
        if time.time() > timeout:
            raise Exception("[PCC.Ceph Wait Until Rbd Deleted] Timeout")
        if Id_found_in_list_of_rbd:
            # NOTE(review): `data` here is the LAST RBD iterated, not
            # necessarily the one matching self.id, so the traced name may
            # be wrong when multiple RBDs exist — confirm before relying
            # on this log line.
            trace(" Waiting until rbd: %s is deleted. Timeout in %.1f seconds." % (data['name'], timeout-time.time()))
            time.sleep(5)
    # Settle delay after deletion is observed.
    time.sleep(10)
    return "OK"
def wait_until_rados_ready(self, *args, **kwargs):
    """
    Poll PCC until the Rados gateway named self.name (in cluster
    self.ceph_cluster_name) reports deploy_status "completed".

    Returns "OK" on completion, "Error" when the status contains
    "failed", None when self.name is unset. Raises on
    PCCSERVER_TIMEOUT expiry.
    """
    banner("PCC.Wait Until Rados Gateway Ready")
    self._load_kwargs(kwargs)
    print("Kwargs" + str(kwargs))
    if self.name == None:
        return None
    try:
        conn = BuiltIn().get_variable_value("${PCC_CONN}")
    except Exception as e:
        raise e
    gateway_ready = False
    timeout = time.time() + PCCSERVER_TIMEOUT
    ceph_cluster_id = str(
        easy.get_ceph_cluster_id_by_name(conn,
                                         Name=self.ceph_cluster_name))
    # gateway_ready is never set True: the loop only exits via the
    # return/raise statements inside it.
    while gateway_ready == False:
        response = pcc.get_ceph_rgws(conn, ceph_cluster_id)
        for data in get_response_data(response):
            if str(data['name']).lower() == str(self.name).lower():
                print("Response To Look :-" + str(data))
                trace(" Waiting until %s is Ready, current status: %s" %
                      (str(data['name']), str(data['deploy_status'])))
                if data['deploy_status'] == "completed":
                    return "OK"
                elif re.search("failed", str(data['deploy_status'])):
                    return "Error"
                else:
                    # Still deploying: stop scanning and poll again.
                    break
        if time.time() > timeout:
            raise Exception("[PCC.Ceph Wait Until Rgw Ready] Timeout")
        time.sleep(5)
    # Unreachable in practice (loop exits only via return/raise above).
    return "OK"
def wait_until_node_deleted(self, *args, **kwargs):
    """
    Wait Until Node Deleted
    [Args]
        (dict) conn: Connection dictionary obtained after logging in
        (str) Name: Name of the Node
    [Returns]
        (dict) Wait time
        (dict) Error response: If Exception occured
    """
    self._load_kwargs(kwargs)
    banner("PCC.Wait Until Node Deleted")
    conn = BuiltIn().get_variable_value("${PCC_CONN}")
    deadline = time.time() + PCC_TIMEOUT
    try:
        while True:
            node_list = pcc.get_nodes(conn)['Result']['Data']
            if node_list == None:
                return "OK"
            # Substring/regex search of the node name over the whole list:
            # absence means the node is gone.
            if not re.search(self.Name, str(node_list)):
                return "OK"
            trace("Node:{} not yet deleted".format(self.Name))
            time.sleep(3)
            if time.time() > deadline:
                return {"Error": "Timeout"}
    except Exception as e:
        return "Exception encountered: {}".format(e)
def restart_rsyslog_service(self, *args, **kwargs):
    """
    Restart rsyslog on every host in self.host_ips and verify each one
    reports "active (running)". Returns "OK" when all hosts pass,
    otherwise an error string with the per-host status list.
    """
    banner("CLI.Restart Rsyslog service")
    self._load_kwargs(kwargs)
    print("Kwargs are: {}".format(kwargs))
    try:
        restart_status = []
        restart_cmd = "sudo systemctl restart rsyslog"
        status_cmd = "sudo systemctl status rsyslog"
        for hostip in ast.literal_eval(self.host_ips):
            restart_op = cli_run(hostip, self.linux_user,
                                 self.linux_password, restart_cmd)
            print("cmd1: {}\n ====== Output is {} ========".format(
                restart_cmd, restart_op))
            # Give the service a moment before checking its status.
            time.sleep(10)
            status_op = cli_run(hostip, self.linux_user,
                                self.linux_password, status_cmd)
            print("cmd2: {}\n ====== Output is {} ========".format(
                status_cmd, status_op))
            trace("cmd2: {}\n ====== Output is {} ========".format(
                status_cmd, status_op))
            if re.search("active \(running\)", str(status_op)):
                print("============================== Inside re search =====================")
                restart_status.append("OK")
            else:
                restart_status.append(
                    "Restart validation failed on {}".format(hostip))
        if restart_status and all(elem == "OK" for elem in restart_status):
            return "OK"
        return "Error: while Rsyslog service restart. Restart status is: {}".format(restart_status)
    except Exception as e:
        return "Exception encountered while restarting Rsyslog client services {}".format(e)
def mount_fs(self, *args, **kwargs):
    """
    Mount the Ceph FS at /mnt/<mount_folder_name> and verify the mount
    shows up in `mount | grep test_fs_mnt` output.

    Returns "OK" on success, an error string if the mount point is not
    found, and None (after tracing) on exception.
    """
    banner("Mount FS to Mount Point")
    self._load_kwargs(kwargs)
    try:
        print("Kwargs are: {}".format(kwargs))
        # Step 1: mount the ceph filesystem.
        mount_cmd = "sudo mount -t ceph {}:/ /mnt/{} -o name=admin,secret='ceph-authtool -p /etc/ceph/ceph.client.admin.keyring'".format(self.inet_ip,self.mount_folder_name)
        mount_result = cli_run(cmd=mount_cmd,
                               host_ip=self.hostip,
                               linux_user=self.user,
                               linux_password=self.password)
        print("cmd1: {} executed successfully and status is: {}".format(mount_cmd, mount_result))
        time.sleep(1)
        # Step 2: confirm the mount is visible.
        verify_cmd = "sudo mount| grep test_fs_mnt"
        verify_result = cli_run(cmd=verify_cmd,
                                host_ip=self.hostip,
                                linux_user=self.user,
                                linux_password=self.password)
        print("cmd2: {} executed successfully and status is: {}".format(verify_cmd, verify_result))
        serialised_status = self._serialize_response(time.time(), verify_result)
        stdout_text = str(serialised_status['Result']['stdout']).replace('\n', '').strip()
        if '/mnt/{}'.format(self.mount_folder_name) in stdout_text:
            print("Found in string")
        else:
            return "Error: {} file not found".format(self.mount_folder_name)
        return "OK"
    except Exception as e:
        trace("Error in mount_fs: {}".format(e))
def check_fs_mount(self, *args, **kwargs):
    """
    From another server: create the mount folder, mount the Ceph FS,
    and verify the dummy file is visible inside the mount.

    Returns "OK" on success, an error string if the file is absent, and
    None (after tracing) on exception.
    """
    banner("PCC.Check FS Mount on other server")
    self._load_kwargs(kwargs)
    try:
        print("Kwargs are: {}".format(kwargs))
        print("username is '{}' and password is: '{}'".format(self.user,self.password))
        # Step 1: create the mount point directory.
        mkdir_cmd = "sudo mkdir /mnt/{}".format(self.mount_folder_name)
        step_result = cli_run(cmd=mkdir_cmd,
                              host_ip=self.hostip,
                              linux_user=self.user,
                              linux_password=self.password)
        print("cmd1: {} executed successfully and status is: {}".format(mkdir_cmd, step_result))
        # Step 2: mount the ceph filesystem onto it.
        mount_cmd = "sudo mount -t ceph {}:/ /mnt/{} -o name=admin,secret='ceph-authtool -p /etc/ceph/ceph.client.admin.keyring'".format(self.inet_ip,self.mount_folder_name)
        step_result = cli_run(cmd=mount_cmd,
                              host_ip=self.hostip,
                              linux_user=self.user,
                              linux_password=self.password)
        print("cmd2: {} executed successfully and status is: {}".format(mount_cmd, step_result))
        # Step 3: list the mount and look for the dummy file.
        ls_cmd = "sudo ls /mnt/{}".format(self.mount_folder_name)
        step_result = cli_run(cmd=ls_cmd,
                              host_ip=self.hostip,
                              linux_user=self.user,
                              linux_password=self.password)
        print("cmd3: {} executed successfully and status is: {}".format(ls_cmd, step_result))
        serialised_status = self._serialize_response(time.time(), step_result)
        stdout_text = str(serialised_status['Result']['stdout']).replace('\n', '').strip()
        if '{}'.format(self.dummy_file_name) in stdout_text:
            print("Data Found in output")
        else:
            return "Error: '{}' file not found".format(self.dummy_file_name)
        return "OK"
    except Exception as e:
        trace("Error in check_fs_mount: {}".format(e))
def update_SAS_enclosure(self, *args, **kwargs):
    """
    Toggle the LED on a SAS enclosure slot by PUTting {"ledOn": ...} to
    the pccserver enclosures endpoint via curl on the setup host.

    Returns the serialized response when status==200 with no error,
    otherwise "Error".
    """
    self._load_kwargs(kwargs)
    banner("PCC.Update SAS Enclosure")
    print("kwargs:-" + str(kwargs))
    conn = BuiltIn().get_variable_value("${PCC_CONN}")
    # led_status arrives as a string ("True"/"False"); json.loads of its
    # lowercase form yields the boolean.
    payload = {"ledOn": json.loads(self.led_status.lower())}
    print("Payload:-" + str(payload))
    trace("Payload:- %s " % (payload))
    token = self.auth_data["token"]
    slot_id = self.get_sub_enclosure_slot_id(**kwargs)
    banner("Slot id is: {}".format(slot_id))
    cmd_strct = """curl -k -X PUT --data \'{}\' -H "Content-type:application/json" -H "Authorization:Bearer {}" https://{}:9999/pccserver/v2/enclosures/1/slots/{}"""
    cmd = cmd_strct.format(json.dumps(payload), token, self.setup_ip,
                           slot_id)
    print("Command:-" + str(cmd))
    raw_output = cli_run(self.setup_ip, self.user, self.password, cmd)
    serialise_output = json.loads(PccBase()._serialize_response(
        time.time(), raw_output)['Result']['stdout'])
    print("Serialize Output:" + str(serialise_output))
    trace("Serialize Output:- %s " % (serialise_output))
    ok = (serialise_output['status'] == 200
          and serialise_output['error'] == "")
    return serialise_output if ok else "Error"
def modify_ceph_fs(self, *args, **kwargs):
    """
    Update an existing Ceph FS via the PCC API.

    Any field absent from kwargs is sent as None; data_pool, when
    present, is parsed from its literal string form.

    [Returns] the pcc.modify_ceph_fs response.
    [Raises] Exception if the payload cannot be built.
    """
    self._load_kwargs(kwargs)
    try:
        # Normalize optional fields: missing -> None.
        for optional in ('name', 'metadata_pool', 'default_pool',
                         'ceph_cluster_id'):
            if optional not in kwargs:
                setattr(self, optional, None)
        if 'data_pool' not in kwargs:
            self.data_pool = None
        else:
            # data_pool arrives as a list-literal string.
            self.data_pool = ast.literal_eval(self.data_pool)
        payload = {
            "id": self.id,
            "name": self.name,
            "metadata_pool": self.metadata_pool,
            "data_pools": self.data_pool,
            "default_pool": self.default_pool,
            "ceph_cluster_id": self.ceph_cluster_id
        }
        conn = BuiltIn().get_variable_value("${PCC_CONN}")
        print(str(payload))
    except Exception as e:
        trace("[update_] EXCEPTION: %s" % str(e))
        raise Exception(e)
    return pcc.modify_ceph_fs(conn, payload)
def get_SAS_enclosures(self, *args, **kwargs):
    """
    Fetch the SAS enclosure list from the pccserver v2 REST endpoint via
    curl on the setup host.

    Returns the parsed JSON response when status==200 with no error,
    otherwise "Error".
    """
    self._load_kwargs(kwargs)
    banner("PCC.Get SAS Enclosures")
    print("kwargs in Get SAS Enclosure :- " + str(kwargs))
    conn = BuiltIn().get_variable_value("${PCC_CONN}")
    token = self.auth_data["token"]
    print("Authorization token:-" + str(token))
    curl_cmd = 'curl -k -X GET "https://{}:9999/pccserver/v2/enclosures" -H "accept: application/json" -H "Authorization: Bearer {}"'.format(
        self.setup_ip, token)
    raw_output = cli_run(cmd=curl_cmd,
                         host_ip=self.setup_ip,
                         linux_user=self.user,
                         linux_password=self.password)
    serialise_output = json.loads(
        self._serialize_response(time.time(),
                                 raw_output)['Result']['stdout'])
    print("Serialize Output:" + str(serialise_output))
    trace("Serialize Output:- %s " % (serialise_output))
    ok = (serialise_output['status'] == 200
          and serialise_output['error'] == "")
    return serialise_output if ok else "Error"
def get_ceph_version(self, *args, **kwargs):
    """
    Build and return a {hostname: ceph_version} mapping for the cluster
    named self.name, using PCC's ceph version list endpoint.

    On any failure the error is traced and None is returned implicitly.
    """
    banner("Get Ceph Version")
    self._load_kwargs(kwargs)
    try:
        print("Kwargs are: {}".format(kwargs))
        banner("PCC.Get Ceph Version [Name=%s]" % self.name)
        conn = BuiltIn().get_variable_value("${PCC_CONN}")
        print("conn is {}".format(conn))
        ceph_ID = easy.get_ceph_cluster_id_by_name(conn, self.name)
        print("ceph_ID is {}".format(ceph_ID))
        ceph_node_list = pcc.get_ceph_version_list(conn, str(ceph_ID))
        print("ceph_node_list is {}".format(ceph_node_list))
        # Sample entry:
        #   "ceph_version": "ceph version 14.2.20 (36274af...) nautilus (stable)"
        #   "hostname": "qa-clusterhead-10"
        versions = {}
        for node_data in ceph_node_list["Result"]["Data"]:
            print("ceph_version of hostname {} is {} ".format(
                node_data["hostname"], node_data["ceph_version"]))
            versions[node_data["hostname"]] = node_data["ceph_version"]
        print("ceph_ver_list is {}".format(versions))
        return versions
    except Exception as e:
        trace("Error in getting ceph version: {}".format(e))
def wait_until_node_ready(self, *args, **kwargs):
    """
    Wait Until Node Ready
    [Args]
        (dict) conn: Connection dictionary obtained after logging in
        (str) Name: Name of the Node
    [Returns]
        (dict) Wait Time
        (dict) Error response: If Exception occured
    """
    self._load_kwargs(kwargs)
    banner("PCC.Wait Until Node Ready")
    conn = BuiltIn().get_variable_value("${PCC_CONN}")
    # Initial settle delay before the first poll.
    time.sleep(10)
    PCC_TIMEOUT = 60 * 10  #10 minutes (local override of the module default)
    deadline = time.time() + PCC_TIMEOUT
    while True:
        for node in pcc.get_nodes(conn)['Result']['Data']:
            if str(node['Name']) != str(self.Name):
                continue
            if node['provisionStatus'] == 'Ready':
                trace("Node:{} is ready".format(self.Name))
                return "OK"
            if "fail" in node['provisionStatus']:
                return "Wait until node ready status - Failed. Node Status is {}".format(
                    node['provisionStatus'])
        if time.time() > deadline:
            return {"Error": "Timeout"}
        trace("Node:{} is not yet ready".format(self.Name))
        time.sleep(5)
def get_stored_size_replicated_pool(self, *args, **kwargs):
    """
    Read `ceph df detail` for self.pool_name and return its stored size
    normalized via convert(value, unit).

    Returns the converted size on success; traces the error and returns
    None on failure.
    """
    banner("Get Stored Size for Replicated Pool")
    self._load_kwargs(kwargs)
    try:
        df_cmd = "sudo ceph df detail | grep -w {}".format(self.pool_name)
        replicated_pool_stored_size = cli_run(cmd=df_cmd,
                                              host_ip=self.hostip,
                                              linux_user=self.user,
                                              linux_password=self.password)
        serialised_replicated_pool_stored_size = self._serialize_response(
            time.time(), replicated_pool_stored_size)
        cmd_output = str(serialised_replicated_pool_stored_size['Result']
                         ['stdout']).replace('\n', '').strip()
        # Columns: [..., stored-value, stored-unit, ...] — indices 3 and 4.
        splitting = cmd_output.split()
        print("splitting: {}".format(splitting))
        print("value of replicated pool: {}".format(splitting[3]))
        print("Size of replicated pool: {}".format(splitting[4]))
        # BUG FIX: was eval() on remote command output, which would execute
        # arbitrary code if the output were malformed/hostile. literal_eval
        # parses the same numeric literal (int or float) safely.
        size_of_replicated_pool = convert(ast.literal_eval(splitting[3]),
                                          splitting[4])
        print("Size of replicated pool is: {}".format(
            size_of_replicated_pool))
        return size_of_replicated_pool
    except Exception as e:
        trace("Error in get_stored_size_replicated_pool: {}".format(e))
def cleanup_logs_by_rsyslog(self, *args, **kwargs):
    """
    Truncate /var/log/messages on every host in self.host_ips using dd
    from /dev/null, verifying each run via the "0+0 records out" marker.

    Returns "OK" when all hosts pass, otherwise an error string with the
    per-host status list, or an exception message on failure.
    """
    banner("CLI.Cleanup logs created by Rsyslog")
    self._load_kwargs(kwargs)
    print("Kwargs are: {}".format(kwargs))
    try:
        cleanup_status = []
        truncate_cmd = "sudo dd if=/dev/null of=/var/log/messages"
        for hostip in ast.literal_eval(self.host_ips):
            truncate_op = cli_run(hostip, self.linux_user,
                                  self.linux_password, truncate_cmd)
            print("cmd: {}\n ====== Output is {} ========".format(
                truncate_cmd, truncate_op))
            if re.search("0\+0 records out", str(truncate_op)):
                cleanup_status.append("OK")
            else:
                cleanup_status.append(
                    "Rsyslog Cleanup failed on {}".format(hostip))
        trace("Cleanup status is: {}".format(cleanup_status))
        all_ok = bool(cleanup_status) and all(
            elem == "OK" for elem in cleanup_status)
        trace("Result is : {}".format(all_ok))
        if all_ok:
            return "OK"
        return "Error: while Rsyslog cleanup. Cleanup status is: {}".format(cleanup_status)
    except Exception as e:
        return "Exception encountered while Rsyslog cleanup: {}".format(e)
def get(testdata_file, testdata_key):
    """
    Load a JSON testdata file and return the value stored under the
    given key; on any failure trace the error and return {"Error": msg}.
    """
    banner("TESTDATA.Get file=%s key=%s" % (testdata_file, testdata_key))
    try:
        path = TESTDATA_FILE_DIRECTORY + testdata_file
        with open(path) as json_file:
            return json.load(json_file)[testdata_key]
    except Exception as e:
        trace("ERROR: %s" % str(e))
        return {"Error": str(e)}
def remove_package_from_machine(self, *args, **kwargs):
    """
    Remove self.package_name from the host, choosing apt-get for
    Ubuntu/Debian and yum for Red Hat/CentOS based on get_OS_version.

    [Returns]
        (str) "<pkg> Package removed", "<pkg> Package is not installed",
        an error string, or "Check package remove failed" when the OS is
        unrecognized.
    """
    banner("CLI.Remove a package from machine")
    self._load_kwargs(kwargs)
    trace("Kwargs are: " + str(kwargs))
    conn = BuiltIn().get_variable_value("${PCC_CONN}")
    OS_type = self.get_OS_version(host_ip=self.host_ip,
                                  linux_user=self.linux_user,
                                  linux_password=self.linux_password)

    def _run_and_flatten(cmd):
        # Run the remove command and return its stdout collapsed onto
        # one line for pattern matching.
        cmd_output = cli_run(cmd=cmd,
                             host_ip=self.host_ip,
                             linux_user=self.linux_user,
                             linux_password=self.linux_password)
        serialised_status = self._serialize_response(
            time.time(), cmd_output)
        return str(
            serialised_status['Result']['stdout']).replace('\n',
                                                           '').strip()

    if re.search("Ubuntu", str(OS_type)) or re.search(
            "Debian", str(OS_type)):
        output = _run_and_flatten(
            "sudo apt-get --assume-yes remove {}".format(
                self.package_name))
        # BUG FIX: the success check previously matched the literal
        # "Removing rsyslog", so removing any other package reported an
        # error even when apt succeeded. Match the requested package.
        if re.search("Removing {}".format(self.package_name), output):
            return "{} Package removed".format(self.package_name)
        elif re.search("is not installed", output):
            return "{} Package is not installed".format(self.package_name)
        else:
            return "Error while removing {} package".format(
                self.package_name)
    elif re.search("Red Hat", str(OS_type)) or re.search(
            "CentOS", str(OS_type)):
        output = _run_and_flatten("sudo yum -y remove {}".format(
            self.package_name))
        if re.search("Complete!", output):
            return "{} Package removed".format(self.package_name)
        elif re.search(
                "No Match for argument: {}".format(self.package_name),
                output):
            return "{} Package is not installed".format(self.package_name)
        else:
            return "Error while removing {} package".format(
                self.package_name)
    else:
        return "Check package remove failed"