def get_cert_info(cert_id, con_ssh=None):
    """
    Fetch and parse 'system certificate-show' output for a certificate.

    Args:
        cert_id (str): uuid/id of the certificate to query
        con_ssh: ssh connection to the active controller

    Returns (tuple): (rtn_code, cert_id, cert_type, states)
        - (0, <uuid>, <certtype>, <states dict>) on success
        - (1, <uuid>, <certtype>, '')  when 'details' missing (CGTS-9529)
        - (2, '', '', '')              when no table/output returned
    """
    import ast

    LOG.info('check the status of the current certificate')
    cmd = 'certificate-show ' + cert_id
    output = cli.system(cmd, ssh_client=con_ssh, fail_ok=False)[1]
    if output:
        table = table_parser.table(output)
        if table:
            actual_id = table_parser.get_value_two_col_table(table, 'uuid')
            actual_type = table_parser.get_value_two_col_table(
                table, 'certtype')
            actual_details = table_parser.get_value_two_col_table(
                table, 'details')
            actual_states = ''
            if not actual_details:
                # CGTS-9529: details can be missing from certificate-show
                LOG.fatal('No details in output of certificate-show')
                LOG.fatal(
                    'Ignore it until the known issue CGTS-9529 fixed, '
                    'output:' + output)
                # assert False, 'No details in output of certificate-show'
            else:
                LOG.debug('details from output of certificate-show: {}'.format(
                    actual_details))
                # Was eval(): literal_eval safely parses the dict-literal
                # 'details' field without executing arbitrary CLI output.
                actual_states = ast.literal_eval(actual_details)
                LOG.debug('states: {}'.format(actual_states))
                return 0, actual_id, actual_type, actual_states
            LOG.info('')
            return 1, actual_id, actual_type, actual_states
    else:
        LOG.info('no "details" in output')
        return 2, '', '', ''
def verify_vms():
    """Verify that VMs seen before the test are still healthy afterwards.

    Compares each VM's post-test status against its pre-test status
    (closure variable ``before_vms_status``) and fails if any VM is
    neither ACTIVE nor in its original state.
    """
    LOG.fixture_step("Verifying system VMs after test ended...")
    after_vms_status = vm_helper.get_vms_info(
        fields=['status'], long=False, all_projects=True,
        auth_info=Tenant.get('admin'))

    # Only VMs present both before and after the test are checked
    common_vms = set(before_vms_status) & set(after_vms_status)
    LOG.debug("VMs to verify: {}".format(common_vms))

    failure_msgs = [
        "VM {} is not in good state. Previous status: {}. "
        "Current status: {}".format(vm, before_vms_status[vm][0],
                                    after_vms_status[vm][0])
        for vm in common_vms
        if after_vms_status[vm][0].lower() != 'active'
        and after_vms_status[vm][0] != before_vms_status[vm][0]
    ]

    assert not failure_msgs, '\n'.join(failure_msgs)
    LOG.info("VMs status verified.")
def table(output_lines, combine_multiline_entry=False, rstrip_value=False):
    """Parse CLI table output with multi-line entries taken into account.

    Tempest's table parser creates one row per physical line; e.g. for
    ``neutron net-list`` a net with multiple subnets becomes several rows.
    This wrapper folds those continuation rows back into single entries.

    Args:
        output_lines: raw output from a cli command
        combine_multiline_entry (bool): join continuation lines into one
            string per column instead of keeping a list
        rstrip_value (bool): strip trailing whitespace from cell values

    Returns (dict): table with 'headers' and 'values'; a multi-line entry
        appears as a list inside table_['values'].
    """
    table_ = __table(output_lines, rstrip_value=rstrip_value)
    rows = get_all_rows(table_)
    if not rows:
        LOG.debug('No table returned' if not table_['headers']
                  else "Empty table returned")
        return table_

    table_['values'] = __convert_multilines_values(
        values=rows, merge_lines=combine_multiline_entry)
    return table_
def find_options(self, telnet_conn,
                 end_of_menu=r"(utomatic(ally)?( boot)? in)".encode(),
                 option_identifier=r"[A-Z][A-Za-z]".encode(),
                 newline=r'(\x1b\[\d+;\d+H)+'.encode()):
    """Populate self.options from the PXE ISO boot menu on telnet_conn.

    Waits briefly for the menu, splits the captured screen on cursor-move
    escape sequences, and records each option with its index.  If the
    current option is "Controller Configuration" at index 0, all indices
    are shifted up by one.
    """
    telnet_conn.expect([end_of_menu], 5, fail_ok=True)
    screen = str.encode(telnet_conn.cmd_output)
    candidates = [chunk for chunk in re.split(newline, screen)
                  if re.search(option_identifier, chunk)]
    for idx, chunk in enumerate(candidates):
        self.options.append(KickstartOption(name=chunk.decode(), index=idx))

    current_option = self.get_current_option(telnet_conn)
    LOG.info("PXE ISO current option: {}; index {}".format(
        current_option.name, current_option.index))
    self.index = current_option.index
    if current_option.index == 0 \
            and "Controller Configuration" in current_option.name:
        # increment indices of options by one
        for option in self.options:
            option.index += 1
    LOG.debug("{} options are: {}".format(
        self.name, [option.name for option in self.options]))
def find_options(
        self, telnet_conn,
        end_of_menu=r"(utomatic(ally)?( boot)? in)|(Press (\[Tab\]|\'e\') to edit\s.*(\.)$)|"
                    r"keys to change the selection\.".encode(),
        option_identifier=r"(?!Press)[A-Z][A-Za-z]".encode(),
        newline=r'(\x1b\[\d+;\d+H)+'.encode()):
    """Populate self.options from the USB boot menu on telnet_conn.

    Waits briefly for the end-of-menu marker, splits the captured screen
    on cursor-move escape sequences, and records each matching option
    with its index.
    """
    telnet_conn.expect([end_of_menu], 5, fail_ok=True)
    screen = str.encode(telnet_conn.cmd_output)
    candidates = [chunk for chunk in re.split(newline, screen)
                  if re.search(option_identifier, chunk)]
    for idx, chunk in enumerate(candidates):
        self.options.append(KickstartOption(name=chunk.decode(), index=idx))

    current_option = self.get_current_option(telnet_conn)
    LOG.info("USB current option: {}; index {}".format(
        current_option.name, current_option.index))
    self.index = current_option.index
    LOG.debug("{} options are: {}".format(
        self.name, [option.name for option in self.options]))
def check_url_access(url, headers=None, verify=True, fail_ok=False):
    """
    Check the access to a given url

    Args:
        url(str): url to check
        headers(None|dict): request headers of the http request
        verify(bool|str):
            True: secure request
            False: equivalent to --insecure in curl cmd
            str: applies to https system. CA-Certificate path. e.g.,
                verify=/path/to/cert
        fail_ok(bool): whether request/HTTP errors return instead of raising

    Returns(tuple): (status_code, response)
        - (1, <std_err>): An exception has occurred
        - (status_code, response): status code and response from requests call
    """
    LOG.info('curl -i {}...'.format(url))
    try:
        req = requests.get(url=url, headers=headers, verify=verify)
    except requests.exceptions.RequestException as e:
        if not fail_ok:
            raise e
        message = 'Exception trying to access {}: {}'.format(url, e)
        LOG.warn(message)
        return 1, message

    LOG.info('Status: [{}]'.format(req.status_code))
    LOG.debug(
        '\n{} {}\nHeaders: {}\nResponse code: {}\nResponse body: {}'.format(
            req.request.method, req.request.url, req.request.headers,
            req.status_code, req.text))

    if not fail_ok:
        # raise on 4xx/5xx unless caller opted into best-effort mode
        req.raise_for_status()
    return req.status_code, req.text
def __set_non_platform_lockout(current_values, expt_values):
    """Set stx-openstack keystone lockout values via helm overrides.

    Compares (lockout_duration, lockout_failure_attempts) in
    ``current_values`` against ``expt_values``; if they differ, updates
    the keystone helm override, re-applies the application and asserts
    the values took effect.  No-op when already as expected.
    """
    app_name = 'stx-openstack'
    service = 'keystone'
    namespace = 'openstack'
    section = 'conf.keystone.security_compliance'
    fields = ['lockout_duration', 'lockout_failure_attempts']

    kv_pairs = {
        '{}.{}'.format(section, field): wanted
        for field, current, wanted in zip(fields, current_values, expt_values)
        if current != wanted
    }
    if not kv_pairs:
        LOG.info(
            'stx-openstack keystone lockout values already set to: {}'.format(
                expt_values))
        return

    container_helper.update_helm_override(chart=service,
                                          namespace=namespace,
                                          reset_vals=False,
                                          kv_pairs=kv_pairs)
    override_info = container_helper.get_helm_override_values(
        chart=service, namespace=namespace, fields='user_overrides')
    LOG.debug('override_info:{}'.format(override_info))

    container_helper.apply_app(app_name=app_name, check_first=False,
                               applied_timeout=1800)

    post_values = get_lockout_values(keystone='stx-openstack')
    assert expt_values == post_values, \
        "lockout values did not set to expected after helm override update"
    LOG.info('stx-openstack keystone lockout values set successfully')
def __convert_multilines_values(values, merge_lines=False):
    """Fold continuation rows into single table entries.

    A row belongs to the previous entry when its first column is empty.
    For a multi-row entry each column becomes a list of its non-empty
    cell values; with ``merge_lines`` the list is joined into one string,
    otherwise single-item lists collapse back to plain strings.
    """
    total = len(values)
    if total == 1:
        return values

    entries = []
    start = 0  # first row of the entry being accumulated
    for end in range(total):
        # The entry ends here if this is the last row, or the next row
        # starts a fresh entry (non-empty first column).
        if end != total - 1 and not values[end + 1][0]:
            continue

        if start == end:
            # single-line entry: keep the row untouched
            entry = values[start]
        else:
            chunk = values[start:end + 1]
            # transpose rows -> columns, dropping empty cells per column
            combined = [list(filter(None, list(col))) for col in zip(*chunk)]
            if merge_lines:
                entry = [' '.join(col) for col in combined]
            else:
                # keep list form only when a column truly spans lines
                entry = [col if len(col) > 1 else ' '.join(col)
                         for col in combined]
            LOG.debug("Multi-row entry found for: {}".format(entry[0]))

        entries.append(entry)
        start = end + 1

    return entries
def __init__(self, name, index, key=None, tag=None, sub_menu=None):
    """Create a menu option.

    Derives the key to press from the option text when not given
    (e.g. "press X" / "use X"), defaulting to 'Enter', and infers a
    bios tag ("boot menu" / "setup") from the option name.
    """
    self.name = name
    self.index = index
    self.sub_menu = sub_menu
    option_name = self.name.lower()

    if key is not None:
        self.key = key
    else:
        key_match = re.search(r"(press|use)\W*(\w+)", option_name,
                              re.IGNORECASE)
        if key_match:
            raw = key_match.group(2)
            # use the capitalized form only if it is a known terminal key
            self.key = raw.capitalize() \
                if raw.capitalize() in bios.TerminalKeys.Keys.keys() else raw
        else:
            self.key = 'Enter'

    if tag is None:
        # bios options
        if ("boot menu" in option_name or "network boot" in option_name
                or "pxe boot" in option_name):
            tag = "boot menu"
        elif "setup" in option_name:
            tag = "setup"
    self.tag = tag
    LOG.debug("{} option tag is {} key {}".format(
        self.name, self.tag if self.tag else "None", self.key))
def get(rest_client, resource, auth=True):
    """
    Test GET of <resource> with valid authentication.

    Args:
        rest_client: REST client to issue the request with
        resource: resource path to GET
        auth (bool): whether to authenticate the request

    Skips when the resource is unsupported (404); otherwise asserts a
    200 status code.
    """
    message = "Using requests GET {} with proper authentication"
    LOG.info(message.format(resource))

    status_code, text = rest_client.get(resource=resource, auth=auth)
    LOG.debug(
        "Retrieved: status_code: {} message: {}".format(status_code, text))

    if status_code == 404:
        skip("Unsupported resource in this configuration.")
    else:
        LOG.info("Determine if expected status_code of 200 is received")
        message = "Expected status_code of 200 - received {} and message {}"
        assert status_code == 200, message.format(status_code, text)
def create_server_group(name=None, policy='affinity', rule=None, fail_ok=False,
                        auth_info=None, con_ssh=None, rtn_exist=False,
                        field='id'):
    """
    Create a server group with given criteria

    Args:
        name (str): name of the server group
        policy (str): affinity or anti-affinity
        rule (str|None): max_server_per_host can be specified when
            policy=anti-affinity
        fail_ok (bool):
        auth_info (dict):
        con_ssh (SSHClient):
        rtn_exist (bool): Whether to return existing server group that
            matches the given name
        field (str): id or name

    Returns (tuple): (rtn_code (int), err_msg_or_srv_grp_id (str))
        - (-1, <existing_group>)  # existing group returned (rtn_exist)
        - (0, <server_group_id>)  # server group created successfully
        - (1, <stderr>)           # create server group cli rejected
    """
    # Reuse an existing group when requested and one matches by name
    if name and rtn_exist:
        existing_grp = get_server_groups(name=name, strict=False,
                                         con_ssh=con_ssh,
                                         auth_info=auth_info, field=field)
        if existing_grp:
            LOG.debug("Returning existing server group {}".format(
                existing_grp[0]))
            return -1, existing_grp[0]

    # Derive a unique name from the policy when none given
    if not name:
        name = common.get_unique_name(
            name_str='grp_{}'.format(policy.replace('-', '_')))

    args = '{}{} {}'.format('--rule {} '.format(rule) if rule else '',
                            name, policy.replace('_', '-'))

    LOG.info("Creating server group with args: {}...".format(args))
    exit_code, output = cli.nova('server-group-create', args,
                                 ssh_client=con_ssh, fail_ok=fail_ok,
                                 auth_info=auth_info)
    if exit_code > 0:
        return 1, output

    srv_grp_id = table_parser.get_values(table_parser.table(output), field)[0]
    LOG.info("Server group {} created successfully.".format(name))
    return 0, srv_grp_id
def test_GET_idisks(sysinv_rest):
    """
    Test GET of /idisks/<uuid> with valid authentication.

    For every disk on every host, GET the idisk resource and expect a
    200 status code; skip if the resource is unsupported (404).
    """
    r = sysinv_rest
    path = "/idisks/{}"
    for host in system_helper.get_hosts():
        for disk_uuid in storage_helper.get_host_disks(host):
            res = path.format(disk_uuid)
            LOG.tc_step(
                "Using requests GET {} with proper authentication".format(
                    res))

            status_code, text = r.get(resource=res, auth=True)
            LOG.debug("Retrieved: status_code: {} message: {}".format(
                status_code, text))

            if status_code == 404:
                pytest.skip("Unsupported resource in this configuration.")

            LOG.tc_step("Determine if expected code of 200 is received")
            message = "Expected code of 200 - received {} and message {}"
            assert status_code == 200, message.format(status_code, text)
def get(sysinv_rest, resource):
    """
    Test GET of <resource> with invalid authentication.

    Args:
        sysinv_rest: REST client for the sysinv API
        resource: resource path to GET

    GETs the resource without authentication and asserts a 401 status
    code is returned.
    """
    LOG.tc_step(
        "Using requests GET {} without proper authentication".format(
            resource))

    status_code, text = sysinv_rest.get(resource=resource, auth=False)
    LOG.debug("Retrieved: status_code: {} message: {}".format(
        status_code, text))

    LOG.tc_step("Determine if expected status_code of 401 is received")
    message = "Expected status_code of 401 - received {} and message {}"
    assert status_code == 401, message.format(status_code, text)
def get_custom_heat_files(file_name, file_dir=HEAT_CUSTOM_TEMPLATES,
                          cli_client=None):
    """Ensure a custom heat template exists on the cli client.

    Args:
        file_name: template file name
        file_dir: destination directory on the client
        cli_client: ssh client to check/copy on; defaults to the cli client

    Returns (str): full path of the template on the client.

    Raises:
        exceptions.CommonError: if the file is still missing after the
            scp from the test server.
    """
    file_path = '{}/{}'.format(file_dir, file_name)
    cli_client = cli_client or get_cli_client()

    if not cli_client.file_exists(file_path=file_path):
        LOG.debug('Create userdata directory if not already exists')
        cli_client.exec_cmd('mkdir -p {}'.format(file_dir), fail_ok=False)

        source_file = TestServerPath.CUSTOM_HEAT_TEMPLATES + file_name
        dest_path = common.scp_from_test_server_to_user_file_dir(
            source_path=source_file, dest_dir=file_dir,
            dest_name=file_name, timeout=300, con_ssh=cli_client)
        if dest_path is None:
            raise exceptions.CommonError(
                "Heat template file {} does not exist after download".format(
                    file_path))

    return file_path
def _get_values(table_, header1, value1, header2, strict=False, regex=False):
    """Return header2 values for rows where header1 matches value1.

    Args:
        table_: parsed table dict
        header1: column to filter on
        value1: value (or regex pattern) to match in header1's column
        header2: column whose values are returned
        strict (bool): exact match (re.match for regex, equality otherwise)
        regex (bool): treat value1 as a regular expression

    Returns (list): matching values from header2's column.
    """
    column1 = get_column(table_, header1)
    if regex:
        matcher = re.match if strict else re.search
        row_indexes = [i for i, cell in enumerate(column1)
                       if matcher(value1, cell)]
    else:
        row_indexes = __get_row_indexes_string(table_, header1, value1, strict)

    column2 = get_column(table_, header2)
    value2 = [column2[i] for i in row_indexes]
    LOG.debug("Returning matching {} value(s): {}".format(header2, value2))
    return value2
def table_kube(output_lines, merge_lines=False):
    """
    Parse single table from kubectl output.

    Args:
        output_lines (str|list): output of kubectl cmd
        merge_lines (bool): if a value spans on multiple lines, whether to
            join them or return as list

    Return dict with list of column names in 'headers' key and rows in
    'values' key.
    """
    table_ = {'headers': [], 'values': []}
    if not isinstance(output_lines, list):
        output_lines = output_lines.split('\n')

    # A valid table needs at least a header line and one row
    if not output_lines or len(output_lines) < 2:
        return table_

    if not output_lines[-1]:
        # skip last line if empty (just newline at the end)
        output_lines = output_lines[:-1]
    if not output_lines[0]:
        # skip a leading blank line as well
        output_lines = output_lines[1:]
    if not output_lines:
        return table_

    # Scan for the header row; lines without the separator are not headers
    for i in range(len(output_lines)):
        line = output_lines[i]
        if ' ' not in line:
            LOG.debug('Invalid kube table line: {}'.format(line))
        else:
            header_row = line
            # everything after the header row is data
            output_lines = output_lines[i + 1:]
            break
    else:
        # no header row found at all
        return table_

    # Headers are separated by runs of 2+ whitespace chars
    table_['headers'] = re.split(r'\s[\s]+', header_row)
    # Column start offsets come from separator positions in the header row.
    # NOTE(review): kute_sep is a module-level separator pattern defined
    # elsewhere in this file; '+ 2' skips past the two-space separator.
    m = re.finditer(kute_sep, header_row)
    starts = [0] + [sep.start() + 2 for sep in m]
    col_count = len(starts)

    # Slice each data line at the header-derived column boundaries
    for line in output_lines:
        row = []
        indices = list(starts) + [len(line)]
        for i in range(col_count):
            row.append(line[indices[i]:indices[i + 1]].strip())
        table_['values'].append(row)

    # Sanity check: every row must have one cell per header
    if table_['values'] and len(table_['values'][0]) != len(table_['headers']):
        raise exceptions.CommonError(
            'Unable to parse given lines: \n{}'.format(output_lines))

    # Fold continuation rows into single entries
    table_['values'] = __convert_multilines_values(table_['values'],
                                                   merge_lines=merge_lines)
    return table_
def delete(self, resource="", auth=True, verify=None):
    """Issue an HTTP DELETE against baseURL + resource.

    Args:
        resource (str): resource path appended to the base URL
        auth (bool): whether to send authenticated headers
        verify: TLS verification; defaults to the client's setting

    Returns (tuple): (status_code, json body) of the response.
    """
    headers = self.auth_header_select(auth)
    LOG.debug("baseURL: {} resource: {} headers: {}".format(
        self.baseURL, resource, headers))

    verify = self.verify if verify is None else verify
    response = requests.delete(self.baseURL + resource, headers=headers,
                               verify=verify)
    return response.status_code, response.json()
def __exit__(self, *args):
    """Release the underlying lock on context-manager exit.

    Logs and re-raises any failure: RuntimeError means the lock was
    already unlocked; anything else is unexpected but still propagated.
    """
    LOG.debug("Releasing lock")
    try:
        return self._lock.release()
    except RuntimeError:
        LOG.error("Lock did not release, lock was unlocked already")
        raise
    except Exception:
        # Narrowed from a bare 'except:' so KeyboardInterrupt/SystemExit
        # propagate without being mislabelled as unlock failures.
        LOG.error("An unexpected error was caught when unlocking lock")
        raise
def get_process_info(name, cmd='', pid_file='', host='', process_type='sm',
                     con_ssh=None, auth_info=Tenant.get('admin_platform')):
    """
    Get the information of the process with the specified name

    Args:
        name (str): name of the process
        cmd (str): path of the executable
        pid_file (str): path of the file containing the process id
        host (str): host on which the process resides; defaults to the
            active controller
        process_type (str): type of service/process, must be one of 'sm',
            'pmon', 'other'
        con_ssh: ssh connection/client to the active controller
        auth_info: auth info for querying the active controller

    Returns:
        'sm':   (pid, name, impact, status, pid_file); pid is -1 when the
                process is not enabled-active
        'pmon': (pid, name)
        other:  (pid, name), or (-1, '') when not found
    """
    LOG.info('name:{} cmd={} pid_file={} host={} process_type={}'.format(
        name, cmd, pid_file, host, process_type))

    active_controller = system_helper.get_active_controller_name(
        con_ssh=con_ssh, auth_info=auth_info)
    if not host:
        host = active_controller

    if process_type == 'sm':
        LOG.debug('to get_process_info for SM process:{} on host:{}'.format(
            name, host))
        # SM processes only run on the active controller
        if host != active_controller:
            LOG.warn('Already swacted? host:{} is not the active controller '
                     'now. Active controller is {}'.format(
                         host, active_controller))

        pid, name, impact, status, pid_file = get_process_from_sm(
            name, con_ssh=con_ssh, pid_file=pid_file)
        if status != 'enabled-active':
            LOG.warn('SM process is in status:{}, not "enabled-active"'.format(
                status))
            if 'disabl' in status:
                # likely a swact happened; retry once after a short wait
                LOG.warn('Wrong controller? Or controller already swacted, '
                         'wait and try on the other controller')
                time.sleep(10)
                return get_process_from_sm(name, pid_file=pid_file)
            return -1, name, impact, status, pid_file
        else:
            return pid, name, impact, status, pid_file

    elif process_type == 'pmon':
        # pmon-managed processes expose their pid via a pid file
        pid = get_pmon_process_id(pid_file, host, con_ssh=con_ssh)
        LOG.info('Found: PID={} for PMON process:{}'.format(pid, name))
        return pid, name

    else:
        # fall back to scanning the process table
        LOG.info('Try to find the process:{} using "ps"'.format(name))
        pid = get_ancestor_process(name, host, cmd=cmd, con_ssh=con_ssh)[0]
        if -1 == pid:
            return -1, ''
        return pid, name
def patch(self, resource="", json_data=None, auth=True, verify=None):
    """Issue an HTTP PATCH against baseURL + resource.

    Args:
        resource (str): resource path appended to the base URL
        json_data (dict|None): request body; defaults to an empty dict.
            (Default changed from a shared mutable ``{}`` — a classic
            mutable-default-argument bug — to ``None``.)
        auth (bool): whether to send authenticated headers
        verify: TLS verification; defaults to the client's setting

    Returns (tuple): (status_code, json body) of the response.
    """
    if json_data is None:
        json_data = {}
    headers = self.auth_header_select(auth)
    message = "baseURL: {} resource: {} headers: {} data: {}"
    LOG.debug(message.format(self.baseURL, resource, headers, json_data))
    if verify is None:
        verify = self.verify
    r = requests.patch(self.baseURL + resource,
                       headers=headers, data=json_data,
                       verify=verify)
    return r.status_code, r.json()
def check_rest_api():
    """Sanity-check the sysinv REST API by GETting /controller_fs.

    Asserts that an authenticated GET returns a 200 status code.
    """
    LOG.info("Check sysinv REST API")
    sysinv_rest = Rest('sysinv', platform=True)

    status_code, text = sysinv_rest.get(resource='/controller_fs', auth=True)
    LOG.debug("Retrieved: status_code: {} message: {}".format(
        status_code, text))

    LOG.info("Check status_code of 200 is received")
    message = "Expected status_code of 200 - received {} and message {}"
    assert status_code == 200, message.format(status_code, text)
def terminate_avconv():
    """Wait up to 10s for the recorder to exit on its own, else SIGTERM it."""
    deadline = time.time() + 10
    while time.time() < deadline:
        time.sleep(0.1)
        # poll() returns the exit code once the process has stopped
        if self._popen.poll() is not None:
            LOG.debug("Video stopped")
            return
    LOG.info("Killing video recorder process")
    os.kill(self._popen.pid, signal.SIGTERM)
def __init__(self, name, index=0, key=None, tag=None):
    """Kickstart menu option.

    Builds a tag dict (os / security / type / console) from the option
    name, from a tag string, or takes a caller-supplied tag dict as-is.
    """
    tag_dict = {
        "os": "centos",
        "security": "standard",
        "type": None,
        "console": "serial"
    }
    super().__init__(name, index, key)
    option_name = self.name.lower()

    if tag is None:
        # infer everything from the option text
        if "wrl" in option_name or "wrlinux" in option_name:
            tag_dict["os"] = "wrl"
        if any(word in option_name for word in ("all-in-one", "cpe", "aio")):
            tag_dict["type"] = "cpe"
        elif "controller" in option_name:
            tag_dict["type"] = "standard"
        if "security" in option_name and "extended" in option_name:
            tag_dict["security"] = "extended"
        if "security profile enabled" in option_name:
            tag_dict["security"] = "extended"

        # lab install type overrides what the text suggested
        install_type = ProjVar.get_var("SYS_TYPE")
        if install_type in (SysType.AIO_SX, SysType.AIO_DX):
            tag_dict["type"] = "cpe"
        elif install_type in (SysType.REGULAR, SysType.STORAGE):
            tag_dict["type"] = "standard"

        if any(word in option_name
               for word in ("lowlat", "low lat", "low_lat")):
            tag_dict["type"] = "lowlat"
        if "graphic" in option_name:
            tag_dict["console"] = "graphical"
    elif isinstance(tag, str):
        # infer from a free-form tag string instead
        tag = tag.lower()
        if any(word in tag for word in ("all-in-one", "cpe", "aio")):
            tag_dict["type"] = "cpe"
        if "standard" in tag:
            tag_dict["type"] = "standard"
        if any(word in tag for word in ("lowlat", "low lat", "low_lat")):
            tag_dict["type"] = "lowlatency"
        if "security" in tag or "extended" in tag:
            tag_dict["security"] = "extended"
    elif isinstance(tag, dict):
        tag_dict = tag

    self.tag = tag_dict
    LOG.debug("Kickstart menu option {} tags are: {}".format(
        self.name, self.tag))
def pb_migrate_test(backup_info, con_ssh, vm_ids=None):
    """
    Run migration test before doing system backup.

    Args:
        backup_info: options for doing backup
        con_ssh: current ssh connection
        vm_ids: VM ids to randomly choose a migration target from

    Return:
        0 when skipped (fewer than 2 hypervisors), otherwise None
    """
    hypervisors = host_helper.get_up_hypervisors(con_ssh=con_ssh)
    if len(hypervisors) < 2:
        LOG.info(
            'Only {} hypervisors, it is not enough to test migration'.format(
                len(hypervisors)))
        LOG.info('Skip migration test')
        return 0

    LOG.debug('There are {} hypervisors'.format(len(hypervisors)))

    LOG.info('Randomly choose some VMs and do migrate:')
    target = random.choice(vm_ids)
    LOG.info('-OK, test migration of VM:{}'.format(target))

    original_host = vm_helper.get_vm_host(target)
    LOG.info('Original host:{}'.format(original_host))

    vm_helper.live_migrate_vm(target)
    current_host = vm_helper.get_vm_host(target)
    # BUG FIX: previously logged original_host here instead of current_host
    LOG.info('After live-migration, host:{}'.format(current_host))

    if original_host == current_host:
        LOG.info('backup_info:{}'.format(backup_info))
        LOG.warn(
            'VM is still on its original host, live-migration failed? '
            'original host:{}'.format(original_host))

    # baseline for the cold-migration check is the post-live-migration host
    original_host = current_host
    vm_helper.cold_migrate_vm(target)
    current_host = vm_helper.get_vm_host(target)
    # BUG FIX: message previously said 'code-migration'
    LOG.info('After cold-migration, host:{}'.format(current_host))
    if original_host == current_host:
        LOG.warn(
            'VM is still on its original host, cold-migration failed? '
            'original host:{}'.format(original_host))
def test_kpi_cyclictest_hypervisor(collect_kpi, prepare_test_session,
                                   get_hypervisor):
    """Run cyclictest on a hypervisor host and record latency KPIs.

    Records two KPIs: the average latency and the six-nines latency.
    Skips unless KPI collection is enabled.
    """
    if not collect_kpi:
        skip("KPI only test. Skip due to kpi collection is not enabled")

    global testable_hypervisors
    chosen_hypervisor = get_hypervisor
    cpu_info = testable_hypervisors[chosen_hypervisor]
    cpu_info['for_host_test'] = True

    LOG.info(
        'Hypervisor chosen to run cyclictest: {}'.format(chosen_hypervisor))
    active_controller_name = system_helper.get_active_controller_name()
    program = os.path.join(os.path.normpath(CYCLICTEST_DIR),
                           os.path.basename(CYCLICTEST_EXE))
    LOG.debug('program={}'.format(program))

    with host_helper.ssh_to_host(chosen_hypervisor) as target_ssh:
        prep_test_on_host(target_ssh, chosen_hypervisor, program,
                          active_controller_name)
        run_log, hist_file = run_cyclictest(target_ssh, program,
                                            chosen_hypervisor,
                                            cpu_info=cpu_info)

        LOG.info("Process and upload test results")
        local_run_log, local_hist_file = fetch_results_from_target(
            target_ssh=target_ssh, target_host=chosen_hypervisor,
            active_con_name=active_controller_name, run_log=run_log,
            hist_file=hist_file)

    testable_hypervisors[chosen_hypervisor]['for_host_test'] = False

    avg_val, six_nines_val = calculate_results(
        run_log=local_run_log, hist_file=local_hist_file,
        cores_to_ignore=None, num_cores=len(cpu_info['vm_cores']))

    # BUG FIX: the AVG KPI previously recorded six_nines_val, leaving
    # avg_val unused.
    kpi_log_parser.record_kpi(local_kpi_file=collect_kpi,
                              kpi_name=CyclicTest.NAME_HYPERVISOR_AVG,
                              kpi_val=avg_val, uptime=15,
                              unit=CyclicTest.UNIT)
    kpi_log_parser.record_kpi(local_kpi_file=collect_kpi,
                              kpi_name=CyclicTest.NAME_HYPERVISOR_6_NINES,
                              kpi_val=six_nines_val, uptime=15,
                              unit=CyclicTest.UNIT)
def pytest_collection_modifyitems(items):
    """Re-order collected tests and expand custom markers.

    - 'trylast' items are moved to the end, 'abslast' items after those.
    - 'priorities' and 'features' marker args become individual markers.
    - 'known_issue' items get a known_issue marker and a printed note.
    - tests named test_dc_* get the 'dc' marker automatically.
    """
    # print("Collection modify")
    move_to_last = []
    absolute_last = []
    for item in items:
        # re-order tests:
        trylast_marker = item.get_closest_marker('trylast')
        abslast_marker = item.get_closest_marker('abslast')

        if abslast_marker:
            absolute_last.append(item)
        elif trylast_marker:
            move_to_last.append(item)

        priority_marker = item.get_closest_marker('priorities')
        if priority_marker is not None:
            priorities = priority_marker.args
            for priority in priorities:
                # getattr replaces the previous eval() — no code execution
                # from marker names
                item.add_marker(getattr(pytest.mark, priority))

        feature_marker = item.get_closest_marker('features')
        if feature_marker is not None:
            features = feature_marker.args
            for feature in features:
                item.add_marker(getattr(pytest.mark, feature))

        # known issue marker
        known_issue_mark = item.get_closest_marker('known_issue')
        if known_issue_mark is not None:
            issue = known_issue_mark.args[0]
            msg = "{} has a workaround due to {}".format(item.nodeid, issue)
            print(msg)
            LOG.debug(msg=msg)
            item.add_marker(pytest.mark.known_issue)

        # add dc marker to all tests start with test_dc_xxx
        # (get_closest_marker for consistency; get_marker is deprecated)
        dc_maker = item.get_closest_marker('dc')
        if not dc_maker and 'test_dc_' in item.nodeid:
            item.add_marker(pytest.mark.dc)

    # add trylast tests to the end
    for item in move_to_last:
        items.remove(item)
        items.append(item)

    for i in absolute_last:
        items.remove(i)
        items.append(i)
def add_routes_to_subcloud(subcloud, subcloud_table, fail_ok=False):
    """Re-add host routes to a subcloud from a saved route table.

    For each host in ``subcloud_table``, adds the recorded route via
    'system host-route-add' unless a route with the same uuid already
    exists on the host.
    """
    LOG.debug("Add routes back to subcloud: {}".format(subcloud))
    ssh_client = ControllerClient.get_active_controller(name=subcloud)

    for host_id in subcloud_table:
        host_table = subcloud_table[host_id]
        ifname, network, prefix, gateway = (
            vals[0] for vals in table_parser.get_multi_values(
                host_table, ["ifname", "network", "prefix", "gateway"]))
        command = "host-route-add {} {} {} {} {}".format(
            host_id, ifname, network, prefix, gateway)

        code, output = cli.system("host-route-list {}".format(host_id))
        existing_uuids = table_parser.get_values(table_parser.table(output),
                                                 "uuid")
        # only add the route if its uuid is not already present
        if table_parser.get_values(host_table, "uuid")[0] not in existing_uuids:
            cli.system(command, ssh_client=ssh_client, fail_ok=fail_ok)
def list_pcrs(ssh_con, previous_operation=''):
    """List the TPM PCRs via tss2_getcapability.

    Args:
        ssh_con: ssh connection to run the command on
        previous_operation (str): description of the prior step, for logging

    Returns (tuple): (code, output) from the command.

    Raises:
        AssertionError: if the command returns a non-zero code.
    """
    get_pcrs = 'tss2_getcapability -cap 5'
    # BUG FIX: added the missing space before 'after'
    LOG.debug('Listing PCRs using cmd:' + get_pcrs + ' after ' +
              previous_operation)
    code, output = ssh_con.exec_cmd(get_pcrs)
    # BUG FIX: the output was missing from the message
    # (was: ', output:' + ', command:')
    msg = 'TPM command returned code:' + str(code) + ', output:' + \
          str(output) + ', command:' + get_pcrs
    if code == 0:
        LOG.info(msg)
    else:
        assert False, msg

    return code, output
def _wait_for_results(con_target, run_log=None, hist_file=None, duration=60,
                      start_file=None, end_file=None):
    """Poll a remote host until a cyclictest run completes or times out.

    Args:
        con_target: ssh connection to the host running the test
        run_log (str): path of the run log to tail for progress
        hist_file (str): path of the histogram file to tail on completion
        duration (int): expected run duration in seconds; drives the
            polling interval and timeouts
        start_file (str): file whose existence signals the run started
        end_file (str): file whose existence signals the run completed

    Returns True when the run completed; asserts on start/overall timeout.
    """
    # poll interval scales with duration, but at least every 2 minutes
    wait_per_checking = max(duration / 20, 120)
    LOG.tc_step('Check the results every {} seconds'.format(wait_per_checking))
    time.sleep(10)

    LOG.info('Check if started to run')
    if start_file:
        # up to 5 quick checks for the start marker
        for _ in range(5):
            if con_target.file_exists(start_file):
                LOG.info('running')
                break
            time.sleep(2)
        else:
            # for/else: marker never appeared
            assert False, 'Not even started?'

    # overall deadline: duration plus slack, capped at 4000s
    total_timeout = min(duration + 120, 4000)
    end_time = time.time() + total_timeout
    cmd_timeout = max(int(duration / 20), 90)

    while time.time() < end_time:
        try:
            if con_target.file_exists(end_file):
                LOG.info('Run completed on {} !!!'.format(con_target.host))
                cmd = 'tail -n 30 {} {}; echo'.format(run_log, hist_file)
                # completed run may produce long output; allow more time
                cmd_timeout = max(int(duration / 3), 900)
                output = con_target.exec_cmd(cmd,
                                             expect_timeout=cmd_timeout)[1]
                LOG.info('\n{}\n'.format(output))
                return True
            else:
                # still running: show progress from the run log
                LOG.info('Running ... on ' + con_target.host)
                output = con_target.exec_cmd('tail {}; echo'.format(run_log),
                                             expect_timeout=cmd_timeout)[1]
                LOG.info('\n{}\n'.format(output))
        except (pexpect.ExceptionPexpect, exceptions.TiSError) as e:
            # transient ssh/CLI hiccups are tolerated between polls
            LOG.debug('ignore exception:{}'.format(e.__str__()))
        time.sleep(wait_per_checking)
    else:
        # while/else: deadline expired without seeing end_file
        LOG.info('Timeout when running on target')
        assert False, 'Timeout when running on target after {} seconds'.format(
            total_timeout)
def sm_dump_table(output_lines):
    """
    get sm dump table as dictionary

    Args:
        output_lines (output): output of sudo sm-dump command from a
            controller

    Returns (dict): table with following headers.
        {'headers': ["category", "name", "desired-state", "actual-state"];
         'values': [
             ['Service_Groups', 'oam-services', 'active', 'active'],
             ['Services', 'oam-ip', 'enabled-active', 'enabled-active'],
             ...
         ]}
    """
    table_ = {'headers': ['category', 'name', 'desired-state', 'actual-state'],
              'values': []}
    if not isinstance(output_lines, list):
        output_lines = output_lines.split('\n')
    if not output_lines[-1]:
        # skip last line if empty (just newline at the end)
        output_lines = output_lines[:-1]

    category = ''
    for line in output_lines:
        if __sm_delimeter_line.match(line):
            # delimiter lines may carry the section name (category)
            found = __sm_category_line.findall(line)
            if found:
                category = found[0]
            LOG.debug("skipping delimiter line: {}".format(line))
            continue
        if not line.strip():
            LOG.debug('skipping empty table line')
            continue
        # Take the first 3 columns only. Could have up to 5.
        row = [category] + list(__sm_item_line.findall(line))[:3]
        table_['values'].append(row)

    return table_