def test_lldp_neighbor_remote_port():
    """
    Tests if LLDP Neighbor remote_port exists on all hosts

    Test Steps:
        - Checks LLDP Neighbor remote_port to ensure it exists
    """
    remote_port_missing = False
    LOG.tc_step("Parsing host-list for hostnames")
    hosts_tab = table_parser.table(cli.system('host-list')[1])
    all_hosts = table_parser.get_column(hosts_tab, 'hostname')

    for host_name in all_hosts:
        LOG.tc_step("Parsing host-lldp-neighbor-list for remote_ports on the "
                    + host_name + " host")
        host = table_parser.table(
            cli.system('host-lldp-neighbor-list',
                       '--nowrap {}'.format(host_name))[1])
        host_remote_ports = table_parser.get_column(host, 'remote_port')

        for remote_port in host_remote_ports:
            LOG.tc_step("Checking LLDP remote_port to ensure it exists")
            if remote_port.lower() == 'none' or remote_port == '':
                LOG.tc_step("Port missing")
                remote_port_missing = True

    assert remote_port_missing is False, \
        "Some remote ports are missing from 'system host-lldp-neighbor-list'"


def get_cli_timestamps(vol_id):
    table_ = table_parser.table(cli.system('show')[1])
    sysinv_timestamp = table_parser.get_value_two_col_table(table_,
                                                            'created_at')

    table_ = table_parser.table(
        cli.openstack('volume show', vol_id,
                      auth_info=Tenant.get('admin'))[1])
    openstack_timestamp = table_parser.get_value_two_col_table(table_,
                                                               'created_at')

    return sysinv_timestamp, openstack_timestamp


def test_host_disk_wipe_unassigned_disk():
    """
    This test attempts to run system host-disk-wipe on a node using any
    unassigned disk.

    Command format is:

        system host-disk-wipe [--confirm] <hostname or id> <disk uuid>

    Note, host-disk-wipe is only applicable to controller and compute nodes.
    It cannot be used on the rootfs disk.  It cannot be used for a disk that
    is used by a PV or has partitions used by a PV.

    Arguments:
    - None

    Test Steps:
    1.  Determine which disks are unassigned by comparing size_gib to
        available_gib in system host-disk-list
    2.  Attempt to wipe the disk
    3.  Expect it to pass

    Assumptions:
    - None
    """
    computes = system_helper.get_hosts(personality="compute",
                                       availability="available")
    controllers = system_helper.get_hosts(personality="controller",
                                          availability="available")
    hosts = controllers + computes

    found_disk = False
    for host in hosts:
        LOG.info("Query disks on host {}".format(host))
        disks = storage_helper.get_host_disks(host)
        for disk_uuid in disks:
            cmd = "host-disk-show {} {}".format(host, disk_uuid)
            rc, out = cli.system(cmd)
            size_gib = table_parser.get_value_two_col_table(
                table_parser.table(out), "size_gib")
            available_gib = table_parser.get_value_two_col_table(
                table_parser.table(out), "available_gib")
            if int(float(size_gib)) == int(float(available_gib)):
                found_disk = True
                LOG.tc_step("Attempting to wipe disk {} from host {}".format(
                    disk_uuid, host))
                cmd = 'host-disk-wipe --confirm {} {}'.format(host, disk_uuid)
                rc, out = cli.system(cmd, fail_ok=True)
                assert rc == 0, "Expected wipe disk to pass but instead failed"
                break

    if not found_disk:
        skip("No unassigned disks to run test")


def import_package(pkg, con_ssh=None, auth_info=None, fail_ok=False):
    """
    Import Murano package
    Args:
        pkg: package name to import (full path)
        con_ssh (SSHClient):
        auth_info (dict):
        fail_ok (bool): whether to return an error code or raise an exception
            when the import fails

    Returns:
        code, msg: return code and msg

    """
    if pkg is None:
        raise ValueError("Package name has to be specified.")

    LOG.info("Importing Murano package {}".format(pkg))
    code, output = cli.openstack('package import --exists-action u', pkg,
                                 ssh_client=con_ssh, fail_ok=fail_ok,
                                 auth_info=auth_info)

    if code > 0:
        return 1, output

    table_ = table_parser.table(output)
    pkg_id = table_parser.get_values(table_, 'ID')
    return 0, pkg_id


def delete_bundle(bundle_id, con_ssh=None, auth_info=None, fail_ok=False):
    """
    Delete Murano bundle
    Args:
        bundle_id: Bundle id to delete
        con_ssh (SSHClient):
        auth_info (dict):
        fail_ok (bool): whether to return an error code or raise an exception
            when the deletion fails

    Returns:
        code, msg: return code and msg

    """
    if bundle_id is None:
        raise ValueError("Murano bundle id has to be specified.")

    LOG.info("Deleting Murano bundle {}".format(bundle_id))
    code, output = cli.openstack('bundle delete', bundle_id,
                                 ssh_client=con_ssh, fail_ok=fail_ok,
                                 auth_info=auth_info)

    if code > 0:
        return 1, output

    table_ = table_parser.table(output)
    pkg_id = table_parser.get_value_two_col_table(table_, 'id')
    return 0, pkg_id


def get_events(event_type, limit=None, header='message_id', con_ssh=None,
               auth_info=None, **filters):
    """
    Get values from 'openstack event list' for the given event type and filters
    Args:
        event_type (str):
        limit (int|None): passed to --limit if given
        header (str): column to return values from
        con_ssh:
        auth_info:
        **filters: extra key=value filters passed to --filter

    Returns (list):

    """
    args = ''
    if limit:
        args = '--limit {}'.format(limit)

    if event_type or filters:
        if event_type:
            filters['event_type'] = event_type

        extra_args = ['{}={}'.format(k, v) for k, v in filters.items()]
        args += ' --filter {}'.format(';'.join(extra_args))

    table_ = table_parser.table(
        cli.openstack('event list', args, ssh_client=con_ssh,
                      auth_info=auth_info)[1])
    return table_parser.get_values(table_, header)


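# Illustrative usage sketch for get_events (the event type and resource id
# below are hypothetical examples, not taken from a real system). Extra
# keyword filters are joined into a single --filter key=value;key=value
# argument by the helper above.
def _example_get_instance_create_events():
    return get_events('compute.instance.create.end', limit=5,
                      header='message_id',
                      resource_id='11111111-2222-3333-4444-555555555555')

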
def test_alarm_overwritten():
    """
    Verify the chronological order of the alarms

    Scenario:
    1. Query the alarm table
    2. Verify the list is shown most recent alarm to oldest (based on
       timestamp) [REQ-14]
    """
    output = cli.fm('event-list', '--limit 10 --nowrap --nopaging --uuid')[1]
    alarm_table = table_parser.table(output, combine_multiline_entry=True)
    size = len(alarm_table['values'])

    LOG.info('Get the last entry in the alarm table')
    last_alarm = alarm_table['values'][size - 1][0]
    secondlast_alarm = alarm_table['values'][size - 2][0]
    LOG.info("last_alarm = %s" % last_alarm)
    LOG.info("secondlast_alarm = %s" % secondlast_alarm)

    time_1 = alarm_table['values'][size - 1][1]
    time_2 = alarm_table['values'][size - 2][1]

    # The last alarm should be older than the second last
    assert (common.get_timedelta_for_isotimes(time_1, time_2).total_seconds()
            > 0 or time_1.split('.')[1] < time_2.split('.')[1])


def get_alarms(header='alarm_id', name=None, strict=False,
               auth_info=Tenant.get('admin'), con_ssh=None):
    """
    Get values from 'openstack alarm list'
    Args:
        header (str): column to return values from
        name (str|None): filter alarms by name if given
        strict (bool): whether to do a strict match on name
        auth_info:
        con_ssh:

    Returns (list):

    """
    table_ = table_parser.table(
        cli.openstack('alarm list', ssh_client=con_ssh,
                      auth_info=auth_info)[1],
        combine_multiline_entry=True)
    if name is None:
        return table_parser.get_column(table_, header)

    return table_parser.get_values(table_, header, Name=name, strict=strict)


def get_cert_info(cert_id, con_ssh=None):
    LOG.info('check the status of the current certificate')
    cmd = 'certificate-show ' + cert_id
    output = cli.system(cmd, ssh_client=con_ssh, fail_ok=False)[1]
    if output:
        table = table_parser.table(output)
        if table:
            actual_id = table_parser.get_value_two_col_table(table, 'uuid')
            actual_type = table_parser.get_value_two_col_table(table,
                                                               'certtype')
            actual_details = table_parser.get_value_two_col_table(table,
                                                                  'details')
            actual_states = ''
            if not actual_details:
                # CGTS-9529
                LOG.fatal('No details in output of certificate-show')
                LOG.fatal('Ignore it until the known issue CGTS-9529 fixed, '
                          'output:' + output)
                # assert False, 'No details in output of certificate-show'
            else:
                LOG.debug('details from output of certificate-show: {}'.format(
                    actual_details))
                actual_states = eval(actual_details)
                LOG.debug('states: {}'.format(actual_states))
                return 0, actual_id, actual_type, actual_states

            LOG.info('')
            return 1, actual_id, actual_type, actual_states
    else:
        LOG.info('no "details" in output')

    return 2, '', '', ''


def get_pci_interface_stats_for_providernet(
        providernet_id,
        fields=('pci_pfs_configured', 'pci_pfs_used', 'pci_vfs_configured',
                'pci_vfs_used'),
        auth_info=Tenant.get('admin'), con_ssh=None):
    """
    get pci interface usage
    Args:
        providernet_id (str): id of a providernet
        fields: fields such as ('pci_vfs_configured', 'pci_pfs_used')
        auth_info (dict):
        con_ssh (SSHClient):

    Returns (tuple): tuple of integers

    """
    if not providernet_id:
        raise ValueError("Providernet id is not provided.")

    table_ = table_parser.table(
        cli.nova('providernet-show', providernet_id, ssh_client=con_ssh,
                 auth_info=auth_info)[1])
    rtn_vals = []
    for field in fields:
        pci_stat = int(
            table_parser.get_value_two_col_table(table_, field, strict=True))
        rtn_vals.append(pci_stat)
    return tuple(rtn_vals)


def is_https_enabled(con_ssh=None, source_openrc=True, interface='public',
                     auth_info=Tenant.get('admin_platform')):
    """
    Check whether the given keystone endpoint interface uses https
    Args:
        con_ssh:
        source_openrc:
        interface: default is public
        auth_info:

    Returns (bool): True or False

    """
    if not con_ssh:
        con_name = auth_info.get('region') if (
            auth_info and ProjVar.get_var('IS_DC')) else None
        con_ssh = ControllerClient.get_active_controller(name=con_name)

    table_ = table_parser.table(
        cli.openstack('endpoint list', ssh_client=con_ssh,
                      auth_info=auth_info, source_openrc=source_openrc)[1])
    con_ssh.exec_cmd('unset OS_REGION_NAME')  # Workaround
    filters = {'Service Name': 'keystone', 'Service Type': 'identity',
               'Interface': interface}
    keystone_values = table_parser.get_values(table_=table_,
                                              target_header='URL', **filters)
    LOG.info('keystone {} URLs: {}'.format(interface, keystone_values))
    return all('https' in i for i in keystone_values)


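# Minimal sketch showing how is_https_enabled is typically consumed by a
# test (the log message and interface choice are illustrative only).
def _example_log_https_state():
    https_enabled = is_https_enabled(interface='public')
    LOG.info("HTTPS enabled on public keystone endpoint: {}".format(
        https_enabled))
    return https_enabled

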
def get_flavor_values(flavor, fields, strict=True, con_ssh=None,
                      auth_info=Tenant.get('admin')):
    """
    Get flavor values for given fields via openstack flavor show
    Args:
        flavor (str):
        fields (str|list|tuple):
        strict (bool): strict search for field name or not
        con_ssh:
        auth_info:

    Returns (list):

    """
    table_ = table_parser.table(
        cli.openstack('flavor show', flavor, ssh_client=con_ssh,
                      auth_info=auth_info)[1])
    return table_parser.get_multi_values_two_col_table(
        table_, fields, merge_lines=True, evaluate=True, strict=strict,
        dict_fields=('properties', ))


def get_provider_net_info(providernet_id, field='pci_pfs_configured',
                          strict=True, auth_info=Tenant.get('admin'),
                          con_ssh=None, rtn_int=True):
    """
    Get provider net info from "nova providernet-show"

    Args:
        providernet_id (str): id of a providernet
        field (str): Field name such as pci_vfs_configured, pci_pfs_used, etc
        strict (bool): whether to perform a strict search on field name
        auth_info (dict):
        con_ssh (SSHClient):
        rtn_int (bool): whether to return integer or string

    Returns (int|str): value of specified field. Converted to integer by
        default unless rtn_int=False.

    """
    if not providernet_id:
        raise ValueError("Providernet id is not provided.")

    table_ = table_parser.table(
        cli.nova('providernet-show', providernet_id, ssh_client=con_ssh,
                 auth_info=auth_info)[1])
    info_str = table_parser.get_value_two_col_table(table_, field,
                                                    strict=strict)
    return int(info_str) if rtn_int else info_str


def get_flavors(name=None, memory=None, disk=None, ephemeral=None, swap=None,
                vcpu=None, rxtx=None, is_public=None, flv_id=None, long=False,
                con_ssh=None, auth_info=None, strict=True, field='id'):
    """
    Get flavors matching the given criteria. If no criteria are given, all
    flavors are returned.

    Args:
        name (str): name of a flavor
        memory (int): memory size in MB
        disk (int): size of the disk in GB
        ephemeral (int): size of ephemeral disk in GB
        swap (int): size of swap disk in GB
        vcpu (int): number of vcpus
        rxtx (str):
        is_public (bool):
        flv_id (str):
        long (bool):
        con_ssh (SSHClient):
        auth_info (dict):
        strict (bool): whether or not to perform strict search on provided
            values
        field (str|list|tuple):

    Returns (list):

    """
    args = '--long' if long else ''
    table_ = table_parser.table(
        cli.openstack('flavor list', args, ssh_client=con_ssh,
                      auth_info=auth_info)[1])

    req_dict = {
        'Name': name,
        'RAM': memory,
        'Disk': disk,
        'Ephemeral': ephemeral,
        'Swap': '' if str(swap) == '0' else swap,
        'VCPUs': vcpu,
        'RXTX Factor': rxtx,
        'Is Public': is_public,
        'ID': flv_id,
    }
    final_dict = {k: str(v) for k, v in req_dict.items() if v is not None}
    return table_parser.get_multi_values(table_, field, strict=strict,
                                         **final_dict)


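# Usage sketch for get_flavors: look up flavor ids matching hypothetical
# criteria, falling back to all flavors when nothing matches. The memory and
# vcpu values are examples only.
def _example_find_small_flavors():
    flavors = get_flavors(memory=1024, vcpu=1, field='id')
    if not flavors:
        flavors = get_flavors(field='id')
    return flavors

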
def get_aggregate_values(aggregate, fields, con_ssh=None,
                         auth_info=Tenant.get('admin'), fail_ok=False):
    """
    Get values of a nova aggregate for given fields
    Args:
        aggregate (str):
        fields (str|list|tuple):
        con_ssh:
        auth_info (dict):
        fail_ok (bool):

    Returns (list):

    """
    code, out = cli.openstack('aggregate show', aggregate, ssh_client=con_ssh,
                              auth_info=auth_info, fail_ok=fail_ok)
    if code > 0:
        return []

    table_ = table_parser.table(out)
    return table_parser.get_multi_values_two_col_table(
        table_, fields, evaluate=True, dict_fields=('properties', ))


def get_aggregates(field='name', name=None, avail_zone=None, con_ssh=None,
                   auth_info=Tenant.get('admin')):
    """
    Get a list of aggregates

    Args:
        field (str|list|tuple): id or name
        name (str|list): only return aggregates with the given name if
            specified
        avail_zone (str): only return aggregates with the given availability
            zone if specified
        con_ssh (SSHClient):
        auth_info (dict):

    Returns (list):

    """
    kwargs = {}
    if avail_zone:
        kwargs['Availability Zone'] = avail_zone
    if name:
        kwargs['Name'] = name

    aggregates_tab = table_parser.table(
        cli.openstack('aggregate list', ssh_client=con_ssh,
                      auth_info=auth_info)[1])
    return table_parser.get_multi_values(aggregates_tab, field, **kwargs)


def deploy_env(env_id, session_id, con_ssh=None, auth_info=None,
               fail_ok=False):
    code, output = cli.openstack(
        'environment deploy --session-id {} {}'.format(session_id, env_id),
        ssh_client=con_ssh, fail_ok=fail_ok, auth_info=auth_info)

    if code == 1:
        return 1, output

    table_ = table_parser.table(output)
    deploy_id = table_parser.get_value_two_col_table(table_, 'id')

    if not deploy_id:
        msg = "Fail to get the deploy id; session-id {}; environment " \
              "id {}".format(session_id, env_id)
        if fail_ok:
            return 2, msg
        else:
            raise exceptions.MuranoError(msg)

    return 0, deploy_id


def get_apps(field='status', application=None, con_ssh=None,
             auth_info=Tenant.get('admin_platform'), rtn_dict=False,
             **kwargs):
    """
    Get application values for given apps and fields via
    system application-list
    Args:
        application (str|list|tuple):
        field (str|list|tuple):
        con_ssh:
        auth_info:
        rtn_dict:
        **kwargs: extra filters other than application

    Returns (list|dict):
        list of list, or dict with app name(str) as key and values(list) for
        given fields for each app as value

    """
    table_ = table_parser.table(
        cli.system('application-list', ssh_client=con_ssh,
                   auth_info=auth_info)[1])
    if application:
        kwargs['application'] = application

    return table_parser.get_multi_values(table_, fields=field,
                                         rtn_dict=rtn_dict, zip_values=True,
                                         **kwargs)


def get_metric_values(metric_id=None, metric_name=None, resource_id=None,
                      fields='id', fail_ok=False,
                      auth_info=Tenant.get('admin'), con_ssh=None):
    """
    Get metric info via 'openstack metric show'
    Args:
        metric_id (str|None):
        metric_name (str|None): Only used if metric_id is not provided
        resource_id (str|None): Only used if metric_id is not provided
        fields (str|list|tuple): field name
        fail_ok (bool):
        auth_info:
        con_ssh:

    Returns (list):

    """
    if metric_id is None and metric_name is None:
        raise ValueError("metric_id or metric_name has to be provided.")

    if metric_id:
        arg = metric_id
    else:
        if resource_id:
            arg = '--resource-id {} "{}"'.format(resource_id, metric_name)
        else:
            if not fail_ok:
                raise ValueError("resource_id needs to be provided when "
                                 "using metric_name")
            arg = '"{}"'.format(metric_name)

    code, output = cli.openstack('metric show', arg, ssh_client=con_ssh,
                                 fail_ok=fail_ok, auth_info=auth_info)
    if code > 0:
        return output

    table_ = table_parser.table(output)
    return table_parser.get_multi_values_two_col_table(table_, fields)


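# Usage sketch for get_metric_values: when no metric id is known, the metric
# is looked up by name within a resource. The resource id and metric name
# below are hypothetical.
def _example_get_metric_unit():
    return get_metric_values(
        metric_name='cpu_util',
        resource_id='11111111-2222-3333-4444-555555555555',
        fields=('id', 'unit'))

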
def get_app_values(app_name, fields, con_ssh=None,
                   auth_info=Tenant.get('admin_platform')):
    """
    Get values from system application-show
    Args:
        app_name:
        fields (str|list|tuple):
        con_ssh:
        auth_info:

    Returns:

    """
    if isinstance(fields, str):
        fields = [fields]

    table_ = table_parser.table(
        cli.system('application-show', app_name, ssh_client=con_ssh,
                   auth_info=auth_info)[1],
        combine_multiline_entry=True)
    values = table_parser.get_multi_values_two_col_table(table_, fields=fields)
    return values


def get_subcloud_status(subcloud, field='availability',
                        auth_info=Tenant.get('admin_platform', 'RegionOne'),
                        con_ssh=None, source_openrc=None):
    """
    Get the status of a subcloud from 'dcmanager subcloud list'
    Args:
        subcloud (str): name of the subcloud
        field (str): column to return the value from
        auth_info:
        con_ssh:
        source_openrc:

    Returns (str):

    """
    LOG.info("Auth_info: {}".format(auth_info))
    table_ = table_parser.table(
        cli.dcmanager('subcloud list', ssh_client=con_ssh,
                      auth_info=auth_info, source_openrc=source_openrc)[1])

    arg_dict = {'name': subcloud}
    kwargs = {key: val for key, val in arg_dict.items() if val is not None}
    status = table_parser.get_values(table_, target_header=field, **kwargs)
    return status[0]


def get_helm_overrides(field='overrides namespaces', app_name='stx-openstack',
                       charts=None, auth_info=Tenant.get('admin_platform'),
                       con_ssh=None):
    """
    Get helm overrides values via system helm-override-list
    Args:
        field (str):
        app_name:
        charts (None|str|list|tuple):
        auth_info:
        con_ssh:

    Returns (list):

    """
    table_ = table_parser.table(
        cli.system('helm-override-list', app_name, ssh_client=con_ssh,
                   auth_info=auth_info)[1])

    if charts:
        table_ = table_parser.filter_table(table_, **{'chart name': charts})

    vals = table_parser.get_multi_values(table_, fields=field, evaluate=True)
    return vals


def get_hw_compatible_hosts(hosts):
    """
    Given a list of hosts, return a dict of hardware compatible ones, if any.

    Arguments:
    - hosts (list)

    Returns:
    - Dict mapping hardware hash to hosts
    """
    hardware = {}
    hardware_hash = {}

    for host in hosts:
        rc, out = cli.system("host-disk-list {} --nowrap".format(host))
        table_ = table_parser.table(out)
        device_nodes = table_parser.get_column(table_, "device_node")
        device_type = table_parser.get_column(table_, "device_type")
        size_gib = table_parser.get_column(table_, "size_gib")
        hardware[host] = list(zip(device_nodes, device_type, size_gib))
        LOG.info("Hardware present on host {}: {}".format(
            host, hardware[host]))
        hardware_hash[host] = hash(str(hardware[host]))
        LOG.info("Host {} has hash {}".format(host, hardware_hash[host]))

    # Create reverse lookup of hash to hosts
    hash_to_hosts = {}
    for key, value in hardware_hash.items():
        hash_to_hosts.setdefault(value, []).append(key)

    LOG.info(
        "These are the hardware compatible hosts: {}".format(hash_to_hosts))

    return hash_to_hosts


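# Example usage sketch for get_hw_compatible_hosts (assumes system_helper is
# imported in this module, as it is for the tests above). Hosts that share a
# hash have identical disk layouts and can be treated as interchangeable.
def _example_report_hw_compatible_groups():
    hosts = system_helper.get_hosts(personality="compute",
                                    availability="available")
    hash_to_hosts = get_hw_compatible_hosts(hosts)
    for hw_hash, grouped_hosts in hash_to_hosts.items():
        if len(grouped_hosts) > 1:
            LOG.info("Hosts {} share the same disk layout (hash {})".format(
                grouped_hosts, hw_hash))
    return hash_to_hosts

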
def get_aggregated_measures(field='value', resource_type=None, metrics=None,
                            start=None, stop=None, overlap=None, refresh=None,
                            resource_ids=None, extra_query=None, fail_ok=False,
                            auth_info=Tenant.get('admin'), con_ssh=None):
    """
    Get measurements via 'openstack metric measures aggregation'
    Args:
        field (str): header of a column
        resource_type (str|None): used in --resource-type <resource_type>
        metrics (str|list|tuple|None): used in --metric <metric1> [metric2 ...]
        start (str|None): used in --start <start>
        stop (str|None): used in --stop <stop>
        refresh (bool): used in --refresh
        overlap (str|None): overlap percentage. used in
            --needed-overlap <overlap>
        resource_ids (str|list|tuple|None): used in
            --query "id=<resource_id1>[ or id=<resource_id2> ...]"
        extra_query (str|None): used in --query <extra_query>
        fail_ok:
        auth_info:
        con_ssh:

    Returns (tuple): (code, values), where values is a list of strings from
        the given column when the command succeeds

    """
    LOG.info("Getting aggregated measurements...")
    args_dict = {
        'resource-type': resource_type,
        'metric': metrics,
        'start': start,
        'stop': stop,
        'needed-overlap': overlap,
        'refresh': refresh,
    }

    args = common.parse_args(args_dict, vals_sep=' ')
    query_str = ''
    if resource_ids:
        if isinstance(resource_ids, str):
            resource_ids = [resource_ids]
        resource_ids = ['id={}'.format(val) for val in resource_ids]
        query_str = ' or '.join(resource_ids)

    if extra_query:
        if resource_ids:
            query_str += ' and '
        query_str += '{}'.format(extra_query)

    if query_str:
        args += ' --query "{}"'.format(query_str)

    code, out = cli.openstack('metric measures aggregation', args,
                              ssh_client=con_ssh, fail_ok=fail_ok,
                              auth_info=auth_info)
    if code > 0:
        return 1, out

    table_ = table_parser.table(out)
    return 0, table_parser.get_values(table_, field)


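# Usage sketch for get_aggregated_measures: aggregate one metric over two
# hypothetical resource ids. The ids and metric name are illustrative; the
# helper builds --query "id=<id1> or id=<id2>" from resource_ids.
def _example_get_cpu_measures():
    code, values = get_aggregated_measures(
        metrics='cpu_util',
        resource_ids=['11111111-2222-3333-4444-555555555555',
                      '66666666-7777-8888-9999-000000000000'])
    return values if code == 0 else []

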
def get_image_values(image, fields, auth_info=Tenant.get('admin'),
                     con_ssh=None, fail_ok=False):
    """
    Get glance image values from openstack image show
    Args:
        image:
        fields:
        auth_info:
        con_ssh:
        fail_ok:

    Returns (list):

    """
    if isinstance(fields, str):
        fields = (fields, )

    code, output = cli.openstack('image show', image, ssh_client=con_ssh,
                                 fail_ok=fail_ok, auth_info=auth_info)
    if code > 0:
        return [None] * len(fields)

    table_ = table_parser.table(output)
    values = table_parser.get_multi_values_two_col_table(
        table_, fields, merge_lines=True, evaluate=True,
        dict_fields='properties')
    return values


def get_ifs_to_mod(host, network_type, mtu_val):
    table_ = table_parser.table(
        cli.system('host-if-list', '{} --nowrap'.format(host))[1])

    if_class = network_type
    network = ''
    if network_type in PLATFORM_NET_TYPES:
        if_class = 'platform'

    table_ = table_parser.filter_table(table_, **{'class': if_class})
    # exclude unmatched platform interfaces from the table.
    if 'platform' == if_class:
        platform_ifs = table_parser.get_values(table_, target_header='name',
                                               **{'class': 'platform'})
        for pform_if in platform_ifs:
            if_nets = host_helper.get_host_interface_values(
                host=host, interface=pform_if, fields='networks')[0]
            if_nets = [if_net.strip() for if_net in if_nets.split(sep=',')]
            if network not in if_nets:
                table_ = table_parser.filter_table(table_, strict=True,
                                                   exclude=True,
                                                   name=pform_if)

    uses_if_names = table_parser.get_values(table_, 'name', exclude=True,
                                            **{'uses i/f': '[]'})
    non_uses_if_names = table_parser.get_values(table_, 'name', exclude=False,
                                                **{'uses i/f': '[]'})
    uses_if_first = False
    if uses_if_names:
        current_mtu = int(
            host_helper.get_host_interface_values(
                host, interface=uses_if_names[0], fields=['imtu'])[0])
        if current_mtu <= mtu_val:
            uses_if_first = True

    if uses_if_first:
        if_names = uses_if_names + non_uses_if_names
    else:
        if_names = non_uses_if_names + uses_if_names

    return if_names


def test_horizon_sysconfig_oam_cancel_edit(sys_config_pg):
    """
    Test OAM edit and display:

    Setups:
        - Login as Admin
        - Go to Admin > Platform > System Configuration

    Teardown:
        - Back to System Configuration Page
        - Logout

    Test Steps:
        - Check OAM details display
        - Edit the OAM but do not submit
    """
    LOG.tc_step('Check OAM IP display')
    sys_config_pg.go_to_oam_ip_tab()
    oam_table = table_parser.table(cli.system('oam-show')[1])
    expt_horizon = {}
    if system_helper.get_system_values(fields='system_mode')[0] == 'simplex':
        headers_map = sys_config_pg.oam_table.SIMPLEX_OAM_MAP
    else:
        headers_map = sys_config_pg.oam_table.OAM_MAP
    for cli_header in headers_map:
        horizon_header = headers_map[cli_header]
        expt_horizon[horizon_header] = table_parser.get_value_two_col_table(
            oam_table, field=cli_header)
    table_name = sys_config_pg.oam_table.name
    sys_config_pg.check_horizon_displays(table_name=table_name,
                                         expt_horizon=expt_horizon)

    LOG.tc_step('Edit the OAM but not submit')
    sys_config_pg.edit_oam(cancel=True)
    horizon.test_result = True


def create_server_group(name=None, policy='affinity', rule=None, fail_ok=False,
                        auth_info=None, con_ssh=None, rtn_exist=False,
                        field='id'):
    """
    Create a server group with given criteria

    Args:
        name (str): name of the server group
        policy (str): affinity or anti_affinity
        rule (str|None): max_server_per_host can be specified when
            policy=anti-affinity
        fail_ok (bool):
        auth_info (dict):
        con_ssh (SSHClient):
        rtn_exist (bool): Whether to return existing server group that
            matches the given name
        field (str): id or name

    Returns (tuple): (rtn_code (int), err_msg_or_srv_grp_id (str))
        - (0, <server_group_id>)    # server group created successfully
        - (1, <stderr>)     # create server group cli rejected

    """
    # return existing server group if found and rtn_exist is requested
    if name and rtn_exist:
        existing_grp = get_server_groups(name=name, strict=False,
                                         con_ssh=con_ssh, auth_info=auth_info,
                                         field=field)
        if existing_grp:
            LOG.debug("Returning existing server group {}".format(
                existing_grp[0]))
            return -1, existing_grp[0]

    # process server group name and policy
    if not name:
        name = 'grp_{}'.format(policy.replace('-', '_'))
    name = common.get_unique_name(name_str=name)
    args = '{}{} {}'.format('--rule {} '.format(rule) if rule else '', name,
                            policy.replace('_', '-'))

    LOG.info("Creating server group with args: {}...".format(args))
    exit_code, output = cli.nova('server-group-create', args,
                                 ssh_client=con_ssh, fail_ok=fail_ok,
                                 auth_info=auth_info)
    if exit_code > 0:
        return 1, output

    table_ = table_parser.table(output)
    srv_grp_id = table_parser.get_values(table_, field)[0]
    LOG.info("Server group {} created successfully.".format(name))
    return 0, srv_grp_id


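# Usage sketch for create_server_group: create (or reuse, via rtn_exist) an
# anti-affinity group limited to two servers per host. The group name and
# rule value are illustrative only.
def _example_create_anti_affinity_group():
    code, group = create_server_group(name='grp_anti_affinity_demo',
                                      policy='anti_affinity',
                                      rule='max_server_per_host=2',
                                      rtn_exist=True)
    return group

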
def test_system_type():
    """
    Verify the System Type can be retrieved from SysInv and is correct

    Test Steps:
        - Determine the System Type based on whether the system is CPE or not
        - Retrieve the System Type information from SysInv
        - Compare the types and verify they are the same, fail the test case
          otherwise

    Notes:
        - Covers SysInv test-cases:
            66) Query the product type on CPE system using CLI
            67) Query the product type on STD system using CLI
    """
    LOG.tc_step('Determine the real System Type of the lab')
    if system_helper.is_aio_system():
        expt_system_type = SystemType.CPE
    else:
        expt_system_type = SystemType.STANDARD

    LOG.tc_step('Get System Type from system inventory')
    table_ = table_parser.table(cli.system('show')[1])
    displayed_system_type = table_parser.get_value_two_col_table(
        table_, 'system_type')

    LOG.tc_step('Verify the expected System Type is the same as that from '
                'System Inventory')
    assert expt_system_type == displayed_system_type, \
        'Expected system_type is: {}; Displayed system type: {}.'.format(
            expt_system_type, displayed_system_type)


def get_stacks(name=None, field='id', con_ssh=None, auth_info=None, all_=True):
    """
    Get the stacks list based on name if given for a given tenant.

    Args:
        con_ssh (SSHClient): If None, active controller ssh will be used.
        auth_info (dict): Tenant dict. If None, primary tenant will be used.
        all_ (bool): whether to display all stacks for admin user
        name (str): Given name for the heat stack
        field (str|list|tuple):

    Returns (list): list of heat stacks.

    """
    args = ''
    if auth_info is not None:
        if auth_info['user'] == 'admin' and all_:
            args = '--a'

    table_ = table_parser.table(
        cli.openstack('stack list', positional_args=args, ssh_client=con_ssh,
                      auth_info=auth_info)[1])

    kwargs = {'Stack Name': name} if name else {}
    return table_parser.get_multi_values(table_, field, **kwargs)