Example #1
def get_compute_services(field,
                         con_ssh=None,
                         auth_info=Tenant.get('admin'),
                         **kwargs):
    """
    Get values from compute services list

    System: Regular, Small footprint

    Args:
        field (str)
        con_ssh (SSHClient):
        auth_info (dict):
        kwargs: Valid keys: Id, Binary, Host, Zone, Status, State, Updated At

    Returns (list): values of the requested field from the compute services list
    """
    table_ = table_parser.table(
        cli.openstack('compute service list',
                      ssh_client=con_ssh,
                      auth_info=auth_info)[1])
    return table_parser.get_values(table_, field, **kwargs)
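A minimal usage sketch (not part of the original snippet), assuming the helper above is importable from this test framework; the filter keys follow the kwargs listed in the docstring and the service/host values are illustrative:

# Hypothetical usage: hosts whose nova-compute service is enabled and up
up_compute_hosts = get_compute_services('Host', Binary='nova-compute',
                                        Status='enabled', State='up')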
Example #2
    def _recover_hosts(hostnames, scope):
        if system_helper.is_aio_simplex():
            LOG.fixture_step('{} Recover simplex host'.format(scope))
            host_helper.recover_simplex(fail_ok=False)
            return

        # Recover hosts for non-simplex system
        hostnames = sorted(set(hostnames))
        table_ = table_parser.table(cli.system('host-list')[1])
        table_ = table_parser.filter_table(table_, hostname=hostnames)

        # unlocked_hosts = table_parser.get_values(table_, 'hostname', administrative='unlocked')
        locked_hosts = table_parser.get_values(table_,
                                               'hostname',
                                               administrative='locked')

        err_msg = []
        if locked_hosts:
            LOG.fixture_step("({}) Unlock hosts: {}".format(
                scope, locked_hosts))
            # Hypervisor state will be checked later in wait_for_hosts_ready which handles platform only deployment
            res1 = host_helper.unlock_hosts(hosts=locked_hosts,
                                            fail_ok=True,
                                            check_hypervisor_up=False)
            for host in res1:
                if res1[host][0] not in [0, 4]:
                    err_msg.append(
                        "Not all host(s) unlocked successfully. Detail: {}".
                        format(res1))
        #
        # if unlocked_hosts:
        #     LOG.fixture_step("({}) Wait for hosts to become available or degraded: {}".format(scope, unlocked_hosts))
        #     res2 = host_helper.wait_for_hosts_states(unlocked_hosts, timeout=HostTimeout.REBOOT, check_interval=10,
        #                                              fail_ok=True, availability=['available'])
        #     if not res2:
        #         err_msg.append("Some host(s) from {} are not available.".format(unlocked_hosts))

        host_helper.wait_for_hosts_ready(hostnames)
Example #3
def get_stack_resources(stack,
                        field='resource_name',
                        auth_info=None,
                        con_ssh=None,
                        **kwargs):
    """

    Args:
        stack (str): id (or name) for the heat stack. ID is required if admin user is used to display tenant resource.
        field: values to return
        auth_info:
        con_ssh:
        kwargs: key/value pairs to filter the values to return

    Returns (list):

    """
    table_ = table_parser.table(
        cli.openstack('stack resource list --long',
                      stack,
                      ssh_client=con_ssh,
                      auth_info=auth_info)[1])
    return table_parser.get_values(table_, target_header=field, **kwargs)
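A hedged usage sketch; the stack name and the resource_status filter value are illustrative assumptions, not taken from the source:

# Hypothetical usage: physical IDs of resources that reached CREATE_COMPLETE
resource_ids = get_stack_resources(stack='demo_stack',
                                   field='physical_resource_id',
                                   **{'resource_status': 'CREATE_COMPLETE'})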
Example #4
def get_alarms(header='alarm_id', name=None, strict=False,
               auth_info=Tenant.get('admin'), con_ssh=None):
    """
    Get alarms from the openstack alarm list

    Args:
        header (str): column to return values from
        name (str|None): alarm name to filter on (matched against the Name column)
        strict (bool): whether the name match is strict
        auth_info (dict):
        con_ssh (SSHClient):

    Returns (list):

    """

    table_ = table_parser.table(cli.openstack('alarm list',
                                              ssh_client=con_ssh,
                                              auth_info=auth_info)[1],
                                combine_multiline_entry=True)
    if name is None:
        return table_parser.get_column(table_, header)

    return table_parser.get_values(table_, header, Name=name, strict=strict)
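A short usage sketch under the same assumptions; the 'cpu' name filter is illustrative:

# Hypothetical usage: all alarm IDs, then only alarms whose Name contains 'cpu'
all_alarm_ids = get_alarms()
cpu_alarm_ids = get_alarms(header='alarm_id', name='cpu', strict=False)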
Example #5
def is_https_enabled(con_ssh=None,
                     source_openrc=True,
                     auth_info=Tenant.get('admin_platform')):
    if not con_ssh:
        con_name = auth_info.get('region') if (
            auth_info and ProjVar.get_var('IS_DC')) else None
        con_ssh = ControllerClient.get_active_controller(name=con_name)

    table_ = table_parser.table(
        cli.openstack('endpoint list',
                      ssh_client=con_ssh,
                      auth_info=auth_info,
                      source_openrc=source_openrc)[1])
    con_ssh.exec_cmd('unset OS_REGION_NAME')  # Workaround
    filters = {
        'Service Name': 'keystone',
        'Service Type': 'identity',
        'Interface': 'public'
    }
    keystone_pub = table_parser.get_values(table_=table_,
                                           target_header='URL',
                                           **filters)[0]
    return 'https' in keystone_pub
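A minimal sketch (assumed, not from the source) of how this check could be used to pick a URL scheme:

# Hypothetical usage: choose the scheme based on whether HTTPS is enabled
scheme = 'https' if is_https_enabled() else 'http'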
Example #6
def test_alarm_suppression(alarm_test_prep):
    """
    Verify suppression and unsuppression of an active alarm and query alarms.

    Test Setup:
        - Unsuppress all alarms
        - Generate an alarm
    Test Steps:
        - Suppress the alarm and verify it is suppressed
        - Generate the alarm again and verify it does not appear in the active alarm list
        - Unsuppress the alarm and verify it appears in the active alarm list
        - Delete the last active alarm
    Test Teardown:
        - Unsuppress all alarms
    """
    LOG.tc_step('Suppress generated alarm and Verify it is suppressed')
    alarm_uuid = alarm_test_prep
    query_active_alarm = system_helper.get_alarms_table(query_key='uuid',
                                                        query_value=alarm_uuid)
    alarm_id = table_parser.get_values(table_=query_active_alarm,
                                       target_header='Alarm ID',
                                       **{"UUID": alarm_uuid})[0]
    assert '300.005' == alarm_id
    # alarm_id = ''.join(alarm_id)
    system_helper.suppress_event(alarm_id=alarm_id)

    LOG.tc_step('Generate Alarm again and Verify not in the Active list')
    system_helper.generate_event(event_id=alarm_id)
    alarms = system_helper.get_alarms(alarm_id=alarm_id)
    assert not alarms, "300.005 alarm appears in the active alarms table after regenerating"

    LOG.tc_step('UnSuppress alarm and verify it is unsuppressed')
    system_helper.unsuppress_event(alarm_id=alarm_id)
Example #7
def test_horizon_sysconfig_controllerfs_cancel_edit(sys_config_pg):
    """
    Test controller filesystem edit and display:

    Setups:
        - Login as Admin
        - Go to Admin > Platform > System Configuration

    Teardown:
        - Back to System Configuration Page
        - Logout

    Test Steps:
        - Check storage name and its size
        - Edit controller filesystem but do not submit
    """
    LOG.tc_step('Check controller filesystem display')
    sys_config_pg.go_to_controller_filesystem_tab()
    controllerfs_table = table_parser.table(cli.system('controllerfs-list')[1])
    headers_map = sys_config_pg.controllerfs_table.CONTROLERFS_MAP
    storage_names = table_parser.get_values(controllerfs_table,
                                            target_header='FS Name')
    cli_headers = list(headers_map)
    for name in storage_names:
        expt_horizon = {}
        cli_values = storage_helper.get_controllerfs_values(filesystem=name,
                                                            fields=cli_headers)
        for i in range(len(cli_headers)):
            horizon_header = headers_map[cli_headers[i]]
            expt_horizon[horizon_header] = cli_values[i]
        table_name = sys_config_pg.controllerfs_table.name
        sys_config_pg.check_horizon_displays(table_name=table_name,
                                             expt_horizon=expt_horizon)

    LOG.tc_step('Edit controller filesystem but do not submit')
    sys_config_pg.edit_filesystem(cancel=True)
    horizon.test_result = True
Example #8
def test_horizon_sysconfig_addrpool_add_delete(sys_config_pg):
    """
    Test the address pools edit and display:

    Setups:
        - Login as Admin
        - Go to Admin > Platform > System Configuration

    Teardown:
        - Back to System Configuration Page
        - Logout

    Test Steps:
        - Check address pools display
        - Create a new address pool
        - Check the address pool is in the list
        - Delete the address pool
        - Check the address pool is absent in the list
    """
    sys_config_pg.go_to_address_pools_tab()
    LOG.tc_step('Check address pools display')
    addr_table = table_parser.table(cli.system('addrpool-list --nowrap')[1])
    uuid_list = table_parser.get_values(addr_table, target_header='uuid')
    for uuid in uuid_list:
        expt_horizon = {}
        name = table_parser.get_values(addr_table,
                                       target_header='name',
                                       **{'uuid': uuid})[0]
        expt_horizon['Name'] = name

        prefix = table_parser.get_values(addr_table,
                                         target_header='prefix',
                                         **{'uuid': uuid})[0]
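        # The 'network' value comes back from the table parser as two wrapped
        # fragments; join them and append the prefix to rebuild the CIDR string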
        cli_network_val = table_parser.get_values(addr_table, 'network',
                                                  **{'uuid': uuid})
        cli_network_val = cli_network_val[0][0] + cli_network_val[0][
            1] + '/' + prefix
        expt_horizon['Network'] = cli_network_val

        cli_order_val = table_parser.get_values(addr_table, 'order',
                                                **{'uuid': uuid})[0]
        expt_horizon['Allocation Order'] = cli_order_val

        cli_ranges_list = eval(
            table_parser.get_values(addr_table, 'ranges', **{'uuid': uuid})[0])
        cli_ranges = ','.join(cli_ranges_list)
        expt_horizon['Address Ranges'] = cli_ranges
        table_name = sys_config_pg.address_pools_table.name
        sys_config_pg.check_horizon_displays(table_name=table_name,
                                             expt_horizon=expt_horizon)

    LOG.tc_step('Create a new address pool')
    address_name = helper.gen_resource_name('address_name')
    sys_config_pg.create_address_pool(name=address_name,
                                      network='192.168.0.0/24')
    assert sys_config_pg.is_address_present(address_name)

    LOG.tc_step('Delete the address pool')
    sys_config_pg.delete_address_pool(address_name)

    LOG.tc_step('Check the address pool is absent in the list')
    assert not sys_config_pg.is_address_present(address_name)
    horizon.test_result = True
Example #9
def test_merge_lines():
    port_table = table_parser.table(cli.openstack('port list')[1])

    fixed_ips = table_parser.get_values(port_table, 'fixed_ips', merge_lines=True)
    for i in fixed_ips:
        assert isinstance(i, str)
Example #10
def _test_increase_ceph_mon():
    """
    Increase the size of ceph-mon.  Only applicable to a storage system.

    Fails until CGTS-8216

    Test steps:
    1.  Determine the current size of ceph-mon
    2.  Attempt to modify ceph-mon to invalid values
    3.  Check if there is free space to increase ceph-mon
    4.  Attempt to increase ceph-mon
    5.  Wait for config out-of-date alarms to raise
    6.  Lock/unlock all affected nodes (controllers and storage)
    7.  Wait for alarms to clear
    8.  Check that ceph-mon has the correct updated value

    Enhancement:
    1.  Possibly check there is enough disk space for ceph-mon to increase.  Not sure if
    this is required since there always seems to be some space on the rootfs.

    """
    table_ = table_parser.table(cli.system("ceph-mon-list")[1])
    ceph_mon_gib = table_parser.get_values(table_, "ceph_mon_gib",
                                           **{"hostname": "controller-0"})[0]
    LOG.info("ceph_mon_gib is currently: {}".format(ceph_mon_gib))

    LOG.tc_step("Attempt to modify ceph-mon to invalid values")
    invalid_cmg = ['19', '41', 'fds']
    for value in invalid_cmg:
        host = "controller-0"
        cli.system("ceph-mon-modify {} ceph_mon_gib={}".format(host, value),
                   fail_ok=True)

    if int(ceph_mon_gib) >= 30:
        skip("Insufficient disk space to execute test")

    ceph_mon_gib_avail = 40 - int(ceph_mon_gib)
    new_ceph_mon_gib = math.trunc(ceph_mon_gib_avail / 10) + int(ceph_mon_gib)

    LOG.tc_step("Increase ceph_mon_gib to {}".format(new_ceph_mon_gib))
    hosts = system_helper.get_controllers()
    for host in hosts:
        cli.system("ceph-mon-modify {} ceph_mon_gib={}".format(
            host, new_ceph_mon_gib))
        # We only need to do this for one controller now and it applies to both
        break

    LOG.info("Wait for expected alarms to appear")
    storage_hosts = system_helper.get_storage_nodes()
    total_hosts = hosts + storage_hosts
    for host in total_hosts:
        system_helper.wait_for_alarm(alarm_id=EventLogID.CONFIG_OUT_OF_DATE,
                                     entity_id="host={}".format(host))

    LOG.tc_step("Lock/unlock all affected nodes")
    for host in storage_hosts:
        HostsToRecover.add(host)
        host_helper.lock_host(host)
        host_helper.unlock_host(host)
        system_helper.wait_for_alarm_gone(
            alarm_id=EventLogID.CONFIG_OUT_OF_DATE,
            entity_id="host={}".format(host))
        time.sleep(10)

    standby = system_helper.get_standby_controller_name()
    active = system_helper.get_active_controller_name()
    HostsToRecover.add(standby)
    host_helper.lock_host(standby)
    host_helper.unlock_host(standby)
    system_helper.wait_for_alarm_gone(alarm_id=EventLogID.CONFIG_OUT_OF_DATE,
                                      entity_id="host={}".format(standby))
    time.sleep(10)
    host_helper.swact_host(active)
    HostsToRecover.add(active)
    host_helper.lock_host(active)
    host_helper.unlock_host(active)
    system_helper.wait_for_alarm_gone(alarm_id=EventLogID.CONFIG_OUT_OF_DATE,
                                      entity_id="host={}".format(active))

    table_ = table_parser.table(cli.system("ceph-mon-list")[1])
    ceph_mon_gib = table_parser.get_values(table_, "ceph_mon_gib",
                                           **{"hostname": "controller-0"})[0]
    assert int(ceph_mon_gib) == new_ceph_mon_gib, "ceph_mon_gib was not updated to the new value"
Example #11
def _test_system_alarm_on_host_lock():
    """
    Verify fm event-list command in the system upon host-lock

    Scenario:
    1. Execute "fm alarm-list" command in the system.
    2. Lock one compute and wait 30 seconds.
    3. Verify commands return list of active alarms in table with expected
    rows.
    """

    LOG.info("Execute fm alarm-list. Verify the header of " +
             "the table consists of the correct items")

    # Get and save the list of existing alarms present in the system
    res, out = cli.fm('alarm-list')
    alarm_list = table_parser.table(out)

    if len(alarm_list['values']) == 0:
        LOG.info("There are no alarms present in the alarm list")

    current_alarms = []
    for alarm in alarm_list['values']:
        if re.match(".", alarm[0].strip()) is not None:
            current_alarms.append(alarm[0])
            LOG.info("The current alarms in the system are: "
                     "{0}".format(alarm[0]))

    # Get the historical list of alarms
    hist_alarm_table = system_helper.get_events_table(limit=15, show_uuid=True)

    # Check that a valid alarm header is present
    alarm_header = [
        'UUID', 'Time Stamp', 'State', 'Event Log ID', 'Reason Text',
        'Entity Instance ID', 'Severity'
    ]
    if hist_alarm_table['headers'] != alarm_header:
        LOG.info("Fields in table not correct actual {0} expected {1}".format(
            hist_alarm_table['headers'], alarm_header))

    # Verify the existing alarms are present in the historical list in state 'set'
    for name in current_alarms:
        kwargs = {"Event Log ID": name}
        alarm_state = table_parser.get_values(hist_alarm_table, 'State',
                                              **kwargs)
        LOG.info('alarm: %s  state: %s' % (name, alarm_state))
        if alarm_state != ['set']:
            LOG.info('Alarm state is incorrect')
            test_res = False
            break

    # Raise a new alarm by locking a compute node
    # Get the compute
    LOG.info("Lock compute and wait 30 seconds")
    host = 'compute-1'
    if system_helper.is_aio_duplex():
        host = system_helper.get_standby_controller_name()

    HostsToRecover.add(host, scope='function')
    host_helper.lock_host(host)
    time.sleep(20)

    # Verify the new alarm is present in the historical alarm and active alarm lists
    LOG.info("Verify alarm-list command returns list of active alarms")
    res, out = cli.fm('alarm-list')
    new_active_alarm_table = table_parser.table(out)

    if len(alarm_list['values']) == 0:
        LOG.info("There are no alarms present in the alarm list")

    # Save the list of new alarms present in the list
    new_alarms = []
    for alarm in new_active_alarm_table['values']:
        if re.match(".", alarm[0].strip()) is not None:
            new_alarms.append(alarm[0])
            LOG.info("The alarm ID in the alarm list table is: "
                     "{0}".format(alarm[0]))

    # Identify the new alarms
    new_alarm_list = list(set(new_alarms) - set(current_alarms))
    LOG.info(new_alarm_list)

    # Verify the new alarms are present in the historical list in state 'set'
    # Get the historical list of alarms
    hist_alarm_table = system_helper.get_events_table(limit=15, show_uuid=True)

    for name in new_alarm_list:
        kwargs = {"Event Log ID": name}
        alarm_state = table_parser.get_values(hist_alarm_table, 'State',
                                              **kwargs)
        LOG.info('new alarm: %s  state: %s' % (name, alarm_state))
        if alarm_state != ['set']:
            LOG.info('Alarm state is incorrect')
            test_res = False
            break

    # Clear the alarm by unlocking the compute node
    LOG.info("Unlock compute and wait 30 seconds")
    compute_ssh = host_helper.unlock_host(host)
    time.sleep(30)

    # Verify the alarm clear is shown in the historical table
    LOG.info("Verify event-list command returns list of active alarms")
    hist_alarm_table = system_helper.get_events_table(limit=15, show_uuid=True)

    for name in new_alarm_list:
        kwargs = {"Event Log ID": name}
        alarm_state = table_parser.get_values(hist_alarm_table, 'State',
                                              **kwargs)
        LOG.info('new alarm: %s  state: %s' % (name, alarm_state))
        if alarm_state != ['clear']:
            LOG.info('Alarm state is incorrect')
            test_res = False
            break

    # Verify the alarm disappears from the active alarm table
    LOG.info("Verify alarm-list command returns list of active alarms")
    res, out = cli.fm('alarm-list')
    new_active_alarm_table = table_parser.table(out)

    active_alarms = []
    for alarm in new_active_alarm_table['values']:
        if re.match(".", alarm[0].strip()) is not None:
            active_alarms.append(alarm[0])
            LOG.info("The alarm ID in the alarm list table is: "
                     "{0}".format(alarm[0]))

    # Identify the new alarms
    for name in new_alarm_list:
        if name in active_alarms:
            LOG.info("The alarm was not cleared from the active alarm table")
            test_res = False
            break
Example #12
def test_horizon_host_details_display(host_inventory_pg, host_name):
    """
    Test the host details display:

    Setups:
        - Login as Admin
        - Go to Admin > Platform > Host Inventory > Controller-0

    Test Steps:
        - Test host controller-0 overview display
        - Test host controller-0 processor display
        - Test host controller-0 memory display
        - Test host controller-0 storage display
        - Test host controller-0 ports display
        - Test host controller-0 lldp display

    Teardown:
        - Logout
    """
    host_table = host_inventory_pg.hosts_table(host_name)
    host_details_pg = host_inventory_pg.go_to_host_detail_page(host_name)

    # OVERVIEW TAB
    LOG.tc_step('Test host: {} overview display'.format(host_name))
    host_details_pg.go_to_overview_tab()
    horizon_vals = host_details_pg.host_detail_overview(
        host_table.driver).get_content()
    fields_map = host_details_pg.host_detail_overview(
        host_table.driver).OVERVIEW_INFO_HEADERS_MAP
    cli_host_vals = system_helper.get_host_values(host_name,
                                                  list(fields_map.keys()),
                                                  rtn_dict=True)
    for field in fields_map:
        horizon_header = fields_map[field]
        cli_host_val = str(cli_host_vals[field])
        horizon_val = horizon_vals.get(horizon_header)
        if horizon_val is None:
            horizon_val = 'None'
            assert cli_host_val == horizon_val, '{} display incorrectly'.format(
                horizon_header)
        else:
            assert cli_host_val.upper() in horizon_val.upper(
            ), '{} display incorrectly'.format(horizon_header)
    LOG.info('Host: {} overview display correct'.format(host_name))

    # PROCESSOR TAB
    LOG.tc_step('Test host {} processor display'.format(host_name))
    host_details_pg.go_to_processor_tab()
    cpu_table = table_parser.table(
        cli.system('host-cpu-list {}'.format(host_name))[1])
    expt_cpu_info = {
        'Processor Model:':
        table_parser.get_values(cpu_table, 'processor_model')[0],
        'Processors:':
        str(len(set(table_parser.get_values(cpu_table, 'processor'))))
    }

    horizon_cpu_info = host_details_pg.inventory_details_processor_info.get_content(
    )
    assert horizon_cpu_info['Processor Model:'] == expt_cpu_info[
        'Processor Model:']
    assert horizon_cpu_info['Processors:'] == expt_cpu_info['Processors:']

    # MEMORY TABLE
    LOG.tc_step('Test host {} memory display'.format(host_name))
    checking_list = ['mem_total(MiB)', 'mem_avail(MiB)']

    host_details_pg.go_to_memory_tab()
    memory_table = table_parser.table(
        cli.system('host-memory-list {}'.format(host_name))[1])
    column_names = host_details_pg.memory_table.column_names
    processor_list = table_parser.get_values(memory_table, column_names[0])
    cli_memory_table_dict = table_parser.row_dict_table(memory_table,
                                                        column_names[0],
                                                        lower_case=False)

    for processor in processor_list:
        horizon_vm_pages_val = host_details_pg.get_memory_table_info(
            processor, column_names[2])
        horizon_memory_val = host_details_pg.get_memory_table_info(
            processor, 'Memory')
        if cli_memory_table_dict[processor][
                'hugepages(hp)_configured'] == 'False':
            assert horizon_vm_pages_val is None, 'Horizon {} display incorrectly'.format(
                column_names[2])
        else:
            for field in checking_list:
                assert cli_memory_table_dict[processor][
                    field] in horizon_memory_val, \
                    'Memory {} display incorrectly'.format(field)

    # STORAGE TABLE
    #   This test loops over each table and checks its display
    #   The test may fail in the following cases:
    #   1. the disk table's Size header, e.g. Size(GiB), uses a different unit such as Size (MiB) or Size (TiB)
    #   2. the lvg table may be displayed differently:
    #   Case 1: Name | State | Access | Size (GiB) | Avail Size(GiB) | Current Physical Volume - Current Logical Volumes
    #   Case 2: Name | State | Access | Size                         | Current Physical Volume - Current Logical Volumes
    #   In Case 2, Size values in Horizon are rounded to 2 digits but the CLI values are not rounded

    LOG.tc_step('Test host {} storage display'.format(host_name))
    host_details_pg.go_to_storage_tab()

    cmd_list = [
        'host-disk-list {}'.format(host_name),
        'host-disk-partition-list {}'.format(host_name),
        'host-lvg-list {}'.format(host_name),
        'host-pv-list {}'.format(host_name)
    ]
    table_names = [
        'disk table', 'disk partition table', 'local volume groups table',
        'physical volumes table'
    ]

    horizon_storage_tables = [
        host_details_pg.storage_disks_table,
        host_details_pg.storage_partitions_table,
        host_details_pg.storage_lvg_table, host_details_pg.storage_pv_table
    ]
    cli_storage_tables = []
    for cmd in cmd_list:
        cli_storage_tables.append(table_parser.table(cli.system(cmd)[1]))

    for i in range(len(horizon_storage_tables)):
        horizon_table = horizon_storage_tables[i]
        unique_key = horizon_table.column_names[0]
        horizon_row_dict_table = host_details_pg.get_horizon_row_dict(
            horizon_table, key_header_index=0)
        cli_table = cli_storage_tables[i]
        table_dict_unique_key = list(horizon_table.HEADERS_MAP.keys())[list(
            horizon_table.HEADERS_MAP.values()).index(unique_key)]

        cli_row_dict_storage_table = table_parser.row_dict_table(
            cli_table, table_dict_unique_key, lower_case=False)
        for key_header in horizon_row_dict_table:
            for cli_header in horizon_table.HEADERS_MAP:
                horizon_header = horizon_table.HEADERS_MAP[cli_header]
                horizon_row_dict = horizon_row_dict_table[key_header]
                cli_row_dict = cli_row_dict_storage_table[key_header]
                # Solve parser issue: e.g. Size (GiB)' should be '558.029' not ['5589.', '029']
                cli_val = cli_row_dict[cli_header]
                if isinstance(cli_val, list):
                    cli_row_dict[cli_header] = ''.join(cli_val)
                assert horizon_row_dict[horizon_header] == cli_row_dict[cli_header], \
                    'In {}: disk: {} {} display incorrectly'.format(table_names[i], key_header, horizon_header)
        LOG.info('{} display correct'.format(table_names[i]))

    # PORT TABLE
    LOG.tc_step('Test host {} port display'.format(host_name))
    host_details_pg.go_to_ports_tab()
    horizon_port_table = host_details_pg.ports_table()
    cli_port_table = table_parser.table(
        cli.system('host-ethernet-port-list {}'.format(host_name))[1])
    horizon_row_dict_port_table = host_details_pg.get_horizon_row_dict(
        horizon_port_table, key_header_index=0)

    cli_row_dict_port_table = table_parser.row_dict_table(cli_port_table,
                                                          'name',
                                                          lower_case=False)
    for ethernet_name in cli_row_dict_port_table:
        for cli_header in horizon_port_table.HEADERS_MAP:
            horizon_header = horizon_port_table.HEADERS_MAP[cli_header]
            horizon_row_dict = horizon_row_dict_port_table[ethernet_name]
            cli_row_dict = cli_row_dict_port_table[ethernet_name]
            if cli_header not in cli_row_dict and cli_header == 'mac address':
                cli_val = cli_row_dict['macaddress']
            else:
                cli_val = cli_row_dict[cli_header]
            horizon_val = horizon_row_dict[horizon_header]
            # Solve table parser issue: MAC Address returns list eg: ['a4:bf:01:35:4a:', '32']
            if isinstance(cli_val, list):
                cli_val = ''.join(cli_val)
            assert cli_val in horizon_val, '{} display incorrectly'.format(
                horizon_header)

    # LLDP TABLE
    LOG.tc_step('Test host {} lldp display'.format(host_name))
    host_details_pg.go_to_lldp_tab()
    lldp_list_table = table_parser.table(
        cli.system('host-lldp-neighbor-list {}'.format(host_name))[1])
    lldp_uuid_list = table_parser.get_values(lldp_list_table, 'uuid')
    horizon_lldp_table = host_details_pg.lldp_table()
    cli_row_dict_lldp_table = {}
    horizon_row_dict_lldp_table = host_details_pg.get_horizon_row_dict(
        horizon_lldp_table, key_header_index=1)
    for uuid in lldp_uuid_list:
        cli_row_dict = {}
        lldp_show_table = table_parser.table(
            cli.system('lldp-neighbor-show {}'.format(uuid))[1])
        row_dict_key = table_parser.get_value_two_col_table(
            lldp_show_table, 'port_identifier')
        for cli_header in horizon_lldp_table.HEADERS_MAP:
            horizon_header = horizon_lldp_table.HEADERS_MAP[cli_header]
            horizon_row_dict = horizon_row_dict_lldp_table[row_dict_key]
            cli_row_dict[cli_header] = table_parser.get_value_two_col_table(
                lldp_show_table, cli_header)
            cli_row_dict_lldp_table[row_dict_key] = cli_row_dict
            assert cli_row_dict[cli_header] == horizon_row_dict[horizon_header], \
                'lldp neighbor:{} {} display incorrectly'.format(row_dict_key, horizon_header)

    horizon.test_result = True
Example #13
def verify_basic_template(template_name=None,
                          con_ssh=None,
                          auth_info=None,
                          delete_after_swact=False):
    """
        Create/Delete heat stack and verify the resources are created/deleted as expected
            - Create a heat stack with the given template
            - Verify heat stack is created successfully
            - Verify heat resources are created
            - Delete Heat stack and verify resource deletion
        Args:
            con_ssh (SSHClient): If None, active controller ssh will be used.
            template_name (str): template to be used to create heat stack.
            auth_info (dict): Tenant dict. If None, primary tenant will be used.
            delete_after_swact
    """

    t_name, yaml = template_name.split('.')
    params = getattr(Heat, t_name)['params']
    heat_user = getattr(Heat, t_name)['heat_user']
    to_verify = getattr(Heat, t_name)['verify']
    if heat_user == 'admin':
        auth_info = Tenant.get('admin')

    table_ = table_parser.table(cli.heat('stack-list', auth_info=auth_info)[1])
    names = table_parser.get_values(table_, 'stack_name')
    stack_name = common.get_unique_name(t_name, existing_names=names)
    template_path = os.path.join(ProjVar.get_var('USER_FILE_DIR'), HEAT_PATH,
                                 template_name)
    if params:
        params = {
            param_: heat_helper.get_heat_params(param_name=param_)
            for param_ in params
        }

    LOG.tc_step("Creating Heat Stack using template %s", template_name)
    heat_helper.create_stack(stack_name=stack_name,
                             template=template_path,
                             parameters=params,
                             cleanup='function',
                             auth_info=auth_info,
                             con_ssh=con_ssh)

    for item in to_verify:
        LOG.tc_step("Verifying Heat created resources %s for stack %s", item,
                    stack_name)
        verify_heat_resource(to_verify=item,
                             template_name=t_name,
                             stack_name=stack_name,
                             auth_info=auth_info)
    LOG.info("Stack {} resources are created as expected.".format(stack_name))

    if hasattr(HeatUpdate, t_name):
        LOG.tc_step("Updating stack %s", stack_name)
        update_stack(stack_name,
                     template_name,
                     ssh_client=con_ssh,
                     auth_info=auth_info,
                     fail_ok=False)

    if delete_after_swact:
        host_helper.swact_host()

    LOG.tc_step("Delete heat stack {} ".format(stack_name))
    heat_helper.delete_stack(stack=stack_name,
                             auth_info=auth_info,
                             fail_ok=False)

    LOG.info("Stack {} deleted successfully.".format(stack_name))

    LOG.tc_step(
        "Verifying resource deletion after heat stack {} is deleted".format(
            stack_name))
    for item in to_verify:
        LOG.tc_step("Verifying Heat resources deletion %s for stack %s", item,
                    stack_name)
        code, msg = verify_heat_resource(to_verify=item,
                                         template_name=t_name,
                                         stack_name=stack_name,
                                         fail_ok=True,
                                         auth_info=auth_info)
        assert 1 == code, "Heat resource {} still exist after stack {} deletion".format(
            item, stack_name)
Example #14
def get_subclouds(field='name',
                  name=None,
                  avail=None,
                  sync=None,
                  mgmt=None,
                  deploy=None,
                  auth_info=Tenant.get('admin_platform', 'RegionOne'),
                  con_ssh=None,
                  source_openrc=None,
                  rtn_dict=False,
                  evaluate=False,
                  strict=True,
                  regex=False,
                  filter_subclouds=True):
    """
    Get subclouds values
    Args:
        field (str | tuple): fields of value to get
        name (str): subcloud name
        avail (str): subcloud availability status
        sync (str): subcloud sync status
        mgmt (str): subcloud management status
        deploy (str): subcloud deploy status
        auth_info (dict):
        con_ssh (SSHClient):
        source_openrc (None|bool):
        rtn_dict (bool): whether to return dict of field/value pairs
        evaluate (bool): whether to convert value to python data type
        strict (bool): True to use re.match, False to use re.search
        regex (bool): whether to use regex to find value(s)
        filter_subclouds (bool): whether to filter out the subclouds that are not in
                                 the --subcloud_list arg

    Returns (list | dict):
        when rtn_dict is False, list of values
        when rtn_dict is True, dict of field/values pairs

    """
    table_ = table_parser.table(
        cli.dcmanager('subcloud list',
                      ssh_client=con_ssh,
                      auth_info=auth_info,
                      source_openrc=source_openrc)[1])
    arg_map = {
        'name': name,
        'availability': avail,
        'sync': sync,
        'management': mgmt,
        'deploy status': deploy
    }
    kwargs = {key: val for key, val in arg_map.items() if val}
    if filter_subclouds:
        filtered_subclouds = table_parser.get_values(table_,
                                                     target_header=field,
                                                     **kwargs)
        subcloud_list = ProjVar.get_var('SUBCLOUD_LIST')
        if subcloud_list:
            filtered_subclouds = [
                subcloud for subcloud in filtered_subclouds
                if subcloud in subcloud_list
            ]
            LOG.info('filtered_subclouds: {}'.format(filtered_subclouds))
        return filtered_subclouds
    else:
        return table_parser.get_multi_values(table_,
                                             field,
                                             rtn_dict=rtn_dict,
                                             evaluate=evaluate,
                                             strict=strict,
                                             regex=regex,
                                             **kwargs)
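A usage sketch assuming the same dcmanager helpers; the availability and management status values are illustrative:

# Hypothetical usage: names of online, managed subclouds
online_managed_subclouds = get_subclouds(field='name', avail='online', mgmt='managed')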
Example #15
def get_failed_live_migrate_action_id(vm_id):
    action_table = table_parser.table(cli.openstack('event list', vm_id)[1])
    req_id = table_parser.get_values(action_table, 'Request ID', **{'Action': 'live-migration', 'Message': 'Error'})
    assert req_id, "request id for failed live migration not found"
    return req_id[0]
Example #16
def test_dc_fault_scenario(subcloud_to_test):
    """
    Test Fault Scenario on Distributed Cloud
    Args:
        subcloud_to_test (str): module fixture

    Setup:
        - Make sure there is consistency between alarm summary on
        Central Cloud and on subclouds

    Test Steps:
        - Make subcloud offline (e.g. delete route)
        Step1:
        - Ensure subcloud shows offline
        Step2:
        - Raise alarm on subcloud
        - Ensure relative alarm raised on subcloud
        - Ensure system alarm-summary on subcloud has changed
        - Ensure dcmanager alarm summary on system controller has no change
        Step3:
        - Resume connectivity to subcloud (e.g. add route back)
        - Ensure subcloud shows online and in-sync
        - Ensure system alarm-summary on subcloud matches dcmanager alarm summary on system
        controller
        Step4:
        - Clean alarm on subcloud
        - Ensure relative alarm cleared on subcloud
        - Ensure system alarm-summary on subcloud matches dcmanager alarm summary on system
        controller
    """
    ssh_central = ControllerClient.get_active_controller(name="RegionOne")
    ssh_subcloud = ControllerClient.get_active_controller(
        name=subcloud_to_test)
    subcloud_table = {}
    try:
        code, output = cli.dcmanager(
            "subcloud show {}".format(subcloud_to_test),
            ssh_client=ssh_central)
        gateway = table_parser.get_value_two_col_table(
            table_parser.table(output), "management_gateway_ip")
        code, hosts_raw = cli.system("host-list", ssh_client=ssh_subcloud)
        hosts_id = table_parser.get_values(table_parser.table(hosts_raw), 'id')
        for host_id in hosts_id:
            code, route_raw = cli.system("host-route-list {}".format(host_id),
                                         ssh_client=ssh_subcloud)
            route_table = table_parser.filter_table(
                table_parser.table(route_raw), **{'gateway': gateway})
            subcloud_table[host_id] = route_table

        LOG.tc_step(
            "Delete route for subcloud: {} and wait for it to go offline.".
            format(subcloud_to_test))
        ssh_subcloud = ControllerClient.get_active_controller(
            name=subcloud_to_test)
        for host_id in subcloud_table:
            command = "host-route-delete {}".format(
                table_parser.get_values(subcloud_table[host_id], "uuid")[0])
            cli.system(command, ssh_client=ssh_subcloud)

        dc_helper.wait_for_subcloud_status(subcloud_to_test,
                                           avail=SubcloudStatus.AVAIL_OFFLINE,
                                           timeout=DCTimeout.SYNC,
                                           con_ssh=ssh_central)

        LOG.tc_step("Raise alarm on subcloud: {}".format(subcloud_to_test))
        ssh_subcloud = ControllerClient.get_active_controller(
            name=subcloud_to_test)
        code_sub_before, output_sub_before = cli.fm("alarm-summary",
                                                    ssh_client=ssh_subcloud)
        code_central_before, output_central_before = cli.dcmanager(
            'alarm summary')
        ssh_subcloud.exec_cmd(
            "fmClientCli -c \"### ###300.005###clear###system.vm###host="
            "testhost-0### ###critical### ###processing-error###cpu-cycles-limit-exceeded"
            "### ###True###True###'\"",
            fail_ok=False)
        LOG.info("Ensure relative alarm was raised at subcloud: {}".format(
            subcloud_to_test))
        system_helper.wait_for_alarm(
            alarm_id=EventLogID.PROVIDER_NETWORK_FAILURE, con_ssh=ssh_subcloud)
        code_sub_after, output_sub_after = cli.fm("alarm-summary",
                                                  ssh_client=ssh_subcloud)
        code_central_after, output_central_after = cli.dcmanager(
            'alarm summary')
        LOG.info(
            "Ensure fm alarm summary on subcloud: {} has changed but dcmanager alarm "
            "summary has not changed".format(subcloud_to_test))
        assert output_central_before == output_central_after and output_sub_before != \
            output_sub_after

        add_routes_to_subcloud(subcloud_to_test, subcloud_table)

        dc_helper.wait_for_subcloud_status(subcloud_to_test,
                                           avail=SubcloudStatus.AVAIL_ONLINE,
                                           sync=SubcloudStatus.SYNCED,
                                           timeout=DCTimeout.SYNC,
                                           con_ssh=ssh_central)
        alarm_summary_add_and_del(subcloud_to_test)

    finally:
        cli.dcmanager("subcloud show {}".format(subcloud_to_test),
                      ssh_client=ssh_central,
                      fail_ok=True)
        add_routes_to_subcloud(subcloud_to_test, subcloud_table, fail_ok=True)
        LOG.info("Clear alarm on subcloud: {}".format(subcloud_to_test))
        ssh_subcloud.exec_cmd('fmClientCli -D host=testhost-0')
        check_alarm_summary_match_subcloud(subcloud=subcloud_to_test)
Example #17
def update_dovetail_mgmt_interface():
    """
    Update dovetail vm mgmt interface on cumulus system.
    Since the cumulus system is on a different version, this helper function requires CLI commands that match the cumulus TiS release.

    Returns:

    """
    expt_mgmt_net = get_expt_mgmt_net()
    if not expt_mgmt_net:
        skip('{} mgmt net is not found in Cumulus tis-lab project'.format(
            ProjVar.get_var('LAB')['name']))

    with ssh_to_cumulus_server() as cumulus_con:
        cumulus_auth = CumulusCreds.TENANT_TIS_LAB
        vm_id = vm_helper.get_vm_id_from_name(vm_name='dovetail',
                                              con_ssh=cumulus_con,
                                              auth_info=cumulus_auth)

        dovetail_networks = vm_helper.get_vms(vms=vm_id,
                                              field='Networks',
                                              con_ssh=cumulus_con,
                                              auth_info=cumulus_auth)[0]

        actual_nets = dovetail_networks.split(sep=';')
        prev_mgmt_nets = []
        for net in actual_nets:
            net_name, net_ip = net.split('=')
            if '-MGMT-net' in net_name:
                prev_mgmt_nets.append(net_name)

        attach = True
        if expt_mgmt_net in prev_mgmt_nets:
            attach = False
            prev_mgmt_nets.remove(expt_mgmt_net)
            LOG.info("{} interface already attached to Dovetail vm".format(
                expt_mgmt_net))

        if prev_mgmt_nets:
            LOG.info("Detach interface(s) {} from dovetail vm".format(
                prev_mgmt_nets))
            vm_ports_table = table_parser.table(
                cli.nova('interface-list',
                         vm_id,
                         ssh_client=cumulus_con,
                         auth_info=cumulus_auth)[1])
            for prev_mgmt_net in prev_mgmt_nets:
                prev_net_id = network_helper.get_net_id_from_name(
                    net_name=prev_mgmt_net,
                    con_ssh=cumulus_con,
                    auth_info=cumulus_auth)

                prev_port = table_parser.get_values(vm_ports_table, 'Port ID',
                                                    **{'Net ID':
                                                       prev_net_id})[0]
                detach_arg = '{} {}'.format(vm_id, prev_port)
                cli.nova('interface-detach',
                         detach_arg,
                         ssh_client=cumulus_con,
                         auth_info=cumulus_auth)

        mgmt_net_id = network_helper.get_net_id_from_name(
            net_name=expt_mgmt_net,
            con_ssh=cumulus_con,
            auth_info=cumulus_auth)
        if attach:
            LOG.info("Attach {} to dovetail vm".format(expt_mgmt_net))
            args = '--net-id {} {}'.format(mgmt_net_id, vm_id)
            cli.nova('interface-attach',
                     args,
                     ssh_client=cumulus_con,
                     auth_info=cumulus_auth)

        vm_ports_table = table_parser.table(
            cli.nova('interface-list',
                     vm_id,
                     ssh_client=cumulus_con,
                     auth_info=cumulus_auth)[1])
        mgmt_mac = table_parser.get_values(vm_ports_table, 'MAC Addr',
                                           **{'Net ID': mgmt_net_id})[0]

    ComplianceCreds.set_host(Dovetail.TEST_NODE)
    ComplianceCreds.set_user(Dovetail.USERNAME)
    ComplianceCreds.set_password(Dovetail.PASSWORD)
    with ssh_to_compliance_server() as dovetail_ssh:
        if not attach and network_helper.ping_server('192.168.204.3',
                                                     ssh_client=dovetail_ssh,
                                                     fail_ok=True)[0] == 0:
            return
        LOG.info("Bring up dovetail mgmt interface and assign ip")
        eth_name = network_helper.get_eth_for_mac(dovetail_ssh,
                                                  mac_addr=mgmt_mac)
        dovetail_ssh.exec_sudo_cmd('ip link set dev {} up'.format(eth_name))
        dovetail_ssh.exec_sudo_cmd('dhclient {}'.format(eth_name),
                                   expect_timeout=180)
        dovetail_ssh.exec_cmd('ip addr')
        network_helper.ping_server(server='192.168.204.3',
                                   ssh_client=dovetail_ssh,
                                   fail_ok=False)
Example #18
def ovs_dpdk_1_core():
    LOG.fixture_step("Verify the ovs-dpdk vswitch is configured with just 1 core")
    vswitch_type = "ovs-dpdk"
    cpu_function = "vswitch"
    proc = "0"
    host_list = host_helper.get_hypervisors()
    for host in host_list:
        with host_helper.ssh_to_host(host) as node_ssh:
            cmd = "cat /proc/meminfo | grep Hugepagesize | awk '{print $2}'"
            hp = int(
                node_ssh.exec_cmd(cmd=cmd, fail_ok=False,
                                  get_exit_code=False)[1])
        mem = host_helper.get_host_memories(
            host=host,
            headers=("app_hp_avail_2M", "app_hp_avail_1G", "mem_avail(MiB)",
                     "vs_hp_total"))
        if hp == 1048576:
            if int(mem[proc][3]) < 2 or mem[proc][1] < 10:
                HostsToRecover.add(hostnames=host, scope="module")
                host_helper.lock_host(host=host)
                if int(mem[proc][3]) < 2:
                    args = ' -f vswitch -1G {} {} {}'.format(2, host, proc)
                    cli.system('host-memory-modify', args)
                    host_helper.modify_host_cpu(host=host,
                                                cpu_function=cpu_function,
                                                **{"p{}".format(proc): 1})
                    # TODO maybe find a better option than sleep since we can't wait for applying
                    # container_helper.wait_for_apps_status(apps='stx-openstack',
                    #                                       status=AppStatus.APPLYING)
                    time.sleep(60)
                    container_helper.wait_for_apps_status(
                        apps='stx-openstack',
                        status=AppStatus.APPLIED,
                        check_interval=30)
                if mem[proc][1] < 10:
                    args = ' -1G {} {} {}'.format(10, host, proc)
                    cli.system('host-memory-modify', args)
                host_helper.unlock_host(host=host)
        if hp == 2048:
            if int(mem[proc][3]) < 512 or mem[proc][0] < 2500:
                host_helper.lock_host(host=host)
                if int(mem[proc][3]) < 512:
                    system_helper.modify_system(
                        **{"vswitch_type": vswitch_type})
                    vswitch_args = ' -f vswitch -2M {} {} {}'.format(
                        512, host, proc)
                    cli.system('host-memory-modify', vswitch_args)
                    host_helper.modify_host_cpu(host=host,
                                                cpu_function=cpu_function,
                                                **{"p{}".format(proc): 1})
                    # TODO maybe find a better option than sleep since we can't wait for applying
                    # container_helper.wait_for_apps_status(apps='stx-openstack',
                    #                                     status=AppStatus.APPLIED)
                    time.sleep(60)
                    container_helper.wait_for_apps_status(
                        apps='stx-openstack',
                        status=AppStatus.APPLIED,
                        check_interval=30)
                if mem[proc][0] < 2500:
                    args = ' -2M {} {} {}'.format(2500, host, proc)
                    cli.system('host-memory-modify', args)
                host_helper.unlock_host(host=host)

        test_table = host_helper.get_host_cpu_list_table(host=host)
        curr_assigned_function_list = table_parser.get_values(
            test_table, "assigned_function")
        assert "vSwitch" in curr_assigned_function_list
Example #19
def get_imported_load_version(con_ssh=None):
    table_ = table_parser.table(cli.system('load-list', ssh_client=con_ssh)[1])
    table_ = table_parser.filter_table(table_, state='imported')

    return table_parser.get_values(table_, 'software_version')
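A brief, assumed usage sketch showing how the imported load versions might be checked before an upgrade:

# Hypothetical usage: ensure at least one software load has been imported
imported_versions = get_imported_load_version()
assert imported_versions, "No imported software load found"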