Example #1
def test_delete_unlocked_node_negative():
    """
    Attempts to delete each unlocked node.
    Fails if any unlocked node is actually deleted.

    Test Steps:
        - Create a list of every unlocked host
        - Iterate through each host and attempt to delete it
        - Verify that each host rejected the delete request

    """

    hosts = system_helper.get_hosts(administrative='unlocked')

    deleted_nodes = []

    for node in hosts:
        LOG.tc_step("attempting to delete {}".format(node))
        LOG.info("{} state: {}".format(node, system_helper.get_host_values(node, fields='administrative')[0]))
        res, out = cli.system('host-delete', node, fail_ok=True)

        LOG.tc_step("Delete request - result: {}\tout: {}".format(res, out))

        assert 1 == res, "FAIL: The delete request for {} was not rejected".format(node)

        LOG.tc_step("Confirming that the node was not deleted")
        res, out = cli.system('host-show', node, fail_ok=True)

        if 'host not found' in out or res != 0:
            # the node was deleted even though the delete request was rejected
            LOG.info("{} was deleted.".format(node))
            deleted_nodes.append(node)

    assert not deleted_nodes, "Fail: Delete requests for the following node(s) " \
                              "were accepted: {}".format(deleted_nodes)
Example #2
def _modify_firewall_rules(firewall_rules_path):
    """
    :param firewall_rules_path: Path to the firewall rules file (including the file name)
    """
    dc_region = 'RegionOne' if ProjVar.get_var('IS_DC') else None

    ssh_client = ControllerClient.get_active_controller(name=dc_region)
    LOG.info("Install firewall rules: {}".format(firewall_rules_path))
    auth_info = Tenant.get('admin_platform', dc_region=dc_region)
    start_time = common.get_date_in_format(ssh_client=ssh_client)
    time.sleep(1)
    cli.system('firewall-rules-install',
               firewall_rules_path,
               ssh_client=ssh_client,
               auth_info=auth_info)

    def _wait_for_config_apply(auth_info_, con_ssh_=None):
        controllers = system_helper.get_controllers(auth_info=auth_info_,
                                                    con_ssh=con_ssh_)
        for controller in controllers:
            system_helper.wait_for_events(
                start=start_time,
                fail_ok=False,
                timeout=60,
                entity_instance_id='host=controller',
                event_log_id=EventLogID.CONFIG_OUT_OF_DATE,
                auth_info=auth_info_,
                con_ssh=con_ssh_,
                **{
                    'Entity Instance ID': 'host={}'.format(controller),
                    'State': 'set'
                })
            # Extend timeout for controller-1 config out-of-date clear to 5 min due to CGTS-8497
            system_helper.wait_for_events(
                start=start_time,
                fail_ok=False,
                timeout=300,
                entity_instance_id='host=controller',
                event_log_id=EventLogID.CONFIG_OUT_OF_DATE,
                auth_info=auth_info_,
                con_ssh=con_ssh_,
                **{
                    'Entity Instance ID': 'host={}'.format(controller),
                    'State': 'clear'
                })

    LOG.info("Wait for config to apply on both controllers")
    _wait_for_config_apply(auth_info_=auth_info, con_ssh_=ssh_client)

    if ProjVar.get_var('IS_DC'):
        subcloud = ProjVar.get_var('PRIMARY_SUBCLOUD')
        LOG.info(
            "Wait for sync audit for {} in dcmanager.log".format(subcloud))
        dc_helper.wait_for_sync_audit(subclouds=subcloud)

        LOG.info("Wait for config apply on {}".format(subcloud))
        _wait_for_config_apply(auth_info_=Tenant.get('admin_platform'))

    # Ensures iptables has enough time to populate the list with new ports
    time.sleep(10)
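
A minimal call-site sketch, assuming the framework modules used above are already imported; the rules file path shown is hypothetical and not from the source:

# Hypothetical call site: install custom rules previously copied to the active
# controller; _modify_firewall_rules then waits for the config to apply on both
# controllers (and on the primary subcloud when the lab is a distributed cloud).
custom_rules_path = "/home/sysadmin/iptables.rules"  # assumed path, illustrative only
_modify_firewall_rules(custom_rules_path)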
Example #3
def test_lldp_neighbor_remote_port():
    """
    Tests if LLDP Neighbor remote_port exists on all hosts

    Test Steps:
        - Checks LLDP Neighbor remote_port to ensure it exists
    """

    remote_port_missing = False

    LOG.tc_step("Parsing host-list for hostnames")
    hosts_tab = table_parser.table(cli.system('host-list')[1])
    all_hosts = table_parser.get_column(hosts_tab, 'hostname')

    for host_name in all_hosts:

        LOG.tc_step(
            "Parsing host-lldp-neighbor-list for remote_ports on the "
            "{} host".format(host_name))
        host = table_parser.table(
            cli.system('host-lldp-neighbor-list',
                       '--nowrap {}'.format(host_name))[1])
        host_remote_ports = table_parser.get_column(host, 'remote_port')

        for remote_port in host_remote_ports:

            LOG.tc_step("Checking LLDP remote_port to ensure it exists")
            if remote_port.lower() == 'none' or remote_port == '':
                LOG.tc_step("Port missing")
                remote_port_missing = True

    assert remote_port_missing is False, "Some remote ports are missing from 'system host-lldp-neighbor-list'"
Example #4
def test_clis():
    print(CliAuth.get_var('HTTPS'))
    cli.system('host-list')
    cli.system('host-show controller-0')
    cli.openstack('server list')
    cli.openstack('stack list')
    ceilometer_helper.get_alarms()
    keystone_helper.get_endpoints()
    cli.openstack('router list')
    cli.openstack('volume list')
    cli.openstack('image list')
Example #5
def test_host_disk_wipe_unassigned_disk():
    """
    This test attempts to run system host-disk-wipe on a node using any
    unassigned disk.

    Command format is:

    system host-disk-wipe [--confirm] <hostname or id> <disk uuid>

    Note, host-disk-wipe is only applicable to controller and compute nodes. It
    cannot be used on the rootfs disk.  It cannot be used for a disk that is
    used by a PV or has partitions used by a PV.

    Arguments:
    - None

    Test Steps:
    1.  Determine which disks are unassigned by comparing size_gib to
    available_gib in system host-disk-list
    2.  Attempt to wipe the disk
    3.  Expect it to pass

    Assumptions:
    - None
    """
    computes = system_helper.get_hosts(personality="compute",
                                       availability="available")
    controllers = system_helper.get_hosts(personality="controller",
                                          availability="available")
    hosts = controllers + computes

    found_disk = False
    for host in hosts:
        LOG.info("Query disks on host {}".format(host))
        disks = storage_helper.get_host_disks(host)
        for disk_uuid in disks:
            cmd = "host-disk-show {} {}".format(host, disk_uuid)
            rc, out = cli.system(cmd)
            size_gib = table_parser.get_value_two_col_table(
                table_parser.table(out), "size_gib")
            available_gib = table_parser.get_value_two_col_table(
                table_parser.table(out), "available_gib")
            if int(float(size_gib)) == int(float(available_gib)):
                found_disk = True
                LOG.tc_step("Attempting to wipe disk {} from host {}".format(
                    disk_uuid, host))
                cmd = 'host-disk-wipe --confirm {} {}'.format(host, disk_uuid)
                rc, out = cli.system(cmd, fail_ok=True)
                assert rc == 0, "Expected wipe disk to pass but instead failed"
                break

    if not found_disk:
        skip("No unassigned disks to run test")
Example #6
def get_hugepage_pod_file():
    """
    Fixture used to return the hugepage deployment file

        - Select the standby controller on an AIO-duplex system, otherwise the
          first hypervisor
        - Check whether 2M hugepages are configured; if not, check for 1G
          hugepages; otherwise lock the host, configure 2 x 1G hugepages and
          unlock it
        - Call modify_yaml to update the yaml file with the selected values
        - SCP the modified file to the host to deploy the hugepages pod
        - Delete the hugepages pod from the host after the test

    """
    if system_helper.is_aio_duplex():
        hostname = system_helper.get_standby_controller_name()
    else:
        hostname = system_helper.get_hypervisors()[0]
    LOG.fixture_step("Checking hugepage values on {}".format(hostname))
    proc_id = 0
    out = host_helper.get_host_memories(hostname,
                                        ('app_hp_avail_2M', 'app_hp_avail_1G'),
                                        proc_id)
    if out[proc_id][0] > 0:
        hugepage_val = "{}Mi".format(out[proc_id][0])
        hugepage_str = "hugepages-2Mi"
    elif out[proc_id][1] > 0:
        hugepage_val = "{}Gi".format(out[proc_id][1])
        hugepage_str = "hugepages-1Gi"
    else:
        hugepage_val = "{}Gi".format(2)
        cmd = "{} -1G {}".format(proc_id, 2)
        hugepage_str = "hugepages-1Gi"
        HostsToRecover.add(hostname)
        host_helper.lock_host(hostname)
        LOG.fixture_step("Configuring hugepage values {} on {}".format(
            hugepage_val, hostname))
        cli.system('host-memory-modify {} {}'.format(hostname, cmd),
                   ssh_client=None,
                   auth_info=Tenant.get('admin_platform'))
        host_helper.unlock_host(hostname)
    LOG.fixture_step("{} {} pod will be configured on {} proc id {}".format(
        hugepage_str, hugepage_val, hostname, proc_id))
    file_dir, file_name = modify_yaml("utils/test_files/",
                                      "hugepages_pod.yaml", hugepage_str,
                                      hugepage_val)
    source_path = "{}/{}".format(file_dir, file_name)
    home_dir = HostLinuxUser.get_home()
    common.scp_from_localhost_to_active_controller(source_path,
                                                   dest_path=home_dir)
    yield file_name
    LOG.fixture_step("Delete hugepages pod")
    kube_helper.delete_resources(resource_names="hugepages-pod")
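
A brief consumption sketch, assuming get_hugepage_pod_file is registered as a pytest fixture in this framework and that kubectl is available on the active controller; the test body is illustrative only:

# Hypothetical test that consumes the fixture above.
def test_hugepages_pod_deploys(get_hugepage_pod_file):
    file_name = get_hugepage_pod_file
    con_ssh = ControllerClient.get_active_controller()
    # Apply the modified deployment file that the fixture copied to the home dir
    code, output = con_ssh.exec_cmd("kubectl apply -f {}".format(file_name))
    assert code == 0, "kubectl apply failed: {}".format(output)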
Example #7
def add_routes_to_subcloud(subcloud, subcloud_table, fail_ok=False):
    LOG.debug("Add routes back to subcloud: {}".format(subcloud))
    ssh_client = ControllerClient.get_active_controller(name=subcloud)
    for host_id in subcloud_table:
        comm_args = table_parser.get_multi_values(
            subcloud_table[host_id],
            ["ifname", "network", "prefix", "gateway"])
        command = "host-route-add {} {} {} {} {}".format(
            host_id, comm_args[0][0], comm_args[1][0], comm_args[2][0],
            comm_args[3][0])
        code, output = cli.system("host-route-list {}".format(host_id))
        uuid_list = table_parser.get_values(table_parser.table(output), "uuid")
        if table_parser.get_values(subcloud_table[host_id],
                                   "uuid")[0] not in uuid_list:
            cli.system(command, ssh_client=ssh_client, fail_ok=fail_ok)
Example #8
def get_helm_overrides(field='overrides namespaces',
                       app_name='stx-openstack',
                       charts=None,
                       auth_info=Tenant.get('admin_platform'),
                       con_ssh=None):
    """
    Get helm overrides values via system helm-override-list
    Args:
        field (str):
        app_name
        charts (None|str|list|tuple):
        auth_info:
        con_ssh:

    Returns (list):

    """
    table_ = table_parser.table(
        cli.system('helm-override-list',
                   app_name,
                   ssh_client=con_ssh,
                   auth_info=auth_info)[1])

    if charts:
        table_ = table_parser.filter_table(table_, **{'chart name': charts})

    vals = table_parser.get_multi_values(table_, fields=field, evaluate=True)

    return vals
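
A short usage sketch, assuming the surrounding test framework is imported; the chart names are illustrative:

# Hypothetical call: list the override namespaces for a couple of charts of the
# default stx-openstack application.
namespaces = get_helm_overrides(field='overrides namespaces',
                                charts=('nova', 'neutron'))
LOG.info("Override namespaces per chart: {}".format(namespaces))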
Example #9
def get_app_values(app_name,
                   fields,
                   con_ssh=None,
                   auth_info=Tenant.get('admin_platform')):
    """
    Get values from system application-show
    Args:
        app_name:
        fields (str|list|tuple):
        con_ssh:
        auth_info:

    Returns:

    """
    if isinstance(fields, str):
        fields = [fields]

    table_ = table_parser.table(cli.system('application-show',
                                           app_name,
                                           ssh_client=con_ssh,
                                           auth_info=auth_info)[1],
                                combine_multiline_entry=True)
    values = table_parser.get_multi_values_two_col_table(table_, fields=fields)
    return values
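
A short usage sketch; the application name is illustrative and the fields are assumed to match 'system application-show' output:

# Hypothetical call: read two fields from 'system application-show'.
status, version = get_app_values('platform-integ-apps',
                                 fields=('status', 'app_version'))
LOG.info("platform-integ-apps status={} version={}".format(status, version))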
Example #10
def get_apps(field='status',
             application=None,
             con_ssh=None,
             auth_info=Tenant.get('admin_platform'),
             rtn_dict=False,
             **kwargs):
    """
    Get application values for given apps and fields via system application-list
    Args:
        application (str|list|tuple):
        field (str|list|tuple):
        con_ssh:
        auth_info:
        rtn_dict:
        **kwargs: extra filters other than application

    Returns (list|dict):
        list of list, or
        dict with app name(str) as key and values(list) for given fields for
        each app as value

    """
    table_ = table_parser.table(
        cli.system('application-list', ssh_client=con_ssh,
                   auth_info=auth_info)[1])
    if application:
        kwargs['application'] = application

    return table_parser.get_multi_values(table_,
                                         fields=field,
                                         rtn_dict=rtn_dict,
                                         zip_values=True,
                                         **kwargs)
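
A short usage sketch; the field and application names are assumed to match the 'system application-list' column headers:

# Hypothetical call: get status and progress for one application as zipped values.
app_vals = get_apps(field=('status', 'progress'), application='stx-openstack')
LOG.info("stx-openstack status/progress: {}".format(app_vals))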
Example #11
def test_modify_memory_when_unlocked_negative(get_host):
    """

    US51396_tc04_cannt_modify_unlocked (53 Cannot modify memory setting when unlocked using CLI)

    Attempt to modify memory when it's unlocked, and ensure it's rejected.

    Setup:
        - check that there are at least two compute nodes
        - check if the compute node is in unlocked state (TODO)

    Test Steps:
        - modify the huge page on the unlocked compute node
        - make sure it fails as expected

    Teardown:
        - Nothing

    """
    hostname = get_host

    LOG.tc_step("Attempt to modify memory of the unlocked host")
    args = "-2M 0 1G 0 {} 1".format(hostname)
    exit_code, output = cli.system('host-memory-modify', args, fail_ok=True)

    LOG.tc_step(
        "Verify host-memory-modify command rejected when host is unlocked")
    assert exit_code == 1, "system host-memory-modify is not rejected when {} is unlocked".\
        format(hostname)
Example #12
    def test_invalid_huge_page_input(self, get_host, proc, pages):
        """
        (55 Invalid inputs for number of hugepages will be rejected GUI in sysinv testplan)
        Give an invalid huge page number on a compute node and verify that the modification is rejected

        Setup:
            - check that there are at least two compute nodes

        Test Steps:
            - lock compute node
            - modify the huge page on the locked compute node
            - unlock the compute node
            - compare the huge page number with the expected huge page number

        Teardown:
            - It might be a good idea to reset the host memory to what it was before

        """
        host_to_modify = get_host

        LOG.tc_step("Lock host")
        HostsToRecover.add(host_to_modify, scope='class')
        host_helper.lock_host(host_to_modify)

        # config the page number after lock the compute node
        LOG.tc_step(
            'Attempt to modify host memory with invalid page input and ensure it is rejected'
        )
        args = "{} {} {}".format(host_to_modify, proc, pages)
        code, output = cli.system('host-memory-modify', args, fail_ok=True)

        assert 1 == code, "host-memory-modify allows invalid args: {}".format(
            args)
Example #13
def get_ifs_to_mod(host, network_type, mtu_val):
    table_ = table_parser.table(cli.system('host-if-list', '{} --nowrap'.format(host))[1])

    if_class = network_type
    network = ''
    if network_type in PLATFORM_NET_TYPES:
        if_class = 'platform'
        # keep only platform interfaces assigned to the requested network;
        # without this, the filter below would exclude every platform interface
        network = network_type

    table_ = table_parser.filter_table(table_, **{'class': if_class})
    # exclude unmatched platform interfaces from the table.
    if 'platform' == if_class:
        platform_ifs = table_parser.get_values(table_, target_header='name', **{'class': 'platform'})
        for pform_if in platform_ifs:
            if_nets = host_helper.get_host_interface_values(host=host, interface=pform_if, fields='networks')[0]
            if_nets = [if_net.strip() for if_net in if_nets.split(sep=',')]
            if network not in if_nets:
                table_ = table_parser.filter_table(table_, strict=True, exclude=True, name=pform_if)

    uses_if_names = table_parser.get_values(table_, 'name', exclude=True, **{'uses i/f': '[]'})
    non_uses_if_names = table_parser.get_values(table_, 'name', exclude=False, **{'uses i/f': '[]'})
    uses_if_first = False
    if uses_if_names:
        current_mtu = int(
            host_helper.get_host_interface_values(host, interface=uses_if_names[0], fields=['imtu'])[0])
        if current_mtu <= mtu_val:
            uses_if_first = True

    if uses_if_first:
        if_names = uses_if_names + non_uses_if_names
    else:
        if_names = non_uses_if_names + uses_if_names

    return if_names
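
A short call sketch, assuming 'mgmt' is one of the platform network types in PLATFORM_NET_TYPES; the host and MTU value are illustrative:

# Hypothetical call: collect candidate interfaces on controller-0 for an MTU
# change on the mgmt network, ordered so dependent ('uses i/f') interfaces are
# modified in a safe order relative to the interfaces they use.
if_names = get_ifs_to_mod('controller-0', network_type='mgmt', mtu_val=9000)
LOG.info("Interfaces to modify on controller-0: {}".format(if_names))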
Example #14
    def _recover_hosts(hostnames, scope):
        if system_helper.is_aio_simplex():
            LOG.fixture_step('{} Recover simplex host'.format(scope))
            host_helper.recover_simplex(fail_ok=False)
            return

        # Recover hosts for non-simplex system
        hostnames = sorted(set(hostnames))
        table_ = table_parser.table(cli.system('host-list')[1])
        table_ = table_parser.filter_table(table_, hostname=hostnames)

        # unlocked_hosts = table_parser.get_values(table_, 'hostname',
        # administrative='unlocked')
        locked_hosts = table_parser.get_values(table_,
                                               'hostname',
                                               administrative='locked')

        err_msg = []
        if locked_hosts:
            LOG.fixture_step("({}) Unlock hosts: {}".format(
                scope, locked_hosts))
            # Hypervisor state will be checked later in wait_for_hosts_ready
            # which handles platform only deployment
            res1 = host_helper.unlock_hosts(hosts=locked_hosts,
                                            fail_ok=True,
                                            check_hypervisor_up=False)
            for host in res1:
                if res1[host][0] not in [0, 4]:
                    err_msg.append(
                        "Not all host(s) unlocked successfully. Detail: "
                        "{}".format(res1))

        host_helper.wait_for_hosts_ready(hostnames)
Example #15
def get_helm_override_values(chart, namespace, app_name='stx-openstack',
                             fields=('combined_overrides',),
                             auth_info=Tenant.get('admin_platform'),
                             con_ssh=None):
    """
    Get helm-override values for given chart via system helm-override-show
    Args:
        chart (str):
        namespace (str):
        app_name (str)
        fields (str|tuple|list):
        auth_info:
        con_ssh:

    Returns (list): list of parsed yaml formatted output. e.g., list of dict,
    list of list, list of str

    """
    args = '{} {} {}'.format(app_name, chart, namespace)
    table_ = table_parser.table(
        cli.system('helm-override-show', args, ssh_client=con_ssh,
                   auth_info=auth_info)[1],
        rstrip_value=True)

    if isinstance(fields, str):
        fields = (fields,)

    values = []
    for field in fields:
        value = table_parser.get_value_two_col_table(table_, field=field,
                                                     merge_lines=False)
        # safe_load avoids the deprecated default loader of yaml.load
        values.append(yaml.safe_load('\n'.join(value)))

    return values
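
A short usage sketch; the chart and namespace names are illustrative, and the sketch assumes the combined overrides parse to a dict:

# Hypothetical call: fetch the combined overrides for one chart/namespace.
combined = get_helm_override_values(chart='nova', namespace='openstack')[0]
LOG.info("nova combined_overrides keys: {}".format(list(combined.keys())))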
Example #16
def test_system_type():
    """
    Verify the System Type can be retrieved from SysInv and is correct

    Test Steps:
        - Determine the System Type based on whether the system is CPE or not
        - Retrieve the System Type information from SysInv
        - Compare the types and verify they are the same, fail the test case
        otherwise

    Notes:
        - Covers SysInv test-cases:
            66) Query the product type on CPE system using CLI
            67) Query the product type on STD system using CLI
    """

    LOG.tc_step('Determine the real System Type of the lab')
    if system_helper.is_aio_system():
        expt_system_type = SystemType.CPE
    else:
        expt_system_type = SystemType.STANDARD

    LOG.tc_step('Get System Type from system inventory')
    table_ = table_parser.table(cli.system('show')[1])
    displayed_system_type = table_parser.get_value_two_col_table(
        table_, 'system_type')

    LOG.tc_step(
        'Verify the expected System Type is the same as that from System Inventory'
    )
    assert expt_system_type == displayed_system_type, 'Expected system_type is: {}; Displayed system type: {}.'. \
        format(expt_system_type, displayed_system_type)
Example #17
def test_system_ntp_modify():
    """
    Test that ntp servers were initially configured and can be reconfigured

    Test Steps:
        - Execute system ntp-show
        - Verify that ntpservers field contains a list of 3 ntp servers
        - Update ntp with new ntp servers
        - Lock/unlock controllers to get rid of config out of date alarm
        - After lock and unlock verify that alarms cleared
    """

    LOG.tc_step("Check 'system ntp-show' contains expected fields")
    table_ = table_parser.table(cli.system('ntp-show')[1])
    expt_sub_fields = ['uuid', 'ntpservers', 'isystem_uuid', 'created_at', 'updated_at']

    actual_fields = table_parser.get_column(table_, 'Property')
    LOG.tc_step("Actual ntp fields Names are {}".format(actual_fields))
    assert set(expt_sub_fields) <= set(actual_fields), "Some expected fields are not included in the ntp-show table."

    LOG.tc_step("Modify NTP servers using 'system ntp-modify'")
    ntp_pool = NtpPool.NTP_POOL_1
    if sorted(system_helper.get_ntp_values(fields='ntpservers')[0].split(',')) == sorted(ntp_pool.split(',')):
        ntp_pool = NtpPool.NTP_POOL_2

    system_helper.modify_ntp(ntp_servers=ntp_pool)
Example #18
def get_cert_info(cert_id, con_ssh=None):
    LOG.info('check the status of the current certificate')
    cmd = 'certificate-show ' + cert_id
    output = cli.system(cmd, ssh_client=con_ssh, fail_ok=False)[1]
    if output:
        table = table_parser.table(output)
        if table:
            actual_id = table_parser.get_value_two_col_table(table, 'uuid')
            actual_type = table_parser.get_value_two_col_table(
                table, 'certtype')
            actual_details = table_parser.get_value_two_col_table(
                table, 'details')
            actual_states = ''
            if not actual_details:
                # CGTS-9529
                LOG.fatal('No details in output of certificate-show')
                LOG.fatal(
                    'Ignore it until the known issue CGTS-9529 fixed, output:'
                    + output)
                # assert False, 'No details in output of certificate-show'
            else:
                LOG.debug('details from output of certificate-show: {}'.format(
                    actual_details))
                actual_states = eval(actual_details)
                LOG.debug('states: {}'.format(actual_states))
                return 0, actual_id, actual_type, actual_states

            LOG.info('no valid details parsed from certificate-show output')
            return 1, actual_id, actual_type, actual_states
    else:
        LOG.info('empty output from certificate-show')

    return 2, '', '', ''
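
A short usage sketch; the uuid is a placeholder:

# Hypothetical call: look up the state of an installed certificate by uuid.
code, cert_id, cert_type, states = get_cert_info('<certificate-uuid>')
if code == 0:
    LOG.info("Certificate {} ({}) states: {}".format(cert_id, cert_type, states))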
Example #19
def test_assign_rootfs_disk_to_pv():
    """
    This test attempts to create a PV with type Disk on the rootfs.  This is
    expected to fail.

    Assumptions:
    * None

    Test Steps:
    * Determine which disk is the rootfs
    * Attempt to create a PV on that disk using a PV type of Disk.

    Teardown:
    * None
    """

    computes = system_helper.get_hosts(personality="compute")
    hosts = system_helper.get_controllers() + computes

    rootfs = storage_helper.get_hosts_rootfs(hosts)

    for host in rootfs:
        uuid = rootfs[host]
        # cmd = "host-pv-add -t disk {} cgts-vg {}".format(host, uuid[0])
        cmd = "host-pv-add {} cgts-vg {}".format(host, uuid[0])
        rc, out = cli.system(cmd, fail_ok=True)
        assert rc != 0, "Expected PV creation to fail but instead succeeded"
Example #20
def test_host_disk_wipe_rootfs():
    """
    This test attempts to run system host-disk-wipe on a node using the rootfs
    disk.  Command format is:

    system host-disk-wipe [--confirm] <hostname or id> <disk uuid>

    Note, host-disk-wipe is only applicable to controller and compute nodes. It
    cannot be used on the rootfs disk.  It cannot be used for a disk that is
    used by a PV or has partitions used by a PV.

    Arguments:
    - None

    Test Steps:
    1.  Determine which is the rootfs disk
    2.  Attempt to wipe the disk
    3.  Expect it to fail for every node

    Assumptions:
    - None
    """
    computes = system_helper.get_hosts(personality="compute")
    storage = system_helper.get_hosts(personality="storage")
    hosts = system_helper.get_controllers() + computes + storage

    LOG.tc_step("Gather rootfs disks")
    rootfs = storage_helper.get_hosts_rootfs(hosts)

    for host in rootfs:
        uuid = rootfs[host]
        LOG.tc_step("Attempting to wipe {} from {}".format(uuid[0], host))
        cmd = 'host-disk-wipe --confirm {} {}'.format(host, uuid[0])
        rc, out = cli.system(cmd, fail_ok=True)
        assert rc != 0, "Expected wipe disk to fail but instead succeeded"
Example #21
def _suppress_unsuppress_sensor(sensor_name,
                                host,
                                set_suppress='False',
                                sensor_group=False):
    """main suppress/unsuppress routine."""

    # Get the uuid of the sensor to be suppressed
    res = 0
    sensor_uuid = get_sensor_uuid(sensor_name, host, sensor_group)

    # Check if the sensor is already suppressed
    sensor_showtable = get_sensor_showtable(sensor_uuid, host, sensor_group)
    sensor_suppression_value = table_parser.get_value_two_col_table(
        sensor_showtable, 'suppress')
    print('Suppression: {}'.format(sensor_suppression_value))

    if sensor_group is True:
        sysinv_action = 'host-sensorgroup-modify'
    else:
        sysinv_action = 'host-sensor-modify'

    # If not already suppressed, then suppress the sensor or sensor group
    if sensor_suppression_value != set_suppress:
        # The sensor is not suppressed/unsuppressed, so execute the action
        res, out = cli.system(sysinv_action,
                              '{} {} suppress={}'.format(
                                  host, sensor_uuid, set_suppress),
                              fail_ok=True)

    print('Result: {}'.format(res))
    return res == 0
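
Thin wrapper sketches built on the routine above, assuming get_sensor_uuid and get_sensor_showtable are defined alongside it:

# Hypothetical convenience wrappers around the main suppress/unsuppress routine.
def suppress_sensor(sensor_name, host):
    return _suppress_unsuppress_sensor(sensor_name, host, set_suppress='True')


def unsuppress_sensor(sensor_name, host):
    return _suppress_unsuppress_sensor(sensor_name, host, set_suppress='False')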
Example #22
def get_hw_compatible_hosts(hosts):
    """
    Given a list of hosts return a dict of hardware compatible ones, if any.

    Arguments:
    - Hosts (list)

    Returns:
    - Dict mapping hash to hosts
    """

    hardware = {}
    hardware_hash = {}
    for host in hosts:
        rc, out = cli.system("host-disk-list {} --nowrap".format(host))
        table_ = table_parser.table(out)
        device_nodes = table_parser.get_column(table_, "device_node")
        device_type = table_parser.get_column(table_, "device_type")
        size_gib = table_parser.get_column(table_, "size_gib")
        hardware[host] = list(zip(device_nodes, device_type, size_gib))
        LOG.info("Hardware present on host {}: {}".format(
            host, hardware[host]))
        hardware_hash[host] = hash(str(hardware[host]))
        LOG.info("Host {} has hash {}".format(host, hardware_hash[host]))

    # Create reverse lookup of hash to hosts
    hash_to_hosts = {}
    for key, value in hardware_hash.items():
        hash_to_hosts.setdefault(value, []).append(key)

    LOG.info(
        "These are the hardware compatible hosts: {}".format(hash_to_hosts))
    return hash_to_hosts
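
A short usage sketch for the helper above; the grouping criterion (identical device node, type and size per disk) follows the hash built in the function:

# Hypothetical call: group hosts by identical disk layout so a test can pick
# two hardware-compatible hosts (e.g. for clone or restore scenarios).
hosts = system_helper.get_hosts(availability="available")
hash_to_hosts = get_hw_compatible_hosts(hosts)
compatible_groups = [v for v in hash_to_hosts.values() if len(v) >= 2]
LOG.info("Hardware-compatible host groups: {}".format(compatible_groups))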
Example #23

def test_horizon_sysconfig_oam_cancel_edit(sys_config_pg):
    """
    Test OAM edit and display:

    Setups:
        - Login as Admin
        - Go to Admin > Platform > System Configuration

    Teardown:
        - Back to System Configuration Page
        - Logout

    Test Steps:
        - Check oam details display
        - Edit the OAM but do not submit
    """
    LOG.tc_step('Check OAM IP display')
    sys_config_pg.go_to_oam_ip_tab()
    oam_table = table_parser.table(cli.system('oam-show')[1])
    expt_horizon = {}
    if system_helper.get_system_values(fields='system_mode')[0] == 'simplex':
        headers_map = sys_config_pg.oam_table.SIMPLEX_OAM_MAP
    else:
        headers_map = sys_config_pg.oam_table.OAM_MAP
    for cli_header in headers_map:
        horizon_header = headers_map[cli_header]
        expt_horizon[horizon_header] = table_parser.get_value_two_col_table(
            oam_table, field=cli_header)
    table_name = sys_config_pg.oam_table.name
    sys_config_pg.check_horizon_displays(table_name=table_name,
                                         expt_horizon=expt_horizon)

    LOG.tc_step('Edit the OAM but do not submit')
    sys_config_pg.edit_oam(cancel=True)
    horizon.test_result = True
Example #24
def verify_cli(sub_auth=None, central_auth=None):
    auths = [central_auth, sub_auth]
    auths = [auth for auth in auths if auth]

    for auth in auths:
        cli.system('host-list', fail_ok=False, auth_info=auth)
        cli.fm('alarm-list', fail_ok=False, auth_info=auth)
        cli.openstack('server list --a', fail_ok=False, auth_info=auth)
        cli.openstack('image list', fail_ok=False, auth_info=auth)
        cli.openstack('volume list --a', fail_ok=False, auth_info=auth)
        cli.openstack('user list', fail_ok=False, auth_info=auth)
        cli.openstack('router list', fail_ok=False, auth_info=auth)

    if sub_auth:
        cli.openstack('stack list', fail_ok=False, auth_info=sub_auth)
        cli.openstack('alarm list', fail_ok=False, auth_info=sub_auth)
        cli.openstack('metric status', fail_ok=False, auth_info=sub_auth)
Example #25
def _test_invalid_firewall_rules(delete_file):
    """
    Verify firewall rules install with an invalid file path and an invalid file
    Test Setup:
        - SCP iptables.rules from test server to lab

    Test Steps:
        - Install custom firewall rules with invalid file path
        - Verify install failed with valid reason
        - Install custom firewall rules with invalid file
        - Verify install failed with valid reason

    """
    invalid_rules_file, invalid_rules_path, firewall_rules_path, cli_client = delete_file
    LOG.info("firewall rules path {}".format(firewall_rules_path))

    LOG.tc_step("Install firewall rules with invalid file name {}".format(
        invalid_rules_path))
    code, output = cli.system('firewall-rules-install',
                              invalid_rules_path,
                              fail_ok=True)

    LOG.tc_step("Verify Install firewall rules failed with invalid file name")
    LOG.info("Invalid firewall rules return code:[{}] & output: [{}]".format(
        code, output))

    assert 'Could not open file' in output, "Unexpected error"
    assert code == 1, "Invalid firewall rules install expected to fail, reason received {}".format(
        output)

    LOG.tc_step("Install firewall rules with invalid file")
    cmd = "cp {} {}".format(firewall_rules_path, invalid_rules_file)
    code, output = cli_client.exec_cmd(cmd)
    LOG.info("Code: {} output: {}".format(code, output))
    cli_client.exec_cmd("sed -e '3i invalid' -i {}".format(invalid_rules_file))

    LOG.tc_step("Install firewall rules with invalid file {}".format(
        invalid_rules_file))
    code, output = cli.system('firewall-rules-install',
                              invalid_rules_file,
                              fail_ok=True)
    LOG.info("Invalid firewall rules return code:[{}] & output: [{}]".format(
        code, output))

    assert 'Error in custom firewall rule file' in output, "Unexpected output"
    assert code == 1, "Invalid firewall rules exit code"
Example #26
def get_imported_load_state(load_id, load_version=None, con_ssh=None):
    table_ = table_parser.table(cli.system('load-list', ssh_client=con_ssh)[1])
    if load_version:
        table_ = table_parser.filter_table(table_, id=load_id, software_version=load_version)
    else:
        table_ = table_parser.filter_table(table_, id=load_id)

    return (table_parser.get_values(table_, 'state')).pop()
Example #27
def get_imported_load_id(load_version=None, con_ssh=None):
    table_ = table_parser.table(cli.system('load-list', ssh_client=con_ssh)[1])
    if load_version:
        table_ = table_parser.filter_table(table_, state='imported', software_version=load_version)
    else:
        table_ = table_parser.filter_table(table_, state='imported')

    return table_parser.get_values(table_, 'id')[0]
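
A short usage sketch combining the two load helpers above; the target version is a placeholder:

# Hypothetical call: find the imported load for a target software version and
# confirm it is still in the 'imported' state before starting an upgrade.
load_id = get_imported_load_id(load_version='<target-version>')
state = get_imported_load_state(load_id, load_version='<target-version>')
assert state == 'imported', "Load {} is in state '{}'".format(load_id, state)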
Example #28
def update_helm_override(chart,
                         namespace,
                         app_name='stx-openstack',
                         yaml_file=None,
                         kv_pairs=None,
                         reset_vals=False,
                         reuse_vals=False,
                         auth_info=Tenant.get('admin_platform'),
                         con_ssh=None,
                         fail_ok=False):
    """
    Update helm_override values for given chart
    Args:
        chart:
        namespace:
        app_name
        yaml_file:
        kv_pairs:
        reset_vals:
        reuse_vals:
        fail_ok
        con_ssh
        auth_info

    Returns (tuple):
        (0, <overrides>(str|list|dict))     # cmd accepted.
        (1, <std_err>)  #  system helm-override-update cmd rejected

    """
    args = '{} {} {}'.format(app_name, chart, namespace)
    if reset_vals:
        args = '--reset-values {}'.format(args)
    if reuse_vals:
        args = '--reuse-values {}'.format(args)
    if yaml_file:
        args = '--values {} {}'.format(yaml_file, args)
    if kv_pairs:
        cmd_overrides = ','.join(
            ['{}={}'.format(k, v) for k, v in kv_pairs.items()])
        args = '--set {} {}'.format(cmd_overrides, args)

    code, output = cli.system('helm-override-update',
                              args,
                              ssh_client=con_ssh,
                              fail_ok=fail_ok,
                              auth_info=auth_info)
    if code != 0:
        return 1, output

    table_ = table_parser.table(output, rstrip_value=True)
    overrides = table_parser.get_value_two_col_table(table_, 'user_overrides')
    overrides = yaml.safe_load('\n'.join(overrides))
    # yaml parsing converts str to bool, int, float; but does not convert
    # None type. Updates are not verified here since it is rather complicated
    # to verify properly.
    LOG.info("Helm-override updated : {}".format(overrides))

    return 0, overrides
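
A short usage sketch; the chart, namespace and override key shown are illustrative only:

# Hypothetical call: set a single override value for one chart/namespace and
# log the resulting user_overrides on success.
code, overrides = update_helm_override(chart='nova', namespace='openstack',
                                       kv_pairs={'conf.nova.DEFAULT.debug': True})
if code == 0:
    LOG.info("Updated user_overrides: {}".format(overrides))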
Example #29
def pre_check_upgrade():
    con_ssh = ControllerClient.get_active_controller()

    ProjVar.set_var(SOURCE_OPENRC=True)
    is_simplex = system_helper.is_aio_simplex()
    # check if all nodes are unlocked

    admin_states = system_helper.get_hosts(field='administrative')
    assert set(admin_states) == {'unlocked'}

    # check no active alarms in system

    table_ = table_parser.table(cli.system('alarm-list')[1])
    alarm_severity_list = table_parser.get_column(table_, "Severity")

    LOG.info("Alarm Severity List: {}".format(alarm_severity_list))
    assert "major" not in alarm_severity_list and \
        "critical" not in alarm_severity_list, \
        "Active alarms in system. Clear alarms before beginning upgrade"

    # check if system is patch current
    assert patching_helper.is_patch_current(
        con_ssh), "System is not patch current"

    # check if Controller-0 is the active
    active_controller = system_helper.get_active_controller_name(
        con_ssh=con_ssh)
    assert active_controller == "controller-0", "The active controller is " \
                                                "not controller-0. Make controller-0 " \
                                                "active before starting upgrade. Current " \
                                                "active controller is {}".format(active_controller)

    # check if upgrade version is supported
    current_version = system_helper.get_sw_version()
    upgrade_version = UpgradeVars.get_upgrade_var('upgrade_version')
    backup_dest_path = BackupVars.get_backup_var('BACKUP_DEST_PATH')

    if upgrade_version is None:
        upgrade_version = [
            u[1] for u in SUPPORTED_UPGRADES if u[0] == current_version
        ][0]
        UpgradeVars.set_upgrade_var(upgrade_version=upgrade_version)
        UpgradeVars.set_upgrade_var(tis_build_dir=BuildServerPath.
                                    LATEST_HOST_BUILD_PATHS[upgrade_version])
        UpgradeVars.set_upgrade_var(
            patch_dir=BuildServerPath.PATCH_DIR_PATHS[upgrade_version])
    LOG.info("Current version = {}; Upgrade version = {}".format(
        current_version, upgrade_version))

    if upgrade_version == "16.10":
        UpgradeVars.set_upgrade_var(orchestration_after=None)

    assert [current_version, upgrade_version] in SUPPORTED_UPGRADES, \
        "Upgrade from {} to {} is not supported".format(current_version,
                                                        upgrade_version)

    if is_simplex:
        assert backup_dest_path is not None, "Simplex upgrade needs a backup destination path; please add " \
                                             "--backup_path=< >"
Example #30
def get_cli_timestamps(vol_id):
    table_ = table_parser.table(cli.system('show')[1])
    sysinv_timestamp = table_parser.get_value_two_col_table(table_, 'created_at')

    table_ = table_parser.table(cli.openstack('volume show', vol_id, auth_info=Tenant.get('admin'))[1])
    openstack_timestamp = table_parser.get_value_two_col_table(table_, 'created_at')

    return sysinv_timestamp, openstack_timestamp
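
A short usage sketch; the volume uuid is a placeholder:

# Hypothetical call: compare the sysinv 'created_at' timestamp of the system
# with the 'created_at' of an openstack volume.
sysinv_ts, openstack_ts = get_cli_timestamps('<volume-uuid>')
LOG.info("sysinv created_at: {}; volume created_at: {}".format(sysinv_ts,
                                                               openstack_ts))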