Example #1
def get_ifs_to_mod(host, network_type, mtu_val):
    table_ = table_parser.table(cli.system('host-if-list', '{} --nowrap'.format(host))[1])

    if_class = network_type
    network = ''
    if network_type in PLATFORM_NET_TYPES:
        if_class = 'platform'
        network = network_type

    table_ = table_parser.filter_table(table_, **{'class': if_class})
    # Exclude platform interfaces that are not assigned to the requested network.
    if 'platform' == if_class:
        platform_ifs = table_parser.get_values(table_, target_header='name', **{'class': 'platform'})
        for pform_if in platform_ifs:
            if_nets = host_helper.get_host_interface_values(host=host, interface=pform_if, fields='networks')[0]
            if_nets = [if_net.strip() for if_net in if_nets.split(sep=',')]
            if network not in if_nets:
                table_ = table_parser.filter_table(table_, strict=True, exclude=True, name=pform_if)

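    # Split interfaces into those that use other interfaces ('uses i/f'
    # column non-empty, e.g. vlan or bond interfaces) and standalone ones.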
    uses_if_names = table_parser.get_values(table_, 'name', exclude=True, **{'uses i/f': '[]'})
    non_uses_if_names = table_parser.get_values(table_, 'name', exclude=False, **{'uses i/f': '[]'})
    uses_if_first = False
    if uses_if_names:
        current_mtu = int(
            host_helper.get_host_interface_values(host, interface=uses_if_names[0], fields=['imtu'])[0])
        if current_mtu <= mtu_val:
            uses_if_first = True

    if uses_if_first:
        if_names = uses_if_names + non_uses_if_names
    else:
        if_names = non_uses_if_names + uses_if_names

    return if_names
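A minimal usage sketch, assuming the surrounding StarlingX test helpers are importable and a lab session is configured; the hostname, network type, and MTU value are illustrative:

# Hypothetical: order the 'mgmt' interfaces on controller-0 for an MTU
# change to 9000. Interfaces that use other interfaces come first when
# the MTU is being increased, and last otherwise.
if_names = get_ifs_to_mod('controller-0', 'mgmt', 9000)
for if_name in if_names:
    LOG.info("Would modify MTU of {} on controller-0".format(if_name))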
Example #2
def check_device_list_against_pci_list(lspci_list_info, sysinv_device_list_tab):
    """
    Checks the host pci info against the output of cli system host-device-list
    Args:
        lspci_list_info: host's pci co-processor list from lspci command
        sysinv_device_list_tab: pci list from cli system host-device-list

    Returns:

    """

    LOG.info("Checking all devices are included in the list")
    sysinv_device_list_tab = table_parser.filter_table(sysinv_device_list_tab, **{'class id': DevClassID.QAT_VF})

    assert len(lspci_list_info) == len(sysinv_device_list_tab['values']), \
        "host devices list:{} and pci list:{} mismatch".format(sysinv_device_list_tab['values'], lspci_list_info)

    # Check that the PCI attribute values are identical
    for pci in lspci_list_info:
        sysinv_tab = table_parser.filter_table(sysinv_device_list_tab, **{'name': pci['pci_name']})
        assert pci['vendor_name'] == table_parser.get_column(sysinv_tab, 'vendor name')[0]
        assert pci['vendor_id'] == table_parser.get_column(sysinv_tab, 'vendor id')[0]
        assert pci['device_id'] == table_parser.get_column(sysinv_tab, 'device id')[0]
        assert pci['class_id'] == table_parser.get_column(sysinv_tab, 'class id')[0]
        assert pci['pci_address'] == table_parser.get_column(sysinv_tab, 'address')[0]

    LOG.info("All host devices are listed")
Example #3
def get_imported_load_state(load_id, load_version=None, con_ssh=None):
    table_ = table_parser.table(cli.system('load-list', ssh_client=con_ssh)[1])
    if load_version:
        table_ = table_parser.filter_table(table_, id=load_id, software_version=load_version)
    else:
        table_ = table_parser.filter_table(table_, id=load_id)

    return (table_parser.get_values(table_, 'state')).pop()
Example #4
def get_imported_load_id(load_version=None, con_ssh=None):
    table_ = table_parser.table(cli.system('load-list', ssh_client=con_ssh)[1])
    if load_version:
        table_ = table_parser.filter_table(table_, state='imported', software_version=load_version)
    else:
        table_ = table_parser.filter_table(table_, state='imported')

    return table_parser.get_values(table_, 'id')[0]
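The two load helpers above pair naturally; a small sketch (the version string is illustrative):

# Hypothetical: locate the imported load and confirm its state.
load_id = get_imported_load_id(load_version='20.06')
assert get_imported_load_state(load_id, load_version='20.06') == 'imported'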
Example #5
    def _recover_hosts(hostnames, scope):
        if system_helper.is_aio_simplex():
            LOG.fixture_step('{} Recover simplex host'.format(scope))
            host_helper.recover_simplex(fail_ok=False)
            return

        # Recover hosts for non-simplex system
        hostnames = sorted(set(hostnames))
        table_ = table_parser.table(cli.system('host-list')[1])
        table_ = table_parser.filter_table(table_, hostname=hostnames)

        # unlocked_hosts = table_parser.get_values(table_, 'hostname',
        # administrative='unlocked')
        locked_hosts = table_parser.get_values(table_,
                                               'hostname',
                                               administrative='locked')

        err_msg = []
        if locked_hosts:
            LOG.fixture_step("({}) Unlock hosts: {}".format(
                scope, locked_hosts))
            # Hypervisor state will be checked later in wait_for_hosts_ready
            # which handles platform only deployment
            res1 = host_helper.unlock_hosts(hosts=locked_hosts,
                                            fail_ok=True,
                                            check_hypervisor_up=False)
            for host in res1:
                if res1[host][0] not in [0, 4]:
                    err_msg.append(
                        "Not all host(s) unlocked successfully. Detail: "
                        "{}".format(res1))

        host_helper.wait_for_hosts_ready(hostnames)
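For context, a hedged sketch of how such an inner helper is typically wired up as a pytest fixture; the fixture name and wiring are assumptions, not the framework's actual code:

from pytest import fixture

@fixture(scope='function')
def hosts_recover_func(request):
    hostnames = []

    def recover():
        # Recover any hosts the test registered during its run.
        _recover_hosts(hostnames, 'function')

    request.addfinalizer(recover)
    return hostnames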
Example #6
def get_helm_overrides(field='overrides namespaces',
                       app_name='stx-openstack',
                       charts=None,
                       auth_info=Tenant.get('admin_platform'),
                       con_ssh=None):
    """
    Get helm overrides values via system helm-override-list
    Args:
        field (str):
        app_name
        charts (None|str|list|tuple):
        auth_info:
        con_ssh:

    Returns (list):

    """
    table_ = table_parser.table(
        cli.system('helm-override-list',
                   app_name,
                   ssh_client=con_ssh,
                   auth_info=auth_info)[1])

    if charts:
        table_ = table_parser.filter_table(table_, **{'chart name': charts})

    vals = table_parser.get_multi_values(table_, fields=field, evaluate=True)

    return vals
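A usage sketch; the chart name is illustrative:

# Hypothetical: override namespaces for the nova chart of stx-openstack.
# With evaluate=True each cell is parsed into Python objects, so items
# are already lists such as ['openstack'] rather than raw strings.
namespaces = get_helm_overrides(field='overrides namespaces', charts='nova')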
Example #7
def get_upgrade_state(con_ssh=None):

    output = cli.system('upgrade-show', ssh_client=con_ssh)[1]

    if ("+" and "-" and "|") in output:
        table_ = table_parser.table(output)
        table_ = table_parser.filter_table(table_, Property="state")
        return table_parser.get_column(table_, "Value")
    else:
        return output
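Since the function returns either a parsed column or the raw CLI output, callers need to branch on the type; a minimal sketch:

state = get_upgrade_state()
if isinstance(state, list):
    # Parsed from the table's 'state' row.
    LOG.info("Upgrade state: {}".format(state[0]))
else:
    # Raw output, e.g. an error when no upgrade is in progress.
    LOG.info("upgrade-show output: {}".format(state))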
Example #8
def get_hosts_upgrade_target_release(hostnames, con_ssh=None):
    """
    Gets the target release of upgrade host(s)
    Args:
        hostnames(str/list):  specifies the host or list of hosts
        con_ssh:

    Returns:  list of target releases.

    """
    if isinstance(hostnames, str):
        hostnames = [hostnames]

    table_ = table_parser.table(cli.system('host-upgrade-list', ssh_client=con_ssh)[1])
    table_ = table_parser.filter_table(table_, hostname=hostnames)
    return table_parser.get_column(table_, "target_release")
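A usage sketch; the host names are illustrative:

# Accepts a single host name or a list; returns one target release per host.
targets = get_hosts_upgrade_target_release(['controller-0', 'controller-1'])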
Example #9
def get_hosts_upgrade_running_release(hostnames, con_ssh=None):
    """
    Gets the running_release of host(s)
    Args:
        hostnames (str/list): specifies the host or list of hosts
        con_ssh:

    Returns: list of running release ids.

    """
    if isinstance(hostnames, str):
        hostnames = [hostnames]

    table_ = table_parser.table(cli.system('host-upgrade-list', ssh_client=con_ssh)[1])
    table_ = table_parser.filter_table(table_, hostname=hostnames)
    return table_parser.get_column(table_, "running_release")
Example #10
def get_system_active_controller():
    global con_ssh
    if con_ssh is None:
        con_ssh = ControllerClient.get_active_controller()

    cmd = "source /etc/platform/openrc; system servicegroup-list"
    table_ = table_parser.table(con_ssh.exec_cmd(cmd)[1])
    table_ = table_parser.filter_table(
        table_, service_group_name='controller-services')
    controllers = table_parser.get_values(table_,
                                          'hostname',
                                          state='active',
                                          strict=False)
    LOG.debug(" Active controller(s): {}".format(controllers))
    if isinstance(controllers, str):
        controllers = [controllers]

    return controllers
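A usage sketch, assuming the module-level con_ssh global is either initialized elsewhere or left as None so the helper fetches the active controller client itself:

active = get_system_active_controller()
# Normally exactly one controller hosts 'controller-services' in active state.
assert len(active) == 1, "Unexpected active controller(s): {}".format(active)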
Example #11
def get_images(long=False,
               images=None,
               field='id',
               auth_info=Tenant.get('admin'),
               con_ssh=None,
               strict=True,
               exclude=False,
               **kwargs):
    """
    Get a list of image id(s) that matches the criteria
    Args:
        long (bool)
        images (str|list): ids of images to filter from
        field(str|list|tuple): id or name
        auth_info (dict):
        con_ssh (SSHClient):
        strict (bool): match full string or substring for the value(s)
            given in kwargs. Only applicable if kwargs key-val pair(s)
            are provided.
        exclude (bool): whether to exclude items containing the
            string/pattern in kwargs, e.g., search for images that don't
            contain 'raw'
        **kwargs: header-value pair(s) to filter out images from given
            image list, e.g., Status='active', Name='centos'

    Returns (list): list of image ids

    """
    args = '--long' if long else ''
    table_ = table_parser.table(
        cli.openstack('image list',
                      args,
                      ssh_client=con_ssh,
                      auth_info=auth_info)[1])
    if images:
        table_ = table_parser.filter_table(table_, ID=images)

    return table_parser.get_multi_values(table_,
                                         field,
                                         strict=strict,
                                         exclude=exclude,
                                         **kwargs)
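A usage sketch reusing the filter examples from the docstring:

# Hypothetical: ids of active images whose name contains 'centos'.
image_ids = get_images(field='id', strict=False, Status='active',
                       Name='centos')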
Example #12
def _wait_for_upgrade_data_migration_complete(timeout=1800, check_interval=60, auth_info=Tenant.get('admin_platform'),
                                              fail_ok=False, con_ssh=None):
    """
    Waits until upgrade data migration is complete or fail
    Args:
        timeout (int): MAX seconds to wait for data migration to complete
        check_interval (int): seconds to wait between checks
        fail_ok (bool): if True, return an error code instead of raising
        con_ssh (SSHClient):
        auth_info (dict):

    Returns (tuple):
        (0, "Upgrade data migration complete.")
        (1, "Upgrade dat migration failed. Applicable only if ail_ok")
        (2, "Upgrade data migration timeout out before complete. Applicable only if fail_ok")
        (3, "Timeout waiting the Host upgrade data migration to complete. Applicable if fail_ok ")

    """

    endtime = time.time() + timeout
    while time.time() < endtime:
        upgrade_progress_tab = table_parser.table(
            cli.system('upgrade-show', ssh_client=con_ssh, auth_info=auth_info)[1])
        upgrade_progress_tab = table_parser.filter_table(upgrade_progress_tab, Property="state")
        if "data-migration-complete" in table_parser.get_column(upgrade_progress_tab, 'Value'):
            LOG.info("Upgrade data migration is complete")
            return 0, "Upgrade data migration is complete"
        elif "data-migration-failed" in table_parser.get_column(upgrade_progress_tab, 'Value'):
            err_msg = "Host Upgrade data migration failed."
            LOG.warning(err_msg)
            if fail_ok:
                return 1, err_msg
            else:
                raise exceptions.HostError(err_msg)

        time.sleep(check_interval)

    err_msg = "Timed out waiting for upgrade data migration to complete state"
    if fail_ok:
        LOG.warning(err_msg)
        return 3, err_msg
    else:
        raise exceptions.HostError(err_msg)
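A usage sketch with fail_ok set so the caller can react instead of getting an exception; the timeout is illustrative:

code, msg = _wait_for_upgrade_data_migration_complete(timeout=3600,
                                                      fail_ok=True)
if code != 0:
    LOG.warning("Data migration did not complete: {}".format(msg))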
Example #13
def wait_for_delete_imported_load(load_id, timeout=120, check_interval=5, fail_ok=False, con_ssh=None,
                                  auth_info=Tenant.get('admin_platform')):
    LOG.info("Waiting for imported load  {} to be deleted from the load-list ".format(load_id))
    end_time = time.time() + timeout
    while time.time() < end_time:
        table_ = table_parser.table(cli.system('load-list', ssh_client=con_ssh, auth_info=auth_info)[1])

        table_ = table_parser.filter_table(table_, **{'id': load_id})
        if len(table_parser.get_values(table_, 'id')) == 0:
            return True
        else:
            if 'deleting' in table_parser.get_column(table_, 'state'):
                cli.system('load-delete', load_id, ssh_client=con_ssh, fail_ok=True)
        time.sleep(check_interval)

    err_msg = "Timed out waiting for load {} to get deleted".format(load_id)
    if fail_ok:
        LOG.warning(err_msg)
        return False
    raise exceptions.TimeoutException(err_msg)
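A usage sketch chaining the load helpers defined earlier; the timeout is illustrative:

# Hypothetical: delete the imported load and wait for it to disappear.
load_id = get_imported_load_id()
cli.system('load-delete', load_id, fail_ok=True)
wait_for_delete_imported_load(load_id, timeout=300, fail_ok=False)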
Example #14
    def _recover_hosts(hostnames, scope):
        if system_helper.is_aio_simplex():
            LOG.fixture_step('{} Recover simplex host'.format(scope))
            host_helper.recover_simplex(fail_ok=False)
            return

        # Recover hosts for non-simplex system
        hostnames = sorted(set(hostnames))
        table_ = table_parser.table(cli.system('host-list')[1])
        table_ = table_parser.filter_table(table_, hostname=hostnames)

        # unlocked_hosts = table_parser.get_values(table_, 'hostname', administrative='unlocked')
        locked_hosts = table_parser.get_values(table_,
                                               'hostname',
                                               administrative='locked')

        err_msg = []
        if locked_hosts:
            LOG.fixture_step("({}) Unlock hosts: {}".format(
                scope, locked_hosts))
            # Hypervisor state will be checked later in wait_for_hosts_ready which handles platform only deployment
            res1 = host_helper.unlock_hosts(hosts=locked_hosts,
                                            fail_ok=True,
                                            check_hypervisor_up=False)
            for host in res1:
                if res1[host][0] not in [0, 4]:
                    err_msg.append(
                        "Not all host(s) unlocked successfully. Detail: {}".
                        format(res1))
        #
        # if unlocked_hosts:
        #     LOG.fixture_step("({}) Wait for hosts to becomes available or degraded: {}".format(scope, unlocked_hosts))
        #     res2 = host_helper.wait_for_hosts_states(unlocked_hosts, timeout=HostTimeout.REBOOT, check_interval=10,
        #                                              fail_ok=True, availability=['available'])
        #     if not res2:
        #         err_msg.append("Somtable_ = table_parser.table(e host(s) from {} are not available.".format(unlocked_hosts))

        host_helper.wait_for_hosts_ready(hostnames)
Example #15
def get_nodes(hosts=None,
              status=None,
              field='STATUS',
              exclude=False,
              con_ssh=None,
              fail_ok=False):
    """
    Get nodes values via 'kubectl get nodes'
    Args:
        hosts (None|str|list|tuple): table filter
        status (None|str|list|tuple): table filter
        field (str|list|tuple): any header of the nodes table
        exclude (bool): whether to exclude rows with given criteria
        con_ssh:
        fail_ok:

    Returns (None|list): None if cmd failed.

    """
    code, output = exec_kube_cmd('get',
                                 args='nodes',
                                 con_ssh=con_ssh,
                                 fail_ok=fail_ok)
    if code > 0:
        return None

    table_ = table_parser.table_kube(output)
    if hosts or status:
        table_ = table_parser.filter_table(table_,
                                           exclude=exclude,
                                           **{
                                               'NAME': hosts,
                                               'STATUS': status
                                           })

    return table_parser.get_multi_values(table_, field)
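A usage sketch; the status filter follows kubectl's STATUS column:

# Hypothetical: names of nodes that are not Ready. get_nodes returns
# None when the kubectl command fails, which the truthiness check covers.
not_ready = get_nodes(status='Ready', field='NAME', exclude=True,
                      fail_ok=True)
if not_ready:
    LOG.warning("Nodes not Ready: {}".format(not_ready))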
Example #16
def test_vm_numa_node_settings(vcpus, numa_nodes, numa_node0, numa_node1,
                               no_simplex, check_numa_num):
    """
    Test NUMA nodes settings in flavor extra specs are successfully applied to a vm

    Args:
        vcpus (int): Number of vcpus to set when creating flavor
        numa_nodes (int): Number of NUMA nodes to set in flavor extra specs
        numa_node0 (int): node.0 value in flavor extra specs
        numa_node1 (int|None): node.1 value in flavor extra specs
        no_simplex: module fixture to skip simplex systems
        check_numa_num (int): number of host processors (module fixture)

    Test Steps:
        - Create a flavor with given number of vcpus specified
        - Add numa_nodes related extra specs
        - Boot a vm with flavor
        - Run vm-topology
        - Verify vcpus, numa nodes, cpulist for specific vm reflects the settings in flavor
        - Ensure that all virtual NICs are associated with guest virtual numa node 0 (tests TC5069)

    Teardown:
        - Delete created vm, volume, and flavor

    """
    if check_numa_num < numa_nodes:
        skip("Number of processors - {} is less than required numa nodes - {}".
             format(check_numa_num, numa_nodes))

    LOG.tc_step("Create flavor with {} vcpus".format(vcpus))
    flavor = nova_helper.create_flavor('numa_vm', vcpus=vcpus)[1]
    ResourceCleanup.add('flavor', flavor, scope='function')

    extra_specs = {
        FlavorSpec.CPU_POLICY: 'dedicated',
        FlavorSpec.NUMA_NODES: numa_nodes,
        FlavorSpec.NUMA_0: numa_node0
    }
    if numa_node1 is not None:
        extra_specs[FlavorSpec.NUMA_1] = numa_node1

    LOG.tc_step("Set following extra specs for flavor {}: {}.".format(
        extra_specs, flavor))
    nova_helper.set_flavor(flavor, **extra_specs)

    LOG.tc_step("Boot vm with flavor {}.".format(flavor))
    vm_id = vm_helper.boot_vm(flavor=flavor, cleanup='function')[1]

    LOG.tc_step("Verify cpu info for vm {} via vm-topology.".format(vm_id))
    nova_tab, libvirt_tab = system_helper.get_vm_topology_tables(
        'servers', 'libvirt')

    # Filter out the line for vm under test
    nova_tab = table_parser.filter_table(nova_tab, ID=vm_id)
    libvirt_tab = table_parser.filter_table(libvirt_tab, uuid=vm_id)

    instance_topology = table_parser.get_column(nova_tab,
                                                'instance_topology')[0]
    cpulist = table_parser.get_column(libvirt_tab, 'cpulist')[0]
    if '-' in cpulist:
        cpulist = cpulist.split(sep='-')
        cpulist_len = int(cpulist[1]) - int(cpulist[0]) + 1
    else:
        cpulist_len = len(cpulist.split(sep=','))
    vcpus_libvirt = int(table_parser.get_column(libvirt_tab, 'vcpus')[0])
    nodelist = table_parser.get_column(libvirt_tab, 'nodelist')[0]

    if isinstance(instance_topology, str):
        instance_topology = [instance_topology]

    # Each numa node has an entry for the given instance, so the number of
    # entries should equal the number of numa nodes set for the vm
    assert numa_nodes == len(instance_topology), \
        "Number of numa node entries for vm {} is different than number of NUMA nodes set in flavor".format(vm_id)

    expected_node_vals = [
        int(val) for val in [numa_node0, numa_node1] if val is not None
    ]
    actual_node_vals = []
    for actual_node_info in instance_topology:
        actual_node_val = int(
            re.findall(InstanceTopology.NODE, actual_node_info)[0])
        actual_node_vals.append(actual_node_val)

    assert expected_node_vals == actual_node_vals, \
        "Individual NUMA node value(s) for vm {} is different than numa_node setting in flavor".format(vm_id)

    assert vcpus == vcpus_libvirt, \
        "Number of vcpus for vm {} in libvirt view is different than what's set in flavor.".format(vm_id)

    assert vcpus == cpulist_len, \
        "Number of entries in cpulist for vm {} in libvirt view is " \
        "different than number of vcpus set in flavor".format(vm_id)

    if '-' in nodelist:
        nodelist = nodelist.split(sep='-')
        nodelist_len = int(nodelist[1]) - int(nodelist[0]) + 1
    else:
        nodelist_len = 1 if nodelist else 0

    assert numa_nodes == nodelist_len, \
        "nodelist for vm {} in libvirt view does not match number of numa nodes set in flavor".format(vm_id)

    if system_helper.is_avs():
        # TC5069
        LOG.tc_step(
            "Check via vshell that all vNICs are associated with the host NUMA node that guest numa0 maps to"
        )
        host = vm_helper.get_vm_host(vm_id)
        actual_ports = network_helper.get_ports(vm_id)
        with host_helper.ssh_to_host(host) as compute_ssh:
            for port_id in actual_ports:
                ports_tab = table_parser.table(
                    compute_ssh.exec_cmd("vshell port-show {}".format(port_id),
                                         fail_ok=False)[1])
                socket_id = int(
                    table_parser.get_value_two_col_table(ports_tab,
                                                         field='socket-id'))
                assert socket_id == numa_node0, "NIC is not associated with numa-node0"
Example #17
def get_resources(field='NAME',
                  namespace=None,
                  all_namespaces=None,
                  resource_names=None,
                  resource_type='pod',
                  labels=None,
                  field_selectors=None,
                  con_ssh=None,
                  fail_ok=False,
                  grep=None,
                  exclude=False,
                  strict=True,
                  **kwargs):
    """
    Get resources values for single resource type via kubectl get
    Args:
        field (str|tuple|list)
        namespace (None|str): e.g., kube-system, openstack, default.
        all_namespaces (bool|None): used only when namespace is unspecified
        resource_names (str|None|list|tuple): e.g., calico-typha
        resource_type (str): e.g., "deployments.apps", "pod", "service"
        labels (dict|str|list|tuple): used only when resource_names are
            unspecified
        field_selectors (dict|str|list|tuple): used only when resource_names
            are unspecified
        con_ssh:
        fail_ok:
        grep (str|None): grep on cmd output
        exclude
        strict
        **kwargs: table filters for post processing return values

    Returns (list):
        one entry per row with the requested field value(s); when multiple
        fields are requested, each row's values are zipped together,
        e.g., [('cinder-api', 'Running'), ...]

    """
    name_filter = None
    if resource_names and ((all_namespaces and not namespace)
                           or field_selectors or labels):
        name_filter = {'name': resource_names}
        resource_names = None

    code, tables = __get_resource_tables(namespace=namespace,
                                         all_namespaces=all_namespaces,
                                         resource_types=resource_type,
                                         resource_names=resource_names,
                                         labels=labels,
                                         field_selectors=field_selectors,
                                         con_ssh=con_ssh,
                                         fail_ok=fail_ok,
                                         grep=grep)
    if code > 0:
        output = tables
        if 'NAME ' not in output:  # no resource returned
            return []

        output = output.split('\nError from server')[0]
        tables = table_parser.tables_kube(output)

    final_table = tables[0]
    if len(tables) > 1:
        combined_values = final_table['values']
        row_count = len(combined_values)
        for table_ in tables[1:]:
            table_values = table_['values']
            combined_values = [
                combined_values[i] + table_values[i]
                for i in range(row_count)
            ]
        final_table['values'] = combined_values

    if name_filter:
        final_table = table_parser.filter_table(final_table, **name_filter)

    return table_parser.get_multi_values(final_table,
                                         fields=field,
                                         zip_values=True,
                                         strict=strict,
                                         exclude=exclude,
                                         **kwargs)
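A usage sketch; the namespace and label are illustrative:

# Hypothetical: name and status of the openstack pods labelled
# application=nova. With multiple fields, values are paired per row,
# e.g. [('nova-api-...', 'Running'), ...].
pods = get_resources(field=('NAME', 'STATUS'), namespace='openstack',
                     resource_type='pod', labels={'application': 'nova'})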
Example #18
def test_system_alarms_and_events_on_lock_unlock_compute(no_simplex):
    """
    Verify fm alarm-show command

    Test Steps:
    - Delete active alarms
    - Lock a host
    - Check active alarm generated for host lock
    - Check relative values are the same in fm alarm-list and fm alarm-show
    <uuid>
    - Check host lock 'set' event logged via fm event-list
    - Unlock host
    - Check active alarms cleared via fm alarm-list
    - Check host lock 'clear' event logged via fm event-list
    """

    # Remove following step because it's unnecessary and fails the test when
    # alarm is re-generated
    # # Clear the alarms currently present
    # LOG.tc_step("Clear the alarms table")
    # system_helper.delete_alarms()

    # Raise a new alarm by locking a compute node
    # Get the compute
    compute_host = host_helper.get_up_hypervisors()[0]
    if compute_host == system_helper.get_active_controller_name():
        compute_host = system_helper.get_standby_controller_name()
        if not compute_host:
            skip('Standby controller unavailable')

    LOG.tc_step("Lock a nova hypervisor host {}".format(compute_host))
    pre_lock_time = common.get_date_in_format()
    HostsToRecover.add(compute_host)
    host_helper.lock_host(compute_host)

    LOG.tc_step("Check host lock alarm is generated")
    post_lock_alarms = \
        system_helper.wait_for_alarm(field='UUID', entity_id=compute_host,
                                     reason=compute_host,
                                     alarm_id=EventLogID.HOST_LOCK,
                                     strict=False,
                                     fail_ok=False)[1]

    LOG.tc_step(
        "Check related fields in fm alarm-list and fm alarm-show are of the "
        "same values")
    post_lock_alarms_tab = system_helper.get_alarms_table(uuid=True)

    alarms_l = ['Alarm ID', 'Entity ID', 'Severity', 'Reason Text']
    alarms_s = ['alarm_id', 'entity_instance_id', 'severity', 'reason_text']

    # Only 1 alarm since we are now checking the specific alarm ID
    for post_alarm in post_lock_alarms:
        LOG.tc_step(
            "Verify {} for alarm {} in alarm-list are in sync with "
            "alarm-show".format(
                alarms_l, post_alarm))

        alarm_show_tab = table_parser.table(cli.fm('alarm-show', post_alarm)[1])
        alarm_list_tab = table_parser.filter_table(post_lock_alarms_tab,
                                                   UUID=post_alarm)

        for i in range(len(alarms_l)):
            alarm_l_val = table_parser.get_column(alarm_list_tab,
                                                  alarms_l[i])[0]
            alarm_s_val = table_parser.get_value_two_col_table(alarm_show_tab,
                                                               alarms_s[i])

            assert alarm_l_val == alarm_s_val, \
                "{} value in alarm-list: {} is different than alarm-show: " \
                "{}".format(alarms_l[i], alarm_l_val, alarm_s_val)

    LOG.tc_step("Check host lock is logged via fm event-list")
    system_helper.wait_for_events(entity_instance_id=compute_host,
                                  start=pre_lock_time, timeout=60,
                                  event_log_id=EventLogID.HOST_LOCK,
                                  fail_ok=False, **{'state': 'set'})

    pre_unlock_time = common.get_date_in_format()
    LOG.tc_step("Unlock {}".format(compute_host))
    host_helper.unlock_host(compute_host)

    LOG.tc_step("Check host lock active alarm cleared")
    alarm_sets = [(EventLogID.HOST_LOCK, compute_host)]
    system_helper.wait_for_alarms_gone(alarm_sets, fail_ok=False)

    LOG.tc_step("Check host lock clear event logged")
    system_helper.wait_for_events(event_log_id=EventLogID.HOST_LOCK,
                                  start=pre_unlock_time,
                                  entity_instance_id=compute_host,
                                  fail_ok=False, **{'state': 'clear'})
Example #19
def test_dc_fault_scenario(subcloud_to_test):
    """
    Test Fault Scenario on Distributed Cloud
    Args:
        subcloud_to_test (str): module fixture

    Setup:
        - Make sure there is consistency between alarm summary on
        Central Cloud and on subclouds

    Test Steps:
        - Make subcloud offline (e.g., delete route)
        Step1:
        - Ensure subcloud shows offline
        Step2:
        - Raise alarm on subcloud
        - Ensure relative alarm raised on subcloud
        - Ensure system alarm-summary on subcloud has changed
        - Ensure dcmanager alarm summary on system controller has not changed
        Step3:
        - Resume connectivity to subcloud (e.g., add route back)
        - Ensure subcloud shows online and in-sync
        - Ensure system alarm-summary on subcloud matches dcmanager alarm
        summary on system controller
        Step4:
        - Clean alarm on subcloud
        - Ensure relative alarm cleared on subcloud
        - Ensure system alarm-summary on subcloud matches dcmanager alarm summary on system
        controller
    """
    ssh_central = ControllerClient.get_active_controller(name="RegionOne")
    ssh_subcloud = ControllerClient.get_active_controller(
        name=subcloud_to_test)
    subcloud_table = {}
    try:
        code, output = cli.dcmanager(
            "subcloud show {}".format(subcloud_to_test),
            ssh_client=ssh_central)
        gateway = table_parser.get_value_two_col_table(
            table_parser.table(output), "management_gateway_ip")
        code, hosts_raw = cli.system("host-list", ssh_client=ssh_subcloud)
        hosts_id = table_parser.get_values(table_parser.table(hosts_raw), 'id')
        for host_id in hosts_id:
            code, route_raw = cli.system("host-route-list {}".format(host_id),
                                         ssh_client=ssh_subcloud)
            route_table = table_parser.filter_table(
                table_parser.table(route_raw), **{'gateway': gateway})
            subcloud_table[host_id] = route_table

        LOG.tc_step(
            "Delete route for subcloud: {} and wait for it to go offline.".
            format(subcloud_to_test))
        ssh_subcloud = ControllerClient.get_active_controller(
            name=subcloud_to_test)
        for host_id in subcloud_table:
            command = "host-route-delete {}".format(
                table_parser.get_values(subcloud_table[host_id], "uuid")[0])
            cli.system(command, ssh_client=ssh_subcloud)

        dc_helper.wait_for_subcloud_status(subcloud_to_test,
                                           avail=SubcloudStatus.AVAIL_OFFLINE,
                                           timeout=DCTimeout.SYNC,
                                           con_ssh=ssh_central)

        LOG.tc_step("Raise alarm on subcloud: {}".format(subcloud_to_test))
        ssh_subcloud = ControllerClient.get_active_controller(
            name=subcloud_to_test)
        code_sub_before, output_sub_before = cli.fm("alarm-summary",
                                                    ssh_client=ssh_subcloud)
        code_central_before, output_central_before = cli.dcmanager(
            'alarm summary')
        ssh_subcloud.exec_cmd(
            "fmClientCli -c \"### ###300.005###clear###system.vm###host="
            "testhost-0### ###critical### ###processing-error###cpu-cycles-limit-exceeded"
            "### ###True###True###'\"",
            fail_ok=False)
        LOG.info("Ensure relative alarm was raised at subcloud: {}".format(
            subcloud_to_test))
        system_helper.wait_for_alarm(
            alarm_id=EventLogID.PROVIDER_NETWORK_FAILURE, con_ssh=ssh_subcloud)
        code_sub_after, output_sub_after = cli.fm("alarm-summary",
                                                  ssh_client=ssh_subcloud)
        code_central_after, output_central_after = cli.dcmanager(
            'alarm summary')
        LOG.info(
            "Ensure fm alarm summary on subcloud: {} has changed but dcmanager alarm"
            "summary has not changed".format(subcloud_to_test))
        assert output_central_before == output_central_after and output_sub_before != \
            output_sub_after

        add_routes_to_subcloud(subcloud_to_test, subcloud_table)

        dc_helper.wait_for_subcloud_status(subcloud_to_test,
                                           avail=SubcloudStatus.AVAIL_ONLINE,
                                           sync=SubcloudStatus.SYNCED,
                                           timeout=DCTimeout.SYNC,
                                           con_ssh=ssh_central)
        alarm_summary_add_and_del(subcloud_to_test)

    finally:
        cli.dcmanager("subcloud show {}".format(subcloud_to_test),
                      ssh_client=ssh_central,
                      fail_ok=True)
        add_routes_to_subcloud(subcloud_to_test, subcloud_table, fail_ok=True)
        LOG.info("Clear alarm on subcloud: {}".format(subcloud_to_test))
        ssh_subcloud.exec_cmd('fmClientCli -D host=testhost-0')
        check_alarm_summary_match_subcloud(subcloud=subcloud_to_test)
Example #20
def get_imported_load_version(con_ssh=None):
    table_ = table_parser.table(cli.system('load-list', ssh_client=con_ssh)[1])
    table_ = table_parser.filter_table(table_, state='imported')

    return table_parser.get_values(table_, 'software_version')
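A usage sketch; there is typically at most one imported load at a time, so callers take the first match:

versions = get_imported_load_version()
imported_version = versions[0] if versions else None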
Example #21
def get_upgraded_host_names(upgrade_release, con_ssh=None):

    table_ = table_parser.table(cli.system('host-upgrade-list', ssh_client=con_ssh)[1])
    table_ = table_parser.filter_table(table_, target_release=upgrade_release)
    return table_parser.get_column(table_, "hostname")