Esempio n. 1
0
def check_for_upgrade_abort():
    """Determine whether an in-progress upgrade can be aborted and how.

    Returns (tuple): (code, info)
        (1, None)  -- no upgrade in progress
        (2, dict)  -- both controllers upgraded; full reinstall needed to abort
        (0, dict)  -- one controller upgraded; in-service abort possible
        (3, dict)  -- no host upgraded yet
    """
    upgrade_info = {'LAB': InstallVars.get_install_var('LAB')}
    table_ = upgrade_helper.system_upgrade_show()[1]
    print("Upgrade show {}".format(table_))
    if "No upgrade in progress" in table_:
        LOG.warning("No upgrade in progress, cannot be aborted")
        return 1, None

    to_release = table_parser.get_value_two_col_table(table_, "to_release")
    from_release = table_parser.get_value_two_col_table(table_, "from_release")
    upgraded_hosts = upgrade_helper.get_upgraded_host_names(to_release)
    upgrade_info['current_release'] = from_release
    upgrade_info['upgrade_release'] = to_release
    upgrade_info['upgraded_hostnames'] = upgraded_hosts

    num_upgraded = len(upgraded_hosts)
    if num_upgraded >= 2:
        LOG.warning(
            "Both controllers are upgraded; Full system installation required to abort"
            ": {} ".format(upgraded_hosts))
        return 2, upgrade_info
    if num_upgraded == 1:
        LOG.warning(
            "Only one  controller is upgraded; In service abort is possible: "
            "{} ".format(upgraded_hosts))
        return 0, upgrade_info
    LOG.warning("No host is upgraded. ")
    return 3, upgrade_info
Esempio n. 2
0
def get_cert_info(cert_id, con_ssh=None):
    """Query 'system certificate-show' for a certificate and parse the result.

    Args:
        cert_id (str): id of the certificate to query
        con_ssh (SSHClient|None): ssh connection to run the cli on

    Returns (tuple): (code, id, type, states)
        0 -- 'details' field found and parsed
        1 -- table present but 'details' field empty (known issue CGTS-9529)
        2 -- no output from the cli
    """
    LOG.info('check the status of the current certificate')
    cmd = 'certificate-show ' + cert_id
    output = cli.system(cmd, ssh_client=con_ssh, fail_ok=False)[1]
    if output:
        table = table_parser.table(output)
        if table:
            actual_id = table_parser.get_value_two_col_table(table, 'uuid')
            actual_type = table_parser.get_value_two_col_table(
                table, 'certtype')
            actual_details = table_parser.get_value_two_col_table(
                table, 'details')
            actual_states = ''
            if not actual_details:
                # CGTS-9529: tolerate missing 'details' instead of asserting.
                LOG.fatal('No details in output of certificate-show')
                LOG.fatal(
                    'Ignore it until the known issue CGTS-9529 fixed, output:'
                    + output)
                # assert False, 'No details in output of certificate-show'
            else:
                LOG.debug('details from output of certificate-show: {}'.format(
                    actual_details))
                # NOTE(review): eval() on CLI output is unsafe on untrusted
                # data; assumes 'details' is a python-literal dict -- consider
                # ast.literal_eval. TODO confirm the field format.
                actual_states = eval(actual_details)
                LOG.debug('states: {}'.format(actual_states))
                return 0, actual_id, actual_type, actual_states

            # Reached only via the empty-'details' branch above.
            LOG.info('')
            return 1, actual_id, actual_type, actual_states
    else:
        LOG.info('no "details" in output')

    return 2, '', '', ''
Esempio n. 3
0
def get_cli_timestamps(vol_id):
    """Return 'created_at' timestamps from sysinv and openstack CLIs.

    Args:
        vol_id: id of the volume to query via 'openstack volume show'

    Returns (tuple): (sysinv_timestamp, openstack_timestamp)
    """
    sysinv_table = table_parser.table(cli.system('show')[1])
    sysinv_ts = table_parser.get_value_two_col_table(sysinv_table, 'created_at')

    volume_table = table_parser.table(
        cli.openstack('volume show', vol_id, auth_info=Tenant.get('admin'))[1])
    openstack_ts = table_parser.get_value_two_col_table(volume_table,
                                                        'created_at')

    return sysinv_ts, openstack_ts
Esempio n. 4
0
def test_host_disk_wipe_unassigned_disk():
    """
    This test attempts to run system host-disk-wipe on a node using any
    unassigned disk.

    Command format is:

    system host-disk-wipe [--confirm] <hostname or id> <disk uuid>

    Note, host-disk-wipe is only applicable to controller and compute nodes. It
    cannot be used on the rootfs disk.  It cannot be used for a disk that is
    used by a PV or has partitions used by a PV.

    Arguments:
    - None

    Test Steps:
    1.  Determine which disks are unassigned by comparing size_gib to
    available_gib in system host-disk-list
    2.  Attempt to wipe the disk
    3.  Expect it to pass

    Assumptions:
    - None
    """
    computes = system_helper.get_hosts(personality="compute",
                                       availability="available")
    controllers = system_helper.get_hosts(personality="controller",
                                          availability="available")
    hosts = controllers + computes

    found_disk = False
    for host in hosts:
        LOG.info("Query disks on host {}".format(host))
        disks = storage_helper.get_host_disks(host)
        for disk_uuid in disks:
            cmd = "host-disk-show {} {}".format(host, disk_uuid)
            rc, out = cli.system(cmd)
            # Parse the CLI output once and reuse the table for both fields
            # (original re-parsed the same output twice).
            table_ = table_parser.table(out)
            size_gib = table_parser.get_value_two_col_table(table_, "size_gib")
            available_gib = table_parser.get_value_two_col_table(
                table_, "available_gib")
            # A disk whose full size is still available is unassigned.
            if int(float(size_gib)) == int(float(available_gib)):
                found_disk = True
                LOG.tc_step("Attempting to wipe disk {} from host {}".format(
                    disk_uuid, host))
                cmd = 'host-disk-wipe --confirm {} {}'.format(host, disk_uuid)
                rc, out = cli.system(cmd, fail_ok=True)
                assert rc == 0, "Expected wipe disk to pass but instead failed"
                # Only test one disk on each host
                break

    if not found_disk:
        skip("No unassigned disks to run test")
Esempio n. 5
0
def get_pci_interface_stats_for_providernet(
        providernet_id,
        fields=('pci_pfs_configured', 'pci_pfs_used', 'pci_vfs_configured',
                'pci_vfs_used'),
        auth_info=Tenant.get('admin'),
        con_ssh=None):
    """
    Get pci interface usage counters for a providernet.

    Args:
        providernet_id (str): id of a providernet
        fields: fields such as ('pci_vfs_configured', 'pci_pfs_used')
        auth_info (dict):
        con_ssh (SSHClient):

    Returns (tuple): tuple of integers, one per requested field

    Raises:
        ValueError: when providernet_id is empty
    """
    if not providernet_id:
        raise ValueError("Providernet id is not provided.")

    output = cli.nova('providernet-show',
                      providernet_id,
                      ssh_client=con_ssh,
                      auth_info=auth_info)[1]
    table_ = table_parser.table(output)
    return tuple(
        int(table_parser.get_value_two_col_table(table_, field, strict=True))
        for field in fields)
Esempio n. 6
0
def get_provider_net_info(providernet_id,
                          field='pci_pfs_configured',
                          strict=True,
                          auth_info=Tenant.get('admin'),
                          con_ssh=None,
                          rtn_int=True):
    """
    Look up a single field from "nova providernet-show".

    Args:
        providernet_id (str): id of a providernet
        field (str): field name such as pci_vfs_configured, pci_pfs_used, etc
        strict (bool): whether to perform a strict search on field name
        auth_info (dict):
        con_ssh (SSHClient):
        rtn_int (bool): convert the value to int when True

    Returns (int|str): value of the specified field

    Raises:
        ValueError: when providernet_id is empty
    """
    if not providernet_id:
        raise ValueError("Providernet id is not provided.")

    output = cli.nova('providernet-show',
                      providernet_id,
                      ssh_client=con_ssh,
                      auth_info=auth_info)[1]
    value = table_parser.get_value_two_col_table(table_parser.table(output),
                                                 field,
                                                 strict=strict)
    if rtn_int:
        return int(value)
    return value
def test_horizon_sysconfig_oam_cancel_edit(sys_config_pg):
    """
    Test oam edit and display:

    Setups:
        - Login as Admin
        - Go to Admin > Platform > System Configuration

    Teardown:
        - Back to System Configuration Page
        - Logout

    Test Steps:
        - Check oam details display
        - Edit the OAM but not submit
    """
    LOG.tc_step('Check OAM IP display')
    sys_config_pg.go_to_oam_ip_tab()
    oam_table = table_parser.table(cli.system('oam-show')[1])
    # The header map differs between simplex and multi-node systems.
    system_mode = system_helper.get_system_values(fields='system_mode')[0]
    if system_mode == 'simplex':
        headers_map = sys_config_pg.oam_table.SIMPLEX_OAM_MAP
    else:
        headers_map = sys_config_pg.oam_table.OAM_MAP
    expt_horizon = {
        horizon_header: table_parser.get_value_two_col_table(
            oam_table, field=cli_header)
        for cli_header, horizon_header in headers_map.items()}
    sys_config_pg.check_horizon_displays(
        table_name=sys_config_pg.oam_table.name, expt_horizon=expt_horizon)

    LOG.tc_step('Edit the OAM but not submit')
    sys_config_pg.edit_oam(cancel=True)
    horizon.test_result = True
Esempio n. 8
0
def test_system_type():
    """
    Verify the System Type can be retrieved from SysInv and is correct

    Test Steps:
        - Determine the System Type based on whether the system is CPE or not
        - Retrieve the System Type information from SystInv
        - Compare the types and verify they are the same, fail the test case
        otherwise

    Notes:
        - Covers SysInv test-cases:
            66) Query the product type on CPE system using CLI
            67) Query the product type on STD system using CLI
    """
    LOG.tc_step('Determine the real System Type the lab')
    expt_system_type = (SystemType.CPE if system_helper.is_aio_system()
                        else SystemType.STANDARD)

    LOG.tc_step('Get System Type from system inventory')
    system_show = table_parser.table(cli.system('show')[1])
    displayed_system_type = table_parser.get_value_two_col_table(
        system_show, 'system_type')

    LOG.tc_step(
        'Verify the expected System Type is the same as that from System Inventory'
    )
    assert expt_system_type == displayed_system_type, \
        'Expected system_type is: {}; Displayed system type: {}.'.format(
            expt_system_type, displayed_system_type)
Esempio n. 9
0
def get_helm_override_values(chart, namespace, app_name='stx-openstack',
                             fields=('combined_overrides',),
                             auth_info=Tenant.get('admin_platform'),
                             con_ssh=None):
    """
    Get helm-override values for given chart via system helm-override-show
    Args:
        chart (str):
        namespace (str):
        app_name (str)
        fields (str|tuple|list): field name(s); a single str is accepted
        auth_info:
        con_ssh:

    Returns (list): list of parsed yaml formatted output. e.g., list of dict,
    list of list, list of str

    """
    args = '{} {} {}'.format(app_name, chart, namespace)
    table_ = table_parser.table(
        cli.system('helm-override-show', args, ssh_client=con_ssh,
                   auth_info=auth_info)[1],
        rstrip_value=True)

    if isinstance(fields, str):
        fields = (fields,)

    values = []
    for field in fields:
        value = table_parser.get_value_two_col_table(table_, field=field,
                                                     merge_lines=False)
        # safe_load: yaml.load without an explicit Loader is deprecated and
        # can construct arbitrary python objects; override values are plain
        # yaml data, so the safe loader suffices.
        values.append(yaml.safe_load('\n'.join(value)))

    return values
Esempio n. 10
0
def _suppress_unsuppress_sensor(sensor_name,
                                host,
                                set_suppress='False',
                                sensor_group=False):
    """Suppress or unsuppress a sensor (or sensor group) on a host.

    Only issues the modify command when the current suppression state
    differs from the requested one.

    Returns (bool): True when no change was needed or the modify succeeded.
    """
    res = 0
    sensor_uuid = get_sensor_uuid(sensor_name, host, sensor_group)

    # Read the current suppression state to avoid a redundant modify.
    show_table = get_sensor_showtable(sensor_uuid, host, sensor_group)
    current_suppress = table_parser.get_value_two_col_table(
        show_table, 'suppress')
    print('Suppression: {}'.format(current_suppress))

    sysinv_action = ('host-sensorgroup-modify' if sensor_group is True
                     else 'host-sensor-modify')

    if current_suppress != set_suppress:
        # State differs from the requested one; execute the action.
        res, out = cli.system(sysinv_action,
                              '{} {} suppress={}'.format(
                                  host, sensor_uuid, set_suppress),
                              fail_ok=True)

    print('Result: {}'.format(res))
    return res == 0
Esempio n. 11
0
def deploy_env(env_id,
               session_id,
               con_ssh=None,
               auth_info=None,
               fail_ok=False):
    """Deploy a murano environment and return the deployment id.

    Returns (tuple):
        (0, <deploy_id>) -- deployed, id parsed
        (1, <output>)    -- cli call failed (fail_ok=True)
        (2, <msg>)       -- no deploy id in the output (fail_ok=True)

    Raises:
        exceptions.MuranoError: id missing and fail_ok is False
    """
    cmd = 'environment deploy --session-id {} {}'.format(session_id, env_id)
    code, output = cli.openstack(cmd,
                                 ssh_client=con_ssh,
                                 fail_ok=fail_ok,
                                 auth_info=auth_info)
    if code == 1:
        return 1, output

    deploy_id = table_parser.get_value_two_col_table(
        table_parser.table(output), 'id')
    if deploy_id:
        return 0, deploy_id

    msg = "Fail to get the deploy id; session-id {}; environment " \
          "id {}".format(session_id, env_id)
    if fail_ok:
        return 2, msg
    raise exceptions.MuranoError(msg)
Esempio n. 12
0
def wait_for_upgrade_states(states, timeout=60, check_interval=6, fail_ok=False):
    """Poll 'system upgrade-show' until the given state is reached.

    Args:
        states: expected state value to wait for
        timeout: maximum seconds to wait
        check_interval: seconds between polls
        fail_ok: return False instead of raising on timeout

    Returns (bool): True if the state was reached, False on timeout with
        fail_ok=True

    Raises:
        ValueError: when states is empty
        exceptions.TimeoutException: timeout reached and fail_ok is False
    """
    if not states:
        raise ValueError("Expected host state(s) has to be specified via keyword argument states")

    deadline = time.time() + timeout
    matched = False
    while time.time() < deadline:
        show_table = system_upgrade_show()[1]
        current_state = table_parser.get_value_two_col_table(show_table, "state")
        if current_state == states:
            matched = True
            break
        time.sleep(check_interval)

    if matched:
        return True
    msg = "{} state was not reached ".format(states)
    if fail_ok:
        LOG.warning(msg)
        return False
    raise exceptions.TimeoutException(msg)
Esempio n. 13
0
def delete_bundle(bundle_id, con_ssh=None, auth_info=None, fail_ok=False):
    """
    Delete murano bundle
    Args:
        bundle_id: Bundle id to delete
        con_ssh (SSHClient):
        auth_info (dict)
        fail_ok (bool): whether return False or raise exception when some
            services fail to reach enabled-active state

    Returns (tuple): (code, msg)
        (0, <pkg_id>) -- deleted
        (1, <output>) -- cli call failed

    Raises:
        ValueError: when bundle_id is None
    """
    if bundle_id is None:
        raise ValueError("Murano bundle id has to be specified.")

    LOG.info("Deleting Murano bundle {}".format(bundle_id))
    code, output = cli.openstack('bundle delete',
                                 bundle_id,
                                 ssh_client=con_ssh,
                                 fail_ok=fail_ok,
                                 auth_info=auth_info)
    if code > 0:
        return 1, output

    pkg_id = table_parser.get_value_two_col_table(
        table_parser.table(output), 'id')
    return 0, pkg_id
Esempio n. 14
0
def update_helm_override(chart,
                         namespace,
                         app_name='stx-openstack',
                         yaml_file=None,
                         kv_pairs=None,
                         reset_vals=False,
                         reuse_vals=False,
                         auth_info=Tenant.get('admin_platform'),
                         con_ssh=None,
                         fail_ok=False):
    """
    Update helm_override values for given chart
    Args:
        chart:
        namespace:
        app_name
        yaml_file: path of a --values yaml file
        kv_pairs (dict): key/value overrides passed via --set
        reset_vals:
        reuse_vals:
        fail_ok
        con_ssh
        auth_info

    Returns (tuple):
        (0, <overrides>(str|list|dict))     # cmd accepted.
        (1, <std_err>)  #  system helm-override-update cmd rejected

    """
    args = '{} {} {}'.format(app_name, chart, namespace)
    if reset_vals:
        args = '--reset-values {}'.format(args)
    if reuse_vals:
        args = '--reuse-values {}'.format(args)
    if yaml_file:
        args = '--values {} {}'.format(yaml_file, args)
    if kv_pairs:
        cmd_overrides = ','.join(
            ['{}={}'.format(k, v) for k, v in kv_pairs.items()])
        args = '--set {} {}'.format(cmd_overrides, args)

    code, output = cli.system('helm-override-update',
                              args,
                              ssh_client=con_ssh,
                              fail_ok=fail_ok,
                              auth_info=auth_info)
    if code != 0:
        return 1, output

    table_ = table_parser.table(output, rstrip_value=True)
    overrides = table_parser.get_value_two_col_table(table_, 'user_overrides')
    # safe_load: yaml.load without an explicit Loader is deprecated and can
    # construct arbitrary python objects; overrides are plain yaml data.
    overrides = yaml.safe_load('\n'.join(overrides))
    # yaml.safe_load converts str to bool, int, float; but does not convert
    # None type. Updates are not verified here since it is rather complicated
    # to verify properly.
    LOG.info("Helm-override updated : {}".format(overrides))

    return 0, overrides
Esempio n. 15
0
def wait_for_backup_status(backup_id,
                           target_status='available',
                           timeout=1800,
                           wait_between_check=30,
                           fail_ok=False,
                           con_ssh=None):
    """
    Wait for the specified cinder-backup to reach the given status.

    Args:
        backup_id: id of the cinder-backup
        target_status: expected status to wait for, 'available' by default
        timeout: seconds to wait before giving up, 1800 by default
        wait_between_check: seconds between status checks, 30 by default
        fail_ok: return instead of asserting on timeout, False by default
        con_ssh: current ssh connection to the lab

    Returns (tuple):
        (0, msg) -- target status reached
        (1, msg) -- timed out (only when fail_ok=True)
    """
    cmd = 'cinder backup-show ' + backup_id
    deadline = time.time() + timeout

    output = ''
    reached = False
    while time.time() < deadline:
        rc, output = con_ssh.exec_cmd(cmd)
        backup_table = table_parser.table(output)
        status = table_parser.get_value_two_col_table(backup_table, 'status')
        if status.lower() == target_status.lower():
            reached = True
            break
        time.sleep(wait_between_check)

    if not reached:
        msg = 'Backup:{} did not reach status:{} in {} seconds'.format(
            backup_id, target_status, timeout)
        LOG.warn(msg + 'output:' + output)
        assert fail_ok, msg
        return 1, msg

    return 0, 'all cinder backup:{} reached status:{} after {} seconds'.format(
        backup_id, target_status, timeout)
Esempio n. 16
0
def copy_flavor(origin_flavor, new_name=None, con_ssh=None):
    """
    Extract the info from an existing flavor and create a new flavor that is has identical info

    Args:
        origin_flavor (str): id of an existing flavor to extract the info from
        new_name: name for the new flavor; auto-generated from the origin
            flavor's name when not given
        con_ssh:

    Returns (str): flavor_id

    """
    table_ = table_parser.table(
        cli.openstack('flavor show',
                      origin_flavor,
                      ssh_client=con_ssh,
                      auth_info=Tenant.get('admin'))[1])

    extra_specs = table_parser.get_value_two_col_table(table_, 'properties')
    extra_specs = table_parser.convert_value_to_dict(value=extra_specs)
    ephemeral = table_parser.get_value_two_col_table(table_,
                                                     'ephemeral',
                                                     strict=False)
    disk = table_parser.get_value_two_col_table(table_, 'disk')
    is_public = table_parser.get_value_two_col_table(table_,
                                                     'is_public',
                                                     strict=False)
    ram = table_parser.get_value_two_col_table(table_, 'ram')
    rxtx_factor = table_parser.get_value_two_col_table(table_, 'rxtx_factor')
    swap = table_parser.get_value_two_col_table(table_, 'swap')
    vcpus = table_parser.get_value_two_col_table(table_, 'vcpus')
    old_name = table_parser.get_value_two_col_table(table_, 'name')

    if not new_name:
        # Bug fix: original formatted "{}-{}".format(old_name, new_name)
        # while new_name was still None, producing names like "flavor-None".
        new_name = "{}-copy".format(old_name)
    swap = swap if swap else 0
    new_flavor_id = create_flavor(name=new_name,
                                  vcpus=vcpus,
                                  ram=ram,
                                  swap=swap,
                                  root_disk=disk,
                                  ephemeral=ephemeral,
                                  is_public=is_public,
                                  rxtx_factor=rxtx_factor,
                                  con_ssh=con_ssh)[1]
    set_flavor(new_flavor_id, con_ssh=con_ssh, **extra_specs)

    return new_flavor_id
Esempio n. 17
0
def _get_local_storageprofle_details(name_id=None):
    """Parse 'system storprofile-show' output into a settings dict.

    Args:
        name_id: name or id of the local storage profile; empty returns {}

    Returns (dict): profile name, disk config, lvm volume-group name and
        its backing settings.
    """
    if not name_id:
        return {}

    table = table_parser.table(
        cli.system('storprofile-show {}'.format(name_id))[1])

    lvgsetting = {}

    # FIXME: should be 'profile name', CGTS-4432
    profile_name_header = 'hostname'
    name = table_parser.get_value_two_col_table(table, profile_name_header)
    lvgsetting['name'] = name

    # diskconfig is formatted as 'dev:size;dev:size;...'
    diskconfig = table_parser.get_value_two_col_table(table, 'diskconfig')
    disks = dict([kv.split(':') for kv in diskconfig.split(';')])
    disks = [{k: v} for k, v in disks.items()]
    lvgsetting.update({'disk': disks})

    # pvconfig = table_parser.get_value_two_col_table(table, 'physical volume config')

    lvgconfig = table_parser.get_value_two_col_table(
        table, 'logical volume group config')
    lvgname = lvgconfig.split(',')[0].strip()
    # Bug fix: original re-assigned lvgsetting to a new dict here, silently
    # discarding the 'name' and 'disk' entries built above. Merge instead.
    lvgsetting['lvm_vg_name'] = lvgname

    lvgbackings = dict(
        [kv.split(':') for kv in lvgconfig.split(',')[1].split(';')])
    lvgsetting.update({k.strip(): v.strip() for k, v in lvgbackings.items()})
    # from xml
    #  'localstorageProfile':
    #      {'lvg': [
    #          {'lvm_vg_name': 'nova-local',
    #           'concurrent_disk_operations': '2',
    #           'instance_backing': 'image'}],
    #          'name': 'with_ceph_image_local_storage_backed',
    #          'disk': [{'size': '228936', 'node': '/dev/sdb'
    #          }]
    #      }
    #  }

    return lvgsetting
Esempio n. 18
0
def test_delete_host_partitions():
    """
    This test creates host partitions and the teardown deletes them.

    Arguments:
    * None

    Test Steps:
    * Create a partition on each host

    Teardown:
    * Re-create those partitions
    """
    global partitions_to_restore
    partitions_to_restore = {}

    hosts = system_helper.get_controllers() + \
        system_helper.get_hosts(personality="compute")

    usable_disks = False
    for host in hosts:
        all_disks = storage_helper.get_host_disks(host)
        free_disks = storage_helper.get_host_disks_with_free_space(
            host, all_disks)
        if not free_disks:
            continue

        for disk_uuid in free_disks:
            # Need at least 2 GiB of free space to carve a 1 GiB partition.
            if int(float(free_disks[disk_uuid])) < 2:
                LOG.info(
                    "Skip disk {} due to insufficient space".format(disk_uuid))
                continue
            usable_disks = True
            LOG.info("Creating partition on {}".format(host))
            rc, out = storage_helper.create_host_partition(host,
                                                           disk_uuid,
                                                           "1",
                                                           fail_ok=False,
                                                           wait=False)
            assert rc == 0, "Partition creation was expected to succeed but instead failed"
            # Check that first disk was created
            uuid = table_parser.get_value_two_col_table(
                table_parser.table(out), "uuid")
            storage_helper.wait_for_host_partition_status(host=host,
                                                          uuid=uuid,
                                                          timeout=CP_TIMEOUT)
            partitions_to_restore[host] = [uuid]
            # Only test one disk on each host
            break

    if not usable_disks:
        skip("Did not find disks with sufficient space to test with.")
Esempio n. 19
0
def abort_upgrade(con_ssh=None, timeout=60, fail_ok=False):
    """
    Aborts upgrade

    The cli is interactive: it may prompt yes/no and then require the
    literal word 'abort' to be typed; this drives that exchange via expect.

    Args:
        con_ssh (SSHClient):
        timeout (int)
        fail_ok (bool):

    Returns (tuple):
        (0, dict/list)
        (1, <stderr>)   # cli returns stderr, applicable if fail_ok is true

    Raises:
        exceptions.CLIRejected: command rejected or abort failed and
            fail_ok is False
    """
    if con_ssh is None:
        con_ssh = ControllerClient.get_active_controller()

    cmd = "source /etc/nova/openrc; system upgrade-abort"
    con_ssh.send(cmd)
    end_time = time.time() + timeout
    rc = 1
    while time.time() < end_time:
        # index 0 -> back at the shell prompt; index 1 -> a prompt to answer.
        index = con_ssh.expect([con_ssh.prompt,  Prompt.YES_N_PROMPT], timeout=timeout)
        if index == 1:
            # Answer the yes/no prompt, then the typed confirmation prompt.
            con_ssh.send('yes')
            index = con_ssh.expect([con_ssh.prompt, Prompt.CONFIRM_PROMPT], timeout=timeout)
            if index == 1:
                con_ssh.send('abort')
                index = con_ssh.expect([con_ssh.prompt, Prompt.CONFIRM_PROMPT], timeout=timeout)
        if index == 0:
            # Shell prompt reached: capture the exit status of the cli.
            rc = con_ssh.exec_cmd("echo $?")[0]
            con_ssh.flush()
            break

    if rc != 0:
        err_msg = "CLI system upgrade-abort rejected"
        LOG.warning(err_msg)
        if fail_ok:
            return 1, err_msg
        else:
            raise exceptions.CLIRejected(err_msg)

    # Confirm the upgrade state actually transitioned to an aborting state.
    table_ = system_upgrade_show()[1]
    state = table_parser.get_value_two_col_table(table_, "state")
    if "aborting" in state:
        return 0, "Upgrade aborting"
    else:
        err_msg = "Upgrade abort failed"
        if fail_ok:
            LOG.warn(err_msg)
            return 1, err_msg
        else:
            raise exceptions.CLIRejected(err_msg)
Esempio n. 20
0
def get_user_token(field='id', con_ssh=None, auth_info=Tenant.get('admin')):
    """
    Return an authentication token field via 'openstack token issue'.

    Args:
        field (str): field to extract from the token table, 'id' by default
        con_ssh (SSHClient):
        auth_info

    Returns: value of the requested field from the issued token

    """
    output = cli.openstack('token issue', ssh_client=con_ssh,
                           auth_info=auth_info)[1]
    return table_parser.get_value_two_col_table(table_parser.table(output),
                                                field)
Esempio n. 21
0
def get_sensor_audit_interval(sensorgroup_name, host):
    """Return the audit interval configured for a sensor group on a host."""
    # Resolve the sensor group's uuid first.
    group_uuid = get_sensor_uuid(sensorgroup_name, host, True)

    res, out = cli.system('host-sensorgroup-show',
                          '{} {}'.format(host, group_uuid),
                          fail_ok=True)

    return table_parser.get_value_two_col_table(table_parser.table(out),
                                                'audit_interval_group')
Esempio n. 22
0
def traffic_with_preset_configs(ixncfg, ixia_session=None):
    """Load an ixia traffic config and align vport vlan ids with openstack.

    For every enabled ixia interface, find the openstack subnet containing
    the interface's ipv4 gateway, look up that network's segmentation id,
    and set it as the interface's vlan id.

    Args:
        ixncfg: path/name of the ixia configuration to load
        ixia_session: existing IxiaSession; a temporary one is created (and
            disconnected on exit) when not supplied
    """
    with ExitStack() as stack:
        if ixia_session is None:
            LOG.info("ixia_session not supplied, creating")
            from keywords import ixia_helper
            ixia_session = ixia_helper.IxiaSession()
            ixia_session.connect()
            # Only disconnect sessions this function created.
            stack.callback(ixia_session.disconnect)

        ixia_session.load_config(ixncfg)

        # All known subnets, as ip_network objects, for gateway matching.
        subnet_table = table_parser.table(
            cli.openstack('subnet list', auth_info=Tenant.get('admin'))[1])
        cidrs = list(
            map(ipaddress.ip_network,
                table_parser.get_column(subnet_table, 'Subnet')))
        for vport in ixia_session.getList(ixia_session.getRoot(), 'vport'):
            for interface in ixia_session.getList(vport, 'interface'):
                if ixia_session.testAttributes(interface, enabled='true'):
                    ipv4_interface = ixia_session.getList(interface, 'ipv4')[0]
                    gw = ipaddress.ip_address(
                        ixia_session.getAttribute(ipv4_interface, 'gateway'))
                    vlan_interface = ixia_session.getList(interface, 'vlan')[0]
                    for cidr in cidrs:
                        if gw in cidr:
                            # Subnet found: fetch its network's seg id and
                            # apply it to the ixia interface's vlan.
                            net_id = table_parser.get_values(subnet_table,
                                                             'Network',
                                                             cidr=cidr)[0]
                            table = table_parser.table(
                                cli.openstack(
                                    'network show',
                                    net_id,
                                    auth_info=Tenant.get('admin'))[1])
                            seg_id = table_parser.get_value_two_col_table(
                                table, "provider:segmentation_id")
                            ixia_session.configure(vlan_interface,
                                                   vlanEnable=True,
                                                   vlanId=str(seg_id))
                            LOG.info(
                                "vport {} interface {} gw {} vlan updated to {}"
                                .format(vport, interface, gw, seg_id))
Esempio n. 23
0
def get_cpu_info(hypervisor):
    """Collect cpu information for a hypervisor.

    Args:
        hypervisor: hypervisor name/id for 'openstack hypervisor show'

    Returns (tuple): (cpu_info dict, threads per core, list of vm core ids,
        total number of logical cores)
    """
    output = cli.openstack('hypervisor show ' + hypervisor,
                           auth_info=Tenant.get('admin'))[1]
    table = table_parser.table(output)
    cpu_info = table_parser.get_value_two_col_table(table, 'cpu_info')

    cpu_table = host_helper.get_host_cpu_list_table(hypervisor)
    thread_ids = table_parser.get_columns(cpu_table, ['thread'])
    # Threads per core = number of distinct thread ids in the cpu list.
    num_threads = len(set(ids[0] for ids in thread_ids))
    LOG.info('per_core_threads:{}'.format(num_threads))

    core_function = table_parser.get_columns(cpu_table,
                                             ['log_core', 'assigned_function'])

    # Logical cores whose assigned function matches the target type.
    vm_cores = []
    for core, assigned in core_function:
        if assigned == TARGET_CPU_TYPE:
            vm_cores.append(int(core))

    LOG.info('vm_cores={}'.format(vm_cores))
    # NOTE(review): eval() on CLI output is risky; assumes cpu_info is a
    # python-literal dict -- consider ast.literal_eval. TODO confirm format.
    return eval(cpu_info), num_threads, vm_cores, len(core_function)
Esempio n. 24
0
def get_sensors_action(sensor_uuid,
                       host,
                       event_level='actions_critical',
                       sensor_group=False):
    """Return the configured action for a sensor at the given event level.

    Args:
        sensor_uuid : UUID of the sensor.
        host : node that the sensor belongs to
        event_level : level of action expected
        sensor_group : group tht the sensor belongs to

    Returns: value of the event-level field from the sensor show table
    """
    show_table = get_sensor_showtable(sensor_uuid, host, sensor_group)
    return table_parser.get_value_two_col_table(show_table, event_level)
Esempio n. 25
0
def create_session(env_id, con_ssh=None, auth_info=None, fail_ok=False):
    """
    Create a Murano Session
    Args:
        env_id:
        con_ssh:
        auth_info:
        fail_ok:

    Returns (tuple):
        (0, <session_id>) -- created
        (1, <output>)     -- cli call failed
        (2, <msg>)        -- no session id in output (fail_ok=True)

    Raises:
        ValueError: when env_id is None
        exceptions.MuranoError: id missing and fail_ok is False
    """
    if env_id is None:
        raise ValueError("Murano env id has to be specified.")

    LOG.info("Creating a Murano Session in Environment {}".format(env_id))
    code, output = cli.openstack('environment session create',
                                 env_id,
                                 ssh_client=con_ssh,
                                 fail_ok=fail_ok,
                                 auth_info=auth_info)
    if code > 1:
        return 1, output

    session_id = table_parser.get_value_two_col_table(
        table_parser.table(output), 'id')
    if session_id == '':
        msg = "Fail to get Session id: {}".format(output)
        LOG.info(msg)
        if fail_ok:
            return 2, msg
        raise exceptions.MuranoError(msg)

    msg = "Session successfully created session {}".format(session_id)
    LOG.info(msg)
    return 0, session_id
def test_horizon_sysconfig_ceph_storage_pools_cancel_edit(sys_config_pg):
    """
    Test ceph storage pools edit and display:

    Setups:
        - Login as Admin
        - Go to Admin > Platform > System Configuration

    Teardown:
        - Back to System Configuration Page
        - Logout

    Test Steps:
        - Check ceph storage pools display
        - Edit ceph storage pools but not submit
    """
    LOG.tc_step('Check ceph storage pools display')
    sys_config_pg.go_to_ceph_storage_pools_tab()
    backend_output = cli.system('storage-backend-show ceph-store')[1]
    ceph_table = table_parser.table(backend_output)
    headers_map = sys_config_pg.ceph_storage_pools_table.CEPH_STORAGE_POOLS_MAP
    table_name = sys_config_pg.ceph_storage_pools_table.name
    # Build expected horizon values from the cli table; a cli value of
    # 'None' is rendered as '-' in horizon.
    expt_horizon = {}
    for cli_header, horizon_header in headers_map.items():
        cli_val = table_parser.get_value_two_col_table(ceph_table,
                                                       field=cli_header)
        expt_horizon[horizon_header] = cli_val if cli_val != 'None' else '-'
    sys_config_pg.check_horizon_displays(table_name=table_name,
                                         expt_horizon=expt_horizon)

    LOG.tc_step('Edit ceph storage pools but not submit')
    tier_name = expt_horizon.get('Ceph Storage Tier')
    sys_config_pg.edit_storage_pool(tier_name=tier_name, cancel=True)
    horizon.test_result = True
# Esempio n. 27
# 0
def import_bundle(bundle,
                  is_public=False,
                  con_ssh=None,
                  auth_info=None,
                  fail_ok=False):
    """
    Import Murano bundle
    Args:
        bundle: name of the bundle (full path)
        is_public: flag to set
        con_ssh (SSHClient):
        auth_info (dict)
        fail_ok (bool): whether return False or raise exception when some
            services fail to reach enabled-active state

    Returns:
        code, msg: return code and msg

    """

    if bundle is None:
        raise ValueError("Murano bundle name has to be specified.")

    LOG.info("Importing Murano bundle {}".format(bundle))
    # Prepend the public flag to the bundle argument when requested.
    if is_public:
        args = '--is-public {}'.format(bundle)
    else:
        args = bundle
    code, output = cli.openstack('bundle import',
                                 args,
                                 ssh_client=con_ssh,
                                 fail_ok=fail_ok,
                                 auth_info=auth_info)

    if code > 0:
        return 1, output

    pkg_table = table_parser.table(output)
    pkg_id = table_parser.get_value_two_col_table(pkg_table, 'id')
    return 0, pkg_id
# Esempio n. 28
# 0
def test_increase_host_partition_size_beyond_avail_disk_space():
    """
    This test attempts to increase the size of an existing host partition
    beyond the available space on disk.  It is expected to fail.

    Assumptions:
    * Partitions are available in Ready state.

    Test steps:
    * Create partition
    * Modify the partition to consume over than the available disk space

    Teardown:
    * Delete created partitions

    """

    # Module-level dict presumably consumed by a teardown fixture to clean
    # up partitions created here: {hostname: [partition uuids]}
    global partitions_to_restore
    partitions_to_restore = {}

    # Test all controllers plus all compute hosts.
    computes = system_helper.get_hosts(personality="compute")
    hosts = system_helper.get_controllers() + computes

    usable_disks = False
    for host in hosts:
        disks = storage_helper.get_host_disks(host)
        free_disks = storage_helper.get_host_disks_with_free_space(host, disks)
        if not free_disks:
            continue

        for disk_uuid in free_disks:
            size_gib = float(free_disks[disk_uuid])
            partition_chunks = int(size_gib)
            # Need at least 2 GiB free: 1 GiB for the partition plus room
            # for the (expected-to-fail) resize attempt to be meaningful.
            if partition_chunks < 2:
                LOG.info(
                    "Skip disk {} due to insufficient space".format(disk_uuid))
                continue
            usable_disks = True
            LOG.info("Creating partition on {}".format(host))
            rc, out = storage_helper.create_host_partition(host,
                                                           disk_uuid,
                                                           "1",
                                                           fail_ok=False,
                                                           wait=False)
            assert rc == 0, "Partition creation was expected to succeed but instead failed"
            # Check that first disk was created
            uuid = table_parser.get_value_two_col_table(
                table_parser.table(out), "uuid")
            storage_helper.wait_for_host_partition_status(host=host,
                                                          uuid=uuid,
                                                          timeout=CP_TIMEOUT)
            partitions_to_restore[host] = []
            partitions_to_restore[host].append(uuid)

            device_node = storage_helper.get_host_partition_values(
                host, uuid, "device_node")[0]
            # Strip the trailing partition number to get the disk device,
            # e.g. /dev/sdb1 -> /dev/sdb. NVMe partitions end in 'pN'
            # (e.g. /dev/nvme0n1p1), so also drop the trailing 'p'.
            device_node = device_node.rstrip(string.digits)
            if device_node.startswith("/dev/nvme"):
                device_node = device_node[:-1]
            # Ask for one GiB more than the disk's total free space, which
            # necessarily exceeds what remains after the 1 GiB partition.
            size_gib += 1
            LOG.tc_step(
                "Modifying partition {} from size 1 to size {} from host {} on device node {}"
                .format(uuid, int(size_gib), host, device_node))
            rc, out = storage_helper.modify_host_partition(host,
                                                           uuid,
                                                           str(int(size_gib)),
                                                           fail_ok=True)
            assert rc != 0, "Expected partition modification to fail and instead it succeeded"
            LOG.info(out)
            # Only test one disk on each host
            break

    if not usable_disks:
        skip("Did not find disks with sufficient space to test with.")
# Esempio n. 29
# 0
def test_decrease_host_partition_size():
    """
    This test attempts to decrease the size of an existing host partition.  It
    is expected to fail since decreasing the size of a partition is not
    supported.


    Test Steps:
    * Create a partition
    * Modify the partition to decrease its size

    Teardown:
    * Delete created partition

    """
    # Module-level dict presumably consumed by a teardown fixture to clean
    # up partitions created here: {hostname: [partition uuids]}
    global partitions_to_restore
    partitions_to_restore = {}

    # Test all controllers plus all compute hosts.
    computes = system_helper.get_hosts(personality="compute")
    hosts = system_helper.get_controllers() + computes

    usable_disks = False
    for host in hosts:
        disks = storage_helper.get_host_disks(host)
        free_disks = storage_helper.get_host_disks_with_free_space(host, disks)
        if not free_disks:
            continue

        for disk_uuid in free_disks:
            size_gib = float(free_disks[disk_uuid])
            partition_chunks = int(size_gib)
            # Require at least 2 GiB of free space before using this disk.
            if partition_chunks < 2:
                LOG.info(
                    "Skip disk {} due to insufficient space".format(disk_uuid))
                continue
            usable_disks = True
            LOG.info("Creating partition on {}".format(host))
            rc, out = storage_helper.create_host_partition(host,
                                                           disk_uuid,
                                                           "1",
                                                           fail_ok=False,
                                                           wait=False)
            assert rc == 0, "Partition creation was expected to succeed but instead failed"
            # Check that first disk was created
            uuid = table_parser.get_value_two_col_table(
                table_parser.table(out), "uuid")
            storage_helper.wait_for_host_partition_status(host=host,
                                                          uuid=uuid,
                                                          timeout=CP_TIMEOUT)
            partitions_to_restore[host] = []
            partitions_to_restore[host].append(uuid)

            # Read back the partition's actual device node and size, then
            # try to shrink it by 1 GiB - this must be rejected.
            device_node, size_gib = storage_helper.get_host_partition_values(
                host, uuid, ("device_node", "size_gib"))
            total_size = int(size_gib) - 1
            LOG.tc_step(
                "Modifying partition {} from size {} to size {} from host {} on device node {}"
                .format(uuid, int(size_gib), str(total_size), host,
                        device_node[:-1]))
            rc, out = storage_helper.modify_host_partition(host,
                                                           uuid,
                                                           str(total_size),
                                                           fail_ok=True)
            assert rc != 0, "Expected partition modification to fail and instead it succeeded"
            # Only test one disk on each host
            break

    if not usable_disks:
        skip("Did not find disks with sufficient space to test with.")
# Esempio n. 30
# 0
def test_attempt_host_unlock_during_partition_creation():
    """
    This test attempts to unlock a host while a partition is being created.  It
    is expected to fail.

    Assumptions:
    * There's some free disk space available

    Test steps:
    * Query the hosts to determine disk space
    * Lock host
    * Create a partition but don't wait for completion
    * Attempt to unlock the host that is hosting the partition that is created

    Teardown:
    * Delete created partitions

    DISABLED since unlock while creating is not blocked.

    """

    # Module-level dict presumably consumed by a teardown fixture to clean
    # up partitions created here: {hostname: [partition uuids]}
    global partitions_to_restore
    partitions_to_restore = {}

    computes = system_helper.get_hosts(personality="compute")
    hosts = system_helper.get_controllers() + computes

    # Filter out active controller
    # (the active controller cannot be locked for this test)
    active_controller = system_helper.get_active_controller_name()
    print("This is active controller: {}".format(active_controller))
    hosts.remove(active_controller)

    usable_disks = False
    for host in hosts:
        disks = storage_helper.get_host_disks(host)
        free_disks = storage_helper.get_host_disks_with_free_space(host, disks)
        if not free_disks:
            continue

        for uuid in free_disks:
            size_gib = float(free_disks[uuid])
            if size_gib < 2.0:
                LOG.info("Skip this disk due to insufficient space")
                continue

            LOG.tc_step("Lock {} and create a partition for disk {}".format(
                host, uuid))
            # Register the host for automatic recovery on teardown.
            HostsToRecover.add(host)
            host_helper.lock_host(host)
            usable_disks = True
            LOG.info("Creating partition on {}".format(host))
            # wait=False so the unlock attempt races the in-progress
            # partition creation.
            rc, out = storage_helper.create_host_partition(host,
                                                           uuid,
                                                           int(size_gib),
                                                           wait=False)
            # NOTE: 'uuid' is rebound here from disk uuid to partition uuid.
            uuid = table_parser.get_value_two_col_table(
                table_parser.table(out), "uuid")
            partitions_to_restore[host] = []
            partitions_to_restore[host].append(uuid)

            LOG.tc_step(
                "Attempt to unlock host and ensure it's rejected when partition is "
                "being created")
            rc_ = host_helper.unlock_host(host,
                                          fail_ok=True,
                                          check_first=False)[0]
            assert rc_ != 0, "Unlock attempt unexpectedly passed"

            LOG.tc_step("wait for partition to be created")
            storage_helper.wait_for_host_partition_status(host=host,
                                                          uuid=uuid,
                                                          timeout=CP_TIMEOUT)

            # Wait for the platform app to settle before finishing.
            container_helper.wait_for_apps_status(apps='platform-integ-apps',
                                                  status=AppStatus.APPLIED,
                                                  check_interval=10)
            # Only test one disk on each host
            break
        # Do it on one host only
        break

    if not usable_disks:
        skip("Did not find disks with sufficient space to test with.")