Example #1
def verify_dns_on_central_and_subcloud(primary_subcloud,
                                       fail_ok=False,
                                       sc_dns=None):
    res = []
    for region in ('RegionOne', primary_subcloud):
        # take snapshot
        orig_dns_servers = system_helper.get_dns_servers(
            auth_info=Tenant.get('admin_platform', dc_region=region))
        if not sc_dns or set(sc_dns) <= set(orig_dns_servers):
            LOG.info("Modify dns server to public dns")
            system_helper.set_dns_servers(nameservers=['8.8.8.8'],
                                          auth_info=Tenant.get(
                                              'admin_platform',
                                              dc_region=region))
        LOG.info("Check dns on {}".format(region))
        con_ssh = ControllerClient.get_active_controller(name=region)
        code, out = con_ssh.exec_cmd('nslookup -timeout=1 www.google.com',
                                     fail_ok=fail_ok,
                                     expect_timeout=30)
        res.append(code)
        # revert
        system_helper.set_dns_servers(nameservers=orig_dns_servers,
                                      auth_info=Tenant.get('admin_platform',
                                                           dc_region=region))
    return res
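A minimal usage sketch for the helper above (the subcloud name and DNS servers are illustrative placeholders); it assumes the helper modules shown in this example are importable.
# Hypothetical usage; import paths depend on the test framework layout.
res = verify_dns_on_central_and_subcloud('subcloud1', fail_ok=True,
                                         sc_dns=['8.8.8.8', '8.8.4.4'])
assert all(code == 0 for code in res), "nslookup failed in at least one region"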
Example #2
    def list_status():
        LOG.fixture_step("Listing heat resources and nova migrations")
        stacks = heat_helper.get_stacks(auth_info=Tenant.get('admin'))
        for stack in stacks:
            heat_helper.get_stack_resources(stack=stack, auth_info=Tenant.get('admin'))

        nova_helper.get_migration_list_table()
Example #3
File: setups.py Project: ashishshah1/test
def set_region(region=None):
    """
    Set the global region variable.
    This needs to be called after CliAuth.set_vars, since a custom region
    value must override what is specified in the openrc file.

    The local region and auth url are saved in CliAuth, while the remote
    region and auth url are saved in Tenant.

    Args:
        region: region to set

    """
    local_region = CliAuth.get_var('OS_REGION_NAME')
    if not region:
        if ProjVar.get_var('IS_DC'):
            region = 'SystemController'
        else:
            region = local_region
    Tenant.set_region(region=region)
    ProjVar.set_var(REGION=region)
    if re.search(SUBCLOUD_PATTERN, region):
        # Distributed cloud, lab specified is a subcloud.
        urls = keystone_helper.get_endpoints(region=region,
                                             field='URL',
                                             interface='internal',
                                             service_name='keystone')
        if not urls:
            raise ValueError(
                "No internal keystone endpoint found for region {}. Invalid "
                "value for --region with the specified lab. Subcloud tests "
                "can be run against the system controller, but not the other "
                "way around.".format(region))
        Tenant.set_platform_url(urls[0])
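A hedged call-order sketch based on the docstring above: set_region runs after CliAuth.set_vars so a custom region can override the openrc value. The helper calls are taken from the other examples on this page; import paths are assumptions.
# Hypothetical call order on a distributed cloud lab.
con_ssh = setups.setup_tis_ssh(ProjVar.get_var('LAB'))
CliAuth.set_vars(**setups.get_auth_via_openrc(con_ssh))
set_region(region=None)  # falls back to 'SystemController' on DC, else the local region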
Example #4
def download_openrc_files(quit_driver=True):
    """
    Download openrc files from Horizon to <LOG_DIR>/horizon/.

    """
    LOG.info("Download openrc files from horizon")
    local_dir = os.path.join(ProjVar.get_var('LOG_DIR'), 'horizon')

    from utils.horizon.pages import loginpage
    rc_files = []
    login_pg = loginpage.LoginPage()
    login_pg.go_to_target_page()
    try:
        for auth_info in (Tenant.get('admin'), Tenant.get('tenant1'), Tenant.get('tenant2')):
            user = auth_info['user']
            password = auth_info['password']
            openrc_file = '{}-openrc.sh'.format(user)
            home_pg = login_pg.login(user, password=password)
            home_pg.download_rc_v3()
            home_pg.log_out()
            openrc_path = os.path.join(local_dir, openrc_file)
            assert os.path.exists(openrc_path), "{} not found after download".format(openrc_file)
            rc_files.append(openrc_path)

    finally:
        if quit_driver:
            HorizonDriver.quit_driver()

    LOG.info("openrc files are successfully downloaded to: {}".format(local_dir))
    return rc_files
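A short usage sketch, assuming the function above is importable; it returns the local paths of the downloaded files under <LOG_DIR>/horizon/.
# Hypothetical usage: download the openrc files and keep the browser session
# open for further Horizon steps.
rc_files = download_openrc_files(quit_driver=False)
for rc_file in rc_files:
    LOG.info("Downloaded: {}".format(rc_file))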
Example #5
def pci_prep():
    primary_tenant = Tenant.get_primary()
    other_tenant = Tenant.get_secondary()
    primary_tenant_name = primary_tenant['tenant']
    vm_helper.set_quotas(tenant=primary_tenant_name, cores=100)
    vm_helper.set_quotas(tenant=other_tenant['tenant'], cores=100)
    return primary_tenant, primary_tenant_name, other_tenant
Example #6
def revert_https(request):
    central_auth = Tenant.get('admin_platform', dc_region='RegionOne')
    sub_auth = Tenant.get('admin_platform')

    origin_https_sub = keystone_helper.is_https_enabled(auth_info=sub_auth)
    origin_https_central = keystone_helper.is_https_enabled(
        auth_info=central_auth)

    def _revert():
        LOG.fixture_step(
            "Revert central https config to {}.".format(origin_https_central))
        security_helper.modify_https(enable_https=origin_https_central,
                                     auth_info=central_auth)

        LOG.fixture_step(
            "Revert subcloud https config to {}.".format(origin_https_sub))
        security_helper.modify_https(enable_https=origin_https_sub,
                                     auth_info=sub_auth)

        LOG.fixture_step("Verify cli's on subcloud and central region.".format(
            origin_https_sub))
        verify_cli(sub_auth, central_auth)

    request.addfinalizer(_revert)

    return origin_https_sub, origin_https_central, central_auth, sub_auth
Example #7
def revert_https(request):
    """
    Fixture to get the current https mode of the system and, if the test
    fails, leave the system in the same mode as before.
    """
    central_auth = Tenant.get('admin_platform', dc_region='RegionOne')
    sub_auth = Tenant.get('admin_platform')
    use_dnsname = (bool(common.get_dnsname()) and
                   bool(common.get_dnsname(region=ProjVar.get_var('PRIMARY_SUBCLOUD'))))

    origin_https_sub = keystone_helper.is_https_enabled(auth_info=sub_auth)
    origin_https_central = keystone_helper.is_https_enabled(auth_info=central_auth)

    def _revert():
        LOG.fixture_step("Revert central https config to {}.".format(origin_https_central))
        security_helper.modify_https(enable_https=origin_https_central, auth_info=central_auth)

        LOG.fixture_step("Revert subcloud https config to {}.".format(origin_https_sub))
        security_helper.modify_https(enable_https=origin_https_sub, auth_info=sub_auth)

        LOG.fixture_step("Verify cli's on subcloud and central region.".format(origin_https_sub))
        verify_cli(sub_auth, central_auth)

    request.addfinalizer(_revert)

    return origin_https_sub, origin_https_central, central_auth, sub_auth, use_dnsname
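A sketch of a test consuming the fixture above, assuming revert_https is registered with @pytest.fixture; the test body is hypothetical and only illustrates that the finalizer restores the original https settings.
# Hypothetical consumer of the revert_https fixture.
def test_https_toggle(revert_https):
    origin_https_sub, origin_https_central, central_auth, sub_auth, use_dnsname = revert_https
    # Flip the central region https setting; _revert puts it back after the test.
    security_helper.modify_https(enable_https=not origin_https_central,
                                 auth_info=central_auth)
    verify_cli(sub_auth, central_auth)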
Example #8
    def disable_snat():
        LOG.fixture_step("Disable SNAT on tenant router")
        try:
            network_helper.set_router_gateway(enable_snat=False)
        finally:
            LOG.fixture_step("Revert primary tenant to {}".format(primary_tenant['tenant']))
            Tenant.set_primary(primary_tenant)
Example #9
def _modify_firewall_rules(firewall_rules_path):
    """
    :param firewall_rules_path: Path to the firewalls rules file (including the file name)
    """
    dc_region = 'RegionOne' if ProjVar.get_var('IS_DC') else None

    ssh_client = ControllerClient.get_active_controller(name=dc_region)
    LOG.info("Install firewall rules: {}".format(firewall_rules_path))
    auth_info = Tenant.get('admin_platform', dc_region=dc_region)
    start_time = common.get_date_in_format(ssh_client=ssh_client)
    time.sleep(1)
    cli.system('firewall-rules-install',
               firewall_rules_path,
               ssh_client=ssh_client,
               auth_info=auth_info)

    def _wait_for_config_apply(auth_info_, con_ssh_=None):
        controllers = system_helper.get_controllers(auth_info=auth_info_,
                                                    con_ssh=con_ssh_)
        for controller in controllers:
            system_helper.wait_for_events(
                start=start_time,
                fail_ok=False,
                timeout=60,
                entity_instance_id='host=controller',
                event_log_id=EventLogID.CONFIG_OUT_OF_DATE,
                auth_info=auth_info_,
                con_ssh=con_ssh_,
                **{
                    'Entity Instance ID': 'host={}'.format(controller),
                    'State': 'set'
                })
            # Extend timeout for controller-1 config out-of-date clear to 5 min due to CGTS-8497
            system_helper.wait_for_events(
                start=start_time,
                fail_ok=False,
                timeout=300,
                entity_instance_id='host=controller',
                event_log_id=EventLogID.CONFIG_OUT_OF_DATE,
                auth_info=auth_info_,
                con_ssh=con_ssh_,
                **{
                    'Entity Instance ID': 'host={}'.format(controller),
                    'State': 'clear'
                })

    LOG.info("Wait for config to apply on both controllers")
    _wait_for_config_apply(auth_info_=auth_info, con_ssh_=ssh_client)

    if ProjVar.get_var('IS_DC'):
        subcloud = ProjVar.get_var('PRIMARY_SUBCLOUD')
        LOG.info(
            "Wait for sync audit for {} in dcmanager.log".format(subcloud))
        dc_helper.wait_for_sync_audit(subclouds=subcloud)

        LOG.info("Wait for config apply on {}".format(subcloud))
        _wait_for_config_apply(auth_info_=Tenant.get('admin_platform'))

    # Ensures iptables has enough time to populate the list with new ports
    time.sleep(10)
Example #10
def launch_lab_setup_tenants_vms():
    home_dir = HostLinuxUser.get_home()
    stack1 = "{}/lab_setup-tenant1-resources.yaml".format(home_dir)
    stack1_name = "lab_setup-tenant1-resources"
    stack2 = "{}/lab_setup-tenant2-resources.yaml".format(home_dir)
    stack2_name = "lab_setup-tenant2-resources"
    script_name = "{}/create_resource_stacks.sh".format(home_dir)

    con_ssh = ControllerClient.get_active_controller()
    if con_ssh.file_exists(file_path=script_name):
        cmd1 = 'chmod 755 ' + script_name
        con_ssh.exec_cmd(cmd1)
        con_ssh.exec_cmd(script_name, fail_ok=False)

    stack_id_t1 = heat_helper.get_stacks(name=stack1_name,
                                         auth_info=Tenant.get('tenant1'))
    # may be better to delete all tenant stacks if any
    if not stack_id_t1:
        heat_helper.create_stack(stack_name=stack1_name,
                                 template=stack1,
                                 auth_info=Tenant.get('tenant1'),
                                 timeout=1000,
                                 cleanup=None)
    stack_id_t2 = heat_helper.get_stacks(name=stack2_name,
                                         auth_info=Tenant.get('tenant2'))
    if not stack_id_t2:
        heat_helper.create_stack(stack_name=stack2_name,
                                 template=stack2,
                                 auth_info=Tenant.get('tenant2'),
                                 timeout=1000,
                                 cleanup=None)

    LOG.info("Checking all VMs are in active state")
    vms = get_all_vms()
    vm_helper.wait_for_vms_values(vms=vms, fail_ok=False)
Example #11
def test_launch_vms_for_traffic():
    stack1 = "/home/sysadmin/lab_setup-tenant1-resources.yaml"
    stack1_name = "lab_setup-tenant1-resources"
    stack2 = "/home/sysadmin/lab_setup-tenant2-resources.yaml"
    stack2_name = "lab_setup-tenant2-resources"
    script_name = "/home/sysadmin/create_resource_stacks.sh"

    con_ssh = ControllerClient.get_active_controller()
    if con_ssh.file_exists(file_path=script_name):
        cmd1 = 'chmod 755 ' + script_name
        con_ssh.exec_cmd(cmd1)
        con_ssh.exec_cmd(script_name, fail_ok=False)
    # may be better to delete all tenant stacks if any
    heat_helper.create_stack(stack_name=stack1_name,
                             template=stack1,
                             auth_info=Tenant.get('tenant1'),
                             timeout=1000,
                             cleanup=None)
    heat_helper.create_stack(stack_name=stack2_name,
                             template=stack2,
                             auth_info=Tenant.get('tenant2'),
                             timeout=1000,
                             cleanup=None)
    LOG.info("Checking all VMs are in active state")
    vms = system_test_helper.get_all_vms()
    vm_helper.wait_for_vms_values(vms=vms, fail_ok=False)
Example #12
def test_non_primary_tenant():
    vm_1 = vm_helper.boot_vm(cleanup='function',
                             auth_info=Tenant.get('tenant1'))[1]
    vm_2 = vm_helper.launch_vms(vm_type='dpdk',
                                auth_info=Tenant.get('tenant1'))[0][0]
    vm_helper.wait_for_vm_pingable_from_natbox(vm_id=vm_1)
    vm_helper.wait_for_vm_pingable_from_natbox(vm_id=vm_2)
    vm_helper.ping_vms_from_natbox(vm_ids=vm_2)
    vm_helper.ping_vms_from_vm(vm_2, vm_1, net_types='mgmt')
Example #13
def set_user(user, name=None, project=None, password=None, project_domain=None,
             email=None, description=None,
             enable=None, fail_ok=False, auth_info=Tenant.get('admin'),
             con_ssh=None):
    LOG.info("Updating {}...".format(user))
    arg = ''
    optional_args = {
        'name': name,
        'project': project,
        'password': password,
        'project-domain': project_domain,
        'email': email,
        'description': description,
    }
    for key, val in optional_args.items():
        if val is not None:
            arg += "--{} '{}' ".format(key, val)

    if enable is not None:
        arg += '--{} '.format('enable' if enable else 'disable')

    if not arg.strip():
        raise ValueError(
            "Please specify the param(s) and value(s) to change to")

    arg += user

    code, output = cli.openstack('user set', arg, ssh_client=con_ssh, timeout=120,
                                 fail_ok=fail_ok, auth_info=auth_info)

    if code > 0:
        return 1, output

    if name or project or password:
        tenant_dictname = user
        if auth_info and auth_info.get('platform'):
            tenant_dictname += '_platform'
        Tenant.update(tenant_dictname, username=name, password=password,
                      tenant=project)

    if password and user == 'admin':
        from consts.proj_vars import ProjVar
        if ProjVar.get_var('REGION') != 'RegionOne':
            LOG.info(
                "Run openstack_update_admin_password on secondary region "
                "after admin password change")
            if not con_ssh:
                con_ssh = ControllerClient.get_active_controller()
            with con_ssh.login_as_root(timeout=30) as con_ssh:
                con_ssh.exec_cmd(
                    "echo 'y' | openstack_update_admin_password '{}'".format(
                        password))

    msg = 'User {} updated successfully'.format(user)
    LOG.info(msg)
    return 0, output
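A hedged example call for set_user; the user name, password and email below are placeholders, not values from the original code.
# Hypothetical usage: update a user's password and email, tolerating failure.
code, output = set_user('tenant1', password='N3wPassw0rd!',
                        email='tenant1@example.com', fail_ok=True,
                        auth_info=Tenant.get('admin'))
if code > 0:
    LOG.info("user set failed: {}".format(output))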
Example #14
    def list_status():
        LOG.fixture_step("Listing heat resources and nova migrations")
        stacks = heat_helper.get_stacks(auth_info=Tenant.get('admin'))
        for stack in stacks:
            heat_helper.get_stack_resources(stack=stack,
                                            auth_info=Tenant.get('admin'))

        # stack_id = heat_helper.get_stacks(name=HeatTemplate.SYSTEM_TEST_HEAT_NAME)
        #
        # # if stack_id:
        # #     code, msg = heat_helper.delete_stack(stack_name=HeatTemplate.SYSTEM_TEST_HEAT_NAME)

        nova_helper.get_migration_list_table()
Example #15
def pytest_collectstart():
    """
    Set up the ssh session at collection start, because skipif conditions
    are evaluated during the test collection phase.
    """
    global con_ssh
    con_ssh = setups.setup_tis_ssh(InstallVars.get_install_var("LAB"))
    InstallVars.set_install_var(con_ssh=con_ssh)
    auth = setups.get_auth_via_openrc(con_ssh)
    if auth:
        CliAuth.set_vars(**auth)

    Tenant.set_platform_url(CliAuth.get_var('OS_AUTH_URL'))
    Tenant.set_region(CliAuth.get_var('OS_REGION_NAME'))
Example #16
    def pcipt_prep(self, request, pci_prep):
        primary_tenant, primary_tenant_name, other_tenant = pci_prep
        vif_model = 'pci-passthrough'

        net_type, pci_net_name, pci_net_id, pnet_id, pnet_name, other_pcipt_net_name, other_pcipt_net_id = \
            get_pci_net(request, vif_model, primary_tenant, primary_tenant_name, other_tenant)
        pci_hosts = get_pci_hosts(vif_model, pnet_name)
        if len(pci_hosts) < 2:
            skip('Less than 2 hosts with {} interface configured'.format(
                vif_model))

        # TODO: feature unavailable atm. Update required
        # pfs_conf, pfs_use_init = nova_helper.get_pci_interface_stats_for_providernet(
        #         pnet_id, fields=('pci_pfs_configured', 'pci_pfs_used'))
        # if pfs_conf < 2:
        #     skip('Less than 2 {} interfaces configured on system'.format(vif_model))
        pfs_use_init = None

        # Get initial host with least vcpus
        LOG.fixture_step(
            "Calculate number of vms and number of vcpus for each vm")
        vm_num = 2
        min_vcpu_host, min_cores_per_proc = get_host_with_min_vm_cores_per_proc(
            pci_hosts)
        vm_vcpus = int(min_cores_per_proc / (vm_num / 2))

        LOG.fixture_step(
            "Get seg_id for {} for vlan tagging on pci-passthough device later"
            .format(pci_net_id))
        seg_id = network_helper.get_network_values(
            network=pci_net_id,
            fields='segmentation_id',
            strict=False,
            auth_info=Tenant.get('admin'))[0]
        assert seg_id, 'Segmentation id of pci net {} is not found'.format(
            pci_net_id)

        if other_pcipt_net_name:
            extra_pcipt_seg_id = network_helper.get_network_values(
                network=other_pcipt_net_name,
                fields='segmentation_id',
                strict=False,
                auth_info=Tenant.get('admin'))[0]
            seg_id = {
                pci_net_name: seg_id,
                other_pcipt_net_name: extra_pcipt_seg_id
            }

        nics = get_pci_vm_nics(vif_model, pci_net_id, other_pcipt_net_id)

        return net_type, pci_net_name, pci_hosts, pnet_id, nics, min_vcpu_host, seg_id, vm_num, vm_vcpus, pfs_use_init
Example #17
def pytest_collectstart():
    """
    Set up the ssh session at collection start, because skipif conditions
    are evaluated during the test collection phase.
    """
    global con_ssh
    lab = ProjVar.get_var("LAB")
    if 'vbox' in lab['short_name']:
        con_ssh = setups.setup_vbox_tis_ssh(lab)
    else:
        con_ssh = setups.setup_tis_ssh(lab)
    ProjVar.set_var(con_ssh=con_ssh)
    CliAuth.set_vars(**setups.get_auth_via_openrc(con_ssh))
    Tenant.set_region(region=CliAuth.get_var('OS_REGION_NAME'))
    Tenant.set_platform_url(url=CliAuth.get_var('OS_AUTH_URL'))
Example #18
def test_admin_password(scenario, less_than_two_cons, _revert_admin_pw):
    """
    Test the admin password change

    Test Steps:
        - lock standby controller, change password and unlock
        - change password and swact
        - check alarms

    """
    if 'swact' in scenario and less_than_two_cons:
        skip(SkipSysType.LESS_THAN_TWO_CONTROLLERS)

    host = system_helper.get_standby_controller_name()
    assert host, "No standby controller on system"

    if scenario == "lock_standby_change_pswd":
        # lock the standby
        LOG.tc_step("Attempting to lock {}".format(host))
        res, out = host_helper.lock_host(host=host)
        LOG.tc_step("Result of the lock was: {}".format(res))

    # change password
    prev_pswd = Tenant.get('admin')['password']
    post_pswd = '!{}9'.format(prev_pswd)

    LOG.tc_step('Changing admin password to {}'.format(post_pswd))
    code, output = keystone_helper.set_user('admin', password=post_pswd, auth_info=Tenant.get(
        'admin_platform'))

    # assert "Warning: 'admin' password changed. Please wait 5 minutes before Locking/Unlocking
    # the controllers" in output
    LOG.tc_step("Sleep for 180 seconds after admin password change")
    time.sleep(180)  # CGTS-6928

    LOG.tc_step("Check admin password is updated in keyring")
    assert post_pswd == security_helper.get_admin_password_in_keyring()

    if scenario == "change_pswd_swact":
        LOG.tc_step("Swact active controller")
        host_helper.swact_host()
    else:
        LOG.tc_step("Unlock host {}".format(host))
        res = host_helper.unlock_host(host)
        LOG.info("Unlock hosts result: {}".format(res))

    LOG.tc_step("Check admin password is updated in keyring")
    assert post_pswd == security_helper.get_admin_password_in_keyring()
Example #19
def get_aggregated_measures(field='value', resource_type=None, metrics=None,
                            start=None, stop=None, overlap=None,
                            refresh=None, resource_ids=None, extra_query=None,
                            fail_ok=False, auth_info=Tenant.get('admin'),
                            con_ssh=None):
    """
    Get measurements via 'openstack metric measures aggregation'
    Args:
        field (str): header of a column
        resource_type (str|None):  used in --resource-type <resource_type>
        metrics (str|list|tuple|None): used in --metric <metric1> [metric2 ...]
        start (str|None): used in --start <start>
        stop (str|None): used in --stop <stop>
        refresh (bool): used in --refresh
        overlap (str|None): overlap percentage. used in
            --needed-overlap <overlap>
        resource_ids (str|list|tuple|None): used in --query "id=<resource_id1>[
            or id=<resource_id2> ...]"
        extra_query (str|None): used in --query <extra_query>
        fail_ok:
        auth_info:
        con_ssh:

    Returns (tuple): (return_code, list of measurement values)

    """
    LOG.info("Getting aggregated measurements...")
    args_dict = {
        'resource-type': resource_type,
        'metric': metrics,
        'start': start,
        'stop': stop,
        'needed-overlap': overlap,
        'refresh': refresh,
    }

    args = common.parse_args(args_dict, vals_sep=' ')
    query_str = ''
    if resource_ids:
        if isinstance(resource_ids, str):
            resource_ids = [resource_ids]
        resource_ids = ['id={}'.format(val) for val in resource_ids]
        query_str = ' or '.join(resource_ids)

    if extra_query:
        if resource_ids:
            query_str += ' and '
        query_str += '{}'.format(extra_query)

    if query_str:
        args += ' --query "{}"'.format(query_str)

    code, out = cli.openstack('metric measures aggregation', args,
                              ssh_client=con_ssh, fail_ok=fail_ok,
                              auth_info=auth_info)
    if code > 0:
        return 1, out

    table_ = table_parser.table(out)
    return 0, table_parser.get_values(table_, field)
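A usage sketch for the helper above; the metric name, resource ids and time range are placeholders. The resource ids are folded into a --query "id=... or id=..." string as shown in the code.
# Hypothetical query with placeholder values.
code, values = get_aggregated_measures(metrics='cpu',
                                       resource_ids=('uuid-1', 'uuid-2'),
                                       start='2019-01-01T00:00:00',
                                       stop='2019-01-02T00:00:00',
                                       fail_ok=True)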
Example #20
def app_upload_apply(con_ssh=None, auth_info=Tenant.get('admin_platform')):
    """
    Upload stx-monitor
    Apply stx-monitor
    """

    # Do application upload stx-monitor.
    app_dir = HostLinuxUser.get_home()
    tar_file = os.path.join(app_dir, STX_MONITOR_TAR)
    LOG.info("Upload %s" % tar_file)
    container_helper.upload_app(
        tar_file=tar_file,
        app_name=STX_MONITOR_APP_NAME,
        con_ssh=con_ssh,
        auth_info=auth_info,
        uploaded_timeout=3600,
    )

    # Do application apply stx-monitor.
    LOG.info("Apply %s" % STX_MONITOR_APP_NAME)
    container_helper.apply_app(app_name=STX_MONITOR_APP_NAME,
                               applied_timeout=3600,
                               check_interval=60,
                               con_ssh=con_ssh,
                               auth_info=auth_info)
Example #21
def get_helm_overrides(field='overrides namespaces',
                       app_name='stx-openstack',
                       charts=None,
                       auth_info=Tenant.get('admin_platform'),
                       con_ssh=None):
    """
    Get helm override values via system helm-override-list
    Args:
        field (str):
        app_name
        charts (None|str|list|tuple):
        auth_info:
        con_ssh:

    Returns (list):

    """
    table_ = table_parser.table(
        cli.system('helm-override-list',
                   app_name,
                   ssh_client=con_ssh,
                   auth_info=auth_info)[1])

    if charts:
        table_ = table_parser.filter_table(table_, **{'chart name': charts})

    vals = table_parser.get_multi_values(table_, fields=field, evaluate=True)

    return vals
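A minimal usage sketch; the chart name below is a placeholder.
# Hypothetical usage: list override namespaces for a single chart.
namespaces = get_helm_overrides(field='overrides namespaces',
                                app_name='stx-openstack',
                                charts='nova')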
Example #22
def is_stx_openstack_deployed(applied_only=False,
                              con_ssh=None,
                              auth_info=Tenant.get('admin_platform'),
                              force_check=False):
    """
    Whether the stx-openstack application is deployed.
    Args:
        applied_only (bool): if True, then only return True when application
            is in applied state
        con_ssh:
        auth_info:
        force_check:

    Returns (bool):

    """
    openstack_deployed = ProjVar.get_var('OPENSTACK_DEPLOYED')
    if not applied_only and not force_check and openstack_deployed is not None:
        return openstack_deployed

    openstack_status = get_apps(application='stx-openstack',
                                field='status',
                                con_ssh=con_ssh,
                                auth_info=auth_info)

    LOG.info("{}".format(openstack_status))

    res = False
    if openstack_status and 'appl' in openstack_status[0].lower():
        res = True
        if applied_only and openstack_status[0] != AppStatus.APPLIED:
            res = False

    return res
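A short sketch of using the helper above as a test guard, forcing a fresh status check instead of trusting the cached OPENSTACK_DEPLOYED value.
# Hypothetical guard: skip openstack tests when stx-openstack is not fully applied.
if not is_stx_openstack_deployed(applied_only=True, force_check=True):
    skip("stx-openstack application is not in applied state")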
Example #23
    def revert():
        reverted = False
        try:
            LOG.fixture_step("Manage primary subcloud {} if unmanaged".format(primary_subcloud))
            dc_helper.manage_subcloud(primary_subcloud)

            LOG.fixture_step("Revert NTP config if changed")
            res = system_helper.modify_ntp(ntp_servers=central_ntp, auth_info=central_auth, check_first=True,
                                           clear_alarm=False)[0]
            if res != -1:
                LOG.fixture_step("Lock unlock config out-of-date hosts in central region")
                system_helper.wait_and_clear_config_out_of_date_alarms(auth_info=central_auth,
                                                                       wait_with_best_effort=True)

                LOG.fixture_step("Lock unlock config out-of-date hosts in {}".format(primary_subcloud))
                dc_helper.wait_for_subcloud_ntp_config(subcloud=primary_subcloud, expected_ntp=central_ntp,
                                                       clear_alarm=True)

                if managed_subcloud:
                    LOG.fixture_step("Lock unlock config out-of-date hosts in {}".format(managed_subcloud))
                    dc_helper.wait_for_subcloud_ntp_config(subcloud=managed_subcloud, expected_ntp=central_ntp,
                                                           clear_alarm=True)

            if subclouds_to_revert:
                LOG.fixture_step("Manage unmanaged subclouds and check they are unaffected")
                for subcloud in subclouds_to_revert:
                    dc_helper.manage_subcloud(subcloud)
                    assert not system_helper.get_alarms(alarm_id=EventLogID.CONFIG_OUT_OF_DATE,
                                                        auth_info=Tenant.get('admin_platform', dc_region=subcloud))
            reverted = True

        finally:
            if not reverted:
                for subcloud in subclouds_to_revert:
                    dc_helper.manage_subcloud(subcloud)
Example #24
def test_dc_dns_modify(ensure_synced, scenario):
    """
    Update DNS servers on central region and check it is propagated to subclouds
    Args:
        ensure_synced: test fixture
        scenario: DNS change scenario

    Setups:
        - Ensure primary subcloud is managed and DNS config is valid and synced

    Test Steps:
        - Un-manage primary subcloud
        - Configure DNS servers on central region to new value based on given scenario
        - Wait for new DNS config to sync over to managed online subclouds
        - Ensure DNS config is not updated on unmanaged primary subcloud
        - Re-manage primary subcloud and ensure DNS config syncs over
        - Verify nslookup works in Central Region and primary subcloud

    Teardown:
        - Reset DNS servers to original value (module)

    """
    primary_subcloud, managed_subclouds, prev_dns_servers = ensure_synced
    new_dns_servers = compose_new_dns_servers(
        scenario=scenario, prev_dns_servers=prev_dns_servers)

    LOG.tc_step("Unmanage {}".format(primary_subcloud))
    dc_helper.unmanage_subcloud(subcloud=primary_subcloud, check_first=True)

    LOG.tc_step(
        "Reconfigure DNS servers on central region from {} to {}".format(
            prev_dns_servers, new_dns_servers))
    system_helper.set_dns_servers(new_dns_servers,
                                  auth_info=Tenant.get('admin_platform',
                                                       dc_region='RegionOne'))

    LOG.tc_step(
        "Wait for new DNS config to sync over to managed online subclouds")
    for managed_sub in managed_subclouds:
        dc_helper.wait_for_subcloud_dns_config(subcloud=managed_sub,
                                               expected_dns=new_dns_servers)

    LOG.tc_step(
        "Ensure DNS config is not updated on unmanaged subcloud: {}".format(
            primary_subcloud))
    code = dc_helper.wait_for_subcloud_dns_config(subcloud=primary_subcloud,
                                                  expected_dns=new_dns_servers,
                                                  timeout=60,
                                                  fail_ok=True)[0]
    assert 1 == code, "Actual return code: {}".format(code)

    LOG.tc_step('Re-manage {} and ensure DNS config syncs over'.format(
        primary_subcloud))
    dc_helper.manage_subcloud(subcloud=primary_subcloud, check_first=False)
    dc_helper.wait_for_subcloud_dns_config(subcloud=primary_subcloud,
                                           expected_dns=new_dns_servers)

    LOG.tc_step('Verify nslookup works in Central Region and {}'.format(
        primary_subcloud))
    verify_dns_on_central_and_subcloud(primary_subcloud)
Example #25
def get_alarms(header='alarm_id',
               name=None,
               strict=False,
               auth_info=Tenant.get('admin'),
               con_ssh=None):
    """

    Args:
        header
        name:
        strict:
        auth_info:
        con_ssh:

    Returns:

    """

    table_ = table_parser.table(cli.openstack('alarm list',
                                              ssh_client=con_ssh,
                                              auth_info=auth_info)[1],
                                combine_multiline_entry=True)
    if name is None:
        return table_parser.get_column(table_, header)

    return table_parser.get_values(table_, header, Name=name, strict=strict)
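A usage sketch; the alarm name below is a placeholder. Without a name the helper returns the full column.
# Hypothetical usage: list all alarm ids, then filter by a placeholder alarm name.
all_alarm_ids = get_alarms(header='alarm_id')
cpu_alarm_ids = get_alarms(header='alarm_id', name='cpu_util', strict=False)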
Example #26
def get_subcloud_status(subcloud,
                        field='availability',
                        auth_info=Tenant.get('admin_platform', 'RegionOne'),
                        con_ssh=None,
                        source_openrc=None):
    """

    Args:
        subcloud:
        field:
        auth_info:
        con_ssh:
        source_openrc:

    Returns:

    """

    LOG.info("Auth_info: {}".format(auth_info))
    table_ = table_parser.table(
        cli.dcmanager('subcloud list',
                      ssh_client=con_ssh,
                      auth_info=auth_info,
                      source_openrc=source_openrc)[1])
    arg_dict = {'name': subcloud}
    kwargs = {key: val for key, val in arg_dict.items() if val is not None}
    status = table_parser.get_values(table_, target_header=field, **kwargs)
    return status[0]
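A hedged call sketch; the subcloud name is a placeholder, and the 'management' column name is an assumption about the dcmanager subcloud list output.
# Hypothetical usage: read a subcloud's availability (default field) and management state.
availability = get_subcloud_status('subcloud1')
management = get_subcloud_status('subcloud1', field='management')  # column name assumed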
Example #27
def is_https_enabled(con_ssh=None, source_openrc=True, interface='public',
                     auth_info=Tenant.get('admin_platform')):
    """
    Check whether interface is https
    Args:
        con_ssh:
        source_openrc:
        interface: default is public
        auth_info:
    Returns True or False
    """
    if not con_ssh:
        con_name = auth_info.get('region') if (
                auth_info and ProjVar.get_var('IS_DC')) else None
        con_ssh = ControllerClient.get_active_controller(name=con_name)

    table_ = table_parser.table(
        cli.openstack('endpoint list', ssh_client=con_ssh, auth_info=auth_info,
                      source_openrc=source_openrc)[1])
    con_ssh.exec_cmd('unset OS_REGION_NAME')  # Workaround
    filters = {'Service Name': 'keystone', 'Service Type': 'identity',
               'Interface': interface}
    keystone_values = table_parser.get_values(table_=table_, target_header='URL',
                                              **filters)
    LOG.info('keystone {} URLs: {}'.format(interface, keystone_values))
    return all('https' in i for i in keystone_values)
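A usage sketch for a distributed cloud setup, reusing the Tenant.get auth patterns from the earlier examples.
# Hypothetical usage: check the public keystone endpoints on the primary
# subcloud and on RegionOne.
sub_https = is_https_enabled(interface='public',
                             auth_info=Tenant.get('admin_platform'))
central_https = is_https_enabled(interface='public',
                                 auth_info=Tenant.get('admin_platform',
                                                      dc_region='RegionOne'))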
Example #28
def get_metric_values(metric_id=None, metric_name=None, resource_id=None, fields='id', fail_ok=False,
                      auth_info=Tenant.get('admin'), con_ssh=None):
    """
    Get metric info via 'openstack metric show'
    Args:
        metric_id (str|None):
        metric_name (str|None): Only used if metric_id is not provided
        resource_id (str|None):  Only used if metric_id is not provided
        fields (str|list|tuple): field name
        fail_ok (bool):
        auth_info:
        con_ssh:

    Returns (list):

    """
    if metric_id is None and metric_name is None:
        raise ValueError("metric_id or metric_name has to be provided.")

    if metric_id:
        arg = metric_id
    else:
        if resource_id:
            arg = '--resource-id {} "{}"'.format(resource_id, metric_name)
        else:
            if not fail_ok:
                raise ValueError("resource_id needs to be provided when using metric_name")
            arg = '"{}"'.format(metric_name)

    code, output = cli.openstack('metric show', arg, ssh_client=con_ssh,
                                 fail_ok=fail_ok, auth_info=auth_info)
    if code > 0:
        return output

    table_ = table_parser.table(output)
    return table_parser.get_multi_values_two_col_table(table_, fields)
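A usage sketch; the metric name and resource id are placeholders. Per the code above, resource_id is required when looking up by metric_name unless fail_ok=True.
# Hypothetical usage: look up a metric id by name for a given resource.
metric_id = get_metric_values(metric_name='cpu_util',
                              resource_id='resource-uuid',
                              fields='id')[0]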
Example #29
def verify_user(user_name,
                password,
                is_admin=True,
                expect_fail=False,
                keystone=None):
    scenario = ' and expect failure' if expect_fail else ''
    LOG.info('Run {} OpenStack command with {} role {}'.format(
        keystone, 'admin' if is_admin else 'member', scenario))

    dict_name = '{}_platform'.format(
        user_name) if keystone == 'platform' else user_name
    auth_info = Tenant.get(dict_name)
    auth_info = copy.deepcopy(auth_info)
    auth_info['password'] = password
    if is_admin:
        command = 'endpoint list'
        code, output = cli.openstack(command,
                                     fail_ok=expect_fail,
                                     auth_info=auth_info)
    else:
        command = 'user show {}'.format(user_name)
        code, output = cli.openstack(command,
                                     fail_ok=expect_fail,
                                     auth_info=auth_info)

    message = 'command:{}\nauth_info:{}\noutput:{}'.format(
        command, auth_info, output)

    if expect_fail:
        assert 1 == code, "OpenStack command ran successfully while rejection is " \
                          "expected: {}".format(message)
Example #30
def subclouds_to_test(request):

    LOG.info("Gather DNS config and subcloud management info")
    sc_auth = Tenant.get('admin_platform', dc_region='SystemController')
    dns_servers = system_helper.get_dns_servers(auth_info=sc_auth)

    subcloud = ProjVar.get_var('PRIMARY_SUBCLOUD')

    def revert():
        LOG.fixture_step("Manage {} if unmanaged".format(subcloud))
        dc_helper.manage_subcloud(subcloud)

        LOG.fixture_step("Revert DNS config if changed")
        system_helper.set_dns_servers(nameservers=dns_servers,
                                      auth_info=sc_auth)

    request.addfinalizer(revert)

    managed_subclouds = dc_helper.get_subclouds(mgmt='managed', avail='online')
    if subcloud in managed_subclouds:
        managed_subclouds.remove(subcloud)

    ssh_map = ControllerClient.get_active_controllers_map()
    managed_subclouds = [
        subcloud for subcloud in managed_subclouds if subcloud in ssh_map
    ]

    return subcloud, managed_subclouds