def revert():
    LOG.fixture_step("Manage {} if unmanaged".format(subcloud))
    dc_helper.manage_subcloud(subcloud)

    LOG.fixture_step("Revert DNS config if changed")
    system_helper.set_dns_servers(nameservers=dns_servers,
                                  auth_info=sc_auth)
Example #2
def revert():
    LOG.fixture_step("Revert network quotas to original values.")
    for tenant_id_, quotas in tenants_quotas.items():
        network_quota_, subnet_quota_ = quotas
        vm_helper.set_quotas(tenant=tenant_id_,
                             networks=network_quota_,
                             subnets=subnet_quota_)
Example #3
def cleanup_app():
    if container_helper.get_apps(application=HELM_APP_NAME):
        LOG.fixture_step("Remove {} app if applied".format(HELM_APP_NAME))
        container_helper.remove_app(app_name=HELM_APP_NAME)

        LOG.fixture_step("Delete {} app".format(HELM_APP_NAME))
        container_helper.delete_app(app_name=HELM_APP_NAME)
Example #4
def sys_config_pg(admin_home_pg):
    LOG.fixture_step('Go to Admin > Platform > System Configuration')
    system_configuration_pg = systemconfigurationpage.SystemConfigurationPage(
        admin_home_pg.driver)
    system_configuration_pg.go_to_target_page()

    return system_configuration_pg
Exemple #5
0
def setup_qos(request, avs_required):
    LOG.fixture_step("Creating new QoS")
    scheduler = {'weight': 100}
    qos_new = network_helper.create_qos(scheduler=scheduler,
                                        description="Test QoS",
                                        cleanup='function')[1]
    LOG.fixture_step("Retrieving network ids and Qos'")
    internal_net_id = network_helper.get_internal_net_id()
    mgmt_net_id = network_helper.get_mgmt_net_id()
    qos_internal = network_helper.get_network_values(network=internal_net_id,
                                                     fields='wrs-tm:qos')[0]
    qos_mgmt = network_helper.get_network_values(network=mgmt_net_id,
                                                 fields='wrs-tm:qos')[0]

    def reset():
        LOG.fixture_step("Resetting QoS for tenant and internal networks")

        network_helper.update_net_qos(net_id=internal_net_id,
                                      qos_id=qos_internal)
        network_helper.update_net_qos(net_id=mgmt_net_id, qos_id=qos_mgmt)

        LOG.fixture_step("Deleting created QoS")
        network_helper.delete_qos(qos_new)

    request.addfinalizer(reset)
    return internal_net_id, mgmt_net_id, qos_new
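A test consumes this fixture by unpacking the returned tuple. A minimal sketch of such a test (the test name and usage are illustrative, not from the source module):

def test_qos_update(setup_qos):
    internal_net_id, mgmt_net_id, qos_new = setup_qos
    # Attach the new QoS to the internal network; the fixture's finalizer
    # restores the original QoS ids and deletes qos_new afterwards.
    network_helper.update_net_qos(net_id=internal_net_id, qos_id=qos_new)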
Example #6
def fix_usefixture(request):
    LOG.fixture_step("I'm a usefixture fixture step")

    def fix_teardown():
        LOG.fixture_step("I'm a usefixture teardown")

    request.addfinalizer(fix_teardown)
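These listings omit the decorators from their enclosing modules. As a minimal, self-contained sketch of the same setup/finalizer pattern (plain pytest, with print standing in for LOG.fixture_step):

import pytest

@pytest.fixture()
def fix_usefixture(request):
    print("I'm a usefixture fixture step")  # setup runs before the test

    def fix_teardown():
        print("I'm a usefixture teardown")  # runs after the test finishes

    request.addfinalizer(fix_teardown)  # register the teardown callback

@pytest.mark.usefixtures('fix_usefixture')
def test_wrapped_by_fixture():
    pass  # the fixture's setup and teardown bracket this test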
Example #7
def add_route_for_vm_access(compliance_client):
    """
    Add ip routes on the compliance test node so that VMs can be reached
    from it.

    Args:
        compliance_client: client used to execute commands on the
            compliance test node

    Returns: None

    """
    LOG.fixture_step(
        "Add routes to access VM from compliance server if not already done")
    cidrs = network_helper.get_subnets(
        name="tenant[1|2].*-mgmt0-subnet0|external-subnet0",
        regex=True,
        field='cidr',
        auth_info=Tenant.get('admin'))
    cidrs_to_add = [
        r'{}.0/24'.format(re.findall(r'(.*)\.\d+/\d+', item)[0])
        for item in cidrs
    ]
    for cidr in cidrs_to_add:
        if compliance_client.exec_cmd(
                'ip route | grep "{}"'.format(cidr))[0] != 0:
            compliance_client.exec_sudo_cmd('ip route add {} via {}'.format(
                cidr, VM_ROUTE_VIA))
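The list comprehension above reduces each subnet CIDR to its /24 network route. A standalone illustration of the regex (with the dot escaped, as fixed above; the input values are made up):

import re

cidrs = ['192.168.101.0/27', '192.168.102.16/28']  # illustrative inputs
cidrs_to_add = ['{}.0/24'.format(re.findall(r'(.*)\.\d+/\d+', c)[0])
                for c in cidrs]
print(cidrs_to_add)  # ['192.168.101.0/24', '192.168.102.0/24']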
Example #8
def setup_alias(request):
    LOG.fixture_step("Create nova device list for gpu device")
    nova_gpu_alias = _get_nova_alias(class_id=DevClassID.GPU, dev_type='gpu')
    LOG.fixture_step("Create nova device list for usb device")
    nova_usb_alias = _get_nova_alias(class_id=DevClassID.USB,
                                     dev_type='user',
                                     regex=True)

    def revert_alias_setup():

        service = 'nova'
        gpu_uuid = system_helper.get_service_parameter_values(
            field='uuid', service=service, section='pci_alias', name='gpu')[0]
        user_uuid = system_helper.get_service_parameter_values(
            field='uuid', service=service, section='pci_alias', name='user')[0]
        LOG.fixture_step("Delete service parameter uuid {} ".format(gpu_uuid))
        system_helper.delete_service_parameter(uuid=gpu_uuid)
        LOG.fixture_step("Delete service parameter uuid {} ".format(user_uuid))
        system_helper.delete_service_parameter(uuid=user_uuid)

        system_helper.apply_service_parameters(service, wait_for_config=True)

        # CGTS-9637  cpu usage high
        time.sleep(120)

    request.addfinalizer(revert_alias_setup)

    return nova_gpu_alias, nova_usb_alias
Example #9
    def _recover_hosts(hostnames, scope):
        if system_helper.is_aio_simplex():
            LOG.fixture_step('{} Recover simplex host'.format(scope))
            host_helper.recover_simplex(fail_ok=False)
            return

        # Recover hosts for non-simplex system
        hostnames = sorted(set(hostnames))
        table_ = table_parser.table(cli.system('host-list')[1])
        table_ = table_parser.filter_table(table_, hostname=hostnames)

        # unlocked_hosts = table_parser.get_values(table_, 'hostname',
        # administrative='unlocked')
        locked_hosts = table_parser.get_values(table_,
                                               'hostname',
                                               administrative='locked')

        err_msg = []
        if locked_hosts:
            LOG.fixture_step("({}) Unlock hosts: {}".format(
                scope, locked_hosts))
            # Hypervisor state will be checked later in wait_for_hosts_ready
            # which handles platform only deployment
            res1 = host_helper.unlock_hosts(hosts=locked_hosts,
                                            fail_ok=True,
                                            check_hypervisor_up=False)
            for host in res1:
                if res1[host][0] not in [0, 4]:
                    err_msg.append(
                        "Not all host(s) unlocked successfully. Detail: "
                        "{}".format(res1))

        host_helper.wait_for_hosts_ready(hostnames)
Example #10
def deploy_delete_kubectl_app(request):
    app_name = 'resource-consumer'
    app_params = (
        '--image=gcr.io/kubernetes-e2e-test-images/resource-consumer:1.4'
        ' --expose'
        ' --service-overrides=\'{ "spec": { "type": "LoadBalancer" } }\''
        " --port 8080 --requests='cpu=1000m,memory=1024Mi'"
    )

    LOG.fixture_step("Create {} test app by kubectl run".format(app_name))
    sub_cmd = "run {}".format(app_name)
    kube_helper.exec_kube_cmd(sub_cmd=sub_cmd, args=app_params, fail_ok=False)

    LOG.fixture_step("Check {} test app is created ".format(app_name))
    pod_name = kube_helper.get_pods(field='NAME',
                                    namespace='default',
                                    name=app_name,
                                    strict=False)[0]

    def delete_app():
        LOG.fixture_step("Delete {} pod if exists after test "
                         "run".format(app_name))
        kube_helper.delete_resources(resource_names=app_name,
                                     resource_types=('deployment', 'service'),
                                     namespace='default',
                                     post_check=False)
        kube_helper.wait_for_resources_gone(resource_names=pod_name,
                                            namespace='default')

    request.addfinalizer(delete_app)

    kube_helper.wait_for_pods_status(pod_names=pod_name,
                                     namespace='default',
                                     fail_ok=False)
    return app_name, pod_name
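For reference, app_params above evaluates to a single argument string, so (assuming exec_kube_cmd simply prefixes the kubectl binary and the sub-command) the fixture effectively runs:

kubectl run resource-consumer --image=gcr.io/kubernetes-e2e-test-images/resource-consumer:1.4 --expose --service-overrides='{ "spec": { "type": "LoadBalancer" } }' --port 8080 --requests='cpu=1000m,memory=1024Mi'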
Example #11
def setup_host_install(request, get_patch_name):
    con_ssh = ControllerClient.get_active_controller()
    hosts = host_helper.get_up_hypervisors()
    host = hosts[-1]
    if host == system_helper.get_active_controller_name():
        host = hosts[-2]
    host_helper.lock_host(host)

    patch_name = get_patch_name
    LOG.fixture_step("Applying {} to patching controller".format(patch_name))
    con_ssh.exec_sudo_cmd('sw-patch upload test_patches/{}.patch'.format(
        patch_name))
    con_ssh.exec_sudo_cmd('sw-patch apply {}'.format(patch_name))

    def delete_patch():
        LOG.fixture_step("Removing {} from patching controller".format(
            patch_name))
        con_ssh.exec_sudo_cmd('sw-patch remove {}'.format(patch_name))
        con_ssh.exec_sudo_cmd('sw-patch delete {}'.format(patch_name))
        LOG.fixture_step("Reinstalling {} to revert the patch".format(patch_name))
        con_ssh.exec_sudo_cmd('sw-patch host-install {}'.format(host),
                              expect_timeout=timeout.CLI_TIMEOUT)
        host_helper.unlock_host(host)

    request.addfinalizer(delete_patch)
    return patch_name, host
Example #12
    def list_status():
        LOG.fixture_step("Listing heat resources and nova migrations")
        stacks = heat_helper.get_stacks(auth_info=Tenant.get('admin'))
        for stack in stacks:
            heat_helper.get_stack_resources(stack=stack, auth_info=Tenant.get('admin'))

        nova_helper.get_migration_list_table()
Example #13
    def verify_vms():
        LOG.fixture_step("Verifying system VMs after test ended...")
        after_vms_status = vm_helper.get_vms_info(
            fields=['status'],
            long=False,
            all_projects=True,
            auth_info=Tenant.get('admin'))

        # Compare the status of each VM before and after the test
        common_vms = set(before_vms_status) & set(after_vms_status)
        LOG.debug("VMs to verify: {}".format(common_vms))
        failure_msgs = []
        for vm in common_vms:
            before_status = before_vms_status[vm][0]
            post_status = after_vms_status[vm][0]

            if (post_status.lower() != 'active'
                    and post_status != before_status):
                msg = "VM {} is not in good state. Previous status: {}. " \
                      "Current status: {}".format(vm, before_status,
                                                  post_status)
                failure_msgs.append(msg)

        assert not failure_msgs, '\n'.join(failure_msgs)
        LOG.info("VMs status verified.")
Example #14
def __get_system_crash_and_coredumps(scope):
    LOG.fixture_step(
        "({}) Getting existing system crash reports and coredumps before test "
        "{} begins.".format(scope, scope))

    core_dumps_and_reports = host_helper.get_coredumps_and_crashreports()
    return core_dumps_and_reports
Example #15
def router_info(request):
    global result_
    result_ = False

    LOG.fixture_step(
        "Disable SNAT and update router to DVR if not already done.")

    router_id = network_helper.get_tenant_router()
    network_helper.set_router_gateway(router_id, enable_snat=False)
    is_dvr = network_helper.get_router_values(router_id,
                                              fields='distributed',
                                              auth_info=Tenant.get('admin'))[0]

    def teardown():
        post_dvr = network_helper.get_router_values(
            router_id, fields='distributed', auth_info=Tenant.get('admin'))[0]
        if post_dvr != is_dvr:
            network_helper.set_router_mode(router_id, distributed=is_dvr)

    request.addfinalizer(teardown)

    if not is_dvr:
        network_helper.set_router_mode(router_id,
                                       distributed=True,
                                       enable_on_failure=False)

    result_ = True
    return router_id
Example #16
def modify_system_backing(request):
    """
    Issues in this fixture:
    - Hardcoded compute name
        - compute could be in a bad state
        - does not work for a CPE lab
    - Locks and unlocks 6 times (could be only 3 times)
    - Does not check the original storage backing (lab could be configured
      with local_lvm by default)

    """
    hostname, storage_backing = request.param

    LOG.fixture_step("Modify {} storage backing to {}".format(
        hostname, storage_backing))
    host_helper.lock_host(hostname)
    host_helper.set_host_storage_backing(hostname,
                                         inst_backing=storage_backing,
                                         lock=False)
    host_helper.unlock_hosts(hostname)

    def revert_host():
        LOG.fixture_step(
            "Revert {} storage backing to local_image".format(hostname))
        host_helper.lock_host(hostname)
        host_helper.set_host_storage_backing(hostname,
                                             inst_backing='local_image',
                                             lock=False)
        host_helper.unlock_hosts(hostname)

    request.addfinalizer(revert_host)

    return storage_backing
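Reading request.param means this fixture is parametrized at declaration or via indirect parametrization. A hedged sketch of how it might be declared (scope and parameter values are illustrative, not from the source):

import pytest

@pytest.fixture(params=[('compute-1', 'local_lvm')])
def modify_system_backing(request):
    hostname, storage_backing = request.param
    ...  # body as above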
Example #17
def setup_test_session(global_setup):
    """
    Set up primary tenant and NatBox ssh before the first test is executed.
    STX ssh was already set up during the collection phase.
    """
    LOG.fixture_step("(session) Setting up test session...")
    setups.setup_primary_tenant(ProjVar.get_var('PRIMARY_TENANT'))

    global con_ssh
    if not con_ssh:
        con_ssh = ControllerClient.get_active_controller()
    # set build id to be used to upload/write test results
    setups.set_build_info(con_ssh)

    # Ensure tis and natbox (if applicable) ssh are connected
    con_ssh.connect(retry=True, retry_interval=3, retry_timeout=300)

    # set up natbox connection and copy keyfile
    natbox_dict = ProjVar.get_var('NATBOX')
    global natbox_ssh
    natbox_ssh = setups.setup_natbox_ssh(natbox_dict, con_ssh=con_ssh)

    # set global var for sys_type
    setups.set_sys_type(con_ssh=con_ssh)

    # rsync files between controllers
    setups.copy_test_files()
Example #18
    def base_setup(self):

        flavor_id = nova_helper.create_flavor(name='dedicated')[1]
        ResourceCleanup.add('flavor', flavor_id, scope='class')

        extra_specs = {FlavorSpec.CPU_POLICY: 'dedicated'}
        nova_helper.set_flavor(flavor=flavor_id, **extra_specs)

        mgmt_net_id = network_helper.get_mgmt_net_id()
        tenant_net_id = network_helper.get_tenant_net_id()
        internal_net_id = network_helper.get_internal_net_id()

        nics = [{'net-id': mgmt_net_id},
                {'net-id': tenant_net_id},
                {'net-id': internal_net_id}]

        LOG.fixture_step(
            "(class) Boot a base vm with following nics: {}".format(nics))
        base_vm = vm_helper.boot_vm(name='multiports_base',
                                    flavor=flavor_id, nics=nics,
                                    cleanup='class',
                                    reuse_vol=False)[1]

        vm_helper.wait_for_vm_pingable_from_natbox(base_vm)
        vm_helper.ping_vms_from_vm(base_vm, base_vm, net_types='data')

        return base_vm, flavor_id, mgmt_net_id, tenant_net_id, internal_net_id
Example #19
def disable_snat():
    LOG.fixture_step("Disable SNAT on tenant router")
    try:
        network_helper.set_router_gateway(enable_snat=False)
    finally:
        LOG.fixture_step("Revert primary tenant to {}".format(
            primary_tenant['tenant']))
        Tenant.set_primary(primary_tenant)
Example #20
def teardown():
    if home_pg:
        LOG.fixture_step('Logout')
        home_pg.log_out()
    recorder.stop()
    if horizon.test_result:
        recorder.clear()
Example #21
def __create_image(img_os, scope):
    if not img_os:
        img_os = GuestImages.DEFAULT['guest']

    LOG.fixture_step(
        "({}) Get or create a glance image with {} guest OS".format(
            scope, img_os))
    img_info = GuestImages.IMAGE_FILES[img_os]
    img_id = glance_helper.get_image_id_from_name(img_os, strict=True)
    if not img_id:
        if img_info[0] is not None:
            image_path = glance_helper.scp_guest_image(img_os=img_os)
        else:
            img_dir = GuestImages.DEFAULT['image_dir']
            image_path = "{}/{}".format(img_dir, img_info[2])

        raw_img_oses = ('cgcs-guest', 'tis-centos-guest', 'vxworks')
        disk_format = 'raw' if img_os in raw_img_oses else 'qcow2'
        img_id = glance_helper.create_image(name=img_os,
                                            source_image_file=image_path,
                                            disk_format=disk_format,
                                            container_format='bare',
                                            cleanup=scope)[1]

    return img_id
Example #22
    def teardown():
        LOG.fixture_step('Back to Volumes page')
        volumes_pg.go_to_target_page()

        LOG.fixture_step('Delete volume {}'.format(volume_name))
        volumes_pg.delete_volume(volume_name)
        assert volumes_pg.is_volume_deleted(volume_name)
Example #23
def configure_tis(hosts, request):
    """
    - Modify host ssh configs to allow root access
    - Update system quotas
    Args:
        hosts: hosts to configure for root ssh access
        request: pytest fixture request

    Returns (list): hosts that were successfully configured

    """

    LOG.fixture_step(
        "Modify sshd_config on hosts to allow root access: {}".format(hosts))
    hosts_configured = []
    try:
        __config_sshd(hosts=hosts, hosts_configured=hosts_configured)
    finally:
        if hosts_configured:

            def _revert_sshd():
                LOG.fixture_step(
                    "Revert sshd configs on: {}".format(hosts_configured))
                __config_sshd(hosts=hosts, revert=True)

            request.addfinalizer(_revert_sshd)

    LOG.fixture_step("Update quotas for admin project")
    compliance_helper.create_tenants_and_update_quotas(new_tenants_index=None)

    return hosts_configured
Example #24
def _flavors(hosts_pci_device_info):
    """
    Creates all flavors required for this test module
    """
    # Create flavor using first device.
    pci_alias = list(hosts_pci_device_info.values())[0][0]['pci_alias']
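    # Each flavor value is [vcpus, ram, root_disk, number of QAT VFs].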
    flavor_parms = {'flavor_qat_vf_1': [2, 1024, 2, 1],
                    'flavor_resize_qat_vf_1': [4, 2048, 2, 1],
                    'flavor_qat_vf_4': [2, 1024, 2, 4],
                    'flavor_resize_qat_vf_4': [2, 2048, 2, 4],
                    'flavor_qat_vf_32': [2, 1024, 2, 32],
                    'flavor_qat_vf_33': [2, 1024, 2, 33],
                    'flavor_none': [1, 1024, 2, 0],
                    'flavor_resize_none': [2, 2048, 2, 0],
                    'flavor_resize_qat_vf_32': [4, 2048, 2, 32],
                    }

    flavors = {}
    for k, v in flavor_parms.items():
        vf = v[3]
        LOG.fixture_step("Create a flavor with {} Coletro Creek crypto VF....".format(vf))
        flavor_id = nova_helper.create_flavor(name=k, vcpus=v[0], ram=v[1], root_disk=v[2])[1]
        ResourceCleanup.add('flavor', flavor_id, scope='module')
        if vf > 0:
            extra_spec = {FlavorSpec.PCI_PASSTHROUGH_ALIAS: '{}:{}'.format(pci_alias, vf),
                          # FlavorSpec.NUMA_NODES: '2',
                          # feature deprecated. May need to update test case as well.
                          FlavorSpec.CPU_POLICY: 'dedicated'}

            nova_helper.set_flavor(flavor_id, **extra_spec)
        flavors[k] = flavor_id

    return flavors
Example #25
def keyfile_setup(request):
    """
    Set up the public key file on the lab under
    /home/root/.ssh/authorized_keys

    Args:
        request: pytest fixture request

    Returns: None

    """
    # copy the authorized key from test server to lab under /home/root/.ssh/authorized_keys
    LOG.fixture_step("copy id_rsa.pub key file from test server to lab")
    source = '/folk/svc-cgcsauto/.ssh/id_rsa.pub'
    destination = HostLinuxUser.get_home()
    common.scp_from_test_server_to_active_controller(source_path=source,
                                                     dest_dir=destination)

    con_ssh = ControllerClient.get_active_controller()
    sysadmin_keyfile = HostLinuxUser.get_home() + '/id_rsa.pub'
    LOG.fixture_step("Logging in as root")
    with con_ssh.login_as_root() as root_ssh:
        LOG.info("Logged in as root")
        root_ssh.exec_cmd('mkdir -p /home/root/.ssh')
        root_ssh.exec_cmd('touch /home/root/.ssh/authorized_keys')
        root_ssh.exec_cmd('cat ' + sysadmin_keyfile +
                          '  >> /home/root/.ssh/authorized_keys')

    def delete_keyfile():
        LOG.fixture_step("cleanup files from the lab as root")
        # clean up id_rsa.pub from sysadmin folder and authorized_keys in /home/root/.ssh/
        con_ssh.exec_cmd('rm {}/id_rsa.pub'.format(HostLinuxUser.get_home()))
        con_ssh.exec_sudo_cmd('rm -f /home/root/.ssh/authorized_keys')

    request.addfinalizer(delete_keyfile)
Example #26
    def delete_edgex():
        LOG.fixture_step("Destroying EdgeX-on-Kubernetes")
        con_ssh.exec_cmd(EDGEX_STOP, 180)

        LOG.fixture_step("Removing EdgeX-on-Kubernetes")
        con_ssh.exec_cmd('rm -rf {} {}'.format(os.path.join(WRSROOT_HOME, EDGEX_ARCHIVE),
                                               os.path.join(WRSROOT_HOME, EDGEX_HOME)))
Example #27
def generate_alarms(request):
    alarm_id = '300.005'

    def del_alarms():
        LOG.fixture_step(
            "Delete 300.005 alarms and ensure they are removed from alarm-list"
        )
        alarms_tab = system_helper.get_alarms_table(uuid=True)
        alarm_uuids = table_parser.get_values(table_=alarms_tab,
                                              target_header='UUID',
                                              **{'Alarm ID': alarm_id})
        if alarm_uuids:
            system_helper.delete_alarms(alarms=alarm_uuids)

        post_del_alarms = system_helper.get_alarms(alarm_id=alarm_id)
        assert not post_del_alarms, "300.005 alarm still exists after deletion"

    request.addfinalizer(del_alarms)

    LOG.fixture_step("Generate 10 active alarms with alarm_id 900.00x")
    alarm_gen_base = "fmClientCli -c '### ###300.005###set###system.vm###host=autohost-{}### ###critical###" \
                     "Automation test###processing-error###cpu-cycles-limit-exceeded### ###True###True###'"

    con_ssh = ControllerClient.get_active_controller()
    for i in range(10):
        LOG.info("Create an critical alarm with id {}".format(alarm_id))
        alarm_gen_cmd = alarm_gen_base.format(i)
        con_ssh.exec_cmd(alarm_gen_cmd, fail_ok=False)
        time.sleep(1)

    return alarm_id
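The fixture returns the alarm id so a test can query the alarms it generated. A minimal sketch (the test name and the expected count, matching the 10 alarms raised above, are illustrative):

def test_300_005_alarms(generate_alarms):
    active_alarms = system_helper.get_alarms(alarm_id=generate_alarms)
    assert len(active_alarms) == 10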
Example #28
def verify_alarms():
    LOG.fixture_step(
        "({}) Verifying system alarms in central region after test {} ended."
        .format(scope, scope))
    check_helper.check_alarms(before_alarms=before_alarms,
                              auth_info=auth_info,
                              con_ssh=con_ssh)
    LOG.info("({}) fm alarms verified in central region.".format(scope))
Example #29
    def remove_on_teardown():
        LOG.info("Check vm status and delete if in bad state")
        for vm_ in vms:
            if vm_helper.get_vm_status(vm_) != VMStatus.ACTIVE:
                vm_helper.delete_vms(vm_, remove_cleanup='module')

        LOG.fixture_step("Remove test patches")
        remove_test_patches()
Example #30
def fix_testparam(request):
    LOG.fixture_step("I'm a testparam fixture step")

    def fix_teardown():
        LOG.fixture_step("I'm a testparam teardown")

    request.addfinalizer(fix_teardown)
    return "testparam returned"