def patch_function_check(request):
    vms = vm_helper.get_vms(name='patch', strict=False)
    boot_vm = len(vms) != 2
    if not boot_vm:
        for vm in vms:
            if vm_helper.get_vm_status(vm) != VMStatus.ACTIVE or not vm_helper.ping_vms_from_natbox(vm, fail_ok=True):
                boot_vm = True
                break

    if boot_vm:
        if vms:
            vm_helper.delete_vms(vms, remove_cleanup='module')
        vms = []
        for source in ('volume', 'image'):
            vms.append(vm_helper.boot_vm(name='patch_{}'.format(source), source=source, cleanup='module')[1])

    def remove_on_teardown():
        LOG.info("Check vm status and delete if in bad state")
        for vm_ in vms:
            if vm_helper.get_vm_status(vm_) != VMStatus.ACTIVE:
                vm_helper.delete_vms(vm_, remove_cleanup='module')

        LOG.fixture_step("Remove test patches")
        remove_test_patches()
    request.addfinalizer(remove_on_teardown)

    return vms
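
A minimal usage sketch, assuming patch_function_check is registered as a pytest fixture (the decorator is not shown above); the test name below is hypothetical:

def test_patched_vms_reachable(patch_function_check):
    # hypothetical test: every vm returned by the fixture should be active
    # and reachable from the NatBox
    for vm in patch_function_check:
        assert vm_helper.get_vm_status(vm) == VMStatus.ACTIVE
        vm_helper.ping_vms_from_natbox(vm, fail_ok=False)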
Example #2
def __delete_vm_scale_stack():
    stack_name = VM_SCALE_STACK
    LOG.fixture_step("Delete heat stack{}".format(stack_name))
    heat_helper.delete_stack(stack=stack_name)

    LOG.fixture_step("Check heat vms are all deleted")
    heat_vms = vm_helper.get_vms(strict=False, name=stack_name)
    assert not heat_vms, "Heat vms still exist on system after heat stack deletion"
Example #3
def vm_scaling_stack():
    stack_name = VM_SCALE_STACK
    heat_vms = vm_helper.get_vms(strict=False, name=stack_name)
    if len(heat_vms) == 1:
        return stack_name, heat_vms[0]

    if heat_vms:
        __delete_vm_scale_stack()

    stack_name, vm_id = __launch_vm_scale_stack()
    return stack_name, vm_id
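
A hedged usage sketch with a hypothetical test name; __launch_vm_scale_stack is assumed to be defined elsewhere in the same module:

def test_vm_scale_stack_vm_active():
    # hypothetical check: the single heat-managed vm should be active and pingable
    stack_name, vm_id = vm_scaling_stack()
    assert vm_helper.get_vm_status(vm_id) == VMStatus.ACTIVE
    vm_helper.wait_for_vm_pingable_from_natbox(vm_id, fail_ok=False)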
Example #4
def verify_heat_resource(to_verify=None,
                         template_name=None,
                         stack_name=None,
                         auth_info=None,
                         fail_ok=False):
    """
        Verify the heat resource creation/deletion for given resources

        Args:
            to_verify (list): Resources to verify creation or deletion.
            template_name (str): template to be used to create heat stack.
            stack_name(str): stack name used to create the stack
            auth_info
            fail_ok

        Returns (int): return 0 if success 1 if failure

    """
    LOG.info("Verifying heat resource {}".format(to_verify))

    rtn_code = 0
    msg = "Heat resource {} appeared".format(to_verify)
    item_verified = to_verify

    if to_verify == 'volume':
        LOG.info("Verifying volume")
        vol_name = getattr(Heat, template_name)['vol_name']
        resource_found = cinder_helper.get_volumes(name=vol_name)

    elif to_verify == 'ceilometer_alarm':
        resource_found = ceilometer_helper.get_alarms(name=stack_name,
                                                      strict=False)

    elif to_verify == 'neutron_port':
        port_name = getattr(Heat, template_name)['port_name']
        if port_name is None:
            port_name = stack_name
        resource_found = network_helper.get_ports(port_name=port_name)

    elif to_verify == 'neutron_provider_net_range':
        resource_found = network_helper.get_network_segment_ranges(
            field='name', physical_network='sample_physnet_X')

    elif to_verify == 'nova_server_group':
        resource_found = nova_helper.get_server_groups(name=stack_name)

    elif to_verify == 'vm':
        vm_name = getattr(Heat, template_name)['vm_name']
        resource_found = vm_helper.get_vms(vms=vm_name, strict=False)

    elif to_verify == 'nova_flavor':
        resource_found = nova_helper.get_flavors(name='sample-flavor')

    elif to_verify == 'neutron_net':
        resource_found = network_helper.get_tenant_net_id(
            net_name='sample-net')

    elif to_verify == 'image':
        resource_found = glance_helper.get_image_id_from_name(
            name='sample_image')

    elif to_verify == 'subnet':
        resource_found = network_helper.get_subnets(name='sample_subnet')

    elif to_verify == 'floating_ip':
        resource_found = network_helper.get_floating_ips()

    elif to_verify == 'router':
        resource_found = network_helper.get_tenant_router(
            router_name='sample_router', auth_info=auth_info)

    elif to_verify == 'router_gateway':
        item_verified = 'sample_gateway_router'
        resource_found = network_helper.get_tenant_router(
            router_name='sample_gateway_router', auth_info=auth_info)
        if resource_found:
            item_verified = to_verify
            resource_found = network_helper.get_router_ext_gateway_info(
                router_id=resource_found, auth_info=auth_info)

    elif to_verify == 'router_interface':
        item_verified = 'sample_if_router'
        router_id = network_helper.get_tenant_router(
            router_name='sample_if_router', auth_info=auth_info)
        resource_found = router_id
        if resource_found:
            item_verified = 'sample_if_subnet'
            subnets = network_helper.get_subnets(name='sample_if_subnet',
                                                 auth_info=auth_info)
            resource_found = subnets
            if resource_found:
                item_verified = to_verify
                router_subnets = network_helper.get_router_subnets(
                    router=router_id, auth_info=auth_info)
                resource_found = resource_found[0] in router_subnets

    elif to_verify == 'security_group':
        resource_found = network_helper.get_security_groups(
            name='SecurityGroupDeluxe')
    elif to_verify == 'key_pair':
        kp_name = getattr(Heat, template_name)['key_pair_name']
        resource_found = nova_helper.get_keypairs(name=kp_name)
    elif to_verify == 'neutron_qos':
        resource_found = network_helper.get_qos_policies(name='SampleQoS',
                                                         auth_info=auth_info)
    else:
        raise ValueError("Unknown item to verify: {}".format(to_verify))

    if not resource_found:
        msg = "Heat stack {} resource {} does not exist".format(
            stack_name, item_verified)
        if fail_ok:
            rtn_code = 1
        else:
            assert resource_found, msg

    LOG.info(msg)
    return rtn_code, msg
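
A hedged usage sketch of the verifier above; the template and stack names are placeholders, and template_name is assumed to name an attribute on the Heat constants class:

code, msg = verify_heat_resource(to_verify='volume',
                                 template_name='SAMPLE_TEMPLATE',  # placeholder
                                 stack_name='sample_stack',        # placeholder
                                 fail_ok=True)
assert code == 0, msg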
Example #5
def test_detection_of_failed_instance(launch_instances):
    con_ssh = ssh.ControllerClient.get_active_controller()
    start_date_cmd = ("python -c \"import datetime; "
                      "print str(datetime.datetime.now())[:-3]\"")
    kill_cmd = (start_date_cmd + "&& sudo pkill -SIGKILL qemu")
    vm_host = vm_helper.get_vm_host(launch_instances)
    vm_name = vm_helper.get_vm_name_from_id(launch_instances)
    end_date_cmd = ("grep -r \"{}\" /var/log/nfv-vim.log | "
                    "grep \"powering-off\" | "
                    "tail -1 | "
                    "awk '{{print$1}}'".format(vm_name))

    res = list()

    for i in range(20):
        LOG.tc_step("Start of iter {}".format(i))
        try:
            st = str()
            et = str()

            vm_helper.get_vms()

            with host_helper.ssh_to_host(vm_host, con_ssh=con_ssh) as con_0_ssh:
                end_time = time.time() + 120
                while time.time() < end_time:
                    con_0_ssh.send(cmd="pgrep qemu")
                    con_0_ssh.expect()
                    matches = re.findall("\n([0-9]+)\n", con_0_ssh.cmd_output)
                    time.sleep(5)
                    if matches:
                        break
                else:
                    raise exceptions.TimeoutException("Timed out waiting for qemu process")

                con_0_ssh.send(cmd=kill_cmd)
                index = con_0_ssh.expect(["Password:", con_0_ssh.prompt])
                if index == 0:
                    # assumes HostLinuxUser.get_password() provides the sudo password
                    con_0_ssh.send(cmd=HostLinuxUser.get_password())
                    con_0_ssh.expect()
                # start_date_cmd echoes the command, then prints the timestamp
                st = con_0_ssh.cmd_output.splitlines()[1]
                st_date = datetime.datetime.strptime(st, "%Y-%m-%d %H:%M:%S.%f")

            end_time = time.time() + 120
            while time.time() < end_time:
                et = con_ssh.exec_cmd(cmd=end_date_cmd)[1]
                if et:
                    break
                time.sleep(5)
            else:
                raise exceptions.TimeoutException("Timed out waiting for end time")
            et_date = datetime.datetime.strptime(et, "%Y-%m-%dT%H:%M:%S.%f")

            diff = et_date - st_date
            LOG.info("\nstart time = {}\nend time = {}".format(st, et))
            LOG.info("\ndiff = {}".format(diff))
            res.append(diff)
        finally:
            time.sleep(5)
            vm_helper.start_vms(launch_instances)

    def calc_avg(lst):
        rtrn_sum = datetime.timedelta()
        for idx, duration in enumerate(lst):
            LOG.info("Iter {}: {}".format(idx, duration))
            rtrn_sum += duration
        return rtrn_sum / len(lst)

    final_res = calc_avg(res)
    LOG.info("Avg time is : {}".format(final_res))
Example #6
def test_dvr_vms_network_connection(vms_num, srv_grp_policy, server_groups,
                                    router_info):
    """
    Test vms' east-west connectivity by pinging vms' data network addresses from one vm

    Args:
        vms_num (int): number of vms to boot
        srv_grp_policy (str): affinity to boot vms on same host, anti-affinity to boot vms on
            different hosts
        server_groups: test fixture to return affinity and anti-affinity server groups
        router_info (str): id of tenant router

    Skip Conditions:
        - Only one nova host on the system

    Setups:
        - Enable DVR    (module)

    Test Steps:
        - Update router to distributed if not already done
        - Boot given number of vms with specific server group policy to schedule vms on
            same or different host(s)
        - Ping vms over data and management networks from one vm to test NS and EW traffic

    Teardown:
        - Delete vms
        - Revert router to its pre-test distributed setting

    """
    # Increase instance quota count if needed
    current_vms = len(vm_helper.get_vms(strict=False))
    quota_needed = current_vms + vms_num
    vm_helper.ensure_vms_quotas(quota_needed)

    if srv_grp_policy == 'anti-affinity' and len(
            host_helper.get_up_hypervisors()) == 1:
        skip("Only one nova host on the system.")

    LOG.tc_step("Update router to distributed if not already done")
    router_id = router_info
    is_dvr = network_helper.get_router_values(router_id,
                                              fields='distributed',
                                              auth_info=Tenant.get('admin'))[0]
    if not is_dvr:
        network_helper.set_router_mode(router_id, distributed=True)

    LOG.tc_step("Boot {} vms with server group policy {}".format(
        vms_num, srv_grp_policy))
    affinity_grp, anti_affinity_grp = server_groups(soft=True)
    srv_grp_id = affinity_grp if srv_grp_policy == 'affinity' else anti_affinity_grp

    vms = []
    tenant_net_id = network_helper.get_tenant_net_id()
    mgmt_net_id = network_helper.get_mgmt_net_id()
    internal_net_id = network_helper.get_internal_net_id()

    internal_vif = {'net-id': internal_net_id}
    if system_helper.is_avs():
        internal_vif['vif-model'] = 'avp'

    nics = [{'net-id': mgmt_net_id}, {'net-id': tenant_net_id}, internal_vif]
    for i in range(vms_num):
        vol = cinder_helper.create_volume()[1]
        ResourceCleanup.add(resource_type='volume', resource_id=vol)
        vm_id = vm_helper.boot_vm('dvr_ew_traffic',
                                  source='volume',
                                  source_id=vol,
                                  nics=nics,
                                  cleanup='function',
                                  hint={'group': srv_grp_id})[1]
        vms.append(vm_id)
        LOG.tc_step("Wait for vm {} pingable from NatBox".format(vm_id))
        vm_helper.wait_for_vm_pingable_from_natbox(vm_id, fail_ok=False)

    from_vm = vms[0]
    LOG.tc_step(
        "Ping vms over management and data networks from vm {}, and verify "
        "ping successful.".format(from_vm))
    vm_helper.ping_vms_from_vm(to_vms=vms,
                               from_vm=from_vm,
                               fail_ok=False,
                               net_types=['data', 'mgmt', 'internal'])
Example #7
def update_dovetail_mgmt_interface():
    """
    Update the dovetail vm's mgmt interface on the cumulus system.
    The cumulus system runs a different release, so this helper must use cli
    commands that match the cumulus TiS version.

    Returns:

    """
    expt_mgmt_net = get_expt_mgmt_net()
    if not expt_mgmt_net:
        skip('{} mgmt net is not found in Cumulus tis-lab project'.format(
            ProjVar.get_var('LAB')['name']))

    with ssh_to_cumulus_server() as cumulus_con:
        cumulus_auth = CumulusCreds.TENANT_TIS_LAB
        vm_id = vm_helper.get_vm_id_from_name(vm_name='dovetail',
                                              con_ssh=cumulus_con,
                                              auth_info=cumulus_auth)

        dovetail_networks = vm_helper.get_vms(vms=vm_id,
                                              field='Networks',
                                              con_ssh=cumulus_con,
                                              auth_info=cumulus_auth)[0]

        actual_nets = dovetail_networks.split(sep=';')
        prev_mgmt_nets = []
        for net in actual_nets:
            net_name, net_ip = net.split('=')
            if '-MGMT-net' in net_name:
                prev_mgmt_nets.append(net_name)

        attach = True
        if expt_mgmt_net in prev_mgmt_nets:
            attach = False
            prev_mgmt_nets.remove(expt_mgmt_net)
            LOG.info("{} interface already attached to Dovetail vm".format(
                expt_mgmt_net))

        if prev_mgmt_nets:
            LOG.info("Detach interface(s) {} from dovetail vm".format(
                prev_mgmt_nets))
            vm_ports_table = table_parser.table(
                cli.nova('interface-list',
                         vm_id,
                         ssh_client=cumulus_con,
                         auth_info=cumulus_auth)[1])
            for prev_mgmt_net in prev_mgmt_nets:
                prev_net_id = network_helper.get_net_id_from_name(
                    net_name=prev_mgmt_net,
                    con_ssh=cumulus_con,
                    auth_info=cumulus_auth)

                prev_port = table_parser.get_values(vm_ports_table, 'Port ID',
                                                    **{'Net ID':
                                                       prev_net_id})[0]
                detach_arg = '{} {}'.format(vm_id, prev_port)
                cli.nova('interface-detach',
                         detach_arg,
                         ssh_client=cumulus_con,
                         auth_info=cumulus_auth)

        mgmt_net_id = network_helper.get_net_id_from_name(
            net_name=expt_mgmt_net,
            con_ssh=cumulus_con,
            auth_info=cumulus_auth)
        if attach:
            LOG.info("Attach {} to dovetail vm".format(expt_mgmt_net))
            args = '--net-id {} {}'.format(mgmt_net_id, vm_id)
            cli.nova('interface-attach',
                     args,
                     ssh_client=cumulus_con,
                     auth_info=cumulus_auth)

        vm_ports_table = table_parser.table(
            cli.nova('interface-list',
                     vm_id,
                     ssh_client=cumulus_con,
                     auth_info=cumulus_auth)[1])
        mgmt_mac = table_parser.get_values(vm_ports_table, 'MAC Addr',
                                           **{'Net ID': mgmt_net_id})[0]

    ComplianceCreds.set_host(Dovetail.TEST_NODE)
    ComplianceCreds.set_user(Dovetail.USERNAME)
    ComplianceCreds.set_password(Dovetail.PASSWORD)
    with ssh_to_compliance_server() as dovetail_ssh:
        if not attach and network_helper.ping_server('192.168.204.3',
                                                     ssh_client=dovetail_ssh,
                                                     fail_ok=True)[0] == 0:
            return
        LOG.info("Bring up dovetail mgmt interface and assign ip")
        eth_name = network_helper.get_eth_for_mac(dovetail_ssh,
                                                  mac_addr=mgmt_mac)
        dovetail_ssh.exec_sudo_cmd('ip link set dev {} up'.format(eth_name))
        dovetail_ssh.exec_sudo_cmd('dhclient {}'.format(eth_name),
                                   expect_timeout=180)
        dovetail_ssh.exec_cmd('ip addr')
        network_helper.ping_server(server='192.168.204.3',
                                   ssh_client=dovetail_ssh,
                                   fail_ok=False)