Example #1
def router_info(request):
    global result_
    result_ = False

    LOG.fixture_step(
        "Disable SNAT and update router to DVR if not already done.")

    router_id = network_helper.get_tenant_router()
    network_helper.set_router_gateway(router_id, enable_snat=False)
    is_dvr = network_helper.get_router_values(router_id,
                                              fields='distributed',
                                              auth_info=Tenant.get('admin'))[0]

    def teardown():
        post_dvr = network_helper.get_router_values(
            router_id, fields='distributed', auth_info=Tenant.get('admin'))[0]
        if post_dvr != is_dvr:
            network_helper.set_router_mode(router_id, distributed=is_dvr)

    request.addfinalizer(teardown)

    if not is_dvr:
        network_helper.set_router_mode(router_id,
                                       distributed=True,
                                       enable_on_failure=False)

    result_ = True
    return router_id
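These listings appear to have their pytest decorators stripped. A minimal, self-contained sketch of how a fixture like router_info would typically be declared and consumed is below; the scope='module' setting, the placeholder id, and the test name are assumptions for illustration, not taken from the source.

import pytest

# Minimal sketch only; scope and placeholder values are assumptions, since
# the decorator is not shown in the listing above.
@pytest.fixture(scope='module')
def router_info(request):
    router_id = 'example-router-id'  # stands in for network_helper.get_tenant_router()

    def teardown():
        # Restore the router's original distributed setting, as in Example #1.
        pass

    request.addfinalizer(teardown)
    return router_id


def test_uses_router_info(router_info):
    # The fixture returns the router id directly.
    assert router_info == 'example-router-id'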
Example #2
def _router_info(request):

    LOG.fixture_step("Get router info.")
    router_id = network_helper.get_tenant_router()
    LOG.info("Router id: {}".format(router_id))

    router_name, is_dvr = network_helper.get_router_values(
        router_id=router_id, fields=('name', 'distributed'))
    LOG.info("Router name: {}".format(router_name))

    ext_gateway_info = network_helper.get_router_ext_gateway_info(
        router_id=router_id)['external_fixed_ips'][0]
    gateway_ip = ext_gateway_info.get('ip_address', None)
    LOG.info("Gateway IP used for router {} is {}".format(
        router_name, gateway_ip))

    ext_gateway_subnet = ext_gateway_info.get('subnet_id', None)
    LOG.info("Router {} external subnet id {}".format(router_name,
                                                      ext_gateway_subnet))

    router_subnets = network_helper.get_router_subnets(router=router_id)
    LOG.info("Router {} mgmt ports subnet ids {}".format(
        router_name, router_subnets))

    def recover():
        LOG.fixture_step("Ensure tenant router exists")
        post_router_id = network_helper.get_tenant_router()
        if not post_router_id:
            LOG.fixture_step(
                "Router not exist, create new router {}".format(router_name))
            post_router_id = network_helper.create_router(name=router_name)[1]

        LOG.fixture_step("Ensure tenant router gateway recovered")
        teardown_gateway_info = network_helper.get_router_ext_gateway_info(
            router_id=post_router_id)
        if teardown_gateway_info != ext_gateway_info:
            LOG.fixture_step("Set tenant router gateway info")
            _set_external_gateway_info(post_router_id, ext_gateway_subnet,
                                       gateway_ip, is_dvr)

        LOG.fixture_step(
            "Ensure all interfaces added to router {}".format(post_router_id))
        teardown_subnets = network_helper.get_router_subnets(
            router=post_router_id)
        subnets_to_add = list(set(router_subnets) - set(teardown_subnets))
        if subnets_to_add:
            LOG.fixture_step("Add subnets to router {}: {}".format(
                post_router_id, subnets_to_add))
            _add_router_interfaces(post_router_id, subnets_to_add,
                                   ext_gateway_subnet)

    request.addfinalizer(recover)

    return router_id, router_name, gateway_ip, ext_gateway_info, \
        router_subnets, ext_gateway_subnet, is_dvr
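For reference, a hypothetical test below shows how the seven values returned by this fixture would be unpacked; the test name and assertion are assumptions for illustration.

def test_router_recovery(_router_info):
    # Unpack in the same order as the fixture's return statement.
    (router_id, router_name, gateway_ip, ext_gateway_info,
     router_subnets, ext_gateway_subnet, is_dvr) = _router_info
    # The recover() finalizer runs after the test and restores the router,
    # its gateway, and its interfaces if the test removed them.
    assert router_id and router_name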
Example #3
def test_dvr_update_router(router_info, _bring_up_router):
    """
    Test update router to distributed and non-distributed

    Args:
        router_info (str): router id returned by the router_info fixture

    Setups:
        - Get the router id and original distributed setting

    Test Steps:
        - Boot a vm before updating router and ping vm from NatBox
        - Change the distributed value of the router and verify it's updated
        successfully
        - Verify router is in ACTIVE state
        - Verify vm can still be ping'd from NatBox
        - Repeat the three steps above with the distributed value reverted to
        original value

    Teardown:
        - Delete vm
        - Revert router to its original distributed setting if not already
        done

    """
    global result_
    result_ = False
    router_id = router_info

    LOG.tc_step("Boot a vm before updating router and ping vm from NatBox")
    vm_id = vm_helper.boot_vm(name='dvr_update',
                              reuse_vol=False,
                              cleanup='function')[1]
    vm_helper.wait_for_vm_pingable_from_natbox(vm_id, fail_ok=False)

    for update_to_val in [False, True]:
        LOG.tc_step("Update router distributed to {}".format(update_to_val))
        network_helper.set_router_mode(router_id,
                                       distributed=update_to_val,
                                       enable_on_failure=False)

        # Wait 30 seconds to allow the router update to complete
        time.sleep(30)
        LOG.tc_step(
            "Verify router is in active state and vm can be ping'd from NatBox"
        )
        assert RouterStatus.ACTIVE == \
            network_helper.get_router_values(router_id,
                                             fields='status')[0], \
            "Router is not in active state after updating distributed to " \
            "{}.".format(update_to_val)
        vm_helper.wait_for_vm_pingable_from_natbox(vm_id, fail_ok=False)

    result_ = True
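The fixed time.sleep(30) above could instead poll the router status until it goes ACTIVE. A minimal sketch, assuming the same network_helper and RouterStatus imports as the listing; the timeout and interval values are arbitrary assumptions.

import time

def wait_for_router_active(router_id, timeout=60, interval=5):
    # Poll the router status until it reports ACTIVE or the timeout expires.
    end_time = time.time() + timeout
    while time.time() < end_time:
        status = network_helper.get_router_values(router_id,
                                                  fields='status')[0]
        if status == RouterStatus.ACTIVE:
            return True
        time.sleep(interval)
    return False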
Example #4
def snat_setups(request):
    # String form of the expected 'distributed' field; compared via str() below
    find_dvr = 'True' if request.param == 'distributed' else 'False'

    primary_tenant = Tenant.get_primary()
    other_tenant = Tenant.get_secondary()

    for auth_info in [primary_tenant, other_tenant]:
        tenant_router = network_helper.get_tenant_router(auth_info=auth_info)
        is_dvr_router = network_helper.get_router_values(router_id=tenant_router,
                                                         fields='distributed')[0]
        if find_dvr == str(is_dvr_router):
            LOG.fixture_step("Setting primary tenant to {}".format(common.get_tenant_name(auth_info)))
            Tenant.set_primary(auth_info)
            break
    else:
        skip("No {} router found on system.".format(request.param))

    LOG.fixture_step("Update router to enable SNAT")
    network_helper.set_router_gateway(enable_snat=True)     # SNAT check is handled by the keyword

    def disable_snat():
        LOG.fixture_step("Disable SNAT on tenant router")
        try:
            network_helper.set_router_gateway(enable_snat=False)
        finally:
            LOG.fixture_step("Revert primary tenant to {}".format(primary_tenant['tenant']))
            Tenant.set_primary(primary_tenant)
    request.addfinalizer(disable_snat)

    LOG.fixture_step("Boot a VM from volume")
    vm_id = vm_helper.boot_vm(name='snat', reuse_vol=False, cleanup='module')[1]

    if system_helper.is_avs():
        LOG.fixture_step("Attempt to ping from NatBox and ensure if fails")
        ping_res = vm_helper.wait_for_vm_pingable_from_natbox(vm_id, timeout=60, fail_ok=True, use_fip=False)
        assert ping_res is False, "VM can still be ping'd from outside after SNAT enabled without floating ip."

    LOG.fixture_step("Create a floating ip and associate it to VM")
    floatingip = network_helper.create_floating_ip(cleanup='module')[1]
    network_helper.associate_floating_ip_to_vm(floatingip, vm_id)

    LOG.fixture_step("Ping vm's floating ip from NatBox and ensure it's reachable")
    vm_helper.wait_for_vm_pingable_from_natbox(vm_id, timeout=60, use_fip=True)

    return vm_id, floatingip
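Since snat_setups reads request.param, it must be a parametrized fixture. A minimal sketch of the likely declaration, assuming pytest; the 'centralized' param value is inferred from the comparison and skip message above and is an assumption.

import pytest

@pytest.fixture(scope='module', params=['distributed', 'centralized'])
def snat_setups(request):
    # Body as in Example #4 above; request.param selects the router type.
    ...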
Example #5
def test_dvr_vms_network_connection(vms_num, srv_grp_policy, server_groups,
                                    router_info):
    """
    Test vms East West connection by pinging vms' data network from vm

    Args:
        vms_num (int): number of vms to boot
        srv_grp_policy (str): 'affinity' to boot vms on the same host,
            'anti-affinity' to boot them on different hosts
        server_groups: test fixture to return affinity and anti-affinity server groups
        router_info (str): id of tenant router

    Skip Conditions:
        - Only one nova host on the system

    Setups:
        - Enable DVR    (module)

    Test Steps:
        - Update router to distributed if not already done
        - Boot given number of vms with specific server group policy to schedule vms on
            same or different host(s)
        - Ping vms over data and management networks from one vm to test NS and EW traffic

    Teardown:
        - Delete vms
        - Revert router to its original distributed setting

    """
    # Increase instance quota count if needed
    current_vms = len(vm_helper.get_vms(strict=False))
    quota_needed = current_vms + vms_num
    vm_helper.ensure_vms_quotas(quota_needed)

    if srv_grp_policy == 'anti-affinity' and len(
            host_helper.get_up_hypervisors()) == 1:
        skip("Only one nova host on the system.")

    LOG.tc_step("Update router to distributed if not already done")
    router_id = router_info
    is_dvr = network_helper.get_router_values(router_id,
                                              fields='distributed',
                                              auth_info=Tenant.get('admin'))[0]
    if not is_dvr:
        network_helper.set_router_mode(router_id, distributed=True)

    LOG.tc_step("Boot {} vms with server group policy {}".format(
        vms_num, srv_grp_policy))
    affinity_grp, anti_affinity_grp = server_groups(soft=True)
    srv_grp_id = affinity_grp if srv_grp_policy == 'affinity' else anti_affinity_grp

    vms = []
    tenant_net_id = network_helper.get_tenant_net_id()
    mgmt_net_id = network_helper.get_mgmt_net_id()
    internal_net_id = network_helper.get_internal_net_id()

    internal_vif = {'net-id': internal_net_id}
    if system_helper.is_avs():
        internal_vif['vif-model'] = 'avp'

    nics = [{'net-id': mgmt_net_id}, {'net-id': tenant_net_id}, internal_vif]
    for i in range(vms_num):
        vol = cinder_helper.create_volume()[1]
        ResourceCleanup.add(resource_type='volume', resource_id=vol)
        vm_id = vm_helper.boot_vm('dvr_ew_traffic',
                                  source='volume',
                                  source_id=vol,
                                  nics=nics,
                                  cleanup='function',
                                  hint={'group': srv_grp_id})[1]
        vms.append(vm_id)
        LOG.tc_step("Wait for vm {} pingable from NatBox".format(vm_id))
        vm_helper.wait_for_vm_pingable_from_natbox(vm_id, fail_ok=False)

    from_vm = vms[0]
    LOG.tc_step(
        "Ping vms over management and data networks from vm {}, and verify "
        "ping successful.".format(from_vm))
    vm_helper.ping_vms_from_vm(to_vms=vms,
                               from_vm=from_vm,
                               fail_ok=False,
                               net_types=['data', 'mgmt', 'internal'])
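vms_num and srv_grp_policy arrive as test parameters. A minimal sketch of how they would typically be supplied, assuming pytest parametrization; the specific values are illustrative assumptions, not taken from the source.

import pytest

@pytest.mark.parametrize(('vms_num', 'srv_grp_policy'), [
    (2, 'affinity'),
    (2, 'anti-affinity'),
])
def test_dvr_vms_network_connection(vms_num, srv_grp_policy, server_groups,
                                    router_info):
    # Body as in the example above; each case boots vms_num vms under the
    # given server group policy.
    ...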