def router_info(request):
    """Fixture: put the tenant router into DVR mode with SNAT disabled.

    Records whether the mode was actually changed in the module global
    ``result_`` and registers a finalizer that restores the original
    distributed setting after the test.

    Returns the tenant router's id.
    """
    global result_
    result_ = False

    LOG.fixture_step(
        "Disable SNAT and update router to DVR if not already done.")
    tenant_router = network_helper.get_tenant_router()
    network_helper.set_router_gateway(tenant_router, enable_snat=False)
    was_dvr = network_helper.get_router_values(
        tenant_router, fields='distributed',
        auth_info=Tenant.get('admin'))[0]

    def _restore_mode():
        # Only flip the mode back if the test (or this fixture) changed it.
        current_dvr = network_helper.get_router_values(
            tenant_router, fields='distributed',
            auth_info=Tenant.get('admin'))[0]
        if current_dvr != was_dvr:
            network_helper.set_router_mode(tenant_router, distributed=was_dvr)

    request.addfinalizer(_restore_mode)

    if not was_dvr:
        network_helper.set_router_mode(tenant_router, distributed=True,
                                       enable_on_failure=False)
        # Mark that this fixture performed the DVR conversion.
        result_ = True

    return tenant_router
def recover():
    """Restore the tenant router, its external gateway and its interfaces.

    Relies on the enclosing scope for the original configuration
    (``router_name``, ``ext_gateway_info``, ``ext_gateway_subnet``,
    ``gateway_ip``, ``is_dvr``, ``router_subnets``).
    """
    LOG.fixture_step("Ensure tenant router exists")
    current_router = network_helper.get_tenant_router()
    if not current_router:
        LOG.fixture_step(
            "Router not exist, create new router {}".format(router_name))
        current_router = network_helper.create_router(name=router_name)[1]

    LOG.fixture_step("Ensure tenant router gateway recovered")
    current_gateway = network_helper.get_router_ext_gateway_info(
        router_id=current_router)
    if current_gateway != ext_gateway_info:
        LOG.fixture_step("Set tenant router gateway info")
        _set_external_gatewayway_info(current_router, ext_gateway_subnet,
                                      gateway_ip, is_dvr)

    LOG.fixture_step(
        "Ensure all interfaces added to router {}".format(current_router))
    current_subnets = network_helper.get_router_subnets(
        router=current_router)
    missing_subnets = list(set(router_subnets).difference(current_subnets))
    if missing_subnets:
        LOG.fixture_step("Add subnets to router {}: {}".format(
            current_router, missing_subnets))
        _add_router_interfaces(current_router, missing_subnets,
                               ext_gateway_subnet)
def _router_info(request):
    """Snapshot the tenant router's configuration and register recovery.

    Gathers the router's id, name, DVR flag, external gateway details and
    attached subnets, then adds a finalizer that recreates any of these
    that a test destroyed.

    Returns:
        tuple: (router_id, router_name, gateway_ip, ext_gateway_info,
                router_subnets, ext_gateway_subnet, is_dvr)
    """
    LOG.fixture_step("Get router info.")
    rtr_id = network_helper.get_tenant_router()
    LOG.info("Router id: {}".format(rtr_id))

    rtr_name, dvr_flag = network_helper.get_router_values(
        router_id=rtr_id, fields=('name', 'distributed'))
    LOG.info("Router name: {}".format(rtr_name))

    gw_fixed_ip = network_helper.get_router_ext_gateway_info(
        router_id=rtr_id)['external_fixed_ips'][0]
    gw_ip = gw_fixed_ip.get('ip_address', None)
    LOG.info("Gateway IP used for router {} is {}".format(rtr_name, gw_ip))
    gw_subnet_id = gw_fixed_ip.get('subnet_id', None)
    LOG.info("Router {} external subnet id {}".format(rtr_name,
                                                      gw_subnet_id))

    attached_subnets = network_helper.get_router_subnets(router=rtr_id)
    LOG.info("Router {} mgmt ports subnet ids {}".format(
        rtr_name, attached_subnets))

    def recover():
        # Recreate router / gateway / interfaces if the test removed them.
        LOG.fixture_step("Ensure tenant router exists")
        current_router = network_helper.get_tenant_router()
        if not current_router:
            LOG.fixture_step(
                "Router not exist, create new router {}".format(rtr_name))
            current_router = network_helper.create_router(name=rtr_name)[1]

        LOG.fixture_step("Ensure tenant router gateway recovered")
        current_gateway = network_helper.get_router_ext_gateway_info(
            router_id=current_router)
        if current_gateway != gw_fixed_ip:
            LOG.fixture_step("Set tenant router gateway info")
            _set_external_gatewayway_info(current_router, gw_subnet_id,
                                          gw_ip, dvr_flag)

        LOG.fixture_step(
            "Ensure all interfaces added to router {}".format(
                current_router))
        current_subnets = network_helper.get_router_subnets(
            router=current_router)
        missing_subnets = list(
            set(attached_subnets).difference(current_subnets))
        if missing_subnets:
            LOG.fixture_step("Add subnets to router {}: {}".format(
                current_router, missing_subnets))
            _add_router_interfaces(current_router, missing_subnets,
                                   gw_subnet_id)

    request.addfinalizer(recover)

    return (rtr_id, rtr_name, gw_ip, gw_fixed_ip, attached_subnets,
            gw_subnet_id, dvr_flag)
def router_info(request):
    """Fixture: enable SNAT on the tenant router for the duration of a test.

    A finalizer turns SNAT back off during teardown.  Returns the router id.
    """
    LOG.fixture_step("Enable snat on tenant router")
    tenant_router = network_helper.get_tenant_router()
    network_helper.set_router_gateway(tenant_router, enable_snat=True)

    def _snat_off():
        LOG.fixture_step("Disable snat on tenant router")
        network_helper.set_router_gateway(tenant_router, enable_snat=False)

    request.addfinalizer(_snat_off)
    return tenant_router
def snat_setups(request):
    """Parametrized fixture: enable SNAT on a router of the requested type
    ('distributed' or not) and boot a VM with a floating ip behind it.

    Skips the test when no router of the requested type exists.  The
    primary tenant is temporarily switched to whichever tenant owns the
    matching router and reverted on teardown, along with SNAT.

    Returns:
        tuple: (vm_id, floating_ip)
    """
    want_dvr = 'True' if request.param == 'distributed' else 'False'
    original_primary = Tenant.get_primary()
    secondary = Tenant.get_secondary()

    # Find a tenant whose router matches the requested distributed mode.
    matched = False
    for auth_info in [original_primary, secondary]:
        rtr = network_helper.get_tenant_router(auth_info=auth_info)
        dvr_value = network_helper.get_router_values(
            router_id=rtr, fields='distributed')[0]
        if want_dvr == str(dvr_value):
            LOG.fixture_step("Setting primary tenant to {}".format(
                common.get_tenant_name(auth_info)))
            Tenant.set_primary(auth_info)
            matched = True
            break
    if not matched:
        skip("No {} router found on system.".format(request.param))

    LOG.fixture_step("Update router to enable SNAT")
    network_helper.set_router_gateway(enable_snat=True)
    # Check snat is handled by the keyword

    def disable_snat():
        LOG.fixture_step("Disable SNAT on tenant router")
        try:
            network_helper.set_router_gateway(enable_snat=False)
        finally:
            # Always restore the original primary tenant, even if the
            # SNAT revert failed.
            LOG.fixture_step("Revert primary tenant to {}".format(
                original_primary['tenant']))
            Tenant.set_primary(original_primary)

    request.addfinalizer(disable_snat)

    LOG.fixture_step("Boot a VM from volume")
    vm_under_test = vm_helper.boot_vm(name='snat', reuse_vol=False,
                                      cleanup='module')[1]

    if system_helper.is_avs():
        # With SNAT on and no floating ip, the VM must NOT be reachable.
        LOG.fixture_step("Attempt to ping from NatBox and ensure if fails")
        pingable = vm_helper.wait_for_vm_pingable_from_natbox(
            vm_under_test, timeout=60, fail_ok=True, use_fip=False)
        assert pingable is False, \
            "VM can still be ping'd from outside after SNAT enabled " \
            "without floating ip."

    LOG.fixture_step("Create a floating ip and associate it to VM")
    fip = network_helper.create_floating_ip(cleanup='module')[1]
    network_helper.associate_floating_ip_to_vm(fip, vm_under_test)

    LOG.fixture_step(
        "Ping vm's floating ip from NatBox and ensure it's reachable")
    vm_helper.wait_for_vm_pingable_from_natbox(vm_under_test, timeout=60,
                                               use_fip=True)

    return vm_under_test, fip
def teardown():
    """Remove every port-forwarding rule from the tenant router."""
    LOG.fixture_step("Delete portforwarding rules")
    tenant_router = network_helper.get_tenant_router()
    rule_ids = network_helper.get_portforwarding_rules(tenant_router)
    network_helper.delete_portforwarding_rules(rule_ids)
def verify_heat_resource(to_verify=None, template_name=None, stack_name=None,
                         auth_info=None, fail_ok=False):
    """
    Verify the heat resource creation/deletion for given resources

    Args:
        to_verify (str): resource item to verify creation or deletion,
            e.g. 'volume', 'vm', 'router_interface'.
        template_name (str): template used to create the heat stack; used
            to look up expected resource names on the Heat module.
        stack_name (str): stack name used to create the stack
        auth_info: auth info forwarded to the lookup helpers
        fail_ok (bool): when True return rtn_code 1 on a missing resource
            instead of raising

    Returns (tuple): (rtn_code, msg)
        - (0, msg): resource found
        - (1, msg): resource missing and fail_ok=True

    Raises:
        AssertionError: resource missing and fail_ok=False
        ValueError: unknown to_verify item

    """
    LOG.info("Verifying heat resource {}".format(to_verify))

    rtn_code = 0
    msg = "Heat resource {} appeared".format(to_verify)
    item_verified = to_verify

    # NOTE: comparisons must use '==', not 'is'. Identity comparison
    # against string literals relies on CPython interning, is not
    # guaranteed, and raises SyntaxWarning on modern interpreters.
    if to_verify == 'volume':
        LOG.info("Verifying volume")
        vol_name = getattr(Heat, template_name)['vol_name']
        resource_found = cinder_helper.get_volumes(name=vol_name)
    elif to_verify == 'ceilometer_alarm':
        resource_found = ceilometer_helper.get_alarms(name=stack_name,
                                                      strict=False)
    elif to_verify == 'neutron_port':
        port_name = getattr(Heat, template_name)['port_name']
        if port_name is None:
            port_name = stack_name
        resource_found = network_helper.get_ports(port_name=port_name)
    elif to_verify == 'neutron_provider_net_range':
        resource_found = network_helper.get_network_segment_ranges(
            field='name', physical_network='sample_physnet_X')
    elif to_verify == 'nova_server_group':
        resource_found = nova_helper.get_server_groups(name=stack_name)
    elif to_verify == 'vm':
        vm_name = getattr(Heat, template_name)['vm_name']
        resource_found = vm_helper.get_vms(vms=vm_name, strict=False)
    elif to_verify == 'nova_flavor':
        resource_found = nova_helper.get_flavors(name='sample-flavor')
    elif to_verify == 'neutron_net':
        resource_found = network_helper.get_tenant_net_id(
            net_name='sample-net')
    elif to_verify == 'image':
        resource_found = glance_helper.get_image_id_from_name(
            name='sample_image')
    elif to_verify == 'subnet':
        resource_found = network_helper.get_subnets(name='sample_subnet')
    elif to_verify == 'floating_ip':
        resource_found = network_helper.get_floating_ips()
    elif to_verify == 'router':
        resource_found = network_helper.get_tenant_router(
            router_name='sample_router', auth_info=auth_info)
    elif to_verify == 'router_gateway':
        # Two-stage check: the router must exist before its gateway can.
        item_verified = 'sample_gateway_router'
        resource_found = network_helper.get_tenant_router(
            router_name='sample_gateway_router', auth_info=auth_info)
        if resource_found:
            item_verified = to_verify
            resource_found = network_helper.get_router_ext_gateway_info(
                router_id=resource_found, auth_info=auth_info)
    elif to_verify == 'router_interface':
        # Three-stage check: router -> subnet -> subnet attached to router.
        item_verified = 'sample_if_router'
        router_id = network_helper.get_tenant_router(
            router_name='sample_if_router', auth_info=auth_info)
        resource_found = router_id
        if resource_found:
            item_verified = 'sample_if_subnet'
            subnets = network_helper.get_subnets(name='sample_if_subnet',
                                                 auth_info=auth_info)
            resource_found = subnets
            if resource_found:
                item_verified = to_verify
                router_subnets = network_helper.get_router_subnets(
                    router=router_id, auth_info=auth_info)
                resource_found = resource_found[0] in router_subnets
    elif to_verify == 'security_group':
        resource_found = network_helper.get_security_groups(
            name='SecurityGroupDeluxe')
    elif to_verify == 'key_pair':
        kp_name = getattr(Heat, template_name)['key_pair_name']
        resource_found = nova_helper.get_keypairs(name=kp_name)
    elif to_verify == 'neutron_qos':
        resource_found = network_helper.get_qos_policies(name='SampleQoS',
                                                         auth_info=auth_info)
    else:
        raise ValueError("Unknown item to verify: {}".format(to_verify))

    if not resource_found:
        msg = "Heat stack {} resource {} does not exist".format(
            stack_name, item_verified)
        if fail_ok:
            rtn_code = 1
        else:
            assert resource_found, msg

    LOG.info(msg)
    return rtn_code, msg
def _prepare_test(vm1, vm2, get_hosts, with_router):
    """Arrange VM/router host placement for an evacuation test.

    VMs:
        VM1: under test (primary tenant)
        VM2: traffic observer

    Live-migrates the VMs so that either VM1 is co-located with its router
    (with_router=True) or isolated from both routers and VM2
    (with_router=False), then returns the chosen hosts.

    Args:
        vm1: id of the VM under test (primary tenant)
        vm2: id of the observer VM (secondary tenant)
        get_hosts: candidate hypervisors for VM placement
        with_router (bool): whether VM1 should share a host with its router

    Returns:
        tuple: (host_src_evacuation, host_observer) — the host VM1 will be
        evacuated from, and the host VM2 observes from.
    """
    vm1_host = vm_helper.get_vm_host(vm1)
    vm2_host = vm_helper.get_vm_host(vm2)
    vm1_router = network_helper.get_tenant_router(
        auth_info=Tenant.get_primary())
    vm2_router = network_helper.get_tenant_router(
        auth_info=Tenant.get_secondary())
    vm1_router_host = network_helper.get_router_host(router=vm1_router)
    vm2_router_host = network_helper.get_router_host(router=vm2_router)

    targets = list(get_hosts)
    if vm1_router_host == vm2_router_host:
        # Poll for up to 6 minutes waiting for the two tenant routers to be
        # scheduled on different computes; the while/else asserts on timeout.
        end_time = time.time() + 360
        while time.time() < end_time:
            vm1_router_host = network_helper.get_router_host(
                router=vm1_router)
            vm2_router_host = network_helper.get_router_host(
                router=vm2_router)
            if vm1_router_host != vm2_router_host:
                break
        else:
            assert vm1_router_host != vm2_router_host, "two routers are located on the same compute host"

    if not with_router:
        """
        Setup:
            VM1 on COMPUTE-A
            VM2 not on COMPUTE-A
            ROUTER1 on COMPUTE-B
            ROUTER2 on COMPUTE-C
        """
        if len(get_hosts) < 3:
            skip(
                "Lab not suitable for without_router, requires at least three hypervisors"
            )
        LOG.tc_step(
            "Ensure VM2, ROUTER2 not on COMPUTE-A, for simplicity, ensure they are on the same compute"
        )
        if vm2_host != vm2_router_host:
            vm_helper.live_migrate_vm(vm_id=vm2,
                                      destination_host=vm2_router_host)
            vm2_host = vm_helper.get_vm_host(vm2)
            assert vm2_host == vm2_router_host, "live-migration failed"
        host_observer = vm2_host

        LOG.tc_step(
            "Ensure VM1 and (ROUTER1, VM2, ROUTER2) are on different hosts"
        )
        if vm1_router_host in targets:
            # ensure vm1_router_host is not selected for vm1
            # vm1_router_host can be backed by any type of storage
            targets.remove(vm1_router_host)
        if vm2_host in targets:
            targets.remove(vm2_host)

        if vm1_host in targets:
            # VM1 is already on an acceptable host; keep it there.
            host_src_evacuation = vm1_host
        else:
            assert targets, "no suitable compute for vm1, after excluding ROUTER1, VM2, ROUTER2 's hosts"
            host_src_evacuation = targets[0]
            vm_helper.live_migrate_vm(vm_id=vm1,
                                      destination_host=host_src_evacuation)
            vm1_host = vm_helper.get_vm_host(vm1)
            assert vm1_host == host_src_evacuation, "live-migration failed"

        # verify setup
        vm1_host = vm_helper.get_vm_host(vm1)
        vm2_host = vm_helper.get_vm_host(vm2)
        vm1_router_host = network_helper.get_router_host(router=vm1_router)
        vm2_router_host = network_helper.get_router_host(router=vm2_router)
        assert vm1_router_host != vm1_host and vm2_host != vm1_host and vm2_router_host != vm1_host, \
            "setup is incorrect"
    else:
        """
        Setup:
            VM1, ROUTER1 on COMPUTE-A
            VM2 not on COMPUTE-A
            ROUTER2 on COMPUTE-B
        """
        LOG.tc_step("Ensure VM1, ROUTER1 on COMPUTE-A")
        # VM1 must be sitting on ROUTER1's host, thus vm1_router_host must be backed by local_image
        assert vm1_router_host in targets, "vm1_router_host is not backed by local_image"
        if vm1_host != vm1_router_host:
            vm_helper.live_migrate_vm(vm_id=vm1,
                                      destination_host=vm1_router_host)
            vm1_host = vm_helper.get_vm_host(vm1)
            assert vm1_host == vm1_router_host, "live-migration failed"
        host_src_evacuation = vm1_host

        LOG.tc_step(
            "Ensure VM2, ROUTER2 not on COMPUTE-A, for simplicity, ensure they are on the same compute"
        )
        # COMPUTE-A is reserved for VM1/ROUTER1; exclude it for VM2.
        targets.remove(host_src_evacuation)
        if vm2_host in targets:
            host_observer = vm2_host
        else:
            assert targets, "no suitable compute for vm2, after excluding COMPUTE-A"
            host_observer = targets[0]
            vm_helper.live_migrate_vm(vm_id=vm2,
                                      destination_host=host_observer)
            vm2_host = vm_helper.get_vm_host(vm2)
            assert vm2_host == host_observer, "live-migration failed"

        # verify setup
        vm1_host = vm_helper.get_vm_host(vm1)
        vm2_host = vm_helper.get_vm_host(vm2)
        vm1_router_host = network_helper.get_router_host(router=vm1_router)
        vm2_router_host = network_helper.get_router_host(router=vm2_router)
        assert vm1_host == vm1_router_host and vm2_host != vm1_host and vm2_router_host != vm1_host, \
            "setup is incorrect"
        assert vm1_host == host_src_evacuation and vm2_host == host_observer, "setup is incorrect"

    LOG.info("Evacuate: VM {} on {}, ROUTER on {}".format(
        vm1, vm1_host, vm1_router_host))
    LOG.info("Observer: VM {} on {}, ROUTER on {}".format(
        vm2, vm2_host, vm2_router_host))

    return host_src_evacuation, host_observer
def _router_up():
    """Ensure the tenant router is enabled when the fixture left it alone.

    ``result_`` is the module-level flag set by the router_info fixture;
    when it is False the fixture did not convert the router, so enable
    the router explicitly here.
    """
    if result_ is not False:
        return
    tenant_router = network_helper.get_tenant_router()
    network_helper.set_router(router=tenant_router, fail_ok=False,
                              enable=True)