def disable_snat():
    # Finalizer: disable SNAT on the tenant router, then restore the original
    # primary tenant.
    # NOTE(review): relies on `primary_tenant` being bound in an enclosing
    # scope — confirm this def is nested inside the fixture that defines it.
    LOG.fixture_step("Disable SNAT on tenant router")
    try:
        network_helper.set_router_gateway(enable_snat=False)
    finally:
        # The tenant revert must run even if the SNAT update raised.
        LOG.fixture_step("Revert primary tenant to {}".format(primary_tenant['tenant']))
        Tenant.set_primary(primary_tenant)
def snat_setups(request):
    """Fixture: enable SNAT on a tenant router of the requested type and boot a VM.

    ``request.param`` names the router type ('distributed' selects a DVR
    router; any other value selects a non-distributed one). The test session
    is skipped if neither the primary nor the secondary tenant owns a
    matching router. A finalizer disables SNAT and restores the original
    primary tenant.

    Returns:
        tuple: (vm_id, floating_ip) for the booted VM.
    """
    # The router's 'distributed' field is compared as a string ('True'/'False').
    find_dvr = 'True' if request.param == 'distributed' else 'False'
    primary_tenant = Tenant.get_primary()
    other_tenant = Tenant.get_secondary()
    for auth_info in [primary_tenant, other_tenant]:
        tenant_router = network_helper.get_tenant_router(auth_info=auth_info)
        is_dvr_router = network_helper.get_router_values(router_id=tenant_router,
                                                         fields='distributed')[0]
        if find_dvr == str(is_dvr_router):
            # Make the matching tenant primary so subsequent helper calls
            # target its router.
            LOG.fixture_step("Setting primary tenant to {}".format(common.get_tenant_name(auth_info)))
            Tenant.set_primary(auth_info)
            break
    else:
        # Neither tenant owns a router of the requested type.
        skip("No {} router found on system.".format(request.param))

    LOG.fixture_step("Update router to enable SNAT")
    # Check snat is handled by the keyword
    network_helper.set_router_gateway(enable_snat=True)

    def disable_snat():
        # Finalizer: best-effort SNAT disable; the tenant revert always runs.
        LOG.fixture_step("Disable SNAT on tenant router")
        try:
            network_helper.set_router_gateway(enable_snat=False)
        finally:
            LOG.fixture_step("Revert primary tenant to {}".format(primary_tenant['tenant']))
            Tenant.set_primary(primary_tenant)

    request.addfinalizer(disable_snat)

    LOG.fixture_step("Boot a VM from volume")
    vm_id = vm_helper.boot_vm(name='snat', reuse_vol=False, cleanup='module')[1]

    if system_helper.is_avs():
        # With SNAT enabled and no floating ip, the VM should NOT be reachable
        # from the NatBox (AVS-only check).
        LOG.fixture_step("Attempt to ping from NatBox and ensure if fails")
        ping_res = vm_helper.wait_for_vm_pingable_from_natbox(vm_id, timeout=60, fail_ok=True,
                                                              use_fip=False)
        assert ping_res is False, "VM can still be ping'd from outside after SNAT enabled without floating ip."

    LOG.fixture_step("Create a floating ip and associate it to VM")
    floatingip = network_helper.create_floating_ip(cleanup='module')[1]
    network_helper.associate_floating_ip_to_vm(floatingip, vm_id)

    LOG.fixture_step("Ping vm's floating ip from NatBox and ensure it's reachable")
    vm_helper.wait_for_vm_pingable_from_natbox(vm_id, timeout=60, use_fip=True)

    return vm_id, floatingip
def get_pci_net(request, vif_model, primary_tenant, primary_tenant_name, other_tenant):
    """Pick a PCI provider network suitable for booting a VM with *vif_model*.

    Skips the test when no usable network exists. If the chosen network does
    not belong to the primary tenant, switches the primary tenant to
    *other_tenant* and registers a finalizer that restores *primary_tenant*.

    Returns a 5-tuple (net_type, net_name, net_id, pnet_id, pnet_name) for
    'pci-sriov', or that tuple extended with (other_pcipt_net_name,
    other_pcipt_net_id) for other vif models.
    """
    LOG.fixture_step("Get a PCI network to boot vm from pci providernet")

    other_pcipt_net_name = None
    other_pcipt_net_id = None

    # This assumes pci hosts are configured with the same provider networks.
    pci_net_name = network_helper.get_pci_vm_network(pci_type=vif_model)
    if isinstance(pci_net_name, list):
        # Two networks returned: unpack the secondary pci-passthrough net too.
        pci_net_name, other_pcipt_net_name = pci_net_name
        other_pcipt_net_id = network_helper.get_net_id_from_name(other_pcipt_net_name)

    if not pci_net_name:
        skip('No {} net found on up host(s)'.format(vif_model))
    if 'mgmt' in pci_net_name:
        skip("Only management networks have {} interface.".format(vif_model))

    net_type = 'internal' if 'internal' in pci_net_name else 'data'

    # If the selected net is not the primary tenant's, switch tenants for the
    # duration of the test and revert afterwards.
    if "{}-net".format(primary_tenant_name) not in pci_net_name:
        Tenant.set_primary(other_tenant)

        def revert_tenant():
            Tenant.set_primary(primary_tenant)

        request.addfinalizer(revert_tenant)

    pci_net_id = network_helper.get_net_id_from_name(net_name=pci_net_name)
    pnet_name = network_helper.get_network_values(
        network=pci_net_id, fields='provider:physical_network')[0]
    pnet_id = None
    LOG.info("PCI network selected to boot vm: {}".format(pci_net_name))

    base_info = (net_type, pci_net_name, pci_net_id, pnet_id, pnet_name)
    if vif_model == 'pci-sriov':
        return base_info
    return base_info + (other_pcipt_net_name, other_pcipt_net_id)
def setup_primary_tenant(tenant):
    """Set *tenant* as the primary tenant for the test session and log it."""
    Tenant.set_primary(tenant)
    tenant_name = Tenant.get(tenant)['tenant']
    LOG.info("Primary Tenant for test session is set to {}".format(tenant_name))
def revert_tenant():
    # Finalizer: restore the original primary tenant.
    # NOTE(review): `primary_tenant` must be bound in an enclosing scope —
    # confirm this def is nested where that name is defined.
    Tenant.set_primary(primary_tenant)
def vms_with_upgrade():
    """
    Test test_vms_with_upgrade is for create various vms before upgrade

    Skip conditions:
        - Less than two hosts configured with storage backing under test

    Setups:
        - Add admin role to primary tenant (module)

    Test Steps:
        - Create flv_rootdisk without ephemeral or swap disks, and set storage backing extra spec
        - Create flv_ephemswap with ephemeral AND swap disks, and set storage backing extra spec
        - Boot following vms and wait for them to be pingable from NatBox:
            - Boot vm1 from volume with flavor flv_rootdisk
            - Boot vm2 from volume with flavor flv_localdisk
            - Boot vm3 from image with flavor flv_rootdisk
            - Boot vm4 from image with flavor flv_rootdisk, and attach a volume to it
            - Boot vm5 from image with flavor flv_localdisk
        - start upgrade ....Follows upgrade procedure
        - Ping NAT during the upgrade before live migration
        - complete upgrade.

    Teardown:
        - Not complete ....Delete created vms, volumes, flavors
    """
    # Run with the original openrc credentials and tenant2 as primary.
    ProjVar.set_var(SOURCE_OPENRC=True)
    Tenant.set_primary('tenant2')

    LOG.fixture_step("Create a flavor without ephemeral or swap disks")
    flavor_1 = nova_helper.create_flavor('flv_rootdisk')[1]
    ResourceCleanup.add('flavor', flavor_1)

    LOG.fixture_step("Create another flavor with ephemeral and swap disks")
    flavor_2 = nova_helper.create_flavor('flv_ephemswap', ephemeral=1, swap=512)[1]
    ResourceCleanup.add('flavor', flavor_2)

    LOG.fixture_step(
        "Boot vm1 from volume with flavor flv_rootdisk and wait for it pingable from NatBox"
    )
    vm1_name = "vol_root"
    vm1 = vm_helper.boot_vm(vm1_name, flavor=flavor_1, cleanup='function')[1]

    LOG.fixture_step(
        "Boot vm2 from volume with flavor flv_localdisk and wait for it pingable from NatBox"
    )
    vm2_name = "vol_ephemswap"
    vm2 = vm_helper.boot_vm(vm2_name, flavor=flavor_2, cleanup='function')[1]

    vm_helper.wait_for_vm_pingable_from_natbox(vm1)
    vm_helper.wait_for_vm_pingable_from_natbox(vm2)

    LOG.fixture_step(
        "Boot vm3 from image with flavor flv_rootdisk and wait for it pingable from NatBox"
    )
    vm3_name = "image_root"
    # NOTE(review): despite the step text, no source='image' is passed here
    # (unlike vm5 below), and vm3 is never waited on for NatBox pingability —
    # confirm both are intended.
    vm3 = vm_helper.boot_vm(vm3_name, flavor=flavor_1, cleanup='function')[1]

    LOG.fixture_step(
        "Boot vm4 from image with flavor flv_rootdisk, attach a volume to it and wait for it "
        "pingable from NatBox")
    vm4_name = 'image_root_attachvol'
    vm4 = vm_helper.boot_vm(vm4_name, flavor_1, cleanup='function')[1]
    # Create a non-bootable volume and attach it to vm4.
    vol = cinder_helper.create_volume(bootable=False)[1]
    ResourceCleanup.add('volume', vol)
    vm_helper.attach_vol_to_vm(vm4, vol_id=vol)

    LOG.fixture_step(
        "Boot vm5 from image with flavor flv_localdisk and wait for it pingable from NatBox"
    )
    vm5_name = 'image_ephemswap'
    vm5 = vm_helper.boot_vm(vm5_name, flavor_2, source='image', cleanup='function')[1]

    vm_helper.wait_for_vm_pingable_from_natbox(vm4)
    vm_helper.wait_for_vm_pingable_from_natbox(vm5)

    vms = [vm1, vm2, vm3, vm4, vm5]
    return vms
def setup_primary_tenant():
    """Make tenant1 the primary tenant for the test session."""
    tenant1_auth = Tenant.get('tenant1')
    Tenant.set_primary(tenant1_auth)