Example #1
def test_ping_vms_from_natbox(vm_count):
    """Ping the requested number of VMs (or all VMs) from the NatBox."""
    if vm_count == 'all':
        vm_ids = None
    else:
        vm_ids = vm_helper.get_any_vms(count=vm_count)

    assert vm_ids != [], "No VMs found to ping from NatBox"

    vm_helper.ping_vms_from_natbox(vm_ids=vm_ids, fail_ok=False)
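This test evidently runs under a pytest parametrization that supplies vm_count; the 'all' branch above implies values along these lines (a sketch, the concrete counts are assumptions):

import pytest

# Hypothetical parametrization; the actual values come from the test suite.
@pytest.mark.parametrize('vm_count', [1, 2, 'all'])
def test_ping_vms_from_natbox(vm_count):
    ...  # body as above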
Example #2
def test_non_primary_tenant():
    vm_1 = vm_helper.boot_vm(cleanup='function',
                             auth_info=Tenant.get('tenant1'))[1]
    vm_2 = vm_helper.launch_vms(vm_type='dpdk',
                                auth_info=Tenant.get('tenant1'))[0][0]
    vm_helper.wait_for_vm_pingable_from_natbox(vm_id=vm_1)
    vm_helper.wait_for_vm_pingable_from_natbox(vm_id=vm_2)
    vm_helper.ping_vms_from_natbox(vm_ids=vm_2)
    vm_helper.ping_vms_from_vm(vm_2, vm_1, net_types='mgmt')
Example #3
def test_snat_reset_router_ext_gateway(snat_setups):
    """
    Test VM external access after resetting the router external gateway.

    Args:
        snat_setups (tuple): vm id and fip returned by the module fixture,
            which enables SNAT, boots a vm and attaches a floating ip.

    Test Setups:
        - Find a tenant router that is dvr or non-dvr based on the parameter
        - Enable SNAT on tenant router
        - boot a vm and attach a floating ip
        - Ping vm from NatBox

    Test Steps:
        - Ping outside from VM
        - Clear router gateway
        - Verify vm cannot be ping'd from NatBox
        - Set router gateway
        - Verify vm can be ping'd from NatBox
        - Verify vm can ping outside

    Test Teardown:
        - Delete the created vm     (module)
        - Disable SNAT on router    (module)
    """
    vm_, fip = snat_setups
    LOG.tc_step("Ping vm management net ip from NatBox")
    vm_helper.ping_vms_from_natbox(vm_, use_fip=False)
    # vm_helper.ping_vms_from_natbox(vm_, use_fip=True)

    LOG.tc_step("Ping outside from VM".format(vm_))
    vm_helper.ping_ext_from_vm(vm_, use_fip=True)

    LOG.tc_step("Disassociate floatingip from vm and verify it's successful.")
    network_helper.unset_floating_ip(floating_ip=fip, port=True)
    # assert not network_helper.get_floating_ip_info(fip=fip, field='fixed_ip_address'), \
    #     "Floating ip {} still attached to fixed ip".format(fip)

    LOG.tc_step("Clear router gateway and verify vm cannot be ping'd from NatBox")
    fixed_ip = network_helper.get_router_ext_gateway_info()['external_fixed_ips'][0]['ip_address']
    network_helper.clear_router_gateway(check_first=False)
    ping_res = vm_helper.ping_vms_from_natbox(vm_, fail_ok=True, use_fip=False)[0]
    assert ping_res is False, "VM can still be ping'd from outside after clearing router gateway."

    LOG.tc_step("Set router gateway with the same fixed ip")
    network_helper.set_router_gateway(clear_first=False, fixed_ips=fixed_ip, enable_snat=True)

    LOG.tc_step("Verify SNAT is enabled by default after setting router gateway.")
    assert network_helper.get_router_ext_gateway_info()['enable_snat'], "SNAT is not enabled by default."

    LOG.tc_step("Associate floating ip to vm")
    network_helper.associate_floating_ip_to_vm(floating_ip=fip, vm_id=vm_)

    LOG.tc_step("Verify vm can ping to and be ping'd from outside")
    vm_helper.wait_for_vm_pingable_from_natbox(vm_, timeout=60, fail_ok=False)
    vm_helper.ping_ext_from_vm(vm_, use_fip=True)
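The snat_setups fixture itself is not shown in these examples. Based on the docstring (enable SNAT on the tenant router, boot a vm, attach a floating ip, module-scoped teardown), a minimal sketch could look like the following; the helper calls mirror ones used elsewhere here, but the exact fixture body is an assumption:

from pytest import fixture

@fixture(scope='module')
def snat_setups(request):
    # Enable SNAT on the tenant router; disable it again at module teardown
    network_helper.set_router_gateway(enable_snat=True)
    request.addfinalizer(
        lambda: network_helper.set_router_gateway(enable_snat=False))

    # Boot a vm, attach a floating ip and verify NatBox connectivity
    vm_ = vm_helper.boot_vm(name='snat', cleanup='module')[1]
    fip = network_helper.create_floating_ip(cleanup='module')[1]
    network_helper.associate_floating_ip_to_vm(floating_ip=fip, vm_id=vm_)
    vm_helper.wait_for_vm_pingable_from_natbox(vm_)
    return vm_, fip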
Example #4
def test_snat_evacuate_vm(snat_setups, snat):
    """
    Test VM external access after evacuation.

    Args:
        snat_setups (tuple): vm id and fip returned by the module fixture,
            which enables SNAT, boots a vm and attaches a floating ip.
        snat (str): 'snat_enabled' or 'snat_disabled' - whether to enable
            SNAT on the router

    Test Setups (module):
        - Find a tenant router that is dvr or non-dvr based on the parameter
        - Enable SNAT on tenant router
        - boot a vm and attach a floating ip
        - Ping vm from NatBox

    Test Steps:
        - Ping VM from NatBox
        - Reboot vm host
        - Verify vm is evacuated to other host
        - Verify vm can still ping outside

    Test Teardown:
        - Delete the created vm     (module)
        - Disable snat  (module)

    """
    vm_ = snat_setups[0]

    snat = (snat == 'snat_enabled')
    LOG.tc_step("Update tenant router external gateway to set SNAT to {}".format(snat))
    network_helper.set_router_gateway(enable_snat=snat)

    # Allow the gateway/SNAT update to propagate before checking connectivity
    time.sleep(30)
    vm_helper.wait_for_vm_pingable_from_natbox(vm_, timeout=60, use_fip=True)

    host = vm_helper.get_vm_host(vm_)

    LOG.tc_step("Ping VM from NatBox".format(vm_))
    vm_helper.ping_vms_from_natbox(vm_, use_fip=False)
    # vm_helper.ping_vms_from_natbox(vm_, use_fip=True)

    LOG.tc_step("Evacuate vm")
    vm_helper.evacuate_vms(host=host, vms_to_check=vm_)

    LOG.tc_step("Verify vm can still ping outside")
    vm_helper.wait_for_vm_pingable_from_natbox(vm_, use_fip=snat, timeout=VMTimeout.DHCP_RETRY)
    vm_helper.ping_ext_from_vm(vm_, use_fip=True)

    host_helper.wait_for_hosts_ready(hosts=host)
    vm_helper.wait_for_vm_pingable_from_natbox(vm_, timeout=60, use_fip=False)
    if snat:
        vm_helper.wait_for_vm_pingable_from_natbox(vm_, timeout=60, use_fip=True)
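Since snat is compared against the string 'snat_enabled' above, the test is presumably parametrized along these lines (a sketch; only those two marker strings are grounded in the code):

import pytest

@pytest.mark.parametrize('snat', ['snat_enabled', 'snat_disabled'])
def test_snat_evacuate_vm(snat_setups, snat):
    ...  # body as above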
Example #5
def patch_function_check(request):
    """Ensure two ACTIVE, NatBox-pingable 'patch' VMs exist, booting new ones if needed."""
    vms = vm_helper.get_vms(name='patch', strict=False)
    boot_vm = len(vms) != 2
    if not boot_vm:
        for vm in vms:
            if vm_helper.get_vm_status(vm) != VMStatus.ACTIVE or not vm_helper.ping_vms_from_natbox(vm, fail_ok=True):
                boot_vm = True
                break

    if boot_vm:
        if vms:
            vm_helper.delete_vms(vms, remove_cleanup='module')
        vms = []
        for source in ('volume', 'image'):
            vms.append(vm_helper.boot_vm(name='patch_{}'.format(source), source=source, cleanup='module')[1])

    def remove_on_teardown():
        LOG.info("Check vm status and delete if in bad state")
        for vm_ in vms:
            if vm_helper.get_vm_status(vm_) != VMStatus.ACTIVE:
                vm_helper.delete_vms(vm_, remove_cleanup='module')

        LOG.fixture_step("Remove test patches")
        remove_test_patches()
    request.addfinalizer(remove_on_teardown)

    return vms
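patch_function_check takes a request object and registers a finalizer, so it is evidently meant to be a pytest fixture. A plausible registration and usage, with the module scope inferred from the cleanup='module' calls above (the test name is hypothetical):

from pytest import fixture

@fixture(scope='module')
def patch_function_check(request):
    ...  # body as above

def test_patch_vms_pingable(patch_function_check):
    # Consume the fixture and verify the returned vms from the NatBox
    vm_helper.ping_vms_from_natbox(vm_ids=patch_function_check)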
Example #6
def ping_vms_from_nat(request):
    """
    TODO: - should only compare common vms
        - should pass as long as the post-test ping results are good,
        regardless of the pre-test results
        - if the post-test ping failed, compare it with the pre-test ping to
        see if it's an acceptable failure
        - better to reuse the check-vm fixture so that we don't need to
        retrieve the info again,
            i.e., use a fixture inside a fixture.

    Args:
        request:

    Returns:

    """
    LOG.info("Gathering VMs ping to NAT before test begins.")

    before_ping_result = vm_helper.ping_vms_from_natbox()

    def verify_nat_ping():
        after_ping_result = vm_helper.ping_vms_from_natbox()

        assert before_ping_result == after_ping_result

        LOG.info("Ping from NAT Box to VMs verified.")

    request.addfinalizer(verify_nat_ping)
    return
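The TODO above suggests reusing a check-vm fixture so the VM info is only gathered once. In pytest a fixture can simply request another fixture; a minimal sketch (the check_vms_fixture name and body are hypothetical):

from pytest import fixture

@fixture
def check_vms_fixture():
    # Hypothetical fixture that gathers the VM list once for reuse
    return vm_helper.get_vms()

@fixture
def ping_vms_from_nat(request, check_vms_fixture):
    # Reuse the already-gathered VM list instead of re-querying
    before_ping_result = vm_helper.ping_vms_from_natbox(
        vm_ids=check_vms_fixture)

    def verify_nat_ping():
        after_ping_result = vm_helper.ping_vms_from_natbox(
            vm_ids=check_vms_fixture)
        assert before_ping_result == after_ping_result

    request.addfinalizer(verify_nat_ping)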
Example #7
def test_fip():
    vm_id = vm_helper.boot_vm(name='snat', reuse_vol=False,
                              cleanup='module')[1]

    LOG.tc_step("Ping from NatBox")
    vm_helper.wait_for_vm_pingable_from_natbox(vm_id,
                                               fail_ok=False,
                                               use_fip=False)

    LOG.tc_step("Create a floating ip and associate it to VM")
    floatingip = network_helper.create_floating_ip(cleanup='function')[1]
    network_helper.associate_floating_ip_to_vm(floatingip, vm_id)

    # vm_helper.ping_vms_from_natbox(vm_id, use_fip=False)      TODO: used to work before Mitaka, but should not work?
    LOG.tc_step("Ping vm's floating ip from NatBox and ensure it's pingable")
    vm_helper.ping_vms_from_natbox(vm_id, use_fip=True)

    LOG.tc_step("Attempt to ping vm's private ip from NatBox")
    vm_helper.ping_vms_from_natbox(vm_id, use_fip=False)
Example #8
    def test_evacuate_vms_with_inst_backing(self, hosts_per_backing,
                                            storage_backing):
        """
        Test evacuate vms with various vm storage configs and host instance
        backing configs

        Args:
            hosts_per_backing: hosts grouped by instance backing (from fixture)
            storage_backing: storage backing under test

        Skip conditions:
            - Less than two hosts configured with storage backing under test

        Setups:
            - Add admin role to primary tenant (module)

        Test Steps:
            - Create flv_rootdisk without ephemeral or swap disks, and set
            storage backing extra spec
            - Create flv_ephemswap with ephemeral AND swap disks, and set
            storage backing extra spec
            - Boot following vms on same host and wait for them to be
            pingable from NatBox:
                - Boot vm1 from volume with flavor flv_rootdisk
                - Boot vm2 from volume with flavor flv_ephemswap
                - Boot vm3 from image with flavor flv_rootdisk
                - Boot vm4 from image with flavor flv_rootdisk, and attach a
                volume to it
                - Boot vm5 from image with flavor flv_ephemswap
            - sudo reboot -f on vms host
            - Ensure evacuation for all 5 vms are successful (vm host
            changed, active state, pingable from NatBox)

        Teardown:
            - Delete created vms, volumes, flavors
            - Remove admin role from primary tenant (module)

        """
        hosts = hosts_per_backing.get(storage_backing, [])
        if len(hosts) < 2:
            skip(
                SkipStorageBacking.LESS_THAN_TWO_HOSTS_WITH_BACKING.format(
                    storage_backing))

        target_host = hosts[0]

        LOG.tc_step("Create a flavor without ephemeral or swap disks")
        flavor_1 = nova_helper.create_flavor(
            'flv_rootdisk', storage_backing=storage_backing)[1]
        ResourceCleanup.add('flavor', flavor_1, scope='function')

        LOG.tc_step("Create another flavor with ephemeral and swap disks")
        flavor_2 = nova_helper.create_flavor(
            'flv_ephemswap',
            ephemeral=1,
            swap=512,
            storage_backing=storage_backing)[1]
        ResourceCleanup.add('flavor', flavor_2, scope='function')

        LOG.tc_step("Boot vm1 from volume with flavor flv_rootdisk and wait "
                    "for it pingable from NatBox")
        vm1_name = "vol_root"
        vm1 = vm_helper.boot_vm(vm1_name,
                                flavor=flavor_1,
                                source='volume',
                                avail_zone='nova',
                                vm_host=target_host,
                                cleanup='function')[1]

        vms_info = {
            vm1: {
                'ephemeral': 0,
                'swap': 0,
                'vm_type': 'volume',
                'disks': vm_helper.get_vm_devices_via_virsh(vm1)
            }
        }
        vm_helper.wait_for_vm_pingable_from_natbox(vm1)

        LOG.tc_step("Boot vm2 from volume with flavor flv_localdisk and wait "
                    "for it pingable from NatBox")
        vm2_name = "vol_ephemswap"
        vm2 = vm_helper.boot_vm(vm2_name,
                                flavor=flavor_2,
                                source='volume',
                                avail_zone='nova',
                                vm_host=target_host,
                                cleanup='function')[1]

        vm_helper.wait_for_vm_pingable_from_natbox(vm2)
        vms_info[vm2] = {
            'ephemeral': 1,
            'swap': 512,
            'vm_type': 'volume',
            'disks': vm_helper.get_vm_devices_via_virsh(vm2)
        }

        LOG.tc_step(
            "Boot vm3 from image with flavor flv_rootdisk and wait for "
            "it pingable from NatBox")
        vm3_name = "image_root"
        vm3 = vm_helper.boot_vm(vm3_name,
                                flavor=flavor_1,
                                source='image',
                                avail_zone='nova',
                                vm_host=target_host,
                                cleanup='function')[1]

        vm_helper.wait_for_vm_pingable_from_natbox(vm3)
        vms_info[vm3] = {
            'ephemeral': 0,
            'swap': 0,
            'vm_type': 'image',
            'disks': vm_helper.get_vm_devices_via_virsh(vm3)
        }

        LOG.tc_step("Boot vm4 from image with flavor flv_rootdisk, attach a "
                    "volume to it and wait for it "
                    "pingable from NatBox")
        vm4_name = 'image_root_attachvol'
        vm4 = vm_helper.boot_vm(vm4_name,
                                flavor_1,
                                source='image',
                                avail_zone='nova',
                                vm_host=target_host,
                                cleanup='function')[1]

        vol = cinder_helper.create_volume(bootable=False)[1]
        ResourceCleanup.add('volume', vol, scope='function')
        vm_helper.attach_vol_to_vm(vm4, vol_id=vol, mount=False)

        vm_helper.wait_for_vm_pingable_from_natbox(vm4)
        vms_info[vm4] = {
            'ephemeral': 0,
            'swap': 0,
            'vm_type': 'image_with_vol',
            'disks': vm_helper.get_vm_devices_via_virsh(vm4)
        }

        LOG.tc_step("Boot vm5 from image with flavor flv_localdisk and wait "
                    "for it pingable from NatBox")
        vm5_name = 'image_ephemswap'
        vm5 = vm_helper.boot_vm(vm5_name,
                                flavor_2,
                                source='image',
                                avail_zone='nova',
                                vm_host=target_host,
                                cleanup='function')[1]
        vm_helper.wait_for_vm_pingable_from_natbox(vm5)
        vms_info[vm5] = {
            'ephemeral': 1,
            'swap': 512,
            'vm_type': 'image',
            'disks': vm_helper.get_vm_devices_via_virsh(vm5)
        }

        LOG.tc_step("Check all VMs are booted on {}".format(target_host))
        vms_on_host = vm_helper.get_vms_on_host(hostname=target_host)
        vms = [vm1, vm2, vm3, vm4, vm5]
        assert set(vms) <= set(vms_on_host), (
            "VMs booted on host: {}. Current vms on host: {}".format(
                vms, vms_on_host))

        for vm_ in vms:
            LOG.tc_step("Touch files under vm disks {}: "
                        "{}".format(vm_, vms_info[vm_]))
            file_paths, content = touch_files_under_vm_disks(
                vm_, **vms_info[vm_])
            vms_info[vm_]['file_paths'] = file_paths
            vms_info[vm_]['content'] = content

        LOG.tc_step("Reboot target host {}".format(target_host))
        vm_helper.evacuate_vms(host=target_host,
                               vms_to_check=vms,
                               ping_vms=True)

        LOG.tc_step("Check files after evacuation")
        for vm_ in vms:
            LOG.info("--------------------Check files for vm {}".format(vm_))
            check_helper.check_vm_files(vm_id=vm_,
                                        vm_action='evacuate',
                                        storage_backing=storage_backing,
                                        prev_host=target_host,
                                        **vms_info[vm_])
        vm_helper.ping_vms_from_natbox(vms)
Example #9
def check_vms(vms):
    for vm in vms:
        assert vm_helper.get_vm_status(vm) == VMStatus.ACTIVE
        vm_helper.ping_vms_from_natbox(vm, fail_ok=False)
Example #10
def test_system_upgrade(vms_with_upgrade, upgrade_setup,
                        check_system_health_query_upgrade):
    LOG.info("Verify VMs booted before upgrade are pingable")
    vms = vms_with_upgrade
    vm_helper.ping_vms_from_natbox(vms)
    lab = upgrade_setup['lab']
    current_version = upgrade_setup['current_version']
    upgrade_version = upgrade_setup['upgrade_version']

    controller0 = lab['controller-0']
    upgrade_helper.ensure_host_provisioned(controller0.name)
    force = False
    LOG.tc_step("Checking system health for upgrade .....")
    if check_system_health_query_upgrade[0] == 0:
        LOG.info("System health OK for upgrade......")
    elif check_system_health_query_upgrade[0] == 2:
        LOG.info(
            "System health indicates minor alarms; using --force option to start upgrade......"
        )
        force = True
    else:
        assert False, "System health query upgrade failed: {}".format(
            check_system_health_query_upgrade[1])

    LOG.tc_step("Starting upgrade from release {} to target release {}".format(
        current_version, upgrade_version))
    upgrade_helper.system_upgrade_start(force=force)
    LOG.info("upgrade started successfully......")

    # upgrade standby controller
    LOG.tc_step("Upgrading controller-1")
    upgrade_helper.upgrade_host("controller-1", lock=True)
    LOG.info("Host controller-1 is upgraded successfully......")

    vm_helper.ping_vms_from_natbox(vms)
    # unlock upgraded controller-1
    LOG.tc_step("Unlocking controller-1 after upgrade......")
    host_helper.unlock_host("controller-1",
                            available_only=True,
                            check_hypervisor_up=False)
    LOG.info("Host controller-1 unlocked after upgrade......")

    # Swact to standby controller-1
    LOG.tc_step("Swacting to controller-1 .....")
    rc, output = host_helper.swact_host(hostname="controller-0")
    assert rc == 0, "Failed to swact: {}".format(output)
    LOG.info("Swacted and  controller-1 has become active......")

    # upgrade  controller-0
    LOG.tc_step("Upgrading  controller-0......")

    LOG.info("Ensure controller-0 is provisioned before upgrade.....")
    upgrade_helper.ensure_host_provisioned(controller0.name)
    LOG.info("Host {} is provisioned for upgrade.....".format(
        controller0.name))

    # open vlm console for controller-0 for boot through mgmt interface
    LOG.info("Opening a vlm console for controller-0 .....")
    install_helper.open_vlm_console_thread("controller-0")

    LOG.info("Starting {} upgrade.....".format(controller0.name))
    upgrade_helper.upgrade_host(controller0.name, lock=True)
    LOG.info("controller-0 is upgraded successfully.....")

    # unlock upgraded controller-0
    LOG.tc_step("Unlocking controller-0 after upgrade......")
    host_helper.unlock_host(controller0.name, available_only=True)
    LOG.info("Host {} unlocked after upgrade......".format(controller0.name))
    vm_helper.ping_vms_from_natbox(vms)
    upgrade_hosts = install_helper.get_non_controller_system_hosts()
    LOG.info(
        "Starting upgrade of the other system hosts: {}".format(upgrade_hosts))

    for host in upgrade_hosts:
        LOG.tc_step("Starting {} upgrade.....".format(host))
        if "storage" in host:
            # wait for replication to be healthy
            storage_helper.wait_for_ceph_health_ok()

        upgrade_helper.upgrade_host(host, lock=True)
        LOG.info("{} is upgraded successfully.....".format(host))
        LOG.tc_step("Unlocking {} after upgrade......".format(host))
        host_helper.unlock_host(host, available_only=True)
        LOG.info("Host {} unlocked after upgrade......".format(host))
        LOG.info("Host {} upgrade complete.....".format(host))
        vm_helper.ping_vms_from_natbox(vms)

    # Activate the upgrade
    LOG.tc_step("Activating upgrade....")
    upgrade_helper.activate_upgrade()
    LOG.info("Upgrade activate complete.....")

    # Make controller-0 the active controller
    # Swact to standby controller-0
    LOG.tc_step("Making controller-0 active.....")
    rc, output = host_helper.swact_host(hostname="controller-1")
    assert rc == 0, "Failed to swact: {}".format(output)
    LOG.info("Swacted to controller-0 ......")

    # Complete upgrade
    LOG.tc_step("Completing upgrade from  {} to {}".format(
        current_version, upgrade_version))
    upgrade_helper.complete_upgrade()
    LOG.info("Upgrade is complete......")

    LOG.info("Lab: {} upgraded successfully".format(lab['name']))

    # Delete the previous load
    LOG.tc_step("Deleting  {} load... ".format(current_version))
    upgrade_helper.delete_imported_load()
    LOG.tc_step("Delete  previous load version {}".format(current_version))
Example #11
def lock_unlock_host(backup_info, con_ssh, vms):
    """
    Do lock & unlock hosts test before system backup.

    Args:
        backup_info:
            - options for system backup

        con_ssh:
            - current ssh connection to the target

        vms:
            - VMs whose hosts are to be tested
    Return:
        None
    """

    active_controller_name = system_helper.get_active_controller_name()

    target_vm = random.choice(vms)
    LOG.info('lock and unlock the host of VM:{}'.format(target_vm))

    target_host = vm_helper.get_vm_host(target_vm, con_ssh=con_ssh)
    if target_host == active_controller_name:
        if not system_helper.is_aio_simplex():
            LOG.warning(
                'Attempt to lock the active controller on a non-simplex system'
            )
            host_helper.swact_host()

    active_controller_name = system_helper.get_active_controller_name()

    LOG.info('lock and unlock:{}'.format(target_host))

    host_helper.lock_host(target_host)
    if not system_helper.is_aio_simplex():
        LOG.info('check if the VM is pingable')
        vm_helper.ping_vms_from_natbox(target_vm)
    else:
        LOG.info(
            'skip pinging vm after locking the only node in a simplex system')

    LOG.info('unlock:{}'.format(target_host))
    host_helper.unlock_host(target_host)

    system_helper.wait_for_host_values(target_host,
                                       administrative='unlocked',
                                       availability='available',
                                       vim_progress_status='services-enabled')
    for tried in range(5):
        pingable, message = vm_helper.ping_vms_from_natbox(target_vm,
                                                           fail_ok=(tried < 4))
        if pingable:
            LOG.info('Succeeded to ping VM:{}'.format(target_vm))
            break
        LOG.info('Failed to ping VM:{}, retrying in 20 seconds'.format(
            target_vm))
        time.sleep(20)
    if backup_info.get('dest', 'local') == 'usb':
        if active_controller_name != 'controller-0':
            LOG.info(
                'current active controller: ' + active_controller_name +
                ', swact back to controller-0 for the USB backup')
            host_helper.swact_host()
            active_controller_name = system_helper.get_active_controller_name()
            LOG.info(
                'active controller should now be controller-0, actual: ' +
                active_controller_name)