def _perform_nova_actions(vms_dict, flavors, vfs=None):
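    """Cycle each VM through nova actions: cold migrate, live migrate, suspend/resume, resize."""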
    for vm_name, vm_id in vms_dict.items():
        LOG.tc_step("Cold migrate VM {} ....".format(vm_name))
        vm_helper.cold_migrate_vm(vm_id=vm_id)
        vm_helper.wait_for_vm_pingable_from_natbox(vm_id)

        LOG.tc_step("Live migrate VM {} ....".format(vm_name))
        expt_codes = [0] if 'vm_no_crypto' in vm_name else [1, 6]
        code, msg = vm_helper.live_migrate_vm(vm_id=vm_id, fail_ok=True)
        assert code in expt_codes, "Unexpected live migration result for VM {}. Expected codes: {}. Actual: {}".format(vm_name, expt_codes, msg)
        vm_helper.wait_for_vm_pingable_from_natbox(vm_id)

        LOG.tc_step("Suspend/Resume VM {} ....".format(vm_name))
        vm_helper.suspend_vm(vm_id)
        vm_helper.resume_vm(vm_id)
        vm_helper.wait_for_vm_pingable_from_natbox(vm_id)

        if vfs is None:
            resize_flavor_id = flavors["flavor_resize_qat_vf_1"] if "no_crypto" not in vm_name else \
                flavors["flavor_resize_none"]
        else:
            resize_flavor_id = flavors['flavor_resize_qat_vf_{}'.format(vfs)]

        LOG.info("Resizing VM {} to new flavor {} ...".format(vm_name, resize_flavor_id))
        vm_helper.resize_vm(vm_id, resize_flavor_id)
        vm_helper.wait_for_vm_pingable_from_natbox(vm_id=vm_id)
Example 2
def resize_to(vm_type, vm_id):
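    # Pick the "other" flavor so each resize flips between the vTPM and
    # auto-recovery flavors; g_flavors is assumed to be a module-level dict
    # populated during test setup.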
    if vm_type == 'autorc':
        flavor = g_flavors['vtpm']
    elif vm_type == 'non_autorc':
        flavor = g_flavors['autorc']
    elif vm_type == 'vtpm':
        flavor = g_flavors['autorc']
    else:
        flavor = g_flavors['vtpm']

    LOG.info('Resize to another flavor with vTPM/auto-recovery enabled')
    vm_helper.resize_vm(vm_id, flavor_id=flavor)
Example 3
def test_vcpu_model_resize(source_model, dest_model):
    """

    Args:
        source_model: vcpu model extra spec for the source flavor
        dest_model: vcpu model extra spec for the destination flavor

    Test Steps:
        - Create a source flavor with 4G root disk and vcpu model extra spec as specified in source_model
        - Create a dest flavor with 5G root disk and vcpu model extra spec as specified in dest_model
        - Launch a vm from image with source flavor
        - Check vcpu_model is successfully applied
        - Resize the vm with dest flavor
        - Check new vcpu_model is successfully applied

    Teardown:
        - Delete created vm, image, flavors

    """
    LOG.tc_step(
        "Create a source flavor with 4G root disk and vcpu model extra spec: {}"
        .format(source_model))
    source_flv = _create_flavor_vcpu_model(vcpu_model=source_model,
                                           root_disk_size=4)

    LOG.tc_step(
        "Create a destination flavor with 5G root disk and vcpu model extra spec: {}"
        .format(dest_model))
    dest_flv = _create_flavor_vcpu_model(vcpu_model=dest_model,
                                         root_disk_size=5)

    LOG.tc_step(
        "Launch a vm from image with source flavor {}".format(source_flv))
    vm_id = vm_helper.boot_vm(flavor=source_flv,
                              source='image',
                              cleanup='function')[1]
    vm_helper.wait_for_vm_pingable_from_natbox(vm_id)
    check_vm_cpu_model(vm_id=vm_id, vcpu_model=source_model)

    expt_arch = None
    if source_model == dest_model == 'Passthrough':
        # Ensure vm resize to host with exact same cpu model when vcpu_model is passthrough
        host = vm_helper.get_vm_host(vm_id)
        expt_arch = host_helper.get_host_cpu_model(host)

    LOG.tc_step("Resize vm to destination flavor {}".format(dest_flv))
    vm_helper.resize_vm(vm_id, flavor_id=dest_flv)
    vm_helper.wait_for_vm_pingable_from_natbox(vm_id)
    check_vm_cpu_model(vm_id, vcpu_model=dest_model, expt_arch=expt_arch)
Example 4
    def test_resize_vm_shared_cpu_negative(self, vcpus, cpu_policy, shared_vcpu, basic_vm):
        """
        Test resize request is rejected if system does not meet the shared_cpu requirement(s) in the flavor

        Args:
            vcpus (int): number of vcpus in flavor
            cpu_policy (str): cpu_policy in flavor extra specs
            shared_vcpu (int): index of the vcpu to run on a shared pcpu
            basic_vm (str): id of a basic vm to attempt resize on

        Setup:
            - Boot a basic vm (module)

        Test Steps:
            - Create a flavor with given number of vcpus
            - Set extra specs for cpu_policy, shared_vcpu
            - Attempt to resize the basic vm with the flavor
            - Ensure cli is rejected and proper error returned

        Teardowns:
            - Delete created vm and volume (module)

        """
        vm_id, storage_backing = basic_vm
        LOG.tc_step("Create a flavor with {} vcpus. Set extra specs with: {} cpu_policy, {} shared_vcpu".format(
                vcpus, cpu_policy, shared_vcpu))
        flavor = nova_helper.create_flavor(name='shared_cpu', vcpus=vcpus, storage_backing=storage_backing)[1]
        ResourceCleanup.add('flavor', flavor, scope='module')
        nova_helper.set_flavor(flavor, **{FlavorSpec.CPU_POLICY: cpu_policy})
        nova_helper.set_flavor(flavor, **{FlavorSpec.SHARED_VCPU: shared_vcpu})

        LOG.tc_step("Attempt to resize vm with invalid flavor, and verify resize request is rejected.")
        code, msg = vm_helper.resize_vm(vm_id, flavor, fail_ok=True)
        assert code == 1, "Resize vm request is not rejected"
        assert re.search(ResizeVMErr.SHARED_NOT_ENABLED.format('0'), msg)

        LOG.tc_step("Ensure VM is still pingable after resize reject")
        vm_helper.wait_for_vm_pingable_from_natbox(vm_id=vm_id)
Example 5
def test_vm_with_max_vnics_attached_during_boot(base_vm, guest_os, nic_arg,
                                                boot_source):
    """
    Setups:
        - Boot a base vm with mgmt net and tenant_port_id (module)

    Test Steps:
        - Boot a vm with 1 mgmt and 15 avp/virtio interfaces
        - Perform nova action (live migrate --force, live migrate, rebuild, reboot hard/soft, resize revert, resize)
        - ping between base_vm and vm_under_test over mgmt & tenant network

    Teardown:
        - Delete created vm, volume, port (if any)  (func)
        - Delete base vm, volume    (module)

    """

    base_vm_id, mgmt_nic, tenant_nic, internal_net_id, tenant_net_id, mgmt_net_id = base_vm
    vif_type = 'avp' if system_helper.is_avs() else None

    LOG.tc_step("Get/Create {} glance image".format(guest_os))
    cleanup = None if re.search(GuestImages.TIS_GUEST_PATTERN,
                                guest_os) else 'function'
    image_id = glance_helper.get_guest_image(guest_os=guest_os,
                                             cleanup=cleanup)

    # TODO: Update vif model config. Right now the avp vif model is still under implementation
    nics = [mgmt_nic]
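    # Build 15 additional data NICs on the same tenant network, either as
    # pre-created ports or as net-id dicts, for 16 vNICs total including mgmt.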
    for i in range(15):
        if nic_arg == 'port_id':
            port_id = network_helper.create_port(tenant_net_id,
                                                 'tenant_port-{}'.format(i),
                                                 wrs_vif=vif_type,
                                                 cleanup='function')[1]
            nics.append({'port-id': port_id})
        else:
            nics.append({'net-id': tenant_net_id, 'vif-model': vif_type})

    LOG.tc_step(
        "Boot a {} vm from {} with 1 mgmt and 15 data interfaces".format(
            guest_os, boot_source))
    vm_under_test = vm_helper.boot_vm('max_vifs-{}-{}'.format(
        guest_os, boot_source),
                                      nics=nics,
                                      source=boot_source,
                                      image_id=image_id,
                                      guest_os=guest_os,
                                      cleanup='function')[1]

    vm_ports_count = len(network_helper.get_ports(server=vm_under_test))
    expt_vnics = 16
    LOG.info("vnics attached to VM: {}".format(vm_ports_count))
    assert vm_ports_count == expt_vnics, "Expected {} vnics attached, actual: {}".format(expt_vnics, vm_ports_count)

    _ping_vm_data(vm_under_test, vm_under_test, action='boot')
    vm_helper.configure_vm_vifs_on_same_net(vm_id=vm_under_test)
    _ping_vm_data(vm_under_test, base_vm_id, action='configure routes')

    destination_host = vm_helper.get_dest_host_for_live_migrate(
        vm_id=vm_under_test)
    if destination_host:
        # LOG.tc_step("Perform following action(s) on vm {}: {}".format(vm_under_test, 'live-migrate --force'))
        # vm_helper.live_migrate_vm(vm_id=vm_under_test, destination_host=destination_host, force=True)
        # _ping_vm_data(vm_under_test, base_vm_id, action='live migrate --force')

        LOG.tc_step("Perform following action(s) on vm {}: {}".format(
            vm_under_test, 'live-migrate'))
        vm_helper.live_migrate_vm(vm_id=vm_under_test)
        _ping_vm_data(vm_under_test, base_vm_id, action='live-migrate')

    LOG.tc_step("Perform following action(s) on vm {}: {}".format(
        vm_under_test, 'hard reboot'))
    vm_helper.reboot_vm(vm_id=vm_under_test, hard=True)
    _ping_vm_data(vm_under_test, base_vm_id, action='hard reboot')

    LOG.tc_step("Perform following action(s) on vm {}: {}".format(
        vm_under_test, 'soft reboot'))
    vm_helper.reboot_vm(vm_id=vm_under_test)
    _ping_vm_data(vm_under_test, base_vm_id, action='soft reboot')

    LOG.tc_step('Create destination flavor')
    dest_flavor_id = nova_helper.create_flavor(name='dest_flavor',
                                               vcpus=2,
                                               guest_os=guest_os)[1]

    LOG.tc_step('Resize vm to dest flavor and revert')
    vm_helper.resize_vm(vm_under_test,
                        dest_flavor_id,
                        revert=True,
                        fail_ok=False)
    _ping_vm_data(vm_under_test, base_vm_id, action='resize revert')

    LOG.tc_step('Resize vm to dest flavor and confirm')
    vm_helper.resize_vm(vm_under_test, dest_flavor_id, fail_ok=False)
    vm_helper.configure_vm_vifs_on_same_net(vm_id=vm_under_test)
    _ping_vm_data(vm_under_test, base_vm_id, action='resize')

    LOG.tc_step("Perform following action(s) on vm {}: {}".format(
        vm_under_test, 'rebuild'))
    vm_helper.rebuild_vm(vm_id=vm_under_test)
    _ping_vm_data(vm_under_test, vm_under_test, action='rebuild')
    vm_helper.configure_vm_vifs_on_same_net(vm_id=vm_under_test)
    _ping_vm_data(vm_under_test, base_vm_id, action='rebuild')
Example 6
    def test_resize_vm_with_shared_cpu(self, add_shared_cpu, origin_total_vcpus):
        """
        Test that the vm created with shared vcpus can successfully be resized to a flavor with shared vcpus and to a
        flavor without shared vcpus (and back)

        Setup:
            - Configure two computes to have shared cpus via 'system host-cpu-modify -f shared p0=1,p1=1 <hostname>'

        Test Steps:
            - Create 3 flavors as follows:
                - flavor1 has 2 vcpus, dedicated cpu policy, and a shared vcpu
                - flavor2 has 4 vcpus, dedicated cpu policy, and a shared vcpu
                - flavor3 has 4 vcpus, dedicated cpu policy, and no shared vcpus
            - Add specific cpu_policy (dedicated), shared_vcpu values to flavor extra specs
            - Boot a vm with the flavor1
            - Ensure vm is booted successfully
            - Validate the shared cpu
            - Resize vm to flavor2 (enabled shared vcpu flavor)
            - Revalidate the shared cpu
            - Resize vm to flavor3 (disabled shared vcpu)
            - Revalidate the shared cpu by ensuring that it does not have a shared vcpu
            - Resize vm to back to flavor2
            - Revalidate the shared cpu by making sure it has a shared vcpu again

        Teardown:
            - Delete created vm if any (function)
            - Delete created volume if any (module)
            - Set shared cpus to 0 (default setting) on the compute node under test (module)
        """
        storage_backing, shared_cpu_hosts, max_vcpus_per_proc = add_shared_cpu

        LOG.tc_step("Create a flavor with given number of vcpus")
        f1_vcpus = 2
        f1_shared_vcpu = 1
        flavor1 = create_shared_flavor(vcpus=f1_vcpus, storage_backing=storage_backing, shared_vcpu=f1_shared_vcpu)

        LOG.tc_step("Boot a vm with above flavor, and ensure vm is booted successfully")
        code, vm_id, output = vm_helper.boot_vm(name='shared_cpu', flavor=flavor1, fail_ok=True, cleanup='function')

        assert 0 == code, "Boot vm failed. Details: {}".format(output)

        vm_helper.wait_for_vm_pingable_from_natbox(vm_id)
        check_shared_vcpu(vm=vm_id, vcpus=f1_vcpus,
                          prev_total_vcpus=origin_total_vcpus, shared_vcpu=f1_shared_vcpu)

        f2_vcpus = 4
        f2_shared_vcpu = 1
        f2_shared_cpu = create_shared_flavor(vcpus=f2_vcpus, storage_backing=storage_backing,
                                             shared_vcpu=f2_shared_vcpu)

        f3_vcpus = 4
        f3_non_shared = create_shared_flavor(vcpus=f3_vcpus, storage_backing=storage_backing)

        LOG.tc_step("Resize vm w/shared cpu flavor and validate shared vcpu")
        vm_helper.resize_vm(vm_id, f2_shared_cpu)
        vm_helper.wait_for_vm_pingable_from_natbox(vm_id)
        check_shared_vcpu(vm=vm_id, shared_vcpu=f2_shared_vcpu, vcpus=f2_vcpus, prev_total_vcpus=origin_total_vcpus)

        LOG.tc_step("Resize vm w/non shared cpu flavor")
        vm_helper.resize_vm(vm_id, f3_non_shared)
        vm_helper.wait_for_vm_pingable_from_natbox(vm_id)
        check_shared_vcpu(vm=vm_id, shared_vcpu=None, vcpus=f3_vcpus, prev_total_vcpus=origin_total_vcpus)

        LOG.tc_step("Resize vm back to shared cpu flavor and validate shared vcpu")
        vm_helper.resize_vm(vm_id, f2_shared_cpu)
        vm_helper.wait_for_vm_pingable_from_natbox(vm_id)
        check_shared_vcpu(vm=vm_id, shared_vcpu=f2_shared_vcpu, vcpus=f2_vcpus, prev_total_vcpus=origin_total_vcpus)
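Example 7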
def _test_ea_max_vms_with_crypto_vfs(_flavors, hosts_pci_device_info):
    """
    Verify maximum number of guests with Crypto VFs can be launched and
    stabilized

    Args:
        _flavors:
        hosts_pci_device_info:

    Returns:

    """

    LOG.info("Pci device  {}".format(hosts_pci_device_info))

    flavor_id = _flavors['flavor_qat_vf_4']
    # Assume we only have 1 Coleto Creek PCI device on the system
    crypto_hosts = list(hosts_pci_device_info.keys())
    host = crypto_hosts[0]
    vf_info = hosts_pci_device_info[host][0]
    vf_device_id = vf_info['vf_device_id']
    vf_count = vf_info['vf_count']
    LOG.info("Vf_device_id {}, count: {}".format(vf_device_id, vf_count))

    # Number of VMs to launch to max out the total configured device VFs.
    # Each VM is launched with 4 VFs, and 4 VFs on each compute are reserved
    # for the resize nova action.

    number_of_vms = int((vf_count - 4 * len(crypto_hosts)) / 4)
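    # e.g. with vf_count = 32 and 2 crypto hosts: (32 - 4*2) / 4 = 6 VMs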

    mgmt_net_id = network_helper.get_mgmt_net_id()
    tenant_net_id = network_helper.get_tenant_net_id()
    vif_type = get_vif_type()

    nics = [{'net-id': mgmt_net_id},
            {'net-id': tenant_net_id, 'vif-model': vif_type}]

    vm_helper.ensure_vms_quotas(number_of_vms + 10)

    vms = {}
    LOG.tc_step("Launch {} vms using flavor flavor_qat_vf_4 and nics {}".format(number_of_vms, nics))
    for i in range(1, number_of_vms + 1):
        vm_name = 'vm_crypto_{}'.format(i)
        vm_id = vm_helper.boot_vm(cleanup='function', name=vm_name, nics=nics, flavor=flavor_id)[1]
        vm_helper.wait_for_vm_pingable_from_natbox(vm_id)
        vms[vm_name] = vm_id

    for vm_name_, vm_id_ in vms.items():
        vm_host = vm_helper.get_vm_host(vm_id_)
        host_dev_name = hosts_pci_device_info[vm_host][0]['device_name']
        expt_qat_devs = {host_dev_name: 4}
        check_helper.check_qat_service(vm_id=vm_id_, qat_devs=expt_qat_devs)

        LOG.info("Checking if other host has room for cold migrate vm {}".format(vm_name_))
        for host_ in crypto_hosts:
            if host_ != vm_host:
                total_vfs, used_vfs = network_helper.get_pci_device_vfs_counts_for_host(
                    host_, device_id=vf_device_id, fields=('pci_vfs_configured', 'pci_vfs_used'))

                if int(total_vfs) - int(used_vfs) >= 4:
                    LOG.info("Migrate to other host is possible")
                    expt_res = 0
                    break
        else:
            LOG.info("Migrate to other host is not possible")
            expt_res = 2

        LOG.tc_step("Attempt to cold migrate {} and ensure it {}".format(vm_name_,
                                                                         'succeeds' if expt_res == '0' else 'fails'))
        rc, msg = vm_helper.cold_migrate_vm(vm_id=vm_id_, fail_ok=True)
        assert expt_res == rc, "Expected: {}. Actual: {}".format(expt_res, msg)
        vm_helper.wait_for_vm_pingable_from_natbox(vm_id_)

        LOG.tc_step("Suspend/resume VM {} ....".format(vm_name_))
        vm_helper.suspend_vm(vm_id_)
        vm_helper.resume_vm(vm_id_)
        vm_helper.wait_for_vm_pingable_from_natbox(vm_id_)

        flavor_resize_id = _flavors['flavor_resize_qat_vf_4']
        LOG.tc_step("Resize VM {} to new flavor {} with increased memory...".format(vm_name_, flavor_resize_id))
        vm_helper.resize_vm(vm_id_, flavor_resize_id)
        vm_helper.wait_for_vm_pingable_from_natbox(vm_id_)

        LOG.tc_step("Attempt to live migrate {} and ensure it's rejected".format(vm_name_))
        rc, msg = vm_helper.live_migrate_vm(vm_id=vm_id_, fail_ok=True)
        assert 6 == rc, "Expect live migration to fail on vm with pci alias device. Actual: {}".format(msg)
        vm_helper.wait_for_vm_pingable_from_natbox(vm_id_)

        check_helper.check_qat_service(vm_id=vm_id_, qat_devs=expt_qat_devs)
Example 8
def test_resize_instances(launch_instances, create_flavour_and_image):
    vm_helper.resize_vm(vm_id=launch_instances, flavor_id=create_flavour_and_image["flavor2"])
    vm_helper.resize_vm(vm_id=launch_instances, flavor_id=create_flavour_and_image["flavor1"])
Example 9
    def test_resize_different_comp_node(self, storage_backing,
                                        get_hosts_per_backing):
        """
        Test resizing a larger vm onto a different compute node, checking hypervisor statistics to
        make sure the difference in disk usage on both nodes involved is correctly reflected

        Args:
            storage_backing: The host storage backing required
        Skip Conditions:
            - 2 hosts must exist with required storage backing.
        Test setup:
            - For each of the two backings tested, the setup returns the number of hosts with that
              backing and the vm host that the vm will initially be created on.
        Test Steps:
            - Create a flavor with a root disk size that is slightly larger than the default image used to boot up
            the VM
            - Create a VM with the aforementioned flavor
            - Create a flavor with enough cpus to occupy the rest of the cpus on the same host as the first VM
            - Create another VM on the same host as the first VM
            - Create a similar flavor to the first one, except that it has one more vcpu
            - Resize the first VM and confirm that it is on a different host
            - Check hypervisor-show on both computes to make sure that disk usage goes down on the original host and
              goes up on the new host
        Test Teardown:
            - Delete created VMs
            - Delete created flavors

        """
        hosts_with_backing = get_hosts_per_backing.get(storage_backing, [])
        if len(hosts_with_backing) < 2:
            skip(
                SkipStorageBacking.LESS_THAN_TWO_HOSTS_WITH_BACKING.format(
                    storage_backing))

        origin_host, cpu_count, compute_space_dict = get_cpu_count(
            hosts_with_backing)

        root_disk_size = GuestImages.IMAGE_FILES[
            GuestImages.DEFAULT['guest']][1] + 5

        # make vm (1 cpu)
        LOG.tc_step("Create flavor with 1 cpu")
        numa0_specs = {
            FlavorSpec.CPU_POLICY: 'dedicated',
            FlavorSpec.NUMA_0: 0
        }
        flavor_1 = nova_helper.create_flavor(
            ephemeral=0,
            swap=0,
            root_disk=root_disk_size,
            vcpus=1,
            storage_backing=storage_backing)[1]
        ResourceCleanup.add('flavor', flavor_1)
        nova_helper.set_flavor(flavor_1, **numa0_specs)

        LOG.tc_step("Boot a vm with above flavor")
        vm_to_resize = vm_helper.boot_vm(flavor=flavor_1,
                                         source='image',
                                         cleanup='function',
                                         vm_host=origin_host)[1]
        vm_helper.wait_for_vm_pingable_from_natbox(vm_to_resize)

        # launch another vm
        LOG.tc_step("Create a flavor to occupy vcpus")
        occupy_amount = int(cpu_count) - 1
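        # The first vm already holds 1 vcpu, so occupying (cpu_count - 1) more fills
        # the host; the 2-vcpu resize flavor then cannot fit and must change hosts.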
        second_specs = {
            FlavorSpec.CPU_POLICY: 'dedicated',
            FlavorSpec.NUMA_0: 0
        }
        flavor_2 = nova_helper.create_flavor(
            vcpus=occupy_amount, storage_backing=storage_backing)[1]
        ResourceCleanup.add('flavor', flavor_2)
        nova_helper.set_flavor(flavor_2, **second_specs)

        LOG.tc_step("Boot a vm with above flavor to occupy remaining vcpus")
        vm_2 = vm_helper.boot_vm(flavor=flavor_2,
                                 source='image',
                                 cleanup='function',
                                 vm_host=origin_host)[1]
        vm_helper.wait_for_vm_pingable_from_natbox(vm_2)

        LOG.tc_step('Check disk usage before resize')
        prev_val_origin_host = get_disk_avail_least(origin_host)
        LOG.info("{} space left on compute".format(prev_val_origin_host))

        # create a larger flavor and resize
        LOG.tc_step(
            "Create a flavor that has an extra vcpu to force resize to a different node"
        )
        resize_flavor = nova_helper.create_flavor(
            ephemeral=0,
            swap=0,
            root_disk=root_disk_size,
            vcpus=2,
            storage_backing=storage_backing)[1]
        ResourceCleanup.add('flavor', resize_flavor)
        nova_helper.set_flavor(resize_flavor, **numa0_specs)

        LOG.tc_step("Resize the vm and verify if it is on a different host")
        vm_helper.resize_vm(vm_to_resize, resize_flavor)
        new_host = vm_helper.get_vm_host(vm_to_resize)
        assert new_host != origin_host, "vm did not change hosts following resize"

        LOG.tc_step('Check disk usage on computes after resize')
        if storage_backing == 'remote':
            LOG.info(
                "Compute disk usage change should be minimal for remote storage backing"
            )
            root_disk_size = 0

        check_correct_post_resize_value(prev_val_origin_host, root_disk_size,
                                        origin_host)

        prev_val_new_host = compute_space_dict[new_host]
        check_correct_post_resize_value(prev_val_new_host,
                                        -root_disk_size,
                                        new_host,
                                        sleep=False)
        vm_helper.wait_for_vm_pingable_from_natbox(vm_to_resize)
Example 10
    def test_resize_vm_negative(self, add_hosts_to_zone, storage_backing,
                                origin_flavor, dest_flavor, boot_source):
        """
        Test that disallowed resizes of a vm's disks are rejected:
        - Resize to a smaller ephemeral flavor is not allowed
        - Resize to a zero-disk flavor is not allowed     (boot from image only)

        Args:
            storage_backing: The host storage backing required
            origin_flavor: The flavor to boot the vm from, listed by GBs for root, ephemeral, and swap disks, i.e. for a
                           system with a 2GB root disk, a 1GB ephemeral disk, and no swap disk: (2, 1, 0)
            boot_source: Which source to boot the vm from, either 'volume' or 'image'
        Skip Conditions:
            - No hosts exist with required storage backing.
        Test setup:
            - Put a single host of each backing in the cgcsauto zone to prevent migration and instead force resize.
            - Create two flavors based on origin_flavor and dest_flavor
            - Create a volume or image to boot from.
            - Boot VM with origin_flavor
        Test Steps:
            - Resize VM to dest_flavor with revert
            - Resize VM to dest_flavor with confirm
        Test Teardown:
            - Delete created VM
            - Delete created volume or image
            - Delete created flavors
            - Remove hosts from cgcsauto zone
            - Delete cgcsauto zone

        """
        vm_host = add_hosts_to_zone.get(storage_backing, None)

        if not vm_host:
            skip("No available host with {} storage backing".format(
                storage_backing))

        LOG.tc_step('Create origin flavor')
        origin_flavor_id = _create_flavor(origin_flavor, storage_backing)
        LOG.tc_step('Create destination flavor')
        dest_flavor_id = _create_flavor(dest_flavor, storage_backing)
        vm_id = _boot_vm_to_test(boot_source, vm_host, origin_flavor_id)
        vm_helper.wait_for_vm_pingable_from_natbox(vm_id)

        vm_disks = vm_helper.get_vm_devices_via_virsh(vm_id)
        root, ephemeral, swap = origin_flavor
        file_paths, content = touch_files_under_vm_disks(vm_id=vm_id,
                                                         ephemeral=ephemeral,
                                                         swap=swap,
                                                         vm_type=boot_source,
                                                         disks=vm_disks)

        LOG.tc_step('Resize vm to dest flavor')
        code, output = vm_helper.resize_vm(vm_id, dest_flavor_id, fail_ok=True)
        vm_helper.wait_for_vm_pingable_from_natbox(vm_id)

        assert vm_helper.get_vm_flavor(
            vm_id) == origin_flavor_id, 'VM did not keep origin flavor'
        assert code > 0, "Resize VM CLI is not rejected"

        LOG.tc_step("Check files after resize attempt")
        check_helper.check_vm_files(vm_id=vm_id,
                                    storage_backing=storage_backing,
                                    root=root,
                                    ephemeral=ephemeral,
                                    swap=swap,
                                    vm_type=boot_source,
                                    vm_action=None,
                                    file_paths=file_paths,
                                    content=content,
                                    disks=vm_disks)
Example 11
    def test_resize_vm_positive(self, add_hosts_to_zone, storage_backing,
                                origin_flavor, dest_flavor, boot_source):
        """
        Test resizing disks of a vm:
        - Resizing the root disk is allowed, except to 0 and when booted from image
        - Resizing to a larger or equal ephemeral disk is allowed
        - Resizing swap to any size is allowed, including removing it

        Args:
            storage_backing: The host storage backing required
            origin_flavor: The flavor to boot the vm from, listed by GBs for root, ephemeral, and swap disks, i.e. for a
                           system with a 2GB root disk, a 1GB ephemeral disk, and no swap disk: (2, 1, 0)
            boot_source: Which source to boot the vm from, either 'volume' or 'image'
            add_hosts_to_zone: fixture mapping each storage backing to a host placed in the cgcsauto zone
            dest_flavor: The flavor to resize the vm to, in the same (root, ephemeral, swap) format

        Skip Conditions:
            - No hosts exist with required storage backing.
        Test setup:
            - Put a single host of each backing in the cgcsauto zone to prevent migration and instead force resize.
            - Create two flavors based on origin_flavor and dest_flavor
            - Create a volume or image to boot from.
            - Boot VM with origin_flavor
        Test Steps:
            - Resize VM to dest_flavor with revert
            - If vm is booted from image and has a non-remote backing, check that the amount of disk
              space post-revert is about the same as pre-revert    # TC5155
            - Resize VM to dest_flavor with confirm
            - If vm is booted from image and has a non-remote backing, check that the amount of disk
              space post-confirm reflects the increase in disk space taken up      # TC5155
        Test Teardown:
            - Delete created VM
            - Delete created volume or image
            - Delete created flavors
            - Remove hosts from the cgcsauto zone
            - Delete the cgcsauto zone

        """
        vm_host = add_hosts_to_zone.get(storage_backing, None)

        if not vm_host:
            skip(
                SkipStorageBacking.NO_HOST_WITH_BACKING.format(
                    storage_backing))

        expected_increase, expect_to_check = get_expt_disk_increase(
            origin_flavor, dest_flavor, boot_source, storage_backing)
        LOG.info("Expected_increase of vm compute occupancy is {}".format(
            expected_increase))

        LOG.tc_step('Create origin flavor')
        origin_flavor_id = _create_flavor(origin_flavor, storage_backing)
        vm_id = _boot_vm_to_test(boot_source, vm_host, origin_flavor_id)
        vm_helper.wait_for_vm_pingable_from_natbox(vm_id)

        vm_disks = vm_helper.get_vm_devices_via_virsh(vm_id)
        root, ephemeral, swap = origin_flavor
        if boot_source == 'volume':
            root = GuestImages.IMAGE_FILES[GuestImages.DEFAULT['guest']][1]
        file_paths, content = touch_files_under_vm_disks(vm_id=vm_id,
                                                         ephemeral=ephemeral,
                                                         swap=swap,
                                                         vm_type=boot_source,
                                                         disks=vm_disks)

        if expect_to_check:
            LOG.tc_step('Check initial disk usage')
            original_disk_value = get_disk_avail_least(vm_host)
            LOG.info("{} space left on compute".format(original_disk_value))

        LOG.tc_step('Create destination flavor')
        dest_flavor_id = _create_flavor(dest_flavor, storage_backing)
        LOG.tc_step('Resize vm to dest flavor and revert')
        vm_helper.resize_vm(vm_id, dest_flavor_id, revert=True, fail_ok=False)
        vm_helper.wait_for_vm_pingable_from_natbox(vm_id)

        LOG.tc_step("Check files after resize revert")
        swap_size = swap
        if storage_backing == 'remote' and swap and dest_flavor[2]:
            swap_size = dest_flavor[2]

        time.sleep(30)
        prev_host = vm_helper.get_vm_host(vm_id)
        check_helper.check_vm_files(vm_id=vm_id,
                                    storage_backing=storage_backing,
                                    root=root,
                                    ephemeral=ephemeral,
                                    swap=swap_size,
                                    vm_type=boot_source,
                                    vm_action=None,
                                    file_paths=file_paths,
                                    content=content,
                                    disks=vm_disks,
                                    check_volume_root=True)

        # Check for TC5155 blocked by JIRA: CGTS-8299
        # if expect_to_check:
        #     LOG.tc_step('Check disk usage after revertion')
        #     revert_disk_value = check_correct_post_resize_value(original_disk_value, 0, vm_host)

        LOG.tc_step('Resize vm to dest flavor and confirm')
        vm_helper.resize_vm(vm_id, dest_flavor_id, revert=False, fail_ok=False)
        vm_helper.wait_for_vm_pingable_from_natbox(vm_id)
        post_host = vm_helper.get_vm_host(vm_id)
        post_root, post_ephemeral, post_swap = dest_flavor
        if boot_source == 'volume':
            post_root = GuestImages.IMAGE_FILES[
                GuestImages.DEFAULT['guest']][1]
        post_ephemeral = ephemeral if ephemeral else post_ephemeral  # CGTS-8041
        LOG.tc_step("Check files after resize attempt")
        check_helper.check_vm_files(
            vm_id=vm_id,
            storage_backing=storage_backing,
            ephemeral=post_ephemeral,
            swap=post_swap,
            vm_type=boot_source,
            vm_action='resize',
            file_paths=file_paths,
            content=content,
            prev_host=prev_host,
            post_host=post_host,
            root=post_root,
            disks=vm_disks,
            post_disks=vm_helper.get_vm_devices_via_virsh(vm_id),
            check_volume_root=True)
Example 12
    def test_pci_vm_nova_actions(self, pci_numa_affinity,
                                 pci_irq_affinity_mask, pci_alias,
                                 vif_model_check, pci_dev_numa_nodes):
        """
        Test vm actions on vm with multiple ports with given vif models on the same tenant network

        Args:

        Setups:
            - create a flavor with dedicated cpu policy (module)
            - choose one tenant network and one internal network to be used by test (module)
            - boot a base vm - vm1 with above flavor and networks, and ping it from NatBox (module)
            - Boot a vm under test - vm2 with above flavor and with multiple ports on same tenant network with base vm,
            and ping it from NatBox      (class)
            - Ping vm2's own data network ips        (class)
            - Ping vm2 from vm1 to verify management and data networks connection    (class)

        Test Steps:
            - Perform given actions on vm2 (migrate, start/stop, etc)
            - Verify ping from vm1 to vm2 over management and data networks still works
            - Verify the correct number of PCI devices are created, in correct types,
                    the numa node of the PCI devices aligns with that of CPUs, and affined CPUs for PCI devices
                    are same as specified by 'pci_alias' (if applicable)

        Teardown:
            - Delete created vms and flavor
        """
        pci_irq_affinity_mask, pci_alias = _convert_irqmask_pcialias(
            pci_irq_affinity_mask, pci_alias)
        boot_forbidden = False
        migrate_forbidden = False
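        # With 'required' PCI NUMA affinity plus a pci_alias, scheduling depends
        # on how many hosts have the PCI device on the matching NUMA node:
        # none -> boot should be rejected; exactly one -> migration should fail.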
        if pci_numa_affinity == 'required' and pci_alias is not None:
            host_count = pci_dev_numa_nodes
            if host_count == 0:
                boot_forbidden = True
            elif host_count == 1:
                migrate_forbidden = True
        LOG.tc_step(
            "Expected result - Disallow boot: {}; Disallow migrate: {}".format(
                boot_forbidden, migrate_forbidden))

        self.pci_numa_affinity = pci_numa_affinity
        self.pci_alias = pci_alias
        self.pci_irq_affinity_mask = pci_irq_affinity_mask

        if pci_alias is not None:
            LOG.info('Check if PCI-Alias devices exist')
            self.is_pci_device_supported(pci_alias)

        self.vif_model, self.base_vm, self.base_flavor_id, self.nics_to_test, self.seg_id, \
            self.pnet_name, self.extra_pcipt_net = vif_model_check

        LOG.tc_step(
            "Create a flavor with specified extra-specs and dedicated cpu policy"
        )
        flavor_id = self.create_flavor_for_pci()

        LOG.tc_step("Boot a vm with {} vif model on internal net".format(
            self.vif_model))
        # TODO: feature unavailable atm. Update required
        # resource_param = 'pci_vfs_used' if 'sriov' in self.vif_model else 'pci_pfs_used'
        # LOG.tc_step("Get resource usage for {} interface before booting VM(s)".format(self.vif_model))
        # pre_resource_value = nova_helper.get_provider_net_info(self.pnet_name, field=resource_param)

        res, vm_id, err = vm_helper.boot_vm(name=self.vif_model,
                                            flavor=flavor_id,
                                            cleanup='function',
                                            nics=self.nics_to_test,
                                            fail_ok=boot_forbidden)
        if boot_forbidden:
            assert res > 0, "VM booted successfully even though the NUMA nodes for pcipt/sriov and pci alias mismatch"
            return

        self.vm_id = vm_id

        if 'pci-passthrough' == self.vif_model:
            LOG.tc_step("Add vlan to pci-passthrough interface for VM.")
            vm_helper.add_vlan_for_vm_pcipt_interfaces(vm_id=vm_id,
                                                       net_seg_id=self.seg_id,
                                                       init_conf=True)

        LOG.tc_step("Ping vm over mgmt and internal nets from base vm")
        vm_helper.ping_vms_from_vm(to_vms=self.vm_id,
                                   from_vm=self.vm_id,
                                   net_types=['mgmt', 'internal'])
        vm_helper.ping_vms_from_vm(to_vms=self.vm_id,
                                   from_vm=self.base_vm,
                                   net_types=['mgmt', 'internal'])

        self.vm_topology = vm_helper.get_vm_values(
            vm_id=self.vm_id, fields='wrs-res:topology')[0]
        vnic_type = 'direct' if self.vif_model == 'pci-sriov' else 'direct-physical'
        self.pci_nics = vm_helper.get_vm_nics_info(vm_id=self.vm_id,
                                                   vnic_type=vnic_type)
        assert self.pci_nics

        self.wait_check_vm_states(step='boot')

        # TODO: feature unavailable atm. Update required
        # LOG.tc_step("Check {} usage is incremented by 1".format(resource_param))
        # post_resource_value = nova_helper.get_provider_net_info(self.pnet_name, field=resource_param)
        # expt_change = 2 if self.vif_model == 'pci-passthrough' and self.extra_pcipt_net else 1
        # assert pre_resource_value + expt_change == post_resource_value, "{} usage is not incremented by {} as " \
        #                                                                 "expected".format(resource_param, expt_change)

        LOG.tc_step('Pause/Unpause {} vm'.format(self.vif_model))
        vm_helper.pause_vm(self.vm_id)
        vm_helper.unpause_vm(self.vm_id)

        LOG.tc_step(
            "Check vm still pingable over mgmt and internal nets after pause/unpause"
        )
        self.wait_check_vm_states(step='pause/unpause')

        LOG.tc_step('Suspend/Resume {} vm'.format(self.vif_model))
        vm_helper.suspend_vm(self.vm_id)
        vm_helper.resume_vm(self.vm_id)

        LOG.tc_step(
            "Check vm still pingable over mgmt and internal nets after suspend/resume"
        )
        self.wait_check_vm_states(step='suspend/resume')
        if 'pci-passthrough' == self.vif_model:
            LOG.tc_step(
                "Add/Check vlan interface is added to pci-passthrough device for vm {}."
                .format(self.vm_id))
            vm_helper.add_vlan_for_vm_pcipt_interfaces(vm_id=self.vm_id,
                                                       net_seg_id=self.seg_id)
        vm_helper.ping_vms_from_vm(to_vms=self.vm_id,
                                   from_vm=self.base_vm,
                                   net_types=['mgmt', 'internal'])

        LOG.tc_step('Cold migrate {} vm'.format(self.vif_model))
        code, msg = vm_helper.cold_migrate_vm(self.vm_id,
                                              fail_ok=migrate_forbidden)
        if migrate_forbidden:
            assert code > 0, "Expect migrate to fail since no other host has pcipt/sriov and pci-alias on the same " \
                             "numa node. Actual: {}".format(msg)
        self.wait_check_vm_states(step='cold-migrate')
        if 'pci-passthrough' == self.vif_model:
            LOG.tc_step(
                "Add/Check vlan interface is added to pci-passthrough device for vm {}."
                .format(self.vm_id))
            vm_helper.add_vlan_for_vm_pcipt_interfaces(vm_id=self.vm_id,
                                                       net_seg_id=self.seg_id)

        LOG.tc_step(
            "Check vm still pingable over mgmt and internal nets after cold migration"
        )
        vm_helper.ping_vms_from_vm(to_vms=self.vm_id,
                                   from_vm=self.base_vm,
                                   net_types=['mgmt', 'internal'])

        LOG.tc_step('Set vm to error and wait for it to be auto recovered')
        vm_helper.set_vm_state(vm_id=self.vm_id,
                               error_state=True,
                               fail_ok=False)
        vm_helper.wait_for_vm_values(vm_id=self.vm_id,
                                     status=VMStatus.ACTIVE,
                                     fail_ok=False,
                                     timeout=600)

        LOG.tc_step(
            "Check vm still pingable over mgmt and internal nets after auto recovery"
        )
        self.wait_check_vm_states(step='set-error-state-recover')
        if 'pci-passthrough' == self.vif_model:
            LOG.tc_step(
                "Add/Check vlan interface is added to pci-passthrough device for vm {}."
                .format(self.vm_id))
            vm_helper.add_vlan_for_vm_pcipt_interfaces(vm_id=self.vm_id,
                                                       net_seg_id=self.seg_id)
        vm_helper.ping_vms_from_vm(to_vms=self.vm_id,
                                   from_vm=self.base_vm,
                                   net_types=['mgmt', 'internal'])

        LOG.tc_step("Hard reboot {} vm".format(self.vif_model))
        vm_helper.reboot_vm(self.vm_id, hard=True)
        LOG.tc_step(
            "Check vm still pingable over mgmt and internal nets after nova reboot hard"
        )
        self.wait_check_vm_states(step='hard-reboot')
        if 'pci-passthrough' == self.vif_model:
            LOG.tc_step(
                "Add/Check vlan interface is added to pci-passthrough device for vm {}."
                .format(self.vm_id))
            vm_helper.add_vlan_for_vm_pcipt_interfaces(vm_id=self.vm_id,
                                                       net_seg_id=self.seg_id)
        vm_helper.ping_vms_from_vm(to_vms=self.vm_id,
                                   from_vm=self.base_vm,
                                   net_types=['mgmt', 'internal'])

        LOG.tc_step(
            "Create a flavor with dedicated cpu policy and resize vm to new flavor"
        )
        resize_flavor = nova_helper.create_flavor(name='dedicated',
                                                  ram=2048,
                                                  cleanup='function')[1]
        extra_specs = {FlavorSpec.CPU_POLICY: 'dedicated'}
        nova_helper.set_flavor(flavor=resize_flavor, **extra_specs)
        vm_helper.resize_vm(self.vm_id, resize_flavor)

        LOG.tc_step("Check vm still reachable after resize")
        self.wait_check_vm_states(step='resize')
        if 'pci-passthrough' == self.vif_model:
            LOG.tc_step(
                "Add/Check vlan interface is added to pci-passthrough device for vm {}."
                .format(self.vm_id))
            vm_helper.add_vlan_for_vm_pcipt_interfaces(vm_id=self.vm_id,
                                                       net_seg_id=self.seg_id)
        vm_helper.ping_vms_from_vm(to_vms=self.vm_id,
                                   from_vm=self.base_vm,
                                   net_types=['mgmt', 'internal'])
Example 13
def test_snat_vm_actions(snat_setups, snat):
    """
    Test VM external access over VM launch, live-migration, cold-migration, pause/unpause, etc

    Args:
        snat_setups (tuple): returns vm id and fip. Enable snat, create vm and attach floating ip.

    Test Setups (module):
        - Find a tenant router that is dvr or non-dvr based on the parameter
        - Enable SNAT on tenant router
        - boot a vm and attach a floating ip
        - Ping vm from NatBox

    Test Steps:
        - Enable/Disable SNAT based on snat param
        - Ping from VM to 8.8.8.8
        - wget <lab_fip> to VM
        - scp from NatBox to VM
        - Live-migrate the VM and verify ping from VM
        - Cold-migrate the VM and verify ping from VM
        - Pause and un-pause the VM and verify ping from VM
        - Suspend and resume the VM and verify ping from VM
        - Stop and start the VM and verify ping from VM
        - Reboot the VM and verify ping from VM

    Test Teardown:
        - Enable snat for next test in the same module     (function)
        - Delete the created vm     (module)
        - Disable snat  (module)

    """
    vm_ = snat_setups[0]
    snat = (snat == 'snat_enabled')
    LOG.tc_step("Update tenant router external gateway to set SNAT to {}".format(snat))
    network_helper.set_router_gateway(enable_snat=snat)

    # Allow router update to complete, since we've seen cases where ping vm pass but ssh fail
    time.sleep(30)
    vm_helper.wait_for_vm_pingable_from_natbox(vm_, timeout=60, use_fip=snat)

    LOG.tc_step("Ping from VM {} to 8.8.8.8".format(vm_))
    vm_helper.ping_ext_from_vm(vm_, use_fip=True)

    LOG.tc_step("wget to VM {}".format(vm_))
    with vm_helper.ssh_to_vm_from_natbox(vm_id=vm_, use_fip=True) as vm_ssh:
        vm_ssh.exec_cmd('wget google.ca', fail_ok=False)

    LOG.tc_step("scp from NatBox to VM {}".format(vm_))
    vm_fip = network_helper.get_external_ips_for_vms(vms=vm_)[0]
    natbox_ssh = NATBoxClient.get_natbox_client()
    natbox_ssh.scp_on_source(source_path='test', dest_user='******', dest_ip=vm_fip, dest_path='/tmp/',
                             dest_password='******', timeout=30)

    LOG.tc_step("Live-migrate the VM and verify ping from VM")
    vm_helper.live_migrate_vm(vm_)
    vm_helper.wait_for_vm_pingable_from_natbox(vm_, use_fip=snat)
    vm_helper.ping_ext_from_vm(vm_, use_fip=True)

    LOG.tc_step("Cold-migrate the VM and verify ping from VM")
    vm_helper.cold_migrate_vm(vm_)
    vm_helper.wait_for_vm_pingable_from_natbox(vm_, use_fip=snat)
    vm_helper.ping_ext_from_vm(vm_, use_fip=True)

    LOG.tc_step("Pause and un-pause the VM and verify ping from VM")
    vm_helper.pause_vm(vm_)
    vm_helper.unpause_vm(vm_)
    vm_helper.wait_for_vm_pingable_from_natbox(vm_, use_fip=snat)
    vm_helper.ping_ext_from_vm(vm_, use_fip=True)

    LOG.tc_step("Suspend and resume the VM and verify ping from VM")
    vm_helper.suspend_vm(vm_)
    vm_helper.resume_vm(vm_)
    vm_helper.wait_for_vm_pingable_from_natbox(vm_, use_fip=snat)
    vm_helper.ping_ext_from_vm(vm_, use_fip=True)

    LOG.tc_step("Stop and start the VM and verify ping from VM")
    vm_helper.stop_vms(vm_)
    vm_helper.start_vms(vm_)
    vm_helper.wait_for_vm_pingable_from_natbox(vm_, use_fip=snat)
    vm_helper.ping_ext_from_vm(vm_, use_fip=True)

    LOG.tc_step("Reboot the VM and verify ping from VM")
    vm_helper.reboot_vm(vm_)
    vm_helper.wait_for_vm_pingable_from_natbox(vm_, use_fip=snat)
    vm_helper.ping_ext_from_vm(vm_, use_fip=True)

    LOG.tc_step("Resize the vm to a flavor with 2 dedicated cpus and verify ping from VM")
    new_flv = nova_helper.create_flavor(name='ded', vcpus=2)[1]
    ResourceCleanup.add('flavor', new_flv, scope='module')
    nova_helper.set_flavor(new_flv, **{FlavorSpec.CPU_POLICY: 'dedicated'})

    vm_helper.resize_vm(vm_, new_flv)
    vm_helper.wait_for_vm_pingable_from_natbox(vm_, use_fip=snat)
    vm_helper.ping_ext_from_vm(vm_, use_fip=True)
Example 14
def perform_vm_operation(vm_type,
                         vm_id,
                         op='live_migration',
                         extra_specs='vtpm'):
    LOG.info('Perform action:{} to the VM, extra specs:{}'.format(
        op, extra_specs))

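    # Dispatch table: each handler takes (vm_type, vm_id); most ignore vm_type.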
    op_table = {
        'live_migration': lambda x, y: vm_helper.live_migrate_vm(y),
        'cold_migration': lambda x, y: vm_helper.cold_migrate_vm(y),
        'stop_start': lambda x, y: (vm_helper.stop_vms(y), vm_helper.start_vms(y)),
        'suspend_resume': lambda x, y: (vm_helper.suspend_vm(y), vm_helper.resume_vm(y)),
        'pause_unpause': lambda x, y: (vm_helper.pause_vm(y), vm_helper.unpause_vm(y)),
        'reboot_host': lambda x, y: reboot_hosting_node(x, y, force_reboot=False),
        'soft_reboot': lambda x, y: vm_helper.reboot_vm(y, hard=False),
        'hard_reboot': lambda x, y: vm_helper.reboot_vm(y, hard=True),
        'lock_unlock': lambda x, y: lock_unlock_hosting_node(x, y, force_lock=False),
        'evacuate': lambda x, y: reboot_hosting_node(x, y, force_reboot=True),
    }

    if op in op_table:
        LOG.info('Perform action: {}'.format(op))
        op_table[op](vm_type, vm_id)

        return True

    elif op == 'resize_to_autorc':
        if vm_type == 'autorc':
            LOG.info('resize from AUTO-RECOVERY to another AUTO-RECOVERY flavor')

        to_flavor_id = get_flavor_id(vm_type, 'autorc2')
        LOG.info('op={}, vm_type={}, to_flavor_id={}'.format(op, vm_type, to_flavor_id))

        vm_helper.resize_vm(vm_id, to_flavor_id)
        return True

    elif op == 'resize_to_non_autorc':
        LOG.info('perform {} on type:{}, id:{}'.format(op, vm_type, vm_id))
        if vm_type == 'non_autorc2':
            LOG.warn('resize from NON-AUTO-RECOVERY to another NON-AUTO-RECOVERY flavor')

        to_flavor_id = get_flavor_id(vm_type, 'non_autorc2')
        vm_helper.resize_vm(vm_id, to_flavor_id)
        return True

    elif op == 'resize_to_non_vtpm':
        LOG.info('perform {} on type:{}, id:{}'.format(op, vm_type, vm_id))

        to_flavor_id = get_flavor_id(vm_type, 'non_vtpm')
        vm_helper.resize_vm(vm_id, to_flavor_id)
        return True

    else:
        LOG.fatal('Unsupported action: {}'.format(op))
        return False
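
# A minimal usage sketch (hypothetical vm_id; assumes a VM of the given type
# already exists and the helpers above are importable):
#
#     for op_ in ('live_migration', 'suspend_resume', 'resize_to_autorc'):
#         assert perform_vm_operation('vtpm', vm_id, op=op_)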