Example #1
    # Attempt to lock the active controller and verify the lock is rejected
    LOG.tc_step("Lock active controller and ensure it fails to lock")
    exit_code, cmd_output = host_helper.lock_host(active_controller,
                                                  fail_ok=True,
                                                  swact=False,
                                                  check_first=False)
    assert exit_code == 1, 'Expect locking active controller to ' \
                           'be rejected. Actual: {}'.format(cmd_output)
    status = system_helper.get_host_values(active_controller,
                                           'administrative')[0]
    assert status == 'unlocked', "Fail: The active controller was locked."
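    # A lock attempt on the active controller without a swact is expected to
    # be rejected with a non-zero return code, leaving the host
    # administratively unlocked.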


@mark.parametrize('host_type', [
    param('controller',
          marks=mark.priorities('platform_sanity', 'sanity', 'cpe_sanity')),
    param('compute', marks=mark.priorities('platform_sanity')),
    param('storage', marks=mark.priorities('platform_sanity')),
])
def test_lock_unlock_host(host_type):
    """
    Verify lock unlock host

    Test Steps:
        - Select a host per given type. If type is controller, select
            standby controller.
        - Lock selected host and ensure it is successfully locked
        - Unlock selected host and ensure it is successfully unlocked

    """
    LOG.tc_step("Select a {} node from system if any".format(host_type))
Example #2
from consts.stx import FlavorSpec, VMStatus
from consts.reasons import SkipStorageSpace

from keywords import vm_helper, nova_helper, glance_helper, cinder_helper
from testfixtures.fixture_resources import ResourceCleanup


def id_gen(val):
    if isinstance(val, list):
        return '-'.join(val)
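# e.g. id_gen(['pause', 'unpause']) returns 'pause-unpause'; non-list values
# return None, so pytest falls back to its default id for those params, giving
# test ids such as test_nova_actions[tis-centos-guest-dedicated-pause-unpause].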


@mark.parametrize(('guest_os', 'cpu_pol', 'actions'), [
    param('tis-centos-guest',
          'dedicated', ['pause', 'unpause'],
          marks=mark.priorities('sanity', 'cpe_sanity', 'sx_sanity')),
    param('ubuntu_14', 'shared', ['stop', 'start'], marks=mark.sanity),
    param('ubuntu_14', 'dedicated', ['auto_recover'], marks=mark.sanity),
    param('tis-centos-guest',
          'dedicated', ['suspend', 'resume'],
          marks=mark.priorities('sanity', 'cpe_sanity', 'sx_sanity')),
],
                  ids=id_gen)
def test_nova_actions(guest_os, cpu_pol, actions):
    """

    Args:
        guest_os:
        cpu_pol:
        actions:
Example #3
class TestResizeSameHost:
    @fixture(scope='class')
    def add_hosts_to_zone(self, request, add_cgcsauto_zone,
                          get_hosts_per_backing):
        hosts_per_backing = get_hosts_per_backing
        avail_hosts = {
            key: vals[0]
            for key, vals in hosts_per_backing.items() if vals
        }
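        # avail_hosts maps each storage backing to a single host; keeping only
        # one host per backing in the cgcsauto zone forces a resize to stay on
        # the same host instead of migrating.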

        if not avail_hosts:
            skip("No host in any storage aggregate")

        nova_helper.add_hosts_to_aggregate(aggregate='cgcsauto',
                                           hosts=list(avail_hosts.values()))

        def remove_hosts_from_zone():
            nova_helper.remove_hosts_from_aggregate(aggregate='cgcsauto',
                                                    check_first=False)

        request.addfinalizer(remove_hosts_from_zone)
        return avail_hosts

    @mark.parametrize(
        ('storage_backing', 'origin_flavor', 'dest_flavor', 'boot_source'),
        [
            ('remote', (4, 0, 0), (5, 1, 512), 'image'),
            ('remote', (4, 1, 512), (5, 2, 1024), 'image'),
            ('remote', (4, 1, 512), (4, 1, 0),
             'image'),  # https://bugs.launchpad.net/nova/+bug/1762423
            param('remote', (4, 0, 0), (1, 1, 512),
                  'volume',
                  marks=mark.priorities('nightly', 'sx_nightly')),
            ('remote', (4, 1, 512), (8, 2, 1024), 'volume'),
            ('remote', (4, 1, 512), (0, 1, 0), 'volume'),
            ('local_image', (4, 0, 0), (5, 1, 512), 'image'),
            param('local_image', (4, 1, 512), (5, 2, 1024),
                  'image',
                  marks=mark.priorities('nightly', 'sx_nightly')),
            ('local_image', (5, 1, 512), (5, 1, 0), 'image'),
            ('local_image', (4, 0, 0), (5, 1, 512), 'volume'),
            ('local_image', (4, 1, 512), (0, 2, 1024), 'volume'),
            ('local_image', (4, 1, 512), (1, 1, 0),
             'volume'),  # https://bugs.launchpad.net/nova/+bug/1762423
        ],
        ids=id_gen)
    def test_resize_vm_positive(self, add_hosts_to_zone, storage_backing,
                                origin_flavor, dest_flavor, boot_source):
        """
        Test resizing disks of a vm
        - Resizing the root disk is allowed, except to 0 for a boot-from-image vm
        - Resizing ephemeral to a larger or equal size is allowed
        - Resizing swap to any size, including removing it, is allowed

        Args:
            storage_backing: The host storage backing required
            origin_flavor: The flavor to boot the vm from, given as (root GiB,
                           ephemeral GiB, swap MiB), e.g. (2, 1, 0) for a
                           system with a 2GB root disk, a 1GB ephemeral disk
                           and no swap disk
            dest_flavor: The flavor to resize the vm to, in the same format
            boot_source: Which source to boot the vm from, either 'volume' or
                         'image'
            add_hosts_to_zone: class fixture mapping each storage backing to
                an available host in the cgcsauto zone

        Skip Conditions:
            - No hosts exist with required storage backing.
        Test setup:
            - Put a single host of each backing in the cgcsauto zone to prevent migration and instead force resize.
            - Create two flavors based on origin_flavor and dest_flavor
            - Create a volume or image to boot from.
            - Boot VM with origin_flavor
        Test Steps:
            - Resize VM to dest_flavor with revert
            - If vm is booted from image and has a non-remote backing, check
                that the amount of disk space post-revert is about the same as
                pre-revert    # TC5155
            - Resize VM to dest_flavor with confirm
            - If vm is booted from image and has a non-remote backing, check
                that the amount of disk space post-confirm reflects the
                increase in disk space taken up      # TC5155
        Test Teardown:
            - Delete created VM
            - Delete created volume or image
            - Delete created flavors
            - Remove hosts from the cgcsauto zone
            - Delete the cgcsauto zone

        """
        vm_host = add_hosts_to_zone.get(storage_backing, None)

        if not vm_host:
            skip(
                SkipStorageBacking.NO_HOST_WITH_BACKING.format(
                    storage_backing))

        expected_increase, expect_to_check = get_expt_disk_increase(
            origin_flavor, dest_flavor, boot_source, storage_backing)
        LOG.info("Expected_increase of vm compute occupancy is {}".format(
            expected_increase))

        LOG.tc_step('Create origin flavor')
        origin_flavor_id = _create_flavor(origin_flavor, storage_backing)
        vm_id = _boot_vm_to_test(boot_source, vm_host, origin_flavor_id)
        vm_helper.wait_for_vm_pingable_from_natbox(vm_id)

        vm_disks = vm_helper.get_vm_devices_via_virsh(vm_id)
        root, ephemeral, swap = origin_flavor
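        # For a volume-booted vm the root disk size comes from the volume
        # (created from the guest image) rather than the flavor, so the
        # expected root size is taken from the guest image definition below.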
        if boot_source == 'volume':
            root = GuestImages.IMAGE_FILES[GuestImages.DEFAULT['guest']][1]
        file_paths, content = touch_files_under_vm_disks(vm_id=vm_id,
                                                         ephemeral=ephemeral,
                                                         swap=swap,
                                                         vm_type=boot_source,
                                                         disks=vm_disks)

        if expect_to_check:
            LOG.tc_step('Check initial disk usage')
            original_disk_value = get_disk_avail_least(vm_host)
            LOG.info("{} space left on compute".format(original_disk_value))

        LOG.tc_step('Create destination flavor')
        dest_flavor_id = _create_flavor(dest_flavor, storage_backing)
        LOG.tc_step('Resize vm to dest flavor and revert')
        vm_helper.resize_vm(vm_id, dest_flavor_id, revert=True, fail_ok=False)
        vm_helper.wait_for_vm_pingable_from_natbox(vm_id)

        swap_size = swap
        LOG.tc_step("Check files after resize revert")
        if storage_backing == 'remote' and swap and dest_flavor[2]:
            swap_size = dest_flavor[2]

        time.sleep(30)
        prev_host = vm_helper.get_vm_host(vm_id)
        check_helper.check_vm_files(vm_id=vm_id,
                                    storage_backing=storage_backing,
                                    root=root,
                                    ephemeral=ephemeral,
                                    swap=swap_size,
                                    vm_type=boot_source,
                                    vm_action=None,
                                    file_paths=file_paths,
                                    content=content,
                                    disks=vm_disks,
                                    check_volume_root=True)

        # Check for TC5155 blocked by JIRA: CGTS-8299
        # if expect_to_check:
        #     LOG.tc_step('Check disk usage after revertion')
        #     revert_disk_value = check_correct_post_resize_value(original_disk_value, 0, vm_host)

        LOG.tc_step('Resize vm to dest flavor and confirm')
        vm_helper.resize_vm(vm_id, dest_flavor_id, revert=False, fail_ok=False)
        vm_helper.wait_for_vm_pingable_from_natbox(vm_id)
        post_host = vm_helper.get_vm_host(vm_id)
        post_root, post_ephemeral, post_swap = dest_flavor
        if boot_source == 'volume':
            post_root = GuestImages.IMAGE_FILES[
                GuestImages.DEFAULT['guest']][1]
        post_ephemeral = ephemeral if ephemeral else post_ephemeral  # CGTS-8041
        LOG.tc_step("Check files after resize attempt")
        check_helper.check_vm_files(
            vm_id=vm_id,
            storage_backing=storage_backing,
            ephemeral=post_ephemeral,
            swap=post_swap,
            vm_type=boot_source,
            vm_action='resize',
            file_paths=file_paths,
            content=content,
            prev_host=prev_host,
            post_host=post_host,
            root=post_root,
            disks=vm_disks,
            post_disks=vm_helper.get_vm_devices_via_virsh(vm_id),
            check_volume_root=True)

        # TODO: Check that root Cinder volume does not resize, for appropriate cases
        # Check for TC5155 blocked by JIRA: CGTS-8299
        # if expect_to_check:
        #     LOG.tc_step('Check that disk usage in hypervisor-stats changes is expected after a confirmed resize')
        #     check_correct_post_resize_value(original_disk_value, expected_increase, vm_host)

    @mark.parametrize(
        ('storage_backing', 'origin_flavor', 'dest_flavor', 'boot_source'),
        [
            ('remote', (5, 0, 0),
             (0, 0, 0), 'image'),  # Root disk can be resized, but cannot be 0
            ('remote', (5, 2, 512), (5, 1, 512),
             'image'),  # check ephemeral disk cannot be smaller than origin
            # ('remote',      (1, 0, 0), (0, 0, 0), 'volume'),   This should not fail, root disk size from volume not flavor
            ('remote', (1, 1, 512), (1, 0, 512), 'volume'
             ),  # check ephemeral disk cannot be smaller than origin
            ('local_image', (5, 0, 0),
             (0, 0, 0), 'image'),  # Root disk can be resized, but cannot be 0
            ('local_image', (5, 2, 512), (5, 1, 512), 'image'),
            ('local_image', (5, 1, 512), (4, 1, 512), 'image'),
            ('local_image', (5, 1, 512), (4, 1, 0), 'image'),
            # ('local_image', (1, 0, 0), (0, 0, 0), 'volume'),    root disk size from volume not flavor
            ('local_image', (1, 1, 512), (1, 0, 512), 'volume'),
        ],
        ids=id_gen)
    def test_resize_vm_negative(self, add_hosts_to_zone, storage_backing,
                                origin_flavor, dest_flavor, boot_source):
        """
        Test disk resizes that are not allowed:
        - Resizing to a smaller ephemeral flavor is not allowed
        - Resizing to a zero-disk flavor is not allowed     (boot from image only)

        Args:
            storage_backing: The host storage backing required
            origin_flavor: The flavor to boot the vm from, given as (root GiB,
                           ephemeral GiB, swap MiB), e.g. (2, 1, 0) for a
                           system with a 2GB root disk, a 1GB ephemeral disk
                           and no swap disk
            boot_source: Which source to boot the vm from, either 'volume' or 'image'
        Skip Conditions:
            - No hosts exist with required storage backing.
        Test setup:
            - Put a single host of each backing in the cgcsauto zone to prevent migration and instead force resize.
            - Create two flavors based on origin_flavor and dest_flavor
            - Create a volume or image to boot from.
            - Boot VM with origin_flavor
        Test Steps:
            - Resize VM to dest_flavor with revert
            - Resize VM to dest_flavor with confirm
        Test Teardown:
            - Delete created VM
            - Delete created volume or image
            - Delete created flavors
            - Remove hosts from cgcsauto zone
            - Delete cgcsauto zone

        """
        vm_host = add_hosts_to_zone.get(storage_backing, None)

        if not vm_host:
            skip("No available host with {} storage backing".format(
                storage_backing))

        LOG.tc_step('Create origin flavor')
        origin_flavor_id = _create_flavor(origin_flavor, storage_backing)
        LOG.tc_step('Create destination flavor')
        dest_flavor_id = _create_flavor(dest_flavor, storage_backing)
        vm_id = _boot_vm_to_test(boot_source, vm_host, origin_flavor_id)
        vm_helper.wait_for_vm_pingable_from_natbox(vm_id)

        vm_disks = vm_helper.get_vm_devices_via_virsh(vm_id)
        root, ephemeral, swap = origin_flavor
        file_paths, content = touch_files_under_vm_disks(vm_id=vm_id,
                                                         ephemeral=ephemeral,
                                                         swap=swap,
                                                         vm_type=boot_source,
                                                         disks=vm_disks)

        LOG.tc_step('Resize vm to dest flavor')
        code, output = vm_helper.resize_vm(vm_id, dest_flavor_id, fail_ok=True)
        vm_helper.wait_for_vm_pingable_from_natbox(vm_id)

        assert vm_helper.get_vm_flavor(
            vm_id) == origin_flavor_id, 'VM did not keep origin flavor'
        assert code > 0, "Resize VM CLI is not rejected"

        LOG.tc_step("Check files after resize attempt")
        check_helper.check_vm_files(vm_id=vm_id,
                                    storage_backing=storage_backing,
                                    root=root,
                                    ephemeral=ephemeral,
                                    swap=swap,
                                    vm_type=boot_source,
                                    vm_action=None,
                                    file_paths=file_paths,
                                    content=content,
                                    disks=vm_disks)
Example #4
class TestMutiPortsBasic:
    @fixture(scope='class')
    def base_setup(self):

        flavor_id = nova_helper.create_flavor(name='dedicated')[1]
        ResourceCleanup.add('flavor', flavor_id, scope='class')

        extra_specs = {FlavorSpec.CPU_POLICY: 'dedicated'}
        nova_helper.set_flavor(flavor=flavor_id, **extra_specs)

        mgmt_net_id = network_helper.get_mgmt_net_id()
        tenant_net_id = network_helper.get_tenant_net_id()
        internal_net_id = network_helper.get_internal_net_id()

        nics = [{'net-id': mgmt_net_id},
                {'net-id': tenant_net_id},
                {'net-id': internal_net_id}]

        LOG.fixture_step(
            "(class) Boot a base vm with following nics: {}".format(nics))
        base_vm = vm_helper.boot_vm(name='multiports_base',
                                    flavor=flavor_id, nics=nics,
                                    cleanup='class',
                                    reuse_vol=False)[1]

        vm_helper.wait_for_vm_pingable_from_natbox(base_vm)
        vm_helper.ping_vms_from_vm(base_vm, base_vm, net_types='data')

        return base_vm, flavor_id, mgmt_net_id, tenant_net_id, internal_net_id

    @mark.parametrize('vifs', [
        param(('virtio_x4',), marks=mark.priorities('nightly', 'sx_nightly'))
    ], ids=id_params)
    def test_multiports_on_same_network_vm_actions(self, vifs, base_setup):
        """
        Test vm actions on vm with multiple ports with given vif models on
        the same tenant network

        Args:
            vifs (tuple): each item in the tuple is 1 nic to be added to vm
                with specified (vif_mode, pci_address)
            base_setup (list): test fixture to boot base vm

        Setups:
            - create a flavor with dedicated cpu policy (class)
            - choose one tenant network and one internal network to be used
            by test (class)
            - boot a base vm - vm1 with above flavor and networks, and ping
            it from NatBox (class)
            - Boot a vm under test - vm2 with above flavor and with multiple
            ports on same tenant network with base vm,
            and ping it from NatBox      (class)
            - Ping vm2's own data network ips        (class)
            - Ping vm2 from vm1 to verify management and data networks
            connection    (class)

        Test Steps:
            - Perform given actions on vm2 (migrate, start/stop, etc)
            - Verify pci_address is preserved
            - Verify ping from vm1 to vm2 over management and data networks
            still works

        Teardown:
            - Delete created vms and flavor
        """
        base_vm, flavor, mgmt_net_id, tenant_net_id, internal_net_id = \
            base_setup

        vm_under_test, nics = _boot_multiports_vm(flavor=flavor,
                                                  mgmt_net_id=mgmt_net_id,
                                                  vifs=vifs,
                                                  net_id=tenant_net_id,
                                                  net_type='data',
                                                  base_vm=base_vm)

        for vm_actions in [['auto_recover'],
                           ['cold_migrate'],
                           ['pause', 'unpause'],
                           ['suspend', 'resume'],
                           ['hard_reboot']]:
            if vm_actions[0] == 'auto_recover':
                LOG.tc_step(
                    "Set vm to error state and wait for auto recovery "
                    "complete, then verify ping from "
                    "base vm over management and data networks")
                vm_helper.set_vm_state(vm_id=vm_under_test, error_state=True,
                                       fail_ok=False)
                vm_helper.wait_for_vm_values(vm_id=vm_under_test,
                                             status=VMStatus.ACTIVE,
                                             fail_ok=True, timeout=600)
            else:
                LOG.tc_step("Perform following action(s) on vm {}: {}".format(
                    vm_under_test, vm_actions))
                for action in vm_actions:
                    if 'migrate' in action and system_helper.is_aio_simplex():
                        continue

                    kwargs = {}
                    if action == 'hard_reboot':
                        action = 'reboot'
                        kwargs['hard'] = True
                    kwargs['action'] = action

                    vm_helper.perform_action_on_vm(vm_under_test, **kwargs)

            vm_helper.wait_for_vm_pingable_from_natbox(vm_under_test)

            # LOG.tc_step("Verify vm pci address preserved after {}".format(
            # vm_actions))
            # check_helper.check_vm_pci_addr(vm_under_test, nics)

            LOG.tc_step(
                "Verify ping from base_vm to vm_under_test over management "
                "and data networks still works "
                "after {}".format(vm_actions))
            vm_helper.ping_vms_from_vm(to_vms=vm_under_test, from_vm=base_vm,
                                       net_types=['mgmt', 'data'])
Example #5
class TestConfigMempage:
    MEM_CONFIGS = [None, 'any', 'large', 'small', '2048', '1048576']
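    # Page size values follow the hw:mem_page_size convention: sizes are in
    # KiB, so '2048' is 2MiB hugepages and '1048576' is 1GiB hugepages; None
    # means the spec/metadata is left unset.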

    @fixture(scope='class')
    def add_1g_and_4k_pages(self, request, config_host_class,
                            skip_for_one_proc, add_stxauto_zone,
                            add_admin_role_module):
        storage_backing, candidate_hosts = \
            keywords.host_helper.get_storage_backing_with_max_hosts()

        if len(candidate_hosts) < 2:
            skip("Less than two up hosts have same storage backing")

        LOG.fixture_step("Check mempage configs for hypervisors and select "
                         "host to use or configure")
        hosts_selected, hosts_to_configure = get_hosts_to_configure(
            candidate_hosts)

        if set(hosts_to_configure) != {None}:

            def _modify(host):
                is_1g = hosts_selected.index(host) == 0
                proc1_kwargs = {'gib_1g': 2, 'gib_4k_range': (None, 2)} if \
                    is_1g else {'gib_1g': 0, 'gib_4k_range': (2, None)}
                kwargs = {'gib_1g': 0, 'gib_4k_range': (None, 2)}, proc1_kwargs
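                # proc0 on both hosts gets no 1G pages and <2GiB of 4K pages;
                # proc1 gets >=2GiB of 1G pages on the first host and >=2GiB
                # of 4K pages on the second host.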

                actual_mems = host_helper._get_actual_mems(host=host)
                LOG.fixture_step("Modify {} proc0 to have 0 of 1G pages and "
                                 "<2GiB of 4K pages".format(host))
                host_helper.modify_host_memory(host,
                                               proc=0,
                                               actual_mems=actual_mems,
                                               **kwargs[0])
                LOG.fixture_step("Modify {} proc1 to have >=2GiB of {} "
                                 "pages".format(host, '1G' if is_1g else '4k'))
                host_helper.modify_host_memory(host,
                                               proc=1,
                                               actual_mems=actual_mems,
                                               **kwargs[1])

            for host_to_config in hosts_to_configure:
                if host_to_config:
                    config_host_class(host=host_to_config, modify_func=_modify)
                    LOG.fixture_step(
                        "Check mem pages for {} are modified "
                        "and updated successfully".format(host_to_config))
                    host_helper.wait_for_memory_update(host=host_to_config)

            LOG.fixture_step("Check host memories for {} after mem config "
                             "completed".format(hosts_selected))
            _, hosts_unconfigured = get_hosts_to_configure(hosts_selected)
            assert not hosts_unconfigured[0], \
                "Failed to configure {}. Expt: proc0:1g<2,4k<2gib;" \
                "proc1:1g>=2,4k<2gib".format(hosts_unconfigured[0])
            assert not hosts_unconfigured[1], \
                "Failed to configure {}. Expt: proc0:1g<2,4k<2gib;" \
                "proc1:1g<2,4k>=2gib".format(hosts_unconfigured[1])

        LOG.fixture_step('(class) Add hosts to stxauto aggregate: '
                         '{}'.format(hosts_selected))
        nova_helper.add_hosts_to_aggregate(aggregate='stxauto',
                                           hosts=hosts_selected)

        def remove_host_from_zone():
            LOG.fixture_step('(class) Remove hosts from stxauto aggregate: '
                             '{}'.format(hosts_selected))
            nova_helper.remove_hosts_from_aggregate(aggregate='stxauto',
                                                    check_first=False)

        request.addfinalizer(remove_host_from_zone)

        return hosts_selected, storage_backing

    @fixture(scope='class')
    def flavor_2g(self, add_1g_and_4k_pages):
        hosts, storage_backing = add_1g_and_4k_pages
        LOG.fixture_step("Create a 2G memory flavor to be used by mempage "
                         "testcases")
        flavor = nova_helper.create_flavor(name='flavor-2g',
                                           ram=2048,
                                           storage_backing=storage_backing,
                                           cleanup='class')[1]
        return flavor, hosts, storage_backing

    @fixture(scope='class')
    def image_mempage(self):
        LOG.fixture_step("(class) Create a glance image for mempage testcases")
        image_id = glance_helper.create_image(name='mempage',
                                              cleanup='class')[1]
        return image_id

    @fixture()
    def check_alarms(self, add_1g_and_4k_pages):
        hosts, storage_backing = add_1g_and_4k_pages
        host_helper.get_hypervisor_info(hosts=hosts)
        for host in hosts:
            host_helper.get_host_memories(host, wait_for_update=False)

    @fixture(params=MEM_CONFIGS)
    def flavor_mem_page_size(self, request, flavor_2g):
        flavor_id = flavor_2g[0]
        mem_page_size = request.param
        skip_4k_for_ovs(mem_page_size)

        if mem_page_size is None:
            nova_helper.unset_flavor(flavor_id, FlavorSpec.MEM_PAGE_SIZE)
        else:
            nova_helper.set_flavor(flavor_id,
                                   **{FlavorSpec.MEM_PAGE_SIZE: mem_page_size})

        return mem_page_size

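    # Together with the flavor_mem_page_size fixture params this covers the
    # full 6x6 matrix of flavor vs image page size combinations.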
    @mark.parametrize('image_mem_page_size', MEM_CONFIGS)
    def test_boot_vm_mem_page_size(self, flavor_2g, flavor_mem_page_size,
                                   image_mempage, image_mem_page_size):
        """
        Test boot vm with various memory page size setting in flavor and image.

        Args:
            flavor_2g (tuple): flavor id of a flavor with ram set to 2G,
                hosts configured and storage_backing
            flavor_mem_page_size (str): memory page size extra spec value to
                set in flavor
            image_mempage (str): image id for tis image
            image_mem_page_size (str): memory page metadata value to set in
                image

        Setup:
            - Create a flavor with 2G RAM (module)
            - Get image id of tis image (module)

        Test Steps:
            - Set/Unset flavor memory page size extra spec with given value
              (unset if None is given)
            - Set/Unset image memory page size metadata with given value
              (unset if None is given)
            - Attempt to boot a vm with above flavor and image
            - Verify boot result based on the mem page size values in the
            flavor and image

        Teardown:
            - Delete vm if booted
            - Delete created flavor (module)

        """
        skip_4k_for_ovs(image_mem_page_size)

        flavor_id, hosts, storage_backing = flavor_2g

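        # Expected result: image page size unset -> boot accepted (0); image
        # set but flavor unset -> rejected (4); flavor 'any'/'large' accepts
        # any image value; otherwise flavor and image sizes must match.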
        if image_mem_page_size is None:
            glance_helper.unset_image(image_mempage,
                                      properties=ImageMetadata.MEM_PAGE_SIZE)
            expt_code = 0
        else:
            glance_helper.set_image(
                image=image_mempage,
                properties={ImageMetadata.MEM_PAGE_SIZE: image_mem_page_size})
            if flavor_mem_page_size is None:
                expt_code = 4
            elif flavor_mem_page_size.lower() in ['any', 'large']:
                expt_code = 0
            else:
                expt_code = 0 if flavor_mem_page_size.lower() == \
                                 image_mem_page_size.lower() else 4

        LOG.tc_step("Attempt to boot a vm with flavor_mem_page_size: {}, and "
                    "image_mem_page_size: {}. And check return "
                    "code is {}.".format(flavor_mem_page_size,
                                         image_mem_page_size, expt_code))

        actual_code, vm_id, msg = vm_helper.boot_vm(name='mem_page_size',
                                                    flavor=flavor_id,
                                                    source='image',
                                                    source_id=image_mempage,
                                                    fail_ok=True,
                                                    avail_zone='stxauto',
                                                    cleanup='function')

        assert expt_code == actual_code, "Expect boot vm to return {}; " \
                                         "Actual result: {} with msg: " \
                                         "{}".format(expt_code, actual_code,
                                                     msg)

        if expt_code != 0:
            assert re.search(
                NovaCLIOutput.VM_BOOT_REJECT_MEM_PAGE_SIZE_FORBIDDEN, msg)
        else:
            assert vm_helper.get_vm_host(vm_id) in hosts, \
                "VM is not booted on hosts in stxauto zone"
            LOG.tc_step("Ensure VM is pingable from NatBox")
            vm_helper.wait_for_vm_pingable_from_natbox(vm_id)

    @mark.parametrize('mem_page_size', [
        param('1048576', marks=mark.priorities('domain_sanity', 'nightly')),
        param('large'),
        param('small', marks=mark.nightly),
    ])
    def test_schedule_vm_mempage_config(self, flavor_2g, mem_page_size):
        """
        Test memory used by vm is taken from the expected memory pool and the
        vm was scheduled on the correct host/processor

        Args:
            flavor_2g (tuple): flavor id of a flavor with ram set to 2G,
                hosts configured and storage_backing
            mem_page_size (str): mem page size setting in flavor

        Setup:
            - Create host aggregate
            - Add two hypervisors to the host aggregate
            - Host-0 configuration:
                - Processor-0:
                    - Insufficient 1g pages to boot vm that requires 2g
                    - Insufficient 4k pages to boot vm that requires 2g
                - Processor-1:
                    - Sufficient 1g pages to boot vm that requires 2g
                    - Insufficient 4k pages to boot vm that requires 2g
            - Host-1 configuration:
                - Processor-0:
                    - Insufficient 1g pages to boot vm that requires 2g
                    - Insufficient 4k pages to boot vm that requires 2g
                - Processor-1:
                    - Insufficient 1g pages to boot vm that requires 2g
                    - Sufficient 4k pages to boot vm that requires 2g
            - Configure a compute to have 4 1G hugepages (module)
            - Create a flavor with 2G RAM (module)
            - Create a volume with default values (module)

        Test Steps:
            - Set memory page size flavor spec to given value
            - Boot a vm with above flavor and a basic volume
            - Calculate the available/used memory change on the vm host
            - Verify the memory is taken from 1G hugepage memory pool
            - Verify the vm was booted on a supporting host

        Teardown:
            - Delete created vm
            - Delete created volume and flavor (module)
            - Re-Configure the compute to have 0 hugepages (module)
            - Revert host mem pages back to original
        """
        skip_4k_for_ovs(mem_page_size)

        flavor_id, hosts_configured, storage_backing = flavor_2g
        LOG.tc_step("Set memory page size extra spec in flavor")
        nova_helper.set_flavor(
            flavor_id, **{
                FlavorSpec.CPU_POLICY: 'dedicated',
                FlavorSpec.MEM_PAGE_SIZE: mem_page_size
            })

        host_helper.wait_for_hypervisors_up(hosts_configured)
        prev_computes_mems = {}
        for host in hosts_configured:
            prev_computes_mems[host] = host_helper.get_host_memories(
                host=host, headers=MEMPAGE_HEADERS)

        LOG.tc_step(
            "Boot a vm with mem page size spec - {}".format(mem_page_size))

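        # By fixture convention the first configured host backs 1G pages on
        # proc1 and the second backs 4K pages on proc1.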
        host_1g, host_4k = hosts_configured
        code, vm_id, msg = vm_helper.boot_vm('mempool_configured',
                                             flavor_id,
                                             fail_ok=True,
                                             avail_zone='stxauto',
                                             cleanup='function')
        assert 0 == code, "VM is not successfully booted."

        instance_name, vm_host = vm_helper.get_vm_values(
            vm_id, fields=[":instance_name", ":host"], strict=False)
        vm_node = vm_helper.get_vm_numa_nodes_via_ps(
            vm_id=vm_id, instance_name=instance_name, host=vm_host)
        if mem_page_size == '1048576':
            assert host_1g == vm_host, \
                "VM is not created on the configured host " \
                "{}".format(hosts_configured[0])
            assert vm_node == [1], "VM (huge) did not boot on the correct " \
                                   "processor"
        elif mem_page_size == 'small':
            assert host_4k == vm_host, "VM is not created on the configured " \
                                       "host {}".format(hosts_configured[1])
            assert vm_node == [1], "VM (small) did not boot on the correct " \
                                   "processor"
        else:
            assert vm_host in hosts_configured

        LOG.tc_step("Calculate memory change on vm host - {}".format(vm_host))
        check_mempage_change(vm_id,
                             vm_host,
                             prev_host_mems=prev_computes_mems[vm_host],
                             mempage_size=mem_page_size,
                             mem_gib=2,
                             numa_node=vm_node[0])

        LOG.tc_step("Ensure vm is pingable from NatBox")
        vm_helper.wait_for_vm_pingable_from_natbox(vm_id)
Example #6
            network_quota_, subnet_quota_ = quotas
            vm_helper.set_quotas(tenant=tenant_id,
                                 networks=network_quota_,
                                 subnets=subnet_quota_)

    request.addfinalizer(revert)

    return tenants_quotas


@mark.usefixtures('check_alarms')
@mark.parametrize(
    'template_name',
    [
        # param('WR_Neutron_ProviderNetRange.yaml', marks=mark.priorities('p2')),  # Need update due to datanetwork change
        param('OS_Cinder_Volume.yaml', marks=mark.priorities('p2')),
        # param('OS_Glance_Image.yaml'), # Stack update needed
        # https://bugs.launchpad.net/bugs/1819483
        param('OS_Ceilometer_Alarm.yaml', marks=mark.priorities('p2')),
        param('OS_Neutron_Port.yaml', marks=mark.priorities('p2')),
        param('OS_Neutron_Net.yaml', marks=mark.priorities('p2')),
        param('OS_Neutron_Subnet.yaml', marks=mark.priorities('p2')),
        param('OS_Nova_Flavor.yaml', marks=mark.priorities('p2')),
        param('OS_Neutron_FloatingIP.yaml', marks=mark.priorities('p2')),
        param('OS_Neutron_Router.yaml', marks=mark.priorities('p2')),
        param('OS_Neutron_RouterGateway.yaml', marks=mark.priorities('p2')),
        param('OS_Neutron_RouterInterface.yaml', marks=mark.priorities('p2')),
        param('OS_Neutron_SecurityGroup.yaml', marks=mark.priorities('p2')),
        # param('OS_Nova_ServerGroup.yaml', marks=mark.priorities('p2')),     # Stack update needed
        param('OS_Nova_KeyPair.yaml', marks=mark.priorities('p2')),
        # param('WR_Neutron_QoSPolicy.yaml', marks=mark.priorities('p2')),    # CGTS-10095
    # Attempt to lock the active controller and verify the lock is rejected
    LOG.tc_step("Lock active controller and ensure it fails to lock")
    exit_code, cmd_output = host_helper.lock_host(active_controller,
                                                  fail_ok=True,
                                                  swact=False,
                                                  check_first=False)
    assert exit_code == 1, 'Expect locking active controller to be rejected. ' \
                           'Actual: {}'.format(cmd_output)
    status = system_helper.get_host_values(active_controller,
                                           'administrative')[0]
    assert status == 'unlocked', "Fail: The active controller was locked."


@mark.parametrize('host_type', [
    param('controller', marks=mark.priorities('platform_sanity', 'kpi')),
    param('compute', marks=mark.priorities('platform_sanity', 'kpi')),
    param('storage', marks=mark.priorities('platform_sanity', 'kpi')),
])
def test_lock_unlock_host(host_type, collect_kpi):
    """
    Verify lock unlock host

    Test Steps:
        - Select a host per given type. If type is controller, select standby controller.
        - Lock selected host and ensure it is successfully locked
        - Unlock selected host and ensure it is successfully unlocked

    """
    init_time = None
    if collect_kpi:
Example #8
    srv_grp_id = None
    if policy is not None:
        LOG.tc_step(
            "Create a server group with policy set to {}".format(policy))
        srv_grp_id = nova_helper.create_server_group(policy=policy)[1]
        ResourceCleanup.add(resource_type='server_group',
                            resource_id=srv_grp_id)

    return flavor_id, srv_grp_id


# Server group messaging has been removed since StarlingX (STX)
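# 'affinity' schedules all vms in the group onto the same host while
# 'anti_affinity' requires different hosts; the 'soft_' variants are
# best-effort versions of the same policies.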
@mark.parametrize(('policy', 'vms_num'), [
    param('affinity',
          2,
          marks=mark.priorities('nightly', 'domain_sanity', 'sx_nightly')),
    ('soft_anti_affinity', 3),
    param(
        'anti_affinity', 2, marks=mark.priorities('nightly', 'domain_sanity')),
    ('soft_affinity', 3),
])
def test_server_group_boot_vms(policy, vms_num, check_system):
    """
    Test server group policy and messaging
    Test live migration with anti-affinity server group (TC6566)
    Test changing size of existing server group via CLI (TC2917)

    Args:
        policy (str): server group policy to set when creating the group
        vms_num (int): number of vms to boot
Example #9
    LOG.tc_step("Cold migrate vm and check vm topology stays the same")
    vm_helper.cold_migrate_vm(vm_id=vm_id)

    vm_helper.wait_for_vm_pingable_from_natbox(vm_id)
    vm_host = vm_helper.get_vm_host(vm_id)
    check_helper.check_topology_of_vm(vm_id, vcpus=flv_vcpus, cpu_pol=cpu_pol, vm_host=vm_host,
                                      prev_total_cpus=prev_cpus[vm_host])


# Deprecated
@mark.usefixtures('add_admin_role_module')
@mark.parametrize(('vcpus_dedicated', 'vcpus_shared', 'pol_source', 'boot_source'), [
    param(2, 1, 'flavor', 'image', marks=mark.p2),
    param(1, 3, 'image', 'image', marks=mark.p2),
    param(2, 4, 'image', 'volume', marks=mark.p2),
    param(3, 2, 'flavor', 'volume', marks=mark.priorities('nightly', 'sx_nightly')),
])
def _test_cpu_pol_dedicated_shared_coexists(vcpus_dedicated, vcpus_shared, pol_source, boot_source):
    """
    Test two vms coexisting on the same host, one with the dedicated cpu property, and one with the shared cpu property.

    Args:
        vcpus_dedicated: Amount of vcpu(s) to allocate for the vm with the dedicated CPU_POLICY.
        vcpus_shared: Amount of vcpu(s) to allocate for the vm with the shared CPU_POLICY.
        pol_source: Where the CPU_POLICY is set from.
        boot_source: The boot media the vm will use to boot.

    Test Setups:
        - Create two flavors, one for each vm.
        - If using 'flavor' for pol_source, set extra specs for the CPU_POLICY.
        - If using 'image' for pol_source, set ImageMetaData for the CPU_POLICY.