def start_computes_and_servers(self):
        # Start 2 computes
        self.start_compute(
            hostname='host_a',
            host_info=fakelibvirt.HostInfo(
                cpu_nodes=1, cpu_sockets=1, cpu_cores=4, cpu_threads=1))
        self.start_compute(
            hostname='host_b',
            host_info=fakelibvirt.HostInfo(
                cpu_nodes=1, cpu_sockets=1, cpu_cores=4, cpu_threads=1))

        # Create a 2-CPU flavor
        extra_spec = {'hw:cpu_policy': 'dedicated'}
        flavor = self._create_flavor(vcpu=2, extra_spec=extra_spec)

        # Boot 2 servers with 2 CPUs each, one on host_a and one on host_b.
        # Given the cpu_dedicated_set we set earlier, they should both be on
        # CPUs 0,1.
        for server_name, host in [('server_a', 'host_a'),
                                  ('server_b', 'host_b')]:
            server = self._create_server(flavor_id=flavor, host=host,
                                         networks='none')
            setattr(self, server_name,
                    self._wait_for_state_change(server, 'ACTIVE'))
            self.assertEqual(host, self.get_host(server['id']))
            self._assert_instance_pinned_cpus(server['id'], [0, 1], [0, 1])
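
The comment above refers to a cpu_dedicated_set configured earlier. A minimal
sketch of that configuration, assuming the standard nova functional-test
self.flags() helper (the actual setUp is not part of this snippet):

    def setUp(self):
        super().setUp()
        # Reserve pCPUs 0-3 on each fake host for pinned (dedicated) guests.
        self.flags(cpu_dedicated_set='0-3', group='compute')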
Example No. 2
    def test_different_page_sizes(self):
        self.start_compute(hostname='host_a',
                           host_info=fakelibvirt.HostInfo(
                               kB_mem=1024000,
                               mempages={
                                   0: fakelibvirt.create_mempages(
                                       [(4, 256000), (1024, 1000)]),
                               }))
        self.start_compute(hostname='host_b',
                           host_info=fakelibvirt.HostInfo(
                               kB_mem=1024000,
                               mempages={
                                   0: fakelibvirt.create_mempages(
                                       [(4, 256000), (2048, 500)]),
                               }))

        extra_spec = {
            'hw:numa_nodes': 1,
            'hw:cpu_policy': 'dedicated',
            'hw:mem_page_size': 'large'
        }
        flavor = self._create_flavor(vcpu=2,
                                     memory_mb=512,
                                     extra_spec=extra_spec)
        server = self._build_server(
            flavor_id=flavor,
            image_uuid='155d900f-4e14-4e4c-a73d-069cbf4541e6')
        server['networks'] = 'none'
        post = {'server': server}
        server = self.api.post_server(post)
        self._wait_for_state_change(server, 'ACTIVE')
        initial_host = self.get_host(server['id'])
        dest_host = 'host_a' if initial_host == 'host_b' else 'host_b'
        # NOTE(artom) Because we use the CastAsCallFixture, we expect the
        # MigrationPreCheckError to be bubbled up to the API as an error 500.
        self.api.api_post(
            '/servers/%s/action' % server['id'],
            {'os-migrateLive': {'host': dest_host,
                                'block_migration': 'auto',
                                'force': True}},
            check_response_status=[500])
        self._wait_for_state_change(server, 'ACTIVE')
        self._wait_for_migration_status(server, ['error'])
        self.assertEqual(initial_host, self.get_host(server['id']))
        self.assertIsNone(self._get_migration_context(server['id']))
        log_out = self.stdlog.logger.output
        self.assertIn(
            'Migration pre-check error: '
            'Insufficient compute resources: '
            'Requested page size is different from current page '
            'size.', log_out)
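
Why the pre-check fails, illustrated (not part of the test):
hw:mem_page_size='large' requires the guest to use hugepages, and each host
offers exactly one hugepage size here, so the page size in use necessarily
changes across the migration:

    host_a_hugepage_kib = 1024  # the only hugepage size host_a offers
    host_b_hugepage_kib = 2048  # the only hugepage size host_b offers
    # The live migration pre-check rejects any change of page size.
    assert host_a_hugepage_kib != host_b_hugepage_kib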
Example No. 3
    def _get_connection(
        self,
        host_info=None,
        pci_info=None,
        mdev_info=None,
        vdpa_info=None,
        libvirt_version=None,
        qemu_version=None,
        hostname=None,
    ):
        if not host_info:
            host_info = fakelibvirt.HostInfo(cpu_nodes=2,
                                             cpu_sockets=1,
                                             cpu_cores=2,
                                             cpu_threads=2)

        # sanity check
        self.assertGreater(
            16, host_info.cpus,
            "Host.get_online_cpus is only accounting for 16 CPUs but you're "
            "requesting %d; change the mock or your test" % host_info.cpus)

        libvirt_version = libvirt_version or fakelibvirt.FAKE_LIBVIRT_VERSION
        qemu_version = qemu_version or fakelibvirt.FAKE_QEMU_VERSION

        fake_connection = fakelibvirt.Connection('qemu:///system',
                                                 version=libvirt_version,
                                                 hv_version=qemu_version,
                                                 host_info=host_info,
                                                 pci_info=pci_info,
                                                 mdev_info=mdev_info,
                                                 vdpa_info=vdpa_info,
                                                 hostname=hostname)
        return fake_connection
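
A hypothetical usage sketch of the helper above (the method name is invented
for illustration; the pattern mirrors _start_compute_service later in this
collection):

    def _start_compute_with_topology(self, hostname):
        # Build a fake connection for a 2-node, 8-CPU host and hand it to
        # the mocked libvirt driver before starting the compute service.
        fake_connection = self._get_connection(
            host_info=fakelibvirt.HostInfo(
                cpu_nodes=2, cpu_sockets=1, cpu_cores=2, cpu_threads=2),
            hostname=hostname)
        self.mock_conn.return_value = fake_connection
        return self._start_compute(host=hostname)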
Example No. 4
    def setUp(self):
        super().setUp()

        self.start_compute(hostname='src',
                           host_info=fakelibvirt.HostInfo(cpu_nodes=1,
                                                          cpu_sockets=1,
                                                          cpu_cores=4,
                                                          cpu_threads=1))
        self.start_compute(hostname='dest',
                           host_info=fakelibvirt.HostInfo(cpu_nodes=1,
                                                          cpu_sockets=1,
                                                          cpu_cores=4,
                                                          cpu_threads=1))

        self.src = self.computes['src']
        self.dest = self.computes['dest']
Example No. 5
    def setUp(self):
        super().setUp()

        # Launch a single libvirt-based compute service with a single NUMA node
        host_info = fakelibvirt.HostInfo()
        self.start_compute(host_info=host_info, hostname='compute1')

        # Use a flavor requesting 2 NUMA nodes that we know will always fail
        self.flavor_id = self._create_flavor(extra_spec={'hw:numa_nodes': '2'})

        # Craft a common bfv server request for use within each test
        self.volume_id = nova_fixtures.CinderFixture.IMAGE_BACKED_VOL
        self.server = {
            'name': 'test',
            'flavorRef': self.flavor_id,
            'imageRef': '',
            'networks': 'none',
            'block_device_mapping_v2': [{
                'source_type': 'volume',
                'destination_type': 'volume',
                'boot_index': 0,
                'uuid': self.volume_id
            }]
        }
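
A hypothetical follow-on test (the method name is invented for illustration):
it posts the crafted request and expects the boot to fail, since the
single-NUMA-node host can never satisfy the flavor's two-node NUMA topology:

    def test_bfv_boot_fails_to_schedule(self):
        server = self.api.post_server({'server': self.server})
        # Scheduling fails with NoValidHost, so the server goes to ERROR.
        self._wait_for_state_change(server, 'ERROR')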
Example No. 6
    def _start_compute_service(self, hostname):
        fake_connection = self._get_connection(
            # Need a host to support creating more servers with vpmems
            host_info=fakelibvirt.HostInfo(cpu_nodes=2, cpu_sockets=1,
                                           cpu_cores=2, cpu_threads=2),
            hostname=hostname)
        self.mock_conn.return_value = fake_connection
        compute = self._start_compute(host=hostname)

        # Ensure the existing vpmems are populated correctly.
        vpmems = compute.driver._vpmems_by_name
        expected_vpmems = {
            'ns_0': objects.LibvirtVPMEMDevice(
                label='4GB', name='ns_0', devpath='/dev/dax0.0',
                size=4292870144, align=2097152),
            'ns_1': objects.LibvirtVPMEMDevice(
                label='SMALL', name='ns_1', devpath='/dev/dax0.1',
                size=4292870144, align=2097152),
            'ns_2': objects.LibvirtVPMEMDevice(
                label='SMALL', name='ns_2', devpath='/dev/dax0.2',
                size=4292870144, align=2097152)}
        self.assertDictEqual(expected_vpmems, vpmems)

        # Ensure vpmem resources are reported correctly.
        rp_uuid = self._get_provider_uuid_by_host(compute.host)
        inventory = self._get_provider_inventory(rp_uuid)
        self.assertEqual(1, inventory['CUSTOM_PMEM_NAMESPACE_4GB']['total'])
        self.assertEqual(2, inventory['CUSTOM_PMEM_NAMESPACE_SMALL']['total'])

        return compute
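
The vpmem namespaces asserted above imply a [libvirt]/pmem_namespaces option
set before the service starts; a sketch of that configuration (assumed, as
the fixture setup is not part of this snippet):

    # One 4GB-labelled namespace and two SMALL-labelled namespaces, matching
    # expected_vpmems and the placement inventory totals checked above.
    self.flags(pmem_namespaces='4GB:ns_0,SMALL:ns_1|ns_2', group='libvirt')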
Example No. 7
    def test_insufficient_resources(self):
        self.start_compute(hostname='host_a',
                           host_info=fakelibvirt.HostInfo(cpu_nodes=1,
                                                          cpu_sockets=1,
                                                          cpu_cores=3,
                                                          cpu_threads=1))
        self.start_compute(hostname='host_b',
                           host_info=fakelibvirt.HostInfo(cpu_nodes=2,
                                                          cpu_sockets=1,
                                                          cpu_cores=2,
                                                          cpu_threads=1))

        extra_spec = {'hw:numa_nodes': 1, 'hw:cpu_policy': 'dedicated'}
        flavor = self._create_flavor(vcpu=3, extra_spec=extra_spec)
        server = self._build_server(
            flavor_id=flavor,
            image_uuid='155d900f-4e14-4e4c-a73d-069cbf4541e6')
        server['networks'] = 'none'
        post = {'server': server}
        server = self.api.post_server(post)
        self._wait_for_state_change(server, 'ACTIVE')
        self.assertEqual('host_a', self.get_host(server['id']))
        # NOTE(artom) Because we use the CastAsCallFixture, we expect the
        # MigrationPreCheckError to be bubbled up to the API as an error 500.
        self.api.api_post(
            '/servers/%s/action' % server['id'],
            {'os-migrateLive': {'host': 'host_b',
                                'block_migration': 'auto',
                                'force': True}},
            check_response_status=[500])
        self._wait_for_state_change(server, 'ACTIVE')
        self._wait_for_migration_status(server, ['error'])
        self.assertIsNone(self._get_migration_context(server['id']))
        self.assertEqual('host_a', self.get_host(server['id']))
        log_out = self.stdlog.logger.output
        self.assertIn(
            'Migration pre-check error: '
            'Insufficient compute resources: '
            'Requested instance NUMA topology cannot fit', log_out)
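
Why the pre-check fails, illustrated (not part of the test): host_b splits
its four pCPUs across two NUMA nodes, so no single node can hold the
flavor's three dedicated CPUs:

    pcpus_per_host_b_node = 1 * 2 * 1  # sockets * cores * threads per node
    requested_pcpus = 3                # vcpu=3 with hw:numa_nodes=1
    assert requested_pcpus > pcpus_per_host_b_node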
Example No. 8
    def setUp(self):
        super().setUp()
        self.neutron.list_extensions = self.list_extensions
        self.neutron_api = neutron.API()

        self.useFixture(nova_fixtures.OSBrickFixture())

        self.start_compute(hostname='start_host',
                           host_info=fakelibvirt.HostInfo(cpu_nodes=1,
                                                          cpu_sockets=1,
                                                          cpu_cores=4,
                                                          cpu_threads=2))
        self.start_compute(hostname='end_host',
                           host_info=fakelibvirt.HostInfo(cpu_nodes=1,
                                                          cpu_sockets=1,
                                                          cpu_cores=4,
                                                          cpu_threads=2))

        self.ctxt = context.get_admin_context()
        # TODO(sean-k-mooney): remove this when it is part of ServersTestBase
        self.useFixture(
            fixtures.MonkeyPatch(
                'nova.tests.fixtures.libvirt.Domain.migrateToURI3',
                self._migrate_stub))

    def _test(self, pin_source, pin_cond, expect_success=True):
        self.start_compute(
            hostname='source',
            host_info=fakelibvirt.HostInfo())
        self.start_compute(
            hostname='dest',
            host_info=fakelibvirt.HostInfo())

        # This duplication is required to let the LibvirtMigrationMixin know
        # which host is which in terms of the migration.
        self.src = self.computes['source']
        self.dest = self.computes['dest']

        ctxt = context.get_admin_context()
        src_mgr = self.computes['source'].manager
        cond_mgr = self.conductor.manager.compute_task_mgr
        if pin_source:
            src_mgr.compute_rpcapi = integrated_helpers.StubComputeRPCAPI(
                '5.2')
        else:
            # Since we upgraded the RPC API to 6.0, we somehow need to pin the
            # compute service here to 5.max to verify the legacy behaviours.
            # TODO(sbauza): Remove this cruft
            src_mgr.compute_rpcapi = integrated_helpers.StubComputeRPCAPI(
                '5.13')
        if pin_cond:
            cond_mgr.compute_rpcapi = integrated_helpers.StubComputeRPCAPI(
                '5.2')
        else:
            # Since we upgraded the RPC API to 6.0, we somehow need to pin the
            # compute service here to 5.max to verify the legacy behaviours.
            # TODO(sbauza): Remove this cruft
            cond_mgr.compute_rpcapi = integrated_helpers.StubComputeRPCAPI(
                '5.13')

        self.assertEqual(
            not pin_source,
            src_mgr.compute_rpcapi.router.client(
                ctxt).can_send_version('5.3'))
        self.assertEqual(
            not pin_cond,
            cond_mgr.compute_rpcapi.router.client(
                ctxt).can_send_version('5.3'))

        extra_spec = {'hw:numa_nodes': 1,
                      'hw:cpu_policy': 'dedicated'}
        flavor = self._create_flavor(vcpu=2, extra_spec=extra_spec)
        server1 = self._create_server(flavor_id=flavor, networks='none')
        server2 = self._create_server(flavor_id=flavor, networks='none')
        if self.get_host(server1['id']) == 'source':
            self.migrating_server = server1
            self.server = server1
        else:
            self.migrating_server = server2
            self.server = server2
        self.api.post_server_action(
            self.migrating_server['id'],
            {'os-migrateLive': {'host': 'dest',
                                'block_migration': 'auto',
                                'force': True}})
        self._wait_for_state_change(self.migrating_server, 'ACTIVE')
        if expect_success:
            final_host = 'dest'
            self._wait_for_migration_status(self.migrating_server,
                                            ['completed'])
        else:
            final_host = 'source'
            self._wait_for_migration_status(self.migrating_server, ['failed'])
        self.assertEqual(final_host,
                         self.get_host(self.migrating_server['id']))
        self.assertTrue(self.migrate_stub_ran)
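
A hypothetical caller of the helper above (the test name is invented for
illustration): with neither service pinned, the migration takes the modern
RPC path and is expected to succeed, since expect_success defaults to True:

    def test_live_migrate_unpinned(self):
        self._test(pin_source=False, pin_cond=False)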