def start_service(self, name, host=None, **kwargs): cell = None # if the host is None then the CONF.host remains defaulted to # 'fake-mini' (originally done in ConfFixture) if host is not None: # Make sure that CONF.host is relevant to the right hostname self.useFixture(nova_fixtures.ConfPatcher(host=host)) if name == 'compute' and self.USES_DB: # NOTE(danms): We need to create the HostMapping first, because # otherwise we'll fail to update the scheduler while running # the compute node startup routines below. ctxt = context.get_context() cell_name = kwargs.pop('cell', CELL1_NAME) or CELL1_NAME cell = self.cell_mappings[cell_name] if (host or name) not in self.host_mappings: # NOTE(gibi): If the HostMapping does not exists then this is # the first start of the service so we create the mapping. hm = objects.HostMapping(context=ctxt, host=host or name, cell_mapping=cell) hm.create() self.host_mappings[hm.host] = hm svc = self.useFixture( nova_fixtures.ServiceFixture(name, host, cell=cell, **kwargs)) return svc.service
def test_get_machine_type_from_conf(self):
    """A configured per-arch mapping yields the configured machine type."""
    self.useFixture(
        nova_fixtures.ConfPatcher(
            group="libvirt",
            hw_machine_type=['x86_64=q35', 'i686=legacy']))
    found = libvirt_utils.get_default_machine_type('x86_64')
    self.assertEqual('q35', found)
def stub_out_image_service(test):
    """Replace the real image service with a FakeImageService for a test.

    :param test: instance of nova.test.TestCase
    :returns: The stubbed out FakeImageService object
    """
    # Point glance at a throwaway local endpoint for the test's duration.
    test.useFixture(nova_fixtures.ConfPatcher(
        group="glance", api_servers=['http://localhost:9292']))
    fake_service = FakeImageService()
    test.stub_out('nova.image.glance.get_remote_image_service',
                  lambda x, y: (fake_service, y))
    test.stub_out('nova.image.glance.get_default_image_service',
                  lambda: fake_service)
    return fake_service
def start_service(self, name, host=None, cell_name=None, **kwargs):
    """Start a nova service as a test fixture and return the service.

    Disallows a second concurrent scheduler service and keeps a running
    count per service name so start/stop/start sequences still work.

    :param name: The binary name of the service, e.g. 'compute'.
    :param host: Optional hostname; when omitted CONF.host keeps its
        default.
    :param cell_name: Cell a compute service should be mapped into;
        defaults to CELL1_NAME.
    :param kwargs: Extra arguments forwarded to ServiceFixture.
    :returns: The started nova service object.
    :raises TestingException: if a scheduler is already running.
    """
    # Disallow starting multiple scheduler services
    if name == 'scheduler' and self._service_fixture_count[name]:
        raise TestingException("Duplicate start_service(%s)!" % name)
    cell = None
    # if the host is None then the CONF.host remains defaulted to
    # 'fake-mini' (originally done in ConfFixture)
    if host is not None:
        # Make sure that CONF.host is relevant to the right hostname
        self.useFixture(nova_fixtures.ConfPatcher(host=host))
    if name == 'compute' and self.USES_DB:
        # NOTE(danms): We need to create the HostMapping first, because
        # otherwise we'll fail to update the scheduler while running
        # the compute node startup routines below.
        ctxt = context.get_context()
        cell_name = cell_name or CELL1_NAME
        cell = self.cell_mappings[cell_name]
        if (host or name) not in self.host_mappings:
            # NOTE(gibi): If the HostMapping does not exists then this is
            # the first start of the service so we create the mapping.
            hm = objects.HostMapping(context=ctxt,
                                     host=host or name,
                                     cell_mapping=cell)
            hm.create()
            self.host_mappings[hm.host] = hm
    svc = self.useFixture(
        nova_fixtures.ServiceFixture(name, host, cell=cell, **kwargs))
    # Keep track of how many instances of this service are running.
    self._service_fixture_count[name] += 1
    real_stop = svc.service.stop

    # Make sure stopping the service decrements the active count, so that
    # start,stop,start doesn't trigger the "Duplicate start_service"
    # exception.
    def patch_stop(*a, **k):
        self._service_fixture_count[name] -= 1
        return real_stop(*a, **k)
    self.useFixture(
        fixtures.MockPatchObject(svc.service, 'stop', patch_stop))
    return svc.service
def test_live_migration_actions(self):
    """Run each live-migration scenario and verify the server recovers."""
    server = self._boot_a_server(
        extra_params={'networks': [{'port': self.neutron.port_1['id']}]})
    self._wait_for_notification('instance.create.end')
    self._attach_volume_to_server(server, self.cinder.SWAP_OLD_VOL)
    # server will boot on host1
    self.useFixture(fixtures.ConfPatcher(host='host2'))
    self.compute2 = self.start_service('compute', host='host2')
    scenarios = (
        self._test_live_migration_rollback,
    )
    for scenario in scenarios:
        fake_notifier.reset()
        scenario(server)
        # Ensure that instance is in active state after an action
        self._wait_for_state_change(self.admin_api, server, 'ACTIVE')
def start_service(self, name, host=None, **kwargs):
    """Start a nova service as a test fixture and return the service.

    :param name: The binary name of the service to start, e.g. 'compute'.
    :param host: Optional hostname for the service; when omitted CONF.host
        keeps its default.
    :param kwargs: Extra arguments forwarded to ServiceFixture; a 'cell'
        key selects which cell mapping a compute service is mapped into.
    :returns: The started nova service object.
    """
    if name == 'compute' and self.USES_DB:
        # NOTE(danms): We need to create the HostMapping first, because
        # otherwise we'll fail to update the scheduler while running
        # the compute node startup routines below.
        ctxt = context.get_context()
        cell = self.cell_mappings[kwargs.pop('cell', CELL1_NAME)]
        if (host or name) not in self.host_mappings:
            # Only create the HostMapping on the first start of this
            # service; an unconditional create would fail with a
            # duplicate mapping when the same service is restarted.
            hm = objects.HostMapping(context=ctxt,
                                     host=host or name,
                                     cell_mapping=cell)
            hm.create()
            self.host_mappings[hm.host] = hm
    if host is not None:
        # Make sure that CONF.host is relevant to the right hostname
        self.useFixture(nova_fixtures.ConfPatcher(host=host))
    svc = self.useFixture(
        nova_fixtures.ServiceFixture(name, host, **kwargs))
    return svc.service
def test_get_machine_type_survives_invalid_conf(self):
    """A malformed mapping entry does not break valid-arch lookup."""
    self.useFixture(nova_fixtures.ConfPatcher(
        group="libvirt",
        hw_machine_type=['x86_64=q35', 'foo']))
    found = libvirt_utils.get_default_machine_type('x86_64')
    self.assertEqual('q35', found)
def test_get_machine_type_missing_conf_and_fallback(self):
    """An arch absent from the configured mappings yields None."""
    self.useFixture(nova_fixtures.ConfPatcher(
        group="libvirt",
        hw_machine_type=['x86_64=q35', 'i686=legacy']))
    found = libvirt_utils.get_default_machine_type('sparc')
    self.assertIsNone(found)
def test_invalid_machine_type_mappings(self):
    """Malformed mapping entries are dropped from the parsed mapping."""
    self.useFixture(nova_fixtures.ConfPatcher(
        group="libvirt",
        hw_machine_type=['x86_64=q35', 'foo']))
    mappings = libvirt_utils.machine_type_mappings()
    self.assertDictEqual({'x86_64': 'q35'}, mappings)