def setUp(self):
    # Point _VirtDriverTestCase at the right module
    self.driver_module = 'nova.virt.libvirt.LibvirtDriver'
    super(LibvirtConnTestCase, self).setUp()
    self.stub_out('nova.context.get_admin_context', lambda: self.ctxt)
    # This is needed for the live migration tests which spawn off the
    # operation for monitoring.
    self.useFixture(nova_fixtures.SpawnIsSynchronousFixture())
    # When destroying an instance, os-vif will try to execute some commands
    # which hang tests so let's just stub out the unplug call to os-vif
    # since we don't care about it.
    self.stub_out('os_vif.unplug', lambda a, kw: None)
    self.stub_out('nova.compute.utils.get_machine_ips', lambda: [])

def test_spawn_n_has_link(self):
    self.useFixture(fixtures.SpawnIsSynchronousFixture())
    gt = utils.spawn_n(mock.MagicMock)
    passed_arg = 'test'
    call_count = []

    def fake(thread, param):
        self.assertEqual(gt, thread)
        self.assertEqual(passed_arg, param)
        call_count.append(1)

    gt.link(fake, passed_arg)
    self.assertEqual(1, len(call_count))

def setUp(self, mock_init_agg, mock_init_inst):
    super(OpieHostManagerTestCase, self).setUp()
    self.host_manager = host_manager.HostManager()
    self.fake_hosts = [
        nova_host_manager.HostState('fake_host%s' % x, 'fake-node')
        for x in range(1, 5)
    ]
    self.fake_hosts += [
        nova_host_manager.HostState('fake_multihost', 'fake-node%s' % x)
        for x in range(1, 5)
    ]
    self.useFixture(fixtures.SpawnIsSynchronousFixture())

def setUp(self):
    super(ComputeManagerTestCase, self).setUp()
    self.useFixture(nova_fixtures.SpawnIsSynchronousFixture())
    self.useFixture(cast_as_call.CastAsCall(self))
    self.conductor = self.start_service('conductor')
    self.start_service('scheduler')
    self.compute = self.start_service('compute')
    self.context = context.RequestContext('fake', 'fake')
    fake_server_actions.stub_out_action_events(self)
    fake_network.set_stub_network_methods(self)
    self.useFixture(fixtures.MockPatch(
        'nova.network.neutron.API.get_instance_nw_info',
        return_value=network_model.NetworkInfo(),
    ))

def setUp(self):
    self.flags(use_ipv6=False)
    self.flags(glance_link_prefix=self._get_glance_host(),
               compute_link_prefix=self._get_host(),
               group='api')

    # load any additional fixtures specified by the scenario
    for fix in self._additional_fixtures:
        self.useFixture(fix())

    if not self.SUPPORTS_CELLS:
        # NOTE(danms): Disable base automatic DB (and cells) config
        self.USES_DB = False
        self.USES_DB_SELF = True

    # This is to enable the network quota which is being registered
    # based on CONF.enable_network_quota. Need this to test the
    # network quota in quota sample tests.
    self.flags(enable_network_quota=True)
    self.useFixture(fixtures.RegisterNetworkQuota())

    # super class call is delayed here so that we have the right
    # paste and conf before loading all the services, as we can't
    # change these later.
    super(ApiSampleTestBaseV21, self).setUp()

    if not self.SUPPORTS_CELLS:
        self.useFixture(fixtures.Database())
        self.useFixture(fixtures.Database(database='api'))
        self.useFixture(fixtures.DefaultFlavorsFixture())
    self.useFixture(fixtures.SingleCellSimple())

    super(ApiSampleTestBaseV21, self)._setup_services()

    if not self.USE_NEUTRON:
        # self.network is only setup if USE_NEUTRON=False
        self.useFixture(test.SampleNetworks(host=self.network.host))
        fake_network.stub_compute_with_ips(self)

    self.useFixture(fixtures.SpawnIsSynchronousFixture())

    # this is used to generate sample docs
    self.generate_samples = os.getenv('GENERATE_SAMPLES') is not None

    # NOTE(mikal): this is used to stub away privsep helpers
    def fake_noop(*args, **kwargs):
        return '', ''

    self.stub_out('nova.privsep.linux_net.add_bridge', fake_noop)
    self.stub_out('nova.privsep.linux_net.set_device_mtu', fake_noop)
    self.stub_out('nova.privsep.linux_net.set_device_enabled', fake_noop)
    self.stub_out('nova.privsep.linux_net.set_device_macaddr', fake_noop)

def setUp(self):
    # Point _VirtDriverTestCase at the right module
    self.driver_module = 'nova.virt.libvirt.LibvirtDriver'
    super(LibvirtConnTestCase, self).setUp()
    self.stubs.Set(self.connection, '_set_host_enabled', mock.MagicMock())
    self.useFixture(
        fixtures.MonkeyPatch('nova.context.get_admin_context',
                             self._fake_admin_context))
    # This is needed for the live migration tests which spawn off the
    # operation for monitoring.
    self.useFixture(nova_fixtures.SpawnIsSynchronousFixture())
    # When using CONF.use_neutron=True and destroying an instance, os-vif
    # will try to execute some commands which hang tests, so let's just
    # stub out the unplug call to os-vif since we don't care about it.
    self.stub_out('os_vif.unplug', lambda a, kw: None)

def test_scatter_gather_cells(self, mock_get_inst, mock_target_cell):
    self.useFixture(nova_fixtures.SpawnIsSynchronousFixture())

    ctxt = context.get_context()
    mapping = objects.CellMapping(database_connection='fake://db',
                                  transport_url='fake://mq',
                                  uuid=uuids.cell)
    mappings = objects.CellMappingList(objects=[mapping])

    filters = {'deleted': False}
    context.scatter_gather_cells(
        ctxt, mappings, 60, objects.InstanceList.get_by_filters, filters,
        sort_dir='foo')

    mock_get_inst.assert_called_once_with(
        mock_target_cell.return_value.__enter__.return_value, filters,
        sort_dir='foo')

def setUp(self):
    self.flags(use_ipv6=False,
               osapi_compute_link_prefix=self._get_host(),
               osapi_glance_link_prefix=self._get_glance_host())

    # load any additional fixtures specified by the scenario
    for fix in self._additional_fixtures:
        self.useFixture(fix())

    # super class call is delayed here so that we have the right
    # paste and conf before loading all the services, as we can't
    # change these later.
    super(ApiSampleTestBaseV21, self).setUp()

    self.useFixture(test.SampleNetworks(host=self.network.host))
    fake_network.stub_compute_with_ips(self.stubs)
    self.useFixture(fixtures.SpawnIsSynchronousFixture())

    # this is used to generate sample docs
    self.generate_samples = os.getenv('GENERATE_SAMPLES') is not None

def setUp(self):
    super(TestInstanceListBig, self).setUp()

    cells = [objects.CellMapping(uuid=getattr(uuids, 'cell%i' % i),
                                 name='cell%i' % i,
                                 transport_url='fake:///',
                                 database_connection='fake://')
             for i in range(0, 3)]

    insts = list([
        dict(
            uuid=getattr(uuids, 'inst%i' % i),
            hostname='inst%i' % i)
        for i in range(0, 100)])

    self.cells = cells
    self.insts = insts
    self.context = nova_context.RequestContext()
    self.useFixture(fixtures.SpawnIsSynchronousFixture())

def test_scatter_gather_cells_exception(self, mock_get_inst,
                                        mock_log_exception):
    # This is needed because we're mocking get_by_filters.
    self.useFixture(nova_fixtures.SpawnIsSynchronousFixture())
    ctxt = context.get_context()
    mapping0 = objects.CellMapping(database_connection='fake://db0',
                                   transport_url='none:///',
                                   uuid=objects.CellMapping.CELL0_UUID)
    mapping1 = objects.CellMapping(database_connection='fake://db1',
                                   transport_url='fake://mq1',
                                   uuid=uuids.cell1)
    mappings = objects.CellMappingList(objects=[mapping0, mapping1])

    # Simulate cell1 raising an exception.
    mock_get_inst.side_effect = [mock.sentinel.instances,
                                 test.TestingException()]

    filters = {'deleted': False}
    results = context.scatter_gather_cells(
        ctxt, mappings, 30, objects.InstanceList.get_by_filters, filters)
    self.assertEqual(2, len(results))
    self.assertIn(mock.sentinel.instances, results.values())
    self.assertIsInstance(results[mapping1.uuid], Exception)
    # non-NovaException gets logged
    self.assertTrue(mock_log_exception.called)

    # Now run it again with a NovaException to see it's not logged.
    mock_log_exception.reset_mock()
    mock_get_inst.side_effect = [mock.sentinel.instances,
                                 exception.NotFound()]

    results = context.scatter_gather_cells(
        ctxt, mappings, 30, objects.InstanceList.get_by_filters, filters)
    self.assertEqual(2, len(results))
    self.assertIn(mock.sentinel.instances, results.values())
    self.assertIsInstance(results[mapping1.uuid], exception.NovaException)
    # NovaExceptions are not logged, the caller should handle them.
    mock_log_exception.assert_not_called()

def setUp(self):
    super(TestInstanceList, self).setUp()

    cells = [objects.CellMapping(uuid=getattr(uuids, 'cell%i' % i),
                                 name='cell%i' % i,
                                 transport_url='fake:///',
                                 database_connection='fake://')
             for i in range(0, 3)]

    insts = {}
    for cell in cells:
        insts[cell.uuid] = list([
            dict(
                uuid=getattr(uuids, '%s-inst%i' % (cell.name, i)),
                hostname='%s-inst%i' % (cell.name, i))
            for i in range(0, 3)])

    self.cells = cells
    self.insts = insts
    self.context = mock.sentinel.context
    self.useFixture(fixtures.SpawnIsSynchronousFixture())

def test_scatter_gather_cells_exception(self, mock_get_inst,
                                        mock_log_exception):
    # This is needed because we're mocking get_by_filters.
    self.useFixture(nova_fixtures.SpawnIsSynchronousFixture())
    ctxt = context.get_context()
    mapping0 = objects.CellMapping(database_connection='fake://db0',
                                   transport_url='none:///',
                                   uuid=objects.CellMapping.CELL0_UUID)
    mapping1 = objects.CellMapping(database_connection='fake://db1',
                                   transport_url='fake://mq1',
                                   uuid=uuids.cell1)
    mappings = objects.CellMappingList(objects=[mapping0, mapping1])

    # Simulate cell1 raising an exception.
    mock_get_inst.side_effect = [mock.sentinel.instances,
                                 test.TestingException()]

    results = context.scatter_gather_cells(
        ctxt, mappings, 30, objects.InstanceList.get_by_filters)
    self.assertEqual(2, len(results))
    self.assertIn(mock.sentinel.instances, results.values())
    self.assertIn(context.raised_exception_sentinel, results.values())
    self.assertTrue(mock_log_exception.called)

def setUp(self):
    super(TestInstanceList, self).setUp()

    cells = [objects.CellMapping(uuid=getattr(uuids, 'cell%i' % i),
                                 name='cell%i' % i,
                                 transport_url='fake:///',
                                 database_connection='fake://')
             for i in range(0, 3)]

    insts = {}
    for cell in cells:
        insts[cell.uuid] = list([
            dict(
                uuid=getattr(uuids, '%s-inst%i' % (cell.name, i)),
                hostname='%s-inst%i' % (cell.name, i))
            for i in range(0, 3)])

    self.cells = cells
    self.insts = insts
    self.context = nova_context.RequestContext()
    self.useFixture(fixtures.SpawnIsSynchronousFixture())
    self.flags(instance_list_cells_batch_strategy='fixed', group='api')

def test_spawn_n_return_has_wait(self):
    self.useFixture(fixtures.SpawnIsSynchronousFixture())
    gt = utils.spawn_n(lambda x: '%s' % x, 'foo')
    foo = gt.wait()
    self.assertEqual('foo', foo)

def test_spawn_passes_through(self):
    self.useFixture(fixtures.SpawnIsSynchronousFixture())
    tester = mock.MagicMock()
    utils.spawn_n(tester.function, 'foo', bar='bar')
    tester.function.assert_called_once_with('foo', bar='bar')

def test_spawn_patch(self):
    orig_spawn = utils.spawn_n

    fix = fixtures.SpawnIsSynchronousFixture()
    self.useFixture(fix)
    self.assertNotEqual(orig_spawn, utils.spawn_n)

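# For context, below is a minimal sketch of what a synchronous-spawn fixture
# could look like. This is an illustrative assumption, not Nova's actual
# SpawnIsSynchronousFixture implementation: it monkey-patches
# nova.utils.spawn_n so the callable runs eagerly and the returned object
# exposes the wait()/link() API exercised by the tests above. The class names
# below are hypothetical; "std_fixtures" is the upstream fixtures library,
# not the nova test fixtures module used elsewhere in these snippets.
import fixtures as std_fixtures


class _EagerGreenThread(object):
    """Run the callable immediately and mimic a greenthread's result API."""

    def __init__(self, func, *args, **kwargs):
        self._result = func(*args, **kwargs)

    def wait(self):
        return self._result

    def link(self, func, *args, **kwargs):
        # eventlet's GreenThread.link() passes the thread itself as the
        # first argument, so do the same here.
        func(self, *args, **kwargs)


class SynchronousSpawnSketch(std_fixtures.Fixture):
    """Illustrative only: make nova.utils.spawn_n run synchronously."""

    def _setUp(self):
        self.useFixture(std_fixtures.MonkeyPatch(
            'nova.utils.spawn_n',
            lambda func, *args, **kwargs: _EagerGreenThread(
                func, *args, **kwargs)))
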
def test_create_delete_server_with_instance_update(self):
    # This makes server network creation synchronous which is necessary
    # for notification samples that expect instance.info_cache.network_info
    # to be set.
    self.useFixture(fixtures.SpawnIsSynchronousFixture())
    self.flags(notify_on_state_change='vm_and_task_state',
               group='notifications')

    server = self._boot_a_server(
        extra_params={'networks': [{'port': self.neutron.port_1['id']}]})
    self._attach_volume_to_server(server, self.cinder.SWAP_OLD_VOL)

    instance_updates = self._wait_for_notifications('instance.update', 8)

    # The first notification comes from nova-conductor, the eighth
    # notification comes from nova-api, and the rest come from nova-compute.
    # To keep the test simpler, assert this fact and then modify the
    # publisher_id of the first and eighth notification to match the template.
    self.assertEqual('nova-conductor:fake-mini',
                     instance_updates[0]['publisher_id'])
    self.assertEqual('nova-api:fake-mini',
                     instance_updates[7]['publisher_id'])
    instance_updates[0]['publisher_id'] = 'nova-compute:fake-mini'
    instance_updates[7]['publisher_id'] = 'nova-compute:fake-mini'

    create_steps = [
        # nothing -> scheduling
        {'reservation_id': server['reservation_id'],
         'uuid': server['id'],
         'host': None,
         'node': None,
         'state_update.new_task_state': 'scheduling',
         'state_update.old_task_state': 'scheduling',
         'state_update.state': 'building',
         'state_update.old_state': 'building',
         'state': 'building'},

        # scheduling -> building
        {'state_update.new_task_state': None,
         'state_update.old_task_state': 'scheduling',
         'task_state': None},

        # scheduled
        {'host': 'compute',
         'node': 'fake-mini',
         'state_update.old_task_state': None,
         'updated_at': '2012-10-29T13:42:11Z'},

        # building -> networking
        {'state_update.new_task_state': 'networking',
         'state_update.old_task_state': 'networking',
         'task_state': 'networking'},

        # networking -> block_device_mapping
        {'state_update.new_task_state': 'block_device_mapping',
         'state_update.old_task_state': 'networking',
         'task_state': 'block_device_mapping',
         'ip_addresses': [{
             "nova_object.name": "IpPayload",
             "nova_object.namespace": "nova",
             "nova_object.version": "1.0",
             "nova_object.data": {
                 "mac": "fa:16:3e:4c:2c:30",
                 "address": "192.168.1.3",
                 "port_uuid": "ce531f90-199f-48c0-816c-13e38010b442",
                 "meta": {},
                 "version": 4,
                 "label": "private-network",
                 "device_name": "tapce531f90-19"
             }}]},

        # block_device_mapping -> spawning
        {'state_update.new_task_state': 'spawning',
         'state_update.old_task_state': 'block_device_mapping',
         'task_state': 'spawning'},

        # spawning -> active
        {'state_update.new_task_state': None,
         'state_update.old_task_state': 'spawning',
         'state_update.state': 'active',
         'launched_at': '2012-10-29T13:42:11Z',
         'state': 'active',
         'task_state': None,
         'power_state': 'running'},

        # tag added
        {'state_update.old_task_state': None,
         'state_update.old_state': 'active',
         'tags': ['tag1']},
    ]

    replacements = self._verify_instance_update_steps(
        create_steps, instance_updates)

    fake_notifier.reset()

    # Let's generate some bandwidth usage data.
    # Just call the periodic task directly for simplicity
    self.compute.manager._poll_bandwidth_usage(context.get_admin_context())

    self.api.delete_server(server['id'])
    self._wait_until_deleted(server)

    instance_updates = self._get_notifications('instance.update')
    self.assertEqual(2, len(instance_updates))

    delete_steps = [
        # active -> deleting
        {'state_update.new_task_state': 'deleting',
         'state_update.old_task_state': 'deleting',
         'state_update.old_state': 'active',
         'state': 'active',
         'task_state': 'deleting',
         'bandwidth': [{
             'nova_object.namespace': 'nova',
             'nova_object.name': 'BandwidthPayload',
             'nova_object.data': {
                 'network_name': 'private-network',
                 'out_bytes': 0,
                 'in_bytes': 0},
             'nova_object.version': '1.0'}],
         'tags': ["tag1"],
         'block_devices': [{
             "nova_object.data": {
                 "boot_index": None,
                 "delete_on_termination": False,
                 "device_name": "/dev/sdb",
                 "tag": None,
                 "volume_id": "a07f71dc-8151-4e7d-a0cc-cd24a3f11113"
             },
             "nova_object.name": "BlockDevicePayload",
             "nova_object.namespace": "nova",
             "nova_object.version": "1.0"
         }]},

        # deleting -> deleted
        {'state_update.new_task_state': None,
         'state_update.old_task_state': 'deleting',
         'state_update.old_state': 'active',
         'state_update.state': 'deleted',
         'state': 'deleted',
         'task_state': None,
         'terminated_at': '2012-10-29T13:42:11Z',
         'ip_addresses': [],
         'power_state': 'pending',
         'bandwidth': [],
         'tags': ["tag1"],
         'block_devices': [{
             "nova_object.data": {
                 "boot_index": None,
                 "delete_on_termination": False,
                 "device_name": "/dev/sdb",
                 "tag": None,
                 "volume_id": "a07f71dc-8151-4e7d-a0cc-cd24a3f11113"
             },
             "nova_object.name": "BlockDevicePayload",
             "nova_object.namespace": "nova",
             "nova_object.version": "1.0"
         }]},
    ]

    self._verify_instance_update_steps(delete_steps, instance_updates,
                                       initial=replacements)