def setUp(self):
    super(ImageCacheTest, self).setUp()

    self.flags(compute_driver='fake.FakeDriverWithCaching')

    self.notifier = self.useFixture(fixtures.NotificationFixture(self))
    self.context = context.get_admin_context()

    self.conductor = self.start_service('conductor')
    self.compute1 = self.start_service('compute', host='compute1')
    self.compute2 = self.start_service('compute', host='compute2')
    self.compute3 = self.start_service('compute', host='compute3',
                                       cell_name='cell2')
    self.compute4 = self.start_service('compute', host='compute4',
                                       cell_name='cell2')
    self.compute5 = self.start_service('compute', host='compute5',
                                       cell_name='cell2')

    cell2 = self.cell_mappings['cell2']
    with context.target_cell(self.context, cell2) as cctxt:
        srv = objects.Service.get_by_compute_host(cctxt, 'compute5')
        srv.forced_down = True
        srv.save()

def setUp(self):
    super(NotificationSampleTestBase, self).setUp()

    api_fixture = self.useFixture(nova_fixtures.OSAPIFixture(
        api_version='v2.1'))

    self.api = api_fixture.api
    self.admin_api = api_fixture.admin_api

    max_version = self.MAX_MICROVERSION
    self.api.microversion = max_version
    self.admin_api.microversion = max_version

    self.notifier = self.useFixture(
        nova_fixtures.NotificationFixture(self))

    self.useFixture(utils_fixture.TimeFixture(test_services.fake_utcnow()))

    self.useFixture(nova_fixtures.GlanceFixture(self))
    self.useFixture(func_fixtures.PlacementFixture())

    context_patcher = mock.patch(
        'oslo_context.context.generate_request_id',
        return_value='req-5b6c791d-5709-4f36-8fbe-c3e02869e35d')
    self.mock_gen_request_id = context_patcher.start()
    self.addCleanup(context_patcher.stop)

    self.start_service('conductor')
    self.start_service('scheduler')
    self.compute = self.start_service('compute')

    # Reset the service create notifications
    self.notifier.reset()

def test_evacuate_no_valid_host(self):
    # Boot a server
    server = self._boot_a_server()

    # Force source compute down
    compute_id = self.api.get_services(
        host=self.hostname, binary='nova-compute')[0]['id']
    self.api.put_service(compute_id, {'forced_down': 'true'})

    self.notifier = self.useFixture(
        nova_fixtures.NotificationFixture(self))

    # Initiate evacuation
    self._evacuate_server(
        server, expected_state='ERROR', expected_host=self.hostname,
        expected_migration_status='error')

    self._wait_for_notification_event_type('compute_task.rebuild_server')

    # Check migrations
    migrations = self.api.get_migrations()
    self.assertEqual(1, len(migrations))
    self.assertEqual('evacuation', migrations[0]['migration_type'])
    self.assertEqual(server['id'], migrations[0]['instance_uuid'])
    self.assertEqual(self.hostname, migrations[0]['source_compute'])
    self.assertEqual('error', migrations[0]['status'])

def setUp(self):
    super(TestParallelEvacuationWithServerGroup, self).setUp()

    self.useFixture(nova_fixtures.RealPolicyFixture())

    # The NeutronFixture is needed to stub out validate_networks in API.
    self.useFixture(nova_fixtures.NeutronFixture(self))

    # This stubs out the network allocation in compute.
    fake_network.set_stub_network_methods(self)

    # We need the computes reporting into placement for the filter
    # scheduler to pick a host.
    self.useFixture(func_fixtures.PlacementFixture())

    api_fixture = self.useFixture(nova_fixtures.OSAPIFixture(
        api_version='v2.1'))
    self.api = api_fixture.admin_api
    # 2.11 is needed for force_down
    # 2.14 is needed for evacuate without onSharedStorage flag
    self.api.microversion = '2.14'

    self.notifier = self.useFixture(
        nova_fixtures.NotificationFixture(self))

    # the image fake backend needed for image discovery
    self.useFixture(nova_fixtures.GlanceFixture(self))

    self.start_service('conductor')
    self.start_service('scheduler')

    # We start two compute services because we need two instances with
    # anti-affinity server group policy to be booted
    self.compute1 = self.start_service('compute', host='host1')
    self.compute2 = self.start_service('compute', host='host2')

    self.image_id = self.api.get_images()[0]['id']
    self.flavor_id = self.api.get_flavors()[0]['id']

    manager_class = manager.ComputeManager
    original_rebuild = manager_class._do_rebuild_instance

    def fake_rebuild(self_, context, instance, *args, **kwargs):
        # Simulate that the rebuild request of one of the instances
        # reaches the target compute manager significantly later so the
        # rebuild of the other instance can finish before the late
        # validation of the first rebuild.
        # We cannot simply delay the virt driver's rebuild or the
        # manager's _rebuild_default_impl as those run after the late
        # validation.
        if instance.host == 'host1':
            # wait for the other instance rebuild to start
            self.notifier.wait_for_versioned_notifications(
                'instance.rebuild.start', n_events=1)

        original_rebuild(self_, context, instance, *args, **kwargs)

    self.stub_out('nova.compute.manager.ComputeManager.'
                  '_do_rebuild_instance', fake_rebuild)

def setUp(self):
    super(ComputeHostAPITestCase, self).setUp()
    self.host_api = compute.HostAPI()
    self.aggregate_api = compute.AggregateAPI()
    self.ctxt = context.get_admin_context()
    self.notifier = self.useFixture(
        nova_fixtures.NotificationFixture(self))
    self.req = fakes.HTTPRequest.blank('')
    self.controller = services.ServiceController()
    self.useFixture(nova_fixtures.SingleCellSimple())

def setUp(self):
    super(RescheduleBuildAvailabilityZoneUpCall, self).setUp()
    # Use the standard fixtures.
    self.useFixture(nova_fixtures.RealPolicyFixture())
    self.useFixture(nova_fixtures.GlanceFixture(self))
    self.useFixture(nova_fixtures.NeutronFixture(self))
    self.useFixture(func_fixtures.PlacementFixture())
    # Start controller services.
    self.api = self.useFixture(
        nova_fixtures.OSAPIFixture(api_version='v2.1')).admin_api
    self.start_service('conductor')
    self.start_service('scheduler')
    # Start two computes with the fake reschedule driver.
    self.flags(compute_driver='fake.FakeRescheduleDriver')
    self.start_service('compute', host='host1')
    self.start_service('compute', host='host2')
    # Listen for notifications.
    self.notifier = self.useFixture(
        nova_fixtures.NotificationFixture(self))

def setUp(self):
    super(RegressionTest1835822, self).setUp()
    # Use the standard fixtures.
    self.useFixture(nova_fixtures.RealPolicyFixture())
    self.useFixture(nova_fixtures.GlanceFixture(self))
    self.useFixture(nova_fixtures.NeutronFixture(self))
    self.useFixture(func_fixtures.PlacementFixture())
    self.api = self.useFixture(nova_fixtures.OSAPIFixture(
        api_version='v2.1')).api
    self.start_service('conductor')
    self.start_service('scheduler')
    self.start_service('compute')
    images = self.api.get_images()
    self.image_ref_0 = images[0]['id']
    self.image_ref_1 = images[1]['id']
    self.notifier = self.useFixture(
        nova_fixtures.NotificationFixture(self))

def setUp(self):
    super(RescheduleMigrateAvailabilityZoneUpCall, self).setUp()
    # Use the standard fixtures.
    self.useFixture(nova_fixtures.RealPolicyFixture())
    self.useFixture(nova_fixtures.GlanceFixture(self))
    self.useFixture(nova_fixtures.NeutronFixture(self))
    self.useFixture(func_fixtures.PlacementFixture())
    # Start controller services.
    self.api = self.useFixture(
        nova_fixtures.OSAPIFixture(api_version='v2.1')).admin_api
    self.start_service('conductor')
    self.start_service('scheduler')
    # We need three hosts for this test: one is the initial host on which
    # the server is built, and the other two are for the migration, where
    # the first will fail and the second is an alternate.
    self.start_service('compute', host='host1')
    self.start_service('compute', host='host2')
    self.start_service('compute', host='host3')
    # Listen for notifications.
    self.notifier = self.useFixture(
        nova_fixtures.NotificationFixture(self))

def setUp(self):
    super(NotificationsTestCase, self).setUp()

    self.fixture = self.useFixture(o_fixture.ClearRequestContext())

    self.net_info = fake_network.fake_get_instance_nw_info(self)

    self.notifier = self.useFixture(fixtures.NotificationFixture(self))

    self.flags(host='testhost')
    self.flags(notify_on_state_change="vm_and_task_state",
               group='notifications')
    self.flags(api_servers=['http://localhost:9292'], group='glance')

    self.user_id = 'fake'
    self.project_id = 'fake'
    self.context = context.RequestContext(self.user_id, self.project_id)

    self.fake_time = datetime.datetime(2017, 2, 2, 16, 45, 0)
    timeutils.set_time_override(self.fake_time)
    self.instance = self._wrapped_create()

    self.decorated_function_called = False

def setUp(self):
    super(TestNotificationFixture, self).setUp()
    self.notifier = self.useFixture(fixtures.NotificationFixture(self))
    self.context = context.RequestContext()

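# A minimal usage sketch of the fixture configured by the setUp above. This
# is an assumption-based illustration, not code from the original source: it
# relies only on reset() and wait_for_versioned_notifications(), both of
# which appear elsewhere in this section, and emits a notification through
# nova.rpc.get_versioned_notifier(). The publisher id, event type, payload,
# and the assumption that the wait helper returns the matched notifications
# are all made up for the example.
def test_wait_for_versioned_notifications_sketch(self):
    self.notifier.reset()
    # Emit a versioned notification; the fixture's stubbed-out driver
    # should capture it.
    notifier = rpc.get_versioned_notifier('testhost')
    notifier.info(self.context, 'instance.create.end', {'fake': 'payload'})
    # Block until one matching versioned notification has been captured.
    notifications = self.notifier.wait_for_versioned_notifications(
        'instance.create.end', n_events=1)
    self.assertEqual(1, len(notifications))
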
def setUp(self):
    super(_LibvirtEvacuateTest, self).setUp()

    self.useFixture(nova_fixtures.CinderFixture(self))
    self.useFixture(nova_fixtures.NeutronFixture(self))
    self.useFixture(nova_fixtures.GlanceFixture(self))
    self.useFixture(func_fixtures.PlacementFixture())
    fake_network.set_stub_network_methods(self)

    api_fixture = self.useFixture(
        nova_fixtures.OSAPIFixture(api_version='v2.1'))
    self.api = api_fixture.admin_api
    # force_down and evacuate without onSharedStorage
    self.api.microversion = '2.14'

    self.notifier = self.useFixture(
        nova_fixtures.NotificationFixture(self))

    self.useFixture(nova_fixtures.LibvirtFixture())

    # Fake out all the details of volume connection
    self.useFixture(fixtures.MockPatch(
        'nova.virt.libvirt.driver.LibvirtDriver.get_volume_connector'))
    self.useFixture(fixtures.MockPatch(
        'nova.virt.libvirt.driver.LibvirtDriver._connect_volume'))
    # For cleanup
    self.useFixture(fixtures.MockPatch(
        'nova.virt.libvirt.driver.LibvirtDriver._disconnect_volume'))

    volume_config = libvirt_config.LibvirtConfigGuestDisk()
    volume_config.driver_name = 'fake-volume-driver'
    volume_config.source_path = 'fake-source-path'
    volume_config.target_dev = 'fake-target-dev'
    volume_config.target_bus = 'fake-target-bus'
    get_volume_config = self.useFixture(fixtures.MockPatch(
        'nova.virt.libvirt.driver.LibvirtDriver._get_volume_config')).mock
    get_volume_config.return_value = volume_config

    # Ensure our computes report lots of available disk, vcpu, and ram
    lots = 10000000
    get_local_gb_info = self.useFixture(fixtures.MockPatch(
        'nova.virt.libvirt.driver.LibvirtDriver._get_local_gb_info')).mock
    get_local_gb_info.return_value = {
        'total': lots, 'free': lots, 'used': 1}
    get_vcpu_available = self.useFixture(fixtures.MockPatch(
        'nova.virt.libvirt.driver.LibvirtDriver._get_vcpu_available')).mock
    get_vcpu_available.return_value = set(cpu for cpu in range(24))
    get_memory_mb_total = self.useFixture(fixtures.MockPatch(
        'nova.virt.libvirt.host.Host.get_memory_mb_total')).mock
    get_memory_mb_total.return_value = lots

    # Mock out adding rng devices
    self.useFixture(fixtures.MockPatch(
        'nova.virt.libvirt.driver.LibvirtDriver._add_rng_device'))

    self.start_service('conductor')
    self.start_service('scheduler')

    self.flags(compute_driver='libvirt.LibvirtDriver')

    ctxt = context.get_admin_context()
    for flavor in FLAVOR_FIXTURES:
        objects.Flavor(context=ctxt, **flavor).create()

def setUp(self):
    super(WrapExceptionTestCase, self).setUp()
    self.notifier = self.useFixture(
        nova_fixtures.NotificationFixture(self))

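# A hedged sketch of the kind of test this setUp supports: decorate a
# failing function with nova.exception_wrapper.wrap_exception and assert
# that the fixture captured the resulting versioned notification. The
# decorator arguments, the 'compute.exception' event type, and the
# fixture's versioned_notifications attribute are assumptions based on
# common Nova test patterns, not taken from the original source.
def test_wrap_exception_emits_notification_sketch(self):
    @exception_wrapper.wrap_exception(
        service='compute', binary='nova-compute')
    def bad_function(self, context):
        raise test.TestingException()

    ctxt = context.get_admin_context()
    self.assertRaises(test.TestingException, bad_function, None, ctxt)
    # The wrapper is expected to emit a 'compute.exception' versioned
    # notification, which the fixture records.
    self.assertEqual(1, len(self.notifier.versioned_notifications))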