def setUp(self):
    super(ServerGroupTestBase, self).setUp()
    self.flags(enabled_filters=self._enabled_filters,
               group='filter_scheduler')
    # NOTE(sbauza): Don't verify VCPUS and disks given the current nodes.
    self.flags(cpu_allocation_ratio=9999.0)
    self.flags(disk_allocation_ratio=9999.0)
    self.flags(weight_classes=self._get_weight_classes(),
               group='filter_scheduler')

    self.useFixture(nova_fixtures.RealPolicyFixture())
    self.useFixture(nova_fixtures.GlanceFixture(self))
    self.useFixture(nova_fixtures.NeutronFixture(self))
    self.useFixture(func_fixtures.PlacementFixture())

    api_fixture = self.useFixture(
        nova_fixtures.OSAPIFixture(api_version='v2.1'))

    self.api = api_fixture.api
    self.api.microversion = self.microversion
    self.admin_api = api_fixture.admin_api
    self.admin_api.microversion = self.microversion

    self.start_service('conductor')
    self.start_service('scheduler')
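# ServerGroupTestBase defers `microversion`, `_enabled_filters` and
# `_get_weight_classes()` to its subclasses. A hypothetical minimal subclass
# showing the expected contract (the class name, microversion and filter list
# below are illustrative assumptions, not taken from the original tests):
class ServerGroupAffinityConfTest(ServerGroupTestBase):
    microversion = '2.64'
    _enabled_filters = ['ServerGroupAntiAffinityFilter', 'ComputeFilter']

    def _get_weight_classes(self):
        # No extra weighers are needed for this sketch.
        return []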
def setUp(self):
    super(ServerGroupTestBase, self).setUp()
    self.flags(enabled_filters=self._enabled_filters,
               group='filter_scheduler')
    # NOTE(sbauza): Don't verify VCPUS and disks given the current nodes.
    self.flags(cpu_allocation_ratio=9999.0)
    self.flags(disk_allocation_ratio=9999.0)
    self.flags(weight_classes=self._get_weight_classes(),
               group='filter_scheduler')

    self.useFixture(policy_fixture.RealPolicyFixture())
    self.useFixture(nova_fixtures.NeutronFixture(self))
    self.useFixture(func_fixtures.PlacementFixture())

    api_fixture = self.useFixture(nova_fixtures.OSAPIFixture(
        api_version='v2.1'))

    self.api = api_fixture.api
    self.api.microversion = self.microversion
    self.admin_api = api_fixture.admin_api
    self.admin_api.microversion = self.microversion

    # the image fake backend needed for image discovery
    nova.tests.unit.image.fake.stub_out_image_service(self)

    self.start_service('conductor')
    self.start_service('scheduler')

    self.addCleanup(nova.tests.unit.image.fake.FakeImageService_reset)
def setUp(self):
    super(TestLiveMigrateOneOfConcurrentlyCreatedInstances, self).setUp()

    self.useFixture(policy_fixture.RealPolicyFixture())
    self.useFixture(nova_fixtures.NeutronFixture(self))
    self.useFixture(func_fixtures.PlacementFixture())

    api_fixture = self.useFixture(
        nova_fixtures.OSAPIFixture(api_version='v2.1'))
    self.api = api_fixture.admin_api
    self.api.microversion = self.microversion

    nova.tests.unit.image.fake.stub_out_image_service(self)
    self.addCleanup(nova.tests.unit.image.fake.FakeImageService_reset)

    self.start_service('conductor')
    self.start_service('scheduler')

    self.start_service('compute', host='host1')
    self.start_service('compute', host='host2')

    fake_network.set_stub_network_methods(self)

    flavors = self.api.get_flavors()
    self.flavor1 = flavors[0]
def setUp(self):
    super(TestLiveMigrateOneOfConcurrentlyCreatedInstances, self).setUp()

    self.useFixture(policy_fixture.RealPolicyFixture())
    self.useFixture(nova_fixtures.NeutronFixture(self))
    self.useFixture(func_fixtures.PlacementFixture())

    api_fixture = self.useFixture(
        nova_fixtures.OSAPIFixture(api_version='v2.1'))
    self.api = api_fixture.admin_api
    self.api.microversion = self.microversion

    nova.tests.unit.image.fake.stub_out_image_service(self)
    self.addCleanup(nova.tests.unit.image.fake.FakeImageService_reset)

    self.start_service('conductor')
    self.start_service('scheduler')

    # set_nodes() is needed to have each compute service return a
    # different nodename, so we get two hosts in the list of candidates
    # for scheduling. Otherwise both hosts will have the same default
    # nodename "fake-mini". The host passed to start_service controls the
    # "host" attribute and set_nodes() sets the "nodename" attribute.
    # We set_nodes() to make host and nodename the same for each compute.
    fake.set_nodes(['host1'])
    self.addCleanup(fake.restore_nodes)
    self.start_service('compute', host='host1')
    fake.set_nodes(['host2'])
    self.start_service('compute', host='host2')

    fake_network.set_stub_network_methods(self)

    flavors = self.api.get_flavors()
    self.flavor1 = flavors[0]
def setUp(self):
    super(MissingReqSpecInstanceGroupUUIDTestCase, self).setUp()
    # Stub out external dependencies.
    self.useFixture(nova_fixtures.NeutronFixture(self))
    self.useFixture(func_fixtures.PlacementFixture())
    fake_image.stub_out_image_service(self)
    self.addCleanup(fake_image.FakeImageService_reset)
    # Configure the API to allow resizing to the same host so we can keep
    # the number of computes down to two in the test.
    self.flags(allow_resize_to_same_host=True)
    # Start nova controller services.
    api_fixture = self.useFixture(
        nova_fixtures.OSAPIFixture(api_version='v2.1'))
    self.api = api_fixture.admin_api
    self.start_service('conductor')
    # Use our custom weigher defined above to make sure that we have
    # a predictable scheduling sort order.
    self.flags(weight_classes=[__name__ + '.HostNameWeigher'],
               group='filter_scheduler')
    self.start_service('scheduler')
    # Start two computes, one where the server will be created and another
    # where we'll cold migrate it.
    self.addCleanup(fake_virt.restore_nodes)
    self.computes = {}  # keep track of the compute services per host name
    for host in ('host1', 'host2'):
        fake_virt.set_nodes([host])
        compute_service = self.start_service('compute', host=host)
        self.computes[host] = compute_service
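# Several of these setups enable a custom weigher via
# self.flags(weight_classes=[__name__ + '.HostNameWeigher'], ...), but the
# weigher class itself is defined elsewhere in each test module. A minimal
# sketch of its usual shape, assuming the in-tree
# nova.scheduler.weights.BaseHostWeigher base class:
from nova.scheduler import weights


class HostNameWeigher(weights.BaseHostWeigher):
    def _weigh_object(self, host_state, weight_properties):
        # Prefer host1 over host2 over host3 so the scheduling sort
        # order is deterministic in the tests.
        host_weights = {'host1': 100, 'host2': 50, 'host3': 1}
        return host_weights.get(host_state.host, 0)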
def setUp(self):
    super(TestEvacuationWithSourceReturningDuringRebuild, self).setUp()

    self.useFixture(policy_fixture.RealPolicyFixture())

    # The NeutronFixture is needed to stub out validate_networks in API.
    self.useFixture(nova_fixtures.NeutronFixture(self))

    # This stubs out the network allocation in compute.
    fake_network.set_stub_network_methods(self)

    # We need the computes reporting into placement for the filter
    # scheduler to pick a host.
    self.useFixture(func_fixtures.PlacementFixture())

    api_fixture = self.useFixture(
        nova_fixtures.OSAPIFixture(api_version='v2.1'))
    self.api = api_fixture.admin_api
    # 2.11 is needed for force_down
    # 2.14 is needed for evacuate without onSharedStorage flag
    self.api.microversion = '2.14'

    # the image fake backend needed for image discovery
    nova.tests.unit.image.fake.stub_out_image_service(self)
    self.addCleanup(nova.tests.unit.image.fake.FakeImageService_reset)

    self.start_service('conductor')
    self.start_service('scheduler')

    # Start two computes
    self.computes = {}
    fake.set_nodes(['host1'])
    self.addCleanup(fake.restore_nodes)
    self.computes['host1'] = self.start_service('compute', host='host1')
    fake.set_nodes(['host2'])
    self.computes['host2'] = self.start_service('compute', host='host2')

    self.image_id = self.api.get_images()[0]['id']
    self.flavor_id = self.api.get_flavors()[0]['id']

    self.addCleanup(fake_notifier.reset)

    # Stub out rebuild with a slower method allowing the src compute to be
    # restarted once the migration hits pre-migrating after claiming
    # resources on the dest.
    manager_class = nova.compute.manager.ComputeManager
    original_rebuild = manager_class._do_rebuild_instance

    def start_src_rebuild(self_, context, instance, *args, **kwargs):
        server = self.api.get_server(instance.uuid)
        # Start the src compute once the migration is pre-migrating.
        self._wait_for_migration_status(server, ['pre-migrating'])
        self.computes.get(self.source_compute).start()
        original_rebuild(self_, context, instance, *args, **kwargs)

    self.stub_out('nova.compute.manager.ComputeManager.'
                  '_do_rebuild_instance', start_src_rebuild)
def setUp(self):
    super(MetadataTest, self).setUp()

    self.useFixture(nova_fixtures.GlanceFixture(self))
    self.useFixture(nova_fixtures.NeutronFixture(self))
    self.useFixture(func_fixtures.PlacementFixture())

    self.start_service('conductor')
    self.start_service('scheduler')
    self.api = self.useFixture(
        nova_fixtures.OSAPIFixture(api_version='v2.1')).api
    self.start_service('compute')

    # create a server for the tests
    server = self._build_server(name='test')
    server = self.api.post_server({'server': server})
    self.server = self._wait_for_state_change(server, 'ACTIVE')

    self.api_fixture = self.useFixture(nova_fixtures.OSMetadataServer())
    self.md_url = self.api_fixture.md_url

    # make sure that the metadata service returns information about the
    # server we created above
    def fake_get_fixed_ip_by_address(self, ctxt, address):
        return {'instance_uuid': server['id']}

    self.useFixture(
        fixtures.MonkeyPatch(
            'nova.network.neutron.API.get_fixed_ip_by_address',
            fake_get_fixed_ip_by_address))
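# With the OSMetadataServer fixture in place, a test can exercise the
# metadata API over plain HTTP. A minimal sketch, assuming the `requests`
# library (normally imported at module level) and that self.md_url points
# at the fixture's root URL:
def test_lookup_metadata_root_url(self):
    import requests

    res = requests.request('GET', self.md_url, timeout=5)
    self.assertEqual(200, res.status_code)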
def setUp(self):
    super(TestResizeWithNoAllocationScheduler, self).setUp()

    self.useFixture(policy_fixture.RealPolicyFixture())
    self.useFixture(nova_fixtures.NeutronFixture(self))
    self.useFixture(func_fixtures.PlacementFixture())

    api_fixture = self.useFixture(
        nova_fixtures.OSAPIFixture(api_version='v2.1'))
    self.api = api_fixture.admin_api
    self.api.microversion = self.microversion

    nova.tests.unit.image.fake.stub_out_image_service(self)
    self.addCleanup(nova.tests.unit.image.fake.FakeImageService_reset)

    self.start_service('conductor')

    # Create two compute nodes/services.
    for host in ('host1', 'host2'):
        self.start_service('compute', host=host)

    scheduler_service = self.start_service('scheduler')
    # We need to mock the FilterScheduler to not use Placement so that
    # allocations won't be created during scheduling.
    scheduler_service.manager.driver.USES_ALLOCATION_CANDIDATES = False

    flavors = self.api.get_flavors()
    self.old_flavor = flavors[0]
    self.new_flavor = flavors[1]
def setUp(self):
    self.flags(compute_driver=self.compute_driver)
    super(ProviderUsageBaseTestCase, self).setUp()

    self.policy = self.useFixture(policy_fixture.RealPolicyFixture())
    self.neutron = self.useFixture(nova_fixtures.NeutronFixture(self))
    self.placement = self.useFixture(func_fixtures.PlacementFixture()).api
    self.useFixture(nova_fixtures.AllServicesCurrent())

    fake_notifier.stub_notifier(self)
    self.addCleanup(fake_notifier.reset)

    self.api_fixture = self.useFixture(
        nova_fixtures.OSAPIFixture(api_version='v2.1'))
    self.admin_api = self.api_fixture.admin_api
    self.admin_api.microversion = self.microversion
    self.api = self.admin_api

    # the image fake backend needed for image discovery
    self.image_service = (
        nova.tests.unit.image.fake.stub_out_image_service(self))

    self.start_service('conductor')
    self.scheduler_service = self.start_service('scheduler')

    self.addCleanup(nova.tests.unit.image.fake.FakeImageService_reset)
def test_create_volume_backed_server_with_zero_disk_allowed(self):
    """Tests that creating a volume-backed server with a zero-root
    disk flavor will be allowed for admins.
    """
    # For this test, we want to start conductor and the scheduler but
    # we don't start compute so that scheduling fails; we don't really
    # care about successfully building an active server here.
    self.useFixture(func_fixtures.PlacementFixture())
    self.useFixture(nova_fixtures.CinderFixture(self))
    self.start_service('conductor')
    self.start_service('scheduler')
    server_req = self._build_minimal_create_server_request(
        'test_create_volume_backed_server_with_zero_disk_allowed',
        flavor_id=self.zero_disk_flavor['id'])
    server_req.pop('imageRef', None)
    server_req['block_device_mapping_v2'] = [{
        'uuid': nova_fixtures.CinderFixture.IMAGE_BACKED_VOL,
        'source_type': 'volume',
        'destination_type': 'volume',
        'boot_index': 0
    }]
    server = self.admin_api.post_server({'server': server_req})
    server = self._wait_for_state_change(server, 'ERROR')
    self.assertIn('No valid host', server['fault']['message'])
def setUp(self):
    super(TestRescheduleWithVolumesAttached, self).setUp()

    # Use the new attach flow fixture for cinder
    cinder_fixture = nova_fixtures.CinderFixture(self)
    self.cinder = self.useFixture(cinder_fixture)
    self.useFixture(policy_fixture.RealPolicyFixture())
    self.useFixture(nova_fixtures.NeutronFixture(self))

    fake_network.set_stub_network_methods(self)

    self.useFixture(func_fixtures.PlacementFixture())

    api_fixture = self.useFixture(
        nova_fixtures.OSAPIFixture(api_version='v2.1'))
    self.api = api_fixture.admin_api

    nova.tests.unit.image.fake.stub_out_image_service(self)
    self.addCleanup(nova.tests.unit.image.fake.FakeImageService_reset)

    self.flags(compute_driver='fake.FakeRescheduleDriver')

    self.start_service('conductor')
    self.start_service('scheduler')

    # Start two computes to allow the instance to be rescheduled
    self.host1 = self.start_service('compute', host='host1')
    self.host2 = self.start_service('compute', host='host2')

    self.image_id = self.api.get_images()[0]['id']
    self.flavor_id = self.api.get_flavors()[0]['id']
def setUp(self):
    super(TestAvailabilityZoneScheduling, self).setUp()

    self.useFixture(policy_fixture.RealPolicyFixture())
    self.useFixture(nova_fixtures.NeutronFixture(self))
    self.useFixture(func_fixtures.PlacementFixture())

    api_fixture = self.useFixture(
        nova_fixtures.OSAPIFixture(api_version='v2.1'))
    self.api = api_fixture.admin_api
    self.api.microversion = 'latest'

    fake_image.stub_out_image_service(self)
    self.addCleanup(fake_image.FakeImageService_reset)

    self.start_service('conductor')
    self.start_service('scheduler')

    # Start two compute services in separate zones.
    self._start_host_in_zone('host1', 'zone1')
    self._start_host_in_zone('host2', 'zone2')

    flavors = self.api.get_flavors()
    self.flavor1 = flavors[0]['id']
    self.flavor2 = flavors[1]['id']
def setUp(self):
    super(ColdMigrateTargetHostThenLiveMigrateTest, self).setUp()
    self.useFixture(policy_fixture.RealPolicyFixture())
    self.useFixture(nova_fixtures.NeutronFixture(self))
    self.useFixture(func_fixtures.PlacementFixture())

    api_fixture = self.useFixture(
        nova_fixtures.OSAPIFixture(api_version='v2.1'))
    # The admin API is used to get the server details to verify the
    # host on which the server was built and cold/live migrate it.
    self.admin_api = api_fixture.admin_api
    self.api = api_fixture.api

    # Use the latest microversion available to make sure something does
    # not regress in new microversions; cap as necessary.
    self.admin_api.microversion = 'latest'
    self.api.microversion = 'latest'

    image_fake.stub_out_image_service(self)
    self.addCleanup(image_fake.FakeImageService_reset)

    self.start_service('conductor')
    self.start_service('scheduler')

    for host in ('host1', 'host2'):
        self.start_service('compute', host=host)
def setUp(self):
    super(AntiAffinityMultiCreateRequest, self).setUp()
    self.useFixture(policy_fixture.RealPolicyFixture())
    self.glance = self.useFixture(nova_fixtures.GlanceFixture(self))
    self.useFixture(nova_fixtures.NeutronFixture(self))
    self.useFixture(func_fixtures.PlacementFixture())

    api_fixture = self.useFixture(
        nova_fixtures.OSAPIFixture(api_version='v2.1'))
    # The admin API is used to get the server details to verify the
    # host on which the server was built.
    self.admin_api = api_fixture.admin_api
    self.api = api_fixture.api

    self.start_service('conductor')

    # Use the latest microversion available to make sure something does
    # not regress in new microversions; cap as necessary.
    self.admin_api.microversion = 'latest'
    self.api.microversion = 'latest'

    self.useFixture(nova_fixtures.HostNameWeigherFixture())
    # disable late check on compute node to mimic devstack.
    self.flags(disable_group_policy_check_upcall=True,
               group='workarounds')

    self.start_service('scheduler')

    self.start_service('compute', host='host1')
    self.start_service('compute', host='host2')
def setUp(self):
    super(TestParallelEvacuationWithServerGroup, self).setUp()

    self.useFixture(policy_fixture.RealPolicyFixture())

    # The NeutronFixture is needed to stub out validate_networks in API.
    self.useFixture(nova_fixtures.NeutronFixture(self))

    # This stubs out the network allocation in compute.
    fake_network.set_stub_network_methods(self)

    # We need the computes reporting into placement for the filter
    # scheduler to pick a host.
    self.useFixture(func_fixtures.PlacementFixture())

    api_fixture = self.useFixture(nova_fixtures.OSAPIFixture(
        api_version='v2.1'))
    self.api = api_fixture.admin_api
    # 2.11 is needed for force_down
    # 2.14 is needed for evacuate without onSharedStorage flag
    self.api.microversion = '2.14'

    fake_notifier.stub_notifier(self)
    self.addCleanup(fake_notifier.reset)

    # the image fake backend needed for image discovery
    nova.tests.unit.image.fake.stub_out_image_service(self)
    self.addCleanup(nova.tests.unit.image.fake.FakeImageService_reset)

    self.start_service('conductor')
    self.start_service('scheduler')

    # We start two compute services because we need two instances with
    # anti-affinity server group policy to be booted
    self.compute1 = self.start_service('compute', host='host1')
    self.compute2 = self.start_service('compute', host='host2')

    self.image_id = self.api.get_images()[0]['id']
    self.flavor_id = self.api.get_flavors()[0]['id']

    manager_class = nova.compute.manager.ComputeManager
    original_rebuild = manager_class._do_rebuild_instance

    def fake_rebuild(self_, context, instance, *args, **kwargs):
        # Simulate that the rebuild request of one of the instances
        # reaches the target compute manager significantly later so the
        # rebuild of the other instance can finish before the late
        # validation of the first rebuild.
        # We cannot simply delay the virt driver's rebuild or the
        # manager's _rebuild_default_impl as those run after the late
        # validation.
        if instance.host == 'host1':
            # wait for the other instance rebuild to start
            fake_notifier.wait_for_versioned_notifications(
                'instance.rebuild.start', n_events=1)
        original_rebuild(self_, context, instance, *args, **kwargs)

    self.stub_out('nova.compute.manager.ComputeManager.'
                  '_do_rebuild_instance', fake_rebuild)
def setUp(self):
    super(TestListServersIpFilter, self).setUp()

    self.useFixture(policy_fixture.RealPolicyFixture())
    self.neutron = self.useFixture(
        nova_fixtures.NeutronFixture(self))
    # Add a 2nd port to the neutron fixture to have multiple ports
    self.neutron.create_port({'port': self.neutron.port_2})

    api_fixture = self.useFixture(nova_fixtures.OSAPIFixture(
        api_version='v2.1'))
    self.api = api_fixture.api

    # the image fake backend needed for image discovery
    nova.tests.unit.image.fake.stub_out_image_service(self)

    self.useFixture(func_fixtures.PlacementFixture())

    self.start_service('conductor')
    self.flags(enabled_filters=['ComputeFilter'],
               group='filter_scheduler')
    self.start_service('scheduler')
    self.start_service('compute')

    self.useFixture(cast_as_call.CastAsCall(self))

    self.image_id = self.api.get_images()[0]['id']
    self.flavor_id = self.api.get_flavors()[0]['id']
def setUp(self):
    super(ServersTestBase, self).setUp()

    # Replace libvirt with fakelibvirt
    self.useFixture(fake_imagebackend.ImageBackendFixture())
    self.useFixture(fixtures.MonkeyPatch(
        'nova.virt.libvirt.driver.libvirt', fakelibvirt))
    self.useFixture(fixtures.MonkeyPatch(
        'nova.virt.libvirt.host.libvirt', fakelibvirt))
    self.useFixture(fixtures.MonkeyPatch(
        'nova.virt.libvirt.guest.libvirt', fakelibvirt))
    self.useFixture(fakelibvirt.FakeLibvirtFixture())

    self.useFixture(func_fixtures.PlacementFixture())

    self.stub_out('nova.privsep.utils.supports_direct_io',
                  lambda _: True)

    # Mock the 'get_connection' function, as we're going to need to
    # provide custom capabilities for each test
    _p = mock.patch('nova.virt.libvirt.host.Host.get_connection')
    self.mock_conn = _p.start()
    self.addCleanup(_p.stop)

    # As above, mock the 'get_arch' function as we may need to provide
    # different host architectures during some tests.
    _a = mock.patch('nova.virt.libvirt.utils.get_arch')
    self.mock_arch = _a.start()
    # Default to X86_64
    self.mock_arch.return_value = obj_fields.Architecture.X86_64
    self.addCleanup(_a.stop)
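# Each test is then expected to configure `self.mock_conn` before starting a
# compute service, so the driver sees the desired host capabilities. A
# hypothetical sketch using the fakelibvirt helpers; the exact HostInfo and
# Connection arguments are assumptions and should be matched to the
# fakelibvirt version in the tree:
#
#     host_info = fakelibvirt.HostInfo(cpu_nodes=1, cpu_sockets=1,
#                                      cpu_cores=2, cpu_threads=2)
#     self.mock_conn.return_value = fakelibvirt.Connection(
#         'qemu:///system', host_info=host_info)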
def setUp(self):
    super(NotificationSampleTestBase, self).setUp()

    api_fixture = self.useFixture(
        nova_fixtures.OSAPIFixture(api_version='v2.1'))
    self.api = api_fixture.api
    self.admin_api = api_fixture.admin_api

    max_version = self.MAX_MICROVERSION
    self.api.microversion = max_version
    self.admin_api.microversion = max_version

    fake_notifier.stub_notifier(self)
    self.addCleanup(fake_notifier.reset)

    self.useFixture(utils_fixture.TimeFixture(test_services.fake_utcnow()))

    self.useFixture(nova_fixtures.GlanceFixture(self))
    self.useFixture(func_fixtures.PlacementFixture())

    # Patch the request id generator so notification samples have a
    # predictable request_id value.
    context_patcher = mock.patch(
        'oslo_context.context.generate_request_id',
        return_value='req-5b6c791d-5709-4f36-8fbe-c3e02869e35d')
    self.mock_gen_request_id = context_patcher.start()
    self.addCleanup(context_patcher.stop)

    self.start_service('conductor')
    self.start_service('scheduler')
    self.compute = self.start_service('compute')
    # Reset the service create notifications
    fake_notifier.reset()
def setUp(self):
    super(AntiAffinityMultiCreateRequest, self).setUp()
    self.useFixture(policy_fixture.RealPolicyFixture())
    self.useFixture(nova_fixtures.NeutronFixture(self))
    self.useFixture(func_fixtures.PlacementFixture())

    api_fixture = self.useFixture(
        nova_fixtures.OSAPIFixture(api_version='v2.1'))
    # The admin API is used to get the server details to verify the
    # host on which the server was built.
    self.admin_api = api_fixture.admin_api
    self.api = api_fixture.api

    image_fake.stub_out_image_service(self)
    self.addCleanup(image_fake.FakeImageService_reset)

    self.start_service('conductor')

    # Use the latest microversion available to make sure something does
    # not regress in new microversions; cap as necessary.
    self.admin_api.microversion = 'latest'
    self.api.microversion = 'latest'

    # Add our custom weigher.
    self.flags(weight_classes=[__name__ + '.HostNameWeigher'],
               group='filter_scheduler')
    # disable late check on compute node to mimic devstack.
    self.flags(disable_group_policy_check_upcall=True,
               group='workarounds')

    self.start_service('scheduler')

    fake.set_nodes(['host1'])
    self.addCleanup(fake.restore_nodes)
    self.start_service('compute', host='host1')
    fake.set_nodes(['host2'])
    self.start_service('compute', host='host2')
def setUp(self):
    super(NonPersistentFieldNotResetTest, self).setUp()
    self.useFixture(policy_fixture.RealPolicyFixture())
    self.useFixture(nova_fixtures.NeutronFixture(self))
    self.useFixture(func_fixtures.PlacementFixture())

    api_fixture = self.useFixture(
        nova_fixtures.OSAPIFixture(api_version='v2.1'))
    self.api = api_fixture.admin_api
    # Use the latest microversion available to make sure something does
    # not regress in new microversions; cap as necessary.
    self.api.microversion = 'latest'

    image_fake.stub_out_image_service(self)
    self.addCleanup(image_fake.FakeImageService_reset)

    self.start_service('conductor')
    self.start_service('scheduler')

    self.compute = {}

    self.addCleanup(fake.restore_nodes)
    for host in ('host1', 'host2', 'host3'):
        fake.set_nodes([host])
        compute_service = self.start_service('compute', host=host)
        self.compute.update({host: compute_service})

    self.ctxt = context.get_admin_context()
def setUp(self):
    super(TestBootFromVolumeIsolatedHostsFilter, self).setUp()

    self.useFixture(policy_fixture.RealPolicyFixture())
    self.useFixture(nova_fixtures.NeutronFixture(self))
    self.useFixture(nova_fixtures.CinderFixture(self))
    self.useFixture(func_fixtures.PlacementFixture())

    api_fixture = self.useFixture(
        nova_fixtures.OSAPIFixture(api_version='v2.1'))
    self.api = api_fixture.admin_api

    image_fakes.stub_out_image_service(self)
    self.addCleanup(image_fakes.FakeImageService_reset)

    self.start_service('conductor')

    # Add the IsolatedHostsFilter to the list of enabled filters since it
    # is not enabled by default.
    enabled_filters = CONF.filter_scheduler.enabled_filters
    enabled_filters.append('IsolatedHostsFilter')
    self.flags(
        enabled_filters=enabled_filters,
        isolated_images=[image_fakes.AUTO_DISK_CONFIG_ENABLED_IMAGE_UUID],
        isolated_hosts=['host1'],
        restrict_isolated_hosts_to_isolated_images=True,
        group='filter_scheduler')
    self.start_service('scheduler')

    # Create two compute nodes/services so we can restrict the image
    # we'll use to one of the hosts.
    for host in ('host1', 'host2'):
        self.start_service('compute', host=host)
def setUp(self):
    super(SchedulerOnlyChecksTargetTest, self).setUp()
    self.useFixture(policy_fixture.RealPolicyFixture())

    # The NeutronFixture is needed to stub out validate_networks in API.
    self.flags(use_neutron=True)
    self.useFixture(nova_fixtures.NeutronFixture(self))

    # We need the computes reporting into placement for the filter
    # scheduler to pick a host.
    self.useFixture(func_fixtures.PlacementFixture())

    api_fixture = self.useFixture(
        nova_fixtures.OSAPIFixture(api_version='v2.1'))
    # The admin API is used to get the server details to verify the
    # host on which the server was built.
    self.admin_api = api_fixture.admin_api
    self.api = api_fixture.api

    # the image fake backend needed for image discovery
    image_fake.stub_out_image_service(self)
    self.addCleanup(image_fake.FakeImageService_reset)

    self.start_service('conductor')

    # We have to get the image before we use 2.latest otherwise we'll get
    # a 404 on the /images proxy API because of 2.36.
    self.image_id = self.api.get_images()[0]['id']

    # Use the latest microversion available to make sure something does
    # not regress in new microversions; cap as necessary.
    self.admin_api.microversion = 'latest'
    self.api.microversion = 'latest'

    # Define a very basic scheduler that only verifies if host is down.
    self.flags(enabled_filters=['ComputeFilter'],
               group='filter_scheduler')

    # NOTE(sbauza): Use the above weigher so we are sure that
    # we prefer first host1 for the boot request and forget about any
    # other weigher.
    # Host2 should only be preferred over host3 if and only if that's the
    # only host we verify (as requested_destination does).
    self.flags(weight_classes=[__name__ + '.HostNameWeigher'],
               group='filter_scheduler')
    self.start_service('scheduler')

    # Let's now start three compute nodes as we said above.
    # set_nodes() is needed to have each compute service return a
    # different nodename, so we get two hosts in the list of candidates
    # for scheduling. Otherwise both hosts will have the same default
    # nodename "fake-mini". The host passed to start_service controls the
    # "host" attribute and set_nodes() sets the "nodename" attribute.
    # We set_nodes() to make host and nodename the same for each compute.
    fake.set_nodes(['host1'])
    self.addCleanup(fake.restore_nodes)
    self.start_service('compute', host='host1')
    fake.set_nodes(['host2'])
    self.start_service('compute', host='host2')
    fake.set_nodes(['host3'])
    self.start_service('compute', host='host3')
    self.useFixture(cast_as_call.CastAsCall(self))
def setUp(self):
    super(ServersTestBase, self).setUp()

    # Replace libvirt with fakelibvirt
    self.useFixture(fake_imagebackend.ImageBackendFixture())
    self.useFixture(
        fixtures.MonkeyPatch('nova.virt.libvirt.driver.libvirt_utils',
                             fake_libvirt_utils))
    self.useFixture(
        fixtures.MonkeyPatch('nova.virt.libvirt.driver.libvirt',
                             fakelibvirt))
    self.useFixture(
        fixtures.MonkeyPatch('nova.virt.libvirt.host.libvirt',
                             fakelibvirt))
    self.useFixture(
        fixtures.MonkeyPatch('nova.virt.libvirt.guest.libvirt',
                             fakelibvirt))
    self.useFixture(fakelibvirt.FakeLibvirtFixture())

    self.useFixture(func_fixtures.PlacementFixture())

    self.stub_out('nova.privsep.utils.supports_direct_io',
                  lambda _: True)

    # Mock the 'get_connection' function, as we're going to need to
    # provide custom capabilities for each test
    _p = mock.patch('nova.virt.libvirt.host.Host.get_connection')
    self.mock_conn = _p.start()
    self.addCleanup(_p.stop)
def setUp(self):
    super(FailedEvacuateStateTests, self).setUp()

    self.useFixture(policy_fixture.RealPolicyFixture())
    self.useFixture(nova_fixtures.NeutronFixture(self))
    self.useFixture(func_fixtures.PlacementFixture())

    api_fixture = self.useFixture(
        nova_fixtures.OSAPIFixture(api_version='v2.1'))
    self.api = api_fixture.admin_api
    self.api.microversion = self.microversion

    nova.tests.unit.image.fake.stub_out_image_service(self)

    self.start_service('conductor')
    self.start_service('scheduler')

    self.addCleanup(nova.tests.unit.image.fake.FakeImageService_reset)

    self.hostname = 'host1'
    self.compute1 = self.start_service('compute', host=self.hostname)
    fake_network.set_stub_network_methods(self)

    flavors = self.api.get_flavors()
    self.flavor1 = flavors[0]
def setUp(self):
    super(TestLocalDeleteAttachedVolumes, self).setUp()
    self.useFixture(policy_fixture.RealPolicyFixture())
    # We need the CinderFixture to stub out the volume API.
    self.cinder = self.useFixture(
        nova_fixtures.CinderFixtureNewAttachFlow(self))
    # The NeutronFixture is needed to stub out validate_networks in API.
    self.useFixture(nova_fixtures.NeutronFixture(self))
    # Use the PlacementFixture to avoid annoying warnings in the logs.
    self.useFixture(func_fixtures.PlacementFixture())
    api_fixture = self.useFixture(
        nova_fixtures.OSAPIFixture(api_version='v2.1'))
    self.api = api_fixture.api
    # We want to use 2.37 for passing networks='none' on server create.
    # We also need this since you can only attach a volume to a
    # shelved-offloaded server in microversion 2.20+.
    self.api.microversion = 'latest'
    # the image fake backend needed for image discovery
    nova.tests.unit.image.fake.stub_out_image_service(self)
    self.start_service('conductor')
    self.start_service('scheduler')
    self.start_service('compute')
    # The consoleauth service is needed for deleting console tokens.
    self.start_service('consoleauth')
    self.useFixture(cast_as_call.CastAsCall(self))
    self.flavor_id = self.api.get_flavors()[0]['id']
def setUp(self):
    super(TestBootFromVolumeIsolatedHostsFilter, self).setUp()

    self.useFixture(policy_fixture.RealPolicyFixture())
    self.useFixture(nova_fixtures.NeutronFixture(self))
    self.glance = self.useFixture(nova_fixtures.GlanceFixture(self))
    self.useFixture(nova_fixtures.CinderFixture(self))
    self.useFixture(func_fixtures.PlacementFixture())

    api_fixture = self.useFixture(
        nova_fixtures.OSAPIFixture(api_version='v2.1'))
    self.api = api_fixture.admin_api

    self.start_service('conductor')

    # Add the IsolatedHostsFilter to the list of enabled filters since it
    # is not enabled by default.
    enabled_filters = CONF.filter_scheduler.enabled_filters
    enabled_filters.append('IsolatedHostsFilter')
    self.flags(
        enabled_filters=enabled_filters,
        isolated_images=[self.glance.auto_disk_config_enabled_image['id']],
        isolated_hosts=['host1'],
        restrict_isolated_hosts_to_isolated_images=True,
        group='filter_scheduler')
    self.start_service('scheduler')

    # Create two compute nodes/services so we can restrict the image
    # we'll use to one of the hosts.
    for host in ('host1', 'host2'):
        self.start_service('compute', host=host)
def setUp(self):
    super(CrossAZAttachTestCase, self).setUp()
    # Use the standard fixtures.
    self.useFixture(policy_fixture.RealPolicyFixture())
    self.useFixture(nova_fixtures.CinderFixture(self, az=self.az))
    self.useFixture(nova_fixtures.NeutronFixture(self))
    self.useFixture(func_fixtures.PlacementFixture())
    fake_image.stub_out_image_service(self)
    self.addCleanup(fake_image.FakeImageService_reset)
    # Start nova controller services.
    self.api = self.useFixture(
        nova_fixtures.OSAPIFixture(api_version='v2.1')).admin_api
    self.start_service('conductor')
    self.start_service('scheduler')
    # Start one compute service and add it to the AZ. This allows us to
    # get past the AvailabilityZoneFilter and build a server.
    self.start_service('compute', host='host1')
    agg_id = self.api.post_aggregate({'aggregate': {
        'name': self.az, 'availability_zone': self.az}})['id']
    self.api.api_post('/os-aggregates/%s/action' % agg_id,
                      {'add_host': {'host': 'host1'}})
def setUp(self):
    super(InstanceListWithDeletedServicesTestCase, self).setUp()
    self.useFixture(policy_fixture.RealPolicyFixture())
    # The NeutronFixture is needed to stub out validate_networks in API.
    self.useFixture(nova_fixtures.NeutronFixture(self))
    # We need the computes reporting into placement for the filter
    # scheduler to pick a host.
    self.useFixture(func_fixtures.PlacementFixture())

    api_fixture = self.useFixture(
        nova_fixtures.OSAPIFixture(api_version='v2.1'))
    self.api = api_fixture.api
    self.admin_api = api_fixture.admin_api
    self.admin_api.microversion = 'latest'

    # the image fake backend needed for image discovery
    fake_image.stub_out_image_service(self)
    self.addCleanup(fake_image.FakeImageService_reset)

    # Get the image before we set the microversion to latest to avoid
    # the proxy issues with GET /images in 2.36.
    self.image_id = self.api.get_images()[0]['id']
    self.api.microversion = 'latest'

    self.start_service('conductor')
    self.start_service('scheduler')
def setUp(self):
    super(TestRescheduleWithServerGroup, self).setUp()
    self.useFixture(policy_fixture.RealPolicyFixture())

    # The NeutronFixture is needed to stub out validate_networks in API.
    self.useFixture(nova_fixtures.NeutronFixture(self))

    # This stubs out the network allocation in compute.
    fake_network.set_stub_network_methods(self)

    # We need the computes reporting into placement for the filter
    # scheduler to pick a host.
    self.useFixture(func_fixtures.PlacementFixture())

    api_fixture = self.useFixture(
        nova_fixtures.OSAPIFixture(api_version='v2.1'))
    self.api = api_fixture.api
    # The admin API is used to get the server details to verify the
    # host on which the server was built.
    self.admin_api = api_fixture.admin_api

    # the image fake backend needed for image discovery
    nova.tests.unit.image.fake.stub_out_image_service(self)
    self.addCleanup(nova.tests.unit.image.fake.FakeImageService_reset)

    self.start_service('conductor')
    self.start_service('scheduler')

    # We start two compute services because we're going to fake one
    # raising RescheduledException to trigger a retry to the other
    # compute host.
    fake.set_nodes(['host1'])
    self.addCleanup(fake.restore_nodes)
    self.start_service('compute', host='host1')
    fake.set_nodes(['host2'])
    self.addCleanup(fake.restore_nodes)
    self.start_service('compute', host='host2')

    self.image_id = self.api.get_images()[0]['id']
    self.flavor_id = self.api.get_flavors()[0]['id']

    # This is our flag that we set when we hit the first host and
    # made it fail.
    self.failed_host = None
    self.attempts = 0

    def fake_validate_instance_group_policy(_self, *args, **kwargs):
        self.attempts += 1
        if self.failed_host is None:
            # Set the failed_host value to the ComputeManager.host value.
            self.failed_host = _self.host
            raise exception.RescheduledException(instance_uuid='fake',
                                                 reason='Policy violated')

    self.stub_out(
        'nova.compute.manager.ComputeManager.'
        '_validate_instance_group_policy',
        fake_validate_instance_group_policy)
def setUp(self):
    super(BootFromVolumeLargeRequestTest, self).setUp()
    self.useFixture(policy_fixture.RealPolicyFixture())
    self.useFixture(nova_fixtures.NeutronFixture(self))
    self.glance = self.useFixture(nova_fixtures.GlanceFixture(self))
    self.useFixture(nova_fixtures.CinderFixture(self))
    self.useFixture(func_fixtures.PlacementFixture())
    self.api = self.useFixture(
        nova_fixtures.OSAPIFixture(api_version='v2.1')).admin_api