def setUp(self):
    super(TestBootFromVolumeIsolatedHostsFilter, self).setUp()

    self.useFixture(policy_fixture.RealPolicyFixture())
    self.useFixture(nova_fixtures.NeutronFixture(self))
    self.useFixture(nova_fixtures.CinderFixtureNewAttachFlow(self))
    self.useFixture(func_fixtures.PlacementFixture())

    osapi = self.useFixture(
        nova_fixtures.OSAPIFixture(api_version='v2.1'))
    self.api = osapi.admin_api

    image_fakes.stub_out_image_service(self)
    self.addCleanup(image_fakes.FakeImageService_reset)

    self.start_service('conductor')
    # IsolatedHostsFilter is not in the default filter list, so enable
    # it explicitly before starting the scheduler.
    filters = CONF.filter_scheduler.enabled_filters
    filters.append('IsolatedHostsFilter')
    self.flags(
        enabled_filters=filters,
        isolated_images=[image_fakes.AUTO_DISK_CONFIG_ENABLED_IMAGE_UUID],
        isolated_hosts=['host1'],
        restrict_isolated_hosts_to_isolated_images=True,
        group='filter_scheduler')
    self.start_service('scheduler')
    # Two compute hosts so the isolated image can be restricted to
    # just one of them.
    for compute_host in ('host1', 'host2'):
        self.start_service('compute', host=compute_host)
def setUp(self):
    super(TestLocalDeleteAttachedVolumes, self).setUp()
    self.useFixture(policy_fixture.RealPolicyFixture())
    # CinderFixture stubs out the volume API.
    self.cinder = self.useFixture(
        nova_fixtures.CinderFixtureNewAttachFlow(self))
    # NeutronFixture stubs out validate_networks in the API.
    self.useFixture(nova_fixtures.NeutronFixture(self))
    # PlacementFixture keeps annoying warnings out of the logs.
    self.useFixture(func_fixtures.PlacementFixture())
    osapi = self.useFixture(
        nova_fixtures.OSAPIFixture(api_version='v2.1'))
    self.api = osapi.api
    # Microversion 2.37 is needed to pass networks='none' on server
    # create, and 2.20+ to attach a volume to a shelved-offloaded
    # server, so just run with the latest.
    self.api.microversion = 'latest'
    # Fake image backend for image discovery.
    nova.tests.unit.image.fake.stub_out_image_service(self)
    self.start_service('conductor')
    self.start_service('scheduler')
    self.start_service('compute')
    # consoleauth is needed for deleting console tokens.
    self.start_service('consoleauth')
    self.useFixture(cast_as_call.CastAsCall(self))
    self.flavor_id = self.api.get_flavors()[0]['id']
def test_create_volume_backed_server_with_zero_disk_allowed(self):
    """Admins may create a volume-backed server with a zero-disk flavor.

    Only conductor and the scheduler are started here; with no compute
    the request fails scheduling, which is fine because the point is
    that the create is permitted, not that the server goes ACTIVE.
    """
    self.useFixture(func_fixtures.PlacementFixture())
    self.useFixture(nova_fixtures.CinderFixtureNewAttachFlow(self))
    self.start_service('conductor')
    self.start_service('scheduler')
    req = self._build_minimal_create_server_request(
        self.api,
        'test_create_volume_backed_server_with_zero_disk_allowed',
        flavor_id=self.zero_disk_flavor['id'])
    req.pop('imageRef', None)
    req['block_device_mapping_v2'] = [{
        'uuid': nova_fixtures.CinderFixtureNewAttachFlow.IMAGE_BACKED_VOL,
        'source_type': 'volume',
        'destination_type': 'volume',
        'boot_index': 0
    }]
    server = self.admin_api.post_server({'server': req})
    server = self._wait_for_state_change(self.api, server, 'ERROR')
    self.assertIn('No valid host', server['fault']['message'])
def setUp(self):
    # Flags must be set before the services come up or the running
    # services won't see them.
    self.flags(allow_resize_to_same_host=True)
    super(BootFromVolumeTest, self).setUp()
    self.admin_api = self.api_fixture.admin_api
    self.useFixture(nova_fixtures.CinderFixtureNewAttachFlow(self))
def setUp(self):
    # Report every service at the latest version so multiattach
    # support checks pass.
    self.useFixture(nova_fixtures.AllServicesCurrent())
    super(TestMultiattachVolumes, self).setUp()
    self.useFixture(nova_fixtures.CinderFixtureNewAttachFlow(self))
    self.useFixture(nova_fixtures.NeutronFixture(self))
def setUp(self):
    super(TestRescheduleWithVolumesAttached, self).setUp()
    # Cinder with the new attach flow.
    self.cinder = self.useFixture(
        nova_fixtures.CinderFixtureNewAttachFlow(self))
    self.useFixture(policy_fixture.RealPolicyFixture())
    self.useFixture(nova_fixtures.NeutronFixture(self))
    fake_network.set_stub_network_methods(self)
    self.useFixture(func_fixtures.PlacementFixture())
    osapi = self.useFixture(
        nova_fixtures.OSAPIFixture(api_version='v2.1'))
    self.api = osapi.admin_api
    nova.tests.unit.image.fake.stub_out_image_service(self)
    self.addCleanup(nova.tests.unit.image.fake.FakeImageService_reset)
    self.flags(compute_driver='fake.FakeRescheduleDriver')
    self.start_service('conductor')
    self.start_service('scheduler')
    # Two computes so the instance has somewhere to be rescheduled.
    self.host1 = self.start_service('compute', host='host1')
    self.host2 = self.start_service('compute', host='host2')
    self.image_id = self.api.get_images()[0]['id']
    self.flavor_id = self.api.get_flavors()[0]['id']
def setUp(self):
    self.flags(use_neutron=True)
    self.flags(volume_usage_poll_interval=60)
    super(TestVolumeUsageNotificationSample, self).setUp()
    # Keep references to the fixtures so tests can poke at them.
    self.neutron = self.useFixture(fixtures.NeutronFixture(self))
    self.cinder = self.useFixture(fixtures.CinderFixtureNewAttachFlow(self))
def test_live_migrate_attachment_delete_fails(self):
    self.useFixture(nova_fixtures.CinderFixtureNewAttachFlow(self))
    server = self.api.post_server({
        'server': {
            'flavorRef': 1,
            'imageRef': '155d900f-4e14-4e4c-a73d-069cbf4541e6',
            'name': 'live-migrate-attachment-delete-fail-test',
            'networks': 'none',
            'block_device_mapping_v2': [
                {'boot_index': 0,
                 'uuid': uuids.broken_volume,
                 'source_type': 'volume',
                 'destination_type': 'volume'},
                {'boot_index': 1,
                 'uuid': uuids.working_volume,
                 'source_type': 'volume',
                 'destination_type': 'volume'}]}})
    server = self._wait_for_state_change(self.api, server, 'ACTIVE')
    # Live migrate to whichever compute the server is not on.
    src = server['OS-EXT-SRV-ATTR:host']
    if src == self.compute.host:
        dest = self.compute2.host
    else:
        dest = self.compute.host
    fake_delete = FakeCinderError()
    self.stub_out('nova.volume.cinder.API.attachment_delete',
                  fake_delete)
    self.api.post_server_action(
        server['id'],
        {'os-migrateLive': {'host': dest, 'block_migration': False}})
    self._wait_for_server_parameter(
        self.api, server,
        {'OS-EXT-SRV-ATTR:host': dest, 'status': 'ACTIVE'})
    # attachment_delete was attempted for both volumes and the single
    # raised error did not wedge the migration.
    self.assertEqual(2, fake_delete.call_count)
    self.assertEqual(1, fake_delete.raise_count)
def setUp(self):
    super(VolumeBackedResizeDiskDown, self).setUp()
    self.flags(allow_resize_to_same_host=True)
    self.api = self.useFixture(nova_fixtures.OSAPIFixture(
        api_version='v2.1')).admin_api
    self.useFixture(nova_fixtures.NeutronFixture(self))
    self.useFixture(nova_fixtures.CinderFixtureNewAttachFlow(self))
    self.useFixture(func_fixtures.PlacementFixture())
    fake_image.stub_out_image_service(self)
    self.addCleanup(fake_image.FakeImageService_reset)
    for svc in ('conductor', 'scheduler', 'compute'):
        self.start_service(svc)
def setUp(self):
    super(BootFromVolumeOverQuotaRaceDeleteTest, self).setUp()
    # Cinder is required for boot-from-volume.
    self.cinder_fixture = self.useFixture(
        nova_fixtures.CinderFixtureNewAttachFlow(self))
    # The usual fixture stack.
    self.useFixture(policy_fixture.RealPolicyFixture())
    self.useFixture(nova_fixtures.NeutronFixture(self))
    self.useFixture(nova_fixtures.PlacementFixture())
    self.api = self.useFixture(nova_fixtures.OSAPIFixture(
        api_version='v2.1')).api
    # 2.52 allows creating a server with tags.
    self.api.microversion = '2.52'
    for svc in ('conductor', 'scheduler', 'compute'):
        self.start_service(svc)
def setUp(self):
    super(TestNovaManagePlacementHealAllocations, self).setUp()
    self.useFixture(nova_fixtures.CinderFixtureNewAttachFlow(self))
    self.cli = manage.PlacementCommands()
    # Start a compute in every cell except cell0.
    for name, mapping in self.cell_mappings.items():
        if mapping.uuid == objects.CellMapping.CELL0_UUID:
            continue
        self._start_compute(name, cell_name=name)
    # Both hypervisors should now be reported in the API.
    hypervisors = self.admin_api.api_get(
        '/os-hypervisors').body['hypervisors']
    self.assertEqual(2, len(hypervisors))
    self.flavor = self.api.get_flavors()[0]
    self.output = StringIO()
    self.useFixture(fixtures.MonkeyPatch('sys.stdout', self.output))
    # Keep the FilterScheduler away from Placement so scheduling does
    # not create allocations; healing them is the CLI's job here.
    self.scheduler_service.manager.driver.USES_ALLOCATION_CANDIDATES = \
        False
def test_delete_with_reserved_volumes_new(self, mock_version_get=None):
    """Deleting an ERROR server removes the volume attachment nova made.

    The server never reaches a compute host, so cleanup happens via the
    API/conductor delete path rather than on a compute node.
    """
    self.cinder = self.useFixture(
        nova_fixtures.CinderFixtureNewAttachFlow(self))
    # Create a server which should go to ERROR state because we don't
    # have any active computes.
    # NOTE: read the constant off the fixture class actually in use
    # (previously referenced via the base CinderFixture class, which
    # resolved to the same inherited value but was inconsistent with
    # the sibling tests).
    volume_id = nova_fixtures.CinderFixtureNewAttachFlow.IMAGE_BACKED_VOL
    server = self._create_error_server(volume_id)
    server_id = server['id']
    # There should now exist an attachment to the volume as it was
    # created by Nova.
    self.assertIn(volume_id, self.cinder.attachments[server_id])
    # Delete this server, which should delete BDMs and remove the
    # reservation on the instances.
    self.api.delete_server(server['id'])
    # The volume should no longer have any attachments as instance
    # delete should have removed them.
    self.assertNotIn(volume_id, self.cinder.attachments[server_id])
def test_bfv_delete_build_request_pre_scheduling(self):
    cinder = self.useFixture(
        nova_fixtures.CinderFixtureNewAttachFlow(self))
    # Make the get_minimum_version_all_cells check report that
    # everything is running the latest code.
    self.useFixture(nova_fixtures.AllServicesCurrent())
    vol_id = nova_fixtures.CinderFixtureNewAttachFlow.IMAGE_BACKED_VOL
    server = self.api.post_server({
        'server': {
            'flavorRef': '1',
            'name': 'test_bfv_delete_build_request_pre_scheduling',
            'networks': 'none',
            'block_device_mapping_v2': [{
                'boot_index': 0,
                'uuid': vol_id,
                'source_type': 'volume',
                'destination_type': 'volume',
            }],
        }
    })
    # _IntegratedTestBase uses the CastAsCall fixture, so by the time
    # the server is returned all of the volume work is done.
    self.assertIn(vol_id, cinder.volume_ids_for_instance(server['id']))
    # Deleting now exercises the "local delete" path in the API, which
    # finds and deletes the build request and detaches the volume from
    # the instance.
    self.api.delete_server(server['id'])
    # Instance delete should have removed every attachment.
    self.assertNotIn(vol_id,
                     cinder.volume_ids_for_instance(server['id']))
def setUp(self):
    super(ServersSampleJson267Test, self).setUp()
    # Volume-backed servers need the cinder fixture.
    self.useFixture(nova_fixtures.CinderFixtureNewAttachFlow(self))
def setUp(self):
    super(ConfigurableMaxDiskDevicesTest, self).setUp()
    # Keep a handle on the cinder fixture for the tests.
    self.cinder = self.useFixture(
        nova_fixtures.CinderFixtureNewAttachFlow(self))
def setUp(self):
    super(VolumeAttachmentsSampleV249, self).setUp()
    # Volume attachments require the cinder fixture.
    self.useFixture(fixtures.CinderFixtureNewAttachFlow(self))
def setUp(self):
    super(RebuildVolumeBackedSameImage, self).setUp()
    # The server under test is volume-backed, hence the CinderFixture.
    self.useFixture(nova_fixtures.CinderFixtureNewAttachFlow(self))