def setUp(self): super(TestRescheduleWithVolumesAttached, self).setUp() # Use the new attach flow fixture for cinder cinder_fixture = nova_fixtures.CinderFixture(self) self.cinder = self.useFixture(cinder_fixture) self.useFixture(policy_fixture.RealPolicyFixture()) self.useFixture(nova_fixtures.NeutronFixture(self)) fake_network.set_stub_network_methods(self) self.useFixture(func_fixtures.PlacementFixture()) api_fixture = self.useFixture( nova_fixtures.OSAPIFixture(api_version='v2.1')) self.api = api_fixture.admin_api nova.tests.unit.image.fake.stub_out_image_service(self) self.addCleanup(nova.tests.unit.image.fake.FakeImageService_reset) self.flags(compute_driver='fake.FakeRescheduleDriver') self.start_service('conductor') self.start_service('scheduler') # Start two computes to allow the instance to be rescheduled self.host1 = self.start_service('compute', host='host1') self.host2 = self.start_service('compute', host='host2') self.image_id = self.api.get_images()[0]['id'] self.flavor_id = self.api.get_flavors()[0]['id']
def setUp(self): super(CrossAZAttachTestCase, self).setUp() # Use the standard fixtures. self.useFixture(policy_fixture.RealPolicyFixture()) self.useFixture(nova_fixtures.CinderFixture(self, az=self.az)) self.useFixture(nova_fixtures.NeutronFixture(self)) self.useFixture(func_fixtures.PlacementFixture()) fake_image.stub_out_image_service(self) self.addCleanup(fake_image.FakeImageService_reset) # Start nova controller services. self.api = self.useFixture( nova_fixtures.OSAPIFixture(api_version='v2.1')).admin_api self.start_service('conductor') self.start_service('scheduler') # Start one compute service and add it to the AZ. This allows us to # get past the AvailabilityZoneFilter and build a server. self.start_service('compute', host='host1') agg_id = self.api.post_aggregate( {'aggregate': { 'name': self.az, 'availability_zone': self.az }})['id'] self.api.api_post('/os-aggregates/%s/action' % agg_id, {'add_host': { 'host': 'host1' }})
def setUp(self):
    """Configure resize-to-same-host and stub out cinder."""
    # Config options must be in place before super() starts the
    # services, otherwise the running services won't see them.
    self.flags(allow_resize_to_same_host=True)
    super(BootFromVolumeTest, self).setUp()
    self.admin_api = self.api_fixture.admin_api
    cinder = nova_fixtures.CinderFixture(self)
    self.useFixture(cinder)
def setUp(self):
    """Set up two computes in two cells for cross-cell migrate tests."""
    # Use our custom weigher defined above to make sure that we have
    # a predictable scheduling sort order during server create.
    self.flags(weight_classes=[__name__ + '.HostNameWeigher'],
               group='filter_scheduler')
    super(TestMultiCellMigrate, self).setUp()
    self.cinder = self.useFixture(nova_fixtures.CinderFixture(self))
    self._enable_cross_cell_resize()
    self.created_images = []  # list of image IDs created during resize

    # Adjust the polling interval and timeout for long RPC calls.
    self.flags(rpc_response_timeout=1)
    self.flags(long_rpc_timeout=60)

    # Set up 2 compute services in different cells
    self.host_to_cell_mappings = {'host1': 'cell1', 'host2': 'cell2'}
    # Iterate in sorted order so host/cell setup is deterministic.
    for host in sorted(self.host_to_cell_mappings):
        cell_name = self.host_to_cell_mappings[host]
        # Start the compute service on the given host in the given cell.
        self._start_compute(host, cell_name=cell_name)
        # Create an aggregate where the AZ name is the cell name.
        agg_id = self._create_aggregate(
            cell_name, availability_zone=cell_name)
        # Add the host to the aggregate.
        body = {'add_host': {'host': host}}
        self.admin_api.post_aggregate_action(agg_id, body)
def test_create_volume_backed_server_with_zero_disk_allowed(self):
    """Admins may boot a volume-backed server with a zero-root-disk flavor.

    Only conductor and the scheduler are started, so scheduling fails
    with NoValidHost; we only care that the create request itself is
    accepted, not that a server actually builds.
    """
    self.useFixture(func_fixtures.PlacementFixture())
    self.useFixture(nova_fixtures.CinderFixture(self))
    self.start_service('conductor')
    self.start_service('scheduler')

    request = self._build_minimal_create_server_request(
        'test_create_volume_backed_server_with_zero_disk_allowed',
        flavor_id=self.zero_disk_flavor['id'])
    # Boot from a volume rather than an image.
    request.pop('imageRef', None)
    request['block_device_mapping_v2'] = [{
        'boot_index': 0,
        'destination_type': 'volume',
        'source_type': 'volume',
        'uuid': nova_fixtures.CinderFixture.IMAGE_BACKED_VOL,
    }]

    server = self.admin_api.post_server({'server': request})
    server = self._wait_for_state_change(server, 'ERROR')
    self.assertIn('No valid host', server['fault']['message'])
def setUp(self):
    """Set up fixtures/services to test local delete of attached volumes."""
    super(TestLocalDeleteAttachedVolumes, self).setUp()
    self.useFixture(nova_fixtures.RealPolicyFixture())
    # We need the CinderFixture to stub out the volume API.
    self.cinder = self.useFixture(nova_fixtures.CinderFixture(self))
    # The NeutronFixture is needed to stub out validate_networks in API.
    self.useFixture(nova_fixtures.NeutronFixture(self))
    # Use the PlacementFixture to avoid annoying warnings in the logs.
    self.useFixture(func_fixtures.PlacementFixture())
    api_fixture = self.useFixture(
        nova_fixtures.OSAPIFixture(api_version='v2.1'))
    self.api = api_fixture.api
    # We want to use 2.37 for passing networks='none' on server create.
    # We also need this since you can only attach a volume to a
    # shelved-offloaded server in microversion 2.20+.
    self.api.microversion = 'latest'
    # the image fake backend needed for image discovery
    self.useFixture(nova_fixtures.GlanceFixture(self))
    self.start_service('conductor')
    self.start_service('scheduler')
    self.start_service('compute')
    # Turn RPC casts into calls so the test flow is synchronous.
    self.useFixture(nova_fixtures.CastAsCallFixture(self))
    self.flavor_id = self.api.get_flavors()[0]['id']
def setUp(self):
    """Set up neutron/cinder fixtures for the notification sample tests."""
    self.flags(use_neutron=True)
    super(TestInstanceNotificationSample, self).setUp()
    # useFixture() returns the fixture it started, so stash the started
    # fixtures for the tests to inspect later.
    self.neutron = self.useFixture(fixtures.NeutronFixture(self))
    self.cinder = self.useFixture(fixtures.CinderFixture(self))
def setUp(self):
    """Set up fixtures and the volume usage polling interval."""
    # Poll volume usage once a minute.
    self.flags(volume_usage_poll_interval=60)
    super(TestVolumeUsageNotificationSample, self).setUp()
    # useFixture() returns the fixture it started, so stash the started
    # fixtures for the tests to inspect later.
    self.neutron = self.useFixture(fixtures.NeutronFixture(self))
    self.cinder = self.useFixture(fixtures.CinderFixture(self))
def setUp(self):
    """Start services and enable the IsolatedHostsFilter for BFV tests."""
    super(TestBootFromVolumeIsolatedHostsFilter, self).setUp()
    self.useFixture(policy_fixture.RealPolicyFixture())
    self.useFixture(nova_fixtures.NeutronFixture(self))
    self.glance = self.useFixture(nova_fixtures.GlanceFixture(self))
    self.useFixture(nova_fixtures.CinderFixture(self))
    self.useFixture(func_fixtures.PlacementFixture())
    api_fixture = self.useFixture(
        nova_fixtures.OSAPIFixture(api_version='v2.1'))
    self.api = api_fixture.admin_api
    self.start_service('conductor')
    # Add the IsolatedHostsFilter to the list of enabled filters since it
    # is not enabled by default.
    enabled_filters = CONF.filter_scheduler.enabled_filters
    enabled_filters.append('IsolatedHostsFilter')
    # Note that the filter config is set before the scheduler service
    # is started below.
    self.flags(
        enabled_filters=enabled_filters,
        isolated_images=[self.glance.auto_disk_config_enabled_image['id']],
        isolated_hosts=['host1'],
        restrict_isolated_hosts_to_isolated_images=True,
        group='filter_scheduler')
    self.start_service('scheduler')
    # Create two compute nodes/services so we can restrict the image
    # we'll use to one of the hosts.
    for host in ('host1', 'host2'):
        self.start_service('compute', host=host)
def setUp(self):
    """Start services and enable the IsolatedHostsFilter for BFV tests."""
    super(TestBootFromVolumeIsolatedHostsFilter, self).setUp()
    self.useFixture(policy_fixture.RealPolicyFixture())
    self.useFixture(nova_fixtures.NeutronFixture(self))
    self.useFixture(nova_fixtures.CinderFixture(self))
    self.useFixture(func_fixtures.PlacementFixture())
    api_fixture = self.useFixture(
        nova_fixtures.OSAPIFixture(api_version='v2.1'))
    self.api = api_fixture.admin_api
    # Stub the image service; reset the fake backend between tests.
    image_fakes.stub_out_image_service(self)
    self.addCleanup(image_fakes.FakeImageService_reset)
    self.start_service('conductor')
    # Add the IsolatedHostsFilter to the list of enabled filters since it
    # is not enabled by default.
    enabled_filters = CONF.filter_scheduler.enabled_filters
    enabled_filters.append('IsolatedHostsFilter')
    # Note that the filter config is set before the scheduler service
    # is started below.
    self.flags(
        enabled_filters=enabled_filters,
        isolated_images=[image_fakes.AUTO_DISK_CONFIG_ENABLED_IMAGE_UUID],
        isolated_hosts=['host1'],
        restrict_isolated_hosts_to_isolated_images=True,
        group='filter_scheduler')
    self.start_service('scheduler')
    # Create two compute nodes/services so we can restrict the image
    # we'll use to one of the hosts.
    for host in ('host1', 'host2'):
        self.start_service('compute', host=host)
def test_bfv_delete_build_request_pre_scheduling_ocata(self, mock_get):
    """Delete a volume-backed server while only its build request exists.

    The delete should take the API "local delete" path, removing the
    build request and releasing the volume reservation.
    """
    cinder = self.useFixture(nova_fixtures.CinderFixture(self))
    volume_id = nova_fixtures.CinderFixture.IMAGE_BACKED_VOL
    server = self.api.post_server({
        'server': {
            'flavorRef': '1',
            'name': 'test_bfv_delete_build_request_pre_scheduling',
            'networks': 'none',
            'block_device_mapping_v2': [
                {
                    'boot_index': 0,
                    'uuid': volume_id,
                    'source_type': 'volume',
                    'destination_type': 'volume'
                },
            ]
        }
    })
    # Since _IntegratedTestBase uses the CastAsCall fixture, when we
    # get the server back we know all of the volume stuff should be done.
    self.assertIn(volume_id, cinder.reserved_volumes)
    # Now delete the server, which should go through the "local delete"
    # code in the API, find the build request and delete it along with
    # detaching the volume from the instance.
    self.api.delete_server(server['id'])
    # The volume should no longer have any attachments as instance delete
    # should have removed them.
    self.assertNotIn(volume_id, cinder.reserved_volumes)
def setUp(self):
    """Set up fixtures for multi-compute notification sample tests."""
    self.flags(use_neutron=True)
    # Include block device mapping details in the notification payloads.
    self.flags(bdms_in_notifications='True', group='notifications')
    super(TestInstanceNotificationSampleWithMultipleCompute, self).setUp()
    # useFixture() returns the fixture it started, so stash the started
    # fixtures for the tests to inspect later.
    self.neutron = self.useFixture(fixtures.NeutronFixture(self))
    self.cinder = self.useFixture(fixtures.CinderFixture(self))
def setUp(self):
    """Set up the standard fixtures and an admin API client.

    Note that no nova services are started here.
    """
    super(BootFromVolumeLargeRequestTest, self).setUp()
    self.useFixture(policy_fixture.RealPolicyFixture())
    self.useFixture(nova_fixtures.NeutronFixture(self))
    self.glance = self.useFixture(nova_fixtures.GlanceFixture(self))
    self.useFixture(nova_fixtures.CinderFixture(self))
    self.useFixture(func_fixtures.PlacementFixture())
    self.api = self.useFixture(
        nova_fixtures.OSAPIFixture(api_version='v2.1')).admin_api
def setUp(self):
    """Set up the standard fixtures and an admin API client.

    Note that no nova services are started here.
    """
    super(BootFromVolumeLargeRequestTest, self).setUp()
    self.useFixture(policy_fixture.RealPolicyFixture())
    self.useFixture(nova_fixtures.NeutronFixture(self))
    self.useFixture(nova_fixtures.CinderFixture(self))
    self.useFixture(func_fixtures.PlacementFixture())
    # Stub the image service; reset the fake backend between tests.
    self.image_service = fake_image.stub_out_image_service(self)
    self.addCleanup(fake_image.FakeImageService_reset)
    self.api = self.useFixture(
        nova_fixtures.OSAPIFixture(api_version='v2.1')).admin_api
def test_live_migrate_terminate_connection_fails(self, _):
    """Live migration completes despite terminate_connection failing.

    Boots a server with two volumes, then live migrates it while
    cinder's terminate_connection is stubbed with a callable that
    raises on one of its calls (per the count assertions below).
    """
    self.useFixture(nova_fixtures.CinderFixture(self))
    server = self.api.post_server({
        'server': {
            'flavorRef': 1,
            'imageRef': '155d900f-4e14-4e4c-a73d-069cbf4541e6',
            'name': 'live-migrate-terminate-connection-fail-test',
            'networks': 'none',
            'block_device_mapping_v2': [{
                'boot_index': 0,
                'uuid': uuids.broken_volume,
                'source_type': 'volume',
                'destination_type': 'volume'
            }, {
                'boot_index': 1,
                'uuid': uuids.working_volume,
                'source_type': 'volume',
                'destination_type': 'volume'
            }]
        }
    })
    server = self._wait_for_state_change(self.api, server, 'ACTIVE')

    # Target whichever compute the server did NOT land on.
    source = server['OS-EXT-SRV-ATTR:host']
    if source == self.compute.host:
        dest = self.compute2.host
    else:
        dest = self.compute.host

    post = {
        'os-migrateLive': {
            'host': dest,
            'block_migration': False,
        }
    }

    stub_terminate_connection = FakeCinderError()
    self.stub_out('nova.volume.cinder.API.terminate_connection',
                  stub_terminate_connection)
    self.api.post_server_action(server['id'], post)
    # Live migration should complete despite a volume failing to detach.
    # Waiting for ACTIVE on dest is essentially an assert for just that.
    self._wait_for_server_parameter(self.api, server, {
        'OS-EXT-SRV-ATTR:host': dest,
        'status': 'ACTIVE'
    })
    # terminate_connection was called for both volumes; one call raised.
    self.assertEqual(2, stub_terminate_connection.call_count)
    self.assertEqual(1, stub_terminate_connection.raise_count)
def test_live_migrate_attachment_delete_fails(self):
    """Live migration completes despite attachment_delete failing.

    Boots a server with two volumes, then live migrates it while
    cinder's attachment_delete is stubbed with a callable that raises
    on one of its calls (per the count assertions below).
    """
    self.useFixture(nova_fixtures.CinderFixture(self))
    server = self.api.post_server({
        'server': {
            'flavorRef': 1,
            'imageRef': '155d900f-4e14-4e4c-a73d-069cbf4541e6',
            'name': 'live-migrate-attachment-delete-fail-test',
            'networks': 'none',
            'block_device_mapping_v2': [{
                'boot_index': 0,
                'uuid': uuids.broken_volume,
                'source_type': 'volume',
                'destination_type': 'volume'
            }, {
                'boot_index': 1,
                'uuid': uuids.working_volume,
                'source_type': 'volume',
                'destination_type': 'volume'
            }]
        }
    })
    server = self._wait_for_state_change(self.api, server, 'ACTIVE')

    # Target whichever compute the server did NOT land on.
    source = server['OS-EXT-SRV-ATTR:host']
    if source == self.compute.host:
        dest = self.compute2.host
    else:
        dest = self.compute.host

    post = {
        'os-migrateLive': {
            'host': dest,
            'block_migration': False,
        }
    }

    stub_attachment_delete = FakeCinderError()
    self.stub_out('nova.volume.cinder.API.attachment_delete',
                  stub_attachment_delete)
    self.api.post_server_action(server['id'], post)
    # The migration should complete despite the attachment delete
    # failure; landing ACTIVE on dest asserts exactly that.
    self._wait_for_server_parameter(self.api, server, {
        'OS-EXT-SRV-ATTR:host': dest,
        'status': 'ACTIVE'
    })
    # attachment_delete was called for both volumes; one call raised.
    self.assertEqual(2, stub_attachment_delete.call_count)
    self.assertEqual(1, stub_attachment_delete.raise_count)
def setUp(self):
    """Wire up the standard fixtures shared by integrated tests."""
    super(_IntegratedTestBase, self).setUp()
    # CastAsCall turns RPC casts into calls so tests run synchronously.
    self.useFixture(cast_as_call.CastAsCall(self))
    self.placement = self.useFixture(func_fixtures.PlacementFixture()).api
    self.neutron = self.useFixture(nova_fixtures.NeutronFixture(self))
    self.cinder = self.useFixture(nova_fixtures.CinderFixture(self))
    self.glance = self.useFixture(nova_fixtures.GlanceFixture(self))
    self.policy = self.useFixture(policy_fixture.RealPolicyFixture())
    # Capture notifications for assertions; reset between tests.
    fake_notifier.stub_notifier(self)
    self.addCleanup(fake_notifier.reset)
    self._setup_services()
def setUp(self):
    """Set up services for volume-backed resize-down testing."""
    super(VolumeBackedResizeDiskDown, self).setUp()
    # Only one compute is started, so allow resizing to the same host.
    self.flags(allow_resize_to_same_host=True)
    api_fixture = self.useFixture(
        nova_fixtures.OSAPIFixture(api_version='v2.1'))
    self.api = api_fixture.admin_api
    self.useFixture(nova_fixtures.NeutronFixture(self))
    self.useFixture(nova_fixtures.GlanceFixture(self))
    self.useFixture(nova_fixtures.CinderFixture(self))
    self.useFixture(func_fixtures.PlacementFixture())
    self.start_service('conductor')
    self.start_service('scheduler')
    self.start_service('compute')
def setUp(self): super(CrossAZAttachTestCase, self).setUp() # Use the standard fixtures. self.useFixture(policy_fixture.RealPolicyFixture()) self.useFixture(nova_fixtures.CinderFixture(self, az=self.az)) self.useFixture(nova_fixtures.NeutronFixture(self)) self.useFixture(func_fixtures.PlacementFixture()) fake_image.stub_out_image_service(self) self.addCleanup(fake_image.FakeImageService_reset) # Start nova controller services. self.api = self.useFixture( nova_fixtures.OSAPIFixture(api_version='v2.1')).admin_api self.start_service('conductor') self.start_service('scheduler') # Start one compute service. self.start_service('compute', host='host1')
def setUp(self): super(BootFromVolumeOverQuotaRaceDeleteTest, self).setUp() # We need the cinder fixture for boot from volume testing. self.cinder_fixture = self.useFixture( nova_fixtures.CinderFixture(self)) # Use the standard fixtures. self.useFixture(policy_fixture.RealPolicyFixture()) self.useFixture(nova_fixtures.NeutronFixture(self)) self.useFixture(func_fixtures.PlacementFixture()) self.api = self.useFixture(nova_fixtures.OSAPIFixture( api_version='v2.1')).api # Use microversion 2.52 which allows creating a server with tags. self.api.microversion = '2.52' self.start_service('conductor') self.start_service('scheduler') self.start_service('compute')
def setUp(self):
    """Wire up the standard fixtures shared by integrated tests."""
    super(_IntegratedTestBase, self).setUp()
    # Stub the image service; the fake backend is reset in cleanup below.
    self.fake_image_service =\
        nova.tests.unit.image.fake.stub_out_image_service(self)
    # CastAsCall turns RPC casts into calls so tests run synchronously.
    self.useFixture(cast_as_call.CastAsCall(self))
    self.placement = self.useFixture(func_fixtures.PlacementFixture()).api
    self.neutron = self.useFixture(nova_fixtures.NeutronFixture(self))
    self.cinder = self.useFixture(nova_fixtures.CinderFixture(self))
    # Capture notifications for assertions; reset between tests.
    fake_notifier.stub_notifier(self)
    self.addCleanup(fake_notifier.reset)
    self._setup_services()
    self.addCleanup(nova.tests.unit.image.fake.FakeImageService_reset)
def test_delete_with_reserved_volumes(self, mock_version_get=None):
    """Deleting an ERROR server releases its reserved boot volume."""
    self.cinder = self.useFixture(nova_fixtures.CinderFixture(self))

    # With no computes running the build fails and the server lands in
    # ERROR state, leaving the volume reserved by the API.
    vol_id = nova_fixtures.CinderFixture.IMAGE_BACKED_VOL
    err_server = self._create_error_server(vol_id)

    # The API reserved the volume up front (status 'attaching'), so it
    # must be tracked in the fixture's reserved set.
    self.assertIn(vol_id, self.cinder.reserved_volumes)

    # Deleting the server should remove the BDMs and undo the
    # reservation on the volume.
    self.api.delete_server(err_server['id'])
    self.assertNotIn(vol_id, self.cinder.reserved_volumes)
def setUp(self):
    """Start a compute per cell and prepare CLI/scheduler for heal tests."""
    super(TestNovaManagePlacementHealAllocations, self).setUp()
    self.useFixture(nova_fixtures.CinderFixture(self))
    self.cli = manage.PlacementCommands()
    # We need to start a compute in each non-cell0 cell.
    for cell_name, cell_mapping in self.cell_mappings.items():
        if cell_mapping.uuid == objects.CellMapping.CELL0_UUID:
            continue
        self._start_compute(cell_name, cell_name=cell_name)
    # Make sure we have two hypervisors reported in the API.
    hypervisors = self.admin_api.api_get(
        '/os-hypervisors').body['hypervisors']
    self.assertEqual(2, len(hypervisors))
    self.flavor = self.api.get_flavors()[0]
    # Capture CLI output so tests can assert on it.
    self.output = StringIO()
    self.useFixture(fixtures.MonkeyPatch('sys.stdout', self.output))
    # We need to mock the FilterScheduler to not use Placement so that
    # allocations won't be created during scheduling and then we can heal
    # them in the CLI.
    self.scheduler_service.manager.driver.USES_ALLOCATION_CANDIDATES = \
        False
def test_delete_with_reserved_volumes_new(self):
    """Deleting an ERROR server removes its volume attachments."""
    self.cinder = self.useFixture(nova_fixtures.CinderFixture(self))

    # With no computes running the build fails and the server lands in
    # ERROR state, with the volume attachment created by the API still
    # in place.
    vol_id = nova_fixtures.CinderFixture.IMAGE_BACKED_VOL
    err_server = self._create_error_server(vol_id)
    server_id = err_server['id']

    # Nova created an attachment for the volume during the build.
    self.assertIn(vol_id,
                  self.cinder.volume_ids_for_instance(server_id))

    # Deleting the server should remove the BDMs and delete the
    # attachment on the volume.
    self.api.delete_server(err_server['id'])
    self.assertNotIn(vol_id,
                     self.cinder.volume_ids_for_instance(server_id))
def setUp(self):
    """Set up two computes in two cells for cross-cell migrate tests."""
    # Use our custom weigher defined above to make sure that we have
    # a predictable scheduling sort order during server create.
    self.flags(weight_classes=[__name__ + '.HostNameWeigher'],
               group='filter_scheduler')
    super(TestMultiCellMigrate, self).setUp()
    self.cinder = self.useFixture(nova_fixtures.CinderFixture(self))
    self._enable_cross_cell_resize()

    # Adjust the polling interval and timeout for long RPC calls.
    self.flags(rpc_response_timeout=1)
    self.flags(long_rpc_timeout=60)

    # Set up 2 compute services in different cells
    self.host_to_cell_mappings = {'host1': 'cell1', 'host2': 'cell2'}
    # TODO(mriedem): Should probably put the hosts in separate AZs so we
    # can assert the instance.availability_zone value is updated when it
    # moves.
    # Iterate in sorted order so host/cell setup is deterministic.
    for host in sorted(self.host_to_cell_mappings):
        self._start_compute(host,
                            cell_name=self.host_to_cell_mappings[host])
def setUp(self):
    """Stub out the cinder volume API for the sample tests."""
    super(VolumeAttachmentsSampleV249OldCinderFlow, self).setUp()
    cinder = fixtures.CinderFixture(self)
    self.useFixture(cinder)
def setUp(self):
    """Set up the multiattach test with all services at current versions."""
    # Everything is treated as upgraded to the latest code so the
    # multiattach support checks pass; this must be in place before
    # super() runs.
    self.useFixture(nova_fixtures.AllServicesCurrent())
    super(TestMultiattachVolumes, self).setUp()
    cinder = nova_fixtures.CinderFixture(self)
    self.useFixture(cinder)
def setUp(self):
    """Stub out cinder and keep a handle on the fixture for the tests."""
    super(TestLocalDeleteAttachedVolumesOldFlow, self).setUp()
    cinder = nova_fixtures.CinderFixture(self)
    self.cinder = self.useFixture(cinder)
def setUp(self):
    """Stub out the cinder volume API for these samples."""
    super(ServersSampleJson267Test, self).setUp()
    cinder = nova_fixtures.CinderFixture(self)
    self.useFixture(cinder)
def setUp(self):
    """Stub out cinder and keep a handle on the fixture for the tests."""
    super(ConfigurableMaxDiskDevicesTest, self).setUp()
    cinder_fixture = nova_fixtures.CinderFixture(self)
    self.cinder = self.useFixture(cinder_fixture)