def setUp(self):
    """Create the config drive controller with external services stubbed."""
    super(ConfigDriveTestV21, self).setUp()
    self.Controller = self._get_config_drive_controller()
    # Stub out networking, rate limiting and the image service so the
    # controller under test never reaches real backends.
    fakes.stub_out_networking(self.stubs)
    fakes.stub_out_rate_limiting(self.stubs)
    fake.stub_out_image_service(self.stubs)
    self._setup_wsgi()
def setUp(self): super(_FakeDriverBackendTestCase, self).setUp() # TODO(sdague): it would be nice to do this in a way that only # the relevant backends where replaced for tests, though this # should not harm anything by doing it for all backends fake_image.stub_out_image_service(self) self._setup_fakelibvirt()
def setUp(self):
    """Wire up a v2.1 servers API with DB, network and image stubs."""
    super(ServerActionsControllerTest, self).setUp()
    CONF.set_override('host', 'localhost', group='glance')
    # Return an ACTIVE instance so server action requests pass the
    # task/vm state checks in the compute API.
    self.stubs.Set(db, 'instance_get_by_uuid',
                   fakes.fake_instance_get(vm_state=vm_states.ACTIVE,
                                           host='fake_host'))
    self.stubs.Set(db, 'instance_update_and_get_original',
                   instance_update_and_get_original)
    fakes.stub_out_nw_api(self.stubs)
    fakes.stub_out_compute_api_snapshot(self.stubs)
    fake.stub_out_image_service(self.stubs)
    self.flags(allow_instance_snapshots=True,
               enable_instance_password=True)
    self.uuid = FAKE_UUID
    self.url = '/servers/%s/action' % self.uuid
    # Arbitrary but fixed image ref used by the action tests.
    self._image_href = '155d900f-4e14-4e4c-a73d-069cbf4541e6'
    ext_info = plugins.LoadedExtensionInfo()
    self.controller = servers.ServersController(extension_info=ext_info)
    self.compute_api = self.controller.compute_api
    self.context = context.RequestContext('fake', 'fake')
    self.app = fakes.wsgi_app_v21(init_only=('servers',),
                                  fake_auth_context=self.context)
def setUp(self):
    """Start controller services and two computes in separate AZs."""
    super(TestAvailabilityZoneScheduling, self).setUp()
    self.useFixture(policy_fixture.RealPolicyFixture())
    self.useFixture(nova_fixtures.NeutronFixture(self))
    self.useFixture(func_fixtures.PlacementFixture())
    api_fixture = self.useFixture(nova_fixtures.OSAPIFixture(
        api_version='v2.1'))
    self.api = api_fixture.admin_api
    self.api.microversion = 'latest'
    fake_image.stub_out_image_service(self)
    self.addCleanup(fake_image.FakeImageService_reset)
    self.start_service('conductor')
    self.start_service('scheduler')
    # Start two compute services in separate zones.
    self._start_host_in_zone('host1', 'zone1')
    self._start_host_in_zone('host2', 'zone2')
    # Two distinct flavors for the scheduling tests below.
    flavors = self.api.get_flavors()
    self.flavor1 = flavors[0]['id']
    self.flavor2 = flavors[1]['id']
def setUp(self):
    """Create the config drive controller with external services stubbed."""
    super(ConfigDriveTestV21, self).setUp()
    self.Controller = self._get_config_drive_controller()
    # Stub out networking, rate limiting and the image service so the
    # controller under test never reaches real backends.
    fakes.stub_out_networking(self.stubs)
    fakes.stub_out_rate_limiting(self.stubs)
    fake.stub_out_image_service(self.stubs)
    self._setup_wsgi()
def test_shelve(self):
    """Ensure an instance can be shelved."""
    instance = self._create_fake_instance_obj({'display_name': 'vm01'})
    self.assertIsNone(instance['task_state'])

    def stub_init(service):
        # The real _FakeImageService.__init__() pre-populates fake
        # images; keep the store empty so only this test's snapshot
        # can appear.
        service.images = {}

    def stub_create(service, ctxt, metadata, data=None):
        # The snapshot name must be derived from the display name.
        self.assertEqual(metadata['name'], 'vm01-shelved')
        metadata['id'] = '8b24ed3f-ee57-43bc-bc2e-fb2e9482bc42'
        return metadata

    fake_image.stub_out_image_service(self)
    self.stubs.Set(fake_image._FakeImageService, '__init__', stub_init)
    self.stubs.Set(fake_image._FakeImageService, 'create', stub_create)

    self.compute_api.shelve(self.context, instance)

    self.assertEqual(instance.task_state, task_states.SHELVING)
    db.instance_destroy(self.context, instance['uuid'])
def setUp(self):
    """Build a two-host deployment with the IsolatedHostsFilter enabled."""
    super(TestBootFromVolumeIsolatedHostsFilter, self).setUp()

    self.useFixture(policy_fixture.RealPolicyFixture())
    self.useFixture(nova_fixtures.NeutronFixture(self))
    self.useFixture(nova_fixtures.CinderFixture(self))
    self.useFixture(func_fixtures.PlacementFixture())

    fixture = self.useFixture(
        nova_fixtures.OSAPIFixture(api_version='v2.1'))
    self.api = fixture.admin_api

    image_fakes.stub_out_image_service(self)
    self.addCleanup(image_fakes.FakeImageService_reset)

    self.start_service('conductor')

    # IsolatedHostsFilter is not enabled by default, so append it to the
    # currently configured filter list before starting the scheduler.
    filters = CONF.filter_scheduler.enabled_filters
    filters.append('IsolatedHostsFilter')
    self.flags(
        enabled_filters=filters,
        isolated_images=[image_fakes.AUTO_DISK_CONFIG_ENABLED_IMAGE_UUID],
        isolated_hosts=['host1'],
        restrict_isolated_hosts_to_isolated_images=True,
        group='filter_scheduler')
    self.start_service('scheduler')

    # Create two compute nodes/services so we can restrict the image
    # we'll use to one of the hosts.
    for compute_host in ('host1', 'host2'):
        self.start_service('compute', host=compute_host)
def setUp(self):
    """Start controller services plus three named compute hosts."""
    super(NonPersistentFieldNotResetTest, self).setUp()

    self.useFixture(policy_fixture.RealPolicyFixture())
    self.useFixture(nova_fixtures.NeutronFixture(self))
    self.useFixture(func_fixtures.PlacementFixture())

    fixture = self.useFixture(
        nova_fixtures.OSAPIFixture(api_version='v2.1'))
    self.api = fixture.admin_api
    # Use the latest microversion available to make sure something does
    # not regress in new microversions; cap as necessary.
    self.api.microversion = 'latest'

    image_fake.stub_out_image_service(self)
    self.addCleanup(image_fake.FakeImageService_reset)

    self.start_service('conductor')
    self.start_service('scheduler')

    self.compute = {}
    self.addCleanup(fake.restore_nodes)
    # Give each compute service a nodename matching its host name.
    for host in ('host1', 'host2', 'host3'):
        fake.set_nodes([host])
        self.compute[host] = self.start_service('compute', host=host)

    self.ctxt = context.get_admin_context()
def setUp(self):
    """Set up a libvirt compute with image/notifier fakes for evacuate."""
    super(_LibvirtEvacuateTest, self).setUp()
    self.useFixture(nova_fixtures.NeutronFixture(self))
    fake_network.set_stub_network_methods(self)
    self.useFixture(nova_fixtures.PlacementFixture())
    api_fixture = self.useFixture(
        nova_fixtures.OSAPIFixture(api_version='v2.1'))
    self.api = api_fixture.admin_api
    # force_down and evacuate without onSharedStorage
    self.api.microversion = '2.14'
    fake_image.stub_out_image_service(self)
    self.addCleanup(fake_image.FakeImageService_reset)
    fake_notifier.stub_notifier(self)
    self.addCleanup(fake_notifier.reset)
    self.useFixture(fakelibvirt.FakeLibvirtFixture())
    self.start_service('conductor')
    self.start_service('scheduler')
    self.flags(compute_driver='libvirt.LibvirtDriver')
    self.compute0 = self._start_compute('compute0')
    # Choice of image id and flavor are arbitrary. Fixed for consistency.
    self.image_id = fake_image.AUTO_DISK_CONFIG_ENABLED_IMAGE_UUID
    self.flavor_id = next(
        flavor for flavor in self.api.get_flavors()
        if flavor['name'] == 'm1.tiny')['id']
def setUp(self):
    """Stub externals and start two computes for the cold-migrate test."""
    super(MissingReqSpecInstanceGroupUUIDTestCase, self).setUp()

    # Stub out external dependencies.
    self.useFixture(nova_fixtures.NeutronFixture(self))
    self.useFixture(nova_fixtures.PlacementFixture())
    fake_image.stub_out_image_service(self)
    self.addCleanup(fake_image.FakeImageService_reset)

    # Configure the API to allow resizing to the same host so we can keep
    # the number of computes down to two in the test.
    self.flags(allow_resize_to_same_host=True)

    # Start nova controller services.
    self.api = self.useFixture(
        nova_fixtures.OSAPIFixture(api_version='v2.1')).admin_api
    self.start_service('conductor')
    # Use our custom weigher defined above to make sure that we have
    # a predictable scheduling sort order.
    self.flags(weight_classes=[__name__ + '.HostNameWeigher'],
               group='filter_scheduler')
    self.start_service('scheduler')

    # Start two computes, one where the server will be created and
    # another where we'll cold migrate it.
    self.addCleanup(fake_virt.restore_nodes)
    self.computes = {}  # compute service per host name
    for hostname in ('host1', 'host2'):
        fake_virt.set_nodes([hostname])
        self.computes[hostname] = self.start_service(
            'compute', host=hostname)
def setUp(self):
    """Shared implementation for tests below that create instance."""
    super(MultiCreateExtensionTestV21, self).setUp()
    self.flags(enable_instance_password=True, group='api')
    self.instance_cache_num = 0
    # Caches let the DB stubs below hand back the instances created by
    # the compute API without touching a real database.
    self.instance_cache_by_id = {}
    self.instance_cache_by_uuid = {}

    # Network API needs to be stubbed out before creating the controllers.
    fakes.stub_out_nw_api(self)

    self.controller = servers_v21.ServersController()

    def instance_get(context, instance_id):
        """Stub for compute/api create() pulling in instance after
        scheduling
        """
        return self.instance_cache_by_id[instance_id]

    def instance_update(context, uuid, values):
        # Apply updates to the cached instance in place.
        instance = self.instance_cache_by_uuid[uuid]
        instance.update(values)
        return instance

    def server_update(context, instance_uuid, params,
                      columns_to_join=None):
        # Mimic instance_update_and_get_original's (new, old) tuple.
        inst = self.instance_cache_by_uuid[instance_uuid]
        inst.update(params)
        return (inst, inst)

    def fake_method(*args, **kwargs):
        pass

    def project_get_networks(context, user_id):
        return dict(id='1', host='localhost')

    def create_db_entry_for_new_instance(*args, **kwargs):
        # args[4] is the instance object being persisted; cache it so
        # the stubs above can find it by uuid later.
        instance = args[4]
        self.instance_cache_by_uuid[instance.uuid] = instance
        return instance

    fakes.stub_out_key_pair_funcs(self)
    fake.stub_out_image_service(self)
    self.stub_out('nova.db.api.instance_add_security_group',
                  return_security_group)
    self.stub_out('nova.db.api.project_get_networks',
                  project_get_networks)
    self.stub_out('nova.compute.api.API.create_db_entry_for_new_instance',
                  create_db_entry_for_new_instance)
    self.stub_out('nova.db.api.instance_system_metadata_update',
                  fake_method)
    self.stub_out('nova.db.api.instance_get', instance_get)
    self.stub_out('nova.db.api.instance_update', instance_update)
    self.stub_out('nova.db.api.instance_update_and_get_original',
                  server_update)
    self.stub_out('nova.network.manager.VlanManager.allocate_fixed_ip',
                  fake_method)

    self.req = fakes.HTTPRequest.blank('')
def setUp(self):
    """Start services without a compute so servers error into cell0."""
    super(ServerListLimitMarkerCell0Test, self).setUp()
    self.useFixture(policy_fixture.RealPolicyFixture())
    # The NeutronFixture is needed to stub out validate_networks in API.
    self.useFixture(nova_fixtures.NeutronFixture(self))
    api_fixture = self.useFixture(nova_fixtures.OSAPIFixture(
        api_version='v2.1'))
    self.api = api_fixture.api
    # the image fake backend needed for image discovery
    image_fake.stub_out_image_service(self)
    self.addCleanup(image_fake.FakeImageService_reset)
    # We have to get the image before we use 2.latest otherwise we'll get
    # a 404 on the /images proxy API because of 2.36.
    self.image_id = self.api.get_images()[0]['id']
    # Use the latest microversion available to make sure something does
    # not regress in new microversions; cap as necessary.
    self.api.microversion = 'latest'
    self.start_service('conductor')
    self.flags(driver='chance_scheduler', group='scheduler')
    self.start_service('scheduler')
    # We don't start the compute service because we want NoValidHost so
    # all of the instances go into ERROR state and get put into cell0.
    self.useFixture(cast_as_call.CastAsCall(self))
def test_shelve(self):
    """Ensure an instance can be shelved."""
    instance = self._create_fake_instance_obj({'display_name': 'vm01'})
    self.assertIsNone(instance['task_state'])

    def stub_init(service):
        # The real _FakeImageService.__init__() pre-populates fake
        # images; keep the store empty so only this test's snapshot
        # can appear.
        service.images = {}

    def stub_create(service, ctxt, metadata, data=None):
        # The snapshot name must be derived from the display name.
        self.assertEqual(metadata['name'], 'vm01-shelved')
        metadata['id'] = '8b24ed3f-ee57-43bc-bc2e-fb2e9482bc42'
        return metadata

    fake_image.stub_out_image_service(self)
    self.stubs.Set(fake_image._FakeImageService, '__init__', stub_init)
    self.stubs.Set(fake_image._FakeImageService, 'create', stub_create)

    self.compute_api.shelve(self.context, instance)

    self.assertEqual(instance.task_state, task_states.SHELVING)
    db.instance_destroy(self.context, instance['uuid'])
def setUp(self):
    """Start a one-compute deployment whose host is a member of self.az."""
    super(CrossAZAttachTestCase, self).setUp()

    # Use the standard fixtures.
    self.useFixture(policy_fixture.RealPolicyFixture())
    self.useFixture(nova_fixtures.CinderFixture(self, az=self.az))
    self.useFixture(nova_fixtures.NeutronFixture(self))
    self.useFixture(func_fixtures.PlacementFixture())
    fake_image.stub_out_image_service(self)
    self.addCleanup(fake_image.FakeImageService_reset)

    # Start nova controller services.
    self.api = self.useFixture(
        nova_fixtures.OSAPIFixture(api_version='v2.1')).admin_api
    self.start_service('conductor')
    self.start_service('scheduler')

    # Start one compute service and add it to the AZ. This allows us to
    # get past the AvailabilityZoneFilter and build a server.
    self.start_service('compute', host='host1')
    aggregate = {'name': self.az, 'availability_zone': self.az}
    aggregate_id = self.api.post_aggregate({'aggregate': aggregate})['id']
    self.api.api_post(
        '/os-aggregates/%s/action' % aggregate_id,
        {'add_host': {'host': 'host1'}})
def setUp(self):
    """Wire up a v2.1 servers API with DB, network and image stubs."""
    super(ServerActionsControllerTest, self).setUp()
    CONF.set_override('host', 'localhost', group='glance')
    # Return an ACTIVE instance so server action requests pass the
    # task/vm state checks in the compute API.
    self.stubs.Set(
        db, 'instance_get_by_uuid',
        fakes.fake_instance_get(vm_state=vm_states.ACTIVE,
                                host='fake_host'))
    self.stubs.Set(db, 'instance_update_and_get_original',
                   instance_update_and_get_original)
    fakes.stub_out_nw_api(self.stubs)
    fakes.stub_out_compute_api_snapshot(self.stubs)
    fake.stub_out_image_service(self.stubs)
    self.flags(allow_instance_snapshots=True,
               enable_instance_password=True)
    self.uuid = FAKE_UUID
    self.url = '/servers/%s/action' % self.uuid
    # Arbitrary but fixed image ref used by the action tests.
    self._image_href = '155d900f-4e14-4e4c-a73d-069cbf4541e6'
    ext_info = plugins.LoadedExtensionInfo()
    self.controller = servers.ServersController(extension_info=ext_info)
    self.compute_api = self.controller.compute_api
    self.context = context.RequestContext('fake', 'fake')
    self.app = fakes.wsgi_app_v21(init_only=('servers', ),
                                  fake_auth_context=self.context)
def setUp(self):
    """Start two computes with a custom weigher for anti-affinity tests."""
    super(AntiAffinityMultiCreateRequest, self).setUp()
    self.useFixture(policy_fixture.RealPolicyFixture())
    self.useFixture(nova_fixtures.NeutronFixture(self))
    self.useFixture(func_fixtures.PlacementFixture())
    api_fixture = self.useFixture(
        nova_fixtures.OSAPIFixture(api_version='v2.1'))
    # The admin API is used to get the server details to verify the
    # host on which the server was built.
    self.admin_api = api_fixture.admin_api
    self.api = api_fixture.api
    image_fake.stub_out_image_service(self)
    self.addCleanup(image_fake.FakeImageService_reset)
    self.start_service('conductor')
    # Use the latest microversion available to make sure something does
    # not regress in new microversions; cap as necessary.
    self.admin_api.microversion = 'latest'
    self.api.microversion = 'latest'
    # Add our custom weigher.
    self.flags(weight_classes=[__name__ + '.HostNameWeigher'],
               group='filter_scheduler')
    # disable late check on compute node to mimic devstack.
    self.flags(disable_group_policy_check_upcall=True,
               group='workarounds')
    self.start_service('scheduler')
    # set_nodes() is called before each compute start so each service
    # reports a nodename matching its host.
    fake.set_nodes(['host1'])
    self.addCleanup(fake.restore_nodes)
    self.start_service('compute', host='host1')
    fake.set_nodes(['host2'])
    self.start_service('compute', host='host2')
def setUp(self):
    """Start controller services and two computes in separate AZs."""
    super(TestAvailabilityZoneScheduling, self).setUp()
    self.useFixture(policy_fixture.RealPolicyFixture())
    self.useFixture(nova_fixtures.NeutronFixture(self))
    self.useFixture(func_fixtures.PlacementFixture())
    api_fixture = self.useFixture(
        nova_fixtures.OSAPIFixture(api_version='v2.1'))
    self.api = api_fixture.admin_api
    self.api.microversion = 'latest'
    fake_image.stub_out_image_service(self)
    self.addCleanup(fake_image.FakeImageService_reset)
    self.start_service('conductor')
    self.start_service('scheduler')
    # Start two compute services in separate zones.
    self._start_host_in_zone('host1', 'zone1')
    self._start_host_in_zone('host2', 'zone2')
    # Two distinct flavors for the scheduling tests below.
    flavors = self.api.get_flavors()
    self.flavor1 = flavors[0]['id']
    self.flavor2 = flavors[1]['id']
def setUp(self):
    """Start controller services and two computes for migration tests."""
    super(ColdMigrateTargetHostThenLiveMigrateTest, self).setUp()
    self.useFixture(policy_fixture.RealPolicyFixture())
    self.useFixture(nova_fixtures.NeutronFixture(self))
    self.useFixture(func_fixtures.PlacementFixture())
    api_fixture = self.useFixture(
        nova_fixtures.OSAPIFixture(api_version='v2.1'))
    # The admin API is used to get the server details to verify the
    # host on which the server was built and cold/live migrate it.
    self.admin_api = api_fixture.admin_api
    self.api = api_fixture.api
    # Use the latest microversion available to make sure something does
    # not regress in new microversions; cap as necessary.
    self.admin_api.microversion = 'latest'
    self.api.microversion = 'latest'
    image_fake.stub_out_image_service(self)
    self.addCleanup(image_fake.FakeImageService_reset)
    self.start_service('conductor')
    self.start_service('scheduler')
    for host in ('host1', 'host2'):
        self.start_service('compute', host=host)
def setUp(self):
    """Start controller services plus three named compute hosts."""
    super(NonPersistentFieldNotResetTest, self).setUp()

    self.useFixture(policy_fixture.RealPolicyFixture())
    self.useFixture(nova_fixtures.NeutronFixture(self))
    self.useFixture(func_fixtures.PlacementFixture())

    fixture = self.useFixture(
        nova_fixtures.OSAPIFixture(api_version='v2.1'))
    self.api = fixture.admin_api
    # Use the latest microversion available to make sure something does
    # not regress in new microversions; cap as necessary.
    self.api.microversion = 'latest'

    image_fake.stub_out_image_service(self)
    self.addCleanup(image_fake.FakeImageService_reset)

    self.start_service('conductor')
    self.start_service('scheduler')

    self.compute = {}
    self.addCleanup(fake.restore_nodes)
    # Give each compute service a nodename matching its host name.
    for host in ('host1', 'host2', 'host3'):
        fake.set_nodes([host])
        self.compute[host] = self.start_service('compute', host=host)

    self.ctxt = context.get_admin_context()
def setUp(self):
    """Create the scheduler driver with compute and image services faked."""
    super(SchedulerTestCase, self).setUp()
    self.stubs.Set(compute_api, 'API', fakes.FakeComputeAPI)

    def fake_show(img_service, context, id, **kwargs):
        # Any truthy id resolves to a canned active image; a falsy id
        # behaves like a missing image.
        if not id:
            raise exception.ImageNotFound(image_id=id)
        return {'id': id,
                'min_disk': None,
                'min_ram': None,
                'name': 'fake_name',
                'status': 'active',
                'properties': {'kernel_id': 'fake_kernel_id',
                               'ramdisk_id': 'fake_ramdisk_id',
                               'something_else': 'meow'}}

    fake_image.stub_out_image_service(self.stubs)
    self.stubs.Set(fake_image._FakeImageService, 'show', fake_show)
    self.image_service = glance.get_default_image_service()

    self.driver = self.driver_cls()
    self.context = context.RequestContext('fake_user', 'fake_project')
    self.topic = 'fake_topic'
    self.servicegroup_api = servicegroup.API()
def setUp(self):
    """Start controller services and two computes for migration tests."""
    super(ColdMigrateTargetHostThenLiveMigrateTest, self).setUp()
    self.useFixture(policy_fixture.RealPolicyFixture())
    self.useFixture(nova_fixtures.NeutronFixture(self))
    self.useFixture(func_fixtures.PlacementFixture())
    api_fixture = self.useFixture(nova_fixtures.OSAPIFixture(
        api_version='v2.1'))
    # The admin API is used to get the server details to verify the
    # host on which the server was built and cold/live migrate it.
    self.admin_api = api_fixture.admin_api
    self.api = api_fixture.api
    # Use the latest microversion available to make sure something does
    # not regress in new microversions; cap as necessary.
    self.admin_api.microversion = 'latest'
    self.api.microversion = 'latest'
    image_fake.stub_out_image_service(self)
    self.addCleanup(image_fake.FakeImageService_reset)
    self.start_service('conductor')
    self.start_service('scheduler')
    for host in ('host1', 'host2'):
        # Make the nodename match the host for each compute service.
        fake.set_nodes([host])
        self.addCleanup(fake.restore_nodes)
        self.start_service('compute', host=host)
def setUp(self):
    """Start three computes with a minimal, name-weighted scheduler."""
    super(SchedulerOnlyChecksTargetTest, self).setUp()
    self.useFixture(policy_fixture.RealPolicyFixture())
    # The NeutronFixture is needed to stub out validate_networks in API.
    self.flags(use_neutron=True)
    self.useFixture(nova_fixtures.NeutronFixture(self))
    # We need the computes reporting into placement for the filter
    # scheduler to pick a host.
    self.useFixture(func_fixtures.PlacementFixture())
    api_fixture = self.useFixture(
        nova_fixtures.OSAPIFixture(api_version='v2.1'))
    # The admin API is used to get the server details to verify the
    # host on which the server was built.
    self.admin_api = api_fixture.admin_api
    self.api = api_fixture.api
    # the image fake backend needed for image discovery
    image_fake.stub_out_image_service(self)
    self.addCleanup(image_fake.FakeImageService_reset)
    self.start_service('conductor')
    # We have to get the image before we use 2.latest otherwise we'll get
    # a 404 on the /images proxy API because of 2.36.
    self.image_id = self.api.get_images()[0]['id']
    # Use the latest microversion available to make sure something does
    # not regress in new microversions; cap as necessary.
    self.admin_api.microversion = 'latest'
    self.api.microversion = 'latest'
    # Define a very basic scheduler that only verifies if host is down.
    self.flags(enabled_filters=['ComputeFilter'],
               group='filter_scheduler')
    # NOTE(sbauza): Use the above weigher so we are sure that
    # we prefer first host1 for the boot request and forget about any
    # other weigher.
    # Host2 should only be preferred over host3 if and only if that's the
    # only host we verify (as requested_destination does).
    self.flags(weight_classes=[__name__ + '.HostNameWeigher'],
               group='filter_scheduler')
    self.start_service('scheduler')

    # Let's now start three compute nodes as we said above.
    # set_nodes() is needed to have each compute service return a
    # different nodename, so we get two hosts in the list of candidates
    # for scheduling. Otherwise both hosts will have the same default
    # nodename "fake-mini". The host passed to start_service controls the
    # "host" attribute and set_nodes() sets the "nodename" attribute.
    # We set_nodes() to make host and nodename the same for each compute.
    fake.set_nodes(['host1'])
    self.addCleanup(fake.restore_nodes)
    self.start_service('compute', host='host1')
    fake.set_nodes(['host2'])
    self.start_service('compute', host='host2')
    fake.set_nodes(['host3'])
    self.start_service('compute', host='host3')
    self.useFixture(cast_as_call.CastAsCall(self))
def setUp(self):
    """Boot one server and wire the metadata service to resolve to it."""
    super(MetadataTest, self).setUp()
    fake_image.stub_out_image_service(self)
    self.addCleanup(fake_image.FakeImageService_reset)
    self.useFixture(nova_fixtures.NeutronFixture(self))
    self.useFixture(func_fixtures.PlacementFixture())
    self.start_service('conductor')
    self.start_service('scheduler')
    self.api = self.useFixture(
        nova_fixtures.OSAPIFixture(api_version='v2.1')).api
    self.start_service('compute')

    # create a server for the tests
    server = self._build_server(name='test')
    server = self.api.post_server({'server': server})
    self.server = self._wait_for_state_change(server, 'ACTIVE')

    self.api_fixture = self.useFixture(nova_fixtures.OSMetadataServer())
    self.md_url = self.api_fixture.md_url

    # make sure that the metadata service returns information about the
    # server we created above
    def fake_get_fixed_ip_by_address(self, ctxt, address):
        # NOTE: 'self' here is the network API instance, not the test;
        # the closure maps any address to the server booted above.
        return {'instance_uuid': server['id']}

    self.useFixture(
        fixtures.MonkeyPatch(
            'nova.network.neutron.API.get_fixed_ip_by_address',
            fake_get_fixed_ip_by_address))
def setUp(self):
    """Start services without a compute so servers error into cell0."""
    super(ServerListLimitMarkerCell0Test, self).setUp()
    self.useFixture(policy_fixture.RealPolicyFixture())
    # The NeutronFixture is needed to stub out validate_networks in API.
    self.useFixture(nova_fixtures.NeutronFixture(self))
    api_fixture = self.useFixture(
        nova_fixtures.OSAPIFixture(api_version='v2.1'))
    self.api = api_fixture.api
    # the image fake backend needed for image discovery
    image_fake.stub_out_image_service(self)
    self.addCleanup(image_fake.FakeImageService_reset)
    # We have to get the image before we use 2.latest otherwise we'll get
    # a 404 on the /images proxy API because of 2.36.
    self.image_id = self.api.get_images()[0]['id']
    # Use the latest microversion available to make sure something does
    # not regress in new microversions; cap as necessary.
    self.api.microversion = 'latest'
    self.start_service('conductor')
    self.start_service('scheduler')
    self.start_service('consoleauth')
    # We don't start the compute service because we want NoValidHost so
    # all of the instances go into ERROR state and get put into cell0.
    self.useFixture(cast_as_call.CastAsCall(self))
def setUp(self):
    """Start controller services with placement/neutron/image fakes."""
    super(InstanceListWithDeletedServicesTestCase, self).setUp()
    self.useFixture(policy_fixture.RealPolicyFixture())
    # The NeutronFixture is needed to stub out validate_networks in API.
    self.useFixture(nova_fixtures.NeutronFixture(self))
    # We need the computes reporting into placement for the filter
    # scheduler to pick a host.
    self.useFixture(func_fixtures.PlacementFixture())
    api_fixture = self.useFixture(
        nova_fixtures.OSAPIFixture(api_version='v2.1'))
    self.api = api_fixture.api
    self.admin_api = api_fixture.admin_api
    self.admin_api.microversion = 'latest'
    # the image fake backend needed for image discovery
    fake_image.stub_out_image_service(self)
    self.addCleanup(fake_image.FakeImageService_reset)
    # Get the image before we set the microversion to latest to avoid
    # the proxy issues with GET /images in 2.36.
    self.image_id = self.api.get_images()[0]['id']
    self.api.microversion = 'latest'
    self.start_service('conductor')
    self.start_service('scheduler')
def setUp(self): super(_FakeDriverBackendTestCase, self).setUp() # TODO(sdague): it would be nice to do this in a way that only # the relevant backends where replaced for tests, though this # should not harm anything by doing it for all backends fake_image.stub_out_image_service(self.stubs) self._setup_fakelibvirt()
def setUp(self):
    """Shared implementation for tests below that create instance."""
    super(MultiCreateExtensionTestV21, self).setUp()
    self.flags(enable_instance_password=True, group='api')
    self.instance_cache_num = 0
    # Caches let the DB stubs below hand back the instances created by
    # the compute API without touching a real database.
    self.instance_cache_by_id = {}
    self.instance_cache_by_uuid = {}

    # Network API needs to be stubbed out before creating the controllers.
    fakes.stub_out_nw_api(self)

    ext_info = extension_info.LoadedExtensionInfo()
    self.controller = servers_v21.ServersController(
        extension_info=ext_info)

    def instance_get(context, instance_id):
        """Stub for compute/api create() pulling in instance after
        scheduling
        """
        return self.instance_cache_by_id[instance_id]

    def instance_update(context, uuid, values):
        # Apply updates to the cached instance in place.
        instance = self.instance_cache_by_uuid[uuid]
        instance.update(values)
        return instance

    def server_update(context, instance_uuid, params,
                      columns_to_join=None):
        # Mimic instance_update_and_get_original's (new, old) tuple.
        inst = self.instance_cache_by_uuid[instance_uuid]
        inst.update(params)
        return (inst, inst)

    def fake_method(*args, **kwargs):
        pass

    def project_get_networks(context, user_id):
        return dict(id='1', host='localhost')

    def create_db_entry_for_new_instance(*args, **kwargs):
        # args[4] is the instance object being persisted; cache it so
        # the stubs above can find it by uuid later.
        instance = args[4]
        self.instance_cache_by_uuid[instance.uuid] = instance
        return instance

    fakes.stub_out_key_pair_funcs(self)
    fake.stub_out_image_service(self)
    self.stub_out('nova.db.instance_add_security_group',
                  return_security_group)
    self.stub_out('nova.db.project_get_networks', project_get_networks)
    self.stub_out('nova.compute.api.API.create_db_entry_for_new_instance',
                  create_db_entry_for_new_instance)
    self.stub_out('nova.db.instance_system_metadata_update', fake_method)
    self.stub_out('nova.db.instance_get', instance_get)
    self.stub_out('nova.db.instance_update', instance_update)
    self.stub_out('nova.db.instance_update_and_get_original',
                  server_update)
    self.stub_out('nova.network.manager.VlanManager.allocate_fixed_ip',
                  fake_method)

    self.req = fakes.HTTPRequest.blank('')
def _setup_stubs(self):
    """Stub out DB, image, network and Hyper-V utils, plus mox mocks."""
    db_fakes.stub_out_db_instance_api(self.stubs)
    fake_image.stub_out_image_service(self.stubs)
    fake_network.stub_out_nw_api_get_instance_nw_info(self.stubs)

    def fake_fetch(context, image_id, target, user, project):
        # Record the path the image would have been fetched to.
        self._fetched_image = target
    self.stubs.Set(images, 'fetch', fake_fetch)

    def fake_get_remote_image_service(context, name):
        class FakeGlanceImageService(object):
            def update(self_fake, context, image_id, image_metadata, f):
                # Optionally simulate a glance update failure, otherwise
                # capture the metadata for assertions.
                if self._update_image_raise_exception:
                    raise vmutils.HyperVException(
                        "Simulated update failure")
                self._image_metadata = image_metadata
        return (FakeGlanceImageService(), 1)
    self.stubs.Set(glance, 'get_remote_image_service',
                   fake_get_remote_image_service)

    def fake_check_min_windows_version(fake_self, major, minor):
        # Versions >= 6.3 are reported unavailable; otherwise the test
        # controls the answer via _check_min_windows_version_satisfied.
        if [major, minor] >= [6, 3]:
            return False
        return self._check_min_windows_version_satisfied
    self.stubs.Set(hostutils.HostUtils, 'check_min_windows_version',
                   fake_check_min_windows_version)

    def fake_sleep(ms):
        pass
    self.stubs.Set(time, 'sleep', fake_sleep)

    class FakeIOThread(object):
        # No-op stand-in for the console log IO thread.
        def __init__(self, src, dest, max_bytes):
            pass

        def start(self):
            pass

    self.stubs.Set(pathutils, 'PathUtils', fake.PathUtils)
    self.stubs.Set(ioutils, 'IOThread', FakeIOThread)
    self._mox.StubOutWithMock(vmutils.VMUtils, 'get_vm_id')
    self._mox.StubOutWithMock(hostutils.HostUtils, 'get_local_ips')
    self._mox.StubOutWithMock(rdpconsoleutils.RDPConsoleUtils,
                              'get_rdp_console_port')
    self._mox.StubOutClassWithMocks(instance_metadata, 'InstanceMetadata')
    self._mox.StubOutWithMock(instance_metadata.InstanceMetadata,
                              'metadata_for_config_drive')

    # Can't use StubOutClassWithMocks due to __exit__ and __enter__
    self._mox.StubOutWithMock(configdrive, 'ConfigDriveBuilder')
    self._mox.StubOutWithMock(configdrive.ConfigDriveBuilder, 'make_drive')

    self._mox.StubOutWithMock(fileutils, 'delete_if_exists')
    self._mox.StubOutWithMock(utils, 'execute')
def _setup_stubs(self):
    """Stub out DB, image, network and Hyper-V utils, plus mox mocks."""
    db_fakes.stub_out_db_instance_api(self.stubs)
    fake_image.stub_out_image_service(self.stubs)
    fake_network.stub_out_nw_api_get_instance_nw_info(self.stubs)

    def fake_fetch(context, image_id, target, user, project):
        # Record the path the image would have been fetched to.
        self._fetched_image = target
    self.stubs.Set(images, 'fetch', fake_fetch)

    def fake_get_remote_image_service(context, name):
        class FakeGlanceImageService(object):
            def update(self_fake, context, image_id, image_metadata, f):
                # Optionally simulate a glance update failure, otherwise
                # capture the metadata for assertions.
                if self._update_image_raise_exception:
                    raise vmutils.HyperVException(
                        "Simulated update failure")
                self._image_metadata = image_metadata
        return (FakeGlanceImageService(), 1)
    self.stubs.Set(glance, 'get_remote_image_service',
                   fake_get_remote_image_service)

    def fake_check_min_windows_version(fake_self, major, minor):
        # Versions >= 6.3 are reported unavailable; otherwise the test
        # controls the answer via _check_min_windows_version_satisfied.
        if [major, minor] >= [6, 3]:
            return False
        return self._check_min_windows_version_satisfied
    self.stubs.Set(hostutils.HostUtils, 'check_min_windows_version',
                   fake_check_min_windows_version)

    def fake_sleep(ms):
        pass
    self.stubs.Set(time, 'sleep', fake_sleep)

    class FakeIOThread(object):
        # No-op stand-in for the console log IO thread.
        def __init__(self, src, dest, max_bytes):
            pass

        def start(self):
            pass

    self.stubs.Set(pathutils, 'PathUtils', fake.PathUtils)
    self.stubs.Set(ioutils, 'IOThread', FakeIOThread)
    self._mox.StubOutWithMock(vmutils.VMUtils, 'get_vm_id')
    self._mox.StubOutWithMock(hostutils.HostUtils, 'get_local_ips')
    self._mox.StubOutWithMock(rdpconsoleutils.RDPConsoleUtils,
                              'get_rdp_console_port')
    self._mox.StubOutClassWithMocks(instance_metadata, 'InstanceMetadata')
    self._mox.StubOutWithMock(instance_metadata.InstanceMetadata,
                              'metadata_for_config_drive')

    # Can't use StubOutClassWithMocks due to __exit__ and __enter__
    self._mox.StubOutWithMock(configdrive, 'ConfigDriveBuilder')
    self._mox.StubOutWithMock(configdrive.ConfigDriveBuilder, 'make_drive')

    self._mox.StubOutWithMock(fileutils, 'delete_if_exists')
    self._mox.StubOutWithMock(utils, 'execute')
def setUp(self):
    """Create an LXD driver with a temp root dir and fake image service."""
    super(LXDTestDriver, self).setUp()
    self.ctxt = utils.get_test_admin_context()
    fake_image.stub_out_image_service(self.stubs)
    # Point the LXD root at a throwaway temp directory.
    self.flags(lxd_root_dir=self.useFixture(fixtures.TempDir()).path,
               group='lxd')
    self.driver = driver.LXDDriver(None, None)
def setUp(self):
    """Set up the API with a no-op conductor so nothing gets scheduled."""
    super(ServersPreSchedulingTestCase, self).setUp()
    fake_image.stub_out_image_service(self)
    self.useFixture(policy_fixture.RealPolicyFixture())
    # The no-op conductor drops build requests, keeping servers in the
    # pre-scheduling stage these tests exercise.
    self.useFixture(nova_fixtures.NoopConductorFixture())
    api_fixture = self.useFixture(
        nova_fixtures.OSAPIFixture(api_version="v2.1"))
    self.api = api_fixture.api
    self.api.microversion = "latest"
def setUp(self):
    """Set up the API with a no-op conductor so nothing gets scheduled."""
    super(ServersPreSchedulingTestCase, self).setUp()
    fake_image.stub_out_image_service(self)
    self.useFixture(policy_fixture.RealPolicyFixture())
    # The no-op conductor drops build requests, keeping servers in the
    # pre-scheduling stage these tests exercise.
    self.useFixture(nova_fixtures.NoopConductorFixture())
    api_fixture = self.useFixture(nova_fixtures.OSAPIFixture(
        api_version='v2.1'))
    self.api = api_fixture.api
    self.api.microversion = 'latest'
def setUp(self):
    """Prepare the controller and a legacy-style block device mapping."""
    super(BlockDeviceMappingTestV21, self).setUp()
    self._setup_controller()
    fake.stub_out_image_service(self)
    self.volume_id = fakes.FAKE_UUID
    # Legacy BDM format: boot from the volume as the root device.
    self.bdm = [{
        'no_device': None,
        'virtual_name': 'root',
        'volume_id': self.volume_id,
        'device_name': 'vda',
        'delete_on_termination': False
    }]
def setUp(self):
    """Prepare the controller and a legacy-style block device mapping."""
    super(BlockDeviceMappingTestV21, self).setUp()
    self._setup_controller()
    fake.stub_out_image_service(self.stubs)
    self.volume_id = fakes.FAKE_UUID
    # Legacy BDM format: boot from the volume as the root device.
    self.bdm = [{
        'no_device': None,
        'virtual_name': 'root',
        'volume_id': self.volume_id,
        'device_name': 'vda',
        'delete_on_termination': False
    }]
def setUp(self):
    """Prepare the controller and a v2-style block device mapping."""
    super(BlockDeviceMappingTestV21, self).setUp()
    self._setup_controller()
    fake.stub_out_image_service(self)
    # v2 bdm format: source_type/destination_type/uuid keys.
    self.bdm = [{
        'no_device': None,
        'source_type': 'volume',
        'destination_type': 'volume',
        'uuid': 'fake',
        'device_name': 'vdb',
        'delete_on_termination': False,
    }]
def setUp(self):
    """Prepare the controller and a v2-style block device mapping."""
    super(BlockDeviceMappingTestV21, self).setUp()
    self._setup_controller()
    fake.stub_out_image_service(self.stubs)
    # v2 bdm format: source_type/destination_type/uuid keys.
    self.bdm = [{
        'no_device': None,
        'source_type': 'volume',
        'destination_type': 'volume',
        'uuid': 'fake',
        'device_name': 'vdb',
        'delete_on_termination': False,
    }]
def setUp(self):
    """Set up the API with a no-op conductor so servers never schedule."""
    super(ServersPreSchedulingTestCase, self).setUp()
    fake_image.stub_out_image_service(self)
    self.useFixture(policy_fixture.RealPolicyFixture())
    # NoopConductorFixture keeps created servers in the pre-scheduling
    # state, which is what this test case exercises.
    self.useFixture(nova_fixtures.NoopConductorFixture())
    self.useFixture(nova_fixtures.NeutronFixture(self))
    api_fixture = self.useFixture(
        nova_fixtures.OSAPIFixture(api_version='v2.1'))
    self.api = api_fixture.api
    self.api.microversion = 'latest'
    # Instances are never actually created in a cell for these tests.
    self.useFixture(
        nova_fixtures.SingleCellSimple(instances_created=False))
def setUp(self): super(ListDeletedServersWithMarker, self).setUp() # Start standard fixtures. self.useFixture(func_fixtures.PlacementFixture()) self.useFixture(nova_fixtures.NeutronFixture(self)) fake_image.stub_out_image_service(self) self.addCleanup(fake_image.FakeImageService_reset) # Start nova services. self.api = self.useFixture(nova_fixtures.OSAPIFixture( api_version='v2.1')).admin_api self.start_service('conductor') self.start_service('scheduler') self.start_service('compute')
def setUp(self):
    """Set up the API with a no-op conductor so servers never schedule."""
    super(ServersPreSchedulingTestCase, self).setUp()
    fake_image.stub_out_image_service(self)
    self.useFixture(policy_fixture.RealPolicyFixture())
    # NoopConductorFixture keeps created servers in the pre-scheduling
    # state, which is what this test case exercises.
    self.useFixture(nova_fixtures.NoopConductorFixture())
    self.useFixture(nova_fixtures.NeutronFixture(self))
    api_fixture = self.useFixture(nova_fixtures.OSAPIFixture(
        api_version='v2.1'))
    self.api = api_fixture.api
    self.api.microversion = 'latest'
    # Instances are never actually created in a cell for these tests.
    self.useFixture(nova_fixtures.SingleCellSimple(
        instances_created=False))
def setUp(self):
    """Start a single-node deployment for the VIF list migration tests."""
    super(FillVirtualInterfaceListMigration, self).setUp()
    api_fixture = self.useFixture(
        nova_fixtures.OSAPIFixture(api_version='v2.1'))
    self.api = api_fixture.admin_api
    self.useFixture(nova_fixtures.NeutronFixture(self))
    self.useFixture(func_fixtures.PlacementFixture())
    self.start_service('conductor')
    self.start_service('scheduler')
    self.start_service('compute')
    # the image fake backend needed for image discovery
    fake_image.stub_out_image_service(self)
    self.addCleanup(fake_image.FakeImageService_reset)
def test_end_to_end(self):
    """End-to-end check of a custom trait supplied via provider config.

    A server that requires a custom trait in its flavor goes to ERROR
    while no compute node reports that trait; after starting a compute
    node whose provider config file exposes the trait, an identical
    boot request succeeds.
    """
    self.neutron = nova_fixtures.NeutronFixture(self)
    self.useFixture(self.neutron)
    fake_image.stub_out_image_service(self)
    self.addCleanup(fake_image.FakeImageService_reset)
    # Start nova services.
    self.api = self.useFixture(
        nova_fixtures.OSAPIFixture(api_version='v2.1')).admin_api
    self.api.microversion = 'latest'
    fake_notifier.stub_notifier(self)
    self.addCleanup(fake_notifier.reset)
    self.start_service('conductor')
    # start nova-compute that will not have the additional trait.
    self._start_compute("fake-host-1")

    node_name = "fake-host-2"

    # create a config file with explicit name
    provider_config = self._create_config_entry(
        node_name, id_method="name")
    self._place_config_file("provider_config.yaml", provider_config)

    # Flavor requiring the custom trait derived from the node name.
    self._create_flavor(
        name='CUSTOM_Flavor', id=42, vcpu=4, memory_mb=4096,
        disk=1024, swap=0, extra_spec={
            f"trait:{os_traits.normalize_name(node_name)}": "required"
        })

    # No compute reports the trait yet, so scheduling must fail.
    self._create_server(
        flavor_id=42, expected_state='ERROR',
        networks=[{'port': self.neutron.port_1['id']}])

    # start compute node that will report the custom trait.
    self._start_compute("fake-host-2")
    self._create_server(
        flavor_id=42, expected_state='ACTIVE',
        networks=[{'port': self.neutron.port_1['id']}])
def setUp(self):
    """Stub out instance persistence and rebuild for access-IP tests."""
    super(AccessIPsExtAPIValidationTestV21, self).setUp()

    def fake_save(context, **kwargs):
        # Skip real DB writes on Instance.save.
        pass

    def fake_rebuild(*args, **kwargs):
        # Skip the real compute rebuild flow.
        pass

    self._set_up_controller()
    fake.stub_out_image_service(self.stubs)
    self.stubs.Set(db, 'instance_get_by_uuid', fakes.fake_instance_get())
    self.stubs.Set(instance_obj.Instance, 'save', fake_save)
    self.stubs.Set(compute_api.API, 'rebuild', fake_rebuild)
def setUp(self):
    """Prepare the controller and a legacy-style block device mapping."""
    super(BlockDeviceMappingTestV21, self).setUp()
    self._setup_controller()
    fake.stub_out_image_service(self.stubs)
    self.volume_id = fakes.FAKE_UUID
    # Legacy (pre-v2) bdm format: virtual_name/volume_id keys.
    self.bdm = [
        {
            "no_device": None,
            "virtual_name": "root",
            "volume_id": self.volume_id,
            "device_name": "vda",
            "delete_on_termination": False,
        }
    ]
def setUp(self):
    """Stub out instance persistence and rebuild for access-IP tests."""
    super(AccessIPsExtAPIValidationTestV21, self).setUp()

    def fake_save(context, **kwargs):
        # Skip real DB writes on Instance.save.
        pass

    def fake_rebuild(*args, **kwargs):
        # Skip the real compute rebuild flow.
        pass

    self._set_up_controller()
    fake.stub_out_image_service(self.stubs)
    self.stubs.Set(db, "instance_get_by_uuid", fakes.fake_instance_get())
    self.stubs.Set(instance_obj.Instance, "save", fake_save)
    self.stubs.Set(compute_api.API, "rebuild", fake_rebuild)
def setUp(self):
    """Boot two servers and tag them for the tag-filter list tests."""
    super(ServerTagsFilteringTest, self).setUp()
    self.useFixture(policy_fixture.RealPolicyFixture())
    # The NeutronFixture is needed to stub out validate_networks in API.
    self.useFixture(nova_fixtures.NeutronFixture(self))
    # Use the PlacementFixture to avoid annoying warnings in the logs.
    self.useFixture(nova_fixtures.PlacementFixture())
    api_fixture = self.useFixture(
        nova_fixtures.OSAPIFixture(api_version='v2.1'))
    self.api = api_fixture.api
    # the image fake backend needed for image discovery
    image_fake.stub_out_image_service(self)
    self.addCleanup(image_fake.FakeImageService_reset)
    # We have to get the image before we use 2.latest otherwise we'll get
    # a 404 on the /images proxy API because of 2.36.
    image_id = self.api.get_images()[0]['id']
    # Use the latest microversion available to make sure something does
    # not regress in new microversions; cap as necessary.
    self.api.microversion = 'latest'
    self.start_service('conductor')
    self.flags(driver='chance_scheduler', group='scheduler')
    self.start_service('scheduler')
    self.start_service('compute')
    # The consoleauth service is needed for deleting console tokens when
    # the server is deleted.
    self.start_service('consoleauth')
    # create two test servers
    self.servers = []
    for x in range(2):
        server = self.api.post_server(
            dict(server=self._build_minimal_create_server_request(
                self.api, 'test-list-server-tag-filters%i' % x, image_id,
                networks='none')))
        self.addCleanup(self.api.delete_server, server['id'])
        server = self._wait_for_state_change(self.api, server, 'ACTIVE')
        self.servers.append(server)
    # now apply two tags to the first server
    self.two_tag_server = self.servers[0]
    self.api.put_server_tags(self.two_tag_server['id'], ['foo', 'bar'])
    # apply one tag to the second server which intersects with one tag
    # from the first server
    self.one_tag_server = self.servers[1]
    self.api.put_server_tags(self.one_tag_server['id'], ['foo'])
def setUp(self):
    """Start three weighted compute hosts behind a ComputeFilter-only
    scheduler so tests can verify which host the scheduler targets.
    """
    super(SchedulerOnlyChecksTargetTest, self).setUp()
    self.useFixture(policy_fixture.RealPolicyFixture())
    # The NeutronFixture is needed to stub out validate_networks in API.
    self.useFixture(nova_fixtures.NeutronFixture(self))
    # We need the computes reporting into placement for the filter
    # scheduler to pick a host.
    self.useFixture(func_fixtures.PlacementFixture())
    api_fixture = self.useFixture(
        nova_fixtures.OSAPIFixture(api_version='v2.1'))
    # The admin API is used to get the server details to verify the
    # host on which the server was built.
    self.admin_api = api_fixture.admin_api
    self.api = api_fixture.api
    # the image fake backend needed for image discovery
    image_fake.stub_out_image_service(self)
    self.addCleanup(image_fake.FakeImageService_reset)
    self.start_service('conductor')
    # Use the latest microversion available to make sure something does
    # not regress in new microversions; cap as necessary.
    self.admin_api.microversion = 'latest'
    self.api.microversion = 'latest'
    # Define a very basic scheduler that only verifies if host is down.
    self.flags(enabled_filters=['ComputeFilter'],
               group='filter_scheduler')
    # NOTE(sbauza): Use the HostNameWeigherFixture so we are sure that
    # we prefer first host1 for the boot request and forget about any
    # other weigher.
    # Host2 should only be preferred over host3 if and only if that's the
    # only host we verify (as requested_destination does).
    self.useFixture(
        nova_fixtures.HostNameWeigherFixture(weights={
            'host1': 100,
            'host2': 1,
            'host3': 50
        }))
    self.start_service('scheduler')
    # Let's now start three compute nodes as we said above.
    self.start_service('compute', host='host1')
    self.start_service('compute', host='host2')
    self.start_service('compute', host='host3')
    # Run RPC casts synchronously so test assertions see their effects.
    self.useFixture(cast_as_call.CastAsCall(self))
def setUp(self):
    """Stub out instance persistence and rebuild for access-IP tests."""
    super(AccessIPsExtAPIValidationTest, self).setUp()

    def fake_save(context, **kwargs):
        # Skip real DB writes on Instance.save.
        pass

    def fake_rebuild(*args, **kwargs):
        # Skip the real compute rebuild flow.
        pass

    ext_info = plugins.LoadedExtensionInfo()
    self.controller = servers.ServersController(extension_info=ext_info)
    fake.stub_out_image_service(self.stubs)
    self.stubs.Set(db, 'instance_get_by_uuid', fakes.fake_instance_get())
    self.stubs.Set(instance_obj.Instance, 'save', fake_save)
    self.stubs.Set(compute_api.API, 'rebuild', fake_rebuild)
def setUp(self):
    """Stub out instance persistence and rebuild for access-IP tests."""
    super(AccessIPsExtAPIValidationTest, self).setUp()

    def fake_save(context, **kwargs):
        # Skip real DB writes on Instance.save.
        pass

    def fake_rebuild(*args, **kwargs):
        # Skip the real compute rebuild flow.
        pass

    ext_info = plugins.LoadedExtensionInfo()
    self.controller = servers.ServersController(extension_info=ext_info)
    fake.stub_out_image_service(self.stubs)
    self.stubs.Set(db, 'instance_get_by_uuid', fakes.fake_instance_get())
    self.stubs.Set(instance_obj.Instance, 'save', fake_save)
    self.stubs.Set(compute_api.API, 'rebuild', fake_rebuild)
def setUp(self):
    """Start three compute hosts with a deterministic weigher so retry
    and reschedule behavior can be asserted against specific hosts.
    """
    super(TestRequestSpecRetryReschedule, self).setUp()
    self.useFixture(policy_fixture.RealPolicyFixture())
    # The NeutronFixture is needed to stub out validate_networks in API.
    self.useFixture(nova_fixtures.NeutronFixture(self))
    # We need the computes reporting into placement for the filter
    # scheduler to pick a host.
    self.useFixture(func_fixtures.PlacementFixture())
    api_fixture = self.useFixture(nova_fixtures.OSAPIFixture(
        api_version='v2.1'))
    # The admin API is used to get the server details to verify the
    # host on which the server was built.
    self.admin_api = api_fixture.admin_api
    self.api = api_fixture.api
    # the image fake backend needed for image discovery
    image_fake.stub_out_image_service(self)
    self.addCleanup(image_fake.FakeImageService_reset)
    self.start_service('conductor')
    # We have to get the image before we use 2.latest otherwise we'll get
    # a 404 on the /images proxy API because of 2.36.
    self.image_id = self.api.get_images()[0]['id']
    # Use the latest microversion available to make sure something does
    # not regress in new microversions; cap as necessary.
    self.admin_api.microversion = 'latest'
    self.api.microversion = 'latest'
    # The consoleauth service is needed for deleting console tokens when
    # the server is deleted.
    self.start_service('consoleauth')
    # Use our custom weigher defined above to make sure that we have
    # a predictable scheduling sort order.
    self.flags(weight_classes=[__name__ + '.HostNameWeigher'],
               group='filter_scheduler')
    self.start_service('scheduler')
    # Let's now start three compute nodes as we said above.
    self.addCleanup(fake.restore_nodes)
    for host in ['host1', 'host2', 'host3']:
        fake.set_nodes([host])
        self.start_service('compute', host=host)
def setUp(self):
    """Seed three S3 image mappings and build the service under test."""
    super(TestS3ImageService, self).setUp()
    self.context = context.RequestContext(None, None)
    self.useFixture(fixtures.FakeLogger('boto'))

    # set up 3 fixtures to test shows, should have id '1', '2', and '3'
    db.s3_image_create(self.context,
                       '155d900f-4e14-4e4c-a73d-069cbf4541e6')
    db.s3_image_create(self.context,
                       'a2459075-d96c-40d5-893e-577ff92e721c')
    db.s3_image_create(self.context,
                       '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6')

    fake.stub_out_image_service(self.stubs)
    self.image_service = s3.S3ImageService()
    # Clear any cached id mappings left by earlier tests.
    ec2utils.reset_cache()
def setUp(self):
    """Stub out instance persistence and rebuild for access-IP tests."""
    super(AccessIPsExtAPIValidationTestV21, self).setUp()

    def fake_save(context, **kwargs):
        # Skip real DB writes on Instance.save.
        pass

    def fake_rebuild(*args, **kwargs):
        # Skip the real compute rebuild flow.
        pass

    self._set_up_controller()
    fake.stub_out_image_service(self)
    self.stubs.Set(db, 'instance_get_by_uuid', fakes.fake_instance_get())
    self.stubs.Set(instance_obj.Instance, 'save', fake_save)
    self.stubs.Set(compute_api.API, 'rebuild', fake_rebuild)
    self.req = fakes.HTTPRequest.blank('')
def setUp(self):
    """Register a deliberately malformed image for negative testing."""
    super(TestServerGet, self).setUp()
    self.useFixture(policy_fixture.RealPolicyFixture())
    api_fixture = self.useFixture(nova_fixtures.OSAPIFixture(
        api_version='v2.1'))
    self.api = api_fixture.api
    # the image fake backend needed for image discovery
    image_service = fake_image.stub_out_image_service(self)
    self.addCleanup(fake_image.FakeImageService_reset)
    # NOTE(mriedem): This image has an invalid architecture metadata value
    # and is used for negative testing in the functional stack.
    timestamp = datetime.datetime(2011, 1, 1, 1, 2, 3)
    image = {'id': 'c456eb30-91d7-4f43-8f46-2efd9eccd744',
             'name': 'fake-image-invalid-arch',
             'created_at': timestamp,
             'updated_at': timestamp,
             'deleted_at': None,
             'deleted': False,
             'status': 'active',
             'is_public': False,
             'container_format': 'raw',
             'disk_format': 'raw',
             'size': '25165824',
             'properties': {'kernel_id': CONF.null_kernel,
                            'ramdisk_id': CONF.null_kernel,
                            'architecture': 'x64'}}
    self.image_id = image_service.create(None, image)['id']
    self.flavor_id = self.api.get_flavors()[0]['id']
def setUp(self):
    """Boot two servers and tag them for the tag-filter list tests."""
    super(ServerTagsFilteringTest, self).setUp()
    self.useFixture(policy_fixture.RealPolicyFixture())
    # The NeutronFixture is needed to stub out validate_networks in API.
    self.useFixture(nova_fixtures.NeutronFixture(self))
    # Use the PlacementFixture to avoid annoying warnings in the logs.
    self.useFixture(nova_fixtures.PlacementFixture())
    api_fixture = self.useFixture(nova_fixtures.OSAPIFixture(
        api_version='v2.1'))
    self.api = api_fixture.api
    # the image fake backend needed for image discovery
    image_fake.stub_out_image_service(self)
    self.addCleanup(image_fake.FakeImageService_reset)
    # We have to get the image before we use 2.latest otherwise we'll get
    # a 404 on the /images proxy API because of 2.36.
    image_id = self.api.get_images()[0]['id']
    # Use the latest microversion available to make sure something does
    # not regress in new microversions; cap as necessary.
    self.api.microversion = 'latest'
    self.start_service('conductor')
    self.flags(driver='chance_scheduler', group='scheduler')
    self.start_service('scheduler')
    self.start_service('compute')
    # The consoleauth service is needed for deleting console tokens when
    # the server is deleted.
    self.start_service('consoleauth')
    # create two test servers
    self.servers = []
    for x in range(2):
        server = self.api.post_server(
            dict(server=self._build_minimal_create_server_request(
                self.api, 'test-list-server-tag-filters%i' % x, image_id,
                networks='none')))
        self.addCleanup(self.api.delete_server, server['id'])
        server = self._wait_for_state_change(self.api, server, 'ACTIVE')
        self.servers.append(server)
    # now apply two tags to the first server
    self.two_tag_server = self.servers[0]
    self.api.put_server_tags(self.two_tag_server['id'], ['foo', 'bar'])
    # apply one tag to the second server which intersects with one tag
    # from the first server
    self.one_tag_server = self.servers[1]
    self.api.put_server_tags(self.one_tag_server['id'], ['foo'])
def setUp(self):
    """Cap server_group_members at 2 and create a soft-anti-affinity
    group for the multi-create over-quota tests.
    """
    super(TestMultiCreateServerGroupMemberOverQuota, self).setUp()
    self.flags(server_group_members=2, group='quota')
    self.useFixture(policy_fixture.RealPolicyFixture())
    self.useFixture(nova_fixtures.NeutronFixture(self))
    self.useFixture(func_fixtures.PlacementFixture())
    api_fixture = self.useFixture(nova_fixtures.OSAPIFixture(
        api_version='v2.1'))
    self.api = api_fixture.api
    self.api.microversion = '2.37'  # so we can specify networks='none'
    fake_image.stub_out_image_service(self)
    self.addCleanup(fake_image.FakeImageService_reset)
    group = {'name': 'test group', 'policies': ['soft-anti-affinity']}
    self.created_group = self.api.post_server_groups(group)
def setUp(self):
    """Stub out instance persistence and rebuild for access-IP tests."""
    super(AccessIPsExtAPIValidationTestV21, self).setUp()

    def fake_save(context, **kwargs):
        # Skip real DB writes on Instance.save.
        pass

    def fake_rebuild(*args, **kwargs):
        # Skip the real compute rebuild flow.
        pass

    self._set_up_controller()
    fake.stub_out_image_service(self)
    self.stub_out('nova.db.instance_get_by_uuid',
                  fakes.fake_instance_get())
    self.stub_out('nova.objects.instance.Instance.save', fake_save)
    self.stub_out('nova.compute.api.API.rebuild', fake_rebuild)
    self.req = fakes.HTTPRequest.blank('')
def setUp(self):
    """Prepare the controller and a bdm carrying volume image metadata."""
    super(BlockDeviceMappingTestV21, self).setUp()
    self._setup_controller()
    fake.stub_out_image_service(self.stubs)
    self.volume_id = fakes.FAKE_UUID
    self.bdm = [{
        'id': 1,
        'no_device': None,
        'virtual_name': None,
        'snapshot_id': None,
        'volume_id': self.volume_id,
        'status': 'active',
        'device_name': 'vda',
        'delete_on_termination': False,
        'volume_image_metadata': {'test_key': 'test_value'}
    }]
def setUp(self):
    """Start a single-node deployment with Cinder faked for resize-down
    tests on volume-backed servers.
    """
    super(VolumeBackedResizeDiskDown, self).setUp()
    # Same-host resize keeps the whole test on the single compute.
    self.flags(allow_resize_to_same_host=True)
    api_fixture = self.useFixture(nova_fixtures.OSAPIFixture(
        api_version='v2.1'))
    self.api = api_fixture.admin_api
    self.useFixture(nova_fixtures.NeutronFixture(self))
    self.useFixture(nova_fixtures.CinderFixtureNewAttachFlow(self))
    self.useFixture(func_fixtures.PlacementFixture())
    fake_image.stub_out_image_service(self)
    self.addCleanup(fake_image.FakeImageService_reset)
    self.start_service('conductor')
    self.start_service('scheduler')
    self.start_service('compute')
def setUp(self):
    """Shared implementation for tests below that create instance."""
    super(ServersControllerCreateTestV21, self).setUp()

    self.instance_cache_num = 0
    fakes.stub_out_nw_api(self)

    self._set_up_controller()

    def create_db_entry_for_new_instance(*args, **kwargs):
        # Return the instance with a fixed uuid instead of touching the DB.
        instance = args[4]
        instance.uuid = FAKE_UUID
        return instance

    fake.stub_out_image_service(self)
    self.stub_out('nova.compute.api.API.create_db_entry_for_new_instance',
                  create_db_entry_for_new_instance)
    self.req = fakes.HTTPRequest.blank('')
def setUp(self):
    """Stub out DB, network, snapshot and image services for the server
    actions controller tests.
    """
    super(ServerActionsControllerTestV21, self).setUp()

    CONF.set_override("host", "localhost", group="glance")
    self.stubs.Set(db, "instance_get_by_uuid",
                   fakes.fake_instance_get(vm_state=vm_states.ACTIVE,
                                           host="fake_host"))
    self.stubs.Set(db, "instance_update_and_get_original",
                   instance_update_and_get_original)

    fakes.stub_out_nw_api(self.stubs)
    fakes.stub_out_compute_api_snapshot(self.stubs)
    fake.stub_out_image_service(self.stubs)
    self.flags(allow_instance_snapshots=True,
               enable_instance_password=True)
    self._image_href = "155d900f-4e14-4e4c-a73d-069cbf4541e6"

    self.controller = self._get_controller()
    self.compute_api = self.controller.compute_api
    self.req = fakes.HTTPRequest.blank("")
    self.context = self.req.environ["nova.context"]
def setUp(self):
    """Stub/mock everything below the API layer so server action tests
    exercise only the controller logic.
    """
    super(ServerActionsControllerTestV21, self).setUp()

    self.flags(group='glance', api_servers=['http://localhost:9292'])
    self.stub_out('nova.compute.api.API.get',
                  fakes.fake_compute_get(vm_state=vm_states.ACTIVE,
                                         host='fake_host'))
    self.stub_out('nova.objects.Instance.save', lambda *a, **kw: None)

    fakes.stub_out_compute_api_snapshot(self)
    fake.stub_out_image_service(self)
    self.flags(enable_instance_password=True, group='api')
    self._image_href = '155d900f-4e14-4e4c-a73d-069cbf4541e6'

    self.controller = self._get_controller()
    self.compute_api = self.controller.compute_api
    # We don't care about anything getting as far as hitting the compute
    # RPC API so we just mock it out here.
    mock_rpcapi = mock.patch.object(self.compute_api, 'compute_rpcapi')
    mock_rpcapi.start()
    self.addCleanup(mock_rpcapi.stop)
    # The project_id here matches what is used by default in
    # fake_compute_get which need to match for policy checks.
    self.req = fakes.HTTPRequest.blank('', project_id='fake_project')
    self.context = self.req.environ['nova.context']

    self.image_api = image.API()
    # Assume that anything that hits the compute API and looks for a
    # RequestSpec doesn't care about it, since testing logic that deep
    # should be done in nova.tests.unit.compute.test_compute_api.
    mock_reqspec = mock.patch('nova.objects.RequestSpec')
    mock_reqspec.start()
    self.addCleanup(mock_reqspec.stop)
    # Similarly we shouldn't care about anything hitting conductor from
    # these tests.
    mock_conductor = mock.patch.object(
        self.controller.compute_api, 'compute_task_api')
    mock_conductor.start()
    self.addCleanup(mock_conductor.stop)
    # Assume that none of the tests are using ports with resource requests.
    self.mock_list_port = self.useFixture(
        fixtures.MockPatch(
            'nova.network.neutronv2.api.API.list_ports')).mock
    self.mock_list_port.return_value = {'ports': []}