def setUp(self):
    super(TestS3ImageService, self).setUp()
    self.context = context.RequestContext(None, None)
    self.useFixture(fixtures.FakeLogger('boto'))

    # set up 3 fixtures to test shows, should have id '1', '2', and '3'
    db.s3_image_create(self.context,
                       '155d900f-4e14-4e4c-a73d-069cbf4541e6')
    db.s3_image_create(self.context,
                       'a2459075-d96c-40d5-893e-577ff92e721c')
    db.s3_image_create(self.context,
                       '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6')

    fake.stub_out_image_service(self.stubs)
    self.image_service = s3.S3ImageService()
    ec2utils.reset_cache()

def setUp(self):
    super(CinderCloudTestCase, self).setUp()
    ec2utils.reset_cache()
    vol_tmpdir = self.useFixture(fixtures.TempDir()).path
    fake_utils.stub_out_utils_spawn_n(self.stubs)
    self.flags(compute_driver='nova.virt.fake.FakeDriver',
               volume_api_class='nova.tests.fake_volume.API')

    # Stub the image service so every lookup returns an active AMI
    def fake_show(meh, context, id):
        return {'id': id,
                'name': 'fake_name',
                'container_format': 'ami',
                'status': 'active',
                'properties': {
                    'kernel_id': 'cedef40a-ed67-4d10-800e-17455edce175',
                    'ramdisk_id': 'cedef40a-ed67-4d10-800e-17455edce175',
                    'type': 'machine',
                    'image_state': 'available'}}

    def fake_detail(_self, context, **kwargs):
        image = fake_show(None, context, None)
        image['name'] = kwargs.get('filters', {}).get('name')
        return [image]

    self.stubs.Set(fake._FakeImageService, 'show', fake_show)
    self.stubs.Set(fake._FakeImageService, 'detail', fake_detail)
    fake.stub_out_image_service(self.stubs)

    # No-op the instance usage notifications
    def dumb(*args, **kwargs):
        pass

    self.stubs.Set(compute_utils, 'notify_about_instance_usage', dumb)
    fake_network.set_stub_network_methods(self.stubs)

    # set up our cloud
    self.cloud = cloud.CloudController()
    self.flags(scheduler_driver='nova.scheduler.chance.ChanceScheduler')

    # Short-circuit the conductor service
    self.flags(use_local=True, group='conductor')

    # set up services
    self.conductor = self.start_service('conductor',
                                        manager=CONF.conductor.manager)
    self.compute = self.start_service('compute')
    self.scheduler = self.start_service('scheduler')
    self.network = self.start_service('network')

    self.user_id = 'fake'
    self.project_id = 'fake'
    self.context = context.RequestContext(self.user_id,
                                          self.project_id,
                                          is_admin=True)
    self.volume_api = volume.API()
    self.volume_api.reset_fake_api(self.context)

    # NOTE(comstud): Make 'cast' behave like a 'call' which will
    # ensure that operations complete
    self.stubs.Set(rpc, 'cast', rpc.call)

    # make sure we can map ami-00000001/2 to a uuid in FakeImageService
    db.api.s3_image_create(self.context,
                           'cedef40a-ed67-4d10-800e-17455edce175')
    db.api.s3_image_create(self.context,
                           '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6')

def setUp(self):
    super(CinderCloudTestCase, self).setUp()
    ec2utils.reset_cache()
    self.useFixture(fixtures.TempDir()).path
    fake_utils.stub_out_utils_spawn_n(self.stubs)
    self.flags(compute_driver='nova.virt.fake.FakeDriver',
               volume_api_class='nova.tests.unit.fake_volume.API')

    def fake_show(meh, context, id, **kwargs):
        return {'id': id,
                'name': 'fake_name',
                'container_format': 'ami',
                'status': 'active',
                'properties': {
                    'kernel_id': 'cedef40a-ed67-4d10-800e-17455edce175',
                    'ramdisk_id': 'cedef40a-ed67-4d10-800e-17455edce175',
                    'type': 'machine',
                    'image_state': 'available'}}

    def fake_detail(_self, context, **kwargs):
        image = fake_show(None, context, None)
        image['name'] = kwargs.get('filters', {}).get('name')
        return [image]

    self.stubs.Set(fake._FakeImageService, 'show', fake_show)
    self.stubs.Set(fake._FakeImageService, 'detail', fake_detail)
    fake.stub_out_image_service(self.stubs)

    def dumb(*args, **kwargs):
        pass

    self.stubs.Set(compute_utils, 'notify_about_instance_usage', dumb)
    fake_network.set_stub_network_methods(self.stubs)

    # set up our cloud
    self.cloud = cloud.CloudController()
    self.flags(scheduler_driver='nova.scheduler.chance.ChanceScheduler')

    # Short-circuit the conductor service
    self.flags(use_local=True, group='conductor')

    # Stub out the notification service so we use the no-op serializer
    # and avoid lazy-load traces with the wrap_exception decorator in
    # the compute service.
    fake_notifier.stub_notifier(self.stubs)
    self.addCleanup(fake_notifier.reset)

    # set up services
    self.conductor = self.start_service('conductor',
                                        manager=CONF.conductor.manager)
    self.compute = self.start_service('compute')
    self.scheduler = self.start_service('scheduler')
    self.network = self.start_service('network')
    self.consoleauth = self.start_service('consoleauth')

    self.user_id = 'fake'
    self.project_id = 'fake'
    self.context = context.RequestContext(self.user_id,
                                          self.project_id,
                                          is_admin=True)
    self.volume_api = volume.API()
    self.volume_api.reset_fake_api(self.context)

    self.useFixture(cast_as_call.CastAsCall(self.stubs))

    # make sure we can map ami-00000001/2 to a uuid in FakeImageService
    db.s3_image_create(self.context,
                       'cedef40a-ed67-4d10-800e-17455edce175')
    db.s3_image_create(self.context,
                       '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6')

def setUp(self):
    super(CinderCloudTestCase, self).setUp()
    ec2utils.reset_cache()
    self.useFixture(fixtures.TempDir()).path
    fake_utils.stub_out_utils_spawn_n(self.stubs)
    self.flags(compute_driver='nova.virt.fake.FakeDriver',
               volume_api_class='nova.tests.unit.fake_volume.API')

    def fake_show(meh, context, id, **kwargs):
        return {'id': id,
                'name': 'fake_name',
                'container_format': 'ami',
                'status': 'active',
                'properties': {
                    'kernel_id': 'cedef40a-ed67-4d10-800e-17455edce175',
                    'ramdisk_id': 'cedef40a-ed67-4d10-800e-17455edce175',
                    'type': 'machine',
                    'image_state': 'available'}}

    def fake_detail(_self, context, **kwargs):
        image = fake_show(None, context, None)
        image['name'] = kwargs.get('filters', {}).get('name')
        return [image]

    self.stubs.Set(fake._FakeImageService, 'show', fake_show)
    self.stubs.Set(fake._FakeImageService, 'detail', fake_detail)
    fake.stub_out_image_service(self)

    def dumb(*args, **kwargs):
        pass

    self.stubs.Set(compute_utils, 'notify_about_instance_usage', dumb)
    fake_network.set_stub_network_methods(self.stubs)

    # set up our cloud
    self.cloud = cloud.CloudController()
    self.flags(scheduler_driver='nova.scheduler.chance.ChanceScheduler')

    # Short-circuit the conductor service
    self.flags(use_local=True, group='conductor')

    # Stub out the notification service so we use the no-op serializer
    # and avoid lazy-load traces with the wrap_exception decorator in
    # the compute service.
    fake_notifier.stub_notifier(self.stubs)
    self.addCleanup(fake_notifier.reset)

    # set up services
    self.conductor = self.start_service('conductor',
                                        manager=CONF.conductor.manager)
    self.compute = self.start_service('compute')
    self.scheduler = self.start_service('scheduler')
    self.network = self.start_service('network')
    self.consoleauth = self.start_service('consoleauth')

    self.user_id = 'fake'
    self.project_id = 'fake'
    self.context = context.RequestContext(self.user_id,
                                          self.project_id,
                                          is_admin=True)
    self.volume_api = volume.API()
    self.volume_api.reset_fake_api(self.context)

    # No-op the compute manager's scheduler instance info updates
    self.stubs.Set(compute_manager.ComputeManager,
                   '_update_scheduler_instance_info', dumb)
    self.stubs.Set(compute_manager.ComputeManager,
                   '_delete_scheduler_instance_info', dumb)
    self.stubs.Set(compute_manager.ComputeManager,
                   '_sync_scheduler_instance_info', dumb)
    self.useFixture(cast_as_call.CastAsCall(self.stubs))

    # make sure we can map ami-00000001/2 to a uuid in FakeImageService
    db.s3_image_create(self.context,
                       'cedef40a-ed67-4d10-800e-17455edce175')
    db.s3_image_create(self.context,
                       '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6')

def setUp(self):
    self.ctxt = context.get_admin_context()
    ec2utils.reset_cache()
    super(EC2UtilsTestCase, self).setUp()

def setUp(self):
    super(CinderCloudTestCase, self).setUp()
    ec2utils.reset_cache()
    vol_tmpdir = self.useFixture(fixtures.TempDir()).path
    self.flags(compute_driver="nova.virt.fake.FakeDriver",
               volume_api_class="nova.tests.fake_volume.API")

    def fake_show(meh, context, id):
        return {
            "id": id,
            "name": "fake_name",
            "container_format": "ami",
            "status": "active",
            "properties": {
                "kernel_id": "cedef40a-ed67-4d10-800e-17455edce175",
                "ramdisk_id": "cedef40a-ed67-4d10-800e-17455edce175",
                "type": "machine",
                "image_state": "available",
            },
        }

    def fake_detail(_self, context, **kwargs):
        image = fake_show(None, context, None)
        image["name"] = kwargs.get("filters", {}).get("name")
        return [image]

    self.stubs.Set(fake._FakeImageService, "show", fake_show)
    self.stubs.Set(fake._FakeImageService, "detail", fake_detail)
    fake.stub_out_image_service(self.stubs)

    def dumb(*args, **kwargs):
        pass

    self.stubs.Set(compute_utils, "notify_about_instance_usage", dumb)
    fake_network.set_stub_network_methods(self.stubs)

    # set up our cloud
    self.cloud = cloud.CloudController()
    self.flags(scheduler_driver="nova.scheduler.chance.ChanceScheduler")

    # Short-circuit the conductor service
    self.flags(use_local=True, group="conductor")

    # set up services
    self.conductor = self.start_service("conductor",
                                        manager=CONF.conductor.manager)
    self.compute = self.start_service("compute")
    self.scheduler = self.start_service("scheduler")
    self.network = self.start_service("network")

    self.user_id = "fake"
    self.project_id = "fake"
    self.context = context.RequestContext(self.user_id,
                                          self.project_id,
                                          is_admin=True)
    self.volume_api = volume.API()
    self.volume_api.reset_fake_api(self.context)

    # NOTE(comstud): Make 'cast' behave like a 'call' which will
    # ensure that operations complete
    self.stubs.Set(rpc, "cast", rpc.call)

    # make sure we can map ami-00000001/2 to a uuid in FakeImageService
    db.api.s3_image_create(self.context,
                           "cedef40a-ed67-4d10-800e-17455edce175")
    db.api.s3_image_create(self.context,
                           "76fa36fc-c930-4bf3-8c8a-ea2a2420deb6")