def test_allocate_for_instance_handles_macs_passed(self):
    # If a macs argument is supplied to the 'nova-network' API, it is just
    # ignored. This test checks that the call down to the rpcapi layer
    # doesn't pass macs down: nova-network doesn't support hypervisor
    # mac address limits (today anyhow).
    macs = set(['ab:cd:ef:01:23:34'])
    self.mox.StubOutWithMock(self.network_api.network_rpcapi,
                             "allocate_for_instance")
    kwargs = dict(zip(['host', 'instance_id', 'project_id',
                       'requested_networks', 'rxtx_factor', 'vpn', 'macs',
                       'dhcp_options'],
                      itertools.repeat(mox.IgnoreArg())))
    self.network_api.network_rpcapi.allocate_for_instance(
        mox.IgnoreArg(), **kwargs).AndReturn([])
    self.mox.ReplayAll()
    flavor = flavors.get_default_flavor()
    flavor['rxtx_factor'] = 0
    sys_meta = flavors.save_flavor_info({}, flavor)
    instance = dict(id=1, uuid='uuid', project_id='project_id',
                    host='host',
                    system_metadata=utils.dict_to_metadata(sys_meta))
    instance = fake_instance.fake_instance_obj(
        self.context, expected_attrs=['system_metadata'], **instance)
    self.network_api.allocate_for_instance(self.context, instance, 'vpn',
                                           'requested_networks', macs=macs)
def setUp(self):
    super(SimpleTenantUsageControllerTest, self).setUp()
    self.controller = simple_tenant_usage.SimpleTenantUsageController()

    class FakeComputeAPI:
        def get_instance_type(self, context, flavor_type):
            if flavor_type == 1:
                return flavors.get_default_flavor()
            else:
                raise exception.InstanceTypeNotFound(flavor_type)

    self.compute_api = FakeComputeAPI()
    self.context = None

    now = datetime.datetime.now()
    self.baseinst = dict(display_name='foo',
                         launched_at=now - datetime.timedelta(1),
                         terminated_at=now,
                         instance_type_id=1,
                         vm_state='deleted',
                         deleted=0)
    basetype = flavors.get_default_flavor()
    sys_meta = utils.dict_to_metadata(
        flavors.save_flavor_info({}, basetype))
    self.baseinst['system_metadata'] = sys_meta
    self.basetype = flavors.extract_flavor(self.baseinst)
def _stub_migrate_instance_calls(self, method, multi_host, info):
    fake_flavor = flavors.get_default_flavor()
    fake_flavor['rxtx_factor'] = 1.21
    sys_meta = utils.dict_to_metadata(
        flavors.save_flavor_info({}, fake_flavor))
    fake_instance = {'uuid': 'fake_uuid',
                     'instance_type_id': fake_flavor['id'],
                     'project_id': 'fake_project_id',
                     'system_metadata': sys_meta}
    fake_migration = {'source_compute': 'fake_compute_source',
                      'dest_compute': 'fake_compute_dest'}

    def fake_mig_inst_method(*args, **kwargs):
        info['kwargs'] = kwargs

    def fake_get_multi_addresses(*args, **kwargs):
        return multi_host, ['fake_float1', 'fake_float2']

    self.stubs.Set(network_rpcapi.NetworkAPI, method,
                   fake_mig_inst_method)
    self.stubs.Set(self.network_api, '_get_multi_addresses',
                   fake_get_multi_addresses)

    expected = {'instance_uuid': 'fake_uuid',
                'source_compute': 'fake_compute_source',
                'dest_compute': 'fake_compute_dest',
                'rxtx_factor': 1.21,
                'project_id': 'fake_project_id',
                'floating_addresses': None}
    if multi_host:
        expected['floating_addresses'] = ['fake_float1', 'fake_float2']
    return fake_instance, fake_migration, expected
def test_dict_to_metadata(self):
    def sort_key(adict):
        return sorted(adict.items())

    metadata = utils.dict_to_metadata(dict(foo1="bar1", foo2="bar2"))
    expected = [{"key": "foo1", "value": "bar1"},
                {"key": "foo2", "value": "bar2"}]
    self.assertEqual(sorted(metadata, key=sort_key),
                     sorted(expected, key=sort_key))
def get_fake_db_instance(start, end, instance_id, tenant_id,
                         vm_state=vm_states.ACTIVE):
    sys_meta = utils.dict_to_metadata(
        flavors.save_flavor_info({}, FAKE_INST_TYPE))
    # NOTE(mriedem): We use fakes.stub_instance since it sets the fields
    # needed on the db instance for converting it to an object, but we still
    # need to override system_metadata to use our fake flavor.
    inst = fakes.stub_instance(
        id=instance_id,
        uuid='00000000-0000-0000-0000-00000000000000%02d' % instance_id,
        image_ref='1',
        project_id=tenant_id,
        user_id='fakeuser',
        display_name='name',
        flavor_id=FAKE_INST_TYPE['id'],
        launched_at=start,
        terminated_at=end,
        vm_state=vm_state,
        memory_mb=MEMORY_MB,
        vcpus=VCPUS,
        root_gb=ROOT_GB,
        ephemeral_gb=EPHEMERAL_GB,
    )
    inst['system_metadata'] = sys_meta
    return inst
def setUp(self):
    super(SimpleTenantUsageControllerTest, self).setUp()
    self.controller = simple_tenant_usage.SimpleTenantUsageController()

    class FakeComputeAPI:
        def get_instance_type(self, context, flavor_type):
            if flavor_type == 1:
                return flavors.get_default_flavor()
            else:
                raise exception.InstanceTypeNotFound(flavor_type)

    self.compute_api = FakeComputeAPI()
    self.context = None

    now = timeutils.utcnow()
    self.baseinst = dict(display_name='foo',
                         launched_at=now - datetime.timedelta(1),
                         terminated_at=now,
                         instance_type_id=1,
                         vm_state='deleted',
                         deleted=0)
    basetype = flavors.get_default_flavor()
    sys_meta = utils.dict_to_metadata(
        flavors.save_flavor_info({}, basetype))
    self.baseinst['system_metadata'] = sys_meta
    self.basetype = flavors.extract_flavor(self.baseinst)
def test_dict_to_metadata(self):
    def sort_key(adict):
        return sorted(adict.items())

    metadata = utils.dict_to_metadata(dict(foo1='bar1', foo2='bar2'))
    expected = [{'key': 'foo1', 'value': 'bar1'},
                {'key': 'foo2', 'value': 'bar2'}]
    self.assertEqual(sorted(metadata, key=sort_key),
                     sorted(expected, key=sort_key))
def test_shelve_volume_backed(self): db_instance = jsonutils.to_primitive(self._create_fake_instance()) self.compute.run_instance(self.context, instance=db_instance) instance = instance_obj.Instance.get_by_uuid( self.context, db_instance['uuid'], expected_attrs=['metadata', 'system_metadata']) instance.task_state = task_states.SHELVING instance.save() host = 'fake-mini' cur_time = timeutils.utcnow() timeutils.set_time_override(cur_time) sys_meta = dict(instance.system_metadata) sys_meta['shelved_at'] = timeutils.strtime(at=cur_time) sys_meta['shelved_image_id'] = None sys_meta['shelved_host'] = host db_instance['system_metadata'] = utils.dict_to_metadata(sys_meta) self.mox.StubOutWithMock(self.compute, '_notify_about_instance_usage') self.mox.StubOutWithMock(self.compute.driver, 'power_off') self.mox.StubOutWithMock(self.compute, '_get_power_state') self.mox.StubOutWithMock(db, 'instance_update_and_get_original') self.compute._notify_about_instance_usage(self.context, instance, 'shelve_offload.start') self.compute.driver.power_off(instance) self.compute._get_power_state(self.context, instance).AndReturn(123) db.instance_update_and_get_original( self.context, instance['uuid'], { 'power_state': 123, 'host': None, 'node': None, 'vm_state': vm_states.SHELVED_OFFLOADED, 'task_state': None, 'expected_task_state': [task_states.SHELVING, task_states.SHELVING_OFFLOADING] }, update_cells=False, columns_to_join=['metadata', 'system_metadata'], ).AndReturn((db_instance, db_instance)) self.compute._notify_about_instance_usage(self.context, instance, 'shelve_offload.end') self.mox.ReplayAll() self.compute.shelve_offload_instance(self.context, instance) self.mox.VerifyAll() self.mox.UnsetStubs() self.compute.terminate_instance(self.context, instance=instance)
def _shelve_offload(self, clean_shutdown=True): instance = self._create_fake_instance_obj() instance.task_state = task_states.SHELVING instance.save() db_instance = obj_base.obj_to_primitive(instance) host = 'fake-mini' cur_time = timeutils.utcnow() timeutils.set_time_override(cur_time) sys_meta = dict(instance.system_metadata) sys_meta['shelved_at'] = timeutils.strtime(at=cur_time) sys_meta['shelved_image_id'] = None sys_meta['shelved_host'] = host db_instance['system_metadata'] = utils.dict_to_metadata(sys_meta) self.mox.StubOutWithMock(self.compute, '_notify_about_instance_usage') self.mox.StubOutWithMock(self.compute.driver, 'power_off') self.mox.StubOutWithMock(self.compute, '_get_power_state') self.mox.StubOutWithMock(db, 'instance_update_and_get_original') self.compute._notify_about_instance_usage(self.context, instance, 'shelve_offload.start') if clean_shutdown: self.compute.driver.power_off(instance, CONF.shutdown_timeout, self.compute.SHUTDOWN_RETRY_INTERVAL) else: self.compute.driver.power_off(instance, 0, 0) self.compute._get_power_state(self.context, instance).AndReturn(123) db.instance_update_and_get_original( self.context, instance['uuid'], { 'power_state': 123, 'host': None, 'node': None, 'vm_state': vm_states.SHELVED_OFFLOADED, 'task_state': None, 'expected_task_state': [task_states.SHELVING, task_states.SHELVING_OFFLOADING] }, update_cells=False, columns_to_join=[ 'metadata', 'system_metadata', 'info_cache', 'security_groups' ], ).AndReturn((db_instance, db_instance)) self.compute._notify_about_instance_usage(self.context, instance, 'shelve_offload.end') self.mox.ReplayAll() self.compute.shelve_offload_instance(self.context, instance, clean_shutdown=clean_shutdown)
def test_dict_to_metadata(self):
    expected = [{'key': 'foo1', 'value': 'bar1'},
                {'key': 'foo2', 'value': 'bar2'}]
    self.assertEqual(
        utils.dict_to_metadata(dict(foo1='bar1', foo2='bar2')),
        expected)
def test_shelve(self): CONF.shelved_offload_time = -1 db_instance = jsonutils.to_primitive(self._create_fake_instance()) self.compute.run_instance(self.context, instance=db_instance) instance = instance_obj.Instance.get_by_uuid( self.context, db_instance['uuid'], expected_attrs=['metadata', 'system_metadata']) image_id = 'fake_image_id' host = 'fake-mini' cur_time = timeutils.utcnow() timeutils.set_time_override(cur_time) instance.task_state = task_states.SHELVING instance.save() sys_meta = dict(instance.system_metadata) sys_meta['shelved_at'] = timeutils.strtime(at=cur_time) sys_meta['shelved_image_id'] = image_id sys_meta['shelved_host'] = host db_instance['system_metadata'] = utils.dict_to_metadata(sys_meta) self.mox.StubOutWithMock(self.compute, '_notify_about_instance_usage') self.mox.StubOutWithMock(self.compute.driver, 'snapshot') self.mox.StubOutWithMock(self.compute.driver, 'power_off') self.mox.StubOutWithMock(self.compute, '_get_power_state') self.mox.StubOutWithMock(db, 'instance_update_and_get_original') self.compute._notify_about_instance_usage(self.context, instance, 'shelve.start') self.compute.driver.power_off(instance) self.compute._get_power_state(self.context, instance).AndReturn(123) self.compute.driver.snapshot(self.context, instance, 'fake_image_id', mox.IgnoreArg()) db.instance_update_and_get_original(self.context, instance['uuid'], {'power_state': 123, 'vm_state': vm_states.SHELVED, 'task_state': None, 'expected_task_state': [task_states.SHELVING, task_states.SHELVING_IMAGE_UPLOADING], 'system_metadata': sys_meta}, update_cells=False, columns_to_join=['metadata', 'system_metadata'], ).AndReturn((db_instance, db_instance)) self.compute._notify_about_instance_usage(self.context, instance, 'shelve.end') self.mox.ReplayAll() self.compute.shelve_instance(self.context, instance, image_id=image_id) self.mox.VerifyAll() self.mox.UnsetStubs() self.compute.terminate_instance(self.context, instance=instance)
def test_shelve(self): CONF.shelved_offload_time = -1 db_instance = jsonutils.to_primitive(self._create_fake_instance()) self.compute.run_instance(self.context, instance=db_instance) instance = instance_obj.Instance.get_by_uuid( self.context, db_instance['uuid'], expected_attrs=['metadata', 'system_metadata']) image_id = 'fake_image_id' host = 'fake-mini' cur_time = timeutils.utcnow() timeutils.set_time_override(cur_time) instance.task_state = task_states.SHELVING instance.save() sys_meta = instance.system_metadata sys_meta['shelved_at'] = timeutils.strtime(at=cur_time) sys_meta['shelved_image_id'] = image_id sys_meta['shelved_host'] = host db_instance['system_metadata'] = utils.dict_to_metadata(sys_meta) self.mox.StubOutWithMock(self.compute, '_notify_about_instance_usage') self.mox.StubOutWithMock(self.compute.driver, 'snapshot') self.mox.StubOutWithMock(self.compute.driver, 'power_off') self.mox.StubOutWithMock(self.compute, '_get_power_state') self.mox.StubOutWithMock(db, 'instance_update_and_get_original') self.compute._notify_about_instance_usage(self.context, instance, 'shelve.start') self.compute.driver.power_off(instance) self.compute._get_power_state(self.context, instance).AndReturn(123) self.compute.driver.snapshot(self.context, instance, 'fake_image_id', mox.IgnoreArg()) db.instance_update_and_get_original(self.context, instance['uuid'], {'power_state': 123, 'vm_state': vm_states.SHELVED, 'task_state': None, 'expected_task_state': [task_states.SHELVING, task_states.SHELVING_IMAGE_UPLOADING], 'system_metadata': sys_meta}).AndReturn((db_instance, db_instance)) self.compute._notify_about_instance_usage(self.context, instance, 'shelve.end') self.mox.ReplayAll() self.compute.shelve_instance(self.context, instance, image_id=image_id) self.mox.VerifyAll() self.mox.UnsetStubs() self.compute.terminate_instance(self.context, instance=instance)
def get_fake_db_instance(start, end, instance_id, tenant_id):
    sys_meta = utils.dict_to_metadata(
        flavors.save_flavor_info({}, FAKE_INST_TYPE))
    return {'id': instance_id,
            'uuid': '00000000-0000-0000-0000-00000000000000%02d' % instance_id,
            'image_ref': '1',
            'project_id': tenant_id,
            'user_id': 'fakeuser',
            'display_name': 'name',
            'state_description': 'state',
            'instance_type_id': 1,
            'launched_at': start,
            'terminated_at': end,
            'system_metadata': sys_meta}
def get_fake_db_instance(start, end, instance_id, tenant_id):
    sys_meta = utils.dict_to_metadata(flavors.save_flavor_info({}, FAKE_INST_TYPE))
    return {
        "id": instance_id,
        "uuid": "00000000-0000-0000-0000-00000000000000%02d" % instance_id,
        "image_ref": "1",
        "project_id": tenant_id,
        "user_id": "fakeuser",
        "display_name": "name",
        "state_description": "state",
        "instance_type_id": 1,
        "launched_at": start,
        "terminated_at": end,
        "system_metadata": sys_meta,
    }
def test_shelve(self): CONF.shelved_offload_time = -1 db_instance = jsonutils.to_primitive(self._create_fake_instance()) self.compute.run_instance(self.context, db_instance, {}, {}, [], None, None, True, None, False) instance = instance_obj.Instance.get_by_uuid( self.context, db_instance["uuid"], expected_attrs=["metadata", "system_metadata"] ) image_id = "fake_image_id" host = "fake-mini" cur_time = timeutils.utcnow() timeutils.set_time_override(cur_time) instance.task_state = task_states.SHELVING instance.save() sys_meta = dict(instance.system_metadata) sys_meta["shelved_at"] = timeutils.strtime(at=cur_time) sys_meta["shelved_image_id"] = image_id sys_meta["shelved_host"] = host db_instance["system_metadata"] = utils.dict_to_metadata(sys_meta) self.mox.StubOutWithMock(self.compute, "_notify_about_instance_usage") self.mox.StubOutWithMock(self.compute.driver, "snapshot") self.mox.StubOutWithMock(self.compute.driver, "power_off") self.mox.StubOutWithMock(self.compute, "_get_power_state") self.mox.StubOutWithMock(db, "instance_update_and_get_original") self.compute._notify_about_instance_usage(self.context, instance, "shelve.start") self.compute.driver.power_off(instance) self.compute._get_power_state(self.context, instance).AndReturn(123) self.compute.driver.snapshot(self.context, instance, "fake_image_id", mox.IgnoreArg()) db.instance_update_and_get_original( self.context, instance["uuid"], { "power_state": 123, "vm_state": vm_states.SHELVED, "task_state": None, "expected_task_state": [task_states.SHELVING, task_states.SHELVING_IMAGE_UPLOADING], "system_metadata": sys_meta, }, update_cells=False, columns_to_join=["metadata", "system_metadata"], ).AndReturn((db_instance, db_instance)) self.compute._notify_about_instance_usage(self.context, instance, "shelve.end") self.mox.ReplayAll() self.compute.shelve_instance(self.context, instance, image_id=image_id)
def _shelve_offload(self, clean_shutdown=True): instance = self._create_fake_instance_obj() instance.task_state = task_states.SHELVING instance.save() db_instance = obj_base.obj_to_primitive(instance) host = 'fake-mini' cur_time = timeutils.utcnow() timeutils.set_time_override(cur_time) sys_meta = dict(instance.system_metadata) sys_meta['shelved_at'] = timeutils.strtime(at=cur_time) sys_meta['shelved_image_id'] = None sys_meta['shelved_host'] = host db_instance['system_metadata'] = utils.dict_to_metadata(sys_meta) self.mox.StubOutWithMock(self.compute, '_notify_about_instance_usage') self.mox.StubOutWithMock(self.compute.driver, 'power_off') self.mox.StubOutWithMock(self.compute, '_get_power_state') self.mox.StubOutWithMock(db, 'instance_update_and_get_original') self.compute._notify_about_instance_usage(self.context, instance, 'shelve_offload.start') if clean_shutdown: self.compute.driver.power_off(instance, CONF.shutdown_timeout, self.compute.SHUTDOWN_RETRY_INTERVAL) else: self.compute.driver.power_off(instance, 0, 0) self.compute._get_power_state(self.context, instance).AndReturn(123) db.instance_update_and_get_original(self.context, instance['uuid'], {'power_state': 123, 'host': None, 'node': None, 'vm_state': vm_states.SHELVED_OFFLOADED, 'task_state': None, 'expected_task_state': [task_states.SHELVING, task_states.SHELVING_OFFLOADING]}, update_cells=False, columns_to_join=['metadata', 'system_metadata', 'info_cache', 'security_groups'], ).AndReturn((db_instance, db_instance)) self.compute._notify_about_instance_usage(self.context, instance, 'shelve_offload.end') self.mox.ReplayAll() self.compute.shelve_offload_instance(self.context, instance, clean_shutdown=clean_shutdown)
def test_shelve_volume_backed(self): db_instance = jsonutils.to_primitive(self._create_fake_instance()) self.compute.run_instance(self.context, instance=db_instance) instance = instance_obj.Instance.get_by_uuid( self.context, db_instance['uuid'], expected_attrs=['metadata', 'system_metadata']) instance.task_state = task_states.SHELVING instance.save() host = 'fake-mini' cur_time = timeutils.utcnow() timeutils.set_time_override(cur_time) sys_meta = instance.system_metadata sys_meta['shelved_at'] = timeutils.strtime(at=cur_time) sys_meta['shelved_image_id'] = None sys_meta['shelved_host'] = host db_instance['system_metadata'] = utils.dict_to_metadata(sys_meta) self.mox.StubOutWithMock(self.compute, '_notify_about_instance_usage') self.mox.StubOutWithMock(self.compute.driver, 'power_off') self.mox.StubOutWithMock(self.compute, '_get_power_state') self.mox.StubOutWithMock(db, 'instance_update_and_get_original') self.compute._notify_about_instance_usage(self.context, instance, 'shelve_offload.start') self.compute.driver.power_off(instance) self.compute._get_power_state(self.context, instance).AndReturn(123) db.instance_update_and_get_original(self.context, instance['uuid'], {'power_state': 123, 'host': None, 'node': None, 'vm_state': vm_states.SHELVED_OFFLOADED, 'task_state': None, 'expected_task_state': [task_states.SHELVING, task_states.SHELVING_OFFLOADING]}).AndReturn( (db_instance, db_instance)) self.compute._notify_about_instance_usage(self.context, instance, 'shelve_offload.end') self.mox.ReplayAll() self.compute.shelve_offload_instance(self.context, instance) self.mox.VerifyAll() self.mox.UnsetStubs() self.compute.terminate_instance(self.context, instance=instance)
def get_fake_db_instance(start, end, instance_id, tenant_id, vm_state=vm_states.ACTIVE):
    sys_meta = utils.dict_to_metadata(flavors.save_flavor_info({}, FAKE_INST_TYPE))
    # NOTE(mriedem): We use fakes.stub_instance since it sets the fields
    # needed on the db instance for converting it to an object, but we still
    # need to override system_metadata to use our fake flavor.
    inst = fakes.stub_instance(
        id=instance_id,
        uuid="00000000-0000-0000-0000-00000000000000%02d" % instance_id,
        image_ref="1",
        project_id=tenant_id,
        user_id="fakeuser",
        display_name="name",
        flavor_id=FAKE_INST_TYPE["id"],
        launched_at=start,
        terminated_at=end,
        vm_state=vm_state,
    )
    inst["system_metadata"] = sys_meta
    return inst
def _live_migration_instance(self):
    inst_type = instance_types.get_instance_type(1)
    # NOTE(danms): we have _got_ to stop doing this!
    inst_type['memory_mb'] = 1024
    sys_meta = utils.dict_to_metadata(
        instance_types.save_instance_type_info({}, inst_type))
    return {'id': 31337,
            'uuid': 'fake_uuid',
            'name': 'fake-instance',
            'host': 'fake_host1',
            'power_state': power_state.RUNNING,
            'memory_mb': 1024,
            'root_gb': 1024,
            'ephemeral_gb': 0,
            'vm_state': '',
            'task_state': '',
            'instance_type_id': inst_type['id'],
            'image_ref': 'fake-image-ref',
            'system_metadata': sys_meta}
def test_shelve_volume_backed(self): db_instance = jsonutils.to_primitive(self._create_fake_instance()) instance = objects.Instance.get_by_uuid( self.context, db_instance["uuid"], expected_attrs=["metadata", "system_metadata"] ) instance.task_state = task_states.SHELVING instance.save() host = "fake-mini" cur_time = timeutils.utcnow() timeutils.set_time_override(cur_time) sys_meta = dict(instance.system_metadata) sys_meta["shelved_at"] = timeutils.strtime(at=cur_time) sys_meta["shelved_image_id"] = None sys_meta["shelved_host"] = host db_instance["system_metadata"] = utils.dict_to_metadata(sys_meta) self.mox.StubOutWithMock(self.compute, "_notify_about_instance_usage") self.mox.StubOutWithMock(self.compute.driver, "power_off") self.mox.StubOutWithMock(self.compute, "_get_power_state") self.mox.StubOutWithMock(db, "instance_update_and_get_original") self.compute._notify_about_instance_usage(self.context, instance, "shelve_offload.start") self.compute.driver.power_off(instance) self.compute._get_power_state(self.context, instance).AndReturn(123) db.instance_update_and_get_original( self.context, instance["uuid"], { "power_state": 123, "host": None, "node": None, "vm_state": vm_states.SHELVED_OFFLOADED, "task_state": None, "expected_task_state": [task_states.SHELVING, task_states.SHELVING_OFFLOADING], }, update_cells=False, columns_to_join=["metadata", "system_metadata"], ).AndReturn((db_instance, db_instance)) self.compute._notify_about_instance_usage(self.context, instance, "shelve_offload.end") self.mox.ReplayAll() self.compute.shelve_offload_instance(self.context, instance)
def _stub_migrate_instance_calls(self, method, multi_host, info):
    fake_flavor = flavors.get_default_flavor()
    fake_flavor['rxtx_factor'] = 1.21
    sys_meta = utils.dict_to_metadata(
        flavors.save_flavor_info({}, fake_flavor))
    fake_instance = {
        'uuid': 'fake_uuid',
        'instance_type_id': fake_flavor['id'],
        'project_id': 'fake_project_id',
        'system_metadata': sys_meta
    }
    fake_migration = {
        'source_compute': 'fake_compute_source',
        'dest_compute': 'fake_compute_dest'
    }

    def fake_mig_inst_method(*args, **kwargs):
        info['kwargs'] = kwargs

    def fake_is_multi_host(*args, **kwargs):
        return multi_host

    def fake_get_floaters(*args, **kwargs):
        return ['fake_float1', 'fake_float2']

    self.stubs.Set(network_rpcapi.NetworkAPI, method,
                   fake_mig_inst_method)
    self.stubs.Set(self.network_api, '_is_multi_host', fake_is_multi_host)
    self.stubs.Set(self.network_api, '_get_floating_ip_addresses',
                   fake_get_floaters)

    expected = {
        'instance_uuid': 'fake_uuid',
        'source_compute': 'fake_compute_source',
        'dest_compute': 'fake_compute_dest',
        'rxtx_factor': 1.21,
        'project_id': 'fake_project_id',
        'floating_addresses': None
    }
    if multi_host:
        expected['floating_addresses'] = ['fake_float1', 'fake_float2']
    return fake_instance, fake_migration, expected
def test_allocate_for_instance_handles_macs_passed(self):
    # If a macs argument is supplied to the 'nova-network' API, it is just
    # ignored. This test checks that the call down to the rpcapi layer
    # doesn't pass macs down: nova-network doesn't support hypervisor
    # mac address limits (today anyhow).
    macs = set(['ab:cd:ef:01:23:34'])
    self.mox.StubOutWithMock(
        self.network_api.network_rpcapi, "allocate_for_instance")
    kwargs = dict(zip(['host', 'instance_id', 'project_id',
                       'requested_networks', 'rxtx_factor', 'vpn', 'macs'],
                      itertools.repeat(mox.IgnoreArg())))
    self.network_api.network_rpcapi.allocate_for_instance(
        mox.IgnoreArg(), **kwargs).AndReturn([])
    self.mox.ReplayAll()
    inst_type = flavors.get_default_flavor()
    inst_type['rxtx_factor'] = 0
    sys_meta = flavors.save_flavor_info({}, inst_type)
    instance = dict(id='id', uuid='uuid', project_id='project_id',
                    host='host',
                    system_metadata=utils.dict_to_metadata(sys_meta))
    self.network_api.allocate_for_instance(
        self.context, instance, 'vpn', 'requested_networks', macs=macs)
def _live_migration_instance(self):
    inst_type = {'memory_mb': 1024, 'root_gb': 40, 'deleted_at': None,
                 'name': u'm1.medium', 'deleted': 0, 'created_at': None,
                 'ephemeral_gb': 0, 'updated_at': None, 'disabled': False,
                 'vcpus': 2, 'extra_specs': {}, 'swap': 0,
                 'rxtx_factor': 1.0, 'is_public': True, 'flavorid': u'3',
                 'vcpu_weight': None, 'id': 1}
    sys_meta = utils.dict_to_metadata(
        flavors.save_flavor_info({}, inst_type))
    return {'id': 31337,
            'uuid': 'fake_uuid',
            'name': 'fake-instance',
            'host': 'fake_host1',
            'power_state': power_state.RUNNING,
            'memory_mb': 1024,
            'root_gb': 1024,
            'ephemeral_gb': 0,
            'vm_state': '',
            'task_state': '',
            'instance_type_id': inst_type['id'],
            'image_ref': 'fake-image-ref',
            'system_metadata': sys_meta}
def _live_migration_instance(self):
    inst_type = {
        "memory_mb": 1024,
        "root_gb": 40,
        "deleted_at": None,
        "name": u"m1.medium",
        "deleted": 0,
        "created_at": None,
        "ephemeral_gb": 0,
        "updated_at": None,
        "disabled": False,
        "vcpus": 2,
        "extra_specs": {},
        "swap": 0,
        "rxtx_factor": 1.0,
        "is_public": True,
        "flavorid": u"3",
        "vcpu_weight": None,
        "id": 1,
    }
    sys_meta = utils.dict_to_metadata(flavors.save_flavor_info({}, inst_type))
    return {
        "id": 31337,
        "uuid": "fake_uuid",
        "name": "fake-instance",
        "host": "fake_host1",
        "power_state": power_state.RUNNING,
        "memory_mb": 1024,
        "root_gb": 1024,
        "ephemeral_gb": 0,
        "vm_state": "",
        "task_state": "",
        "instance_type_id": inst_type["id"],
        "image_ref": "fake-image-ref",
        "system_metadata": sys_meta,
    }
def get_fake_db_instance(start, end, instance_id, tenant_id,
                         vm_state=vm_states.ACTIVE):
    sys_meta = utils.dict_to_metadata(
        flavors.save_flavor_info({}, FAKE_INST_TYPE))
    # NOTE(mriedem): We use fakes.stub_instance since it sets the fields
    # needed on the db instance for converting it to an object, but we still
    # need to override system_metadata to use our fake flavor.
    inst = fakes.stub_instance(
        id=instance_id,
        uuid='00000000-0000-0000-0000-00000000000000%02d' % instance_id,
        image_ref='1',
        project_id=tenant_id,
        user_id='fakeuser',
        display_name='name',
        flavor_id=FAKE_INST_TYPE['id'],
        launched_at=start,
        terminated_at=end,
        vm_state=vm_state,
        memory_mb=MEMORY_MB,
        vcpus=VCPUS,
        root_gb=ROOT_GB,
        ephemeral_gb=EPHEMERAL_GB,)
    inst['system_metadata'] = sys_meta
    return inst
def _stub_migrate_instance_calls(self, method, multi_host, info):
    fake_flavor = flavors.get_default_flavor()
    fake_flavor["rxtx_factor"] = 1.21
    sys_meta = utils.dict_to_metadata(flavors.save_flavor_info({}, fake_flavor))
    fake_instance = {
        "uuid": "fake_uuid",
        "instance_type_id": fake_flavor["id"],
        "project_id": "fake_project_id",
        "system_metadata": sys_meta,
    }
    fake_migration = {"source_compute": "fake_compute_source",
                      "dest_compute": "fake_compute_dest"}

    def fake_mig_inst_method(*args, **kwargs):
        info["kwargs"] = kwargs

    def fake_is_multi_host(*args, **kwargs):
        return multi_host

    def fake_get_floaters(*args, **kwargs):
        return ["fake_float1", "fake_float2"]

    self.stubs.Set(network_rpcapi.NetworkAPI, method, fake_mig_inst_method)
    self.stubs.Set(self.network_api, "_is_multi_host", fake_is_multi_host)
    self.stubs.Set(self.network_api, "_get_floating_ip_addresses",
                   fake_get_floaters)

    expected = {
        "instance_uuid": "fake_uuid",
        "source_compute": "fake_compute_source",
        "dest_compute": "fake_compute_dest",
        "rxtx_factor": 1.21,
        "project_id": "fake_project_id",
        "floating_addresses": None,
    }
    if multi_host:
        expected["floating_addresses"] = ["fake_float1", "fake_float2"]
    return fake_instance, fake_migration, expected
def test_dict_to_metadata_empty(self):
    self.assertEqual(utils.dict_to_metadata({}), [])
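# The tests above pin down the contract these snippets rely on:
# utils.dict_to_metadata() turns a plain dict into a list of
# {'key': ..., 'value': ...} records, and an empty dict into an empty list.
# The sketch below is an illustrative stand-in with that contract, assuming
# element order does not matter; it is not the real nova.utils
# implementation, which may build DB model objects rather than plain dicts.
def dict_to_metadata_sketch(metadata):
    return [{'key': key, 'value': value}
            for key, value in metadata.items()]

assert dict_to_metadata_sketch({}) == []
assert dict_to_metadata_sketch({'foo1': 'bar1'}) == [{'key': 'foo1',
                                                      'value': 'bar1'}]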
def stub_instance(id, user_id=None, project_id=None, host=None, node=None, vm_state=None, task_state=None, reservation_id="", uuid=FAKE_UUID, image_ref="10", flavor_id="1", name=None, key_name='', access_ipv4=None, access_ipv6=None, progress=0, auto_disk_config=False, display_name=None, include_fake_metadata=True, config_drive=None, power_state=None, nw_cache=None, metadata=None, security_groups=None, root_device_name=None, limit=None, marker=None, launched_at=timeutils.utcnow(), terminated_at=timeutils.utcnow(), availability_zone='', locked_by=None, cleaned=False, memory_mb=0, vcpus=0, root_gb=0, ephemeral_gb=0): if user_id is None: user_id = 'fake_user' if project_id is None: project_id = 'fake_project' if metadata: metadata = [{'key': k, 'value': v} for k, v in metadata.items()] elif include_fake_metadata: metadata = [models.InstanceMetadata(key='seq', value=str(id))] else: metadata = [] inst_type = flavors.get_flavor_by_flavor_id(int(flavor_id)) sys_meta = flavors.save_flavor_info({}, inst_type) if host is not None: host = str(host) if key_name: key_data = 'FAKE' else: key_data = '' if security_groups is None: security_groups = [{"id": 1, "name": "test", "description": "Foo:", "project_id": "project", "user_id": "user", "created_at": None, "updated_at": None, "deleted_at": None, "deleted": False}] # ReservationID isn't sent back, hack it in there. server_name = name or "server%s" % id if reservation_id != "": server_name = "reservation_%s" % (reservation_id, ) info_cache = create_info_cache(nw_cache) instance = { "id": int(id), "created_at": datetime.datetime(2010, 10, 10, 12, 0, 0), "updated_at": datetime.datetime(2010, 11, 11, 11, 0, 0), "deleted_at": datetime.datetime(2010, 12, 12, 10, 0, 0), "deleted": None, "user_id": user_id, "project_id": project_id, "image_ref": image_ref, "kernel_id": "", "ramdisk_id": "", "launch_index": 0, "key_name": key_name, "key_data": key_data, "config_drive": config_drive, "vm_state": vm_state or vm_states.BUILDING, "task_state": task_state, "power_state": power_state, "memory_mb": memory_mb, "vcpus": vcpus, "root_gb": root_gb, "ephemeral_gb": ephemeral_gb, "ephemeral_key_uuid": None, "hostname": display_name or server_name, "host": host, "node": node, "instance_type_id": 1, "instance_type": inst_type, "user_data": "", "reservation_id": reservation_id, "mac_address": "", "scheduled_at": timeutils.utcnow(), "launched_at": launched_at, "terminated_at": terminated_at, "availability_zone": availability_zone, "display_name": display_name or server_name, "display_description": "", "locked": locked_by is not None, "locked_by": locked_by, "metadata": metadata, "access_ip_v4": access_ipv4, "access_ip_v6": access_ipv6, "uuid": uuid, "progress": progress, "auto_disk_config": auto_disk_config, "name": "instance-%s" % id, "shutdown_terminate": True, "disable_terminate": False, "security_groups": security_groups, "root_device_name": root_device_name, "system_metadata": utils.dict_to_metadata(sys_meta), "pci_devices": [], "vm_mode": "", "default_swap_device": "", "default_ephemeral_device": "", "launched_on": "", "cell_name": "", "architecture": "", "os_type": "", "cleaned": cleaned} instance.update(info_cache) instance['info_cache']['instance_uuid'] = instance['uuid'] return instance
def get_default_sys_meta():
    return utils.dict_to_metadata(
        flavors.save_flavor_info(
            {}, flavors.get_default_flavor()))
def get_default_sys_meta():
    return utils.dict_to_metadata(
        instance_types.save_instance_type_info(
            {}, instance_types.get_default_instance_type()))
def _shelve_instance(self, shelved_offload_time, clean_shutdown=True): CONF.set_override("shelved_offload_time", shelved_offload_time) db_instance = jsonutils.to_primitive(self._create_fake_instance()) instance = objects.Instance.get_by_uuid( self.context, db_instance["uuid"], expected_attrs=["metadata", "system_metadata"] ) image_id = "fake_image_id" host = "fake-mini" cur_time = timeutils.utcnow() timeutils.set_time_override(cur_time) instance.task_state = task_states.SHELVING instance.save() sys_meta = dict(instance.system_metadata) sys_meta["shelved_at"] = timeutils.strtime(at=cur_time) sys_meta["shelved_image_id"] = image_id sys_meta["shelved_host"] = host db_instance["system_metadata"] = utils.dict_to_metadata(sys_meta) self.mox.StubOutWithMock(self.compute, "_notify_about_instance_usage") self.mox.StubOutWithMock(self.compute.driver, "snapshot") self.mox.StubOutWithMock(self.compute.driver, "power_off") self.mox.StubOutWithMock(self.compute, "_get_power_state") self.mox.StubOutWithMock(db, "instance_update_and_get_original") self.compute._notify_about_instance_usage(self.context, instance, "shelve.start") if clean_shutdown: self.compute.driver.power_off(instance, CONF.shutdown_timeout, self.compute.SHUTDOWN_RETRY_INTERVAL) else: self.compute.driver.power_off(instance, 0, 0) self.compute._get_power_state(self.context, instance).AndReturn(123) self.compute.driver.snapshot(self.context, instance, "fake_image_id", mox.IgnoreArg()) update_values = { "power_state": 123, "vm_state": vm_states.SHELVED, "task_state": None, "expected_task_state": [task_states.SHELVING, task_states.SHELVING_IMAGE_UPLOADING], "system_metadata": sys_meta, } if CONF.shelved_offload_time == 0: update_values["task_state"] = task_states.SHELVING_OFFLOADING db.instance_update_and_get_original( self.context, instance["uuid"], update_values, update_cells=False, columns_to_join=["metadata", "system_metadata"], ).AndReturn((db_instance, db_instance)) self.compute._notify_about_instance_usage(self.context, instance, "shelve.end") if CONF.shelved_offload_time == 0: self.compute._notify_about_instance_usage(self.context, instance, "shelve_offload.start") self.compute.driver.power_off(instance) self.compute._get_power_state(self.context, instance).AndReturn(123) db.instance_update_and_get_original( self.context, instance["uuid"], { "power_state": 123, "host": None, "node": None, "vm_state": vm_states.SHELVED_OFFLOADED, "task_state": None, "expected_task_state": [task_states.SHELVING, task_states.SHELVING_OFFLOADING], }, update_cells=False, columns_to_join=["metadata", "system_metadata"], ).AndReturn((db_instance, db_instance)) self.compute._notify_about_instance_usage(self.context, instance, "shelve_offload.end") self.mox.ReplayAll() self.compute.shelve_instance(self.context, instance, image_id=image_id, clean_shutdown=clean_shutdown)
def get_default_sys_meta():
    return utils.dict_to_metadata(
        flavors.save_flavor_info({}, flavors.get_default_flavor()))
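# Usage sketch for the helper above, mirroring how the surrounding snippets
# attach flavor data to a fake DB instance: flavors.save_flavor_info()
# flattens the flavor into a sys_meta dict, which dict_to_metadata() converts
# to the key/value list stored under 'system_metadata'. The instance dict and
# helper name here are illustrative assumptions, not part of the test suite.
def make_fake_instance_with_default_flavor(uuid='fake-uuid'):
    sys_meta = flavors.save_flavor_info({}, flavors.get_default_flavor())
    return {'uuid': uuid,
            'system_metadata': utils.dict_to_metadata(sys_meta)}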
def test_dict_to_metadata(self):
    expected = [{"key": "foo1", "value": "bar1"},
                {"key": "foo2", "value": "bar2"}]
    self.assertEqual(sorted(utils.dict_to_metadata(dict(foo1="bar1", foo2="bar2"))),
                     sorted(expected))
def stub_instance(id=1, user_id=None, project_id=None, host=None, node=None, vm_state=None, task_state=None, reservation_id="", uuid=FAKE_UUID, image_ref="10", flavor_id="1", name=None, key_name='', access_ipv4=None, access_ipv6=None, progress=0, auto_disk_config=False, display_name=None, display_description=None, include_fake_metadata=True, config_drive=None, power_state=None, nw_cache=None, metadata=None, security_groups=None, root_device_name=None, limit=None, marker=None, launched_at=timeutils.utcnow(), terminated_at=timeutils.utcnow(), availability_zone='', locked_by=None, cleaned=False, memory_mb=0, vcpus=0, root_gb=0, ephemeral_gb=0, instance_type=None, launch_index=0, kernel_id="", ramdisk_id="", user_data=None, system_metadata=None, services=None): if user_id is None: user_id = 'fake_user' if project_id is None: project_id = 'fake_project' if metadata: metadata = [{'key': k, 'value': v} for k, v in metadata.items()] elif include_fake_metadata: metadata = [models.InstanceMetadata(key='seq', value=str(id))] else: metadata = [] inst_type = flavors.get_flavor_by_flavor_id(int(flavor_id)) sys_meta = flavors.save_flavor_info({}, inst_type) sys_meta.update(system_metadata or {}) if host is not None: host = str(host) if key_name: key_data = 'FAKE' else: key_data = '' if security_groups is None: security_groups = [{ "id": 1, "name": "test", "description": "Foo:", "project_id": "project", "user_id": "user", "created_at": None, "updated_at": None, "deleted_at": None, "deleted": False }] # ReservationID isn't sent back, hack it in there. server_name = name or "server%s" % id if reservation_id != "": server_name = "reservation_%s" % (reservation_id, ) info_cache = create_info_cache(nw_cache) if instance_type is None: instance_type = flavors.get_default_flavor() flavorinfo = jsonutils.dumps({ 'cur': instance_type.obj_to_primitive(), 'old': None, 'new': None, }) instance = { "id": int(id), "created_at": datetime.datetime(2010, 10, 10, 12, 0, 0), "updated_at": datetime.datetime(2010, 11, 11, 11, 0, 0), "deleted_at": datetime.datetime(2010, 12, 12, 10, 0, 0), "deleted": None, "user_id": user_id, "project_id": project_id, "image_ref": image_ref, "kernel_id": kernel_id, "ramdisk_id": ramdisk_id, "launch_index": launch_index, "key_name": key_name, "key_data": key_data, "config_drive": config_drive, "vm_state": vm_state or vm_states.ACTIVE, "task_state": task_state, "power_state": power_state, "memory_mb": memory_mb, "vcpus": vcpus, "min_vcpus": vcpus, "max_vcpus": vcpus, "root_gb": root_gb, "ephemeral_gb": ephemeral_gb, "ephemeral_key_uuid": None, "hostname": display_name or server_name, "host": host, "node": node, "instance_type_id": 1, "instance_type": inst_type, "user_data": user_data, "reservation_id": reservation_id, "mac_address": "", "launched_at": launched_at, "terminated_at": terminated_at, "availability_zone": availability_zone, "display_name": display_name or server_name, "display_description": display_description, "locked": locked_by is not None, "locked_by": locked_by, "metadata": metadata, "access_ip_v4": access_ipv4, "access_ip_v6": access_ipv6, "uuid": uuid, "progress": progress, "auto_disk_config": auto_disk_config, "name": "instance-%s" % id, "shutdown_terminate": True, "disable_terminate": False, "security_groups": security_groups, "root_device_name": root_device_name, "system_metadata": utils.dict_to_metadata(sys_meta), "pci_devices": [], "vm_mode": "", "default_swap_device": "", "default_ephemeral_device": "", "launched_on": "", "cell_name": "", "architecture": "", "os_type": "", 
"extra": { "numa_topology": None, "pci_requests": None, "flavor": flavorinfo, }, "cleaned": cleaned, "services": services } instance.update(info_cache) instance['info_cache']['instance_uuid'] = instance['uuid'] return instance
def get_default_sys_meta():
    return utils.dict_to_metadata(
        flavors.save_instance_type_info({}, flavors.get_default_instance_type()))
def _shelve_instance(self, shelved_offload_time, clean_shutdown=True): CONF.set_override('shelved_offload_time', shelved_offload_time) db_instance = jsonutils.to_primitive(self._create_fake_instance()) instance = objects.Instance.get_by_uuid( self.context, db_instance['uuid'], expected_attrs=['metadata', 'system_metadata']) image_id = 'fake_image_id' host = 'fake-mini' cur_time = timeutils.utcnow() timeutils.set_time_override(cur_time) instance.task_state = task_states.SHELVING instance.save() sys_meta = dict(instance.system_metadata) sys_meta['shelved_at'] = timeutils.strtime(at=cur_time) sys_meta['shelved_image_id'] = image_id sys_meta['shelved_host'] = host db_instance['system_metadata'] = utils.dict_to_metadata(sys_meta) self.mox.StubOutWithMock(self.compute, '_notify_about_instance_usage') self.mox.StubOutWithMock(self.compute.driver, 'snapshot') self.mox.StubOutWithMock(self.compute.driver, 'power_off') self.mox.StubOutWithMock(self.compute, '_get_power_state') self.mox.StubOutWithMock(db, 'instance_update_and_get_original') self.compute._notify_about_instance_usage(self.context, instance, 'shelve.start') if clean_shutdown: self.compute.driver.power_off(instance, CONF.shutdown_timeout, self.compute.SHUTDOWN_RETRY_INTERVAL) else: self.compute.driver.power_off(instance, 0, 0) self.compute._get_power_state(self.context, instance).AndReturn(123) self.compute.driver.snapshot(self.context, instance, 'fake_image_id', mox.IgnoreArg()) update_values = {'power_state': 123, 'vm_state': vm_states.SHELVED, 'task_state': None, 'expected_task_state': [task_states.SHELVING, task_states.SHELVING_IMAGE_UPLOADING], 'system_metadata': sys_meta} if CONF.shelved_offload_time == 0: update_values['task_state'] = task_states.SHELVING_OFFLOADING db.instance_update_and_get_original(self.context, instance['uuid'], update_values, update_cells=False, columns_to_join=['metadata', 'system_metadata'], ).AndReturn((db_instance, db_instance)) self.compute._notify_about_instance_usage(self.context, instance, 'shelve.end') if CONF.shelved_offload_time == 0: self.compute._notify_about_instance_usage(self.context, instance, 'shelve_offload.start') self.compute.driver.power_off(instance) self.compute._get_power_state(self.context, instance).AndReturn(123) db.instance_update_and_get_original(self.context, instance['uuid'], {'power_state': 123, 'host': None, 'node': None, 'vm_state': vm_states.SHELVED_OFFLOADED, 'task_state': None, 'expected_task_state': [task_states.SHELVING, task_states.SHELVING_OFFLOADING]}, update_cells=False, columns_to_join=['metadata', 'system_metadata'], ).AndReturn((db_instance, db_instance)) self.compute._notify_about_instance_usage(self.context, instance, 'shelve_offload.end') self.mox.ReplayAll() self.compute.shelve_instance(self.context, instance, image_id=image_id, clean_shutdown=clean_shutdown)
def stub_instance(id, user_id=None, project_id=None, host=None, node=None, vm_state=None, task_state=None, reservation_id="", uuid=FAKE_UUID, image_ref="10", flavor_id="1", name=None, key_name='', access_ipv4=None, access_ipv6=None, progress=0, auto_disk_config=False, display_name=None, include_fake_metadata=True, config_drive=None, power_state=None, nw_cache=None, metadata=None, security_groups=None, root_device_name=None, limit=None, marker=None): if user_id is None: user_id = 'fake_user' if project_id is None: project_id = 'fake_project' if metadata: metadata = [{'key': k, 'value': v} for k, v in metadata.items()] elif include_fake_metadata: metadata = [models.InstanceMetadata(key='seq', value=str(id))] else: metadata = [] inst_type = flavors.get_instance_type_by_flavor_id(int(flavor_id)) sys_meta = flavors.save_instance_type_info({}, inst_type) if host is not None: host = str(host) if key_name: key_data = 'FAKE' else: key_data = '' if security_groups is None: security_groups = [{"id": 1, "name": "test"}] # ReservationID isn't sent back, hack it in there. server_name = name or "server%s" % id if reservation_id != "": server_name = "reservation_%s" % (reservation_id, ) info_cache = create_info_cache(nw_cache) instance = { "id": int(id), "created_at": datetime.datetime(2010, 10, 10, 12, 0, 0), "updated_at": datetime.datetime(2010, 11, 11, 11, 0, 0), "user_id": user_id, "project_id": project_id, "image_ref": image_ref, "kernel_id": "", "ramdisk_id": "", "launch_index": 0, "key_name": key_name, "key_data": key_data, "config_drive": config_drive, "vm_state": vm_state or vm_states.BUILDING, "task_state": task_state, "power_state": power_state, "memory_mb": 0, "vcpus": 0, "root_gb": 0, "ephemeral_gb": 0, "hostname": display_name or server_name, "host": host, "node": node, "instance_type_id": 1, "instance_type": dict(inst_type), "user_data": "", "reservation_id": reservation_id, "mac_address": "", "scheduled_at": timeutils.utcnow(), "launched_at": timeutils.utcnow(), "terminated_at": timeutils.utcnow(), "availability_zone": "", "display_name": display_name or server_name, "display_description": "", "locked": False, "metadata": metadata, "access_ip_v4": access_ipv4, "access_ip_v6": access_ipv6, "uuid": uuid, "progress": progress, "auto_disk_config": auto_disk_config, "name": "instance-%s" % id, "shutdown_terminate": True, "disable_terminate": False, "security_groups": security_groups, "root_device_name": root_device_name, "system_metadata": utils.dict_to_metadata(sys_meta), "vm_mode": "", "default_swap_device": "", "default_ephemeral_device": "", "launched_on": "", "cell_name": "", "architecture": "", "os_type": "" } instance.update(info_cache) return instance
def test_dict_to_metadata(self):
    expected = [{'key': 'foo1', 'value': 'bar1'},
                {'key': 'foo2', 'value': 'bar2'}]
    self.assertEqual(utils.dict_to_metadata(dict(foo1='bar1', foo2='bar2')),
                     expected)
def test_allocate_for_instance_handles_macs_passed(self):
    # If a macs argument is supplied to the 'nova-network' API, it is just
    # ignored. This test checks that the call down to the rpcapi layer
    # doesn't pass macs down: nova-network doesn't support hypervisor
    # mac address limits (today anyhow).
    macs = set(["ab:cd:ef:01:23:34"])
    self.mox.StubOutWithMock(self.network_api.network_rpcapi,
                             "allocate_for_instance")
    kwargs = dict(
        zip(
            [
                "host",
                "instance_id",
                "project_id",
                "requested_networks",
                "rxtx_factor",
                "vpn",
                "macs",
                "dhcp_options",
            ],
            itertools.repeat(mox.IgnoreArg()),
        )
    )
    self.network_api.network_rpcapi.allocate_for_instance(
        mox.IgnoreArg(), **kwargs).AndReturn([])
    self.mox.ReplayAll()
    flavor = flavors.get_default_flavor()
    flavor["rxtx_factor"] = 0
    sys_meta = flavors.save_flavor_info({}, flavor)
    instance = dict(
        id="id",
        uuid="uuid",
        project_id="project_id",
        host="host",
        system_metadata=utils.dict_to_metadata(sys_meta),
    )
    self.network_api.allocate_for_instance(
        self.context, instance, "vpn", "requested_networks", macs=macs)
def _shelve_instance(self, shelved_offload_time): CONF.set_override('shelved_offload_time', shelved_offload_time) db_instance = jsonutils.to_primitive(self._create_fake_instance()) self.compute.run_instance(self.context, db_instance, {}, {}, [], None, None, True, None, False) instance = instance_obj.Instance.get_by_uuid( self.context, db_instance['uuid'], expected_attrs=['metadata', 'system_metadata']) image_id = 'fake_image_id' host = 'fake-mini' cur_time = timeutils.utcnow() timeutils.set_time_override(cur_time) instance.task_state = task_states.SHELVING instance.save() sys_meta = dict(instance.system_metadata) sys_meta['shelved_at'] = timeutils.strtime(at=cur_time) sys_meta['shelved_image_id'] = image_id sys_meta['shelved_host'] = host db_instance['system_metadata'] = utils.dict_to_metadata(sys_meta) self.mox.StubOutWithMock(self.compute, '_notify_about_instance_usage') self.mox.StubOutWithMock(self.compute.driver, 'snapshot') self.mox.StubOutWithMock(self.compute.driver, 'power_off') self.mox.StubOutWithMock(self.compute, '_get_power_state') self.mox.StubOutWithMock(db, 'instance_update_and_get_original') self.compute._notify_about_instance_usage(self.context, instance, 'shelve.start') self.compute.driver.power_off(instance) self.compute._get_power_state(self.context, instance).AndReturn(123) self.compute.driver.snapshot(self.context, instance, 'fake_image_id', mox.IgnoreArg()) update_values = { 'power_state': 123, 'vm_state': vm_states.SHELVED, 'task_state': None, 'expected_task_state': [task_states.SHELVING, task_states.SHELVING_IMAGE_UPLOADING], 'system_metadata': sys_meta } if CONF.shelved_offload_time == 0: update_values['task_state'] = task_states.SHELVING_OFFLOADING db.instance_update_and_get_original( self.context, instance['uuid'], update_values, update_cells=False, columns_to_join=['metadata', 'system_metadata'], ).AndReturn((db_instance, db_instance)) self.compute._notify_about_instance_usage(self.context, instance, 'shelve.end') if CONF.shelved_offload_time == 0: self.compute._notify_about_instance_usage(self.context, instance, 'shelve_offload.start') self.compute.driver.power_off(instance) self.compute._get_power_state(self.context, instance).AndReturn(123) db.instance_update_and_get_original( self.context, instance['uuid'], { 'power_state': 123, 'host': None, 'node': None, 'vm_state': vm_states.SHELVED_OFFLOADED, 'task_state': None, 'expected_task_state': [task_states.SHELVING, task_states.SHELVING_OFFLOADING] }, update_cells=False, columns_to_join=['metadata', 'system_metadata'], ).AndReturn((db_instance, db_instance)) self.compute._notify_about_instance_usage(self.context, instance, 'shelve_offload.end') self.mox.ReplayAll() self.compute.shelve_instance(self.context, instance, image_id=image_id)