def test_live_migration_dest_check_service_lack_memory(self):
    """Confirms exception raises when dest doesn't have enough memory."""
    # Stub every collaborator schedule_live_migration touches before the
    # destination-memory check; mox verifies they are called in order.
    self.mox.StubOutWithMock(db, 'instance_get')
    self.mox.StubOutWithMock(self.driver, '_live_migration_src_check')
    self.mox.StubOutWithMock(db, 'service_get_all_compute_by_host')
    self.mox.StubOutWithMock(utils, 'service_is_up')
    self.mox.StubOutWithMock(self.driver, '_get_compute_info')
    self.mox.StubOutWithMock(db, 'instance_get_all_by_host')

    dest = 'fake_host2'
    block_migration = False
    disk_over_commit = False
    instance = self._live_migration_instance()
    db.instance_get(self.context, instance['id']).AndReturn(instance)

    self.driver._live_migration_src_check(self.context, instance)
    db.service_get_all_compute_by_host(self.context,
            dest).AndReturn(['fake_service3'])
    utils.service_is_up('fake_service3').AndReturn(True)

    # Destination reports 2048MB total while existing instances already
    # consume 1024 + 512 = 1536MB; presumably the migrating instance does
    # not fit in the remaining 512MB, so the scheduler must refuse.
    self.driver._get_compute_info(self.context, dest,
                                  'memory_mb').AndReturn(2048)
    db.instance_get_all_by_host(self.context, dest).AndReturn(
            [dict(memory_mb=1024), dict(memory_mb=512)])

    self.mox.ReplayAll()
    self.assertRaises(exception.MigrationError,
            self.driver.schedule_live_migration, self.context,
            instance_id=instance['id'], dest=dest,
            block_migration=block_migration,
            disk_over_commit=disk_over_commit)
def test_live_migration_same_shared_storage_okay(self):
    """live migration works with same src and dest shared storage"""
    # NOTE(review): despite the name/docstring, this test expects
    # InvalidSharedStorage to be raised after the shared-storage check is
    # primed with False — confirm the intended scenario against the
    # scheduler implementation.
    self.mox.StubOutWithMock(db, 'instance_get')
    self.mox.StubOutWithMock(self.driver, '_live_migration_src_check')
    self.mox.StubOutWithMock(self.driver, '_live_migration_dest_check')
    self.mox.StubOutWithMock(rpc, 'queue_get_for')
    self.mox.StubOutWithMock(rpc, 'call')
    self.mox.StubOutWithMock(rpc, 'cast')

    dest = 'fake_host2'
    block_migration = False
    disk_over_commit = False
    instance = self._live_migration_instance()
    db.instance_get(self.context, instance['id']).AndReturn(instance)

    self.driver._live_migration_src_check(self.context, instance)
    self.driver._live_migration_dest_check(self.context, instance,
            dest, block_migration, disk_over_commit)
    # Prime the shared-storage round trip helper; False = check fails.
    self._check_shared_storage(dest, instance, False)

    self.mox.ReplayAll()
    self.assertRaises(exception.InvalidSharedStorage,
            self.driver.schedule_live_migration, self.context,
            instance_id=instance['id'], dest=dest,
            block_migration=block_migration,
            disk_over_commit=disk_over_commit)
def test_live_migration_compute_src_not_alive(self):
    """Raise exception when src compute node is not alive."""
    self.mox.StubOutWithMock(db, 'instance_get')
    self.mox.StubOutWithMock(db, 'service_get_all_by_topic')
    self.mox.StubOutWithMock(utils, 'service_is_up')
    self.mox.StubOutWithMock(db, 'service_get_all_compute_by_host')

    dest = 'fake_host2'
    block_migration = False
    instance = self._live_migration_instance()
    db.instance_get(self.context, instance['id']).AndReturn(instance)

    # Volume service is up, so the failure is attributed to compute.
    db.service_get_all_by_topic(self.context, 'volume').AndReturn(
            ['fake_service'])
    utils.service_is_up('fake_service').AndReturn(True)

    # Compute service on the source host is down.
    db.service_get_all_compute_by_host(self.context,
            instance['host']).AndReturn(['fake_service2'])
    utils.service_is_up('fake_service2').AndReturn(False)

    self.mox.ReplayAll()
    self.assertRaises(exception.ComputeServiceUnavailable,
            self.driver.schedule_live_migration, self.context,
            instance_id=instance['id'], dest=dest,
            block_migration=block_migration)
def test_notify_about_instance_usage(self):
    """create.start notification carries instance, flavor and image data.

    Verifies that notify_about_instance_usage emits exactly one INFO
    notification whose payload reflects the instance's flavor, the
    image_md_* system metadata (prefix stripped) and any extra usage info.
    """
    instance_id = self._create_instance()
    instance = db.instance_get(self.context, instance_id)
    # Set some system metadata
    sys_metadata = {"image_md_key1": "val1", "image_md_key2": "val2",
                    "other_data": "meow"}
    extra_usage_info = {"image_name": "fake_name"}
    db.instance_system_metadata_update(self.context, instance["uuid"],
                                       sys_metadata, False)
    # NOTE(russellb) Make sure our instance has the latest system_metadata
    # in it.
    instance = db.instance_get(self.context, instance_id)
    compute_utils.notify_about_instance_usage(
        notify.get_notifier("compute"), self.context, instance,
        "create.start", extra_usage_info=extra_usage_info)
    # assertEquals is a deprecated alias; use assertEqual throughout.
    self.assertEqual(len(fake_notifier.NOTIFICATIONS), 1)
    msg = fake_notifier.NOTIFICATIONS[0]
    self.assertEqual(msg.priority, "INFO")
    self.assertEqual(msg.event_type, "compute.instance.create.start")
    payload = msg.payload
    self.assertEqual(payload["tenant_id"], self.project_id)
    self.assertEqual(payload["user_id"], self.user_id)
    self.assertEqual(payload["instance_id"], instance["uuid"])
    self.assertEqual(payload["instance_type"], "m1.tiny")
    type_id = flavors.get_flavor_by_name("m1.tiny")["id"]
    self.assertEqual(str(payload["instance_type_id"]), str(type_id))
    flavor_id = flavors.get_flavor_by_name("m1.tiny")["flavorid"]
    self.assertEqual(str(payload["instance_flavor_id"]), str(flavor_id))
    for attr in ("display_name", "created_at", "launched_at",
                 "state", "state_description", "image_meta"):
        # assertIn gives a clearer failure than assertTrue(x in y).
        self.assertIn(attr, payload, msg="Key %s not in payload" % attr)
    # Only image_md_* keys are propagated (prefix stripped); other_data
    # must not leak into image_meta.
    self.assertEqual(payload["image_meta"],
                     {"md_key1": "val1", "md_key2": "val2"})
    self.assertEqual(payload["image_name"], "fake_name")
    image_ref_url = "%s/images/1" % glance.generate_glance_url()
    self.assertEqual(payload["image_ref_url"], image_ref_url)
    self.compute.terminate_instance(self.context,
                                    jsonutils.to_primitive(instance))
def test_notify_usage_exists_deleted_instance(self):
    # Ensure 'exists' notification generates appropriate usage data.
    instance_id = self._create_instance()
    instance = db.instance_get(self.context, instance_id)
    # Set some system metadata
    sys_metadata = {'image_md_key1': 'val1',
                    'image_md_key2': 'val2',
                    'other_data': 'meow'}
    db.instance_system_metadata_update(self.context, instance['uuid'],
                                       sys_metadata, False)
    self.compute.terminate_instance(self.context, instance)
    # Re-read with read_deleted='yes' so the soft-deleted row is visible;
    # the exists notification must still work for deleted instances.
    instance = db.instance_get(self.context.elevated(read_deleted='yes'),
                               instance_id)
    compute_utils.notify_usage_exists(self.context, instance)
    msg = test_notifier.NOTIFICATIONS[-1]
    self.assertEquals(msg['priority'], 'INFO')
    self.assertEquals(msg['event_type'], 'compute.instance.exists')
    payload = msg['payload']
    self.assertEquals(payload['tenant_id'], self.project_id)
    self.assertEquals(payload['user_id'], self.user_id)
    self.assertEquals(payload['instance_id'], instance['uuid'])
    self.assertEquals(payload['instance_type'], 'm1.tiny')
    type_id = instance_types.get_instance_type_by_name('m1.tiny')['id']
    self.assertEquals(str(payload['instance_type_id']), str(type_id))
    for attr in ('display_name', 'created_at', 'launched_at',
                 'state', 'state_description', 'bandwidth',
                 'audit_period_beginning', 'audit_period_ending',
                 'image_meta'):
        self.assertTrue(attr in payload,
                        msg="Key %s not in payload" % attr)
    # Only image_md_* keys appear in image_meta (prefix stripped);
    # 'other_data' must not leak in.
    self.assertEquals(payload['image_meta'],
                      {'md_key1': 'val1', 'md_key2': 'val2'})
    image_ref_url = "%s/images/1" % glance.generate_glance_url()
    self.assertEquals(payload['image_ref_url'], image_ref_url)
def test_instance_dns(self):
    """Adding a fixed IP registers one DNS entry under the display name."""
    fixedip = '192.168.0.101'
    self.mox.StubOutWithMock(db, 'network_get')
    self.mox.StubOutWithMock(db, 'network_update')
    self.mox.StubOutWithMock(db, 'fixed_ip_associate_pool')
    self.mox.StubOutWithMock(db, 'instance_get')
    self.mox.StubOutWithMock(db,
                             'virtual_interface_get_by_instance_and_network')
    self.mox.StubOutWithMock(db, 'fixed_ip_update')

    db.fixed_ip_update(mox.IgnoreArg(),
                       mox.IgnoreArg(),
                       mox.IgnoreArg())
    db.virtual_interface_get_by_instance_and_network(mox.IgnoreArg(),
            mox.IgnoreArg(), mox.IgnoreArg()).AndReturn({'id': 0})

    db.instance_get(mox.IgnoreArg(),
                    mox.IgnoreArg()).AndReturn({'security_groups':
                                                [{'id': 0}]})
    # The display_name is what the DNS manager keys the entry on.
    db.instance_get(self.context, 1).AndReturn({'display_name': HOST})
    db.fixed_ip_associate_pool(mox.IgnoreArg(),
                               mox.IgnoreArg(),
                               mox.IgnoreArg()).AndReturn(fixedip)
    db.network_get(mox.IgnoreArg(),
                   mox.IgnoreArg()).AndReturn(networks[0])
    db.network_update(mox.IgnoreArg(), mox.IgnoreArg(), mox.IgnoreArg())

    self.mox.ReplayAll()
    self.network.add_fixed_ip_to_instance(self.context, 1, HOST,
                                          networks[0]['id'])
    # Exactly one DNS entry, resolving to the IP handed out by the pool.
    addresses = self.network.instance_dns_manager.get_entries_by_name(HOST)
    self.assertEqual(len(addresses), 1)
    self.assertEqual(addresses[0], fixedip)
def test_add_fixed_ip_instance_without_vpn_requested_networks(self):
    """add_fixed_ip_to_instance succeeds with no VPN/requested networks."""
    self.mox.StubOutWithMock(db, 'network_get')
    self.mox.StubOutWithMock(db, 'fixed_ip_associate_pool')
    self.mox.StubOutWithMock(db, 'instance_get')
    self.mox.StubOutWithMock(db,
                             'virtual_interface_get_by_instance_and_network')
    self.mox.StubOutWithMock(db, 'fixed_ip_update')

    db.fixed_ip_update(mox.IgnoreArg(), mox.IgnoreArg(), mox.IgnoreArg())
    db.virtual_interface_get_by_instance_and_network(mox.IgnoreArg(),
            mox.IgnoreArg(), mox.IgnoreArg()).AndReturn({'id': 0})

    db.instance_get(mox.IgnoreArg(),
                    mox.IgnoreArg()).AndReturn({'security_groups':
                                                [{'id': 0}]})
    db.fixed_ip_associate_pool(mox.IgnoreArg(),
                               mox.IgnoreArg(),
                               mox.IgnoreArg()).AndReturn('192.168.0.101')
    db.network_get(mox.IgnoreArg(),
                   mox.IgnoreArg()).AndReturn(networks[0])

    self.mox.ReplayAll()
    # No assertions beyond mox verification: the recorded call sequence
    # itself is the expectation.
    self.network.add_fixed_ip_to_instance(self.context, 1, HOST,
                                          networks[0]['id'])
def test_live_migration_compute_dest_not_alive(self):
    """Raise exception when dest compute node is not alive."""
    self.mox.StubOutWithMock(db, 'instance_get')
    self.mox.StubOutWithMock(self.driver, '_live_migration_src_check')
    self.mox.StubOutWithMock(db, 'service_get_all_compute_by_host')
    self.mox.StubOutWithMock(utils, 'service_is_up')

    dest = 'fake_host2'
    block_migration = False
    instance = self._live_migration_instance()
    instance_id = instance['id']
    db.instance_get(self.context, instance_id).AndReturn(instance)

    # Source-side checks pass; the failure is isolated to the dest.
    self.driver._live_migration_src_check(self.context, instance)
    db.service_get_all_compute_by_host(self.context,
            dest).AndReturn(['fake_service3'])
    # Compute is down
    utils.service_is_up('fake_service3').AndReturn(False)

    self.mox.ReplayAll()
    self.assertRaises(exception.ComputeServiceUnavailable,
            self.driver.schedule_live_migration, self.context,
            instance_id=instance_id, dest=dest,
            block_migration=block_migration)
def test_live_migration_basic(self):
    """Test basic schedule_live_migration functionality"""
    self.mox.StubOutWithMock(db, 'instance_get')
    self.mox.StubOutWithMock(self.driver, '_live_migration_src_check')
    self.mox.StubOutWithMock(self.driver, '_live_migration_dest_check')
    self.mox.StubOutWithMock(self.driver, '_live_migration_common_check')
    self.mox.StubOutWithMock(db, 'instance_update_and_get_original')
    self.mox.StubOutWithMock(driver, 'cast_to_compute_host')

    dest = 'fake_host2'
    block_migration = False
    disk_over_commit = False
    instance = self._live_migration_instance()
    db.instance_get(self.context, instance['id']).AndReturn(instance)

    # All three pre-migration checks pass.
    self.driver._live_migration_src_check(self.context, instance)
    self.driver._live_migration_dest_check(self.context, instance,
            dest, block_migration, disk_over_commit)
    self.driver._live_migration_common_check(self.context, instance,
            dest, block_migration, disk_over_commit)
    # Successful scheduling flips the task state to MIGRATING ...
    db.instance_update_and_get_original(self.context, instance['id'],
            {"task_state": task_states.MIGRATING}).AndReturn(
                    (instance, instance))
    # ... and casts live_migration to the source compute host.
    driver.cast_to_compute_host(self.context, instance['host'],
            'live_migration', update_db=False,
            instance_id=instance['id'], dest=dest,
            block_migration=block_migration)

    self.mox.ReplayAll()
    self.driver.schedule_live_migration(self.context,
            instance_id=instance['id'], dest=dest,
            block_migration=block_migration,
            disk_over_commit=disk_over_commit)
def test_live_migration_dest_check_service_same_host(self):
    """Confirms exception raises in case dest and src is same host."""
    self.mox.StubOutWithMock(db, 'instance_get')
    self.mox.StubOutWithMock(self.driver, '_live_migration_src_check')
    self.mox.StubOutWithMock(db, 'service_get_all_compute_by_host')
    self.mox.StubOutWithMock(utils, 'service_is_up')

    block_migration = False
    disk_over_commit = False
    instance = self._live_migration_instance()
    # make dest same as src
    dest = instance['host']

    db.instance_get(self.context, instance['id']).AndReturn(instance)

    self.driver._live_migration_src_check(self.context, instance)
    # Even though the dest service is up, migrating onto oneself must
    # be rejected.
    db.service_get_all_compute_by_host(self.context,
            dest).AndReturn(['fake_service3'])
    utils.service_is_up('fake_service3').AndReturn(True)

    self.mox.ReplayAll()
    self.assertRaises(exception.UnableToMigrateToSelf,
            self.driver.schedule_live_migration, self.context,
            instance_id=instance['id'], dest=dest,
            block_migration=block_migration,
            disk_over_commit=False)
def test_live_migration_dest_hypervisor_version_older_raises(self):
    """Confirm live migration to older hypervisor raises"""
    self.mox.StubOutWithMock(db, 'instance_get')
    self.mox.StubOutWithMock(self.driver, '_live_migration_src_check')
    self.mox.StubOutWithMock(self.driver, '_live_migration_dest_check')
    self.mox.StubOutWithMock(rpc, 'queue_get_for')
    self.mox.StubOutWithMock(rpc, 'call')
    self.mox.StubOutWithMock(rpc, 'cast')
    self.mox.StubOutWithMock(db, 'service_get_all_compute_by_host')

    dest = 'fake_host2'
    block_migration = False
    disk_over_commit = False
    instance = self._live_migration_instance()
    instance_id = instance['id']
    db.instance_get(self.context, instance_id).AndReturn(instance)

    self.driver._live_migration_src_check(self.context, instance)
    self.driver._live_migration_dest_check(self.context, instance, dest)
    # dest runs hypervisor_version 1 while src runs 2: migrating to the
    # older hypervisor is not allowed.
    db.service_get_all_compute_by_host(self.context, dest).AndReturn(
            [{'compute_node': [{'hypervisor_type': 'xen',
                                'hypervisor_version': 1}]}])
    db.service_get_all_compute_by_host(self.context,
            instance['host']).AndReturn(
                    [{'compute_node': [{'hypervisor_type': 'xen',
                                        'hypervisor_version': 2}]}])

    self.mox.ReplayAll()
    self.assertRaises(exception.DestinationHypervisorTooOld,
            self.driver.schedule_live_migration, self.context,
            instance_id=instance_id, dest=dest,
            block_migration=block_migration,
            disk_over_commit=disk_over_commit)
def test_get_by_id(self):
    """Instance.get_by_id issues a plain instance_get with no joins."""
    self.mox.StubOutWithMock(db, 'instance_get')
    expected = db.instance_get(self.context, 'instid', columns_to_join=[])
    expected.AndReturn(self.fake_instance)
    self.mox.ReplayAll()
    fetched = instance.Instance.get_by_id(self.context, 'instid')
    self.assertEqual(fetched.uuid, self.fake_instance['uuid'])
    self.assertRemotes()
def test_get_by_id(self):
    """get_by_id under an admin context performs a join-free lookup."""
    admin_ctxt = context.get_admin_context()
    self.mox.StubOutWithMock(db, "instance_get")
    lookup = db.instance_get(admin_ctxt, "instid", columns_to_join=[])
    lookup.AndReturn(self.fake_instance)
    self.mox.ReplayAll()
    retrieved = instance.Instance.get_by_id(admin_ctxt, "instid")
    self.assertEqual(retrieved.uuid, self.fake_instance["uuid"])
    self.assertRemotes()
def test_get_by_id(self):
    """get_by_id with an admin context joins no extra columns."""
    ctxt = context.get_admin_context()
    self.mox.StubOutWithMock(db, 'instance_get')
    # Expect a single DB fetch with an empty columns_to_join list.
    db.instance_get(ctxt, 'instid',
                    columns_to_join=[]).AndReturn(self.fake_instance)
    self.mox.ReplayAll()
    inst = instance.Instance.get_by_id(ctxt, 'instid')
    self.assertEqual(inst.uuid, self.fake_instance['uuid'])
    self.assertRemotes()
def test_get_by_id(self):
    """get_by_id joins info_cache and security_groups by default."""
    self.mox.StubOutWithMock(db, "instance_get")
    # Expect the default join set to be requested from the DB layer.
    db.instance_get(self.context, "instid",
                    columns_to_join=["info_cache",
                                     "security_groups"]).AndReturn(
                                             self.fake_instance)
    self.mox.ReplayAll()
    inst = instance.Instance.get_by_id(self.context, "instid")
    self.assertEqual(inst.uuid, self.fake_instance["uuid"])
    self.assertRemotes()
def test_get_by_id(self):
    """get_by_id requests the default info_cache/security_groups joins."""
    self.mox.StubOutWithMock(db, 'instance_get')
    default_joins = ['info_cache', 'security_groups']
    db.instance_get(self.context, 'instid',
                    columns_to_join=default_joins).AndReturn(
                            self.fake_instance)
    self.mox.ReplayAll()
    result = instance.Instance.get_by_id(self.context, 'instid')
    self.assertEqual(result.uuid, self.fake_instance['uuid'])
    self.assertRemotes()
def test_block_migration_dest_check_service_lack_disk(self):
    """Confirms exception raises when dest doesn't have enough disk."""
    self.mox.StubOutWithMock(db, 'instance_get')
    self.mox.StubOutWithMock(self.driver, '_live_migration_src_check')
    self.mox.StubOutWithMock(db, 'service_get_all_compute_by_host')
    self.mox.StubOutWithMock(utils, 'service_is_up')
    self.mox.StubOutWithMock(self.driver,
                             'assert_compute_node_has_enough_memory')
    self.mox.StubOutWithMock(self.driver, '_get_compute_info')
    self.mox.StubOutWithMock(db, 'instance_get_all_by_host')
    self.mox.StubOutWithMock(rpc, 'queue_get_for')
    self.mox.StubOutWithMock(rpc, 'call')

    dest = 'fake_host2'
    block_migration = True
    disk_over_commit = True
    instance = self._live_migration_instance()
    db.instance_get(self.context, instance['id']).AndReturn(instance)

    self.driver._live_migration_src_check(self.context, instance)
    db.service_get_all_compute_by_host(self.context,
            dest).AndReturn(['fake_service3'])
    utils.service_is_up('fake_service3').AndReturn(True)

    # Enough memory
    self.driver.assert_compute_node_has_enough_memory(self.context,
            instance, dest)

    # Not enough disk
    self.driver._get_compute_info(self.context, dest,
            'disk_available_least').AndReturn(1023)
    rpc.queue_get_for(self.context, FLAGS.compute_topic,
            instance['host']).AndReturn('src_queue')
    instance_disk_info_msg = {
        'method': 'get_instance_disk_info',
        'args': {
            'instance_name': instance['name'],
        },
        'version': compute_rpcapi.ComputeAPI.BASE_RPC_API_VERSION,
    }
    # The instance needs 1024GB of disk but only 1023GB is available.
    instance_disk_info = [{'disk_size': 1024 * (1024 ** 3)}]
    rpc.call(self.context,
             'src_queue',
             instance_disk_info_msg,
             None).AndReturn(jsonutils.dumps(instance_disk_info))

    self.mox.ReplayAll()
    self.assertRaises(exception.MigrationError,
            self.driver.schedule_live_migration, self.context,
            instance_id=instance['id'], dest=dest,
            block_migration=block_migration,
            disk_over_commit=disk_over_commit)
def test_block_migration_dest_check_service_lack_disk(self):
    """Confirms exception raises when dest doesn't have enough disk."""
    self.mox.StubOutWithMock(db, 'instance_get')
    self.mox.StubOutWithMock(self.driver, '_live_migration_src_check')
    self.mox.StubOutWithMock(db, 'service_get_all_compute_by_host')
    self.mox.StubOutWithMock(utils, 'service_is_up')
    self.mox.StubOutWithMock(self.driver,
                             'assert_compute_node_has_enough_memory')
    self.mox.StubOutWithMock(self.driver, '_get_compute_info')
    self.mox.StubOutWithMock(db, 'instance_get_all_by_host')
    self.mox.StubOutWithMock(rpc, 'queue_get_for')
    self.mox.StubOutWithMock(rpc, 'call')

    dest = 'fake_host2'
    block_migration = True
    disk_over_commit = True
    instance = self._live_migration_instance()
    db.instance_get(self.context, instance['id']).AndReturn(instance)

    self.driver._live_migration_src_check(self.context, instance)
    db.service_get_all_compute_by_host(self.context,
            dest).AndReturn(['fake_service3'])
    utils.service_is_up('fake_service3').AndReturn(True)

    # Enough memory
    self.driver.assert_compute_node_has_enough_memory(self.context,
            instance, dest)

    # Not enough disk
    self.driver._get_compute_info(self.context, dest,
            'disk_available_least').AndReturn(1023)
    rpc.queue_get_for(self.context, FLAGS.compute_topic,
            instance['host']).AndReturn('src_queue')
    instance_disk_info_msg = {
        'method': 'get_instance_disk_info',
        'args': {
            'instance_name': instance['name'],
        },
        'version': compute_rpcapi.ComputeAPI.RPC_API_VERSION,
    }
    # The instance needs 1024GB of disk but only 1023GB is available.
    instance_disk_info = [{'disk_size': 1024 * (1024 ** 3)}]
    rpc.call(self.context,
             'src_queue',
             instance_disk_info_msg,
             None).AndReturn(jsonutils.dumps(instance_disk_info))

    self.mox.ReplayAll()
    self.assertRaises(exception.MigrationError,
            self.driver.schedule_live_migration, self.context,
            instance_id=instance['id'], dest=dest,
            block_migration=block_migration,
            disk_over_commit=disk_over_commit)
def test_live_migration_dest_host_incompatable_cpu_raises(self):
    """compare_cpu failure on dest surfaces as a RemoteError."""
    self.mox.StubOutWithMock(db, 'instance_get')
    self.mox.StubOutWithMock(self.driver, '_live_migration_src_check')
    self.mox.StubOutWithMock(self.driver, '_live_migration_dest_check')
    self.mox.StubOutWithMock(db, 'queue_get_for')
    self.mox.StubOutWithMock(rpc, 'call')
    self.mox.StubOutWithMock(rpc, 'cast')
    self.mox.StubOutWithMock(db, 'service_get_all_compute_by_host')

    dest = 'fake_host2'
    block_migration = False
    disk_over_commit = False
    instance = self._live_migration_instance()
    db.instance_get(self.context, instance['id']).AndReturn(instance)

    self.driver._live_migration_src_check(self.context, instance)
    self.driver._live_migration_dest_check(self.context, instance,
            dest, block_migration, disk_over_commit)

    db.queue_get_for(self.context, FLAGS.compute_topic,
                     dest).AndReturn('dest_queue')
    db.queue_get_for(self.context, FLAGS.compute_topic,
                     instance['host']).AndReturn('src_queue')
    # Shared-storage round trip: create a test file on dest, confirm it
    # is visible from src, then clean it up on dest.
    tmp_filename = 'test-filename'
    rpc.call(self.context, 'dest_queue',
             {'method': 'create_shared_storage_test_file'}
             ).AndReturn(tmp_filename)
    rpc.call(self.context, 'src_queue',
             {'method': 'check_shared_storage_test_file',
              'args': {'filename': tmp_filename}}).AndReturn(True)
    rpc.call(self.context, 'dest_queue',
             {'method': 'cleanup_shared_storage_test_file',
              'args': {'filename': tmp_filename}})
    # Same hypervisor type/version, so scheduling proceeds to compare_cpu.
    db.service_get_all_compute_by_host(self.context, dest).AndReturn(
            [{'compute_node': [{'hypervisor_type': 'xen',
                                'hypervisor_version': 1}]}])
    db.service_get_all_compute_by_host(self.context,
            instance['host']).AndReturn(
                    [{'compute_node': [{'hypervisor_type': 'xen',
                                        'hypervisor_version': 1,
                                        'cpu_info': 'fake_cpu_info'}]}])
    db.queue_get_for(self.context, FLAGS.compute_topic,
                     dest).AndReturn('dest_queue')
    # dest cannot run the source CPU: compare_cpu raises remotely.
    rpc.call(self.context, 'dest_queue',
             {'method': 'compare_cpu',
              'args': {'cpu_info': 'fake_cpu_info'}}).AndRaise(
                      rpc.RemoteError())

    self.mox.ReplayAll()
    self.assertRaises(rpc_common.RemoteError,
            self.driver.schedule_live_migration, self.context,
            instance_id=instance['id'], dest=dest,
            block_migration=block_migration)
def test_discard_a_blessed_instance(self):
    """Discarding a blessed instance removes it from the database."""
    instance_id = utils.create_instance(self.context)
    self.gridcentric.bless_instance(self.context, instance_id)
    # The bless operation creates the blessed copy under the next id.
    blessed_id = instance_id + 1
    self.gridcentric.discard_instance(self.context, blessed_id)
    # assertRaises replaces the try/except/self.fail pattern: the blessed
    # instance must no longer exist after being discarded.
    self.assertRaises(exception.InstanceNotFound,
                      db.instance_get, self.context, blessed_id)
def test_finish_revert_resize(self):
    """Ensure that the flavor is reverted to the original on revert"""
    # NOTE: shadows the builtin 'context' module-level name within this
    # test; kept as-is to avoid touching behavior.
    context = self.context.elevated()
    instance_id = self._create_instance()

    def fake(*args, **kwargs):
        # No-op stand-in for driver/network calls not under test.
        pass

    self.stubs.Set(self.compute.driver, 'finish_migration', fake)
    self.stubs.Set(self.compute.driver, 'revert_migration', fake)
    self.stubs.Set(self.compute.network_api, 'get_instance_nw_info', fake)

    self.compute.run_instance(self.context, instance_id)

    # Confirm the instance size before the resize starts
    inst_ref = db.instance_get(context, instance_id)
    instance_type_ref = db.instance_type_get(context,
            inst_ref['instance_type_id'])
    self.assertEqual(instance_type_ref['flavorid'], 1)

    db.instance_update(self.context, instance_id, {'host': 'foo'})

    self.compute.prep_resize(context, inst_ref['uuid'], 3)
    migration_ref = db.migration_get_by_instance_and_status(
            context, inst_ref['uuid'], 'pre-migrating')

    self.compute.resize_instance(context, inst_ref['uuid'],
            migration_ref['id'])
    self.compute.finish_resize(context, inst_ref['uuid'],
            int(migration_ref['id']), {})

    # Prove that the instance size is now the new size
    inst_ref = db.instance_get(context, instance_id)
    instance_type_ref = db.instance_type_get(context,
            inst_ref['instance_type_id'])
    self.assertEqual(instance_type_ref['flavorid'], 3)

    # Finally, revert and confirm the old flavor has been applied
    self.compute.revert_resize(context, inst_ref['uuid'],
            migration_ref['id'])
    self.compute.finish_revert_resize(context, inst_ref['uuid'],
            migration_ref['id'])

    inst_ref = db.instance_get(context, instance_id)
    instance_type_ref = db.instance_type_get(context,
            inst_ref['instance_type_id'])
    self.assertEqual(instance_type_ref['flavorid'], 1)

    self.compute.terminate_instance(context, instance_id)
def test_finish_revert_resize(self):
    """Ensure that the flavor is reverted to the original on revert"""
    # NOTE: 'context' here shadows any module-level context import for
    # the duration of this test; kept as-is to avoid touching behavior.
    context = self.context.elevated()
    instance_id = self._create_instance()

    def fake(*args, **kwargs):
        # No-op stand-in for driver/network calls not under test.
        pass

    self.stubs.Set(self.compute.driver, 'finish_migration', fake)
    self.stubs.Set(self.compute.driver, 'revert_migration', fake)
    self.stubs.Set(self.compute.network_api, 'get_instance_nw_info', fake)

    self.compute.run_instance(self.context, instance_id)

    # Confirm the instance size before the resize starts
    inst_ref = db.instance_get(context, instance_id)
    instance_type_ref = db.instance_type_get(context,
            inst_ref['instance_type_id'])
    self.assertEqual(instance_type_ref['flavorid'], 1)

    db.instance_update(self.context, instance_id, {'host': 'foo'})

    self.compute.prep_resize(context, inst_ref['uuid'], 3)
    migration_ref = db.migration_get_by_instance_and_status(context,
            inst_ref['uuid'], 'pre-migrating')

    self.compute.resize_instance(context, inst_ref['uuid'],
            migration_ref['id'])
    self.compute.finish_resize(context, inst_ref['uuid'],
            int(migration_ref['id']), {})

    # Prove that the instance size is now the new size
    inst_ref = db.instance_get(context, instance_id)
    instance_type_ref = db.instance_type_get(context,
            inst_ref['instance_type_id'])
    self.assertEqual(instance_type_ref['flavorid'], 3)

    # Finally, revert and confirm the old flavor has been applied
    self.compute.revert_resize(context, inst_ref['uuid'],
            migration_ref['id'])
    self.compute.finish_revert_resize(context, inst_ref['uuid'],
            migration_ref['id'])

    inst_ref = db.instance_get(context, instance_id)
    instance_type_ref = db.instance_type_get(context,
            inst_ref['instance_type_id'])
    self.assertEqual(instance_type_ref['flavorid'], 1)

    self.compute.terminate_instance(context, instance_id)
def test_post_live_migration_working_correctly(self):
    """Confirm post_live_migration() works as expected correctly."""
    dest = 'desthost'
    flo_addr = '1.2.1.2'

    # Preparing data: an instance mid-migration with one volume, a fixed
    # IP and a floating IP attached to it.
    c = context.get_admin_context()
    instance_id = self._create_instance()
    i_ref = db.instance_get(c, instance_id)
    db.instance_update(c, i_ref['id'], {'state_description': 'migrating',
                                        'state': power_state.PAUSED})
    v_ref = db.volume_create(c, {'size': 1, 'instance_id': instance_id})
    fix_addr = db.fixed_ip_create(c, {'address': '1.1.1.1',
                                      'instance_id': instance_id})
    fix_ref = db.fixed_ip_get_by_address(c, fix_addr)
    flo_ref = db.floating_ip_create(c, {'address': flo_addr,
                                        'fixed_ip_id': fix_ref['id']})
    # reload is necessary before setting mocks
    i_ref = db.instance_get(c, instance_id)

    # Preparing mocks: expect each attached volume to be removed from the
    # source compute, and the instance's firewall rules to be unfiltered.
    self.mox.StubOutWithMock(self.compute.volume_manager,
                             'remove_compute_volume')
    for v in i_ref['volumes']:
        self.compute.volume_manager.remove_compute_volume(c, v['id'])
    self.mox.StubOutWithMock(self.compute.driver, 'unfilter_instance')
    self.compute.driver.unfilter_instance(i_ref, [])

    # executing
    self.mox.ReplayAll()
    ret = self.compute.post_live_migration(c, i_ref, dest)

    # make sure every data is rewritten to dest: the instance's host and
    # its floating IP must now point at the destination.
    i_ref = db.instance_get(c, i_ref['id'])
    c1 = (i_ref['host'] == dest)
    flo_refs = db.floating_ip_get_all_by_host(c, dest)
    c2 = (len(flo_refs) != 0 and flo_refs[0]['address'] == flo_addr)

    # post operation: assert, then clean up the fixture rows.
    self.assertTrue(c1 and c2)
    db.instance_destroy(c, instance_id)
    db.volume_destroy(c, v_ref['id'])
    db.floating_ip_destroy(c, flo_addr)
def test_update_of_instance_wont_update_private_fields(self):
    """update_instance must ignore attempts to set private fields."""
    created = db.instance_create(self.context, {})
    self.cloud.update_instance(self.context, created['id'],
                               mac_address='DE:AD:BE:EF')
    refreshed = db.instance_get(self.context, created['id'])
    # The mac_address passed above must not have been persisted.
    self.assertEqual(None, refreshed['mac_address'])
    db.instance_destroy(self.context, refreshed['id'])
def get_dhcp_opts(context, network_ref):
    """Get network's hosts config in dhcp-opts format."""
    hosts = []
    ips_ref = db.network_get_associated_fixed_ips(context,
                                                  network_ref['id'])
    if ips_ref:
        #set of instance ids
        instance_set = set(
            [fixed_ip_ref['instance_id'] for fixed_ip_ref in ips_ref])
        default_gw_network_node = {}
        for instance_id in instance_set:
            vifs = db.virtual_interface_get_by_instance(context,
                                                        instance_id)
            if vifs:
                #offer a default gateway to the first virtual interface
                default_gw_network_node[instance_id] = vifs[0]['network_id']

        for fixed_ip_ref in ips_ref:
            instance_id = fixed_ip_ref['instance_id']
            try:
                instance_ref = db.instance_get(context, instance_id)
            except exception.InstanceNotFound:
                # Skip IPs whose instance has disappeared; best-effort.
                msg = _("Instance %(instance_id)s not found")
                LOG.debug(msg % {'instance_id': instance_id})
                continue
            if instance_id in default_gw_network_node:
                target_network_id = default_gw_network_node[instance_id]
                # we don't want default gateway for this fixed ip
                if target_network_id != fixed_ip_ref['network_id']:
                    hosts.append(_host_dhcp_opts(fixed_ip_ref,
                                                 instance_ref))
    # One dhcp-opts entry per line; empty string when nothing applies.
    return '\n'.join(hosts)
def schedule_run_instance(self, context, instance_id, *_args, **_kwargs):
    """Picks a host that is up and has the fewest running instances."""
    instance_ref = db.instance_get(context, instance_id)
    if (instance_ref['availability_zone']
            and ':' in instance_ref['availability_zone']
            and context.is_admin):
        # Admins may pin the instance to a host via "zone:host" in the
        # availability zone; honor it if that host's compute is alive.
        zone, _x, host = instance_ref['availability_zone'].partition(':')
        service = db.service_get_by_args(context.elevated(), host,
                                         'nova-compute')
        if not self.service_is_up(service):
            raise driver.WillNotSchedule(_("Host %s is not alive") % host)

        # TODO(vish): this probably belongs in the manager, if we
        # can generalize this somehow
        now = datetime.datetime.utcnow()
        db.instance_update(context, instance_id, {'host': host,
                                                  'scheduled_at': now})
        return host
    # Otherwise walk compute services ordered by used cores (fewest
    # first) and take the first live host with capacity.
    results = db.service_get_all_compute_sorted(context)
    for result in results:
        (service, instance_cores) = result
        compute_ref = db.service_get_all_compute_by_host(
                context, service['host'])[0]
        compute_node_ref = compute_ref['compute_node'][0]
        # Results are sorted ascending, so if the emptiest host cannot
        # fit the requested vcpus, no host can.
        if (instance_ref['vcpus'] + instance_cores >
                compute_node_ref['vcpus'] * FLAGS.max_cores):
            raise driver.NoValidHost(_("All hosts have too many cores"))
        LOG.debug(_("requested instance cores = %s + used compute node cores = %s < total compute node cores = %s * max cores = %s") % (
                instance_ref['vcpus'], instance_cores,
                compute_node_ref['vcpus'], FLAGS.max_cores))
        if self.service_is_up(service):
            # NOTE(vish): this probably belongs in the manager, if we
            # can generalize this somehow
            now = datetime.datetime.utcnow()
            db.instance_update(context,
                               instance_id,
                               {'host': service['host'],
                                'scheduled_at': now})
            LOG.debug(_("instance = %s scheduled to host = %s") % (
                    instance_id, service['host']))
            return service['host']
    raise driver.NoValidHost(_("Scheduler was unable to locate a host"
                               " for this request. Is the appropriate"
                               " service running?"))
def get_dhcp_opts(context, network_ref):
    """Get network's hosts config in dhcp-opts format."""
    hosts = []
    ips_ref = db.network_get_associated_fixed_ips(context,
                                                  network_ref['id'])
    if ips_ref:
        #set of instance ids
        instance_ids = set(ip_ref['instance_id'] for ip_ref in ips_ref)
        default_gw_vif = {}
        for inst_id in instance_ids:
            vifs = db.virtual_interface_get_by_instance(context, inst_id)
            if vifs:
                #offer a default gateway to the first virtual interface
                default_gw_vif[inst_id] = vifs[0]['network_id']

        for ip_ref in ips_ref:
            inst_id = ip_ref['instance_id']
            try:
                instance_ref = db.instance_get(context, inst_id)
            except exception.InstanceNotFound:
                msg = _("Instance %(instance_id)s not found")
                LOG.debug(msg % {'instance_id': inst_id})
                continue
            if inst_id not in default_gw_vif:
                continue
            #we don't want default gateway for this fixed ip
            if default_gw_vif[inst_id] != ip_ref['network_id']:
                hosts.append(_host_dhcp_opts(ip_ref, instance_ref))
    return '\n'.join(hosts)
def test_resize_instance_notification(self):
    """Ensure notifications on instance migrate/resize"""
    instance_id = self._create_instance()
    context = self.context.elevated()
    inst_ref = db.instance_get(context, instance_id)
    self.compute.run_instance(self.context, instance_id)
    # Discard notifications emitted by run_instance; only the resize
    # notification is under test below.
    test_notifier.NOTIFICATIONS = []

    db.instance_update(self.context, instance_id, {'host': 'foo'})
    self.compute.prep_resize(context, inst_ref['uuid'], 1)
    migration_ref = db.migration_get_by_instance_and_status(context,
            inst_ref['uuid'], 'pre-migrating')

    self.assertEquals(len(test_notifier.NOTIFICATIONS), 1)
    msg = test_notifier.NOTIFICATIONS[0]
    self.assertEquals(msg['priority'], 'INFO')
    self.assertEquals(msg['event_type'], 'compute.instance.resize.prep')
    payload = msg['payload']
    self.assertEquals(payload['tenant_id'], self.project_id)
    self.assertEquals(payload['user_id'], self.user_id)
    self.assertEquals(payload['instance_id'], instance_id)
    self.assertEquals(payload['instance_type'], 'm1.tiny')
    type_id = instance_types.get_instance_type_by_name('m1.tiny')['id']
    self.assertEquals(str(payload['instance_type_id']), str(type_id))
    self.assertTrue('display_name' in payload)
    self.assertTrue('created_at' in payload)
    self.assertTrue('launched_at' in payload)
    self.assertEquals(payload['image_ref'], '1')
    self.compute.terminate_instance(context, instance_id)
def _get_vm_opaque_ref(self, instance_or_vm):
    """Refactored out the common code of many methods that receive either
    a vm name or a vm instance, and want a vm instance in return.

    Accepts an opaque-ref string, an instance-name string, an int/long
    instance id, or an object with a ``name`` attribute, and returns the
    corresponding vm reference.

    :raises exception.InstanceNotFound: if no vm matches the resolved name.
    """
    # if instance_or_vm is a string it must be opaque ref or instance name
    if isinstance(instance_or_vm, basestring):
        try:
            # check for opaque ref
            self._session.get_xenapi().VM.get_uuid(instance_or_vm)
            return instance_or_vm
        except self.XenAPI.Failure:
            # wasn't an opaque ref, can be an instance name
            instance_name = instance_or_vm
    # if instance_or_vm is an int/long it must be instance id
    elif isinstance(instance_or_vm, (int, long)):
        ctx = context.get_admin_context()
        instance_obj = db.instance_get(ctx, instance_or_vm)
        instance_name = instance_obj.name
    else:
        instance_name = instance_or_vm.name
    vm_ref = VMHelper.lookup(self._session, instance_name)
    if vm_ref is None:
        # BUG FIX: the previous code raised with instance_obj.id, but
        # instance_obj is only bound on the int/long path, producing an
        # UnboundLocalError when the lookup failed for a string or an
        # instance object.  Report the name we actually looked up.
        raise exception.InstanceNotFound(instance_id=instance_name)
    return vm_ref
def test_notify_usage_exists_instance_not_found(self):
    # Ensure 'exists' notification generates appropriate usage data.
    instance = db.instance_get(self.context, self._create_instance())
    # the instance is already gone from the db when notifying
    self.compute.terminate_instance(self.context,
                                    jsonutils.to_primitive(instance))
    compute_utils.notify_usage_exists(notify.get_notifier('compute'),
                                      self.context, instance)
    msg = fake_notifier.NOTIFICATIONS[-1]
    self.assertEquals(msg.priority, 'INFO')
    self.assertEquals(msg.event_type, 'compute.instance.exists')
    payload = msg.payload
    for key, expected in [('tenant_id', self.project_id),
                          ('user_id', self.user_id),
                          ('instance_id', instance['uuid']),
                          ('instance_type', 'm1.tiny'),
                          ('image_meta', {})]:
        self.assertEquals(payload[key], expected)
    tiny = flavors.get_flavor_by_name('m1.tiny')
    self.assertEquals(str(payload['instance_type_id']), str(tiny['id']))
    self.assertEquals(str(payload['instance_flavor_id']),
                      str(tiny['flavorid']))
    for attr in ('display_name', 'created_at', 'launched_at',
                 'state', 'state_description', 'bandwidth',
                 'audit_period_beginning', 'audit_period_ending',
                 'image_meta'):
        self.assertTrue(attr in payload, msg="Key %s not in payload" % attr)
    self.assertEquals(payload['image_ref_url'],
                      "%s/images/1" % glance.generate_glance_url())
def test_live_migration_common_check_checking_cpuinfo_fail(self): """Raise excetion when original host doen't have compatible cpu.""" dest = 'dummydest' instance_id = self._create_instance() i_ref = db.instance_get(self.context, instance_id) # compute service for destination s_ref = self._create_compute_service(host=i_ref['host']) # compute service for original host s_ref2 = self._create_compute_service(host=dest) # mocks driver = self.scheduler.driver self.mox.StubOutWithMock(driver, 'mounted_on_same_shared_storage') driver.mounted_on_same_shared_storage(mox.IgnoreArg(), i_ref, dest) self.mox.StubOutWithMock(rpc, 'call', use_mock_anything=True) rpc.call(mox.IgnoreArg(), mox.IgnoreArg(), {"method": 'compare_cpu', "args": {'cpu_info': s_ref2['compute_node'][0]['cpu_info']}}).\ AndRaise(rpc.RemoteError("doesn't have compatibility to", "", "")) self.mox.ReplayAll() try: self.scheduler.driver._live_migration_common_check(self.context, i_ref, dest) except rpc.RemoteError, e: c = (e.message.find(_("doesn't have compatibility to")) >= 0)
def test_live_migration_common_check_service_orig_not_exists(self): """Destination host does not exist.""" dest = 'dummydest' # mocks for live_migration_common_check() instance_id = self._create_instance() i_ref = db.instance_get(self.context, instance_id) t1 = datetime.datetime.utcnow() - datetime.timedelta(10) s_ref = self._create_compute_service(created_at=t1, updated_at=t1, host=dest) # mocks for mounted_on_same_shared_storage() fpath = '/test/20110127120000' self.mox.StubOutWithMock(driver, 'rpc', use_mock_anything=True) topic = FLAGS.compute_topic driver.rpc.call(mox.IgnoreArg(), db.queue_get_for(self.context, topic, dest), {"method": 'create_shared_storage_test_file'}).AndReturn(fpath) driver.rpc.call(mox.IgnoreArg(), db.queue_get_for(mox.IgnoreArg(), topic, i_ref['host']), {"method": 'check_shared_storage_test_file', "args": {'filename': fpath}}) driver.rpc.call(mox.IgnoreArg(), db.queue_get_for(mox.IgnoreArg(), topic, dest), {"method": 'cleanup_shared_storage_test_file', "args": {'filename': fpath}}) self.mox.ReplayAll() try: self.scheduler.driver._live_migration_common_check(self.context, i_ref, dest) except exception.Invalid, e: c = (e.message.find('does not exist') >= 0)
def schedule_run_instance(self, context, instance_id, *_args, **_kwargs):
    """Picks a host that is up and has the fewest running instances."""
    instance_ref = db.instance_get(context, instance_id)
    availability_zone = instance_ref['availability_zone']
    if (availability_zone and ':' in availability_zone
            and context.is_admin):
        # admins may pin the instance to a host with "zone:host"
        zone, _x, host = availability_zone.partition(':')
        service = db.service_get_by_args(context.elevated(),
                                         host, 'nova-compute')
        if not self.service_is_up(service):
            raise driver.WillNotSchedule(_("Host %s is not alive") % host)
        # TODO(vish): this probably belongs in the manager, if we
        #             can generalize this somehow
        now = datetime.datetime.utcnow()
        db.instance_update(context, instance_id,
                           {'host': host, 'scheduled_at': now})
        return host
    for service, instance_cores in \
            db.service_get_all_compute_sorted(context):
        if instance_cores + instance_ref['vcpus'] > FLAGS.max_cores:
            raise driver.NoValidHost(_("All hosts have too many cores"))
        if self.service_is_up(service):
            # NOTE(vish): this probably belongs in the manager, if we
            #             can generalize this somehow
            now = datetime.datetime.utcnow()
            db.instance_update(context, instance_id,
                               {'host': service['host'],
                                'scheduled_at': now})
            return service['host']
    raise driver.NoValidHost(_("No hosts found"))
def test_notify_usage_exists(self):
    """Ensure 'exists' notification generates appropriate usage data."""
    instance_id = self._create_instance()
    instance = db.instance_get(self.context, instance_id)
    # seed system metadata so image properties show up in the payload
    db.instance_system_metadata_update(self.context, instance['uuid'],
                                       {'image_md_key1': 'val1',
                                        'image_md_key2': 'val2',
                                        'other_data': 'meow'}, False)
    compute_utils.notify_usage_exists(self.context, instance)
    self.assertEquals(len(test_notifier.NOTIFICATIONS), 1)
    msg = test_notifier.NOTIFICATIONS[0]
    self.assertEquals(msg['priority'], 'INFO')
    self.assertEquals(msg['event_type'], 'compute.instance.exists')
    payload = msg['payload']
    for key, expected in [('tenant_id', self.project_id),
                          ('user_id', self.user_id),
                          ('instance_id', instance.uuid),
                          ('instance_type', 'm1.tiny'),
                          ('image_meta', {'md_key1': 'val1',
                                          'md_key2': 'val2'}),
                          ('image_ref_url',
                           "%s/images/1" % utils.generate_glance_url())]:
        self.assertEquals(payload[key], expected)
    type_id = instance_types.get_instance_type_by_name('m1.tiny')['id']
    self.assertEquals(str(payload['instance_type_id']), str(type_id))
    for attr in ('display_name', 'created_at', 'launched_at',
                 'state', 'state_description', 'bandwidth',
                 'audit_period_beginning', 'audit_period_ending',
                 'image_meta'):
        self.assertTrue(attr in payload, msg="Key %s not in payload" % attr)
    self.compute.terminate_instance(self.context, instance)
def test_notify_usage_exists_instance_not_found(self):
    # Ensure 'exists' notification generates appropriate usage data.
    instance = db.instance_get(self.context, self._create_instance())
    # the instance is already deleted from the db when notifying
    self.compute.terminate_instance(self.context, instance)
    compute_utils.notify_usage_exists(self.context, instance)
    msg = test_notifier.NOTIFICATIONS[-1]
    self.assertEquals(msg['priority'], 'INFO')
    self.assertEquals(msg['event_type'], 'compute.instance.exists')
    payload = msg['payload']
    for key, expected in [('tenant_id', self.project_id),
                          ('user_id', self.user_id),
                          ('instance_id', instance['uuid']),
                          ('instance_type', 'm1.tiny'),
                          ('image_meta', {})]:
        self.assertEquals(payload[key], expected)
    type_id = instance_types.get_instance_type_by_name('m1.tiny')['id']
    self.assertEquals(str(payload['instance_type_id']), str(type_id))
    for attr in ('display_name', 'created_at', 'launched_at',
                 'state', 'state_description', 'bandwidth',
                 'audit_period_beginning', 'audit_period_ending',
                 'image_meta'):
        self.assertTrue(attr in payload, msg="Key %s not in payload" % attr)
    self.assertEquals(payload['image_ref_url'],
                      "%s/images/1" % glance.generate_glance_url())
def list_instances_detail(self, context):
    """Return a list of InstanceInfo for all registered VMs"""
    LOG.debug("list_instances_detail")
    instances = []
    for bmm in db.bmm_get_all_by_instance_id_not_null(context):
        instance = db.instance_get(context, bmm["instance_id"])
        if PowerManager(bmm["ipmi_ip"]).status() == "off":
            power = power_state.SHUTOFF
            # reconcile a stale vm_state with the observed power state
            if instance["vm_state"] == vm_states.ACTIVE:
                db.instance_update(context, instance["id"],
                                   {"vm_state": vm_states.STOPPED})
        else:
            power = power_state.RUNNING
            if instance["vm_state"] == vm_states.STOPPED:
                db.instance_update(context, instance["id"],
                                   {"vm_state": vm_states.ACTIVE})
        instances.append(
            driver.InstanceInfo(
                self._instance_id_to_name(bmm["instance_id"]), power))
    return instances
def _get_vm_opaque_ref(self, instance_or_vm):
    """Refactored out the common code of many methods that receive either
    a vm name or a vm instance, and want a vm instance in return.

    Accepts an opaque-ref string, an instance-name string, an int/long
    instance id, or an object with a ``name`` attribute.

    :raises exception.NotFound: if no vm matches the resolved name.
    """
    vm = None
    try:
        # Duck-typed string test: .startswith raises AttributeError for
        # non-string arguments, which the except clause below handles.
        if instance_or_vm.startswith("OpaqueRef:"):
            # Got passed an opaque ref; return it
            return instance_or_vm
        else:
            # Must be the instance name
            instance_name = instance_or_vm
    except (AttributeError, KeyError):
        # Note: the KeyError will only happen with fakes.py
        # Not a string; must be an ID or a vm instance
        if isinstance(instance_or_vm, (int, long)):
            ctx = context.get_admin_context()
            try:
                instance_obj = db.instance_get(ctx, instance_or_vm)
                instance_name = instance_obj.name
            except exception.NotFound:
                # The unit tests screw this up, as they use an integer for
                # the vm name. I'd fix that up, but that's a matter for
                # another bug report. So for now, just try with the passed
                # value
                instance_name = instance_or_vm
        else:
            # assume a vm instance object exposing .name
            instance_name = instance_or_vm.name
    vm = VMHelper.lookup(self._session, instance_name)
    if vm is None:
        raise exception.NotFound(
            _('Instance not present %s') % instance_name)
    return vm
def test_unfilter_instance_undefines_nwfilters(self):
    admin_ctxt = context.get_admin_context()
    fakefilter = NWFilterFakes()
    self.fw._conn.nwfilterDefineXML = fakefilter.filterDefineXMLMock
    self.fw._conn.nwfilterLookupByName = fakefilter.nwfilterLookupByName
    instance_ref = self._create_instance()
    self.security_group = self.setup_and_return_security_group()
    db.instance_add_security_group(self.context, instance_ref['uuid'],
                                   self.security_group['id'])
    instance = db.instance_get(self.context, instance_ref['id'])
    network_info = _fake_network_info(self.stubs, 1)
    self.fw.setup_basic_filtering(instance, network_info)
    filters_before = len(fakefilter.filters)
    self.fw.unfilter_instance(instance, network_info)
    # exactly one nwfilter should have been undefined
    self.assertEqual(filters_before - len(fakefilter.filters), 1)
    db.instance_destroy(admin_ctxt, instance_ref['uuid'])
def test_notify_about_instance_usage(self):
    instance = db.instance_get(self.context, self._create_instance())
    # seed system metadata so image properties show up in the payload
    db.instance_system_metadata_update(self.context, instance['uuid'],
                                       {'image_md_key1': 'val1',
                                        'image_md_key2': 'val2',
                                        'other_data': 'meow'}, False)
    compute_utils.notify_about_instance_usage(
        self.context, instance, 'create.start',
        extra_usage_info={'image_name': 'fake_name'})
    self.assertEquals(len(test_notifier.NOTIFICATIONS), 1)
    msg = test_notifier.NOTIFICATIONS[0]
    self.assertEquals(msg['priority'], 'INFO')
    self.assertEquals(msg['event_type'], 'compute.instance.create.start')
    payload = msg['payload']
    for key, expected in [('tenant_id', self.project_id),
                          ('user_id', self.user_id),
                          ('instance_id', instance.uuid),
                          ('instance_type', 'm1.tiny'),
                          ('image_meta', {'md_key1': 'val1',
                                          'md_key2': 'val2'}),
                          ('image_name', 'fake_name'),
                          ('image_ref_url',
                           "%s/images/1" % utils.generate_glance_url())]:
        self.assertEquals(payload[key], expected)
    type_id = instance_types.get_instance_type_by_name('m1.tiny')['id']
    self.assertEquals(str(payload['instance_type_id']), str(type_id))
    for attr in ('display_name', 'created_at', 'launched_at',
                 'state', 'state_description', 'image_meta'):
        self.assertTrue(attr in payload, msg="Key %s not in payload" % attr)
    self.compute.terminate_instance(self.context, instance)
def test_live_migration_common_check_service_different_version(self):
    """Original host and dest host has different hypervisor version."""
    dest = 'dummydest'
    instance_id = self._create_instance()
    i_ref = db.instance_get(self.context, instance_id)
    # compute service on the original (source) host
    s_ref = self._create_compute_service(host=i_ref['host'])
    # compute service on the destination host, reporting hypervisor
    # version 12002 — presumably older than the source's default;
    # the common check must refuse the migration
    s_ref2 = self._create_compute_service(host=dest,
                                          hypervisor_version=12002)
    # mocks
    self.mox.StubOutWithMock(driver, 'mounted_on_same_shared_storage')
    driver.mounted_on_same_shared_storage(mox.IgnoreArg(), i_ref, dest)
    self.mox.ReplayAll()
    self.assertRaises(exception.DestinationHypervisorTooOld,
                      self.scheduler.driver._live_migration_common_check,
                      self.context, i_ref, dest)
    # clean up the records this test created
    db.instance_destroy(self.context, instance_id)
    db.service_destroy(self.context, s_ref['id'])
    db.service_destroy(self.context, s_ref2['id'])
def test_live_migration_common_check_checking_cpuinfo_fail(self): """Raise excetion when original host doen't have compatible cpu.""" dest = 'dummydest' instance_id = self._create_instance() i_ref = db.instance_get(self.context, instance_id) # compute service for destination s_ref = self._create_compute_service(host=i_ref['host']) # compute service for original host s_ref2 = self._create_compute_service(host=dest) # mocks driver = self.scheduler.driver self.mox.StubOutWithMock(driver, 'mounted_on_same_shared_storage') driver.mounted_on_same_shared_storage(mox.IgnoreArg(), i_ref, dest) self.mox.StubOutWithMock(rpc, 'call', use_mock_anything=True) rpc.call(mox.IgnoreArg(), mox.IgnoreArg(), {"method": 'compare_cpu', "args": {'cpu_info': s_ref2['compute_node'][0]['cpu_info']}}).\ AndRaise(rpc.RemoteError("doesn't have compatibility to", "", "")) self.mox.ReplayAll() try: self.scheduler.driver._live_migration_common_check( self.context, i_ref, dest) except rpc.RemoteError, e: c = (e.message.find(_("doesn't have compatibility to")) >= 0)
def test_notify_usage_exists(self):
    """Ensure 'exists' notification generates appropriate usage data."""
    instance = db.instance_get(self.context, self._create_instance())
    # seed system metadata so image properties appear in the payload
    db.instance_system_metadata_update(self.context, instance["uuid"],
                                       {"image_md_key1": "val1",
                                        "image_md_key2": "val2",
                                        "other_data": "meow"}, False)
    compute_utils.notify_usage_exists(self.context, instance)
    self.assertEquals(len(test_notifier.NOTIFICATIONS), 1)
    msg = test_notifier.NOTIFICATIONS[0]
    self.assertEquals(msg["priority"], "INFO")
    self.assertEquals(msg["event_type"], "compute.instance.exists")
    payload = msg["payload"]
    for key, expected in (("tenant_id", self.project_id),
                          ("user_id", self.user_id),
                          ("instance_id", instance.uuid),
                          ("instance_type", "m1.tiny"),
                          ("image_meta", {"md_key1": "val1",
                                          "md_key2": "val2"}),
                          ("image_ref_url",
                           "%s/images/1" % utils.generate_glance_url())):
        self.assertEquals(payload[key], expected)
    type_id = instance_types.get_instance_type_by_name("m1.tiny")["id"]
    self.assertEquals(str(payload["instance_type_id"]), str(type_id))
    for attr in ("display_name", "created_at", "launched_at",
                 "state", "state_description", "bandwidth",
                 "audit_period_beginning", "audit_period_ending",
                 "image_meta"):
        self.assertTrue(attr in payload, msg="Key %s not in payload" % attr)
    self.compute.terminate_instance(self.context, instance["uuid"])
def list(self, host=None): """Lists all floating ips (optionally by host) Note: if host is given, only active floating IPs are returned""" ctxt = context.get_admin_context() try: if host is None: floating_ips = db.floating_ip_get_all(ctxt) else: floating_ips = db.floating_ip_get_all_by_host(ctxt, host) except exception.NoFloatingIpsDefined: print _("No floating IP addresses have been defined.") return for floating_ip in floating_ips: instance_id = None if floating_ip['fixed_ip_id']: fixed_ip = db.fixed_ip_get(ctxt, floating_ip['fixed_ip_id']) try: instance = db.instance_get(ctxt, fixed_ip['instance_id']) instance_id = instance.get('uuid', "none") except exception.InstanceNotFound: msg = _('Missing instance %s') instance_id = msg % fixed_ip['instance_id'] print "%s\t%s\t%s\t%s\t%s" % (floating_ip['project_id'], floating_ip['address'], instance_id, floating_ip['pool'], floating_ip['interface'])
def test_resize_instance_notification(self):
    """Ensure notifications on instance migrate/resize"""
    instance_id = self._create_instance()
    elevated = self.context.elevated()
    inst_ref = db.instance_get(elevated, instance_id)
    self.compute.run_instance(self.context, instance_id)
    # only notifications emitted by prep_resize should be captured
    test_notifier.NOTIFICATIONS = []
    db.instance_update(self.context, instance_id, {'host': 'foo'})
    self.compute.prep_resize(elevated, inst_ref['uuid'], 1)
    db.migration_get_by_instance_and_status(elevated, inst_ref['uuid'],
                                            'pre-migrating')
    self.assertEquals(len(test_notifier.NOTIFICATIONS), 1)
    notification = test_notifier.NOTIFICATIONS[0]
    self.assertEquals(notification['priority'], 'INFO')
    self.assertEquals(notification['event_type'],
                      'compute.instance.resize.prep')
    payload = notification['payload']
    self.assertEquals(payload['tenant_id'], self.project_id)
    self.assertEquals(payload['user_id'], self.user_id)
    self.assertEquals(payload['instance_id'], instance_id)
    self.assertEquals(payload['instance_type'], 'm1.tiny')
    type_id = instance_types.get_instance_type_by_name('m1.tiny')['id']
    self.assertEquals(str(payload['instance_type_id']), str(type_id))
    for key in ('display_name', 'created_at', 'launched_at'):
        self.assertTrue(key in payload)
    self.assertEquals(payload['image_ref'], '1')
    self.compute.terminate_instance(elevated, instance_id)
def schedule_run_instance(self, context, instance_id, *_args, **_kwargs):
    """Picks a host that is up and has the fewest running instances."""
    instance_ref = db.instance_get(context, instance_id)
    zone_spec = instance_ref['availability_zone']
    if zone_spec and ':' in zone_spec and context.is_admin:
        # admins may pin the instance to a host via "zone:host"
        zone, _x, host = zone_spec.partition(':')
        service = db.service_get_by_args(context.elevated(),
                                         host, 'nova-compute')
        if not self.service_is_up(service):
            raise driver.WillNotSchedule(_("Host %s is not alive") % host)
        # TODO(vish): this probably belongs in the manager, if we
        #             can generalize this somehow
        db.instance_update(context, instance_id,
                           {'host': host,
                            'scheduled_at': datetime.datetime.utcnow()})
        return host
    for service, instance_cores in \
            db.service_get_all_compute_sorted(context):
        if instance_cores + instance_ref['vcpus'] > FLAGS.max_cores:
            raise driver.NoValidHost(_("All hosts have too many cores"))
        if self.service_is_up(service):
            # NOTE(vish): this probably belongs in the manager, if we
            #             can generalize this somehow
            db.instance_update(context, instance_id,
                               {'host': service['host'],
                                'scheduled_at':
                                    datetime.datetime.utcnow()})
            return service['host']
    raise driver.NoValidHost(_("No hosts found"))
def test_live_migration_src_check_volume_node_not_alive(self):
    """Raise exception when volume node is not alive."""
    instance_id = self._create_instance()
    i_ref = db.instance_get(self.context, instance_id)
    # FIX: removed a dead store — `dic` was first assigned the volume
    # values and immediately overwritten without ever being used.
    v_ref = db.volume_create(self.context, {'instance_id': instance_id,
                                            'size': 1})
    # a volume service whose last report is a full day old is not alive
    t1 = utils.utcnow() - datetime.timedelta(1)
    dic = {'created_at': t1, 'updated_at': t1, 'binary': 'nova-volume',
           'topic': 'volume', 'report_count': 0}
    s_ref = db.service_create(self.context, dic)
    self.assertRaises(exception.VolumeServiceUnavailable,
                      self.scheduler.driver.schedule_live_migration,
                      self.context, instance_id, i_ref['host'])
    # clean up the records this test created
    db.instance_destroy(self.context, instance_id)
    db.service_destroy(self.context, s_ref['id'])
    db.volume_destroy(self.context, v_ref['id'])
def get_instance_uuids_by_ip_filter(self, context, filters):
    # This is not returning the instance IDs like the method name
    # would make you think; it is matching the return format of
    # the method it's overriding.
    ids = self.ipam.get_instance_ids_by_ip_address(context,
                                                   filters.get('ip'))
    return [{'instance_uuid': db.instance_get(context, instance_id).uuid}
            for instance_id in ids]
def test_run_terminate_timestamps(self):
    """Make sure timestamps are set for launched and destroyed"""
    instance_id = self._create_instance()
    instance_ref = db.instance_get(self.context, instance_id)
    # freshly created: neither launched nor deleted yet
    self.assertEqual(instance_ref['launched_at'], None)
    self.assertEqual(instance_ref['deleted_at'], None)
    launch = utils.utcnow()
    self.compute.run_instance(self.context, instance_id)
    instance_ref = db.instance_get(self.context, instance_id)
    # launched_at must fall after the timestamp taken before run_instance
    self.assert_(instance_ref['launched_at'] > launch)
    self.assertEqual(instance_ref['deleted_at'], None)
    terminate = utils.utcnow()
    self.compute.terminate_instance(self.context, instance_id)
    # elevated(read_deleted) context is required to re-read a deleted row
    self.context = self.context.elevated(True)
    instance_ref = db.instance_get(self.context, instance_id)
    self.assert_(instance_ref['launched_at'] < terminate)
    self.assert_(instance_ref['deleted_at'] > terminate)
def test_get_devs_object(self):
    # Loading an Instance without 'pci_devices' in expected_attrs must make
    # the pci manager trigger a lazy obj_load_attr for the devices.
    def _fake_obj_load_attr(foo, attrname):
        if attrname == 'pci_devices':
            self.load_attr_called = True
            foo.pci_devices = objects.PciDeviceList()

    inst = fakes.stub_instance(id='1')
    ctxt = context.get_admin_context()
    self.mox.StubOutWithMock(db, 'instance_get')
    # expect the db fetch with no joined columns (expected_attrs=[])
    db.instance_get(ctxt, '1', columns_to_join=[]).AndReturn(inst)
    self.mox.ReplayAll()
    inst = objects.Instance.get_by_id(ctxt, '1', expected_attrs=[])
    self.stubs.Set(objects.Instance, 'obj_load_attr', _fake_obj_load_attr)
    self.load_attr_called = False
    pci_manager.get_instance_pci_devs(inst)
    self.assertEqual(self.load_attr_called, True)
def get_by_id(cls, context, inst_id, expected_attrs=None):
    """Fetch an Instance by database id, joining the requested attrs.

    Defaults to joining 'info_cache' and 'security_groups' when no
    expected_attrs are given.
    """
    attrs = (['info_cache', 'security_groups']
             if expected_attrs is None else expected_attrs)
    db_inst = db.instance_get(context, inst_id,
                              columns_to_join=_expected_cols(attrs))
    return cls._from_db_object(context, cls(), db_inst, attrs)
def test_update_of_instance_display_fields(self):
    inst = db.instance_create(self.context, {})
    ec2_id = ec2utils.id_to_ec2_id(inst['id'])
    self.cloud.update_instance(self.context, ec2_id,
                               display_name='c00l 1m4g3')
    # re-read the row and confirm the display name was persisted
    updated = db.instance_get(self.context, inst['id'])
    self.assertEqual('c00l 1m4g3', updated['display_name'])
    db.instance_destroy(self.context, inst['id'])
def test_live_migration_instance_not_running(self):
    """The instance given by instance_id is not running."""
    self.mox.StubOutWithMock(db, 'instance_get')
    dest = 'fake_host2'
    block_migration = False
    instance = self._live_migration_instance()
    # NOSTATE marks the instance as not running, which must be rejected
    instance['power_state'] = power_state.NOSTATE
    db.instance_get(self.context, instance['id']).AndReturn(instance)
    self.mox.ReplayAll()
    self.assertRaises(exception.InstanceNotRunning,
                      self.driver.schedule_live_migration, self.context,
                      instance_id=instance['id'], dest=dest,
                      block_migration=block_migration)
def get_by_id(cls, context, inst_id, expected_attrs=None):
    """Fetch an Instance by database id, joining the requested attrs."""
    attrs = expected_attrs if expected_attrs is not None else []
    columns = cls._attrs_to_columns(attrs)
    db_inst = db.instance_get(context, inst_id,
                              columns_to_join=columns)
    return Instance._from_db_object(context, cls(), db_inst, attrs)
def test_notify_usage_exists_fail_on_deleted_instance(self):
    # notify_usage_exists should not work for a deleted VM. A
    # notification should be done before the instance is deleted in the db.
    instance_id = self._create_instance()
    instance = db.instance_get(self.context, instance_id)
    # seed system metadata, then delete the instance
    db.instance_system_metadata_update(self.context, instance['uuid'],
                                       {'image_md_key1': 'val1',
                                        'image_md_key2': 'val2',
                                        'other_data': 'meow'}, False)
    self.compute.terminate_instance(self.context, instance)
    deleted = db.instance_get(
        self.context.elevated(read_deleted='yes'), instance_id)
    self.assertRaises(KeyError,
                      compute_utils.notify_usage_exists,
                      self.context, deleted)
def schedule_start_instance(self, context, instance_id, *_args, **_kwargs):
    """Pick a host for the stopped instance and cast start_instance to it."""
    target = self._schedule_instance(context,
                                     db.instance_get(context, instance_id),
                                     *_args, **_kwargs)
    driver.cast_to_compute_host(context, target, 'start_instance',
                                instance_id=instance_id, **_kwargs)
def test_resize_same_source_fails(self):
    """Ensure instance fails to migrate when source and destination are
    the same host"""
    inst_id = self._create_instance()
    self.compute.run_instance(self.context, inst_id)
    instance = db.instance_get(self.context, inst_id)
    # resizing onto the host the instance already runs on must error out
    self.assertRaises(exception.Error, self.compute.prep_resize,
                      self.context, instance['uuid'], 1)
    self.compute.terminate_instance(self.context, inst_id)