def test_shelve(self):
    """Shelving an instance object moves it to task_state SHELVING."""
    # Ensure instance can be shelved.
    fake_instance = self._create_fake_instance_obj(
        {'display_name': 'vm01'})
    instance = fake_instance

    self.assertIsNone(instance['task_state'])

    def fake_init(self2):
        # In original _FakeImageService.__init__(), some fake images are
        # created. To verify the snapshot name of this test only, here
        # sets a fake method.
        self2.images = {}

    def fake_create(self2, ctxt, metadata, data=None):
        # The snapshot image name is derived from display_name.
        self.assertEqual(metadata['name'], 'vm01-shelved')
        metadata['id'] = '8b24ed3f-ee57-43bc-bc2e-fb2e9482bc42'
        return metadata

    fake_image.stub_out_image_service(self)
    self.stubs.Set(fake_image._FakeImageService, '__init__', fake_init)
    self.stubs.Set(fake_image._FakeImageService, 'create', fake_create)

    self.compute_api.shelve(self.context, instance)

    self.assertEqual(instance.task_state, task_states.SHELVING)

    db.instance_destroy(self.context, instance['uuid'])
def init_host(self, host=None):
    """Initialize anything that is necessary for the driver to function,
    including catching up with currently running VE's on the given host.

    :param host: hostname to reconcile; defaults to the local hostname.
    """
    # NOTE: the original signature used host=socket.gethostname(), which
    # freezes the hostname at import time; resolve it at call time instead.
    if host is None:
        host = socket.gethostname()
    ctxt = context.get_admin_context()
    LOG.debug("Hostname: %s" % (host,))
    # Fetch once instead of querying the DB twice (log + loop).
    instances = db.instance_get_all_by_host(ctxt, host)
    LOG.debug("Instances: %s" % (instances,))

    for instance in instances:
        try:
            LOG.debug("Checking state of %s" % instance["name"])
            state = self.get_info(instance["name"])["state"]
        except exception.NotFound:
            # VE is gone from the hypervisor; treat as powered off.
            state = power_state.SHUTOFF

        LOG.debug("Current state of %s was %s." %
                  (instance["name"], state))
        db.instance_set_state(ctxt, instance["id"], state)

        if state == power_state.SHUTOFF:
            db.instance_destroy(ctxt, instance["id"])

        if state != power_state.RUNNING:
            continue
def test_live_migration_common_check_service_orig_not_exists(self):
    """Destination host does not exist."""
    dest = 'dummydest'
    # mocks for live_migration_common_check()
    instance_id = self._create_instance()
    i_ref = db.instance_get(self.context, instance_id)
    # Back-date the service record so the source host looks unavailable.
    t1 = utils.utcnow() - datetime.timedelta(10)
    s_ref = self._create_compute_service(created_at=t1, updated_at=t1,
                                         host=dest)
    # mocks for mounted_on_same_shared_storage()
    fpath = '/test/20110127120000'
    self.mox.StubOutWithMock(driver, 'rpc', use_mock_anything=True)
    topic = FLAGS.compute_topic
    driver.rpc.call(mox.IgnoreArg(),
        db.queue_get_for(self.context, topic, dest),
        {"method": 'create_shared_storage_test_file'}).AndReturn(fpath)
    driver.rpc.call(mox.IgnoreArg(),
        db.queue_get_for(mox.IgnoreArg(), topic, i_ref['host']),
        {"method": 'check_shared_storage_test_file',
         "args": {'filename': fpath}})
    driver.rpc.call(mox.IgnoreArg(),
        db.queue_get_for(mox.IgnoreArg(), topic, dest),
        {"method": 'cleanup_shared_storage_test_file',
         "args": {'filename': fpath}})

    self.mox.ReplayAll()
    self.assertRaises(exception.SourceHostUnavailable,
                      self.scheduler.driver._live_migration_common_check,
                      self.context, i_ref, dest)

    db.instance_destroy(self.context, instance_id)
    db.service_destroy(self.context, s_ref['id'])
def test_live_migration_common_check_service_different_hypervisor(self):
    """Original host and dest host has different hypervisor type."""
    dest = "dummydest"
    instance_id = self._create_instance()
    i_ref = db.instance_get(self.context, instance_id)

    # compute service for original host
    s_ref = self._create_compute_service(host=i_ref["host"])
    # compute service for destination (different hypervisor type)
    s_ref2 = self._create_compute_service(host=dest, hypervisor_type="xen")

    # mocks
    driver = self.scheduler.driver
    self.mox.StubOutWithMock(driver, "mounted_on_same_shared_storage")
    driver.mounted_on_same_shared_storage(mox.IgnoreArg(), i_ref, dest)

    self.mox.ReplayAll()
    self.assertRaises(
        exception.InvalidHypervisorType,
        self.scheduler.driver._live_migration_common_check,
        self.context,
        i_ref,
        dest,
    )

    db.instance_destroy(self.context, instance_id)
    db.service_destroy(self.context, s_ref["id"])
    db.service_destroy(self.context, s_ref2["id"])
def test_unfilter_instance_undefines_nwfilters(self):
    """unfilter_instance must undefine exactly one nwfilter."""
    admin_ctxt = context.get_admin_context()

    fakefilter = NWFilterFakes()
    self.fw._conn.nwfilterDefineXML = fakefilter.filterDefineXMLMock
    self.fw._conn.nwfilterLookupByName = fakefilter.nwfilterLookupByName

    instance_ref = self._create_instance()
    inst_id = instance_ref['id']
    inst_uuid = instance_ref['uuid']

    self.security_group = self.setup_and_return_security_group()

    db.instance_add_security_group(self.context, inst_uuid,
                                   self.security_group['id'])

    instance = db.instance_get(self.context, inst_id)

    network_info = _fake_network_info(self.stubs, 1)
    self.fw.setup_basic_filtering(instance, network_info)
    original_filter_count = len(fakefilter.filters)
    self.fw.unfilter_instance(instance, network_info)

    # should undefine just the instance filter
    self.assertEqual(original_filter_count - len(fakefilter.filters), 1)

    db.instance_destroy(admin_ctxt, instance_ref['uuid'])
def test_update_of_instance_wont_update_private_fields(self):
    """Non-updatable fields passed to update_instance are ignored."""
    created = db.instance_create(self.context, {})
    self.cloud.update_instance(self.context, created['id'],
                               mac_address='DE:AD:BE:EF')
    refreshed = db.instance_get(self.context, created['id'])
    # mac_address must not have been touched by the update call.
    self.assertEqual(None, refreshed['mac_address'])
    db.instance_destroy(self.context, created['id'])
def test_too_many_addresses(self):
    """Test for a NoMoreAddresses exception when all fixed ips are used.
    """
    admin_context = context.get_admin_context()
    network = db.project_get_network(admin_context, self.projects[0].id)
    num_available_ips = db.network_count_available_ips(admin_context,
                                                       network['id'])
    addresses = []
    instance_ids = []
    # Exhaust every available fixed IP on the project network.
    for i in range(num_available_ips):
        instance_ref = self._create_instance(0)
        instance_ids.append(instance_ref['id'])
        address = self._create_address(0, instance_ref['id'])
        addresses.append(address)
        lease_ip(address)

    ip_count = db.network_count_available_ips(context.get_admin_context(),
                                              network['id'])
    self.assertEqual(ip_count, 0)
    # One more allocation must fail.
    self.assertRaises(db.NoMoreAddresses,
                      self.network.allocate_fixed_ip,
                      self.context,
                      'foo')

    # Release everything and verify the pool is fully restored.
    for i in range(num_available_ips):
        self.network.deallocate_fixed_ip(self.context, addresses[i])
        release_ip(addresses[i])
        db.instance_destroy(context.get_admin_context(), instance_ids[i])
    ip_count = db.network_count_available_ips(context.get_admin_context(),
                                              network['id'])
    self.assertEqual(ip_count, num_available_ips)
def test_destroy_with_not_equal_constraint_met(self):
    """instance_destroy succeeds when the not_equal constraint holds."""
    admin_ctx = context.get_admin_context()
    created = db.instance_create(admin_ctx, {'task_state': 'deleting'})
    # task_state 'deleting' matches neither forbidden value.
    guard = db.constraint(task_state=db.not_equal('error', 'resize'))
    db.instance_destroy(admin_ctx, created['uuid'], guard)
    # The row is gone afterwards.
    self.assertRaises(exception.InstanceNotFound,
                      db.instance_get_by_uuid, admin_ctx, created['uuid'])
def tearDown(self):
    """Clean up the volumes directory and the test instance row."""
    try:
        shutil.rmtree(FLAGS.volumes_dir)
    except OSError:
        # Directory may not exist; best-effort cleanup.
        pass
    db.instance_destroy(self.context, self.instance_uuid)
    super(VolumeTestCase, self).tearDown()
def test_shelve(self):
    """Shelving via an Instance object sets task_state SHELVING."""
    # Ensure instance can be shelved.
    fake_instance = self._create_fake_instance({"display_name": "vm01"})
    instance = jsonutils.to_primitive(fake_instance)
    instance_uuid = instance["uuid"]

    self.assertIsNone(instance["task_state"])

    def fake_init(self2):
        # In original _FakeImageService.__init__(), some fake images are
        # created. To verify the snapshot name of this test only, here
        # sets a fake method.
        self2.images = {}

    def fake_create(self2, ctxt, metadata, data=None):
        # Snapshot name is derived from the instance display_name.
        self.assertEqual(metadata["name"], "vm01-shelved")
        metadata["id"] = "8b24ed3f-ee57-43bc-bc2e-fb2e9482bc42"
        return metadata

    fake_image.stub_out_image_service(self.stubs)
    self.stubs.Set(fake_image._FakeImageService, "__init__", fake_init)
    self.stubs.Set(fake_image._FakeImageService, "create", fake_create)

    inst_obj = objects.Instance.get_by_uuid(self.context, instance_uuid)
    self.compute_api.shelve(self.context, inst_obj)

    inst_obj.refresh()
    self.assertEqual(inst_obj.task_state, task_states.SHELVING)

    db.instance_destroy(self.context, instance["uuid"])
def test_too_many_cores(self):
    """Ensures we don't go over max cores"""
    compute1 = self.start_service('compute', host='host1')
    compute2 = self.start_service('compute', host='host2')
    instance_ids1 = []
    instance_ids2 = []
    # Saturate both hosts up to the max_cores limit.
    for index in xrange(FLAGS.max_cores):
        instance_id = self._create_instance()
        compute1.run_instance(self.context, instance_id)
        instance_ids1.append(instance_id)
        instance_id = self._create_instance()
        compute2.run_instance(self.context, instance_id)
        instance_ids2.append(instance_id)
    # With every core used, scheduling one more must fail.
    instance_id = self._create_instance()
    self.assertRaises(driver.NoValidHost,
                      self.scheduler.driver.schedule_run_instance,
                      self.context,
                      instance_id)
    db.instance_destroy(self.context, instance_id)
    for instance_id in instance_ids1:
        compute1.terminate_instance(self.context, instance_id)
    for instance_id in instance_ids2:
        compute2.terminate_instance(self.context, instance_id)
    compute1.kill()
    compute2.kill()
def test_add_console_does_not_duplicate(self, mock_get):
    """Adding a console twice returns the same console id."""
    mock_get.return_value = self.pool_info
    instance = self._create_instance()
    cons1 = self.console.add_console(self.context, instance['id'])
    cons2 = self.console.add_console(self.context, instance['id'])
    self.assertEqual(cons1, cons2)
    db.instance_destroy(self.context, instance['uuid'])
def test_shelve(self):
    """Shelving a running instance sets task_state SHELVING."""
    # Ensure instance can be shelved.
    fake_instance = self._create_fake_instance({'display_name': 'vm01'})
    instance = jsonutils.to_primitive(fake_instance)
    instance_uuid = instance['uuid']
    self.compute.run_instance(self.context, instance, {}, {}, [], None,
                              None, True, None, False)

    self.assertIsNone(instance['task_state'])

    def fake_init(self2):
        # In original _FakeImageService.__init__(), some fake images are
        # created. To verify the snapshot name of this test only, here
        # sets a fake method.
        self2.images = {}

    def fake_create(self2, ctxt, metadata):
        # Snapshot name is derived from the instance display_name.
        self.assertEqual(metadata['name'], 'vm01-shelved')
        metadata['id'] = '8b24ed3f-ee57-43bc-bc2e-fb2e9482bc42'
        return metadata

    fake_image.stub_out_image_service(self.stubs)
    self.stubs.Set(fake_image._FakeImageService, '__init__', fake_init)
    self.stubs.Set(fake_image._FakeImageService, 'create', fake_create)

    inst_obj = instance_obj.Instance.get_by_uuid(self.context,
                                                 instance_uuid)
    self.compute_api.shelve(self.context, inst_obj)

    inst_obj.refresh()
    self.assertEqual(inst_obj.task_state, task_states.SHELVING)

    db.instance_destroy(self.context, instance['uuid'])
def test_delete_fast_if_host_not_set(self):
    """Instance with no host is destroyed directly, skipping compute."""
    inst = self._create_instance_obj()
    inst.host = ''
    updates = {'progress': 0, 'task_state': task_states.DELETING}

    self.mox.StubOutWithMock(inst, 'save')
    self.mox.StubOutWithMock(db,
                             'block_device_mapping_get_all_by_instance')
    self.mox.StubOutWithMock(db, 'constraint')
    self.mox.StubOutWithMock(db, 'instance_destroy')
    self.mox.StubOutWithMock(self.compute_api, '_create_reservations')

    # Expected call sequence for the fast-delete path (order matters
    # to mox): no BDMs, save, reservations, then a constrained destroy.
    db.block_device_mapping_get_all_by_instance(
        self.context, inst.uuid).AndReturn([])
    inst.save()
    self.compute_api._create_reservations(
        self.context, inst, inst.instance_type_id, inst.project_id,
        inst.user_id).AndReturn(None)
    db.constraint(host=mox.IgnoreArg()).AndReturn('constraint')
    db.instance_destroy(self.context, inst.uuid, 'constraint')

    self.mox.ReplayAll()

    self.compute_api.delete(self.context, inst)
    for k, v in updates.items():
        self.assertEqual(inst[k], v)
def test_remove_console(self):
    """A removed console can no longer be looked up in the DB."""
    instance = self._create_instance()
    console_id = self.console.add_console(self.context, instance["id"])
    self.console.remove_console(self.context, console_id)

    self.assertRaises(exception.NotFound, db.console_get, self.context,
                      console_id)
    db.instance_destroy(self.context, instance["uuid"])
def test_show_works_correctly(self):
    """show() works correctly as expected."""
    ctxt = context.get_admin_context()
    s_ref = self._create_compute_service()
    i_ref1 = _create_instance(project_id='p-01', host=s_ref['host'])
    i_ref2 = _create_instance(project_id='p-02', vcpus=3,
                              host=s_ref['host'])

    result = self.controller.show(self.req, s_ref['host'])

    # result must contain both resource and per-project usage sections.
    c1 = ('resource' in result['host'] and 'usage' in result['host'])
    compute_node = s_ref['compute_node'][0]
    c2 = self._dic_is_equal(result['host']['resource'], compute_node)
    # NOTE(review): comparing dict.keys() to a list only works on
    # Python 2 and assumes insertion/hash order — verify if ported.
    c3 = result['host']['usage'].keys() == ['p-01', 'p-02']
    keys = ['vcpus', 'memory_mb']
    c4 = self._dic_is_equal(
        result['host']['usage']['p-01'], i_ref1, keys)
    disk = i_ref2['root_gb'] + i_ref2['ephemeral_gb']
    if result['host']['usage']['p-01']['local_gb'] == disk:
        c6 = True
    else:
        c6 = False
    c5 = self._dic_is_equal(
        result['host']['usage']['p-02'], i_ref2, keys)
    if result['host']['usage']['p-02']['local_gb'] == disk:
        c7 = True
    else:
        c7 = False
    self.assertTrue(c1 and c2 and c3 and c4 and c5 and c6 and c7)

    db.service_destroy(ctxt, s_ref['id'])
    db.instance_destroy(ctxt, i_ref1['id'])
    db.instance_destroy(ctxt, i_ref2['id'])
def test_delete_fast_if_host_not_set(self):
    """Instance with no host is destroyed directly (cells-aware)."""
    inst = self._create_instance_obj()
    inst.host = ''
    db_inst = obj_base.obj_to_primitive(inst)
    updates = {'progress': 0, 'task_state': task_states.DELETING}
    new_inst = dict(db_inst, **updates)

    self.mox.StubOutWithMock(db,
                             'block_device_mapping_get_all_by_instance')
    self.mox.StubOutWithMock(db, 'instance_update_and_get_original')
    self.mox.StubOutWithMock(db, 'constraint')
    self.mox.StubOutWithMock(db, 'instance_destroy')
    self.mox.StubOutWithMock(self.compute_api, '_create_reservations')

    # Expected fast-delete sequence; call order matters to mox.
    db.block_device_mapping_get_all_by_instance(
        self.context, inst.uuid).AndReturn([])
    db.instance_update_and_get_original(
        self.context, inst.uuid, updates).AndReturn((db_inst, new_inst))
    self.compute_api._create_reservations(
        self.context, db_inst, new_inst, inst.project_id,
        inst.user_id).AndReturn(None)
    db.constraint(host=mox.IgnoreArg()).AndReturn('constraint')
    db.instance_destroy(self.context, inst.uuid, 'constraint')
    if self.is_cells:
        # Cells deployments additionally broadcast the delete.
        self.mox.StubOutWithMock(self.compute_api, '_cast_to_cells')
        self.compute_api._cast_to_cells(
            self.context, db_inst, 'delete')

    self.mox.ReplayAll()

    self.compute_api.delete(self.context, db_inst)
def test_live_migration_common_check_service_different_version(self):
    """Original host and dest host has different hypervisor version."""
    dest = 'dummydest'
    instance_id = self._create_instance()
    i_ref = db.instance_get(self.context, instance_id)

    # compute service for original host
    s_ref = self._create_compute_service(host=i_ref['host'])
    # compute service for destination (older hypervisor version)
    s_ref2 = self._create_compute_service(host=dest,
                                          hypervisor_version=12002)

    # mocks
    driver = self.scheduler.driver
    self.mox.StubOutWithMock(driver, 'mounted_on_same_shared_storage')
    driver.mounted_on_same_shared_storage(mox.IgnoreArg(), i_ref, dest)

    self.mox.ReplayAll()
    self.assertRaises(exception.DestinationHypervisorTooOld,
                      self.scheduler.driver._live_migration_common_check,
                      self.context, i_ref, dest)

    db.instance_destroy(self.context, instance_id)
    db.service_destroy(self.context, s_ref['id'])
    db.service_destroy(self.context, s_ref2['id'])
def test_describe_instances(self):
    """Makes sure describe_instances works and filters results."""
    inst1 = db.instance_create(self.context, {'reservation_id': 'a',
                                              'host': 'host1'})
    inst2 = db.instance_create(self.context, {'reservation_id': 'a',
                                              'host': 'host2'})
    comp1 = db.service_create(self.context, {'host': 'host1',
                                             'availability_zone': 'zone1',
                                             'topic': "compute"})
    comp2 = db.service_create(self.context, {'host': 'host2',
                                             'availability_zone': 'zone2',
                                             'topic': "compute"})
    # Unfiltered call returns both instances of the reservation.
    result = self.cloud.describe_instances(self.context)
    result = result['reservationSet'][0]
    self.assertEqual(len(result['instancesSet']), 2)
    # Filtering by a specific instance id narrows to one, and its
    # availability zone comes from the matching service record.
    instance_id = cloud.id_to_ec2_id(inst2['id'])
    result = self.cloud.describe_instances(self.context,
                                           instance_id=[instance_id])
    result = result['reservationSet'][0]
    self.assertEqual(len(result['instancesSet']), 1)
    self.assertEqual(result['instancesSet'][0]['instanceId'],
                     instance_id)
    self.assertEqual(result['instancesSet'][0]
                     ['placement']['availabilityZone'], 'zone2')
    db.instance_destroy(self.context, inst1['id'])
    db.instance_destroy(self.context, inst2['id'])
    db.service_destroy(self.context, comp1['id'])
    db.service_destroy(self.context, comp2['id'])
def test_update_of_instance_display_fields(self):
    """Display fields are updatable through the EC2 API layer."""
    created = db.instance_create(self.context, {})
    ec2_id = ec2utils.id_to_ec2_id(created['id'])
    self.cloud.update_instance(self.context, ec2_id,
                               display_name='c00l 1m4g3')
    refreshed = db.instance_get(self.context, created['id'])
    # The new display_name must be visible on re-read.
    self.assertEqual('c00l 1m4g3', refreshed['display_name'])
    db.instance_destroy(self.context, created['id'])
def tearDown(self):
    """Clean up volumes dir, test instance, and notifier drivers."""
    try:
        shutil.rmtree(FLAGS.volumes_dir)
    except OSError:
        # Directory may not exist; best-effort cleanup.
        pass
    db.instance_destroy(self.context, self.instance_uuid)
    notifier_api._reset_drivers()
    super(VolumeTestCase, self).tearDown()
def _tearDownBlockDeviceMapping(self, inst1, inst2, volumes):
    """Delete volumes, BDM rows, and both instances created by a test."""
    for vol in volumes:
        self.volume_api.delete(self.context, vol)
    for uuid in (inst1["uuid"], inst2["uuid"]):
        for bdm in db.block_device_mapping_get_all_by_instance(
                self.context, uuid):
            db.block_device_mapping_destroy(self.context, bdm["id"])
    db.instance_destroy(self.context, inst2["uuid"])
    db.instance_destroy(self.context, inst1["uuid"])
def test_regular_user_can_schedule(self):
    """Ensures a non-admin can run an instance"""
    s_ref = self._create_compute_service(host='host1')
    instance_id = self._create_instance()
    # Non-admin request context must still be allowed to schedule.
    ctxt = context.RequestContext('fake', 'fake', False)
    self.scheduler.driver.schedule_run_instance(ctxt, instance_id)
    # BUG FIX: the original cleanup called instance_destroy with the
    # *service* id (s_ref['id']) and never removed the service record.
    # Destroy the instance we created and the service, matching the
    # cleanup pattern of the sibling live-migration tests.
    db.instance_destroy(self.context, instance_id)
    db.service_destroy(self.context, s_ref['id'])
def test_update_of_instance_display_fields(self):
    """Display fields are updatable via the cloud EC2 id mapping."""
    created = db.instance_create(self.context, {})
    ec2_id = cloud.id_to_ec2_id(created['id'])
    self.cloud.update_instance(self.context, ec2_id,
                               display_name='c00l 1m4g3')
    refreshed = db.instance_get(self.context, created['id'])
    # The new display_name must be visible on re-read.
    self.assertEqual('c00l 1m4g3', refreshed['display_name'])
    db.instance_destroy(self.context, created['id'])
def test_remove_console(self):
    """A removed console can no longer be fetched from the DB."""
    inst = self._create_instance()
    console_id = self.console.add_console(self.context, inst['id'])
    self.console.remove_console(self.context, console_id)
    # Lookup of the deleted console must raise NotFound.
    self.assertRaises(exception.NotFound, db.console_get, self.context,
                      console_id)
    db.instance_destroy(self.context, inst['uuid'])
def test_destroy_stubbed(self):
    """Instance.destroy() calls db.instance_destroy with its uuid."""
    self.mox.StubOutWithMock(db, 'instance_destroy')
    db.instance_destroy(self.context, 'fake-uuid', constraint=None)
    self.mox.ReplayAll()
    inst = instance.Instance()
    inst.id = 1
    inst.uuid = 'fake-uuid'
    inst.host = 'foo'
    inst.destroy(self.context)
def _tearDownBlockDeviceMapping(self, instances, volumes):
    """Delete per-instance volumes, BDM rows, and instance records."""
    for vols in volumes:
        for vol in vols:
            self.volume_api.delete(self.context, vol['id'])
    for instance in instances:
        for bdm in db.block_device_mapping_get_all_by_instance(
                self.context, instance['uuid']):
            db.block_device_mapping_destroy(self.context, bdm['id'])
        db.instance_destroy(self.context, instance['uuid'])
def tearDown(self):
    """Remove test instances, projects, and user."""
    # TODO(termie): this should really be instantiating clean datastores
    # in between runs, one failure kills all the tests
    db.instance_destroy(context.get_admin_context(), self.instance_id)
    db.instance_destroy(context.get_admin_context(), self.instance2_id)
    for project in self.projects:
        self.manager.delete_project(project)
    self.manager.delete_user(self.user)
    super(NetworkTestCase, self).tearDown()
def test_remove_console(self, mock_get):
    """A removed console can no longer be fetched from the DB."""
    mock_get.return_value = self.pool_info
    instance = self._create_instance()
    console_id = self.console.add_console(self.context, instance['id'])
    self.console.remove_console(self.context, console_id)

    self.assertRaises(exception.NotFound, db.console_get, self.context,
                      console_id)
    db.instance_destroy(self.context, instance['uuid'])
def _tearDownBlockDeviceMapping(self, inst1, inst2, volumes):
    """Delete volumes, BDM rows, and both instances created by a test."""
    for vol in volumes:
        self.volume_api.delete(self.context, vol['id'])
    for uuid in (inst1['uuid'], inst2['uuid']):
        for bdm in db.block_device_mapping_get_all_by_instance(
                self.context, uuid):
            db.block_device_mapping_destroy(self.context, bdm['id'])
    db.instance_destroy(self.context, inst2['uuid'])
    db.instance_destroy(self.context, inst1['uuid'])
def test_instance_get_all_by_filters_deleted(self):
    """Destroyed instances are excluded from an unfiltered listing."""
    args1 = {'reservation_id': 'a', 'image_ref': 1, 'host': 'host1'}
    inst1 = db.instance_create(self.context, args1)
    args2 = {'reservation_id': 'b', 'image_ref': 1, 'host': 'host1'}
    inst2 = db.instance_create(self.context, args2)
    db.instance_destroy(self.context, inst1.id)
    result = db.instance_get_all_by_filters(self.context.elevated(), {})
    # Only the surviving instance is returned.
    self.assertEqual(1, len(result))
    self.assertEqual(result[0].id, inst2.id)
def test_destroy_stubbed(self):
    """Instance.destroy() calls db.instance_destroy with its uuid."""
    self.mox.StubOutWithMock(db, "instance_destroy")
    db.instance_destroy(self.context, "fake-uuid", constraint=None)
    self.mox.ReplayAll()
    inst = instance.Instance()
    inst.id = 1
    inst.uuid = "fake-uuid"
    inst.host = "foo"
    inst.destroy(self.context)
def test_will_schedule_on_disabled_host_if_specified(self):
    """Explicit availability_zone host overrides the disabled flag."""
    compute1 = self.start_service("compute", host="host1")
    s1 = db.service_get_by_args(self.context, "host1", "nova-compute")
    db.service_update(self.context, s1["id"], {"disabled": True})
    instance_id2 = self._create_instance(availability_zone="nova:host1")
    host = self.scheduler.driver.schedule_run_instance(self.context,
                                                       instance_id2)
    self.assertEqual("host1", host)
    db.instance_destroy(self.context, instance_id2)
    compute1.kill()
def test_will_schedule_on_disabled_host_if_specified(self):
    """Explicit availability_zone host overrides the disabled flag."""
    compute1 = self.start_service('compute', host='host1')
    s1 = db.service_get_by_args(self.context, 'host1', 'nova-compute')
    db.service_update(self.context, s1['id'], {'disabled': True})
    instance_id2 = self._create_instance(availability_zone='nova:host1')
    host = self.scheduler.driver.schedule_run_instance(
        self.context, instance_id2)
    self.assertEqual('host1', host)
    db.instance_destroy(self.context, instance_id2)
    compute1.kill()
def test_create_instance_defaults_display_name(self):
    """Verify that an instance cannot be created without a display_name."""
    cases = [dict(), dict(display_name=None)]
    for instance in cases:
        ref = self.compute_api.create(self.context,
                                      FLAGS.default_instance_type,
                                      None, **instance)
        try:
            # IDIOM: assertIsNotNone replaces assertNotEqual(x, None)
            # for a clearer failure message.
            self.assertIsNotNone(ref[0]['display_name'])
        finally:
            db.instance_destroy(self.context, ref[0]['id'])
def test_will_schedule_on_disabled_host_if_specified(self):
    """Explicit availability_zone host overrides the disabled flag."""
    compute1 = self.start_service('compute', host='host1')
    s1 = db.service_get_by_args(self.context, 'host1', 'nova-compute')
    db.service_update(self.context, s1['id'], {'disabled': True})
    instance_id2 = self._create_instance(availability_zone='nova:host1')
    host = self.scheduler.driver.schedule_run_instance(self.context,
                                                       instance_id2)
    self.assertEqual('host1', host)
    db.instance_destroy(self.context, instance_id2)
    compute1.kill()
def test_create_instance_defaults_display_name(self):
    """Verify that an instance cannot be created without a display_name."""
    cases = [dict(), dict(display_name=None)]
    for instance in cases:
        ref = self.compute_api.create(self.context,
                                      instance_types.get_default_instance_type(),
                                      None, **instance)
        try:
            # IDIOM: assertIsNotNone replaces assertNotEqual(x, None)
            # for a clearer failure message.
            self.assertIsNotNone(ref[0]['display_name'])
        finally:
            db.instance_destroy(self.context, ref[0]['id'])
def test_add_console(self):
    """Adding a console registers the instance in the host's pool."""
    instance_id = self._create_instance()
    self.console.add_console(self.context, instance_id)
    instance = db.instance_get(self.context, instance_id)
    pool = db.console_pool_get_by_host_type(
        self.context, instance['host'], self.console.host,
        self.console.driver.console_type)

    console_instances = [con['instance_id'] for con in pool.consoles]
    # IDIOM: assertIn replaces the deprecated self.assert_(x in y),
    # matching the sibling uuid-based test_add_console.
    self.assertIn(instance_id, console_instances)
    db.instance_destroy(self.context, instance_id)
def test_instance_get_all_by_filters_deleted(self):
    """Elevated unfiltered listing includes the deleted instance."""
    args1 = {'reservation_id': 'a', 'image_ref': 1, 'host': 'host1'}
    inst1 = db.instance_create(self.context, args1)
    args2 = {'reservation_id': 'b', 'image_ref': 1, 'host': 'host1'}
    inst2 = db.instance_create(self.context, args2)
    db.instance_destroy(self.context, inst1.id)
    result = db.instance_get_all_by_filters(self.context.elevated(), {})
    # Both rows come back; the destroyed one is flagged deleted.
    self.assertEqual(2, len(result))
    self.assertEqual(result[0].id, inst2.id)
    self.assertEqual(result[1].id, inst1.id)
    self.assertTrue(result[1].deleted)
def test_add_console(self):
    """Adding a console registers the instance uuid in the host pool."""
    instance = self._create_instance()
    self.console.add_console(self.context, instance['id'])
    instance = db.instance_get(self.context, instance['id'])
    pool = db.console_pool_get_by_host_type(
        self.context, instance['host'], self.console.host,
        self.console.driver.console_type)

    console_instances = [con['instance_uuid'] for con in pool['consoles']]
    self.assertIn(instance['uuid'], console_instances)
    db.instance_destroy(self.context, instance['uuid'])
def test_live_migration_dest_check_service_same_host(self):
    """Confirms exception raises in case dest and src is same host."""
    instance_id = self._create_instance()
    i_ref = db.instance_get(self.context, instance_id)
    s_ref = self._create_compute_service(host=i_ref['host'])

    # Migrating to the instance's own host must be rejected.
    self.assertRaises(exception.UnableToMigrateToSelf,
                      self.scheduler.driver._live_migration_dest_check,
                      self.context, i_ref, i_ref['host'])

    db.instance_destroy(self.context, instance_id)
    db.service_destroy(self.context, s_ref['id'])
def test_live_migration_dest_check_service_works_correctly(self):
    """Confirms method finishes with no error."""
    instance_id = self._create_instance()
    i_ref = db.instance_get(self.context, instance_id)
    s_ref = self._create_compute_service(host='somewhere',
                                         memory_mb_used=5)

    ret = self.scheduler.driver._live_migration_dest_check(
        self.context, i_ref, 'somewhere')
    # IDIOM: assertIsNone replaces assertTrue(ret is None).
    self.assertIsNone(ret)

    db.instance_destroy(self.context, instance_id)
    db.service_destroy(self.context, s_ref['id'])
def test_terminate_instances(self):
    """terminate_instances succeeds for known ids, raises otherwise."""
    inst1 = db.instance_create(self.context, {'reservation_id': 'a',
                                              'image_id': 1,
                                              'host': 'host1'})
    terminate_instances = self.cloud.terminate_instances
    # valid instance_id
    result = terminate_instances(self.context, ['i-00000001'])
    self.assertTrue(result)
    # non-existing instance_id
    self.assertRaises(exception.InstanceNotFound, terminate_instances,
                      self.context, ['i-2'])
    db.instance_destroy(self.context, inst1['id'])
def test_live_migration_src_check_works_correctly(self):
    """Confirms this method finishes with no error."""
    instance_id = self._create_instance()
    i_ref = db.instance_get(self.context, instance_id)
    s_ref = self._create_compute_service(host=i_ref['host'])
    ret = self.scheduler.driver._live_migration_src_check(
        self.context, i_ref)
    # IDIOM: assertIsNone replaces assertTrue(ret is None).
    self.assertIsNone(ret)

    db.instance_destroy(self.context, instance_id)
    db.service_destroy(self.context, s_ref['id'])
def test_run_attach_detach_volume(self):
    """Make sure volume can be attached and detached from instance."""
    inst = {}
    inst['image_id'] = 1
    inst['reservation_id'] = 'r-fakeres'
    inst['launch_time'] = '10'
    inst['user_id'] = 'fake'
    inst['project_id'] = 'fake'
    inst['instance_type_id'] = '2'  # m1.tiny
    inst['ami_launch_index'] = 0
    # NOTE(review): the `inst` dict built above is never passed to
    # instance_create — presumably it should be the second argument
    # instead of {}; verify intent before changing.
    instance = db.instance_create(self.context, {})
    instance_id = instance['id']
    instance_uuid = instance['uuid']
    mountpoint = "/dev/sdf"
    volume = self._create_volume()
    volume_id = volume['id']
    self.volume.create_volume(self.context, volume_id)
    if FLAGS.fake_tests:
        db.volume_attached(self.context, volume_id, instance_uuid,
                           mountpoint)
    else:
        self.compute.attach_volume(self.context,
                                   instance_uuid,
                                   volume_id,
                                   mountpoint)
    vol = db.volume_get(context.get_admin_context(), volume_id)
    self.assertEqual(vol['status'], "in-use")
    self.assertEqual(vol['attach_status'], "attached")
    self.assertEqual(vol['mountpoint'], mountpoint)
    self.assertEqual(vol['instance_uuid'], instance_uuid)
    self.assertNotEqual(vol['attach_time'], None)

    # An attached volume must refuse deletion.
    self.assertRaises(exception.VolumeAttached,
                      self.volume.delete_volume,
                      self.context,
                      volume_id)
    if FLAGS.fake_tests:
        db.volume_detached(self.context, volume_id)
    else:
        self.compute.detach_volume(self.context,
                                   instance_uuid,
                                   volume_id)
    vol = db.volume_get(self.context, volume_id)
    self.assertEqual(vol['status'], "available")
    self.assertEqual(vol['attach_time'], None)

    self.volume.delete_volume(self.context, volume_id)
    self.assertRaises(exception.VolumeNotFound,
                      db.volume_get,
                      self.context,
                      volume_id)
    db.instance_destroy(self.context, instance_uuid)
def _test_shelve(self, vm_state=vm_states.ACTIVE,
                 boot_from_volume=False, clean_shutdown=True):
    """Shared driver for shelve tests: verifies state and RPC fan-out.

    Volume-backed instances are shelve-offloaded (no snapshot image);
    image-backed instances are shelved with a freshly created image.
    """
    # Ensure instance can be shelved.
    params = dict(task_state=None, vm_state=vm_state,
                  display_name='vm01')
    fake_instance = self._create_fake_instance_obj(params=params)
    instance = fake_instance

    self.assertIsNone(instance['task_state'])

    with test.nested(
        mock.patch.object(compute_utils, 'is_volume_backed_instance',
                          return_value=boot_from_volume),
        mock.patch.object(self.compute_api, '_create_image',
                          return_value=dict(id='fake-image-id')),
        mock.patch.object(instance, 'save'),
        mock.patch.object(self.compute_api, '_record_action_start'),
        mock.patch.object(self.compute_api.compute_rpcapi,
                          'shelve_instance'),
        mock.patch.object(self.compute_api.compute_rpcapi,
                          'shelve_offload_instance')
    ) as (
        volume_backed_inst, create_image, instance_save,
        record_action_start, rpcapi_shelve_instance,
        rpcapi_shelve_offload_instance
    ):
        self.compute_api.shelve(self.context, instance,
                                clean_shutdown=clean_shutdown)

        self.assertEqual(instance.task_state, task_states.SHELVING)
        # assert our mock calls
        volume_backed_inst.assert_called_once_with(
            self.context, instance)
        instance_save.assert_called_once_with(expected_task_state=[None])
        record_action_start.assert_called_once_with(
            self.context, instance, instance_actions.SHELVE)
        if boot_from_volume:
            rpcapi_shelve_offload_instance.assert_called_once_with(
                self.context, instance=instance,
                clean_shutdown=clean_shutdown)
        else:
            rpcapi_shelve_instance.assert_called_once_with(
                self.context, instance=instance,
                image_id='fake-image-id',
                clean_shutdown=clean_shutdown)

        db.instance_destroy(self.context, instance['uuid'])
def test_wont_sechedule_if_specified_host_is_down(self):
    """Scheduling on an explicitly named but dead host must fail."""
    # NOTE(review): method name contains a typo ("sechedule"); kept to
    # avoid changing the public identifier.
    compute1 = self.start_service('compute', host='host1')
    s1 = db.service_get_by_args(self.context, 'host1', 'nova-compute')
    # Push the service's heartbeat far enough in the past that it is
    # considered down (2x service_down_time).
    now = utils.utcnow()
    delta = datetime.timedelta(seconds=FLAGS.service_down_time * 2)
    past = now - delta
    db.service_update(self.context, s1['id'], {'updated_at': past})
    instance_id2 = self._create_instance(availability_zone='nova:host1')
    self.assertRaises(driver.WillNotSchedule,
                      self.scheduler.driver.schedule_run_instance,
                      self.context,
                      instance_id2)
    db.instance_destroy(self.context, instance_id2)
    compute1.kill()
def test_default_hostname_generator(self):
    """Hostnames are derived by sanitizing the display_name."""
    cases = [(None, 'server_1'),
             ('Hello, Server!', 'hello_server'),
             # Control characters and punctuation are stripped.
             ('<}\x1fh\x10e\x08l\x02l\x05o\x12!{>', 'hello')]
    for display_name, hostname in cases:
        ref = self.compute_api.create(
            self.context,
            instance_types.get_default_instance_type(),
            None,
            display_name=display_name)
        try:
            self.assertEqual(ref[0]['hostname'], hostname)
        finally:
            db.instance_destroy(self.context, ref[0]['id'])
def test_too_many_cores(self):
    """Creating an instance beyond the core quota raises QuotaError."""
    # One 4-core instance uses up the quota headroom.
    created_ids = [self._create_instance(cores=4)]
    self.assertRaises(quota.QuotaError, compute.API().create,
                      self.context,
                      min_count=1,
                      max_count=1,
                      instance_type='m1.small',
                      image_id=1)
    for created_id in created_ids:
        db.instance_destroy(self.context, created_id)
def test_destroy_stubbed(self):
    """destroy() propagates deleted/deleted_at from the DB response."""
    self.mox.StubOutWithMock(db, 'instance_destroy')
    deleted_at = datetime.datetime(1955, 11, 6)
    fake_inst = fake_instance.fake_db_instance(deleted_at=deleted_at,
                                               deleted=True)
    db.instance_destroy(self.context, 'fake-uuid',
                        constraint=None).AndReturn(fake_inst)
    self.mox.ReplayAll()
    inst = instance.Instance(id=1, uuid='fake-uuid', host='foo')
    inst.destroy(self.context)
    self.assertEqual(timeutils.normalize_time(inst.deleted_at),
                     timeutils.normalize_time(deleted_at))
    self.assertTrue(inst.deleted)
def test_live_migration_dest_check_service_lack_memory(self):
    """Confirms exception raises when dest doesn't have enough memory."""
    instance_id = self._create_instance()
    i_ref = db.instance_get(self.context, instance_id)
    # Destination already has most memory consumed.
    s_ref = self._create_compute_service(host='somewhere',
                                         memory_mb_used=12)

    self.assertRaises(exception.MigrationError,
                      self.scheduler.driver._live_migration_dest_check,
                      self.context, i_ref, 'somewhere')

    db.instance_destroy(self.context, instance_id)
    db.service_destroy(self.context, s_ref['id'])
def test_least_busy_host_gets_instance(self):
    """Ensures the host with less cores gets the next one"""
    compute1 = self.start_service('compute', host='host1')
    compute2 = self.start_service('compute', host='host2')
    # host1 gets loaded first, so host2 should win the next placement.
    instance_id1 = self._create_instance()
    compute1.run_instance(self.context, instance_id1)
    instance_id2 = self._create_instance()
    host = self.scheduler.driver.schedule_run_instance(
        self.context, instance_id2)
    self.assertEqual(host, 'host2')
    compute1.terminate_instance(self.context, instance_id1)
    db.instance_destroy(self.context, instance_id2)
    compute1.kill()
    compute2.kill()
def test_specific_host_gets_instance(self):
    """Ensures if you set availability_zone it launches on that zone"""
    svc1 = self.start_service('compute', host='host1')
    svc2 = self.start_service('compute', host='host2')
    # Busy up host1 first; the explicit zone must still win over
    # least-busy scheduling.
    first_id = self._create_instance()
    svc1.run_instance(self.context, first_id)
    pinned_id = self._create_instance(availability_zone='nova:host1')
    chosen = self.scheduler.driver.schedule_run_instance(self.context,
                                                         pinned_id)
    self.assertEqual('host1', chosen)
    # Clean up instances and stop both services.
    svc1.terminate_instance(self.context, first_id)
    db.instance_destroy(self.context, pinned_id)
    svc1.kill()
    svc2.kill()
def test_get_sorted_with_deleted_marker(self):
    """A deleted marker instance still pages the same result set."""
    marker_uuid = self.instances[1]['uuid']
    page_before = list(instance_list.get_instances_sorted(
        self.context, {}, None, marker_uuid, [], None, None))
    # Deleting the marker must not change what the page contains.
    db.instance_destroy(self.context, marker_uuid)
    page_after = list(instance_list.get_instances_sorted(
        self.context, {}, None, marker_uuid, [], None, None))
    self.assertEqual(page_before, page_after)
def test_live_migration_raises_exception(self): """Confirms recover method is called when exceptions are raised.""" # Skip if non-libvirt environment if not self.lazy_load_library_exists(): return # Preparing data self.compute = utils.import_object(FLAGS.compute_manager) instance_dict = { 'host': 'fake', 'state': power_state.RUNNING, 'state_description': 'running' } instance_ref = db.instance_create(self.context, self.test_instance) instance_ref = db.instance_update(self.context, instance_ref['id'], instance_dict) vol_dict = {'status': 'migrating', 'size': 1} volume_ref = db.volume_create(self.context, vol_dict) db.volume_attached(self.context, volume_ref['id'], instance_ref['id'], '/dev/fake') # Preparing mocks vdmock = self.mox.CreateMock(libvirt.virDomain) self.mox.StubOutWithMock(vdmock, "migrateToURI") vdmock.migrateToURI(FLAGS.live_migration_uri % 'dest', mox.IgnoreArg(), None, FLAGS.live_migration_bandwidth).\ AndRaise(libvirt.libvirtError('ERR')) def fake_lookup(instance_name): if instance_name == instance_ref.name: return vdmock self.create_fake_libvirt_mock(lookupByName=fake_lookup) # Start test self.mox.ReplayAll() conn = libvirt_conn.LibvirtConnection(False) self.assertRaises(libvirt.libvirtError, conn._live_migration, self.context, instance_ref, 'dest', '', self.compute.recover_live_migration) instance_ref = db.instance_get(self.context, instance_ref['id']) self.assertTrue(instance_ref['state_description'] == 'running') self.assertTrue(instance_ref['state'] == power_state.RUNNING) volume_ref = db.volume_get(self.context, volume_ref['id']) self.assertTrue(volume_ref['status'] == 'in-use') db.volume_destroy(self.context, volume_ref['id']) db.instance_destroy(self.context, instance_ref['id'])
def test_post_live_migration_working_correctly(self):
    """Confirm post_live_migration() works as expected correctly."""
    dest = 'desthost'
    flo_addr = '1.2.1.2'

    # Preparing data: an instance mid-migration with an attached volume,
    # a fixed IP, and a floating IP that must all follow it to `dest`.
    c = context.get_admin_context()
    instance_id = self._create_instance()
    i_ref = db.instance_get(c, instance_id)
    db.instance_update(c, i_ref['id'],
                       {'state_description': 'migrating',
                        'state': power_state.PAUSED})
    v_ref = db.volume_create(c, {'size': 1, 'instance_id': instance_id})
    fix_addr = db.fixed_ip_create(c, {'address': '1.1.1.1',
                                      'instance_id': instance_id})
    fix_ref = db.fixed_ip_get_by_address(c, fix_addr)
    flo_ref = db.floating_ip_create(c, {'address': flo_addr,
                                        'fixed_ip_id': fix_ref['id']})
    # reload is necessary before setting mocks
    i_ref = db.instance_get(c, instance_id)

    # Preparing mocks: every attached volume is detached from the source
    # and the firewall filters for the instance are removed.
    self.mox.StubOutWithMock(self.compute.volume_manager,
                             'remove_compute_volume')
    for v in i_ref['volumes']:
        self.compute.volume_manager.remove_compute_volume(c, v['id'])
    self.mox.StubOutWithMock(self.compute.driver, 'unfilter_instance')
    self.compute.driver.unfilter_instance(i_ref, [])

    # executing
    self.mox.ReplayAll()
    ret = self.compute.post_live_migration(c, i_ref, dest)

    # make sure every data is rewritten to dest
    i_ref = db.instance_get(c, i_ref['id'])
    c1 = (i_ref['host'] == dest)
    flo_refs = db.floating_ip_get_all_by_host(c, dest)
    c2 = (len(flo_refs) != 0 and flo_refs[0]['address'] == flo_addr)

    # post operation: assert and tear down the fixtures.
    self.assertTrue(c1 and c2)
    db.instance_destroy(c, instance_id)
    db.volume_destroy(c, v_ref['id'])
    db.floating_ip_destroy(c, flo_addr)