def test_describe_instances(self):
    """Makes sure describe_instances works and filters results."""
    # Two instances under one reservation, each on a different host.
    first = db.instance_create(self.context,
                               {'reservation_id': 'a', 'host': 'host1'})
    second = db.instance_create(self.context,
                                {'reservation_id': 'a', 'host': 'host2'})
    # Matching compute services so each host resolves to a zone.
    svc1 = db.service_create(self.context, {'host': 'host1',
                                            'availability_zone': 'zone1',
                                            'topic': "compute"})
    svc2 = db.service_create(self.context, {'host': 'host2',
                                            'availability_zone': 'zone2',
                                            'topic': "compute"})

    # Unfiltered call returns both instances in one reservation set.
    reservation = self.cloud.describe_instances(
        self.context)['reservationSet'][0]
    self.assertEqual(len(reservation['instancesSet']), 2)

    # Filtering by one EC2 id narrows the result to that instance.
    ec2_id = cloud.id_to_ec2_id(second['id'])
    reservation = self.cloud.describe_instances(
        self.context, instance_id=[ec2_id])['reservationSet'][0]
    self.assertEqual(len(reservation['instancesSet']), 1)
    self.assertEqual(reservation['instancesSet'][0]['instanceId'], ec2_id)
    self.assertEqual(
        reservation['instancesSet'][0]['placement']['availabilityZone'],
        'zone2')

    # Clean up everything created above.
    db.instance_destroy(self.context, first['id'])
    db.instance_destroy(self.context, second['id'])
    db.service_destroy(self.context, svc1['id'])
    db.service_destroy(self.context, svc2['id'])
def test_live_migration_src_check_volume_node_not_alive(self):
    """Raise exception when volume node is not alive."""
    instance_id = self._create_instance()
    i_ref = db.instance_get(self.context, instance_id)
    # Attach a volume so the scheduler must consult the volume service.
    # (The original assigned a dict to ``dic`` here and immediately
    # overwrote it without use — dead store removed.)
    v_ref = db.volume_create(self.context, {'instance_id': instance_id,
                                            'size': 1})
    # Register a nova-volume service whose heartbeat is a day old,
    # so the liveness check considers it dead.
    t1 = utils.utcnow() - datetime.timedelta(1)
    dic = {'created_at': t1, 'updated_at': t1, 'binary': 'nova-volume',
           'topic': 'volume', 'report_count': 0}
    s_ref = db.service_create(self.context, dic)

    self.assertRaises(exception.VolumeServiceUnavailable,
                      self.scheduler.driver.schedule_live_migration,
                      self.context, instance_id, i_ref['host'])

    db.instance_destroy(self.context, instance_id)
    db.service_destroy(self.context, s_ref['id'])
    db.volume_destroy(self.context, v_ref['id'])
def test_live_migration_common_check_service_different_hypervisor(self):
    """Original host and dest host have different hypervisor types."""
    dest = "dummydest"
    instance_id = self._create_instance()
    i_ref = db.instance_get(self.context, instance_id)

    # compute service for the original (source) host
    s_ref = self._create_compute_service(host=i_ref["host"])
    # compute service for the destination, with a different hypervisor
    s_ref2 = self._create_compute_service(host=dest,
                                          hypervisor_type="xen")

    # mocks: stub the shared-storage check so the hypervisor-type
    # comparison under test is reached
    driver = self.scheduler.driver
    self.mox.StubOutWithMock(driver, "mounted_on_same_shared_storage")
    driver.mounted_on_same_shared_storage(mox.IgnoreArg(), i_ref, dest)

    self.mox.ReplayAll()
    # Mismatched hypervisor types must raise InvalidHypervisorType.
    self.assertRaises(
        exception.InvalidHypervisorType,
        self.scheduler.driver._live_migration_common_check,
        self.context,
        i_ref,
        dest,
    )

    db.instance_destroy(self.context, instance_id)
    db.service_destroy(self.context, s_ref["id"])
    db.service_destroy(self.context, s_ref2["id"])
def kill(self):
    """Stop the service and remove its record from the datastore."""
    self.stop()
    admin_ctxt = context.get_admin_context()
    try:
        db.service_destroy(admin_ctxt, self.service_id)
    except exception.NotFound:
        # Nothing to delete; warn rather than fail shutdown.
        logging.warn(_('Service killed that has no database entry'))
def test_live_migration_common_check_service_orig_not_exists(self):
    """Destination host does not exist."""
    dest = 'dummydest'

    # mocks for live_migration_common_check()
    instance_id = self._create_instance()
    i_ref = db.instance_get(self.context, instance_id)
    # Back-date the service heartbeat by 10 days so it is seen as down.
    t1 = utils.utcnow() - datetime.timedelta(10)
    s_ref = self._create_compute_service(created_at=t1,
                                         updated_at=t1,
                                         host=dest)

    # mocks for mounted_on_same_shared_storage(): expected RPC sequence
    # is create a test file on dest, check it from the source host,
    # then clean it up on dest.
    fpath = '/test/20110127120000'
    self.mox.StubOutWithMock(driver, 'rpc', use_mock_anything=True)
    topic = FLAGS.compute_topic
    driver.rpc.call(mox.IgnoreArg(),
                    db.queue_get_for(self.context, topic, dest),
                    {"method": 'create_shared_storage_test_file'}
                    ).AndReturn(fpath)
    driver.rpc.call(mox.IgnoreArg(),
                    db.queue_get_for(mox.IgnoreArg(), topic,
                                     i_ref['host']),
                    {"method": 'check_shared_storage_test_file',
                     "args": {'filename': fpath}})
    driver.rpc.call(mox.IgnoreArg(),
                    db.queue_get_for(mox.IgnoreArg(), topic, dest),
                    {"method": 'cleanup_shared_storage_test_file',
                     "args": {'filename': fpath}})

    self.mox.ReplayAll()
    self.assertRaises(exception.SourceHostUnavailable,
                      self.scheduler.driver._live_migration_common_check,
                      self.context, i_ref, dest)

    db.instance_destroy(self.context, instance_id)
    db.service_destroy(self.context, s_ref['id'])
def test_live_migration_common_check_service_different_version(self):
    """Original host and dest host have different hypervisor versions."""
    dest = 'dummydest'
    instance_id = self._create_instance()
    i_ref = db.instance_get(self.context, instance_id)

    # compute service for the original (source) host
    s_ref = self._create_compute_service(host=i_ref['host'])
    # compute service for the destination, with hypervisor version 12002
    s_ref2 = self._create_compute_service(host=dest,
                                          hypervisor_version=12002)

    # mocks: stub the shared-storage check so the version comparison
    # under test is reached
    driver = self.scheduler.driver
    self.mox.StubOutWithMock(driver, 'mounted_on_same_shared_storage')
    driver.mounted_on_same_shared_storage(mox.IgnoreArg(), i_ref, dest)

    self.mox.ReplayAll()
    # A destination hypervisor older than the source's must raise.
    self.assertRaises(exception.DestinationHypervisorTooOld,
                      self.scheduler.driver._live_migration_common_check,
                      self.context, i_ref, dest)

    db.instance_destroy(self.context, instance_id)
    db.service_destroy(self.context, s_ref['id'])
    db.service_destroy(self.context, s_ref2['id'])
def test_describe_instances(self):
    """Makes sure describe_instances works and filters results."""
    # Two instances sharing a reservation, placed on different hosts.
    instance_a = db.instance_create(self.context,
                                    {'reservation_id': 'a',
                                     'image_id': 1,
                                     'host': 'host1'})
    instance_b = db.instance_create(self.context,
                                    {'reservation_id': 'a',
                                     'image_id': 1,
                                     'host': 'host2'})
    # Compute services give each host an availability zone.
    service_a = db.service_create(self.context,
                                  {'host': 'host1',
                                   'availability_zone': 'zone1',
                                   'topic': "compute"})
    service_b = db.service_create(self.context,
                                  {'host': 'host2',
                                   'availability_zone': 'zone2',
                                   'topic': "compute"})

    # With no filter, both instances come back in one reservation.
    reservation = self.cloud.describe_instances(
        self.context)['reservationSet'][0]
    self.assertEqual(len(reservation['instancesSet']), 2)

    # Filtering by the second instance's EC2 id returns only it,
    # placed in host2's zone.
    ec2_id = ec2utils.id_to_ec2_id(instance_b['id'])
    reservation = self.cloud.describe_instances(
        self.context, instance_id=[ec2_id])['reservationSet'][0]
    instances = reservation['instancesSet']
    self.assertEqual(len(instances), 1)
    self.assertEqual(instances[0]['instanceId'], ec2_id)
    self.assertEqual(instances[0]['placement']['availabilityZone'],
                     'zone2')

    db.instance_destroy(self.context, instance_a['id'])
    db.instance_destroy(self.context, instance_b['id'])
    db.service_destroy(self.context, service_a['id'])
    db.service_destroy(self.context, service_b['id'])
def test_show_host_resources_works_correctly(self):
    """Show_host_resources() works correctly as expected."""
    scheduler = manager.SchedulerManager()
    ctxt = context.get_admin_context()
    s_ref = self._create_compute_service()
    i_ref1 = self._create_instance(project_id='p-01', host=s_ref['host'])
    i_ref2 = self._create_instance(project_id='p-02', vcpus=3,
                                   host=s_ref['host'])

    result = scheduler.show_host_resources(ctxt, s_ref['host'])

    # Assert each condition separately so a failure pinpoints the cause
    # (the original and-ed five booleans into one assertTrue).
    self.assertTrue('resource' in result and 'usage' in result)
    compute_node = s_ref['compute_node'][0]
    self.assertTrue(self._dic_is_equal(result['resource'], compute_node))
    # Compare as sets: dict key order is not guaranteed, and the old
    # ``.keys() == [...]`` comparison is always False on Python 3 views.
    self.assertEqual(set(result['usage'].keys()), set(['p-01', 'p-02']))
    keys = ['vcpus', 'memory_mb', 'local_gb']
    self.assertTrue(self._dic_is_equal(result['usage']['p-01'],
                                       i_ref1, keys))
    self.assertTrue(self._dic_is_equal(result['usage']['p-02'],
                                       i_ref2, keys))

    db.service_destroy(ctxt, s_ref['id'])
    db.instance_destroy(ctxt, i_ref1['id'])
    db.instance_destroy(ctxt, i_ref2['id'])
def _test_destroy(self):
    """destroy() must issue one db.service_destroy for the object's id."""
    # Expect exactly one service_destroy DB call with this context/id.
    self.mox.StubOutWithMock(db, 'service_destroy')
    db.service_destroy(self.context, 123)
    self.mox.ReplayAll()

    # Service carries its context; destroy() takes no argument here.
    service_obj = service.Service(context=self.context)
    service_obj.id = 123
    service_obj.destroy()
def _test_destroy(self):
    """destroy(ctxt) must issue one db.service_destroy for the id."""
    # Expect exactly one service_destroy DB call with this context/id.
    self.mox.StubOutWithMock(db, 'service_destroy')
    db.service_destroy(self.context, 123)
    self.mox.ReplayAll()

    # In this variant the context is passed to destroy() explicitly.
    service_obj = service.Service()
    service_obj.id = 123
    service_obj.destroy(self.context)
def test_show_works_correctly(self):
    """show() works correctly as expected."""
    ctxt = context.get_admin_context()
    s_ref = self._create_compute_service()
    i_ref1 = _create_instance(project_id='p-01', host=s_ref['host'])
    i_ref2 = _create_instance(project_id='p-02', vcpus=3,
                              host=s_ref['host'])

    result = self.controller.show(self.req, s_ref['host'])

    # Separate asserts so a failure pinpoints the broken condition.
    self.assertTrue('resource' in result['host'] and
                    'usage' in result['host'])
    compute_node = s_ref['compute_node'][0]
    self.assertTrue(self._dic_is_equal(result['host']['resource'],
                                       compute_node))
    # Compare as sets: dict key order is not guaranteed, and the old
    # ``.keys() == [...]`` comparison is always False on Python 3 views.
    self.assertEqual(set(result['host']['usage'].keys()),
                     set(['p-01', 'p-02']))
    keys = ['vcpus', 'memory_mb']
    self.assertTrue(self._dic_is_equal(
        result['host']['usage']['p-01'], i_ref1, keys))
    self.assertTrue(self._dic_is_equal(
        result['host']['usage']['p-02'], i_ref2, keys))
    # Check each project's disk usage against its own instance.  The
    # original compared p-01's usage against i_ref2's disk, which only
    # passed while both instances shared the same default disk sizes.
    disk1 = i_ref1['root_gb'] + i_ref1['ephemeral_gb']
    disk2 = i_ref2['root_gb'] + i_ref2['ephemeral_gb']
    self.assertEqual(result['host']['usage']['p-01']['local_gb'], disk1)
    self.assertEqual(result['host']['usage']['p-02']['local_gb'], disk2)

    db.service_destroy(ctxt, s_ref['id'])
    db.instance_destroy(ctxt, i_ref1['id'])
    db.instance_destroy(ctxt, i_ref2['id'])
def _test_destroy(self):
    """destroy(ctxt) must issue one db.service_destroy for the id."""
    # Expect one service_destroy call with the admin context and id.
    ctxt = context.get_admin_context()
    self.mox.StubOutWithMock(db, 'service_destroy')
    db.service_destroy(ctxt, 123)
    self.mox.ReplayAll()

    service_obj = service.Service()
    service_obj.id = 123
    service_obj.destroy(ctxt)
def test_service_disabled_on_create_based_on_flag(self):
    """With enable_new_services=False a new service record is disabled."""
    self.flags(enable_new_services=False)
    app = service.Service.create(host='foo', binary='nova-fake')
    app.start()
    app.stop()
    admin_ctxt = context.get_admin_context()
    # Fetch the record before deleting it, then check the flag took.
    ref = db.service_get(admin_ctxt, app.service_id)
    db.service_destroy(admin_ctxt, app.service_id)
    self.assertTrue(ref['disabled'])
def test_live_migration_dest_check_service_works_correctly(self):
    """Confirms method finishes with no error."""
    inst_id = self._create_instance()
    inst = db.instance_get(self.context, inst_id)
    # A destination service with low memory usage passes the check.
    svc = self._create_compute_service(host="somewhere",
                                       memory_mb_used=5)
    outcome = self.scheduler.driver._live_migration_dest_check(
        self.context, inst, "somewhere")
    self.assertTrue(outcome is None)
    db.instance_destroy(self.context, inst_id)
    db.service_destroy(self.context, svc["id"])
def test_service_disabled_on_create_based_on_flag(self):
    """With enable_new_services=False a new service record is disabled."""
    self.flags(enable_new_services=False)
    host = 'foo'
    binary = 'nova-fake'
    app = service.Service.create(host=host, binary=binary)
    app.start()
    app.stop()
    ref = db.service_get(context.get_admin_context(), app.service_id)
    db.service_destroy(context.get_admin_context(), app.service_id)
    # assertTrue replaces the deprecated assert_ alias.
    self.assertTrue(ref['disabled'])
def test_service_enabled_on_create_based_on_flag(self):
    """With enable_new_services=True a new service record is enabled."""
    self.flags(enable_new_services=True)
    host = "foo"
    binary = "nova-fake"
    app = service.Service.create(host=host, binary=binary)
    app.start()
    app.stop()
    ref = db.service_get(context.get_admin_context(), app.service_id)
    db.service_destroy(context.get_admin_context(), app.service_id)
    # assertFalse replaces the deprecated assert_(not ...) form.
    self.assertFalse(ref["disabled"])
def test_will_schedule_on_disabled_host_if_specified(self):
    """An explicitly requested host is used even when disabled."""
    s_ref = self._create_compute_service(host='host1')
    # Disable host1's compute service.
    svc = db.service_get_by_args(self.context, 'host1', 'nova-compute')
    db.service_update(self.context, svc['id'], {'disabled': True})
    # 'nova:host1' pins the instance to host1 regardless.
    instance_id = self._create_instance(availability_zone='nova:host1')
    chosen = self.scheduler.driver.schedule_run_instance(self.context,
                                                         instance_id)
    self.assertEqual('host1', chosen)
    db.instance_destroy(self.context, instance_id)
    db.service_destroy(self.context, s_ref['id'])
def test_live_migration_src_check_works_correctly(self):
    """Confirms this method finishes with no error."""
    inst_id = self._create_instance()
    inst = db.instance_get(self.context, inst_id)
    # A live compute service on the source host satisfies the check.
    svc = self._create_compute_service(host=inst["host"])
    outcome = self.scheduler.driver._live_migration_src_check(
        self.context, inst)
    self.assertTrue(outcome is None)
    db.instance_destroy(self.context, inst_id)
    db.service_destroy(self.context, svc["id"])
def test_live_migration_dest_check_service_same_host(self):
    """Confirms an exception is raised when dest and src are the same host."""
    instance_id = self._create_instance()
    i_ref = db.instance_get(self.context, instance_id)
    s_ref = self._create_compute_service(host=i_ref['host'])

    # Migrating an instance to the host it is already on must fail.
    self.assertRaises(exception.UnableToMigrateToSelf,
                      self.scheduler.driver._live_migration_dest_check,
                      self.context, i_ref, i_ref['host'])

    db.instance_destroy(self.context, instance_id)
    db.service_destroy(self.context, s_ref['id'])
def test_live_migration_dest_check_service_lack_memory(self):
    """Confirms exception raises when dest doesn't have enough memory."""
    inst_id = self._create_instance()
    inst = db.instance_get(self.context, inst_id)
    # Destination reports most of its memory already in use.
    svc = self._create_compute_service(host="somewhere",
                                       memory_mb_used=12)
    self.assertRaises(exception.MigrationError,
                      self.scheduler.driver._live_migration_dest_check,
                      self.context, inst, "somewhere")
    db.instance_destroy(self.context, inst_id)
    db.service_destroy(self.context, svc["id"])
def test_live_migration_src_check_works_correctly(self):
    """Confirms this method finishes with no error."""
    instance_id = self._create_instance()
    instance = db.instance_get(self.context, instance_id)
    # A live compute service on the source host satisfies the check.
    compute_svc = self._create_compute_service(host=instance['host'])
    result = self.scheduler.driver._live_migration_src_check(
        self.context, instance)
    self.assertTrue(result is None)
    db.instance_destroy(self.context, instance_id)
    db.service_destroy(self.context, compute_svc['id'])
def test_reserved_memory(self):
    """Ensures no oversubscription when host memory is reserved."""
    # Save the current flag value so it can be restored; the original
    # restored the literal ``06`` (a leading-zero octal constant, which
    # is a SyntaxError on Python 3) instead of the real previous value.
    original_reserved = FLAGS.cs_host_reserved_memory_mb
    FLAGS.cs_host_reserved_memory_mb = 36
    try:
        s_ref = self._create_compute_service(host='host1')
        instance_id1 = self._create_instance()
        # With most memory reserved, the instance cannot be placed.
        self.assertRaises(driver.NoValidHost,
                          self.scheduler.driver.schedule_run_instance,
                          self.context,
                          instance_id1)
        db.instance_destroy(self.context, instance_id1)
        db.service_destroy(self.context, s_ref['id'])
    finally:
        # Restore even if the assertion above fails.
        FLAGS.cs_host_reserved_memory_mb = original_reserved
def test_live_migration_dest_check_service_works_correctly(self):
    """Confirms method finishes with no error."""
    instance_id = self._create_instance()
    instance = db.instance_get(self.context, instance_id)
    # A destination service with low memory usage passes the check.
    compute_svc = self._create_compute_service(host='somewhere',
                                               memory_mb_used=5)
    result = self.scheduler.driver._live_migration_dest_check(
        self.context, instance, 'somewhere')
    self.assertTrue(result is None)
    db.instance_destroy(self.context, instance_id)
    db.service_destroy(self.context, compute_svc['id'])
def _test_stubbed(self, name, *args, **kwargs):
    """Stub db.<name>, call the conductor method, and verify the result.

    ``kwargs['returns']`` (default True) controls whether the conductor
    method is expected to propagate the DB API's return value.
    """
    self.mox.StubOutWithMock(db, name)
    getattr(db, name)(self.context, *args).AndReturn("fake-result")
    if name == "service_destroy":
        # TODO(russellb) This is a hack ... SetUp() starts the conductor()
        # service. There is a cleanup step that runs after this test which
        # also deletes the associated service record. This involves a call
        # to db.service_destroy(), which we have stubbed out.
        db.service_destroy(mox.IgnoreArg(), mox.IgnoreArg())
    self.mox.ReplayAll()
    result = getattr(self.conductor, name)(self.context, *args)
    self.assertEqual(result,
                     "fake-result" if kwargs.get("returns", True) else None)
def test_describe_instances(self):
    """Makes sure describe_instances works."""
    # One instance on host2, whose compute service sits in zone1.
    inst = db.instance_create(self.context, {'host': 'host2'})
    svc = db.service_create(self.context,
                            {'host': 'host2',
                             'availability_zone': 'zone1',
                             'topic': "compute"})
    result = self.cloud.describe_instances(self.context)
    described = result['reservationSet'][0]['instancesSet'][0]
    # The instance's placement reflects its host's availability zone.
    self.assertEqual(described['placement']['availabilityZone'], 'zone1')
    db.instance_destroy(self.context, inst['id'])
    db.service_destroy(self.context, svc['id'])
def test_no_oversubscription(self):
    """Ensures no oversubscription"""
    svc = self._create_compute_service(host='host1')
    # With host1 already occupied, the second instance has no valid
    # host to land on.
    occupied_id = self._create_instance(host='host1')
    pending_id = self._create_instance()
    self.assertRaises(driver.NoValidHost,
                      self.scheduler.driver.schedule_run_instance,
                      self.context,
                      pending_id)
    db.instance_destroy(self.context, pending_id)
    db.instance_destroy(self.context, occupied_id)
    db.service_destroy(self.context, svc['id'])
def test_live_migration_src_check_compute_node_not_alive(self):
    """Confirms src-compute node is alive."""
    inst_id = self._create_instance()
    inst = db.instance_get(self.context, inst_id)
    # A heartbeat 10 days old marks the source service as dead.
    stale = utils.utcnow() - datetime.timedelta(10)
    svc = self._create_compute_service(created_at=stale,
                                       updated_at=stale,
                                       host=inst["host"])
    self.assertRaises(exception.ComputeServiceUnavailable,
                      self.scheduler.driver._live_migration_src_check,
                      self.context, inst)
    db.instance_destroy(self.context, inst_id)
    db.service_destroy(self.context, svc["id"])
def test_live_migration_dest_check_service_lack_memory(self):
    """Confirms exception raises when dest doesn't have enough memory."""
    instance_id = self._create_instance()
    instance = db.instance_get(self.context, instance_id)
    # Destination reports most of its memory already in use.
    compute_svc = self._create_compute_service(host='somewhere',
                                               memory_mb_used=12)
    self.assertRaises(exception.MigrationError,
                      self.scheduler.driver._live_migration_dest_check,
                      self.context, instance, 'somewhere')
    db.instance_destroy(self.context, instance_id)
    db.service_destroy(self.context, compute_svc['id'])
def test_update_available_resource_works_correctly(self):
    """Confirm compute_node table is updated successfully."""
    # Save the real flag value before overriding it.  The original used
    # a chained assignment (org_path = FLAGS.instances_path = '') which
    # saved '' instead of the previous value, so the restore at the end
    # clobbered the flag for any later tests.
    org_path = FLAGS.instances_path
    FLAGS.instances_path = '.'

    # Prepare mocks: canned libvirt answers for version/type/domains.
    def getVersion():
        return 12003

    def getType():
        return 'qemu'

    def listDomainsID():
        return []

    service_ref = self.create_service(host='dummy')
    self.create_fake_libvirt_mock(getVersion=getVersion,
                                  getType=getType,
                                  listDomainsID=listDomainsID)
    self.mox.StubOutWithMock(libvirt_conn.LibvirtConnection,
                             'get_cpu_info')
    libvirt_conn.LibvirtConnection.get_cpu_info().AndReturn('cpuinfo')

    # Start test
    self.mox.ReplayAll()
    conn = libvirt_conn.LibvirtConnection(False)
    conn.update_available_resource(self.context, 'dummy')
    service_ref = db.service_get(self.context, service_ref['id'])
    compute_node = service_ref['compute_node'][0]

    if sys.platform.upper() == 'LINUX2':
        # On Linux real host stats are gathered, so usage is non-zero.
        self.assertTrue(compute_node['vcpus'] >= 0)
        self.assertTrue(compute_node['memory_mb'] > 0)
        self.assertTrue(compute_node['local_gb'] > 0)
        self.assertTrue(compute_node['vcpus_used'] == 0)
        self.assertTrue(compute_node['memory_mb_used'] > 0)
        self.assertTrue(compute_node['local_gb_used'] > 0)
        self.assertTrue(len(compute_node['hypervisor_type']) > 0)
        self.assertTrue(compute_node['hypervisor_version'] > 0)
    else:
        # Elsewhere the memory probes report zero.
        self.assertTrue(compute_node['vcpus'] >= 0)
        self.assertTrue(compute_node['memory_mb'] == 0)
        self.assertTrue(compute_node['local_gb'] > 0)
        self.assertTrue(compute_node['vcpus_used'] == 0)
        self.assertTrue(compute_node['memory_mb_used'] == 0)
        self.assertTrue(compute_node['local_gb_used'] > 0)
        self.assertTrue(len(compute_node['hypervisor_type']) > 0)
        self.assertTrue(compute_node['hypervisor_version'] > 0)

    db.service_destroy(self.context, service_ref['id'])
    FLAGS.instances_path = org_path
def test_live_migration_dest_check_not_alive(self):
    """Confirms exception raises in case dest host does not exist."""
    inst_id = self._create_instance()
    inst = db.instance_get(self.context, inst_id)
    # A service whose last report is 10 days old counts as not alive.
    stale = utils.utcnow() - datetime.timedelta(10)
    svc = self._create_compute_service(created_at=stale,
                                       updated_at=stale,
                                       host=inst['host'])
    self.assertRaises(exception.ComputeServiceUnavailable,
                      self.scheduler.driver._live_migration_dest_check,
                      self.context, inst, inst['host'])
    db.instance_destroy(self.context, inst_id)
    db.service_destroy(self.context, svc['id'])
def test_specific_host_gets_instance(self):
    """Ensures if you set availability_zone it launches on that zone"""
    s_ref = self._create_compute_service(host='host1')
    compute1 = self.start_service('compute', host='host1')
    s_ref2 = self._create_compute_service(host='host2')
    # NOTE(review): memory_mb is passed as the string '1' rather than an
    # int — presumably coerced by the DB layer; confirm intentional.
    instance_id1 = self._create_instance(host='host1', memory_mb='1')
    # 'nova:host1' pins the new instance to host1 via availability zone.
    instance_id2 = self._create_instance(availability_zone='nova:host1')
    host = self.scheduler.driver.schedule_run_instance(self.context,
                                                       instance_id2)
    self.assertEqual('host1', host)

    db.instance_destroy(self.context, instance_id2)
    db.instance_destroy(self.context, instance_id1)
    db.service_destroy(self.context, s_ref['id'])
    db.service_destroy(self.context, s_ref2['id'])
def test_least_busy_host_gets_instance(self):
    """Ensures the host with less cores gets the next one"""
    svc1 = self._create_compute_service(host='host1')
    svc2 = self._create_compute_service(host='host2')
    # host1 already runs an instance, so host2 is the less busy choice.
    busy_id = self._create_instance(host='host1')
    new_id = self._create_instance()
    chosen = self.scheduler.driver.schedule_run_instance(self.context,
                                                         new_id)
    self.assertEqual(chosen, 'host2')
    db.instance_destroy(self.context, new_id)
    db.instance_destroy(self.context, busy_id)
    db.service_destroy(self.context, svc1['id'])
    db.service_destroy(self.context, svc2['id'])
def test_show_no_project(self):
    """No instance are running on the given host."""
    ctxt = context.get_admin_context()
    s_ref = self._create_compute_service()

    result = self.controller.show(self.req, s_ref["host"])

    # Only the three summary rows appear when no project uses the host.
    expected_projects = ["(total)", "(used_now)", "(used_max)"]
    expected_columns = ["host", "project", "cpu", "memory_mb", "disk_gb"]
    self.assertEqual(len(result["host"]), 3)
    for entry in result["host"]:
        row = entry["resource"]
        self.assertTrue(row["project"] in expected_projects)
        self.assertEqual(len(row), 5)
        self.assertTrue(set(row.keys()) == set(expected_columns))

    db.service_destroy(ctxt, s_ref["id"])
def test_live_migration_dest_check_service_lack_memory(self):
    """Confirms exception raises when dest doesn't have enough memory."""
    inst_id = self._create_instance()
    # An instance already on the destination consumes its memory.
    occupier_id = self._create_instance(host='somewhere', memory_mb=12)
    inst = db.instance_get(self.context, inst_id)
    svc = self._create_compute_service(host='somewhere')
    # block_migration=False: only the memory check should trip here.
    self.assertRaises(exception.NovaException,
                      self.scheduler.driver._live_migration_dest_check,
                      self.context, inst, 'somewhere', False)
    db.instance_destroy(self.context, inst_id)
    db.instance_destroy(self.context, occupier_id)
    db.service_destroy(self.context, svc['id'])
def test_live_migration_src_check_compute_node_not_alive(self):
    """Confirms src-compute node is alive."""
    instance_id = self._create_instance()
    instance = db.instance_get(self.context, instance_id)
    # Back-date the service record 10 days so it looks dead.
    stale = datetime.datetime.utcnow() - datetime.timedelta(10)
    compute_svc = self._create_compute_service(created_at=stale,
                                               updated_at=stale,
                                               host=instance['host'])
    self.assertRaises(exception.ComputeServiceUnavailable,
                      self.scheduler.driver._live_migration_src_check,
                      self.context, instance)
    db.instance_destroy(self.context, instance_id)
    db.service_destroy(self.context, compute_svc['id'])
def test_show_no_project(self):
    """No instance are running on the given host."""
    ctxt = context.get_admin_context()
    s_ref = self._create_compute_service()

    result = self.controller.show(self.req, s_ref['host'])

    # With no project using the host, only the summary rows remain.
    summary_rows = ['(total)', '(used_now)', '(used_max)']
    columns = ['host', 'project', 'cpu', 'memory_mb', 'disk_gb']
    self.assertEqual(len(result['host']), 3)
    for item in result['host']:
        self.assertTrue(item['resource']['project'] in summary_rows)
        self.assertEqual(len(item['resource']), 5)
        self.assertTrue(set(item['resource'].keys()) == set(columns))

    db.service_destroy(ctxt, s_ref['id'])
def test_block_migration_dest_check_service_lack_disk(self):
    """Confirms exception raises when dest doesn't have enough disk."""
    inst_id = self._create_instance()
    # An instance already on the destination consumes its local disk.
    occupier_id = self._create_instance(host='somewhere', local_gb=70)
    inst = db.instance_get(self.context, inst_id)
    svc = self._create_compute_service(host='somewhere')
    # Final positional True requests the block-migration disk check.
    self.assertRaises(exception.MigrationError,
                      self.scheduler.driver._live_migration_dest_check,
                      self.context, inst, 'somewhere', True)
    db.instance_destroy(self.context, inst_id)
    db.instance_destroy(self.context, occupier_id)
    db.service_destroy(self.context, svc['id'])
def test_show_host_resources_no_project(self):
    """No instance are running on the given host."""
    scheduler = manager.SchedulerManager()
    ctxt = context.get_admin_context()
    s_ref = self._create_compute_service()

    result = scheduler.show_host_resources(ctxt, s_ref['host'])

    # result checking: both keys present, resource matches the compute
    # node record, and usage is empty since no instances exist.
    self.assertTrue('resource' in result and 'usage' in result)
    compute_node = s_ref['compute_node'][0]
    self.assertTrue(self._dic_is_equal(result['resource'], compute_node))
    self.assertTrue(result['usage'] == {})

    db.service_destroy(ctxt, s_ref['id'])
def test_describe_availability_zones(self):
    """Makes sure describe_availability_zones works and filters results."""
    # Two compute services in distinct zones.
    zone1_svc = db.service_create(self.context,
                                  {'host': 'host1_zones',
                                   'binary': "nova-compute",
                                   'topic': 'compute',
                                   'report_count': 0,
                                   'availability_zone': "zone1"})
    zone2_svc = db.service_create(self.context,
                                  {'host': 'host2_zones',
                                   'binary': "nova-compute",
                                   'topic': 'compute',
                                   'report_count': 0,
                                   'availability_zone': "zone2"})
    result = self.cloud.describe_availability_zones(self.context)
    # NOTE(review): three entries for two created zones — the third is
    # presumably a built-in/internal zone; confirm against the API.
    self.assertEqual(len(result['availabilityZoneInfo']), 3)
    db.service_destroy(self.context, zone1_svc['id'])
    db.service_destroy(self.context, zone2_svc['id'])
def _test_stubbed(self, name, *args, **kwargs):
    """Stub db.<name>, call the conductor method, and verify the result.

    If the first positional arg is a FakeContext it becomes the context
    the stub expects; the conductor itself is always invoked with
    self.context.  ``kwargs['returns']`` (default True) controls
    whether the DB return value is expected to propagate.
    """
    if args and isinstance(args[0], FakeContext):
        ctxt = args[0]
        args = args[1:]
    else:
        ctxt = self.context
    self.mox.StubOutWithMock(db, name)
    getattr(db, name)(ctxt, *args).AndReturn('fake-result')
    if name == 'service_destroy':
        # TODO(russellb) This is a hack ... SetUp() starts the conductor()
        # service. There is a cleanup step that runs after this test which
        # also deletes the associated service record. This involves a call
        # to db.service_destroy(), which we have stubbed out.
        db.service_destroy(mox.IgnoreArg(), mox.IgnoreArg())
    self.mox.ReplayAll()
    result = getattr(self.conductor, name)(self.context, *args)
    self.assertEqual(
        result,
        'fake-result' if kwargs.get('returns', True) else None)
def test_live_migration_common_check_service_orig_not_exists(self):
    """Destination host does not exist."""
    dest = 'dummydest'

    # mocks for live_migration_common_check()
    instance_id = self._create_instance()
    i_ref = db.instance_get(self.context, instance_id)
    # Back-date the service heartbeat by 10 days so it is seen as down.
    t1 = utils.utcnow() - datetime.timedelta(10)
    s_ref = self._create_compute_service(created_at=t1,
                                         updated_at=t1,
                                         host=dest)

    # mocks for mounted_on_same_shared_storage(): expected RPC sequence
    # is create a test file on dest, check it from the source host,
    # then clean it up on dest.
    fpath = '/test/20110127120000'
    self.mox.StubOutWithMock(driver, 'rpc', use_mock_anything=True)
    topic = FLAGS.compute_topic
    driver.rpc.call(mox.IgnoreArg(),
                    db.queue_get_for(self.context, topic, dest),
                    {
                        "method": 'create_shared_storage_test_file'
                    }).AndReturn(fpath)
    driver.rpc.call(
        mox.IgnoreArg(),
        db.queue_get_for(mox.IgnoreArg(), topic, i_ref['host']),
        {
            "method": 'check_shared_storage_test_file',
            "args": {
                'filename': fpath
            }
        })
    driver.rpc.call(
        mox.IgnoreArg(),
        db.queue_get_for(mox.IgnoreArg(), topic, dest),
        {
            "method": 'cleanup_shared_storage_test_file',
            "args": {
                'filename': fpath
            }
        })

    self.mox.ReplayAll()
    self.assertRaises(exception.SourceHostUnavailable,
                      self.scheduler.driver._live_migration_common_check,
                      self.context, i_ref, dest)

    db.instance_destroy(self.context, instance_id)
    db.service_destroy(self.context, s_ref['id'])
def test_show_works_correctly(self):
    """show() works correctly as expected."""
    ctxt = context.get_admin_context()
    s_ref = self._create_compute_service()
    i_ref1 = _create_instance(project_id='p-01', host=s_ref['host'])
    i_ref2 = _create_instance(project_id='p-02', vcpus=3,
                              host=s_ref['host'])

    result = self.controller.show(self.req, s_ref['host'])

    # Three summary rows plus one row per project with instances.
    expected_projects = ['(total)', '(used_now)', '(used_max)',
                         'p-01', 'p-02']
    expected_columns = ['host', 'project', 'cpu', 'memory_mb',
                        'disk_gb']
    self.assertEqual(len(result['host']), 5)
    for entry in result['host']:
        row = entry['resource']
        self.assertTrue(row['project'] in expected_projects)
        self.assertEqual(len(row), 5)
        self.assertTrue(set(row.keys()) == set(expected_columns))

    db.service_destroy(ctxt, s_ref['id'])
    db.instance_destroy(ctxt, i_ref1['uuid'])
    db.instance_destroy(ctxt, i_ref2['uuid'])
def test_compute_can_update_available_resource(self):
    """Confirm compute updates their record of compute-service table."""
    host = 'foo'
    binary = 'nova-compute'
    topic = 'compute'

    # Any mocks are not working without UnsetStubs() here.
    self.mox.UnsetStubs()
    ctxt = context.get_admin_context()
    service_ref = db.service_create(ctxt, {'host': host,
                                           'binary': binary,
                                           'topic': topic})
    serv = service.Service(host,
                           binary,
                           topic,
                           'nova.compute.manager.ComputeManager')

    # This testcase want to test calling update_available_resource.
    # No need to call periodic call, then below variable must be set 0.
    serv.report_interval = 0
    serv.periodic_interval = 0

    # Creating mocks
    self.mox.StubOutWithMock(service.rpc.Connection, 'instance')
    # NOTE(review): three connection expectations appear to match the
    # number of RPC consumers start() creates — confirm.
    service.rpc.Connection.instance(new=mox.IgnoreArg())
    service.rpc.Connection.instance(new=mox.IgnoreArg())
    service.rpc.Connection.instance(new=mox.IgnoreArg())
    self.mox.StubOutWithMock(serv.manager.driver,
                             'update_available_resource')
    serv.manager.driver.update_available_resource(mox.IgnoreArg(), host)

    # Just doing start()-stop(), not confirm new db record is created,
    # because update_available_resource() works only in
    # libvirt environment. This testcase confirms
    # update_available_resource() is called. Otherwise, mox complains.
    self.mox.ReplayAll()
    serv.start()
    serv.stop()

    db.service_destroy(ctxt, service_ref['id'])
def destroy(self, context):
    """Delete this service's record from the database."""
    db.service_destroy(context, self.id)
def destroy(self):
    """Delete this service's record using the object's saved context."""
    db.service_destroy(self._context, self.id)
def _destroy_service(self, service):
    # Remove the given service row; propagates the DB API return value.
    return db.service_destroy(self.context, service['id'])
def destroy(self):
    """Delete this service's DB record and emit a delete notification."""
    db.service_destroy(self._context, self.id)
    self._send_notification(fields.NotificationAction.DELETE)
        # NOTE(review): continuation of a test method whose header lies
        # above this chunk; indentation reconstructed, tokens unchanged.
        self.mox.StubOutWithMock(rpc, 'call', use_mock_anything=True)
        # Expect a compare_cpu RPC that fails with a remote error.
        rpc.call(mox.IgnoreArg(), mox.IgnoreArg(),
                 {"method": 'compare_cpu',
                  "args": {'cpu_info':
                           s_ref2['compute_node'][0]['cpu_info']}}
                 ).AndRaise(rpc.RemoteError(
                     "doesn't have compatibility to", "", ""))
        self.mox.ReplayAll()
        try:
            self.scheduler.driver._live_migration_common_check(
                self.context, i_ref, dest)
        except rpc.RemoteError, e:
            # Python 2 except syntax; the error text must mention the
            # CPU-compatibility failure raised above.
            c = (e.message.find(_("doesn't have compatibility to")) >= 0)
            self.assertTrue(c)

        db.instance_destroy(self.context, instance_id)
        db.service_destroy(self.context, s_ref['id'])
        db.service_destroy(self.context, s_ref2['id'])


class FakeZone(object):
    """Minimal stand-in for a zone record used by zone_get_all."""

    def __init__(self, id, api_url, username, password):
        self.id = id
        self.api_url = api_url
        self.username = username
        self.password = password


def zone_get_all(context):
    """Return a fixed list of fake zones regardless of context."""
    return [
        FakeZone(1, 'http://example.com', 'bob', 'xxx'),
    ]