def check_last_vm(host, context):
    if host != FLAGS.cluster_name:
        # Debug trace
        out = open("/home/fabioferretti/output", "a")
        out.write("\nhost --> %s" % host)
        out.write("\nFLAGS.cluster_name --> %s" % FLAGS.cluster_name)
        out.close()
        result = db.instance_get_all_by_host(context, host)
        if len(result) == 0:
            # Tell the cluster controller that this node no longer has
            # any active VM
            node_informations(host, context)
            # If the node can be woken via wake-on-lan, suspend it
            if FLAGS.wakeable:
                sleep_on_lan(FLAGS.my_ip)
            # Last VM already gone: live migration is not needed
            return True
        # Other VMs are still running on this compute node; check whether
        # the other nodes have enough capacity to run them
        return False
    else:
        result = db.instance_get_all_by_host(context, host)
        if len(result) == 0:
            change_node_state(host, "idle")
        # This host runs the cluster controller and its VMs never migrate
        return True
def test_live_migration_all_checks_pass(self):
    # Test live migration when all checks pass.
    self.mox.StubOutWithMock(servicegroup.API, 'service_is_up')
    self.mox.StubOutWithMock(db, 'service_get_by_compute_host')
    self.mox.StubOutWithMock(db, 'instance_get_all_by_host')
    self.mox.StubOutWithMock(rpc, 'call')
    self.mox.StubOutWithMock(rpc, 'cast')
    self.mox.StubOutWithMock(self.driver.compute_rpcapi, 'live_migration')

    dest = 'fake_host2'
    block_migration = True
    disk_over_commit = True
    instance = jsonutils.to_primitive(self._live_migration_instance())

    # Source checks
    db.service_get_by_compute_host(self.context,
            instance['host']).AndReturn('fake_service2')
    self.servicegroup_api.service_is_up('fake_service2').AndReturn(True)

    # Destination checks (compute is up, enough memory, disk)
    db.service_get_by_compute_host(self.context,
            dest).AndReturn('fake_service3')
    self.servicegroup_api.service_is_up('fake_service3').AndReturn(True)
    # assert_compute_node_has_enough_memory()
    db.service_get_by_compute_host(self.context, dest).AndReturn(
            {'compute_node': [{'memory_mb': 2048,
                               'hypervisor_version': 1}]})
    db.instance_get_all_by_host(self.context, dest).AndReturn(
            [dict(memory_mb=256), dict(memory_mb=512)])

    # Common checks (same hypervisor, etc)
    db.service_get_by_compute_host(self.context, dest).AndReturn(
            {'compute_node': [{'hypervisor_type': 'xen',
                               'hypervisor_version': 1}]})
    db.service_get_by_compute_host(self.context,
            instance['host']).AndReturn(
                    {'compute_node': [{'hypervisor_type': 'xen',
                                       'hypervisor_version': 1,
                                       'cpu_info': 'fake_cpu_info'}]})

    rpc.call(self.context, "compute.fake_host2",
             {"method": 'check_can_live_migrate_destination',
              "args": {'instance': instance,
                       'block_migration': block_migration,
                       'disk_over_commit': disk_over_commit},
              "version": compute_rpcapi.ComputeAPI.BASE_RPC_API_VERSION},
             None).AndReturn({})

    self.driver.compute_rpcapi.live_migration(self.context,
            host=instance['host'], instance=instance, dest=dest,
            block_migration=block_migration, migrate_data={})

    self.mox.ReplayAll()
    result = self.driver.schedule_live_migration(self.context,
            instance=instance, dest=dest,
            block_migration=block_migration,
            disk_over_commit=disk_over_commit)
    self.assertEqual(result, None)
def test_live_migration_dest_check_service_lack_memory(self):
    """Confirms exception raises when dest doesn't have enough memory."""
    self.mox.StubOutWithMock(self.driver, "_live_migration_src_check")
    self.mox.StubOutWithMock(db, "service_get_all_compute_by_host")
    self.mox.StubOutWithMock(utils, "service_is_up")
    self.mox.StubOutWithMock(self.driver, "_get_compute_info")
    self.mox.StubOutWithMock(db, "instance_get_all_by_host")

    dest = "fake_host2"
    block_migration = False
    disk_over_commit = False
    instance = self._live_migration_instance()

    self.driver._live_migration_src_check(self.context, instance)
    db.service_get_all_compute_by_host(self.context,
            dest).AndReturn(["fake_service3"])
    utils.service_is_up("fake_service3").AndReturn(True)

    self.driver._get_compute_info(self.context,
            dest).AndReturn({"memory_mb": 2048})
    db.instance_get_all_by_host(self.context, dest).AndReturn(
            [dict(memory_mb=1024), dict(memory_mb=512)])

    self.mox.ReplayAll()
    self.assertRaises(exception.MigrationError,
                      self.driver.schedule_live_migration, self.context,
                      instance=instance, dest=dest,
                      block_migration=block_migration,
                      disk_over_commit=disk_over_commit)
def test_live_migration_dest_check_service_lack_memory(self):
    """Confirms exception raises when dest doesn't have enough memory."""
    self.mox.StubOutWithMock(db, 'instance_get')
    self.mox.StubOutWithMock(self.driver, '_live_migration_src_check')
    self.mox.StubOutWithMock(db, 'service_get_all_compute_by_host')
    self.mox.StubOutWithMock(utils, 'service_is_up')
    self.mox.StubOutWithMock(self.driver, '_get_compute_info')
    self.mox.StubOutWithMock(db, 'instance_get_all_by_host')

    dest = 'fake_host2'
    block_migration = False
    disk_over_commit = False
    instance = self._live_migration_instance()
    db.instance_get(self.context, instance['id']).AndReturn(instance)

    self.driver._live_migration_src_check(self.context, instance)
    db.service_get_all_compute_by_host(self.context,
            dest).AndReturn(['fake_service3'])
    utils.service_is_up('fake_service3').AndReturn(True)

    self.driver._get_compute_info(self.context, dest,
                                  'memory_mb').AndReturn(2048)
    db.instance_get_all_by_host(self.context, dest).AndReturn(
            [dict(memory_mb=1024), dict(memory_mb=512)])

    self.mox.ReplayAll()
    self.assertRaises(exception.MigrationError,
                      self.driver.schedule_live_migration, self.context,
                      instance_id=instance['id'], dest=dest,
                      block_migration=block_migration,
                      disk_over_commit=disk_over_commit)
def check_for_migration(manager, host, contexta):
    # Migration must only be performed by the compute nodes
    if host != FLAGS.cluster_name:
        contexto = nova.context.get_admin_context()
        results = db.service_get_all_compute_sorted_desc(contexto)
        istanze_attive_nodo_locale = db.instance_get_all_by_host(contexto,
                                                                 host)
        for result in results:
            (service, instance_cores) = result
            if service['host'] != host:
                istanze_attive_nodo_remoto = db.instance_get_all_by_host(
                        contexto, service['host'])
                # Condition that enables the migration
                if len(istanze_attive_nodo_locale) <= \
                        len(istanze_attive_nodo_remoto):
                    for istanza in istanze_attive_nodo_locale:
                        repeat = True
                        while repeat:
                            manager.live_migration(contexto, istanza['id'],
                                                   service['host'])
                            _instance_update(contexto, istanza['id'],
                                             vm_state=vm_states.MIGRATING)
                            old_instance = istanza
                            time.sleep(15)
                            new_instances = db.instance_get_all_by_host(
                                    contexto, service['host'])
                            for new_instance in new_instances:
                                if new_instance['uuid'] == \
                                        old_instance['uuid']:
                                    # Instance migrated successfully
                                    _instance_update(
                                            contexto, istanza['id'],
                                            vm_state=vm_states.ACTIVE)
                                    repeat = False
                    check_last_vm(host, contexto)
def init_host(self, host=socket.gethostname()):
    """Initialize anything that is necessary for the driver to function,
    including catching up with currently running VE's on the given host.
    """
    ctxt = context.get_admin_context()
    instances = db.instance_get_all_by_host(ctxt, host)
    LOG.debug("Hostname: %s" % (host,))
    LOG.debug("Instances: %s" % (instances,))

    for instance in instances:
        try:
            LOG.debug("Checking state of %s" % instance["name"])
            state = self.get_info(instance["name"])["state"]
        except exception.NotFound:
            state = power_state.SHUTOFF

        LOG.debug("Current state of %s was %s." %
                  (instance["name"], state))
        db.instance_set_state(ctxt, instance["id"], state)

        if state == power_state.SHUTOFF:
            db.instance_destroy(ctxt, instance["id"])

        if state != power_state.RUNNING:
            continue
def test_instance_get_all(self):
    self.mox.StubOutWithMock(db, "instance_get_all")
    self.mox.StubOutWithMock(db, "instance_get_all_by_host")
    self.mox.StubOutWithMock(db, "instance_get_all_by_filters")
    db.instance_get_all(self.context)
    db.instance_get_all_by_host(self.context.elevated(), "fake-host")
    db.instance_get_all_by_filters(self.context, {"name": "fake-inst"},
                                   "updated_at", "asc")
    self.mox.ReplayAll()
    self.conductor.instance_get_all(self.context)
    self.conductor.instance_get_all_by_host(self.context, "fake-host")
    self.conductor.instance_get_all_by_filters(self.context,
                                               {"name": "fake-inst"},
                                               "updated_at", "asc")
def test_instance_get_all_by_host(self):
    self.mox.StubOutWithMock(db, "instance_get_all_by_host")
    self.mox.StubOutWithMock(db, "instance_get_all_by_host_and_node")
    db.instance_get_all_by_host(self.context.elevated(),
                                "host").AndReturn("result")
    db.instance_get_all_by_host_and_node(self.context.elevated(), "host",
                                         "node").AndReturn("result")
    self.mox.ReplayAll()
    result = self.conductor.instance_get_all_by_host(self.context, "host")
    self.assertEqual(result, "result")
    result = self.conductor.instance_get_all_by_host(self.context,
                                                     "host", "node")
    self.assertEqual(result, "result")
def test_get_by_host(self):
    fakes = [self.fake_instance(1), self.fake_instance(2)]
    self.mox.StubOutWithMock(db, "instance_get_all_by_host")
    db.instance_get_all_by_host(self.context, "foo",
                                columns_to_join=None,
                                use_slave=False).AndReturn(fakes)
    self.mox.ReplayAll()
    inst_list = instance.InstanceList.get_by_host(self.context, "foo")
    for i in range(0, len(fakes)):
        self.assertIsInstance(inst_list.objects[i], instance.Instance)
        self.assertEqual(inst_list.objects[i].uuid, fakes[i]["uuid"])
        self.assertEqual(inst_list.objects[i]._context, self.context)
    self.assertEqual(inst_list.obj_what_changed(), set())
    self.assertRemotes()
def test_instance_get_all(self):
    self.mox.StubOutWithMock(db, 'instance_get_all')
    self.mox.StubOutWithMock(db, 'instance_get_all_by_host')
    self.mox.StubOutWithMock(db, 'instance_get_all_by_filters')
    db.instance_get_all(self.context)
    db.instance_get_all_by_host(self.context.elevated(), 'fake-host')
    db.instance_get_all_by_filters(self.context, {'name': 'fake-inst'},
                                   'updated_at', 'asc')
    self.mox.ReplayAll()
    self.conductor.instance_get_all(self.context)
    self.conductor.instance_get_all_by_host(self.context, 'fake-host')
    self.conductor.instance_get_all_by_filters(self.context,
                                               {'name': 'fake-inst'},
                                               'updated_at', 'asc')
def test_describe_host(self):
    """Makes sure that describe_host returns the correct information
    given our fake input.
    """
    ctx = context.get_admin_context()
    self.mox.StubOutWithMock(db, 'service_get_all_compute_by_host')
    host_name = 'host_c1'
    db.service_get_all_compute_by_host(ctx, host_name).AndReturn(
            [{'host': 'fake_host',
              'compute_node': [{'vcpus': 4,
                                'vcpus_used': 1,
                                'memory_mb': 8192,
                                'memory_mb_used': 2048,
                                'local_gb': 1024,
                                'local_gb_used': 648}]}])
    self.mox.StubOutWithMock(db, 'instance_get_all_by_host')
    db.instance_get_all_by_host(ctx, 'fake_host').AndReturn(
            [{'project_id': 42,
              'vcpus': 1,
              'memory_mb': 2048,
              'root_gb': 648,
              'ephemeral_gb': 0}])
    self.mox.ReplayAll()
    result = self.api.describe_host(ctx, host_name)
    self.assertEqual(result,
                     [{'resource': {'cpu': 4,
                                    'disk_gb': 1024,
                                    'host': 'host_c1',
                                    'memory_mb': 8192,
                                    'project': '(total)'}},
                      {'resource': {'cpu': 1,
                                    'disk_gb': 648,
                                    'host': 'host_c1',
                                    'memory_mb': 2048,
                                    'project': '(used_now)'}},
                      {'resource': {'cpu': 1,
                                    'disk_gb': 648,
                                    'host': 'host_c1',
                                    'memory_mb': 2048,
                                    'project': '(used_max)'}},
                      {'resource': {'cpu': 1,
                                    'disk_gb': 648,
                                    'host': 'host_c1',
                                    'memory_mb': 2048,
                                    'project': 42}}])
    self.mox.VerifyAll()
def test_instance_get_all_by_host(self):
    self.mox.StubOutWithMock(db, 'instance_get_all_by_host')
    self.mox.StubOutWithMock(db, 'instance_get_all_by_host_and_node')
    db.instance_get_all_by_host(self.context.elevated(),
                                'host').AndReturn('result')
    db.instance_get_all_by_host_and_node(self.context.elevated(), 'host',
                                         'node').AndReturn('result')
    self.mox.ReplayAll()
    result = self.conductor.instance_get_all_by_host(self.context, 'host')
    self.assertEqual(result, 'result')
    result = self.conductor.instance_get_all_by_host(self.context,
                                                     'host', 'node')
    self.assertEqual(result, 'result')
def test_show_host_resources(self):
    host = 'fake_host'

    compute_node = {'host': host,
                    'compute_node': [{'vcpus': 4,
                                      'vcpus_used': 2,
                                      'memory_mb': 1024,
                                      'memory_mb_used': 512,
                                      'local_gb': 1024,
                                      'local_gb_used': 512}]}
    instances = [{'project_id': 'project1',
                  'vcpus': 1,
                  'memory_mb': 128,
                  'root_gb': 128,
                  'ephemeral_gb': 0},
                 {'project_id': 'project1',
                  'vcpus': 2,
                  'memory_mb': 256,
                  'root_gb': 384,
                  'ephemeral_gb': 0},
                 {'project_id': 'project2',
                  'vcpus': 2,
                  'memory_mb': 256,
                  'root_gb': 256,
                  'ephemeral_gb': 0}]

    self.mox.StubOutWithMock(db, 'service_get_by_compute_host')
    self.mox.StubOutWithMock(db, 'instance_get_all_by_host')

    db.service_get_by_compute_host(self.context, host).AndReturn(
            compute_node)
    db.instance_get_all_by_host(self.context, host).AndReturn(instances)

    self.mox.ReplayAll()
    result = self.manager.show_host_resources(self.context, host)
    expected = {'usage': {'project1': {'memory_mb': 384,
                                       'vcpus': 3,
                                       'root_gb': 512,
                                       'ephemeral_gb': 0},
                          'project2': {'memory_mb': 256,
                                       'vcpus': 2,
                                       'root_gb': 256,
                                       'ephemeral_gb': 0}},
                'resource': {'vcpus': 4,
                             'vcpus_used': 2,
                             'local_gb': 1024,
                             'local_gb_used': 512,
                             'memory_mb': 1024,
                             'memory_mb_used': 512}}
    self.assertThat(result, matchers.DictMatches(expected))
def test_init_host_with_deleted_migration(self):
    our_host = self.compute.host
    not_our_host = 'not-' + our_host
    fake_context = 'fake-context'

    deleted_instance = {
        'name': 'fake-name',
        'host': not_our_host,
        'uuid': 'fake-uuid',
        }

    self.mox.StubOutWithMock(self.compute.driver, 'init_host')
    self.mox.StubOutWithMock(self.compute.driver, 'destroy')
    self.mox.StubOutWithMock(db, 'instance_get_all_by_host')
    self.mox.StubOutWithMock(context, 'get_admin_context')
    self.mox.StubOutWithMock(self.compute, 'init_virt_events')
    self.mox.StubOutWithMock(self.compute, '_get_instances_on_driver')
    self.mox.StubOutWithMock(self.compute, '_init_instance')
    self.mox.StubOutWithMock(self.compute, '_report_driver_status')
    self.mox.StubOutWithMock(self.compute, 'publish_service_capabilities')
    self.mox.StubOutWithMock(self.compute, '_get_instance_nw_info')

    self.compute.driver.init_host(host=our_host)
    context.get_admin_context().AndReturn(fake_context)
    db.instance_get_all_by_host(fake_context, our_host,
                                columns_to_join=['info_cache']
                                ).AndReturn([])
    self.compute.init_virt_events()

    # simulate failed instance
    self.compute._get_instances_on_driver(
            fake_context, {'deleted': False}).AndReturn([deleted_instance])
    self.compute._get_instance_nw_info(fake_context, deleted_instance
                                       ).AndRaise(
            exception.InstanceNotFound(
                    instance_id=deleted_instance['uuid']))
    # ensure driver.destroy is called so that driver may
    # clean up any dangling files
    self.compute.driver.destroy(deleted_instance,
                                mox.IgnoreArg(), mox.IgnoreArg(),
                                mox.IgnoreArg())

    self.compute._report_driver_status(fake_context)
    self.compute.publish_service_capabilities(fake_context)

    self.mox.ReplayAll()
    self.compute.init_host()
    # tearDown() uses context.get_admin_context(), so we have
    # to do the verification here and unstub it.
    self.mox.VerifyAll()
    self.mox.UnsetStubs()
def test_with_fault(self):
    fake_insts = [
        fake_instance.fake_db_instance(uuid="fake-uuid", host="host"),
        fake_instance.fake_db_instance(uuid="fake-inst2", host="host"),
        ]
    fake_faults = test_instance_fault.fake_faults
    self.mox.StubOutWithMock(db, "instance_get_all_by_host")
    self.mox.StubOutWithMock(db, "instance_fault_get_by_instance_uuids")
    db.instance_get_all_by_host(self.context, "host",
                                columns_to_join=[],
                                use_slave=False).AndReturn(fake_insts)
    db.instance_fault_get_by_instance_uuids(
            self.context, [x["uuid"] for x in fake_insts]).AndReturn(
            fake_faults)
    self.mox.ReplayAll()
    instances = instance.InstanceList.get_by_host(self.context, "host",
                                                  expected_attrs=["fault"],
                                                  use_slave=False)
    self.assertEqual(2, len(instances))
    self.assertEqual(fake_faults["fake-uuid"][0],
                     dict(instances[0].fault.iteritems()))
    self.assertEqual(None, instances[1].fault)
def test_get_by_host(self):
    fakes = [self.fake_instance(1), self.fake_instance(2)]
    self.mox.StubOutWithMock(db, 'instance_get_all_by_host')
    db.instance_get_all_by_host(self.context, 'foo',
                                columns_to_join=None).AndReturn(fakes)
    self.mox.ReplayAll()
    inst_list = instance.InstanceList.get_by_host(self.context, 'foo')
    for i in range(0, len(fakes)):
        self.assertTrue(isinstance(inst_list.objects[i],
                                   instance.Instance))
        self.assertEqual(inst_list.objects[i].uuid, fakes[i]['uuid'])
        self.assertEqual(inst_list.objects[i]._context, self.context)
    self.assertEqual(inst_list.obj_what_changed(), set())
    self.assertRemotes()
def describe_hosts(self, context, **_kwargs):
    """Returns status info for all nodes. Includes:

    * Hostname
    * Compute (up, down, None)
    * Instance count
    * Volume (up, down, None)
    * Volume Count
    """
    services = db.service_get_all(context, False)
    now = utils.utcnow()
    hosts = []
    rv = []
    for host in [service['host'] for service in services]:
        if host not in hosts:
            hosts.append(host)
    for host in hosts:
        compute = [s for s in services
                   if s['host'] == host and s['binary'] == 'nova-compute']
        if compute:
            compute = compute[0]
        instances = db.instance_get_all_by_host(context, host)
        volume = [s for s in services
                  if s['host'] == host and s['binary'] == 'nova-volume']
        if volume:
            volume = volume[0]
        volumes = db.volume_get_all_by_host(context, host)
        rv.append(host_dict(host, compute, instances, volume, volumes,
                            now))
    return {'hosts': rv}
def assert_compute_node_has_enough_memory(self, context,
                                          instance_ref, dest):
    """Checks if destination host has enough memory for live migration.

    :param context: security context
    :param instance_ref: nova.db.sqlalchemy.models.Instance object
    :param dest: destination host

    """
    # Getting total available memory of host
    avail = self._get_compute_info(context, dest, 'memory_mb')

    # Getting total used memory of host. It should be the sum of memories
    # that are assigned as max value, because overcommitting is risky.
    instance_refs = db.instance_get_all_by_host(context, dest)
    used = sum([i['memory_mb'] for i in instance_refs])

    mem_inst = instance_ref['memory_mb']
    avail = avail - used
    if avail <= mem_inst:
        instance_uuid = instance_ref['uuid']
        reason = _("Unable to migrate %(instance_uuid)s to %(dest)s: "
                   "Lack of memory(host:%(avail)s <= "
                   "instance:%(mem_inst)s)")
        raise exception.MigrationError(reason=reason % locals())
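# --- Illustrative sketch (not Nova code; names and numbers are made up) ---
# The memory check above reduces to: migration is allowed only when the
# destination's free memory strictly exceeds the incoming instance's
# allocation. A minimal standalone version of that decision:
def has_enough_memory(dest_total_mb, resident_mbs, incoming_mb):
    """Return True if incoming_mb fits beside the resident instances."""
    avail = dest_total_mb - sum(resident_mbs)
    return avail > incoming_mb

# A 2048 MB host with 256 + 512 MB already assigned leaves 1280 MB free,
# so a 1024 MB instance fits while a 1280 MB instance is rejected
# (mirroring the `avail <= mem_inst` test above).
assert has_enough_memory(2048, [256, 512], 1024)
assert not has_enough_memory(2048, [256, 512], 1280)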
def _do_mock_calls(defer_iptables_apply):
    self.compute.driver.init_host(host=our_host)
    context.get_admin_context().AndReturn(fake_context)
    db.instance_get_all_by_host(
            fake_context, our_host,
            columns_to_join=["info_cache"]).AndReturn(startup_instances)
    if defer_iptables_apply:
        self.compute.driver.filter_defer_apply_on()
    self.compute._destroy_evacuated_instances(fake_context)
    self.compute._init_instance(fake_context,
                                mox.IsA(instance_obj.Instance))
    self.compute._init_instance(fake_context,
                                mox.IsA(instance_obj.Instance))
    self.compute._init_instance(fake_context,
                                mox.IsA(instance_obj.Instance))
    if defer_iptables_apply:
        self.compute.driver.filter_defer_apply_off()
    self.compute._report_driver_status(fake_context)
    self.compute.publish_service_capabilities(fake_context)
def list_vms(host=None):
    """Make a list of VMs and expand out their fixed_ip and floating ips
    sensibly.
    """
    flags.parse_args([])
    my_instances = []
    if host is None:
        instances = db.instance_get_all(context.get_admin_context())
    else:
        instances = db.instance_get_all_by_host(
                context.get_admin_context(), host)

    for instance in instances:
        my_inst = dict(instance).copy()
        for (k, v) in my_inst.items():
            try:
                # Keep values that serialize to JSON; stringify the rest.
                json.dumps(v)
            except TypeError:
                my_inst[k] = str(v)
        ec2_id = db.get_ec2_instance_id_by_uuid(
                context.get_admin_context(), instance.uuid)
        ec2_id = 'i-' + hex(int(ec2_id)).replace('0x', '').zfill(8)
        my_inst['ec2_id'] = ec2_id
        fixed_ips = []
        try:
            fixed_ips = db.fixed_ip_get_by_instance(
                    context.get_admin_context(), instance.uuid)
        except Exception:
            pass
        my_inst['fixed_ips'] = [ip.address for ip in fixed_ips]
        my_inst['floating_ips'] = []
        for ip in fixed_ips:
            my_inst['floating_ips'].extend(
                    [f_ip.address for f_ip in
                     db.floating_ip_get_by_fixed_address(
                             context.get_admin_context(), ip.address)])
        my_instances.append(my_inst)
    return my_instances
def assert_compute_node_has_enough_memory(self, context,
                                          instance_ref, dest):
    """Checks if destination host has enough memory for live migration.

    :param context: security context
    :param instance_ref: nova.db.sqlalchemy.models.Instance object
    :param dest: destination host

    """
    elevated = context.elevated()
    # Getting total available memory and disk of host
    avail = self._get_compute_info(elevated, dest, 'memory_mb')

    # Getting total used memory and disk of host. It should be the sum
    # of memories that are assigned as max value, because overcommitting
    # is risky.
    used = 0
    instance_refs = db.instance_get_all_by_host(elevated, dest)
    used_list = [i['memory_mb'] for i in instance_refs]
    if used_list:
        used = reduce(lambda x, y: x + y, used_list)

    mem_inst = instance_ref['memory_mb']
    avail = avail - used - FLAGS.cs_host_reserved_memory_mb
    if avail <= mem_inst:
        instance_id = ec2utils.id_to_ec2_id(instance_ref['id'])
        reason = _("Lack of memory(host:%(avail)s"
                   " <= instance:%(mem_inst)s)"
                   " on %(dest)s")
        raise exception.InsufficientFreeMemory(uuid=dest)
def update_available_resource(self, context):
    """Override in-memory calculations of compute node resource usage
    based on data audited from the hypervisor layer.

    Add in resource claims in progress to account for operations that
    have declared a need for resources, but not necessarily retrieved
    them from the hypervisor layer yet.
    """
    resources = self.driver.get_available_resource()
    if not resources:
        # The virt driver does not support this function
        LOG.audit(_("Virt driver does not support "
                    "'get_available_resource'. Compute tracking is "
                    "disabled."))
        self.compute_node = None
        self.claims = {}
        return

    self._verify_resources(resources)
    self._report_hypervisor_resource_view(resources)
    self._purge_claims()

    # Grab all instances assigned to this host:
    instances = db.instance_get_all_by_host(context, self.host)

    # Now calculate usage based on instance utilization:
    self._update_usage_from_instances(resources, instances)
    self._report_final_resource_view(resources)
    self._sync_compute_node(context, resources)
def assert_compute_node_has_enough_disk(self, context,
                                        instance_ref, dest):
    """Checks if destination host has enough disk for block migration.

    :param context: security context
    :param instance_ref: nova.db.sqlalchemy.models.Instance object
    :param dest: destination host

    """
    # Getting total available memory and disk of host
    avail = self._get_compute_info(context, dest, 'local_gb')

    # Getting total used memory and disk of host. It should be the sum
    # of disks that are assigned as max value, because overcommitting is
    # risky.
    used = 0
    instance_refs = db.instance_get_all_by_host(context, dest)
    used_list = [i['local_gb'] for i in instance_refs]
    if used_list:
        used = reduce(lambda x, y: x + y, used_list)

    disk_inst = instance_ref['local_gb']
    avail = avail - used
    if avail <= disk_inst:
        instance_id = ec2utils.id_to_ec2_id(instance_ref['id'])
        reason = _("Unable to migrate %(instance_id)s to %(dest)s: "
                   "Lack of disk(host:%(avail)s "
                   "<= instance:%(disk_inst)s)")
        raise exception.MigrationError(reason=reason % locals())
def test_show_host_resources(self):
    host = "fake_host"

    computes = [{"host": host,
                 "compute_node": [{"vcpus": 4,
                                   "vcpus_used": 2,
                                   "memory_mb": 1024,
                                   "memory_mb_used": 512,
                                   "local_gb": 1024,
                                   "local_gb_used": 512}]}]
    instances = [{"project_id": "project1",
                  "vcpus": 1,
                  "memory_mb": 128,
                  "root_gb": 128,
                  "ephemeral_gb": 0},
                 {"project_id": "project1",
                  "vcpus": 2,
                  "memory_mb": 256,
                  "root_gb": 384,
                  "ephemeral_gb": 0},
                 {"project_id": "project2",
                  "vcpus": 2,
                  "memory_mb": 256,
                  "root_gb": 256,
                  "ephemeral_gb": 0}]

    self.mox.StubOutWithMock(db, "service_get_all_compute_by_host")
    self.mox.StubOutWithMock(db, "instance_get_all_by_host")

    db.service_get_all_compute_by_host(self.context,
                                       host).AndReturn(computes)
    db.instance_get_all_by_host(self.context, host).AndReturn(instances)

    self.mox.ReplayAll()
    result = self.manager.show_host_resources(self.context, host)
    expected = {"usage": {"project1": {"memory_mb": 384,
                                       "vcpus": 3,
                                       "root_gb": 512,
                                       "ephemeral_gb": 0},
                          "project2": {"memory_mb": 256,
                                       "vcpus": 2,
                                       "root_gb": 256,
                                       "ephemeral_gb": 0}},
                "resource": {"vcpus": 4,
                             "vcpus_used": 2,
                             "local_gb": 1024,
                             "local_gb_used": 512,
                             "memory_mb": 1024,
                             "memory_mb_used": 512}}
    self.assertDictMatch(result, expected)
def get_by_host(cls, context, host, expected_attrs=None, use_slave=False):
    db_inst_list = db.instance_get_all_by_host(
            context, host, columns_to_join=_expected_cols(expected_attrs),
            use_slave=use_slave)
    return _make_instance_list(context, cls(), db_inst_list,
                               expected_attrs)
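# --- Illustrative usage (hypothetical caller, not from the source) ---
# InstanceList.get_by_host is typically invoked with an admin context;
# the 'node1' host name is made up, and expected_attrs=['fault'] mirrors
# the test_with_fault cases in this section, making inst.fault available
# without extra per-instance queries.
ctxt = context.get_admin_context()
inst_list = instance.InstanceList.get_by_host(ctxt, 'node1',
                                              expected_attrs=['fault'])
for inst in inst_list:
    print(inst.uuid, inst.fault)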
def show_host_resources(self, context, host):
    """Shows the physical/usage resource given by hosts.

    :param context: security context
    :param host: hostname
    :returns: example format is below.
        {'resource':D, 'usage':{proj_id1:D, proj_id2:D}}
        D: {'vcpus': 3, 'memory_mb': 2048, 'local_gb': 2048,
            'vcpus_used': 12, 'memory_mb_used': 10240,
            'local_gb_used': 64}

    """
    # Update latest compute_node table
    topic = db.queue_get_for(context, FLAGS.compute_topic, host)
    rpc.call(context, topic, {"method": "update_available_resource"})

    # Getting compute node info and related instances info
    compute_ref = db.service_get_all_compute_by_host(context, host)
    compute_ref = compute_ref[0]
    instance_refs = db.instance_get_all_by_host(context,
                                                compute_ref['host'])

    # Getting total available/used resource
    compute_ref = compute_ref['compute_node'][0]
    resource = {'vcpus': compute_ref['vcpus'],
                'memory_mb': compute_ref['memory_mb'],
                'local_gb': compute_ref['local_gb'],
                'vcpus_used': compute_ref['vcpus_used'],
                'memory_mb_used': compute_ref['memory_mb_used'],
                'local_gb_used': compute_ref['local_gb_used']}
    usage = dict()
    if not instance_refs:
        return {'resource': resource, 'usage': usage}

    # Getting usage resource per project
    project_ids = [i['project_id'] for i in instance_refs]
    project_ids = list(set(project_ids))
    for project_id in project_ids:
        vcpus = [i['vcpus'] for i in instance_refs
                 if i['project_id'] == project_id]
        mem = [i['memory_mb'] for i in instance_refs
               if i['project_id'] == project_id]
        disk = [i['local_gb'] for i in instance_refs
                if i['project_id'] == project_id]
        usage[project_id] = {'vcpus': reduce(lambda x, y: x + y, vcpus),
                             'memory_mb': reduce(lambda x, y: x + y, mem),
                             'local_gb': reduce(lambda x, y: x + y, disk)}
    return {'resource': resource, 'usage': usage}
def show_host_resources(self, context, host):
    """Shows the physical/usage resource given by hosts.

    :param context: security context
    :param host: hostname
    :returns: example format is below.
        {'resource':D, 'usage':{proj_id1:D, proj_id2:D}}
        D: {'vcpus': 3, 'memory_mb': 2048, 'local_gb': 2048,
            'vcpus_used': 12, 'memory_mb_used': 10240,
            'local_gb_used': 64}

    """
    # Update latest compute_node table
    topic = db.queue_get_for(context, FLAGS.compute_topic, host)
    rpc.call(context, topic, {"method": "update_available_resource"})

    # Getting compute node info and related instances info
    compute_ref = db.service_get_all_compute_by_host(context, host)
    compute_ref = compute_ref[0]
    instance_refs = db.instance_get_all_by_host(context,
                                                compute_ref['host'])

    # Getting total available/used resource
    compute_ref = compute_ref['compute_node'][0]
    resource = {'vcpus': compute_ref['vcpus'],
                'memory_mb': compute_ref['memory_mb'],
                'local_gb': compute_ref['local_gb'],
                'vcpus_used': compute_ref['vcpus_used'],
                'memory_mb_used': compute_ref['memory_mb_used'],
                'local_gb_used': compute_ref['local_gb_used']}
    usage = dict()
    if not instance_refs:
        return {'resource': resource, 'usage': usage}

    # Getting usage resource per project
    project_ids = [i['project_id'] for i in instance_refs]
    project_ids = list(set(project_ids))
    for project_id in project_ids:
        vcpus = [i['vcpus'] for i in instance_refs
                 if i['project_id'] == project_id]
        mem = [i['memory_mb'] for i in instance_refs
               if i['project_id'] == project_id]
        root = [i['root_gb'] for i in instance_refs
                if i['project_id'] == project_id]
        ephemeral = [i['ephemeral_gb'] for i in instance_refs
                     if i['project_id'] == project_id]
        usage[project_id] = {'vcpus': sum(vcpus),
                             'memory_mb': sum(mem),
                             'root_gb': sum(root),
                             'ephemeral_gb': sum(ephemeral)}
    return {'resource': resource, 'usage': usage}
def _do_mock_calls(defer_iptables_apply):
    self.compute.driver.init_host(host=our_host)
    context.get_admin_context().AndReturn(fake_context)
    db.instance_get_all_by_host(
            fake_context, our_host, columns_to_join=['info_cache'],
            use_slave=False).AndReturn(startup_instances)
    if defer_iptables_apply:
        self.compute.driver.filter_defer_apply_on()
    self.compute._destroy_evacuated_instances(fake_context)
    self.compute._init_instance(fake_context,
                                mox.IsA(instance_obj.Instance))
    self.compute._init_instance(fake_context,
                                mox.IsA(instance_obj.Instance))
    self.compute._init_instance(fake_context,
                                mox.IsA(instance_obj.Instance))
    if defer_iptables_apply:
        self.compute.driver.filter_defer_apply_off()
def test_init_host_with_deleted_migration(self):
    our_host = self.compute.host
    not_our_host = 'not-' + our_host
    fake_context = 'fake-context'

    deleted_instance = {
        'name': 'fake-name',
        'host': not_our_host,
        'uuid': 'fake-uuid',
        }

    self.mox.StubOutWithMock(self.compute.driver, 'init_host')
    self.mox.StubOutWithMock(self.compute.driver, 'destroy')
    self.mox.StubOutWithMock(db, 'instance_get_all_by_host')
    self.mox.StubOutWithMock(context, 'get_admin_context')
    self.mox.StubOutWithMock(self.compute, 'init_virt_events')
    self.mox.StubOutWithMock(self.compute, '_get_instances_on_driver')
    self.mox.StubOutWithMock(self.compute, '_init_instance')
    self.mox.StubOutWithMock(self.compute, '_get_instance_nw_info')

    self.compute.driver.init_host(host=our_host)
    context.get_admin_context().AndReturn(fake_context)
    db.instance_get_all_by_host(fake_context, our_host,
                                columns_to_join=['info_cache']
                                ).AndReturn([])
    self.compute.init_virt_events()

    # simulate failed instance
    self.compute._get_instances_on_driver(
            fake_context, {'deleted': False}).AndReturn([deleted_instance])
    self.compute._get_instance_nw_info(fake_context, deleted_instance
                                       ).AndRaise(
            exception.InstanceNotFound(
                    instance_id=deleted_instance['uuid']))
    # ensure driver.destroy is called so that driver may
    # clean up any dangling files
    self.compute.driver.destroy(deleted_instance,
                                mox.IgnoreArg(), mox.IgnoreArg(),
                                mox.IgnoreArg())

    self.mox.ReplayAll()
    self.compute.init_host()
    # tearDown() uses context.get_admin_context(), so we have
    # to do the verification here and unstub it.
    self.mox.VerifyAll()
    self.mox.UnsetStubs()
def _do_mock_calls(defer_iptables_apply):
    self.compute.driver.init_host(host=our_host)
    context.get_admin_context().AndReturn(fake_context)
    db.instance_get_all_by_host(
            fake_context, our_host,
            columns_to_join=['info_cache']).AndReturn(startup_instances)
    if defer_iptables_apply:
        self.compute.driver.filter_defer_apply_on()
    self.compute._destroy_evacuated_instances(fake_context)
    self.compute._init_instance(fake_context,
                                mox.IsA(instance_obj.Instance))
    self.compute._init_instance(fake_context,
                                mox.IsA(instance_obj.Instance))
    self.compute._init_instance(fake_context,
                                mox.IsA(instance_obj.Instance))
    if defer_iptables_apply:
        self.compute.driver.filter_defer_apply_off()
    self.compute._report_driver_status(fake_context)
    self.compute.publish_service_capabilities(fake_context)
def test_with_fault(self):
    fake_insts = [
        fake_instance.fake_db_instance(uuid='fake-uuid', host='host'),
        fake_instance.fake_db_instance(uuid='fake-inst2', host='host'),
        ]
    fake_faults = test_instance_fault.fake_faults
    self.mox.StubOutWithMock(db, 'instance_get_all_by_host')
    self.mox.StubOutWithMock(db, 'instance_fault_get_by_instance_uuids')
    db.instance_get_all_by_host(self.context, 'host',
                                columns_to_join=[]
                                ).AndReturn(fake_insts)
    db.instance_fault_get_by_instance_uuids(
            self.context, [x['uuid'] for x in fake_insts]
            ).AndReturn(fake_faults)
    self.mox.ReplayAll()
    instances = instance.InstanceList.get_by_host(self.context, 'host',
                                                  expected_attrs=['fault'])
    self.assertEqual(2, len(instances))
    self.assertEqual(fake_faults['fake-uuid'][0],
                     dict(instances[0].fault.iteritems()))
    self.assertEqual(None, instances[1].fault)
def show_host_resources(self, context, host):
    """Shows the physical/usage resource given by hosts.

    :param context: security context
    :param host: hostname
    :returns: example format is below::

        {'resource':D, 'usage':{proj_id1:D, proj_id2:D}}
        D: {'vcpus': 3, 'memory_mb': 2048, 'local_gb': 2048,
            'vcpus_used': 12, 'memory_mb_used': 10240,
            'local_gb_used': 64}

    """
    # Getting compute node info and related instances info
    compute_ref = db.service_get_all_compute_by_host(context, host)
    compute_ref = compute_ref[0]
    instance_refs = db.instance_get_all_by_host(context,
                                                compute_ref['host'])

    # Getting total available/used resource
    compute_ref = compute_ref['compute_node'][0]
    resource = {'vcpus': compute_ref['vcpus'],
                'memory_mb': compute_ref['memory_mb'],
                'local_gb': compute_ref['local_gb'],
                'vcpus_used': compute_ref['vcpus_used'],
                'memory_mb_used': compute_ref['memory_mb_used'],
                'local_gb_used': compute_ref['local_gb_used']}
    usage = dict()
    if not instance_refs:
        return {'resource': resource, 'usage': usage}

    # Getting usage resource per project
    project_ids = [i['project_id'] for i in instance_refs]
    project_ids = list(set(project_ids))
    for project_id in project_ids:
        vcpus = [i['vcpus'] for i in instance_refs
                 if i['project_id'] == project_id]
        mem = [i['memory_mb'] for i in instance_refs
               if i['project_id'] == project_id]
        root = [i['root_gb'] for i in instance_refs
                if i['project_id'] == project_id]
        ephemeral = [i['ephemeral_gb'] for i in instance_refs
                     if i['project_id'] == project_id]
        usage[project_id] = {'vcpus': sum(vcpus),
                             'memory_mb': sum(mem),
                             'root_gb': sum(root),
                             'ephemeral_gb': sum(ephemeral)}
    return {'resource': resource, 'usage': usage}
def test_init_host_with_deleted_migration(self):
    our_host = self.compute.host
    not_our_host = "not-" + our_host
    fake_context = "fake-context"

    deleted_instance = {"name": "fake-name",
                        "host": not_our_host,
                        "uuid": "fake-uuid"}

    self.mox.StubOutWithMock(self.compute.driver, "init_host")
    self.mox.StubOutWithMock(self.compute.driver, "destroy")
    self.mox.StubOutWithMock(db, "instance_get_all_by_host")
    self.mox.StubOutWithMock(context, "get_admin_context")
    self.mox.StubOutWithMock(self.compute, "init_virt_events")
    self.mox.StubOutWithMock(self.compute, "_get_instances_on_driver")
    self.mox.StubOutWithMock(self.compute, "_init_instance")
    self.mox.StubOutWithMock(self.compute, "_report_driver_status")
    self.mox.StubOutWithMock(self.compute, "publish_service_capabilities")
    self.mox.StubOutWithMock(self.compute, "_get_instance_nw_info")

    self.compute.driver.init_host(host=our_host)
    context.get_admin_context().AndReturn(fake_context)
    db.instance_get_all_by_host(fake_context, our_host,
                                columns_to_join=["info_cache"]
                                ).AndReturn([])
    self.compute.init_virt_events()

    # simulate failed instance
    self.compute._get_instances_on_driver(
            fake_context, {"deleted": False}).AndReturn([deleted_instance])
    self.compute._get_instance_nw_info(fake_context,
                                       deleted_instance).AndRaise(
            exception.InstanceNotFound(
                    instance_id=deleted_instance["uuid"]))
    # ensure driver.destroy is called so that driver may
    # clean up any dangling files
    self.compute.driver.destroy(deleted_instance,
                                mox.IgnoreArg(), mox.IgnoreArg(),
                                mox.IgnoreArg())

    self.compute._report_driver_status(fake_context)
    self.compute.publish_service_capabilities(fake_context)

    self.mox.ReplayAll()
    self.compute.init_host()
    # tearDown() uses context.get_admin_context(), so we have
    # to do the verification here and unstub it.
    self.mox.VerifyAll()
    self.mox.UnsetStubs()
def show_host_resources(self, context, host):
    """Shows the physical/usage resource given by hosts.

    :param context: security context
    :param host: hostname
    :returns: example format is below::

        {'resource':D, 'usage':{proj_id1:D, proj_id2:D}}
        D: {'vcpus': 3, 'memory_mb': 2048, 'local_gb': 2048,
            'vcpus_used': 12, 'memory_mb_used': 10240,
            'local_gb_used': 64}

    """
    # Getting compute node info and related instances info
    compute_ref = db.service_get_all_compute_by_host(context, host)
    compute_ref = compute_ref[0]
    instance_refs = db.instance_get_all_by_host(context,
                                                compute_ref["host"])

    # Getting total available/used resource
    compute_ref = compute_ref["compute_node"][0]
    resource = {"vcpus": compute_ref["vcpus"],
                "memory_mb": compute_ref["memory_mb"],
                "local_gb": compute_ref["local_gb"],
                "vcpus_used": compute_ref["vcpus_used"],
                "memory_mb_used": compute_ref["memory_mb_used"],
                "local_gb_used": compute_ref["local_gb_used"]}
    usage = dict()
    if not instance_refs:
        return {"resource": resource, "usage": usage}

    # Getting usage resource per project
    project_ids = [i["project_id"] for i in instance_refs]
    project_ids = list(set(project_ids))
    for project_id in project_ids:
        vcpus = [i["vcpus"] for i in instance_refs
                 if i["project_id"] == project_id]
        mem = [i["memory_mb"] for i in instance_refs
               if i["project_id"] == project_id]
        root = [i["root_gb"] for i in instance_refs
                if i["project_id"] == project_id]
        ephemeral = [i["ephemeral_gb"] for i in instance_refs
                     if i["project_id"] == project_id]
        usage[project_id] = {"vcpus": sum(vcpus),
                             "memory_mb": sum(mem),
                             "root_gb": sum(root),
                             "ephemeral_gb": sum(ephemeral)}
    return {"resource": resource, "usage": usage}
def servers(self, req, id):
    context = req.environ['nova.context']
    authorize(context)
    hypervisors = db.compute_node_search_by_hypervisor(context, id)
    if hypervisors:
        return dict(hypervisors=[
                self._view_hypervisor(
                        hyp, False,
                        db.instance_get_all_by_host(
                                context, hyp['service']['host']))
                for hyp in hypervisors])
    else:
        msg = _("No hypervisor matching '%s' could be found.") % id
        raise webob.exc.HTTPNotFound(explanation=msg)
def show_host_resources(self, context, host, *args):
    """Shows the physical/usage resource given by hosts.

    :param context: security context
    :param host: hostname
    :returns: example format is below.
        {'resource':D, 'usage':{proj_id1:D, proj_id2:D}}
        D: {'vcpus': 3, 'memory_mb': 2048, 'local_gb': 2048}

    """
    compute_ref = db.service_get_all_compute_by_host(context, host)
    compute_ref = compute_ref[0]

    # Getting physical resource information
    compute_node_ref = compute_ref['compute_node'][0]
    resource = {'vcpus': compute_node_ref['vcpus'],
                'memory_mb': compute_node_ref['memory_mb'],
                'local_gb': compute_node_ref['local_gb'],
                'vcpus_used': compute_node_ref['vcpus_used'],
                'memory_mb_used': compute_node_ref['memory_mb_used'],
                'local_gb_used': compute_node_ref['local_gb_used']}

    # Getting usage resource information
    usage = {}
    instance_refs = db.instance_get_all_by_host(context,
                                                compute_ref['host'])
    if not instance_refs:
        return {'resource': resource, 'usage': usage}

    project_ids = [i['project_id'] for i in instance_refs]
    project_ids = list(set(project_ids))
    for project_id in project_ids:
        vcpus = db.instance_get_vcpu_sum_by_host_and_project(
                context, host, project_id)
        mem = db.instance_get_memory_sum_by_host_and_project(
                context, host, project_id)
        hdd = db.instance_get_disk_sum_by_host_and_project(
                context, host, project_id)
        usage[project_id] = {'vcpus': int(vcpus),
                             'memory_mb': int(mem),
                             'local_gb': int(hdd)}
    return {'resource': resource, 'usage': usage}
def servers(self, req, id):
    context = req.environ['nova.context']
    authorize(context)
    hypervisors = db.compute_node_search_by_hypervisor(context, id)
    if hypervisors:
        return dict(hypervisors=[
            self._view_hypervisor(
                hyp, False,
                db.instance_get_all_by_host(context,
                                            hyp['service']['host']))
            for hyp in hypervisors
        ])
    else:
        msg = _("No hypervisor matching '%s' could be found.") % id
        raise webob.exc.HTTPNotFound(explanation=msg)
def _get_target_instances(self, host):
    """Get the list of VMs running on the target host.

    :param host: the host name where the compute node is hosted.
    :returns: a list of instances running on the host.
    """
    admin_context = context.get_admin_context()
    instances_list = db.instance_get_all_by_host(admin_context, host,
                                                 columns_to_join=None,
                                                 use_slave=False)
    return instances_list
def index(self, req):
    context = req.environ['nova.context']
    authorize(context)
    hosts = {}
    services = db.service_get_all(context, False)
    for service in services:
        if service['topic'] == CONF.canary_topic:
            if service['host'] not in hosts:
                instances = db.instance_get_all_by_host(
                        context, service['host'])
                instance_uuids = map(lambda x: x['uuid'], instances)
                hosts[service['host']] = instance_uuids
    return webob.Response(status_int=200, body=json.dumps(hosts))
def update_available_resource(self, context):
    """Override in-memory calculations of compute node resource usage
    based on data audited from the hypervisor layer.

    Add in resource claims in progress to account for operations that
    have declared a need for resources, but not necessarily retrieved
    them from the hypervisor layer yet.
    """
    if self.nodename is None:
        resources = self.driver.get_available_resource()
    else:
        resources = self.driver.get_available_node_resource(self.nodename)
    if not resources:
        # The virt driver does not support this function
        method = 'get_available_resource'
        if self.nodename is not None:
            method = 'get_available_node_resource'
        LOG.audit(_("Virt driver does not support "
                    "'%s'. Compute tracking is disabled.") % method)
        self.compute_node = None
        self.claims = {}
        return

    self._verify_resources(resources)
    self._report_hypervisor_resource_view(resources)
    self._purge_expired_claims()

    # Grab all instances assigned to this host:
    instances = db.instance_get_all_by_host(context, self.host)

    if self.nodename is not None:
        # Collect the instances belonging to this node
        node_instances = []
        for instance in instances:
            smd = db.instance_system_metadata_get(context,
                                                  instance['uuid'])
            if smd.get('node') == self.nodename:
                node_instances.append(instance)
        instances = node_instances

    # Now calculate usage based on instance utilization:
    self._update_usage_from_instances(resources, instances)
    self._report_final_resource_view(resources)
    self._sync_compute_node(context, resources)
def init_host(self, host):
    """Initialize anything that is necessary for the driver to function,
    including catching up with currently running VM's on the given host.
    """
    context = nova_context.get_admin_context()
    instances = db.instance_get_all_by_host(context, host)
    powervm_instances = self.list_instances()
    # Look for db instances that don't exist on the host side
    # and clean up the inconsistencies.
    for db_instance in instances:
        task_state = db_instance['task_state']
        if db_instance['name'] in powervm_instances:
            continue
        if task_state in [task_states.DELETING, task_states.SPAWNING]:
            db.instance_update(context, db_instance['uuid'],
                               {'vm_state': vm_states.DELETED,
                                'task_state': None})
            db.instance_destroy(context, db_instance['uuid'])
def list_vms(host=None):
    """Make a list of VMs and expand out their fixed_ip and floating ips
    sensibly.
    """
    flags.parse_args([])
    my_instances = []
    if host is None:
        instances = db.instance_get_all(context.get_admin_context())
    else:
        instances = db.instance_get_all_by_host(context.get_admin_context(),
                                                host)

    for instance in instances:
        my_inst = dict(instance).copy()
        for (k, v) in my_inst.items():
            try:
                # Keep values that serialize to JSON; stringify the rest.
                json.dumps(v)
            except TypeError:
                my_inst[k] = str(v)
        ec2_id = db.get_ec2_instance_id_by_uuid(context.get_admin_context(),
                                                instance.uuid)
        ec2_id = 'i-' + hex(int(ec2_id)).replace('0x', '').zfill(8)
        my_inst['ec2_id'] = ec2_id
        fixed_ips = []
        try:
            fixed_ips = db.fixed_ip_get_by_instance(
                context.get_admin_context(), instance.uuid)
        except Exception:
            pass
        my_inst['fixed_ips'] = [ip.address for ip in fixed_ips]
        my_inst['floating_ips'] = []
        for ip in fixed_ips:
            my_inst['floating_ips'].extend([
                f_ip.address
                for f_ip in db.floating_ip_get_by_fixed_address(
                    context.get_admin_context(), ip.address)
            ])
        my_instances.append(my_inst)
    return my_instances
def list(self, host=None):
    """Show a list of all instances."""
    print ("%-10s %-15s %-10s %-10s %-26s %-9s %-9s %-9s"
           " %-10s %-10s %-10s %-5s" % (_('instance'),
                                        _('node'),
                                        _('type'),
                                        _('state'),
                                        _('launched'),
                                        _('image'),
                                        _('kernel'),
                                        _('ramdisk'),
                                        _('project'),
                                        _('user'),
                                        _('zone'),
                                        _('index')))

    if host is None:
        instances = db.instance_get_all(context.get_admin_context())
    else:
        instances = db.instance_get_all_by_host(
                context.get_admin_context(), host)

    for instance in instances:
        instance_type = instance_types.extract_instance_type(instance)
        print ("%-10s %-15s %-10s %-10s %-26s %-9s %-9s %-9s"
               " %-10s %-10s %-10s %-5d" % (instance['display_name'],
                                            instance['host'],
                                            instance_type['name'],
                                            instance['vm_state'],
                                            instance['launched_at'],
                                            instance['image_ref'],
                                            instance['kernel_id'],
                                            instance['ramdisk_id'],
                                            instance['project_id'],
                                            instance['user_id'],
                                            instance['availability_zone'],
                                            instance['launch_index']))
def test_live_migration_all_checks_pass(self):
    """Test live migration when all checks pass."""

    self.mox.StubOutWithMock(db, 'instance_get')
    self.mox.StubOutWithMock(utils, 'service_is_up')
    self.mox.StubOutWithMock(db, 'service_get_all_compute_by_host')
    self.mox.StubOutWithMock(self.driver, '_get_compute_info')
    self.mox.StubOutWithMock(db, 'instance_get_all_by_host')
    self.mox.StubOutWithMock(rpc, 'queue_get_for')
    self.mox.StubOutWithMock(rpc, 'call')
    self.mox.StubOutWithMock(rpc, 'cast')
    self.mox.StubOutWithMock(db, 'instance_update_and_get_original')
    self.mox.StubOutWithMock(driver, 'cast_to_compute_host')

    dest = 'fake_host2'
    block_migration = True
    disk_over_commit = True
    instance = self._live_migration_instance()
    db.instance_get(self.context, instance['id']).AndReturn(instance)

    # Source checks
    db.service_get_all_compute_by_host(self.context,
            instance['host']).AndReturn(['fake_service2'])
    utils.service_is_up('fake_service2').AndReturn(True)

    # Destination checks (compute is up, enough memory, disk)
    db.service_get_all_compute_by_host(self.context,
            dest).AndReturn(['fake_service3'])
    utils.service_is_up('fake_service3').AndReturn(True)
    # assert_compute_node_has_enough_memory()
    self.driver._get_compute_info(self.context, dest,
                                  'memory_mb').AndReturn(2048)
    db.instance_get_all_by_host(self.context, dest).AndReturn(
            [dict(memory_mb=256), dict(memory_mb=512)])
    # assert_compute_node_has_enough_disk()
    self.driver._get_compute_info(self.context, dest,
                                  'disk_available_least').AndReturn(1025)
    rpc.queue_get_for(self.context, FLAGS.compute_topic,
                      instance['host']).AndReturn('src_queue1')
    instance_disk_info_msg = {
        'method': 'get_instance_disk_info',
        'args': {
            'instance_name': instance['name'],
        },
        'version': compute_rpcapi.ComputeAPI.RPC_API_VERSION,
    }
    instance_disk_info = [{'disk_size': 1024 * (1024 ** 3)}]
    rpc.call(self.context,
             'src_queue1',
             instance_disk_info_msg,
             None).AndReturn(jsonutils.dumps(instance_disk_info))

    # Common checks (shared storage ok, same hypervisor, etc)
    self._check_shared_storage(dest, instance, False)
    db.service_get_all_compute_by_host(self.context, dest).AndReturn(
            [{'compute_node': [{'hypervisor_type': 'xen',
                                'hypervisor_version': 1}]}])
    # newer hypervisor version for src
    db.service_get_all_compute_by_host(self.context,
            instance['host']).AndReturn(
                    [{'compute_node': [{'hypervisor_type': 'xen',
                                        'hypervisor_version': 1,
                                        'cpu_info': 'fake_cpu_info'}]}])
    rpc.queue_get_for(self.context, FLAGS.compute_topic,
                      dest).AndReturn('dest_queue')
    rpc.call(self.context, 'dest_queue',
             {'method': 'compare_cpu',
              'args': {'cpu_info': 'fake_cpu_info'},
              'version': compute_rpcapi.ComputeAPI.RPC_API_VERSION},
             None).AndReturn(True)

    db.instance_update_and_get_original(self.context, instance['id'],
            {"vm_state": vm_states.MIGRATING}).AndReturn(
                    (instance, instance))

    driver.cast_to_compute_host(self.context, instance['host'],
            'live_migration', update_db=False,
            instance_id=instance['id'], dest=dest,
            block_migration=block_migration)

    self.mox.ReplayAll()
    result = self.driver.schedule_live_migration(self.context,
            instance_id=instance['id'], dest=dest,
            block_migration=block_migration,
            disk_over_commit=disk_over_commit)
    self.assertEqual(result, None)
def test_live_migration_all_checks_pass(self):
    """Test live migration when all checks pass."""

    self.mox.StubOutWithMock(db, 'instance_get')
    self.mox.StubOutWithMock(utils, 'service_is_up')
    self.mox.StubOutWithMock(db, 'service_get_all_compute_by_host')
    self.mox.StubOutWithMock(db, 'instance_get_all_by_host')
    self.mox.StubOutWithMock(rpc, 'call')
    self.mox.StubOutWithMock(rpc, 'cast')
    self.mox.StubOutWithMock(db, 'instance_update_and_get_original')
    self.mox.StubOutWithMock(driver, 'cast_to_compute_host')

    dest = 'fake_host2'
    block_migration = True
    disk_over_commit = True
    instance = self._live_migration_instance()
    instance_id = instance['id']
    instance_uuid = instance['uuid']
    db.instance_get(self.context, instance_id).AndReturn(instance)

    # Source checks
    db.service_get_all_compute_by_host(self.context,
            instance['host']).AndReturn(['fake_service2'])
    utils.service_is_up('fake_service2').AndReturn(True)

    # Destination checks (compute is up, enough memory, disk)
    db.service_get_all_compute_by_host(self.context,
            dest).AndReturn(['fake_service3'])
    utils.service_is_up('fake_service3').AndReturn(True)
    # assert_compute_node_has_enough_memory()
    db.service_get_all_compute_by_host(self.context, dest).AndReturn(
            [{'compute_node': [{'memory_mb': 2048,
                                'hypervisor_version': 1}]}])
    db.instance_get_all_by_host(self.context, dest).AndReturn(
            [dict(memory_mb=256), dict(memory_mb=512)])

    # Common checks (same hypervisor, etc)
    db.service_get_all_compute_by_host(self.context, dest).AndReturn(
            [{'compute_node': [{'hypervisor_type': 'xen',
                                'hypervisor_version': 1}]}])
    db.service_get_all_compute_by_host(self.context,
            instance['host']).AndReturn(
                    [{'compute_node': [{'hypervisor_type': 'xen',
                                        'hypervisor_version': 1,
                                        'cpu_info': 'fake_cpu_info'}]}])

    rpc.call(self.context, "compute.fake_host2",
             {"method": 'check_can_live_migrate_destination',
              "args": {'instance_id': instance_id,
                       'block_migration': block_migration,
                       'disk_over_commit': disk_over_commit},
              "version": "1.2"},
             None)

    db.instance_update_and_get_original(self.context, instance_uuid,
            {"task_state": task_states.MIGRATING}).AndReturn(
                    (instance, instance))

    driver.cast_to_compute_host(self.context, instance['host'],
            'live_migration', update_db=False,
            instance_id=instance_id, dest=dest,
            block_migration=block_migration)

    self.mox.ReplayAll()
    result = self.driver.schedule_live_migration(self.context,
            instance_id=instance_id, dest=dest,
            block_migration=block_migration,
            disk_over_commit=disk_over_commit)
    self.assertEqual(result, None)
def get_instanceList(self):
    """Get all VMs on this host."""
    self._instanceList = db.instance_get_all_by_host(
            self._admin_context, self.host)
    return self._instanceList
def show(self, req, id):
    """Shows the physical/usage resource given by hosts.

    :param context: security context
    :param host: hostname
    :returns: expected to use HostShowTemplate.
        ex.::

            {'host': {'resource':D},..}
            D: {'host': 'hostname','project': 'admin',
                'cpu': 1, 'memory_mb': 2048, 'disk_gb': 30}

    """
    host = id
    context = req.environ['nova.context']
    if not context.is_admin:
        msg = _("Describe-resource is admin only functionality")
        raise webob.exc.HTTPForbidden(explanation=msg)

    # Getting compute node info and related instances info
    try:
        compute_ref = db.service_get_all_compute_by_host(context, host)
        compute_ref = compute_ref[0]
    except exception.ComputeHostNotFound:
        raise webob.exc.HTTPNotFound(explanation=_("Host not found"))
    instance_refs = db.instance_get_all_by_host(context,
                                                compute_ref['host'])

    # Getting total available/used resource
    compute_ref = compute_ref['compute_node'][0]
    resources = [{'resource': {'host': host, 'project': '(total)',
                               'cpu': compute_ref['vcpus'],
                               'memory_mb': compute_ref['memory_mb'],
                               'disk_gb': compute_ref['local_gb']}},
                 {'resource': {'host': host, 'project': '(used_now)',
                               'cpu': compute_ref['vcpus_used'],
                               'memory_mb': compute_ref['memory_mb_used'],
                               'disk_gb': compute_ref['local_gb_used']}}]

    cpu_sum = 0
    mem_sum = 0
    hdd_sum = 0
    for i in instance_refs:
        cpu_sum += i['vcpus']
        mem_sum += i['memory_mb']
        hdd_sum += i['root_gb'] + i['ephemeral_gb']

    resources.append({'resource': {'host': host,
                                   'project': '(used_max)',
                                   'cpu': cpu_sum,
                                   'memory_mb': mem_sum,
                                   'disk_gb': hdd_sum}})

    # Getting usage resource per project
    project_ids = [i['project_id'] for i in instance_refs]
    project_ids = list(set(project_ids))
    for project_id in project_ids:
        vcpus = [i['vcpus'] for i in instance_refs
                 if i['project_id'] == project_id]
        mem = [i['memory_mb'] for i in instance_refs
               if i['project_id'] == project_id]
        disk = [i['root_gb'] + i['ephemeral_gb'] for i in instance_refs
                if i['project_id'] == project_id]
        resources.append({'resource': {
                'host': host,
                'project': project_id,
                'cpu': reduce(lambda x, y: x + y, vcpus),
                'memory_mb': reduce(lambda x, y: x + y, mem),
                'disk_gb': reduce(lambda x, y: x + y, disk)}})

    return {'host': resources}
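# --- Illustrative sketch (made-up data, not from the source) ---
# The per-project roll-up at the end of show() groups instances by
# project_id and sums cpu/memory/disk. A compact standalone version of
# the same aggregation:
instance_refs = [
    {'project_id': 'p1', 'vcpus': 1, 'memory_mb': 128,
     'root_gb': 10, 'ephemeral_gb': 0},
    {'project_id': 'p1', 'vcpus': 2, 'memory_mb': 256,
     'root_gb': 20, 'ephemeral_gb': 5},
    {'project_id': 'p2', 'vcpus': 2, 'memory_mb': 256,
     'root_gb': 30, 'ephemeral_gb': 0},
]
usage = {}
for i in instance_refs:
    u = usage.setdefault(i['project_id'],
                         {'cpu': 0, 'memory_mb': 0, 'disk_gb': 0})
    u['cpu'] += i['vcpus']
    u['memory_mb'] += i['memory_mb']
    u['disk_gb'] += i['root_gb'] + i['ephemeral_gb']
assert usage == {'p1': {'cpu': 3, 'memory_mb': 384, 'disk_gb': 35},
                 'p2': {'cpu': 2, 'memory_mb': 256, 'disk_gb': 30}}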
def test_live_migration_all_checks_pass(self):
    """Test live migration when all checks pass."""

    self.mox.StubOutWithMock(servicegroup.API, 'service_is_up')
    self.mox.StubOutWithMock(db, 'service_get_all_compute_by_host')
    self.mox.StubOutWithMock(db, 'instance_get_all_by_host')
    self.mox.StubOutWithMock(rpc, 'call')
    self.mox.StubOutWithMock(rpc, 'cast')
    self.mox.StubOutWithMock(self.driver.compute_rpcapi, 'live_migration')

    dest = 'fake_host2'
    block_migration = True
    disk_over_commit = True
    instance = jsonutils.to_primitive(self._live_migration_instance())
    instance_id = instance['id']
    instance_uuid = instance['uuid']

    # Source checks
    db.service_get_all_compute_by_host(self.context,
            instance['host']).AndReturn(['fake_service2'])
    self.servicegroup_api.service_is_up('fake_service2').AndReturn(True)

    # Destination checks (compute is up, enough memory, disk)
    db.service_get_all_compute_by_host(self.context,
            dest).AndReturn(['fake_service3'])
    self.servicegroup_api.service_is_up('fake_service3').AndReturn(True)
    # assert_compute_node_has_enough_memory()
    db.service_get_all_compute_by_host(self.context, dest).AndReturn(
            [{'compute_node': [{'memory_mb': 2048,
                                'hypervisor_version': 1}]}])
    db.instance_get_all_by_host(self.context, dest).AndReturn(
            [dict(memory_mb=256), dict(memory_mb=512)])

    # Common checks (same hypervisor, etc)
    db.service_get_all_compute_by_host(self.context, dest).AndReturn(
            [{'compute_node': [{'hypervisor_type': 'xen',
                                'hypervisor_version': 1}]}])
    db.service_get_all_compute_by_host(self.context,
            instance['host']).AndReturn(
                    [{'compute_node': [{'hypervisor_type': 'xen',
                                        'hypervisor_version': 1,
                                        'cpu_info': 'fake_cpu_info'}]}])

    rpc.call(self.context, "compute.fake_host2",
             {"method": 'check_can_live_migrate_destination',
              "args": {'instance': instance,
                       'block_migration': block_migration,
                       'disk_over_commit': disk_over_commit},
              "version": compute_rpcapi.ComputeAPI.BASE_RPC_API_VERSION},
             None).AndReturn({})

    self.driver.compute_rpcapi.live_migration(self.context,
            host=instance['host'], instance=instance, dest=dest,
            block_migration=block_migration, migrate_data={})

    self.mox.ReplayAll()
    result = self.driver.schedule_live_migration(self.context,
            instance=instance, dest=dest,
            block_migration=block_migration,
            disk_over_commit=disk_over_commit)
    self.assertEqual(result, None)
def get_by_host(cls, context, host, expected_attrs=None):
    db_inst_list = db.instance_get_all_by_host(
            context, host, columns_to_join=expected_cols(expected_attrs))
    return _make_instance_list(context, cls(), db_inst_list,
                               expected_attrs)