def __init__(self, *args, **kwargs):
    self.host_manager = host_manager.HostManager()
    self.servicegroup_api = servicegroup.API()
    self.notifier = rpc.get_notifier('scheduler')
    self.placement_client = report.SchedulerReportClient()
    super().__init__(service_name='scheduler', *args, **kwargs)

def setUp(self):
    super(HostManagerTestCase, self).setUp()
    self.host_manager = host_manager.HostManager()
    self.fake_hosts = [
        host_manager.HostState('fake_host%s' % x, 'fake-node')
        for x in range(1, 5)
    ]

def test_init_aggregates_one_agg_no_hosts(self, agg_get_all, mock_init_info):
    fake_agg = objects.Aggregate(id=1, hosts=[])
    agg_get_all.return_value = [fake_agg]
    self.host_manager = host_manager.HostManager()
    self.assertEqual({1: fake_agg}, self.host_manager.aggs_by_id)
    self.assertEqual({}, self.host_manager.host_aggregates_map)

def setUp(self):
    super(HostManagerTestCase, self).setUp()
    self.host_manager = host_manager.HostManager()
    self.fake_hosts = [host_manager.HostState('fake_host%s' % x, 'fake-node')
                       for x in range(1, 5)]
    self.fake_hosts += [host_manager.HostState('fake_multihost',
                                               'fake-node%s' % x)
                        for x in range(1, 5)]
    self.addCleanup(timeutils.clear_time_override)

def setUp(self):
    super(HostManagerChangedNodesTestCase, self).setUp()
    self.host_manager = host_manager.HostManager()
    self.fake_hosts = [
        host_manager.HostState('host1', 'node1'),
        host_manager.HostState('host2', 'node2'),
        host_manager.HostState('host3', 'node3'),
        host_manager.HostState('host4', 'node4'),
    ]

def setUp(self):
    super(HostManagerChangedNodesTestCase, self).setUp()
    self.host_manager = host_manager.HostManager()
    self.fake_hosts = [
        host_manager.HostState('host1', 'node1'),
        host_manager.HostState('host2', 'node2'),
        host_manager.HostState('host3', 'node3'),
        host_manager.HostState('host4', 'node4'),
    ]
    self.addCleanup(timeutils.clear_time_override)

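# NOTE: The setUp bodies above register timeutils.clear_time_override as
# cleanup, which only matters when a test freezes the clock first. A minimal
# sketch of that pattern, assuming oslo.utils' time-override helpers (the
# datetime value below is illustrative, not from the original tests):
import datetime

from oslo_utils import timeutils

timeutils.set_time_override(override_time=datetime.datetime(2016, 1, 1))
try:
    # With the override in place, utcnow() returns the frozen time, so
    # host-state staleness comparisons become deterministic.
    assert timeutils.utcnow() == datetime.datetime(2016, 1, 1)
finally:
    timeutils.clear_time_override()
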
def setUp(self):
    super(HostManagerTestCase, self).setUp()
    self.flags(scheduler_available_filters=['%s.%s' % (__name__, cls)
                                            for cls in ['FakeFilterClass1',
                                                        'FakeFilterClass2']])
    self.flags(scheduler_default_filters=['FakeFilterClass1'])
    self.host_manager = host_manager.HostManager()
    self.fake_hosts = [host_manager.HostState('fake_host%s' % x, 'fake-node')
                       for x in range(1, 5)]
    self.fake_hosts += [host_manager.HostState('fake_multihost',
                                               'fake-node%s' % x)
                        for x in range(1, 5)]

def setUp(self, mock_init_agg, mock_init_inst):
    super(HostManagerTestCase, self).setUp()
    self.flags(scheduler_available_filters=['%s.%s' % (__name__, cls)
                                            for cls in ['FakeFilterClass1',
                                                        'FakeFilterClass2']])
    self.flags(scheduler_default_filters=['FakeFilterClass1'])
    self.host_manager = host_manager.HostManager()
    self.fake_hosts = [host_manager.HostState('fake_host%s' % x, 'fake-node')
                       for x in range(1, 5)]
    self.fake_hosts += [host_manager.HostState('fake_multihost',
                                               'fake-node%s' % x)
                        for x in range(1, 5)]
    self.useFixture(fixtures.SpawnIsSynchronousFixture())

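# NOTE: The scheduler_available_filters flags above load FakeFilterClass1 and
# FakeFilterClass2 by dotted path from the test module itself. A minimal
# sketch of such filter stubs, assuming the BaseHostFilter contract of this
# Nova era (host_passes receives the host state and the request's filter
# properties):
from nova.scheduler import filters


class FakeFilterClass1(filters.BaseHostFilter):
    """Stub filter: exists only so HostManager can load it by name."""

    def host_passes(self, host_state, filter_properties):
        pass


class FakeFilterClass2(filters.BaseHostFilter):
    """Second stub, available but not in the default filter list."""

    def host_passes(self, host_state, filter_properties):
        pass
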
def allocation_check():
    print("============================ allocation check "
          "============================")
    tbl = PrettyTable(["status", "hostname", "nodename",
                       "vm_in_nodes", "vm_in_allocations"])
    tbl.align['hostname'] = 'l'
    tbl.align['nodename'] = 'l'
    hm = host_manager.HostManager()
    states = hm.get_all_host_states(cxt)
    node_vm_map = {}
    for i in states:
        rp = rp_obj.ResourceProvider.get_by_uuid(cxt, i.uuid)
        node_vm_map.setdefault(rp.name, set())
        for j in i.instances:
            # NOTE(fanzhang): i.instances[j] is an Instance object, i.e. an
            # instance hosted on this node.
            inst = i.instances[j]
            node_name = inst.node
            node_vm_map.setdefault(node_name, set())
            node_vm_map[node_name].add(inst.uuid)
        db_allocs = rp_obj._get_allocations_by_provider_id(cxt, rp.id)
        vms_in_allocation = set()
        for j in db_allocs:
            vms_in_allocation.add(j['consumer_id'])
        vm_in_nodes = node_vm_map[rp.name]
        if vm_in_nodes == vms_in_allocation:
            hint = "%s%s\033[0m" % (color_tbl['green'], 'OK')
            hostname = "%s%s\033[0m" % (color_tbl['blue'], i.host)
            nodename = "%s%s\033[0m" % (color_tbl['yellow'], i.nodename)
        else:
            hint = "%s%s\033[0m" % (color_tbl['red'], 'X')
            hostname = "%s%s\033[0m" % (color_tbl['red'], i.host)
            nodename = "%s%s\033[0m" % (color_tbl['red'], i.nodename)
        tbl.add_row([hint, hostname, nodename,
                     len(vm_in_nodes), len(vms_in_allocation)])
    print(tbl.get_string(sortby='hostname'))

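# NOTE: allocation_check() relies on module-level names that are not shown
# here: cxt (a RequestContext) and color_tbl (ANSI color prefixes). A minimal
# bootstrap sketch, assuming the script runs with access to nova.conf; the
# color values are illustrative:
import sys

from nova import config
from nova import context

# Parse nova.conf so DB and placement access work, then build an admin context.
config.parse_args(sys.argv)
cxt = context.get_admin_context()

# ANSI escape prefixes matched by the trailing \033[0m resets above.
color_tbl = {
    'red': '\033[31m',
    'green': '\033[32m',
    'yellow': '\033[33m',
    'blue': '\033[34m',
}
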
def print_hypervisor_view():
    hm = host_manager.HostManager()
    tbl = PrettyTable(["hostname", "nodename", "updated", "ip",
                       "cpu", "cpu_ratio", "ram", "ram_ratio",
                       "vms", "active_vms", "other_vms"])
    tbl.align['hostname'] = 'l'
    tbl.align['ip'] = 'l'
    states = hm.get_all_host_states(cxt)
    for i in states:
        cpu = "%s/%s" % (i.vcpus_used, i.vcpus_total)
        vcpus_total = i.vcpus_total or i.vcpus_used
        if vcpus_total:
            cpu_ratio = colorizer(i.vcpus_used * 100.0 /
                                  (vcpus_total * i.cpu_allocation_ratio))
        else:
            cpu_ratio = '-'
        ram_used = i.total_usable_ram_mb - i.free_ram_mb
        ram = "%s/%s" % (ram_used, i.total_usable_ram_mb)
        total_usable_ram_mb = i.total_usable_ram_mb or ram_used
        if total_usable_ram_mb:
            ram_ratio = colorizer(ram_used * 100.0 /
                                  (total_usable_ram_mb * i.ram_allocation_ratio))
        else:
            ram_ratio = '-'
        # Disk usage in GB; currently not shown in the table.
        disk_used = i.disk_mb_used / 1024.0
        num_instances = i.stats.get('num_instances', 0)
        num_vm_active = i.stats.get('num_vm_active', 0)
        num_vm_others = int(num_instances) - int(num_vm_active)
        tbl.add_row([i.host, i.nodename, i.updated, i.host_ip,
                     cpu, cpu_ratio, ram, ram_ratio,
                     num_instances, num_vm_active, num_vm_others])
    print("============================ Hypervisor resource "
          "============================")
    print(tbl.get_string(sortby="ip"))

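# NOTE: colorizer() is another helper not shown above; it evidently formats a
# utilization percentage with a warning color. A hypothetical sketch reusing
# the color_tbl convention (the 70/90 thresholds are assumptions, not from
# the original script):
def colorizer(percent):
    # Green when comfortably below capacity, yellow when close, red when hot.
    if percent < 70:
        color = color_tbl['green']
    elif percent < 90:
        color = color_tbl['yellow']
    else:
        color = color_tbl['red']
    return "%s%.1f%%\033[0m" % (color, percent)
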
def __init__(self, *args, **kwargs):
    super(FakeFilterScheduler, self).__init__(*args, **kwargs)
    self.host_manager = host_manager.HostManager()

def test_init_aggregates_no_aggs(self, agg_get_all, mock_init_info):
    agg_get_all.return_value = []
    self.host_manager = host_manager.HostManager()
    self.assertEqual({}, self.host_manager.aggs_by_id)
    self.assertEqual({}, self.host_manager.host_aggregates_map)

def setUp(self):
    super(HostManagerTestCase, self).setUp()
    self.host_manager = host_manager.HostManager()

def test_manager_public_api_signatures(self):
    self.assertPublicAPISignatures(host_manager.HostManager(),
                                   self.host_manager)

def __init__(self):
    self.host_manager = host_manager.HostManager()
    self.servicegroup_api = servicegroup.API()

def allocation_sync(cxt):
    hm = host_manager.HostManager()
    states = hm.get_all_host_states(cxt)
    compute_api = compute.API()
    node_vm_map = {}
    reportclient = scheduler_client.SchedulerClient().reportclient
    # created_at timestamps are stored in UTC, so compare against UTC "now".
    now = datetime.utcnow()
    for state in states:
        rp_uuid = state.uuid
        rp = rp_obj.ResourceProvider.get_by_uuid(cxt, rp_uuid)
        # NOTE(fanzhang): Build a mapping from node name to the set of
        # instance UUIDs living on that node.
        node_vm_map.setdefault(rp.name, set())
        for instance_uuid in state.instances:
            instance_obj = state.instances[instance_uuid]
            node_name = instance_obj.node
            node_vm_map.setdefault(node_name, set())
            node_vm_map[node_name].add(instance_uuid)
            LOG.debug("Instance uuid is %s", instance_uuid)
        vms_in_node = node_vm_map[rp.name]
        allocations_list = rp_obj.AllocationList.get_all_by_resource_provider(
            cxt, rp)
        LOG.debug('AllocationList is %s', allocations_list)
        vms_in_allocation = set(a.consumer_id for a in allocations_list)
        if vms_in_node != vms_in_allocation:
            LOG.warning('Instances on node %s do not match allocations %s',
                        vms_in_node, vms_in_allocation)
        # NOTE(fanzhang): Delete allocations of VMs which are not on
        # compute nodes.
        allocations_more = vms_in_allocation - vms_in_node
        if allocations_more:
            LOG.warning(
                'Instances in allocations are more than those on node: %s',
                allocations_more)
            for allocation in allocations_list:
                if allocation.consumer_id in allocations_more:
                    allocs = rp_obj.AllocationList.get_all_by_consumer_id(
                        cxt, consumer_id=allocation.consumer_id)
                    created_at = allocation.created_at.replace(tzinfo=None)
                    # Use total_seconds(); .seconds wraps for deltas > 1 day.
                    delta = (now - created_at).total_seconds()
                    if delta >= 1800:
                        LOG.info('Try to delete %s', allocation)
                        LOG.debug('Allocations by consumer id are %s', allocs)
                        # log_redo_sql(allocs, allocation.id)
                        allocation.destroy()
                    else:
                        LOG.info('allocation %s was created within the last '
                                 '30 minutes, skipping', allocation)
        # NOTE(fanzhang): Create allocations for VMs on compute nodes
        # without allocation records.
        host_manager_more = vms_in_node - vms_in_allocation
        if host_manager_more:
            LOG.warning('Instances on nodes are more than allocations: %s',
                        host_manager_more)
            for instance_uuid in host_manager_more:
                instance = compute_api.get(cxt, instance_uuid)
                LOG.debug(instance)
                LOG.warning(
                    'Should create allocation record with '
                    'resource provider uuid %s and consumer id %s',
                    rp_uuid, instance.uuid)

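# NOTE: allocation_sync() only logs instances that are missing allocations.
# Actually healing them would mean writing an allocation against the node's
# resource provider. A hedged sketch of that step, assuming the Pike-era
# nova.objects.resource_provider API the script already imports as rp_obj;
# verify the Allocation/AllocationList field names against your release
# before using anything like this:
def create_missing_allocation(cxt, rp, instance):
    # Mirror what the resource tracker would have written: one allocation
    # per resource class, sized from the instance's flavor.
    resources = {
        'VCPU': instance.flavor.vcpus,
        'MEMORY_MB': instance.flavor.memory_mb,
        'DISK_GB': instance.flavor.root_gb + instance.flavor.ephemeral_gb,
    }
    allocs = [
        rp_obj.Allocation(cxt, resource_provider=rp,
                          consumer_id=instance.uuid,
                          resource_class=rc, used=amount)
        for rc, amount in resources.items() if amount
    ]
    rp_obj.AllocationList(cxt, objects=allocs).create_all()
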
def test_manager_public_api_signatures(self, mock_init_aggs, mock_init_inst):
    self.assertPublicAPISignatures(host_manager.HostManager(),
                                   self.host_manager)

def __init__(self, *args, **kwargs):
    super(FakeDistributedScheduler, self).__init__(*args, **kwargs)
    self.zone_manager = zone_manager.ZoneManager()
    self.host_manager = host_manager.HostManager()