def setUp(self):
    """Build an empty PciDeviceStats and populate it with fake devices.

    The whitelist patcher must be installed before the fake devices are
    created, since device creation consults the whitelist.
    """
    super(PciDeviceStatsTestCase, self).setUp()
    self.pci_stats = stats.PciDeviceStats()
    # The following two calls need to be made before adding the devices.
    whitelist_patcher = fakes.fake_pci_whitelist()
    self.addCleanup(whitelist_patcher.stop)
    self._create_fake_devs()
def test_object_create(self):
    """Round-trip the pools through pool objects and back into stats."""
    pool_objs = objects.pci_device_pool.from_pci_stats(self.pci_stats.pools)
    rebuilt = stats.PciDeviceStats(pool_objs)

    self.assertEqual(len(rebuilt.pools), 3)
    self.assertEqual({pool['count'] for pool in rebuilt}, set([1, 2]))
    self.assertEqual({pool['vendor_id'] for pool in rebuilt},
                     set(['v1', 'v2', 'v3']))
def test_pci_passthrough_no_pci_stats(self):
    """A host with empty PCI stats must fail a PCI passthrough request."""
    pci_request = objects.InstancePCIRequest(
        count=1, spec=[{'vendor_id': '8086'}])
    pci_requests = objects.InstancePCIRequests(requests=[pci_request])
    filter_properties = {'pci_requests': pci_requests}

    host = fakes.FakeHostState(
        'host1', 'node1',
        attribute_dict={'pci_stats': stats.PciDeviceStats()})
    self.assertFalse(self.filt_cls.host_passes(host, filter_properties))
def test_stat_consumption_from_instance_pci(self):
    """Consuming an instance with a PCI request drains the matching pool.

    The instance pins NUMA cell 0 and requests one '8086' device; the
    host exposes a single matching device on NUMA node 1, so consumption
    should land on host cell 1 and empty the PCI pool.
    """
    topology = objects.InstanceNUMATopology(cells=[
        objects.InstanceNUMACell(cpuset=set([0]), memory=512, id=0)])

    request_dicts = [{'request_id': 'fake_request1',
                      'count': 1,
                      'spec': [{'vendor_id': '8086'}]}]
    pci_requests = objects.InstancePCIRequests(
        requests=[objects.InstancePCIRequest(**d) for d in request_dicts],
        instance_uuid='fake-uuid')

    instance = objects.Instance(root_gb=0,
                                ephemeral_gb=0,
                                memory_mb=512,
                                vcpus=1,
                                project_id='12345',
                                vm_state=vm_states.BUILDING,
                                task_state=task_states.SCHEDULING,
                                os_type='Linux',
                                uuid='fake-uuid',
                                numa_topology=topology,
                                pci_requests=pci_requests,
                                id=1243)
    req_spec = sched_utils.build_request_spec(
        None, None, [instance],
        objects.Flavor(root_gb=0, ephemeral_gb=0, memory_mb=1024, vcpus=1))

    host = host_manager.HostState("fakehost", "fakenode")
    # One device on NUMA node 1 that matches the instance's request.
    host.pci_stats = pci_stats.PciDeviceStats(
        [objects.PciDevicePool(vendor_id='8086',
                               product_id='15ed',
                               numa_node=1,
                               count=1)])
    host.numa_topology = fakes.NUMA_TOPOLOGY

    host.consume_from_instance(req_spec['instance_properties'])

    self.assertIsInstance(req_spec['instance_properties']['numa_topology'],
                          objects.InstanceNUMATopology)
    self.assertEqual(512, host.numa_topology.cells[1].memory_usage)
    self.assertEqual(1, host.numa_topology.cells[1].cpu_usage)
    self.assertEqual(0, len(host.pci_stats.pools))
def __init__(self, context, node_id=None):
    """Create a pci device tracker.

    When ``node_id`` is given, the known PCI devices for that compute
    node are loaded from the database immediately; otherwise the device
    list starts empty and the resource tracker fills in the node id
    later.
    """
    super(PciDevTracker, self).__init__()
    self.stale = {}
    self.node_id = node_id
    self.stats = stats.PciDeviceStats()
    self.pci_devs = (
        list(objects.PciDeviceList.get_by_compute_node(context, node_id))
        if node_id else [])
    self._initial_instance_usage()
def update_from_compute_node(self, compute):
    """Update information about a host from a ComputeNode object."""
    # Ignore stale data: skip the update when our view is newer than
    # the ComputeNode record.
    if (self.updated and compute.updated_at
            and self.updated > compute.updated_at):
        return

    total_ram_mb = compute.memory_mb

    # Assume virtual size is all consumed by instances if use qcow2 disk.
    usable_disk_gb = compute.free_disk_gb
    least_disk_gb = compute.disk_available_least
    if least_disk_gb is not None:
        if least_disk_gb > usable_disk_gb:
            # can occur when an instance in database is not on host
            LOG.warning(
                _LW("Host %(hostname)s has more disk space than "
                    "database expected "
                    "(%(physical)sgb > %(database)sgb)"),
                {'physical': least_disk_gb,
                 'database': usable_disk_gb,
                 'hostname': compute.hypervisor_hostname})
        usable_disk_gb = min(least_disk_gb, usable_disk_gb)
    usable_disk_mb = usable_disk_gb * 1024

    self.disk_mb_used = compute.local_gb_used * 1024

    # NOTE(jogo) free_ram_mb can be negative
    self.free_ram_mb = compute.free_ram_mb
    self.total_usable_ram_mb = total_ram_mb
    self.total_usable_disk_gb = compute.local_gb
    self.free_disk_mb = usable_disk_mb
    self.vcpus_total = compute.vcpus
    self.vcpus_used = compute.vcpus_used
    self.updated = compute.updated_at
    self.numa_topology = compute.numa_topology
    self.pci_stats = pci_stats.PciDeviceStats(compute.pci_device_pools)

    # All virt drivers report host_ip
    self.host_ip = compute.host_ip
    self.hypervisor_type = compute.hypervisor_type
    self.hypervisor_version = compute.hypervisor_version
    self.hypervisor_hostname = compute.hypervisor_hostname
    self.cpu_info = compute.cpu_info
    self.supported_instances = (
        [spec.to_list() for spec in compute.supported_hv_specs]
        if compute.supported_hv_specs else [])

    # Don't store stats directly in host_state to make sure these don't
    # overwrite any values, or get overwritten themselves. Store in self so
    # filters can schedule with them.
    self.stats = compute.stats or {}

    # Track number of instances on host
    self.num_instances = int(self.stats.get('num_instances', 0))

    self.num_io_ops = int(self.stats.get('io_workload', 0))

    # update metrics
    self._update_metrics_from_compute_node(compute)
def setUp(self):
    """Create an empty PciDeviceStats and install the tagged whitelist."""
    super(PciDeviceStatsWithTagsTestCase, self).setUp()
    self.pci_stats = stats.PciDeviceStats()
    self._create_whitelist()