def _update_available_resource(self, context, resources):
    """Reconcile this node's resource usage against the hypervisor audit.

    Rebuilds the compute node record, folds in usage from instances,
    in-progress migrations and orphaned instances, refreshes PCI device
    accounting, and persists the final resource view.

    :param context: security context for DB/conductor calls
    :param resources: dict of raw resource data reported by the virt
                      driver; mutated in place (pci keys popped, stats
                      and metrics keys added) before being persisted
    """
    # initialise the compute node object, creating it
    # if it does not already exist.
    self._init_compute_node(context, resources)

    # if we could not init the compute node the tracker will be
    # disabled and we should quit now
    if self.disabled:
        return

    if 'pci_passthrough_devices' in resources:
        # Lazily create the PCI tracker the first time the driver
        # reports passthrough devices; bind it to the compute node id
        # when one already exists.
        if not self.pci_tracker:
            n_id = self.compute_node['id'] if self.compute_node else None
            self.pci_tracker = pci_manager.PciDevTracker(context,
                                                         node_id=n_id)
        # The driver reports devices as a JSON blob; decode before
        # handing them to the tracker. pop() so the raw blob is not
        # persisted with the rest of the resources dict.
        self.pci_tracker.set_hvdevs(
            jsonutils.loads(resources.pop('pci_passthrough_devices')))

    # Grab all instances assigned to this node:
    instances = objects.InstanceList.get_by_host_and_node(
        context, self.host, self.nodename,
        expected_attrs=['system_metadata', 'numa_topology'])

    # Now calculate usage based on instance utilization:
    self._update_usage_from_instances(context, resources, instances)

    # Grab all in-progress migrations:
    capi = self.conductor_api
    migrations = capi.migration_get_in_progress_by_host_and_node(
        context, self.host, self.nodename)

    self._update_usage_from_migrations(context, resources, migrations)

    # Detect and account for orphaned instances that may exist on the
    # hypervisor, but are not in the DB:
    orphans = self._find_orphaned_instances()
    self._update_usage_from_orphans(context, resources, orphans)

    # NOTE(yjiang5): Because pci device tracker status is not cleared in
    # this periodic task, and also because the resource tracker is not
    # notified when instances are deleted, we need remove all usages
    # from deleted instances.
    if self.pci_tracker:
        self.pci_tracker.clean_usage(instances, migrations, orphans)
        resources['pci_stats'] = jsonutils.dumps(self.pci_tracker.stats)
    else:
        # No PCI tracker: persist an explicit empty stats list so
        # consumers always find the key.
        resources['pci_stats'] = jsonutils.dumps([])

    self._report_final_resource_view(resources)

    metrics = self._get_host_metrics(context, self.nodename)
    resources['metrics'] = jsonutils.dumps(metrics)
    # Persist the reconciled view to the compute node record.
    self._update(context, resources)
    LOG.info(_LI('Compute_service record updated for %(host)s:%(node)s'),
             {'host': self.host, 'node': self.nodename})
def setUp(self):
    """Stub out DB and PCI-request lookups, then build the tracker."""
    super(PciDevTrackerTestCase, self).setUp()
    # Swap the real lookups for canned fakes so no database is touched.
    self.stubs.Set(pci_request,
                   'get_instance_pci_requests',
                   self._fake_get_instance_pci_requests)
    self.stubs.Set(db,
                   'pci_device_get_all_by_node',
                   self._fake_get_pci_devices)
    self._create_fake_instance()
    self.tracker = pci_manager.PciDevTracker(1)
def __init__(self, host, driver, nodename):
    """Initialise per-node resource accounting state.

    :param host: hostname this tracker accounts for
    :param driver: virt driver used to audit hypervisor resources
    :param nodename: hypervisor node name within the host
    """
    # Identity of the node being tracked.
    self.host = host
    self.nodename = nodename
    self.driver = driver
    # Compute node record is created lazily on the first audit.
    self.compute_node = None
    self.pci_tracker = pci_manager.PciDevTracker()
    # Pluggable stats collector, chosen via configuration.
    self.stats = importutils.import_object(CONF.compute_stats_class)
    # In-memory usage maps, keyed by instance/migration.
    self.tracked_instances = {}
    self.tracked_migrations = {}
    self.conductor_api = conductor.API()
def setUp(self):
    """Prepare DB stubs, a fake PCI whitelist and a tracker instance."""
    super(PciDevTrackerTestCase, self).setUp()
    self.stubs.Set(db,
                   'pci_device_get_all_by_node',
                   self._fake_get_pci_devices)
    # The fake whitelist must be in effect before the fake devices are
    # created, so install it now and undo it when the test finishes.
    whitelist_patcher = pci_fakes.fake_pci_whitelist()
    self.addCleanup(whitelist_patcher.stop)
    self._create_fake_instance()
    self.tracker = pci_manager.PciDevTracker(1)
class DummyTracker(object):
    """Minimal stand-in for the resource tracker used by claim tests.

    Records whether the abort/drop hooks fired via the ``icalled`` and
    ``rcalled`` flags instead of performing any real accounting.
    """

    # Flags flipped by the claim hooks below.
    icalled = False
    rcalled = False
    # NOTE: created at class-definition time, so this tracker is shared
    # by all instances until new_pci_tracker() replaces it per-instance.
    pci_tracker = pci_manager.PciDevTracker()

    def abort_instance_claim(self, *args, **kwargs):
        # Record that an instance claim was aborted.
        self.icalled = True

    def drop_resize_claim(self, *args, **kwargs):
        # Record that a resize claim was dropped.
        self.rcalled = True

    def new_pci_tracker(self):
        # Give this instance its own, fresh PCI device tracker.
        self.pci_tracker = pci_manager.PciDevTracker()
def test_set_compute_node_id(self):
    """set_compute_node_id() stamps the id on current and future devs."""
    self.tracker = pci_manager.PciDevTracker()
    devs = [copy.deepcopy(dev)
            for dev in (fake_pci, fake_pci_1, fake_pci_2)]
    self.tracker.set_hvdevs(devs)

    self.tracker.set_compute_node_id(1)
    self.assertEqual(self.tracker.node_id, 1)
    self.assertEqual(self.tracker.pci_devs[0].compute_node_id, 1)

    # Devices reported after the id is set must be stamped as well.
    fake_pci_3 = dict(fake_pci, address='0000:00:00.4', vendor_id='v2')
    devs = [copy.deepcopy(dev)
            for dev in (fake_pci, fake_pci_1, fake_pci_3, fake_pci_3)]
    self.tracker.set_hvdevs(devs)
    for dev in self.tracker.pci_devs:
        self.assertEqual(dev.compute_node_id, 1)
def _update_available_resource(self, context, resources):
    """Reconcile node usage from instances, migrations and orphans.

    Folds instance, migration and orphan usage into *resources*,
    refreshes PCI accounting, and syncs the result to the compute node
    record.

    :param context: security context for DB/conductor calls
    :param resources: dict of raw resource data from the virt driver;
                      mutated in place before being synced
    """
    if 'pci_passthrough_devices' in resources:
        # Lazily create the PCI tracker the first time passthrough
        # devices are reported.
        if not self.pci_tracker:
            self.pci_tracker = pci_manager.PciDevTracker()
        # Devices arrive as a JSON blob; pop it so the raw string is
        # not synced with the rest of the resources.
        self.pci_tracker.set_hvdevs(
            jsonutils.loads(resources.pop('pci_passthrough_devices')))

    # Grab all instances assigned to this node:
    instances = objects.InstanceList.get_by_host_and_node(
        context, self.host, self.nodename,
        expected_attrs=['system_metadata', 'numa_topology'])

    # Now calculate usage based on instance utilization:
    self._update_usage_from_instances(context, resources, instances)

    # Grab all in-progress migrations:
    capi = self.conductor_api
    migrations = capi.migration_get_in_progress_by_host_and_node(
        context, self.host, self.nodename)

    self._update_usage_from_migrations(context, resources, migrations)

    # Detect and account for orphaned instances that may exist on the
    # hypervisor, but are not in the DB:
    orphans = self._find_orphaned_instances()
    self._update_usage_from_orphans(context, resources, orphans)

    # NOTE(yjiang5): Because pci device tracker status is not cleared in
    # this periodic task, and also because the resource tracker is not
    # notified when instances are deleted, we need remove all usages
    # from deleted instances.
    if self.pci_tracker:
        self.pci_tracker.clean_usage(instances, migrations, orphans)
        resources['pci_stats'] = jsonutils.dumps(self.pci_tracker.stats)
    else:
        # Always publish the key, even with no PCI devices tracked.
        resources['pci_stats'] = jsonutils.dumps([])

    self._report_final_resource_view(resources)

    metrics = self._get_host_metrics(context, self.nodename)
    resources['metrics'] = jsonutils.dumps(metrics)
    # Persist the reconciled view to the compute node record.
    self._sync_compute_node(context, resources)
def update_available_resource(self, context):
    """Override in-memory calculations of compute node resource usage based
    on data audited from the hypervisor layer.

    Add in resource claims in progress to account for operations that have
    declared a need for resources, but not necessarily retrieved them from
    the hypervisor layer yet.

    :param context: security context for DB/conductor calls
    """
    LOG.audit(_("Auditing locally available compute resources"))
    resources = self.driver.get_available_resource(self.nodename)

    if not resources:
        # The virt driver does not support this function
        LOG.audit(_("Virt driver does not support "
                    "'get_available_resource'  Compute tracking is disabled."))
        self.compute_node = None
        return
    resources['host_ip'] = CONF.my_ip

    # Fail loudly if the driver's report is missing required fields.
    self._verify_resources(resources)

    self._report_hypervisor_resource_view(resources)

    if 'pci_passthrough_devices' in resources:
        # Lazily create the PCI tracker the first time passthrough
        # devices are reported by the driver.
        if not self.pci_tracker:
            self.pci_tracker = pci_manager.PciDevTracker()
        # Devices arrive JSON-encoded; pop so the raw blob is not
        # persisted with the rest of the resources dict.
        self.pci_tracker.set_hvdevs(
            jsonutils.loads(resources.pop('pci_passthrough_devices')))

    # Grab all instances assigned to this node:
    instances = instance_obj.InstanceList.get_by_host_and_node(
        context, self.host, self.nodename)

    # Now calculate usage based on instance utilization:
    self._update_usage_from_instances(resources, instances)

    # Grab all in-progress migrations:
    capi = self.conductor_api
    migrations = capi.migration_get_in_progress_by_host_and_node(
        context, self.host, self.nodename)

    self._update_usage_from_migrations(context, resources, migrations)

    # Detect and account for orphaned instances that may exist on the
    # hypervisor, but are not in the DB:
    orphans = self._find_orphaned_instances()
    self._update_usage_from_orphans(resources, orphans)

    # NOTE(yjiang5): Because pci device tracker status is not cleared in
    # this periodic task, and also because the resource tracker is not
    # notified when instances are deleted, we need remove all usages
    # from deleted instances.
    if self.pci_tracker:
        self.pci_tracker.clean_usage(instances, migrations, orphans)
        resources['pci_stats'] = jsonutils.dumps(self.pci_tracker.stats)
    else:
        # Always publish the key, even with no PCI devices tracked.
        resources['pci_stats'] = jsonutils.dumps([])

    self._report_final_resource_view(resources)

    metrics = self._get_host_metrics(context, self.nodename)
    resources['metrics'] = jsonutils.dumps(metrics)
    # Persist the reconciled view to the compute node record.
    self._sync_compute_node(context, resources)
def test_pcidev_tracker_create_no_nodeid(self):
    """A tracker built without a node id starts with no devices."""
    tracker = pci_manager.PciDevTracker()
    self.tracker = tracker
    self.assertEqual(0, len(tracker.pci_devs))
def test_pcidev_tracker_create_with_nodeid(self, mock_get_cn):
    """Passing node_id triggers exactly one device lookup for that node."""
    ctxt = self.fake_context
    self.tracker = pci_manager.PciDevTracker(ctxt, node_id=1)
    mock_get_cn.assert_called_once_with(ctxt, 1)
def test_pcidev_tracker_create_no_nodeid(self, mock_get_cn):
    """Without node_id, no device lookup happens and the dev list is empty."""
    tracker = pci_manager.PciDevTracker(self.fake_context)
    self.tracker = tracker
    self.assertEqual(0, len(tracker.pci_devs))
    self.assertFalse(mock_get_cn.called)
def new_pci_tracker(self):
    """Install a fresh PCI device tracker built from a test context."""
    request_ctxt = context.RequestContext('testuser', 'testproject')
    self.pci_tracker = pci_manager.PciDevTracker(request_ctxt)
def new_pci_tracker(self):
    """Swap in a brand-new, empty PCI device tracker."""
    fresh_tracker = pci_manager.PciDevTracker()
    self.pci_tracker = fresh_tracker