Example #1
def _create_tracker(self, fake_devs):
    self.fake_devs = copy.deepcopy(fake_devs)
    self.tracker = manager.PciDevTracker(
        self.fake_context, objects.ComputeNode(id=1, numa_topology=None))
Example #2
def _create_tracker(self, fake_devs):
    self.fake_devs = fake_devs
    self.tracker = manager.PciDevTracker(self.fake_context, 1)
Example #3
def test_update_devices_from_hypervisor_resources(self, _mock_dev_assign):
    fake_pci_devs = [copy.deepcopy(fake_pci), copy.deepcopy(fake_pci_2)]
    fake_pci_devs_json = jsonutils.dumps(fake_pci_devs)
    tracker = manager.PciDevTracker(self.fake_context)
    tracker.update_devices_from_hypervisor_resources(fake_pci_devs_json)
    self.assertEqual(2, len(tracker.pci_devs))
Example #4
def new_pci_tracker(self):
    ctxt = context.RequestContext('testuser', 'testproject')
    self.pci_tracker = pci_manager.PciDevTracker(ctxt)
Example #5
def test_pcidev_tracker_create_no_nodeid(self, mock_get_cn):
    self.tracker = manager.PciDevTracker(self.fake_context)
    self.assertEqual(len(self.tracker.pci_devs), 0)
    self.assertFalse(mock_get_cn.called)
Example #6
def test_pcidev_tracker_create_with_nodeid(self, mock_get_cn):
    self.tracker = manager.PciDevTracker(self.fake_context, node_id=1)
    mock_get_cn.assert_called_once_with(self.fake_context, 1)
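Examples #1-#6 exercise three constructor shapes: a ComputeNode object (Example #1), a positional node id (Example #2), and an optional node_id keyword (Examples #5 and #6), where omitting the keyword skips the ComputeNode lookup entirely. A minimal sketch of the keyword form follows, assuming the nova.pci.manager layout used above; the RequestContext arguments are the test values from Example #4, and the node_id variant needs a configured Nova database to resolve the compute node.

    from nova import context
    from nova.pci import manager as pci_manager

    ctxt = context.RequestContext('testuser', 'testproject')

    # Without node_id, no ComputeNode lookup is made (Example #5).
    tracker = pci_manager.PciDevTracker(ctxt)

    # With node_id, the tracker fetches that compute node from the
    # database (Example #6), so this form needs a live deployment.
    tracker_with_node = pci_manager.PciDevTracker(ctxt, node_id=1)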
Example #7
    def _update_available_resource(self, context, resources):

        # initialise the compute node object, creating it
        # if it does not already exist.
        self._init_compute_node(context, resources)

        # if we could not init the compute node, the tracker will be
        # disabled and we should quit now
        if self.disabled:
            return

        if 'pci_passthrough_devices' in resources:
            # TODO(jaypipes): Move this into _init_compute_node()
            if not self.pci_tracker:
                n_id = self.compute_node['id'] if self.compute_node else None
                self.pci_tracker = pci_manager.PciDevTracker(context,
                                                             node_id=n_id)
            dev_json = resources.pop('pci_passthrough_devices')
            self.pci_tracker.update_devices_from_hypervisor_resources(dev_json)

        # Grab all instances assigned to this node:
        instances = objects.InstanceList.get_by_host_and_node(
            context,
            self.host,
            self.nodename,
            expected_attrs=[
                'system_metadata', 'numa_topology', 'flavor',
                'migration_context'
            ])

        # Now calculate usage based on instance utilization:
        self._update_usage_from_instances(context, instances)

        # Grab all in-progress migrations:
        migrations = objects.MigrationList.get_in_progress_by_host_and_node(
            context, self.host, self.nodename)

        self._pair_instances_to_migrations(migrations, instances)
        self._update_usage_from_migrations(context, migrations)

        # Detect and account for orphaned instances that may exist on the
        # hypervisor, but are not in the DB:
        orphans = self._find_orphaned_instances()
        self._update_usage_from_orphans(orphans)

        # NOTE(yjiang5): Because pci device tracker status is not cleared in
        # this periodic task, and also because the resource tracker is not
        # notified when instances are deleted, we need to remove all usages
        # from deleted instances.
        if self.pci_tracker:
            self.pci_tracker.clean_usage(instances, migrations, orphans)
            dev_pools_obj = self.pci_tracker.stats.to_device_pools_obj()
            self.compute_node.pci_device_pools = dev_pools_obj
        else:
            self.compute_node.pci_device_pools = objects.PciDevicePoolList()

        self._report_final_resource_view()

        metrics = self._get_host_metrics(context, self.nodename)
        # TODO(pmurray): metrics should not be a json string in ComputeNode,
        # but it is. This should be changed in ComputeNode
        self.compute_node.metrics = jsonutils.dumps(metrics)

        # update the compute_node
        self._update(context)
        LOG.info(_LI('Compute_service record updated for %(host)s:%(node)s'), {
            'host': self.host,
            'node': self.nodename
        })
Example #8
    def _update_available_resource(self, context, resources):

        # initialise the compute node object, creating it
        # if it does not already exist.
        self._init_compute_node(context, resources)

        # if we could not init the compute node, the tracker will be
        # disabled and we should quit now
        if self.disabled:
            return

        if 'pci_passthrough_devices' in resources:
            devs = []
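            # Keep only assignable devices: physical functions (type-PF)
            # are skipped, and the rest must pass the whitelist filter.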
            for dev in jsonutils.loads(
                    resources.pop('pci_passthrough_devices')):
                if dev['dev_type'] == 'type-PF':
                    continue

                if self.pci_filter.device_assignable(dev):
                    devs.append(dev)

            if not self.pci_tracker:
                n_id = self.compute_node['id'] if self.compute_node else None
                self.pci_tracker = pci_manager.PciDevTracker(context,
                                                             node_id=n_id)
            self.pci_tracker.set_hvdevs(devs)

        # Grab all instances assigned to this node:
        instances = objects.InstanceList.get_by_host_and_node(
            context,
            self.host,
            self.nodename,
            expected_attrs=['system_metadata', 'numa_topology'])

        # Now calculate usage based on instance utilization:
        self._update_usage_from_instances(context, instances)

        # Grab all in-progress migrations:
        migrations = objects.MigrationList.get_in_progress_by_host_and_node(
            context, self.host, self.nodename)

        # Only look at resize/migrate migration records
        # NOTE(danms): RT should probably examine live migration
        # records as well and do something smart. However, ignore
        # those for now to avoid them being included in below calculations.
        migrations = [
            migration for migration in migrations
            if migration.migration_type in ('resize', 'migrate')
        ]

        self._update_usage_from_migrations(context, migrations)

        # Detect and account for orphaned instances that may exist on the
        # hypervisor, but are not in the DB:
        orphans = self._find_orphaned_instances()
        self._update_usage_from_orphans(orphans)

        # NOTE(yjiang5): Because pci device tracker status is not cleared in
        # this periodic task, and also because the resource tracker is not
        # notified when instances are deleted, we need to remove all usages
        # from deleted instances.
        if self.pci_tracker:
            self.pci_tracker.clean_usage(instances, migrations, orphans)
            dev_pools_obj = self.pci_tracker.stats.to_device_pools_obj()
            self.compute_node.pci_device_pools = dev_pools_obj
        else:
            self.compute_node.pci_device_pools = objects.PciDevicePoolList()

        self._report_final_resource_view()

        metrics = self._get_host_metrics(context, self.nodename)
        # TODO(pmurray): metrics should not be a json string in ComputeNode,
        # but it is. This should be changed in ComputeNode
        self.compute_node.metrics = jsonutils.dumps(metrics)

        # update the compute_node
        self._update(context)
        LOG.info(_LI('Compute_service record updated for %(host)s:%(node)s'), {
            'host': self.host,
            'node': self.nodename
        })
Example #9
def new_pci_tracker(self):
    self.pci_tracker = pci_manager.PciDevTracker()
Example #10
def test_pcidev_tracker_create_no_nodeid(self):
    self.tracker = manager.PciDevTracker()
    self.assertEqual(len(self.tracker.pci_devs), 0)
Example #11
    def _update_available_resource(self, context, resources):

        # initialise the compute node object, creating it
        # if it does not already exist.
        self._init_compute_node(context, resources)

        # if we could not init the compute node, the tracker will be
        # disabled and we should quit now
        if self.disabled:
            return

        if 'pci_passthrough_devices' in resources:
            devs = []
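            # Keep only assignable devices: physical functions (type-PF)
            # are skipped, and the rest must pass the whitelist filter.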
            for dev in jsonutils.loads(
                    resources.pop('pci_passthrough_devices')):
                if dev['dev_type'] == 'type-PF':
                    continue

                if self.pci_filter.device_assignable(dev):
                    devs.append(dev)

            if not self.pci_tracker:
                n_id = self.compute_node['id'] if self.compute_node else None
                self.pci_tracker = pci_manager.PciDevTracker(context,
                                                             node_id=n_id)
            self.pci_tracker.set_hvdevs(devs)

        # Grab all instances assigned to this node:
        instances = objects.InstanceList.get_by_host_and_node(
            context,
            self.host,
            self.nodename,
            expected_attrs=['system_metadata', 'numa_topology'])

        # Now calculate usage based on instance utilization:
        self._update_usage_from_instances(context, resources, instances)

        # Grab all in-progress migrations:
        migrations = objects.MigrationList.get_in_progress_by_host_and_node(
            context, self.host, self.nodename)

        self._update_usage_from_migrations(context, resources, migrations)

        # Detect and account for orphaned instances that may exist on the
        # hypervisor, but are not in the DB:
        orphans = self._find_orphaned_instances()
        self._update_usage_from_orphans(context, resources, orphans)

        # NOTE(yjiang5): Because pci device tracker status is not cleared in
        # this periodic task, and also because the resource tracker is not
        # notified when instances are deleted, we need to remove all usages
        # from deleted instances.
        if self.pci_tracker:
            self.pci_tracker.clean_usage(instances, migrations, orphans)
            resources['pci_device_pools'] = self.pci_tracker.stats
        else:
            resources['pci_device_pools'] = []

        self._report_final_resource_view(resources)

        metrics = self._get_host_metrics(context, self.nodename)
        resources['metrics'] = jsonutils.dumps(metrics)

        # TODO(sbauza): Juno compute nodes are missing the host field and
        # the Juno ResourceTracker does not set this field, even if
        # the ComputeNode object can show it.
        # Unfortunately, as we're not yet using ComputeNode.save(), we need
        # to add this field in the resources dict until the RT is using
        # the ComputeNode.save() method for populating the table.
        # tl;dr: To be removed once RT is using ComputeNode.save()
        resources['host'] = self.host

        self._update(context, resources)
        LOG.info(_LI('Compute_service record updated for %(host)s:%(node)s'), {
            'host': self.host,
            'node': self.nodename
        })
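Examples #8 and #11 filter the hypervisor device list inside the resource tracker, skipping type-PF devices and checking device_assignable() before calling set_hvdevs(); Example #7 instead passes the raw JSON to update_devices_from_hypervisor_resources(), suggesting the filtering moved inside the tracker. Below is a minimal sketch of the newer flow, following Examples #3 and #7; the device fields shown are illustrative, not a verified schema.

    from oslo_serialization import jsonutils

    from nova import context
    from nova.pci import manager as pci_manager

    ctxt = context.get_admin_context()
    tracker = pci_manager.PciDevTracker(ctxt)

    # The tracker takes the hypervisor's device list as a JSON string,
    # exactly as the test in Example #3 does.
    devs_json = jsonutils.dumps([{
        'address': '0000:04:10.0',
        'vendor_id': '8086',
        'product_id': '1520',
        'dev_type': 'type-VF',
    }])
    tracker.update_devices_from_hypervisor_resources(devs_json)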