Example #1
0
def from_pci_stats(pci_stats):
    """Create and return a PciDevicePoolList from the data stored in the db,
    which can be either the serialized object, or, prior to the creation of
    the device pool objects, a simple dict or a list of such dicts.

    Returns None when the input is empty or cannot be deserialized.
    """
    # Strings come straight from the db column; deserialize them first.
    if isinstance(pci_stats, six.string_types):
        try:
            pci_stats = jsonutils.loads(pci_stats)
        except (ValueError, TypeError):
            pci_stats = None
    if not pci_stats:
        return None
    # A serialized versioned object carries the nova_object namespace marker.
    if 'nova_object.namespace' in pci_stats:
        return objects.PciDevicePoolList.obj_from_primitive(pci_stats)
    # Old-style storage format: a single dict or a list of such dicts.
    raw_pools = pci_stats if isinstance(pci_stats, list) else [pci_stats]
    return objects.PciDevicePoolList(
        objects=[objects.PciDevicePool.from_dict(entry)
                 for entry in raw_pools])
Example #2
0
    def _update_usage_from_instance(self, context, instance, is_removed=False):
        """Update usage for a single instance.

        :param context: security context
        :param instance: the instance (dict-accessible) whose usage to track
        :param is_removed: force treating the instance as removed even if its
            vm_state alone would not indicate removal
        """

        uuid = instance['uuid']
        is_new_instance = uuid not in self.tracked_instances
        # Bug fix: only an instance we are already tracking can be removed.
        # Without the `not is_new_instance` guard, an untracked instance in a
        # removable vm_state would take *both* branches below: it would be
        # added to tracked_instances, immediately popped, and then have its
        # usage decremented (sign=-1) even though it was never counted.
        # (This matches the corrected variant of this method elsewhere in
        # this file.)
        is_removed_instance = not is_new_instance and (
            is_removed
            or instance['vm_state'] in vm_states.ALLOW_RESOURCE_REMOVAL)

        if is_new_instance:
            self.tracked_instances[uuid] = obj_base.obj_to_primitive(instance)
            sign = 1

        if is_removed_instance:
            self.tracked_instances.pop(uuid)
            sign = -1

        self.stats.update_stats_for_instance(instance, is_removed_instance)

        # if it's a new or deleted instance:
        if is_new_instance or is_removed_instance:
            if self.pci_tracker:
                self.pci_tracker.update_pci_for_instance(context,
                                                         instance,
                                                         sign=sign)
            # new instance, update compute node resource usage:
            self._update_usage(instance, sign=sign)

        self.compute_node.current_workload = self.stats.calculate_workload()
        if self.pci_tracker:
            obj = self.pci_tracker.stats.to_device_pools_obj()
            self.compute_node.pci_device_pools = obj
        else:
            self.compute_node.pci_device_pools = objects.PciDevicePoolList()
Example #3
0
    def test_save_pci_device_pools_empty(self, mock_update):
        """Saving an empty pool list sends its serialized form as pci_stats."""
        empty_pools = objects.PciDevicePoolList(objects=[])
        fake_pci = jsonutils.dumps(empty_pools.obj_to_primitive())

        db_node = fake_compute_node.copy()
        db_node['pci_stats'] = fake_pci
        mock_update.return_value = db_node

        cn = compute_node.ComputeNode(context=self.context)
        cn.id = 123
        cn.pci_device_pools = objects.PciDevicePoolList(objects=[])
        cn.save()

        self.compare_obj(cn, db_node,
                         subs=self.subs(),
                         comparators=self.comparators())
        mock_update.assert_called_once_with(
            self.context, 123, {'pci_stats': fake_pci})
Example #4
0
    def update_resource_stats(self, context, name, stats):
        """Creates or updates stats for the desired service.

        :param context: local context
        :param name: name of resource to update
        :type name: immutable (str or tuple)
        :param stats: updated stats to send to scheduler; must contain the
            compute node 'id' key
        :type stats: dict
        :raises: exception.ComputeHostNotCreated when 'id' is missing from
            stats (the compute node record does not exist yet)
        """

        # Guard clause: without a compute node id there is nothing to update.
        if 'id' not in stats:
            raise exception.ComputeHostNotCreated(name=str(name))
        compute_node_id = stats['id']
        updates = stats.copy()
        del updates['id']

        if 'stats' in updates:
            # NOTE(danms): This is currently pre-serialized for us,
            # which we don't want if we're using the object. So,
            # fix it here, and follow up with removing this when the
            # RT is converted to proper objects.
            updates['stats'] = jsonutils.loads(updates['stats'])
        compute_node = objects.ComputeNode(context=context,
                                           id=compute_node_id)
        compute_node.obj_reset_changes()
        for k, v in updates.items():
            if k == 'pci_device_pools':
                # NOTE(danms): Since the updates are actually the result of
                # a obj_to_primitive() on some real objects, we need to convert
                # back to a real object (not from_dict() or _from_db_object(),
                # which expect a db-formatted object) but just an attr-based
                # reconstruction. When we start getting a ComputeNode from
                # scheduler this "bandage" can go away.
                if v:
                    devpools = [objects.PciDevicePool.from_dict(x) for x in v]
                else:
                    devpools = []
                compute_node.pci_device_pools = objects.PciDevicePoolList(
                    objects=devpools)
            else:
                setattr(compute_node, k, v)
        compute_node.save()

        # Bug fix: pass the argument lazily instead of eagerly %-formatting
        # the (translated) message, so the string is only built when INFO is
        # enabled -- consistent with the other LOG.info calls in this file.
        LOG.info(_LI('Compute_service record updated for %s'), str(name))
Example #5
0
    def _update_usage_from_instance(self,
                                    context,
                                    instance,
                                    nodename,
                                    is_removed=False):
        """Update usage for a single instance."""

        uuid = instance['uuid']
        is_new_instance = uuid not in self.tracked_instances
        # NOTE(sfinucan): Both brand new instances as well as instances that
        # are being unshelved will have is_new_instance == True
        is_removed_instance = not is_new_instance and (
            is_removed or
            instance['vm_state'] in vm_states.ALLOW_RESOURCE_REMOVAL)

        # The guard above makes the two cases mutually exclusive, so a
        # single if/elif decides the usage sign.
        if is_new_instance:
            self.tracked_instances[uuid] = obj_base.obj_to_primitive(instance)
            sign = 1
        elif is_removed_instance:
            self.tracked_instances.pop(uuid)
            sign = -1

        cn = self.compute_nodes[nodename]
        self.stats.update_stats_for_instance(instance, is_removed_instance)
        cn.stats = copy.deepcopy(self.stats)

        # Only newly-tracked or removed instances change resource usage.
        if is_new_instance or is_removed_instance:
            if self.pci_tracker:
                self.pci_tracker.update_pci_for_instance(
                    context, instance, sign=sign)
            self.scheduler_client.reportclient.update_instance_allocation(
                cn, instance, sign)
            # new instance, update compute node resource usage:
            self._update_usage(
                self._get_usage_dict(instance), nodename, sign=sign)

        cn.current_workload = self.stats.calculate_workload()
        if self.pci_tracker:
            cn.pci_device_pools = self.pci_tracker.stats.to_device_pools_obj()
        else:
            cn.pci_device_pools = objects.PciDevicePoolList()
Example #6
0
    def _update_usage_from_migration(self, context, instance, image_meta,
                                     migration):
        """Update usage for a single migration.  The record may
        represent an incoming or outbound migration.

        :param context: security context
        :param instance: instance (dict-accessible) the migration refers to
        :param image_meta: image metadata; None means it is rebuilt from the
            instance, and a plain dict is converted to objects.ImageMeta
        :param migration: the migration record being accounted for
        """
        # Bail out for migrations this tracker should not account for
        # (criteria live in _is_trackable_migration).
        if not self._is_trackable_migration(migration):
            return

        uuid = migration.instance_uuid
        LOG.info(_LI("Updating from migration %s"), uuid)

        # Classify the migration relative to this host/node; a resize to the
        # very same host and node is both incoming and outbound.
        incoming = (migration.dest_compute == self.host
                    and migration.dest_node == self.nodename)
        outbound = (migration.source_compute == self.host
                    and migration.source_node == self.nodename)
        same_node = (incoming and outbound)

        # If the instance is already tracked, its current flavor's usage is
        # accounted elsewhere; we only hold space for the "other" side.
        record = self.tracked_instances.get(uuid, None)
        itype = None
        numa_topology = None

        if same_node:
            # same node resize. record usage for whichever instance type the
            # instance is *not* in:
            if (instance['instance_type_id'] == migration.old_instance_type_id
                ):
                itype = self._get_instance_type(context, instance, 'new_',
                                                migration)
                numa_topology = self._get_migration_context_resource(
                    'numa_topology', instance)
            else:
                # instance record already has new flavor, hold space for a
                # possible revert to the old instance type:
                itype = self._get_instance_type(context, instance, 'old_',
                                                migration)
                numa_topology = self._get_migration_context_resource(
                    'numa_topology', instance, prefix='old_')

        elif incoming and not record:
            # instance has not yet migrated here:
            itype = self._get_instance_type(context, instance, 'new_',
                                            migration)
            numa_topology = self._get_migration_context_resource(
                'numa_topology', instance)

        elif outbound and not record:
            # instance migrated, but record usage for a possible revert:
            itype = self._get_instance_type(context, instance, 'old_',
                                            migration)
            numa_topology = self._get_migration_context_resource(
                'numa_topology', instance, prefix='old_')

        if image_meta is None:
            image_meta = objects.ImageMeta.from_instance(instance)
        # TODO(jaypipes): Remove when image_meta is always passed
        # as an objects.ImageMeta
        elif not isinstance(image_meta, objects.ImageMeta):
            image_meta = objects.ImageMeta.from_dict(image_meta)

        # A flavor was selected above, so this migration consumes resources
        # on this node: record the usage and refresh the PCI device pools.
        if itype:
            usage = self._get_usage_dict(itype, numa_topology=numa_topology)
            if self.pci_tracker:
                self.pci_tracker.update_pci_for_migration(context, instance)
            self._update_usage(usage)
            if self.pci_tracker:
                obj = self.pci_tracker.stats.to_device_pools_obj()
                self.compute_node.pci_device_pools = obj
            else:
                obj = objects.PciDevicePoolList()
                self.compute_node.pci_device_pools = obj
            self.tracked_migrations[uuid] = (migration, itype)
Example #7
0
    def _update_available_resource(self, context, resources):
        """Recompute this node's resource usage and persist the result.

        Usage from tracked instances, in-progress migrations and orphaned
        instances is layered on top of the supplied ``resources`` view, and
        the updated compute node record is written out via ``_update``.
        """

        # initialise the compute node object, creating it
        # if it does not already exist.
        self._init_compute_node(context, resources)

        # if we could not init the compute node the tracker will be
        # disabled and we should quit now
        if self.disabled:
            return

        if 'pci_passthrough_devices' in resources:
            # TODO(jaypipes): Move this into _init_compute_node()
            if not self.pci_tracker:
                n_id = self.compute_node.id if self.compute_node else None
                self.pci_tracker = pci_manager.PciDevTracker(context,
                                                             node_id=n_id)
            # Pop so the key does not leak into the generic resource updates.
            dev_json = resources.pop('pci_passthrough_devices')
            self.pci_tracker.update_devices_from_hypervisor_resources(dev_json)

        # Grab all instances assigned to this node:
        instances = objects.InstanceList.get_by_host_and_node(
            context,
            self.host,
            self.nodename,
            expected_attrs=[
                'system_metadata', 'numa_topology', 'flavor',
                'migration_context'
            ])

        # Now calculate usage based on instance utilization:
        self._update_usage_from_instances(context, instances)

        # Grab all in-progress migrations:
        migrations = objects.MigrationList.get_in_progress_by_host_and_node(
            context, self.host, self.nodename)

        # Migration accounting must run after instance accounting so the
        # tracked_instances state it consults is current.
        self._pair_instances_to_migrations(migrations, instances)
        self._update_usage_from_migrations(context, migrations)

        # Detect and account for orphaned instances that may exist on the
        # hypervisor, but are not in the DB:
        orphans = self._find_orphaned_instances()
        self._update_usage_from_orphans(orphans)

        # NOTE(yjiang5): Because pci device tracker status is not cleared in
        # this periodic task, and also because the resource tracker is not
        # notified when instances are deleted, we need remove all usages
        # from deleted instances.
        if self.pci_tracker:
            self.pci_tracker.clean_usage(instances, migrations, orphans)
            dev_pools_obj = self.pci_tracker.stats.to_device_pools_obj()
            self.compute_node.pci_device_pools = dev_pools_obj
        else:
            self.compute_node.pci_device_pools = objects.PciDevicePoolList()

        self._report_final_resource_view()

        metrics = self._get_host_metrics(context, self.nodename)
        # TODO(pmurray): metrics should not be a json string in ComputeNode,
        # but it is. This should be changed in ComputeNode
        self.compute_node.metrics = jsonutils.dumps(metrics)

        # update the compute_node
        self._update(context)
        LOG.info(_LI('Compute_service record updated for %(host)s:%(node)s'), {
            'host': self.host,
            'node': self.nodename
        })
Example #8
0
    def _update_usage_from_migration(self, context, instance, migration,
                                     nodename):
        """Update usage for a single migration.  The record may
        represent an incoming or outbound migration.

        :param context: security context
        :param instance: instance (dict-accessible) the migration refers to
        :param migration: the migration record being accounted for
        :param nodename: node in self.compute_nodes this usage applies to
        """
        if not _is_trackable_migration(migration):
            return

        uuid = migration.instance_uuid
        LOG.info(_LI("Updating from migration %s"), uuid)

        # Classify the migration relative to this host/node; a resize to the
        # very same host and node is both incoming and outbound.
        incoming = (migration.dest_compute == self.host
                    and migration.dest_node == nodename)
        outbound = (migration.source_compute == self.host
                    and migration.source_node == nodename)
        same_node = (incoming and outbound)

        record = self.tracked_instances.get(uuid, None)
        itype = None
        numa_topology = None
        # sign stays 0 unless PCI devices need to be (de)allocated for the
        # instance; see the pci_tracker call near the bottom.
        sign = 0
        if same_node:
            # Same node resize. Record usage for the 'new_' resources.  This
            # is executed on resize_claim().
            if (instance['instance_type_id'] == migration.old_instance_type_id
                ):
                itype = self._get_instance_type(context, instance, 'new_',
                                                migration)
                numa_topology = self._get_migration_context_resource(
                    'numa_topology', instance)
                # Allocate pci device(s) for the instance.
                sign = 1
            else:
                # The instance is already set to the new flavor (this is done
                # by the compute manager on finish_resize()), hold space for a
                # possible revert to the 'old_' resources.
                # NOTE(lbeliveau): When the periodic audit timer gets
                # triggered, the compute usage gets reset.  The usage for an
                # instance that is migrated to the new flavor but not yet
                # confirmed/reverted will first get accounted for by
                # _update_usage_from_instances().  This method will then be
                # called, and we need to account for the '_old' resources
                # (just in case).
                itype = self._get_instance_type(context, instance, 'old_',
                                                migration)
                numa_topology = self._get_migration_context_resource(
                    'numa_topology', instance, prefix='old_')

        elif incoming and not record:
            # instance has not yet migrated here:
            itype = self._get_instance_type(context, instance, 'new_',
                                            migration)
            numa_topology = self._get_migration_context_resource(
                'numa_topology', instance)
            # Allocate pci device(s) for the instance.
            sign = 1

        elif outbound and not record:
            # instance migrated, but record usage for a possible revert:
            itype = self._get_instance_type(context, instance, 'old_',
                                            migration)
            numa_topology = self._get_migration_context_resource(
                'numa_topology', instance, prefix='old_')

        # A flavor was selected above, so this migration consumes resources
        # on this node: record the usage and refresh the PCI device pools.
        if itype:
            cn = self.compute_nodes[nodename]
            usage = self._get_usage_dict(itype, numa_topology=numa_topology)
            if self.pci_tracker and sign:
                self.pci_tracker.update_pci_for_instance(context,
                                                         instance,
                                                         sign=sign)
            self._update_usage(usage, nodename)
            if self.pci_tracker:
                obj = self.pci_tracker.stats.to_device_pools_obj()
                cn.pci_device_pools = obj
            else:
                obj = objects.PciDevicePoolList()
                cn.pci_device_pools = obj
            self.tracked_migrations[uuid] = migration
    def _update_available_resource(self, context, resources):
        """Recompute this node's resource usage and persist the result.

        Usage from tracked instances, resize/migrate migrations and orphaned
        instances is layered on top of the supplied ``resources`` view, and
        the updated compute node record is written out via ``_update``.
        """

        # initialise the compute node object, creating it
        # if it does not already exist.
        self._init_compute_node(context, resources)

        # if we could not init the compute node the tracker will be
        # disabled and we should quit now
        if self.disabled:
            return

        if 'pci_passthrough_devices' in resources:
            # Keep only assignable non-PF devices for the PCI tracker.
            devs = []
            for dev in jsonutils.loads(
                    resources.pop('pci_passthrough_devices')):
                if dev['dev_type'] == 'type-PF':
                    continue

                if self.pci_filter.device_assignable(dev):
                    devs.append(dev)

            if not self.pci_tracker:
                n_id = self.compute_node['id'] if self.compute_node else None
                self.pci_tracker = pci_manager.PciDevTracker(context,
                                                             node_id=n_id)
            self.pci_tracker.set_hvdevs(devs)

        # Grab all instances assigned to this node:
        instances = objects.InstanceList.get_by_host_and_node(
            context,
            self.host,
            self.nodename,
            expected_attrs=['system_metadata', 'numa_topology'])

        # Now calculate usage based on instance utilization:
        self._update_usage_from_instances(context, instances)

        # Grab all in-progress migrations:
        migrations = objects.MigrationList.get_in_progress_by_host_and_node(
            context, self.host, self.nodename)

        # Only look at resize/migrate migration records
        # NOTE(danms): RT should probably examine live migration
        # records as well and do something smart. However, ignore
        # those for now to avoid them being included in below calculations.
        migrations = [
            migration for migration in migrations
            if migration.migration_type in ('resize', 'migrate')
        ]

        self._update_usage_from_migrations(context, migrations)

        # Detect and account for orphaned instances that may exist on the
        # hypervisor, but are not in the DB:
        orphans = self._find_orphaned_instances()
        self._update_usage_from_orphans(orphans)

        # NOTE(yjiang5): Because pci device tracker status is not cleared in
        # this periodic task, and also because the resource tracker is not
        # notified when instances are deleted, we need remove all usages
        # from deleted instances.
        if self.pci_tracker:
            self.pci_tracker.clean_usage(instances, migrations, orphans)
            dev_pools_obj = self.pci_tracker.stats.to_device_pools_obj()
            self.compute_node.pci_device_pools = dev_pools_obj
        else:
            self.compute_node.pci_device_pools = objects.PciDevicePoolList()

        self._report_final_resource_view()

        metrics = self._get_host_metrics(context, self.nodename)
        # TODO(pmurray): metrics should not be a json string in ComputeNode,
        # but it is. This should be changed in ComputeNode
        self.compute_node.metrics = jsonutils.dumps(metrics)

        # update the compute_node
        self._update(context)
        LOG.info(_LI('Compute_service record updated for %(host)s:%(node)s'), {
            'host': self.host,
            'node': self.nodename
        })
Example #10
0
    def _update_usage_from_migration(self, context, instance, image_meta,
                                     migration):
        """Update usage for a single migration.  The record may
        represent an incoming or outbound migration.

        :param context: security context
        :param instance: instance (dict-accessible) the migration refers to
        :param image_meta: image metadata dict; if None it is rebuilt from
            the instance's system_metadata
        :param migration: the migration record being accounted for
        """
        uuid = migration.instance_uuid
        # Bug fix: pass uuid as a lazy logging argument instead of eagerly
        # %-formatting the translated message -- consistent with the other
        # variants of this method in this file.
        LOG.info(_LI("Updating from migration %s"), uuid)

        # Classify the migration relative to this host/node; a resize to the
        # very same host and node is both incoming and outbound.
        incoming = (migration.dest_compute == self.host
                    and migration.dest_node == self.nodename)
        outbound = (migration.source_compute == self.host
                    and migration.source_node == self.nodename)
        same_node = (incoming and outbound)

        record = self.tracked_instances.get(uuid, None)
        itype = None

        if same_node:
            # same node resize. record usage for whichever instance type the
            # instance is *not* in:
            if (instance['instance_type_id'] == migration.old_instance_type_id
                ):
                itype = self._get_instance_type(context, instance, 'new_',
                                                migration.new_instance_type_id)
            else:
                # instance record already has new flavor, hold space for a
                # possible revert to the old instance type:
                itype = self._get_instance_type(context, instance, 'old_',
                                                migration.old_instance_type_id)

        elif incoming and not record:
            # instance has not yet migrated here:
            itype = self._get_instance_type(context, instance, 'new_',
                                            migration.new_instance_type_id)

        elif outbound and not record:
            # instance migrated, but record usage for a possible revert:
            itype = self._get_instance_type(context, instance, 'old_',
                                            migration.old_instance_type_id)

        if image_meta is None:
            image_meta = utils.get_image_from_system_metadata(
                instance['system_metadata'])

        # A flavor was selected above, so this migration consumes resources
        # on this node: fit its NUMA constraints to the host topology, then
        # record the usage and refresh the PCI device pools.
        if itype:
            host_topology = self.compute_node.get('numa_topology')
            if host_topology:
                host_topology = objects.NUMATopology.obj_from_db_obj(
                    host_topology)
            numa_topology = hardware.numa_get_constraints(itype, image_meta)
            numa_topology = (hardware.numa_fit_instance_to_host(
                host_topology, numa_topology))
            usage = self._get_usage_dict(itype, numa_topology=numa_topology)
            if self.pci_tracker:
                self.pci_tracker.update_pci_for_migration(context, instance)
            self._update_usage(usage)
            if self.pci_tracker:
                obj = self.pci_tracker.stats.to_device_pools_obj()
                self.compute_node.pci_device_pools = obj
            else:
                obj = objects.PciDevicePoolList()
                self.compute_node.pci_device_pools = obj
            self.tracked_migrations[uuid] = (migration, itype)