Example #1
class PowerVMLiveMigrateData(LiveMigrateData):
    # Version 1.0: Initial version
    # Version 1.1: Added the Virtual Ethernet Adapter VLAN mappings.
    # Version 1.2: Added old_vol_attachment_ids
    # Version 1.3: Added wait_for_vif_plugged
    # Version 1.4: Inherited vifs from LiveMigrateData
    VERSION = '1.4'

    fields = {
        'host_mig_data': fields.DictOfNullableStringsField(),
        'dest_ip': fields.StringField(),
        'dest_user_id': fields.StringField(),
        'dest_sys_name': fields.StringField(),
        'public_key': fields.StringField(),
        'dest_proc_compat': fields.StringField(),
        'vol_data': fields.DictOfNullableStringsField(),
        'vea_vlan_mappings': fields.DictOfNullableStringsField(),
    }

    def obj_make_compatible(self, primitive, target_version):
        super(PowerVMLiveMigrateData,
              self).obj_make_compatible(primitive, target_version)
        target_version = versionutils.convert_version_to_tuple(target_version)
        if target_version < (1, 4) and 'vifs' in primitive:
            del primitive['vifs']
        if target_version < (1, 3) and 'wait_for_vif_plugged' in primitive:
            del primitive['wait_for_vif_plugged']
        if target_version < (1, 2):
            if 'old_vol_attachment_ids' in primitive:
                del primitive['old_vol_attachment_ids']
        if target_version < (1, 1):
            if 'vea_vlan_mappings' in primitive:
                del primitive['vea_vlan_mappings']
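
A minimal, self-contained sketch of the pattern used by obj_make_compatible() above: fields introduced in newer versions are stripped from the serialized primitive before it is handed to an older receiver. Plain dicts stand in for Nova's object machinery here, and downgrade() is a hypothetical helper (oslo_utils is assumed to be available, as it is in Nova).

from oslo_utils import versionutils

def downgrade(primitive, target_version):
    # vea_vlan_mappings was added in 1.1, so drop it for older targets.
    target = versionutils.convert_version_to_tuple(target_version)
    if target < (1, 1):
        primitive.pop('vea_vlan_mappings', None)
    return primitive

downgrade({'dest_ip': '10.0.0.5', 'vea_vlan_mappings': {'ethX': '42'}}, '1.0')
# -> {'dest_ip': '10.0.0.5'}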
Example #2
class PciDevicePool(base.NovaObject):
    # Version 1.0: Initial version
    VERSION = '1.0'

    fields = {
        'product_id': fields.StringField(),
        'vendor_id': fields.StringField(),
        'tags': fields.DictOfNullableStringsField(),
        'count': fields.IntegerField(),
        }

    # NOTE(pmurray): before this object existed the pci device pool data was
    # stored as a dict. For backward compatibility we need to be able to read
    # it in from a dict
    @classmethod
    def from_dict(cls, value):
        pool_dict = copy.copy(value)
        pool = cls()
        pool.vendor_id = pool_dict.pop("vendor_id")
        pool.product_id = pool_dict.pop("product_id")
        pool.count = pool_dict.pop("count")
        pool.tags = {}
        pool.tags.update(pool_dict)
        return pool

    # NOTE(sbauza): Before using objects, pci stats was a list of
    # dictionaries not having tags. For compatibility with other modules, let's
    # create a reversible method
    def to_dict(self):
        pci_pool = base.obj_to_primitive(self)
        tags = pci_pool.pop('tags', None)
        for k, v in six.iteritems(tags):
            pci_pool[k] = v
        return pci_pool
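
A usage sketch of the round trip the two helpers above provide, with hypothetical values; it assumes the object is registered with Nova's object machinery so base.obj_to_primitive() works.

legacy = {'vendor_id': '8086', 'product_id': '1520', 'count': 2,
          'physical_network': 'physnet1'}
pool = PciDevicePool.from_dict(legacy)
# Anything that is not a named field is swept into tags:
#   pool.tags == {'physical_network': 'physnet1'}
# to_dict() flattens the tags back out, reproducing the legacy layout:
#   pool.to_dict() == {'vendor_id': '8086', 'product_id': '1520',
#                      'count': 2, 'physical_network': 'physnet1'}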
Example #3
class ComputeNode(compute_node.ComputeNode):
    # This change just aims to add pci_stats to the compute_nodes obj

    fields = {
        'id': fields.IntegerField(read_only=True),
        'service_id': fields.IntegerField(),
        'vcpus': fields.IntegerField(),
        'memory_mb': fields.IntegerField(),
        'local_gb': fields.IntegerField(),
        'vcpus_used': fields.IntegerField(),
        'memory_mb_used': fields.IntegerField(),
        'local_gb_used': fields.IntegerField(),
        'hypervisor_type': fields.StringField(),
        'hypervisor_version': fields.IntegerField(),
        'hypervisor_hostname': fields.StringField(nullable=True),
        'free_ram_mb': fields.IntegerField(nullable=True),
        'free_disk_gb': fields.IntegerField(nullable=True),
        'current_workload': fields.IntegerField(nullable=True),
        'running_vms': fields.IntegerField(nullable=True),
        'cpu_info': fields.StringField(nullable=True),
        'disk_available_least': fields.IntegerField(nullable=True),
        'metrics': fields.StringField(nullable=True),
        'stats': fields.DictOfNullableStringsField(nullable=True),
        'host_ip': fields.IPAddressField(nullable=True),
        'numa_topology': fields.StringField(nullable=True),
        'pci_stats': fields.StringField(nullable=True),
    }
Example #4
class PowerVMLiveMigrateData(LiveMigrateData):
    # Version 1.0: Initial version
    # Version 1.1: Added the Virtual Ethernet Adapter VLAN mappings.
    # Version 1.2: Added old_vol_attachment_ids
    # Version 1.3: Added wait_for_vif_plugged
    # Version 1.4: Inherited vifs from LiveMigrateData
    VERSION = '1.4'

    fields = {
        'host_mig_data': fields.DictOfNullableStringsField(),
        'dest_ip': fields.StringField(),
        'dest_user_id': fields.StringField(),
        'dest_sys_name': fields.StringField(),
        'public_key': fields.StringField(),
        'dest_proc_compat': fields.StringField(),
        'vol_data': fields.DictOfNullableStringsField(),
        'vea_vlan_mappings': fields.DictOfNullableStringsField(),
    }

    def obj_make_compatible(self, primitive, target_version):
        super(PowerVMLiveMigrateData,
              self).obj_make_compatible(primitive, target_version)
        target_version = versionutils.convert_version_to_tuple(target_version)
        if target_version < (1, 4) and 'vifs' in primitive:
            del primitive['vifs']
        if target_version < (1, 3) and 'wait_for_vif_plugged' in primitive:
            del primitive['wait_for_vif_plugged']
        if target_version < (1, 2):
            if 'old_vol_attachment_ids' in primitive:
                del primitive['old_vol_attachment_ids']
        if target_version < (1, 1):
            if 'vea_vlan_mappings' in primitive:
                del primitive['vea_vlan_mappings']

    def to_legacy_dict(self, pre_migration_result=False):
        legacy = super(PowerVMLiveMigrateData, self).to_legacy_dict()
        for field in self.fields:
            if self.obj_attr_is_set(field):
                legacy[field] = getattr(self, field)
        return legacy

    def from_legacy_dict(self, legacy):
        super(PowerVMLiveMigrateData, self).from_legacy_dict(legacy)
        for field in self.fields:
            if field in legacy:
                setattr(self, field, legacy[field])
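
A minimal usage sketch of the legacy-dict round trip added here, with hypothetical values; it assumes the parent LiveMigrateData class handles its own keys in to_legacy_dict() and from_legacy_dict().

src = PowerVMLiveMigrateData(dest_ip='10.0.0.5', dest_user_id='padmin')
legacy = src.to_legacy_dict()    # only fields that are actually set are copied
dst = PowerVMLiveMigrateData()
dst.from_legacy_dict(legacy)     # dst.dest_ip == '10.0.0.5'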
Example #5
def setUp(self):
    super(TestDictOfStringsNone, self).setUp()
    self.field = fields.DictOfNullableStringsField()
    self.coerce_good_values = [({'foo': 'bar'}, {'foo': 'bar'}),
                               ({'foo': 1}, {'foo': '1'}),
                               ({'foo': None}, {'foo': None})]
    self.coerce_bad_values = [{1: 'bar'}, 'foo']
    self.to_primitive_values = [({'foo': 'bar'}, {'foo': 'bar'})]
    self.from_primitive_values = [({'foo': 'bar'}, {'foo': 'bar'})]
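
The fixtures above pin down the coercion behaviour of DictOfNullableStringsField; sketched directly against the field, using the Field.coerce(obj, attr, value) signature from the versioned-objects machinery (the exact exception type raised for the bad values is whatever the test harness expects):

field = fields.DictOfNullableStringsField()
field.coerce(None, 'attr', {'foo': 1})     # -> {'foo': '1'}: values are stringified
field.coerce(None, 'attr', {'foo': None})  # -> {'foo': None}: None values are allowed
field.coerce(None, 'attr', {1: 'bar'})     # rejected: keys must be strings
field.coerce(None, 'attr', 'foo')          # rejected: not a dict at all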
Example #6
class PciDevicePool(base.NovaObject):
    # Version 1.0: Initial version
    # Version 1.1: Added numa_node field
    VERSION = '1.1'

    fields = {
        'product_id': fields.StringField(),
        'vendor_id': fields.StringField(),
        'numa_node': fields.IntegerField(nullable=True),
        'tags': fields.DictOfNullableStringsField(),
        'count': fields.IntegerField(),
    }

    def obj_make_compatible(self, primitive, target_version):
        target_version = utils.convert_version_to_tuple(target_version)
        if target_version < (1, 1) and 'numa_node' in primitive:
            del primitive['numa_node']

    # NOTE(pmurray): before this object existed the pci device pool data was
    # stored as a dict. For backward compatibility we need to be able to read
    # it in from a dict
    @classmethod
    def from_dict(cls, value):
        pool_dict = copy.copy(value)
        pool = cls()
        pool.vendor_id = pool_dict.pop("vendor_id")
        pool.product_id = pool_dict.pop("product_id")
        pool.numa_node = pool_dict.pop("numa_node", None)
        pool.count = pool_dict.pop("count")
        pool.tags = {}
        pool.tags.update(pool_dict)
        return pool

    # NOTE(sbauza): Before using objects, pci stats was a list of
    # dictionaries not having tags. For compatibility with other modules, let's
    # create a reversible method
    def to_dict(self):
        pci_pool = base.obj_to_primitive(self)
        tags = pci_pool.pop('tags', {})
        for k, v in six.iteritems(tags):
            pci_pool[k] = v
        return pci_pool
Example #7
class PciDevicePool(base.NovaObject):
    # Version 1.0: Initial version
    VERSION = '1.0'

    fields = {
        'product_id': fields.StringField(),
        'vendor_id': fields.StringField(),
        'tags': fields.DictOfNullableStringsField(),
        'count': fields.IntegerField(),
    }

    # NOTE(pmurray): before this object existed the pci device pool data was
    # stored as a dict. For backward compatibility we need to be able to read
    # it in from a dict
    @classmethod
    def from_dict(cls, value):
        pool_dict = copy.copy(value)
        pool = cls()
        pool.vendor_id = pool_dict.pop("vendor_id")
        pool.product_id = pool_dict.pop("product_id")
        pool.count = pool_dict.pop("count")
        pool.tags = {}
        pool.tags.update(pool_dict)
        return pool
Example #8
class ComputeNode(base.NovaPersistentObject, base.NovaObject):
    # Version 1.0: Initial version
    # Version 1.1: Added get_by_service_id()
    # Version 1.2: String attributes updated to support unicode
    # Version 1.3: Added stats field
    # Version 1.4: Added host ip field
    # Version 1.5: Added numa_topology field
    # Version 1.6: Added supported_hv_specs
    # Version 1.7: Added host field
    # Version 1.8: Added get_by_host_and_nodename()
    # Version 1.9: Added pci_device_pools
    # Version 1.10: Added get_first_node_by_host_for_old_compat()
    # Version 1.11: PciDevicePoolList version 1.1
    # Version 1.12: HVSpec version 1.1
    # Version 1.13: Changed service_id field to be nullable
    # Version 1.14: Added cpu_allocation_ratio and ram_allocation_ratio
    # Version 1.15: Added uuid
    # Version 1.16: Added disk_allocation_ratio
    # Version 1.17: Added mapped
    # Version 1.18: Added get_by_uuid().
    VERSION = '1.18'

    fields = {
        'id': fields.IntegerField(read_only=True),
        'uuid': fields.UUIDField(read_only=True),
        'service_id': fields.IntegerField(nullable=True),
        'host': fields.StringField(nullable=True),
        'vcpus': fields.IntegerField(),
        'memory_mb': fields.IntegerField(),
        'local_gb': fields.IntegerField(),
        'vcpus_used': fields.IntegerField(),
        'memory_mb_used': fields.IntegerField(),
        'local_gb_used': fields.IntegerField(),
        'hypervisor_type': fields.StringField(),
        'hypervisor_version': fields.IntegerField(),
        'hypervisor_hostname': fields.StringField(nullable=True),
        'free_ram_mb': fields.IntegerField(nullable=True),
        'free_disk_gb': fields.IntegerField(nullable=True),
        'current_workload': fields.IntegerField(nullable=True),
        'running_vms': fields.IntegerField(nullable=True),
        # TODO(melwitt): cpu_info is non-nullable in the schema but we must
        # wait until version 2.0 of ComputeNode to change it to non-nullable
        'cpu_info': fields.StringField(nullable=True),
        'disk_available_least': fields.IntegerField(nullable=True),
        'metrics': fields.StringField(nullable=True),
        'stats': fields.DictOfNullableStringsField(nullable=True),
        'host_ip': fields.IPAddressField(nullable=True),
        # TODO(rlrossit): because of history, numa_topology is held here as a
        # StringField, not a NUMATopology object. In version 2 of ComputeNode
        # this will be converted over to a fields.ObjectField('NUMATopology')
        'numa_topology': fields.StringField(nullable=True),
        # NOTE(pmurray): the supported_hv_specs field maps to the
        # supported_instances field in the database
        'supported_hv_specs': fields.ListOfObjectsField('HVSpec'),
        # NOTE(pmurray): the pci_device_pools field maps to the
        # pci_stats field in the database
        'pci_device_pools': fields.ObjectField('PciDevicePoolList',
                                               nullable=True),
        'cpu_allocation_ratio': fields.FloatField(),
        'ram_allocation_ratio': fields.FloatField(),
        'disk_allocation_ratio': fields.FloatField(),
        'mapped': fields.IntegerField(),
        }

    def obj_make_compatible(self, primitive, target_version):
        super(ComputeNode, self).obj_make_compatible(primitive, target_version)
        target_version = versionutils.convert_version_to_tuple(target_version)
        if target_version < (1, 17):
            if 'mapped' in primitive:
                del primitive['mapped']
        if target_version < (1, 16):
            if 'disk_allocation_ratio' in primitive:
                del primitive['disk_allocation_ratio']
        if target_version < (1, 15):
            if 'uuid' in primitive:
                del primitive['uuid']
        if target_version < (1, 14):
            if 'ram_allocation_ratio' in primitive:
                del primitive['ram_allocation_ratio']
            if 'cpu_allocation_ratio' in primitive:
                del primitive['cpu_allocation_ratio']
        if target_version < (1, 13) and primitive.get('service_id') is None:
            # service_id is non-nullable in versions before 1.13
            try:
                service = objects.Service.get_by_compute_host(
                    self._context, primitive['host'])
                primitive['service_id'] = service.id
            except (exception.ComputeHostNotFound, KeyError):
                # NOTE(hanlind): In case anything goes wrong like service not
                # found or host not being set, catch and set a fake value just
                # to allow for older versions that demand a value to work.
                # Setting to -1 will, if value is later used result in a
                # ServiceNotFound, so should be safe.
                primitive['service_id'] = -1
        if target_version < (1, 7) and 'host' in primitive:
            del primitive['host']
        if target_version < (1, 5) and 'numa_topology' in primitive:
            del primitive['numa_topology']
        if target_version < (1, 4) and 'host_ip' in primitive:
            del primitive['host_ip']
        if target_version < (1, 3) and 'stats' in primitive:
            # pre 1.3 version does not have a stats field
            del primitive['stats']

    @staticmethod
    def _host_from_db_object(compute, db_compute):
        if (('host' not in db_compute or db_compute['host'] is None)
                and 'service_id' in db_compute
                and db_compute['service_id'] is not None):
            # FIXME(sbauza) : Unconverted compute record, provide compatibility
            # This has to stay until we can be sure that any/all compute nodes
            # in the database have been converted to use the host field

            # Service field of ComputeNode could be deprecated in a next patch,
            # so let's use directly the Service object
            try:
                service = objects.Service.get_by_id(
                    compute._context, db_compute['service_id'])
            except exception.ServiceNotFound:
                compute.host = None
                return
            try:
                compute.host = service.host
            except (AttributeError, exception.OrphanedObjectError):
                # Host can be nullable in Service
                compute.host = None
        elif 'host' in db_compute and db_compute['host'] is not None:
            # New-style DB having host as a field
            compute.host = db_compute['host']
        else:
            # We assume it should not happen but in case, let's set it to None
            compute.host = None

    @staticmethod
    def _from_db_object(context, compute, db_compute):
        special_cases = set([
            'stats',
            'supported_hv_specs',
            'host',
            'pci_device_pools',
            ])
        fields = set(compute.fields) - special_cases
        online_updates = {}
        for key in fields:
            value = db_compute[key]
            # NOTE(sbauza): Since all compute nodes don't possibly run the
            # latest RT code updating allocation ratios, we need to provide
            # a backwards compatible way of hydrating them.
            # As we want to care about our operators and since we don't want to
            # ask them to change their configuration files before upgrading, we
            # prefer to hardcode the default values for the ratios here until
            # the next release (Newton) where the opt default values will be
            # restored for both cpu (16.0), ram (1.5) and disk (1.0)
            # allocation ratios.
            # TODO(yikun): Remove this online migration code when all ratio
            # values are NOT 0.0 or NULL
            ratio_keys = ['cpu_allocation_ratio', 'ram_allocation_ratio',
                          'disk_allocation_ratio']
            if key in ratio_keys and value in (None, 0.0):
                # ResourceTracker is not updating the value (old node)
                # or the compute node is updated but the default value has
                # not been changed
                r = getattr(CONF, key)
                # NOTE(yikun): If the allocation ratio record is not set, the
                # allocation ratio will be changed to the
                # CONF.x_allocation_ratio value if x_allocation_ratio is
                # set, and fallback to use the CONF.initial_x_allocation_ratio
                # otherwise.
                init_x_ratio = getattr(CONF, 'initial_%s' % key)
                value = r if r else init_x_ratio
                online_updates[key] = value
            elif key == 'mapped':
                value = 0 if value is None else value

            setattr(compute, key, value)

        if online_updates:
            db.compute_node_update(context, compute.id, online_updates)

        stats = db_compute['stats']
        if stats:
            compute.stats = jsonutils.loads(stats)

        sup_insts = db_compute.get('supported_instances')
        if sup_insts:
            hv_specs = jsonutils.loads(sup_insts)
            hv_specs = [objects.HVSpec.from_list(hv_spec)
                        for hv_spec in hv_specs]
            compute.supported_hv_specs = hv_specs

        pci_stats = db_compute.get('pci_stats')
        if pci_stats is not None:
            pci_stats = pci_device_pool.from_pci_stats(pci_stats)
        compute.pci_device_pools = pci_stats
        compute._context = context

        # Make sure that we correctly set the host field depending on either
        # host column is present in the table or not
        compute._host_from_db_object(compute, db_compute)

        compute.obj_reset_changes()

        return compute

    @base.remotable_classmethod
    def get_by_id(cls, context, compute_id):
        db_compute = db.compute_node_get(context, compute_id)
        return cls._from_db_object(context, cls(), db_compute)

    @base.remotable_classmethod
    def get_by_uuid(cls, context, compute_uuid):
        nodes = ComputeNodeList.get_all_by_uuids(context, [compute_uuid])
        # We have a unique index on the uuid column so we can get back 0 or 1.
        if not nodes:
            raise exception.ComputeHostNotFound(host=compute_uuid)
        return nodes[0]

    # NOTE(hanlind): This is deprecated and should be removed on the next
    # major version bump
    @base.remotable_classmethod
    def get_by_service_id(cls, context, service_id):
        db_computes = db.compute_nodes_get_by_service_id(context, service_id)
        # NOTE(sbauza): Old version was returning an item, we need to keep this
        # behaviour for backwards compatibility
        db_compute = db_computes[0]
        return cls._from_db_object(context, cls(), db_compute)

    @base.remotable_classmethod
    def get_by_host_and_nodename(cls, context, host, nodename):
        db_compute = db.compute_node_get_by_host_and_nodename(
            context, host, nodename)
        return cls._from_db_object(context, cls(), db_compute)

    # TODO(pkholkin): Remove this method in the next major version bump
    @base.remotable_classmethod
    def get_first_node_by_host_for_old_compat(cls, context, host,
                                              use_slave=False):
        computes = ComputeNodeList.get_all_by_host(context, host, use_slave)
        # FIXME(sbauza): Ironic deployments can return multiple
        # nodes per host, we should return all the nodes and modify the callers
        # instead.
        # Arbitrarily returning the first node.
        return computes[0]

    @staticmethod
    def _convert_stats_to_db_format(updates):
        stats = updates.pop('stats', None)
        if stats is not None:
            updates['stats'] = jsonutils.dumps(stats)

    @staticmethod
    def _convert_host_ip_to_db_format(updates):
        host_ip = updates.pop('host_ip', None)
        if host_ip:
            updates['host_ip'] = str(host_ip)

    @staticmethod
    def _convert_supported_instances_to_db_format(updates):
        hv_specs = updates.pop('supported_hv_specs', None)
        if hv_specs is not None:
            hv_specs = [hv_spec.to_list() for hv_spec in hv_specs]
            updates['supported_instances'] = jsonutils.dumps(hv_specs)

    @staticmethod
    def _convert_pci_stats_to_db_format(updates):
        if 'pci_device_pools' in updates:
            pools = updates.pop('pci_device_pools')
            if pools is not None:
                pools = jsonutils.dumps(pools.obj_to_primitive())
            updates['pci_stats'] = pools

    @base.remotable
    def create(self):
        if self.obj_attr_is_set('id'):
            raise exception.ObjectActionError(action='create',
                                              reason='already created')
        updates = self.obj_get_changes()
        if 'uuid' not in updates:
            updates['uuid'] = uuidutils.generate_uuid()
            self.uuid = updates['uuid']

        self._convert_stats_to_db_format(updates)
        self._convert_host_ip_to_db_format(updates)
        self._convert_supported_instances_to_db_format(updates)
        self._convert_pci_stats_to_db_format(updates)

        db_compute = db.compute_node_create(self._context, updates)
        self._from_db_object(self._context, self, db_compute)

    @base.remotable
    def save(self, prune_stats=False):
        # NOTE(belliott) ignore prune_stats param, no longer relevant

        updates = self.obj_get_changes()
        updates.pop('id', None)
        self._convert_stats_to_db_format(updates)
        self._convert_host_ip_to_db_format(updates)
        self._convert_supported_instances_to_db_format(updates)
        self._convert_pci_stats_to_db_format(updates)

        db_compute = db.compute_node_update(self._context, self.id, updates)
        self._from_db_object(self._context, self, db_compute)

    @base.remotable
    def destroy(self):
        db.compute_node_delete(self._context, self.id)

    def update_from_virt_driver(self, resources):
        # NOTE(pmurray): the virt driver provides a dict of values that
        # can be copied into the compute node. The names and representation
        # do not exactly match.
        # TODO(pmurray): the resources dict should be formalized.
        keys = ["vcpus", "memory_mb", "local_gb", "cpu_info",
                "vcpus_used", "memory_mb_used", "local_gb_used",
                "numa_topology", "hypervisor_type",
                "hypervisor_version", "hypervisor_hostname",
                "disk_available_least", "host_ip", "uuid"]
        for key in keys:
            if key in resources:
                # The uuid field is read-only so it should only be set when
                # creating the compute node record for the first time. Ignore
                # it otherwise.
                if key == 'uuid' and 'uuid' in self:
                    continue
                setattr(self, key, resources[key])

        # supported_instances has a different name in compute_node
        if 'supported_instances' in resources:
            si = resources['supported_instances']
            self.supported_hv_specs = [objects.HVSpec.from_list(s) for s in si]
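
The stats field is a DictOfNullableStringsField on the object but a JSON text column in the database; a small sketch of the conversion path implemented above, with hypothetical values:

updates = {'stats': {'failed_builds': '0', 'io_workload': '3'}}
ComputeNode._convert_stats_to_db_format(updates)
# updates['stats'] is now the JSON string produced by jsonutils.dumps(),
# and _from_db_object() turns it back into a dict with jsonutils.loads().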
Example #9
class Instance(base.NovaPersistentObject, base.NovaObject):
    # Version 1.0: Initial version
    # Version 1.1: Added info_cache
    # Version 1.2: Added security_groups
    # Version 1.3: Added expected_vm_state and admin_state_reset to
    #              save()
    # Version 1.4: Added locked_by and deprecated locked
    # Version 1.5: Added cleaned
    # Version 1.6: Added pci_devices
    # Version 1.7: String attributes updated to support unicode
    # Version 1.8: 'security_groups' and 'pci_devices' cannot be None
    # Version 1.9: Make uuid a non-None real string
    # Version 1.10: Added use_slave to refresh and get_by_uuid
    # Version 1.11: Update instance from database during destroy
    # Version 1.12: Added ephemeral_key_uuid
    # Version 1.13: Added delete_metadata_key()
    # Version 1.14: Added numa_topology
    # Version 1.15: PciDeviceList 1.1
    VERSION = '1.15'

    fields = {
        'id': fields.IntegerField(),
        'user_id': fields.StringField(nullable=True),
        'project_id': fields.StringField(nullable=True),
        'image_ref': fields.StringField(nullable=True),
        'kernel_id': fields.StringField(nullable=True),
        'ramdisk_id': fields.StringField(nullable=True),
        'hostname': fields.StringField(nullable=True),
        'launch_index': fields.IntegerField(nullable=True),
        'key_name': fields.StringField(nullable=True),
        'key_data': fields.StringField(nullable=True),
        'power_state': fields.IntegerField(nullable=True),
        'vm_state': fields.StringField(nullable=True),
        'task_state': fields.StringField(nullable=True),
        'memory_mb': fields.IntegerField(nullable=True),
        'vcpus': fields.IntegerField(nullable=True),
        'root_gb': fields.IntegerField(nullable=True),
        'ephemeral_gb': fields.IntegerField(nullable=True),
        'ephemeral_key_uuid': fields.UUIDField(nullable=True),
        'host': fields.StringField(nullable=True),
        'node': fields.StringField(nullable=True),
        'instance_type_id': fields.IntegerField(nullable=True),
        'user_data': fields.StringField(nullable=True),
        'reservation_id': fields.StringField(nullable=True),
        'scheduled_at': fields.DateTimeField(nullable=True),
        'launched_at': fields.DateTimeField(nullable=True),
        'terminated_at': fields.DateTimeField(nullable=True),
        'availability_zone': fields.StringField(nullable=True),
        'display_name': fields.StringField(nullable=True),
        'display_description': fields.StringField(nullable=True),
        'launched_on': fields.StringField(nullable=True),

        # NOTE(jdillaman): locked deprecated in favor of locked_by,
        # to be removed in Icehouse
        'locked': fields.BooleanField(default=False),
        'locked_by': fields.StringField(nullable=True),
        'os_type': fields.StringField(nullable=True),
        'architecture': fields.StringField(nullable=True),
        'vm_mode': fields.StringField(nullable=True),
        'uuid': fields.UUIDField(),
        'root_device_name': fields.StringField(nullable=True),
        'default_ephemeral_device': fields.StringField(nullable=True),
        'default_swap_device': fields.StringField(nullable=True),
        'config_drive': fields.StringField(nullable=True),
        'access_ip_v4': fields.IPV4AddressField(nullable=True),
        'access_ip_v6': fields.IPV6AddressField(nullable=True),
        'auto_disk_config': fields.BooleanField(default=False),
        'progress': fields.IntegerField(nullable=True),
        'shutdown_terminate': fields.BooleanField(default=False),
        'disable_terminate': fields.BooleanField(default=False),
        'cell_name': fields.StringField(nullable=True),
        'metadata': fields.DictOfStringsField(),
        'system_metadata': fields.DictOfNullableStringsField(),
        'info_cache': fields.ObjectField('InstanceInfoCache', nullable=True),
        'security_groups': fields.ObjectField('SecurityGroupList'),
        'fault': fields.ObjectField('InstanceFault', nullable=True),
        'cleaned': fields.BooleanField(default=False),
        'pci_devices': fields.ObjectField('PciDeviceList', nullable=True),
        'numa_topology': fields.ObjectField('InstanceNUMATopology',
                                            nullable=True)
    }

    obj_extra_fields = ['name']

    def __init__(self, *args, **kwargs):
        super(Instance, self).__init__(*args, **kwargs)
        self._reset_metadata_tracking()

    def _reset_metadata_tracking(self, fields=None):
        if fields is None or 'system_metadata' in fields:
            self._orig_system_metadata = (dict(self.system_metadata)
                                          if 'system_metadata' in self else {})
        if fields is None or 'metadata' in fields:
            self._orig_metadata = (dict(self.metadata)
                                   if 'metadata' in self else {})

    def obj_reset_changes(self, fields=None):
        super(Instance, self).obj_reset_changes(fields)
        self._reset_metadata_tracking(fields=fields)

    def obj_what_changed(self):
        changes = super(Instance, self).obj_what_changed()
        if 'metadata' in self and self.metadata != self._orig_metadata:
            changes.add('metadata')
        if 'system_metadata' in self and (self.system_metadata !=
                                          self._orig_system_metadata):
            changes.add('system_metadata')
        return changes

    @classmethod
    def _obj_from_primitive(cls, context, objver, primitive):
        self = super(Instance,
                     cls)._obj_from_primitive(context, objver, primitive)
        self._reset_metadata_tracking()
        return self

    def obj_make_compatible(self, primitive, target_version):
        target_version = utils.convert_version_to_tuple(target_version)
        unicode_attributes = [
            'user_id', 'project_id', 'image_ref', 'kernel_id', 'ramdisk_id',
            'hostname', 'key_name', 'key_data', 'host', 'node', 'user_data',
            'availability_zone', 'display_name', 'display_description',
            'launched_on', 'locked_by', 'os_type', 'architecture', 'vm_mode',
            'root_device_name', 'default_ephemeral_device',
            'default_swap_device', 'config_drive', 'cell_name'
        ]
        if target_version < (1, 14) and 'numa_topology' in primitive:
            del primitive['numa_topology']
        if target_version < (1, 10) and 'info_cache' in primitive:
            # NOTE(danms): Instance <= 1.9 (havana) had info_cache 1.4
            self.info_cache.obj_make_compatible(
                primitive['info_cache']['nova_object.data'], '1.4')
            primitive['info_cache']['nova_object.version'] = '1.4'
        if target_version < (1, 7):
            # NOTE(danms): Before 1.7, we couldn't handle unicode in
            # string fields, so squash it here
            for field in [
                    x for x in unicode_attributes
                    if x in primitive and primitive[x] is not None
            ]:
                primitive[field] = primitive[field].encode('ascii', 'replace')
        if target_version < (1, 15) and 'pci_devices' in primitive:
            # NOTE(baoli): Instance <= 1.14 (icehouse) had PciDeviceList 1.0
            self.pci_devices.obj_make_compatible(
                primitive['pci_devices']['nova_object.data'], '1.0')
            primitive['pci_devices']['nova_object.version'] = '1.0'
        if target_version < (1, 6):
            # NOTE(danms): Before 1.6 there was no pci_devices list
            if 'pci_devices' in primitive:
                del primitive['pci_devices']

    @property
    def name(self):
        try:
            base_name = CONF.instance_name_template % self.id
        except TypeError:
            # Support templates like "uuid-%(uuid)s", etc.
            info = {}
            # NOTE(russellb): Don't use self.iteritems() here, as it will
            # result in infinite recursion on the name property.
            for key in self.fields:
                if key == 'name':
                    # NOTE(danms): prevent recursion
                    continue
                elif not self.obj_attr_is_set(key):
                    # NOTE(danms): Don't trigger lazy-loads
                    continue
                info[key] = self[key]
            try:
                base_name = CONF.instance_name_template % info
            except KeyError:
                base_name = self.uuid
        return base_name

    @staticmethod
    def _from_db_object(context, instance, db_inst, expected_attrs=None):
        """Method to help with migration to objects.

        Converts a database entity to a formal object.
        """
        instance._context = context
        if expected_attrs is None:
            expected_attrs = []
        # Most of the field names match right now, so be quick
        for field in instance.fields:
            if field in INSTANCE_OPTIONAL_ATTRS:
                continue
            elif field == 'deleted':
                instance.deleted = db_inst['deleted'] == db_inst['id']
            elif field == 'cleaned':
                instance.cleaned = db_inst['cleaned'] == 1
            else:
                instance[field] = db_inst[field]

        if 'metadata' in expected_attrs:
            instance['metadata'] = utils.instance_meta(db_inst)
        if 'system_metadata' in expected_attrs:
            instance['system_metadata'] = utils.instance_sys_meta(db_inst)
        if 'fault' in expected_attrs:
            instance['fault'] = (objects.InstanceFault.get_latest_for_instance(
                context, instance.uuid))
        if 'numa_topology' in expected_attrs:
            instance._load_numa_topology()

        if 'info_cache' in expected_attrs:
            if db_inst['info_cache'] is None:
                instance.info_cache = None
            elif not instance.obj_attr_is_set('info_cache'):
                # TODO(danms): If this ever happens on a backlevel instance
                # passed to us by a backlevel service, things will break
                instance.info_cache = objects.InstanceInfoCache(context)
            if instance.info_cache is not None:
                instance.info_cache._from_db_object(context,
                                                    instance.info_cache,
                                                    db_inst['info_cache'])

        # TODO(danms): If we are updating these on a backlevel instance,
        # we'll end up sending back new versions of these objects (see
        # above note for new info_caches)
        if 'pci_devices' in expected_attrs:
            pci_devices = base.obj_make_list(context,
                                             objects.PciDeviceList(context),
                                             objects.PciDevice,
                                             db_inst['pci_devices'])
            instance['pci_devices'] = pci_devices
        if 'security_groups' in expected_attrs:
            sec_groups = base.obj_make_list(context,
                                            objects.SecurityGroupList(context),
                                            objects.SecurityGroup,
                                            db_inst['security_groups'])
            instance['security_groups'] = sec_groups

        instance.obj_reset_changes()
        return instance

    @base.remotable_classmethod
    def get_by_uuid(cls, context, uuid, expected_attrs=None, use_slave=False):
        if expected_attrs is None:
            expected_attrs = ['info_cache', 'security_groups']
        columns_to_join = _expected_cols(expected_attrs)
        db_inst = db.instance_get_by_uuid(context,
                                          uuid,
                                          columns_to_join=columns_to_join,
                                          use_slave=use_slave)
        return cls._from_db_object(context, cls(), db_inst, expected_attrs)

    @base.remotable_classmethod
    def get_by_id(cls, context, inst_id, expected_attrs=None):
        if expected_attrs is None:
            expected_attrs = ['info_cache', 'security_groups']
        columns_to_join = _expected_cols(expected_attrs)
        db_inst = db.instance_get(context,
                                  inst_id,
                                  columns_to_join=columns_to_join)
        return cls._from_db_object(context, cls(), db_inst, expected_attrs)

    @base.remotable
    def create(self, context):
        if self.obj_attr_is_set('id'):
            raise exception.ObjectActionError(action='create',
                                              reason='already created')
        updates = self.obj_get_changes()
        expected_attrs = [
            attr for attr in INSTANCE_DEFAULT_FIELDS if attr in updates
        ]
        if 'security_groups' in updates:
            updates['security_groups'] = [
                x.name for x in updates['security_groups']
            ]
        if 'info_cache' in updates:
            updates['info_cache'] = {
                'network_info': updates['info_cache'].network_info.json()
            }
        numa_topology = updates.pop('numa_topology', None)
        db_inst = db.instance_create(context, updates)
        if numa_topology:
            expected_attrs.append('numa_topology')
            numa_topology.instance_uuid = db_inst['uuid']
            numa_topology.create(context)
        self._from_db_object(context, self, db_inst, expected_attrs)

    @base.remotable
    def destroy(self, context):
        if not self.obj_attr_is_set('id'):
            raise exception.ObjectActionError(action='destroy',
                                              reason='already destroyed')
        if not self.obj_attr_is_set('uuid'):
            raise exception.ObjectActionError(action='destroy',
                                              reason='no uuid')
        if not self.obj_attr_is_set('host') or not self.host:
            # NOTE(danms): If our host is not set, avoid a race
            constraint = db.constraint(host=db.equal_any(None))
        else:
            constraint = None

        try:
            db_inst = db.instance_destroy(context,
                                          self.uuid,
                                          constraint=constraint)
            self._from_db_object(context, self, db_inst)
        except exception.ConstraintNotMet:
            raise exception.ObjectActionError(action='destroy',
                                              reason='host changed')
        delattr(self, base.get_attrname('id'))

    def _save_info_cache(self, context):
        if self.info_cache:
            self.info_cache.save(context)

    def _save_security_groups(self, context):
        security_groups = self.security_groups or []
        for secgroup in security_groups:
            secgroup.save(context)
        self.security_groups.obj_reset_changes()

    def _save_fault(self, context):
        # NOTE(danms): I don't think we need to worry about this, do we?
        pass

    def _save_numa_topology(self, context):
        if self.numa_topology:
            self.numa_topology.instance_uuid = self.uuid
            self.numa_topology._save(context)
        else:
            objects.InstanceNUMATopology.delete_by_instance_uuid(
                context, self.uuid)

    def _save_pci_devices(self, context):
        # NOTE(yjiang5): All devices are held by the PCI tracker, and only the
        # PCI tracker is permitted to update the DB; any change to devices made
        # from here will be dropped.
        pass

    @base.remotable
    def save(self,
             context,
             expected_vm_state=None,
             expected_task_state=None,
             admin_state_reset=False):
        """Save updates to this instance

        Column-wise updates will be made based on the result of
        self.what_changed(). If expected_task_state is provided,
        it will be checked against the in-database copy of the
        instance before updates are made.

        :param:context: Security context
        :param:expected_task_state: Optional tuple of valid task states
        for the instance to be in
        :param:expected_vm_state: Optional tuple of valid vm states
        for the instance to be in
        :param admin_state_reset: True if admin API is forcing setting
        of task_state/vm_state

        """

        cell_type = cells_opts.get_cell_type()
        if cell_type == 'api' and self.cell_name:
            # NOTE(comstud): We need to stash a copy of ourselves
            # before any updates are applied.  When we call the save
            # methods on nested objects, we will lose any changes to
            # them.  But we need to make sure child cells can tell
            # what is changed.
            #
            # We also need to nuke any updates to vm_state and task_state
            # unless admin_state_reset is True.  compute cells are
            # authoritative for their view of vm_state and task_state.
            stale_instance = self.obj_clone()

            def _handle_cell_update_from_api():
                cells_api = cells_rpcapi.CellsAPI()
                cells_api.instance_update_from_api(context, stale_instance,
                                                   expected_vm_state,
                                                   expected_task_state,
                                                   admin_state_reset)
        else:
            stale_instance = None

        updates = {}
        changes = self.obj_what_changed()

        for field in self.fields:
            if (self.obj_attr_is_set(field)
                    and isinstance(self.fields[field], fields.ObjectField)):
                try:
                    getattr(self, '_save_%s' % field)(context)
                except AttributeError:
                    LOG.exception(_LE('No save handler for %s'),
                                  field,
                                  instance=self)
            elif field in changes:
                updates[field] = self[field]

        if not updates:
            if stale_instance:
                _handle_cell_update_from_api()
            return

        # Cleaned needs to be turned back into an int here
        if 'cleaned' in updates:
            if updates['cleaned']:
                updates['cleaned'] = 1
            else:
                updates['cleaned'] = 0

        if expected_task_state is not None:
            if (self.VERSION == '1.9'
                    and expected_task_state == 'image_snapshot'):
                # NOTE(danms): Icehouse introduced a pending state which
                # Havana doesn't know about. If we're an old instance,
                # tolerate the pending state as well
                expected_task_state = [
                    expected_task_state, 'image_snapshot_pending'
                ]
            updates['expected_task_state'] = expected_task_state
        if expected_vm_state is not None:
            updates['expected_vm_state'] = expected_vm_state

        expected_attrs = [
            attr for attr in _INSTANCE_OPTIONAL_JOINED_FIELDS
            if self.obj_attr_is_set(attr)
        ]
        if 'pci_devices' in expected_attrs:
            # NOTE(danms): We don't refresh pci_devices on save right now
            expected_attrs.remove('pci_devices')

        # NOTE(alaski): We need to pull system_metadata for the
        # notification.send_update() below.  If we don't there's a KeyError
        # when it tries to extract the flavor.
        if 'system_metadata' not in expected_attrs:
            expected_attrs.append('system_metadata')
        old_ref, inst_ref = db.instance_update_and_get_original(
            context,
            self.uuid,
            updates,
            update_cells=False,
            columns_to_join=_expected_cols(expected_attrs))

        if stale_instance:
            _handle_cell_update_from_api()
        elif cell_type == 'compute':
            cells_api = cells_rpcapi.CellsAPI()
            cells_api.instance_update_at_top(context, inst_ref)

        self._from_db_object(context,
                             self,
                             inst_ref,
                             expected_attrs=expected_attrs)
        notifications.send_update(context, old_ref, inst_ref)
        self.obj_reset_changes()

    @base.remotable
    def refresh(self, context, use_slave=False):
        extra = [
            field for field in INSTANCE_OPTIONAL_ATTRS
            if self.obj_attr_is_set(field)
        ]
        current = self.__class__.get_by_uuid(context,
                                             uuid=self.uuid,
                                             expected_attrs=extra,
                                             use_slave=use_slave)
        # NOTE(danms): We orphan the instance copy so we do not unexpectedly
        # trigger a lazy-load (which would mean we failed to calculate the
        # expected_attrs properly)
        current._context = None

        for field in self.fields:
            if self.obj_attr_is_set(field):
                if field == 'info_cache':
                    self.info_cache.refresh()
                    # NOTE(danms): Make sure this shows up as touched
                    self.info_cache = self.info_cache
                elif self[field] != current[field]:
                    self[field] = current[field]
        self.obj_reset_changes()

    def _load_generic(self, attrname):
        instance = self.__class__.get_by_uuid(self._context,
                                              uuid=self.uuid,
                                              expected_attrs=[attrname])

        # NOTE(danms): Never allow us to recursively-load
        if instance.obj_attr_is_set(attrname):
            self[attrname] = instance[attrname]
        else:
            raise exception.ObjectActionError(
                action='obj_load_attr',
                reason='loading %s requires recursion' % attrname)

    def _load_fault(self):
        self.fault = objects.InstanceFault.get_latest_for_instance(
            self._context, self.uuid)

    def _load_numa_topology(self):
        try:
            self.numa_topology = \
                objects.InstanceNUMATopology.get_by_instance_uuid(
                    self._context, self.uuid)
        except exception.NumaTopologyNotFound:
            self.numa_topology = None

    def obj_load_attr(self, attrname):
        if attrname not in INSTANCE_OPTIONAL_ATTRS:
            raise exception.ObjectActionError(
                action='obj_load_attr',
                reason='attribute %s not lazy-loadable' % attrname)
        if not self._context:
            raise exception.OrphanedObjectError(method='obj_load_attr',
                                                objtype=self.obj_name())

        LOG.debug("Lazy-loading `%(attr)s' on %(name)s uuid %(uuid)s", {
            'attr': attrname,
            'name': self.obj_name(),
            'uuid': self.uuid,
        })
        # FIXME(comstud): This should be optimized to only load the attr.
        if attrname == 'fault':
            # NOTE(danms): We handle fault differently here so that we
            # can be more efficient
            self._load_fault()
        elif attrname == 'numa_topology':
            self._load_numa_topology()
        else:
            self._load_generic(attrname)
        self.obj_reset_changes([attrname])

    def get_flavor(self, namespace=None):
        prefix = ('%s_' % namespace) if namespace is not None else ''

        db_flavor = flavors.extract_flavor(self, prefix)
        flavor = objects.Flavor(self._context)
        for key in flavors.system_metadata_flavor_props:
            flavor[key] = db_flavor[key]
        return flavor

    def set_flavor(self, flavor, namespace=None):
        prefix = ('%s_' % namespace) if namespace is not None else ''

        self.system_metadata = flavors.save_flavor_info(
            self.system_metadata, flavor, prefix)
        self.save()

    def delete_flavor(self, namespace):
        self.system_metadata = flavors.delete_flavor_info(
            self.system_metadata, "%s_" % namespace)
        self.save()

    @base.remotable
    def delete_metadata_key(self, context, key):
        """Optimized metadata delete method.

        This provides a more efficient way to delete a single metadata
        key, instead of just calling instance.save(). This should be called
        with the key still present in self.metadata, which it will update
        after completion.
        """
        db.instance_metadata_delete(context, self.uuid, key)
        md_was_changed = 'metadata' in self.obj_what_changed()
        del self.metadata[key]
        self._orig_metadata.pop(key, None)
        instance_dict = base.obj_to_primitive(self)
        notifications.send_update(context, instance_dict, instance_dict)
        if not md_was_changed:
            self.obj_reset_changes(['metadata'])
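
A sketch of the metadata tracking set up in __init__() and _reset_metadata_tracking() above: in-place edits to the dict are still reported by obj_what_changed() because the original contents are snapshotted. Hypothetical values; assumes the object can be built without a context.

inst = Instance(metadata={'role': 'web'}, system_metadata={})
inst.obj_reset_changes()            # snapshots _orig_metadata
inst.metadata['role'] = 'db'        # mutate in place, no attribute assignment
assert 'metadata' in inst.obj_what_changed()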
Example #10
class Instance(base.NovaPersistentObject, base.NovaObject,
               base.NovaObjectDictCompat):
    # Version 2.0: Initial version
    VERSION = '2.0'

    fields = {
        'id': fields.IntegerField(),
        'user_id': fields.StringField(nullable=True),
        'project_id': fields.StringField(nullable=True),
        'image_ref': fields.StringField(nullable=True),
        'kernel_id': fields.StringField(nullable=True),
        'ramdisk_id': fields.StringField(nullable=True),
        'hostname': fields.StringField(nullable=True),
        'launch_index': fields.IntegerField(nullable=True),
        'key_name': fields.StringField(nullable=True),
        'key_data': fields.StringField(nullable=True),
        'power_state': fields.IntegerField(nullable=True),
        'vm_state': fields.StringField(nullable=True),
        'task_state': fields.StringField(nullable=True),
        'memory_mb': fields.IntegerField(nullable=True),
        'vcpus': fields.IntegerField(nullable=True),
        'root_gb': fields.IntegerField(nullable=True),
        'ephemeral_gb': fields.IntegerField(nullable=True),
        'ephemeral_key_uuid': fields.UUIDField(nullable=True),
        'host': fields.StringField(nullable=True),
        'node': fields.StringField(nullable=True),
        'instance_type_id': fields.IntegerField(nullable=True),
        'user_data': fields.StringField(nullable=True),
        'reservation_id': fields.StringField(nullable=True),
        'launched_at': fields.DateTimeField(nullable=True),
        'terminated_at': fields.DateTimeField(nullable=True),
        'availability_zone': fields.StringField(nullable=True),
        'display_name': fields.StringField(nullable=True),
        'display_description': fields.StringField(nullable=True),
        'launched_on': fields.StringField(nullable=True),

        # NOTE(jdillaman): locked deprecated in favor of locked_by,
        # to be removed in Icehouse
        'locked': fields.BooleanField(default=False),
        'locked_by': fields.StringField(nullable=True),
        'os_type': fields.StringField(nullable=True),
        'architecture': fields.StringField(nullable=True),
        'vm_mode': fields.StringField(nullable=True),
        'uuid': fields.UUIDField(),
        'root_device_name': fields.StringField(nullable=True),
        'default_ephemeral_device': fields.StringField(nullable=True),
        'default_swap_device': fields.StringField(nullable=True),
        'config_drive': fields.StringField(nullable=True),
        'access_ip_v4': fields.IPV4AddressField(nullable=True),
        'access_ip_v6': fields.IPV6AddressField(nullable=True),
        'auto_disk_config': fields.BooleanField(default=False),
        'progress': fields.IntegerField(nullable=True),
        'shutdown_terminate': fields.BooleanField(default=False),
        'disable_terminate': fields.BooleanField(default=False),
        'cell_name': fields.StringField(nullable=True),
        'metadata': fields.DictOfStringsField(),
        'system_metadata': fields.DictOfNullableStringsField(),
        'info_cache': fields.ObjectField('InstanceInfoCache', nullable=True),
        'security_groups': fields.ObjectField('SecurityGroupList'),
        'fault': fields.ObjectField('InstanceFault', nullable=True),
        'cleaned': fields.BooleanField(default=False),
        'pci_devices': fields.ObjectField('PciDeviceList', nullable=True),
        'numa_topology': fields.ObjectField('InstanceNUMATopology',
                                            nullable=True),
        'pci_requests': fields.ObjectField('InstancePCIRequests',
                                           nullable=True),
        'tags': fields.ObjectField('TagList'),
        'flavor': fields.ObjectField('Flavor'),
        'old_flavor': fields.ObjectField('Flavor', nullable=True),
        'new_flavor': fields.ObjectField('Flavor', nullable=True),
        'vcpu_model': fields.ObjectField('VirtCPUModel', nullable=True),
        'ec2_ids': fields.ObjectField('EC2Ids'),
        'migration_context': fields.ObjectField('MigrationContext',
                                                nullable=True)
    }

    obj_extra_fields = ['name']

    def __init__(self, *args, **kwargs):
        super(Instance, self).__init__(*args, **kwargs)
        self._reset_metadata_tracking()

    def _reset_metadata_tracking(self, fields=None):
        if fields is None or 'system_metadata' in fields:
            self._orig_system_metadata = (dict(self.system_metadata)
                                          if 'system_metadata' in self else {})
        if fields is None or 'metadata' in fields:
            self._orig_metadata = (dict(self.metadata)
                                   if 'metadata' in self else {})

    def obj_reset_changes(self, fields=None, recursive=False):
        super(Instance, self).obj_reset_changes(fields, recursive=recursive)
        self._reset_metadata_tracking(fields=fields)

    def obj_what_changed(self):
        changes = super(Instance, self).obj_what_changed()
        if 'metadata' in self and self.metadata != self._orig_metadata:
            changes.add('metadata')
        if 'system_metadata' in self and (self.system_metadata !=
                                          self._orig_system_metadata):
            changes.add('system_metadata')
        return changes

    @classmethod
    def _obj_from_primitive(cls, context, objver, primitive):
        self = super(Instance,
                     cls)._obj_from_primitive(context, objver, primitive)
        self._reset_metadata_tracking()
        return self

    @property
    def name(self):
        try:
            base_name = CONF.instance_name_template % self.id
        except TypeError:
            # Support templates like "uuid-%(uuid)s", etc.
            info = {}
            # NOTE(russellb): Don't use self.iteritems() here, as it will
            # result in infinite recursion on the name property.
            for key in self.fields:
                if key == 'name':
                    # NOTE(danms): prevent recursion
                    continue
                elif not self.obj_attr_is_set(key):
                    # NOTE(danms): Don't trigger lazy-loads
                    continue
                info[key] = self[key]
            try:
                base_name = CONF.instance_name_template % info
            except KeyError:
                base_name = self.uuid
        return base_name

    def _flavor_from_db(self, db_flavor):
        """Load instance flavor information from instance_extra."""

        flavor_info = jsonutils.loads(db_flavor)

        self.flavor = objects.Flavor.obj_from_primitive(flavor_info['cur'])
        if flavor_info['old']:
            self.old_flavor = objects.Flavor.obj_from_primitive(
                flavor_info['old'])
        else:
            self.old_flavor = None
        if flavor_info['new']:
            self.new_flavor = objects.Flavor.obj_from_primitive(
                flavor_info['new'])
        else:
            self.new_flavor = None
        self.obj_reset_changes(['flavor', 'old_flavor', 'new_flavor'])

    @staticmethod
    def _from_db_object(context, instance, db_inst, expected_attrs=None):
        """Method to help with migration to objects.

        Converts a database entity to a formal object.
        """
        instance._context = context
        if expected_attrs is None:
            expected_attrs = []
        # Most of the field names match right now, so be quick
        for field in instance.fields:
            if field in INSTANCE_OPTIONAL_ATTRS:
                continue
            elif field == 'deleted':
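                # The DB stores the row's own id in the 'deleted' column when
                # a row is soft-deleted (and 0 otherwise), so equality with
                # 'id' means this instance has been deleted.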
                instance.deleted = db_inst['deleted'] == db_inst['id']
            elif field == 'cleaned':
                instance.cleaned = db_inst['cleaned'] == 1
            else:
                instance[field] = db_inst[field]

        # NOTE(danms): We can be called with a dict instead of a
        # SQLAlchemy object, so we have to be careful here
        if hasattr(db_inst, '__dict__'):
            have_extra = 'extra' in db_inst.__dict__ and db_inst['extra']
        else:
            have_extra = 'extra' in db_inst and db_inst['extra']

        if 'metadata' in expected_attrs:
            instance['metadata'] = utils.instance_meta(db_inst)
        if 'system_metadata' in expected_attrs:
            instance['system_metadata'] = utils.instance_sys_meta(db_inst)
        if 'fault' in expected_attrs:
            instance['fault'] = (objects.InstanceFault.get_latest_for_instance(
                context, instance.uuid))
        if 'numa_topology' in expected_attrs:
            if have_extra:
                instance._load_numa_topology(
                    db_inst['extra'].get('numa_topology'))
            else:
                instance.numa_topology = None
        if 'pci_requests' in expected_attrs:
            if have_extra:
                instance._load_pci_requests(
                    db_inst['extra'].get('pci_requests'))
            else:
                instance.pci_requests = None
        if 'vcpu_model' in expected_attrs:
            if have_extra:
                instance._load_vcpu_model(db_inst['extra'].get('vcpu_model'))
            else:
                instance.vcpu_model = None
        if 'ec2_ids' in expected_attrs:
            instance._load_ec2_ids()
        if 'migration_context' in expected_attrs:
            if have_extra:
                instance._load_migration_context(
                    db_inst['extra'].get('migration_context'))
            else:
                instance.migration_context = None
        if 'info_cache' in expected_attrs:
            if db_inst.get('info_cache') is None:
                instance.info_cache = None
            elif not instance.obj_attr_is_set('info_cache'):
                # TODO(danms): If this ever happens on a backlevel instance
                # passed to us by a backlevel service, things will break
                instance.info_cache = objects.InstanceInfoCache(context)
            if instance.info_cache is not None:
                instance.info_cache._from_db_object(context,
                                                    instance.info_cache,
                                                    db_inst['info_cache'])

        if any([
                x in expected_attrs
                for x in ('flavor', 'old_flavor', 'new_flavor')
        ]):
            if have_extra and db_inst['extra'].get('flavor'):
                instance._flavor_from_db(db_inst['extra']['flavor'])

        # TODO(danms): If we are updating these on a backlevel instance,
        # we'll end up sending back new versions of these objects (see
        # the note above for new info_caches).
        if 'pci_devices' in expected_attrs:
            pci_devices = base.obj_make_list(context,
                                             objects.PciDeviceList(context),
                                             objects.PciDevice,
                                             db_inst['pci_devices'])
            instance['pci_devices'] = pci_devices
        if 'security_groups' in expected_attrs:
            sec_groups = base.obj_make_list(context,
                                            objects.SecurityGroupList(context),
                                            objects.SecurityGroup,
                                            db_inst.get('security_groups', []))
            instance['security_groups'] = sec_groups

        if 'tags' in expected_attrs:
            tags = base.obj_make_list(context, objects.TagList(context),
                                      objects.Tag, db_inst['tags'])
            instance['tags'] = tags

        instance.obj_reset_changes()
        return instance

    @base.remotable_classmethod
    def get_by_uuid(cls, context, uuid, expected_attrs=None, use_slave=False):
        if expected_attrs is None:
            expected_attrs = ['info_cache', 'security_groups']
        columns_to_join = _expected_cols(expected_attrs)
        db_inst = db.instance_get_by_uuid(context,
                                          uuid,
                                          columns_to_join=columns_to_join,
                                          use_slave=use_slave)
        return cls._from_db_object(context, cls(), db_inst, expected_attrs)

    @base.remotable_classmethod
    def get_by_id(cls, context, inst_id, expected_attrs=None):
        if expected_attrs is None:
            expected_attrs = ['info_cache', 'security_groups']
        columns_to_join = _expected_cols(expected_attrs)
        db_inst = db.instance_get(context,
                                  inst_id,
                                  columns_to_join=columns_to_join)
        return cls._from_db_object(context, cls(), db_inst, expected_attrs)

    @base.remotable
    def create(self):
        if self.obj_attr_is_set('id'):
            raise exception.ObjectActionError(action='create',
                                              reason='already created')
        updates = self.obj_get_changes()
        expected_attrs = [
            attr for attr in INSTANCE_DEFAULT_FIELDS if attr in updates
        ]
        if 'security_groups' in updates:
            updates['security_groups'] = [
                x.name for x in updates['security_groups']
            ]
        if 'info_cache' in updates:
            updates['info_cache'] = {
                'network_info': updates['info_cache'].network_info.json()
            }
        updates['extra'] = {}
        numa_topology = updates.pop('numa_topology', None)
        if numa_topology:
            expected_attrs.append('numa_topology')
            updates['extra']['numa_topology'] = numa_topology._to_json()
        pci_requests = updates.pop('pci_requests', None)
        if pci_requests:
            expected_attrs.append('pci_requests')
            updates['extra']['pci_requests'] = (pci_requests.to_json())
        flavor = updates.pop('flavor', None)
        if flavor:
            expected_attrs.append('flavor')
            old = ((self.obj_attr_is_set('old_flavor') and self.old_flavor)
                   and self.old_flavor.obj_to_primitive() or None)
            new = ((self.obj_attr_is_set('new_flavor') and self.new_flavor)
                   and self.new_flavor.obj_to_primitive() or None)
            flavor_info = {
                'cur': self.flavor.obj_to_primitive(),
                'old': old,
                'new': new,
            }
            updates['extra']['flavor'] = jsonutils.dumps(flavor_info)
        vcpu_model = updates.pop('vcpu_model', None)
        if vcpu_model:
            expected_attrs.append('vcpu_model')
            updates['extra']['vcpu_model'] = (jsonutils.dumps(
                vcpu_model.obj_to_primitive()))
        db_inst = db.instance_create(self._context, updates)
        self._from_db_object(self._context, self, db_inst, expected_attrs)

    @base.remotable
    def destroy(self):
        if not self.obj_attr_is_set('id'):
            raise exception.ObjectActionError(action='destroy',
                                              reason='already destroyed')
        if not self.obj_attr_is_set('uuid'):
            raise exception.ObjectActionError(action='destroy',
                                              reason='no uuid')
        if not self.obj_attr_is_set('host') or not self.host:
            # NOTE(danms): If our host is not set, avoid a race
            constraint = db.constraint(host=db.equal_any(None))
        else:
            constraint = None

        cell_type = cells_opts.get_cell_type()
        if cell_type is not None:
            stale_instance = self.obj_clone()

        try:
            db_inst = db.instance_destroy(self._context,
                                          self.uuid,
                                          constraint=constraint)
            self._from_db_object(self._context, self, db_inst)
        except exception.ConstraintNotMet:
            raise exception.ObjectActionError(action='destroy',
                                              reason='host changed')
        if cell_type == 'compute':
            cells_api = cells_rpcapi.CellsAPI()
            cells_api.instance_destroy_at_top(self._context, stale_instance)
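        # Clearing 'id' leaves the object looking un-persisted again, so
        # obj_attr_is_set('id') returns False after destroy().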
        delattr(self, base.get_attrname('id'))

    def _save_info_cache(self, context):
        if self.info_cache:
            with self.info_cache.obj_alternate_context(context):
                self.info_cache.save()

    def _save_security_groups(self, context):
        security_groups = self.security_groups or []
        for secgroup in security_groups:
            with secgroup.obj_alternate_context(context):
                secgroup.save()
        self.security_groups.obj_reset_changes()

    def _save_fault(self, context):
        # NOTE(danms): I don't think we need to worry about this, do we?
        pass

    def _save_numa_topology(self, context):
        if self.numa_topology:
            self.numa_topology.instance_uuid = self.uuid
            with self.numa_topology.obj_alternate_context(context):
                self.numa_topology._save()
        else:
            objects.InstanceNUMATopology.delete_by_instance_uuid(
                context, self.uuid)

    def _save_pci_requests(self, context):
        # NOTE(danms): No need for this yet.
        pass

    def _save_pci_devices(self, context):
        # NOTE(yjiang5): All devices are held by the PCI tracker, and only
        # the PCI tracker is permitted to update the DB. Any changes to
        # devices made here will be dropped.
        pass

    def _save_flavor(self, context):
        if not any([
                x in self.obj_what_changed()
                for x in ('flavor', 'old_flavor', 'new_flavor')
        ]):
            return
        # FIXME(danms): We could do this more efficiently by updating this
        # along with all the other extra fields at the same time
        flavor_info = {
            'cur':
            self.flavor.obj_to_primitive(),
            'old': (self.old_flavor and self.old_flavor.obj_to_primitive()
                    or None),
            'new': (self.new_flavor and self.new_flavor.obj_to_primitive()
                    or None),
        }
        db.instance_extra_update_by_uuid(
            context, self.uuid, {'flavor': jsonutils.dumps(flavor_info)})
        self.obj_reset_changes(['flavor', 'old_flavor', 'new_flavor'])

    def _save_old_flavor(self, context):
        if 'old_flavor' in self.obj_what_changed():
            self._save_flavor(context)

    def _save_new_flavor(self, context):
        if 'new_flavor' in self.obj_what_changed():
            self._save_flavor(context)

    def _save_vcpu_model(self, context):
        # TODO(yjiang5): should merge the db accesses for all the extra
        # fields
        if 'vcpu_model' in self.obj_what_changed():
            if self.vcpu_model:
                update = jsonutils.dumps(self.vcpu_model.obj_to_primitive())
            else:
                update = None
            db.instance_extra_update_by_uuid(context, self.uuid,
                                             {'vcpu_model': update})

    def _save_ec2_ids(self, context):
        # NOTE(hanlind): Read-only so no need to save this.
        pass

    def _save_migration_context(self, context):
        if self.migration_context:
            self.migration_context.instance_uuid = self.uuid
            with self.migration_context.obj_alternate_context(context):
                self.migration_context._save()
        else:
            objects.MigrationContext._destroy(context, self.uuid)

    @base.remotable
    def save(self,
             expected_vm_state=None,
             expected_task_state=None,
             admin_state_reset=False):
        """Save updates to this instance

        Column-wise updates will be made based on the result of
        self.what_changed(). If expected_task_state is provided,
        it will be checked against the in-database copy of the
        instance before updates are made.

        :param expected_task_state: Optional tuple of valid task states
            for the instance to be in
        :param expected_vm_state: Optional tuple of valid vm states
            for the instance to be in
        :param admin_state_reset: True if the admin API is forcing the
            setting of task_state/vm_state

        """
        # Store this on the class because _cell_name_blocks_sync is useless
        # after the db update call below.
        self._sync_cells = not self._cell_name_blocks_sync()

        context = self._context
        cell_type = cells_opts.get_cell_type()

        if cell_type is not None:
            # NOTE(comstud): We need to stash a copy of ourselves
            # before any updates are applied.  When we call the save
            # methods on nested objects, we will lose any changes to
            # them.  But we need to make sure child cells can tell
            # what is changed.
            #
            # We also need to nuke any updates to vm_state and task_state
            # unless admin_state_reset is True.  compute cells are
            # authoritative for their view of vm_state and task_state.
            stale_instance = self.obj_clone()

        cells_update_from_api = (cell_type == 'api' and self.cell_name
                                 and self._sync_cells)

        if cells_update_from_api:

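            # Defined up front; called either immediately below when there is
            # nothing to update, or after the DB update has completed.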
            def _handle_cell_update_from_api():
                cells_api = cells_rpcapi.CellsAPI()
                cells_api.instance_update_from_api(context, stale_instance,
                                                   expected_vm_state,
                                                   expected_task_state,
                                                   admin_state_reset)

        updates = {}
        changes = self.obj_what_changed()

        for field in self.fields:
            # NOTE(danms): For object fields, we construct and call a
            # helper method like self._save_$attrname()
            if (self.obj_attr_is_set(field)
                    and isinstance(self.fields[field], fields.ObjectField)):
                try:
                    getattr(self, '_save_%s' % field)(context)
                except AttributeError:
                    LOG.exception(_LE('No save handler for %s'),
                                  field,
                                  instance=self)
                except db_exc.DBReferenceError as exp:
                    if exp.key != 'instance_uuid':
                        raise
                    # NOTE(melwitt): This will happen if we instance.save()
                    # before an instance.create() and FK constraint fails.
                    # In practice, this occurs in cells during a delete of
                    # an unscheduled instance. Otherwise, it could happen
                    # as a result of a bug.
                    raise exception.InstanceNotFound(instance_id=self.uuid)
            elif field in changes:
                if (field == 'cell_name' and self[field] is not None and
                        self[field].startswith(cells_utils.BLOCK_SYNC_FLAG)):
                    updates[field] = self[field].replace(
                        cells_utils.BLOCK_SYNC_FLAG, '', 1)
                else:
                    updates[field] = self[field]

        if not updates:
            if cells_update_from_api:
                _handle_cell_update_from_api()
            return

        # Cleaned needs to be turned back into an int here
        if 'cleaned' in updates:
            if updates['cleaned']:
                updates['cleaned'] = 1
            else:
                updates['cleaned'] = 0

        if expected_task_state is not None:
            updates['expected_task_state'] = expected_task_state
        if expected_vm_state is not None:
            updates['expected_vm_state'] = expected_vm_state

        expected_attrs = [
            attr for attr in _INSTANCE_OPTIONAL_JOINED_FIELDS
            if self.obj_attr_is_set(attr)
        ]
        if 'pci_devices' in expected_attrs:
            # NOTE(danms): We don't refresh pci_devices on save right now
            expected_attrs.remove('pci_devices')

        # NOTE(alaski): We need to pull system_metadata for the
        # notification.send_update() below.  If we don't, there's a KeyError
        # when it tries to extract the flavor.
        # NOTE(danms): If we have sysmeta, we need flavor since the caller
        # might be expecting flavor information as a result
        if 'system_metadata' not in expected_attrs:
            expected_attrs.append('system_metadata')
            expected_attrs.append('flavor')
        old_ref, inst_ref = db.instance_update_and_get_original(
            context,
            self.uuid,
            updates,
            columns_to_join=_expected_cols(expected_attrs))
        self._from_db_object(context,
                             self,
                             inst_ref,
                             expected_attrs=expected_attrs)

        if cells_update_from_api:
            _handle_cell_update_from_api()
        elif cell_type == 'compute':
            if self._sync_cells:
                cells_api = cells_rpcapi.CellsAPI()
                cells_api.instance_update_at_top(context, stale_instance)

        def _notify():
            # NOTE(danms): We have to be super careful here not to trigger
            # any lazy-loads that will unmigrate or unbackport something. So,
            # make a copy of the instance for notifications first.
            new_ref = self.obj_clone()

            notifications.send_update(context, old_ref, new_ref)

        # NOTE(alaski): If cell synchronization is blocked it means we have
        # already run this block of code in either the parent or child of this
        # cell.  Therefore this notification has already been sent.
        if not self._sync_cells:
            _notify = lambda: None  # noqa: F811

        _notify()

        self.obj_reset_changes()

    @base.remotable
    def refresh(self, use_slave=False):
        extra = [
            field for field in INSTANCE_OPTIONAL_ATTRS
            if self.obj_attr_is_set(field)
        ]
        current = self.__class__.get_by_uuid(self._context,
                                             uuid=self.uuid,
                                             expected_attrs=extra,
                                             use_slave=use_slave)
        # NOTE(danms): We orphan the instance copy so we do not unexpectedly
        # trigger a lazy-load (which would mean we failed to calculate the
        # expected_attrs properly)
        current._context = None

        for field in self.fields:
            if self.obj_attr_is_set(field):
                if field == 'info_cache':
                    self.info_cache.refresh()
                elif self[field] != current[field]:
                    self[field] = current[field]
        self.obj_reset_changes()

    def _load_generic(self, attrname):
        instance = self.__class__.get_by_uuid(self._context,
                                              uuid=self.uuid,
                                              expected_attrs=[attrname])

        # NOTE(danms): Never allow us to recursively-load
        if instance.obj_attr_is_set(attrname):
            self[attrname] = instance[attrname]
        else:
            raise exception.ObjectActionError(
                action='obj_load_attr',
                reason='loading %s requires recursion' % attrname)

    def _load_fault(self):
        self.fault = objects.InstanceFault.get_latest_for_instance(
            self._context, self.uuid)

    def _load_numa_topology(self, db_topology=None):
        if db_topology is not None:
            self.numa_topology = \
                objects.InstanceNUMATopology.obj_from_db_obj(self.uuid,
                                                             db_topology)
        else:
            try:
                self.numa_topology = \
                    objects.InstanceNUMATopology.get_by_instance_uuid(
                        self._context, self.uuid)
            except exception.NumaTopologyNotFound:
                self.numa_topology = None

    def _load_pci_requests(self, db_requests=None):
        # FIXME: also do this if none!
        if db_requests is not None:
            self.pci_requests = objects.InstancePCIRequests.obj_from_db(
                self._context, self.uuid, db_requests)
        else:
            self.pci_requests = \
                objects.InstancePCIRequests.get_by_instance_uuid(
                    self._context, self.uuid)

    def _load_flavor(self):
        instance = self.__class__.get_by_uuid(
            self._context,
            uuid=self.uuid,
            expected_attrs=['flavor', 'system_metadata'])

        # NOTE(danms): Orphan the instance to make sure we don't lazy-load
        # anything below
        instance._context = None
        self.flavor = instance.flavor
        self.old_flavor = instance.old_flavor
        self.new_flavor = instance.new_flavor

        # NOTE(danms): The query above may have migrated the flavor from
        # system_metadata. Since we have it anyway, go ahead and refresh
        # our system_metadata from it so that a save will be accurate.
        instance.system_metadata.update(self.get('system_metadata', {}))
        self.system_metadata = instance.system_metadata

    def _load_vcpu_model(self, db_vcpu_model=None):
        if db_vcpu_model is None:
            self.vcpu_model = objects.VirtCPUModel.get_by_instance_uuid(
                self._context, self.uuid)
        else:
            db_vcpu_model = jsonutils.loads(db_vcpu_model)
            self.vcpu_model = objects.VirtCPUModel.obj_from_primitive(
                db_vcpu_model)

    def _load_ec2_ids(self):
        self.ec2_ids = objects.EC2Ids.get_by_instance(self._context, self)

    def _load_migration_context(self, db_context=_NO_DATA_SENTINEL):
        if db_context is _NO_DATA_SENTINEL:
            try:
                self.migration_context = (
                    objects.MigrationContext.get_by_instance_uuid(
                        self._context, self.uuid))
            except exception.MigrationContextNotFound:
                self.migration_context = None
        elif db_context is None:
            self.migration_context = None
        else:
            self.migration_context = objects.MigrationContext.obj_from_db_obj(
                db_context)

    def apply_migration_context(self):
        if self.migration_context:
            self.numa_topology = self.migration_context.new_numa_topology
        else:
            LOG.debug(
                "Trying to apply a migration context that does not "
                "seem to be set for this instance",
                instance=self)

    def revert_migration_context(self):
        if self.migration_context:
            self.numa_topology = self.migration_context.old_numa_topology
        else:
            LOG.debug(
                "Trying to revert a migration context that does not "
                "seem to be set for this instance",
                instance=self)

    @contextlib.contextmanager
    def mutated_migration_context(self):
        """Context manager to temporarily apply the migration context.

        Calling .save() from within the context manager means that the mutated
        context will be saved which can cause incorrect resource tracking, and
        should be avoided.
        """
        current_numa_topo = self.numa_topology
        self.apply_migration_context()
        try:
            yield
        finally:
            self.numa_topology = current_numa_topo

    @base.remotable
    def drop_migration_context(self):
        if self.migration_context:
            objects.MigrationContext._destroy(self._context, self.uuid)
            self.migration_context = None

    def obj_load_attr(self, attrname):
        if attrname not in INSTANCE_OPTIONAL_ATTRS:
            raise exception.ObjectActionError(
                action='obj_load_attr',
                reason='attribute %s not lazy-loadable' % attrname)

        if not self._context:
            raise exception.OrphanedObjectError(method='obj_load_attr',
                                                objtype=self.obj_name())

        LOG.debug("Lazy-loading `%(attr)s' on %(name)s uuid %(uuid)s", {
            'attr': attrname,
            'name': self.obj_name(),
            'uuid': self.uuid,
        })

        # NOTE(danms): We handle some fields differently here so that we
        # can be more efficient
        if attrname == 'fault':
            self._load_fault()
        elif attrname == 'numa_topology':
            self._load_numa_topology()
        elif attrname == 'pci_requests':
            self._load_pci_requests()
        elif attrname == 'vcpu_model':
            self._load_vcpu_model()
        elif attrname == 'ec2_ids':
            self._load_ec2_ids()
        elif attrname == 'migration_context':
            self._load_migration_context()
        elif 'flavor' in attrname:
            self._load_flavor()
        else:
            # FIXME(comstud): This should be optimized to only load the attr.
            self._load_generic(attrname)
        self.obj_reset_changes([attrname])

    def get_flavor(self, namespace=None):
        prefix = ('%s_' % namespace) if namespace is not None else ''
        attr = '%sflavor' % prefix
        try:
            return getattr(self, attr)
        except exception.FlavorNotFound:
            # NOTE(danms): This only happens in the case where we don't
            # have flavor information in sysmeta or extra, and doing
            # this triggers a lookup based on our instance_type_id for
            # (very) legacy instances. That legacy code expects a None here,
            # so emulate it for this helper, even though the actual attribute
            # is not nullable.
            return None

    def set_flavor(self, flavor, namespace=None):
        prefix = ('%s_' % namespace) if namespace is not None else ''
        attr = '%sflavor' % prefix
        if not isinstance(flavor, objects.Flavor):
            flavor = objects.Flavor(**flavor)
        setattr(self, attr, flavor)

        self.save()

    def delete_flavor(self, namespace):
        prefix = ('%s_' % namespace) if namespace else ''
        attr = '%sflavor' % prefix
        setattr(self, attr, None)

        self.save()

    @base.remotable
    def delete_metadata_key(self, key):
        """Optimized metadata delete method.

        This provides a more efficient way to delete a single metadata
        key, instead of just calling instance.save(). This should be called
        with the key still present in self.metadata, which it will update
        after completion.
        """
        db.instance_metadata_delete(self._context, self.uuid, key)
        md_was_changed = 'metadata' in self.obj_what_changed()
        del self.metadata[key]
        self._orig_metadata.pop(key, None)
        notifications.send_update(self._context, self, self)
        if not md_was_changed:
            self.obj_reset_changes(['metadata'])

    def _cell_name_blocks_sync(self):
        if (self.obj_attr_is_set('cell_name') and self.cell_name is not None
                and self.cell_name.startswith(cells_utils.BLOCK_SYNC_FLAG)):
            return True
        return False

    def _normalize_cell_name(self):
        """Undo skip_cell_sync()'s cell_name modification if applied"""

        if not self.obj_attr_is_set('cell_name') or self.cell_name is None:
            return
        cn_changed = 'cell_name' in self.obj_what_changed()
        if self.cell_name.startswith(cells_utils.BLOCK_SYNC_FLAG):
            self.cell_name = self.cell_name.replace(
                cells_utils.BLOCK_SYNC_FLAG, '', 1)
            # cell_name is not normally an empty string; this means it was None
            # or unset before cells_utils.BLOCK_SYNC_FLAG was applied.
            if len(self.cell_name) == 0:
                self.cell_name = None
        if not cn_changed:
            self.obj_reset_changes(['cell_name'])

    @contextlib.contextmanager
    def skip_cells_sync(self):
        """Context manager to save an instance without syncing cells.

        Temporarily disables the cells syncing logic, if enabled.  This should
        only be used when saving an instance that has been passed down/up from
        another cell in order to avoid passing it back to the originator to be
        re-saved.
        """
        cn_changed = 'cell_name' in self.obj_what_changed()
        if not self.obj_attr_is_set('cell_name') or self.cell_name is None:
            self.cell_name = ''
        self.cell_name = '%s%s' % (cells_utils.BLOCK_SYNC_FLAG, self.cell_name)
        if not cn_changed:
            self.obj_reset_changes(['cell_name'])
        try:
            yield
        finally:
            self._normalize_cell_name()
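
A minimal usage sketch for the Instance object above, assuming a deployed Nova environment, an admin RequestContext and an existing instance UUID; the UUID string, the metadata key and the expected task state are placeholders, not values taken from the example.

from nova import context as nova_context
from nova import objects

objects.register_all()
ctxt = nova_context.get_admin_context()

# get_by_uuid() joins info_cache and security_groups unless told otherwise.
inst = objects.Instance.get_by_uuid(ctxt, 'an-existing-instance-uuid')

# Reading an optional attribute that was not joined goes through
# obj_load_attr(), which dispatches to the matching _load_*() helper.
topology = inst.numa_topology

# Metadata is compared against _orig_metadata, so save() only sends an
# update when obj_what_changed() reports a real difference.
inst.metadata['purpose'] = 'demo'
inst.save(expected_task_state=[None])
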
Example No. 11
class ComputeNode(base.NovaPersistentObject, base.NovaObject):
    # Version 1.0: Initial version
    # Version 1.1: Added get_by_service_id()
    # Version 1.2: String attributes updated to support unicode
    # Version 1.3: Added stats field
    # Version 1.4: Added host ip field
    # Version 1.5: Added numa_topology field
    # Version 1.6: Added supported_hv_specs
    # Version 1.7: Added host field
    # Version 1.8: Added get_by_host_and_nodename()
    # Version 1.9: Added pci_device_pools
    # Version 1.10: Added get_first_node_by_host_for_old_compat()
    # Version 1.11: PciDevicePoolList version 1.1
    # Version 1.12: HVSpec version 1.1
    # Version 1.13: Changed service_id field to be nullable
    # Version 1.14: Added cpu_allocation_ratio and ram_allocation_ratio
    # Version 1.15: Added uuid
    # Version 1.16: Added disk_allocation_ratio
    VERSION = '1.16'

    fields = {
        'id': fields.IntegerField(read_only=True),
        'uuid': fields.UUIDField(read_only=True),
        'service_id': fields.IntegerField(nullable=True),
        'host': fields.StringField(nullable=True),
        'vcpus': fields.IntegerField(),
        'memory_mb': fields.IntegerField(),
        'local_gb': fields.IntegerField(),
        'vcpus_used': fields.IntegerField(),
        'memory_mb_used': fields.IntegerField(),
        'local_gb_used': fields.IntegerField(),
        'hypervisor_type': fields.StringField(),
        'hypervisor_version': fields.IntegerField(),
        'hypervisor_hostname': fields.StringField(nullable=True),
        'free_ram_mb': fields.IntegerField(nullable=True),
        'free_disk_gb': fields.IntegerField(nullable=True),
        'current_workload': fields.IntegerField(nullable=True),
        'running_vms': fields.IntegerField(nullable=True),
        # TODO(melwitt): cpu_info is non-nullable in the schema but we must
        # wait until version 2.0 of ComputeNode to change it to non-nullable
        'cpu_info': fields.StringField(nullable=True),
        'disk_available_least': fields.IntegerField(nullable=True),
        'metrics': fields.StringField(nullable=True),
        'stats': fields.DictOfNullableStringsField(nullable=True),
        'host_ip': fields.IPAddressField(nullable=True),
        # TODO(rlrossit): because of history, numa_topology is held here as a
        # StringField, not a NUMATopology object. In version 2 of ComputeNode
        # this will be converted over to a fields.ObjectField('NUMATopology')
        'numa_topology': fields.StringField(nullable=True),
        # NOTE(pmurray): the supported_hv_specs field maps to the
        # supported_instances field in the database
        'supported_hv_specs': fields.ListOfObjectsField('HVSpec'),
        # NOTE(pmurray): the pci_device_pools field maps to the
        # pci_stats field in the database
        'pci_device_pools': fields.ObjectField('PciDevicePoolList',
                                               nullable=True),
        'cpu_allocation_ratio': fields.FloatField(),
        'ram_allocation_ratio': fields.FloatField(),
        'disk_allocation_ratio': fields.FloatField(),
    }

    def obj_make_compatible(self, primitive, target_version):
        super(ComputeNode, self).obj_make_compatible(primitive, target_version)
        target_version = versionutils.convert_version_to_tuple(target_version)
        if target_version < (1, 16):
            if 'disk_allocation_ratio' in primitive:
                del primitive['disk_allocation_ratio']
        if target_version < (1, 15):
            if 'uuid' in primitive:
                del primitive['uuid']
        if target_version < (1, 14):
            if 'ram_allocation_ratio' in primitive:
                del primitive['ram_allocation_ratio']
            if 'cpu_allocation_ratio' in primitive:
                del primitive['cpu_allocation_ratio']
        if target_version < (1, 13) and primitive.get('service_id') is None:
            # service_id is non-nullable in versions before 1.13
            try:
                service = objects.Service.get_by_compute_host(
                    self._context, primitive['host'])
                primitive['service_id'] = service.id
            except (exception.ComputeHostNotFound, KeyError):
                # NOTE(hanlind): In case anything goes wrong like service not
                # found or host not being set, catch and set a fake value just
                # to allow for older versions that demand a value to work.
                # Setting it to -1 will, if the value is later used, result
                # in a ServiceNotFound, so it should be safe.
                primitive['service_id'] = -1
        if target_version < (1, 7) and 'host' in primitive:
            del primitive['host']
        if target_version < (1, 5) and 'numa_topology' in primitive:
            del primitive['numa_topology']
        if target_version < (1, 4) and 'host_ip' in primitive:
            del primitive['host_ip']
        if target_version < (1, 3) and 'stats' in primitive:
            # pre-1.3 versions do not have a stats field
            del primitive['stats']

    @staticmethod
    def _host_from_db_object(compute, db_compute):
        if (('host' not in db_compute or db_compute['host'] is None)
                and 'service_id' in db_compute
                and db_compute['service_id'] is not None):
            # FIXME(sbauza): Unconverted compute record; provide compatibility
            # This has to stay until we can be sure that any/all compute nodes
            # in the database have been converted to use the host field

            # The service field of ComputeNode could be deprecated in a future
            # patch, so let's use the Service object directly
            try:
                service = objects.Service.get_by_id(compute._context,
                                                    db_compute['service_id'])
            except exception.ServiceNotFound:
                compute.host = None
                return
            try:
                compute.host = service.host
            except (AttributeError, exception.OrphanedObjectError):
                # Host can be nullable in Service
                compute.host = None
        elif 'host' in db_compute and db_compute['host'] is not None:
            # New-style DB having host as a field
            compute.host = db_compute['host']
        else:
            # We assume this should not happen, but just in case, set it to None
            compute.host = None

    @staticmethod
    def _from_db_object(context, compute, db_compute):
        special_cases = set([
            'stats',
            'supported_hv_specs',
            'host',
            'pci_device_pools',
        ])
        fields = set(compute.fields) - special_cases
        for key in fields:
            value = db_compute[key]
            # NOTE(sbauza): Since not all compute nodes necessarily run the
            # latest RT code that updates allocation ratios, we need to
            # provide a backwards compatible way of hydrating them.
            # Because we care about our operators and don't want to ask them
            # to change their configuration files before upgrading, we prefer
            # to hardcode the default values for the ratios here until the
            # next release (Newton), where the opt default values will be
            # restored for the cpu (16.0), ram (1.5) and disk (1.0)
            # allocation ratios.
            # TODO(sbauza): Remove this in the next major version bump, where
            # we break compatibility with old Liberty computes.
            if (key == 'cpu_allocation_ratio' or key == 'ram_allocation_ratio'
                    or key == 'disk_allocation_ratio'):
                if value == 0.0:
                    # Operator has not yet provided a new value for that ratio
                    # on the compute node
                    value = None
                if value is None:
                    # ResourceTracker is not updating the value (old node)
                    # or the compute node is updated but the default value has
                    # not been changed
                    value = getattr(CONF, key)
                    if value == 0.0 and key == 'cpu_allocation_ratio':
                        # It's not specified either on the controller
                        value = 16.0
                    if value == 0.0 and key == 'ram_allocation_ratio':
                        # It's not specified either on the controller
                        value = 1.5
                    if value == 0.0 and key == 'disk_allocation_ratio':
                        # It's not specified either on the controller
                        value = 1.0
            setattr(compute, key, value)

        stats = db_compute['stats']
        if stats:
            compute.stats = jsonutils.loads(stats)

        sup_insts = db_compute.get('supported_instances')
        if sup_insts:
            hv_specs = jsonutils.loads(sup_insts)
            hv_specs = [
                objects.HVSpec.from_list(hv_spec) for hv_spec in hv_specs
            ]
            compute.supported_hv_specs = hv_specs

        pci_stats = db_compute.get('pci_stats')
        if pci_stats is not None:
            pci_stats = pci_device_pool.from_pci_stats(pci_stats)
        compute.pci_device_pools = pci_stats
        compute._context = context

        # Make sure that we correctly set the host field depending on whether
        # the host column is present in the table or not
        compute._host_from_db_object(compute, db_compute)

        compute.obj_reset_changes()

        return compute

    @base.remotable_classmethod
    def get_by_id(cls, context, compute_id):
        db_compute = db.compute_node_get(context, compute_id)
        return cls._from_db_object(context, cls(), db_compute)

    # NOTE(hanlind): This is deprecated and should be removed on the next
    # major version bump
    @base.remotable_classmethod
    def get_by_service_id(cls, context, service_id):
        db_computes = db.compute_nodes_get_by_service_id(context, service_id)
        # NOTE(sbauza): The old version returned a single item; we need to
        # keep this behaviour for backwards compatibility
        db_compute = db_computes[0]
        return cls._from_db_object(context, cls(), db_compute)

    @base.remotable_classmethod
    def get_by_host_and_nodename(cls, context, host, nodename):
        db_compute = db.compute_node_get_by_host_and_nodename(
            context, host, nodename)
        return cls._from_db_object(context, cls(), db_compute)

    # TODO(pkholkin): Remove this method in the next major version bump
    @base.remotable_classmethod
    def get_first_node_by_host_for_old_compat(cls,
                                              context,
                                              host,
                                              use_slave=False):
        computes = ComputeNodeList.get_all_by_host(context, host, use_slave)
        # FIXME(sbauza): Some hypervisors (VMware, Ironic) can return multiple
        # nodes per host; we should return all the nodes and modify the callers
        # instead.
        # Arbitrarily returning the first node.
        return computes[0]

    @staticmethod
    def _convert_stats_to_db_format(updates):
        stats = updates.pop('stats', None)
        if stats is not None:
            updates['stats'] = jsonutils.dumps(stats)

    @staticmethod
    def _convert_host_ip_to_db_format(updates):
        host_ip = updates.pop('host_ip', None)
        if host_ip:
            updates['host_ip'] = str(host_ip)

    @staticmethod
    def _convert_supported_instances_to_db_format(updates):
        hv_specs = updates.pop('supported_hv_specs', None)
        if hv_specs is not None:
            hv_specs = [hv_spec.to_list() for hv_spec in hv_specs]
            updates['supported_instances'] = jsonutils.dumps(hv_specs)

    @staticmethod
    def _convert_pci_stats_to_db_format(updates):
        if 'pci_device_pools' in updates:
            pools = updates.pop('pci_device_pools')
            if pools is not None:
                pools = jsonutils.dumps(pools.obj_to_primitive())
            updates['pci_stats'] = pools

    def update_inventory(self):
        """Update inventory records from legacy model values."""

        inventory_list = \
            objects.InventoryList.get_all_by_resource_provider_uuid(
                self._context, self.uuid)
        if not inventory_list:
            return False

        for inventory in inventory_list:
            if inventory.resource_class == fields.ResourceClass.VCPU:
                key = 'vcpus'
            elif inventory.resource_class == fields.ResourceClass.MEMORY_MB:
                key = 'memory_mb'
            elif inventory.resource_class == fields.ResourceClass.DISK_GB:
                key = 'local_gb'
            else:
                LOG.warning(_LW('Unknown inventory class %s for compute node'),
                            inventory.resource_class)
                continue

            if key in self.obj_what_changed():
                inventory.total = getattr(self, key)
                inventory.save()

        return True

    def _ensure_resource_provider(self):
        shortname = self.host.split('.')[0]
        rp_name = 'compute-%s-%s' % (shortname, self.uuid)
        rp = objects.ResourceProvider(context=self._context,
                                      uuid=self.uuid,
                                      name=rp_name)
        try:
            rp.create()
        except db_exc.DBDuplicateEntry:
            rp = objects.ResourceProvider.get_by_uuid(self._context, self.uuid)
            if rp.name != rp_name:
                # FIXME(danms): We probably need a .save() operation on RP
                # so that we can update this
                LOG.warning(
                    _LW('Compute node %(uuid)s changed name '
                        'from %(old)s to %(new)s'), {
                            'uuid': self.uuid,
                            'old': rp.name,
                            'new': rp_name
                        })

        return rp

    def create_inventory(self):
        """Create the initial inventory objects for this compute node.

        This is only ever called once, either for the first time when a compute
        is created, or after an upgrade where the required services have
        reached the required version.
        """
        rp = self._ensure_resource_provider()

        cpu = objects.Inventory(context=self._context,
                                resource_provider=rp,
                                resource_class=fields.ResourceClass.VCPU,
                                total=self.vcpus,
                                reserved=0,
                                min_unit=1,
                                max_unit=1,
                                step_size=1,
                                allocation_ratio=self.cpu_allocation_ratio)
        cpu.create()

        mem = objects.Inventory(context=self._context,
                                resource_provider=rp,
                                resource_class=fields.ResourceClass.MEMORY_MB,
                                total=self.memory_mb,
                                reserved=0,
                                min_unit=1,
                                max_unit=1,
                                step_size=1,
                                allocation_ratio=self.ram_allocation_ratio)
        mem.create()

        # FIXME(danms): Eventually we want to not write this record
        # if the compute host is on shared storage. We'll need some
        # indication from it to that effect, so for now we always
        # write it so that we can make all the usual machinery depend
        # on these records instead of the legacy columns.
        disk = objects.Inventory(context=self._context,
                                 resource_provider=rp,
                                 resource_class=fields.ResourceClass.DISK_GB,
                                 total=self.local_gb,
                                 reserved=0,
                                 min_unit=1,
                                 max_unit=1,
                                 step_size=1,
                                 allocation_ratio=self.disk_allocation_ratio)
        disk.create()

    @base.remotable
    def create(self):
        if self.obj_attr_is_set('id'):
            raise exception.ObjectActionError(action='create',
                                              reason='already created')
        updates = self.obj_get_changes()
        if 'uuid' not in updates:
            updates['uuid'] = uuidutils.generate_uuid()
            self.uuid = updates['uuid']

        self._convert_stats_to_db_format(updates)
        self._convert_host_ip_to_db_format(updates)
        self._convert_supported_instances_to_db_format(updates)
        self._convert_pci_stats_to_db_format(updates)

        db_compute = db.compute_node_create(self._context, updates)
        self._from_db_object(self._context, self, db_compute)

    @base.remotable
    def save(self, prune_stats=False):
        # NOTE(belliott): ignore the prune_stats param; it is no longer relevant

        updates = self.obj_get_changes()
        updates.pop('id', None)
        self._convert_stats_to_db_format(updates)
        self._convert_host_ip_to_db_format(updates)
        self._convert_supported_instances_to_db_format(updates)
        self._convert_pci_stats_to_db_format(updates)

        db_compute = db.compute_node_update(self._context, self.id, updates)
        self._from_db_object(self._context, self, db_compute)

    @base.remotable
    def destroy(self):
        db.compute_node_delete(self._context, self.id)

    def update_from_virt_driver(self, resources):
        # NOTE(pmurray): the virt driver provides a dict of values that
        # can be copied into the compute node. The names and representation
        # do not exactly match.
        # TODO(pmurray): the resources dict should be formalized.
        keys = [
            "vcpus", "memory_mb", "local_gb", "cpu_info", "vcpus_used",
            "memory_mb_used", "local_gb_used", "numa_topology",
            "hypervisor_type", "hypervisor_version", "hypervisor_hostname",
            "disk_available_least", "host_ip"
        ]
        for key in keys:
            if key in resources:
                setattr(self, key, resources[key])

        # supported_instances has a different name in compute_node
        if 'supported_instances' in resources:
            si = resources['supported_instances']
            self.supported_hv_specs = [objects.HVSpec.from_list(s) for s in si]
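
The NOTE(pmurray) comment above describes the resources dict a virt driver reports; the following is a minimal sketch of how the create path could be exercised, assuming a valid RequestContext named ctxt. The host name and all resource values are illustrative placeholders rather than real driver output.

from nova import objects

# The resources dict below mirrors the shape a virt driver reports; the
# values are made up for illustration.
resources = {
    'vcpus': 8,
    'memory_mb': 16384,
    'local_gb': 200,
    'vcpus_used': 0,
    'memory_mb_used': 512,
    'local_gb_used': 10,
    'hypervisor_type': 'qemu',
    'hypervisor_version': 2005000,
    'hypervisor_hostname': 'compute-1',
    'cpu_info': '{}',
    'numa_topology': None,
    'disk_available_least': 190,
    'host_ip': '192.0.2.10',
    'supported_instances': [['x86_64', 'qemu', 'hvm']],
}

cn = objects.ComputeNode(context=ctxt, host='compute-1')
cn.update_from_virt_driver(resources)
# create() converts stats, host_ip, supported_hv_specs and pci_device_pools
# into their DB representations before inserting the row.
cn.create()
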
Example No. 12
class ComputeNode(base.NovaPersistentObject, base.NovaObject,
                  base.NovaObjectDictCompat):
    # Version 1.0: Initial version
    # Version 1.1: Added get_by_service_id()
    # Version 1.2: String attributes updated to support unicode
    # Version 1.3: Added stats field
    # Version 1.4: Added host ip field
    # Version 1.5: Added numa_topology field
    # Version 1.6: Added supported_hv_specs
    # Version 1.7: Added host field
    # Version 1.8: Added get_by_host_and_nodename()
    # Version 1.9: Added pci_device_pools
    # Version 1.10: Added get_first_node_by_host_for_old_compat()
    VERSION = '1.10'

    fields = {
        'id': fields.IntegerField(read_only=True),
        'service_id': fields.IntegerField(),
        'host': fields.StringField(nullable=True),
        'vcpus': fields.IntegerField(),
        'memory_mb': fields.IntegerField(),
        'local_gb': fields.IntegerField(),
        'vcpus_used': fields.IntegerField(),
        'memory_mb_used': fields.IntegerField(),
        'local_gb_used': fields.IntegerField(),
        'hypervisor_type': fields.StringField(),
        'hypervisor_version': fields.IntegerField(),
        'hypervisor_hostname': fields.StringField(nullable=True),
        'free_ram_mb': fields.IntegerField(nullable=True),
        'free_disk_gb': fields.IntegerField(nullable=True),
        'current_workload': fields.IntegerField(nullable=True),
        'running_vms': fields.IntegerField(nullable=True),
        'cpu_info': fields.StringField(nullable=True),
        'disk_available_least': fields.IntegerField(nullable=True),
        'metrics': fields.StringField(nullable=True),
        'stats': fields.DictOfNullableStringsField(nullable=True),
        'host_ip': fields.IPAddressField(nullable=True),
        'numa_topology': fields.StringField(nullable=True),
        # NOTE(pmurray): the supported_hv_specs field maps to the
        # supported_instances field in the database
        'supported_hv_specs': fields.ListOfObjectsField('HVSpec'),
        # NOTE(pmurray): the pci_device_pools field maps to the
        # pci_stats field in the database
        'pci_device_pools': fields.ObjectField('PciDevicePoolList',
                                               nullable=True),
    }

    obj_relationships = {
        'pci_device_pools': [('1.9', '1.0')],
        'supported_hv_specs': [('1.6', '1.0')],
    }

    def obj_make_compatible(self, primitive, target_version):
        super(ComputeNode, self).obj_make_compatible(primitive, target_version)
        target_version = utils.convert_version_to_tuple(target_version)
        if target_version < (1, 7) and 'host' in primitive:
            del primitive['host']
        if target_version < (1, 5) and 'numa_topology' in primitive:
            del primitive['numa_topology']
        if target_version < (1, 4) and 'host_ip' in primitive:
            del primitive['host_ip']
        if target_version < (1, 3) and 'stats' in primitive:
            # pre-1.3 versions do not have a stats field
            del primitive['stats']

    @staticmethod
    def _host_from_db_object(compute, db_compute):
        if (('host' not in db_compute or db_compute['host'] is None)
                and 'service_id' in db_compute
                and db_compute['service_id'] is not None):
            # FIXME(sbauza): Unconverted compute record; provide compatibility
            # This has to stay until we can be sure that any/all compute nodes
            # in the database have been converted to use the host field

            # The service field of ComputeNode could be deprecated in a future
            # patch, so let's use the Service object directly
            try:
                service = objects.Service.get_by_id(compute._context,
                                                    db_compute['service_id'])
            except exception.ServiceNotFound:
                compute['host'] = None
                return
            try:
                compute['host'] = service.host
            except (AttributeError, exception.OrphanedObjectError):
                # Host can be nullable in Service
                compute['host'] = None
        elif 'host' in db_compute and db_compute['host'] is not None:
            # New-style DB having host as a field
            compute['host'] = db_compute['host']
        else:
            # We assume this should not happen, but just in case, set it to None
            compute['host'] = None

    @staticmethod
    def _from_db_object(context, compute, db_compute):
        special_cases = set([
            'stats',
            'supported_hv_specs',
            'host',
            'pci_device_pools',
        ])
        fields = set(compute.fields) - special_cases
        for key in fields:
            compute[key] = db_compute[key]

        stats = db_compute['stats']
        if stats:
            compute['stats'] = jsonutils.loads(stats)

        sup_insts = db_compute.get('supported_instances')
        if sup_insts:
            hv_specs = jsonutils.loads(sup_insts)
            hv_specs = [
                objects.HVSpec.from_list(hv_spec) for hv_spec in hv_specs
            ]
            compute['supported_hv_specs'] = hv_specs

        pci_stats = db_compute.get('pci_stats')
        compute.pci_device_pools = pci_device_pool.from_pci_stats(pci_stats)
        compute._context = context

        # Make sure that we correctly set the host field depending on whether
        # the host column is present in the table or not
        compute._host_from_db_object(compute, db_compute)

        compute.obj_reset_changes()
        return compute

    @base.remotable_classmethod
    def get_by_id(cls, context, compute_id):
        db_compute = db.compute_node_get(context, compute_id)
        return cls._from_db_object(context, cls(), db_compute)

    @base.remotable_classmethod
    def get_by_service_id(cls, context, service_id):
        db_computes = db.compute_nodes_get_by_service_id(context, service_id)
        # NOTE(sbauza): The old version returned a single item; we need to
        # keep this behaviour for backwards compatibility
        db_compute = db_computes[0]
        return cls._from_db_object(context, cls(), db_compute)

    @base.remotable_classmethod
    def get_by_host_and_nodename(cls, context, host, nodename):
        try:
            db_compute = db.compute_node_get_by_host_and_nodename(
                context, host, nodename)
        except exception.ComputeHostNotFound:
            # FIXME(sbauza): Some old computes can still have no host record,
            # so we need to provide compatibility by using the old service_id
            # record.
            # We accept the extra penalty of one more DB call for this
            # compatibility, but that's necessary until all nodes are upgraded.
            try:
                service = objects.Service.get_by_compute_host(context, host)
                db_computes = db.compute_nodes_get_by_service_id(
                    context, service.id)
            except exception.ServiceNotFound:
                # We need to provide the same exception upstream
                raise exception.ComputeHostNotFound(host=host)
            db_compute = None
            for compute in db_computes:
                if compute['hypervisor_hostname'] == nodename:
                    db_compute = compute
                    # We can avoid an extra call to Service object in
                    # _from_db_object
                    db_compute['host'] = service.host
                    break
            if not db_compute:
                raise exception.ComputeHostNotFound(host=host)
        return cls._from_db_object(context, cls(), db_compute)

    @base.remotable_classmethod
    def get_first_node_by_host_for_old_compat(cls,
                                              context,
                                              host,
                                              use_slave=False):
        computes = ComputeNodeList.get_all_by_host(context, host, use_slave)
        # FIXME(sbauza): Some hypervisors (VMware, Ironic) can return multiple
        # nodes per host; we should return all the nodes and modify the callers
        # instead.
        # Arbitrarily returning the first node.
        return computes[0]

    @staticmethod
    def _convert_stats_to_db_format(updates):
        stats = updates.pop('stats', None)
        if stats is not None:
            updates['stats'] = jsonutils.dumps(stats)

    @staticmethod
    def _convert_host_ip_to_db_format(updates):
        host_ip = updates.pop('host_ip', None)
        if host_ip:
            updates['host_ip'] = str(host_ip)

    @staticmethod
    def _convert_supported_instances_to_db_format(updates):
        hv_specs = updates.pop('supported_hv_specs', None)
        if hv_specs is not None:
            hv_specs = [hv_spec.to_list() for hv_spec in hv_specs]
            updates['supported_instances'] = jsonutils.dumps(hv_specs)

    @staticmethod
    def _convert_pci_stats_to_db_format(updates):
        pools = updates.pop('pci_device_pools', None)
        if pools:
            updates['pci_stats'] = jsonutils.dumps(pools.obj_to_primitive())

    @base.remotable
    def create(self, context):
        if self.obj_attr_is_set('id'):
            raise exception.ObjectActionError(action='create',
                                              reason='already created')
        updates = self.obj_get_changes()
        self._convert_stats_to_db_format(updates)
        self._convert_host_ip_to_db_format(updates)
        self._convert_supported_instances_to_db_format(updates)
        self._convert_pci_stats_to_db_format(updates)

        db_compute = db.compute_node_create(context, updates)
        self._from_db_object(context, self, db_compute)

    @base.remotable
    def save(self, context, prune_stats=False):
        # NOTE(belliott) ignore prune_stats param, no longer relevant

        updates = self.obj_get_changes()
        updates.pop('id', None)
        self._convert_stats_to_db_format(updates)
        self._convert_host_ip_to_db_format(updates)
        self._convert_supported_instances_to_db_format(updates)
        self._convert_pci_stats_to_db_format(updates)

        db_compute = db.compute_node_update(context, self.id, updates)
        self._from_db_object(context, self, db_compute)

    @base.remotable
    def destroy(self, context):
        db.compute_node_delete(context, self.id)

    @property
    def service(self):
        if not hasattr(self, '_cached_service'):
            self._cached_service = objects.Service.get_by_id(
                self._context, self.service_id)
        return self._cached_service
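
As a hedged aside: the create()/save() path above flattens the stats dict into a JSON string for the DB column, and _from_db_object() parses it back. A minimal standalone sketch of that round-trip, using the stdlib json module in place of oslo_serialization's jsonutils (the field values are made up):

import json

def stats_to_db(updates):
    # mirrors _convert_stats_to_db_format: dict -> JSON text column
    stats = updates.pop('stats', None)
    if stats is not None:
        updates['stats'] = json.dumps(stats)
    return updates

def stats_from_db(db_row):
    # mirrors the hydration in _from_db_object: JSON text -> dict (or {})
    raw = db_row.get('stats')
    return json.loads(raw) if raw else {}

row = stats_to_db({'stats': {'failed_builds': '2'}})
print(row)                 # {'stats': '{"failed_builds": "2"}'}
print(stats_from_db(row))  # {'failed_builds': '2'}
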
Ejemplo n.º 13
0
class ComputeNode(base.NovaPersistentObject, base.NovaObject):
    # Version 1.0: Initial version
    # Version 1.1: Added get_by_service_id()
    # Version 1.2: String attributes updated to support unicode
    # Version 1.3: Added stats field
    VERSION = '1.3'

    fields = {
        'id': fields.IntegerField(),
        'service_id': fields.IntegerField(),
        'vcpus': fields.IntegerField(),
        'memory_mb': fields.IntegerField(),
        'local_gb': fields.IntegerField(),
        'vcpus_used': fields.IntegerField(),
        'memory_mb_used': fields.IntegerField(),
        'local_gb_used': fields.IntegerField(),
        'hypervisor_type': fields.StringField(),
        'hypervisor_version': fields.IntegerField(),
        'hypervisor_hostname': fields.StringField(nullable=True),
        'free_ram_mb': fields.IntegerField(nullable=True),
        'free_disk_gb': fields.IntegerField(nullable=True),
        'current_workload': fields.IntegerField(nullable=True),
        'running_vms': fields.IntegerField(nullable=True),
        'cpu_info': fields.StringField(nullable=True),
        'disk_available_least': fields.IntegerField(nullable=True),
        'metrics': fields.StringField(nullable=True),
        'stats': fields.DictOfNullableStringsField(nullable=True),
    }

    def obj_make_compatible(self, primitive, target_version):
        target_version = (int(target_version.split('.')[0]),
                          int(target_version.split('.')[1]))
        if target_version < (1, 3) and 'stats' in primitive:
            # pre 1.3 version does not have a stats field
            del primitive['stats']

    @staticmethod
    def _from_db_object(context, compute, db_compute):

        fields = set(compute.fields) - set(['stats'])
        for key in fields:
            compute[key] = db_compute[key]

        stats = db_compute['stats']
        if stats:
            compute['stats'] = jsonutils.loads(stats)
        else:
            compute['stats'] = {}

        compute._context = context
        compute.obj_reset_changes()
        return compute

    @base.remotable_classmethod
    def get_by_id(cls, context, compute_id):
        db_compute = db.compute_node_get(context, compute_id)
        return cls._from_db_object(context, cls(), db_compute)

    @base.remotable_classmethod
    def get_by_service_id(cls, context, service_id):
        db_compute = db.compute_node_get_by_service_id(context, service_id)
        return cls._from_db_object(context, cls(), db_compute)

    @base.remotable
    def create(self, context):
        updates = self.obj_get_changes()
        db_compute = db.compute_node_create(context, updates)
        self._from_db_object(context, self, db_compute)

    @base.remotable
    def save(self, context, prune_stats=False):
        # NOTE(belliott) ignore prune_stats param, no longer relevant
        updates = self.obj_get_changes()
        updates.pop('id', None)

        stats = updates.pop('stats', None)
        if stats is not None:
            updates['stats'] = jsonutils.dumps(stats)

        db_compute = db.compute_node_update(context, self.id, updates)
        self._from_db_object(context, self, db_compute)

    @base.remotable
    def destroy(self, context):
        db.compute_node_delete(context, self.id)

    @property
    def service(self):
        # NOTE(danms): avoid a circular import here
        if not hasattr(self, '_cached_service'):
            from nova.objects import service
            self._cached_service = service.Service.get_by_id(
                self._context, self.service_id)
        return self._cached_service
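
To illustrate the downgrade logic in obj_make_compatible() above, here is a small standalone sketch (plain dicts stand in for the object primitive; the field values are hypothetical): fields unknown to the requested version are simply dropped before the primitive is handed to the older node.

def make_compatible(primitive, target_version):
    # parse 'X.Y' into a comparable (major, minor) tuple, as the method does
    major, minor = (int(part) for part in target_version.split('.'))
    if (major, minor) < (1, 3):
        # pre-1.3 receivers do not know about the stats field
        primitive.pop('stats', None)
    return primitive

print(make_compatible({'vcpus': 8, 'stats': {'failed_builds': '0'}}, '1.2'))
# {'vcpus': 8}
print(make_compatible({'vcpus': 8, 'stats': {'failed_builds': '0'}}, '1.3'))
# {'vcpus': 8, 'stats': {'failed_builds': '0'}}
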
Ejemplo n.º 14
0
class Instance(base.NovaPersistentObject, base.NovaObject):
    # Version 1.0: Initial version
    # Version 1.1: Added info_cache
    # Version 1.2: Added security_groups
    # Version 1.3: Added expected_vm_state and admin_state_reset to
    #              save()
    # Version 1.4: Added locked_by and deprecated locked
    # Version 1.5: Added cleaned
    # Version 1.6: Added pci_devices
    # Version 1.7: String attributes updated to support unicode
    # Version 1.8: 'security_groups' and 'pci_devices' cannot be None
    # Version 1.9: Make uuid a non-None real string
    # Version 1.10: Added use_slave to refresh and get_by_uuid
    VERSION = '1.10'

    fields = {
        'id': fields.IntegerField(),
        'user_id': fields.StringField(nullable=True),
        'project_id': fields.StringField(nullable=True),
        'image_ref': fields.StringField(nullable=True),
        'kernel_id': fields.StringField(nullable=True),
        'ramdisk_id': fields.StringField(nullable=True),
        'hostname': fields.StringField(nullable=True),
        'launch_index': fields.IntegerField(nullable=True),
        'key_name': fields.StringField(nullable=True),
        'key_data': fields.StringField(nullable=True),
        'power_state': fields.IntegerField(nullable=True),
        'vm_state': fields.StringField(nullable=True),
        'task_state': fields.StringField(nullable=True),
        'memory_mb': fields.IntegerField(nullable=True),
        'vcpus': fields.IntegerField(nullable=True),
        'root_gb': fields.IntegerField(nullable=True),
        'ephemeral_gb': fields.IntegerField(nullable=True),
        'host': fields.StringField(nullable=True),
        'node': fields.StringField(nullable=True),
        'instance_type_id': fields.IntegerField(nullable=True),
        'user_data': fields.StringField(nullable=True),
        'reservation_id': fields.StringField(nullable=True),
        'scheduled_at': fields.DateTimeField(nullable=True),
        'launched_at': fields.DateTimeField(nullable=True),
        'terminated_at': fields.DateTimeField(nullable=True),
        'availability_zone': fields.StringField(nullable=True),
        'display_name': fields.StringField(nullable=True),
        'display_description': fields.StringField(nullable=True),
        'launched_on': fields.StringField(nullable=True),

        # NOTE(jdillaman): locked deprecated in favor of locked_by,
        # to be removed in Icehouse
        'locked': fields.BooleanField(default=False),
        'locked_by': fields.StringField(nullable=True),
        'os_type': fields.StringField(nullable=True),
        'architecture': fields.StringField(nullable=True),
        'vm_mode': fields.StringField(nullable=True),
        'uuid': fields.UUIDField(),
        'root_device_name': fields.StringField(nullable=True),
        'default_ephemeral_device': fields.StringField(nullable=True),
        'default_swap_device': fields.StringField(nullable=True),
        'config_drive': fields.StringField(nullable=True),
        'access_ip_v4': fields.IPV4AddressField(nullable=True),
        'access_ip_v6': fields.IPV6AddressField(nullable=True),
        'auto_disk_config': fields.BooleanField(default=False),
        'progress': fields.IntegerField(nullable=True),
        'shutdown_terminate': fields.BooleanField(default=False),
        'disable_terminate': fields.BooleanField(default=False),
        'cell_name': fields.StringField(nullable=True),
        'metadata': fields.DictOfStringsField(),
        'system_metadata': fields.DictOfNullableStringsField(),
        'info_cache': fields.ObjectField('InstanceInfoCache', nullable=True),
        'security_groups': fields.ObjectField('SecurityGroupList'),
        'fault': fields.ObjectField('InstanceFault', nullable=True),
        'cleaned': fields.BooleanField(default=False),
        'pci_devices': fields.ObjectField('PciDeviceList', nullable=True),
    }

    obj_extra_fields = ['name']

    def __init__(self, *args, **kwargs):
        super(Instance, self).__init__(*args, **kwargs)
        self._reset_metadata_tracking()

    def _reset_metadata_tracking(self):
        self._orig_system_metadata = (dict(self.system_metadata)
                                      if 'system_metadata' in self else {})
        self._orig_metadata = (dict(self.metadata)
                               if 'metadata' in self else {})

    def obj_reset_changes(self, fields=None):
        super(Instance, self).obj_reset_changes(fields)
        self._reset_metadata_tracking()

    def obj_what_changed(self):
        changes = super(Instance, self).obj_what_changed()
        if 'metadata' in self and self.metadata != self._orig_metadata:
            changes.add('metadata')
        if 'system_metadata' in self and (self.system_metadata !=
                                          self._orig_system_metadata):
            changes.add('system_metadata')
        return changes

    @property
    def name(self):
        try:
            base_name = CONF.instance_name_template % self.id
        except TypeError:
            # Support templates like "uuid-%(uuid)s", etc.
            info = {}
            # NOTE(russellb): Don't use self.iteritems() here, as it will
            # result in infinite recursion on the name property.
            for key in self.fields:
                if key == 'name':
                    # NOTE(danms): prevent recursion
                    continue
                elif not self.obj_attr_is_set(key):
                    # NOTE(danms): Don't trigger lazy-loads
                    continue
                info[key] = self[key]
            try:
                base_name = CONF.instance_name_template % info
            except KeyError:
                base_name = self.uuid
        return base_name

    @staticmethod
    def _from_db_object(context, instance, db_inst, expected_attrs=None):
        """Method to help with migration to objects.

        Converts a database entity to a formal object.
        """
        if expected_attrs is None:
            expected_attrs = []
        # Most of the field names match right now, so be quick
        for field in instance.fields:
            if field in INSTANCE_OPTIONAL_ATTRS:
                continue
            elif field == 'deleted':
                instance.deleted = db_inst['deleted'] == db_inst['id']
            elif field == 'cleaned':
                instance.cleaned = db_inst['cleaned'] == 1
            else:
                instance[field] = db_inst[field]

        if 'metadata' in expected_attrs:
            instance['metadata'] = utils.instance_meta(db_inst)
        if 'system_metadata' in expected_attrs:
            instance['system_metadata'] = utils.instance_sys_meta(db_inst)
        if 'fault' in expected_attrs:
            instance['fault'] = (
                instance_fault.InstanceFault.get_latest_for_instance(
                    context, instance.uuid))

        if 'pci_devices' in expected_attrs:
            pci_devices = pci_device._make_pci_list(context,
                                                    pci_device.PciDeviceList(),
                                                    db_inst['pci_devices'])
            instance['pci_devices'] = pci_devices
        if 'info_cache' in expected_attrs:
            if db_inst['info_cache'] is None:
                info_cache = None
            else:
                info_cache = instance_info_cache.InstanceInfoCache()
                instance_info_cache.InstanceInfoCache._from_db_object(
                    context, info_cache, db_inst['info_cache'])
            instance['info_cache'] = info_cache
        if 'security_groups' in expected_attrs:
            sec_groups = security_group._make_secgroup_list(
                context, security_group.SecurityGroupList(),
                db_inst['security_groups'])
            instance['security_groups'] = sec_groups

        instance._context = context
        instance.obj_reset_changes()
        return instance

    @base.remotable_classmethod
    def get_by_uuid(cls, context, uuid, expected_attrs=None, use_slave=False):
        if expected_attrs is None:
            expected_attrs = ['info_cache', 'security_groups']
        columns_to_join = _expected_cols(expected_attrs)
        db_inst = db.instance_get_by_uuid(context,
                                          uuid,
                                          columns_to_join=columns_to_join,
                                          use_slave=use_slave)
        return cls._from_db_object(context, cls(), db_inst, expected_attrs)

    @base.remotable_classmethod
    def get_by_id(cls, context, inst_id, expected_attrs=None):
        if expected_attrs is None:
            expected_attrs = ['info_cache', 'security_groups']
        columns_to_join = _expected_cols(expected_attrs)
        db_inst = db.instance_get(context,
                                  inst_id,
                                  columns_to_join=columns_to_join)
        return cls._from_db_object(context, cls(), db_inst, expected_attrs)

    @base.remotable
    def create(self, context):
        if self.obj_attr_is_set('id'):
            raise exception.ObjectActionError(action='create',
                                              reason='already created')
        updates = self.obj_get_changes()
        updates.pop('id', None)
        expected_attrs = [
            attr for attr in INSTANCE_DEFAULT_FIELDS if attr in updates
        ]
        if 'security_groups' in updates:
            updates['security_groups'] = [
                x.name for x in updates['security_groups']
            ]
        if 'info_cache' in updates:
            updates['info_cache'] = {
                'network_info': updates['info_cache'].network_info.json()
            }
        db_inst = db.instance_create(context, updates)
        Instance._from_db_object(context, self, db_inst, expected_attrs)

    @base.remotable
    def destroy(self, context):
        if not self.obj_attr_is_set('id'):
            raise exception.ObjectActionError(action='destroy',
                                              reason='already destroyed')
        if not self.obj_attr_is_set('uuid'):
            raise exception.ObjectActionError(action='destroy',
                                              reason='no uuid')
        if not self.obj_attr_is_set('host') or not self.host:
            # NOTE(danms): If our host is not set, avoid a race
            constraint = db.constraint(host=db.equal_any(None))
        else:
            constraint = None

        try:
            db.instance_destroy(context, self.uuid, constraint=constraint)
        except exception.ConstraintNotMet:
            raise exception.ObjectActionError(action='destroy',
                                              reason='host changed')
        delattr(self, base.get_attrname('id'))

    def _save_info_cache(self, context):
        self.info_cache.save(context)

    def _save_security_groups(self, context):
        for secgroup in self.security_groups:
            secgroup.save(context)

    def _save_fault(self, context):
        # NOTE(danms): I don't think we need to worry about this, do we?
        pass

    def _save_pci_devices(self, context):
        # NOTE(yjiang5): All devices are held by the PCI tracker, and only
        # the PCI tracker is permitted to update the DB. Any changes to
        # devices made here will be dropped.
        pass

    @base.remotable
    def save(self,
             context,
             expected_vm_state=None,
             expected_task_state=None,
             admin_state_reset=False):
        """Save updates to this instance

        Column-wise updates will be made based on the result of
        self.what_changed(). If expected_task_state is provided,
        it will be checked against the in-database copy of the
        instance before updates are made.
        :param context: Security context
        :param expected_task_state: Optional tuple of valid task states
                                    for the instance to be in.
        :param expected_vm_state: Optional tuple of valid vm states
                                  for the instance to be in.
        :param admin_state_reset: True if admin API is forcing setting
                                  of task_state/vm_state.
        """

        cell_type = cells_opts.get_cell_type()
        if cell_type == 'api' and self.cell_name:
            # NOTE(comstud): We need to stash a copy of ourselves
            # before any updates are applied.  When we call the save
            # methods on nested objects, we will lose any changes to
            # them.  But we need to make sure child cells can tell
            # what is changed.
            #
            # We also need to nuke any updates to vm_state and task_state
            # unless admin_state_reset is True.  compute cells are
            # authoritative for their view of vm_state and task_state.
            stale_instance = self.obj_clone()

            def _handle_cell_update_from_api():
                cells_api = cells_rpcapi.CellsAPI()
                cells_api.instance_update_from_api(context, stale_instance,
                                                   expected_vm_state,
                                                   expected_task_state,
                                                   admin_state_reset)
        else:
            stale_instance = None

        updates = {}
        changes = self.obj_what_changed()
        for field in self.fields:
            if (self.obj_attr_is_set(field)
                    and isinstance(self[field], base.NovaObject)):
                try:
                    getattr(self, '_save_%s' % field)(context)
                except AttributeError:
                    LOG.exception(_('No save handler for %s') % field,
                                  instance=self)
            elif field in changes:
                updates[field] = self[field]

        if not updates:
            if stale_instance:
                _handle_cell_update_from_api()
            return

        # Cleaned needs to be turned back into an int here
        if 'cleaned' in updates:
            if updates['cleaned']:
                updates['cleaned'] = 1
            else:
                updates['cleaned'] = 0

        if expected_task_state is not None:
            updates['expected_task_state'] = expected_task_state
        if expected_vm_state is not None:
            updates['expected_vm_state'] = expected_vm_state

        expected_attrs = [
            attr for attr in _INSTANCE_OPTIONAL_JOINED_FIELDS
            if self.obj_attr_is_set(attr)
        ]
        # NOTE(alaski): We need to pull system_metadata for the
        # notification.send_update() below.  If we don't there's a KeyError
        # when it tries to extract the flavor.
        if 'system_metadata' not in expected_attrs:
            expected_attrs.append('system_metadata')
        old_ref, inst_ref = db.instance_update_and_get_original(
            context,
            self.uuid,
            updates,
            update_cells=False,
            columns_to_join=_expected_cols(expected_attrs))

        if stale_instance:
            _handle_cell_update_from_api()
        elif cell_type == 'compute':
            cells_api = cells_rpcapi.CellsAPI()
            cells_api.instance_update_at_top(context, inst_ref)

        self._from_db_object(context, self, inst_ref, expected_attrs)
        notifications.send_update(context, old_ref, inst_ref)
        self.obj_reset_changes()

    @base.remotable
    def refresh(self, context, use_slave=False):
        extra = [
            field for field in INSTANCE_OPTIONAL_ATTRS
            if self.obj_attr_is_set(field)
        ]
        current = self.__class__.get_by_uuid(context,
                                             uuid=self.uuid,
                                             expected_attrs=extra,
                                             use_slave=use_slave)
        # NOTE(danms): We orphan the instance copy so we do not unexpectedly
        # trigger a lazy-load (which would mean we failed to calculate the
        # expected_attrs properly)
        current._context = None

        for field in self.fields:
            if self.obj_attr_is_set(field) and self[field] != current[field]:
                self[field] = current[field]
        self.obj_reset_changes()

    def obj_load_attr(self, attrname):
        if attrname not in INSTANCE_OPTIONAL_ATTRS:
            raise exception.ObjectActionError(
                action='obj_load_attr',
                reason='attribute %s not lazy-loadable' % attrname)
        if not self._context:
            raise exception.OrphanedObjectError(method='obj_load_attr',
                                                objtype=self.obj_name())

        LOG.debug(_("Lazy-loading `%(attr)s' on %(name)s uuid %(uuid)s"), {
            'attr': attrname,
            'name': self.obj_name(),
            'uuid': self.uuid,
        })
        # FIXME(comstud): This should be optimized to only load the attr.
        instance = self.__class__.get_by_uuid(self._context,
                                              uuid=self.uuid,
                                              expected_attrs=[attrname])

        # NOTE(danms): Never allow us to recursively-load
        if instance.obj_attr_is_set(attrname):
            self[attrname] = instance[attrname]
        else:
            raise exception.ObjectActionError(
                action='obj_load_attr',
                reason='loading %s requires recursion' % attrname)
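
The _orig_metadata/_orig_system_metadata snapshots above exist because in-place mutation of a dict field never goes through attribute assignment, so the base object's change tracking alone would miss it. A minimal standalone sketch of the idea (the class and field names here are illustrative, not Nova's):

class TrackedMeta(object):
    def __init__(self, metadata):
        self.metadata = metadata
        # snapshot taken at load/reset time, like _reset_metadata_tracking()
        self._orig_metadata = dict(metadata)

    def what_changed(self):
        # compare current contents against the snapshot, like obj_what_changed()
        return {'metadata'} if self.metadata != self._orig_metadata else set()

obj = TrackedMeta({'role': 'web'})
print(obj.what_changed())    # set()
obj.metadata['role'] = 'db'  # in-place mutation, no attribute assignment
print(obj.what_changed())    # {'metadata'}
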
Ejemplo n.º 15
0
class ComputeNode(base.NovaPersistentObject, base.NovaObject,
                  base.NovaObjectDictCompat):
    # Version 1.0: Initial version
    # Version 1.1: Added get_by_service_id()
    # Version 1.2: String attributes updated to support unicode
    # Version 1.3: Added stats field
    # Version 1.4: Added host ip field
    # Version 1.5: Added numa_topology field
    # Version 1.6: Added supported_hv_specs
    # Version 1.7: Added host field
    # Version 1.8: Added get_by_host_and_nodename()
    # Version 1.9: Added pci_device_pools
    # Version 1.10: Added get_first_node_by_host_for_old_compat()
    # Version 1.11: PciDevicePoolList version 1.1
    # Version 1.12: HVSpec version 1.1
    # Version 1.13: Changed service_id field to be nullable
    # Version 1.14: Added cpu_allocation_ratio and ram_allocation_ratio
    VERSION = '1.14'

    fields = {
        'id': fields.IntegerField(read_only=True),
        'service_id': fields.IntegerField(nullable=True),
        'host': fields.StringField(nullable=True),
        'vcpus': fields.IntegerField(),
        'memory_mb': fields.IntegerField(),
        'local_gb': fields.IntegerField(),
        'vcpus_used': fields.IntegerField(),
        'memory_mb_used': fields.IntegerField(),
        'local_gb_used': fields.IntegerField(),
        'hypervisor_type': fields.StringField(),
        'hypervisor_version': fields.IntegerField(),
        'hypervisor_hostname': fields.StringField(nullable=True),
        'free_ram_mb': fields.IntegerField(nullable=True),
        'free_disk_gb': fields.IntegerField(nullable=True),
        'current_workload': fields.IntegerField(nullable=True),
        'running_vms': fields.IntegerField(nullable=True),
        'cpu_info': fields.StringField(nullable=True),
        'disk_available_least': fields.IntegerField(nullable=True),
        'metrics': fields.StringField(nullable=True),
        'stats': fields.DictOfNullableStringsField(nullable=True),
        'host_ip': fields.IPAddressField(nullable=True),
        'numa_topology': fields.StringField(nullable=True),
        # NOTE(pmurray): the supported_hv_specs field maps to the
        # supported_instances field in the database
        'supported_hv_specs': fields.ListOfObjectsField('HVSpec'),
        # NOTE(pmurray): the pci_device_pools field maps to the
        # pci_stats field in the database
        'pci_device_pools': fields.ObjectField('PciDevicePoolList',
                                               nullable=True),
        'cpu_allocation_ratio': fields.FloatField(),
        'ram_allocation_ratio': fields.FloatField(),
        }

    def obj_make_compatible(self, primitive, target_version):
        super(ComputeNode, self).obj_make_compatible(primitive, target_version)
        target_version = versionutils.convert_version_to_tuple(target_version)
        if target_version < (1, 14):
            if 'ram_allocation_ratio' in primitive:
                del primitive['ram_allocation_ratio']
            if 'cpu_allocation_ratio' in primitive:
                del primitive['cpu_allocation_ratio']
        if target_version < (1, 13) and primitive.get('service_id') is None:
            # service_id is non-nullable in versions before 1.13
            try:
                service = objects.Service.get_by_compute_host(
                    self._context, primitive['host'])
                primitive['service_id'] = service.id
            except (exception.ComputeHostNotFound, KeyError):
                # NOTE(hanlind): In case anything goes wrong like service not
                # found or host not being set, catch and set a fake value just
                # to allow for older versions that demand a value to work.
                # Setting to -1 will, if value is later used result in a
                # ServiceNotFound, so should be safe.
                primitive['service_id'] = -1
        if target_version < (1, 7) and 'host' in primitive:
            del primitive['host']
        if target_version < (1, 5) and 'numa_topology' in primitive:
            del primitive['numa_topology']
        if target_version < (1, 4) and 'host_ip' in primitive:
            del primitive['host_ip']
        if target_version < (1, 3) and 'stats' in primitive:
            # pre 1.3 version does not have a stats field
            del primitive['stats']

    @staticmethod
    def _host_from_db_object(compute, db_compute):
        if (('host' not in db_compute or db_compute['host'] is None)
                and 'service_id' in db_compute
                and db_compute['service_id'] is not None):
            # FIXME(sbauza): Unconverted compute record; provide compatibility.
            # This has to stay until we can be sure that any/all compute nodes
            # in the database have been converted to use the host field

            # The service field of ComputeNode could be deprecated in a future
            # patch, so let's use the Service object directly
            try:
                service = objects.Service.get_by_id(
                    compute._context, db_compute['service_id'])
            except exception.ServiceNotFound:
                compute['host'] = None
                return
            try:
                compute['host'] = service.host
            except (AttributeError, exception.OrphanedObjectError):
                # Host can be nullable in Service
                compute['host'] = None
        elif 'host' in db_compute and db_compute['host'] is not None:
            # New-style DB having host as a field
            compute['host'] = db_compute['host']
        else:
            # This should not happen, but just in case, set it to None
            compute['host'] = None

    @staticmethod
    def _from_db_object(context, compute, db_compute):
        special_cases = set([
            'stats',
            'supported_hv_specs',
            'host',
            'pci_device_pools',
            ])
        fields = set(compute.fields) - special_cases
        for key in fields:
            value = db_compute[key]
            # NOTE(sbauza): Since not all compute nodes necessarily run the
            # latest RT code that updates allocation ratios, we need to
            # provide a backwards compatible way of hydrating them.
            # Because we don't want to ask operators to change their
            # configuration files before upgrading, we prefer to hardcode the
            # default values for the ratios here until the next release
            # (Mitaka), where the opt default values will be restored for
            # both cpu (16.0) and ram (1.5) allocation ratios.
            # TODO(sbauza): Remove this in the next major version bump where
            # we break compatibility with old Kilo computes
            if key == 'cpu_allocation_ratio' or key == 'ram_allocation_ratio':
                if value == 0.0:
                    # Operator has not yet provided a new value for that ratio
                    # on the compute node
                    value = None
                if value is None:
                    # ResourceTracker is not updating the value (old node)
                    # or the compute node is updated but the default value has
                    # not been changed
                    value = getattr(CONF, key)
                    if value == 0.0 and key == 'cpu_allocation_ratio':
                        # It's not specified either on the controller
                        value = 16.0
                    if value == 0.0 and key == 'ram_allocation_ratio':
                        # It's not specified either on the controller
                        value = 1.5
            compute[key] = value

        stats = db_compute['stats']
        if stats:
            compute['stats'] = jsonutils.loads(stats)

        sup_insts = db_compute.get('supported_instances')
        if sup_insts:
            hv_specs = jsonutils.loads(sup_insts)
            hv_specs = [objects.HVSpec.from_list(hv_spec)
                        for hv_spec in hv_specs]
            compute['supported_hv_specs'] = hv_specs

        pci_stats = db_compute.get('pci_stats')
        compute.pci_device_pools = pci_device_pool.from_pci_stats(pci_stats)
        compute._context = context

        # Make sure that we correctly set the host field depending on whether
        # the host column is present in the table or not
        compute._host_from_db_object(compute, db_compute)

        compute.obj_reset_changes()
        return compute

    @base.remotable_classmethod
    def get_by_id(cls, context, compute_id):
        db_compute = db.compute_node_get(context, compute_id)
        return cls._from_db_object(context, cls(), db_compute)

    # NOTE(hanlind): This is deprecated and should be removed on the next
    # major version bump
    @base.remotable_classmethod
    def get_by_service_id(cls, context, service_id):
        db_computes = db.compute_nodes_get_by_service_id(context, service_id)
        # NOTE(sbauza): The old version returned a single item; we need to
        # keep this behaviour for backwards compatibility
        db_compute = db_computes[0]
        return cls._from_db_object(context, cls(), db_compute)

    @base.remotable_classmethod
    def get_by_host_and_nodename(cls, context, host, nodename):
        try:
            db_compute = db.compute_node_get_by_host_and_nodename(
                context, host, nodename)
        except exception.ComputeHostNotFound:
            # FIXME(sbauza): Some old computes can still have no host record,
            # so we need to provide compatibility by using the old service_id
            # record.
            # This compatibility costs an extra DB call, but that's necessary
            # until all nodes are upgraded.
            try:
                service = objects.Service.get_by_compute_host(context, host)
                db_computes = db.compute_nodes_get_by_service_id(
                    context, service.id)
            except exception.ServiceNotFound:
                # We need to provide the same exception upstream
                raise exception.ComputeHostNotFound(host=host)
            db_compute = None
            for compute in db_computes:
                if compute['hypervisor_hostname'] == nodename:
                    db_compute = compute
                    # We can avoid an extra call to Service object in
                    # _from_db_object
                    db_compute['host'] = service.host
                    break
            if not db_compute:
                raise exception.ComputeHostNotFound(host=host)
        return cls._from_db_object(context, cls(), db_compute)

    @base.remotable_classmethod
    def get_first_node_by_host_for_old_compat(cls, context, host,
                                              use_slave=False):
        computes = ComputeNodeList.get_all_by_host(context, host, use_slave)
        # FIXME(sbauza): Some hypervisors (VMware, Ironic) can return multiple
        # nodes per host; we should return all the nodes and modify the
        # callers instead.
        # Arbitrarily returning the first node.
        return computes[0]

    @staticmethod
    def _convert_stats_to_db_format(updates):
        stats = updates.pop('stats', None)
        if stats is not None:
            updates['stats'] = jsonutils.dumps(stats)

    @staticmethod
    def _convert_host_ip_to_db_format(updates):
        host_ip = updates.pop('host_ip', None)
        if host_ip:
            updates['host_ip'] = str(host_ip)

    @staticmethod
    def _convert_supported_instances_to_db_format(updates):
        hv_specs = updates.pop('supported_hv_specs', None)
        if hv_specs is not None:
            hv_specs = [hv_spec.to_list() for hv_spec in hv_specs]
            updates['supported_instances'] = jsonutils.dumps(hv_specs)

    @staticmethod
    def _convert_pci_stats_to_db_format(updates):
        pools = updates.pop('pci_device_pools', None)
        if pools:
            updates['pci_stats'] = jsonutils.dumps(pools.obj_to_primitive())

    @base.remotable
    def create(self):
        if self.obj_attr_is_set('id'):
            raise exception.ObjectActionError(action='create',
                                              reason='already created')
        updates = self.obj_get_changes()
        self._convert_stats_to_db_format(updates)
        self._convert_host_ip_to_db_format(updates)
        self._convert_supported_instances_to_db_format(updates)
        self._convert_pci_stats_to_db_format(updates)

        db_compute = db.compute_node_create(self._context, updates)
        self._from_db_object(self._context, self, db_compute)

    @base.remotable
    def save(self, prune_stats=False):
        # NOTE(belliott) ignore prune_stats param, no longer relevant

        updates = self.obj_get_changes()
        updates.pop('id', None)
        self._convert_stats_to_db_format(updates)
        self._convert_host_ip_to_db_format(updates)
        self._convert_supported_instances_to_db_format(updates)
        self._convert_pci_stats_to_db_format(updates)

        db_compute = db.compute_node_update(self._context, self.id, updates)
        self._from_db_object(self._context, self, db_compute)

    @base.remotable
    def destroy(self):
        db.compute_node_delete(self._context, self.id)

    def update_from_virt_driver(self, resources):
        # NOTE(pmurray): the virt driver provides a dict of values that
        # can be copied into the compute node. The names and representation
        # do not exactly match.
        # TODO(pmurray): the resources dict should be formalized.
        keys = ["vcpus", "memory_mb", "local_gb", "cpu_info",
                "vcpus_used", "memory_mb_used", "local_gb_used",
                "numa_topology", "hypervisor_type",
                "hypervisor_version", "hypervisor_hostname",
                "disk_available_least", "host_ip"]
        for key in keys:
            if key in resources:
                self[key] = resources[key]

        # supported_instances has a different name in compute_node
        if 'supported_instances' in resources:
            si = resources['supported_instances']
            self.supported_hv_specs = [objects.HVSpec.from_list(s) for s in si]
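
The allocation-ratio handling in _from_db_object() above deserves a worked example: a 0.0 or NULL value in the DB means the compute node never reported a ratio, so the controller's configured value is used instead, and if that is also 0.0 the hardcoded defaults apply. A standalone sketch of that fallback chain (the config values passed in below are hypothetical):

HARDCODED_DEFAULTS = {'cpu_allocation_ratio': 16.0, 'ram_allocation_ratio': 1.5}

def hydrate_ratio(key, db_value, conf_value):
    if db_value == 0.0:
        # operator has not provided a value on the compute node yet
        db_value = None
    if db_value is None:
        # old node, or the default was never changed: fall back to config
        db_value = conf_value
        if db_value == 0.0:
            # not specified on the controller either: use the hardcoded default
            db_value = HARDCODED_DEFAULTS[key]
    return db_value

print(hydrate_ratio('cpu_allocation_ratio', 0.0, 0.0))   # 16.0
print(hydrate_ratio('ram_allocation_ratio', None, 2.0))  # 2.0
print(hydrate_ratio('cpu_allocation_ratio', 4.0, 16.0))  # 4.0
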
Ejemplo n.º 16
0
class ComputeNode(base.NovaPersistentObject, base.NovaObject):
    # Version 1.0: Initial version
    # Version 1.1: Added get_by_service_id()
    # Version 1.2: String attributes updated to support unicode
    # Version 1.3: Added stats field
    # Version 1.4: Added host ip field
    # Version 1.5: Added numa_topology field
    # Version 1.6: Added supported_hv_specs
    VERSION = '1.6'

    fields = {
        'id': fields.IntegerField(read_only=True),
        'service_id': fields.IntegerField(),
        'vcpus': fields.IntegerField(),
        'memory_mb': fields.IntegerField(),
        'local_gb': fields.IntegerField(),
        'vcpus_used': fields.IntegerField(),
        'memory_mb_used': fields.IntegerField(),
        'local_gb_used': fields.IntegerField(),
        'hypervisor_type': fields.StringField(),
        'hypervisor_version': fields.IntegerField(),
        'hypervisor_hostname': fields.StringField(nullable=True),
        'free_ram_mb': fields.IntegerField(nullable=True),
        'free_disk_gb': fields.IntegerField(nullable=True),
        'current_workload': fields.IntegerField(nullable=True),
        'running_vms': fields.IntegerField(nullable=True),
        'cpu_info': fields.StringField(nullable=True),
        'disk_available_least': fields.IntegerField(nullable=True),
        'metrics': fields.StringField(nullable=True),
        'stats': fields.DictOfNullableStringsField(nullable=True),
        'host_ip': fields.IPAddressField(nullable=True),
        'numa_topology': fields.StringField(nullable=True),
        # NOTE(pmurray): the supported_hv_specs field maps to the
        # supported_instances field in the database
        'supported_hv_specs': fields.ListOfObjectsField('HVSpec'),
    }

    def obj_make_compatible(self, primitive, target_version):
        target_version = utils.convert_version_to_tuple(target_version)
        if target_version < (1, 6) and 'supported_hv_specs' in primitive:
            del primitive['supported_hv_specs']
        if target_version < (1, 5) and 'numa_topology' in primitive:
            del primitive['numa_topology']
        if target_version < (1, 4) and 'host_ip' in primitive:
            del primitive['host_ip']
        if target_version < (1, 3) and 'stats' in primitive:
            # pre 1.3 version does not have a stats field
            del primitive['stats']

    @staticmethod
    def _from_db_object(context, compute, db_compute):

        fields = set(compute.fields) - set(['stats', 'supported_hv_specs'])
        for key in fields:
            compute[key] = db_compute[key]

        stats = db_compute['stats']
        if stats:
            compute['stats'] = jsonutils.loads(stats)

        sup_insts = db_compute.get('supported_instances')
        if sup_insts:
            hv_specs = jsonutils.loads(sup_insts)
            hv_specs = [
                objects.HVSpec.from_list(hv_spec) for hv_spec in hv_specs
            ]
            compute['supported_hv_specs'] = hv_specs

        compute._context = context
        compute.obj_reset_changes()
        return compute

    @base.remotable_classmethod
    def get_by_id(cls, context, compute_id):
        db_compute = db.compute_node_get(context, compute_id)
        return cls._from_db_object(context, cls(), db_compute)

    @base.remotable_classmethod
    def get_by_service_id(cls, context, service_id):
        db_compute = db.compute_node_get_by_service_id(context, service_id)
        return cls._from_db_object(context, cls(), db_compute)

    def _convert_stats_to_db_format(self, updates):
        stats = updates.pop('stats', None)
        if stats is not None:
            updates['stats'] = jsonutils.dumps(stats)

    def _convert_host_ip_to_db_format(self, updates):
        host_ip = updates.pop('host_ip', None)
        if host_ip:
            updates['host_ip'] = str(host_ip)

    def _convert_supported_instances_to_db_format(self, updates):
        hv_specs = updates.pop('supported_hv_specs', None)
        if hv_specs is not None:
            hv_specs = [hv_spec.to_list() for hv_spec in hv_specs]
            updates['supported_instances'] = jsonutils.dumps(hv_specs)

    @base.remotable
    def create(self, context):
        if self.obj_attr_is_set('id'):
            raise exception.ObjectActionError(action='create',
                                              reason='already created')
        updates = self.obj_get_changes()
        self._convert_stats_to_db_format(updates)
        self._convert_host_ip_to_db_format(updates)
        self._convert_supported_instances_to_db_format(updates)

        db_compute = db.compute_node_create(context, updates)
        self._from_db_object(context, self, db_compute)

    @base.remotable
    def save(self, context, prune_stats=False):
        # NOTE(belliott) ignore prune_stats param, no longer relevant

        updates = self.obj_get_changes()
        updates.pop('id', None)
        self._convert_stats_to_db_format(updates)
        self._convert_host_ip_to_db_format(updates)
        self._convert_supported_instances_to_db_format(updates)

        db_compute = db.compute_node_update(context, self.id, updates)
        self._from_db_object(context, self, db_compute)

    @base.remotable
    def destroy(self, context):
        db.compute_node_delete(context, self.id)

    @property
    def service(self):
        if not hasattr(self, '_cached_service'):
            self._cached_service = objects.Service.get_by_id(
                self._context, self.service_id)
        return self._cached_service
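
Finally, a hedged sketch of how the supported_hv_specs field above maps to the supported_instances DB column: each spec is serialized to a list (HVSpec.to_list()/from_list() in Nova; plain lists and the stdlib json module stand in here) and the whole collection is stored as JSON text.

import json

def hv_specs_to_db(updates):
    # mirrors _convert_supported_instances_to_db_format
    hv_specs = updates.pop('supported_hv_specs', None)
    if hv_specs is not None:
        updates['supported_instances'] = json.dumps(hv_specs)
    return updates

def hv_specs_from_db(db_row):
    # mirrors the hydration done in _from_db_object
    raw = db_row.get('supported_instances')
    return json.loads(raw) if raw else []

row = hv_specs_to_db({'supported_hv_specs': [['x86_64', 'kvm', 'hvm']]})
print(row)                    # {'supported_instances': '[["x86_64", "kvm", "hvm"]]'}
print(hv_specs_from_db(row))  # [['x86_64', 'kvm', 'hvm']]
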