Example #1
def setUp(self):
    super(TestDictOfStrings, self).setUp()
    self.field = fields.DictOfStringsField()
    self.coerce_good_values = [({'foo': 'bar'}, {'foo': 'bar'}),
                               ({'foo': 1}, {'foo': '1'})]
    self.coerce_bad_values = [{1: 'bar'}, {'foo': None}, 'foo']
    self.to_primitive_values = [({'foo': 'bar'}, {'foo': 'bar'})]
    self.from_primitive_values = [({'foo': 'bar'}, {'foo': 'bar'})]
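The coerce pairs above are (input, expected output): string values pass through, non-string values such as 1 are stringified, and non-string keys, None values, or non-dict inputs are rejected. A minimal sketch of exercising the coercion directly, assuming the same fields module as above and the oslo.versionedobjects Field.coerce(obj, attr, value) interface (Nova's field tests pass a placeholder string for the object argument):

field = fields.DictOfStringsField()

# Good value: the integer is coerced to a string, as in coerce_good_values.
assert field.coerce('obj', 'attr', {'foo': 1}) == {'foo': '1'}

# Bad value: non-string keys are rejected, as in coerce_bad_values.
try:
    field.coerce('obj', 'attr', {1: 'bar'})
except Exception:
    pass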
Example #2
class LiveMigrateData(obj_base.NovaObject):
    fields = {
        'is_volume_backed': fields.BooleanField(),
        'migration': fields.ObjectField('Migration'),
        # old_vol_attachment_ids is a dict used to store the old attachment_ids
        # for each volume so they can be restored on a migration rollback. The
        # key is the volume_id, and the value is the attachment_id.
        'old_vol_attachment_ids': fields.DictOfStringsField(),
        # wait_for_vif_plugged is set in pre_live_migration on the destination
        # compute host based on the [compute]/live_migration_wait_for_vif_plug
        # config option value; a default value is not set here since the
        # default for the config option may change in the future
        'wait_for_vif_plugged': fields.BooleanField()
    }

    def to_legacy_dict(self, pre_migration_result=False):
        legacy = {}
        if self.obj_attr_is_set('is_volume_backed'):
            legacy['is_volume_backed'] = self.is_volume_backed
        if self.obj_attr_is_set('migration'):
            legacy['migration'] = self.migration
        if pre_migration_result:
            legacy['pre_live_migration_result'] = {}

        return legacy

    def from_legacy_dict(self, legacy):
        if 'is_volume_backed' in legacy:
            self.is_volume_backed = legacy['is_volume_backed']
        if 'migration' in legacy:
            self.migration = legacy['migration']

    @classmethod
    def detect_implementation(cls, legacy_dict):
        if 'instance_relative_path' in legacy_dict:
            obj = LibvirtLiveMigrateData()
        elif 'image_type' in legacy_dict:
            obj = LibvirtLiveMigrateData()
        elif 'migrate_data' in legacy_dict:
            obj = XenapiLiveMigrateData()
        else:
            obj = LiveMigrateData()
        obj.from_legacy_dict(legacy_dict)
        return obj
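A minimal sketch of the round trip these helpers provide, assuming a hypothetical legacy dict with none of the driver-specific marker keys, so the base LiveMigrateData implementation is selected:

legacy = {'is_volume_backed': True}   # hypothetical legacy payload
data = LiveMigrateData.detect_implementation(legacy)
assert data.is_volume_backed is True
# Converting back only emits the keys the legacy format carries.
assert data.to_legacy_dict() == {'is_volume_backed': True}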
Example #3
class IpPayload(base.NotificationPayloadBase):
    # Version 1.0: Initial version
    VERSION = '1.0'
    fields = {
        'label': fields.StringField(),
        'mac': fields.MACAddressField(),
        'meta': fields.DictOfStringsField(),
        'port_uuid': fields.UUIDField(nullable=True),
        'version': fields.IntegerField(),
        'address': fields.IPV4AndV6AddressField(),
        'device_name': fields.StringField(nullable=True)
    }

    def __init__(self, label, mac, meta, port_uuid, version, address,
                 device_name):
        super(IpPayload, self).__init__()
        self.label = label
        self.mac = mac
        self.meta = meta
        self.port_uuid = port_uuid
        self.version = version
        self.address = address
        self.device_name = device_name

    @classmethod
    def from_network_info(cls, network_info):
        """Returns a list of IpPayload object based on the passed
        network_info.
        """
        ips = []
        if network_info is not None:
            for vif in network_info:
                for ip in vif.fixed_ips():
                    ips.append(
                        cls(label=vif["network"]["label"],
                            mac=vif["address"],
                            meta=vif["meta"],
                            port_uuid=vif["id"],
                            version=ip["version"],
                            address=ip["address"],
                            device_name=vif["devname"]))
        return ips
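In Nova, network_info is a list of nova.network.model.VIF objects, which behave like dicts and expose fixed_ips(). A hedged sketch with a hypothetical stand-in VIF, only to show the shape of data from_network_info expects:

class FakeVIF(dict):
    # Stand-in for nova.network.model.VIF: a dict with a fixed_ips() method.
    def fixed_ips(self):
        return [{'version': 4, 'address': '192.0.2.10'}]

vif = FakeVIF({'network': {'label': 'private'},
               'address': 'fa:16:3e:00:00:01',
               'meta': {},
               'id': '11111111-2222-3333-4444-555555555555',
               'devname': 'tap0'})
payloads = IpPayload.from_network_info([vif])   # one payload per fixed IP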
Example #4
class LiveMigrateData(obj_base.NovaObject):
    fields = {
        'is_volume_backed': fields.BooleanField(),
        'migration': fields.ObjectField('Migration'),
        # old_vol_attachment_ids is a dict used to store the old attachment_ids
        # for each volume so they can be restored on a migration rollback. The
        # key is the volume_id, and the value is the attachment_id.
        # TODO(mdbooth): This field was made redundant by change I0390c9ff. We
        # should eventually remove it.
        'old_vol_attachment_ids': fields.DictOfStringsField(),
    }

    def to_legacy_dict(self, pre_migration_result=False):
        legacy = {}
        if self.obj_attr_is_set('is_volume_backed'):
            legacy['is_volume_backed'] = self.is_volume_backed
        if self.obj_attr_is_set('migration'):
            legacy['migration'] = self.migration
        if pre_migration_result:
            legacy['pre_live_migration_result'] = {}

        return legacy

    def from_legacy_dict(self, legacy):
        if 'is_volume_backed' in legacy:
            self.is_volume_backed = legacy['is_volume_backed']
        if 'migration' in legacy:
            self.migration = legacy['migration']

    @classmethod
    def detect_implementation(cls, legacy_dict):
        if 'instance_relative_path' in legacy_dict:
            obj = LibvirtLiveMigrateData()
        elif 'image_type' in legacy_dict:
            obj = LibvirtLiveMigrateData()
        elif 'migrate_data' in legacy_dict:
            obj = XenapiLiveMigrateData()
        else:
            obj = LiveMigrateData()
        obj.from_legacy_dict(legacy_dict)
        return obj
Example #5
class AggregatePayload(base.NotificationPayloadBase):
    SCHEMA = {
        'id': ('aggregate', 'id'),
        'uuid': ('aggregate', 'uuid'),
        'name': ('aggregate', 'name'),
        'hosts': ('aggregate', 'hosts'),
        'metadata': ('aggregate', 'metadata'),
    }
    # Version 1.0: Initial version
    VERSION = '1.0'
    fields = {
        'id': fields.IntegerField(),
        'uuid': fields.UUIDField(nullable=False),
        'name': fields.StringField(),
        'hosts': fields.ListOfStringsField(nullable=True),
        'metadata': fields.DictOfStringsField(nullable=True),
    }

    def __init__(self, aggregate, **kwargs):
        super(AggregatePayload, self).__init__(**kwargs)
        self.populate_schema(aggregate=aggregate)
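SCHEMA maps each payload field to a (source object name, attribute) pair; populate_schema, inherited from NotificationPayloadBase, copies the mapped attributes from the keyword argument onto the payload. A hypothetical, much-simplified version of that copy loop, only to illustrate the mapping convention (the real method also handles unset and lazy-loaded source attributes):

def populate_from_schema(payload, schema, **sources):
    # Simplified sketch of the SCHEMA-driven copy, not the actual base class.
    for field, (source_name, attr) in schema.items():
        setattr(payload, field, getattr(sources[source_name], attr))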
Example #6
class LiveMigrateData(obj_base.NovaObject):
    # Version 1.0: Initial version
    # Version 1.1: Added old_vol_attachment_ids field.
    # Version 1.2: Added wait_for_vif_plugged
    # Version 1.3: Added vifs field.
    VERSION = '1.3'

    fields = {
        'is_volume_backed': fields.BooleanField(),
        'migration': fields.ObjectField('Migration'),
        # old_vol_attachment_ids is a dict used to store the old attachment_ids
        # for each volume so they can be restored on a migration rollback. The
        # key is the volume_id, and the value is the attachment_id.
        # TODO(mdbooth): This field was made redundant by change I0390c9ff. We
        # should eventually remove it.
        'old_vol_attachment_ids': fields.DictOfStringsField(),
        # wait_for_vif_plugged is set in pre_live_migration on the destination
        # compute host based on the [compute]/live_migration_wait_for_vif_plug
        # config option value; a default value is not set here since the
        # default for the config option may change in the future
        'wait_for_vif_plugged': fields.BooleanField(),
        'vifs': fields.ListOfObjectsField('VIFMigrateData'),
    }

    @staticmethod
    def create_skeleton_migrate_vifs(vifs):
        """Create migrate vifs for live migration.

        :param vifs: a list of VIFs.
        :return: list of VIFMigrateData objects corresponding to the provided
                 VIFs.
        """
        vif_mig_data = []

        for vif in vifs:
            mig_vif = VIFMigrateData(port_id=vif['id'], source_vif=vif)
            vif_mig_data.append(mig_vif)
        return vif_mig_data
Example #7
class InstanceExternalEvent(obj_base.NovaObject):
    # Version 1.0: Initial version
    #              Supports network-changed and vif-plugged
    VERSION = '1.0'

    fields = {
        'instance_uuid': fields.UUIDField(),
        'name': fields.StringField(),
        'status': fields.StringField(),
        'tag': fields.StringField(nullable=True),
        'data': fields.DictOfStringsField(),
    }

    @staticmethod
    def make_key(name, tag=None):
        if tag is not None:
            return '%s-%s' % (name, tag)
        else:
            return name

    @property
    def key(self):
        return self.make_key(self.name, self.tag)
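make_key simply namespaces tagged events, for instance:

assert InstanceExternalEvent.make_key('network-changed') == 'network-changed'
assert (InstanceExternalEvent.make_key('vif-plugged', 'port-1')
        == 'vif-plugged-port-1')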
Example #8
class LiveMigrateData(obj_base.NovaObject):
    # Version 1.0: Initial version
    # Version 1.1: Added old_vol_attachment_ids field.
    # Version 1.2: Added wait_for_vif_plugged
    # Version 1.3: Added vifs field.
    VERSION = '1.3'

    fields = {
        'is_volume_backed': fields.BooleanField(),
        'migration': fields.ObjectField('Migration'),
        # old_vol_attachment_ids is a dict used to store the old attachment_ids
        # for each volume so they can be restored on a migration rollback. The
        # key is the volume_id, and the value is the attachment_id.
        # TODO(mdbooth): This field was made redundant by change Ibe9215c0. We
        # should eventually remove it.
        'old_vol_attachment_ids': fields.DictOfStringsField(),
        # wait_for_vif_plugged is set in pre_live_migration on the destination
        # compute host based on the [compute]/live_migration_wait_for_vif_plug
        # config option value; a default value is not set here since the
        # default for the config option may change in the future
        'wait_for_vif_plugged': fields.BooleanField(),
        'vifs': fields.ListOfObjectsField('VIFMigrateData'),
    }
Example #9
class ServerGroupPayload(base.NotificationPayloadBase):
    SCHEMA = {
        'uuid': ('group', 'uuid'),
        'name': ('group', 'name'),
        'user_id': ('group', 'user_id'),
        'project_id': ('group', 'project_id'),
        'policies': ('group', 'policies'),
        'members': ('group', 'members'),
        'hosts': ('group', 'hosts'),
        'policy': ('group', 'policy'),
        'rules': ('group', 'rules'),
    }
    # Version 1.0: Initial version
    # Version 1.1: Deprecate policies, add policy and add rules
    VERSION = '1.1'
    fields = {
        'uuid': fields.UUIDField(),
        'name': fields.StringField(nullable=True),
        'user_id': fields.StringField(nullable=True),
        'project_id': fields.StringField(nullable=True),
        # NOTE(yikun): policies is deprecated and should
        # be removed on the next major version bump
        'policies': fields.ListOfStringsField(nullable=True),
        'members': fields.ListOfStringsField(nullable=True),
        'hosts': fields.ListOfStringsField(nullable=True),
        'policy': fields.StringField(nullable=True),
        'rules': fields.DictOfStringsField(),
    }

    def __init__(self, group):
        super(ServerGroupPayload, self).__init__()
        # Note: The group is orphaned here to avoid triggering lazy-loading of
        # the group.hosts field.
        cgroup = copy.deepcopy(group)
        cgroup._context = None
        self.populate_schema(group=cgroup)
Example #10
class AggregatePayload(base.NotificationPayloadBase):
    SCHEMA = {
        'id': ('aggregate', 'id'),
        'uuid': ('aggregate', 'uuid'),
        'name': ('aggregate', 'name'),
        'hosts': ('aggregate', 'hosts'),
        'metadata': ('aggregate', 'metadata'),
    }
    # Version 1.0: Initial version
    #         1.1: Making the id field nullable
    VERSION = '1.1'
    fields = {
        # NOTE(gibi): id is nullable as aggregate.create.start is sent before
        # the id is generated by the db
        'id': fields.IntegerField(nullable=True),
        'uuid': fields.UUIDField(nullable=False),
        'name': fields.StringField(),
        'hosts': fields.ListOfStringsField(nullable=True),
        'metadata': fields.DictOfStringsField(nullable=True),
    }

    def __init__(self, aggregate):
        super(AggregatePayload, self).__init__()
        self.populate_schema(aggregate=aggregate)
Example #11
class Flavor(base.NovaPersistentObject, base.NovaObject,
             base.NovaObjectDictCompat):
    # Version 1.0: Initial version
    # Version 1.1: Added save_projects(), save_extra_specs(), removed
    #              remoteable from save()
    VERSION = '1.1'

    fields = {
        'id': fields.IntegerField(),
        'name': fields.StringField(nullable=True),
        'memory_mb': fields.IntegerField(),
        'vcpus': fields.IntegerField(),
        'root_gb': fields.IntegerField(),
        'ephemeral_gb': fields.IntegerField(),
        'flavorid': fields.StringField(),
        'swap': fields.IntegerField(),
        'rxtx_factor': fields.FloatField(nullable=True, default=1.0),
        'vcpu_weight': fields.IntegerField(nullable=True),
        'disabled': fields.BooleanField(),
        'is_public': fields.BooleanField(),
        'extra_specs': fields.DictOfStringsField(),
        'projects': fields.ListOfStringsField(),
    }

    def __init__(self, *args, **kwargs):
        super(Flavor, self).__init__(*args, **kwargs)
        self._orig_extra_specs = {}
        self._orig_projects = []

    @staticmethod
    def _from_db_object(context, flavor, db_flavor, expected_attrs=None):
        if expected_attrs is None:
            expected_attrs = []
        flavor._context = context
        for name, field in flavor.fields.items():
            if name in OPTIONAL_FIELDS:
                continue
            value = db_flavor[name]
            if isinstance(field, fields.IntegerField):
                value = value if value is not None else 0
            flavor[name] = value

        if 'extra_specs' in expected_attrs:
            flavor.extra_specs = db_flavor['extra_specs']

        if 'projects' in expected_attrs:
            flavor._load_projects()

        flavor.obj_reset_changes()
        return flavor

    @base.remotable
    def _load_projects(self):
        self.projects = [
            x['project_id'] for x in db.flavor_access_get_by_flavor_id(
                self._context, self.flavorid)
        ]
        self.obj_reset_changes(['projects'])

    def obj_load_attr(self, attrname):
        # NOTE(danms): Only projects could be lazy-loaded right now
        if attrname != 'projects':
            raise exception.ObjectActionError(action='obj_load_attr',
                                              reason='unable to load %s' %
                                              attrname)

        self._load_projects()

    def obj_reset_changes(self, fields=None, recursive=False):
        super(Flavor, self).obj_reset_changes(fields=fields,
                                              recursive=recursive)
        if fields is None or 'extra_specs' in fields:
            self._orig_extra_specs = (dict(self.extra_specs)
                                      if self.obj_attr_is_set('extra_specs')
                                      else {})
        if fields is None or 'projects' in fields:
            self._orig_projects = (list(self.projects)
                                   if self.obj_attr_is_set('projects') else [])

    def obj_what_changed(self):
        changes = super(Flavor, self).obj_what_changed()
        if ('extra_specs' in self
                and self.extra_specs != self._orig_extra_specs):
            changes.add('extra_specs')
        if 'projects' in self and self.projects != self._orig_projects:
            changes.add('projects')
        return changes

    @classmethod
    def _obj_from_primitive(cls, context, objver, primitive):
        self = super(Flavor, cls)._obj_from_primitive(context, objver,
                                                      primitive)
        changes = self.obj_what_changed()
        if 'extra_specs' not in changes:
            # This call left extra_specs "clean" so update our tracker
            self._orig_extra_specs = (dict(self.extra_specs)
                                      if self.obj_attr_is_set('extra_specs')
                                      else {})
        if 'projects' not in changes:
            # This call left projects "clean" so update our tracker
            self._orig_projects = (list(self.projects)
                                   if self.obj_attr_is_set('projects') else [])
        return self

    @base.remotable_classmethod
    def get_by_id(cls, context, id):
        db_flavor = db.flavor_get(context, id)
        return cls._from_db_object(context,
                                   cls(context),
                                   db_flavor,
                                   expected_attrs=['extra_specs'])

    @base.remotable_classmethod
    def get_by_name(cls, context, name):
        db_flavor = db.flavor_get_by_name(context, name)
        return cls._from_db_object(context,
                                   cls(context),
                                   db_flavor,
                                   expected_attrs=['extra_specs'])

    @base.remotable_classmethod
    def get_by_flavor_id(cls, context, flavor_id, read_deleted=None):
        db_flavor = db.flavor_get_by_flavor_id(context, flavor_id,
                                               read_deleted)
        return cls._from_db_object(context,
                                   cls(context),
                                   db_flavor,
                                   expected_attrs=['extra_specs'])

    @base.remotable
    def add_access(self, project_id):
        if 'projects' in self.obj_what_changed():
            raise exception.ObjectActionError(action='add_access',
                                              reason='projects modified')
        db.flavor_access_add(self._context, self.flavorid, project_id)
        self._load_projects()

    @base.remotable
    def remove_access(self, project_id):
        if 'projects' in self.obj_what_changed():
            raise exception.ObjectActionError(action='remove_access',
                                              reason='projects modified')
        db.flavor_access_remove(self._context, self.flavorid, project_id)
        self._load_projects()

    @base.remotable
    def create(self):
        if self.obj_attr_is_set('id'):
            raise exception.ObjectActionError(action='create',
                                              reason='already created')
        updates = self.obj_get_changes()
        expected_attrs = []
        for attr in OPTIONAL_FIELDS:
            if attr in updates:
                expected_attrs.append(attr)
        projects = updates.pop('projects', [])
        db_flavor = db.flavor_create(self._context, updates, projects=projects)
        self._from_db_object(self._context,
                             self,
                             db_flavor,
                             expected_attrs=expected_attrs)

    @base.remotable
    def save_projects(self, to_add=None, to_delete=None):
        """Add or delete projects.

        :param to_add: A list of projects to add
        :param to_delete: A list of projects to remove
        """

        to_add = to_add if to_add is not None else []
        to_delete = to_delete if to_delete is not None else []

        for project_id in to_add:
            db.flavor_access_add(self._context, self.flavorid, project_id)
        for project_id in to_delete:
            db.flavor_access_remove(self._context, self.flavorid, project_id)
        self.obj_reset_changes(['projects'])

    @base.remotable
    def save_extra_specs(self, to_add=None, to_delete=None):
        """Add or delete extra_specs.

        :param to_add: A dict of new keys to add/update
        :param to_delete: A list of keys to remove
        """

        to_add = to_add if to_add is not None else {}
        to_delete = to_delete if to_delete is not None else []

        if to_add:
            db.flavor_extra_specs_update_or_create(self._context,
                                                   self.flavorid, to_add)

        for key in to_delete:
            db.flavor_extra_specs_delete(self._context, self.flavorid, key)
        self.obj_reset_changes(['extra_specs'])

    def save(self):
        updates = self.obj_get_changes()
        projects = updates.pop('projects', None)
        extra_specs = updates.pop('extra_specs', None)
        if updates:
            raise exception.ObjectActionError(
                action='save', reason='read-only fields were changed')

        if extra_specs is not None:
            deleted_keys = (set(self._orig_extra_specs.keys()) -
                            set(extra_specs.keys()))
            added_keys = self.extra_specs
        else:
            added_keys = deleted_keys = None

        if projects is not None:
            deleted_projects = set(self._orig_projects) - set(projects)
            added_projects = set(projects) - set(self._orig_projects)
        else:
            added_projects = deleted_projects = None

        # NOTE(danms): The first remotable method we call will reset
        # our record of the original values for projects and extra_specs.
        # Thus,
        # we collect the added/deleted lists for both above and /then/
        # call these methods to update them.

        if added_keys or deleted_keys:
            self.save_extra_specs(self.extra_specs, deleted_keys)

        if added_projects or deleted_projects:
            self.save_projects(added_projects, deleted_projects)

    @base.remotable
    def destroy(self):
        db.flavor_destroy(self._context, self.name)
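save() never writes projects or extra_specs wholesale; it diffs the current values against the _orig_* snapshots and pushes only the additions and deletions. The set arithmetic in isolation, with hypothetical values:

orig_projects = ['tenant-a', 'tenant-b']                 # snapshot
projects = ['tenant-b', 'tenant-c']                      # current value
deleted_projects = set(orig_projects) - set(projects)    # {'tenant-a'}
added_projects = set(projects) - set(orig_projects)      # {'tenant-c'}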
Example #12
class Aggregate(base.NovaPersistentObject, base.NovaObject):
    # Version 1.0: Initial version
    # Version 1.1: String attributes updated to support unicode
    VERSION = '1.1'

    fields = {
        'id': fields.IntegerField(),
        'name': fields.StringField(),
        'hosts': fields.ListOfStringsField(nullable=True),
        'metadata': fields.DictOfStringsField(nullable=True),
        }

    obj_extra_fields = ['availability_zone']

    @staticmethod
    def _from_db_object(context, aggregate, db_aggregate):
        for key in aggregate.fields:
            if key == 'metadata':
                db_key = 'metadetails'
            else:
                db_key = key
            aggregate[key] = db_aggregate[db_key]
        aggregate._context = context
        aggregate.obj_reset_changes()
        return aggregate

    def _assert_no_hosts(self, action):
        if 'hosts' in self.obj_what_changed():
            raise exception.ObjectActionError(
                action=action,
                reason='hosts updated inline')

    @base.remotable_classmethod
    def get_by_id(cls, context, aggregate_id):
        db_aggregate = db.aggregate_get(context, aggregate_id)
        return cls._from_db_object(context, cls(), db_aggregate)

    @base.remotable
    def create(self, context):
        if self.obj_attr_is_set('id'):
            raise exception.ObjectActionError(action='create',
                                              reason='already created')
        self._assert_no_hosts('create')
        updates = self.obj_get_changes()
        payload = dict(updates)
        if 'metadata' in updates:
            # NOTE(danms): For some reason the notification format is weird
            payload['meta_data'] = payload.pop('metadata')
        compute_utils.notify_about_aggregate_update(context,
                                                    "create.start",
                                                    payload)
        metadata = updates.pop('metadata', None)
        updates.pop('id', None)
        db_aggregate = db.aggregate_create(context, updates, metadata=metadata)
        self._from_db_object(context, self, db_aggregate)
        payload['aggregate_id'] = self.id
        compute_utils.notify_about_aggregate_update(context,
                                                    "create.end",
                                                    payload)

    @base.remotable
    def save(self, context):
        self._assert_no_hosts('save')
        updates = self.obj_get_changes()

        payload = {'aggregate_id': self.id}
        if 'metadata' in updates:
            payload['meta_data'] = updates['metadata']
        compute_utils.notify_about_aggregate_update(context,
                                                    "updateprop.start",
                                                    payload)
        updates.pop('id', None)
        db_aggregate = db.aggregate_update(context, self.id, updates)
        compute_utils.notify_about_aggregate_update(context,
                                                    "updateprop.end",
                                                    payload)
        return self._from_db_object(context, self, db_aggregate)

    @base.remotable
    def update_metadata(self, context, updates):
        payload = {'aggregate_id': self.id,
                   'meta_data': updates}
        compute_utils.notify_about_aggregate_update(context,
                                                    "updatemetadata.start",
                                                    payload)
        to_add = {}
        for key, value in updates.items():
            if value is None:
                try:
                    db.aggregate_metadata_delete(context, self.id, key)
                except exception.AggregateMetadataNotFound:
                    pass
                try:
                    self.metadata.pop(key)
                except KeyError:
                    pass
            else:
                to_add[key] = value
                self.metadata[key] = value
        db.aggregate_metadata_add(context, self.id, to_add)
        compute_utils.notify_about_aggregate_update(context,
                                                    "updatemetadata.end",
                                                    payload)
        self.obj_reset_changes(fields=['metadata'])

    @base.remotable
    def destroy(self, context):
        db.aggregate_delete(context, self.id)

    @base.remotable
    def add_host(self, context, host):
        db.aggregate_host_add(context, self.id, host)
        if self.hosts is None:
            self.hosts = []
        self.hosts.append(host)
        self.obj_reset_changes(fields=['hosts'])

    @base.remotable
    def delete_host(self, context, host):
        db.aggregate_host_delete(context, self.id, host)
        self.hosts.remove(host)
        self.obj_reset_changes(fields=['hosts'])

    @property
    def availability_zone(self):
        return self.metadata.get('availability_zone', None)
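update_metadata treats a None value as a delete marker and anything else as an add or update. The merge rule in isolation, with hypothetical values:

metadata = {'availability_zone': 'az1', 'ssd': 'true'}
updates = {'ssd': None, 'pinned': 'true'}
for key, value in updates.items():
    if value is None:
        metadata.pop(key, None)
    else:
        metadata[key] = value
assert metadata == {'availability_zone': 'az1', 'pinned': 'true'}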
Example #13
class InstanceGroup(base.NovaPersistentObject, base.NovaObject):
    # Version 1.0: Initial version
    # Version 1.1: String attributes updated to support unicode
    # Version 1.2: Use list/dict helpers for policies, metadetails, members
    # Version 1.3: Make uuid a non-None real string
    VERSION = '1.3'

    fields = {
        'id': fields.IntegerField(),

        'user_id': fields.StringField(nullable=True),
        'project_id': fields.StringField(nullable=True),

        'uuid': fields.UUIDField(),
        'name': fields.StringField(nullable=True),

        'policies': fields.ListOfStringsField(nullable=True),
        'metadetails': fields.DictOfStringsField(nullable=True),
        'members': fields.ListOfStringsField(nullable=True),
        }

    @staticmethod
    def _from_db_object(context, instance_group, db_inst):
        """Method to help with migration to objects.

        Converts a database entity to a formal object.
        """
        # Most of the field names match right now, so be quick
        for field in instance_group.fields:
            if field == 'deleted':
                instance_group.deleted = db_inst['deleted'] == db_inst['id']
            else:
                instance_group[field] = db_inst[field]

        instance_group._context = context
        instance_group.obj_reset_changes()
        return instance_group

    @base.remotable_classmethod
    def get_by_uuid(cls, context, uuid):
        db_inst = db.instance_group_get(context, uuid)
        return cls._from_db_object(context, cls(), db_inst)

    @base.remotable
    def save(self, context):
        """Save updates to this instance group."""

        updates = self.obj_get_changes()
        if not updates:
            return

        metadata = None
        if 'metadetails' in updates:
            metadata = updates.pop('metadetails')
            updates.update({'metadata': metadata})

        db.instance_group_update(context, self.uuid, updates)
        db_inst = db.instance_group_get(context, self.uuid)
        self._from_db_object(context, self, db_inst)

    @base.remotable
    def refresh(self, context):
        """Refreshes the instance group."""
        current = self.__class__.get_by_uuid(context, self.uuid)
        for field in self.fields:
            if self.obj_attr_is_set(field) and self[field] != current[field]:
                self[field] = current[field]
        self.obj_reset_changes()

    @base.remotable
    def create(self, context):
        if self.obj_attr_is_set('id'):
            raise exception.ObjectActionError(action='create',
                                              reason='already created')
        updates = self.obj_get_changes()
        updates.pop('id', None)
        policies = updates.pop('policies', None)
        members = updates.pop('members', None)
        metadetails = updates.pop('metadetails', None)

        db_inst = db.instance_group_create(context, updates,
                                           policies=policies,
                                           metadata=metadetails,
                                           members=members)
        self._from_db_object(context, self, db_inst)

    @base.remotable
    def destroy(self, context):
        db.instance_group_delete(context, self.uuid)
        self.obj_reset_changes()
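save() renames 'metadetails' to 'metadata' before calling db.instance_group_update(), since that is the key the DB API layer uses. The rename in isolation, with a hypothetical updates dict:

updates = {'name': 'group-1', 'metadetails': {'k': 'v'}}
if 'metadetails' in updates:
    metadata = updates.pop('metadetails')
    updates.update({'metadata': metadata})
# updates == {'name': 'group-1', 'metadata': {'k': 'v'}}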
Example #14
class PciDevice(base.NovaPersistentObject, base.NovaObject):
    """Object to represent a PCI device on a compute node.

    PCI devices are managed by the compute resource tracker, which discovers
    the devices from the hardware platform, claims, allocates and frees
    devices for instances.

    The PCI device information is permanently maintained in a database.
    This makes it convenient to get PCI device information, like physical
    function for a VF device, adjacent switch IP address for a NIC,
    hypervisor identification for a PCI device, etc. It also provides a
    convenient way to check device allocation information for administrator
    purposes.

    A device can be in available/claimed/allocated/deleted/removed state.

    A device is available when it is discovered.

    A device is claimed prior to being allocated to an instance. Normally the
    transition from claimed to allocated is quick. However, during a resize
    operation the transition can take longer, because devices are claimed in
    prep_resize and allocated in finish_resize.

    A device becomes removed when hot removed from a node (i.e. not found in
    the next auto-discover) but not yet synced with the DB. A removed device
    should not be allocated to any instance, and once deleted from the DB,
    the device object is changed to deleted state and no longer synced with
    the DB.

    Field notes::

        | 'dev_id':
        |   Hypervisor's identification for the device, the string format
        |   is hypervisor specific
        | 'extra_info':
        |   Device-specific properties like PF address, switch ip address etc.

    """

    # Version 1.0: Initial version
    # Version 1.1: String attributes updated to support unicode
    # Version 1.2: added request_id field
    # Version 1.3: Added field to represent PCI device NUMA node
    VERSION = '1.3'

    fields = {
        'id': fields.IntegerField(),
        # Note(yjiang5): the compute_node_id may be None because the pci
        # device objects are created before the compute node is created in DB
        'compute_node_id': fields.IntegerField(nullable=True),
        'address': fields.StringField(),
        'vendor_id': fields.StringField(),
        'product_id': fields.StringField(),
        'dev_type': fields.PciDeviceTypeField(),
        'status': fields.PciDeviceStatusField(),
        'dev_id': fields.StringField(nullable=True),
        'label': fields.StringField(nullable=True),
        'instance_uuid': fields.StringField(nullable=True),
        'request_id': fields.StringField(nullable=True),
        'extra_info': fields.DictOfStringsField(),
        'numa_node': fields.IntegerField(nullable=True),
    }

    def obj_make_compatible(self, primitive, target_version):
        target_version = utils.convert_version_to_tuple(target_version)
        if target_version < (1, 2) and 'request_id' in primitive:
            del primitive['request_id']

    def update_device(self, dev_dict):
        """Sync the content from device dictionary to device object.

        The resource tracker updates the available devices periodically.
        To avoid meaningless syncs with the database, we update the device
        object only if a value changed.
        """

        # Note(yjiang5): status/instance_uuid should only be updated by
        # functions like claim/allocate etc. The id is allocated by
        # database. The extra_info is created by the object.
        no_changes = ('status', 'instance_uuid', 'id', 'extra_info')
        for key in no_changes:
            dev_dict.pop(key, None)

        for k, v in dev_dict.items():
            if k in self.fields.keys():
                setattr(self, k, v)
            else:
                # Note (yjiang5) extra_info.update does not update
                # obj_what_changed, set it explicitly
                extra_info = self.extra_info
                extra_info.update({k: v})
                self.extra_info = extra_info

    def __init__(self, *args, **kwargs):
        super(PciDevice, self).__init__(*args, **kwargs)
        self.obj_reset_changes()
        self.extra_info = {}

    def __eq__(self, other):
        return compare_pci_device_attributes(self, other)

    def __ne__(self, other):
        return not (self == other)

    @staticmethod
    def _from_db_object(context, pci_device, db_dev):
        for key in pci_device.fields:
            if key != 'extra_info':
                setattr(pci_device, key, db_dev[key])
            else:
                extra_info = db_dev.get("extra_info")
                pci_device.extra_info = jsonutils.loads(extra_info)
        pci_device._context = context
        pci_device.obj_reset_changes()
        return pci_device

    @base.remotable_classmethod
    def get_by_dev_addr(cls, context, compute_node_id, dev_addr):
        db_dev = db.pci_device_get_by_addr(context, compute_node_id, dev_addr)
        return cls._from_db_object(context, cls(), db_dev)

    @base.remotable_classmethod
    def get_by_dev_id(cls, context, id):
        db_dev = db.pci_device_get_by_id(context, id)
        return cls._from_db_object(context, cls(), db_dev)

    @classmethod
    def create(cls, dev_dict):
        """Create a PCI device based on hypervisor information.

        As the device object has just been created and is not yet synced
        with the DB, we should not reset changes here for fields set from
        the dict.
        """
        pci_device = cls()
        pci_device.update_device(dev_dict)
        pci_device.status = fields.PciDeviceStatus.AVAILABLE
        return pci_device

    @base.remotable
    def save(self):
        if self.status == fields.PciDeviceStatus.REMOVED:
            self.status = fields.PciDeviceStatus.DELETED
            db.pci_device_destroy(self._context, self.compute_node_id,
                                  self.address)
        elif self.status != fields.PciDeviceStatus.DELETED:
            updates = self.obj_get_changes()
            if 'extra_info' in updates:
                updates['extra_info'] = jsonutils.dumps(updates['extra_info'])
            if updates:
                db_pci = db.pci_device_update(self._context,
                                              self.compute_node_id,
                                              self.address, updates)
                self._from_db_object(self._context, self, db_pci)

    def claim(self, instance):
        if self.status != fields.PciDeviceStatus.AVAILABLE:
            raise exception.PciDeviceInvalidStatus(
                compute_node_id=self.compute_node_id,
                address=self.address,
                status=self.status,
                hopestatus=[fields.PciDeviceStatus.AVAILABLE])
        self.status = fields.PciDeviceStatus.CLAIMED
        self.instance_uuid = instance['uuid']

    def allocate(self, instance):
        ok_statuses = (fields.PciDeviceStatus.AVAILABLE,
                       fields.PciDeviceStatus.CLAIMED)
        if self.status not in ok_statuses:
            raise exception.PciDeviceInvalidStatus(
                compute_node_id=self.compute_node_id,
                address=self.address,
                status=self.status,
                hopestatus=ok_statuses)
        if (self.status == fields.PciDeviceStatus.CLAIMED
                and self.instance_uuid != instance['uuid']):
            raise exception.PciDeviceInvalidOwner(
                compute_node_id=self.compute_node_id,
                address=self.address,
                owner=self.instance_uuid,
                hopeowner=instance['uuid'])

        self.status = fields.PciDeviceStatus.ALLOCATED
        self.instance_uuid = instance['uuid']

        # Notes(yjiang5): remove this check when instance object for
        # compute manager is finished
        if isinstance(instance, dict):
            if 'pci_devices' not in instance:
                instance['pci_devices'] = []
            instance['pci_devices'].append(copy.copy(self))
        else:
            instance.pci_devices.objects.append(copy.copy(self))

    def remove(self):
        if self.status != fields.PciDeviceStatus.AVAILABLE:
            raise exception.PciDeviceInvalidStatus(
                compute_node_id=self.compute_node_id,
                address=self.address,
                status=self.status,
                hopestatus=[fields.PciDeviceStatus.AVAILABLE])
        self.status = fields.PciDeviceStatus.REMOVED
        self.instance_uuid = None
        self.request_id = None

    def free(self, instance=None):
        ok_statuses = (fields.PciDeviceStatus.ALLOCATED,
                       fields.PciDeviceStatus.CLAIMED)
        if self.status not in ok_statuses:
            raise exception.PciDeviceInvalidStatus(
                compute_node_id=self.compute_node_id,
                address=self.address,
                status=self.status,
                hopestatus=ok_statuses)
        if instance and self.instance_uuid != instance['uuid']:
            raise exception.PciDeviceInvalidOwner(
                compute_node_id=self.compute_node_id,
                address=self.address,
                owner=self.instance_uuid,
                hopeowner=instance['uuid'])
        old_status = self.status
        self.status = fields.PciDeviceStatus.AVAILABLE
        self.instance_uuid = None
        self.request_id = None
        if old_status == fields.PciDeviceStatus.ALLOCATED and instance:
            # Notes(yjiang5): remove this check when instance object for
            # compute manager is finished
            existed = next(
                (dev for dev in instance['pci_devices'] if dev.id == self.id))
            if isinstance(instance, dict):
                instance['pci_devices'].remove(existed)
            else:
                instance.pci_devices.objects.remove(existed)
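The claim/allocate/free/remove methods walk the lifecycle described in the docstring (available -> claimed -> allocated -> available). A hedged sketch of one cycle with hypothetical values, using a plain-dict stand-in for the instance, which the isinstance(instance, dict) branches above still support:

dev = PciDevice.create({'compute_node_id': 1,
                        'address': '0000:00:1f.2',
                        'vendor_id': '8086',
                        'product_id': '1520'})
dev.id = 42   # normally assigned by the database on save()
instance = {'uuid': '11111111-2222-3333-4444-555555555555',
            'pci_devices': []}
dev.claim(instance)      # available -> claimed
dev.allocate(instance)   # claimed -> allocated; a copy of the device is
                         # appended to the instance's pci_devices list
dev.free(instance)       # allocated -> available; the copy is removed again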
Example #15
class Instance(base.NovaPersistentObject, base.NovaObject):
    # Version 1.0: Initial version
    # Version 1.1: Added info_cache
    # Version 1.2: Added security_groups
    # Version 1.3: Added expected_vm_state and admin_state_reset to
    #              save()
    # Version 1.4: Added locked_by and deprecated locked
    # Version 1.5: Added cleaned
    # Version 1.6: Added pci_devices
    # Version 1.7: String attributes updated to support unicode
    # Version 1.8: 'security_groups' and 'pci_devices' cannot be None
    # Version 1.9: Make uuid a non-None real string
    # Version 1.10: Added use_slave to refresh and get_by_uuid
    VERSION = '1.10'

    fields = {
        'id': fields.IntegerField(),
        'user_id': fields.StringField(nullable=True),
        'project_id': fields.StringField(nullable=True),
        'image_ref': fields.StringField(nullable=True),
        'kernel_id': fields.StringField(nullable=True),
        'ramdisk_id': fields.StringField(nullable=True),
        'hostname': fields.StringField(nullable=True),
        'launch_index': fields.IntegerField(nullable=True),
        'key_name': fields.StringField(nullable=True),
        'key_data': fields.StringField(nullable=True),
        'power_state': fields.IntegerField(nullable=True),
        'vm_state': fields.StringField(nullable=True),
        'task_state': fields.StringField(nullable=True),
        'memory_mb': fields.IntegerField(nullable=True),
        'vcpus': fields.IntegerField(nullable=True),
        'root_gb': fields.IntegerField(nullable=True),
        'ephemeral_gb': fields.IntegerField(nullable=True),
        'host': fields.StringField(nullable=True),
        'node': fields.StringField(nullable=True),
        'instance_type_id': fields.IntegerField(nullable=True),
        'user_data': fields.StringField(nullable=True),
        'reservation_id': fields.StringField(nullable=True),
        'scheduled_at': fields.DateTimeField(nullable=True),
        'launched_at': fields.DateTimeField(nullable=True),
        'terminated_at': fields.DateTimeField(nullable=True),
        'availability_zone': fields.StringField(nullable=True),
        'display_name': fields.StringField(nullable=True),
        'display_description': fields.StringField(nullable=True),
        'launched_on': fields.StringField(nullable=True),

        # NOTE(jdillaman): locked deprecated in favor of locked_by,
        # to be removed in Icehouse
        'locked': fields.BooleanField(default=False),
        'locked_by': fields.StringField(nullable=True),
        'os_type': fields.StringField(nullable=True),
        'architecture': fields.StringField(nullable=True),
        'vm_mode': fields.StringField(nullable=True),
        'uuid': fields.UUIDField(),
        'root_device_name': fields.StringField(nullable=True),
        'default_ephemeral_device': fields.StringField(nullable=True),
        'default_swap_device': fields.StringField(nullable=True),
        'config_drive': fields.StringField(nullable=True),
        'access_ip_v4': fields.IPV4AddressField(nullable=True),
        'access_ip_v6': fields.IPV6AddressField(nullable=True),
        'auto_disk_config': fields.BooleanField(default=False),
        'progress': fields.IntegerField(nullable=True),
        'shutdown_terminate': fields.BooleanField(default=False),
        'disable_terminate': fields.BooleanField(default=False),
        'cell_name': fields.StringField(nullable=True),
        'metadata': fields.DictOfStringsField(),
        'system_metadata': fields.DictOfNullableStringsField(),
        'info_cache': fields.ObjectField('InstanceInfoCache', nullable=True),
        'security_groups': fields.ObjectField('SecurityGroupList'),
        'fault': fields.ObjectField('InstanceFault', nullable=True),
        'cleaned': fields.BooleanField(default=False),
        'pci_devices': fields.ObjectField('PciDeviceList', nullable=True),
    }

    obj_extra_fields = ['name']

    def __init__(self, *args, **kwargs):
        super(Instance, self).__init__(*args, **kwargs)
        self._reset_metadata_tracking()

    def _reset_metadata_tracking(self):
        self._orig_system_metadata = (dict(self.system_metadata)
                                      if 'system_metadata' in self else {})
        self._orig_metadata = (dict(self.metadata)
                               if 'metadata' in self else {})

    def obj_reset_changes(self, fields=None):
        super(Instance, self).obj_reset_changes(fields)
        self._reset_metadata_tracking()

    def obj_what_changed(self):
        changes = super(Instance, self).obj_what_changed()
        if 'metadata' in self and self.metadata != self._orig_metadata:
            changes.add('metadata')
        if 'system_metadata' in self and (self.system_metadata !=
                                          self._orig_system_metadata):
            changes.add('system_metadata')
        return changes

    @property
    def name(self):
        try:
            base_name = CONF.instance_name_template % self.id
        except TypeError:
            # Support templates like "uuid-%(uuid)s", etc.
            info = {}
            # NOTE(russellb): Don't use self.iteritems() here, as it will
            # result in infinite recursion on the name property.
            for key in self.fields:
                if key == 'name':
                    # NOTE(danms): prevent recursion
                    continue
                elif not self.obj_attr_is_set(key):
                    # NOTE(danms): Don't trigger lazy-loads
                    continue
                info[key] = self[key]
            try:
                base_name = CONF.instance_name_template % info
            except KeyError:
                base_name = self.uuid
        return base_name

    @staticmethod
    def _from_db_object(context, instance, db_inst, expected_attrs=None):
        """Method to help with migration to objects.

        Converts a database entity to a formal object.
        """
        if expected_attrs is None:
            expected_attrs = []
        # Most of the field names match right now, so be quick
        for field in instance.fields:
            if field in INSTANCE_OPTIONAL_ATTRS:
                continue
            elif field == 'deleted':
                instance.deleted = db_inst['deleted'] == db_inst['id']
            elif field == 'cleaned':
                instance.cleaned = db_inst['cleaned'] == 1
            else:
                instance[field] = db_inst[field]

        if 'metadata' in expected_attrs:
            instance['metadata'] = utils.instance_meta(db_inst)
        if 'system_metadata' in expected_attrs:
            instance['system_metadata'] = utils.instance_sys_meta(db_inst)
        if 'fault' in expected_attrs:
            instance['fault'] = (
                instance_fault.InstanceFault.get_latest_for_instance(
                    context, instance.uuid))

        if 'pci_devices' in expected_attrs:
            pci_devices = pci_device._make_pci_list(context,
                                                    pci_device.PciDeviceList(),
                                                    db_inst['pci_devices'])
            instance['pci_devices'] = pci_devices
        if 'info_cache' in expected_attrs:
            if db_inst['info_cache'] is None:
                info_cache = None
            else:
                info_cache = instance_info_cache.InstanceInfoCache()
                instance_info_cache.InstanceInfoCache._from_db_object(
                    context, info_cache, db_inst['info_cache'])
            instance['info_cache'] = info_cache
        if 'security_groups' in expected_attrs:
            sec_groups = security_group._make_secgroup_list(
                context, security_group.SecurityGroupList(),
                db_inst['security_groups'])
            instance['security_groups'] = sec_groups

        instance._context = context
        instance.obj_reset_changes()
        return instance

    @base.remotable_classmethod
    def get_by_uuid(cls, context, uuid, expected_attrs=None, use_slave=False):
        if expected_attrs is None:
            expected_attrs = ['info_cache', 'security_groups']
        columns_to_join = _expected_cols(expected_attrs)
        db_inst = db.instance_get_by_uuid(context,
                                          uuid,
                                          columns_to_join=columns_to_join,
                                          use_slave=use_slave)
        return cls._from_db_object(context, cls(), db_inst, expected_attrs)

    @base.remotable_classmethod
    def get_by_id(cls, context, inst_id, expected_attrs=None):
        if expected_attrs is None:
            expected_attrs = ['info_cache', 'security_groups']
        columns_to_join = _expected_cols(expected_attrs)
        db_inst = db.instance_get(context,
                                  inst_id,
                                  columns_to_join=columns_to_join)
        return cls._from_db_object(context, cls(), db_inst, expected_attrs)

    @base.remotable
    def create(self, context):
        if self.obj_attr_is_set('id'):
            raise exception.ObjectActionError(action='create',
                                              reason='already created')
        updates = self.obj_get_changes()
        updates.pop('id', None)
        expected_attrs = [
            attr for attr in INSTANCE_DEFAULT_FIELDS if attr in updates
        ]
        if 'security_groups' in updates:
            updates['security_groups'] = [
                x.name for x in updates['security_groups']
            ]
        if 'info_cache' in updates:
            updates['info_cache'] = {
                'network_info': updates['info_cache'].network_info.json()
            }
        db_inst = db.instance_create(context, updates)
        Instance._from_db_object(context, self, db_inst, expected_attrs)

    @base.remotable
    def destroy(self, context):
        if not self.obj_attr_is_set('id'):
            raise exception.ObjectActionError(action='destroy',
                                              reason='already destroyed')
        if not self.obj_attr_is_set('uuid'):
            raise exception.ObjectActionError(action='destroy',
                                              reason='no uuid')
        if not self.obj_attr_is_set('host') or not self.host:
            # NOTE(danms): If our host is not set, avoid a race
            constraint = db.constraint(host=db.equal_any(None))
        else:
            constraint = None

        try:
            db.instance_destroy(context, self.uuid, constraint=constraint)
        except exception.ConstraintNotMet:
            raise exception.ObjectActionError(action='destroy',
                                              reason='host changed')
        delattr(self, base.get_attrname('id'))

    def _save_info_cache(self, context):
        self.info_cache.save(context)

    def _save_security_groups(self, context):
        for secgroup in self.security_groups:
            secgroup.save(context)

    def _save_fault(self, context):
        # NOTE(danms): I don't think we need to worry about this, do we?
        pass

    def _save_pci_devices(self, context):
        # NOTE(yjiang5): All devices are held by the PCI tracker, and only
        # the PCI tracker is permitted to update the DB. Any changes made to
        # devices here will be dropped.
        pass

    @base.remotable
    def save(self,
             context,
             expected_vm_state=None,
             expected_task_state=None,
             admin_state_reset=False):
        """Save updates to this instance

        Column-wise updates will be made based on the result of
        self.what_changed(). If expected_task_state is provided,
        it will be checked against the in-database copy of the
        instance before updates are made.
        :param context: Security context
        :param expected_task_state: Optional tuple of valid task states
                                    for the instance to be in.
        :param expected_vm_state: Optional tuple of valid vm states
                                  for the instance to be in.
        :param admin_state_reset: True if admin API is forcing setting
                                  of task_state/vm_state.
        """

        cell_type = cells_opts.get_cell_type()
        if cell_type == 'api' and self.cell_name:
            # NOTE(comstud): We need to stash a copy of ourselves
            # before any updates are applied.  When we call the save
            # methods on nested objects, we will lose any changes to
            # them.  But we need to make sure child cells can tell
            # what is changed.
            #
            # We also need to nuke any updates to vm_state and task_state
            # unless admin_state_reset is True.  compute cells are
            # authoritative for their view of vm_state and task_state.
            stale_instance = self.obj_clone()

            def _handle_cell_update_from_api():
                cells_api = cells_rpcapi.CellsAPI()
                cells_api.instance_update_from_api(context, stale_instance,
                                                   expected_vm_state,
                                                   expected_task_state,
                                                   admin_state_reset)
        else:
            stale_instance = None

        updates = {}
        changes = self.obj_what_changed()
        for field in self.fields:
            if (self.obj_attr_is_set(field)
                    and isinstance(self[field], base.NovaObject)):
                try:
                    getattr(self, '_save_%s' % field)(context)
                except AttributeError:
                    LOG.exception(_('No save handler for %s') % field,
                                  instance=self)
            elif field in changes:
                updates[field] = self[field]

        if not updates:
            if stale_instance:
                _handle_cell_update_from_api()
            return

        # Cleaned needs to be turned back into an int here
        if 'cleaned' in updates:
            if updates['cleaned']:
                updates['cleaned'] = 1
            else:
                updates['cleaned'] = 0

        if expected_task_state is not None:
            updates['expected_task_state'] = expected_task_state
        if expected_vm_state is not None:
            updates['expected_vm_state'] = expected_vm_state

        expected_attrs = [
            attr for attr in _INSTANCE_OPTIONAL_JOINED_FIELDS
            if self.obj_attr_is_set(attr)
        ]
        # NOTE(alaski): We need to pull system_metadata for the
        # notification.send_update() below.  If we don't there's a KeyError
        # when it tries to extract the flavor.
        if 'system_metadata' not in expected_attrs:
            expected_attrs.append('system_metadata')
        old_ref, inst_ref = db.instance_update_and_get_original(
            context,
            self.uuid,
            updates,
            update_cells=False,
            columns_to_join=_expected_cols(expected_attrs))

        if stale_instance:
            _handle_cell_update_from_api()
        elif cell_type == 'compute':
            cells_api = cells_rpcapi.CellsAPI()
            cells_api.instance_update_at_top(context, inst_ref)

        self._from_db_object(context, self, inst_ref, expected_attrs)
        notifications.send_update(context, old_ref, inst_ref)
        self.obj_reset_changes()

    @base.remotable
    def refresh(self, context, use_slave=False):
        extra = [
            field for field in INSTANCE_OPTIONAL_ATTRS
            if self.obj_attr_is_set(field)
        ]
        current = self.__class__.get_by_uuid(context,
                                             uuid=self.uuid,
                                             expected_attrs=extra,
                                             use_slave=use_slave)
        # NOTE(danms): We orphan the instance copy so we do not unexpectedly
        # trigger a lazy-load (which would mean we failed to calculate the
        # expected_attrs properly)
        current._context = None

        for field in self.fields:
            if self.obj_attr_is_set(field) and self[field] != current[field]:
                self[field] = current[field]
        self.obj_reset_changes()

    def obj_load_attr(self, attrname):
        if attrname not in INSTANCE_OPTIONAL_ATTRS:
            raise exception.ObjectActionError(
                action='obj_load_attr',
                reason='attribute %s not lazy-loadable' % attrname)
        if not self._context:
            raise exception.OrphanedObjectError(method='obj_load_attr',
                                                objtype=self.obj_name())

        LOG.debug(_("Lazy-loading `%(attr)s' on %(name)s uuid %(uuid)s"), {
            'attr': attrname,
            'name': self.obj_name(),
            'uuid': self.uuid,
        })
        # FIXME(comstud): This should be optimized to only load the attr.
        instance = self.__class__.get_by_uuid(self._context,
                                              uuid=self.uuid,
                                              expected_attrs=[attrname])

        # NOTE(danms): Never allow us to recursively-load
        if instance.obj_attr_is_set(attrname):
            self[attrname] = instance[attrname]
        else:
            raise exception.ObjectActionError(
                action='obj_load_attr',
                reason='loading %s requires recursion' % attrname)
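
# --- Hedged illustration (not part of the Nova snippet above) ---------------
# A minimal, self-contained sketch of the lazy-load-on-access pattern that
# obj_load_attr() implements: optional attributes are fetched on first access,
# and access on an object without a context fails loudly instead of returning
# stale data. All names below (LazyRecord, fetch_from_backend) are
# illustrative stand-ins, not Nova APIs.
class LazyRecord(object):
    OPTIONAL_ATTRS = ('system_metadata', 'fault')

    def __init__(self, uuid, context=None):
        self.uuid = uuid
        self._context = context
        self._loaded = {}

    def fetch_from_backend(self, attr):
        # Stand-in for a targeted re-read such as
        # get_by_uuid(..., expected_attrs=[attr]).
        return {'system_metadata': {'image_os': 'linux'}, 'fault': None}[attr]

    def __getattr__(self, attr):
        if attr not in self.OPTIONAL_ATTRS:
            raise AttributeError(attr)
        if self._context is None:
            raise RuntimeError('orphaned object: cannot lazy-load %s' % attr)
        if attr not in self._loaded:
            self._loaded[attr] = self.fetch_from_backend(attr)
        return self._loaded[attr]


if __name__ == '__main__':
    rec = LazyRecord('fake-uuid', context=object())
    print(rec.system_metadata)  # first access triggers the lazy load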
Beispiel #16
0
class Aggregate(base.NovaPersistentObject, base.NovaObject):
    # Version 1.0: Initial version
    # Version 1.1: String attributes updated to support unicode
    # Version 1.2: Added uuid field
    # Version 1.3: Added get_by_uuid method
    VERSION = '1.3'

    fields = {
        'id': fields.IntegerField(),
        'uuid': fields.UUIDField(nullable=False),
        'name': fields.StringField(),
        'hosts': fields.ListOfStringsField(nullable=True),
        'metadata': fields.DictOfStringsField(nullable=True),
    }

    obj_extra_fields = ['availability_zone']

    @staticmethod
    def _from_db_object(context, aggregate, db_aggregate):
        for key in aggregate.fields:
            if key == 'metadata':
                db_key = 'metadetails'
            elif key in DEPRECATED_FIELDS and key not in db_aggregate:
                continue
            else:
                db_key = key
            setattr(aggregate, key, db_aggregate[db_key])

        # NOTE: This can be removed when we bump Aggregate to v2.0
        aggregate.deleted_at = None
        aggregate.deleted = False

        aggregate._context = context
        aggregate.obj_reset_changes()

        return aggregate

    def _assert_no_hosts(self, action):
        if 'hosts' in self.obj_what_changed():
            raise exception.ObjectActionError(action=action,
                                              reason='hosts updated inline')

    @base.remotable_classmethod
    def get_by_id(cls, context, aggregate_id):
        db_aggregate = _aggregate_get_from_db(context, aggregate_id)
        return cls._from_db_object(context, cls(), db_aggregate)

    @base.remotable_classmethod
    def get_by_uuid(cls, context, aggregate_uuid):
        db_aggregate = _aggregate_get_from_db_by_uuid(context, aggregate_uuid)
        return cls._from_db_object(context, cls(), db_aggregate)

    @base.remotable
    def create(self):
        if self.obj_attr_is_set('id'):
            raise exception.ObjectActionError(action='create',
                                              reason='already created')

        self._assert_no_hosts('create')
        updates = self.obj_get_changes()
        payload = dict(updates)
        if 'metadata' in updates:
            # NOTE(danms): For some reason the notification format is weird
            payload['meta_data'] = payload.pop('metadata')
        if 'uuid' not in updates:
            updates['uuid'] = uuidutils.generate_uuid()
            self.uuid = updates['uuid']
            LOG.debug('Generated uuid %(uuid)s for aggregate',
                      dict(uuid=updates['uuid']))
        compute_utils.notify_about_aggregate_update(self._context,
                                                    "create.start", payload)
        compute_utils.notify_about_aggregate_action(
            context=self._context,
            aggregate=self,
            action=fields.NotificationAction.CREATE,
            phase=fields.NotificationPhase.START)

        metadata = updates.pop('metadata', None)
        db_aggregate = _aggregate_create_in_db(self._context,
                                               updates,
                                               metadata=metadata)
        self._from_db_object(self._context, self, db_aggregate)
        payload['aggregate_id'] = self.id
        compute_utils.notify_about_aggregate_update(self._context,
                                                    "create.end", payload)
        compute_utils.notify_about_aggregate_action(
            context=self._context,
            aggregate=self,
            action=fields.NotificationAction.CREATE,
            phase=fields.NotificationPhase.END)

    @base.remotable
    def save(self):
        self._assert_no_hosts('save')
        updates = self.obj_get_changes()

        payload = {'aggregate_id': self.id}
        if 'metadata' in updates:
            payload['meta_data'] = updates['metadata']
        compute_utils.notify_about_aggregate_update(self._context,
                                                    "updateprop.start",
                                                    payload)
        updates.pop('id', None)
        db_aggregate = _aggregate_update_to_db(self._context, self.id, updates)
        compute_utils.notify_about_aggregate_update(self._context,
                                                    "updateprop.end", payload)
        self._from_db_object(self._context, self, db_aggregate)

    @base.remotable
    def update_metadata(self, updates):
        payload = {'aggregate_id': self.id, 'meta_data': updates}
        compute_utils.notify_about_aggregate_update(self._context,
                                                    "updatemetadata.start",
                                                    payload)
        compute_utils.notify_about_aggregate_action(
            context=self._context,
            aggregate=self,
            action=fields.NotificationAction.UPDATE_METADATA,
            phase=fields.NotificationPhase.START)
        to_add = {}
        for key, value in updates.items():
            if value is None:
                try:
                    _metadata_delete_from_db(self._context, self.id, key)
                except exception.AggregateMetadataNotFound:
                    pass
                try:
                    self.metadata.pop(key)
                except KeyError:
                    pass
            else:
                to_add[key] = value
                self.metadata[key] = value
        _metadata_add_to_db(self._context, self.id, to_add)
        compute_utils.notify_about_aggregate_update(self._context,
                                                    "updatemetadata.end",
                                                    payload)
        compute_utils.notify_about_aggregate_action(
            context=self._context,
            aggregate=self,
            action=fields.NotificationAction.UPDATE_METADATA,
            phase=fields.NotificationPhase.END)
        self.obj_reset_changes(fields=['metadata'])

    @base.remotable
    def destroy(self):
        _aggregate_delete_from_db(self._context, self.id)

    @base.remotable
    def add_host(self, host):
        _host_add_to_db(self._context, self.id, host)

        if self.hosts is None:
            self.hosts = []
        self.hosts.append(host)
        self.obj_reset_changes(fields=['hosts'])

    @base.remotable
    def delete_host(self, host):
        _host_delete_from_db(self._context, self.id, host)

        self.hosts.remove(host)
        self.obj_reset_changes(fields=['hosts'])

    @property
    def availability_zone(self):
        return self.metadata.get('availability_zone', None)
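
# --- Hedged illustration (not part of the Nova snippet above) ---------------
# update_metadata() above treats a value of None as "delete this key" and any
# other value as an upsert. The helper below is a self-contained stand-in for
# that merge rule only; it is not the Nova implementation and does not touch
# the database.
def apply_metadata_updates(metadata, updates):
    """Return a new metadata dict with None-valued updates removed."""
    result = dict(metadata)
    for key, value in updates.items():
        if value is None:
            result.pop(key, None)  # deleting a missing key is not an error
        else:
            result[key] = value
    return result


# Example (illustrative values only):
#   apply_metadata_updates({'availability_zone': 'az1', 'ssd': 'true'},
#                          {'ssd': None, 'tier': 'gold'})
#   -> {'availability_zone': 'az1', 'tier': 'gold'}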
Beispiel #17
0
class PciDevice(base.NovaPersistentObject, base.NovaObject):
    """Object to represent a PCI device on a compute node.

    PCI devices are managed by the compute resource tracker, which discovers
    the devices from the hardware platform, claims, allocates and frees
    devices for instances.

    The PCI device information is permanently maintained in a database.
    This makes it convenient to get PCI device information, like physical
    function for a VF device, adjacent switch IP address for a NIC,
    hypervisor identification for a PCI device, etc. It also provides a
    convenient way to check device allocation information for administrator
    purposes.

    A device can be in available/claimed/allocated/deleted/removed state.

    A device is available when it is discovered.

    A device is claimed prior to being allocated to an instance. Normally the
    transition from claimed to allocated is quick. However, during a resize
    operation the transition can take longer, because devices are claimed in
    prep_resize and allocated in finish_resize.

    A device becomes removed when hot removed from a node (i.e. not found in
    the next auto-discover) but not yet synced with the DB. A removed device
    should not be allocated to any instance, and once deleted from the DB,
    the device object is changed to deleted state and no longer synced with
    the DB.

    Field notes::

        | 'dev_id':
        |   Hypervisor's identification for the device, the string format
        |   is hypervisor specific
        | 'extra_info':
        |   Device-specific properties like PF address, switch ip address etc.

    """

    # Version 1.0: Initial version
    # Version 1.1: String attributes updated to support unicode
    # Version 1.2: added request_id field
    # Version 1.3: Added field to represent PCI device NUMA node
    # Version 1.4: Added parent_addr field
    # Version 1.5: Added 2 new device statuses: UNCLAIMABLE and UNAVAILABLE
    # Version 1.6: Added uuid field
    # Version 1.7: Added 'vdpa' to 'dev_type' field
    VERSION = '1.7'

    fields = {
        'id': fields.IntegerField(),
        'uuid': fields.UUIDField(),
        # Note(yjiang5): the compute_node_id may be None because the pci
        # device objects are created before the compute node is created in DB
        'compute_node_id': fields.IntegerField(nullable=True),
        'address': fields.StringField(),
        'vendor_id': fields.StringField(),
        'product_id': fields.StringField(),
        'dev_type': fields.PciDeviceTypeField(),
        'status': fields.PciDeviceStatusField(),
        'dev_id': fields.StringField(nullable=True),
        'label': fields.StringField(nullable=True),
        'instance_uuid': fields.StringField(nullable=True),
        'request_id': fields.StringField(nullable=True),
        'extra_info': fields.DictOfStringsField(default={}),
        'numa_node': fields.IntegerField(nullable=True),
        'parent_addr': fields.StringField(nullable=True),
    }

    def obj_make_compatible(self, primitive, target_version):
        target_version = versionutils.convert_version_to_tuple(target_version)
        if target_version < (1, 2) and 'request_id' in primitive:
            del primitive['request_id']
        if target_version < (1, 4) and 'parent_addr' in primitive:
            if primitive['parent_addr'] is not None:
                extra_info = primitive.get('extra_info', {})
                extra_info['phys_function'] = primitive['parent_addr']
            del primitive['parent_addr']
        if target_version < (1, 5) and 'parent_addr' in primitive:
            added_statuses = (fields.PciDeviceStatus.UNCLAIMABLE,
                              fields.PciDeviceStatus.UNAVAILABLE)
            status = primitive['status']
            if status in added_statuses:
                raise exception.ObjectActionError(
                    action='obj_make_compatible',
                    reason='status=%s not supported in version %s' %
                    (status, target_version))
        if target_version < (1, 6) and 'uuid' in primitive:
            del primitive['uuid']
        if target_version < (1, 7) and 'dev_type' in primitive:
            dev_type = primitive['dev_type']
            if dev_type == fields.PciDeviceType.VDPA:
                raise exception.ObjectActionError(
                    action='obj_make_compatible',
                    reason='dev_type=%s not supported in version %s' %
                    (dev_type, target_version))

    def update_device(self, dev_dict):
        """Sync the content from device dictionary to device object.

        The resource tracker updates the available devices periodically.
        To avoid meaningless syncs with the database, we update the device
        object only if a value changed.
        """

        # Note(yjiang5): status/instance_uuid should only be updated by
        # functions like claim/allocate etc. The id is allocated by the
        # database. The extra_info is created by the object.
        no_changes = ('status', 'instance_uuid', 'id', 'extra_info')
        for key in no_changes:
            dev_dict.pop(key, None)

        # NOTE(ndipanov): This needs to be set as it's accessed when matching
        dev_dict.setdefault('parent_addr')

        for k, v in dev_dict.items():
            if k in self.fields.keys():
                setattr(self, k, v)
            else:
                # NOTE(yjiang5): extra_info.update does not update
                # obj_what_changed, set it explicitly
                # NOTE(ralonsoh): list of parameters currently added to
                # "extra_info" dict:
                #     - "capabilities": dict of (strings/list of strings)
                extra_info = self.extra_info
                data = v if isinstance(v, str) else jsonutils.dumps(v)
                extra_info.update({k: data})
                self.extra_info = extra_info

    def __init__(self, *args, **kwargs):
        super(PciDevice, self).__init__(*args, **kwargs)

        # NOTE(ndipanov): These are required to build an in-memory device tree
        # but don't need to be proper fields (and can't easily be as they would
        # hold circular references)
        self.parent_device = None
        self.child_devices = []

    def obj_load_attr(self, attr):
        if attr in ['extra_info']:
            # NOTE(danms): extra_info used to be defaulted during init,
            # so make sure any bare instantiations of this object can
            # rely on the expectation that referencing that field will
            # not fail.
            self.obj_set_defaults(attr)
        else:
            super(PciDevice, self).obj_load_attr(attr)

    def __eq__(self, other):
        return compare_pci_device_attributes(self, other)

    def __ne__(self, other):
        return not (self == other)

    @classmethod
    def populate_dev_uuids(cls, context, count):
        @db.pick_context_manager_reader
        def get_devs_no_uuid(context):
            return context.session.query(db_models.PciDevice).\
                    filter_by(uuid=None).limit(count).all()

        db_devs = get_devs_no_uuid(context)

        done = 0
        for db_dev in db_devs:
            cls._create_uuid(context, db_dev['id'])
            done += 1

        return done, done

    @classmethod
    def _from_db_object(cls, context, pci_device, db_dev):
        for key in pci_device.fields:
            if key == 'uuid' and db_dev['uuid'] is None:
                # NOTE(danms): While the records could be nullable,
                # generate a UUID on read since the object requires it
                dev_id = db_dev['id']
                db_dev[key] = cls._create_uuid(context, dev_id)

            if key == 'extra_info':
                extra_info = db_dev.get('extra_info')
                pci_device.extra_info = jsonutils.loads(extra_info)
                continue

            setattr(pci_device, key, db_dev[key])

        pci_device._context = context
        pci_device.obj_reset_changes()
        return pci_device

    @staticmethod
    @oslo_db_api.wrap_db_retry(max_retries=1, retry_on_deadlock=True)
    def _create_uuid(context, dev_id):
        # NOTE(mdbooth): This method is only required until uuid is made
        # non-nullable in a future release.

        # NOTE(mdbooth): We wrap this method in a retry loop because it can
        # fail (safely) on multi-master galera if concurrent updates happen on
        # different masters. It will never fail on single-master. We can only
        # ever need one retry.

        uuid = uuidutils.generate_uuid()
        values = {'uuid': uuid}
        compare = db_models.PciDevice(id=dev_id, uuid=None)

        # NOTE(mdbooth): We explicitly use an independent transaction context
        # here so as not to fail if:
        # 1. We retry.
        # 2. We're in a read transaction. This is an edge case of what's
        #    normally a read operation. Forcing everything (transitively) which
        #    reads a PCI device to be in a write transaction for a narrow
        #    temporary edge case is undesirable.
        tctxt = db.get_context_manager(context).writer.independent
        with tctxt.using(context):
            query = context.session.query(db_models.PciDevice).\
                        filter_by(id=dev_id)

            try:
                query.update_on_match(compare, 'id', values)
            except update_match.NoRowsMatched:
                # We can only get here if we raced, and another writer already
                # gave this PCI device a UUID
                result = query.one()
                uuid = result['uuid']

        return uuid

    @base.remotable_classmethod
    def get_by_dev_addr(cls, context, compute_node_id, dev_addr):
        db_dev = db.pci_device_get_by_addr(context, compute_node_id, dev_addr)
        return cls._from_db_object(context, cls(), db_dev)

    @base.remotable_classmethod
    def get_by_dev_id(cls, context, id):
        db_dev = db.pci_device_get_by_id(context, id)
        return cls._from_db_object(context, cls(), db_dev)

    @classmethod
    def create(cls, context, dev_dict):
        """Create a PCI device based on hypervisor information.

        As the device object has just been created and is not yet synced
        with the DB, we should not reset changes here for fields set from
        the dict.
        """
        pci_device = cls()
        # NOTE(danms): extra_info used to always be defaulted during init,
        # so make sure we replicate that behavior outside of init here
        # for compatibility reasons.
        pci_device.obj_set_defaults('extra_info')
        pci_device.update_device(dev_dict)
        pci_device.status = fields.PciDeviceStatus.AVAILABLE
        pci_device.uuid = uuidutils.generate_uuid()
        pci_device._context = context
        return pci_device

    @base.remotable
    def save(self):
        if self.status == fields.PciDeviceStatus.REMOVED:
            self.status = fields.PciDeviceStatus.DELETED
            db.pci_device_destroy(self._context, self.compute_node_id,
                                  self.address)
        elif self.status != fields.PciDeviceStatus.DELETED:
            # TODO(jaypipes): Remove in 2.0 version of object. This does an
            # inline migration to populate the uuid field. A similar migration
            # is done in the _from_db_object() method to migrate objects as
            # they are read from the DB.
            if 'uuid' not in self:
                self.uuid = uuidutils.generate_uuid()
            updates = self.obj_get_changes()

            if 'extra_info' in updates:
                updates['extra_info'] = jsonutils.dumps(updates['extra_info'])
            if updates:
                db_pci = db.pci_device_update(self._context,
                                              self.compute_node_id,
                                              self.address, updates)
                self._from_db_object(self._context, self, db_pci)

    @staticmethod
    def _bulk_update_status(dev_list, status):
        for dev in dev_list:
            dev.status = status

    def claim(self, instance_uuid):
        if self.status != fields.PciDeviceStatus.AVAILABLE:
            raise exception.PciDeviceInvalidStatus(
                compute_node_id=self.compute_node_id,
                address=self.address,
                status=self.status,
                hopestatus=[fields.PciDeviceStatus.AVAILABLE])

        if self.dev_type == fields.PciDeviceType.SRIOV_PF:
            # Update PF status to CLAIMED if all of its dependants are free
            # and set their status to UNCLAIMABLE
            vfs_list = self.child_devices
            if not all([vf.is_available() for vf in vfs_list]):
                raise exception.PciDeviceVFInvalidStatus(
                    compute_node_id=self.compute_node_id, address=self.address)
            self._bulk_update_status(vfs_list,
                                     fields.PciDeviceStatus.UNCLAIMABLE)

        elif self.dev_type in (fields.PciDeviceType.SRIOV_VF,
                               fields.PciDeviceType.VDPA):
            # Update VF status to CLAIMED if its parent has not been
            # previously allocated or claimed
            # When claiming/allocating a VF, its parent PF becomes
            # unclaimable/unavailable. Therefore, it is expected to find the
            # parent PF in an unclaimable/unavailable state for any following
            # claims to a sibling VF

            parent_ok_statuses = (fields.PciDeviceStatus.AVAILABLE,
                                  fields.PciDeviceStatus.UNCLAIMABLE,
                                  fields.PciDeviceStatus.UNAVAILABLE)
            parent = self.parent_device
            if parent:
                if parent.status not in parent_ok_statuses:
                    raise exception.PciDevicePFInvalidStatus(
                        compute_node_id=self.compute_node_id,
                        address=self.parent_addr,
                        status=self.status,
                        vf_address=self.address,
                        hopestatus=parent_ok_statuses)
                # Set PF status
                if parent.status == fields.PciDeviceStatus.AVAILABLE:
                    parent.status = fields.PciDeviceStatus.UNCLAIMABLE
            else:
                LOG.debug(
                    'Physical function addr: %(pf_addr)s parent of '
                    'VF addr: %(vf_addr)s was not found', {
                        'pf_addr': self.parent_addr,
                        'vf_addr': self.address
                    })

        self.status = fields.PciDeviceStatus.CLAIMED
        self.instance_uuid = instance_uuid

    def allocate(self, instance):
        ok_statuses = (fields.PciDeviceStatus.AVAILABLE,
                       fields.PciDeviceStatus.CLAIMED)
        parent_ok_statuses = (fields.PciDeviceStatus.AVAILABLE,
                              fields.PciDeviceStatus.UNCLAIMABLE,
                              fields.PciDeviceStatus.UNAVAILABLE)
        dependants_ok_statuses = (fields.PciDeviceStatus.AVAILABLE,
                                  fields.PciDeviceStatus.UNCLAIMABLE)
        if self.status not in ok_statuses:
            raise exception.PciDeviceInvalidStatus(
                compute_node_id=self.compute_node_id,
                address=self.address,
                status=self.status,
                hopestatus=ok_statuses)
        if (self.status == fields.PciDeviceStatus.CLAIMED
                and self.instance_uuid != instance['uuid']):
            raise exception.PciDeviceInvalidOwner(
                compute_node_id=self.compute_node_id,
                address=self.address,
                owner=self.instance_uuid,
                hopeowner=instance['uuid'])
        if self.dev_type == fields.PciDeviceType.SRIOV_PF:
            vfs_list = self.child_devices
            if not all(
                [vf.status in dependants_ok_statuses for vf in vfs_list]):
                raise exception.PciDeviceVFInvalidStatus(
                    compute_node_id=self.compute_node_id, address=self.address)
            self._bulk_update_status(vfs_list,
                                     fields.PciDeviceStatus.UNAVAILABLE)

        elif self.dev_type in (fields.PciDeviceType.SRIOV_VF,
                               fields.PciDeviceType.VDPA):
            parent = self.parent_device
            if parent:
                if parent.status not in parent_ok_statuses:
                    raise exception.PciDevicePFInvalidStatus(
                        compute_node_id=self.compute_node_id,
                        address=self.parent_addr,
                        status=self.status,
                        vf_address=self.address,
                        hopestatus=parent_ok_statuses)
                # Set PF status
                parent.status = fields.PciDeviceStatus.UNAVAILABLE
            else:
                LOG.debug(
                    'Physical function addr: %(pf_addr)s parent of '
                    'VF addr: %(vf_addr)s was not found', {
                        'pf_addr': self.parent_addr,
                        'vf_addr': self.address
                    })

        self.status = fields.PciDeviceStatus.ALLOCATED
        self.instance_uuid = instance['uuid']

        # Notes(yjiang5): remove this check when instance object for
        # compute manager is finished
        if isinstance(instance, dict):
            if 'pci_devices' not in instance:
                instance['pci_devices'] = []
            instance['pci_devices'].append(copy.copy(self))
        else:
            instance.pci_devices.objects.append(copy.copy(self))

    def remove(self):
        # We allow removal of a device if it is unused. It can be unused
        # either by being in available state or being in a state that shows
        # that the parent or child device blocks the consumption of this device
        expected_states = [
            fields.PciDeviceStatus.AVAILABLE,
            fields.PciDeviceStatus.UNAVAILABLE,
            fields.PciDeviceStatus.UNCLAIMABLE,
        ]
        if self.status not in expected_states:
            raise exception.PciDeviceInvalidStatus(
                compute_node_id=self.compute_node_id,
                address=self.address,
                status=self.status,
                hopestatus=expected_states)
        # Just to be on the safe side, do not allow removal of a device that
        # has an owner even if the state of the device suggests that it is
        # not owned.
        if 'instance_uuid' in self and self.instance_uuid is not None:
            raise exception.PciDeviceInvalidOwner(
                compute_node_id=self.compute_node_id,
                address=self.address,
                owner=self.instance_uuid,
                hopeowner=None,
            )

        self.status = fields.PciDeviceStatus.REMOVED
        self.instance_uuid = None
        self.request_id = None

    def free(self, instance=None):
        ok_statuses = (fields.PciDeviceStatus.ALLOCATED,
                       fields.PciDeviceStatus.CLAIMED)
        free_devs = []
        if self.status not in ok_statuses:
            raise exception.PciDeviceInvalidStatus(
                compute_node_id=self.compute_node_id,
                address=self.address,
                status=self.status,
                hopestatus=ok_statuses)
        if instance and self.instance_uuid != instance['uuid']:
            raise exception.PciDeviceInvalidOwner(
                compute_node_id=self.compute_node_id,
                address=self.address,
                owner=self.instance_uuid,
                hopeowner=instance['uuid'])
        if self.dev_type == fields.PciDeviceType.SRIOV_PF:
            # Set all PF dependants status to AVAILABLE
            vfs_list = self.child_devices
            self._bulk_update_status(vfs_list,
                                     fields.PciDeviceStatus.AVAILABLE)
            free_devs.extend(vfs_list)
        if self.dev_type in (fields.PciDeviceType.SRIOV_VF,
                             fields.PciDeviceType.VDPA):
            # Set PF status to AVAILABLE if all of its VFs are free
            parent = self.parent_device
            if not parent:
                LOG.debug(
                    'Physical function addr: %(pf_addr)s parent of '
                    'VF addr: %(vf_addr)s was not found', {
                        'pf_addr': self.parent_addr,
                        'vf_addr': self.address
                    })
            else:
                vfs_list = parent.child_devices
                if all(
                    [vf.is_available() for vf in vfs_list
                     if vf.id != self.id]):
                    parent.status = fields.PciDeviceStatus.AVAILABLE
                    free_devs.append(parent)
        old_status = self.status
        self.status = fields.PciDeviceStatus.AVAILABLE
        free_devs.append(self)
        self.instance_uuid = None
        self.request_id = None
        if old_status == fields.PciDeviceStatus.ALLOCATED and instance:
            # Notes(yjiang5): remove this check when instance object for
            # compute manager is finished
            existed = next(
                (dev for dev in instance['pci_devices'] if dev.id == self.id))
            if isinstance(instance, dict):
                instance['pci_devices'].remove(existed)
            else:
                instance.pci_devices.objects.remove(existed)
        return free_devs

    def is_available(self):
        return self.status == fields.PciDeviceStatus.AVAILABLE

    @property
    def card_serial_number(self):
        caps_json = self.extra_info.get('capabilities', "{}")
        caps = jsonutils.loads(caps_json)
        return caps.get('vpd', {}).get('card_serial_number')

    @property
    def sriov_cap(self):
        caps_json = self.extra_info.get('capabilities', '{}')
        caps = jsonutils.loads(caps_json)
        return caps.get('sriov', {})
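
# --- Hedged illustration (not part of the Nova snippet above) ---------------
# Sketch of the PF/VF status coupling that claim() enforces: a PF may only be
# claimed while every child VF is still free, and claiming it pushes the VFs
# into an "unclaimable" state so they cannot be handed out separately. The
# class and status names below are simplified stand-ins, not the
# nova.objects.fields enum values.
AVAILABLE, CLAIMED, UNCLAIMABLE = 'available', 'claimed', 'unclaimable'


class FakeDev(object):
    def __init__(self, address, children=()):
        self.address = address
        self.status = AVAILABLE
        self.child_devices = list(children)


def claim_pf(pf):
    if any(vf.status != AVAILABLE for vf in pf.child_devices):
        raise ValueError('cannot claim %s: a VF is already in use' % pf.address)
    for vf in pf.child_devices:
        vf.status = UNCLAIMABLE
    pf.status = CLAIMED


if __name__ == '__main__':
    vfs = [FakeDev('0000:81:00.%d' % i) for i in (1, 2)]
    pf = FakeDev('0000:81:00.0', children=vfs)
    claim_pf(pf)
    print(pf.status, [vf.status for vf in vfs])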
Beispiel #18
0
class InstanceGroup(base.NovaPersistentObject, base.NovaObject,
                    base.NovaObjectDictCompat):
    # Version 1.0: Initial version
    # Version 1.1: String attributes updated to support unicode
    # Version 1.2: Use list/dict helpers for policies, metadetails, members
    # Version 1.3: Make uuid a non-None real string
    # Version 1.4: Add add_members()
    # Version 1.5: Add get_hosts()
    # Version 1.6: Add get_by_name()
    # Version 1.7: Deprecate metadetails
    # Version 1.8: Add count_members_by_user()
    # Version 1.9: Add get_by_instance_uuid()
    # Version 1.10: Add hosts field
    # Version 1.11: Add policy and deprecate policies, add _rules
    VERSION = '1.11'

    fields = {
        'id': fields.IntegerField(),
        'user_id': fields.StringField(nullable=True),
        'project_id': fields.StringField(nullable=True),
        'uuid': fields.UUIDField(),
        'name': fields.StringField(nullable=True),
        'policies': fields.ListOfStringsField(nullable=True, read_only=True),
        'members': fields.ListOfStringsField(nullable=True),
        'hosts': fields.ListOfStringsField(nullable=True),
        'policy': fields.StringField(nullable=True),
        # NOTE(danms): Use rules not _rules for general access
        '_rules': fields.DictOfStringsField(),
    }

    def __init__(self, *args, **kwargs):
        if 'rules' in kwargs:
            kwargs['_rules'] = kwargs.pop('rules')
        super(InstanceGroup, self).__init__(*args, **kwargs)

    @property
    def rules(self):
        if '_rules' not in self:
            return {}
        # NOTE(danms): Coerce our rules into a typed dict for convenience
        rules = {}
        if 'max_server_per_host' in self._rules:
            rules['max_server_per_host'] = \
                    int(self._rules['max_server_per_host'])
        return rules

    def obj_make_compatible(self, primitive, target_version):
        target_version = versionutils.convert_version_to_tuple(target_version)
        if target_version < (1, 11):
            # NOTE(yikun): Before 1.11, we had a policies property which was
            # a list of policy names; even though it was a list, there was
            # only ever one entry in it.
            policy = primitive.pop('policy', None)
            if policy:
                primitive['policies'] = [policy]
            else:
                primitive['policies'] = []
            primitive.pop('rules', None)
        if target_version < (1, 7):
            # NOTE(danms): Before 1.7, we had an always-empty
            # metadetails property
            primitive['metadetails'] = {}

    @staticmethod
    def _from_db_object(context, instance_group, db_inst):
        """Method to help with migration to objects.

        Converts a database entity to a formal object.
        """
        # Most of the field names match right now, so be quick
        for field in instance_group.fields:
            if field in LAZY_LOAD_FIELDS:
                continue
            # This is needed to handle db models from both the api
            # database and the main database. In the migration to
            # the api database, we have removed soft-delete, so
            # the object fields for delete must be filled in with
            # default values for db models from the api database.
            # TODO(mriedem): Remove this when NovaPersistentObject is removed.
            ignore = {'deleted': False, 'deleted_at': None}
            if '_rules' == field:
                db_policy = db_inst['policy']
                instance_group._rules = (jsonutils.loads(db_policy['rules'])
                                         if db_policy and db_policy['rules']
                                         else {})
            elif field in ignore and not hasattr(db_inst, field):
                instance_group[field] = ignore[field]
            elif 'policies' == field:
                continue
            # NOTE(yikun): The obj.policies is deprecated and marked as
            # read_only in version 1.11, and there is no "policies" property
            # in the InstanceGroup model anymore, so we just skip setting
            # "policies" here and load it when "policy" is set.
            elif 'policy' == field:
                db_policy = db_inst['policy']
                if db_policy:
                    instance_group.policy = db_policy['policy']
                    instance_group.policies = [instance_group.policy]
                else:
                    instance_group.policy = None
                    instance_group.policies = []
            else:
                instance_group[field] = db_inst[field]

        instance_group._context = context
        instance_group.obj_reset_changes()
        return instance_group

    @staticmethod
    @db_api.api_context_manager.reader
    def _get_from_db_by_uuid(context, uuid):
        grp = _instance_group_get_query(context,
                                        id_field=api_models.InstanceGroup.uuid,
                                        id=uuid).first()
        if not grp:
            raise exception.InstanceGroupNotFound(group_uuid=uuid)
        return grp

    @staticmethod
    @db_api.api_context_manager.reader
    def _get_from_db_by_id(context, id):
        grp = _instance_group_get_query(context,
                                        id_field=api_models.InstanceGroup.id,
                                        id=id).first()
        if not grp:
            raise exception.InstanceGroupNotFound(group_uuid=id)
        return grp

    @staticmethod
    @db_api.api_context_manager.reader
    def _get_from_db_by_name(context, name):
        grp = _instance_group_get_query(context).filter_by(name=name).first()
        if not grp:
            raise exception.InstanceGroupNotFound(group_uuid=name)
        return grp

    @staticmethod
    @db_api.api_context_manager.reader
    def _get_from_db_by_instance(context, instance_uuid):
        grp_member = context.session.query(api_models.InstanceGroupMember).\
                     filter_by(instance_uuid=instance_uuid).first()
        if not grp_member:
            raise exception.InstanceGroupNotFound(group_uuid='')
        grp = InstanceGroup._get_from_db_by_id(context, grp_member.group_id)
        return grp

    @staticmethod
    @db_api.api_context_manager.writer
    def _save_in_db(context, group_uuid, values):
        grp = InstanceGroup._get_from_db_by_uuid(context, group_uuid)
        values_copy = copy.copy(values)
        members = values_copy.pop('members', None)

        grp.update(values_copy)

        if members is not None:
            _instance_group_members_add(context, grp, members)

        return grp

    @staticmethod
    @db_api.api_context_manager.writer
    def _create_in_db(context,
                      values,
                      policies=None,
                      members=None,
                      policy=None,
                      rules=None):
        try:
            group = api_models.InstanceGroup()
            group.update(values)
            group.save(context.session)
        except db_exc.DBDuplicateEntry:
            raise exception.InstanceGroupIdExists(group_uuid=values['uuid'])

        if policies:
            db_policy = api_models.InstanceGroupPolicy(group_id=group['id'],
                                                       policy=policies[0],
                                                       rules=None)
            group._policies = [db_policy]
            group.rules = None
        elif policy:
            db_rules = jsonutils.dumps(rules or {})
            db_policy = api_models.InstanceGroupPolicy(group_id=group['id'],
                                                       policy=policy,
                                                       rules=db_rules)
            group._policies = [db_policy]
        else:
            group._policies = []

        if group._policies:
            group.save(context.session)

        if members:
            group._members = _instance_group_members_add(
                context, group, members)
        else:
            group._members = []

        return group

    @staticmethod
    @db_api.api_context_manager.writer
    def _destroy_in_db(context, group_uuid):
        qry = _instance_group_get_query(context,
                                        id_field=api_models.InstanceGroup.uuid,
                                        id=group_uuid)
        if qry.count() == 0:
            raise exception.InstanceGroupNotFound(group_uuid=group_uuid)

        # Delete policies and members
        group_id = qry.first().id
        instance_models = [
            api_models.InstanceGroupPolicy, api_models.InstanceGroupMember
        ]
        for model in instance_models:
            context.session.query(model).filter_by(group_id=group_id).delete()

        qry.delete()

    @staticmethod
    @db_api.api_context_manager.writer
    def _add_members_in_db(context, group_uuid, members):
        return _instance_group_members_add_by_uuid(context, group_uuid,
                                                   members)

    @staticmethod
    @db_api.api_context_manager.writer
    def _remove_members_in_db(context, group_id, instance_uuids):
        # There is no public method provided for removing members because the
        # user-facing API doesn't allow removal of instance group members. We
        # need to be able to remove members to address quota races.
        context.session.query(api_models.InstanceGroupMember).\
            filter_by(group_id=group_id).\
            filter(api_models.InstanceGroupMember.instance_uuid.
                   in_(set(instance_uuids))).\
            delete(synchronize_session=False)

    @staticmethod
    @db_api.api_context_manager.writer
    def _destroy_members_bulk_in_db(context, instance_uuids):
        return context.session.query(api_models.InstanceGroupMember).filter(
            api_models.InstanceGroupMember.instance_uuid.in_(instance_uuids)).\
            delete(synchronize_session=False)

    @classmethod
    def destroy_members_bulk(cls, context, instance_uuids):
        return cls._destroy_members_bulk_in_db(context, instance_uuids)

    def obj_load_attr(self, attrname):
        # NOTE(sbauza): Only hosts could be lazy-loaded right now
        if attrname != 'hosts':
            raise exception.ObjectActionError(action='obj_load_attr',
                                              reason='unable to load %s' %
                                              attrname)

        LOG.debug("Lazy-loading '%(attr)s' on %(name)s uuid %(uuid)s", {
            'attr': attrname,
            'name': self.obj_name(),
            'uuid': self.uuid,
        })

        self.hosts = self.get_hosts()
        self.obj_reset_changes(['hosts'])

    @base.remotable_classmethod
    def get_by_uuid(cls, context, uuid):
        db_group = cls._get_from_db_by_uuid(context, uuid)
        return cls._from_db_object(context, cls(), db_group)

    @base.remotable_classmethod
    def get_by_name(cls, context, name):
        db_group = cls._get_from_db_by_name(context, name)
        return cls._from_db_object(context, cls(), db_group)

    @base.remotable_classmethod
    def get_by_instance_uuid(cls, context, instance_uuid):
        db_group = cls._get_from_db_by_instance(context, instance_uuid)
        return cls._from_db_object(context, cls(), db_group)

    @classmethod
    def get_by_hint(cls, context, hint):
        if uuidutils.is_uuid_like(hint):
            return cls.get_by_uuid(context, hint)
        else:
            return cls.get_by_name(context, hint)

    @base.remotable
    def save(self):
        """Save updates to this instance group."""

        updates = self.obj_get_changes()

        # NOTE(sbauza): We do NOT save the set of compute nodes that an
        # instance group is connected to in this method. Instance groups are
        # implicitly connected to compute nodes when the
        # InstanceGroup.add_members() method is called, which adds the mapping
        # table entries.
        # So, since the only way to have hosts in the updates is to set that
        # field explicitly, we prefer to raise an Exception so the developer
        # knows he has to call obj_reset_changes(['hosts']) right after setting
        # the field.
        if 'hosts' in updates:
            raise exception.InstanceGroupSaveException(field='hosts')

        # NOTE(yikun): You have to provide exactly one policy on group create,
        # and also there are no group update APIs, so we do NOT support
        # policies update.
        if 'policies' in updates:
            raise exception.InstanceGroupSaveException(field='policies')

        if not updates:
            return

        payload = dict(updates)
        payload['server_group_id'] = self.uuid

        db_group = self._save_in_db(self._context, self.uuid, updates)
        self._from_db_object(self._context, self, db_group)
        compute_utils.notify_about_server_group_update(self._context, "update",
                                                       payload)

    @base.remotable
    def refresh(self):
        """Refreshes the instance group."""
        current = self.__class__.get_by_uuid(self._context, self.uuid)
        for field in self.fields:
            if self.obj_attr_is_set(field) and self[field] != current[field]:
                self[field] = current[field]
        self.obj_reset_changes()

    @base.remotable
    def create(self):
        if self.obj_attr_is_set('id'):
            raise exception.ObjectActionError(action='create',
                                              reason='already created')
        updates = self.obj_get_changes()
        payload = dict(updates)
        updates.pop('id', None)
        policies = updates.pop('policies', None)
        policy = updates.pop('policy', None)
        rules = updates.pop('_rules', None)
        members = updates.pop('members', None)

        if 'uuid' not in updates:
            self.uuid = uuidutils.generate_uuid()
            updates['uuid'] = self.uuid

        db_group = self._create_in_db(self._context,
                                      updates,
                                      policies=policies,
                                      members=members,
                                      policy=policy,
                                      rules=rules)
        self._from_db_object(self._context, self, db_group)
        payload['server_group_id'] = self.uuid
        compute_utils.notify_about_server_group_update(self._context, "create",
                                                       payload)
        compute_utils.notify_about_server_group_action(
            context=self._context,
            group=self,
            action=fields.NotificationAction.CREATE)

    @base.remotable
    def destroy(self):
        payload = {'server_group_id': self.uuid}
        self._destroy_in_db(self._context, self.uuid)
        self.obj_reset_changes()
        compute_utils.notify_about_server_group_update(self._context, "delete",
                                                       payload)
        compute_utils.notify_about_server_group_action(
            context=self._context,
            group=self,
            action=fields.NotificationAction.DELETE)

    @base.remotable_classmethod
    def add_members(cls, context, group_uuid, instance_uuids):
        payload = {
            'server_group_id': group_uuid,
            'instance_uuids': instance_uuids
        }
        members = cls._add_members_in_db(context, group_uuid, instance_uuids)
        members = [member['instance_uuid'] for member in members]
        compute_utils.notify_about_server_group_update(context, "addmember",
                                                       payload)
        compute_utils.notify_about_server_group_add_member(context, group_uuid)
        return list(members)

    @base.remotable
    def get_hosts(self, exclude=None):
        """Get a list of hosts for non-deleted instances in the group

        This method allows you to get a list of the hosts where instances in
        this group are currently running.  There's also an option to exclude
        certain instance UUIDs from this calculation.

        """
        filter_uuids = self.members
        if exclude:
            filter_uuids = set(filter_uuids) - set(exclude)
        filters = {'uuid': filter_uuids, 'deleted': False}
        instances = objects.InstanceList.get_by_filters(self._context,
                                                        filters=filters)
        return list(
            set([instance.host for instance in instances if instance.host]))

    @base.remotable
    def count_members_by_user(self, user_id):
        """Count the number of instances in a group belonging to a user."""
        filter_uuids = self.members
        filters = {'uuid': filter_uuids, 'user_id': user_id, 'deleted': False}
        instances = objects.InstanceList.get_by_filters(self._context,
                                                        filters=filters)
        return len(instances)
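
# --- Hedged illustration (not part of the Nova snippet above) ---------------
# The rules property above coerces the string-valued _rules dict into typed
# values; currently only max_server_per_host is recognised and cast to int.
# The stand-in below shows that coercion on its own, outside the NovaObject
# machinery.
def coerce_group_rules(raw_rules):
    rules = {}
    if 'max_server_per_host' in raw_rules:
        rules['max_server_per_host'] = int(raw_rules['max_server_per_host'])
    return rules


# Example (illustrative values only):
#   coerce_group_rules({'max_server_per_host': '2'}) -> {'max_server_per_host': 2}
#   coerce_group_rules({}) -> {}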
Beispiel #19
0
class XenapiLiveMigrateData(LiveMigrateData):
    # Version 1.0: Initial version
    # Version 1.1: Added vif_uuid_map
    # Version 1.2: Added old_vol_attachment_ids
    VERSION = '1.2'

    fields = {
        'block_migration': fields.BooleanField(nullable=True),
        'destination_sr_ref': fields.StringField(nullable=True),
        'migrate_send_data': fields.DictOfStringsField(nullable=True),
        'sr_uuid_map': fields.DictOfStringsField(),
        'kernel_file': fields.StringField(),
        'ramdisk_file': fields.StringField(),
        'vif_uuid_map': fields.DictOfStringsField(),
    }

    def to_legacy_dict(self, pre_migration_result=False):
        legacy = super(XenapiLiveMigrateData, self).to_legacy_dict()
        if self.obj_attr_is_set('block_migration'):
            legacy['block_migration'] = self.block_migration
        if self.obj_attr_is_set('migrate_send_data'):
            legacy['migrate_data'] = {
                'migrate_send_data': self.migrate_send_data,
                'destination_sr_ref': self.destination_sr_ref,
            }
        live_result = {
            'sr_uuid_map': ('sr_uuid_map' in self and self.sr_uuid_map or {}),
            'vif_uuid_map': ('vif_uuid_map' in self and self.vif_uuid_map
                             or {}),
        }
        if pre_migration_result:
            legacy['pre_live_migration_result'] = live_result
        return legacy

    def from_legacy_dict(self, legacy):
        super(XenapiLiveMigrateData, self).from_legacy_dict(legacy)
        if 'block_migration' in legacy:
            self.block_migration = legacy['block_migration']
        else:
            self.block_migration = False
        if 'migrate_data' in legacy:
            self.migrate_send_data = \
                legacy['migrate_data']['migrate_send_data']
            self.destination_sr_ref = \
                legacy['migrate_data']['destination_sr_ref']
        if 'pre_live_migration_result' in legacy:
            self.sr_uuid_map = \
                legacy['pre_live_migration_result']['sr_uuid_map']
            self.vif_uuid_map = \
                legacy['pre_live_migration_result'].get('vif_uuid_map', {})

    def obj_make_compatible(self, primitive, target_version):
        super(XenapiLiveMigrateData,
              self).obj_make_compatible(primitive, target_version)
        target_version = versionutils.convert_version_to_tuple(target_version)
        if target_version < (1, 2):
            if 'old_vol_attachment_ids' in primitive:
                del primitive['old_vol_attachment_ids']
        if target_version < (1, 1):
            if 'vif_uuid_map' in primitive:
                del primitive['vif_uuid_map']
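
# --- Hedged illustration (not part of the Nova snippet above) ---------------
# Rough shape of the legacy dict that to_legacy_dict()/from_legacy_dict()
# above round-trip. All values are made up for illustration; only the keys
# mirror what the methods read and write.
legacy_example = {
    'is_volume_backed': False,
    'block_migration': True,
    'migrate_data': {
        'migrate_send_data': {'host': 'dest.example.org'},
        'destination_sr_ref': 'OpaqueRef:sr-example',
    },
    'pre_live_migration_result': {
        'sr_uuid_map': {},
        'vif_uuid_map': {},
    },
}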
Beispiel #20
0
class InstancePayload(base.NotificationPayloadBase):
    SCHEMA = {
        'uuid': ('instance', 'uuid'),
        'user_id': ('instance', 'user_id'),
        'tenant_id': ('instance', 'project_id'),
        'reservation_id': ('instance', 'reservation_id'),
        'display_name': ('instance', 'display_name'),
        'display_description': ('instance', 'display_description'),
        'host_name': ('instance', 'hostname'),
        'host': ('instance', 'host'),
        'node': ('instance', 'node'),
        'os_type': ('instance', 'os_type'),
        'architecture': ('instance', 'architecture'),
        'availability_zone': ('instance', 'availability_zone'),

        'image_uuid': ('instance', 'image_ref'),

        'kernel_id': ('instance', 'kernel_id'),
        'ramdisk_id': ('instance', 'ramdisk_id'),

        'created_at': ('instance', 'created_at'),
        'launched_at': ('instance', 'launched_at'),
        'terminated_at': ('instance', 'terminated_at'),
        'deleted_at': ('instance', 'deleted_at'),

        'state': ('instance', 'vm_state'),
        'power_state': ('instance', 'power_state'),
        'task_state': ('instance', 'task_state'),
        'progress': ('instance', 'progress'),

        'metadata': ('instance', 'metadata'),
        'locked': ('instance', 'locked'),
        'auto_disk_config': ('instance', 'auto_disk_config')
    }
    # Version 1.0: Initial version
    # Version 1.1: add locked and display_description field
    # Version 1.2: Add auto_disk_config field
    VERSION = '1.2'
    fields = {
        'uuid': fields.UUIDField(),
        'user_id': fields.StringField(nullable=True),
        'tenant_id': fields.StringField(nullable=True),
        'reservation_id': fields.StringField(nullable=True),
        'display_name': fields.StringField(nullable=True),
        'display_description': fields.StringField(nullable=True),
        'host_name': fields.StringField(nullable=True),
        'host': fields.StringField(nullable=True),
        'node': fields.StringField(nullable=True),
        'os_type': fields.StringField(nullable=True),
        'architecture': fields.StringField(nullable=True),
        'availability_zone': fields.StringField(nullable=True),

        'flavor': fields.ObjectField('FlavorPayload'),
        'image_uuid': fields.StringField(nullable=True),

        'kernel_id': fields.StringField(nullable=True),
        'ramdisk_id': fields.StringField(nullable=True),

        'created_at': fields.DateTimeField(nullable=True),
        'launched_at': fields.DateTimeField(nullable=True),
        'terminated_at': fields.DateTimeField(nullable=True),
        'deleted_at': fields.DateTimeField(nullable=True),

        'state': fields.InstanceStateField(nullable=True),
        'power_state': fields.InstancePowerStateField(nullable=True),
        'task_state': fields.InstanceTaskStateField(nullable=True),
        'progress': fields.IntegerField(nullable=True),

        'ip_addresses': fields.ListOfObjectsField('IpPayload'),

        'metadata': fields.DictOfStringsField(),
        'locked': fields.BooleanField(),
        'auto_disk_config': fields.DiskConfigField()
    }

    def __init__(self, instance, **kwargs):
        # Note(gibi): ugly but needed to avoid cyclic import
        from nova.compute import utils

        network_info = utils.get_nw_info_for_instance(instance)
        ips = IpPayload.from_network_info(network_info)

        flavor = flavor_payload.FlavorPayload(flavor=instance.flavor)

        super(InstancePayload, self).__init__(
            ip_addresses=ips,
            flavor=flavor,
            **kwargs)

        self.populate_schema(instance=instance)
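
# --- Hedged illustration (not part of the Nova snippet above) ---------------
# SCHEMA maps each payload field to a (source object name, attribute) pair and
# populate_schema() copies values across that mapping. The stand-in below
# reproduces the idea for plain objects; FakeInstance and the helper are
# illustrative only, not the Nova notification base class.
class FakeInstance(object):
    uuid = 'fake-uuid'
    project_id = 'fake-project'
    hostname = 'vm-1'


EXAMPLE_SCHEMA = {
    'uuid': ('instance', 'uuid'),
    'tenant_id': ('instance', 'project_id'),  # note the renamed field
    'host_name': ('instance', 'hostname'),
}


def populate_from_schema(schema, **sources):
    return {field: getattr(sources[src], attr)
            for field, (src, attr) in schema.items()}


if __name__ == '__main__':
    print(populate_from_schema(EXAMPLE_SCHEMA, instance=FakeInstance()))
    # -> {'uuid': 'fake-uuid', 'tenant_id': 'fake-project', 'host_name': 'vm-1'}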
Beispiel #21
0
class PciDevice(base.NovaPersistentObject, base.NovaObject):
    """Object to represent a PCI device on a compute node.

    PCI devices are managed by the compute resource tracker, which discovers
    the devices from the hardware platform, claims, allocates and frees
    devices for instances.

    The PCI device information is permanently maintained in a database.
    This makes it convenient to get PCI device information, like physical
    function for a VF device, adjacent switch IP address for a NIC,
    hypervisor identification for a PCI device, etc. It also provides a
    convenient way to check device allocation information for administrator
    purposes.

    A device can be in available/claimed/allocated/deleted/removed state.

    A device is available when it is discovered.

    A device is claimed prior to being allocated to an instance. Normally the
    transition from claimed to allocated is quick. However, during a resize
    operation the transition can take longer, because devices are claimed in
    prep_resize and allocated in finish_resize.

    A device becomes removed when hot removed from a node (i.e. not found in
    the next auto-discover) but not yet synced with the DB. A removed device
    should not be allocated to any instance, and once deleted from the DB,
    the device object is changed to deleted state and no longer synced with
    the DB.

    Field notes::

        | 'dev_id':
        |   Hypervisor's identification for the device, the string format
        |   is hypervisor specific
        | 'extra_info':
        |   Device-specific properties like PF address, switch ip address etc.

    """

    # Version 1.0: Initial version
    # Version 1.1: String attributes updated to support unicode
    # Version 1.2: added request_id field
    # Version 1.3: Added field to represent PCI device NUMA node
    # Version 1.4: Added parent_addr field
    # Version 1.5: Added 2 new device statuses: UNCLAIMABLE and UNAVAILABLE
    # Version 1.6: Added uuid field
    VERSION = '1.6'

    fields = {
        'id': fields.IntegerField(),
        'uuid': fields.UUIDField(nullable=True),
        # Note(yjiang5): the compute_node_id may be None because the pci
        # device objects are created before the compute node is created in DB
        'compute_node_id': fields.IntegerField(nullable=True),
        'address': fields.StringField(),
        'vendor_id': fields.StringField(),
        'product_id': fields.StringField(),
        'dev_type': fields.PciDeviceTypeField(),
        'status': fields.PciDeviceStatusField(),
        'dev_id': fields.StringField(nullable=True),
        'label': fields.StringField(nullable=True),
        'instance_uuid': fields.StringField(nullable=True),
        'request_id': fields.StringField(nullable=True),
        'extra_info': fields.DictOfStringsField(),
        'numa_node': fields.IntegerField(nullable=True),
        'parent_addr': fields.StringField(nullable=True),
    }

    def obj_make_compatible(self, primitive, target_version):
        target_version = versionutils.convert_version_to_tuple(target_version)
        if target_version < (1, 2) and 'request_id' in primitive:
            del primitive['request_id']
        if target_version < (1, 4) and 'parent_addr' in primitive:
            if primitive['parent_addr'] is not None:
                extra_info = primitive.get('extra_info', {})
                extra_info['phys_function'] = primitive['parent_addr']
            del primitive['parent_addr']
        if target_version < (1, 5) and 'parent_addr' in primitive:
            added_statuses = (fields.PciDeviceStatus.UNCLAIMABLE,
                              fields.PciDeviceStatus.UNAVAILABLE)
            status = primitive['status']
            if status in added_statuses:
                raise exception.ObjectActionError(
                    action='obj_make_compatible',
                    reason='status=%s not supported in version %s' %
                    (status, target_version))
        if target_version < (1, 6) and 'uuid' in primitive:
            del primitive['uuid']
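    # For example, backporting a 1.6 primitive for a 1.3 consumer drops the
    # later fields (a sketch; the values are hypothetical):
    #
    #     primitive = {'address': '0000:81:00.2', 'status': 'available',
    #                  'parent_addr': '0000:81:00.0', 'extra_info': {},
    #                  'uuid': '...'}
    #     dev.obj_make_compatible(primitive, '1.3')
    #     # 'uuid' and 'parent_addr' are gone; the parent address survives as
    #     # extra_info['phys_function'] because an extra_info dict was present.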

    def update_device(self, dev_dict):
        """Sync the content from device dictionary to device object.

        The resource tracker updates the available devices periodically.
        To avoid meaningless syncs with the database, we update the device
        object only if a value changed.
        """

        # Note(yjiang5): status/instance_uuid should only be updated by
        # functions like claim/allocate etc. The id is allocated by the
        # database. The extra_info is created by the object.
        no_changes = ('status', 'instance_uuid', 'id', 'extra_info')
        for key in no_changes:
            dev_dict.pop(key, None)

        # NOTE(ndipanov): This needs to be set as it's accessed when matching
        dev_dict.setdefault('parent_addr')

        for k, v in dev_dict.items():
            if k in self.fields.keys():
                setattr(self, k, v)
            else:
                # NOTE(yjiang5): extra_info.update does not update
                # obj_what_changed, set it explicitly
                # NOTE(ralonsoh): list of parameters currently added to
                # "extra_info" dict:
                #     - "capabilities": dict of (strings/list of strings)
                extra_info = self.extra_info
                data = (v if isinstance(v, six.string_types) else
                        jsonutils.dumps(v))
                extra_info.update({k: data})
                self.extra_info = extra_info
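    # Keys that are not formal fields land in extra_info, with non-string
    # values JSON-encoded first. A sketch with a hypothetical device dict:
    #
    #     dev.update_device({'address': '0000:81:00.2',
    #                        'capabilities': {'network': ['rx', 'tx']}})
    #     # dev.address    == '0000:81:00.2'
    #     # dev.extra_info == {'capabilities': '{"network": ["rx", "tx"]}'}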

    def __init__(self, *args, **kwargs):
        super(PciDevice, self).__init__(*args, **kwargs)
        self.obj_reset_changes()
        self.extra_info = {}
        # NOTE(ndipanov): These are required to build an in-memory device tree
        # but don't need to be proper fields (and can't easily be as they would
        # hold circular references)
        self.parent_device = None
        self.child_devices = []
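    # The in-memory tree mentioned above is wired up by the PCI manager when
    # it builds the device list, roughly (a sketch, not the tracker's code):
    #
    #     vf.parent_device = pf
    #     pf.child_devices.append(vf)
    #
    # claim()/allocate()/free() below walk these links to keep PF and VF
    # statuses consistent.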

    def __eq__(self, other):
        return compare_pci_device_attributes(self, other)

    def __ne__(self, other):
        return not (self == other)

    @staticmethod
    def _from_db_object(context, pci_device, db_dev):
        for key in pci_device.fields:
            if key == 'uuid' and db_dev['uuid'] is None:
                # Older records might not have a uuid field set in the
                # database so we need to skip those here and auto-generate
                # a uuid later below.
                continue
            elif key != 'extra_info':
                setattr(pci_device, key, db_dev[key])
            else:
                extra_info = db_dev.get("extra_info")
                pci_device.extra_info = jsonutils.loads(extra_info)
        pci_device._context = context
        pci_device.obj_reset_changes()

        # TODO(jaypipes): Remove in 2.0 version of object. This does an inline
        # migration to populate the uuid field. A similar inline migration is
        # performed in the save() method.
        if db_dev['uuid'] is None:
            pci_device.uuid = uuidutils.generate_uuid()
            pci_device.save()

        return pci_device

    @base.remotable_classmethod
    def get_by_dev_addr(cls, context, compute_node_id, dev_addr):
        db_dev = db.pci_device_get_by_addr(context, compute_node_id, dev_addr)
        return cls._from_db_object(context, cls(), db_dev)

    @base.remotable_classmethod
    def get_by_dev_id(cls, context, id):
        db_dev = db.pci_device_get_by_id(context, id)
        return cls._from_db_object(context, cls(), db_dev)

    @classmethod
    def create(cls, context, dev_dict):
        """Create a PCI device based on hypervisor information.

        As the device object has just been created and is not yet synced
        with the DB, we should not reset changes here for fields set from
        the dict.
        """
        pci_device = cls()
        pci_device.update_device(dev_dict)
        pci_device.status = fields.PciDeviceStatus.AVAILABLE
        pci_device.uuid = uuidutils.generate_uuid()
        pci_device._context = context
        return pci_device

    @base.remotable
    def save(self):
        if self.status == fields.PciDeviceStatus.REMOVED:
            self.status = fields.PciDeviceStatus.DELETED
            db.pci_device_destroy(self._context, self.compute_node_id,
                                  self.address)
        elif self.status != fields.PciDeviceStatus.DELETED:
            # TODO(jaypipes): Remove in 2.0 version of object. This does an
            # inline migration to populate the uuid field. A similar migration
            # is done in the _from_db_object() method to migrate objects as
            # they are read from the DB.
            if 'uuid' not in self:
                self.uuid = uuidutils.generate_uuid()
            updates = self.obj_get_changes()
            updates['extra_info'] = self.extra_info

            if 'extra_info' in updates:
                updates['extra_info'] = jsonutils.dumps(updates['extra_info'])
            if updates:
                db_pci = db.pci_device_update(self._context,
                                              self.compute_node_id,
                                              self.address, updates)
                self._from_db_object(self._context, self, db_pci)

    @staticmethod
    def _bulk_update_status(dev_list, status):
        for dev in dev_list:
            dev.status = status

    def claim(self, instance_uuid):
        if self.status != fields.PciDeviceStatus.AVAILABLE:
            raise exception.PciDeviceInvalidStatus(
                compute_node_id=self.compute_node_id,
                address=self.address,
                status=self.status,
                hopestatus=[fields.PciDeviceStatus.AVAILABLE])

        if self.dev_type == fields.PciDeviceType.SRIOV_PF:
            # Update PF status to CLAIMED if all of its dependants are free
            # and set their status to UNCLAIMABLE
            vfs_list = self.child_devices
            if not all([vf.is_available() for vf in vfs_list]):
                raise exception.PciDeviceVFInvalidStatus(
                    compute_node_id=self.compute_node_id, address=self.address)
            self._bulk_update_status(vfs_list,
                                     fields.PciDeviceStatus.UNCLAIMABLE)

        elif self.dev_type == fields.PciDeviceType.SRIOV_VF:
            # Update VF status to CLAIMED if its parent has not been
            # previously allocated or claimed.
            # When claiming/allocating a VF, its parent PF becomes
            # unclaimable/unavailable. Therefore, it is expected to find the
            # parent PF in an unclaimable/unavailable state for any following
            # claims to a sibling VF.

            parent_ok_statuses = (fields.PciDeviceStatus.AVAILABLE,
                                  fields.PciDeviceStatus.UNCLAIMABLE,
                                  fields.PciDeviceStatus.UNAVAILABLE)
            parent = self.parent_device
            if parent:
                if parent.status not in parent_ok_statuses:
                    raise exception.PciDevicePFInvalidStatus(
                        compute_node_id=self.compute_node_id,
                        address=self.parent_addr,
                        status=self.status,
                        vf_address=self.address,
                        hopestatus=parent_ok_statuses)
                # Set PF status
                if parent.status == fields.PciDeviceStatus.AVAILABLE:
                    parent.status = fields.PciDeviceStatus.UNCLAIMABLE
            else:
                LOG.debug(
                    'Physical function addr: %(pf_addr)s parent of '
                    'VF addr: %(vf_addr)s was not found', {
                        'pf_addr': self.parent_addr,
                        'vf_addr': self.address
                    })

        self.status = fields.PciDeviceStatus.CLAIMED
        self.instance_uuid = instance_uuid
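    # In short: claiming a PF requires every child VF to be AVAILABLE and
    # flips them to UNCLAIMABLE, while claiming a VF flips an AVAILABLE
    # parent PF to UNCLAIMABLE. A sketch on a wired-up tree (hypothetical
    # uuid):
    #
    #     pf.claim(instance_uuid)   # all child VFs -> UNCLAIMABLE
    #     # or, independently, for a single VF:
    #     vf.claim(instance_uuid)   # parent PF -> UNCLAIMABLE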

    def allocate(self, instance):
        ok_statuses = (fields.PciDeviceStatus.AVAILABLE,
                       fields.PciDeviceStatus.CLAIMED)
        parent_ok_statuses = (fields.PciDeviceStatus.AVAILABLE,
                              fields.PciDeviceStatus.UNCLAIMABLE,
                              fields.PciDeviceStatus.UNAVAILABLE)
        dependants_ok_statuses = (fields.PciDeviceStatus.AVAILABLE,
                                  fields.PciDeviceStatus.UNCLAIMABLE)
        if self.status not in ok_statuses:
            raise exception.PciDeviceInvalidStatus(
                compute_node_id=self.compute_node_id,
                address=self.address,
                status=self.status,
                hopestatus=ok_statuses)
        if (self.status == fields.PciDeviceStatus.CLAIMED
                and self.instance_uuid != instance['uuid']):
            raise exception.PciDeviceInvalidOwner(
                compute_node_id=self.compute_node_id,
                address=self.address,
                owner=self.instance_uuid,
                hopeowner=instance['uuid'])
        if self.dev_type == fields.PciDeviceType.SRIOV_PF:
            vfs_list = self.child_devices
            if not all(
                [vf.status in dependants_ok_statuses for vf in vfs_list]):
                raise exception.PciDeviceVFInvalidStatus(
                    compute_node_id=self.compute_node_id, address=self.address)
            self._bulk_update_status(vfs_list,
                                     fields.PciDeviceStatus.UNAVAILABLE)

        elif (self.dev_type == fields.PciDeviceType.SRIOV_VF):
            parent = self.parent_device
            if parent:
                if parent.status not in parent_ok_statuses:
                    raise exception.PciDevicePFInvalidStatus(
                        compute_node_id=self.compute_node_id,
                        address=self.parent_addr,
                        status=self.status,
                        vf_address=self.address,
                        hopestatus=parent_ok_statuses)
                # Set PF status
                parent.status = fields.PciDeviceStatus.UNAVAILABLE
            else:
                LOG.debug(
                    'Physical function addr: %(pf_addr)s parent of '
                    'VF addr: %(vf_addr)s was not found', {
                        'pf_addr': self.parent_addr,
                        'vf_addr': self.address
                    })

        self.status = fields.PciDeviceStatus.ALLOCATED
        self.instance_uuid = instance['uuid']

        # Notes(yjiang5): remove this check when instance object for
        # compute manager is finished
        if isinstance(instance, dict):
            if 'pci_devices' not in instance:
                instance['pci_devices'] = []
            instance['pci_devices'].append(copy.copy(self))
        else:
            instance.pci_devices.objects.append(copy.copy(self))

    def remove(self):
        if self.status != fields.PciDeviceStatus.AVAILABLE:
            raise exception.PciDeviceInvalidStatus(
                compute_node_id=self.compute_node_id,
                address=self.address,
                status=self.status,
                hopestatus=[fields.PciDeviceStatus.AVAILABLE])
        self.status = fields.PciDeviceStatus.REMOVED
        self.instance_uuid = None
        self.request_id = None

    def free(self, instance=None):
        ok_statuses = (fields.PciDeviceStatus.ALLOCATED,
                       fields.PciDeviceStatus.CLAIMED)
        free_devs = []
        if self.status not in ok_statuses:
            raise exception.PciDeviceInvalidStatus(
                compute_node_id=self.compute_node_id,
                address=self.address,
                status=self.status,
                hopestatus=ok_statuses)
        if instance and self.instance_uuid != instance['uuid']:
            raise exception.PciDeviceInvalidOwner(
                compute_node_id=self.compute_node_id,
                address=self.address,
                owner=self.instance_uuid,
                hopeowner=instance['uuid'])
        if self.dev_type == fields.PciDeviceType.SRIOV_PF:
            # Set the status of all the PF's dependants to AVAILABLE
            vfs_list = self.child_devices
            self._bulk_update_status(vfs_list,
                                     fields.PciDeviceStatus.AVAILABLE)
            free_devs.extend(vfs_list)
        if self.dev_type == fields.PciDeviceType.SRIOV_VF:
            # Set PF status to AVAILABLE if all of its VFs are free
            parent = self.parent_device
            if not parent:
                LOG.debug(
                    'Physical function addr: %(pf_addr)s parent of '
                    'VF addr: %(vf_addr)s was not found', {
                        'pf_addr': self.parent_addr,
                        'vf_addr': self.address
                    })
            else:
                vfs_list = parent.child_devices
                if all(
                    [vf.is_available() for vf in vfs_list
                     if vf.id != self.id]):
                    parent.status = fields.PciDeviceStatus.AVAILABLE
                    free_devs.append(parent)
        old_status = self.status
        self.status = fields.PciDeviceStatus.AVAILABLE
        free_devs.append(self)
        self.instance_uuid = None
        self.request_id = None
        if old_status == fields.PciDeviceStatus.ALLOCATED and instance:
            # Notes(yjiang5): remove this check when instance object for
            # compute manager is finished
            existed = next(
                (dev for dev in instance['pci_devices'] if dev.id == self.id))
            if isinstance(instance, dict):
                instance['pci_devices'].remove(existed)
            else:
                instance.pci_devices.objects.remove(existed)
        return free_devs

    def is_available(self):
        return self.status == fields.PciDeviceStatus.AVAILABLE
Beispiel #22
0
class PciDevice(base.NovaPersistentObject, base.NovaObject):
    """Object to represent a PCI device on a compute node.

    PCI devices are managed by the compute resource tracker, which discovers
    the devices from the hardware platform, claims, allocates and frees
    devices for instances.

    The PCI device information is permanently maintained in a database.
    This makes it convenient to get PCI device information, like physical
    function for a VF device, adjacent switch IP address for a NIC,
    hypervisor identification for a PCI device, etc. It also provides a
    convenient way to check device allocation information for administrator
    purposes.

    A device can be in the available, claimed, allocated, deleted or removed
    state.

    A device is available when it is discovered.

    A device is claimed prior to being allocated to an instance. Normally the
    transition from claimed to allocated is quick. However, during a resize
    operation the transition can take longer, because devices are claimed in
    prep_resize and allocated in finish_resize.

    A device becomes removed when it is hot-removed from a node (i.e. not
    found in the next auto-discover) but has not yet been synced with the DB.
    A removed device should not be allocated to any instance; once it is
    deleted from the DB, the device object changes to the deleted state and is
    no longer synced with the DB.

    Field notes::

        | 'dev_id':
        |   Hypervisor's identification for the device; the string format
        |   is hypervisor-specific
        | 'extra_info':
        |   Device-specific properties like PF address, switch IP address, etc.

    """

    # Version 1.0: Initial version
    # Version 1.1: String attributes updated to support unicode
    # Version 1.2: Added request_id field
    # Version 1.3: Added field to represent PCI device NUMA node
    # Version 1.4: Added parent_addr field
    # Version 1.5: Added 2 new device statuses: UNCLAIMABLE and UNAVAILABLE
    VERSION = '1.5'

    fields = {
        'id': fields.IntegerField(),
        # Note(yjiang5): the compute_node_id may be None because the pci
        # device objects are created before the compute node is created in DB
        'compute_node_id': fields.IntegerField(nullable=True),
        'address': fields.StringField(),
        'vendor_id': fields.StringField(),
        'product_id': fields.StringField(),
        'dev_type': fields.PciDeviceTypeField(),
        'status': fields.PciDeviceStatusField(),
        'dev_id': fields.StringField(nullable=True),
        'label': fields.StringField(nullable=True),
        'instance_uuid': fields.StringField(nullable=True),
        'request_id': fields.StringField(nullable=True),
        'extra_info': fields.DictOfStringsField(),
        'numa_node': fields.IntegerField(nullable=True),
        'parent_addr': fields.StringField(nullable=True),
    }

    @staticmethod
    def should_migrate_data():
        # NOTE(ndipanov): Only migrate parent_addr if all services are up to at
        # least version 4 - this should only ever be called from save()
        services = ('conductor', 'osapi_compute')
        min_parent_addr_version = 4

        min_deployed = min(
            objects.Service.get_minimum_version(context.get_admin_context(),
                                                'nova-' + service)
            for service in services)
        return min_deployed >= min_parent_addr_version
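    # A sketch of the gate: while the oldest deployed nova-conductor or
    # nova-osapi_compute still reports a service version below 4,
    #
    #     PciDevice.should_migrate_data()   # -> False
    #
    # and save() keeps mirroring parent_addr into extra_info['phys_function'];
    # once the whole control plane reaches version 4 the data is migrated.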

    def obj_make_compatible(self, primitive, target_version):
        target_version = versionutils.convert_version_to_tuple(target_version)
        if target_version < (1, 2) and 'request_id' in primitive:
            del primitive['request_id']
        if target_version < (1, 4) and 'parent_addr' in primitive:
            if primitive['parent_addr'] is not None:
                extra_info = primitive.get('extra_info', {})
                extra_info['phys_function'] = primitive['parent_addr']
            del primitive['parent_addr']
        if target_version < (1, 5) and 'parent_addr' in primitive:
            added_statuses = (fields.PciDeviceStatus.UNCLAIMABLE,
                              fields.PciDeviceStatus.UNAVAILABLE)
            status = primitive['status']
            if status in added_statuses:
                raise exception.ObjectActionError(
                    action='obj_make_compatible',
                    reason='status=%s not supported in version %s' %
                    (status, target_version))

    def update_device(self, dev_dict):
        """Sync the content from device dictionary to device object.

        The resource tracker updates the available devices periodically.
        To avoid meaningless syncs with the database, we update the device
        object only if a value changed.
        """

        # Note(yjiang5): status/instance_uuid should only be updated by
        # functions like claim/allocate etc. The id is allocated by the
        # database. The extra_info is created by the object.
        no_changes = ('status', 'instance_uuid', 'id', 'extra_info')
        for key in no_changes:
            dev_dict.pop(key, None)

        # NOTE(ndipanov): This needs to be set as it's accessed when matching
        dev_dict.setdefault('parent_addr')

        for k, v in dev_dict.items():
            if k in self.fields.keys():
                setattr(self, k, v)
            else:
                # Note (yjiang5) extra_info.update does not update
                # obj_what_changed, set it explicitly
                extra_info = self.extra_info
                extra_info.update({k: v})
                self.extra_info = extra_info

    def __init__(self, *args, **kwargs):
        super(PciDevice, self).__init__(*args, **kwargs)
        self.obj_reset_changes()
        self.extra_info = {}

    def __eq__(self, other):
        return compare_pci_device_attributes(self, other)

    def __ne__(self, other):
        return not (self == other)

    @staticmethod
    def _from_db_object(context, pci_device, db_dev):
        for key in pci_device.fields:
            if key != 'extra_info':
                setattr(pci_device, key, db_dev[key])
            else:
                extra_info = db_dev.get("extra_info")
                pci_device.extra_info = jsonutils.loads(extra_info)
        pci_device._context = context
        pci_device.obj_reset_changes()
        # NOTE(ndipanov): As long as there is PF data in the old location, we
        # want to load it as it may be the only place we have it
        if 'phys_function' in pci_device.extra_info:
            pci_device.parent_addr = pci_device.extra_info['phys_function']

        return pci_device

    @base.remotable_classmethod
    def get_by_dev_addr(cls, context, compute_node_id, dev_addr):
        db_dev = db.pci_device_get_by_addr(context, compute_node_id, dev_addr)
        return cls._from_db_object(context, cls(), db_dev)

    @base.remotable_classmethod
    def get_by_dev_id(cls, context, id):
        db_dev = db.pci_device_get_by_id(context, id)
        return cls._from_db_object(context, cls(), db_dev)

    @classmethod
    def create(cls, context, dev_dict):
        """Create a PCI device based on hypervisor information.

        As the device object has just been created and is not yet synced
        with the DB, we should not reset changes here for fields set from
        the dict.
        """
        pci_device = cls()
        pci_device.update_device(dev_dict)
        pci_device.status = fields.PciDeviceStatus.AVAILABLE
        pci_device._context = context
        return pci_device

    @base.remotable
    def save(self):
        if self.status == fields.PciDeviceStatus.REMOVED:
            self.status = fields.PciDeviceStatus.DELETED
            db.pci_device_destroy(self._context, self.compute_node_id,
                                  self.address)
        elif self.status != fields.PciDeviceStatus.DELETED:
            updates = self.obj_get_changes()
            if not self.should_migrate_data():
                # NOTE(ndipanov): If we are not migrating data yet, make sure
                # that any changes to parent_addr are also in the old location
                # in extra_info
                if 'parent_addr' in updates and updates['parent_addr']:
                    extra_update = updates.get('extra_info', {})
                    if not extra_update and self.obj_attr_is_set('extra_info'):
                        extra_update = self.extra_info
                    extra_update['phys_function'] = updates['parent_addr']
                    updates['extra_info'] = extra_update
            else:
                # NOTE(ndipanov): Once we start migrating, meaning all control
                # plane has been upgraded - aggressively migrate on every save
                pf_extra = self.extra_info.pop('phys_function', None)
                if pf_extra and 'parent_addr' not in updates:
                    updates['parent_addr'] = pf_extra
                updates['extra_info'] = self.extra_info

            if 'extra_info' in updates:
                updates['extra_info'] = jsonutils.dumps(updates['extra_info'])
            if updates:
                db_pci = db.pci_device_update(self._context,
                                              self.compute_node_id,
                                              self.address, updates)
                self._from_db_object(self._context, self, db_pci)
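    # The two branches above behave roughly like this for a hypothetical VF:
    #
    #     dev.parent_addr = '0000:81:00.0'
    #     dev.save()
    #     # should_migrate_data() False: the address is also written to
    #     #   extra_info['phys_function'] so older code can still find it.
    #     # should_migrate_data() True:  any lingering 'phys_function' entry
    #     #   is popped from extra_info and promoted to parent_addr.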

    @staticmethod
    def _bulk_update_status(dev_list, status):
        for dev in dev_list:
            dev.status = status

    def claim(self, instance_uuid):
        if self.status != fields.PciDeviceStatus.AVAILABLE:
            raise exception.PciDeviceInvalidStatus(
                compute_node_id=self.compute_node_id,
                address=self.address,
                status=self.status,
                hopestatus=[fields.PciDeviceStatus.AVAILABLE])

        if self.dev_type == fields.PciDeviceType.SRIOV_PF:
            # Update PF status to CLAIMED if all of its dependants are free
            # and set their status to UNCLAIMABLE
            vfs_list = objects.PciDeviceList.get_by_parent_address(
                self._context, self.compute_node_id, self.address)
            if not all([vf.is_available() for vf in vfs_list]):
                raise exception.PciDeviceVFInvalidStatus(
                    compute_node_id=self.compute_node_id, address=self.address)
            self._bulk_update_status(vfs_list,
                                     fields.PciDeviceStatus.UNCLAIMABLE)

        elif self.dev_type == fields.PciDeviceType.SRIOV_VF:
            # Update VF status to CLAIMED if its parent has not been
            # previously allocated or claimed.
            # When claiming/allocating a VF, its parent PF becomes
            # unclaimable/unavailable. Therefore, it is expected to find the
            # parent PF in an unclaimable/unavailable state for any following
            # claims to a sibling VF.

            parent_ok_statuses = (fields.PciDeviceStatus.AVAILABLE,
                                  fields.PciDeviceStatus.UNCLAIMABLE,
                                  fields.PciDeviceStatus.UNAVAILABLE)
            try:
                parent = self.get_by_dev_addr(self._context,
                                              self.compute_node_id,
                                              self.parent_addr)
                if parent.status not in parent_ok_statuses:
                    raise exception.PciDevicePFInvalidStatus(
                        compute_node_id=self.compute_node_id,
                        address=self.parent_addr,
                        status=self.status,
                        vf_address=self.address,
                        hopestatus=parent_ok_statuses)
                # Set PF status
                if parent.status == fields.PciDeviceStatus.AVAILABLE:
                    parent.status = fields.PciDeviceStatus.UNCLAIMABLE
            except exception.PciDeviceNotFound:
                LOG.debug(
                    'Physical function addr: %(pf_addr)s parent of '
                    'VF addr: %(vf_addr)s was not found', {
                        'pf_addr': self.parent_addr,
                        'vf_addr': self.address
                    })

        self.status = fields.PciDeviceStatus.CLAIMED
        self.instance_uuid = instance_uuid

    def allocate(self, instance):
        ok_statuses = (fields.PciDeviceStatus.AVAILABLE,
                       fields.PciDeviceStatus.CLAIMED)
        parent_ok_statuses = (fields.PciDeviceStatus.AVAILABLE,
                              fields.PciDeviceStatus.UNCLAIMABLE,
                              fields.PciDeviceStatus.UNAVAILABLE)
        dependants_ok_statuses = (fields.PciDeviceStatus.AVAILABLE,
                                  fields.PciDeviceStatus.UNCLAIMABLE)
        if self.status not in ok_statuses:
            raise exception.PciDeviceInvalidStatus(
                compute_node_id=self.compute_node_id,
                address=self.address,
                status=self.status,
                hopestatus=ok_statuses)
        if (self.status == fields.PciDeviceStatus.CLAIMED
                and self.instance_uuid != instance['uuid']):
            raise exception.PciDeviceInvalidOwner(
                compute_node_id=self.compute_node_id,
                address=self.address,
                owner=self.instance_uuid,
                hopeowner=instance['uuid'])
        if self.dev_type == fields.PciDeviceType.SRIOV_PF:
            vfs_list = objects.PciDeviceList.get_by_parent_address(
                self._context, self.compute_node_id, self.address)
            if not all(
                [vf.status in dependants_ok_statuses for vf in vfs_list]):
                raise exception.PciDeviceVFInvalidStatus(
                    compute_node_id=self.compute_node_id, address=self.address)
            self._bulk_update_status(vfs_list,
                                     fields.PciDeviceStatus.UNAVAILABLE)

        elif (self.dev_type == fields.PciDeviceType.SRIOV_VF):
            try:
                parent = self.get_by_dev_addr(self._context,
                                              self.compute_node_id,
                                              self.parent_addr)
                if parent.status not in parent_ok_statuses:
                    raise exception.PciDevicePFInvalidStatus(
                        compute_node_id=self.compute_node_id,
                        address=self.parent_addr,
                        status=self.status,
                        vf_address=self.address,
                        hopestatus=parent_ok_statuses)
                # Set PF status
                parent.status = fields.PciDeviceStatus.UNAVAILABLE
            except exception.PciDeviceNotFound:
                LOG.debug(
                    'Physical function addr: %(pf_addr)s parent of '
                    'VF addr: %(vf_addr)s was not found', {
                        'pf_addr': self.parent_addr,
                        'vf_addr': self.address
                    })

        self.status = fields.PciDeviceStatus.ALLOCATED
        self.instance_uuid = instance['uuid']

        # Notes(yjiang5): remove this check when instance object for
        # compute manager is finished
        if isinstance(instance, dict):
            if 'pci_devices' not in instance:
                instance['pci_devices'] = []
            instance['pci_devices'].append(copy.copy(self))
        else:
            instance.pci_devices.objects.append(copy.copy(self))

    def remove(self):
        if self.status != fields.PciDeviceStatus.AVAILABLE:
            raise exception.PciDeviceInvalidStatus(
                compute_node_id=self.compute_node_id,
                address=self.address,
                status=self.status,
                hopestatus=[fields.PciDeviceStatus.AVAILABLE])
        self.status = fields.PciDeviceStatus.REMOVED
        self.instance_uuid = None
        self.request_id = None

    def free(self, instance=None):
        ok_statuses = (fields.PciDeviceStatus.ALLOCATED,
                       fields.PciDeviceStatus.CLAIMED)
        free_devs = []
        if self.status not in ok_statuses:
            raise exception.PciDeviceInvalidStatus(
                compute_node_id=self.compute_node_id,
                address=self.address,
                status=self.status,
                hopestatus=ok_statuses)
        if instance and self.instance_uuid != instance['uuid']:
            raise exception.PciDeviceInvalidOwner(
                compute_node_id=self.compute_node_id,
                address=self.address,
                owner=self.instance_uuid,
                hopeowner=instance['uuid'])
        if self.dev_type == fields.PciDeviceType.SRIOV_PF:
            # Set the status of all the PF's dependants to AVAILABLE
            vfs_list = objects.PciDeviceList.get_by_parent_address(
                self._context, self.compute_node_id, self.address)
            self._bulk_update_status(vfs_list,
                                     fields.PciDeviceStatus.AVAILABLE)
            free_devs.extend(vfs_list)
        if self.dev_type == fields.PciDeviceType.SRIOV_VF:
            # Set PF status to AVAILABLE if all of its VFs are free
            vfs_list = objects.PciDeviceList.get_by_parent_address(
                self._context, self.compute_node_id, self.parent_addr)
            if all([vf.is_available() for vf in vfs_list if vf.id != self.id]):
                try:
                    parent = self.get_by_dev_addr(self._context,
                                                  self.compute_node_id,
                                                  self.parent_addr)
                    parent.status = fields.PciDeviceStatus.AVAILABLE
                    free_devs.append(parent)
                except exception.PciDeviceNotFound:
                    LOG.debug(
                        'Physical function addr: %(pf_addr)s parent of '
                        'VF addr: %(vf_addr)s was not found', {
                            'pf_addr': self.parent_addr,
                            'vf_addr': self.address
                        })
        old_status = self.status
        self.status = fields.PciDeviceStatus.AVAILABLE
        free_devs.append(self)
        self.instance_uuid = None
        self.request_id = None
        if old_status == fields.PciDeviceStatus.ALLOCATED and instance:
            # Notes(yjiang5): remove this check when instance object for
            # compute manager is finished
            existed = next(
                (dev for dev in instance['pci_devices'] if dev.id == self.id))
            if isinstance(instance, dict):
                instance['pci_devices'].remove(existed)
            else:
                instance.pci_devices.objects.remove(existed)
        return free_devs

    def is_available(self):
        return self.status == fields.PciDeviceStatus.AVAILABLE
Beispiel #23
0
class Instance(base.NovaPersistentObject, base.NovaObject,
               base.NovaObjectDictCompat):
    # Version 2.0: Initial version
    VERSION = '2.0'

    fields = {
        'id': fields.IntegerField(),
        'user_id': fields.StringField(nullable=True),
        'project_id': fields.StringField(nullable=True),
        'image_ref': fields.StringField(nullable=True),
        'kernel_id': fields.StringField(nullable=True),
        'ramdisk_id': fields.StringField(nullable=True),
        'hostname': fields.StringField(nullable=True),
        'launch_index': fields.IntegerField(nullable=True),
        'key_name': fields.StringField(nullable=True),
        'key_data': fields.StringField(nullable=True),
        'power_state': fields.IntegerField(nullable=True),
        'vm_state': fields.StringField(nullable=True),
        'task_state': fields.StringField(nullable=True),
        'memory_mb': fields.IntegerField(nullable=True),
        'vcpus': fields.IntegerField(nullable=True),
        'root_gb': fields.IntegerField(nullable=True),
        'ephemeral_gb': fields.IntegerField(nullable=True),
        'ephemeral_key_uuid': fields.UUIDField(nullable=True),
        'host': fields.StringField(nullable=True),
        'node': fields.StringField(nullable=True),
        'instance_type_id': fields.IntegerField(nullable=True),
        'user_data': fields.StringField(nullable=True),
        'reservation_id': fields.StringField(nullable=True),
        'launched_at': fields.DateTimeField(nullable=True),
        'terminated_at': fields.DateTimeField(nullable=True),
        'availability_zone': fields.StringField(nullable=True),
        'display_name': fields.StringField(nullable=True),
        'display_description': fields.StringField(nullable=True),
        'launched_on': fields.StringField(nullable=True),

        # NOTE(jdillaman): locked deprecated in favor of locked_by,
        # to be removed in Icehouse
        'locked': fields.BooleanField(default=False),
        'locked_by': fields.StringField(nullable=True),
        'os_type': fields.StringField(nullable=True),
        'architecture': fields.StringField(nullable=True),
        'vm_mode': fields.StringField(nullable=True),
        'uuid': fields.UUIDField(),
        'root_device_name': fields.StringField(nullable=True),
        'default_ephemeral_device': fields.StringField(nullable=True),
        'default_swap_device': fields.StringField(nullable=True),
        'config_drive': fields.StringField(nullable=True),
        'access_ip_v4': fields.IPV4AddressField(nullable=True),
        'access_ip_v6': fields.IPV6AddressField(nullable=True),
        'auto_disk_config': fields.BooleanField(default=False),
        'progress': fields.IntegerField(nullable=True),
        'shutdown_terminate': fields.BooleanField(default=False),
        'disable_terminate': fields.BooleanField(default=False),
        'cell_name': fields.StringField(nullable=True),
        'metadata': fields.DictOfStringsField(),
        'system_metadata': fields.DictOfNullableStringsField(),
        'info_cache': fields.ObjectField('InstanceInfoCache', nullable=True),
        'security_groups': fields.ObjectField('SecurityGroupList'),
        'fault': fields.ObjectField('InstanceFault', nullable=True),
        'cleaned': fields.BooleanField(default=False),
        'pci_devices': fields.ObjectField('PciDeviceList', nullable=True),
        'numa_topology': fields.ObjectField('InstanceNUMATopology',
                                            nullable=True),
        'pci_requests': fields.ObjectField('InstancePCIRequests',
                                           nullable=True),
        'tags': fields.ObjectField('TagList'),
        'flavor': fields.ObjectField('Flavor'),
        'old_flavor': fields.ObjectField('Flavor', nullable=True),
        'new_flavor': fields.ObjectField('Flavor', nullable=True),
        'vcpu_model': fields.ObjectField('VirtCPUModel', nullable=True),
        'ec2_ids': fields.ObjectField('EC2Ids'),
        'migration_context': fields.ObjectField('MigrationContext',
                                                nullable=True)
    }

    obj_extra_fields = ['name']

    def __init__(self, *args, **kwargs):
        super(Instance, self).__init__(*args, **kwargs)
        self._reset_metadata_tracking()

    def _reset_metadata_tracking(self, fields=None):
        if fields is None or 'system_metadata' in fields:
            self._orig_system_metadata = (dict(self.system_metadata)
                                          if 'system_metadata' in self else {})
        if fields is None or 'metadata' in fields:
            self._orig_metadata = (dict(self.metadata)
                                   if 'metadata' in self else {})

    def obj_reset_changes(self, fields=None, recursive=False):
        super(Instance, self).obj_reset_changes(fields, recursive=recursive)
        self._reset_metadata_tracking(fields=fields)

    def obj_what_changed(self):
        changes = super(Instance, self).obj_what_changed()
        if 'metadata' in self and self.metadata != self._orig_metadata:
            changes.add('metadata')
        if 'system_metadata' in self and (self.system_metadata !=
                                          self._orig_system_metadata):
            changes.add('system_metadata')
        return changes
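    # The snapshots taken in _reset_metadata_tracking() are what make in-place
    # dict edits visible here; the base obj_what_changed() only notices
    # attribute assignments. A sketch:
    #
    #     inst.metadata['foo'] = 'bar'            # mutate the dict in place
    #     'metadata' in inst.obj_what_changed()   # -> True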

    @classmethod
    def _obj_from_primitive(cls, context, objver, primitive):
        self = super(Instance,
                     cls)._obj_from_primitive(context, objver, primitive)
        self._reset_metadata_tracking()
        return self

    @property
    def name(self):
        try:
            base_name = CONF.instance_name_template % self.id
        except TypeError:
            # Support templates like "uuid-%(uuid)s", etc.
            info = {}
            # NOTE(russellb): Don't use self.iteritems() here, as it will
            # result in infinite recursion on the name property.
            for key in self.fields:
                if key == 'name':
                    # NOTE(danms): prevent recursion
                    continue
                elif not self.obj_attr_is_set(key):
                    # NOTE(danms): Don't trigger lazy-loads
                    continue
                info[key] = self[key]
            try:
                base_name = CONF.instance_name_template % info
            except KeyError:
                base_name = self.uuid
        return base_name
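    # The template may use positional or named interpolation; with a
    # hypothetical id of 42 the behaviour is roughly:
    #
    #     instance_name_template = 'instance-%08x'  ->  'instance-0000002a'
    #     instance_name_template = 'vm-%(uuid)s'    ->  'vm-<the instance uuid>'
    #
    # If a named template references a key that is not set, the bare uuid is
    # returned instead (the KeyError fallback above).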

    def _flavor_from_db(self, db_flavor):
        """Load instance flavor information from instance_extra."""

        flavor_info = jsonutils.loads(db_flavor)

        self.flavor = objects.Flavor.obj_from_primitive(flavor_info['cur'])
        if flavor_info['old']:
            self.old_flavor = objects.Flavor.obj_from_primitive(
                flavor_info['old'])
        else:
            self.old_flavor = None
        if flavor_info['new']:
            self.new_flavor = objects.Flavor.obj_from_primitive(
                flavor_info['new'])
        else:
            self.new_flavor = None
        self.obj_reset_changes(['flavor', 'old_flavor', 'new_flavor'])
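    # The instance_extra 'flavor' column holds a JSON blob shaped roughly as
    # follows (primitives abbreviated):
    #
    #     {"cur": {<Flavor primitive>}, "old": null, "new": null}
    #
    # 'old'/'new' are typically only populated while a resize is in flight.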

    @staticmethod
    def _from_db_object(context, instance, db_inst, expected_attrs=None):
        """Method to help with migration to objects.

        Converts a database entity to a formal object.
        """
        instance._context = context
        if expected_attrs is None:
            expected_attrs = []
        # Most of the field names match right now, so be quick
        for field in instance.fields:
            if field in INSTANCE_OPTIONAL_ATTRS:
                continue
            elif field == 'deleted':
                instance.deleted = db_inst['deleted'] == db_inst['id']
            elif field == 'cleaned':
                instance.cleaned = db_inst['cleaned'] == 1
            else:
                instance[field] = db_inst[field]

        # NOTE(danms): We can be called with a dict instead of a
        # SQLAlchemy object, so we have to be careful here
        if hasattr(db_inst, '__dict__'):
            have_extra = 'extra' in db_inst.__dict__ and db_inst['extra']
        else:
            have_extra = 'extra' in db_inst and db_inst['extra']

        if 'metadata' in expected_attrs:
            instance['metadata'] = utils.instance_meta(db_inst)
        if 'system_metadata' in expected_attrs:
            instance['system_metadata'] = utils.instance_sys_meta(db_inst)
        if 'fault' in expected_attrs:
            instance['fault'] = (objects.InstanceFault.get_latest_for_instance(
                context, instance.uuid))
        if 'numa_topology' in expected_attrs:
            if have_extra:
                instance._load_numa_topology(
                    db_inst['extra'].get('numa_topology'))
            else:
                instance.numa_topology = None
        if 'pci_requests' in expected_attrs:
            if have_extra:
                instance._load_pci_requests(
                    db_inst['extra'].get('pci_requests'))
            else:
                instance.pci_requests = None
        if 'vcpu_model' in expected_attrs:
            if have_extra:
                instance._load_vcpu_model(db_inst['extra'].get('vcpu_model'))
            else:
                instance.vcpu_model = None
        if 'ec2_ids' in expected_attrs:
            instance._load_ec2_ids()
        if 'migration_context' in expected_attrs:
            if have_extra:
                instance._load_migration_context(
                    db_inst['extra'].get('migration_context'))
            else:
                instance.migration_context = None
        if 'info_cache' in expected_attrs:
            if db_inst.get('info_cache') is None:
                instance.info_cache = None
            elif not instance.obj_attr_is_set('info_cache'):
                # TODO(danms): If this ever happens on a backlevel instance
                # passed to us by a backlevel service, things will break
                instance.info_cache = objects.InstanceInfoCache(context)
            if instance.info_cache is not None:
                instance.info_cache._from_db_object(context,
                                                    instance.info_cache,
                                                    db_inst['info_cache'])

        if any([
                x in expected_attrs
                for x in ('flavor', 'old_flavor', 'new_flavor')
        ]):
            if have_extra and db_inst['extra'].get('flavor'):
                instance._flavor_from_db(db_inst['extra']['flavor'])

        # TODO(danms): If we are updating these on a backlevel instance,
        # we'll end up sending back new versions of these objects (see
        # above note for new info_caches)
        if 'pci_devices' in expected_attrs:
            pci_devices = base.obj_make_list(context,
                                             objects.PciDeviceList(context),
                                             objects.PciDevice,
                                             db_inst['pci_devices'])
            instance['pci_devices'] = pci_devices
        if 'security_groups' in expected_attrs:
            sec_groups = base.obj_make_list(context,
                                            objects.SecurityGroupList(context),
                                            objects.SecurityGroup,
                                            db_inst.get('security_groups', []))
            instance['security_groups'] = sec_groups

        if 'tags' in expected_attrs:
            tags = base.obj_make_list(context, objects.TagList(context),
                                      objects.Tag, db_inst['tags'])
            instance['tags'] = tags

        instance.obj_reset_changes()
        return instance

    @base.remotable_classmethod
    def get_by_uuid(cls, context, uuid, expected_attrs=None, use_slave=False):
        if expected_attrs is None:
            expected_attrs = ['info_cache', 'security_groups']
        columns_to_join = _expected_cols(expected_attrs)
        db_inst = db.instance_get_by_uuid(context,
                                          uuid,
                                          columns_to_join=columns_to_join,
                                          use_slave=use_slave)
        return cls._from_db_object(context, cls(), db_inst, expected_attrs)

    @base.remotable_classmethod
    def get_by_id(cls, context, inst_id, expected_attrs=None):
        if expected_attrs is None:
            expected_attrs = ['info_cache', 'security_groups']
        columns_to_join = _expected_cols(expected_attrs)
        db_inst = db.instance_get(context,
                                  inst_id,
                                  columns_to_join=columns_to_join)
        return cls._from_db_object(context, cls(), db_inst, expected_attrs)

    @base.remotable
    def create(self):
        if self.obj_attr_is_set('id'):
            raise exception.ObjectActionError(action='create',
                                              reason='already created')
        updates = self.obj_get_changes()
        expected_attrs = [
            attr for attr in INSTANCE_DEFAULT_FIELDS if attr in updates
        ]
        if 'security_groups' in updates:
            updates['security_groups'] = [
                x.name for x in updates['security_groups']
            ]
        if 'info_cache' in updates:
            updates['info_cache'] = {
                'network_info': updates['info_cache'].network_info.json()
            }
        updates['extra'] = {}
        numa_topology = updates.pop('numa_topology', None)
        if numa_topology:
            expected_attrs.append('numa_topology')
            updates['extra']['numa_topology'] = numa_topology._to_json()
        pci_requests = updates.pop('pci_requests', None)
        if pci_requests:
            expected_attrs.append('pci_requests')
            updates['extra']['pci_requests'] = (pci_requests.to_json())
        flavor = updates.pop('flavor', None)
        if flavor:
            expected_attrs.append('flavor')
            old = ((self.obj_attr_is_set('old_flavor') and self.old_flavor)
                   and self.old_flavor.obj_to_primitive() or None)
            new = ((self.obj_attr_is_set('new_flavor') and self.new_flavor)
                   and self.new_flavor.obj_to_primitive() or None)
            flavor_info = {
                'cur': self.flavor.obj_to_primitive(),
                'old': old,
                'new': new,
            }
            updates['extra']['flavor'] = jsonutils.dumps(flavor_info)
        vcpu_model = updates.pop('vcpu_model', None)
        if vcpu_model:
            expected_attrs.append('vcpu_model')
            updates['extra']['vcpu_model'] = (jsonutils.dumps(
                vcpu_model.obj_to_primitive()))
        db_inst = db.instance_create(self._context, updates)
        self._from_db_object(self._context, self, db_inst, expected_attrs)
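    # After the pops above, the 'extra' row for a new instance ends up holding
    # one JSON string per attribute, e.g. (a sketch; keys appear only when the
    # corresponding attribute was set):
    #
    #     updates['extra'] = {
    #         'numa_topology': '<InstanceNUMATopology as JSON>',
    #         'pci_requests':  '<InstancePCIRequests as JSON>',
    #         'flavor':        '{"cur": ..., "old": null, "new": null}',
    #         'vcpu_model':    '<VirtCPUModel primitive as JSON>',
    #     }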

    @base.remotable
    def destroy(self):
        if not self.obj_attr_is_set('id'):
            raise exception.ObjectActionError(action='destroy',
                                              reason='already destroyed')
        if not self.obj_attr_is_set('uuid'):
            raise exception.ObjectActionError(action='destroy',
                                              reason='no uuid')
        if not self.obj_attr_is_set('host') or not self.host:
            # NOTE(danms): If our host is not set, avoid a race
            constraint = db.constraint(host=db.equal_any(None))
        else:
            constraint = None

        cell_type = cells_opts.get_cell_type()
        if cell_type is not None:
            stale_instance = self.obj_clone()

        try:
            db_inst = db.instance_destroy(self._context,
                                          self.uuid,
                                          constraint=constraint)
            self._from_db_object(self._context, self, db_inst)
        except exception.ConstraintNotMet:
            raise exception.ObjectActionError(action='destroy',
                                              reason='host changed')
        if cell_type == 'compute':
            cells_api = cells_rpcapi.CellsAPI()
            cells_api.instance_destroy_at_top(self._context, stale_instance)
        delattr(self, base.get_attrname('id'))

    def _save_info_cache(self, context):
        if self.info_cache:
            with self.info_cache.obj_alternate_context(context):
                self.info_cache.save()

    def _save_security_groups(self, context):
        security_groups = self.security_groups or []
        for secgroup in security_groups:
            with secgroup.obj_alternate_context(context):
                secgroup.save()
        self.security_groups.obj_reset_changes()

    def _save_fault(self, context):
        # NOTE(danms): I don't think we need to worry about this, do we?
        pass

    def _save_numa_topology(self, context):
        if self.numa_topology:
            self.numa_topology.instance_uuid = self.uuid
            with self.numa_topology.obj_alternate_context(context):
                self.numa_topology._save()
        else:
            objects.InstanceNUMATopology.delete_by_instance_uuid(
                context, self.uuid)

    def _save_pci_requests(self, context):
        # NOTE(danms): No need for this yet.
        pass

    def _save_pci_devices(self, context):
        # NOTE(yjiang5): All devices are held by the PCI tracker, and only the
        # PCI tracker is permitted to update the DB. All changes to devices
        # made here will be dropped.
        pass

    def _save_flavor(self, context):
        if not any([
                x in self.obj_what_changed()
                for x in ('flavor', 'old_flavor', 'new_flavor')
        ]):
            return
        # FIXME(danms): We could do this more cleverly by updating this
        # with all the other extra things at the same time
        flavor_info = {
            'cur':
            self.flavor.obj_to_primitive(),
            'old': (self.old_flavor and self.old_flavor.obj_to_primitive()
                    or None),
            'new': (self.new_flavor and self.new_flavor.obj_to_primitive()
                    or None),
        }
        db.instance_extra_update_by_uuid(
            context, self.uuid, {'flavor': jsonutils.dumps(flavor_info)})
        self.obj_reset_changes(['flavor', 'old_flavor', 'new_flavor'])

    def _save_old_flavor(self, context):
        if 'old_flavor' in self.obj_what_changed():
            self._save_flavor(context)

    def _save_new_flavor(self, context):
        if 'new_flavor' in self.obj_what_changed():
            self._save_flavor(context)

    def _save_vcpu_model(self, context):
        # TODO(yjiang5): should merge the db accesses for all the extra
        # fields
        if 'vcpu_model' in self.obj_what_changed():
            if self.vcpu_model:
                update = jsonutils.dumps(self.vcpu_model.obj_to_primitive())
            else:
                update = None
            db.instance_extra_update_by_uuid(context, self.uuid,
                                             {'vcpu_model': update})

    def _save_ec2_ids(self, context):
        # NOTE(hanlind): Read-only so no need to save this.
        pass

    def _save_migration_context(self, context):
        if self.migration_context:
            self.migration_context.instance_uuid = self.uuid
            with self.migration_context.obj_alternate_context(context):
                self.migration_context._save()
        else:
            objects.MigrationContext._destroy(context, self.uuid)

    @base.remotable
    def save(self,
             expected_vm_state=None,
             expected_task_state=None,
             admin_state_reset=False):
        """Save updates to this instance

        Column-wise updates will be made based on the result of
        self.what_changed(). If expected_task_state is provided,
        it will be checked against the in-database copy of the
        instance before updates are made.

        :param:context: Security context
        :param:expected_task_state: Optional tuple of valid task states
        for the instance to be in
        :param:expected_vm_state: Optional tuple of valid vm states
        for the instance to be in
        :param admin_state_reset: True if admin API is forcing setting
        of task_state/vm_state

        """
        # Store this on the instance because _cell_name_blocks_sync is useless
        # after the db update call below.
        self._sync_cells = not self._cell_name_blocks_sync()

        context = self._context
        cell_type = cells_opts.get_cell_type()

        if cell_type is not None:
            # NOTE(comstud): We need to stash a copy of ourselves
            # before any updates are applied.  When we call the save
            # methods on nested objects, we will lose any changes to
            # them.  But we need to make sure child cells can tell
            # what is changed.
            #
            # We also need to nuke any updates to vm_state and task_state
            # unless admin_state_reset is True.  Compute cells are
            # authoritative for their view of vm_state and task_state.
            stale_instance = self.obj_clone()

        cells_update_from_api = (cell_type == 'api' and self.cell_name
                                 and self._sync_cells)

        if cells_update_from_api:

            def _handle_cell_update_from_api():
                cells_api = cells_rpcapi.CellsAPI()
                cells_api.instance_update_from_api(context, stale_instance,
                                                   expected_vm_state,
                                                   expected_task_state,
                                                   admin_state_reset)

        updates = {}
        changes = self.obj_what_changed()

        for field in self.fields:
            # NOTE(danms): For object fields, we construct and call a
            # helper method like self._save_$attrname()
            if (self.obj_attr_is_set(field)
                    and isinstance(self.fields[field], fields.ObjectField)):
                try:
                    getattr(self, '_save_%s' % field)(context)
                except AttributeError:
                    LOG.exception(_LE('No save handler for %s'),
                                  field,
                                  instance=self)
                except db_exc.DBReferenceError as exp:
                    if exp.key != 'instance_uuid':
                        raise
                    # NOTE(melwitt): This will happen if we instance.save()
                    # before an instance.create() and the FK constraint fails.
                    # In practice, this occurs in cells during a delete of
                    # an unscheduled instance. Otherwise, it could happen
                    # as a result of a bug.
                    raise exception.InstanceNotFound(instance_id=self.uuid)
            elif field in changes:
                if (field == 'cell_name' and self[field] is not None and
                        self[field].startswith(cells_utils.BLOCK_SYNC_FLAG)):
                    updates[field] = self[field].replace(
                        cells_utils.BLOCK_SYNC_FLAG, '', 1)
                else:
                    updates[field] = self[field]

        if not updates:
            if cells_update_from_api:
                _handle_cell_update_from_api()
            return

        # Cleaned needs to be turned back into an int here
        if 'cleaned' in updates:
            if updates['cleaned']:
                updates['cleaned'] = 1
            else:
                updates['cleaned'] = 0

        if expected_task_state is not None:
            updates['expected_task_state'] = expected_task_state
        if expected_vm_state is not None:
            updates['expected_vm_state'] = expected_vm_state

        expected_attrs = [
            attr for attr in _INSTANCE_OPTIONAL_JOINED_FIELDS
            if self.obj_attr_is_set(attr)
        ]
        if 'pci_devices' in expected_attrs:
            # NOTE(danms): We don't refresh pci_devices on save right now
            expected_attrs.remove('pci_devices')

        # NOTE(alaski): We need to pull system_metadata for the
        # notification.send_update() below.  If we don't there's a KeyError
        # when it tries to extract the flavor.
        # NOTE(danms): If we have sysmeta, we need flavor since the caller
        # might be expecting flavor information as a result
        if 'system_metadata' not in expected_attrs:
            expected_attrs.append('system_metadata')
            expected_attrs.append('flavor')
        old_ref, inst_ref = db.instance_update_and_get_original(
            context,
            self.uuid,
            updates,
            columns_to_join=_expected_cols(expected_attrs))
        self._from_db_object(context,
                             self,
                             inst_ref,
                             expected_attrs=expected_attrs)

        if cells_update_from_api:
            _handle_cell_update_from_api()
        elif cell_type == 'compute':
            if self._sync_cells:
                cells_api = cells_rpcapi.CellsAPI()
                cells_api.instance_update_at_top(context, stale_instance)

        def _notify():
            # NOTE(danms): We have to be super careful here not to trigger
            # any lazy-loads that will unmigrate or unbackport something. So,
            # make a copy of the instance for notifications first.
            new_ref = self.obj_clone()

            notifications.send_update(context, old_ref, new_ref)

        # NOTE(alaski): If cell synchronization is blocked it means we have
        # already run this block of code in either the parent or child of this
        # cell.  Therefore this notification has already been sent.
        if not self._sync_cells:
            _notify = lambda: None  # noqa: F811

        _notify()

        self.obj_reset_changes()

    @base.remotable
    def refresh(self, use_slave=False):
        extra = [
            field for field in INSTANCE_OPTIONAL_ATTRS
            if self.obj_attr_is_set(field)
        ]
        current = self.__class__.get_by_uuid(self._context,
                                             uuid=self.uuid,
                                             expected_attrs=extra,
                                             use_slave=use_slave)
        # NOTE(danms): We orphan the instance copy so we do not unexpectedly
        # trigger a lazy-load (which would mean we failed to calculate the
        # expected_attrs properly)
        current._context = None

        for field in self.fields:
            if self.obj_attr_is_set(field):
                if field == 'info_cache':
                    self.info_cache.refresh()
                elif self[field] != current[field]:
                    self[field] = current[field]
        self.obj_reset_changes()

    def _load_generic(self, attrname):
        instance = self.__class__.get_by_uuid(self._context,
                                              uuid=self.uuid,
                                              expected_attrs=[attrname])

        # NOTE(danms): Never allow us to recursively-load
        if instance.obj_attr_is_set(attrname):
            self[attrname] = instance[attrname]
        else:
            raise exception.ObjectActionError(
                action='obj_load_attr',
                reason='loading %s requires recursion' % attrname)

    def _load_fault(self):
        self.fault = objects.InstanceFault.get_latest_for_instance(
            self._context, self.uuid)

    def _load_numa_topology(self, db_topology=None):
        if db_topology is not None:
            self.numa_topology = \
                objects.InstanceNUMATopology.obj_from_db_obj(self.uuid,
                                                             db_topology)
        else:
            try:
                self.numa_topology = \
                    objects.InstanceNUMATopology.get_by_instance_uuid(
                        self._context, self.uuid)
            except exception.NumaTopologyNotFound:
                self.numa_topology = None

    def _load_pci_requests(self, db_requests=None):
        # FIXME: also do this if none!
        if db_requests is not None:
            self.pci_requests = objects.InstancePCIRequests.obj_from_db(
                self._context, self.uuid, db_requests)
        else:
            self.pci_requests = \
                objects.InstancePCIRequests.get_by_instance_uuid(
                    self._context, self.uuid)

    def _load_flavor(self):
        instance = self.__class__.get_by_uuid(
            self._context,
            uuid=self.uuid,
            expected_attrs=['flavor', 'system_metadata'])

        # NOTE(danms): Orphan the instance to make sure we don't lazy-load
        # anything below
        instance._context = None
        self.flavor = instance.flavor
        self.old_flavor = instance.old_flavor
        self.new_flavor = instance.new_flavor

        # NOTE(danms): The query above may have migrated the flavor from
        # system_metadata. Since we have it anyway, go ahead and refresh
        # our system_metadata from it so that a save will be accurate.
        instance.system_metadata.update(self.get('system_metadata', {}))
        self.system_metadata = instance.system_metadata

    def _load_vcpu_model(self, db_vcpu_model=None):
        if db_vcpu_model is None:
            self.vcpu_model = objects.VirtCPUModel.get_by_instance_uuid(
                self._context, self.uuid)
        else:
            db_vcpu_model = jsonutils.loads(db_vcpu_model)
            self.vcpu_model = objects.VirtCPUModel.obj_from_primitive(
                db_vcpu_model)

    def _load_ec2_ids(self):
        self.ec2_ids = objects.EC2Ids.get_by_instance(self._context, self)

    def _load_migration_context(self, db_context=_NO_DATA_SENTINEL):
        if db_context is _NO_DATA_SENTINEL:
            try:
                self.migration_context = (
                    objects.MigrationContext.get_by_instance_uuid(
                        self._context, self.uuid))
            except exception.MigrationContextNotFound:
                self.migration_context = None
        elif db_context is None:
            self.migration_context = None
        else:
            self.migration_context = objects.MigrationContext.obj_from_db_obj(
                db_context)

    def apply_migration_context(self):
        if self.migration_context:
            self.numa_topology = self.migration_context.new_numa_topology
        else:
            LOG.debug(
                "Trying to apply a migration context that does not "
                "seem to be set for this instance",
                instance=self)

    def revert_migration_context(self):
        if self.migration_context:
            self.numa_topology = self.migration_context.old_numa_topology
        else:
            LOG.debug(
                "Trying to revert a migration context that does not "
                "seem to be set for this instance",
                instance=self)

    @contextlib.contextmanager
    def mutated_migration_context(self):
        """Context manager to temporarily apply the migration context.

        Calling .save() from within the context manager means that the mutated
        context will be saved which can cause incorrect resource tracking, and
        should be avoided.
        """
        current_numa_topo = self.numa_topology
        self.apply_migration_context()
        try:
            yield
        finally:
            self.numa_topology = current_numa_topo
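
    # A minimal usage sketch (hypothetical caller, not part of the original
    # class): resource-claiming code can temporarily work against the
    # migration's target NUMA topology without persisting it, e.g.:
    #
    #     with instance.mutated_migration_context():
    #         do_claim(instance)   # sees migration_context.new_numa_topology
    #     # the original numa_topology is restored here; as the docstring
    #     # warns, avoid calling instance.save() inside the block.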

    @base.remotable
    def drop_migration_context(self):
        if self.migration_context:
            objects.MigrationContext._destroy(self._context, self.uuid)
            self.migration_context = None

    def obj_load_attr(self, attrname):
        if attrname not in INSTANCE_OPTIONAL_ATTRS:
            raise exception.ObjectActionError(
                action='obj_load_attr',
                reason='attribute %s not lazy-loadable' % attrname)

        if not self._context:
            raise exception.OrphanedObjectError(method='obj_load_attr',
                                                objtype=self.obj_name())

        LOG.debug("Lazy-loading `%(attr)s' on %(name)s uuid %(uuid)s", {
            'attr': attrname,
            'name': self.obj_name(),
            'uuid': self.uuid,
        })

        # NOTE(danms): We handle some fields differently here so that we
        # can be more efficient
        if attrname == 'fault':
            self._load_fault()
        elif attrname == 'numa_topology':
            self._load_numa_topology()
        elif attrname == 'pci_requests':
            self._load_pci_requests()
        elif attrname == 'vcpu_model':
            self._load_vcpu_model()
        elif attrname == 'ec2_ids':
            self._load_ec2_ids()
        elif attrname == 'migration_context':
            self._load_migration_context()
        elif 'flavor' in attrname:
            self._load_flavor()
        else:
            # FIXME(comstud): This should be optimized to only load the attr.
            self._load_generic(attrname)
        self.obj_reset_changes([attrname])

    def get_flavor(self, namespace=None):
        prefix = ('%s_' % namespace) if namespace is not None else ''
        attr = '%sflavor' % prefix
        try:
            return getattr(self, attr)
        except exception.FlavorNotFound:
            # NOTE(danms): This only happens in the case where we don't
            # have flavor information in sysmeta or extra, and doing
            # this triggers a lookup based on our instance_type_id for
            # (very) legacy instances. That legacy code expects a None here,
            # so emulate it for this helper, even though the actual attribute
            # is not nullable.
            return None

    def set_flavor(self, flavor, namespace=None):
        prefix = ('%s_' % namespace) if namespace is not None else ''
        attr = '%sflavor' % prefix
        if not isinstance(flavor, objects.Flavor):
            flavor = objects.Flavor(**flavor)
        setattr(self, attr, flavor)

        self.save()

    def delete_flavor(self, namespace):
        prefix = ('%s_' % namespace) if namespace else ''
        attr = '%sflavor' % prefix
        setattr(self, attr, None)

        self.save()

    @base.remotable
    def delete_metadata_key(self, key):
        """Optimized metadata delete method.

        This provides a more efficient way to delete a single metadata
        key, instead of just calling instance.save(). This should be called
        with the key still present in self.metadata, which it will update
        after completion.
        """
        db.instance_metadata_delete(self._context, self.uuid, key)
        md_was_changed = 'metadata' in self.obj_what_changed()
        del self.metadata[key]
        self._orig_metadata.pop(key, None)
        notifications.send_update(self._context, self, self)
        if not md_was_changed:
            self.obj_reset_changes(['metadata'])
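
    # A small usage sketch (hypothetical values): deleting one key round-trips
    # only that key to the DB instead of a full save(), and keeps the local
    # change tracking consistent, e.g.:
    #
    #     instance.metadata                     # {'foo': 'bar', 'stale': 'x'}
    #     instance.delete_metadata_key('stale')
    #     instance.metadata                     # {'foo': 'bar'}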

    def _cell_name_blocks_sync(self):
        if (self.obj_attr_is_set('cell_name') and self.cell_name is not None
                and self.cell_name.startswith(cells_utils.BLOCK_SYNC_FLAG)):
            return True
        return False

    def _normalize_cell_name(self):
        """Undo skip_cell_sync()'s cell_name modification if applied"""

        if not self.obj_attr_is_set('cell_name') or self.cell_name is None:
            return
        cn_changed = 'cell_name' in self.obj_what_changed()
        if self.cell_name.startswith(cells_utils.BLOCK_SYNC_FLAG):
            self.cell_name = self.cell_name.replace(
                cells_utils.BLOCK_SYNC_FLAG, '', 1)
            # cell_name is not normally an empty string; if it is empty here,
            # it was None or unset before cells_utils.BLOCK_SYNC_FLAG was
            # applied.
            if len(self.cell_name) == 0:
                self.cell_name = None
        if not cn_changed:
            self.obj_reset_changes(['cell_name'])

    @contextlib.contextmanager
    def skip_cells_sync(self):
        """Context manager to save an instance without syncing cells.

        Temporarily disables the cells syncing logic, if enabled.  This should
        only be used when saving an instance that has been passed down/up from
        another cell in order to avoid passing it back to the originator to be
        re-saved.
        """
        cn_changed = 'cell_name' in self.obj_what_changed()
        if not self.obj_attr_is_set('cell_name') or self.cell_name is None:
            self.cell_name = ''
        self.cell_name = '%s%s' % (cells_utils.BLOCK_SYNC_FLAG, self.cell_name)
        if not cn_changed:
            self.obj_reset_changes(['cell_name'])
        try:
            yield
        finally:
            self._normalize_cell_name()
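

# A minimal, self-contained sketch (not part of the original example) of how a
# caller typically drives the save() and skip_cells_sync() helpers above. The
# `instance` argument is assumed to be an Instance object; the vm/task state
# strings mirror nova.compute.vm_states/task_states but are illustrative here.
def _example_save_with_expected_states(instance):
    # Only persist the update if the DB row is still ACTIVE with no task in
    # flight; otherwise db.instance_update_and_get_original() raises and the
    # caller can retry or give up.
    instance.task_state = 'rebooting'
    instance.save(expected_vm_state=('active',),
                  expected_task_state=(None,))

    # When re-saving an instance that was handed down/up from another cell,
    # suppress the cells sync so the update is not echoed back to the sender.
    with instance.skip_cells_sync():
        instance.save()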
Beispiel #24
0
class Aggregate(base.NovaPersistentObject, base.NovaObject):
    # Version 1.0: Initial version
    # Version 1.1: String attributes updated to support unicode
    # Version 1.2: Added uuid field
    # Version 1.3: Added get_by_uuid method
    VERSION = '1.3'

    fields = {
        'id': fields.IntegerField(),
        'uuid': fields.UUIDField(nullable=False),
        'name': fields.StringField(),
        'hosts': fields.ListOfStringsField(nullable=True),
        'metadata': fields.DictOfStringsField(nullable=True),
    }

    obj_extra_fields = ['availability_zone']

    def __init__(self, *args, **kwargs):
        super(Aggregate, self).__init__(*args, **kwargs)
        self._in_api = False

    @staticmethod
    def _from_db_object(context, aggregate, db_aggregate):
        for key in aggregate.fields:
            if key == 'metadata':
                db_key = 'metadetails'
            elif key in DEPRECATED_FIELDS and key not in db_aggregate:
                continue
            else:
                db_key = key
            setattr(aggregate, key, db_aggregate[db_key])

        # NOTE: This can be removed when we remove compatibility with
        # the old aggregate model.
        if any(f not in db_aggregate for f in DEPRECATED_FIELDS):
            aggregate.deleted_at = None
            aggregate.deleted = False

        aggregate._context = context
        aggregate.obj_reset_changes()

        return aggregate

    def _assert_no_hosts(self, action):
        if 'hosts' in self.obj_what_changed():
            raise exception.ObjectActionError(action=action,
                                              reason='hosts updated inline')

    @property
    def in_api(self):
        if self._in_api:
            return True
        else:
            try:
                _aggregate_get_from_db(self._context, self.id)
                self._in_api = True
            except exception.AggregateNotFound:
                pass
            return self._in_api

    @base.remotable_classmethod
    def get_by_id(cls, context, aggregate_id):
        try:
            db_aggregate = _aggregate_get_from_db(context, aggregate_id)
        except exception.AggregateNotFound:
            db_aggregate = db.aggregate_get(context, aggregate_id)
        return cls._from_db_object(context, cls(), db_aggregate)

    @base.remotable_classmethod
    def get_by_uuid(cls, context, aggregate_uuid):
        try:
            db_aggregate = _aggregate_get_from_db_by_uuid(
                context, aggregate_uuid)
        except exception.AggregateNotFound:
            db_aggregate = db.aggregate_get_by_uuid(context, aggregate_uuid)
        return cls._from_db_object(context, cls(), db_aggregate)

    @staticmethod
    @db_api.pick_context_manager_reader
    def _ensure_migrated(context):
        result = context.session.query(main_models.Aggregate).\
                 filter_by(deleted=0).count()
        if result:
            LOG.warning(
                _LW('Main database contains %(count)i unmigrated aggregates'),
                {'count': result})
        return result == 0

    @base.remotable
    def create(self):
        if self.obj_attr_is_set('id'):
            raise exception.ObjectActionError(action='create',
                                              reason='already created')

        # NOTE(mdoff): Once we have made it past a point where we know
        # all aggregates have been migrated, we can remove this. Ideally
        # in Ocata with a blocker migration to be sure.
        if not self._ensure_migrated(self._context):
            raise exception.ObjectActionError(
                action='create',
                reason='main database still contains aggregates')

        self._assert_no_hosts('create')
        updates = self.obj_get_changes()
        payload = dict(updates)
        if 'metadata' in updates:
            # NOTE(danms): For some reason the notification format is weird
            payload['meta_data'] = payload.pop('metadata')
        if 'uuid' not in updates:
            updates['uuid'] = uuidutils.generate_uuid()
            self.uuid = updates['uuid']
            LOG.debug('Generated uuid %(uuid)s for aggregate',
                      dict(uuid=updates['uuid']))
        compute_utils.notify_about_aggregate_update(self._context,
                                                    "create.start", payload)
        compute_utils.notify_about_aggregate_action(
            context=self._context,
            aggregate=self,
            action=fields.NotificationAction.CREATE,
            phase=fields.NotificationPhase.START)

        metadata = updates.pop('metadata', None)
        db_aggregate = _aggregate_create_in_db(self._context,
                                               updates,
                                               metadata=metadata)
        self._from_db_object(self._context, self, db_aggregate)
        payload['aggregate_id'] = self.id
        compute_utils.notify_about_aggregate_update(self._context,
                                                    "create.end", payload)
        compute_utils.notify_about_aggregate_action(
            context=self._context,
            aggregate=self,
            action=fields.NotificationAction.CREATE,
            phase=fields.NotificationPhase.END)

    @base.remotable
    def save(self):
        self._assert_no_hosts('save')
        updates = self.obj_get_changes()

        payload = {'aggregate_id': self.id}
        if 'metadata' in updates:
            payload['meta_data'] = updates['metadata']
        compute_utils.notify_about_aggregate_update(self._context,
                                                    "updateprop.start",
                                                    payload)
        updates.pop('id', None)
        try:
            db_aggregate = _aggregate_update_to_db(self._context, self.id,
                                                   updates)
        except exception.AggregateNotFound:
            db_aggregate = db.aggregate_update(self._context, self.id, updates)

        compute_utils.notify_about_aggregate_update(self._context,
                                                    "updateprop.end", payload)
        self._from_db_object(self._context, self, db_aggregate)

    @base.remotable
    def update_metadata(self, updates):
        if self.in_api:
            metadata_delete = _metadata_delete_from_db
            metadata_add = _metadata_add_to_db
        else:
            metadata_delete = db.aggregate_metadata_delete
            metadata_add = db.aggregate_metadata_add

        payload = {'aggregate_id': self.id, 'meta_data': updates}
        compute_utils.notify_about_aggregate_update(self._context,
                                                    "updatemetadata.start",
                                                    payload)
        to_add = {}
        for key, value in updates.items():
            if value is None:
                try:
                    metadata_delete(self._context, self.id, key)
                except exception.AggregateMetadataNotFound:
                    pass
                try:
                    self.metadata.pop(key)
                except KeyError:
                    pass
            else:
                to_add[key] = value
                self.metadata[key] = value
        metadata_add(self._context, self.id, to_add)
        compute_utils.notify_about_aggregate_update(self._context,
                                                    "updatemetadata.end",
                                                    payload)
        self.obj_reset_changes(fields=['metadata'])

    @base.remotable
    def destroy(self):
        try:
            _aggregate_delete_from_db(self._context, self.id)
        except exception.AggregateNotFound:
            db.aggregate_delete(self._context, self.id)

    @base.remotable
    def add_host(self, host):
        if self.in_api:
            _host_add_to_db(self._context, self.id, host)
        else:
            db.aggregate_host_add(self._context, self.id, host)

        if self.hosts is None:
            self.hosts = []
        self.hosts.append(host)
        self.obj_reset_changes(fields=['hosts'])

    @base.remotable
    def delete_host(self, host):
        if self.in_api:
            _host_delete_from_db(self._context, self.id, host)
        else:
            db.aggregate_host_delete(self._context, self.id, host)

        self.hosts.remove(host)
        self.obj_reset_changes(fields=['hosts'])

    @property
    def availability_zone(self):
        return self.metadata.get('availability_zone', None)
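

# A short sketch (hypothetical caller, not part of the original example) of the
# update_metadata() semantics implemented above: a value of None deletes the
# key, any other value adds or replaces it, and both the DB and the in-memory
# copy are updated in one call.
def _example_update_aggregate_metadata(aggregate):
    aggregate.update_metadata({'availability_zone': 'az-1',  # add/replace
                               'stale_key': None})           # delete
    return aggregate.availability_zone  # 'az-1', via the metadata property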
Beispiel #25
0
class InstancePayload(base.NotificationPayloadBase):
    SCHEMA = {
        'uuid': ('instance', 'uuid'),
        'user_id': ('instance', 'user_id'),
        'tenant_id': ('instance', 'project_id'),
        'reservation_id': ('instance', 'reservation_id'),
        'display_name': ('instance', 'display_name'),
        'display_description': ('instance', 'display_description'),
        'host_name': ('instance', 'hostname'),
        'host': ('instance', 'host'),
        'node': ('instance', 'node'),
        'os_type': ('instance', 'os_type'),
        'architecture': ('instance', 'architecture'),
        'availability_zone': ('instance', 'availability_zone'),
        'image_uuid': ('instance', 'image_ref'),
        'kernel_id': ('instance', 'kernel_id'),
        'ramdisk_id': ('instance', 'ramdisk_id'),
        'created_at': ('instance', 'created_at'),
        'launched_at': ('instance', 'launched_at'),
        'terminated_at': ('instance', 'terminated_at'),
        'deleted_at': ('instance', 'deleted_at'),
        'state': ('instance', 'vm_state'),
        'power_state': ('instance', 'power_state'),
        'task_state': ('instance', 'task_state'),
        'progress': ('instance', 'progress'),
        'metadata': ('instance', 'metadata'),
        'locked': ('instance', 'locked'),
    }
    # Version 1.0: Initial version
    # Version 1.1: add locked and display_description field
    VERSION = '1.1'
    fields = {
        'uuid': fields.UUIDField(),
        'user_id': fields.StringField(nullable=True),
        'tenant_id': fields.StringField(nullable=True),
        'reservation_id': fields.StringField(nullable=True),
        'display_name': fields.StringField(nullable=True),
        'display_description': fields.StringField(nullable=True),
        'host_name': fields.StringField(nullable=True),
        'host': fields.StringField(nullable=True),
        'node': fields.StringField(nullable=True),
        'os_type': fields.StringField(nullable=True),
        'architecture': fields.StringField(nullable=True),
        'availability_zone': fields.StringField(nullable=True),
        'flavor': fields.ObjectField('FlavorPayload'),
        'image_uuid': fields.StringField(nullable=True),
        'kernel_id': fields.StringField(nullable=True),
        'ramdisk_id': fields.StringField(nullable=True),
        'created_at': fields.DateTimeField(nullable=True),
        'launched_at': fields.DateTimeField(nullable=True),
        'terminated_at': fields.DateTimeField(nullable=True),
        'deleted_at': fields.DateTimeField(nullable=True),
        'state': fields.InstanceStateField(nullable=True),
        'power_state': fields.InstancePowerStateField(nullable=True),
        'task_state': fields.InstanceTaskStateField(nullable=True),
        'progress': fields.IntegerField(nullable=True),
        'ip_addresses': fields.ListOfObjectsField('IpPayload'),
        'metadata': fields.DictOfStringsField(),
        'locked': fields.BooleanField(),
    }

    def __init__(self, instance, **kwargs):
        super(InstancePayload, self).__init__(**kwargs)
        self.populate_schema(instance=instance)
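

# A minimal illustration (assumed caller, not part of the original example) of
# how the SCHEMA mapping above is consumed: populate_schema() copies each
# ('instance', <attribute>) source into the corresponding payload field, so
# renamed fields such as tenant_id and image_uuid are filled from the
# instance's project_id and image_ref.
def _example_instance_payload_fields(instance):
    # flavor and ip_addresses are fields but not in SCHEMA, so a full payload
    # normally receives them via keyword arguments as well.
    payload = InstancePayload(instance=instance)
    return payload.tenant_id, payload.image_uuid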
Beispiel #26
0
class InstancePayload(base.NotificationPayloadBase):
    SCHEMA = {
        'uuid': ('instance', 'uuid'),
        'user_id': ('instance', 'user_id'),
        'tenant_id': ('instance', 'project_id'),
        'reservation_id': ('instance', 'reservation_id'),
        'display_name': ('instance', 'display_name'),
        'display_description': ('instance', 'display_description'),
        'host_name': ('instance', 'hostname'),
        'host': ('instance', 'host'),
        'node': ('instance', 'node'),
        'os_type': ('instance', 'os_type'),
        'architecture': ('instance', 'architecture'),
        'availability_zone': ('instance', 'availability_zone'),

        'image_uuid': ('instance', 'image_ref'),

        'key_name': ('instance', 'key_name'),

        'kernel_id': ('instance', 'kernel_id'),
        'ramdisk_id': ('instance', 'ramdisk_id'),

        'created_at': ('instance', 'created_at'),
        'launched_at': ('instance', 'launched_at'),
        'terminated_at': ('instance', 'terminated_at'),
        'deleted_at': ('instance', 'deleted_at'),
        'updated_at': ('instance', 'updated_at'),

        'state': ('instance', 'vm_state'),
        'power_state': ('instance', 'power_state'),
        'task_state': ('instance', 'task_state'),
        'progress': ('instance', 'progress'),

        'metadata': ('instance', 'metadata'),
        'locked': ('instance', 'locked'),
        'auto_disk_config': ('instance', 'auto_disk_config')
    }
    # Version 1.0: Initial version
    # Version 1.1: add locked and display_description field
    # Version 1.2: Add auto_disk_config field
    # Version 1.3: Add key_name field
    # Version 1.4: Add BDM related data
    # Version 1.5: Add updated_at field
    VERSION = '1.5'
    fields = {
        'uuid': fields.UUIDField(),
        'user_id': fields.StringField(nullable=True),
        'tenant_id': fields.StringField(nullable=True),
        'reservation_id': fields.StringField(nullable=True),
        'display_name': fields.StringField(nullable=True),
        'display_description': fields.StringField(nullable=True),
        'host_name': fields.StringField(nullable=True),
        'host': fields.StringField(nullable=True),
        'node': fields.StringField(nullable=True),
        'os_type': fields.StringField(nullable=True),
        'architecture': fields.StringField(nullable=True),
        'availability_zone': fields.StringField(nullable=True),

        'flavor': fields.ObjectField('FlavorPayload'),
        'image_uuid': fields.StringField(nullable=True),

        'key_name': fields.StringField(nullable=True),

        'kernel_id': fields.StringField(nullable=True),
        'ramdisk_id': fields.StringField(nullable=True),

        'created_at': fields.DateTimeField(nullable=True),
        'launched_at': fields.DateTimeField(nullable=True),
        'terminated_at': fields.DateTimeField(nullable=True),
        'deleted_at': fields.DateTimeField(nullable=True),
        'updated_at': fields.DateTimeField(nullable=True),

        'state': fields.InstanceStateField(nullable=True),
        'power_state': fields.InstancePowerStateField(nullable=True),
        'task_state': fields.InstanceTaskStateField(nullable=True),
        'progress': fields.IntegerField(nullable=True),

        'ip_addresses': fields.ListOfObjectsField('IpPayload'),
        'block_devices': fields.ListOfObjectsField('BlockDevicePayload',
                                                   nullable=True),

        'metadata': fields.DictOfStringsField(),
        'locked': fields.BooleanField(),
        'auto_disk_config': fields.DiskConfigField()
    }

    def __init__(self, instance, bdms=None):
        super(InstancePayload, self).__init__()
        network_info = instance.get_network_info()
        self.ip_addresses = IpPayload.from_network_info(network_info)
        self.flavor = flavor_payload.FlavorPayload(flavor=instance.flavor)
        if bdms is not None:
            self.block_devices = BlockDevicePayload.from_bdms(bdms)
        else:
            self.block_devices = BlockDevicePayload.from_instance(instance)

        self.populate_schema(instance=instance)
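

# A brief note with an assumed caller (not part of the original example): this
# version 1.5 constructor looks up block devices itself unless the caller
# already has them, so passing bdms avoids the extra
# BlockDevicePayload.from_instance() lookup:
#
#     payload = InstancePayload(instance, bdms=bdms)  # reuse loaded BDMs
#     payload = InstancePayload(instance)             # falls back to a lookup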
Beispiel #27
0
class Instance(base.NovaPersistentObject, base.NovaObject):
    # Version 1.0: Initial version
    # Version 1.1: Added info_cache
    # Version 1.2: Added security_groups
    # Version 1.3: Added expected_vm_state and admin_state_reset to
    #              save()
    # Version 1.4: Added locked_by and deprecated locked
    # Version 1.5: Added cleaned
    # Version 1.6: Added pci_devices
    # Version 1.7: String attributes updated to support unicode
    # Version 1.8: 'security_groups' and 'pci_devices' cannot be None
    # Version 1.9: Make uuid a non-None real string
    # Version 1.10: Added use_slave to refresh and get_by_uuid
    # Version 1.11: Update instance from database during destroy
    # Version 1.12: Added ephemeral_key_uuid
    # Version 1.13: Added delete_metadata_key()
    # Version 1.14: Added numa_topology
    # Version 1.15: PciDeviceList 1.1
    VERSION = '1.15'

    fields = {
        'id': fields.IntegerField(),
        'user_id': fields.StringField(nullable=True),
        'project_id': fields.StringField(nullable=True),
        'image_ref': fields.StringField(nullable=True),
        'kernel_id': fields.StringField(nullable=True),
        'ramdisk_id': fields.StringField(nullable=True),
        'hostname': fields.StringField(nullable=True),
        'launch_index': fields.IntegerField(nullable=True),
        'key_name': fields.StringField(nullable=True),
        'key_data': fields.StringField(nullable=True),
        'power_state': fields.IntegerField(nullable=True),
        'vm_state': fields.StringField(nullable=True),
        'task_state': fields.StringField(nullable=True),
        'memory_mb': fields.IntegerField(nullable=True),
        'vcpus': fields.IntegerField(nullable=True),
        'root_gb': fields.IntegerField(nullable=True),
        'ephemeral_gb': fields.IntegerField(nullable=True),
        'ephemeral_key_uuid': fields.UUIDField(nullable=True),
        'host': fields.StringField(nullable=True),
        'node': fields.StringField(nullable=True),
        'instance_type_id': fields.IntegerField(nullable=True),
        'user_data': fields.StringField(nullable=True),
        'reservation_id': fields.StringField(nullable=True),
        'scheduled_at': fields.DateTimeField(nullable=True),
        'launched_at': fields.DateTimeField(nullable=True),
        'terminated_at': fields.DateTimeField(nullable=True),
        'availability_zone': fields.StringField(nullable=True),
        'display_name': fields.StringField(nullable=True),
        'display_description': fields.StringField(nullable=True),
        'launched_on': fields.StringField(nullable=True),

        # NOTE(jdillaman): locked deprecated in favor of locked_by,
        # to be removed in Icehouse
        'locked': fields.BooleanField(default=False),
        'locked_by': fields.StringField(nullable=True),
        'os_type': fields.StringField(nullable=True),
        'architecture': fields.StringField(nullable=True),
        'vm_mode': fields.StringField(nullable=True),
        'uuid': fields.UUIDField(),
        'root_device_name': fields.StringField(nullable=True),
        'default_ephemeral_device': fields.StringField(nullable=True),
        'default_swap_device': fields.StringField(nullable=True),
        'config_drive': fields.StringField(nullable=True),
        'access_ip_v4': fields.IPV4AddressField(nullable=True),
        'access_ip_v6': fields.IPV6AddressField(nullable=True),
        'auto_disk_config': fields.BooleanField(default=False),
        'progress': fields.IntegerField(nullable=True),
        'shutdown_terminate': fields.BooleanField(default=False),
        'disable_terminate': fields.BooleanField(default=False),
        'cell_name': fields.StringField(nullable=True),
        'metadata': fields.DictOfStringsField(),
        'system_metadata': fields.DictOfNullableStringsField(),
        'info_cache': fields.ObjectField('InstanceInfoCache', nullable=True),
        'security_groups': fields.ObjectField('SecurityGroupList'),
        'fault': fields.ObjectField('InstanceFault', nullable=True),
        'cleaned': fields.BooleanField(default=False),
        'pci_devices': fields.ObjectField('PciDeviceList', nullable=True),
        'numa_topology': fields.ObjectField('InstanceNUMATopology',
                                            nullable=True)
    }

    obj_extra_fields = ['name']

    def __init__(self, *args, **kwargs):
        super(Instance, self).__init__(*args, **kwargs)
        self._reset_metadata_tracking()

    def _reset_metadata_tracking(self, fields=None):
        if fields is None or 'system_metadata' in fields:
            self._orig_system_metadata = (dict(self.system_metadata)
                                          if 'system_metadata' in self else {})
        if fields is None or 'metadata' in fields:
            self._orig_metadata = (dict(self.metadata)
                                   if 'metadata' in self else {})

    def obj_reset_changes(self, fields=None):
        super(Instance, self).obj_reset_changes(fields)
        self._reset_metadata_tracking(fields=fields)

    def obj_what_changed(self):
        changes = super(Instance, self).obj_what_changed()
        if 'metadata' in self and self.metadata != self._orig_metadata:
            changes.add('metadata')
        if 'system_metadata' in self and (self.system_metadata !=
                                          self._orig_system_metadata):
            changes.add('system_metadata')
        return changes

    @classmethod
    def _obj_from_primitive(cls, context, objver, primitive):
        self = super(Instance,
                     cls)._obj_from_primitive(context, objver, primitive)
        self._reset_metadata_tracking()
        return self

    def obj_make_compatible(self, primitive, target_version):
        target_version = utils.convert_version_to_tuple(target_version)
        unicode_attributes = [
            'user_id', 'project_id', 'image_ref', 'kernel_id', 'ramdisk_id',
            'hostname', 'key_name', 'key_data', 'host', 'node', 'user_data',
            'availability_zone', 'display_name', 'display_description',
            'launched_on', 'locked_by', 'os_type', 'architecture', 'vm_mode',
            'root_device_name', 'default_ephemeral_device',
            'default_swap_device', 'config_drive', 'cell_name'
        ]
        if target_version < (1, 14) and 'numa_topology' in primitive:
            del primitive['numa_topology']
        if target_version < (1, 10) and 'info_cache' in primitive:
            # NOTE(danms): Instance <= 1.9 (havana) had info_cache 1.4
            self.info_cache.obj_make_compatible(
                primitive['info_cache']['nova_object.data'], '1.4')
            primitive['info_cache']['nova_object.version'] = '1.4'
        if target_version < (1, 7):
            # NOTE(danms): Before 1.7, we couldn't handle unicode in
            # string fields, so squash it here
            for field in [
                    x for x in unicode_attributes
                    if x in primitive and primitive[x] is not None
            ]:
                primitive[field] = primitive[field].encode('ascii', 'replace')
        if target_version < (1, 15) and 'pci_devices' in primitive:
            # NOTE(baoli): Instance <= 1.14 (icehouse) had PciDeviceList 1.0
            self.pci_devices.obj_make_compatible(
                primitive['pci_devices']['nova_object.data'], '1.0')
            primitive['pci_devices']['nova_object.version'] = '1.0'
        if target_version < (1, 6):
            # NOTE(danms): Before 1.6 there was no pci_devices list
            if 'pci_devices' in primitive:
                del primitive['pci_devices']
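
    # A minimal sketch of how the backporting above is normally triggered
    # (assuming the standard obj_to_primitive() entry point; the target
    # version is illustrative):
    #
    #     primitive = instance.obj_to_primitive(target_version='1.5')
    #     # for a 1.5 target, numa_topology (added in 1.14) and pci_devices
    #     # (added in 1.6) are stripped per the rules in obj_make_compatible()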

    @property
    def name(self):
        try:
            base_name = CONF.instance_name_template % self.id
        except TypeError:
            # Support templates like "uuid-%(uuid)s", etc.
            info = {}
            # NOTE(russellb): Don't use self.iteritems() here, as it will
            # result in infinite recursion on the name property.
            for key in self.fields:
                if key == 'name':
                    # NOTE(danms): prevent recursion
                    continue
                elif not self.obj_attr_is_set(key):
                    # NOTE(danms): Don't trigger lazy-loads
                    continue
                info[key] = self[key]
            try:
                base_name = CONF.instance_name_template % info
            except KeyError:
                base_name = self.uuid
        return base_name

    @staticmethod
    def _from_db_object(context, instance, db_inst, expected_attrs=None):
        """Method to help with migration to objects.

        Converts a database entity to a formal object.
        """
        instance._context = context
        if expected_attrs is None:
            expected_attrs = []
        # Most of the field names match right now, so be quick
        for field in instance.fields:
            if field in INSTANCE_OPTIONAL_ATTRS:
                continue
            elif field == 'deleted':
                instance.deleted = db_inst['deleted'] == db_inst['id']
            elif field == 'cleaned':
                instance.cleaned = db_inst['cleaned'] == 1
            else:
                instance[field] = db_inst[field]

        if 'metadata' in expected_attrs:
            instance['metadata'] = utils.instance_meta(db_inst)
        if 'system_metadata' in expected_attrs:
            instance['system_metadata'] = utils.instance_sys_meta(db_inst)
        if 'fault' in expected_attrs:
            instance['fault'] = (objects.InstanceFault.get_latest_for_instance(
                context, instance.uuid))
        if 'numa_topology' in expected_attrs:
            instance._load_numa_topology()

        if 'info_cache' in expected_attrs:
            if db_inst['info_cache'] is None:
                instance.info_cache = None
            elif not instance.obj_attr_is_set('info_cache'):
                # TODO(danms): If this ever happens on a backlevel instance
                # passed to us by a backlevel service, things will break
                instance.info_cache = objects.InstanceInfoCache(context)
            if instance.info_cache is not None:
                instance.info_cache._from_db_object(context,
                                                    instance.info_cache,
                                                    db_inst['info_cache'])

        # TODO(danms): If we are updating these on a backlevel instance,
        # we'll end up sending back new versions of these objects (see
        # above note for new info_caches).
        if 'pci_devices' in expected_attrs:
            pci_devices = base.obj_make_list(context,
                                             objects.PciDeviceList(context),
                                             objects.PciDevice,
                                             db_inst['pci_devices'])
            instance['pci_devices'] = pci_devices
        if 'security_groups' in expected_attrs:
            sec_groups = base.obj_make_list(context,
                                            objects.SecurityGroupList(context),
                                            objects.SecurityGroup,
                                            db_inst['security_groups'])
            instance['security_groups'] = sec_groups

        instance.obj_reset_changes()
        return instance

    @base.remotable_classmethod
    def get_by_uuid(cls, context, uuid, expected_attrs=None, use_slave=False):
        if expected_attrs is None:
            expected_attrs = ['info_cache', 'security_groups']
        columns_to_join = _expected_cols(expected_attrs)
        db_inst = db.instance_get_by_uuid(context,
                                          uuid,
                                          columns_to_join=columns_to_join,
                                          use_slave=use_slave)
        return cls._from_db_object(context, cls(), db_inst, expected_attrs)

    @base.remotable_classmethod
    def get_by_id(cls, context, inst_id, expected_attrs=None):
        if expected_attrs is None:
            expected_attrs = ['info_cache', 'security_groups']
        columns_to_join = _expected_cols(expected_attrs)
        db_inst = db.instance_get(context,
                                  inst_id,
                                  columns_to_join=columns_to_join)
        return cls._from_db_object(context, cls(), db_inst, expected_attrs)

    @base.remotable
    def create(self, context):
        if self.obj_attr_is_set('id'):
            raise exception.ObjectActionError(action='create',
                                              reason='already created')
        updates = self.obj_get_changes()
        expected_attrs = [
            attr for attr in INSTANCE_DEFAULT_FIELDS if attr in updates
        ]
        if 'security_groups' in updates:
            updates['security_groups'] = [
                x.name for x in updates['security_groups']
            ]
        if 'info_cache' in updates:
            updates['info_cache'] = {
                'network_info': updates['info_cache'].network_info.json()
            }
        numa_topology = updates.pop('numa_topology', None)
        db_inst = db.instance_create(context, updates)
        if numa_topology:
            expected_attrs.append('numa_topology')
            numa_topology.instance_uuid = db_inst['uuid']
            numa_topology.create(context)
        self._from_db_object(context, self, db_inst, expected_attrs)

    @base.remotable
    def destroy(self, context):
        if not self.obj_attr_is_set('id'):
            raise exception.ObjectActionError(action='destroy',
                                              reason='already destroyed')
        if not self.obj_attr_is_set('uuid'):
            raise exception.ObjectActionError(action='destroy',
                                              reason='no uuid')
        if not self.obj_attr_is_set('host') or not self.host:
            # NOTE(danms): If our host is not set, avoid a race
            constraint = db.constraint(host=db.equal_any(None))
        else:
            constraint = None

        try:
            db_inst = db.instance_destroy(context,
                                          self.uuid,
                                          constraint=constraint)
            self._from_db_object(context, self, db_inst)
        except exception.ConstraintNotMet:
            raise exception.ObjectActionError(action='destroy',
                                              reason='host changed')
        delattr(self, base.get_attrname('id'))

    def _save_info_cache(self, context):
        if self.info_cache:
            self.info_cache.save(context)

    def _save_security_groups(self, context):
        security_groups = self.security_groups or []
        for secgroup in security_groups:
            secgroup.save(context)
        self.security_groups.obj_reset_changes()

    def _save_fault(self, context):
        # NOTE(danms): I don't think we need to worry about this, do we?
        pass

    def _save_numa_topology(self, context):
        if self.numa_topology:
            self.numa_topology.instance_uuid = self.uuid
            self.numa_topology._save(context)
        else:
            objects.InstanceNUMATopology.delete_by_instance_uuid(
                context, self.uuid)

    def _save_pci_devices(self, context):
        # NOTE(yjiang5): All devices are held by the PCI tracker, and only
        # the PCI tracker is permitted to update the DB. Any changes to
        # devices made from here will be dropped.
        pass

    @base.remotable
    def save(self,
             context,
             expected_vm_state=None,
             expected_task_state=None,
             admin_state_reset=False):
        """Save updates to this instance

        Column-wise updates will be made based on the result of
        self.what_changed(). If expected_task_state is provided,
        it will be checked against the in-database copy of the
        instance before updates are made.

        :param context: Security context
        :param expected_task_state: Optional tuple of valid task states
            for the instance to be in
        :param expected_vm_state: Optional tuple of valid vm states
            for the instance to be in
        :param admin_state_reset: True if admin API is forcing setting
            of task_state/vm_state

        """

        cell_type = cells_opts.get_cell_type()
        if cell_type == 'api' and self.cell_name:
            # NOTE(comstud): We need to stash a copy of ourselves
            # before any updates are applied.  When we call the save
            # methods on nested objects, we will lose any changes to
            # them.  But we need to make sure child cells can tell
            # what is changed.
            #
            # We also need to nuke any updates to vm_state and task_state
            # unless admin_state_reset is True.  compute cells are
            # authoritative for their view of vm_state and task_state.
            stale_instance = self.obj_clone()

            def _handle_cell_update_from_api():
                cells_api = cells_rpcapi.CellsAPI()
                cells_api.instance_update_from_api(context, stale_instance,
                                                   expected_vm_state,
                                                   expected_task_state,
                                                   admin_state_reset)
        else:
            stale_instance = None

        updates = {}
        changes = self.obj_what_changed()

        for field in self.fields:
            if (self.obj_attr_is_set(field)
                    and isinstance(self.fields[field], fields.ObjectField)):
                try:
                    getattr(self, '_save_%s' % field)(context)
                except AttributeError:
                    LOG.exception(_LE('No save handler for %s'),
                                  field,
                                  instance=self)
            elif field in changes:
                updates[field] = self[field]

        if not updates:
            if stale_instance:
                _handle_cell_update_from_api()
            return

        # Cleaned needs to be turned back into an int here
        if 'cleaned' in updates:
            if updates['cleaned']:
                updates['cleaned'] = 1
            else:
                updates['cleaned'] = 0

        if expected_task_state is not None:
            if (self.VERSION == '1.9'
                    and expected_task_state == 'image_snapshot'):
                # NOTE(danms): Icehouse introduced a pending state which
                # Havana doesn't know about. If we're an old instance,
                # tolerate the pending state as well
                expected_task_state = [
                    expected_task_state, 'image_snapshot_pending'
                ]
            updates['expected_task_state'] = expected_task_state
        if expected_vm_state is not None:
            updates['expected_vm_state'] = expected_vm_state

        expected_attrs = [
            attr for attr in _INSTANCE_OPTIONAL_JOINED_FIELDS
            if self.obj_attr_is_set(attr)
        ]
        if 'pci_devices' in expected_attrs:
            # NOTE(danms): We don't refresh pci_devices on save right now
            expected_attrs.remove('pci_devices')

        # NOTE(alaski): We need to pull system_metadata for the
        # notification.send_update() below.  If we don't there's a KeyError
        # when it tries to extract the flavor.
        if 'system_metadata' not in expected_attrs:
            expected_attrs.append('system_metadata')
        old_ref, inst_ref = db.instance_update_and_get_original(
            context,
            self.uuid,
            updates,
            update_cells=False,
            columns_to_join=_expected_cols(expected_attrs))

        if stale_instance:
            _handle_cell_update_from_api()
        elif cell_type == 'compute':
            cells_api = cells_rpcapi.CellsAPI()
            cells_api.instance_update_at_top(context, inst_ref)

        self._from_db_object(context,
                             self,
                             inst_ref,
                             expected_attrs=expected_attrs)
        notifications.send_update(context, old_ref, inst_ref)
        self.obj_reset_changes()

    @base.remotable
    def refresh(self, context, use_slave=False):
        extra = [
            field for field in INSTANCE_OPTIONAL_ATTRS
            if self.obj_attr_is_set(field)
        ]
        current = self.__class__.get_by_uuid(context,
                                             uuid=self.uuid,
                                             expected_attrs=extra,
                                             use_slave=use_slave)
        # NOTE(danms): We orphan the instance copy so we do not unexpectedly
        # trigger a lazy-load (which would mean we failed to calculate the
        # expected_attrs properly)
        current._context = None

        for field in self.fields:
            if self.obj_attr_is_set(field):
                if field == 'info_cache':
                    self.info_cache.refresh()
                    # NOTE(danms): Make sure this shows up as touched
                    self.info_cache = self.info_cache
                elif self[field] != current[field]:
                    self[field] = current[field]
        self.obj_reset_changes()

    def _load_generic(self, attrname):
        instance = self.__class__.get_by_uuid(self._context,
                                              uuid=self.uuid,
                                              expected_attrs=[attrname])

        # NOTE(danms): Never allow us to recursively-load
        if instance.obj_attr_is_set(attrname):
            self[attrname] = instance[attrname]
        else:
            raise exception.ObjectActionError(
                action='obj_load_attr',
                reason='loading %s requires recursion' % attrname)

    def _load_fault(self):
        self.fault = objects.InstanceFault.get_latest_for_instance(
            self._context, self.uuid)

    def _load_numa_topology(self):
        try:
            self.numa_topology = \
                objects.InstanceNUMATopology.get_by_instance_uuid(
                    self._context, self.uuid)
        except exception.NumaTopologyNotFound:
            self.numa_topology = None

    def obj_load_attr(self, attrname):
        if attrname not in INSTANCE_OPTIONAL_ATTRS:
            raise exception.ObjectActionError(
                action='obj_load_attr',
                reason='attribute %s not lazy-loadable' % attrname)
        if not self._context:
            raise exception.OrphanedObjectError(method='obj_load_attr',
                                                objtype=self.obj_name())

        LOG.debug("Lazy-loading `%(attr)s' on %(name)s uuid %(uuid)s", {
            'attr': attrname,
            'name': self.obj_name(),
            'uuid': self.uuid,
        })
        # FIXME(comstud): This should be optimized to only load the attr.
        if attrname == 'fault':
            # NOTE(danms): We handle fault differently here so that we
            # can be more efficient
            self._load_fault()
        elif attrname == 'numa_topology':
            self._load_numa_topology()
        else:
            self._load_generic(attrname)
        self.obj_reset_changes([attrname])

    def get_flavor(self, namespace=None):
        prefix = ('%s_' % namespace) if namespace is not None else ''

        db_flavor = flavors.extract_flavor(self, prefix)
        flavor = objects.Flavor(self._context)
        for key in flavors.system_metadata_flavor_props:
            flavor[key] = db_flavor[key]
        return flavor

    def set_flavor(self, flavor, namespace=None):
        prefix = ('%s_' % namespace) if namespace is not None else ''

        self.system_metadata = flavors.save_flavor_info(
            self.system_metadata, flavor, prefix)
        self.save()

    def delete_flavor(self, namespace):
        self.system_metadata = flavors.delete_flavor_info(
            self.system_metadata, "%s_" % namespace)
        self.save()

    @base.remotable
    def delete_metadata_key(self, context, key):
        """Optimized metadata delete method.

        This provides a more efficient way to delete a single metadata
        key, instead of just calling instance.save(). This should be called
        with the key still present in self.metadata, which it will update
        after completion.
        """
        db.instance_metadata_delete(context, self.uuid, key)
        md_was_changed = 'metadata' in self.obj_what_changed()
        del self.metadata[key]
        self._orig_metadata.pop(key, None)
        instance_dict = base.obj_to_primitive(self)
        notifications.send_update(context, instance_dict, instance_dict)
        if not md_was_changed:
            self.obj_reset_changes(['metadata'])
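

# A small sketch (hypothetical caller, not part of the original example) of the
# system_metadata-based flavor helpers in this older Instance version: flavors
# live in system_metadata under an optional namespace prefix such as 'old_'.
def _example_stash_old_flavor(instance, new_flavor):
    current = instance.get_flavor()               # flavor from system_metadata
    instance.set_flavor(current, namespace='old')  # stash it under old_* keys
    instance.set_flavor(new_flavor)                # make new_flavor current
    return instance.get_flavor(namespace='old')    # read the stashed copy back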
Beispiel #28
0
class AffinityGroup(base.NovaPersistentObject, base.NovaObject):
    # Version 1.0: Initial version
    # Version 1.1: String attributes updated to support unicode
    VERSION = '1.1'

    fields = {
        'id': fields.IntegerField(),
        'name': fields.StringField(nullable=True),
        'description': fields.StringField(nullable=True),
        'type': fields.StringField(nullable=True),
        'vms': fields.ListOfStringsField(nullable=True),
        'metadata': fields.DictOfStringsField(nullable=True),
    }

    obj_extra_fields = ['availability_zone']

    @staticmethod
    def _from_db_object(context, affinitygroup, db_affinitygroup):
        for key in affinitygroup.fields:
            if key == 'metadata':
                db_key = 'metadetails'
            else:
                db_key = key
            affinitygroup[key] = db_affinitygroup[db_key]
        affinitygroup._context = context
        affinitygroup.obj_reset_changes()
        return affinitygroup

    def _assert_no_vms(self, action):
        if 'vms' in self.obj_what_changed():
            raise exception.ObjectActionError(action=action,
                                              reason='vms updated inline')

    @base.remotable_classmethod
    def get_by_id(cls, context, affinitygroup_id):
        db_affinitygroup = db.affinitygroup_get(context, affinitygroup_id)
        return cls._from_db_object(context, cls(), db_affinitygroup)

    @base.remotable_classmethod
    def get_by_vm_id(cls, context, vm_id):
        db_affinitygroup = db.affinitygroup_get_by_vm(context, vm_id)
        return cls._from_db_object(context, cls(), db_affinitygroup)

    @base.remotable
    def create(self, context):
        self._assert_no_vms('create')
        updates = self.obj_get_changes()
        metadata = updates.pop('metadata', None)
        db_affinitygroup = db.affinitygroup_create(context,
                                                   updates,
                                                   metadata=metadata)
        self._from_db_object(context, self, db_affinitygroup)

    @base.remotable
    def save(self, context):
        self._assert_no_vms('save')
        updates = self.obj_get_changes()
        updates.pop('id', None)
        db_affinitygroup = db.affinitygroup_update(context, self.id, updates)
        return self._from_db_object(context, self, db_affinitygroup)

    @base.remotable
    def update_metadata(self, context, updates):
        to_add = {}
        for key, value in updates.items():
            if value is None:
                try:
                    db.affinitygroup_metadata_delete(context, self.id, key)
                except huawei_exception.AffinityGroupMetadataNotFound:
                    pass
                try:
                    self.metadata.pop(key)
                except KeyError:
                    pass
            else:
                to_add[key] = value
                self.metadata[key] = value
        db.affinitygroup_metadata_add(context, self.id, to_add)
        self.obj_reset_changes(fields=['metadata'])

    @base.remotable
    def destroy(self, context):
        db.affinitygroup_delete(context, self.id)

    @base.remotable
    def add_vm(self, context, vm):
        db.affinitygroup_vm_add(context, self.id, vm)
        if self.vms is None:
            self.vms = []
        self.vms.append(vm)
        self.obj_reset_changes(fields=['vms'])

    @base.remotable
    def delete_vm(self, context, vm):
        db.affinitygroup_vm_delete(context, self.id, vm)
        self.vms.remove(vm)
        self.obj_reset_changes(fields=['vms'])

    @base.remotable
    def add_vms(self, context, vm_list):
        db.affinitygroup_vms_add(context, self.id, vm_list)
        if self.vms is None:
            self.vms = []
        self.vms = self.vms + vm_list
        self.obj_reset_changes(fields=['vms'])

    @base.remotable
    def delete_vms(self, context, vm_list):
        db.affinitygroup_vms_delete(context, self.id, vm_list)
        self.vms = list(set(self.vms) - set(vm_list))
        self.obj_reset_changes(fields=['vms'])

    @base.remotable
    def get_all_vms(self, context):
        return db.affinitygroup_vm_get_all(context, self.id)

    @property
    def availability_zone(self):
        return self.metadata.get('availability_zone', None)
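
# Hedged, standalone sketch (plain Python, no database calls): mirrors the
# update semantics of AffinityGroup.update_metadata() above. A value of None
# requests deletion, any other value is an add/update, and only the additions
# are collected for the final metadata_add call.
def apply_metadata_updates(current, updates):
    to_add = {}
    for key, value in updates.items():
        if value is None:
            current.pop(key, None)          # delete, ignoring missing keys
        else:
            to_add[key] = value
            current[key] = value
    return to_add                           # what would be written to the DB

md = {'availability_zone': 'az1', 'tier': 'gold'}
print(apply_metadata_updates(md, {'tier': None, 'owner': 'ops'}))
# -> {'owner': 'ops'}; md is now {'availability_zone': 'az1', 'owner': 'ops'}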
Beispiel #29
0
class Flavor(base.NovaPersistentObject, base.NovaObject,
             base.NovaObjectDictCompat):
    # Version 1.0: Initial version
    # Version 1.1: Added save_projects(), save_extra_specs(), removed
    #              remotable from save()
    VERSION = '1.1'

    fields = {
        'id': fields.IntegerField(),
        'name': fields.StringField(nullable=True),
        'memory_mb': fields.IntegerField(),
        'vcpus': fields.IntegerField(),
        'root_gb': fields.IntegerField(),
        'ephemeral_gb': fields.IntegerField(),
        'flavorid': fields.StringField(),
        'swap': fields.IntegerField(),
        'rxtx_factor': fields.FloatField(nullable=True, default=1.0),
        'vcpu_weight': fields.IntegerField(nullable=True),
        'disabled': fields.BooleanField(),
        'is_public': fields.BooleanField(),
        'extra_specs': fields.DictOfStringsField(),
        'projects': fields.ListOfStringsField(),
    }

    def __init__(self, *args, **kwargs):
        super(Flavor, self).__init__(*args, **kwargs)
        self._orig_extra_specs = {}
        self._orig_projects = []
        self._in_api = False

    @property
    def in_api(self):
        # True once this flavor is known to live in the API database (rather
        # than the legacy main database); the lookup result is cached.
        if self._in_api:
            return True
        else:
            try:
                if 'id' in self:
                    self._flavor_get_from_db(self._context, self.id)
                else:
                    flavor = self._flavor_get_by_flavor_id_from_db(
                        self._context, self.flavorid)
                    # Fix us up so we can use our real id
                    self.id = flavor['id']
                self._in_api = True
            except exception.FlavorNotFound:
                pass
            return self._in_api

    @staticmethod
    def _from_db_object(context, flavor, db_flavor, expected_attrs=None):
        if expected_attrs is None:
            expected_attrs = []
        flavor._context = context
        for name, field in flavor.fields.items():
            if name in OPTIONAL_FIELDS:
                continue
            if name in DEPRECATED_FIELDS and name not in db_flavor:
                continue
            value = db_flavor[name]
            if isinstance(field, fields.IntegerField):
                value = value if value is not None else 0
            flavor[name] = value

        # NOTE(danms): This is to support processing the API flavor
        # model, which does not have these deprecated fields. When we
        # remove compatibility with the old InstanceType model, we can
        # remove this as well.
        if any(f not in db_flavor for f in DEPRECATED_FIELDS):
            flavor.deleted_at = None
            flavor.deleted = False

        if 'extra_specs' in expected_attrs:
            flavor.extra_specs = db_flavor['extra_specs']

        if 'projects' in expected_attrs:
            if 'projects' in db_flavor:
                flavor['projects'] = [
                    x['project_id'] for x in db_flavor['projects']
                ]
            else:
                flavor._load_projects()

        flavor.obj_reset_changes()
        return flavor

    @staticmethod
    @db_api.api_context_manager.reader
    def _flavor_get_query_from_db(context):
        query = context.session.query(api_models.Flavors).\
                options(joinedload('extra_specs'))
        if not context.is_admin:
            the_filter = [api_models.Flavors.is_public == true()]
            the_filter.extend([
                api_models.Flavors.projects.any(project_id=context.project_id)
            ])
            query = query.filter(or_(*the_filter))
        return query

    @staticmethod
    @require_context
    def _flavor_get_from_db(context, id):
        """Returns a dict describing specific flavor."""
        result = Flavor._flavor_get_query_from_db(context).\
                        filter_by(id=id).\
                        first()
        if not result:
            raise exception.FlavorNotFound(flavor_id=id)
        return db_api._dict_with_extra_specs(result)

    @staticmethod
    @require_context
    def _flavor_get_by_name_from_db(context, name):
        """Returns a dict describing specific flavor."""
        result = Flavor._flavor_get_query_from_db(context).\
                            filter_by(name=name).\
                            first()
        if not result:
            raise exception.FlavorNotFoundByName(flavor_name=name)
        return db_api._dict_with_extra_specs(result)

    @staticmethod
    @require_context
    def _flavor_get_by_flavor_id_from_db(context, flavor_id):
        """Returns a dict describing specific flavor_id."""
        result = Flavor._flavor_get_query_from_db(context).\
                        filter_by(flavorid=flavor_id).\
                        order_by(asc(api_models.Flavors.id)).\
                        first()
        if not result:
            raise exception.FlavorNotFound(flavor_id=flavor_id)
        return db_api._dict_with_extra_specs(result)

    @staticmethod
    def _get_projects_from_db(context, flavorid):
        return _get_projects_from_db(context, flavorid)

    @base.remotable
    def _load_projects(self):
        try:
            self.projects = self._get_projects_from_db(self._context,
                                                       self.flavorid)
        except exception.FlavorNotFound:
            self.projects = [
                x['project_id'] for x in db.flavor_access_get_by_flavor_id(
                    self._context, self.flavorid)
            ]
        self.obj_reset_changes(['projects'])

    def obj_load_attr(self, attrname):
        # NOTE(danms): Only projects could be lazy-loaded right now
        if attrname != 'projects':
            raise exception.ObjectActionError(action='obj_load_attr',
                                              reason='unable to load %s' %
                                              attrname)

        self._load_projects()

    def obj_reset_changes(self, fields=None, recursive=False):
        super(Flavor, self).obj_reset_changes(fields=fields,
                                              recursive=recursive)
        if fields is None or 'extra_specs' in fields:
            self._orig_extra_specs = (dict(self.extra_specs)
                                      if self.obj_attr_is_set('extra_specs')
                                      else {})
        if fields is None or 'projects' in fields:
            self._orig_projects = (list(self.projects)
                                   if self.obj_attr_is_set('projects') else [])

    def obj_what_changed(self):
        changes = super(Flavor, self).obj_what_changed()
        if ('extra_specs' in self
                and self.extra_specs != self._orig_extra_specs):
            changes.add('extra_specs')
        if 'projects' in self and self.projects != self._orig_projects:
            changes.add('projects')
        return changes

    @classmethod
    def _obj_from_primitive(cls, context, objver, primitive):
        self = super(Flavor, cls)._obj_from_primitive(context, objver,
                                                      primitive)
        changes = self.obj_what_changed()
        if 'extra_specs' not in changes:
            # This call left extra_specs "clean" so update our tracker
            self._orig_extra_specs = (dict(self.extra_specs)
                                      if self.obj_attr_is_set('extra_specs')
                                      else {})
        if 'projects' not in changes:
            # This call left projects "clean" so update our tracker
            self._orig_projects = (list(self.projects)
                                   if self.obj_attr_is_set('projects') else [])
        return self

    @base.remotable_classmethod
    def get_by_id(cls, context, id):
        try:
            db_flavor = cls._flavor_get_from_db(context, id)
        except exception.FlavorNotFound:
            db_flavor = db.flavor_get(context, id)
        return cls._from_db_object(context,
                                   cls(context),
                                   db_flavor,
                                   expected_attrs=['extra_specs'])

    @base.remotable_classmethod
    def get_by_name(cls, context, name):
        try:
            db_flavor = cls._flavor_get_by_name_from_db(context, name)
        except exception.FlavorNotFoundByName:
            db_flavor = db.flavor_get_by_name(context, name)
        return cls._from_db_object(context,
                                   cls(context),
                                   db_flavor,
                                   expected_attrs=['extra_specs'])

    @base.remotable_classmethod
    def get_by_flavor_id(cls, context, flavor_id, read_deleted=None):
        try:
            db_flavor = cls._flavor_get_by_flavor_id_from_db(
                context, flavor_id)
        except exception.FlavorNotFound:
            db_flavor = db.flavor_get_by_flavor_id(context, flavor_id,
                                                   read_deleted)
        return cls._from_db_object(context,
                                   cls(context),
                                   db_flavor,
                                   expected_attrs=['extra_specs'])

    @staticmethod
    def _flavor_add_project(context, flavor_id, project_id):
        return _flavor_add_project(context, flavor_id, project_id)

    @staticmethod
    def _flavor_del_project(context, flavor_id, project_id):
        return _flavor_del_project(context, flavor_id, project_id)

    def _add_access(self, project_id):
        if self.in_api:
            self._flavor_add_project(self._context, self.id, project_id)
        else:
            db.flavor_access_add(self._context, self.flavorid, project_id)

    @base.remotable
    def add_access(self, project_id):
        if 'projects' in self.obj_what_changed():
            raise exception.ObjectActionError(action='add_access',
                                              reason='projects modified')
        self._add_access(project_id)
        self._load_projects()
        self._send_notification(fields.NotificationAction.UPDATE)

    def _remove_access(self, project_id):
        if self.in_api:
            self._flavor_del_project(self._context, self.id, project_id)
        else:
            db.flavor_access_remove(self._context, self.flavorid, project_id)

    @base.remotable
    def remove_access(self, project_id):
        if 'projects' in self.obj_what_changed():
            raise exception.ObjectActionError(action='remove_access',
                                              reason='projects modified')
        self._remove_access(project_id)
        self._load_projects()
        self._send_notification(fields.NotificationAction.UPDATE)

    @staticmethod
    def _flavor_create(context, updates):
        return _flavor_create(context, updates)

    @staticmethod
    def _ensure_migrated(context):
        return _ensure_migrated(context)

    @base.remotable
    def create(self):
        if self.obj_attr_is_set('id'):
            raise exception.ObjectActionError(action='create',
                                              reason='already created')

        # NOTE(danms): Once we have made it past a point where we know
        # all flavors have been migrated, we can remove this. Ideally
        # in Ocata with a blocker migration to be sure.
        if not self._ensure_migrated(self._context):
            raise exception.ObjectActionError(
                action='create', reason='main database still contains flavors')

        updates = self.obj_get_changes()
        expected_attrs = []
        for attr in OPTIONAL_FIELDS:
            if attr in updates:
                expected_attrs.append(attr)
        db_flavor = self._flavor_create(self._context, updates)
        self._from_db_object(self._context,
                             self,
                             db_flavor,
                             expected_attrs=expected_attrs)
        self._send_notification(fields.NotificationAction.CREATE)

    @base.remotable
    def save_projects(self, to_add=None, to_delete=None):
        """Add or delete projects.

        :param to_add: A list of projects to add
        :param to_delete: A list of projects to remove
        """

        to_add = to_add if to_add is not None else []
        to_delete = to_delete if to_delete is not None else []

        for project_id in to_add:
            self._add_access(project_id)
        for project_id in to_delete:
            self._remove_access(project_id)
        self.obj_reset_changes(['projects'])

    @staticmethod
    def _flavor_extra_specs_add(context, flavor_id, specs, max_retries=10):
        return _flavor_extra_specs_add(context, flavor_id, specs, max_retries)

    @staticmethod
    def _flavor_extra_specs_del(context, flavor_id, key):
        return _flavor_extra_specs_del(context, flavor_id, key)

    @base.remotable
    def save_extra_specs(self, to_add=None, to_delete=None):
        """Add or delete extra_specs.

        :param to_add: A dict of new keys to add/update
        :param to_delete: A list of keys to remove
        """

        if self.in_api:
            add_fn = self._flavor_extra_specs_add
            del_fn = self._flavor_extra_specs_del
            ident = self.id
        else:
            add_fn = db.flavor_extra_specs_update_or_create
            del_fn = db.flavor_extra_specs_delete
            ident = self.flavorid

        to_add = to_add if to_add is not None else {}
        to_delete = to_delete if to_delete is not None else []

        if to_add:
            add_fn(self._context, ident, to_add)

        for key in to_delete:
            del_fn(self._context, ident, key)
        self.obj_reset_changes(['extra_specs'])

    def save(self):
        updates = self.obj_get_changes()
        projects = updates.pop('projects', None)
        extra_specs = updates.pop('extra_specs', None)
        if updates:
            raise exception.ObjectActionError(
                action='save', reason='read-only fields were changed')

        if extra_specs is not None:
            deleted_keys = (set(self._orig_extra_specs.keys()) -
                            set(extra_specs.keys()))
            added_keys = self.extra_specs
        else:
            added_keys = deleted_keys = None

        if projects is not None:
            deleted_projects = set(self._orig_projects) - set(projects)
            added_projects = set(projects) - set(self._orig_projects)
        else:
            added_projects = deleted_projects = None

        # NOTE(danms): The first remotable method we call will reset our
        # record of the original values for projects and extra_specs. Thus,
        # we collect the added/deleted lists for both above and /then/
        # call these methods to update them.

        if added_keys or deleted_keys:
            self.save_extra_specs(self.extra_specs, deleted_keys)

        if added_projects or deleted_projects:
            self.save_projects(added_projects, deleted_projects)

        if added_keys or deleted_keys or added_projects or deleted_projects:
            self._send_notification(fields.NotificationAction.UPDATE)

    @staticmethod
    def _flavor_destroy(context, flavor_id=None, flavorid=None):
        return _flavor_destroy(context, flavor_id=flavor_id, flavorid=flavorid)

    @base.remotable
    def destroy(self):
        # NOTE(danms): Historically the only way to delete a flavor
        # is via name, which is not very precise. We need to be able to
        # support the light construction of a flavor object and subsequent
        # delete request with only our name filled out. However, if we have
        # our id property, we should instead delete with that since it's
        # far more specific.
        try:
            if 'id' in self:
                db_flavor = self._flavor_destroy(self._context,
                                                 flavor_id=self.id)
            else:
                db_flavor = self._flavor_destroy(self._context,
                                                 flavorid=self.flavorid)
            self._from_db_object(self._context, self, db_flavor)
            self._send_notification(fields.NotificationAction.DELETE)
        except exception.FlavorNotFound:
            db.flavor_destroy(self._context, self.flavorid)

    def _send_notification(self, action):
        # NOTE(danms): Instead of making the below notification
        # lazy-load projects (which is a problem for instance-bound
        # flavors and compute-cell operations), just load them here.
        if 'projects' not in self:
            self._load_projects()
        notification_type = flavor_notification.FlavorNotification
        payload_type = flavor_notification.FlavorPayload

        payload = payload_type(self)
        notification_type(publisher=notification.NotificationPublisher(
            host=CONF.host, binary="nova-api"),
                          event_type=notification.EventType(object="flavor",
                                                            action=action),
                          priority=fields.NotificationPriority.INFO,
                          payload=payload).emit(self._context)
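
# Hedged, standalone sketch (plain Python, not the Flavor object): shows how
# Flavor.save() above derives the changes for extra_specs and projects by
# diffing the current values against the _orig_* snapshots before the first
# remotable call resets those snapshots.
orig_extra_specs = {'hw:cpu_policy': 'shared', 'quota:disk_read_bytes': '100'}
extra_specs = {'hw:cpu_policy': 'dedicated'}            # current value
orig_projects = ['p1', 'p2']
projects = ['p2', 'p3']                                 # current value

deleted_keys = set(orig_extra_specs) - set(extra_specs)
added_keys = extra_specs                # the whole current dict is re-written
deleted_projects = set(orig_projects) - set(projects)
added_projects = set(projects) - set(orig_projects)

print(sorted(deleted_keys), added_keys)
# -> ['quota:disk_read_bytes'] {'hw:cpu_policy': 'dedicated'}
print(deleted_projects, added_projects)
# -> {'p1'} {'p3'}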
Beispiel #30
0
class InstancePayload(base.NotificationPayloadBase):
    SCHEMA = {
        'uuid': ('instance', 'uuid'),
        'user_id': ('instance', 'user_id'),
        'tenant_id': ('instance', 'project_id'),
        'reservation_id': ('instance', 'reservation_id'),
        'display_name': ('instance', 'display_name'),
        'display_description': ('instance', 'display_description'),
        'host_name': ('instance', 'hostname'),
        'host': ('instance', 'host'),
        'node': ('instance', 'node'),
        'os_type': ('instance', 'os_type'),
        'architecture': ('instance', 'architecture'),
        'availability_zone': ('instance', 'availability_zone'),
        'image_uuid': ('instance', 'image_ref'),
        'key_name': ('instance', 'key_name'),
        'kernel_id': ('instance', 'kernel_id'),
        'ramdisk_id': ('instance', 'ramdisk_id'),
        'created_at': ('instance', 'created_at'),
        'launched_at': ('instance', 'launched_at'),
        'terminated_at': ('instance', 'terminated_at'),
        'deleted_at': ('instance', 'deleted_at'),
        'updated_at': ('instance', 'updated_at'),
        'state': ('instance', 'vm_state'),
        'power_state': ('instance', 'power_state'),
        'task_state': ('instance', 'task_state'),
        'progress': ('instance', 'progress'),
        'metadata': ('instance', 'metadata'),
        'locked': ('instance', 'locked'),
        'auto_disk_config': ('instance', 'auto_disk_config')
    }
    # Version 1.0: Initial version
    # Version 1.1: Add locked and display_description fields
    # Version 1.2: Add auto_disk_config field
    # Version 1.3: Add key_name field
    # Version 1.4: Add BDM related data
    # Version 1.5: Add updated_at field
    # Version 1.6: Add request_id field
    # Version 1.7: Added action_initiator_user and action_initiator_project to
    #              InstancePayload
    # Version 1.8: Added locked_reason field
    VERSION = '1.8'
    fields = {
        'uuid': fields.UUIDField(),
        'user_id': fields.StringField(nullable=True),
        'tenant_id': fields.StringField(nullable=True),
        'reservation_id': fields.StringField(nullable=True),
        'display_name': fields.StringField(nullable=True),
        'display_description': fields.StringField(nullable=True),
        'host_name': fields.StringField(nullable=True),
        'host': fields.StringField(nullable=True),
        'node': fields.StringField(nullable=True),
        'os_type': fields.StringField(nullable=True),
        'architecture': fields.StringField(nullable=True),
        'availability_zone': fields.StringField(nullable=True),
        'flavor': fields.ObjectField('FlavorPayload'),
        'image_uuid': fields.StringField(nullable=True),
        'key_name': fields.StringField(nullable=True),
        'kernel_id': fields.StringField(nullable=True),
        'ramdisk_id': fields.StringField(nullable=True),
        'created_at': fields.DateTimeField(nullable=True),
        'launched_at': fields.DateTimeField(nullable=True),
        'terminated_at': fields.DateTimeField(nullable=True),
        'deleted_at': fields.DateTimeField(nullable=True),
        'updated_at': fields.DateTimeField(nullable=True),
        'state': fields.InstanceStateField(nullable=True),
        'power_state': fields.InstancePowerStateField(nullable=True),
        'task_state': fields.InstanceTaskStateField(nullable=True),
        'progress': fields.IntegerField(nullable=True),
        'ip_addresses': fields.ListOfObjectsField('IpPayload'),
        'block_devices': fields.ListOfObjectsField('BlockDevicePayload',
                                                   nullable=True),
        'metadata': fields.DictOfStringsField(),
        'locked': fields.BooleanField(),
        'auto_disk_config': fields.DiskConfigField(),
        'request_id': fields.StringField(nullable=True),
        'action_initiator_user': fields.StringField(nullable=True),
        'action_initiator_project': fields.StringField(nullable=True),
        'locked_reason': fields.StringField(nullable=True),
    }

    def __init__(self, context, instance, bdms=None):
        super(InstancePayload, self).__init__()
        network_info = instance.get_network_info()
        self.ip_addresses = IpPayload.from_network_info(network_info)
        self.flavor = flavor_payload.FlavorPayload(flavor=instance.flavor)
        if bdms is not None:
            self.block_devices = BlockDevicePayload.from_bdms(bdms)
        else:
            self.block_devices = BlockDevicePayload.from_instance(instance)
        # NOTE(Kevin_Zheng): Don't include request_id for periodic tasks, as
        # the RequestContext for periodic tasks does not include project_id
        # and user_id. Consider modifying this once periodic tasks get a
        # consistent request_id.
        self.request_id = context.request_id if (context.project_id
                                                 and context.user_id) else None
        self.action_initiator_user = context.user_id
        self.action_initiator_project = context.project_id
        self.locked_reason = instance.system_metadata.get("locked_reason")
        self.populate_schema(instance=instance)
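
# Hedged, standalone sketch (plain Python): a toy version of the SCHEMA-driven
# population done by populate_schema() above, where each payload field maps to
# a (source name, attribute) pair and is copied from the matching source
# object. 'DummyInstance' and the two fields shown are illustrative only.
class DummyInstance(object):
    uuid = 'fake-uuid'
    project_id = 'fake-project'


SCHEMA = {'uuid': ('instance', 'uuid'),
          'tenant_id': ('instance', 'project_id')}


def populate_from_schema(schema, **sources):
    payload = {}
    for field, (source_name, attr) in schema.items():
        payload[field] = getattr(sources[source_name], attr)
    return payload


print(populate_from_schema(SCHEMA, instance=DummyInstance()))
# -> {'uuid': 'fake-uuid', 'tenant_id': 'fake-project'}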