class InstanceAction(base.NovaPersistentObject, base.NovaObject):
    """Object representing a single logged action performed on an instance."""

    fields = {
        'id': int,
        'action': utils.str_or_none,
        'instance_uuid': utils.str_or_none,
        'request_id': utils.str_or_none,
        'user_id': utils.str_or_none,
        'project_id': utils.str_or_none,
        'start_time': utils.datetime_or_none,
        'finish_time': utils.datetime_or_none,
        'message': utils.str_or_none,
        }

    # Datetime columns need explicit (de)serialization hooks so they
    # survive the RPC wire as primitives.
    _attr_start_time_from_primitive = utils.dt_deserializer
    _attr_finish_time_from_primitive = utils.dt_deserializer
    _attr_start_time_to_primitive = utils.dt_serializer('start_time')
    _attr_finish_time_to_primitive = utils.dt_serializer('finish_time')

    @staticmethod
    def _from_db_object(context, action, db_action):
        """Copy every declared field from the DB row and mark clean."""
        for name in action.fields:
            action[name] = db_action[name]
        action._context = context
        action.obj_reset_changes()
        return action

    @base.remotable_classmethod
    def get_by_request_id(cls, context, instance_uuid, request_id):
        """Look up the action recorded for a given request, if any."""
        db_action = db.action_get_by_request_id(context, instance_uuid,
                                                request_id)
        if not db_action:
            return None
        return cls._from_db_object(context, cls(), db_action)

    # NOTE(danms): Eventually the compute_utils.*action* methods
    # can be here, I think

    @base.remotable_classmethod
    def action_start(cls, context, instance_uuid, action_name,
                     want_result=True):
        """Record the start of an action against an instance."""
        start_values = compute_utils.pack_action_start(context, instance_uuid,
                                                       action_name)
        db_action = db.action_start(context, start_values)
        if want_result:
            return cls._from_db_object(context, cls(), db_action)

    @base.remotable_classmethod
    def action_finish(cls, context, instance_uuid, want_result=True):
        """Record the completion of an action against an instance."""
        finish_values = compute_utils.pack_action_finish(context,
                                                         instance_uuid)
        db_action = db.action_finish(context, finish_values)
        if want_result:
            return cls._from_db_object(context, cls(), db_action)

    @base.remotable
    def finish(self, context):
        """Mark this action finished and refresh the object from the DB."""
        finish_values = compute_utils.pack_action_finish(context,
                                                         self.instance_uuid)
        db_action = db.action_finish(context, finish_values)
        self._from_db_object(context, self, db_action)
class NovaPersistentObject(object):
    """Mixin class for Persistent objects.

    This adds the fields that we use in common for all persistent objects.
    """
    fields = {
        'created_at': obj_utils.datetime_or_str_or_none,
        'updated_at': obj_utils.datetime_or_str_or_none,
        'deleted_at': obj_utils.datetime_or_str_or_none,
        'deleted': bool,
        }

    # Hooks picked up by NovaObject's _attr_from_primitive() /
    # _attr_to_primitive() dispatchers (looked up by the
    # '_attr_<field>_{from,to}_primitive' naming convention) so the
    # datetime columns round-trip over the RPC wire as primitives.
    _attr_created_at_from_primitive = obj_utils.dt_deserializer
    _attr_updated_at_from_primitive = obj_utils.dt_deserializer
    _attr_deleted_at_from_primitive = obj_utils.dt_deserializer
    _attr_created_at_to_primitive = obj_utils.dt_serializer('created_at')
    _attr_updated_at_to_primitive = obj_utils.dt_serializer('updated_at')
    _attr_deleted_at_to_primitive = obj_utils.dt_serializer('deleted_at')
class Obj(object):
    # NOTE(review): looks like a minimal test fixture for dt_serializer;
    # 'foo' becomes a bound serializer for a 'bar' attribute (mirroring the
    # '_attr_*_to_primitive = utils.dt_serializer(...)' pattern used by the
    # object classes) -- confirm against the test that uses it.
    foo = utils.dt_serializer('bar')
class NovaObject(object):
    """Base class and object factory.

    This forms the base of all objects that can be remoted or instantiated
    via RPC. Simply defining a class that inherits from this base class
    will make it remotely instantiatable. Objects should implement the
    necessary "get" classmethod routines as well as "save" object methods
    as appropriate.
    """
    __metaclass__ = NovaObjectMetaclass

    # Version of this object (see rules above check_object_version())
    version = '1.0'

    # The fields present in this object as key:typefn pairs. For example:
    #
    # fields = { 'foo': int,
    #            'bar': str,
    #            'baz': lambda x: str(x).ljust(8),
    #          }
    #
    # NOTE(danms): The base NovaObject class' fields will be inherited
    # by subclasses, but that is a special case. Objects inheriting from
    # other objects will not receive this merging of fields contents.
    fields = {
        'created_at': obj_utils.datetime_or_str_or_none,
        'updated_at': obj_utils.datetime_or_str_or_none,
        'deleted_at': obj_utils.datetime_or_str_or_none,
        'deleted': bool,
        }
    obj_extra_fields = []

    def __init__(self):
        self._changed_fields = set()
        self._context = None

    @classmethod
    def obj_name(cls):
        """Return a canonical name for this object which will be used over
        the wire for remote hydration.
        """
        return cls.__name__

    @classmethod
    def obj_class_from_name(cls, objname, objver):
        """Returns a class from the registry based on a name and version."""
        if objname not in cls._obj_classes:
            LOG.error(_('Unable to instantiate unregistered object type '
                        '%(objtype)s') % dict(objtype=objname))
            raise exception.UnsupportedObjectError(objtype=objname)

        # Prefer an exact version match; otherwise remember the newest
        # registered class whose version is compatible with the request.
        compatible_match = None
        for objclass in cls._obj_classes[objname]:
            if objclass.version == objver:
                return objclass
            try:
                check_object_version(objclass.version, objver)
                compatible_match = objclass
            except exception.IncompatibleObjectVersion:
                pass

        if compatible_match:
            return compatible_match

        raise exception.IncompatibleObjectVersion(objname=objname,
                                                  objver=objver)

    _attr_created_at_from_primitive = obj_utils.dt_deserializer
    _attr_updated_at_from_primitive = obj_utils.dt_deserializer
    _attr_deleted_at_from_primitive = obj_utils.dt_deserializer

    def _attr_from_primitive(self, attribute, value):
        """Attribute deserialization dispatcher.

        This calls self._attr_foo_from_primitive(value) for an attribute
        foo with value, if it exists, otherwise it assumes the value is
        suitable for the attribute's setter method.
        """
        handler = '_attr_%s_from_primitive' % attribute
        if hasattr(self, handler):
            return getattr(self, handler)(value)
        return value

    @classmethod
    def obj_from_primitive(cls, primitive, context=None):
        """Simple base-case hydration.

        This calls self._attr_from_primitive() for each item in fields.
        """
        if primitive['nova_object.namespace'] != 'nova':
            # NOTE(danms): We don't do anything with this now, but it's
            # there for "the future"
            raise exception.UnsupportedObjectError(
                objtype='%s.%s' % (primitive['nova_object.namespace'],
                                   primitive['nova_object.name']))
        objname = primitive['nova_object.name']
        objver = primitive['nova_object.version']
        objdata = primitive['nova_object.data']
        objclass = cls.obj_class_from_name(objname, objver)
        self = objclass()
        self._context = context
        for name in self.fields:
            if name in objdata:
                setattr(self, name,
                        self._attr_from_primitive(name, objdata[name]))
        changes = primitive.get('nova_object.changes', [])
        self._changed_fields = set([x for x in changes if x in self.fields])
        return self

    _attr_created_at_to_primitive = obj_utils.dt_serializer('created_at')
    _attr_updated_at_to_primitive = obj_utils.dt_serializer('updated_at')
    _attr_deleted_at_to_primitive = obj_utils.dt_serializer('deleted_at')

    def _attr_to_primitive(self, attribute):
        """Attribute serialization dispatcher.

        This calls self._attr_foo_to_primitive() for an attribute foo,
        if it exists, otherwise it assumes the attribute itself is
        primitive-enough to be sent over the RPC wire.
        """
        handler = '_attr_%s_to_primitive' % attribute
        if hasattr(self, handler):
            return getattr(self, handler)()
        else:
            return getattr(self, attribute)

    def obj_to_primitive(self):
        """Simple base-case dehydration.

        This calls self._attr_to_primitive() for each item in fields.
        """
        primitive = dict()
        for name in self.fields:
            # Only fields that have actually been set are serialized.
            if hasattr(self, get_attrname(name)):
                primitive[name] = self._attr_to_primitive(name)
        obj = {'nova_object.name': self.obj_name(),
               'nova_object.namespace': 'nova',
               'nova_object.version': self.version,
               'nova_object.data': primitive}
        if self.obj_what_changed():
            obj['nova_object.changes'] = list(self.obj_what_changed())
        return obj

    def obj_load_attr(self, attrname):
        """Load an additional attribute from the real object.

        This should use self._conductor, and cache any data that might be
        useful for future load operations.
        """
        raise NotImplementedError(
            _("Cannot load '%(attrname)s' in the base class") % locals())

    def save(self, context):
        """Save the changed fields back to the store.

        This is optional for subclasses, but is presented here in the base
        class for consistency among those that do.
        """
        raise NotImplementedError('Cannot save anything in the base class')

    def obj_what_changed(self):
        """Returns a set of fields that have been modified."""
        return self._changed_fields

    def obj_reset_changes(self, fields=None):
        """Reset the list of fields that have been changed.

        Note that this is NOT "revert to previous values"
        """
        if fields:
            self._changed_fields -= set(fields)
        else:
            self._changed_fields.clear()

    # dictish syntactic sugar
    def iteritems(self):
        """For backwards-compatibility with dict-based objects.

        NOTE(danms): May be removed in the future.
        """
        for name in self.fields.keys() + self.obj_extra_fields:
            if (hasattr(self, get_attrname(name)) or
                    name in self.obj_extra_fields):
                yield name, getattr(self, name)

    items = lambda self: list(self.iteritems())

    def __getitem__(self, name):
        """For backwards-compatibility with dict-based objects.

        NOTE(danms): May be removed in the future.
        """
        return getattr(self, name)

    def __setitem__(self, name, value):
        """For backwards-compatibility with dict-based objects.

        NOTE(danms): May be removed in the future.
        """
        setattr(self, name, value)

    def __contains__(self, name):
        """For backwards-compatibility with dict-based objects.

        NOTE(danms): May be removed in the future.
        """
        return hasattr(self, get_attrname(name))

    # Sentinel so get() can tell "no default supplied" apart from an
    # explicit default of None.
    _NOT_SPECIFIED = object()

    def get(self, key, value=_NOT_SPECIFIED):
        """For backwards-compatibility with dict-based objects.

        NOTE(danms): May be removed in the future.
        """
        # BUGFIX: the 'value' default used to be silently ignored and the
        # method always returned self[key]. Honor a supplied default when
        # the attribute is not set. When no default is supplied we still
        # fall through to self[key], preserving the original raising /
        # lazy-loading behavior for existing callers.
        if value is NovaObject._NOT_SPECIFIED:
            return self[key]
        if hasattr(self, get_attrname(key)):
            return self[key]
        return value

    def update(self, updates):
        """For backwards-compatibility with dict-base objects.

        NOTE(danms): May be removed in the future.
        """
        for key, value in updates.items():
            self[key] = value
class Instance(base.NovaObject):
    """Versioned object wrapping an instance DB record."""
    # Version 1.0: Initial version
    # Version 1.1: Added info_cache
    # Version 1.2: Added security_groups
    # Version 1.3: Added expected_vm_state and admin_state_reset to
    #              save()
    # Version 1.4: Added locked_by and deprecated locked
    # Version 1.5: Added cleaned
    VERSION = '1.5'

    fields = {
        'id': int,
        'user_id': obj_utils.str_or_none,
        'project_id': obj_utils.str_or_none,
        'image_ref': obj_utils.str_or_none,
        'kernel_id': obj_utils.str_or_none,
        'ramdisk_id': obj_utils.str_or_none,
        'hostname': obj_utils.str_or_none,
        'launch_index': obj_utils.int_or_none,
        'key_name': obj_utils.str_or_none,
        'key_data': obj_utils.str_or_none,
        'power_state': obj_utils.int_or_none,
        'vm_state': obj_utils.str_or_none,
        'task_state': obj_utils.str_or_none,
        'memory_mb': obj_utils.int_or_none,
        'vcpus': obj_utils.int_or_none,
        'root_gb': obj_utils.int_or_none,
        'ephemeral_gb': obj_utils.int_or_none,
        'host': obj_utils.str_or_none,
        'node': obj_utils.str_or_none,
        'instance_type_id': obj_utils.int_or_none,
        'user_data': obj_utils.str_or_none,
        'reservation_id': obj_utils.str_or_none,
        'scheduled_at': obj_utils.datetime_or_str_or_none,
        'launched_at': obj_utils.datetime_or_str_or_none,
        'terminated_at': obj_utils.datetime_or_str_or_none,
        'availability_zone': obj_utils.str_or_none,
        'display_name': obj_utils.str_or_none,
        'display_description': obj_utils.str_or_none,
        'launched_on': obj_utils.str_or_none,
        # NOTE(jdillaman): locked deprecated in favor of locked_by,
        # to be removed in Icehouse
        'locked': bool,
        'locked_by': obj_utils.str_or_none,
        'os_type': obj_utils.str_or_none,
        'architecture': obj_utils.str_or_none,
        'vm_mode': obj_utils.str_or_none,
        'uuid': obj_utils.str_or_none,
        'root_device_name': obj_utils.str_or_none,
        'default_ephemeral_device': obj_utils.str_or_none,
        'default_swap_device': obj_utils.str_or_none,
        'config_drive': obj_utils.str_or_none,
        'access_ip_v4': obj_utils.ip_or_none(4),
        'access_ip_v6': obj_utils.ip_or_none(6),
        'auto_disk_config': bool,
        'progress': obj_utils.int_or_none,
        'shutdown_terminate': bool,
        'disable_terminate': bool,
        'cell_name': obj_utils.str_or_none,
        'metadata': dict,
        'system_metadata': dict,
        'info_cache': obj_utils.nested_object_or_none(
            instance_info_cache.InstanceInfoCache),
        'security_groups': obj_utils.nested_object_or_none(
            security_group.SecurityGroupList),
        'fault': obj_utils.nested_object_or_none(
            instance_fault.InstanceFault),
        'cleaned': bool,
        }

    obj_extra_fields = ['name']

    def __init__(self, *args, **kwargs):
        super(Instance, self).__init__(*args, **kwargs)
        # Snapshot metadata dicts so obj_what_changed() can detect
        # in-place mutation.
        self.obj_reset_changes()

    def obj_reset_changes(self, fields=None):
        # In addition to clearing the changed-field set, re-snapshot the
        # two metadata dicts for comparison in obj_what_changed().
        super(Instance, self).obj_reset_changes(fields)
        self._orig_system_metadata = (dict(self.system_metadata) if
                                      'system_metadata' in self else {})
        self._orig_metadata = (dict(self.metadata) if
                               'metadata' in self else {})

    def obj_what_changed(self):
        # Dicts are mutated in place, so compare against the snapshots
        # taken at the last obj_reset_changes().
        changes = super(Instance, self).obj_what_changed()
        if 'metadata' in self and self.metadata != self._orig_metadata:
            changes.add('metadata')
        if 'system_metadata' in self and (self.system_metadata !=
                                          self._orig_system_metadata):
            changes.add('system_metadata')
        return changes

    @property
    def name(self):
        """Instance display name computed from CONF.instance_name_template."""
        try:
            base_name = CONF.instance_name_template % self.id
        except TypeError:
            # Support templates like "uuid-%(uuid)s", etc.
            info = {}
            # NOTE(russellb): Don't use self.iteritems() here, as it will
            # result in infinite recursion on the name property.
            for key in self.fields:
                # prevent recursion if someone specifies %(name)s
                # %(name)s will not be valid.
                if key == 'name':
                    continue
                info[key] = self[key]
            try:
                base_name = CONF.instance_name_template % info
            except KeyError:
                base_name = self.uuid
        return base_name

    # The IP fields serialize to plain strings (or None) for the wire.
    def _attr_access_ip_v4_to_primitive(self):
        if self.access_ip_v4 is not None:
            return str(self.access_ip_v4)
        else:
            return None

    def _attr_access_ip_v6_to_primitive(self):
        if self.access_ip_v6 is not None:
            return str(self.access_ip_v6)
        else:
            return None

    _attr_scheduled_at_to_primitive = obj_utils.dt_serializer('scheduled_at')
    _attr_launched_at_to_primitive = obj_utils.dt_serializer('launched_at')
    _attr_terminated_at_to_primitive = obj_utils.dt_serializer(
        'terminated_at')
    _attr_info_cache_to_primitive = obj_utils.obj_serializer('info_cache')
    _attr_security_groups_to_primitive = obj_utils.obj_serializer(
        'security_groups')
    _attr_scheduled_at_from_primitive = obj_utils.dt_deserializer
    _attr_launched_at_from_primitive = obj_utils.dt_deserializer
    _attr_terminated_at_from_primitive = obj_utils.dt_deserializer

    def _attr_info_cache_from_primitive(self, val):
        # Nested objects come over the wire as object primitives.
        return base.NovaObject.obj_from_primitive(val)

    def _attr_security_groups_from_primitive(self, val):
        return base.NovaObject.obj_from_primitive(val)

    @staticmethod
    def _from_db_object(context, instance, db_inst, expected_attrs=None):
        """Method to help with migration to objects.

        Converts a database entity to a formal object.
        """
        if expected_attrs is None:
            expected_attrs = []
        # Most of the field names match right now, so be quick
        for field in instance.fields:
            if field in INSTANCE_OPTIONAL_FIELDS + INSTANCE_IMPLIED_FIELDS:
                # Optional/implied attributes are only filled in when
                # explicitly requested below.
                continue
            elif field == 'deleted':
                # Soft-delete convention: 'deleted' column equals the row
                # id when the row is deleted.
                instance.deleted = db_inst['deleted'] == db_inst['id']
            elif field == 'cleaned':
                # Stored as an int in the DB; expose as bool.
                instance.cleaned = db_inst['cleaned'] == 1
            else:
                instance[field] = db_inst[field]

        if 'metadata' in expected_attrs:
            instance['metadata'] = utils.metadata_to_dict(db_inst['metadata'])
        if 'system_metadata' in expected_attrs:
            instance['system_metadata'] = utils.metadata_to_dict(
                db_inst['system_metadata'])
        if 'fault' in expected_attrs:
            instance['fault'] = (
                instance_fault.InstanceFault.get_latest_for_instance(
                    context, instance.uuid))

        # NOTE(danms): info_cache and security_groups are almost
        # always joined in the DB layer right now, so check to see if
        # they are asked for and are present in the resulting object
        if 'info_cache' in expected_attrs and db_inst.get('info_cache'):
            instance['info_cache'] = instance_info_cache.InstanceInfoCache()
            instance_info_cache.InstanceInfoCache._from_db_object(
                context, instance['info_cache'], db_inst['info_cache'])
        if ('security_groups' in expected_attrs and
                db_inst.get('security_groups')):
            instance['security_groups'] = security_group.SecurityGroupList()
            security_group._make_secgroup_list(context,
                                               instance['security_groups'],
                                               db_inst['security_groups'])
        instance._context = context
        instance.obj_reset_changes()
        return instance

    @staticmethod
    def _attrs_to_columns(attrs):
        """Translate instance attributes into columns needing joining."""
        columns_to_join = []
        if 'metadata' in attrs:
            columns_to_join.append('metadata')
        if 'system_metadata' in attrs:
            columns_to_join.append('system_metadata')
        # NOTE(danms): The DB API currently always joins info_cache and
        # security_groups for get operations, so don't add them to the
        # list of columns
        return columns_to_join

    @base.remotable_classmethod
    def get_by_uuid(cls, context, uuid, expected_attrs=None):
        # Default to the joins the DB layer performs anyway.
        if expected_attrs is None:
            expected_attrs = ['info_cache', 'security_groups']
        columns_to_join = cls._attrs_to_columns(expected_attrs)
        db_inst = db.instance_get_by_uuid(context, uuid,
                                          columns_to_join=columns_to_join)
        return cls._from_db_object(context, cls(), db_inst,
                                   expected_attrs)

    @base.remotable_classmethod
    def get_by_id(cls, context, inst_id, expected_attrs=None):
        if expected_attrs is None:
            expected_attrs = ['info_cache', 'security_groups']
        columns_to_join = cls._attrs_to_columns(expected_attrs)
        db_inst = db.instance_get(context, inst_id,
                                  columns_to_join=columns_to_join)
        return cls._from_db_object(context, cls(), db_inst,
                                   expected_attrs)

    # Per-field save helpers invoked by save() for nested objects.
    def _save_info_cache(self, context):
        self.info_cache.save(context)

    def _save_security_groups(self, context):
        for secgroup in self.security_groups:
            secgroup.save(context)

    def _save_instance_fault(self, context):
        # NOTE(danms): I don't think we need to worry about this, do we?
        pass

    @base.remotable
    def save(self, context, expected_vm_state=None,
             expected_task_state=None, admin_state_reset=False):
        """Save updates to this instance

        Column-wise updates will be made based on the result of
        self.what_changed(). If expected_task_state is provided,
        it will be checked against the in-database copy of the
        instance before updates are made.
        :param context: Security context
        :param expected_task_state: Optional tuple of valid task states
                                    for the instance to be in.
        :param expected_vm_state: Optional tuple of valid vm states
                                  for the instance to be in.
        :param admin_state_reset: True if admin API is forcing setting
                                  of task_state/vm_state.
        """
        cell_type = cells_opts.get_cell_type()
        if cell_type == 'api' and self.cell_name:
            # NOTE(comstud): We need to stash a copy of ourselves
            # before any updates are applied. When we call the save
            # methods on nested objects, we will lose any changes to
            # them. But we need to make sure child cells can tell
            # what is changed.
            #
            # We also need to nuke any updates to vm_state and task_state
            # unless admin_state_reset is True. compute cells are
            # authoritative for their view of vm_state and task_state.
            stale_instance = copy.deepcopy(self)

            def _handle_cell_update_from_api():
                cells_api = cells_rpcapi.CellsAPI()
                cells_api.instance_update_from_api(context, stale_instance,
                                                   expected_vm_state,
                                                   expected_task_state,
                                                   admin_state_reset)
        else:
            stale_instance = None

        updates = {}
        changes = self.obj_what_changed()
        for field in self.fields:
            # Nested objects are saved via their own _save_<field>()
            # helper; everything else is a column update.
            if (hasattr(self, base.get_attrname(field)) and
                    isinstance(self[field], base.NovaObject)):
                getattr(self, '_save_%s' % field)(context)
            elif field in changes:
                updates[field] = self[field]
        if not updates:
            # Nothing column-wise to write, but the child cell may still
            # need to hear about nested-object changes.
            if stale_instance:
                _handle_cell_update_from_api()
            return

        # Cleaned needs to be turned back into an int here
        if 'cleaned' in updates:
            if updates['cleaned']:
                updates['cleaned'] = 1
            else:
                updates['cleaned'] = 0

        if expected_task_state is not None:
            updates['expected_task_state'] = expected_task_state
        if expected_vm_state is not None:
            updates['expected_vm_state'] = expected_vm_state

        old_ref, inst_ref = db.instance_update_and_get_original(
                context, self.uuid, updates, update_cells=False)

        if stale_instance:
            _handle_cell_update_from_api()
        elif cell_type == 'compute':
            cells_api = cells_rpcapi.CellsAPI()
            cells_api.instance_update_at_top(context, inst_ref)

        # Re-hydrate from the fresh DB copy, preserving whichever
        # optional attrs were already loaded on this object.
        expected_attrs = []
        for attr in INSTANCE_OPTIONAL_FIELDS:
            if hasattr(self, base.get_attrname(attr)):
                expected_attrs.append(attr)
        self._from_db_object(context, self, inst_ref, expected_attrs)
        if 'vm_state' in changes or 'task_state' in changes:
            notifications.send_update(context, old_ref, inst_ref)
        self.obj_reset_changes()

    @base.remotable
    def refresh(self, context):
        """Re-fetch this instance and apply any fields that differ."""
        extra = []
        for field in INSTANCE_DEFAULT_FIELDS:
            if hasattr(self, base.get_attrname(field)):
                extra.append(field)
        current = self.__class__.get_by_uuid(context, uuid=self.uuid,
                                             expected_attrs=extra)
        for field in self.fields:
            if (hasattr(self, base.get_attrname(field)) and
                    self[field] != current[field]):
                self[field] = current[field]
        self.obj_reset_changes()

    def obj_load_attr(self, attrname):
        """Lazy-load one of the optional attributes via a fresh DB fetch."""
        extra = []
        if attrname == 'system_metadata':
            extra.append('system_metadata')
        elif attrname == 'metadata':
            extra.append('metadata')
        elif attrname == 'info_cache':
            extra.append('info_cache')
        elif attrname == 'security_groups':
            extra.append('security_groups')
        elif attrname == 'fault':
            extra.append('fault')
        if not extra:
            raise exception.ObjectActionError(
                action='obj_load_attr',
                reason='attribute %s not lazy-loadable' % attrname)

        # NOTE(danms): This could be optimized to just load the bits we need
        instance = self.__class__.get_by_uuid(self._context,
                                              uuid=self.uuid,
                                              expected_attrs=extra)

        # NOTE(danms): Never allow us to recursively-load
        if hasattr(instance, base.get_attrname(attrname)):
            self[attrname] = instance[attrname]
        else:
            raise exception.ObjectActionError(
                action='obj_load_attr',
                reason='loading %s requires recursion' % attrname)
class Instance(base.NovaObject):
    """Versioned object wrapping an instance DB record (early revision)."""
    fields = {
        'id': int,
        'user_id': obj_utils.str_or_none,
        'project_id': obj_utils.str_or_none,
        'image_ref': obj_utils.str_or_none,
        'kernel_id': obj_utils.str_or_none,
        'ramdisk_id': obj_utils.str_or_none,
        'hostname': obj_utils.str_or_none,
        'launch_index': obj_utils.int_or_none,
        'key_name': obj_utils.str_or_none,
        'key_data': obj_utils.str_or_none,
        'power_state': obj_utils.int_or_none,
        'vm_state': obj_utils.str_or_none,
        'task_state': obj_utils.str_or_none,
        'memory_mb': obj_utils.int_or_none,
        'vcpus': obj_utils.int_or_none,
        'root_gb': obj_utils.int_or_none,
        'ephemeral_gb': obj_utils.int_or_none,
        'host': obj_utils.str_or_none,
        'node': obj_utils.str_or_none,
        'instance_type_id': obj_utils.int_or_none,
        'user_data': obj_utils.str_or_none,
        'reservation_id': obj_utils.str_or_none,
        'scheduled_at': obj_utils.datetime_or_none,
        'launched_at': obj_utils.datetime_or_none,
        'terminated_at': obj_utils.datetime_or_none,
        'availability_zone': obj_utils.str_or_none,
        'display_name': obj_utils.str_or_none,
        'display_description': obj_utils.str_or_none,
        'launched_on': obj_utils.str_or_none,
        'locked': bool,
        'os_type': obj_utils.str_or_none,
        'architecture': obj_utils.str_or_none,
        'vm_mode': obj_utils.str_or_none,
        'uuid': obj_utils.str_or_none,
        'root_device_name': obj_utils.str_or_none,
        'default_ephemeral_device': obj_utils.str_or_none,
        'default_swap_device': obj_utils.str_or_none,
        'config_drive': obj_utils.str_or_none,
        'access_ip_v4': obj_utils.ip_or_none(4),
        'access_ip_v6': obj_utils.ip_or_none(6),
        'auto_disk_config': bool,
        'progress': obj_utils.int_or_none,
        'shutdown_terminate': bool,
        'disable_terminate': bool,
        'cell_name': obj_utils.str_or_none,
        'metadata': dict,
        'system_metadata': dict,
        }

    @property
    def name(self):
        """Instance display name computed from CONF.instance_name_template."""
        try:
            base_name = CONF.instance_name_template % self.id
        except TypeError:
            # Support templates like "uuid-%(uuid)s", etc.
            info = {}
            # NOTE(russellb): Don't use self.iteritems() here, as it will
            # result in infinite recursion on the name property.
            for key in self.fields:
                # prevent recursion if someone specifies %(name)s
                # %(name)s will not be valid.
                if key == 'name':
                    continue
                info[key] = self[key]
            try:
                base_name = CONF.instance_name_template % info
            except KeyError:
                base_name = self.uuid
        return base_name

    # The IP fields serialize to plain strings (or None) for the wire.
    def _attr_access_ip_v4_to_primitive(self):
        if self.access_ip_v4 is not None:
            return str(self.access_ip_v4)
        else:
            return None

    def _attr_access_ip_v6_to_primitive(self):
        if self.access_ip_v6 is not None:
            return str(self.access_ip_v6)
        else:
            return None

    _attr_scheduled_at_to_primitive = obj_utils.dt_serializer('scheduled_at')
    _attr_launched_at_to_primitive = obj_utils.dt_serializer('launched_at')
    _attr_terminated_at_to_primitive = obj_utils.dt_serializer(
        'terminated_at')
    _attr_scheduled_at_from_primitive = obj_utils.dt_deserializer
    _attr_launched_at_from_primitive = obj_utils.dt_deserializer
    _attr_terminated_at_from_primitive = obj_utils.dt_deserializer

    @staticmethod
    def _from_db_object(instance, db_inst, expected_attrs=None):
        """Method to help with migration to objects.

        Converts a database entity to a formal object.
        """
        if expected_attrs is None:
            expected_attrs = []
        # Most of the field names match right now, so be quick
        for field in instance.fields:
            # Metadata dicts are only hydrated when explicitly requested.
            if field in ['metadata', 'system_metadata']:
                continue
            instance[field] = db_inst[field]

        if 'metadata' in expected_attrs:
            instance['metadata'] = utils.metadata_to_dict(db_inst['metadata'])
        if 'system_metadata' in expected_attrs:
            instance['system_metadata'] = utils.metadata_to_dict(
                db_inst['system_metadata'])

        instance.obj_reset_changes()
        return instance

    @base.remotable_classmethod
    def get_by_uuid(cls, context, uuid=None, expected_attrs=None):
        if expected_attrs is None:
            expected_attrs = []

        # Construct DB-specific columns from generic expected_attrs
        columns_to_join = []
        if 'metadata' in expected_attrs:
            columns_to_join.append('metadata')
        if 'system_metadata' in expected_attrs:
            columns_to_join.append('system_metadata')

        db_inst = db.instance_get_by_uuid(context, uuid, columns_to_join)
        # NOTE(review): hardcodes Instance rather than using
        # cls._from_db_object -- subclasses would not get their own
        # hydration; confirm whether that is intended.
        return Instance._from_db_object(cls(), db_inst, expected_attrs)

    @base.remotable
    def save(self, context, expected_task_state=None):
        """Save updates to this instance

        Column-wise updates will be made based on the result of
        self.what_changed(). If expected_task_state is provided,
        it will be checked against the in-database copy of the
        instance before updates are made.
        :param context: Security context
        :param expected_task_state: Optional tuple of valid task states
                                    for the instance to be in.
        """
        updates = {}
        changes = self.obj_what_changed()
        for field in changes:
            updates[field] = self[field]
        if expected_task_state is not None:
            updates['expected_task_state'] = expected_task_state
        old_ref, inst_ref = db.instance_update_and_get_original(
                context, self.uuid, updates)

        # Re-hydrate, preserving whichever metadata attrs were loaded.
        expected_attrs = []
        for attr in ('metadata', 'system_metadata'):
            if hasattr(self, base.get_attrname(attr)):
                expected_attrs.append(attr)
        Instance._from_db_object(self, inst_ref, expected_attrs)
        if 'vm_state' in changes or 'task_state' in changes:
            notifications.send_update(context, old_ref, inst_ref)
        self.obj_reset_changes()

    @base.remotable
    def refresh(self, context):
        """Re-fetch this instance and apply any fields that differ."""
        extra = []
        for field in ['system_metadata', 'metadata']:
            if hasattr(self, base.get_attrname(field)):
                extra.append(field)
        current = self.__class__.get_by_uuid(context, uuid=self.uuid,
                                             expected_attrs=extra)
        for field in self.fields:
            if (hasattr(self, base.get_attrname(field)) and
                    self[field] != current[field]):
                self[field] = current[field]
        # NOTE(review): unlike save(), this does not call
        # obj_reset_changes(), so the copied fields remain marked as
        # changed -- confirm whether that is intended.

    def obj_load(self, attrname):
        """Lazy-load metadata or system_metadata via a fresh DB fetch."""
        extra = []
        if attrname == 'system_metadata':
            extra.append('system_metadata')
        elif attrname == 'metadata':
            extra.append('metadata')
        if not extra:
            # NOTE(review): raising the bare Exception type here makes
            # this impossible to catch narrowly -- a specific exception
            # class would be preferable.
            raise Exception('Cannot load "%s" from instance' % attrname)

        # NOTE(danms): This could be optimized to just load the bits we need
        instance = self.__class__.get_by_uuid(self._context,
                                              uuid=self.uuid,
                                              expected_attrs=extra)
        self[attrname] = instance[attrname]
class InstanceActionEvent(base.NovaPersistentObject, base.NovaObject):
    """Object representing a single event within an instance action."""

    fields = {
        'id': int,
        'event': utils.str_or_none,
        'action_id': utils.int_or_none,
        'start_time': utils.datetime_or_none,
        'finish_time': utils.datetime_or_none,
        'result': utils.str_or_none,
        'traceback': utils.str_or_none,
        }

    # Datetime columns need explicit (de)serialization for the RPC wire.
    _attr_start_time_to_primitive = utils.dt_serializer('start_time')
    _attr_finish_time_to_primitive = utils.dt_serializer('finish_time')
    _attr_start_time_from_primitive = utils.dt_deserializer
    _attr_finish_time_from_primitive = utils.dt_deserializer

    @staticmethod
    def _from_db_object(context, event, db_event):
        """Copy every declared field from the DB row and mark clean."""
        for field in event.fields:
            event[field] = db_event[field]
        event._context = context
        event.obj_reset_changes()
        return event

    @base.remotable_classmethod
    def get_by_id(cls, context, action_id, event_id):
        """Fetch a single event of an action by its id."""
        db_event = db.action_event_get_by_id(context, action_id, event_id)
        return cls._from_db_object(context, cls(), db_event)

    @base.remotable_classmethod
    def event_start(cls, context, instance_uuid, event_name,
                    want_result=True):
        """Record the start of an event for an instance."""
        values = compute_utils.pack_action_event_start(context, instance_uuid,
                                                       event_name)
        db_event = db.action_event_start(context, values)
        if want_result:
            return cls._from_db_object(context, cls(), db_event)

    @base.remotable_classmethod
    def event_finish_with_failure(cls, context, instance_uuid, event_name,
                                  exc_val=None, exc_tb=None,
                                  want_result=True):
        """Record the (possibly failed) completion of an event.

        :param exc_val: exception value for a failed event, if any
        :param exc_tb: exception traceback for a failed event, if any
        """
        # BUGFIX: want_result previously defaulted to None here while every
        # sibling method (event_start, event_finish, and the InstanceAction
        # methods) defaults to True; normalized for consistency. Callers
        # that ignore the return value are unaffected.
        values = compute_utils.pack_action_event_finish(context,
                                                        instance_uuid,
                                                        event_name,
                                                        exc_val=exc_val,
                                                        exc_tb=exc_tb)
        db_event = db.action_event_finish(context, values)
        if want_result:
            return cls._from_db_object(context, cls(), db_event)

    @base.remotable_classmethod
    def event_finish(cls, context, instance_uuid, event_name,
                     want_result=True):
        """Record the successful completion of an event."""
        return cls.event_finish_with_failure(context, instance_uuid,
                                             event_name, exc_val=None,
                                             exc_tb=None,
                                             want_result=want_result)

    @base.remotable
    def finish_with_failure(self, context, exc_val, exc_tb):
        """Mark this event finished (with failure details) in the DB."""
        # FIXME(review): 'instance_uuid' is not a declared field of this
        # object and _from_db_object never sets it, so this attribute
        # access looks like it would fail at runtime -- confirm with the
        # callers of this method.
        values = compute_utils.pack_action_event_finish(context,
                                                        self.instance_uuid,
                                                        self.event,
                                                        exc_val=exc_val,
                                                        exc_tb=exc_tb)
        db_event = db.action_event_finish(context, values)
        self._from_db_object(context, self, db_event)

    @base.remotable
    def finish(self, context):
        """Mark this event finished successfully in the DB."""
        self.finish_with_failure(context, exc_val=None, exc_tb=None)
class Instance(base.NovaPersistentObject, base.NovaObject):
    """Versioned object wrapping a row of the 'instances' DB table.

    All reads and writes go through the nova.db API; nested objects
    (info_cache, security_groups, fault, pci_devices) are loaded on
    demand via expected_attrs and saved through per-field handlers.
    """
    # Version 1.0: Initial version
    # Version 1.1: Added info_cache
    # Version 1.2: Added security_groups
    # Version 1.3: Added expected_vm_state and admin_state_reset to
    #              save()
    # Version 1.4: Added locked_by and deprecated locked
    # Version 1.5: Added cleaned
    # Version 1.6: Added pci_devices
    # Version 1.7: String attributes updated to support unicode
    # Version 1.8: 'security_groups' and 'pci_devices' cannot be None
    # Version 1.9: Make uuid a non-None real string
    VERSION = '1.9'

    fields = {
        'id': int,
        'user_id': obj_utils.str_or_none,
        'project_id': obj_utils.str_or_none,
        'image_ref': obj_utils.str_or_none,
        'kernel_id': obj_utils.str_or_none,
        'ramdisk_id': obj_utils.str_or_none,
        'hostname': obj_utils.str_or_none,
        'launch_index': obj_utils.int_or_none,
        'key_name': obj_utils.str_or_none,
        'key_data': obj_utils.str_or_none,
        'power_state': obj_utils.int_or_none,
        'vm_state': obj_utils.str_or_none,
        'task_state': obj_utils.str_or_none,
        'memory_mb': obj_utils.int_or_none,
        'vcpus': obj_utils.int_or_none,
        'root_gb': obj_utils.int_or_none,
        'ephemeral_gb': obj_utils.int_or_none,
        'host': obj_utils.str_or_none,
        'node': obj_utils.str_or_none,
        'instance_type_id': obj_utils.int_or_none,
        'user_data': obj_utils.str_or_none,
        'reservation_id': obj_utils.str_or_none,
        'scheduled_at': obj_utils.datetime_or_str_or_none,
        'launched_at': obj_utils.datetime_or_str_or_none,
        'terminated_at': obj_utils.datetime_or_str_or_none,
        'availability_zone': obj_utils.str_or_none,
        'display_name': obj_utils.str_or_none,
        'display_description': obj_utils.str_or_none,
        'launched_on': obj_utils.str_or_none,
        # NOTE(jdillaman): locked deprecated in favor of locked_by,
        # to be removed in Icehouse
        'locked': bool,
        'locked_by': obj_utils.str_or_none,
        'os_type': obj_utils.str_or_none,
        'architecture': obj_utils.str_or_none,
        'vm_mode': obj_utils.str_or_none,
        # uuid is a required, non-None string as of version 1.9
        'uuid': obj_utils.cstring,
        'root_device_name': obj_utils.str_or_none,
        'default_ephemeral_device': obj_utils.str_or_none,
        'default_swap_device': obj_utils.str_or_none,
        'config_drive': obj_utils.str_or_none,
        'access_ip_v4': obj_utils.ip_or_none(4),
        'access_ip_v6': obj_utils.ip_or_none(6),
        'auto_disk_config': bool,
        'progress': obj_utils.int_or_none,
        'shutdown_terminate': bool,
        'disable_terminate': bool,
        'cell_name': obj_utils.str_or_none,
        'metadata': dict,
        'system_metadata': dict,
        'info_cache': obj_utils.nested_object(
            instance_info_cache.InstanceInfoCache),
        # security_groups and pci_devices may not be None as of 1.8
        'security_groups': obj_utils.nested_object(
            security_group.SecurityGroupList, none_ok=False),
        'fault': obj_utils.nested_object(instance_fault.InstanceFault),
        'cleaned': bool,
        'pci_devices': obj_utils.nested_object(pci_device.PciDeviceList,
                                               none_ok=False),
        }

    obj_extra_fields = ['name']

    def __init__(self, *args, **kwargs):
        super(Instance, self).__init__(*args, **kwargs)
        # Start with a clean change set (and metadata snapshots) so only
        # subsequent modifications are reported by obj_what_changed().
        self.obj_reset_changes()

    def obj_reset_changes(self, fields=None):
        """Reset tracked changes; also snapshot the metadata dicts.

        The dict-valued metadata fields are mutated in place, which the
        base change tracking cannot see, so copies are kept here and
        compared in obj_what_changed().
        """
        super(Instance, self).obj_reset_changes(fields)
        self._orig_system_metadata = (dict(self.system_metadata) if
                                      'system_metadata' in self else {})
        self._orig_metadata = (dict(self.metadata) if
                               'metadata' in self else {})

    def obj_what_changed(self):
        """Return changed fields, including in-place metadata mutations."""
        changes = super(Instance, self).obj_what_changed()
        if 'metadata' in self and self.metadata != self._orig_metadata:
            changes.add('metadata')
        if 'system_metadata' in self and (self.system_metadata !=
                                          self._orig_system_metadata):
            changes.add('system_metadata')
        return changes

    @property
    def name(self):
        """Display name rendered from CONF.instance_name_template."""
        try:
            # Template may be positional ("instance-%08x" % id) ...
            base_name = CONF.instance_name_template % self.id
        except TypeError:
            # Support templates like "uuid-%(uuid)s", etc.
            info = {}
            # NOTE(russellb): Don't use self.iteritems() here, as it will
            # result in infinite recursion on the name property.
            for key in self.fields:
                if key == 'name':
                    # NOTE(danms): prevent recursion
                    continue
                elif not self.obj_attr_is_set(key):
                    # NOTE(danms): Don't trigger lazy-loads
                    continue
                info[key] = self[key]
            try:
                base_name = CONF.instance_name_template % info
            except KeyError:
                # Template referenced a field we don't have; fall back.
                base_name = self.uuid
        return base_name

    # IP fields are netaddr-like objects; serialize them as strings.
    def _attr_access_ip_v4_to_primitive(self):
        if self.access_ip_v4 is not None:
            return str(self.access_ip_v4)
        else:
            return None

    def _attr_access_ip_v6_to_primitive(self):
        if self.access_ip_v6 is not None:
            return str(self.access_ip_v6)
        else:
            return None

    # Datetime and nested-object fields use shared (de)serializer helpers.
    _attr_scheduled_at_to_primitive = obj_utils.dt_serializer('scheduled_at')
    _attr_launched_at_to_primitive = obj_utils.dt_serializer('launched_at')
    _attr_terminated_at_to_primitive = obj_utils.dt_serializer(
        'terminated_at')
    _attr_info_cache_to_primitive = obj_utils.obj_serializer('info_cache')
    _attr_security_groups_to_primitive = obj_utils.obj_serializer(
        'security_groups')
    _attr_pci_devices_to_primitive = obj_utils.obj_serializer('pci_devices')
    _attr_scheduled_at_from_primitive = obj_utils.dt_deserializer
    _attr_launched_at_from_primitive = obj_utils.dt_deserializer
    _attr_terminated_at_from_primitive = obj_utils.dt_deserializer

    def _attr_info_cache_from_primitive(self, val):
        # info_cache may legitimately be None (unlike security_groups).
        if val is None:
            return val
        return base.NovaObject.obj_from_primitive(val)

    def _attr_security_groups_from_primitive(self, val):
        return base.NovaObject.obj_from_primitive(val)

    def _attr_pci_devices_from_primitive(self, val):
        if val is None:
            # Only possible in version <= 1.7
            return pci_device.PciDeviceList()
        return base.NovaObject.obj_from_primitive(val)

    @staticmethod
    def _from_db_object(context, instance, db_inst, expected_attrs=None):
        """Method to help with migration to objects.

        Converts a database entity to a formal object.  Optional
        attributes are only populated when named in expected_attrs.
        """
        if expected_attrs is None:
            expected_attrs = []
        # Most of the field names match right now, so be quick
        for field in instance.fields:
            if field in INSTANCE_OPTIONAL_ATTRS:
                continue
            elif field == 'deleted':
                # DB soft-delete convention: deleted == id means deleted.
                instance.deleted = db_inst['deleted'] == db_inst['id']
            elif field == 'cleaned':
                instance.cleaned = db_inst['cleaned'] == 1
            else:
                instance[field] = db_inst[field]
        if 'metadata' in expected_attrs:
            instance['metadata'] = utils.instance_meta(db_inst)
        if 'system_metadata' in expected_attrs:
            instance['system_metadata'] = utils.instance_sys_meta(db_inst)
        if 'fault' in expected_attrs:
            instance['fault'] = (
                instance_fault.InstanceFault.get_latest_for_instance(
                    context, instance.uuid))
        if 'pci_devices' in expected_attrs:
            pci_devices = pci_device._make_pci_list(
                context, pci_device.PciDeviceList(),
                db_inst['pci_devices'])
            instance['pci_devices'] = pci_devices
        if 'info_cache' in expected_attrs:
            if db_inst['info_cache'] is None:
                info_cache = None
            else:
                info_cache = instance_info_cache.InstanceInfoCache()
                instance_info_cache.InstanceInfoCache._from_db_object(
                    context, info_cache, db_inst['info_cache'])
            instance['info_cache'] = info_cache
        if 'security_groups' in expected_attrs:
            sec_groups = security_group._make_secgroup_list(
                context, security_group.SecurityGroupList(),
                db_inst['security_groups'])
            instance['security_groups'] = sec_groups
        instance._context = context
        instance.obj_reset_changes()
        return instance

    @base.remotable_classmethod
    def get_by_uuid(cls, context, uuid, expected_attrs=None):
        """Fetch an instance by uuid, joining the requested attributes."""
        if expected_attrs is None:
            expected_attrs = ['info_cache', 'security_groups']
        columns_to_join = _expected_cols(expected_attrs)
        db_inst = db.instance_get_by_uuid(context, uuid,
                                          columns_to_join=columns_to_join)
        return cls._from_db_object(context, cls(), db_inst,
                                   expected_attrs)

    @base.remotable_classmethod
    def get_by_id(cls, context, inst_id, expected_attrs=None):
        """Fetch an instance by integer id, joining requested attributes."""
        if expected_attrs is None:
            expected_attrs = ['info_cache', 'security_groups']
        columns_to_join = _expected_cols(expected_attrs)
        db_inst = db.instance_get(context, inst_id,
                                  columns_to_join=columns_to_join)
        return cls._from_db_object(context, cls(), db_inst,
                                   expected_attrs)

    @base.remotable
    def create(self, context):
        """Create the DB record for this instance from its change set.

        :raises: ObjectActionError if this object was already created.
        """
        if self.obj_attr_is_set('id'):
            raise exception.ObjectActionError(action='create',
                                              reason='already created')
        updates = self.obj_get_changes()
        updates.pop('id', None)
        expected_attrs = [attr for attr in INSTANCE_DEFAULT_FIELDS
                          if attr in updates]
        # Nested objects are flattened to the representation the DB API
        # expects: group names and a JSON-encoded network_info.
        if 'security_groups' in updates:
            updates['security_groups'] = [x.name for x in
                                          updates['security_groups']]
        if 'info_cache' in updates:
            updates['info_cache'] = {
                'network_info': updates['info_cache'].network_info.json()
                }
        db_inst = db.instance_create(context, updates)
        Instance._from_db_object(context, self, db_inst, expected_attrs)

    @base.remotable
    def destroy(self, context):
        """Delete the DB record; unsets 'id' on success.

        :raises: ObjectActionError if not created, uuid missing, or the
                 host changed under us (constraint violation).
        """
        if not self.obj_attr_is_set('id'):
            raise exception.ObjectActionError(action='destroy',
                                              reason='already destroyed')
        if not self.obj_attr_is_set('uuid'):
            raise exception.ObjectActionError(action='destroy',
                                              reason='no uuid')
        if not self.obj_attr_is_set('host') or not self.host:
            # NOTE(danms): If our host is not set, avoid a race
            constraint = db.constraint(host=db.equal_any(None))
        else:
            constraint = None
        try:
            db.instance_destroy(context, self.uuid, constraint=constraint)
        except exception.ConstraintNotMet:
            raise exception.ObjectActionError(action='destroy',
                                              reason='host changed')
        delattr(self, base.get_attrname('id'))

    # Per-field save handlers, dispatched by save() as '_save_<field>'.
    def _save_info_cache(self, context):
        self.info_cache.save(context)

    def _save_security_groups(self, context):
        for secgroup in self.security_groups:
            secgroup.save(context)

    def _save_fault(self, context):
        # NOTE(danms): I don't think we need to worry about this, do we?
        pass

    def _save_pci_devices(self, context):
        # NOTE(yjiang5): All devices held by PCI tracker, only PCI tracker
        # permitted to update the DB. all change to devices from here will
        # be dropped.
        pass

    @base.remotable
    def save(self, context, expected_vm_state=None,
             expected_task_state=None, admin_state_reset=False):
        """Save updates to this instance

        Column-wise updates will be made based on the result of
        self.what_changed().  If expected_task_state is provided,
        it will be checked against the in-database copy of the
        instance before updates are made.

        :param context: Security context
        :param expected_task_state: Optional tuple of valid task states
                                    for the instance to be in.
        :param expected_vm_state: Optional tuple of valid vm states
                                  for the instance to be in.
        :param admin_state_reset: True if admin API is forcing setting
                                  of task_state/vm_state.
        """
        cell_type = cells_opts.get_cell_type()
        if cell_type == 'api' and self.cell_name:
            # NOTE(comstud): We need to stash a copy of ourselves
            # before any updates are applied.  When we call the save
            # methods on nested objects, we will lose any changes to
            # them.  But we need to make sure child cells can tell
            # what is changed.
            #
            # We also need to nuke any updates to vm_state and task_state
            # unless admin_state_reset is True.  compute cells are
            # authoritative for their view of vm_state and task_state.
            stale_instance = self.obj_clone()

            def _handle_cell_update_from_api():
                # Closes over the pre-save clone so child cells see the
                # original change set.
                cells_api = cells_rpcapi.CellsAPI()
                cells_api.instance_update_from_api(context, stale_instance,
                                                   expected_vm_state,
                                                   expected_task_state,
                                                   admin_state_reset)
        else:
            stale_instance = None

        updates = {}
        changes = self.obj_what_changed()
        for field in self.fields:
            # Nested objects save themselves via their '_save_<field>'
            # handler; plain fields are batched into one DB update.
            if (self.obj_attr_is_set(field) and
                    isinstance(self[field], base.NovaObject)):
                try:
                    getattr(self, '_save_%s' % field)(context)
                except AttributeError:
                    LOG.exception(_('No save handler for %s') % field,
                                  instance=self)
            elif field in changes:
                updates[field] = self[field]

        if not updates:
            # Nothing to write, but the API cell must still be told.
            if stale_instance:
                _handle_cell_update_from_api()
            return

        # Cleaned needs to be turned back into an int here
        if 'cleaned' in updates:
            if updates['cleaned']:
                updates['cleaned'] = 1
            else:
                updates['cleaned'] = 0

        if expected_task_state is not None:
            updates['expected_task_state'] = expected_task_state
        if expected_vm_state is not None:
            updates['expected_vm_state'] = expected_vm_state

        expected_attrs = [attr for attr in _INSTANCE_OPTIONAL_JOINED_FIELDS
                          if self.obj_attr_is_set(attr)]
        # NOTE(alaski): We need to pull system_metadata for the
        # notification.send_update() below.  If we don't there's a KeyError
        # when it tries to extract the flavor.
        if 'system_metadata' not in expected_attrs:
            expected_attrs.append('system_metadata')
        old_ref, inst_ref = db.instance_update_and_get_original(
            context, self.uuid, updates, update_cells=False,
            columns_to_join=_expected_cols(expected_attrs))
        if stale_instance:
            _handle_cell_update_from_api()
        elif cell_type == 'compute':
            # Compute cells push their authoritative view upward.
            cells_api = cells_rpcapi.CellsAPI()
            cells_api.instance_update_at_top(context, inst_ref)

        self._from_db_object(context, self, inst_ref, expected_attrs)
        notifications.send_update(context, old_ref, inst_ref)
        self.obj_reset_changes()

    @base.remotable
    def refresh(self, context):
        """Re-fetch from the DB and overwrite any differing fields."""
        # Preserve whichever optional attributes were already loaded.
        extra = [field for field in INSTANCE_OPTIONAL_ATTRS
                 if self.obj_attr_is_set(field)]
        current = self.__class__.get_by_uuid(context, uuid=self.uuid,
                                             expected_attrs=extra)
        for field in self.fields:
            if self.obj_attr_is_set(field) and self[field] != current[field]:
                self[field] = current[field]
        self.obj_reset_changes()

    def obj_load_attr(self, attrname):
        """Lazy-load an optional attribute by re-fetching this instance.

        :raises: ObjectActionError if the attribute is not lazy-loadable
                 or loading it would recurse.
        """
        if attrname not in INSTANCE_OPTIONAL_ATTRS:
            raise exception.ObjectActionError(
                action='obj_load_attr',
                reason='attribute %s not lazy-loadable' % attrname)
        LOG.debug(_("Lazy-loading `%(attr)s' on %(name)s uuid %(uuid)s"),
                  {'attr': attrname,
                   'name': self.obj_name(),
                   'uuid': self.uuid,
                   })
        # FIXME(comstud): This should be optimized to only load the attr.
        instance = self.__class__.get_by_uuid(self._context,
                                              uuid=self.uuid,
                                              expected_attrs=[attrname])
        # NOTE(danms): Never allow us to recursively-load
        if instance.obj_attr_is_set(attrname):
            self[attrname] = instance[attrname]
        else:
            raise exception.ObjectActionError(
                action='obj_load_attr',
                reason='loading %s requires recursion' % attrname)
class Instance(base.NovaObject):
    """Early (v1.2) versioned object wrapping an 'instances' DB row.

    Reads/writes go through the nova.db API; the optional attributes
    (metadata, system_metadata, info_cache, security_groups, fault) are
    loaded on demand via expected_attrs and saved through per-field
    handlers named '_save_<field>'.
    """
    # Version 1.0: Initial version
    # Version 1.1: Added info_cache
    # Version 1.2: Added security_groups
    VERSION = '1.2'

    fields = {
        'id': int,
        'user_id': obj_utils.str_or_none,
        'project_id': obj_utils.str_or_none,
        'image_ref': obj_utils.str_or_none,
        'kernel_id': obj_utils.str_or_none,
        'ramdisk_id': obj_utils.str_or_none,
        'hostname': obj_utils.str_or_none,
        'launch_index': obj_utils.int_or_none,
        'key_name': obj_utils.str_or_none,
        'key_data': obj_utils.str_or_none,
        'power_state': obj_utils.int_or_none,
        'vm_state': obj_utils.str_or_none,
        'task_state': obj_utils.str_or_none,
        'memory_mb': obj_utils.int_or_none,
        'vcpus': obj_utils.int_or_none,
        'root_gb': obj_utils.int_or_none,
        'ephemeral_gb': obj_utils.int_or_none,
        'host': obj_utils.str_or_none,
        'node': obj_utils.str_or_none,
        'instance_type_id': obj_utils.int_or_none,
        'user_data': obj_utils.str_or_none,
        'reservation_id': obj_utils.str_or_none,
        'scheduled_at': obj_utils.datetime_or_str_or_none,
        'launched_at': obj_utils.datetime_or_str_or_none,
        'terminated_at': obj_utils.datetime_or_str_or_none,
        'availability_zone': obj_utils.str_or_none,
        'display_name': obj_utils.str_or_none,
        'display_description': obj_utils.str_or_none,
        'launched_on': obj_utils.str_or_none,
        'locked': bool,
        'os_type': obj_utils.str_or_none,
        'architecture': obj_utils.str_or_none,
        'vm_mode': obj_utils.str_or_none,
        'uuid': obj_utils.str_or_none,
        'root_device_name': obj_utils.str_or_none,
        'default_ephemeral_device': obj_utils.str_or_none,
        'default_swap_device': obj_utils.str_or_none,
        'config_drive': obj_utils.str_or_none,
        'access_ip_v4': obj_utils.ip_or_none(4),
        'access_ip_v6': obj_utils.ip_or_none(6),
        'auto_disk_config': bool,
        'progress': obj_utils.int_or_none,
        'shutdown_terminate': bool,
        'disable_terminate': bool,
        'cell_name': obj_utils.str_or_none,
        'metadata': dict,
        'system_metadata': dict,
        'info_cache': obj_utils.nested_object_or_none(
            instance_info_cache.InstanceInfoCache),
        'security_groups': obj_utils.nested_object_or_none(
            security_group.SecurityGroupList),
        'fault': obj_utils.nested_object_or_none(
            instance_fault.InstanceFault),
        }

    obj_extra_fields = ['name']

    @property
    def name(self):
        """Display name rendered from CONF.instance_name_template."""
        try:
            # Template may be positional ("instance-%08x" % id) ...
            base_name = CONF.instance_name_template % self.id
        except TypeError:
            # Support templates like "uuid-%(uuid)s", etc.
            info = {}
            # NOTE(russellb): Don't use self.iteritems() here, as it will
            # result in infinite recursion on the name property.
            for key in self.fields:
                # prevent recursion if someone specifies %(name)s
                # %(name)s will not be valid.
                if key == 'name':
                    continue
                info[key] = self[key]
            try:
                base_name = CONF.instance_name_template % info
            except KeyError:
                # Template referenced a field we don't have; fall back.
                base_name = self.uuid
        return base_name

    # IP fields are netaddr-like objects; serialize them as strings.
    def _attr_access_ip_v4_to_primitive(self):
        if self.access_ip_v4 is not None:
            return str(self.access_ip_v4)
        else:
            return None

    def _attr_access_ip_v6_to_primitive(self):
        if self.access_ip_v6 is not None:
            return str(self.access_ip_v6)
        else:
            return None

    # Datetime and nested-object fields use shared (de)serializer helpers.
    _attr_scheduled_at_to_primitive = obj_utils.dt_serializer('scheduled_at')
    _attr_launched_at_to_primitive = obj_utils.dt_serializer('launched_at')
    _attr_terminated_at_to_primitive = obj_utils.dt_serializer(
        'terminated_at')
    _attr_info_cache_to_primitive = obj_utils.obj_serializer('info_cache')
    _attr_security_groups_to_primitive = obj_utils.obj_serializer(
        'security_groups')
    _attr_scheduled_at_from_primitive = obj_utils.dt_deserializer
    _attr_launched_at_from_primitive = obj_utils.dt_deserializer
    _attr_terminated_at_from_primitive = obj_utils.dt_deserializer

    def _attr_info_cache_from_primitive(self, val):
        return base.NovaObject.obj_from_primitive(val)

    def _attr_security_groups_from_primitive(self, val):
        return base.NovaObject.obj_from_primitive(val)

    @staticmethod
    def _from_db_object(context, instance, db_inst, expected_attrs=None):
        """Method to help with migration to objects.

        Converts a database entity to a formal object.  Optional
        attributes are only populated when named in expected_attrs.
        """
        if expected_attrs is None:
            expected_attrs = []
        # Most of the field names match right now, so be quick
        for field in instance.fields:
            if field in INSTANCE_OPTIONAL_FIELDS + INSTANCE_IMPLIED_FIELDS:
                continue
            elif field == 'deleted':
                # DB soft-delete convention: deleted == id means deleted.
                instance.deleted = db_inst['deleted'] == db_inst['id']
            else:
                instance[field] = db_inst[field]
        if 'metadata' in expected_attrs:
            instance['metadata'] = utils.metadata_to_dict(
                db_inst['metadata'])
        if 'system_metadata' in expected_attrs:
            instance['system_metadata'] = utils.metadata_to_dict(
                db_inst['system_metadata'])
        if 'fault' in expected_attrs:
            instance['fault'] = (
                instance_fault.InstanceFault.get_latest_for_instance(
                    context, instance.uuid))
        # NOTE(danms): info_cache and security_groups are almost always
        # joined in the DB layer right now, so check to see if they're
        # filled instead of looking at expected_attrs
        if db_inst['info_cache']:
            instance['info_cache'] = instance_info_cache.InstanceInfoCache()
            instance_info_cache.InstanceInfoCache._from_db_object(
                context, instance['info_cache'], db_inst['info_cache'])
        if db_inst['security_groups']:
            instance['security_groups'] = security_group.SecurityGroupList()
            security_group._make_secgroup_list(context,
                                               instance['security_groups'],
                                               db_inst['security_groups'])
        instance._context = context
        instance.obj_reset_changes()
        return instance

    @staticmethod
    def _attrs_to_columns(attrs):
        """Translate instance attributes into columns needing joining."""
        columns_to_join = []
        if 'metadata' in attrs:
            columns_to_join.append('metadata')
        if 'system_metadata' in attrs:
            columns_to_join.append('system_metadata')
        # NOTE(danms): The DB API currently always joins info_cache and
        # security_groups for get operations, so don't add them to the
        # list of columns
        return columns_to_join

    @base.remotable_classmethod
    def get_by_uuid(cls, context, uuid, expected_attrs=None):
        """Fetch an instance by uuid, joining the requested attributes."""
        if expected_attrs is None:
            expected_attrs = []
        columns_to_join = cls._attrs_to_columns(expected_attrs)
        db_inst = db.instance_get_by_uuid(context, uuid,
                                          columns_to_join=columns_to_join)
        return Instance._from_db_object(context, cls(), db_inst,
                                        expected_attrs)

    @base.remotable_classmethod
    def get_by_id(cls, context, inst_id, expected_attrs=None):
        """Fetch an instance by integer id, joining requested attributes."""
        if expected_attrs is None:
            expected_attrs = []
        columns_to_join = cls._attrs_to_columns(expected_attrs)
        db_inst = db.instance_get(context, inst_id,
                                  columns_to_join=columns_to_join)
        return Instance._from_db_object(context, cls(), db_inst,
                                        expected_attrs)

    # Per-field save handlers, dispatched by save() as '_save_<field>'.
    def _save_info_cache(self, context):
        self.info_cache.save(context)

    def _save_security_groups(self, context):
        for secgroup in self.security_groups:
            secgroup.save(context)

    def _save_fault(self, context):
        # NOTE(danms): I don't think we need to worry about this, do we?
        pass

    # NOTE: save() derives the handler name from the field name, and the
    # field is called 'fault', so the handler must be '_save_fault'.  The
    # previous name '_save_instance_fault' could never be found by that
    # lookup and caused an uncaught AttributeError whenever a fault
    # object was set.  Keep an alias in case of external callers.
    _save_instance_fault = _save_fault

    @base.remotable
    def save(self, context, expected_task_state=None):
        """Save updates to this instance

        Column-wise updates will be made based on the result of
        self.what_changed().  If expected_task_state is provided,
        it will be checked against the in-database copy of the
        instance before updates are made.

        :param context: Security context
        :param expected_task_state: Optional tuple of valid task states
                                    for the instance to be in.
        """
        updates = {}
        changes = self.obj_what_changed()
        for field in self.fields:
            # Nested objects save themselves via their '_save_<field>'
            # handler; plain fields are batched into one DB update.
            if (hasattr(self, base.get_attrname(field)) and
                    isinstance(self[field], base.NovaObject)):
                getattr(self, '_save_%s' % field)(context)
            elif field in changes:
                updates[field] = self[field]
        if expected_task_state is not None:
            updates['expected_task_state'] = expected_task_state

        if updates:
            old_ref, inst_ref = db.instance_update_and_get_original(
                context, self.uuid, updates)
            expected_attrs = []
            for attr in INSTANCE_OPTIONAL_FIELDS:
                if hasattr(self, base.get_attrname(attr)):
                    expected_attrs.append(attr)
            Instance._from_db_object(context, self, inst_ref,
                                     expected_attrs)
            if 'vm_state' in changes or 'task_state' in changes:
                notifications.send_update(context, old_ref, inst_ref)

        self.obj_reset_changes()

    @base.remotable
    def refresh(self, context):
        """Re-fetch from the DB and overwrite any differing fields."""
        # Preserve whichever optional attributes were already loaded.
        extra = []
        for field in INSTANCE_OPTIONAL_FIELDS:
            if hasattr(self, base.get_attrname(field)):
                extra.append(field)
        current = self.__class__.get_by_uuid(context, uuid=self.uuid,
                                             expected_attrs=extra)
        for field in self.fields:
            if (hasattr(self, base.get_attrname(field)) and
                    self[field] != current[field]):
                self[field] = current[field]
        self.obj_reset_changes()

    def obj_load_attr(self, attrname):
        """Lazy-load an optional attribute by re-fetching this instance.

        :raises: Exception if the attribute is not lazy-loadable.
        """
        if attrname not in ('system_metadata', 'metadata', 'info_cache',
                            'security_groups', 'fault'):
            raise Exception('Cannot load "%s" from instance' % attrname)
        # NOTE(danms): This could be optimized to just load the bits we need
        instance = self.__class__.get_by_uuid(self._context,
                                              uuid=self.uuid,
                                              expected_attrs=[attrname])
        self[attrname] = instance[attrname]