class Instance(base.NovaObject):
    """A compute instance, backed by a row in the instances DB table.

    Nested objects (info_cache, security_groups, fault) are saved via
    per-field ``_save_<field>`` handlers looked up dynamically in
    :meth:`save`.
    """
    # Version 1.0: Initial version
    # Version 1.1: Added info_cache
    # Version 1.2: Added security_groups
    # Version 1.3: Added expected_vm_state and admin_state_reset to
    #              save()
    # Version 1.4: Added locked_by and deprecated locked
    # Version 1.5: Added cleaned
    VERSION = '1.5'

    fields = {
        'id': int,
        'user_id': obj_utils.str_or_none,
        'project_id': obj_utils.str_or_none,
        'image_ref': obj_utils.str_or_none,
        'kernel_id': obj_utils.str_or_none,
        'ramdisk_id': obj_utils.str_or_none,
        'hostname': obj_utils.str_or_none,
        'launch_index': obj_utils.int_or_none,
        'key_name': obj_utils.str_or_none,
        'key_data': obj_utils.str_or_none,
        'power_state': obj_utils.int_or_none,
        'vm_state': obj_utils.str_or_none,
        'task_state': obj_utils.str_or_none,
        'memory_mb': obj_utils.int_or_none,
        'vcpus': obj_utils.int_or_none,
        'root_gb': obj_utils.int_or_none,
        'ephemeral_gb': obj_utils.int_or_none,
        'host': obj_utils.str_or_none,
        'node': obj_utils.str_or_none,
        'instance_type_id': obj_utils.int_or_none,
        'user_data': obj_utils.str_or_none,
        'reservation_id': obj_utils.str_or_none,
        'scheduled_at': obj_utils.datetime_or_str_or_none,
        'launched_at': obj_utils.datetime_or_str_or_none,
        'terminated_at': obj_utils.datetime_or_str_or_none,
        'availability_zone': obj_utils.str_or_none,
        'display_name': obj_utils.str_or_none,
        'display_description': obj_utils.str_or_none,
        'launched_on': obj_utils.str_or_none,
        # NOTE(jdillaman): locked deprecated in favor of locked_by,
        # to be removed in Icehouse
        'locked': bool,
        'locked_by': obj_utils.str_or_none,
        'os_type': obj_utils.str_or_none,
        'architecture': obj_utils.str_or_none,
        'vm_mode': obj_utils.str_or_none,
        'uuid': obj_utils.str_or_none,
        'root_device_name': obj_utils.str_or_none,
        'default_ephemeral_device': obj_utils.str_or_none,
        'default_swap_device': obj_utils.str_or_none,
        'config_drive': obj_utils.str_or_none,
        'access_ip_v4': obj_utils.ip_or_none(4),
        'access_ip_v6': obj_utils.ip_or_none(6),
        'auto_disk_config': bool,
        'progress': obj_utils.int_or_none,
        'shutdown_terminate': bool,
        'disable_terminate': bool,
        'cell_name': obj_utils.str_or_none,
        'metadata': dict,
        'system_metadata': dict,
        'info_cache': obj_utils.nested_object_or_none(
            instance_info_cache.InstanceInfoCache),
        'security_groups': obj_utils.nested_object_or_none(
            security_group.SecurityGroupList),
        'fault': obj_utils.nested_object_or_none(
            instance_fault.InstanceFault),
        'cleaned': bool,
        }

    obj_extra_fields = ['name']

    def __init__(self, *args, **kwargs):
        super(Instance, self).__init__(*args, **kwargs)
        # Start with a clean change set so a freshly-built object
        # doesn't report every field as dirty.
        self.obj_reset_changes()

    def obj_reset_changes(self, fields=None):
        """Reset tracked changes, snapshotting the dict-valued fields.

        metadata/system_metadata are plain dicts whose in-place mutation
        the base change tracking can't see, so keep copies to diff
        against in obj_what_changed().
        """
        super(Instance, self).obj_reset_changes(fields)
        self._orig_system_metadata = (dict(self.system_metadata) if
                                      'system_metadata' in self else {})
        self._orig_metadata = (dict(self.metadata) if
                               'metadata' in self else {})

    def obj_what_changed(self):
        """Return changed fields, including mutated (system_)metadata."""
        changes = super(Instance, self).obj_what_changed()
        if 'metadata' in self and self.metadata != self._orig_metadata:
            changes.add('metadata')
        if 'system_metadata' in self and (self.system_metadata !=
                                          self._orig_system_metadata):
            changes.add('system_metadata')
        return changes

    @property
    def name(self):
        """Instance name rendered from CONF.instance_name_template."""
        try:
            base_name = CONF.instance_name_template % self.id
        except TypeError:
            # Support templates like "uuid-%(uuid)s", etc.
            info = {}
            # NOTE(russellb): Don't use self.iteritems() here, as it will
            # result in infinite recursion on the name property.
            for key in self.fields:
                # prevent recursion if someone specifies %(name)s
                # %(name)s will not be valid.
                if key == 'name':
                    continue
                info[key] = self[key]
            try:
                base_name = CONF.instance_name_template % info
            except KeyError:
                # Template referenced a key we don't have; fall back.
                base_name = self.uuid
        return base_name

    def _attr_access_ip_v4_to_primitive(self):
        # IP objects aren't JSON-serializable; stringify (None passes).
        if self.access_ip_v4 is not None:
            return str(self.access_ip_v4)
        else:
            return None

    def _attr_access_ip_v6_to_primitive(self):
        if self.access_ip_v6 is not None:
            return str(self.access_ip_v6)
        else:
            return None

    # Datetime and nested-object fields need explicit (de)serializers
    # for the object wire format.
    _attr_scheduled_at_to_primitive = obj_utils.dt_serializer('scheduled_at')
    _attr_launched_at_to_primitive = obj_utils.dt_serializer('launched_at')
    _attr_terminated_at_to_primitive = obj_utils.dt_serializer(
        'terminated_at')
    _attr_info_cache_to_primitive = obj_utils.obj_serializer('info_cache')
    _attr_security_groups_to_primitive = obj_utils.obj_serializer(
        'security_groups')

    _attr_scheduled_at_from_primitive = obj_utils.dt_deserializer
    _attr_launched_at_from_primitive = obj_utils.dt_deserializer
    _attr_terminated_at_from_primitive = obj_utils.dt_deserializer

    def _attr_info_cache_from_primitive(self, val):
        return base.NovaObject.obj_from_primitive(val)

    def _attr_security_groups_from_primitive(self, val):
        return base.NovaObject.obj_from_primitive(val)

    @staticmethod
    def _from_db_object(context, instance, db_inst, expected_attrs=None):
        """Method to help with migration to objects.

        Converts a database entity to a formal object.

        :param context: security context stashed on the result
        :param instance: the Instance object to populate in place
        :param db_inst: the DB row (dict-like) to read from
        :param expected_attrs: optional attribute names to also load
        :returns: the populated instance
        """
        if expected_attrs is None:
            expected_attrs = []
        # Most of the field names match right now, so be quick
        for field in instance.fields:
            if field in INSTANCE_OPTIONAL_FIELDS + INSTANCE_IMPLIED_FIELDS:
                continue
            elif field == 'deleted':
                # DB soft-delete convention: deleted column is set to
                # the row id when deleted, 0 otherwise.
                instance.deleted = db_inst['deleted'] == db_inst['id']
            elif field == 'cleaned':
                # Stored as int in the DB; expose as bool.
                instance.cleaned = db_inst['cleaned'] == 1
            else:
                instance[field] = db_inst[field]
        if 'metadata' in expected_attrs:
            instance['metadata'] = utils.metadata_to_dict(
                db_inst['metadata'])
        if 'system_metadata' in expected_attrs:
            instance['system_metadata'] = utils.metadata_to_dict(
                db_inst['system_metadata'])
        if 'fault' in expected_attrs:
            instance['fault'] = (
                instance_fault.InstanceFault.get_latest_for_instance(
                    context, instance.uuid))
        # NOTE(danms): info_cache and security_groups are almost
        # always joined in the DB layer right now, so check to see if
        # they are asked for and are present in the resulting object
        if 'info_cache' in expected_attrs and db_inst.get('info_cache'):
            instance['info_cache'] = instance_info_cache.InstanceInfoCache()
            instance_info_cache.InstanceInfoCache._from_db_object(
                context, instance['info_cache'], db_inst['info_cache'])
        if ('security_groups' in expected_attrs and
                db_inst.get('security_groups')):
            instance['security_groups'] = security_group.SecurityGroupList()
            security_group._make_secgroup_list(
                context, instance['security_groups'],
                db_inst['security_groups'])
        instance._context = context
        instance.obj_reset_changes()
        return instance

    @staticmethod
    def _attrs_to_columns(attrs):
        """Translate instance attributes into columns needing joining."""
        columns_to_join = []
        if 'metadata' in attrs:
            columns_to_join.append('metadata')
        if 'system_metadata' in attrs:
            columns_to_join.append('system_metadata')
        # NOTE(danms): The DB API currently always joins info_cache and
        # security_groups for get operations, so don't add them to the
        # list of columns
        return columns_to_join

    @base.remotable_classmethod
    def get_by_uuid(cls, context, uuid, expected_attrs=None):
        """Fetch an instance by uuid, joining the requested attributes."""
        if expected_attrs is None:
            expected_attrs = ['info_cache', 'security_groups']
        columns_to_join = cls._attrs_to_columns(expected_attrs)
        db_inst = db.instance_get_by_uuid(context, uuid,
                                          columns_to_join=columns_to_join)
        return cls._from_db_object(context, cls(), db_inst,
                                   expected_attrs)

    @base.remotable_classmethod
    def get_by_id(cls, context, inst_id, expected_attrs=None):
        """Fetch an instance by integer id, joining requested attributes."""
        if expected_attrs is None:
            expected_attrs = ['info_cache', 'security_groups']
        columns_to_join = cls._attrs_to_columns(expected_attrs)
        db_inst = db.instance_get(context, inst_id,
                                  columns_to_join=columns_to_join)
        return cls._from_db_object(context, cls(), db_inst,
                                   expected_attrs)

    def _save_info_cache(self, context):
        self.info_cache.save(context)

    def _save_security_groups(self, context):
        for secgroup in self.security_groups:
            secgroup.save(context)

    def _save_fault(self, context):
        # NOTE(danms): I don't think we need to worry about this, do we?
        pass

    # NOTE: save() dispatches to '_save_%s' % field, so the handler for
    # the 'fault' field must be named _save_fault; the previous name
    # (_save_instance_fault) was never reachable and saving with 'fault'
    # set raised AttributeError.  Kept as an alias for compatibility.
    _save_instance_fault = _save_fault

    @base.remotable
    def save(self, context, expected_vm_state=None,
             expected_task_state=None, admin_state_reset=False):
        """Save updates to this instance

        Column-wise updates will be made based on the result of
        self.what_changed(). If expected_task_state is provided,
        it will be checked against the in-database copy of the
        instance before updates are made.
        :param context: Security context
        :param expected_task_state: Optional tuple of valid task states
                                    for the instance to be in.
        :param expected_vm_state: Optional tuple of valid vm states
                                  for the instance to be in.
        :param admin_state_reset: True if admin API is forcing setting
                                  of task_state/vm_state.
        """
        cell_type = cells_opts.get_cell_type()
        if cell_type == 'api' and self.cell_name:
            # NOTE(comstud): We need to stash a copy of ourselves
            # before any updates are applied.  When we call the save
            # methods on nested objects, we will lose any changes to
            # them.  But we need to make sure child cells can tell
            # what is changed.
            #
            # We also need to nuke any updates to vm_state and task_state
            # unless admin_state_reset is True.  compute cells are
            # authoritative for their view of vm_state and task_state.
            stale_instance = copy.deepcopy(self)

            def _handle_cell_update_from_api():
                cells_api = cells_rpcapi.CellsAPI()
                cells_api.instance_update_from_api(context, stale_instance,
                                                   expected_vm_state,
                                                   expected_task_state,
                                                   admin_state_reset)
        else:
            stale_instance = None

        updates = {}
        changes = self.obj_what_changed()
        for field in self.fields:
            if (hasattr(self, base.get_attrname(field)) and
                    isinstance(self[field], base.NovaObject)):
                # Nested objects save themselves via per-field handlers.
                getattr(self, '_save_%s' % field)(context)
            elif field in changes:
                updates[field] = self[field]
        if not updates:
            # Nothing column-wise to write, but the API cell still needs
            # to propagate nested-object changes downward.
            if stale_instance:
                _handle_cell_update_from_api()
            return

        # Cleaned needs to be turned back into an int here
        if 'cleaned' in updates:
            if updates['cleaned']:
                updates['cleaned'] = 1
            else:
                updates['cleaned'] = 0

        if expected_task_state is not None:
            updates['expected_task_state'] = expected_task_state
        if expected_vm_state is not None:
            updates['expected_vm_state'] = expected_vm_state

        old_ref, inst_ref = db.instance_update_and_get_original(
            context, self.uuid, updates, update_cells=False)

        if stale_instance:
            _handle_cell_update_from_api()
        elif cell_type == 'compute':
            # Compute cells push their authoritative state upward.
            cells_api = cells_rpcapi.CellsAPI()
            cells_api.instance_update_at_top(context, inst_ref)

        expected_attrs = []
        for attr in INSTANCE_OPTIONAL_FIELDS:
            if hasattr(self, base.get_attrname(attr)):
                expected_attrs.append(attr)
        self._from_db_object(context, self, inst_ref, expected_attrs)
        if 'vm_state' in changes or 'task_state' in changes:
            notifications.send_update(context, old_ref, inst_ref)
        self.obj_reset_changes()

    @base.remotable
    def refresh(self, context):
        """Re-read this instance from the DB, preserving loaded extras."""
        extra = []
        for field in INSTANCE_DEFAULT_FIELDS:
            if hasattr(self, base.get_attrname(field)):
                extra.append(field)
        current = self.__class__.get_by_uuid(context, uuid=self.uuid,
                                             expected_attrs=extra)
        for field in self.fields:
            if (hasattr(self, base.get_attrname(field)) and
                    self[field] != current[field]):
                self[field] = current[field]
        self.obj_reset_changes()

    def obj_load_attr(self, attrname):
        """Lazy-load one of the optional attributes on first access."""
        extra = []
        if attrname == 'system_metadata':
            extra.append('system_metadata')
        elif attrname == 'metadata':
            extra.append('metadata')
        elif attrname == 'info_cache':
            extra.append('info_cache')
        elif attrname == 'security_groups':
            extra.append('security_groups')
        elif attrname == 'fault':
            extra.append('fault')
        if not extra:
            raise exception.ObjectActionError(
                action='obj_load_attr',
                reason='attribute %s not lazy-loadable' % attrname)
        # NOTE(danms): This could be optimized to just load the bits we need
        instance = self.__class__.get_by_uuid(self._context,
                                              uuid=self.uuid,
                                              expected_attrs=extra)
        # NOTE(danms): Never allow us to recursively-load
        if hasattr(instance, base.get_attrname(attrname)):
            self[attrname] = instance[attrname]
        else:
            raise exception.ObjectActionError(
                action='obj_load_attr',
                reason='loading %s requires recursion' % attrname)
class Instance(base.NovaPersistentObject, base.NovaObject):
    """A compute instance, backed by a row in the instances DB table.

    Nested objects (info_cache, security_groups, fault, pci_devices)
    are saved via per-field ``_save_<field>`` handlers looked up
    dynamically in :meth:`save`.
    """
    # Version 1.0: Initial version
    # Version 1.1: Added info_cache
    # Version 1.2: Added security_groups
    # Version 1.3: Added expected_vm_state and admin_state_reset to
    #              save()
    # Version 1.4: Added locked_by and deprecated locked
    # Version 1.5: Added cleaned
    # Version 1.6: Added pci_devices
    # Version 1.7: String attributes updated to support unicode
    # Version 1.8: 'security_groups' and 'pci_devices' cannot be None
    # Version 1.9: Make uuid a non-None real string
    VERSION = '1.9'

    fields = {
        'id': int,
        'user_id': obj_utils.str_or_none,
        'project_id': obj_utils.str_or_none,
        'image_ref': obj_utils.str_or_none,
        'kernel_id': obj_utils.str_or_none,
        'ramdisk_id': obj_utils.str_or_none,
        'hostname': obj_utils.str_or_none,
        'launch_index': obj_utils.int_or_none,
        'key_name': obj_utils.str_or_none,
        'key_data': obj_utils.str_or_none,
        'power_state': obj_utils.int_or_none,
        'vm_state': obj_utils.str_or_none,
        'task_state': obj_utils.str_or_none,
        'memory_mb': obj_utils.int_or_none,
        'vcpus': obj_utils.int_or_none,
        'root_gb': obj_utils.int_or_none,
        'ephemeral_gb': obj_utils.int_or_none,
        'host': obj_utils.str_or_none,
        'node': obj_utils.str_or_none,
        'instance_type_id': obj_utils.int_or_none,
        'user_data': obj_utils.str_or_none,
        'reservation_id': obj_utils.str_or_none,
        'scheduled_at': obj_utils.datetime_or_str_or_none,
        'launched_at': obj_utils.datetime_or_str_or_none,
        'terminated_at': obj_utils.datetime_or_str_or_none,
        'availability_zone': obj_utils.str_or_none,
        'display_name': obj_utils.str_or_none,
        'display_description': obj_utils.str_or_none,
        'launched_on': obj_utils.str_or_none,
        # NOTE(jdillaman): locked deprecated in favor of locked_by,
        # to be removed in Icehouse
        'locked': bool,
        'locked_by': obj_utils.str_or_none,
        'os_type': obj_utils.str_or_none,
        'architecture': obj_utils.str_or_none,
        'vm_mode': obj_utils.str_or_none,
        # uuid is a required, non-None string as of version 1.9.
        'uuid': obj_utils.cstring,
        'root_device_name': obj_utils.str_or_none,
        'default_ephemeral_device': obj_utils.str_or_none,
        'default_swap_device': obj_utils.str_or_none,
        'config_drive': obj_utils.str_or_none,
        'access_ip_v4': obj_utils.ip_or_none(4),
        'access_ip_v6': obj_utils.ip_or_none(6),
        'auto_disk_config': bool,
        'progress': obj_utils.int_or_none,
        'shutdown_terminate': bool,
        'disable_terminate': bool,
        'cell_name': obj_utils.str_or_none,
        'metadata': dict,
        'system_metadata': dict,
        'info_cache': obj_utils.nested_object(
            instance_info_cache.InstanceInfoCache),
        # security_groups and pci_devices may not be None (version 1.8).
        'security_groups': obj_utils.nested_object(
            security_group.SecurityGroupList, none_ok=False),
        'fault': obj_utils.nested_object(
            instance_fault.InstanceFault),
        'cleaned': bool,
        'pci_devices': obj_utils.nested_object(
            pci_device.PciDeviceList, none_ok=False),
        }

    obj_extra_fields = ['name']

    def __init__(self, *args, **kwargs):
        super(Instance, self).__init__(*args, **kwargs)
        # Start with a clean change set so a freshly-built object
        # doesn't report every field as dirty.
        self.obj_reset_changes()

    def obj_reset_changes(self, fields=None):
        """Reset tracked changes, snapshotting the dict-valued fields.

        metadata/system_metadata are plain dicts whose in-place mutation
        the base change tracking can't see, so keep copies to diff
        against in obj_what_changed().
        """
        super(Instance, self).obj_reset_changes(fields)
        self._orig_system_metadata = (dict(self.system_metadata) if
                                      'system_metadata' in self else {})
        self._orig_metadata = (dict(self.metadata) if
                               'metadata' in self else {})

    def obj_what_changed(self):
        """Return changed fields, including mutated (system_)metadata."""
        changes = super(Instance, self).obj_what_changed()
        if 'metadata' in self and self.metadata != self._orig_metadata:
            changes.add('metadata')
        if 'system_metadata' in self and (self.system_metadata !=
                                          self._orig_system_metadata):
            changes.add('system_metadata')
        return changes

    @property
    def name(self):
        """Instance name rendered from CONF.instance_name_template."""
        try:
            base_name = CONF.instance_name_template % self.id
        except TypeError:
            # Support templates like "uuid-%(uuid)s", etc.
            info = {}
            # NOTE(russellb): Don't use self.iteritems() here, as it will
            # result in infinite recursion on the name property.
            for key in self.fields:
                if key == 'name':
                    # NOTE(danms): prevent recursion
                    continue
                elif not self.obj_attr_is_set(key):
                    # NOTE(danms): Don't trigger lazy-loads
                    continue
                info[key] = self[key]
            try:
                base_name = CONF.instance_name_template % info
            except KeyError:
                # Template referenced a key we don't have; fall back.
                base_name = self.uuid
        return base_name

    def _attr_access_ip_v4_to_primitive(self):
        # IP objects aren't JSON-serializable; stringify (None passes).
        if self.access_ip_v4 is not None:
            return str(self.access_ip_v4)
        else:
            return None

    def _attr_access_ip_v6_to_primitive(self):
        if self.access_ip_v6 is not None:
            return str(self.access_ip_v6)
        else:
            return None

    # Datetime and nested-object fields need explicit (de)serializers
    # for the object wire format.
    _attr_scheduled_at_to_primitive = obj_utils.dt_serializer('scheduled_at')
    _attr_launched_at_to_primitive = obj_utils.dt_serializer('launched_at')
    _attr_terminated_at_to_primitive = obj_utils.dt_serializer(
        'terminated_at')
    _attr_info_cache_to_primitive = obj_utils.obj_serializer('info_cache')
    _attr_security_groups_to_primitive = obj_utils.obj_serializer(
        'security_groups')
    _attr_pci_devices_to_primitive = obj_utils.obj_serializer('pci_devices')

    _attr_scheduled_at_from_primitive = obj_utils.dt_deserializer
    _attr_launched_at_from_primitive = obj_utils.dt_deserializer
    _attr_terminated_at_from_primitive = obj_utils.dt_deserializer

    def _attr_info_cache_from_primitive(self, val):
        # info_cache may legitimately arrive as None on the wire.
        if val is None:
            return val
        return base.NovaObject.obj_from_primitive(val)

    def _attr_security_groups_from_primitive(self, val):
        return base.NovaObject.obj_from_primitive(val)

    def _attr_pci_devices_from_primitive(self, val):
        if val is None:
            # Only possible in version <= 1.7
            return pci_device.PciDeviceList()
        return base.NovaObject.obj_from_primitive(val)

    @staticmethod
    def _from_db_object(context, instance, db_inst, expected_attrs=None):
        """Method to help with migration to objects.

        Converts a database entity to a formal object.

        :param context: security context stashed on the result
        :param instance: the Instance object to populate in place
        :param db_inst: the DB row (dict-like) to read from
        :param expected_attrs: optional attribute names to also load
        :returns: the populated instance
        """
        if expected_attrs is None:
            expected_attrs = []
        # Most of the field names match right now, so be quick
        for field in instance.fields:
            if field in INSTANCE_OPTIONAL_ATTRS:
                continue
            elif field == 'deleted':
                # DB soft-delete convention: deleted column is set to
                # the row id when deleted, 0 otherwise.
                instance.deleted = db_inst['deleted'] == db_inst['id']
            elif field == 'cleaned':
                # Stored as int in the DB; expose as bool.
                instance.cleaned = db_inst['cleaned'] == 1
            else:
                instance[field] = db_inst[field]
        if 'metadata' in expected_attrs:
            instance['metadata'] = utils.instance_meta(db_inst)
        if 'system_metadata' in expected_attrs:
            instance['system_metadata'] = utils.instance_sys_meta(db_inst)
        if 'fault' in expected_attrs:
            instance['fault'] = (
                instance_fault.InstanceFault.get_latest_for_instance(
                    context, instance.uuid))
        if 'pci_devices' in expected_attrs:
            pci_devices = pci_device._make_pci_list(
                context, pci_device.PciDeviceList(),
                db_inst['pci_devices'])
            instance['pci_devices'] = pci_devices
        if 'info_cache' in expected_attrs:
            if db_inst['info_cache'] is None:
                info_cache = None
            else:
                info_cache = instance_info_cache.InstanceInfoCache()
                instance_info_cache.InstanceInfoCache._from_db_object(
                    context, info_cache, db_inst['info_cache'])
            instance['info_cache'] = info_cache
        if 'security_groups' in expected_attrs:
            sec_groups = security_group._make_secgroup_list(
                context, security_group.SecurityGroupList(),
                db_inst['security_groups'])
            instance['security_groups'] = sec_groups
        instance._context = context
        instance.obj_reset_changes()
        return instance

    @base.remotable_classmethod
    def get_by_uuid(cls, context, uuid, expected_attrs=None):
        """Fetch an instance by uuid, joining the requested attributes."""
        if expected_attrs is None:
            expected_attrs = ['info_cache', 'security_groups']
        columns_to_join = _expected_cols(expected_attrs)
        db_inst = db.instance_get_by_uuid(context, uuid,
                                          columns_to_join=columns_to_join)
        return cls._from_db_object(context, cls(), db_inst,
                                   expected_attrs)

    @base.remotable_classmethod
    def get_by_id(cls, context, inst_id, expected_attrs=None):
        """Fetch an instance by integer id, joining requested attributes."""
        if expected_attrs is None:
            expected_attrs = ['info_cache', 'security_groups']
        columns_to_join = _expected_cols(expected_attrs)
        db_inst = db.instance_get(context, inst_id,
                                  columns_to_join=columns_to_join)
        return cls._from_db_object(context, cls(), db_inst,
                                   expected_attrs)

    @base.remotable
    def create(self, context):
        """Create the DB record for this instance.

        :raises: ObjectActionError if the object already has an id.
        """
        if self.obj_attr_is_set('id'):
            raise exception.ObjectActionError(action='create',
                                              reason='already created')
        updates = self.obj_get_changes()
        updates.pop('id', None)
        expected_attrs = [attr for attr in INSTANCE_DEFAULT_FIELDS
                          if attr in updates]
        if 'security_groups' in updates:
            # The DB layer expects a list of group names, not objects.
            updates['security_groups'] = [x.name for x in
                                          updates['security_groups']]
        if 'info_cache' in updates:
            # The DB layer expects the raw network_info JSON.
            updates['info_cache'] = {
                'network_info': updates['info_cache'].network_info.json()
                }
        db_inst = db.instance_create(context, updates)
        Instance._from_db_object(context, self, db_inst, expected_attrs)

    @base.remotable
    def destroy(self, context):
        """Destroy the DB record for this instance.

        :raises: ObjectActionError if the object has no id/uuid, or if
                 the host changed between read and destroy.
        """
        if not self.obj_attr_is_set('id'):
            raise exception.ObjectActionError(action='destroy',
                                              reason='already destroyed')
        if not self.obj_attr_is_set('uuid'):
            raise exception.ObjectActionError(action='destroy',
                                              reason='no uuid')
        if not self.obj_attr_is_set('host') or not self.host:
            # NOTE(danms): If our host is not set, avoid a race
            constraint = db.constraint(host=db.equal_any(None))
        else:
            constraint = None
        try:
            db.instance_destroy(context, self.uuid, constraint=constraint)
        except exception.ConstraintNotMet:
            raise exception.ObjectActionError(action='destroy',
                                              reason='host changed')
        # Mark as destroyed by unsetting the id attribute.
        delattr(self, base.get_attrname('id'))

    def _save_info_cache(self, context):
        self.info_cache.save(context)

    def _save_security_groups(self, context):
        for secgroup in self.security_groups:
            secgroup.save(context)

    def _save_fault(self, context):
        # NOTE(danms): I don't think we need to worry about this, do we?
        pass

    def _save_pci_devices(self, context):
        # NOTE(yjiang5): All devices held by PCI tracker, only PCI tracker
        # permitted to update the DB. all change to devices from here will
        # be dropped.
        pass

    @base.remotable
    def save(self, context, expected_vm_state=None,
             expected_task_state=None, admin_state_reset=False):
        """Save updates to this instance

        Column-wise updates will be made based on the result of
        self.what_changed(). If expected_task_state is provided,
        it will be checked against the in-database copy of the
        instance before updates are made.
        :param context: Security context
        :param expected_task_state: Optional tuple of valid task states
                                    for the instance to be in.
        :param expected_vm_state: Optional tuple of valid vm states
                                  for the instance to be in.
        :param admin_state_reset: True if admin API is forcing setting
                                  of task_state/vm_state.
        """
        cell_type = cells_opts.get_cell_type()
        if cell_type == 'api' and self.cell_name:
            # NOTE(comstud): We need to stash a copy of ourselves
            # before any updates are applied.  When we call the save
            # methods on nested objects, we will lose any changes to
            # them.  But we need to make sure child cells can tell
            # what is changed.
            #
            # We also need to nuke any updates to vm_state and task_state
            # unless admin_state_reset is True.  compute cells are
            # authoritative for their view of vm_state and task_state.
            stale_instance = self.obj_clone()

            def _handle_cell_update_from_api():
                cells_api = cells_rpcapi.CellsAPI()
                cells_api.instance_update_from_api(context, stale_instance,
                                                   expected_vm_state,
                                                   expected_task_state,
                                                   admin_state_reset)
        else:
            stale_instance = None

        updates = {}
        changes = self.obj_what_changed()
        for field in self.fields:
            if (self.obj_attr_is_set(field) and
                    isinstance(self[field], base.NovaObject)):
                # Nested objects save themselves via per-field handlers;
                # a missing handler is logged rather than fatal.
                try:
                    getattr(self, '_save_%s' % field)(context)
                except AttributeError:
                    LOG.exception(_('No save handler for %s') % field,
                                  instance=self)
            elif field in changes:
                updates[field] = self[field]
        if not updates:
            # Nothing column-wise to write, but the API cell still needs
            # to propagate nested-object changes downward.
            if stale_instance:
                _handle_cell_update_from_api()
            return

        # Cleaned needs to be turned back into an int here
        if 'cleaned' in updates:
            if updates['cleaned']:
                updates['cleaned'] = 1
            else:
                updates['cleaned'] = 0

        if expected_task_state is not None:
            updates['expected_task_state'] = expected_task_state
        if expected_vm_state is not None:
            updates['expected_vm_state'] = expected_vm_state

        expected_attrs = [attr for attr in _INSTANCE_OPTIONAL_JOINED_FIELDS
                          if self.obj_attr_is_set(attr)]
        # NOTE(alaski): We need to pull system_metadata for the
        # notification.send_update() below.  If we don't there's a KeyError
        # when it tries to extract the flavor.
        if 'system_metadata' not in expected_attrs:
            expected_attrs.append('system_metadata')
        old_ref, inst_ref = db.instance_update_and_get_original(
            context, self.uuid, updates, update_cells=False,
            columns_to_join=_expected_cols(expected_attrs))
        if stale_instance:
            _handle_cell_update_from_api()
        elif cell_type == 'compute':
            # Compute cells push their authoritative state upward.
            cells_api = cells_rpcapi.CellsAPI()
            cells_api.instance_update_at_top(context, inst_ref)

        self._from_db_object(context, self, inst_ref, expected_attrs)
        notifications.send_update(context, old_ref, inst_ref)
        self.obj_reset_changes()

    @base.remotable
    def refresh(self, context):
        """Re-read this instance from the DB, preserving loaded extras."""
        extra = [field for field in INSTANCE_OPTIONAL_ATTRS
                 if self.obj_attr_is_set(field)]
        current = self.__class__.get_by_uuid(context, uuid=self.uuid,
                                             expected_attrs=extra)
        for field in self.fields:
            if self.obj_attr_is_set(field) and self[field] != current[field]:
                self[field] = current[field]
        self.obj_reset_changes()

    def obj_load_attr(self, attrname):
        """Lazy-load one of the optional attributes on first access."""
        if attrname not in INSTANCE_OPTIONAL_ATTRS:
            raise exception.ObjectActionError(
                action='obj_load_attr',
                reason='attribute %s not lazy-loadable' % attrname)
        LOG.debug(_("Lazy-loading `%(attr)s' on %(name)s uuid %(uuid)s"),
                  {'attr': attrname,
                   'name': self.obj_name(),
                   'uuid': self.uuid,
                   })
        # FIXME(comstud): This should be optimized to only load the attr.
        instance = self.__class__.get_by_uuid(self._context,
                                              uuid=self.uuid,
                                              expected_attrs=[attrname])
        # NOTE(danms): Never allow us to recursively-load
        if instance.obj_attr_is_set(attrname):
            self[attrname] = instance[attrname]
        else:
            raise exception.ObjectActionError(
                action='obj_load_attr',
                reason='loading %s requires recursion' % attrname)
class Service(base.NovaObject):
    """A service record (nova-compute, nova-scheduler, ...) from the DB.

    Optionally carries the related ComputeNode as a nested object.
    """
    # Version 1.0: Initial version
    # Version 1.1: Added compute_node nested object
    VERSION = '1.1'

    fields = {
        'id': int,
        'host': utils.str_or_none,
        'binary': utils.str_or_none,
        'topic': utils.str_or_none,
        'report_count': int,
        'disabled': bool,
        'disabled_reason': utils.str_or_none,
        'availability_zone': utils.str_or_none,
        'compute_node': utils.nested_object_or_none(
            compute_node.ComputeNode),
        }

    @staticmethod
    def _do_compute_node(context, service, db_service):
        # NOTE(danms): The service.compute_node relationship returns
        # a list, which should only have one item in it. If it's empty
        # or otherwise malformed, ignore it.
        try:
            db_node = db_service['compute_node'][0]
        except Exception:
            return
        service.compute_node = compute_node.ComputeNode._from_db_object(
            context, compute_node.ComputeNode(), db_node)

    @staticmethod
    def _from_db_object(context, service, db_service):
        """Populate a Service object in place from a DB row."""
        # availability_zone may legitimately be absent from the row.
        optional = ('availability_zone',)
        for field in service.fields:
            if field in optional and field not in db_service:
                continue
            if field == 'compute_node':
                service._do_compute_node(context, service, db_service)
            else:
                service[field] = db_service[field]
        service._context = context
        service.obj_reset_changes()
        return service

    def obj_load_attr(self, attrname):
        """Lazy-load compute_node; nothing else is lazy-loadable."""
        if attrname == 'compute_node':
            self.compute_node = compute_node.ComputeNode.get_by_service_id(
                self._context, self.id)
        else:
            raise exception.ObjectActionError(
                action='obj_load_attr',
                reason='attribute %s not lazy-loadable' % attrname)

    # Nested-object wire-format (de)serialization.
    _attr_compute_node_to_primitive = utils.obj_serializer('compute_node')

    def _attr_compute_node_from_primitive(self, val):
        return base.NovaObject.obj_from_primitive(val)

    @base.remotable_classmethod
    def get_by_id(cls, context, service_id):
        """Fetch a service by its integer id."""
        db_row = db.service_get(context, service_id)
        return cls._from_db_object(context, cls(), db_row)

    @base.remotable_classmethod
    def get_by_host_and_topic(cls, context, host, topic):
        """Fetch the service on a given host serving a given topic."""
        db_row = db.service_get_by_host_and_topic(context, host, topic)
        return cls._from_db_object(context, cls(), db_row)

    @base.remotable_classmethod
    def get_by_compute_host(cls, context, host):
        """Fetch the compute service on a given host."""
        db_row = db.service_get_by_compute_host(context, host)
        return cls._from_db_object(context, cls(), db_row)

    @base.remotable_classmethod
    def get_by_args(cls, context, host, binary):
        """Fetch a service by host name and binary name."""
        db_row = db.service_get_by_args(context, host, binary)
        return cls._from_db_object(context, cls(), db_row)

    @base.remotable
    def create(self, context):
        """Insert a DB row from this object's changed fields."""
        updates = dict((field, self[field])
                       for field in self.obj_what_changed())
        db_row = db.service_create(context, updates)
        self._from_db_object(context, self, db_row)

    @base.remotable
    def save(self, context):
        """Write this object's changed fields back to the DB."""
        updates = dict((field, self[field])
                       for field in self.obj_what_changed())
        # The primary key is never updated.
        updates.pop('id', None)
        db_row = db.service_update(context, self.id, updates)
        self._from_db_object(context, self, db_row)

    @base.remotable
    def destroy(self, context):
        """Delete this service's DB row."""
        db.service_destroy(context, self.id)
class Instance(base.NovaObject):
    """Versioned object representing a nova instance (VM) record.

    Wraps the ``instances`` table row (via the ``db.instance_*`` API),
    with optional nested objects for the network info cache, security
    groups and latest fault.

    NOTE(review): HEAD of this file contains a newer revision of this
    same class (VERSION '1.5'); this copy is the older '1.2' revision —
    confirm whether both definitions are intended to coexist.
    """
    # Version 1.0: Initial version
    # Version 1.1: Added info_cache
    # Version 1.2: Added security_groups
    VERSION = '1.2'

    # Field name -> coercion function mapping used by the NovaObject
    # machinery to validate/convert values on attribute assignment.
    fields = {
        'id': int,
        'user_id': obj_utils.str_or_none,
        'project_id': obj_utils.str_or_none,
        'image_ref': obj_utils.str_or_none,
        'kernel_id': obj_utils.str_or_none,
        'ramdisk_id': obj_utils.str_or_none,
        'hostname': obj_utils.str_or_none,
        'launch_index': obj_utils.int_or_none,
        'key_name': obj_utils.str_or_none,
        'key_data': obj_utils.str_or_none,
        'power_state': obj_utils.int_or_none,
        'vm_state': obj_utils.str_or_none,
        'task_state': obj_utils.str_or_none,
        'memory_mb': obj_utils.int_or_none,
        'vcpus': obj_utils.int_or_none,
        'root_gb': obj_utils.int_or_none,
        'ephemeral_gb': obj_utils.int_or_none,
        'host': obj_utils.str_or_none,
        'node': obj_utils.str_or_none,
        'instance_type_id': obj_utils.int_or_none,
        'user_data': obj_utils.str_or_none,
        'reservation_id': obj_utils.str_or_none,
        'scheduled_at': obj_utils.datetime_or_str_or_none,
        'launched_at': obj_utils.datetime_or_str_or_none,
        'terminated_at': obj_utils.datetime_or_str_or_none,
        'availability_zone': obj_utils.str_or_none,
        'display_name': obj_utils.str_or_none,
        'display_description': obj_utils.str_or_none,
        'launched_on': obj_utils.str_or_none,
        'locked': bool,
        'os_type': obj_utils.str_or_none,
        'architecture': obj_utils.str_or_none,
        'vm_mode': obj_utils.str_or_none,
        'uuid': obj_utils.str_or_none,
        'root_device_name': obj_utils.str_or_none,
        'default_ephemeral_device': obj_utils.str_or_none,
        'default_swap_device': obj_utils.str_or_none,
        'config_drive': obj_utils.str_or_none,
        'access_ip_v4': obj_utils.ip_or_none(4),
        'access_ip_v6': obj_utils.ip_or_none(6),
        'auto_disk_config': bool,
        'progress': obj_utils.int_or_none,
        'shutdown_terminate': bool,
        'disable_terminate': bool,
        'cell_name': obj_utils.str_or_none,
        'metadata': dict,
        'system_metadata': dict,
        'info_cache': obj_utils.nested_object_or_none(
            instance_info_cache.InstanceInfoCache),
        'security_groups': obj_utils.nested_object_or_none(
            security_group.SecurityGroupList),
        'fault': obj_utils.nested_object_or_none(instance_fault.InstanceFault),
        }

    # 'name' is computed (property below), not a DB column, but should be
    # included when the object is rendered/iterated as extra fields.
    obj_extra_fields = ['name']

    @property
    def name(self):
        """The instance's display name derived from instance_name_template.

        First tries the template as a simple positional format against
        self.id; if the template uses named substitutions instead
        (TypeError), builds a dict of all fields and retries, falling
        back to the uuid on a KeyError.
        """
        try:
            base_name = CONF.instance_name_template % self.id
        except TypeError:
            # Support templates like "uuid-%(uuid)s", etc.
            info = {}
            # NOTE(russellb): Don't use self.iteritems() here, as it will
            # result in infinite recursion on the name property.
            for key in self.fields:
                # prevent recursion if someone specifies %(name)s
                # %(name)s will not be valid.
                if key == 'name':
                    continue
                info[key] = self[key]
            try:
                base_name = CONF.instance_name_template % info
            except KeyError:
                base_name = self.uuid
        return base_name

    def _attr_access_ip_v4_to_primitive(self):
        # IP objects are not JSON-serializable; send them as strings.
        if self.access_ip_v4 is not None:
            return str(self.access_ip_v4)
        else:
            return None

    def _attr_access_ip_v6_to_primitive(self):
        # IP objects are not JSON-serializable; send them as strings.
        if self.access_ip_v6 is not None:
            return str(self.access_ip_v6)
        else:
            return None

    # Serializer/deserializer hooks used when converting this object
    # to/from its primitive (wire) form.
    _attr_scheduled_at_to_primitive = obj_utils.dt_serializer('scheduled_at')
    _attr_launched_at_to_primitive = obj_utils.dt_serializer('launched_at')
    _attr_terminated_at_to_primitive = obj_utils.dt_serializer('terminated_at')
    _attr_info_cache_to_primitive = obj_utils.obj_serializer('info_cache')
    _attr_security_groups_to_primitive = obj_utils.obj_serializer(
        'security_groups')
    _attr_scheduled_at_from_primitive = obj_utils.dt_deserializer
    _attr_launched_at_from_primitive = obj_utils.dt_deserializer
    _attr_terminated_at_from_primitive = obj_utils.dt_deserializer

    def _attr_info_cache_from_primitive(self, val):
        # Rebuild the nested InstanceInfoCache from its primitive form.
        return base.NovaObject.obj_from_primitive(val)

    def _attr_security_groups_from_primitive(self, val):
        # Rebuild the nested SecurityGroupList from its primitive form.
        return base.NovaObject.obj_from_primitive(val)

    @staticmethod
    def _from_db_object(context, instance, db_inst, expected_attrs=None):
        """Method to help with migration to objects.

        Converts a database entity to a formal object.

        :param context: security context, stashed on the object for
                        later lazy-loading
        :param instance: the Instance object to populate (mutated in place)
        :param db_inst: the database row (dict-like)
        :param expected_attrs: optional attrs (metadata, system_metadata,
                               fault) to populate from the row/DB as well
        :returns: the populated ``instance``
        """
        if expected_attrs is None:
            expected_attrs = []
        # Most of the field names match right now, so be quick
        for field in instance.fields:
            if field in INSTANCE_OPTIONAL_FIELDS + INSTANCE_IMPLIED_FIELDS:
                continue
            elif field == 'deleted':
                # Presumably the soft-delete convention where the
                # 'deleted' column is set to the row's id on deletion.
                # NOTE(review): 'deleted' is not in this version's fields
                # dict, so this branch appears unreachable here — confirm.
                instance.deleted = db_inst['deleted'] == db_inst['id']
            else:
                instance[field] = db_inst[field]
        if 'metadata' in expected_attrs:
            instance['metadata'] = utils.metadata_to_dict(db_inst['metadata'])
        if 'system_metadata' in expected_attrs:
            instance['system_metadata'] = utils.metadata_to_dict(
                db_inst['system_metadata'])
        if 'fault' in expected_attrs:
            instance['fault'] = (
                instance_fault.InstanceFault.get_latest_for_instance(
                    context, instance.uuid))
        # NOTE(danms): info_cache and security_groups are almost always joined
        # in the DB layer right now, so check to see if they're filled instead
        # of looking at expected_attrs
        if db_inst['info_cache']:
            instance['info_cache'] = instance_info_cache.InstanceInfoCache()
            instance_info_cache.InstanceInfoCache._from_db_object(
                context, instance['info_cache'], db_inst['info_cache'])
        if db_inst['security_groups']:
            instance['security_groups'] = security_group.SecurityGroupList()
            security_group._make_secgroup_list(context,
                                               instance['security_groups'],
                                               db_inst['security_groups'])
        instance._context = context
        instance.obj_reset_changes()
        return instance

    @staticmethod
    def _attrs_to_columns(attrs):
        """Translate instance attributes into columns needing joining."""
        columns_to_join = []
        if 'metadata' in attrs:
            columns_to_join.append('metadata')
        if 'system_metadata' in attrs:
            columns_to_join.append('system_metadata')
        # NOTE(danms): The DB API currently always joins info_cache and
        # security_groups for get operations, so don't add them to the
        # list of columns
        return columns_to_join

    @base.remotable_classmethod
    def get_by_uuid(cls, context, uuid, expected_attrs=None):
        """Fetch an Instance by uuid, joining columns for expected_attrs."""
        if expected_attrs is None:
            expected_attrs = []
        columns_to_join = cls._attrs_to_columns(expected_attrs)
        db_inst = db.instance_get_by_uuid(context, uuid,
                                          columns_to_join=columns_to_join)
        return Instance._from_db_object(context, cls(), db_inst,
                                        expected_attrs)

    @base.remotable_classmethod
    def get_by_id(cls, context, inst_id, expected_attrs=None):
        """Fetch an Instance by id, joining columns for expected_attrs."""
        if expected_attrs is None:
            expected_attrs = []
        columns_to_join = cls._attrs_to_columns(expected_attrs)
        db_inst = db.instance_get(context, inst_id,
                                  columns_to_join=columns_to_join)
        return Instance._from_db_object(context, cls(), db_inst,
                                        expected_attrs)

    # Per-field save helpers dispatched from save() for nested objects
    # (looked up dynamically as '_save_<field>').
    def _save_info_cache(self, context):
        self.info_cache.save(context)

    def _save_security_groups(self, context):
        for secgroup in self.security_groups:
            secgroup.save(context)

    def _save_instance_fault(self, context):
        # NOTE(danms): I don't think we need to worry about this, do we?
        pass

    @base.remotable
    def save(self, context, expected_task_state=None):
        """Save updates to this instance

        Column-wise updates will be made based on the result of
        self.what_changed(). If expected_task_state is provided,
        it will be checked against the in-database copy of the
        instance before updates are made.

        :param context: Security context
        :param expected_task_state: Optional tuple of valid task states
                                    for the instance to be in.
        """
        updates = {}
        changes = self.obj_what_changed()
        for field in self.fields:
            # Nested objects save themselves via their _save_<field>
            # helper; plain changed fields go into the column update dict.
            if (hasattr(self, base.get_attrname(field)) and
                    isinstance(self[field], base.NovaObject)):
                getattr(self, '_save_%s' % field)(context)
            elif field in changes:
                updates[field] = self[field]
        if expected_task_state is not None:
            # Passed through to the DB layer to be checked against the
            # stored task_state before the update is applied.
            updates['expected_task_state'] = expected_task_state
        if updates:
            old_ref, inst_ref = db.instance_update_and_get_original(
                context, self.uuid, updates)
            # Re-hydrate only the optional attrs that were already loaded.
            expected_attrs = []
            for attr in INSTANCE_OPTIONAL_FIELDS:
                if hasattr(self, base.get_attrname(attr)):
                    expected_attrs.append(attr)
            Instance._from_db_object(context, self, inst_ref, expected_attrs)
            if 'vm_state' in changes or 'task_state' in changes:
                # State transitions are externally interesting; notify.
                notifications.send_update(context, old_ref, inst_ref)
        self.obj_reset_changes()

    @base.remotable
    def refresh(self, context):
        """Re-fetch this instance from the DB and apply changed fields."""
        # Only re-request optional attrs that are currently loaded.
        extra = []
        for field in INSTANCE_OPTIONAL_FIELDS:
            if hasattr(self, base.get_attrname(field)):
                extra.append(field)
        current = self.__class__.get_by_uuid(context, uuid=self.uuid,
                                             expected_attrs=extra)
        for field in self.fields:
            if (hasattr(self, base.get_attrname(field)) and
                    self[field] != current[field]):
                self[field] = current[field]
        self.obj_reset_changes()

    def obj_load_attr(self, attrname):
        """Lazy-load one of the optional attributes by re-fetching.

        Only system_metadata, metadata, info_cache, security_groups and
        fault are lazy-loadable.
        """
        extra = []
        if attrname == 'system_metadata':
            extra.append('system_metadata')
        elif attrname == 'metadata':
            extra.append('metadata')
        elif attrname == 'info_cache':
            extra.append('info_cache')
        elif attrname == 'security_groups':
            extra.append('security_groups')
        elif attrname == 'fault':
            extra.append('fault')
        if not extra:
            raise Exception('Cannot load "%s" from instance' % attrname)
        # NOTE(danms): This could be optimized to just load the bits we need
        instance = self.__class__.get_by_uuid(self._context,
                                              uuid=self.uuid,
                                              expected_attrs=extra)
        self[attrname] = instance[attrname]