def save(self, context, expected_task_state=None):
    """Save updates to this instance

    Column-wise updates will be made based on the result of
    self.what_changed(). If expected_task_state is provided,
    it will be checked against the in-database copy of the
    instance before updates are made.

    :param context: Security context
    :param expected_task_state: Optional tuple of valid task states
                                for the instance to be in.
    """
    updates = {}
    changes = self.obj_what_changed()
    for field in self.fields:
        # Loaded nested objects are saved via their dedicated
        # _save_<field>() handler instead of a column-wise update.
        if (hasattr(self, base.get_attrname(field)) and
                isinstance(self[field], base.NovaObject)):
            getattr(self, '_save_%s' % field)(context)
        elif field in changes:
            updates[field] = self[field]
    if expected_task_state is not None:
        # Forwarded so the DB layer can enforce the task-state
        # precondition atomically with the update.
        updates['expected_task_state'] = expected_task_state

    if updates:
        old_ref, inst_ref = db.instance_update_and_get_original(
            context, self.uuid, updates)
        expected_attrs = []
        # Only re-hydrate optional fields that were already loaded.
        for attr in INSTANCE_OPTIONAL_FIELDS:
            if hasattr(self, base.get_attrname(attr)):
                expected_attrs.append(attr)
        Instance._from_db_object(context, self, inst_ref, expected_attrs)
        if 'vm_state' in changes or 'task_state' in changes:
            # State transitions are externally visible; send a
            # notification with the before/after DB records.
            notifications.send_update(context, old_ref, inst_ref)
    self.obj_reset_changes()
def save(self, context, expected_task_state=None):
    """Save updates to this instance

    Column-wise updates will be made based on the result of
    self.what_changed(). If expected_task_state is provided,
    it will be checked against the in-database copy of the
    instance before updates are made.

    :param context: Security context
    :param expected_task_state: Optional tuple of valid task states
                                for the instance to be in.
    """
    updates = {}
    changes = self.obj_what_changed()
    for field in self.fields:
        # Loaded nested objects delegate to their own save();
        # everything else is collected for a column-wise update.
        if (hasattr(self, base.get_attrname(field)) and
                isinstance(self[field], base.NovaObject)):
            self[field].save(context)
        elif field in changes:
            updates[field] = self[field]
    if expected_task_state is not None:
        updates['expected_task_state'] = expected_task_state

    if updates:
        old_ref, inst_ref = db.instance_update_and_get_original(context,
                                                               self.uuid,
                                                               updates)
        expected_attrs = []
        # Only re-hydrate optional fields that were already loaded.
        for attr in INSTANCE_OPTIONAL_FIELDS:
            if hasattr(self, base.get_attrname(attr)):
                expected_attrs.append(attr)
        # NOTE(review): _from_db_object is called here without a context
        # argument, unlike other variants in this file — confirm the
        # target signature.
        Instance._from_db_object(self, inst_ref, expected_attrs)
        if 'vm_state' in changes or 'task_state' in changes:
            notifications.send_update(context, old_ref, inst_ref)
    self.obj_reset_changes()
def refresh(self, context):
    """Re-fetch this instance and update already-loaded fields in place."""
    # Only request the optional attributes we currently have loaded.
    loaded_extra = [attr for attr in INSTANCE_OPTIONAL_FIELDS
                    if hasattr(self, base.get_attrname(attr))]
    fresh = self.__class__.get_by_uuid(context, uuid=self.uuid,
                                       expected_attrs=loaded_extra)
    for name in self.fields:
        if not hasattr(self, base.get_attrname(name)):
            continue
        if self[name] != fresh[name]:
            self[name] = fresh[name]
def refresh(self, context):
    """Re-fetch this instance and update already-loaded fields in place."""
    # Only request metadata blobs that are currently loaded.
    loaded_extra = [attr for attr in ('system_metadata', 'metadata')
                    if hasattr(self, base.get_attrname(attr))]
    fresh = self.__class__.get_by_uuid(context, uuid=self.uuid,
                                       expected_attrs=loaded_extra)
    for name in self.fields:
        if not hasattr(self, base.get_attrname(name)):
            continue
        if self[name] != fresh[name]:
            self[name] = fresh[name]
def refresh(self, context):
    """Re-fetch this instance and update already-loaded fields in place."""
    # Only request optional/implied attributes we currently have loaded.
    loaded_extra = [attr
                    for attr in (INSTANCE_OPTIONAL_FIELDS +
                                 INSTANCE_IMPLIED_FIELDS)
                    if hasattr(self, base.get_attrname(attr))]
    fresh = self.__class__.get_by_uuid(context, uuid=self.uuid,
                                       expected_attrs=loaded_extra)
    for name in self.fields:
        if not hasattr(self, base.get_attrname(name)):
            continue
        if self[name] != fresh[name]:
            self[name] = fresh[name]
    self.obj_reset_changes()
def refresh(self, context):
    """Re-fetch this instance and update already-loaded fields in place."""
    # Only request default attributes that are currently loaded.
    loaded_extra = [attr for attr in INSTANCE_DEFAULT_FIELDS
                    if hasattr(self, base.get_attrname(attr))]
    fresh = self.__class__.get_by_uuid(context, uuid=self.uuid,
                                       expected_attrs=loaded_extra)
    for name in self.fields:
        if not hasattr(self, base.get_attrname(name)):
            continue
        if self[name] != fresh[name]:
            self[name] = fresh[name]
    self.obj_reset_changes()
def _from_db_object(context, service, db_service):
    """Populate a Service object from a database row and return it."""
    # 'availability_zone' may legitimately be absent from the DB row.
    allow_missing = ('availability_zone',)
    for key in service.fields:
        if key in allow_missing and key not in db_service:
            continue
        if key == 'compute_node':
            # NOTE(sbauza); We want to only lazy-load compute_node
            continue
        elif key == 'version':
            # NOTE(danms): Special handling of the version field, since
            # it is read_only and set in our init.
            setattr(service, base.get_attrname(key), db_service[key])
        elif key == 'uuid' and not db_service.get(key):
            # Leave uuid off the object if undefined in the database
            # so that it will be generated below.
            continue
        else:
            service[key] = db_service[key]
    service._context = context
    service.obj_reset_changes()
    # TODO(dpeschman): Drop this once all services have uuids in database
    if 'uuid' not in service:
        service.uuid = uuidutils.generate_uuid()
        LOG.debug('Generated UUID %(uuid)s for service %(id)i',
                  dict(uuid=service.uuid, id=service.id))
        # Persist the freshly-generated uuid immediately.
        service.save()
    return service
def obj_load_attr(self, attrname):
    """Lazy-load one attribute by re-fetching this instance from the DB."""
    lazy_loadable = ('system_metadata', 'metadata', 'info_cache',
                     'security_groups', 'fault')
    if attrname not in lazy_loadable:
        raise Exception('Cannot load "%s" from instance' % attrname)
    extra = [attrname]
    # NOTE(danms): This could be optimized to just load the bits we need
    instance = self.__class__.get_by_uuid(self._context,
                                          uuid=self.uuid,
                                          expected_attrs=extra)
    # NOTE(danms): Never allow us to recursively-load
    if not hasattr(instance, base.get_attrname(attrname)):
        raise Exception('Cannot load "%s" from instance' % attrname)
    self[attrname] = instance[attrname]
def obj_load_attr(self, attrname):
    """Lazy-load one attribute by re-fetching this instance from the DB."""
    lazy_loadable = ('system_metadata', 'metadata', 'info_cache',
                     'security_groups', 'pci_devices', 'fault')
    if attrname not in lazy_loadable:
        raise exception.ObjectActionError(
            action='obj_load_attr',
            reason='attribute %s not lazy-loadable' % attrname)
    extra = [attrname]
    # NOTE(danms): This could be optimized to just load the bits we need
    instance = self.__class__.get_by_uuid(self._context,
                                          uuid=self.uuid,
                                          expected_attrs=extra)
    # NOTE(danms): Never allow us to recursively-load
    if not hasattr(instance, base.get_attrname(attrname)):
        raise exception.ObjectActionError(
            action='obj_load_attr',
            reason='loading %s requires recursion' % attrname)
    self[attrname] = instance[attrname]
def _from_db_object(context, service, db_service):
    """Populate a Service object from a database row and return it."""
    # 'availability_zone' may legitimately be absent from the DB row.
    allow_missing = ('availability_zone', )
    for key in service.fields:
        if key in allow_missing and key not in db_service:
            continue
        if key == 'compute_node':
            # NOTE(sbauza); We want to only lazy-load compute_node
            continue
        elif key == 'version':
            # NOTE(danms): Special handling of the version field, since
            # it is read_only and set in our init.
            setattr(service, base.get_attrname(key), db_service[key])
        elif key == 'uuid' and not db_service.get(key):
            # Leave uuid off the object if undefined in the database
            # so that it will be generated below.
            continue
        else:
            service[key] = db_service[key]
    service._context = context
    service.obj_reset_changes()
    # TODO(dpeschman): Drop this once all services have uuids in database
    if 'uuid' not in service:
        service.uuid = uuidutils.generate_uuid()
        LOG.debug('Generated UUID %(uuid)s for service %(id)i',
                  dict(uuid=service.uuid, id=service.id))
        # Persist the freshly-generated uuid immediately.
        service.save()
    return service
def destroy(self):
    """Destroy this instance in the database, honoring a host
    constraint when the host is unset and propagating the destroy
    to the top cell when running in a compute cell.

    :raises: exception.ObjectActionError if already destroyed, if no
             uuid is set, or if the host changed underneath us.
    """
    if not self.obj_attr_is_set('id'):
        raise exception.ObjectActionError(action='destroy',
                                          reason='already destroyed')
    if not self.obj_attr_is_set('uuid'):
        raise exception.ObjectActionError(action='destroy',
                                          reason='no uuid')
    if not self.obj_attr_is_set('host') or not self.host:
        # NOTE(danms): If our host is not set, avoid a race
        constraint = db.constraint(host=db.equal_any(None))
    else:
        constraint = None
    cell_type = cells_opts.get_cell_type()
    if cell_type is not None:
        # Snapshot ourselves before the DB destroy mutates this object,
        # so the cells layer sees the pre-destroy state.
        stale_instance = self.obj_clone()
    try:
        db_inst = db.instance_destroy(self._context, self.uuid,
                                      constraint=constraint)
        self._from_db_object(self._context, self, db_inst)
    except exception.ConstraintNotMet:
        raise exception.ObjectActionError(action='destroy',
                                          reason='host changed')
    if cell_type == 'compute':
        cells_api = cells_rpcapi.CellsAPI()
        cells_api.instance_destroy_at_top(self._context, stale_instance)
    # Drop 'id' so the object reads as no-longer-persisted.
    delattr(self, base.get_attrname('id'))
def obj_load_attr(self, attrname):
    """Lazy-load one attribute by re-fetching this instance from the DB."""
    lazy_loadable = ('system_metadata', 'metadata', 'info_cache',
                     'security_groups', 'fault')
    if attrname not in lazy_loadable:
        raise exception.ObjectActionError(
            action='obj_load_attr',
            reason='attribute %s not lazy-loadable' % attrname)
    extra = [attrname]
    # NOTE(danms): This could be optimized to just load the bits we need
    instance = self.__class__.get_by_uuid(self._context,
                                          uuid=self.uuid,
                                          expected_attrs=extra)
    # NOTE(danms): Never allow us to recursively-load
    if not hasattr(instance, base.get_attrname(attrname)):
        raise exception.ObjectActionError(
            action='obj_load_attr',
            reason='loading %s requires recursion' % attrname)
    self[attrname] = instance[attrname]
def refresh(self, context):
    """Refreshes the instance group."""
    fresh = self.__class__.get_by_uuid(context, self.uuid)
    for name in self.fields:
        # Skip fields that were never loaded locally.
        if not hasattr(self, base.get_attrname(name)):
            continue
        if self[name] != fresh[name]:
            self[name] = fresh[name]
    self.obj_reset_changes()
def destroy(self, context):
    """Delete this block-device mapping and tell the cells layer."""
    if not self.obj_attr_is_set('id'):
        raise exception.ObjectActionError(action='destroy',
                                          reason='already destroyed')
    db.block_device_mapping_destroy(context, self.id)
    # Drop 'id' so the object reads as no-longer-persisted.
    delattr(self, base.get_attrname('id'))
    cells = cells_rpcapi.CellsAPI()
    cells.bdm_destroy_at_top(context,
                             self.instance_uuid,
                             device_name=self.device_name,
                             volume_id=self.volume_id)
def test_get_with_expected(self):
    """get_by_uuid with expected_attrs should request both metadata
    blobs from the DB and leave them loaded on the object.
    """
    ctxt = context.get_admin_context()
    self.mox.StubOutWithMock(db, "instance_get_by_uuid")
    # Expectation must be recorded before ReplayAll().
    db.instance_get_by_uuid(ctxt, "uuid",
                            ["metadata", "system_metadata"]).AndReturn(
        self.fake_instance)
    self.mox.ReplayAll()
    inst = instance.Instance.get_by_uuid(
        ctxt, "uuid", expected_attrs=instance.INSTANCE_OPTIONAL_FIELDS)
    for attr in instance.INSTANCE_OPTIONAL_FIELDS:
        attrname = base.get_attrname(attr)
        self.assertTrue(hasattr(inst, attrname))
    self.assertRemotes()
def test_get_without_expected(self):
    """get_by_uuid with no expected_attrs should not load any of the
    optional fields.
    """
    ctxt = context.get_admin_context()
    self.mox.StubOutWithMock(db, 'instance_get_by_uuid')
    db.instance_get_by_uuid(ctxt, 'uuid', []).AndReturn(
        self.fake_instance)
    self.mox.ReplayAll()
    inst = instance.Instance.get_by_uuid(ctxt, 'uuid')
    # Make sure these weren't loaded
    for attr in instance.INSTANCE_OPTIONAL_FIELDS:
        attrname = base.get_attrname(attr)
        self.assertFalse(hasattr(inst, attrname))
    self.assertRemotes()
def test_get_without_expected(self):
    """get_by_uuid with no expected_attrs should not load any of the
    optional fields.
    """
    ctxt = context.get_admin_context()
    self.mox.StubOutWithMock(db, "instance_get_by_uuid")
    db.instance_get_by_uuid(ctxt, "uuid", []).AndReturn(
        self.fake_instance)
    self.mox.ReplayAll()
    inst = instance.Instance.get_by_uuid(ctxt, "uuid")
    # Make sure these weren't loaded
    for attr in instance.INSTANCE_OPTIONAL_FIELDS:
        attrname = base.get_attrname(attr)
        self.assertFalse(hasattr(inst, attrname))
    self.assertRemotes()
def destroy(self):
    """Delete this block-device mapping; notify cells when at compute."""
    if not self.obj_attr_is_set("id"):
        raise exception.ObjectActionError(action="destroy",
                                          reason="already destroyed")
    db.block_device_mapping_destroy(self._context, self.id)
    # Drop 'id' so the object reads as no-longer-persisted.
    delattr(self, base.get_attrname("id"))
    if cells_opts.get_cell_type() != "compute":
        return
    cells = cells_rpcapi.CellsAPI()
    cells.bdm_destroy_at_top(self._context,
                             self.instance_uuid,
                             device_name=self.device_name,
                             volume_id=self.volume_id)
def test_get_with_expected(self):
    """get_by_uuid with expected_attrs should request both metadata
    blobs from the DB and leave them loaded on the object.
    """
    ctxt = context.get_admin_context()
    self.mox.StubOutWithMock(db, 'instance_get_by_uuid')
    # Expectation must be recorded before ReplayAll().
    db.instance_get_by_uuid(ctxt, 'uuid',
                            ['metadata', 'system_metadata']).AndReturn(
        self.fake_instance)
    self.mox.ReplayAll()
    inst = instance.Instance.get_by_uuid(
        ctxt, 'uuid',
        expected_attrs=instance.INSTANCE_OPTIONAL_FIELDS)
    for attr in instance.INSTANCE_OPTIONAL_FIELDS:
        attrname = base.get_attrname(attr)
        self.assertTrue(hasattr(inst, attrname))
    self.assertRemotes()
def _get_resource_values(self, changes_only=False):
    """Helper method to parse the attributes to write to persistence.

    :param changes_only: if True, only include fields reported as
                         changed by obj_what_changed(); otherwise
                         include every loaded public field.
    :returns: dict mapping field name to a persistence-ready value
              (datetimes serialized as ISO-like strings).
    """
    # Fixed format for serializing datetimes for the db; hoisted out
    # of the loop, and renamed from 'format' which shadowed the builtin.
    time_format = '%Y-%m-%dT%H:%M:%S.%f'
    values = {}
    changes = self.obj_what_changed()
    for field in self._get_public_resource_fields():
        if not changes_only or field in changes:
            if hasattr(self, base.get_attrname(field)):
                values[field] = self[field]
                # Need to convert all date times to strings for the db
                if isinstance(values[field], datetime):
                    values[field] = values[field].strftime(time_format)
    return values
def destroy(self, context):
    """Destroy this instance in the database, honoring a host
    constraint when the host is unset.

    :param context: Security context
    :raises: exception.ObjectActionError if already destroyed, if no
             uuid is set, or if the host changed underneath us.
    """
    if not self.obj_attr_is_set("id"):
        raise exception.ObjectActionError(action="destroy",
                                          reason="already destroyed")
    if not self.obj_attr_is_set("uuid"):
        raise exception.ObjectActionError(action="destroy",
                                          reason="no uuid")
    if not self.obj_attr_is_set("host") or not self.host:
        # NOTE(danms): If our host is not set, avoid a race
        constraint = db.constraint(host=db.equal_any(None))
    else:
        constraint = None
    try:
        db.instance_destroy(context, self.uuid, constraint=constraint)
    except exception.ConstraintNotMet:
        raise exception.ObjectActionError(action="destroy",
                                          reason="host changed")
    # Drop 'id' so the object reads as no-longer-persisted.
    delattr(self, base.get_attrname("id"))
def _from_db_object(context, service, db_service):
    """Populate a Service object from a database row and return it."""
    # 'availability_zone' may legitimately be absent from the DB row.
    allow_missing = ('availability_zone',)
    for key in service.fields:
        if key in allow_missing and key not in db_service:
            continue
        if key == 'compute_node':
            # NOTE(sbauza); We want to only lazy-load compute_node
            continue
        elif key == 'version':
            # NOTE(danms): Special handling of the version field, since
            # it is read_only and set in our init.
            setattr(service, base.get_attrname(key), db_service[key])
        else:
            service[key] = db_service[key]
    service._context = context
    service.obj_reset_changes()
    return service
def object_action(self, context, objinst, objmethod, args, kwargs):
    """Perform an action on an object.

    Invokes objmethod on objinst and returns a tuple of
    (updates, result) where updates maps each loaded field that the
    call changed to its primitive form, plus the object's
    obj_what_changed() set under the 'obj_what_changed' key.
    """
    # Shallow snapshot of the object before the call so we can diff.
    oldobj = copy.copy(objinst)
    result = getattr(objinst, objmethod)(context, *args, **kwargs)
    updates = dict()
    # NOTE(danms): Diff the object with the one passed to us and
    # generate a list of changes to forward back
    for field in objinst.fields:
        if not hasattr(objinst, nova_object.get_attrname(field)):
            # Avoid demand-loading anything
            continue
        if oldobj[field] != objinst[field]:
            updates[field] = objinst._attr_to_primitive(field)
    # This is safe since a field named this would conflict with the
    # method anyway
    updates['obj_what_changed'] = objinst.obj_what_changed()
    return updates, result
def refresh(self, context=None, transaction=None):
    """Refreshes the latest state of the Resource attributes

    :param context: optional security context for the lookup
    :param transaction: optional transaction to refresh within
    """
    self._update_transaction(context, transaction)
    # Get the Identifier Attribute to know which Resource to Refresh
    id_attr = self._get_resource_id_attribute()
    resource_id = self._get_resource_identifer()
    filters = {id_attr: resource_id}
    # Delegate to the Factory to Retrieve the Refreshed Resource
    new_resource = self._get_resource_factory().get_resource(
        self.__class__, self._context, filters, self._transaction)
    # Loop through the retrieved resource, updating the current resource
    for field in self._get_public_resource_fields():
        if hasattr(new_resource, base.get_attrname(field)):
            setattr(self, field, getattr(new_resource, field))
    # Resets any attributes that have been cached on this resource
    self._reset_cached_attributes()
    self.obj_reset_changes()
def destroy(self, context):
    """Destroy this instance in the database, honoring a host
    constraint when the host is unset.

    :param context: Security context
    :raises: exception.ObjectActionError if already destroyed, if no
             uuid is set, or if the host changed underneath us.
    """
    if not self.obj_attr_is_set('id'):
        raise exception.ObjectActionError(action='destroy',
                                          reason='already destroyed')
    if not self.obj_attr_is_set('uuid'):
        raise exception.ObjectActionError(action='destroy',
                                          reason='no uuid')
    if not self.obj_attr_is_set('host') or not self.host:
        # NOTE(danms): If our host is not set, avoid a race
        constraint = db.constraint(host=db.equal_any(None))
    else:
        constraint = None
    try:
        db.instance_destroy(context, self.uuid, constraint=constraint)
    except exception.ConstraintNotMet:
        raise exception.ObjectActionError(action='destroy',
                                          reason='host changed')
    # Drop 'id' so the object reads as no-longer-persisted.
    delattr(self, base.get_attrname('id'))
def destroy(self, context):
    """Destroy this instance in the database, honoring a host
    constraint when the host is unset, and refresh this object from
    the destroyed row.

    :param context: Security context
    :raises: exception.ObjectActionError if already destroyed, if no
             uuid is set, or if the host changed underneath us.
    """
    if not self.obj_attr_is_set('id'):
        raise exception.ObjectActionError(action='destroy',
                                          reason='already destroyed')
    if not self.obj_attr_is_set('uuid'):
        raise exception.ObjectActionError(action='destroy',
                                          reason='no uuid')
    if not self.obj_attr_is_set('host') or not self.host:
        # NOTE(danms): If our host is not set, avoid a race
        constraint = db.constraint(host=db.equal_any(None))
    else:
        constraint = None
    try:
        db_inst = db.instance_destroy(context, self.uuid,
                                      constraint=constraint)
        self._from_db_object(context, self, db_inst)
    except exception.ConstraintNotMet:
        raise exception.ObjectActionError(action='destroy',
                                          reason='host changed')
    # Drop 'id' so the object reads as no-longer-persisted.
    delattr(self, base.get_attrname('id'))
def destroy(self):
    """Delete this block-device mapping row and unset the local id."""
    if not self.obj_attr_is_set('id'):
        raise exception.ObjectActionError(action='destroy',
                                          reason='already destroyed')
    db.block_device_mapping_destroy(self._context, self.id)
    # Drop 'id' so the object reads as no-longer-persisted.
    delattr(self, base.get_attrname('id'))
def save(self, context, expected_vm_state=None,
         expected_task_state=None, admin_state_reset=False):
    """Save updates to this instance

    Column-wise updates will be made based on the result of
    self.what_changed(). If expected_task_state is provided,
    it will be checked against the in-database copy of the
    instance before updates are made.

    :param context: Security context
    :param expected_task_state: Optional tuple of valid task states
                                for the instance to be in.
    :param expected_vm_state: Optional tuple of valid vm states
                              for the instance to be in.
    :param admin_state_reset: True if admin API is forcing setting
                              of task_state/vm_state.
    """
    cell_type = cells_opts.get_cell_type()
    if cell_type == 'api' and self.cell_name:
        # NOTE(comstud): We need to stash a copy of ourselves
        # before any updates are applied. When we call the save
        # methods on nested objects, we will lose any changes to
        # them. But we need to make sure child cells can tell
        # what is changed.
        #
        # We also need to nuke any updates to vm_state and task_state
        # unless admin_state_reset is True. compute cells are
        # authoritative for their view of vm_state and task_state.
        stale_instance = copy.deepcopy(self)

        def _handle_cell_update_from_api():
            # Forward the stashed (pre-save) copy down to the child cell.
            cells_api = cells_rpcapi.CellsAPI()
            cells_api.instance_update_from_api(context, stale_instance,
                                               expected_vm_state,
                                               expected_task_state,
                                               admin_state_reset)
    else:
        stale_instance = None

    updates = {}
    changes = self.obj_what_changed()
    for field in self.fields:
        # Loaded nested objects are saved via their dedicated
        # _save_<field>() handler rather than column-wise.
        if (hasattr(self, base.get_attrname(field)) and
                isinstance(self[field], base.NovaObject)):
            getattr(self, '_save_%s' % field)(context)
        elif field in changes:
            updates[field] = self[field]
    if not updates:
        # Nothing column-wise to write; still propagate to the child
        # cell if we're at the API layer.
        if stale_instance:
            _handle_cell_update_from_api()
        return
    # Cleaned needs to be turned back into an int here
    if 'cleaned' in updates:
        if updates['cleaned']:
            updates['cleaned'] = 1
        else:
            updates['cleaned'] = 0
    if expected_task_state is not None:
        updates['expected_task_state'] = expected_task_state
    if expected_vm_state is not None:
        updates['expected_vm_state'] = expected_vm_state

    # update_cells=False: cell propagation is handled explicitly below.
    old_ref, inst_ref = db.instance_update_and_get_original(
        context, self.uuid, updates, update_cells=False)
    if stale_instance:
        _handle_cell_update_from_api()
    elif cell_type == 'compute':
        # Compute cell pushes the updated instance up to the top cell.
        cells_api = cells_rpcapi.CellsAPI()
        cells_api.instance_update_at_top(context, inst_ref)

    expected_attrs = []
    # Only re-hydrate optional fields that were already loaded.
    for attr in INSTANCE_OPTIONAL_FIELDS:
        if hasattr(self, base.get_attrname(attr)):
            expected_attrs.append(attr)
    self._from_db_object(context, self, inst_ref, expected_attrs)
    if 'vm_state' in changes or 'task_state' in changes:
        # State transitions are externally visible; send a
        # notification with the before/after DB records.
        notifications.send_update(context, old_ref, inst_ref)
    self.obj_reset_changes()