def fake_instance_obj(context, obj_instance_class=None, **updates):
    """Create a fake Instance object backed by fake DB values.

    :param context: request context used to hydrate the object
    :param obj_instance_class: Instance class to instantiate; defaults to
        objects.Instance
    :param updates: overrides for the fake DB row; 'expected_attrs' and
        'flavor' are popped out and handled specially
    :returns: a populated instance with no pending changes
    """
    cls = objects.Instance if obj_instance_class is None else obj_instance_class
    expected_attrs = updates.pop('expected_attrs', None)
    flavor = updates.pop('flavor', None)
    if not flavor:
        # No flavor supplied: fabricate a minimal default one.
        flavor = objects.Flavor(
            id=1, name='flavor1', memory_mb=256, vcpus=1, root_gb=1,
            ephemeral_gb=1, flavorid='1', swap=0, rxtx_factor=1.0,
            vcpu_weight=1, disabled=False, is_public=True,
            extra_specs={}, projects=[])
        flavor.obj_reset_changes()
    instance = cls._from_db_object(
        context, cls(), fake_db_instance(**updates),
        expected_attrs=expected_attrs)
    instance.keypairs = objects.KeyPairList(objects=[])
    instance.tags = objects.TagList()
    if flavor:
        instance.flavor = flavor
        # This is needed for instance quota counting until we have the
        # ability to count allocations in placement.
        for attr in ('vcpus', 'memory_mb'):
            if attr in flavor and attr not in updates:
                setattr(instance, attr, getattr(flavor, attr))
    instance.old_flavor = None
    instance.new_flavor = None
    instance.obj_reset_changes()
    return instance
def fake_db_req(**updates):
    """Build a fake build_requests DB row as a plain dict.

    Serializes a fake instance, a BlockDeviceMappingList and a TagList
    the same way the real build_requests table stores them, then fills
    every remaining BuildRequest field with its null/default value.

    :param updates: values that override the generated row entries
    :returns: dict shaped like a build_requests DB record
    :raises Exception: if a BuildRequest field is neither set, nullable,
        nor has a default
    """
    ctxt = context.RequestContext('fake-user', 'fake-project')
    instance_uuid = uuidutils.generate_uuid()
    instance = fake_instance.fake_instance_obj(
        ctxt, objects.Instance, uuid=instance_uuid)
    # This will always be set this way for an instance at build time
    instance.host = None
    # NOTE(review): the first argument here is the module-level name
    # 'context', not the local 'ctxt', and the BDM is keyed to the
    # sentinel uuids.instance rather than the generated instance_uuid
    # used by the tag below -- confirm both are intentional.
    block_devices = objects.BlockDeviceMappingList(objects=[
        fake_block_device.fake_bdm_object(
            context,
            fake_block_device.FakeDbBlockDeviceDict(
                source_type='blank', destination_type='local',
                guest_format='foo', device_type='disk', disk_bus='',
                boot_index=1, device_name='xvda',
                delete_on_termination=False, snapshot_id=None,
                volume_id=None, volume_size=0, image_id='bar',
                no_device=False, connection_info=None, tag='',
                instance_uuid=uuids.instance))
    ])
    tags = objects.TagList(
        objects=[objects.Tag(tag='tag1', resource_id=instance_uuid)])
    db_build_request = {
        'id': 1,
        'project_id': 'fake-project',
        'instance_uuid': instance_uuid,
        'instance': jsonutils.dumps(instance.obj_to_primitive()),
        'block_device_mappings': jsonutils.dumps(
            block_devices.obj_to_primitive()),
        'tags': jsonutils.dumps(tags.obj_to_primitive()),
        'created_at': datetime.datetime(2016, 1, 16),
        'updated_at': datetime.datetime(2016, 1, 16),
    }
    # Complete the row for the object's schema: anything not set above
    # gets None (if nullable) or the field default; otherwise the fake
    # needs to be taught about the new field explicitly.
    for name, field in objects.BuildRequest.fields.items():
        if name in db_build_request:
            continue
        if field.nullable:
            db_build_request[name] = None
        elif field.default != fields.UnspecifiedDefault:
            db_build_request[name] = field.default
        else:
            raise Exception('fake_db_req needs help with %s' % name)
    if updates:
        db_build_request.update(updates)
    return db_build_request
def test_send_version_instance_update_uses_flavor(self, mock_emit):
    """Assert the update payload takes flavor values from instance.flavor."""
    # instance.update notification needs some tags value to avoid lazy-load
    self.instance.tags = objects.TagList()
    # The payload must choose the values in instance.flavor.$value
    # instead of instance.$value
    notification_base._send_versioned_instance_update(
        mock.MagicMock(), self.instance, self.payload, 'host', 'compute')
    emitted = mock_emit.call_args_list[0][1]['payload']
    flavor_data = emitted['nova_object.data']['flavor']['nova_object.data']
    actual = {key: flavor_data[key] for key in self.test_keys}
    self.assertEqual(self.flavor_values, actual)
def _load_instance(self, db_instance):
    """Deserialize the stored instance primitive onto self.instance.

    :param db_instance: JSON-serialized Instance primitive from the DB
        row, or None in error scenarios
    :raises exception.BuildRequestNotFound: if the stored instance is
        None or was serialized with an unsupported object version
    """
    # NOTE(alaski): Be very careful with instance loading because it
    # changes more than most objects.
    try:
        self.instance = objects.Instance.obj_from_primitive(
            jsonutils.loads(db_instance))
    except TypeError:
        # jsonutils.loads(None) raises TypeError; treat a None column as
        # "no build request".
        LOG.debug('Failed to load instance from BuildRequest with uuid '
                  '%s because it is None', self.instance_uuid)
        raise exception.BuildRequestNotFound(uuid=self.instance_uuid)
    except ovoo_exc.IncompatibleObjectVersion as exc:
        # This should only happen if proper service upgrade strategies are
        # not followed. Log the exception and raise BuildRequestNotFound.
        # If the instance can't be loaded this object is useless and may
        # as well not exist.
        LOG.debug('Could not deserialize instance store in BuildRequest '
                  'with uuid %(instance_uuid)s. Found version %(version)s '
                  'which is not supported here.',
                  dict(instance_uuid=self.instance_uuid,
                       version=exc.objver))
        LOG.exception(_LE('Could not deserialize instance in '
                          'BuildRequest'))
        raise exception.BuildRequestNotFound(uuid=self.instance_uuid)
    # NOTE(sbauza): The instance primitive should already have the deleted
    # field being set, so when hydrating it back here, we should get the
    # right value but in case we don't have it, let's suppose that the
    # instance is not deleted, which is the default value for that field.
    self.instance.obj_set_defaults('deleted')
    # NOTE(alaski): Set some fields on instance that are needed by the api,
    # not lazy-loadable, and don't change.
    self.instance.disable_terminate = False
    self.instance.terminated_at = None
    self.instance.host = None
    self.instance.node = None
    self.instance.launched_at = None
    self.instance.launched_on = None
    self.instance.cell_name = None
    # The fields above are not set until the instance is in a cell at
    # which point this BuildRequest will be gone. locked_by could
    # potentially be set by an update so it should not be overwritten.
    if not self.instance.obj_attr_is_set('locked_by'):
        self.instance.locked_by = None
    # created_at/updated_at are not on the serialized instance because it
    # was never persisted.
    self.instance.created_at = self.created_at
    self.instance.updated_at = self.updated_at
    self.instance.tags = objects.TagList([])
def _load_tags(self, db_tags):
    """Hydrate self.tags from the serialized TagList stored in the row.

    :param db_tags: a JSON-serialized TagList primitive, or None
    """
    if db_tags is not None:
        self.tags = objects.TagList.obj_from_primitive(
            jsonutils.loads(db_tags))
        return
    # 'db_tags' is None: a mixed version nova-api scenario where the
    # actual list can't be retrieved. Use an empty list, a temporary API
    # inconsistency that will be resolved as soon as the instance is
    # scheduled and on a compute.
    LOG.debug('Failed to load tags from BuildRequest '
              'for instance %s because it is None', self.instance_uuid)
    self.tags = objects.TagList()
def delete_all(self, req, server_id):
    """Delete all tags on a server and send an update notification.

    :param req: the wsgi request object
    :param server_id: uuid of the server whose tags are removed
    :raises webob.exc.HTTPNotFound: if the instance vanished while the
        tags were being destroyed
    """
    context = req.environ["nova.context"]
    context.can(st_policies.POLICY_ROOT % 'delete_all')
    im = _get_instance_mapping(context, server_id)
    # Validate the instance state in its cell before touching tags.
    with nova_context.target_cell(context, im.cell_mapping) as cctxt:
        instance = self._check_instance_in_valid_state(
            cctxt, server_id, 'delete tags')
    try:
        with nova_context.target_cell(context, im.cell_mapping) as cctxt:
            objects.TagList.destroy(cctxt, server_id)
        # Reflect the now-empty tag set on the local object so the
        # notification below carries the updated state.
        instance.tags = objects.TagList()
    except exception.InstanceNotFound as e:
        raise webob.exc.HTTPNotFound(explanation=e.format_message())
    notifications_base.send_instance_update_notification(
        context, instance, service="nova-api")
def _from_db_object(context, instance, db_inst, expected_attrs=None):
    """Method to help with migration to objects.

    Converts a database entity to a formal object.

    :param context: request context, stored on the instance
    :param instance: the Instance object being populated in place
    :param db_inst: DB row; may be a dict or a SQLAlchemy model
    :param expected_attrs: optional attribute names to load beyond the
        base fields
    :returns: the populated instance with changes reset
    """
    instance._context = context
    if expected_attrs is None:
        expected_attrs = []
    # Most of the field names match right now, so be quick
    for field in instance.fields:
        if field in INSTANCE_OPTIONAL_ATTRS:
            continue
        elif field == 'deleted':
            # Soft-deleted rows store the row id in 'deleted'.
            instance.deleted = db_inst['deleted'] == db_inst['id']
        elif field == 'cleaned':
            instance.cleaned = db_inst['cleaned'] == 1
        else:
            instance[field] = db_inst[field]
    # NOTE(danms): We can be called with a dict instead of a
    # SQLAlchemy object, so we have to be careful here
    if hasattr(db_inst, '__dict__'):
        have_extra = 'extra' in db_inst.__dict__ and db_inst['extra']
    else:
        have_extra = 'extra' in db_inst and db_inst['extra']
    if 'metadata' in expected_attrs:
        instance['metadata'] = utils.instance_meta(db_inst)
    if 'system_metadata' in expected_attrs:
        instance['system_metadata'] = utils.instance_sys_meta(db_inst)
    if 'fault' in expected_attrs:
        instance['fault'] = (
            objects.InstanceFault.get_latest_for_instance(
                context, instance.uuid))
    # The attributes below live in the instance_extra table; each loads
    # from 'extra' when present and falls back to None otherwise.
    if 'numa_topology' in expected_attrs:
        if have_extra:
            instance._load_numa_topology(
                db_inst['extra'].get('numa_topology'))
        else:
            instance.numa_topology = None
    if 'pci_requests' in expected_attrs:
        if have_extra:
            instance._load_pci_requests(
                db_inst['extra'].get('pci_requests'))
        else:
            instance.pci_requests = None
    if 'vcpu_model' in expected_attrs:
        if have_extra:
            instance._load_vcpu_model(db_inst['extra'].get('vcpu_model'))
        else:
            instance.vcpu_model = None
    if 'ec2_ids' in expected_attrs:
        instance._load_ec2_ids()
    if 'migration_context' in expected_attrs:
        if have_extra:
            instance._load_migration_context(
                db_inst['extra'].get('migration_context'))
        else:
            instance.migration_context = None
    if 'info_cache' in expected_attrs:
        if db_inst.get('info_cache') is None:
            instance.info_cache = None
        elif not instance.obj_attr_is_set('info_cache'):
            # TODO(danms): If this ever happens on a backlevel instance
            # passed to us by a backlevel service, things will break
            instance.info_cache = objects.InstanceInfoCache(context)
        if instance.info_cache is not None:
            instance.info_cache._from_db_object(
                context, instance.info_cache, db_inst['info_cache'])
    if any([x in expected_attrs
            for x in ('flavor', 'old_flavor', 'new_flavor')]):
        if have_extra and db_inst['extra'].get('flavor'):
            instance._flavor_from_db(db_inst['extra']['flavor'])
    # TODO(danms): If we are updating these on a backlevel instance,
    # we'll end up sending back new versions of these objects (see
    # above note for new info_caches
    if 'pci_devices' in expected_attrs:
        pci_devices = base.obj_make_list(
            context, objects.PciDeviceList(context),
            objects.PciDevice, db_inst['pci_devices'])
        instance['pci_devices'] = pci_devices
    if 'security_groups' in expected_attrs:
        sec_groups = base.obj_make_list(
            context, objects.SecurityGroupList(context),
            objects.SecurityGroup, db_inst.get('security_groups', []))
        instance['security_groups'] = sec_groups
    if 'tags' in expected_attrs:
        tags = base.obj_make_list(
            context, objects.TagList(context),
            objects.Tag, db_inst['tags'])
        instance['tags'] = tags
    instance.obj_reset_changes()
    return instance
def _from_db_object(context, instance, db_inst, expected_attrs=None):
    """Method to help with migration to objects.

    Converts a database entity to a formal object.

    :param context: request context, stored on the instance
    :param instance: the Instance object being populated in place
    :param db_inst: DB row supporting dict-style access (including .get)
    :param expected_attrs: optional attribute names to load beyond the
        base fields
    :returns: the populated instance with changes reset
    """
    instance._context = context
    if expected_attrs is None:
        expected_attrs = []
    # Most of the field names match right now, so be quick
    for field in instance.fields:
        if field in INSTANCE_OPTIONAL_ATTRS:
            continue
        elif field == 'deleted':
            # Soft-deleted rows store the row id in 'deleted'.
            instance.deleted = db_inst['deleted'] == db_inst['id']
        elif field == 'cleaned':
            instance.cleaned = db_inst['cleaned'] == 1
        else:
            instance[field] = db_inst[field]
    if 'metadata' in expected_attrs:
        instance['metadata'] = utils.instance_meta(db_inst)
    if 'system_metadata' in expected_attrs:
        instance['system_metadata'] = utils.instance_sys_meta(db_inst)
    if 'fault' in expected_attrs:
        instance['fault'] = (
            objects.InstanceFault.get_latest_for_instance(
                context, instance.uuid))
    if 'numa_topology' in expected_attrs or 'pci_requests' in expected_attrs:
        # BUGFIX: previously db_inst.get('extra').get(...) raised
        # AttributeError when the row had no 'extra' or it was None.
        # Treat a missing/None 'extra' as "no stored value" so the
        # loaders receive None, matching the guarded sibling version of
        # this method.
        extra = db_inst.get('extra') or {}
        if 'numa_topology' in expected_attrs:
            instance._load_numa_topology(extra.get('numa_topology'))
        if 'pci_requests' in expected_attrs:
            instance._load_pci_requests(extra.get('pci_requests'))
    if 'info_cache' in expected_attrs:
        if db_inst['info_cache'] is None:
            instance.info_cache = None
        elif not instance.obj_attr_is_set('info_cache'):
            # TODO(danms): If this ever happens on a backlevel instance
            # passed to us by a backlevel service, things will break
            instance.info_cache = objects.InstanceInfoCache(context)
        if instance.info_cache is not None:
            instance.info_cache._from_db_object(
                context, instance.info_cache, db_inst['info_cache'])
    # TODO(danms): If we are updating these on a backlevel instance,
    # we'll end up sending back new versions of these objects (see
    # above note for new info_caches
    if 'pci_devices' in expected_attrs:
        pci_devices = base.obj_make_list(
            context, objects.PciDeviceList(context),
            objects.PciDevice, db_inst['pci_devices'])
        instance['pci_devices'] = pci_devices
    if 'security_groups' in expected_attrs:
        sec_groups = base.obj_make_list(
            context, objects.SecurityGroupList(context),
            objects.SecurityGroup, db_inst['security_groups'])
        instance['security_groups'] = sec_groups
    if 'tags' in expected_attrs:
        tags = base.obj_make_list(
            context, objects.TagList(context),
            objects.Tag, db_inst['tags'])
        instance['tags'] = tags
    instance.obj_reset_changes()
    return instance