class MigrationContext(base.NovaPersistentObject, base.NovaObject): """Data representing additional resources related to a migration. Some resources cannot be calculated from knowing the flavor alone for the purpose of resources tracking, but need to be persisted at the time the claim was made, for subsequent resource tracking runs to be consistent. MigrationContext objects are created when the claim is done and are there to facilitate resource tracking and final provisioning of the instance on the destination host. """ # Version 1.0: Initial version # Version 1.1: Add old/new pci_devices and pci_requests VERSION = '1.1' fields = { 'instance_uuid': fields.UUIDField(), 'migration_id': fields.IntegerField(), 'new_numa_topology': fields.ObjectField('InstanceNUMATopology', nullable=True), 'old_numa_topology': fields.ObjectField('InstanceNUMATopology', nullable=True), 'new_pci_devices': fields.ObjectField('PciDeviceList', nullable=True), 'old_pci_devices': fields.ObjectField('PciDeviceList', nullable=True), 'new_pci_requests': fields.ObjectField('InstancePCIRequests', nullable=True), 'old_pci_requests': fields.ObjectField('InstancePCIRequests', nullable=True), } @classmethod def obj_make_compatible(cls, primitive, target_version): target_version = versionutils.convert_version_to_tuple(target_version) if target_version < (1, 1): primitive.pop('old_pci_devices', None) primitive.pop('new_pci_devices', None) primitive.pop('old_pci_requests', None) primitive.pop('new_pci_requests', None) @classmethod def obj_from_db_obj(cls, db_obj): primitive = jsonutils.loads(db_obj) return cls.obj_from_primitive(primitive) @base.remotable_classmethod def get_by_instance_uuid(cls, context, instance_uuid): db_extra = db.instance_extra_get_by_instance_uuid( context, instance_uuid, columns=['migration_context']) if not db_extra: raise exception.MigrationContextNotFound( instance_uuid=instance_uuid) if db_extra['migration_context'] is None: return None return cls.obj_from_db_obj(db_extra['migration_context']) def get_pci_mapping_for_migration(self, revert): """Get the mapping between the old PCI devices and the new PCI devices that have been allocated during this migration. The correlation is based on PCI request ID which is unique per PCI devices for SR-IOV ports. :param revert: If True, return a reverse mapping i.e mapping between new PCI devices and old PCI devices. :returns: dictionary of PCI mapping. if revert==False: {'<old pci address>': <New PciDevice>} if revert==True: {'<new pci address>': <Old PciDevice>} """ step = -1 if revert else 1 current_pci_devs, updated_pci_devs = (self.old_pci_devices, self.new_pci_devices)[::step] if current_pci_devs and updated_pci_devs: LOG.debug( "Determining PCI devices mapping using migration " "context: current_pci_devs: %(cur)s, " "updated_pci_devs: %(upd)s", { 'cur': [dev for dev in current_pci_devs], 'upd': [dev for dev in updated_pci_devs] }) return { curr_dev.address: upd_dev for curr_dev in current_pci_devs for upd_dev in updated_pci_devs if curr_dev.request_id == upd_dev.request_id } return {}
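# A minimal, self-contained sketch of the request-id based mapping built by
# get_pci_mapping_for_migration() above. The namedtuple below is a stand-in
# for objects.PciDevice and the addresses/request ids are illustrative only;
# this is not part of the object implementation.
def _example_pci_mapping_for_migration(revert=False):
    import collections
    PciDev = collections.namedtuple('PciDev', ['address', 'request_id'])
    old_devs = [PciDev('0000:04:00.1', 'req-a'), PciDev('0000:04:00.2', 'req-b')]
    new_devs = [PciDev('0000:82:00.1', 'req-a'), PciDev('0000:82:00.2', 'req-b')]
    step = -1 if revert else 1
    current, updated = (old_devs, new_devs)[::step]
    # Correlate devices allocated for the same PCI request id, exactly as the
    # dict comprehension in get_pci_mapping_for_migration() does.
    return {cur.address: upd
            for cur in current
            for upd in updated
            if cur.request_id == upd.request_id}

# _example_pci_mapping_for_migration()['0000:04:00.1'].address
# evaluates to '0000:82:00.1'; with revert=True the keys are the new addresses.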
class RequestGroup(base.NovaObject): """Versioned object based on the unversioned nova.api.openstack.placement.lib.RequestGroup object. """ # Version 1.0: Initial version # Version 1.1: add requester_id and provider_uuids fields # Version 1.2: add in_tree field VERSION = '1.2' fields = { 'use_same_provider': fields.BooleanField(default=True), 'resources': fields.DictOfIntegersField(default={}), 'required_traits': fields.SetOfStringsField(default=set()), 'forbidden_traits': fields.SetOfStringsField(default=set()), # The aggregates field has a form of # [[aggregate_UUID1], # [aggregate_UUID2, aggregate_UUID3]] # meaning that the request should be fulfilled from an RP that is a # member of the aggregate aggregate_UUID1 and member of the aggregate # aggregate_UUID2 or aggregate_UUID3 . 'aggregates': fields.ListOfListsOfStringsField(default=[]), # The entity the request is coming from (e.g. the Neutron port uuid) # which may not always be a UUID. 'requester_id': fields.StringField(nullable=True, default=None), # The resource provider UUIDs that together fulfill the request # NOTE(gibi): this can be more than one if this is the unnumbered # request group (i.e. use_same_provider=False) 'provider_uuids': fields.ListOfUUIDField(default=[]), 'in_tree': fields.UUIDField(nullable=True, default=None), } def __init__(self, context=None, **kwargs): super(RequestGroup, self).__init__(context=context, **kwargs) self.obj_set_defaults() @classmethod def from_port_request(cls, context, port_uuid, port_resource_request): """Init the group from the resource request of a neutron port :param context: the request context :param port_uuid: the port requesting the resources :param port_resource_request: the resource_request attribute of the neutron port For example: port_resource_request = { "resources": { "NET_BW_IGR_KILOBIT_PER_SEC": 1000, "NET_BW_EGR_KILOBIT_PER_SEC": 1000}, "required": ["CUSTOM_PHYSNET_2", "CUSTOM_VNIC_TYPE_NORMAL"] } """ # NOTE(gibi): Assumptions: # * a port requests resource from a single provider. # * a port only specifies resources and required traits # NOTE(gibi): Placement rejects allocation candidates where a request # group has traits but no resources specified. This is why resources # are handled as mandatory below but not traits. obj = cls(context=context, use_same_provider=True, resources=port_resource_request['resources'], required_traits=set(port_resource_request.get( 'required', [])), requester_id=port_uuid) obj.obj_set_defaults() return obj def obj_make_compatible(self, primitive, target_version): super(RequestGroup, self).obj_make_compatible(primitive, target_version) target_version = versionutils.convert_version_to_tuple(target_version) if target_version < (1, 2): if 'in_tree' in primitive: del primitive['in_tree'] if target_version < (1, 1): if 'requester_id' in primitive: del primitive['requester_id'] if 'provider_uuids' in primitive: del primitive['provider_uuids']
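# Illustrative sketch (not part of this module) of how a Neutron port's
# resource_request maps onto the RequestGroup fields consumed by
# from_port_request() above: resources are mandatory, traits are optional,
# and the port UUID becomes the requester_id. The port identifier is a
# hypothetical placeholder.
def _example_group_fields_from_port_request():
    port_uuid = 'example-port-uuid'  # hypothetical; real callers pass the Neutron port UUID
    port_resource_request = {
        "resources": {
            "NET_BW_IGR_KILOBIT_PER_SEC": 1000,
            "NET_BW_EGR_KILOBIT_PER_SEC": 1000},
        "required": ["CUSTOM_PHYSNET_2", "CUSTOM_VNIC_TYPE_NORMAL"]
    }
    return {
        'use_same_provider': True,
        'resources': port_resource_request['resources'],
        'required_traits': set(port_resource_request.get('required', [])),
        'requester_id': port_uuid,
    }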
class ConsoleAuthToken(base.NovaTimestampObject, base.NovaObject): # Version 1.0: Initial version VERSION = '1.0' fields = { 'id': fields.IntegerField(), 'console_type': fields.StringField(nullable=False), 'host': fields.StringField(nullable=False), 'port': fields.IntegerField(nullable=False), 'internal_access_path': fields.StringField(nullable=True), 'instance_uuid': fields.UUIDField(nullable=False), 'access_url_base': fields.StringField(nullable=True), # NOTE(PaulMurray): The unhashed token field is not stored in the # database. A hash of the token is stored instead and is not a # field on the object. 'token': fields.StringField(nullable=False), } @property def access_url(self): """The access url with token parameter. :returns: the access url with credential parameters access_url_base is the base url used to access a console. Adding the unhashed token as a parameter in a query string makes it specific to this authorization. """ if self.obj_attr_is_set('id'): return '%s?token=%s' % (self.access_url_base, self.token) @staticmethod def _from_db_object(context, obj, db_obj): # NOTE(PaulMurray): token is not stored in the database but # this function assumes it is in db_obj. The unhashed token # field is populated in the authorize method after the token # authorization is created in the database. for field in obj.fields: setattr(obj, field, db_obj[field]) obj._context = context obj.obj_reset_changes() return obj @base.remotable def authorize(self, ttl): """Authorise the console token and store in the database. :param ttl: time to live in seconds :returns: an authorized token The expires value is set for ttl seconds in the future and the token hash is stored in the database. This function can only succeed if the token is unique and the object has not already been stored. """ if self.obj_attr_is_set('id'): raise exception.ObjectActionError( action='authorize', reason=_('must be a new object to authorize')) token = uuidutils.generate_uuid() token_hash = utils.get_sha256_str(token) expires = timeutils.utcnow_ts() + ttl updates = self.obj_get_changes() # NOTE(melwitt): token could be in the updates if authorize() has been # called twice on the same object. 'token' is not a database column and # should not be included in the call to create the database record. if 'token' in updates: del updates['token'] updates['token_hash'] = token_hash updates['expires'] = expires try: db_obj = db.console_auth_token_create(self._context, updates) db_obj['token'] = token self._from_db_object(self._context, self, db_obj) except DBDuplicateEntry: # NOTE(PaulMurray) we are generating the token above so this # should almost never happen - but technically its possible raise exception.TokenInUse() LOG.debug( "Authorized token with expiry %(expires)s for console " "connection %(console)s", { 'expires': expires, 'console': strutils.mask_password(self) }) return token @base.remotable_classmethod def validate(cls, context, token): """Validate the token. :param context: the context :param token: the token for the authorization :returns: The ConsoleAuthToken object if valid The token is valid if the token is in the database and the expires time has not passed. 
""" token_hash = utils.get_sha256_str(token) db_obj = db.console_auth_token_get_valid(context, token_hash) if db_obj is not None: db_obj['token'] = token obj = cls._from_db_object(context, cls(), db_obj) LOG.debug("Validated token - console connection is " "%(console)s", {'console': strutils.mask_password(obj)}) return obj else: LOG.debug("Token validation failed") raise exception.InvalidToken(token='***') @base.remotable_classmethod def clean_console_auths_for_instance(cls, context, instance_uuid): """Remove all console authorizations for the instance. :param context: the context :param instance_uuid: the instance to be cleaned All authorizations related to the specified instance will be removed from the database. """ db.console_auth_token_destroy_all_by_instance(context, instance_uuid) @base.remotable_classmethod def clean_expired_console_auths_for_host(cls, context, host): """Remove all expired console authorizations for the host. :param context: the context :param host: the host name All expired authorizations related to the specified host will be removed. Tokens that have not expired will remain. """ db.console_auth_token_destroy_expired_by_host(context, host)
class ResourceProvider(base.NovaObject): # Version 1.0: Initial version # Version 1.1: Add destroy() VERSION = '1.1' fields = { 'id': fields.IntegerField(read_only=True), 'uuid': fields.UUIDField(nullable=False), 'name': fields.StringField(nullable=False), 'generation': fields.IntegerField(nullable=False), } @base.remotable def create(self): if 'id' in self: raise exception.ObjectActionError(action='create', reason='already created') if 'uuid' not in self: raise exception.ObjectActionError(action='create', reason='uuid is required') if 'name' not in self: raise exception.ObjectActionError(action='create', reason='name is required') updates = self.obj_get_changes() db_rp = self._create_in_db(self._context, updates) self._from_db_object(self._context, self, db_rp) @base.remotable def destroy(self): self._delete(self._context, self.id) @base.remotable def save(self): updates = self.obj_get_changes() if updates and updates.keys() != ['name']: raise exception.ObjectActionError( action='save', reason='Immutable fields changed') self._update_in_db(self._context, self.id, updates) @base.remotable_classmethod def get_by_uuid(cls, context, uuid): db_resource_provider = cls._get_by_uuid_from_db(context, uuid) return cls._from_db_object(context, cls(), db_resource_provider) @base.remotable def add_inventory(self, inventory): """Add one new Inventory to the resource provider. Fails if Inventory of the provided resource class is already present. """ _add_inventory(self._context, self, inventory) self.obj_reset_changes() @base.remotable def delete_inventory(self, resource_class): """Delete Inventory of provided resource_class.""" resource_class_id = fields.ResourceClass.index(resource_class) _delete_inventory(self._context, self, resource_class_id) self.obj_reset_changes() @base.remotable def set_inventory(self, inv_list): """Set all resource provider Inventory to be the provided list.""" exceeded = _set_inventory(self._context, self, inv_list) for uuid, rclass in exceeded: LOG.warning( _LW('Resource provider %(uuid)s is now over-' 'capacity for %(resource)s'), { 'uuid': uuid, 'resource': rclass }) self.obj_reset_changes() @base.remotable def update_inventory(self, inventory): """Update one existing Inventory of the same resource class. Fails if no Inventory of the same class is present. """ exceeded = _update_inventory(self._context, self, inventory) for uuid, rclass in exceeded: LOG.warning( _LW('Resource provider %(uuid)s is now over-' 'capacity for %(resource)s'), { 'uuid': uuid, 'resource': rclass }) self.obj_reset_changes() @staticmethod @db_api.api_context_manager.writer def _create_in_db(context, updates): db_rp = models.ResourceProvider() db_rp.update(updates) context.session.add(db_rp) return db_rp @staticmethod @db_api.api_context_manager.writer def _delete(context, _id): # Don't delete the resource provider if it has allocations. 
        rp_allocations = context.session.query(models.Allocation).\
            filter(models.Allocation.resource_provider_id == _id).\
            count()
        if rp_allocations:
            raise exception.ResourceProviderInUse()
        # Delete any inventory associated with the resource provider
        context.session.query(models.Inventory).\
            filter(models.Inventory.resource_provider_id == _id).delete()
        result = context.session.query(models.ResourceProvider).\
            filter(models.ResourceProvider.id == _id).delete()
        if not result:
            raise exception.NotFound()

    @staticmethod
    @db_api.api_context_manager.writer
    def _update_in_db(context, id, updates):
        db_rp = context.session.query(
            models.ResourceProvider).filter_by(id=id).first()
        db_rp.update(updates)
        db_rp.save(context.session)

    @staticmethod
    def _from_db_object(context, resource_provider, db_resource_provider):
        for field in resource_provider.fields:
            setattr(resource_provider, field, db_resource_provider[field])
        resource_provider._context = context
        resource_provider.obj_reset_changes()
        return resource_provider

    @staticmethod
    @db_api.api_context_manager.reader
    def _get_by_uuid_from_db(context, uuid):
        result = context.session.query(
            models.ResourceProvider).filter_by(uuid=uuid).first()
        if not result:
            raise exception.NotFound()
        return result
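# Illustrative sketch (not part of this module) of the update rule enforced by
# ResourceProvider.save() above: uuid and generation are treated as immutable,
# so a save is only accepted when the pending changes touch nothing but 'name'.
def _example_resource_provider_save_rule(changes):
    allowed = {'name'}
    rejected = set(changes) - allowed
    if changes and rejected:
        raise ValueError('Immutable fields changed: %s' % sorted(rejected))
    return changes

# _example_resource_provider_save_rule({'name': 'compute-2'}) is accepted;
# _example_resource_provider_save_rule({'generation': 5}) raises ValueError.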
class FixedIP(obj_base.NovaPersistentObject, obj_base.NovaObject, obj_base.NovaObjectDictCompat): # Version 1.0: Initial version # Version 1.1: Added virtual_interface field # Version 1.2: Instance version 1.14 # Version 1.3: Instance 1.15 # Version 1.4: Added default_route field # Version 1.5: Added floating_ips field # Version 1.6: Instance 1.16 # Version 1.7: Instance 1.17 # Version 1.8: Instance 1.18 VERSION = '1.8' fields = { 'id': fields.IntegerField(), 'address': fields.IPV4AndV6AddressField(), 'network_id': fields.IntegerField(nullable=True), 'virtual_interface_id': fields.IntegerField(nullable=True), 'instance_uuid': fields.UUIDField(nullable=True), 'allocated': fields.BooleanField(), 'leased': fields.BooleanField(), 'reserved': fields.BooleanField(), 'host': fields.StringField(nullable=True), 'default_route': fields.BooleanField(), 'instance': fields.ObjectField('Instance', nullable=True), 'network': fields.ObjectField('Network', nullable=True), 'virtual_interface': fields.ObjectField('VirtualInterface', nullable=True), # NOTE(danms): This should not ever be made lazy-loadable # because it would create a bit of a loop between FixedIP # and FloatingIP 'floating_ips': fields.ObjectField('FloatingIPList'), } obj_relationships = { 'instance': [('1.0', '1.13'), ('1.2', '1.14'), ('1.3', '1.15'), ('1.6', '1.16'), ('1.7', '1.17'), ('1.8', '1.18')], 'network': [('1.0', '1.2')], 'virtual_interface': [('1.1', '1.0')], 'floating_ips': [('1.5', '1.7')], } def obj_make_compatible(self, primitive, target_version): super(FixedIP, self).obj_make_compatible(primitive, target_version) target_version = utils.convert_version_to_tuple(target_version) if target_version < (1, 4) and 'default_route' in primitive: del primitive['default_route'] @staticmethod def _from_db_object(context, fixedip, db_fixedip, expected_attrs=None): if expected_attrs is None: expected_attrs = [] for field in fixedip.fields: if field == 'default_route': # NOTE(danms): This field is only set when doing a # FixedIPList.get_by_network() because it's a relatively # special-case thing, so skip it here continue if field not in FIXED_IP_OPTIONAL_ATTRS: fixedip[field] = db_fixedip[field] # NOTE(danms): Instance could be deleted, and thus None if 'instance' in expected_attrs: fixedip.instance = objects.Instance._from_db_object( context, objects.Instance(context), db_fixedip['instance']) if db_fixedip['instance'] else None if 'network' in expected_attrs: fixedip.network = objects.Network._from_db_object( context, objects.Network(context), db_fixedip['network']) if db_fixedip['network'] else None if 'virtual_interface' in expected_attrs: db_vif = db_fixedip['virtual_interface'] vif = objects.VirtualInterface._from_db_object( context, objects.VirtualInterface(context), db_fixedip['virtual_interface']) if db_vif else None fixedip.virtual_interface = vif if 'floating_ips' in expected_attrs: fixedip.floating_ips = obj_base.obj_make_list( context, objects.FloatingIPList(context), objects.FloatingIP, db_fixedip['floating_ips']) fixedip._context = context fixedip.obj_reset_changes() return fixedip @obj_base.remotable_classmethod def get_by_id(cls, context, id, expected_attrs=None): if expected_attrs is None: expected_attrs = [] get_network = 'network' in expected_attrs db_fixedip = db.fixed_ip_get(context, id, get_network=get_network) return cls._from_db_object(context, cls(context), db_fixedip, expected_attrs) @obj_base.remotable_classmethod def get_by_address(cls, context, address, expected_attrs=None): if expected_attrs is None: 
expected_attrs = [] db_fixedip = db.fixed_ip_get_by_address(context, str(address), columns_to_join=expected_attrs) return cls._from_db_object(context, cls(context), db_fixedip, expected_attrs) @obj_base.remotable_classmethod def get_by_floating_address(cls, context, address): db_fixedip = db.fixed_ip_get_by_floating_address(context, str(address)) if db_fixedip is not None: return cls._from_db_object(context, cls(context), db_fixedip) @obj_base.remotable_classmethod def get_by_network_and_host(cls, context, network_id, host): db_fixedip = db.fixed_ip_get_by_network_host(context, network_id, host) return cls._from_db_object(context, cls(context), db_fixedip) @obj_base.remotable_classmethod def associate(cls, context, address, instance_uuid, network_id=None, reserved=False): db_fixedip = db.fixed_ip_associate(context, address, instance_uuid, network_id=network_id, reserved=reserved) return cls._from_db_object(context, cls(context), db_fixedip) @obj_base.remotable_classmethod def associate_pool(cls, context, network_id, instance_uuid=None, host=None): db_fixedip = db.fixed_ip_associate_pool(context, network_id, instance_uuid=instance_uuid, host=host) return cls._from_db_object(context, cls(context), db_fixedip) @obj_base.remotable_classmethod def disassociate_by_address(cls, context, address): db.fixed_ip_disassociate(context, address) @obj_base.remotable_classmethod def _disassociate_all_by_timeout(cls, context, host, time_str): time = timeutils.parse_isotime(time_str) return db.fixed_ip_disassociate_all_by_timeout(context, host, time) @classmethod def disassociate_all_by_timeout(cls, context, host, time): return cls._disassociate_all_by_timeout(context, host, timeutils.isotime(time)) @obj_base.remotable def create(self, context): updates = self.obj_get_changes() if 'id' in updates: raise exception.ObjectActionError(action='create', reason='already created') if 'address' in updates: updates['address'] = str(updates['address']) db_fixedip = db.fixed_ip_create(context, updates) self._from_db_object(context, self, db_fixedip) @obj_base.remotable def save(self, context): updates = self.obj_get_changes() if 'address' in updates: raise exception.ObjectActionError(action='save', reason='address is not mutable') db.fixed_ip_update(context, str(self.address), updates) self.obj_reset_changes() @obj_base.remotable def disassociate(self, context): db.fixed_ip_disassociate(context, str(self.address)) self.instance_uuid = None self.instance = None self.obj_reset_changes(['instance_uuid', 'instance'])
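# Self-contained sketch of why disassociate_all_by_timeout() above serializes
# the datetime before calling the remotable method: RPC arguments have to be
# primitives, so the timestamp crosses the wire as an ISO 8601 string and is
# parsed back on the other side. Assumes the timeutils helpers behave as the
# calls in this object imply; illustrative only.
def _example_isotime_round_trip():
    from oslo_utils import timeutils
    now = timeutils.utcnow()
    wire_value = timeutils.isotime(now)           # primitive, RPC-safe string
    parsed = timeutils.parse_isotime(wire_value)  # a datetime again on the far side
    return wire_value, parsed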
class InstanceInfoCache(base.NovaPersistentObject, base.NovaObject, base.NovaObjectDictCompat): # Version 1.0: Initial version # Version 1.1: Converted network_info to store the model. # Version 1.2: Added new() and update_cells kwarg to save(). # Version 1.3: Added delete() # Version 1.4: String attributes updated to support unicode # Version 1.5: Actually set the deleted, created_at, updated_at, and # deleted_at attributes VERSION = '1.5' fields = { 'instance_uuid': fields.UUIDField(), 'network_info': fields.Field(fields.NetworkModel(), nullable=True), } @staticmethod def _from_db_object(context, info_cache, db_obj): for field in info_cache.fields: info_cache[field] = db_obj[field] info_cache.obj_reset_changes() info_cache._context = context return info_cache @classmethod def new(cls, context, instance_uuid): """Create an InfoCache object that can be used to create the DB entry for the first time. When save()ing this object, the info_cache_update() DB call will properly handle creating it if it doesn't exist already. """ info_cache = cls() info_cache.instance_uuid = instance_uuid info_cache.network_info = None info_cache._context = context # Leave the fields dirty return info_cache @base.remotable_classmethod def get_by_instance_uuid(cls, context, instance_uuid): db_obj = db.instance_info_cache_get(context, instance_uuid) if not db_obj: raise exception.InstanceInfoCacheNotFound( instance_uuid=instance_uuid) return cls._from_db_object(context, cls(context), db_obj) @staticmethod def _info_cache_cells_update(ctxt, info_cache): cell_type = cells_opts.get_cell_type() if cell_type != 'compute': return cells_api = cells_rpcapi.CellsAPI() try: cells_api.instance_info_cache_update_at_top(ctxt, info_cache) except Exception: LOG.exception(_LE("Failed to notify cells of instance info " "cache update")) @base.remotable def save(self, update_cells=True): if 'network_info' in self.obj_what_changed(): if update_cells: stale_instance = self.obj_clone() nw_info_json = self.fields['network_info'].to_primitive( self, 'network_info', self.network_info) rv = db.instance_info_cache_update(self._context, self.instance_uuid, {'network_info': nw_info_json}) self._from_db_object(self._context, self, rv) if update_cells: # Send a copy of ourselves before updates are applied so # that cells can tell what changed. self._info_cache_cells_update(self._context, stale_instance) self.obj_reset_changes() @base.remotable def delete(self): db.instance_info_cache_delete(self._context, self.instance_uuid) @base.remotable def refresh(self): current = self.__class__.get_by_instance_uuid(self._context, self.instance_uuid) current._context = None for field in self.fields: if self.obj_attr_is_set(field) and self[field] != current[field]: self[field] = current[field] self.obj_reset_changes()
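# Illustrative sketch (not part of this module) of the ordering used in
# InstanceInfoCache.save() above: a copy of the object is taken *before* the
# database update is applied, so the cells layer can be told what the cache
# looked like previously and work out what changed.
def _example_snapshot_before_update():
    import copy
    cache = {'instance_uuid': 'example-uuid', 'network_info': '[]'}
    stale = copy.deepcopy(cache)                   # equivalent of obj_clone()
    cache['network_info'] = '[{"id": "vif-1"}]'    # equivalent of the DB update
    return stale['network_info'], cache['network_info']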
class VirtualInterface(base.NovaPersistentObject, base.NovaObject): # Version 1.0: Initial version # Version 1.1: Add tag field # Version 1.2: Adding a save method # Version 1.3: Added destroy() method VERSION = '1.3' fields = { 'id': fields.IntegerField(), # This is a MAC address. 'address': fields.StringField(nullable=True), 'network_id': fields.IntegerField(), 'instance_uuid': fields.UUIDField(), 'uuid': fields.UUIDField(), 'tag': fields.StringField(nullable=True), } def obj_make_compatible(self, primitive, target_version): target_version = versionutils.convert_version_to_tuple(target_version) if target_version < (1, 1) and 'tag' in primitive: del primitive['tag'] @staticmethod def _from_db_object(context, vif, db_vif): for field in vif.fields: if not db_vif[field] and field in VIF_OPTIONAL_FIELDS: continue else: setattr(vif, field, db_vif[field]) # NOTE(danms): The neutronv2 module namespaces mac addresses # with port id to avoid uniqueness constraints currently on # our table. Strip that out here so nobody else needs to care. if 'address' in vif and '/' in vif.address: vif.address, _ = vif.address.split('/', 1) vif._context = context vif.obj_reset_changes() return vif @base.remotable_classmethod def get_by_id(cls, context, vif_id): db_vif = db.virtual_interface_get(context, vif_id) if db_vif: return cls._from_db_object(context, cls(), db_vif) @base.remotable_classmethod def get_by_uuid(cls, context, vif_uuid): db_vif = db.virtual_interface_get_by_uuid(context, vif_uuid) if db_vif: return cls._from_db_object(context, cls(), db_vif) @base.remotable_classmethod def get_by_address(cls, context, address): db_vif = db.virtual_interface_get_by_address(context, address) if db_vif: return cls._from_db_object(context, cls(), db_vif) @base.remotable_classmethod def get_by_instance_and_network(cls, context, instance_uuid, network_id): db_vif = db.virtual_interface_get_by_instance_and_network(context, instance_uuid, network_id) if db_vif: return cls._from_db_object(context, cls(), db_vif) @base.remotable def create(self): if self.obj_attr_is_set('id'): raise exception.ObjectActionError(action='create', reason='already created') updates = self.obj_get_changes() db_vif = db.virtual_interface_create(self._context, updates) self._from_db_object(self._context, self, db_vif) @base.remotable def save(self): updates = self.obj_get_changes() if 'address' in updates: raise exception.ObjectActionError(action='save', reason='address is not mutable') db_vif = db.virtual_interface_update(self._context, self.address, updates) return self._from_db_object(self._context, self, db_vif) @base.remotable_classmethod def delete_by_instance_uuid(cls, context, instance_uuid): db.virtual_interface_delete_by_instance(context, instance_uuid) @base.remotable def destroy(self): db.virtual_interface_delete(self._context, self.id)
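# Self-contained sketch of the address un-namespacing done in
# _from_db_object() above: the neutronv2 code stores "<mac>/<port id>" to work
# around the uniqueness constraint on the address column, and the object
# strips the suffix before exposing the value. The values are illustrative.
def _example_strip_namespaced_mac():
    stored = 'fa:16:3e:11:22:33/8c9851f5-example-port-id'
    address = stored
    if '/' in address:
        address, _port_id = address.split('/', 1)
    return address   # 'fa:16:3e:11:22:33'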
class InstancePayload(base.NotificationPayloadBase): SCHEMA = { 'uuid': ('instance', 'uuid'), 'user_id': ('instance', 'user_id'), 'tenant_id': ('instance', 'project_id'), 'reservation_id': ('instance', 'reservation_id'), 'display_name': ('instance', 'display_name'), 'host_name': ('instance', 'hostname'), 'host': ('instance', 'host'), 'node': ('instance', 'node'), 'os_type': ('instance', 'os_type'), 'architecture': ('instance', 'architecture'), 'availability_zone': ('instance', 'availability_zone'), 'image_uuid': ('instance', 'image_ref'), 'kernel_id': ('instance', 'kernel_id'), 'ramdisk_id': ('instance', 'ramdisk_id'), 'created_at': ('instance', 'created_at'), 'launched_at': ('instance', 'launched_at'), 'terminated_at': ('instance', 'terminated_at'), 'deleted_at': ('instance', 'deleted_at'), 'state': ('instance', 'vm_state'), 'power_state': ('instance', 'power_state'), 'task_state': ('instance', 'task_state'), 'progress': ('instance', 'progress'), 'metadata': ('instance', 'metadata'), } # Version 1.0: Initial version VERSION = '1.0' fields = { 'uuid': fields.UUIDField(), 'user_id': fields.StringField(nullable=True), 'tenant_id': fields.StringField(nullable=True), 'reservation_id': fields.StringField(nullable=True), 'display_name': fields.StringField(nullable=True), 'host_name': fields.StringField(nullable=True), 'host': fields.StringField(nullable=True), 'node': fields.StringField(nullable=True), 'os_type': fields.StringField(nullable=True), 'architecture': fields.StringField(nullable=True), 'availability_zone': fields.StringField(nullable=True), 'flavor': fields.ObjectField('FlavorPayload'), 'image_uuid': fields.StringField(nullable=True), 'kernel_id': fields.StringField(nullable=True), 'ramdisk_id': fields.StringField(nullable=True), 'created_at': fields.DateTimeField(nullable=True), 'launched_at': fields.DateTimeField(nullable=True), 'terminated_at': fields.DateTimeField(nullable=True), 'deleted_at': fields.DateTimeField(nullable=True), 'state': fields.InstanceStateField(nullable=True), 'power_state': fields.InstancePowerStateField(nullable=True), 'task_state': fields.InstanceTaskStateField(nullable=True), 'progress': fields.IntegerField(nullable=True), 'ip_addresses': fields.ListOfObjectsField('IpPayload'), 'metadata': fields.DictOfStringsField(), } def __init__(self, instance, **kwargs): super(InstancePayload, self).__init__(**kwargs) self.populate_schema(instance=instance)
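# Illustrative, self-contained sketch (not the real NotificationPayloadBase
# implementation) of how a SCHEMA like the one above is consumed by
# populate_schema(): each payload field is filled from the named attribute of
# the named source object. The FakeInstance values are made up.
def _example_populate_from_schema():
    class FakeInstance(object):
        uuid = 'example-uuid'
        project_id = 'example-project'
        hostname = 'vm-1'

    schema = {
        'uuid': ('instance', 'uuid'),
        'tenant_id': ('instance', 'project_id'),
        'host_name': ('instance', 'hostname'),
    }
    sources = {'instance': FakeInstance()}
    return {field: getattr(sources[obj], attr)
            for field, (obj, attr) in schema.items()}

# Returns {'uuid': 'example-uuid', 'tenant_id': 'example-project',
#          'host_name': 'vm-1'}.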
class InstanceGroup(base.NovaPersistentObject, base.NovaObject, base.NovaObjectDictCompat): # Version 1.0: Initial version # Version 1.1: String attributes updated to support unicode # Version 1.2: Use list/dict helpers for policies, metadetails, members # Version 1.3: Make uuid a non-None real string # Version 1.4: Add add_members() # Version 1.5: Add get_hosts() # Version 1.6: Add get_by_name() # Version 1.7: Deprecate metadetails # Version 1.8: Add count_members_by_user() # Version 1.9: Add get_by_instance_uuid() # Version 1.10: Add hosts field VERSION = '1.10' fields = { 'id': fields.IntegerField(), 'user_id': fields.StringField(nullable=True), 'project_id': fields.StringField(nullable=True), 'uuid': fields.UUIDField(), 'name': fields.StringField(nullable=True), 'policies': fields.ListOfStringsField(nullable=True), 'members': fields.ListOfStringsField(nullable=True), 'hosts': fields.ListOfStringsField(nullable=True), } def obj_make_compatible(self, primitive, target_version): target_version = versionutils.convert_version_to_tuple(target_version) if target_version < (1, 7): # NOTE(danms): Before 1.7, we had an always-empty # metadetails property primitive['metadetails'] = {} @staticmethod def _from_db_object(context, instance_group, db_inst): """Method to help with migration to objects. Converts a database entity to a formal object. """ # Most of the field names match right now, so be quick for field in instance_group.fields: if field in LAZY_LOAD_FIELDS: continue # This is needed to handle db models from both the api # database and the main database. In the migration to # the api database, we have removed soft-delete, so # the object fields for delete must be filled in with # default values for db models from the api database. ignore = {'deleted': False, 'deleted_at': None} if field in ignore and not hasattr(db_inst, field): instance_group[field] = ignore[field] else: instance_group[field] = db_inst[field] instance_group._context = context instance_group.obj_reset_changes() return instance_group @staticmethod @db_api.api_context_manager.reader def _get_from_db_by_uuid(context, uuid): grp = _instance_group_get_query(context, id_field=api_models.InstanceGroup.uuid, id=uuid).first() if not grp: raise exception.InstanceGroupNotFound(group_uuid=uuid) return grp @staticmethod @db_api.api_context_manager.reader def _get_from_db_by_id(context, id): grp = _instance_group_get_query(context, id_field=api_models.InstanceGroup.id, id=id).first() if not grp: raise exception.InstanceGroupNotFound(group_uuid=id) return grp @staticmethod @db_api.api_context_manager.reader def _get_from_db_by_name(context, name): grp = _instance_group_get_query(context).filter_by(name=name).first() if not grp: raise exception.InstanceGroupNotFound(group_uuid=name) return grp @staticmethod @db_api.api_context_manager.reader def _get_from_db_by_instance(context, instance_uuid): grp_member = context.session.query(api_models.InstanceGroupMember).\ filter_by(instance_uuid=instance_uuid).first() if not grp_member: raise exception.InstanceGroupNotFound(group_uuid='') grp = InstanceGroup._get_from_db_by_id(context, grp_member.group_id) return grp @staticmethod @db_api.api_context_manager.writer def _save_in_db(context, group_uuid, values): grp = _instance_group_get_query(context, id_field=api_models.InstanceGroup.uuid, id=group_uuid).first() if not grp: raise exception.InstanceGroupNotFound(group_uuid=group_uuid) values_copy = copy.copy(values) policies = values_copy.pop('policies', None) members = values_copy.pop('members', 
None) grp.update(values_copy) if policies is not None: _instance_group_policies_add(context, grp, policies) if members is not None: _instance_group_members_add(context, grp, members) return grp @staticmethod @db_api.api_context_manager.writer def _create_in_db(context, values, policies=None, members=None): try: group = api_models.InstanceGroup() group.update(values) group.save(context.session) except db_exc.DBDuplicateEntry: raise exception.InstanceGroupIdExists(group_uuid=values['uuid']) if policies: group._policies = _instance_group_policies_add( context, group, policies) else: group._policies = [] if members: group._members = _instance_group_members_add( context, group, members) else: group._members = [] return group @staticmethod @db_api.api_context_manager.writer def _destroy_in_db(context, group_uuid): qry = _instance_group_get_query(context, id_field=api_models.InstanceGroup.uuid, id=group_uuid) if qry.count() == 0: raise exception.InstanceGroupNotFound(group_uuid=group_uuid) # Delete policies and members group_id = qry.first().id instance_models = [ api_models.InstanceGroupPolicy, api_models.InstanceGroupMember ] for model in instance_models: context.session.query(model).filter_by(group_id=group_id).delete() qry.delete() @staticmethod @db_api.api_context_manager.writer def _add_members_in_db(context, group_uuid, members): return _instance_group_members_add_by_uuid(context, group_uuid, members) @staticmethod @db_api.api_context_manager.writer def _remove_members_in_db(context, group_id, instance_uuids): # There is no public method provided for removing members because the # user-facing API doesn't allow removal of instance group members. We # need to be able to remove members to address quota races. context.session.query(api_models.InstanceGroupMember).\ filter_by(group_id=group_id).\ filter(api_models.InstanceGroupMember.instance_uuid. 
in_(set(instance_uuids))).\ delete(synchronize_session=False) def obj_load_attr(self, attrname): # NOTE(sbauza): Only hosts could be lazy-loaded right now if attrname != 'hosts': raise exception.ObjectActionError(action='obj_load_attr', reason='unable to load %s' % attrname) self.hosts = self.get_hosts() self.obj_reset_changes(['hosts']) @base.remotable_classmethod def get_by_uuid(cls, context, uuid): db_group = None try: db_group = cls._get_from_db_by_uuid(context, uuid) except exception.InstanceGroupNotFound: pass if db_group is None: db_group = db.instance_group_get(context, uuid) return cls._from_db_object(context, cls(), db_group) @base.remotable_classmethod def get_by_name(cls, context, name): try: db_group = cls._get_from_db_by_name(context, name) except exception.InstanceGroupNotFound: igs = InstanceGroupList._get_main_by_project_id( context, context.project_id) for ig in igs: if ig.name == name: return ig raise exception.InstanceGroupNotFound(group_uuid=name) return cls._from_db_object(context, cls(), db_group) @base.remotable_classmethod def get_by_instance_uuid(cls, context, instance_uuid): db_group = None try: db_group = cls._get_from_db_by_instance(context, instance_uuid) except exception.InstanceGroupNotFound: pass if db_group is None: db_group = db.instance_group_get_by_instance( context, instance_uuid) return cls._from_db_object(context, cls(), db_group) @classmethod def get_by_hint(cls, context, hint): if uuidutils.is_uuid_like(hint): return cls.get_by_uuid(context, hint) else: return cls.get_by_name(context, hint) @base.remotable def save(self): """Save updates to this instance group.""" updates = self.obj_get_changes() # NOTE(sbauza): We do NOT save the set of compute nodes that an # instance group is connected to in this method. Instance groups are # implicitly connected to compute nodes when the # InstanceGroup.add_members() method is called, which adds the mapping # table entries. # So, since the only way to have hosts in the updates is to set that # field explicitly, we prefer to raise an Exception so the developer # knows he has to call obj_reset_changes(['hosts']) right after setting # the field. if 'hosts' in updates: raise exception.InstanceGroupSaveException(field='hosts') if not updates: return payload = dict(updates) payload['server_group_id'] = self.uuid try: db_group = self._save_in_db(self._context, self.uuid, updates) except exception.InstanceGroupNotFound: db.instance_group_update(self._context, self.uuid, updates) db_group = db.instance_group_get(self._context, self.uuid) self._from_db_object(self._context, self, db_group) compute_utils.notify_about_server_group_update(self._context, "update", payload) @base.remotable def refresh(self): """Refreshes the instance group.""" current = self.__class__.get_by_uuid(self._context, self.uuid) for field in self.fields: if self.obj_attr_is_set(field) and self[field] != current[field]: self[field] = current[field] self.obj_reset_changes() def _create(self, skipcheck=False): # NOTE(danms): This is just for the migration routine, and # can be removed once we're no longer supporting the migration # of instance groups from the main to api database. 
if self.obj_attr_is_set('id'): raise exception.ObjectActionError(action='create', reason='already created') updates = self.obj_get_changes() payload = dict(updates) updates.pop('id', None) policies = updates.pop('policies', None) members = updates.pop('members', None) if 'uuid' not in updates: self.uuid = uuidutils.generate_uuid() updates['uuid'] = self.uuid if not skipcheck: try: db.instance_group_get(self._context, self.uuid) raise exception.ObjectActionError( action='create', reason='already created in main') except exception.InstanceGroupNotFound: pass db_group = self._create_in_db(self._context, updates, policies=policies, members=members) self._from_db_object(self._context, self, db_group) payload['server_group_id'] = self.uuid compute_utils.notify_about_server_group_update(self._context, "create", payload) compute_utils.notify_about_server_group_action( context=self._context, group=self, action=fields.NotificationAction.CREATE) @base.remotable def create(self): self._create() @base.remotable def destroy(self): payload = {'server_group_id': self.uuid} try: self._destroy_in_db(self._context, self.uuid) except exception.InstanceGroupNotFound: db.instance_group_delete(self._context, self.uuid) self.obj_reset_changes() compute_utils.notify_about_server_group_update(self._context, "delete", payload) compute_utils.notify_about_server_group_action( context=self._context, group=self, action=fields.NotificationAction.DELETE) @base.remotable_classmethod def add_members(cls, context, group_uuid, instance_uuids): payload = { 'server_group_id': group_uuid, 'instance_uuids': instance_uuids } try: members = cls._add_members_in_db(context, group_uuid, instance_uuids) members = [member['instance_uuid'] for member in members] except exception.InstanceGroupNotFound: members = db.instance_group_members_add(context, group_uuid, instance_uuids) compute_utils.notify_about_server_group_update(context, "addmember", payload) return list(members) @base.remotable def get_hosts(self, exclude=None): """Get a list of hosts for non-deleted instances in the group This method allows you to get a list of the hosts where instances in this group are currently running. There's also an option to exclude certain instance UUIDs from this calculation. """ filter_uuids = self.members if exclude: filter_uuids = set(filter_uuids) - set(exclude) filters = {'uuid': filter_uuids, 'deleted': False} instances = objects.InstanceList.get_by_filters(self._context, filters=filters) return list( set([instance.host for instance in instances if instance.host])) @base.remotable def count_members_by_user(self, user_id): """Count the number of instances in a group belonging to a user.""" filter_uuids = self.members filters = {'uuid': filter_uuids, 'user_id': user_id, 'deleted': False} instances = objects.InstanceList.get_by_filters(self._context, filters=filters) return len(instances)
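# Illustrative sketch (not part of this module) of the member filtering done
# by get_hosts() above: excluded instance UUIDs are removed from the member
# list before the remaining instances' hosts are deduplicated.
def _example_group_hosts(members, hosts_by_uuid, exclude=None):
    filter_uuids = set(members) - set(exclude or [])
    return sorted({hosts_by_uuid[uuid] for uuid in filter_uuids
                   if hosts_by_uuid.get(uuid)})

# _example_group_hosts(['i-1', 'i-2', 'i-3'],
#                      {'i-1': 'node-a', 'i-2': 'node-b', 'i-3': 'node-a'},
#                      exclude=['i-2'])  ->  ['node-a']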
class Network(obj_base.NovaPersistentObject, obj_base.NovaObject, obj_base.NovaObjectDictCompat): # Version 1.0: Initial version # Version 1.1: Added in_use_on_host() # Version 1.2: Added mtu, dhcp_server, enable_dhcp, share_address VERSION = '1.2' fields = { 'id': fields.IntegerField(), 'label': fields.StringField(), 'injected': fields.BooleanField(), 'cidr': fields.IPV4NetworkField(nullable=True), 'cidr_v6': fields.IPV6NetworkField(nullable=True), 'multi_host': fields.BooleanField(), 'netmask': fields.IPV4AddressField(nullable=True), 'gateway': fields.IPV4AddressField(nullable=True), 'broadcast': fields.IPV4AddressField(nullable=True), 'netmask_v6': fields.IPV6AddressField(nullable=True), 'gateway_v6': fields.IPV6AddressField(nullable=True), 'bridge': fields.StringField(nullable=True), 'bridge_interface': fields.StringField(nullable=True), 'dns1': fields.IPAddressField(nullable=True), 'dns2': fields.IPAddressField(nullable=True), 'vlan': fields.IntegerField(nullable=True), 'vpn_public_address': fields.IPAddressField(nullable=True), 'vpn_public_port': fields.IntegerField(nullable=True), 'vpn_private_address': fields.IPAddressField(nullable=True), 'dhcp_start': fields.IPV4AddressField(nullable=True), 'rxtx_base': fields.IntegerField(nullable=True), 'project_id': fields.UUIDField(nullable=True), 'priority': fields.IntegerField(nullable=True), 'host': fields.StringField(nullable=True), 'uuid': fields.UUIDField(), 'mtu': fields.IntegerField(nullable=True), 'dhcp_server': fields.IPAddressField(nullable=True), 'enable_dhcp': fields.BooleanField(), 'share_address': fields.BooleanField(), } @staticmethod def _convert_legacy_ipv6_netmask(netmask): """Handle netmask_v6 possibilities from the database. Historically, this was stored as just an integral CIDR prefix, but in the future it should be stored as an actual netmask. Be tolerant of either here. 
""" try: prefix = int(netmask) return netaddr.IPNetwork('1::/%i' % prefix).netmask except ValueError: pass try: return netaddr.IPNetwork(netmask).netmask except netaddr.AddrFormatError: raise ValueError( _('IPv6 netmask "%s" must be a netmask ' 'or integral prefix') % netmask) def obj_make_compatible(self, primitive, target_version): target_version = utils.convert_version_to_tuple(target_version) if target_version < (1, 2): if 'mtu' in primitive: del primitive['mtu'] if 'enable_dhcp' in primitive: del primitive['enable_dhcp'] if 'dhcp_server' in primitive: del primitive['dhcp_server'] if 'share_address' in primitive: del primitive['share_address'] @staticmethod def _from_db_object(context, network, db_network): for field in network.fields: db_value = db_network[field] if field is 'netmask_v6' and db_value is not None: db_value = network._convert_legacy_ipv6_netmask(db_value) if field is 'mtu' and db_value is None: db_value = CONF.network_device_mtu if field is 'dhcp_server' and db_value is None: db_value = db_network['gateway'] if field is 'share_address' and CONF.share_dhcp_address: db_value = CONF.share_dhcp_address network[field] = db_value network._context = context network.obj_reset_changes() return network @obj_base.remotable_classmethod def get_by_id(cls, context, network_id, project_only='allow_none'): db_network = db.network_get(context, network_id, project_only=project_only) return cls._from_db_object(context, cls(), db_network) @obj_base.remotable_classmethod def get_by_uuid(cls, context, network_uuid): db_network = db.network_get_by_uuid(context, network_uuid) return cls._from_db_object(context, cls(), db_network) @obj_base.remotable_classmethod def get_by_cidr(cls, context, cidr): db_network = db.network_get_by_cidr(context, cidr) return cls._from_db_object(context, cls(), db_network) @obj_base.remotable_classmethod def associate(cls, context, project_id, network_id=None, force=False): db.network_associate(context, project_id, network_id=network_id, force=force) @obj_base.remotable_classmethod def disassociate(cls, context, network_id, host=False, project=False): db.network_disassociate(context, network_id, host, project) @obj_base.remotable_classmethod def in_use_on_host(cls, context, network_id, host): return db.network_in_use_on_host(context, network_id, host) def _get_primitive_changes(self): changes = {} for key, value in self.obj_get_changes().items(): if isinstance(value, netaddr.IPAddress): changes[key] = str(value) else: changes[key] = value return changes @obj_base.remotable def create(self, context): updates = self._get_primitive_changes() if 'id' in updates: raise exception.ObjectActionError(action='create', reason='already created') db_network = db.network_create_safe(context, updates) self._from_db_object(context, self, db_network) @obj_base.remotable def destroy(self, context): db.network_delete_safe(context, self.id) self.deleted = True self.obj_reset_changes(['deleted']) @obj_base.remotable def save(self, context): updates = self._get_primitive_changes() if 'netmask_v6' in updates: # NOTE(danms): For some reason, historical code stores the # IPv6 netmask as just the CIDR mask length, so convert that # back here before saving for now. 
            updates['netmask_v6'] = netaddr.IPNetwork(
                updates['netmask_v6']).netmask
        set_host = 'host' in updates
        if set_host:
            db.network_set_host(context, self.id, updates.pop('host'))
        if updates:
            db_network = db.network_update(context, self.id, updates)
        elif set_host:
            db_network = db.network_get(context, self.id)
        else:
            db_network = None
        if db_network is not None:
            self._from_db_object(context, self, db_network)
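# Self-contained sketch of the legacy netmask_v6 encoding tolerated by
# _convert_legacy_ipv6_netmask() above: historical rows store only the integral
# CIDR prefix, which is expanded to a real netmask via netaddr.
def _example_convert_legacy_prefix():
    import netaddr
    prefix = int('64')   # legacy rows store just the prefix length as a string
    netmask = netaddr.IPNetwork('1::/%i' % prefix).netmask
    return str(netmask)  # 'ffff:ffff:ffff:ffff::'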
class InstancePCIRequests(base.NovaObject): # Version 1.0: Initial version # Version 1.1: InstancePCIRequest 1.1 VERSION = '1.1' fields = { 'instance_uuid': fields.UUIDField(), 'requests': fields.ListOfObjectsField('InstancePCIRequest'), } @classmethod def obj_from_db(cls, context, instance_uuid, db_requests): self = cls(context=context, requests=[], instance_uuid=instance_uuid) if db_requests is not None: requests = jsonutils.loads(db_requests) else: requests = [] for request in requests: # Note(moshele): is_new is deprecated and therefore we load it # with default value of False request_obj = InstancePCIRequest( count=request['count'], spec=request['spec'], alias_name=request['alias_name'], is_new=False, numa_policy=request.get('numa_policy', fields.PCINUMAAffinityPolicy.LEGACY), request_id=request['request_id'], requester_id=request.get('requester_id')) request_obj.obj_reset_changes() self.requests.append(request_obj) self.obj_reset_changes() return self @base.remotable_classmethod def get_by_instance_uuid(cls, context, instance_uuid): db_pci_requests = db.instance_extra_get_by_instance_uuid( context, instance_uuid, columns=['pci_requests']) if db_pci_requests is not None: db_pci_requests = db_pci_requests['pci_requests'] return cls.obj_from_db(context, instance_uuid, db_pci_requests) @staticmethod def _load_legacy_requests(sysmeta_value, is_new=False): if sysmeta_value is None: return [] requests = [] db_requests = jsonutils.loads(sysmeta_value) for db_request in db_requests: request = InstancePCIRequest(count=db_request['count'], spec=db_request['spec'], alias_name=db_request['alias_name'], is_new=is_new) request.obj_reset_changes() requests.append(request) return requests @classmethod def get_by_instance(cls, context, instance): # NOTE (baoli): not all callers are passing instance as object yet. # Therefore, use the dict syntax in this routine if 'pci_requests' in instance['system_metadata']: # NOTE(danms): This instance hasn't been converted to use # instance_extra yet, so extract the data from sysmeta sysmeta = instance['system_metadata'] _requests = (cls._load_legacy_requests(sysmeta['pci_requests']) + cls._load_legacy_requests( sysmeta.get('new_pci_requests'), is_new=True)) requests = cls(instance_uuid=instance['uuid'], requests=_requests) requests.obj_reset_changes() return requests else: return cls.get_by_instance_uuid(context, instance['uuid']) def to_json(self): blob = [{ 'count': x.count, 'spec': x.spec, 'alias_name': x.alias_name, 'is_new': x.is_new, 'numa_policy': x.numa_policy, 'request_id': x.request_id, 'requester_id': x.requester_id } for x in self.requests] return jsonutils.dumps(blob)
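# Self-contained sketch of the JSON blob format produced by to_json() above and
# read back by obj_from_db(); the request values are illustrative only.
def _example_pci_requests_json_round_trip():
    from oslo_serialization import jsonutils
    requests = [{
        'count': 1,
        'spec': [{'vendor_id': '8086', 'product_id': '154c'}],
        'alias_name': None,
        'is_new': False,
        'numa_policy': 'legacy',
        'request_id': 'example-request-id',
        'requester_id': None,
    }]
    blob = jsonutils.dumps(requests)          # what instance_extra stores
    return jsonutils.loads(blob) == requests  # what obj_from_db() reads back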
class PciDevice(base.NovaPersistentObject, base.NovaObject): """Object to represent a PCI device on a compute node. PCI devices are managed by the compute resource tracker, which discovers the devices from the hardware platform, claims, allocates and frees devices for instances. The PCI device information is permanently maintained in a database. This makes it convenient to get PCI device information, like physical function for a VF device, adjacent switch IP address for a NIC, hypervisor identification for a PCI device, etc. It also provides a convenient way to check device allocation information for administrator purposes. A device can be in available/claimed/allocated/deleted/removed state. A device is available when it is discovered.. A device is claimed prior to being allocated to an instance. Normally the transition from claimed to allocated is quick. However, during a resize operation the transition can take longer, because devices are claimed in prep_resize and allocated in finish_resize. A device becomes removed when hot removed from a node (i.e. not found in the next auto-discover) but not yet synced with the DB. A removed device should not be allocated to any instance, and once deleted from the DB, the device object is changed to deleted state and no longer synced with the DB. Filed notes:: | 'dev_id': | Hypervisor's identification for the device, the string format | is hypervisor specific | 'extra_info': | Device-specific properties like PF address, switch ip address etc. """ # Version 1.0: Initial version # Version 1.1: String attributes updated to support unicode # Version 1.2: added request_id field # Version 1.3: Added field to represent PCI device NUMA node # Version 1.4: Added parent_addr field # Version 1.5: Added 2 new device statuses: UNCLAIMABLE and UNAVAILABLE # Version 1.6: Added uuid field VERSION = '1.6' fields = { 'id': fields.IntegerField(), 'uuid': fields.UUIDField(), # Note(yjiang5): the compute_node_id may be None because the pci # device objects are created before the compute node is created in DB 'compute_node_id': fields.IntegerField(nullable=True), 'address': fields.StringField(), 'vendor_id': fields.StringField(), 'product_id': fields.StringField(), 'dev_type': fields.PciDeviceTypeField(), 'status': fields.PciDeviceStatusField(), 'dev_id': fields.StringField(nullable=True), 'label': fields.StringField(nullable=True), 'instance_uuid': fields.StringField(nullable=True), 'request_id': fields.StringField(nullable=True), 'extra_info': fields.DictOfStringsField(), 'numa_node': fields.IntegerField(nullable=True), 'parent_addr': fields.StringField(nullable=True), } def obj_make_compatible(self, primitive, target_version): target_version = versionutils.convert_version_to_tuple(target_version) if target_version < (1, 2) and 'request_id' in primitive: del primitive['request_id'] if target_version < (1, 4) and 'parent_addr' in primitive: if primitive['parent_addr'] is not None: extra_info = primitive.get('extra_info', {}) extra_info['phys_function'] = primitive['parent_addr'] del primitive['parent_addr'] if target_version < (1, 5) and 'parent_addr' in primitive: added_statuses = (fields.PciDeviceStatus.UNCLAIMABLE, fields.PciDeviceStatus.UNAVAILABLE) status = primitive['status'] if status in added_statuses: raise exception.ObjectActionError( action='obj_make_compatible', reason='status=%s not supported in version %s' % (status, target_version)) if target_version < (1, 6) and 'uuid' in primitive: del primitive['uuid'] def update_device(self, dev_dict): """Sync the 
content from device dictionary to device object. The resource tracker updates the available devices periodically. To avoid meaningless syncs with the database, we update the device object only if a value changed. """ # Note(yjiang5): status/instance_uuid should only be updated by # functions like claim/allocate etc. The id is allocated by # database. The extra_info is created by the object. no_changes = ('status', 'instance_uuid', 'id', 'extra_info') for key in no_changes: dev_dict.pop(key, None) # NOTE(ndipanov): This needs to be set as it's accessed when matching dev_dict.setdefault('parent_addr') for k, v in dev_dict.items(): if k in self.fields.keys(): setattr(self, k, v) else: # NOTE(yjiang5): extra_info.update does not update # obj_what_changed, set it explicitly # NOTE(ralonsoh): list of parameters currently added to # "extra_info" dict: # - "capabilities": dict of (strings/list of strings) extra_info = self.extra_info data = (v if isinstance(v, six.string_types) else jsonutils.dumps(v)) extra_info.update({k: data}) self.extra_info = extra_info def __init__(self, *args, **kwargs): super(PciDevice, self).__init__(*args, **kwargs) self.obj_reset_changes() self.extra_info = {} # NOTE(ndipanov): These are required to build an in-memory device tree # but don't need to be proper fields (and can't easily be as they would # hold circular references) self.parent_device = None self.child_devices = [] def __eq__(self, other): return compare_pci_device_attributes(self, other) def __ne__(self, other): return not (self == other) @staticmethod def _from_db_object(context, pci_device, db_dev): for key in pci_device.fields: if key == 'uuid' and db_dev['uuid'] is None: # Older records might not have a uuid field set in the # database so we need to skip those here and auto-generate # a uuid later below. continue elif key != 'extra_info': setattr(pci_device, key, db_dev[key]) else: extra_info = db_dev.get("extra_info") pci_device.extra_info = jsonutils.loads(extra_info) pci_device._context = context pci_device.obj_reset_changes() # TODO(jaypipes): Remove in 2.0 version of object. This does an inline # migration to populate the uuid field. A similar inline migration is # performed in the save() method. if db_dev['uuid'] is None: pci_device.uuid = uuidutils.generate_uuid() pci_device.save() return pci_device @base.remotable_classmethod def get_by_dev_addr(cls, context, compute_node_id, dev_addr): db_dev = db.pci_device_get_by_addr(context, compute_node_id, dev_addr) return cls._from_db_object(context, cls(), db_dev) @base.remotable_classmethod def get_by_dev_id(cls, context, id): db_dev = db.pci_device_get_by_id(context, id) return cls._from_db_object(context, cls(), db_dev) @classmethod def create(cls, context, dev_dict): """Create a PCI device based on hypervisor information. As the device object is just created and is not synced with db yet thus we should not reset changes here for fields from dict. """ pci_device = cls() pci_device.update_device(dev_dict) pci_device.status = fields.PciDeviceStatus.AVAILABLE pci_device.uuid = uuidutils.generate_uuid() pci_device._context = context return pci_device @base.remotable def save(self): if self.status == fields.PciDeviceStatus.REMOVED: self.status = fields.PciDeviceStatus.DELETED db.pci_device_destroy(self._context, self.compute_node_id, self.address) elif self.status != fields.PciDeviceStatus.DELETED: # TODO(jaypipes): Remove in 2.0 version of object. This does an # inline migration to populate the uuid field. 
A similar migration # is done in the _from_db_object() method to migrate objects as # they are read from the DB. if 'uuid' not in self: self.uuid = uuidutils.generate_uuid() updates = self.obj_get_changes() updates['extra_info'] = self.extra_info if 'extra_info' in updates: updates['extra_info'] = jsonutils.dumps(updates['extra_info']) if updates: db_pci = db.pci_device_update(self._context, self.compute_node_id, self.address, updates) self._from_db_object(self._context, self, db_pci) @staticmethod def _bulk_update_status(dev_list, status): for dev in dev_list: dev.status = status def claim(self, instance_uuid): if self.status != fields.PciDeviceStatus.AVAILABLE: raise exception.PciDeviceInvalidStatus( compute_node_id=self.compute_node_id, address=self.address, status=self.status, hopestatus=[fields.PciDeviceStatus.AVAILABLE]) if self.dev_type == fields.PciDeviceType.SRIOV_PF: # Update PF status to CLAIMED if all of it dependants are free # and set their status to UNCLAIMABLE vfs_list = self.child_devices if not all([vf.is_available() for vf in vfs_list]): raise exception.PciDeviceVFInvalidStatus( compute_node_id=self.compute_node_id, address=self.address) self._bulk_update_status(vfs_list, fields.PciDeviceStatus.UNCLAIMABLE) elif self.dev_type == fields.PciDeviceType.SRIOV_VF: # Update VF status to CLAIMED if it's parent has not been # previously allocated or claimed # When claiming/allocating a VF, it's parent PF becomes # unclaimable/unavailable. Therefore, it is expected to find the # parent PF in an unclaimable/unavailable state for any following # claims to a sibling VF parent_ok_statuses = (fields.PciDeviceStatus.AVAILABLE, fields.PciDeviceStatus.UNCLAIMABLE, fields.PciDeviceStatus.UNAVAILABLE) parent = self.parent_device if parent: if parent.status not in parent_ok_statuses: raise exception.PciDevicePFInvalidStatus( compute_node_id=self.compute_node_id, address=self.parent_addr, status=self.status, vf_address=self.address, hopestatus=parent_ok_statuses) # Set PF status if parent.status == fields.PciDeviceStatus.AVAILABLE: parent.status = fields.PciDeviceStatus.UNCLAIMABLE else: LOG.debug( 'Physical function addr: %(pf_addr)s parent of ' 'VF addr: %(vf_addr)s was not found', { 'pf_addr': self.parent_addr, 'vf_addr': self.address }) self.status = fields.PciDeviceStatus.CLAIMED self.instance_uuid = instance_uuid def allocate(self, instance): ok_statuses = (fields.PciDeviceStatus.AVAILABLE, fields.PciDeviceStatus.CLAIMED) parent_ok_statuses = (fields.PciDeviceStatus.AVAILABLE, fields.PciDeviceStatus.UNCLAIMABLE, fields.PciDeviceStatus.UNAVAILABLE) dependants_ok_statuses = (fields.PciDeviceStatus.AVAILABLE, fields.PciDeviceStatus.UNCLAIMABLE) if self.status not in ok_statuses: raise exception.PciDeviceInvalidStatus( compute_node_id=self.compute_node_id, address=self.address, status=self.status, hopestatus=ok_statuses) if (self.status == fields.PciDeviceStatus.CLAIMED and self.instance_uuid != instance['uuid']): raise exception.PciDeviceInvalidOwner( compute_node_id=self.compute_node_id, address=self.address, owner=self.instance_uuid, hopeowner=instance['uuid']) if self.dev_type == fields.PciDeviceType.SRIOV_PF: vfs_list = self.child_devices if not all( [vf.status in dependants_ok_statuses for vf in vfs_list]): raise exception.PciDeviceVFInvalidStatus( compute_node_id=self.compute_node_id, address=self.address) self._bulk_update_status(vfs_list, fields.PciDeviceStatus.UNAVAILABLE) elif (self.dev_type == fields.PciDeviceType.SRIOV_VF): parent = self.parent_device if parent: if 
parent.status not in parent_ok_statuses: raise exception.PciDevicePFInvalidStatus( compute_node_id=self.compute_node_id, address=self.parent_addr, status=self.status, vf_address=self.address, hopestatus=parent_ok_statuses) # Set PF status parent.status = fields.PciDeviceStatus.UNAVAILABLE else: LOG.debug( 'Physical function addr: %(pf_addr)s parent of ' 'VF addr: %(vf_addr)s was not found', { 'pf_addr': self.parent_addr, 'vf_addr': self.address }) self.status = fields.PciDeviceStatus.ALLOCATED self.instance_uuid = instance['uuid'] # Notes(yjiang5): remove this check when instance object for # compute manager is finished if isinstance(instance, dict): if 'pci_devices' not in instance: instance['pci_devices'] = [] instance['pci_devices'].append(copy.copy(self)) else: instance.pci_devices.objects.append(copy.copy(self)) def remove(self): if self.status != fields.PciDeviceStatus.AVAILABLE: raise exception.PciDeviceInvalidStatus( compute_node_id=self.compute_node_id, address=self.address, status=self.status, hopestatus=[fields.PciDeviceStatus.AVAILABLE]) self.status = fields.PciDeviceStatus.REMOVED self.instance_uuid = None self.request_id = None def free(self, instance=None): ok_statuses = (fields.PciDeviceStatus.ALLOCATED, fields.PciDeviceStatus.CLAIMED) free_devs = [] if self.status not in ok_statuses: raise exception.PciDeviceInvalidStatus( compute_node_id=self.compute_node_id, address=self.address, status=self.status, hopestatus=ok_statuses) if instance and self.instance_uuid != instance['uuid']: raise exception.PciDeviceInvalidOwner( compute_node_id=self.compute_node_id, address=self.address, owner=self.instance_uuid, hopeowner=instance['uuid']) if self.dev_type == fields.PciDeviceType.SRIOV_PF: # Set all PF dependants status to AVAILABLE vfs_list = self.child_devices self._bulk_update_status(vfs_list, fields.PciDeviceStatus.AVAILABLE) free_devs.extend(vfs_list) if self.dev_type == fields.PciDeviceType.SRIOV_VF: # Set PF status to AVAILABLE if all of its VFs are free parent = self.parent_device if not parent: LOG.debug( 'Physical function addr: %(pf_addr)s parent of ' 'VF addr: %(vf_addr)s was not found', { 'pf_addr': self.parent_addr, 'vf_addr': self.address }) else: vfs_list = parent.child_devices if all( [vf.is_available() for vf in vfs_list if vf.id != self.id]): parent.status = fields.PciDeviceStatus.AVAILABLE free_devs.append(parent) old_status = self.status self.status = fields.PciDeviceStatus.AVAILABLE free_devs.append(self) self.instance_uuid = None self.request_id = None if old_status == fields.PciDeviceStatus.ALLOCATED and instance: # Notes(yjiang5): remove this check when instance object for # compute manager is finished existed = next( (dev for dev in instance['pci_devices'] if dev.id == self.id)) if isinstance(instance, dict): instance['pci_devices'].remove(existed) else: instance.pci_devices.objects.remove(existed) return free_devs def is_available(self): return self.status == fields.PciDeviceStatus.AVAILABLE
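# The lifecycle methods above (claim/allocate/free) are normally driven by the
# PCI resource tracker. The following is an illustrative sketch only, not part
# of the original module: it strings the calls together for a plain
# (non-SR-IOV) device. The device dict values, `ctxt` and `instance` are
# hypothetical placeholders for hypervisor data, a request context and an
# Instance object with its pci_devices list loaded.
def _example_pci_device_lifecycle(ctxt, instance):
    dev_dict = {'compute_node_id': 1,
                'address': '0000:04:10.1',
                'vendor_id': '8086',
                'product_id': '1520',
                'dev_type': fields.PciDeviceType.STANDARD,
                'parent_addr': None,
                'numa_node': 0}
    dev = PciDevice.create(ctxt, dev_dict)   # status starts as AVAILABLE
    dev.save()                               # first write to the database
    dev.claim(instance.uuid)                 # AVAILABLE -> CLAIMED
    dev.allocate(instance)                   # CLAIMED -> ALLOCATED
    dev.save()
    freed = dev.free(instance)               # ALLOCATED -> AVAILABLE
    dev.save()
    return freed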
class Migration(base.NovaPersistentObject, base.NovaObject, base.NovaObjectDictCompat): # Version 1.0: Initial version # Version 1.1: String attributes updated to support unicode # Version 1.2: Added migration_type and hidden # Version 1.3: Added get_by_id_and_instance() # Version 1.4: Added migration progress detail # Version 1.5: Added uuid # Version 1.6: Added cross_cell_move and get_by_uuid(). # Version 1.7: Added user_id and project_id VERSION = '1.7' fields = { 'id': fields.IntegerField(), 'uuid': fields.UUIDField(), 'source_compute': fields.StringField(nullable=True), # source hostname 'dest_compute': fields.StringField(nullable=True), # dest hostname 'source_node': fields.StringField(nullable=True), # source nodename 'dest_node': fields.StringField(nullable=True), # dest nodename 'dest_host': fields.StringField(nullable=True), # dest host IP 'old_instance_type_id': fields.IntegerField(nullable=True), 'new_instance_type_id': fields.IntegerField(nullable=True), 'instance_uuid': fields.StringField(nullable=True), 'status': fields.StringField(nullable=True), 'migration_type': fields.MigrationTypeField(nullable=False), 'hidden': fields.BooleanField(nullable=False, default=False), 'memory_total': fields.IntegerField(nullable=True), 'memory_processed': fields.IntegerField(nullable=True), 'memory_remaining': fields.IntegerField(nullable=True), 'disk_total': fields.IntegerField(nullable=True), 'disk_processed': fields.IntegerField(nullable=True), 'disk_remaining': fields.IntegerField(nullable=True), 'cross_cell_move': fields.BooleanField(default=False), # request context user id 'user_id': fields.StringField(nullable=True), # request context project id 'project_id': fields.StringField(nullable=True), } @staticmethod def _from_db_object(context, migration, db_migration): for key in migration.fields: value = db_migration[key] if key == 'migration_type' and value is None: value = determine_migration_type(db_migration) elif key == 'uuid' and value is None: continue migration[key] = value migration._context = context migration.obj_reset_changes() migration._ensure_uuid() return migration def obj_make_compatible(self, primitive, target_version): super(Migration, self).obj_make_compatible(primitive, target_version) target_version = versionutils.convert_version_to_tuple(target_version) if target_version < (1, 2): if 'migration_type' in primitive: del primitive['migration_type'] del primitive['hidden'] if target_version < (1, 4): if 'memory_total' in primitive: del primitive['memory_total'] del primitive['memory_processed'] del primitive['memory_remaining'] del primitive['disk_total'] del primitive['disk_processed'] del primitive['disk_remaining'] if target_version < (1, 5): if 'uuid' in primitive: del primitive['uuid'] if target_version < (1, 6) and 'cross_cell_move' in primitive: del primitive['cross_cell_move'] if target_version < (1, 7): if 'user_id' in primitive: del primitive['user_id'] if 'project_id' in primitive: del primitive['project_id'] def obj_load_attr(self, attrname): if attrname == 'migration_type': # NOTE(danms): The only reason we'd need to load this is if # some older node sent us one. So, guess the type. 
self.migration_type = determine_migration_type(self) elif attrname in ['hidden', 'cross_cell_move']: self.obj_set_defaults(attrname) else: super(Migration, self).obj_load_attr(attrname) def _ensure_uuid(self): if 'uuid' in self: return self.uuid = uuidutils.generate_uuid() try: self.save() except db_exc.DBDuplicateEntry: # NOTE(danms) We raced to generate a uuid for this, # so fetch the winner and use that uuid fresh = self.__class__.get_by_id(self._context, self.id) self.uuid = fresh.uuid @base.remotable_classmethod def get_by_uuid(cls, context, migration_uuid): db_migration = db.migration_get_by_uuid(context, migration_uuid) return cls._from_db_object(context, cls(), db_migration) @base.remotable_classmethod def get_by_id(cls, context, migration_id): db_migration = db.migration_get(context, migration_id) return cls._from_db_object(context, cls(), db_migration) @base.remotable_classmethod def get_by_id_and_instance(cls, context, migration_id, instance_uuid): db_migration = db.migration_get_by_id_and_instance( context, migration_id, instance_uuid) return cls._from_db_object(context, cls(), db_migration) @base.remotable_classmethod def get_by_instance_and_status(cls, context, instance_uuid, status): db_migration = db.migration_get_by_instance_and_status( context, instance_uuid, status) return cls._from_db_object(context, cls(), db_migration) @base.remotable def create(self): if self.obj_attr_is_set('id'): raise exception.ObjectActionError(action='create', reason='already created') if 'uuid' not in self: self.uuid = uuidutils.generate_uuid() # Record who is initiating the migration which is # not necessarily the owner of the instance. if 'user_id' not in self: self.user_id = self._context.user_id if 'project_id' not in self: self.project_id = self._context.project_id updates = self.obj_get_changes() if 'migration_type' not in updates: raise exception.ObjectActionError( action="create", reason=_("cannot create a Migration object without a " "migration_type set")) db_migration = db.migration_create(self._context, updates) self._from_db_object(self._context, self, db_migration) @base.remotable def save(self): updates = self.obj_get_changes() updates.pop('id', None) db_migration = db.migration_update(self._context, self.id, updates) self._from_db_object(self._context, self, db_migration) self.obj_reset_changes() @property def instance(self): if not hasattr(self, '_cached_instance'): self._cached_instance = objects.Instance.get_by_uuid( self._context, self.instance_uuid, expected_attrs=['migration_context', 'flavor']) return self._cached_instance @instance.setter def instance(self, instance): self._cached_instance = instance def is_same_host(self): return self.source_compute == self.dest_compute @property def is_live_migration(self): return self.migration_type == fields.MigrationType.LIVE_MIGRATION @property def is_resize(self): return self.migration_type == fields.MigrationType.RESIZE
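# Illustrative sketch, not part of the original module: creating a migration
# record and looking it up again. `ctxt`, the host/node names and the status
# string are hypothetical placeholders; create() fills in uuid, user_id and
# project_id when they are not supplied.
def _example_migration_record(ctxt, instance_uuid):
    migration = Migration(
        ctxt,
        source_compute='compute-1', source_node='compute-1',
        dest_compute='compute-2', dest_node='compute-2',
        instance_uuid=instance_uuid,
        status='pre-migrating',
        migration_type=fields.MigrationType.LIVE_MIGRATION)
    migration.create()
    # Another service can later fetch the same record by instance and status.
    same = Migration.get_by_instance_and_status(
        ctxt, instance_uuid, 'pre-migrating')
    return same.is_live_migration   # True for this record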
class RequestSpec(base.NovaObject): # Version 1.0: Initial version # Version 1.1: ImageMeta version 1.6 # Version 1.2: SchedulerRetries version 1.1 # Version 1.3: InstanceGroup version 1.10 # Version 1.4: ImageMeta version 1.7 # Version 1.5: Added get_by_instance_uuid(), create(), save() # Version 1.6: Added requested_destination # Version 1.7: Added destroy() # Version 1.8: Added security_groups # Version 1.9: Added user_id # Version 1.10: Added network_metadata # Version 1.11: Added is_bfv VERSION = '1.11' fields = { 'id': fields.IntegerField(), 'image': fields.ObjectField('ImageMeta', nullable=True), 'numa_topology': fields.ObjectField('InstanceNUMATopology', nullable=True), 'pci_requests': fields.ObjectField('InstancePCIRequests', nullable=True), # TODO(mriedem): The project_id shouldn't be nullable since the # scheduler relies on it being set. 'project_id': fields.StringField(nullable=True), 'user_id': fields.StringField(nullable=True), 'availability_zone': fields.StringField(nullable=True), 'flavor': fields.ObjectField('Flavor', nullable=False), 'num_instances': fields.IntegerField(default=1), 'ignore_hosts': fields.ListOfStringsField(nullable=True), # NOTE(mriedem): In reality, you can only ever have one # host in the force_hosts list. The fact this is a list # is a mistake perpetuated over time. 'force_hosts': fields.ListOfStringsField(nullable=True), # NOTE(mriedem): In reality, you can only ever have one # node in the force_nodes list. The fact this is a list # is a mistake perpetuated over time. 'force_nodes': fields.ListOfStringsField(nullable=True), 'requested_destination': fields.ObjectField('Destination', nullable=True, default=None), 'retry': fields.ObjectField('SchedulerRetries', nullable=True), 'limits': fields.ObjectField('SchedulerLimits', nullable=True), 'instance_group': fields.ObjectField('InstanceGroup', nullable=True), # NOTE(sbauza): Since hints are depending on running filters, we prefer # to leave the API correctly validating the hints per the filters and # just provide to the RequestSpec object a free-form dictionary 'scheduler_hints': fields.DictOfListOfStringsField(nullable=True), 'instance_uuid': fields.UUIDField(), 'security_groups': fields.ObjectField('SecurityGroupList'), 'network_metadata': fields.ObjectField('NetworkMetadata'), 'is_bfv': fields.BooleanField(), } def obj_make_compatible(self, primitive, target_version): super(RequestSpec, self).obj_make_compatible(primitive, target_version) target_version = versionutils.convert_version_to_tuple(target_version) if target_version < (1, 11) and 'is_bfv' in primitive: del primitive['is_bfv'] if target_version < (1, 10): if 'network_metadata' in primitive: del primitive['network_metadata'] if target_version < (1, 9): if 'user_id' in primitive: del primitive['user_id'] if target_version < (1, 8): if 'security_groups' in primitive: del primitive['security_groups'] if target_version < (1, 6): if 'requested_destination' in primitive: del primitive['requested_destination'] def obj_load_attr(self, attrname): if attrname not in REQUEST_SPEC_OPTIONAL_ATTRS: raise exception.ObjectActionError( action='obj_load_attr', reason='attribute %s not lazy-loadable' % attrname) if attrname == 'security_groups': self.security_groups = objects.SecurityGroupList(objects=[]) return if attrname == 'network_metadata': self.network_metadata = objects.NetworkMetadata(physnets=set(), tunneled=False) return # NOTE(sbauza): In case the primitive was not providing that field # because of a previous RequestSpec version, we want to default # 
that field in order to have the same behaviour. self.obj_set_defaults(attrname) @property def vcpus(self): return self.flavor.vcpus @property def memory_mb(self): return self.flavor.memory_mb @property def root_gb(self): return self.flavor.root_gb @property def ephemeral_gb(self): return self.flavor.ephemeral_gb @property def swap(self): return self.flavor.swap def _image_meta_from_image(self, image): if isinstance(image, objects.ImageMeta): self.image = image elif isinstance(image, dict): # NOTE(sbauza): Until Nova is fully providing an ImageMeta object # for getting properties, we still need to hydrate it here # TODO(sbauza): To be removed once all RequestSpec hydrations are # done on the conductor side and if the image is an ImageMeta self.image = objects.ImageMeta.from_dict(image) else: self.image = None def _from_instance(self, instance): if isinstance(instance, obj_instance.Instance): # NOTE(sbauza): Instance should normally be a NovaObject... getter = getattr elif isinstance(instance, dict): # NOTE(sbauza): ... but there are some cases where request_spec # has an instance key as a dictionary, just because # select_destinations() is getting a request_spec dict made by # sched_utils.build_request_spec() # TODO(sbauza): To be removed once all RequestSpec hydrations are # done on the conductor side getter = lambda x, y: x.get(y) else: # If the instance is None, there is no reason to set the fields return instance_fields = [ 'numa_topology', 'pci_requests', 'uuid', 'project_id', 'user_id', 'availability_zone' ] for field in instance_fields: if field == 'uuid': setattr(self, 'instance_uuid', getter(instance, field)) elif field == 'pci_requests': self._from_instance_pci_requests(getter(instance, field)) elif field == 'numa_topology': self._from_instance_numa_topology(getter(instance, field)) else: setattr(self, field, getter(instance, field)) def _from_instance_pci_requests(self, pci_requests): if isinstance(pci_requests, dict): pci_req_cls = objects.InstancePCIRequests self.pci_requests = pci_req_cls.from_request_spec_instance_props( pci_requests) else: self.pci_requests = pci_requests def _from_instance_numa_topology(self, numa_topology): if isinstance(numa_topology, dict): self.numa_topology = hardware.instance_topology_from_instance( dict(numa_topology=numa_topology)) else: self.numa_topology = numa_topology def _from_flavor(self, flavor): if isinstance(flavor, objects.Flavor): self.flavor = flavor elif isinstance(flavor, dict): # NOTE(sbauza): Again, request_spec is primitived by # sched_utils.build_request_spec() and passed to # select_destinations() like this # TODO(sbauza): To be removed once all RequestSpec hydrations are # done on the conductor side self.flavor = objects.Flavor(**flavor) def _from_retry(self, retry_dict): self.retry = (SchedulerRetries.from_dict(self._context, retry_dict) if retry_dict else None) def _populate_group_info(self, filter_properties): if filter_properties.get('instance_group'): # New-style group information as a NovaObject, we can directly set # the field self.instance_group = filter_properties.get('instance_group') elif filter_properties.get('group_updated') is True: # Old-style group information having ugly dict keys containing sets # NOTE(sbauza): Can be dropped once select_destinations is removed policies = list(filter_properties.get('group_policies')) hosts = list(filter_properties.get('group_hosts')) members = list(filter_properties.get('group_members')) self.instance_group = objects.InstanceGroup(policy=policies[0], hosts=hosts, 
members=members) # hosts has to be not part of the updates for saving the object self.instance_group.obj_reset_changes(['hosts']) else: # Set the value anyway to avoid any call to obj_attr_is_set for it self.instance_group = None def _from_limits(self, limits): if isinstance(limits, dict): self.limits = SchedulerLimits.from_dict(limits) else: # Already a SchedulerLimits object. self.limits = limits def _from_hints(self, hints_dict): if hints_dict is None: self.scheduler_hints = None return self.scheduler_hints = { hint: value if isinstance(value, list) else [value] for hint, value in hints_dict.items() } @classmethod def from_primitives(cls, context, request_spec, filter_properties): """Returns a new RequestSpec object by hydrating it from legacy dicts. Deprecated. A RequestSpec object is created early in the boot process using the from_components method. That object will either be passed to places that require it, or it can be looked up with get_by_instance_uuid. This method can be removed when there are no longer any callers. Because the method is not remotable it is not tied to object versioning. That helper is not intended to leave the legacy dicts kept in the nova codebase, but is rather just for giving a temporary solution for populating the Spec object until we get rid of scheduler_utils' build_request_spec() and the filter_properties hydratation in the conductor. :param context: a context object :param request_spec: An old-style request_spec dictionary :param filter_properties: An old-style filter_properties dictionary """ num_instances = request_spec.get('num_instances', 1) spec = cls(context, num_instances=num_instances) # Hydrate from request_spec first image = request_spec.get('image') spec._image_meta_from_image(image) instance = request_spec.get('instance_properties') spec._from_instance(instance) flavor = request_spec.get('instance_type') spec._from_flavor(flavor) # Hydrate now from filter_properties spec.ignore_hosts = filter_properties.get('ignore_hosts') spec.force_hosts = filter_properties.get('force_hosts') spec.force_nodes = filter_properties.get('force_nodes') retry = filter_properties.get('retry', {}) spec._from_retry(retry) limits = filter_properties.get('limits', {}) spec._from_limits(limits) spec._populate_group_info(filter_properties) scheduler_hints = filter_properties.get('scheduler_hints', {}) spec._from_hints(scheduler_hints) spec.requested_destination = filter_properties.get( 'requested_destination') # NOTE(sbauza): Default the other fields that are not part of the # original contract spec.obj_set_defaults() return spec def get_scheduler_hint(self, hint_name, default=None): """Convenient helper for accessing a particular scheduler hint since it is hydrated by putting a single item into a list. In order to reduce the complexity, that helper returns a string if the requested hint is a list of only one value, and if not, returns the value directly (ie. the list). If the hint is not existing (or scheduler_hints is None), then it returns the default value. 
:param hint_name: name of the hint :param default: the default value if the hint is not there """ if (not self.obj_attr_is_set('scheduler_hints') or self.scheduler_hints is None): return default hint_val = self.scheduler_hints.get(hint_name, default) return (hint_val[0] if isinstance(hint_val, list) and len(hint_val) == 1 else hint_val) def _to_legacy_image(self): return base.obj_to_primitive(self.image) if ( self.obj_attr_is_set('image') and self.image) else {} def _to_legacy_instance(self): # NOTE(sbauza): Since the RequestSpec only persists a few Instance # fields, we can only return a dict. instance = {} instance_fields = [ 'numa_topology', 'pci_requests', 'project_id', 'user_id', 'availability_zone', 'instance_uuid' ] for field in instance_fields: if not self.obj_attr_is_set(field): continue if field == 'instance_uuid': instance['uuid'] = getattr(self, field) else: instance[field] = getattr(self, field) flavor_fields = ['root_gb', 'ephemeral_gb', 'memory_mb', 'vcpus'] if not self.obj_attr_is_set('flavor'): return instance for field in flavor_fields: instance[field] = getattr(self.flavor, field) return instance def _to_legacy_group_info(self): # NOTE(sbauza): Since this is only needed until the AffinityFilters are # modified by using directly the RequestSpec object, we need to keep # the existing dictionary as a primitive. return { 'group_updated': True, 'group_hosts': set(self.instance_group.hosts), 'group_policies': set([self.instance_group.policy]), 'group_members': set(self.instance_group.members) } def to_legacy_request_spec_dict(self): """Returns a legacy request_spec dict from the RequestSpec object. Since we need to manage backwards compatibility and rolling upgrades within our RPC API, we need to accept to provide an helper for primitiving the right RequestSpec object into a legacy dict until we drop support for old Scheduler RPC API versions. If you don't understand why this method is needed, please don't use it. """ req_spec = {} if not self.obj_attr_is_set('num_instances'): req_spec['num_instances'] = self.fields['num_instances'].default else: req_spec['num_instances'] = self.num_instances req_spec['image'] = self._to_legacy_image() req_spec['instance_properties'] = self._to_legacy_instance() if self.obj_attr_is_set('flavor'): req_spec['instance_type'] = self.flavor else: req_spec['instance_type'] = {} return req_spec def to_legacy_filter_properties_dict(self): """Returns a legacy filter_properties dict from the RequestSpec object. Since we need to manage backwards compatibility and rolling upgrades within our RPC API, we need to accept to provide an helper for primitiving the right RequestSpec object into a legacy dict until we drop support for old Scheduler RPC API versions. If you don't understand why this method is needed, please don't use it. 
""" filt_props = {} if self.obj_attr_is_set('ignore_hosts') and self.ignore_hosts: filt_props['ignore_hosts'] = self.ignore_hosts if self.obj_attr_is_set('force_hosts') and self.force_hosts: filt_props['force_hosts'] = self.force_hosts if self.obj_attr_is_set('force_nodes') and self.force_nodes: filt_props['force_nodes'] = self.force_nodes if self.obj_attr_is_set('retry') and self.retry: filt_props['retry'] = self.retry.to_dict() if self.obj_attr_is_set('limits') and self.limits: filt_props['limits'] = self.limits.to_dict() if self.obj_attr_is_set('instance_group') and self.instance_group: filt_props.update(self._to_legacy_group_info()) if self.obj_attr_is_set('scheduler_hints') and self.scheduler_hints: # NOTE(sbauza): We need to backport all the hints correctly since # we had to hydrate the field by putting a single item into a list. filt_props['scheduler_hints'] = { hint: self.get_scheduler_hint(hint) for hint in self.scheduler_hints } if self.obj_attr_is_set( 'requested_destination') and self.requested_destination: filt_props['requested_destination'] = self.requested_destination return filt_props @classmethod def from_components(cls, context, instance_uuid, image, flavor, numa_topology, pci_requests, filter_properties, instance_group, availability_zone, security_groups=None, project_id=None, user_id=None): """Returns a new RequestSpec object hydrated by various components. This helper is useful in creating the RequestSpec from the various objects that are assembled early in the boot process. This method creates a complete RequestSpec object with all properties set or intentionally left blank. :param context: a context object :param instance_uuid: the uuid of the instance to schedule :param image: a dict of properties for an image or volume :param flavor: a flavor NovaObject :param numa_topology: InstanceNUMATopology or None :param pci_requests: InstancePCIRequests :param filter_properties: a dict of properties for scheduling :param instance_group: None or an instance group NovaObject :param availability_zone: an availability_zone string :param security_groups: A SecurityGroupList object. If None, don't set security_groups on the resulting object. :param project_id: The project_id for the requestspec (should match the instance project_id). :param user_id: The user_id for the requestspec (should match the instance user_id). 
""" spec_obj = cls(context) spec_obj.num_instances = 1 spec_obj.instance_uuid = instance_uuid spec_obj.instance_group = instance_group if spec_obj.instance_group is None and filter_properties: spec_obj._populate_group_info(filter_properties) spec_obj.project_id = project_id or context.project_id spec_obj.user_id = user_id or context.user_id spec_obj._image_meta_from_image(image) spec_obj._from_flavor(flavor) spec_obj._from_instance_pci_requests(pci_requests) spec_obj._from_instance_numa_topology(numa_topology) spec_obj.ignore_hosts = filter_properties.get('ignore_hosts') spec_obj.force_hosts = filter_properties.get('force_hosts') spec_obj.force_nodes = filter_properties.get('force_nodes') spec_obj._from_retry(filter_properties.get('retry', {})) spec_obj._from_limits(filter_properties.get('limits', {})) spec_obj._from_hints(filter_properties.get('scheduler_hints', {})) spec_obj.availability_zone = availability_zone if security_groups is not None: spec_obj.security_groups = security_groups spec_obj.requested_destination = filter_properties.get( 'requested_destination') # NOTE(sbauza): Default the other fields that are not part of the # original contract spec_obj.obj_set_defaults() return spec_obj def ensure_project_and_user_id(self, instance): if 'project_id' not in self or self.project_id is None: self.project_id = instance.project_id if 'user_id' not in self or self.user_id is None: self.user_id = instance.user_id def ensure_network_metadata(self, instance): if not (instance.info_cache and instance.info_cache.network_info): return physnets = set([]) tunneled = True # physical_network and tunneled might not be in the cache for old # instances that haven't had their info_cache healed yet for vif in instance.info_cache.network_info: physnet = vif.get('network', {}).get('meta', {}).get('physical_network', None) if physnet: physnets.add(physnet) tunneled |= vif.get('network', {}).get('meta', {}).get('tunneled', False) self.network_metadata = objects.NetworkMetadata(physnets=physnets, tunneled=tunneled) @staticmethod def _from_db_object(context, spec, db_spec): spec_obj = spec.obj_from_primitive(jsonutils.loads(db_spec['spec'])) for key in spec.fields: # Load these from the db model not the serialized object within, # though they should match. if key in ['id', 'instance_uuid']: setattr(spec, key, db_spec[key]) elif key in spec_obj: setattr(spec, key, getattr(spec_obj, key)) spec._context = context if 'instance_group' in spec and spec.instance_group: # NOTE(danms): We don't store the full instance group in # the reqspec since it would be stale almost immediately. # Instead, load it by uuid here so it's up-to-date. 
try: spec.instance_group = objects.InstanceGroup.get_by_uuid( context, spec.instance_group.uuid) except exception.InstanceGroupNotFound: # NOTE(danms): Instance group may have been deleted spec.instance_group = None spec.obj_reset_changes() return spec @staticmethod @db.api_context_manager.reader def _get_by_instance_uuid_from_db(context, instance_uuid): db_spec = context.session.query(api_models.RequestSpec).filter_by( instance_uuid=instance_uuid).first() if not db_spec: raise exception.RequestSpecNotFound(instance_uuid=instance_uuid) return db_spec @base.remotable_classmethod def get_by_instance_uuid(cls, context, instance_uuid): db_spec = cls._get_by_instance_uuid_from_db(context, instance_uuid) return cls._from_db_object(context, cls(), db_spec) @staticmethod @db.api_context_manager.writer def _create_in_db(context, updates): db_spec = api_models.RequestSpec() db_spec.update(updates) db_spec.save(context.session) return db_spec def _get_update_primitives(self): """Serialize object to match the db model. We store copies of embedded objects rather than references to these objects because we want a snapshot of the request at this point. If the references changed or were deleted we would not be able to reschedule this instance under the same conditions as it was originally scheduled with. """ updates = self.obj_get_changes() db_updates = None # NOTE(alaski): The db schema is the full serialized object in a # 'spec' column. If anything has changed we rewrite the full thing. if updates: # NOTE(danms): Don't persist the could-be-large and could-be-stale # properties of InstanceGroup spec = self.obj_clone() if 'instance_group' in spec and spec.instance_group: spec.instance_group.members = None spec.instance_group.hosts = None # NOTE(mriedem): Don't persist retries since those are per-request if 'retry' in spec and spec.retry: spec.retry = None # NOTE(stephenfin): Don't persist network metadata since we have # no need for it after scheduling if 'network_metadata' in spec and spec.network_metadata: del spec.network_metadata db_updates = {'spec': jsonutils.dumps(spec.obj_to_primitive())} if 'instance_uuid' in updates: db_updates['instance_uuid'] = updates['instance_uuid'] return db_updates @base.remotable def create(self): if self.obj_attr_is_set('id'): raise exception.ObjectActionError(action='create', reason='already created') updates = self._get_update_primitives() if not updates: raise exception.ObjectActionError(action='create', reason='no fields are set') db_spec = self._create_in_db(self._context, updates) self._from_db_object(self._context, self, db_spec) @staticmethod @db.api_context_manager.writer def _save_in_db(context, instance_uuid, updates): # FIXME(sbauza): Provide a classmethod when oslo.db bug #1520195 is # fixed and released db_spec = RequestSpec._get_by_instance_uuid_from_db( context, instance_uuid) db_spec.update(updates) db_spec.save(context.session) return db_spec @base.remotable def save(self): updates = self._get_update_primitives() if updates: db_spec = self._save_in_db(self._context, self.instance_uuid, updates) self._from_db_object(self._context, self, db_spec) self.obj_reset_changes() @staticmethod @db.api_context_manager.writer def _destroy_in_db(context, instance_uuid): result = context.session.query(api_models.RequestSpec).filter_by( instance_uuid=instance_uuid).delete() if not result: raise exception.RequestSpecNotFound(instance_uuid=instance_uuid) @base.remotable def destroy(self): self._destroy_in_db(self._context, self.instance_uuid) @staticmethod 
@db.api_context_manager.writer def _destroy_bulk_in_db(context, instance_uuids): return context.session.query(api_models.RequestSpec).filter( api_models.RequestSpec.instance_uuid.in_(instance_uuids)).\ delete(synchronize_session=False) @classmethod def destroy_bulk(cls, context, instance_uuids): return cls._destroy_bulk_in_db(context, instance_uuids) def reset_forced_destinations(self): """Clears the forced destination fields from the RequestSpec object. This method is for making sure we don't ask the scheduler to give us again the same destination(s) without persisting the modifications. """ self.force_hosts = None self.force_nodes = None # NOTE(sbauza): Make sure we don't persist this, we need to keep the # original request for the forced hosts self.obj_reset_changes(['force_hosts', 'force_nodes'])
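# Illustrative sketch, not part of the original module: how a RequestSpec is
# normally assembled from its components at boot time, persisted, and read
# back before scheduling. `ctxt`, `instance` and `flavor` are hypothetical
# placeholders, and the scheduler hint shown is just an example value.
def _example_request_spec_roundtrip(ctxt, instance, flavor):
    filter_properties = {'scheduler_hints': {'group': 'my-server-group'}}
    spec = RequestSpec.from_components(
        ctxt, instance.uuid, {}, flavor,
        numa_topology=None, pci_requests=None,
        filter_properties=filter_properties, instance_group=None,
        availability_zone=None)
    spec.create()   # serializes the whole object into the 'spec' column
    spec = RequestSpec.get_by_instance_uuid(ctxt, instance.uuid)
    # Single-item hint lists are unwrapped back to a plain string.
    return spec.get_scheduler_hint('group')   # -> 'my-server-group'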
class InstanceNUMATopology(base.NovaObject, base.NovaObjectDictCompat): # Version 1.0: Initial version # Version 1.1: Takes into account pagesize # Version 1.2: InstanceNUMACell 1.2 # Version 1.3: Add emulator threads policy VERSION = '1.3' def obj_make_compatible(self, primitive, target_version): super(InstanceNUMATopology, self).obj_make_compatible(primitive, target_version) target_version = versionutils.convert_version_to_tuple(target_version) if target_version < (1, 3): primitive.pop('emulator_threads_policy', None) fields = { # NOTE(danms): The 'id' field is no longer used and should be # removed in the future when convenient 'id': obj_fields.IntegerField(), 'instance_uuid': obj_fields.UUIDField(), 'cells': obj_fields.ListOfObjectsField('InstanceNUMACell'), 'emulator_threads_policy': (obj_fields.CPUEmulatorThreadsPolicyField(nullable=True)), } @classmethod def obj_from_db_obj(cls, context, instance_uuid, db_obj): primitive = jsonutils.loads(db_obj) if 'nova_object.name' in primitive: obj = cls.obj_from_primitive(primitive) cls._migrate_legacy_dedicated_instance_cpuset( context, instance_uuid, obj) else: obj = cls._migrate_legacy_object(context, instance_uuid, primitive) return obj # TODO(huaqiang): Remove after Wallaby once we are sure these objects have # been loaded at least once. @classmethod def _migrate_legacy_dedicated_instance_cpuset(cls, context, instance_uuid, obj): # NOTE(huaqiang): We may meet some topology object with the old version # 'InstanceNUMACell' cells, in that case, the 'dedicated' CPU is kept # in 'InstanceNUMACell.cpuset' field, but it should be kept in # 'InstanceNUMACell.pcpuset' field since Victoria. Making an upgrade # and persisting to database. update_db = False for cell in obj.cells: if len(cell.cpuset) == 0: continue if cell.cpu_policy != obj_fields.CPUAllocationPolicy.DEDICATED: continue cell.pcpuset = cell.cpuset cell.cpuset = set() update_db = True if update_db: db_obj = jsonutils.dumps(obj.obj_to_primitive()) values = { 'numa_topology': db_obj, } db.instance_extra_update_by_uuid(context, instance_uuid, values) # TODO(stephenfin): Remove in X or later, once this has bedded in @classmethod def _migrate_legacy_object(cls, context, instance_uuid, primitive): """Convert a pre-Liberty object to a real o.vo. Handle an unversioned object created prior to Liberty, by transforming to a versioned object and saving back the serialized version of this. :param context: RequestContext :param instance_uuid: The UUID of the instance this topology is associated with. :param primitive: A serialized representation of the legacy object. :returns: A serialized representation of the updated object. 
""" obj = cls( instance_uuid=instance_uuid, cells=[ InstanceNUMACell( id=cell.get('id'), cpuset=hardware.parse_cpu_spec(cell.get('cpus', '')), pcpuset=set(), memory=cell.get('mem', {}).get('total', 0), pagesize=cell.get('pagesize'), ) for cell in primitive.get('cells', []) ], ) db_obj = jsonutils.dumps(obj.obj_to_primitive()) values = { 'numa_topology': db_obj, } db.instance_extra_update_by_uuid(context, instance_uuid, values) return obj # TODO(ndipanov) Remove this method on the major version bump to 2.0 @base.remotable def create(self): values = {'numa_topology': self._to_json()} db.instance_extra_update_by_uuid(self._context, self.instance_uuid, values) self.obj_reset_changes() @base.remotable_classmethod def get_by_instance_uuid(cls, context, instance_uuid): db_extra = db.instance_extra_get_by_instance_uuid( context, instance_uuid, columns=['numa_topology']) if not db_extra: raise exception.NumaTopologyNotFound(instance_uuid=instance_uuid) if db_extra['numa_topology'] is None: return None return cls.obj_from_db_obj(context, instance_uuid, db_extra['numa_topology']) def _to_json(self): return jsonutils.dumps(self.obj_to_primitive()) def __len__(self): """Defined so that boolean testing works the same as for lists.""" return len(self.cells) # TODO(stephenfin): We should add a real 'cpu_policy' field on this object # and deprecate the one found on the cell @property def cpu_policy(self): cpu_policy = set(cell.cpu_policy for cell in self.cells) if len(cpu_policy) > 1: # NOTE(stephenfin): This should never happen in real life; it's to # prevent programmer error. raise exception.InternalError( 'Instance NUMA cells must have the same CPU policy.') return cpu_policy.pop() @property def cpu_pinning(self): """Return a set of all host CPUs this NUMATopology is pinned to.""" return set( itertools.chain.from_iterable([ cell.cpu_pinning.values() for cell in self.cells if cell.cpu_pinning ])) def clear_host_pinning(self): """Clear any data related to how instance is pinned to the host. Needed for aborting claims as we do not want to keep stale data around. """ for cell in self.cells: cell.clear_host_pinning() return self @property def emulator_threads_isolated(self): """Determines whether emulator threads should be isolated""" return (self.obj_attr_is_set('emulator_threads_policy') and (self.emulator_threads_policy == obj_fields.CPUEmulatorThreadsPolicy.ISOLATE))
class Aggregate(base.NovaPersistentObject, base.NovaObject): # Version 1.0: Initial version # Version 1.1: String attributes updated to support unicode # Version 1.2: Added uuid field VERSION = '1.2' fields = { 'id': fields.IntegerField(), 'uuid': fields.UUIDField(nullable=False), 'name': fields.StringField(), 'hosts': fields.ListOfStringsField(nullable=True), 'metadata': fields.DictOfStringsField(nullable=True), } obj_extra_fields = ['availability_zone'] @staticmethod def _from_db_object(context, aggregate, db_aggregate): for key in aggregate.fields: if key == 'metadata': db_key = 'metadetails' elif key == 'uuid': continue elif key in DEPRECATED_FIELDS and key not in db_aggregate: continue else: db_key = key setattr(aggregate, key, db_aggregate[db_key]) # NOTE(danms): Remove this conditional load (and remove uuid # special cases above) once we're in Newton and have enforced # that all UUIDs in the database are not NULL. if db_aggregate.get('uuid'): aggregate.uuid = db_aggregate['uuid'] # NOTE: This can be removed when we remove compatibility with # the old aggregate model. if any(f not in db_aggregate for f in DEPRECATED_FIELDS): aggregate.deleted_at = None aggregate.deleted = False aggregate._context = context aggregate.obj_reset_changes() # NOTE(danms): This needs to come after obj_reset_changes() to make # sure we only save the uuid, if we generate one. # FIXME(danms): Remove this in Newton once we have enforced that # all aggregates have uuids set in the database. if 'uuid' not in aggregate: aggregate.uuid = uuidutils.generate_uuid() LOG.debug('Generating UUID %(uuid)s for aggregate %(agg)i', dict(uuid=aggregate.uuid, agg=aggregate.id)) aggregate.save() return aggregate def _assert_no_hosts(self, action): if 'hosts' in self.obj_what_changed(): raise exception.ObjectActionError(action=action, reason='hosts updated inline') @base.remotable_classmethod def get_by_id(cls, context, aggregate_id): try: db_aggregate = _aggregate_get_from_db(context, aggregate_id) except exception.AggregateNotFound: db_aggregate = db.aggregate_get(context, aggregate_id) return cls._from_db_object(context, cls(), db_aggregate) @base.remotable def create(self): if self.obj_attr_is_set('id'): raise exception.ObjectActionError(action='create', reason='already created') self._assert_no_hosts('create') updates = self.obj_get_changes() payload = dict(updates) if 'metadata' in updates: # NOTE(danms): For some reason the notification format is weird payload['meta_data'] = payload.pop('metadata') if 'uuid' not in updates: updates['uuid'] = uuidutils.generate_uuid() LOG.debug('Generated uuid %(uuid)s for aggregate', dict(uuid=updates['uuid'])) compute_utils.notify_about_aggregate_update(self._context, "create.start", payload) metadata = updates.pop('metadata', None) db_aggregate = db.aggregate_create(self._context, updates, metadata=metadata) self._from_db_object(self._context, self, db_aggregate) payload['aggregate_id'] = self.id compute_utils.notify_about_aggregate_update(self._context, "create.end", payload) @base.remotable def save(self): self._assert_no_hosts('save') updates = self.obj_get_changes() payload = {'aggregate_id': self.id} if 'metadata' in updates: payload['meta_data'] = updates['metadata'] compute_utils.notify_about_aggregate_update(self._context, "updateprop.start", payload) updates.pop('id', None) db_aggregate = db.aggregate_update(self._context, self.id, updates) compute_utils.notify_about_aggregate_update(self._context, "updateprop.end", payload) self._from_db_object(self._context, self, 
db_aggregate) @base.remotable def update_metadata(self, updates): payload = {'aggregate_id': self.id, 'meta_data': updates} compute_utils.notify_about_aggregate_update(self._context, "updatemetadata.start", payload) to_add = {} for key, value in updates.items(): if value is None: try: db.aggregate_metadata_delete(self._context, self.id, key) except exception.AggregateMetadataNotFound: pass try: self.metadata.pop(key) except KeyError: pass else: to_add[key] = value self.metadata[key] = value db.aggregate_metadata_add(self._context, self.id, to_add) compute_utils.notify_about_aggregate_update(self._context, "updatemetadata.end", payload) self.obj_reset_changes(fields=['metadata']) @base.remotable def destroy(self): db.aggregate_delete(self._context, self.id) @base.remotable def add_host(self, host): db.aggregate_host_add(self._context, self.id, host) if self.hosts is None: self.hosts = [] self.hosts.append(host) self.obj_reset_changes(fields=['hosts']) @base.remotable def delete_host(self, host): db.aggregate_host_delete(self._context, self.id, host) self.hosts.remove(host) self.obj_reset_changes(fields=['hosts']) @property def availability_zone(self): return self.metadata.get('availability_zone', None)
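# Illustrative sketch, not part of the original module: the usual aggregate
# workflow is create, attach availability-zone metadata, then add hosts.
# `ctxt` and the names used are hypothetical placeholders.
def _example_aggregate_workflow(ctxt):
    agg = Aggregate(ctxt, name='rack-1')
    agg.create()                   # a uuid is generated if none was supplied
    agg.update_metadata({'availability_zone': 'az-east'})
    agg.add_host('compute-1')
    return agg.availability_zone   # -> 'az-east'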
class FixedIP(obj_base.NovaPersistentObject, obj_base.NovaObject): # Version 1.0: Initial version # Version 1.1: Added virtual_interface field # Version 1.2: Instance version 1.14 VERSION = '1.2' fields = { 'id': fields.IntegerField(), 'address': fields.IPV4AndV6AddressField(), 'network_id': fields.IntegerField(nullable=True), 'virtual_interface_id': fields.IntegerField(nullable=True), 'instance_uuid': fields.UUIDField(nullable=True), 'allocated': fields.BooleanField(), 'leased': fields.BooleanField(), 'reserved': fields.BooleanField(), 'host': fields.StringField(nullable=True), 'instance': fields.ObjectField('Instance', nullable=True), 'network': fields.ObjectField('Network', nullable=True), 'virtual_interface': fields.ObjectField('VirtualInterface', nullable=True), } def obj_make_compatible(self, primitive, target_version): target_version = utils.convert_version_to_tuple(target_version) if target_version < (1, 2) and 'instance' in primitive: primitive['instance'] = ( objects.Instance().object_make_compatible( primitive['instance']['nova_object.data'], '1.13')) @property def floating_ips(self): return objects.FloatingIPList.get_by_fixed_ip_id(self._context, self.id) @staticmethod def _from_db_object(context, fixedip, db_fixedip, expected_attrs=None): if expected_attrs is None: expected_attrs = [] for field in fixedip.fields: if field == 'virtual_interface': # NOTE(danms): This field is only set when doing a # FixedIPList.get_by_network() because it's a relatively # special-case thing, so skip it here continue if field not in FIXED_IP_OPTIONAL_ATTRS: fixedip[field] = db_fixedip[field] # NOTE(danms): Instance could be deleted, and thus None if 'instance' in expected_attrs: fixedip.instance = objects.Instance._from_db_object( context, objects.Instance(context), db_fixedip['instance']) if db_fixedip['instance'] else None if 'network' in expected_attrs: fixedip.network = objects.Network._from_db_object( context, objects.Network(context), db_fixedip['network']) fixedip._context = context fixedip.obj_reset_changes() return fixedip @obj_base.remotable_classmethod def get_by_id(cls, context, id, expected_attrs=None): if expected_attrs is None: expected_attrs = [] get_network = 'network' in expected_attrs db_fixedip = db.fixed_ip_get(context, id, get_network=get_network) return cls._from_db_object(context, cls(context), db_fixedip, expected_attrs) @obj_base.remotable_classmethod def get_by_address(cls, context, address, expected_attrs=None): if expected_attrs is None: expected_attrs = [] db_fixedip = db.fixed_ip_get_by_address(context, str(address), columns_to_join=expected_attrs) return cls._from_db_object(context, cls(context), db_fixedip, expected_attrs) @obj_base.remotable_classmethod def get_by_floating_address(cls, context, address): db_fixedip = db.fixed_ip_get_by_floating_address(context, str(address)) if db_fixedip is not None: return cls._from_db_object(context, cls(context), db_fixedip) @obj_base.remotable_classmethod def get_by_network_and_host(cls, context, network_id, host): db_fixedip = db.fixed_ip_get_by_network_host(context, network_id, host) return cls._from_db_object(context, cls(context), db_fixedip) @obj_base.remotable_classmethod def associate(cls, context, address, instance_uuid, network_id=None, reserved=False): db_fixedip = db.fixed_ip_associate(context, address, instance_uuid, network_id=network_id, reserved=reserved) return cls._from_db_object(context, cls(context), db_fixedip) @obj_base.remotable_classmethod def associate_pool(cls, context, network_id, 
instance_uuid=None, host=None): db_fixedip = db.fixed_ip_associate_pool(context, network_id, instance_uuid=instance_uuid, host=host) return cls._from_db_object(context, cls(context), db_fixedip) @obj_base.remotable_classmethod def disassociate_by_address(cls, context, address): db.fixed_ip_disassociate(context, address) @obj_base.remotable_classmethod def _disassociate_all_by_timeout(cls, context, host, time_str): time = timeutils.parse_isotime(time_str) return db.fixed_ip_disassociate_all_by_timeout(context, host, time) @classmethod def disassociate_all_by_timeout(cls, context, host, time): return cls._disassociate_all_by_timeout(context, host, timeutils.isotime(time)) @obj_base.remotable def create(self, context): updates = self.obj_get_changes() if 'id' in updates: raise exception.ObjectActionError(action='create', reason='already created') if 'address' in updates: updates['address'] = str(updates['address']) db_fixedip = db.fixed_ip_create(context, updates) self._from_db_object(context, self, db_fixedip) @obj_base.remotable def save(self, context): updates = self.obj_get_changes() if 'address' in updates: raise exception.ObjectActionError(action='save', reason='address is not mutable') db.fixed_ip_update(context, str(self.address), updates) self.obj_reset_changes() @obj_base.remotable def disassociate(self, context): db.fixed_ip_disassociate(context, str(self.address)) self.instance_uuid = None self.instance = None self.obj_reset_changes(['instance_uuid', 'instance'])
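# Illustrative sketch, not part of the original module: the nova-network style
# address lifecycle of associating a fixed IP with an instance, reading it
# back, and releasing it. `ctxt`, the address and the uuid are hypothetical
# placeholders.
def _example_fixed_ip_lifecycle(ctxt, instance_uuid):
    fip = FixedIP.associate(ctxt, '192.168.1.5', instance_uuid)
    same = FixedIP.get_by_address(ctxt, '192.168.1.5')
    FixedIP.disassociate_by_address(ctxt, '192.168.1.5')
    return fip.address == same.address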
class InstanceNUMATopology(base.NovaObject, base.NovaObjectDictCompat): # Version 1.0: Initial version # Version 1.1: Takes into account pagesize VERSION = '1.1' fields = { # NOTE(danms): The 'id' field is no longer used and should be # removed in the future when convenient 'id': obj_fields.IntegerField(), 'instance_uuid': obj_fields.UUIDField(), 'cells': obj_fields.ListOfObjectsField('InstanceNUMACell'), } obj_relationships = { 'cells': [('1.0', '1.0')], } @classmethod def obj_from_primitive(cls, primitive): if 'nova_object.name' in primitive: obj_topology = super(InstanceNUMATopology, cls).obj_from_primitive(primitive) else: # NOTE(sahid): This compatibility code needs to stay until we can # guarantee that there are no cases of the old format stored in # the database (or forever, if we can never guarantee that). obj_topology = InstanceNUMATopology._from_dict(primitive) obj_topology.id = 0 return obj_topology @classmethod def obj_from_db_obj(cls, instance_uuid, db_obj): primitive = jsonutils.loads(db_obj) obj_topology = cls.obj_from_primitive(primitive) if 'nova_object.name' not in db_obj: obj_topology.instance_uuid = instance_uuid # No benefit to store a list of changed fields obj_topology.obj_reset_changes() return obj_topology # TODO(ndipanov) Remove this method on the major version bump to 2.0 @base.remotable def create(self, context): self._save(context) # NOTE(ndipanov): We can't rename create and want to avoid version bump # as this needs to be backported to stable so this is not a @remotable # That's OK since we only call it from inside Instance.save() which is. def _save(self, context): values = {'numa_topology': self._to_json()} db.instance_extra_update_by_uuid(context, self.instance_uuid, values) self.obj_reset_changes() # NOTE(ndipanov): We want to avoid version bump # as this needs to be backported to stable so this is not a @remotable # That's OK since we only call it from inside Instance.save() which is. @classmethod def delete_by_instance_uuid(cls, context, instance_uuid): values = {'numa_topology': None} db.instance_extra_update_by_uuid(context, instance_uuid, values) @base.remotable_classmethod def get_by_instance_uuid(cls, context, instance_uuid): db_extra = db.instance_extra_get_by_instance_uuid( context, instance_uuid, columns=['numa_topology']) if not db_extra: raise exception.NumaTopologyNotFound(instance_uuid=instance_uuid) if db_extra['numa_topology'] is None: return None return cls.obj_from_db_obj(instance_uuid, db_extra['numa_topology']) def _to_json(self): return jsonutils.dumps(self.obj_to_primitive()) def __len__(self): """Defined so that boolean testing works the same as for lists.""" return len(self.cells) def _to_dict(self): # NOTE(sahid): Used as legacy, could be renamed in _legacy_to_dict_ # in the future to avoid confusing. return {'cells': [cell._to_dict() for cell in self.cells]} @classmethod def _from_dict(cls, data_dict): # NOTE(sahid): Used as legacy, could be renamed in _legacy_from_dict_ # in the future to avoid confusing. return cls(cells=[ InstanceNUMACell._from_dict(cell_dict) for cell_dict in data_dict.get('cells', []) ])
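# Illustrative sketch, not part of the original module: this older revision
# still understands the pre-Liberty dict format as well as a proper o.vo
# primitive. The JSON below is a hypothetical example of what may be stored in
# instance_extra.numa_topology for such a legacy record.
def _example_legacy_numa_load(instance_uuid):
    legacy_json = jsonutils.dumps(
        {'cells': [{'id': 0, 'cpus': '0,1',
                    'mem': {'total': 512}, 'pagesize': None}]})
    topo = InstanceNUMATopology.obj_from_db_obj(instance_uuid, legacy_json)
    # The legacy path stamps the instance uuid and resets change tracking.
    return topo.instance_uuid == instance_uuid and len(topo) == 1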
class InstanceAction(base.NovaPersistentObject, base.NovaObject): # Version 1.0: Initial version # Version 1.1: String attributes updated to support unicode VERSION = '1.1' fields = { 'id': fields.IntegerField(), 'action': fields.StringField(nullable=True), 'instance_uuid': fields.UUIDField(nullable=True), 'request_id': fields.StringField(nullable=True), 'user_id': fields.StringField(nullable=True), 'project_id': fields.StringField(nullable=True), 'start_time': fields.DateTimeField(nullable=True), 'finish_time': fields.DateTimeField(nullable=True), 'message': fields.StringField(nullable=True), } @staticmethod def _from_db_object(context, action, db_action): for field in action.fields: action[field] = db_action[field] action._context = context action.obj_reset_changes() return action @staticmethod def pack_action_start(context, instance_uuid, action_name): values = { 'request_id': context.request_id, 'instance_uuid': instance_uuid, 'user_id': context.user_id, 'project_id': context.project_id, 'action': action_name, 'start_time': context.timestamp } return values @staticmethod def pack_action_finish(context, instance_uuid): values = { 'request_id': context.request_id, 'instance_uuid': instance_uuid, 'finish_time': timeutils.utcnow() } return values @base.remotable_classmethod def get_by_request_id(cls, context, instance_uuid, request_id): db_action = db.action_get_by_request_id(context, instance_uuid, request_id) if db_action: return cls._from_db_object(context, cls(), db_action) @base.remotable_classmethod def action_start(cls, context, instance_uuid, action_name, want_result=True): values = cls.pack_action_start(context, instance_uuid, action_name) db_action = db.action_start(context, values) if want_result: return cls._from_db_object(context, cls(), db_action) @base.remotable_classmethod def action_finish(cls, context, instance_uuid, want_result=True): values = cls.pack_action_finish(context, instance_uuid) db_action = db.action_finish(context, values) if want_result: return cls._from_db_object(context, cls(), db_action) @base.remotable def finish(self, context): values = self.pack_action_finish(context, self.instance_uuid) db_action = db.action_finish(context, values) self._from_db_object(context, self, db_action)
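# Illustrative sketch, not part of the original module: an action record
# typically brackets an API-initiated operation. `ctxt` is a request context
# carrying request_id, user_id, project_id and timestamp; 'reboot' is just an
# example action name.
def _example_record_action(ctxt, instance_uuid):
    action = InstanceAction.action_start(ctxt, instance_uuid, 'reboot')
    # ... the long-running work would be performed here ...
    action.finish(ctxt)
    return action.request_id == ctxt.request_id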
class Instance(base.NovaPersistentObject, base.NovaObject): # Version 1.0: Initial version # Version 1.1: Added info_cache # Version 1.2: Added security_groups # Version 1.3: Added expected_vm_state and admin_state_reset to # save() # Version 1.4: Added locked_by and deprecated locked # Version 1.5: Added cleaned # Version 1.6: Added pci_devices # Version 1.7: String attributes updated to support unicode # Version 1.8: 'security_groups' and 'pci_devices' cannot be None # Version 1.9: Make uuid a non-None real string # Version 1.10: Added use_slave to refresh and get_by_uuid VERSION = '1.10' fields = { 'id': fields.IntegerField(), 'user_id': fields.StringField(nullable=True), 'project_id': fields.StringField(nullable=True), 'image_ref': fields.StringField(nullable=True), 'kernel_id': fields.StringField(nullable=True), 'ramdisk_id': fields.StringField(nullable=True), 'hostname': fields.StringField(nullable=True), 'launch_index': fields.IntegerField(nullable=True), 'key_name': fields.StringField(nullable=True), 'key_data': fields.StringField(nullable=True), 'power_state': fields.IntegerField(nullable=True), 'vm_state': fields.StringField(nullable=True), 'task_state': fields.StringField(nullable=True), 'memory_mb': fields.IntegerField(nullable=True), 'vcpus': fields.IntegerField(nullable=True), 'root_gb': fields.IntegerField(nullable=True), 'ephemeral_gb': fields.IntegerField(nullable=True), 'host': fields.StringField(nullable=True), 'node': fields.StringField(nullable=True), 'instance_type_id': fields.IntegerField(nullable=True), 'user_data': fields.StringField(nullable=True), 'reservation_id': fields.StringField(nullable=True), 'scheduled_at': fields.DateTimeField(nullable=True), 'launched_at': fields.DateTimeField(nullable=True), 'terminated_at': fields.DateTimeField(nullable=True), 'availability_zone': fields.StringField(nullable=True), 'display_name': fields.StringField(nullable=True), 'display_description': fields.StringField(nullable=True), 'launched_on': fields.StringField(nullable=True), # NOTE(jdillaman): locked deprecated in favor of locked_by, # to be removed in Icehouse 'locked': fields.BooleanField(default=False), 'locked_by': fields.StringField(nullable=True), 'os_type': fields.StringField(nullable=True), 'architecture': fields.StringField(nullable=True), 'vm_mode': fields.StringField(nullable=True), 'uuid': fields.UUIDField(), 'root_device_name': fields.StringField(nullable=True), 'default_ephemeral_device': fields.StringField(nullable=True), 'default_swap_device': fields.StringField(nullable=True), 'config_drive': fields.StringField(nullable=True), 'access_ip_v4': fields.IPV4AddressField(nullable=True), 'access_ip_v6': fields.IPV6AddressField(nullable=True), 'auto_disk_config': fields.BooleanField(default=False), 'progress': fields.IntegerField(nullable=True), 'shutdown_terminate': fields.BooleanField(default=False), 'disable_terminate': fields.BooleanField(default=False), 'cell_name': fields.StringField(nullable=True), 'metadata': fields.DictOfStringsField(), 'system_metadata': fields.DictOfNullableStringsField(), 'info_cache': fields.ObjectField('InstanceInfoCache', nullable=True), 'security_groups': fields.ObjectField('SecurityGroupList'), 'fault': fields.ObjectField('InstanceFault', nullable=True), 'cleaned': fields.BooleanField(default=False), 'pci_devices': fields.ObjectField('PciDeviceList', nullable=True), } obj_extra_fields = ['name'] def __init__(self, *args, **kwargs): super(Instance, self).__init__(*args, **kwargs) self._reset_metadata_tracking() def 
_reset_metadata_tracking(self): self._orig_system_metadata = (dict(self.system_metadata) if 'system_metadata' in self else {}) self._orig_metadata = (dict(self.metadata) if 'metadata' in self else {}) def obj_reset_changes(self, fields=None): super(Instance, self).obj_reset_changes(fields) self._reset_metadata_tracking() def obj_what_changed(self): changes = super(Instance, self).obj_what_changed() if 'metadata' in self and self.metadata != self._orig_metadata: changes.add('metadata') if 'system_metadata' in self and (self.system_metadata != self._orig_system_metadata): changes.add('system_metadata') return changes @property def name(self): try: base_name = CONF.instance_name_template % self.id except TypeError: # Support templates like "uuid-%(uuid)s", etc. info = {} # NOTE(russellb): Don't use self.iteritems() here, as it will # result in infinite recursion on the name property. for key in self.fields: if key == 'name': # NOTE(danms): prevent recursion continue elif not self.obj_attr_is_set(key): # NOTE(danms): Don't trigger lazy-loads continue info[key] = self[key] try: base_name = CONF.instance_name_template % info except KeyError: base_name = self.uuid return base_name @staticmethod def _from_db_object(context, instance, db_inst, expected_attrs=None): """Method to help with migration to objects. Converts a database entity to a formal object. """ if expected_attrs is None: expected_attrs = [] # Most of the field names match right now, so be quick for field in instance.fields: if field in INSTANCE_OPTIONAL_ATTRS: continue elif field == 'deleted': instance.deleted = db_inst['deleted'] == db_inst['id'] elif field == 'cleaned': instance.cleaned = db_inst['cleaned'] == 1 else: instance[field] = db_inst[field] if 'metadata' in expected_attrs: instance['metadata'] = utils.instance_meta(db_inst) if 'system_metadata' in expected_attrs: instance['system_metadata'] = utils.instance_sys_meta(db_inst) if 'fault' in expected_attrs: instance['fault'] = ( instance_fault.InstanceFault.get_latest_for_instance( context, instance.uuid)) if 'pci_devices' in expected_attrs: pci_devices = pci_device._make_pci_list(context, pci_device.PciDeviceList(), db_inst['pci_devices']) instance['pci_devices'] = pci_devices if 'info_cache' in expected_attrs: if db_inst['info_cache'] is None: info_cache = None else: info_cache = instance_info_cache.InstanceInfoCache() instance_info_cache.InstanceInfoCache._from_db_object( context, info_cache, db_inst['info_cache']) instance['info_cache'] = info_cache if 'security_groups' in expected_attrs: sec_groups = security_group._make_secgroup_list( context, security_group.SecurityGroupList(), db_inst['security_groups']) instance['security_groups'] = sec_groups instance._context = context instance.obj_reset_changes() return instance @base.remotable_classmethod def get_by_uuid(cls, context, uuid, expected_attrs=None, use_slave=False): if expected_attrs is None: expected_attrs = ['info_cache', 'security_groups'] columns_to_join = _expected_cols(expected_attrs) db_inst = db.instance_get_by_uuid(context, uuid, columns_to_join=columns_to_join, use_slave=use_slave) return cls._from_db_object(context, cls(), db_inst, expected_attrs) @base.remotable_classmethod def get_by_id(cls, context, inst_id, expected_attrs=None): if expected_attrs is None: expected_attrs = ['info_cache', 'security_groups'] columns_to_join = _expected_cols(expected_attrs) db_inst = db.instance_get(context, inst_id, columns_to_join=columns_to_join) return cls._from_db_object(context, cls(), db_inst, expected_attrs) 
@base.remotable def create(self, context): if self.obj_attr_is_set('id'): raise exception.ObjectActionError(action='create', reason='already created') updates = self.obj_get_changes() updates.pop('id', None) expected_attrs = [ attr for attr in INSTANCE_DEFAULT_FIELDS if attr in updates ] if 'security_groups' in updates: updates['security_groups'] = [ x.name for x in updates['security_groups'] ] if 'info_cache' in updates: updates['info_cache'] = { 'network_info': updates['info_cache'].network_info.json() } db_inst = db.instance_create(context, updates) Instance._from_db_object(context, self, db_inst, expected_attrs) @base.remotable def destroy(self, context): if not self.obj_attr_is_set('id'): raise exception.ObjectActionError(action='destroy', reason='already destroyed') if not self.obj_attr_is_set('uuid'): raise exception.ObjectActionError(action='destroy', reason='no uuid') if not self.obj_attr_is_set('host') or not self.host: # NOTE(danms): If our host is not set, avoid a race constraint = db.constraint(host=db.equal_any(None)) else: constraint = None try: db.instance_destroy(context, self.uuid, constraint=constraint) except exception.ConstraintNotMet: raise exception.ObjectActionError(action='destroy', reason='host changed') delattr(self, base.get_attrname('id')) def _save_info_cache(self, context): self.info_cache.save(context) def _save_security_groups(self, context): for secgroup in self.security_groups: secgroup.save(context) def _save_fault(self, context): # NOTE(danms): I don't think we need to worry about this, do we? pass def _save_pci_devices(self, context): # NOTE(yjiang5): All devices held by PCI tracker, only PCI tracker # permitted to update the DB. all change to devices from here will # be dropped. pass @base.remotable def save(self, context, expected_vm_state=None, expected_task_state=None, admin_state_reset=False): """Save updates to this instance Column-wise updates will be made based on the result of self.what_changed(). If expected_task_state is provided, it will be checked against the in-database copy of the instance before updates are made. :param context: Security context :param expected_task_state: Optional tuple of valid task states for the instance to be in. :param expected_vm_state: Optional tuple of valid vm states for the instance to be in. :param admin_state_reset: True if admin API is forcing setting of task_state/vm_state. """ cell_type = cells_opts.get_cell_type() if cell_type == 'api' and self.cell_name: # NOTE(comstud): We need to stash a copy of ourselves # before any updates are applied. When we call the save # methods on nested objects, we will lose any changes to # them. But we need to make sure child cells can tell # what is changed. # # We also need to nuke any updates to vm_state and task_state # unless admin_state_reset is True. compute cells are # authoritative for their view of vm_state and task_state. 
stale_instance = self.obj_clone() def _handle_cell_update_from_api(): cells_api = cells_rpcapi.CellsAPI() cells_api.instance_update_from_api(context, stale_instance, expected_vm_state, expected_task_state, admin_state_reset) else: stale_instance = None updates = {} changes = self.obj_what_changed() for field in self.fields: if (self.obj_attr_is_set(field) and isinstance(self[field], base.NovaObject)): try: getattr(self, '_save_%s' % field)(context) except AttributeError: LOG.exception(_('No save handler for %s') % field, instance=self) elif field in changes: updates[field] = self[field] if not updates: if stale_instance: _handle_cell_update_from_api() return # Cleaned needs to be turned back into an int here if 'cleaned' in updates: if updates['cleaned']: updates['cleaned'] = 1 else: updates['cleaned'] = 0 if expected_task_state is not None: updates['expected_task_state'] = expected_task_state if expected_vm_state is not None: updates['expected_vm_state'] = expected_vm_state expected_attrs = [ attr for attr in _INSTANCE_OPTIONAL_JOINED_FIELDS if self.obj_attr_is_set(attr) ] # NOTE(alaski): We need to pull system_metadata for the # notification.send_update() below. If we don't there's a KeyError # when it tries to extract the flavor. if 'system_metadata' not in expected_attrs: expected_attrs.append('system_metadata') old_ref, inst_ref = db.instance_update_and_get_original( context, self.uuid, updates, update_cells=False, columns_to_join=_expected_cols(expected_attrs)) if stale_instance: _handle_cell_update_from_api() elif cell_type == 'compute': cells_api = cells_rpcapi.CellsAPI() cells_api.instance_update_at_top(context, inst_ref) self._from_db_object(context, self, inst_ref, expected_attrs) notifications.send_update(context, old_ref, inst_ref) self.obj_reset_changes() @base.remotable def refresh(self, context, use_slave=False): extra = [ field for field in INSTANCE_OPTIONAL_ATTRS if self.obj_attr_is_set(field) ] current = self.__class__.get_by_uuid(context, uuid=self.uuid, expected_attrs=extra, use_slave=use_slave) # NOTE(danms): We orphan the instance copy so we do not unexpectedly # trigger a lazy-load (which would mean we failed to calculate the # expected_attrs properly) current._context = None for field in self.fields: if self.obj_attr_is_set(field) and self[field] != current[field]: self[field] = current[field] self.obj_reset_changes() def obj_load_attr(self, attrname): if attrname not in INSTANCE_OPTIONAL_ATTRS: raise exception.ObjectActionError( action='obj_load_attr', reason='attribute %s not lazy-loadable' % attrname) if not self._context: raise exception.OrphanedObjectError(method='obj_load_attr', objtype=self.obj_name()) LOG.debug(_("Lazy-loading `%(attr)s' on %(name)s uuid %(uuid)s"), { 'attr': attrname, 'name': self.obj_name(), 'uuid': self.uuid, }) # FIXME(comstud): This should be optimized to only load the attr. instance = self.__class__.get_by_uuid(self._context, uuid=self.uuid, expected_attrs=[attrname]) # NOTE(danms): Never allow us to recursively-load if instance.obj_attr_is_set(attrname): self[attrname] = instance[attrname] else: raise exception.ObjectActionError( action='obj_load_attr', reason='loading %s requires recursion' % attrname)
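# A minimal, self-contained sketch (not Nova code) of the metadata
# change-tracking idea used by Instance above. A snapshot of the dict is
# taken whenever changes are reset, so purely in-place mutations
# (obj.metadata['k'] = 'v'), which never go through an attribute assignment,
# are still reported by obj_what_changed(). All names below are illustrative.
class TracksMetadataSketch(object):
    def __init__(self, metadata=None):
        self.metadata = dict(metadata or {})
        self._reset_metadata_tracking()

    def _reset_metadata_tracking(self):
        # Keep a copy of the current contents for later comparison.
        self._orig_metadata = dict(self.metadata)

    def obj_what_changed(self):
        changes = set()
        if self.metadata != self._orig_metadata:
            changes.add('metadata')
        return changes

    def obj_reset_changes(self):
        self._reset_metadata_tracking()

# Usage: the in-place mutation is detected without any setattr call.
# obj = TracksMetadataSketch({'role': 'web'})
# obj.metadata['role'] = 'db'
# assert obj.obj_what_changed() == {'metadata'}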
class InstancePayload(base.NotificationPayloadBase): SCHEMA = { 'uuid': ('instance', 'uuid'), 'user_id': ('instance', 'user_id'), 'tenant_id': ('instance', 'project_id'), 'reservation_id': ('instance', 'reservation_id'), 'display_name': ('instance', 'display_name'), 'display_description': ('instance', 'display_description'), 'host_name': ('instance', 'hostname'), 'host': ('instance', 'host'), 'node': ('instance', 'node'), 'os_type': ('instance', 'os_type'), 'architecture': ('instance', 'architecture'), 'availability_zone': ('instance', 'availability_zone'), 'image_uuid': ('instance', 'image_ref'), 'key_name': ('instance', 'key_name'), 'kernel_id': ('instance', 'kernel_id'), 'ramdisk_id': ('instance', 'ramdisk_id'), 'created_at': ('instance', 'created_at'), 'launched_at': ('instance', 'launched_at'), 'terminated_at': ('instance', 'terminated_at'), 'deleted_at': ('instance', 'deleted_at'), 'updated_at': ('instance', 'updated_at'), 'state': ('instance', 'vm_state'), 'power_state': ('instance', 'power_state'), 'task_state': ('instance', 'task_state'), 'progress': ('instance', 'progress'), 'metadata': ('instance', 'metadata'), 'locked': ('instance', 'locked'), 'auto_disk_config': ('instance', 'auto_disk_config') } # Version 1.0: Initial version # Version 1.1: add locked and display_description field # Version 1.2: Add auto_disk_config field # Version 1.3: Add key_name field # Version 1.4: Add BDM related data # Version 1.5: Add updated_at field # Version 1.6: Add request_id field # Version 1.7: Added action_initiator_user and action_initiator_project to # InstancePayload VERSION = '1.7' fields = { 'uuid': fields.UUIDField(), 'user_id': fields.StringField(nullable=True), 'tenant_id': fields.StringField(nullable=True), 'reservation_id': fields.StringField(nullable=True), 'display_name': fields.StringField(nullable=True), 'display_description': fields.StringField(nullable=True), 'host_name': fields.StringField(nullable=True), 'host': fields.StringField(nullable=True), 'node': fields.StringField(nullable=True), 'os_type': fields.StringField(nullable=True), 'architecture': fields.StringField(nullable=True), 'availability_zone': fields.StringField(nullable=True), 'flavor': fields.ObjectField('FlavorPayload'), 'image_uuid': fields.StringField(nullable=True), 'key_name': fields.StringField(nullable=True), 'kernel_id': fields.StringField(nullable=True), 'ramdisk_id': fields.StringField(nullable=True), 'created_at': fields.DateTimeField(nullable=True), 'launched_at': fields.DateTimeField(nullable=True), 'terminated_at': fields.DateTimeField(nullable=True), 'deleted_at': fields.DateTimeField(nullable=True), 'updated_at': fields.DateTimeField(nullable=True), 'state': fields.InstanceStateField(nullable=True), 'power_state': fields.InstancePowerStateField(nullable=True), 'task_state': fields.InstanceTaskStateField(nullable=True), 'progress': fields.IntegerField(nullable=True), 'ip_addresses': fields.ListOfObjectsField('IpPayload'), 'block_devices': fields.ListOfObjectsField('BlockDevicePayload', nullable=True), 'metadata': fields.DictOfStringsField(), 'locked': fields.BooleanField(), 'auto_disk_config': fields.DiskConfigField(), 'request_id': fields.StringField(nullable=True), 'action_initiator_user': fields.StringField(nullable=True), 'action_initiator_project': fields.StringField(nullable=True), } def __init__(self, context, instance, bdms=None): super(InstancePayload, self).__init__() network_info = instance.get_network_info() self.ip_addresses = IpPayload.from_network_info(network_info) self.flavor = 
flavor_payload.FlavorPayload(flavor=instance.flavor) if bdms is not None: self.block_devices = BlockDevicePayload.from_bdms(bdms) else: self.block_devices = BlockDevicePayload.from_instance(instance) # NOTE(Kevin_Zheng): Don't include request_id for periodic tasks, # as the RequestContext for periodic tasks does not include project_id # and user_id. Consider modifying this once periodic tasks get a # consistent request_id. self.request_id = context.request_id if (context.project_id and context.user_id) else None self.action_initiator_user = context.user_id self.action_initiator_project = context.project_id self.populate_schema(instance=instance)
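# Self-contained illustration of the request_id guard used in __init__ above:
# periodic-task contexts carry no project_id/user_id, so such payloads end up
# with request_id=None. FakeContext and _request_id_for_payload are purely
# illustrative names, not part of Nova.
import collections

FakeContext = collections.namedtuple(
    'FakeContext', ['request_id', 'project_id', 'user_id'])

def _request_id_for_payload(context):
    # Mirrors the conditional expression used above.
    return context.request_id if (context.project_id and
                                  context.user_id) else None

# api_ctxt = FakeContext('req-123', 'my-project', 'my-user')
# periodic_ctxt = FakeContext('req-456', None, None)
# assert _request_id_for_payload(api_ctxt) == 'req-123'
# assert _request_id_for_payload(periodic_ctxt) is None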
class ImageMetaProps(base.NovaObject): # Version 1.0: Initial version # Version 1.1: added os_require_quiesce field # Version 1.2: added img_hv_type and img_hv_requested_version fields # Version 1.3: HVSpec version 1.1 # Version 1.4: added hw_vif_multiqueue_enabled field # Version 1.5: added os_admin_user field # Version 1.6: Added 'lxc' and 'uml' enum types to DiskBusField # Version 1.7: added img_config_drive field # Version 1.8: Added 'lxd' to hypervisor types # Version 1.9: added hw_cpu_thread_policy field # Version 1.10: added hw_cpu_realtime_mask field # Version 1.11: Added hw_firmware_type field # Version 1.12: Added properties for image signature verification # Version 1.13: added os_secure_boot field # Version 1.14: Added 'hw_pointer_model' field # Version 1.15: Added hw_rescue_bus and hw_rescue_device. # Version 1.16: WatchdogActionField supports 'disabled' enum. # Version 1.17: Add lan9118 as valid nic for hw_vif_model property for qemu # Version 1.18: Pull signature properties from cursive library # Version 1.19: Added 'img_hide_hypervisor_id' type field # Version 1.20: Added 'traits_required' list field # Version 1.21: Added 'hw_time_hpet' field # Version 1.22: Added 'gop', 'virtio' and 'none' to hw_video_model field # Version 1.23: Added 'hw_pmu' field # Version 1.24: Added 'hw_mem_encryption' field # Version 1.25: Added 'hw_pci_numa_affinity_policy' field # Version 1.26: Added 'mixed' to 'hw_cpu_policy' field # Version 1.27: Added 'hw_tpm_model' and 'hw_tpm_version' fields # Version 1.28: Added 'socket' to 'hw_pci_numa_affinity_policy' # Version 1.29: Added 'hw_input_bus' field # NOTE(efried): When bumping this version, the version of # ImageMetaPropsPayload must also be bumped. See its docstring for details. VERSION = '1.29' def obj_make_compatible(self, primitive, target_version): super(ImageMetaProps, self).obj_make_compatible(primitive, target_version) target_version = versionutils.convert_version_to_tuple(target_version) if target_version < (1, 29): primitive.pop('hw_input_bus', None) if target_version < (1, 28): policy = primitive.get('hw_pci_numa_affinity_policy', None) if policy == fields.PCINUMAAffinityPolicy.SOCKET: raise exception.ObjectActionError( action='obj_make_compatible', reason='hw_pci_numa_affinity_policy=%s not supported ' 'in version %s' % (policy, target_version)) if target_version < (1, 27): primitive.pop('hw_tpm_model', None) primitive.pop('hw_tpm_version', None) if target_version < (1, 26): policy = primitive.get('hw_cpu_policy', None) if policy == fields.CPUAllocationPolicy.MIXED: raise exception.ObjectActionError( action='obj_make_compatible', reason='hw_cpu_policy=%s not supported in version %s' % (policy, target_version)) if target_version < (1, 25): primitive.pop('hw_pci_numa_affinity_policy', None) if target_version < (1, 24): primitive.pop('hw_mem_encryption', None) if target_version < (1, 23): primitive.pop('hw_pmu', None) # NOTE(sean-k-mooney): unlike other nova objects we version this object # when composed objects are updated.
if target_version < (1, 22): video = primitive.get('hw_video_model', None) if video in ('gop', 'virtio', 'none'): raise exception.ObjectActionError( action='obj_make_compatible', reason='hw_video_model=%s not supported in version %s' % (video, target_version)) if target_version < (1, 21): primitive.pop('hw_time_hpet', None) if target_version < (1, 20): primitive.pop('traits_required', None) if target_version < (1, 19): primitive.pop('img_hide_hypervisor_id', None) if target_version < (1, 16) and 'hw_watchdog_action' in primitive: # Check to see if hw_watchdog_action was set to 'disabled' and if # so, remove it since not specifying it is the same behavior. if primitive['hw_watchdog_action'] == \ fields.WatchdogAction.DISABLED: primitive.pop('hw_watchdog_action') if target_version < (1, 15): primitive.pop('hw_rescue_bus', None) primitive.pop('hw_rescue_device', None) if target_version < (1, 14): primitive.pop('hw_pointer_model', None) if target_version < (1, 13): primitive.pop('os_secure_boot', None) if target_version < (1, 11): primitive.pop('hw_firmware_type', None) if target_version < (1, 10): primitive.pop('hw_cpu_realtime_mask', None) if target_version < (1, 9): primitive.pop('hw_cpu_thread_policy', None) if target_version < (1, 7): primitive.pop('img_config_drive', None) if target_version < (1, 5): primitive.pop('os_admin_user', None) if target_version < (1, 4): primitive.pop('hw_vif_multiqueue_enabled', None) if target_version < (1, 2): primitive.pop('img_hv_type', None) primitive.pop('img_hv_requested_version', None) if target_version < (1, 1): primitive.pop('os_require_quiesce', None) if target_version < (1, 6): bus = primitive.get('hw_disk_bus', None) if bus in ('lxc', 'uml'): raise exception.ObjectActionError( action='obj_make_compatible', reason='hw_disk_bus=%s not supported in version %s' % (bus, target_version)) # Maximum number of NUMA nodes permitted for the guest topology NUMA_NODES_MAX = 128 # 'hw_' - settings affecting the guest virtual machine hardware # 'img_' - settings affecting the use of images by the compute node # 'os_' - settings affecting the guest operating system setup # 'traits_required' - The required traits associated with the image fields = { # name of guest hardware architecture eg i686, x86_64, ppc64 'hw_architecture': fields.ArchitectureField(), # used to decide to expand root disk partition and fs to full size of # root disk 'hw_auto_disk_config': fields.StringField(), # whether to display BIOS boot device menu 'hw_boot_menu': fields.FlexibleBooleanField(), # name of the CDROM bus to use eg virtio, scsi, ide 'hw_cdrom_bus': fields.DiskBusField(), # preferred number of CPU cores per socket 'hw_cpu_cores': fields.IntegerField(), # preferred number of CPU sockets 'hw_cpu_sockets': fields.IntegerField(), # maximum number of CPU cores per socket 'hw_cpu_max_cores': fields.IntegerField(), # maximum number of CPU sockets 'hw_cpu_max_sockets': fields.IntegerField(), # maximum number of CPU threads per core 'hw_cpu_max_threads': fields.IntegerField(), # CPU allocation policy 'hw_cpu_policy': fields.CPUAllocationPolicyField(), # CPU thread allocation policy 'hw_cpu_thread_policy': fields.CPUThreadAllocationPolicyField(), # CPU mask indicates which vCPUs will have realtime enable, # example ^0-1 means that all vCPUs except 0 and 1 will have a # realtime policy. 
'hw_cpu_realtime_mask': fields.StringField(), # preferred number of CPU threads per core 'hw_cpu_threads': fields.IntegerField(), # guest ABI version for guest xentools either 1 or 2 (or 3 - depends on # Citrix PV tools version installed in image) 'hw_device_id': fields.IntegerField(), # name of the hard disk bus to use eg virtio, scsi, ide 'hw_disk_bus': fields.DiskBusField(), # allocation mode eg 'preallocated' 'hw_disk_type': fields.StringField(), # name of the floppy disk bus to use eg fd, scsi, ide 'hw_floppy_bus': fields.DiskBusField(), # This indicates the guest needs UEFI firmware 'hw_firmware_type': fields.FirmwareTypeField(), # name of the input bus type to use, e.g. usb, virtio 'hw_input_bus': fields.InputBusField(), # boolean - used to trigger code to inject networking when booting a CD # image with a network boot image 'hw_ipxe_boot': fields.FlexibleBooleanField(), # There are sooooooooooo many possible machine types in # QEMU - several new ones with each new release - that it # is not practical to enumerate them all. So we use a free # form string 'hw_machine_type': fields.StringField(), # boolean indicating that the guest needs to be booted with # encrypted memory 'hw_mem_encryption': fields.FlexibleBooleanField(), # One of the magic strings 'small', 'any', 'large' # or an explicit page size in KB (eg 4, 2048, ...) 'hw_mem_page_size': fields.StringField(), # Number of guest NUMA nodes 'hw_numa_nodes': fields.IntegerField(), # Each list entry corresponds to a guest NUMA node and the # set members indicate CPUs for that node 'hw_numa_cpus': fields.ListOfSetsOfIntegersField(), # Each list entry corresponds to a guest NUMA node and the # list value indicates the memory size of that node. 'hw_numa_mem': fields.ListOfIntegersField(), # Enum field to specify pci device NUMA affinity. 'hw_pci_numa_affinity_policy': fields.PCINUMAAffinityPolicyField(), # Generic property to specify the pointer model type. 'hw_pointer_model': fields.PointerModelField(), # boolean 'true' or 'false' to enable virtual performance # monitoring unit (vPMU). 'hw_pmu': fields.FlexibleBooleanField(), # boolean 'yes' or 'no' to enable QEMU guest agent 'hw_qemu_guest_agent': fields.FlexibleBooleanField(), # name of the rescue bus to use with the associated rescue device. 'hw_rescue_bus': fields.DiskBusField(), # name of rescue device to use. 'hw_rescue_device': fields.BlockDeviceTypeField(), # name of the RNG device type eg virtio # NOTE(kchamart): Although this is currently not used anymore, # we should not remove / deprecate it yet, as we are likely to # extend this field to allow two more values to support "VirtIO # transitional/non-transitional devices" (refer to the note in # RNGModel() class in nova/objects/fields.py), and thus expose # to the user again. 
'hw_rng_model': fields.RNGModelField(), # boolean 'true' or 'false' to enable HPET 'hw_time_hpet': fields.FlexibleBooleanField(), # number of serial ports to create 'hw_serial_port_count': fields.IntegerField(), # name of the SCSI bus controller eg 'virtio-scsi', 'lsilogic', etc 'hw_scsi_model': fields.SCSIModelField(), # name of the video adapter model to use, eg cirrus, vga, xen, qxl 'hw_video_model': fields.VideoModelField(), # MB of video RAM to provide eg 64 'hw_video_ram': fields.IntegerField(), # name of a NIC device model eg virtio, e1000, rtl8139 'hw_vif_model': fields.VIFModelField(), # "xen" vs "hvm" 'hw_vm_mode': fields.VMModeField(), # action to take when watchdog device fires eg reset, poweroff, pause, # none 'hw_watchdog_action': fields.WatchdogActionField(), # boolean - If true, this will enable the virtio-multiqueue feature 'hw_vif_multiqueue_enabled': fields.FlexibleBooleanField(), # name of emulated TPM model to use. 'hw_tpm_model': fields.TPMModelField(), # version of emulated TPM to use. 'hw_tpm_version': fields.TPMVersionField(), # if true download using bittorrent 'img_bittorrent': fields.FlexibleBooleanField(), # Which data format the 'img_block_device_mapping' field is # using to represent the block device mapping 'img_bdm_v2': fields.FlexibleBooleanField(), # Block device mapping - this may be in one of two completely # different formats. The 'img_bdm_v2' field determines whether # it is in legacy format, or the new current format. Ideally # we would have a formal data type for this field instead of a # dict, but with two different formats to represent, this is hard. # See nova/block_device.py from_legacy_mapping() for the complex # conversion code. So for now leave it as a dict and continue # to use existing code that is able to convert dict into the # desired internal BDM formats 'img_block_device_mapping': fields.ListOfDictOfNullableStringsField(), # boolean - if True and the image cache is set to "some", decides if the # image should be cached on the host when a server is booted on that host 'img_cache_in_nova': fields.FlexibleBooleanField(), # Compression level for images. (1-9) 'img_compression_level': fields.IntegerField(), # hypervisor supported version, eg. '>=2.6' 'img_hv_requested_version': fields.VersionPredicateField(), # type of the hypervisor, eg kvm, ironic, xen 'img_hv_type': fields.HVTypeField(), # Whether the image needs/expects a config drive 'img_config_drive': fields.ConfigDrivePolicyField(), # boolean flag to set space-saving or performance behavior on the # Datastore 'img_linked_clone': fields.FlexibleBooleanField(), # Image mappings - related to Block device mapping data - mapping # of virtual image names to device names. This could be represented # as a formal data type, but is left as dict for same reason as # img_block_device_mapping field. It would arguably make sense for # the two to be combined into a single field and data type in the # future.
'img_mappings': fields.ListOfDictOfNullableStringsField(), # image project id (set on upload) 'img_owner_id': fields.StringField(), # root device name, used in snapshotting eg /dev/<blah> 'img_root_device_name': fields.StringField(), # boolean - if false don't talk to nova agent 'img_use_agent': fields.FlexibleBooleanField(), # integer value 1 'img_version': fields.IntegerField(), # base64 of encoding of image signature 'img_signature': fields.StringField(), # string indicating hash method used to compute image signature 'img_signature_hash_method': fields.ImageSignatureHashTypeField(), # string indicating Castellan uuid of certificate # used to compute the image's signature 'img_signature_certificate_uuid': fields.UUIDField(), # string indicating type of key used to compute image signature 'img_signature_key_type': fields.ImageSignatureKeyTypeField(), # boolean - hide hypervisor signature on instance 'img_hide_hypervisor_id': fields.FlexibleBooleanField(), # string of username with admin privileges 'os_admin_user': fields.StringField(), # string of boot time command line arguments for the guest kernel 'os_command_line': fields.StringField(), # the name of the specific guest operating system distro. This # is not done as an Enum since the list of operating systems is # growing incredibly fast, and valid values can be arbitrarily # user defined. Nova has no real need for strict validation so # leave it freeform 'os_distro': fields.StringField(), # boolean - if true, then guest must support disk quiesce # or snapshot operation will be denied 'os_require_quiesce': fields.FlexibleBooleanField(), # Secure Boot feature will be enabled by setting the "os_secure_boot" # image property to "required". Other options can be: "disabled" or # "optional". # "os:secure_boot" flavor extra spec value overrides the image property # value. 'os_secure_boot': fields.SecureBootField(), # boolean - if using agent don't inject files, assume someone else is # doing that (cloud-init) 'os_skip_agent_inject_files_at_boot': fields.FlexibleBooleanField(), # boolean - if using agent don't try inject ssh key, assume someone # else is doing that (cloud-init) 'os_skip_agent_inject_ssh': fields.FlexibleBooleanField(), # The guest operating system family such as 'linux', 'windows' - this # is a fairly generic type. For a detailed type consider os_distro # instead 'os_type': fields.OSTypeField(), # The required traits associated with the image. 
Traits are expected to # be defined as starting with `trait:` like below: # trait:HW_CPU_X86_AVX2=required # for trait in image_meta.traits_required: # will yield trait strings such as 'HW_CPU_X86_AVX2' 'traits_required': fields.ListOfStringsField(), } # The keys are the legacy property names and # the values are the current preferred names _legacy_property_map = { 'architecture': 'hw_architecture', 'owner_id': 'img_owner_id', 'vmware_disktype': 'hw_disk_type', 'vmware_image_version': 'img_version', 'vmware_ostype': 'os_distro', 'auto_disk_config': 'hw_auto_disk_config', 'ipxe_boot': 'hw_ipxe_boot', 'xenapi_device_id': 'hw_device_id', 'xenapi_image_compression_level': 'img_compression_level', 'vmware_linked_clone': 'img_linked_clone', 'xenapi_use_agent': 'img_use_agent', 'xenapi_skip_agent_inject_ssh': 'os_skip_agent_inject_ssh', 'xenapi_skip_agent_inject_files_at_boot': 'os_skip_agent_inject_files_at_boot', 'cache_in_nova': 'img_cache_in_nova', 'vm_mode': 'hw_vm_mode', 'bittorrent': 'img_bittorrent', 'mappings': 'img_mappings', 'block_device_mapping': 'img_block_device_mapping', 'bdm_v2': 'img_bdm_v2', 'root_device_name': 'img_root_device_name', 'hypervisor_version_requires': 'img_hv_requested_version', 'hypervisor_type': 'img_hv_type', } # TODO(berrange): Need to run this from a data migration # at some point so we can eventually kill off the compat def _set_attr_from_legacy_names(self, image_props): for legacy_key in self._legacy_property_map: new_key = self._legacy_property_map[legacy_key] if legacy_key not in image_props: continue setattr(self, new_key, image_props[legacy_key]) vmware_adaptertype = image_props.get("vmware_adaptertype") if vmware_adaptertype == "ide": setattr(self, "hw_disk_bus", "ide") elif vmware_adaptertype: setattr(self, "hw_disk_bus", "scsi") setattr(self, "hw_scsi_model", vmware_adaptertype) def _set_numa_mem(self, image_props): hw_numa_mem = [] hw_numa_mem_set = False for cellid in range(ImageMetaProps.NUMA_NODES_MAX): memprop = "hw_numa_mem.%d" % cellid if memprop not in image_props: break hw_numa_mem.append(int(image_props[memprop])) hw_numa_mem_set = True del image_props[memprop] if hw_numa_mem_set: self.hw_numa_mem = hw_numa_mem def _set_numa_cpus(self, image_props): hw_numa_cpus = [] hw_numa_cpus_set = False for cellid in range(ImageMetaProps.NUMA_NODES_MAX): cpuprop = "hw_numa_cpus.%d" % cellid if cpuprop not in image_props: break hw_numa_cpus.append(hardware.parse_cpu_spec(image_props[cpuprop])) hw_numa_cpus_set = True del image_props[cpuprop] if hw_numa_cpus_set: self.hw_numa_cpus = hw_numa_cpus def _set_attr_from_current_names(self, image_props): for key in self.fields: # The two NUMA fields need special handling to # un-stringify them correctly if key == "hw_numa_mem": self._set_numa_mem(image_props) elif key == "hw_numa_cpus": self._set_numa_cpus(image_props) else: # traits_required will be populated by # _set_attr_from_trait_names if key not in image_props or key == "traits_required": continue setattr(self, key, image_props[key]) def _set_attr_from_trait_names(self, image_props): for trait in [ str(k[6:]) for k, v in image_props.items() if str(k).startswith("trait:") and str(v) == 'required' ]: if 'traits_required' not in self: self.traits_required = [] self.traits_required.append(trait) @classmethod def from_dict(cls, image_props): """Create instance from image properties dict :param image_props: dictionary of image metadata properties Creates a new object instance, initializing from a dictionary of image metadata properties :returns: an 
ImageMetaProps instance """ obj = cls() # We look to see if the dict has entries for any # of the legacy property names first. Then we use # the current property names. That way if both the # current and legacy names are set, the value # associated with the current name takes priority obj._set_attr_from_legacy_names(image_props) obj._set_attr_from_current_names(image_props) obj._set_attr_from_trait_names(image_props) return obj def get(self, name, defvalue=None): """Get the value of an attribute :param name: the attribute to request :param defvalue: the default value to return if the attribute is not set This returns the value of an attribute if it is currently set, otherwise it will return defvalue. This differs from accessing props.attrname, because that will raise an exception if the attribute has no value set. So instead of if image_meta.properties.obj_attr_is_set("some_attr"): val = image_meta.properties.some_attr else: val = None Callers can rely on unconditional access val = image_meta.properties.get("some_attr") :returns: the attribute value, or defvalue if the attribute is not set """ if not self.obj_attr_is_set(name): return defvalue return getattr(self, name)
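# Standalone sketch of the trait collection performed by
# _set_attr_from_trait_names() above: image properties of the form
# "trait:<NAME>=required" are folded into a list of bare trait names.
# collect_required_traits is an illustrative helper, not a Nova function.
def collect_required_traits(image_props):
    return [str(key)[len('trait:'):]
            for key, value in image_props.items()
            if str(key).startswith('trait:') and str(value) == 'required']

# props = {'hw_vif_model': 'virtio',
#          'trait:HW_CPU_X86_AVX2': 'required',
#          'trait:CUSTOM_FOO': 'forbidden'}
# collect_required_traits(props) == ['HW_CPU_X86_AVX2']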
class InstancePCIRequests(base.NovaObject, base.NovaObjectDictCompat): # Version 1.0: Initial version # Version 1.1: InstancePCIRequest 1.1 VERSION = '1.1' fields = { 'instance_uuid': fields.UUIDField(), 'requests': fields.ListOfObjectsField('InstancePCIRequest'), } obj_relationships = { 'requests': [('1.0', '1.0'), ('1.1', '1.1')], } def obj_make_compatible(self, primitive, target_version): target_version = utils.convert_version_to_tuple(target_version) if target_version < (1, 1) and 'requests' in primitive: for index, request in enumerate(self.requests): request.obj_make_compatible( primitive['requests'][index]['nova_object.data'], '1.0') primitive['requests'][index]['nova_object.version'] = '1.0' @classmethod def obj_from_db(cls, context, instance_uuid, db_requests): self = cls(context=context, requests=[], instance_uuid=instance_uuid) if db_requests is not None: requests = jsonutils.loads(db_requests) else: requests = [] for request in requests: request_obj = InstancePCIRequest(count=request['count'], spec=request['spec'], alias_name=request['alias_name'], is_new=request['is_new'], request_id=request['request_id']) request_obj.obj_reset_changes() self.requests.append(request_obj) self.obj_reset_changes() return self @base.remotable_classmethod def get_by_instance_uuid(cls, context, instance_uuid): db_pci_requests = db.instance_extra_get_by_instance_uuid( context, instance_uuid, columns=['pci_requests']) if db_pci_requests is not None: db_pci_requests = db_pci_requests['pci_requests'] return cls.obj_from_db(context, instance_uuid, db_pci_requests) @classmethod def get_by_instance_uuid_and_newness(cls, context, instance_uuid, is_new): requests = cls.get_by_instance_uuid(context, instance_uuid) requests.requests = [x for x in requests.requests if x.new == is_new] return requests @staticmethod def _load_legacy_requests(sysmeta_value, is_new=False): if sysmeta_value is None: return [] requests = [] db_requests = jsonutils.loads(sysmeta_value) for db_request in db_requests: request = InstancePCIRequest(count=db_request['count'], spec=db_request['spec'], alias_name=db_request['alias_name'], is_new=is_new) request.obj_reset_changes() requests.append(request) return requests @classmethod def get_by_instance(cls, context, instance): # NOTE (baoli): not all callers are passing instance as object yet. # Therefore, use the dict syntax in this routine if 'pci_requests' in instance['system_metadata']: # NOTE(danms): This instance hasn't been converted to use # instance_extra yet, so extract the data from sysmeta sysmeta = instance['system_metadata'] _requests = (cls._load_legacy_requests(sysmeta['pci_requests']) + cls._load_legacy_requests( sysmeta.get('new_pci_requests'), is_new=True)) requests = cls(instance_uuid=instance['uuid'], requests=_requests) requests.obj_reset_changes() return requests else: return cls.get_by_instance_uuid(context, instance['uuid']) def to_json(self): blob = [{ 'count': x.count, 'spec': x.spec, 'alias_name': x.alias_name, 'is_new': x.is_new, 'request_id': x.request_id } for x in self.requests] return jsonutils.dumps(blob) @base.remotable def save(self): blob = self.to_json() db.instance_extra_update_by_uuid(self._context, self.instance_uuid, {'pci_requests': blob}) @classmethod def from_request_spec_instance_props(cls, pci_requests): objs = [ InstancePCIRequest(**request) for request in pci_requests['requests'] ] return cls(requests=objs, instance_uuid=pci_requests['instance_uuid'])
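# Sketch of the JSON blob shape that to_json() and obj_from_db() above
# round-trip through the instance_extra.pci_requests column. The stdlib json
# module is used here purely for illustration; Nova goes through the
# jsonutils helper referenced above. The spec and alias contents are only
# example values.
import json

example_pci_requests_blob = json.dumps([{
    'count': 1,
    'spec': [{'vendor_id': '8086', 'product_id': '154d'}],
    'alias_name': 'QuickAssist',  # illustrative alias name
    'is_new': False,
    'request_id': None,
}])

# json.loads(example_pci_requests_blob)[0]['count'] == 1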
class Aggregate(base.NovaPersistentObject, base.NovaObject): # Version 1.0: Initial version # Version 1.1: String attributes updated to support unicode # Version 1.2: Added uuid field # Version 1.3: Added get_by_uuid method VERSION = '1.3' fields = { 'id': fields.IntegerField(), 'uuid': fields.UUIDField(nullable=False), 'name': fields.StringField(), 'hosts': fields.ListOfStringsField(nullable=True), 'metadata': fields.DictOfStringsField(nullable=True), } obj_extra_fields = ['availability_zone'] @staticmethod def _from_db_object(context, aggregate, db_aggregate): for key in aggregate.fields: if key == 'metadata': db_key = 'metadetails' elif key in DEPRECATED_FIELDS and key not in db_aggregate: continue else: db_key = key setattr(aggregate, key, db_aggregate[db_key]) # NOTE: This can be removed when we bump Aggregate to v2.0 aggregate.deleted_at = None aggregate.deleted = False aggregate._context = context aggregate.obj_reset_changes() return aggregate def _assert_no_hosts(self, action): if 'hosts' in self.obj_what_changed(): raise exception.ObjectActionError(action=action, reason='hosts updated inline') @base.remotable_classmethod def get_by_id(cls, context, aggregate_id): db_aggregate = _aggregate_get_from_db(context, aggregate_id) return cls._from_db_object(context, cls(), db_aggregate) @base.remotable_classmethod def get_by_uuid(cls, context, aggregate_uuid): db_aggregate = _aggregate_get_from_db_by_uuid(context, aggregate_uuid) return cls._from_db_object(context, cls(), db_aggregate) @base.remotable def create(self): if self.obj_attr_is_set('id'): raise exception.ObjectActionError(action='create', reason='already created') self._assert_no_hosts('create') updates = self.obj_get_changes() payload = dict(updates) if 'metadata' in updates: # NOTE(danms): For some reason the notification format is weird payload['meta_data'] = payload.pop('metadata') if 'uuid' not in updates: updates['uuid'] = uuidutils.generate_uuid() self.uuid = updates['uuid'] LOG.debug('Generated uuid %(uuid)s for aggregate', dict(uuid=updates['uuid'])) compute_utils.notify_about_aggregate_update(self._context, "create.start", payload) compute_utils.notify_about_aggregate_action( context=self._context, aggregate=self, action=fields.NotificationAction.CREATE, phase=fields.NotificationPhase.START) metadata = updates.pop('metadata', None) db_aggregate = _aggregate_create_in_db(self._context, updates, metadata=metadata) self._from_db_object(self._context, self, db_aggregate) payload['aggregate_id'] = self.id compute_utils.notify_about_aggregate_update(self._context, "create.end", payload) compute_utils.notify_about_aggregate_action( context=self._context, aggregate=self, action=fields.NotificationAction.CREATE, phase=fields.NotificationPhase.END) @base.remotable def save(self): self._assert_no_hosts('save') updates = self.obj_get_changes() payload = {'aggregate_id': self.id} if 'metadata' in updates: payload['meta_data'] = updates['metadata'] compute_utils.notify_about_aggregate_update(self._context, "updateprop.start", payload) updates.pop('id', None) db_aggregate = _aggregate_update_to_db(self._context, self.id, updates) compute_utils.notify_about_aggregate_update(self._context, "updateprop.end", payload) self._from_db_object(self._context, self, db_aggregate) @base.remotable def update_metadata(self, updates): payload = {'aggregate_id': self.id, 'meta_data': updates} compute_utils.notify_about_aggregate_update(self._context, "updatemetadata.start", payload) compute_utils.notify_about_aggregate_action( 
context=self._context, aggregate=self, action=fields.NotificationAction.UPDATE_METADATA, phase=fields.NotificationPhase.START) to_add = {} for key, value in updates.items(): if value is None: try: _metadata_delete_from_db(self._context, self.id, key) except exception.AggregateMetadataNotFound: pass try: self.metadata.pop(key) except KeyError: pass else: to_add[key] = value self.metadata[key] = value _metadata_add_to_db(self._context, self.id, to_add) compute_utils.notify_about_aggregate_update(self._context, "updatemetadata.end", payload) compute_utils.notify_about_aggregate_action( context=self._context, aggregate=self, action=fields.NotificationAction.UPDATE_METADATA, phase=fields.NotificationPhase.END) self.obj_reset_changes(fields=['metadata']) @base.remotable def destroy(self): _aggregate_delete_from_db(self._context, self.id) @base.remotable def add_host(self, host): _host_add_to_db(self._context, self.id, host) if self.hosts is None: self.hosts = [] self.hosts.append(host) self.obj_reset_changes(fields=['hosts']) @base.remotable def delete_host(self, host): _host_delete_from_db(self._context, self.id, host) self.hosts.remove(host) self.obj_reset_changes(fields=['hosts']) @property def availability_zone(self): return self.metadata.get('availability_zone', None)
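# Self-contained sketch of the update_metadata() semantics above: a value of
# None deletes the key (missing keys are ignored), anything else adds or
# overwrites it. The helper operates on a plain dict rather than the
# DB-backed aggregate metadata, and its name is illustrative only.
def apply_metadata_updates(metadata, updates):
    for key, value in updates.items():
        if value is None:
            metadata.pop(key, None)
        else:
            metadata[key] = value
    return metadata

# meta = {'availability_zone': 'az1', 'ssd': 'true'}
# apply_metadata_updates(meta, {'ssd': None, 'pinned': 'true'})
# meta == {'availability_zone': 'az1', 'pinned': 'true'}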
class InstancePayload(base.NotificationPayloadBase): SCHEMA = { 'uuid': ('instance', 'uuid'), 'user_id': ('instance', 'user_id'), 'tenant_id': ('instance', 'project_id'), 'reservation_id': ('instance', 'reservation_id'), 'display_name': ('instance', 'display_name'), 'display_description': ('instance', 'display_description'), 'host_name': ('instance', 'hostname'), 'host': ('instance', 'host'), 'node': ('instance', 'node'), 'os_type': ('instance', 'os_type'), 'architecture': ('instance', 'architecture'), 'availability_zone': ('instance', 'availability_zone'), 'image_uuid': ('instance', 'image_ref'), 'key_name': ('instance', 'key_name'), 'kernel_id': ('instance', 'kernel_id'), 'ramdisk_id': ('instance', 'ramdisk_id'), 'created_at': ('instance', 'created_at'), 'launched_at': ('instance', 'launched_at'), 'terminated_at': ('instance', 'terminated_at'), 'deleted_at': ('instance', 'deleted_at'), 'updated_at': ('instance', 'updated_at'), 'state': ('instance', 'vm_state'), 'power_state': ('instance', 'power_state'), 'task_state': ('instance', 'task_state'), 'progress': ('instance', 'progress'), 'metadata': ('instance', 'metadata'), 'locked': ('instance', 'locked'), 'auto_disk_config': ('instance', 'auto_disk_config') } # Version 1.0: Initial version # Version 1.1: add locked and display_description field # Version 1.2: Add auto_disk_config field # Version 1.3: Add key_name field # Version 1.4: Add BDM related data # Version 1.5: Add updated_at field VERSION = '1.5' fields = { 'uuid': fields.UUIDField(), 'user_id': fields.StringField(nullable=True), 'tenant_id': fields.StringField(nullable=True), 'reservation_id': fields.StringField(nullable=True), 'display_name': fields.StringField(nullable=True), 'display_description': fields.StringField(nullable=True), 'host_name': fields.StringField(nullable=True), 'host': fields.StringField(nullable=True), 'node': fields.StringField(nullable=True), 'os_type': fields.StringField(nullable=True), 'architecture': fields.StringField(nullable=True), 'availability_zone': fields.StringField(nullable=True), 'flavor': fields.ObjectField('FlavorPayload'), 'image_uuid': fields.StringField(nullable=True), 'key_name': fields.StringField(nullable=True), 'kernel_id': fields.StringField(nullable=True), 'ramdisk_id': fields.StringField(nullable=True), 'created_at': fields.DateTimeField(nullable=True), 'launched_at': fields.DateTimeField(nullable=True), 'terminated_at': fields.DateTimeField(nullable=True), 'deleted_at': fields.DateTimeField(nullable=True), 'updated_at': fields.DateTimeField(nullable=True), 'state': fields.InstanceStateField(nullable=True), 'power_state': fields.InstancePowerStateField(nullable=True), 'task_state': fields.InstanceTaskStateField(nullable=True), 'progress': fields.IntegerField(nullable=True), 'ip_addresses': fields.ListOfObjectsField('IpPayload'), 'block_devices': fields.ListOfObjectsField('BlockDevicePayload', nullable=True), 'metadata': fields.DictOfStringsField(), 'locked': fields.BooleanField(), 'auto_disk_config': fields.DiskConfigField() } def __init__(self, instance): super(InstancePayload, self).__init__() network_info = instance.get_network_info() self.ip_addresses = IpPayload.from_network_info(network_info) self.flavor = flavor_payload.FlavorPayload(flavor=instance.flavor) # TODO(gibi): investigate the possibility to use already in scope bdm # when available like in instance.create self.block_devices = BlockDevicePayload.from_instance(instance) self.populate_schema(instance=instance)
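# Simplified stand-in for the SCHEMA-driven population that populate_schema()
# performs for payloads like the one above: each payload field maps to a
# (source keyword, source field) pair and the value is copied across when the
# source provides it. The real NotificationPayloadBase implementation also
# honours versioned-object semantics such as unset fields; this is only a
# sketch with illustrative names.
def populate_from_schema(payload, schema, **sources):
    for payload_field, (source_name, source_field) in schema.items():
        source = sources.get(source_name)
        if source is not None and hasattr(source, source_field):
            setattr(payload, payload_field, getattr(source, source_field))

# class _Payload(object):
#     pass
# class _Instance(object):
#     project_id = 'my-project'
# p = _Payload()
# populate_from_schema(p, {'tenant_id': ('instance', 'project_id')},
#                      instance=_Instance())
# p.tenant_id == 'my-project'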
class Service(base.NovaPersistentObject, base.NovaObject, base.NovaObjectDictCompat): # Version 1.0: Initial version # Version 1.1: Added compute_node nested object # Version 1.2: String attributes updated to support unicode # Version 1.3: ComputeNode version 1.5 # Version 1.4: Added use_slave to get_by_compute_host # Version 1.5: ComputeNode version 1.6 # Version 1.6: ComputeNode version 1.7 # Version 1.7: ComputeNode version 1.8 # Version 1.8: ComputeNode version 1.9 # Version 1.9: ComputeNode version 1.10 # Version 1.10: Changes behaviour of loading compute_node # Version 1.11: Added get_by_host_and_binary # Version 1.12: ComputeNode version 1.11 # Version 1.13: Added last_seen_up # Version 1.14: Added forced_down # Version 1.15: ComputeNode version 1.12 # Version 1.16: Added version # Version 1.17: ComputeNode version 1.13 # Version 1.18: ComputeNode version 1.14 # Version 1.19: Added get_minimum_version() # Version 1.20: Added get_minimum_version_multi() # Version 1.21: Added uuid # Version 1.22: Added get_by_uuid() VERSION = '1.22' fields = { 'id': fields.IntegerField(read_only=True), 'uuid': fields.UUIDField(), 'host': fields.StringField(nullable=True), 'binary': fields.StringField(nullable=True), 'topic': fields.StringField(nullable=True), 'report_count': fields.IntegerField(), 'disabled': fields.BooleanField(), 'disabled_reason': fields.StringField(nullable=True), 'availability_zone': fields.StringField(nullable=True), 'compute_node': fields.ObjectField('ComputeNode'), 'last_seen_up': fields.DateTimeField(nullable=True), 'forced_down': fields.BooleanField(), 'version': fields.IntegerField(), } _MIN_VERSION_CACHE = {} _SERVICE_VERSION_CACHING = False def __init__(self, *args, **kwargs): # NOTE(danms): We're going against the rules here and overriding # init. The reason is that we want to *ensure* that we're always # setting the current service version on our objects, overriding # whatever else might be set in the database, or otherwise (which # is the normal reason not to override init). # # We also need to do this here so that it's set on the client side # all the time, such that create() and save() operations will # include the current service version. 
if 'version' in kwargs: raise exception.ObjectActionError( action='init', reason='Version field is immutable') super(Service, self).__init__(*args, **kwargs) self.version = SERVICE_VERSION def obj_make_compatible_from_manifest(self, primitive, target_version, version_manifest): super(Service, self).obj_make_compatible_from_manifest( primitive, target_version, version_manifest) _target_version = versionutils.convert_version_to_tuple(target_version) if _target_version < (1, 21) and 'uuid' in primitive: del primitive['uuid'] if _target_version < (1, 16) and 'version' in primitive: del primitive['version'] if _target_version < (1, 14) and 'forced_down' in primitive: del primitive['forced_down'] if _target_version < (1, 13) and 'last_seen_up' in primitive: del primitive['last_seen_up'] if _target_version < (1, 10): # service.compute_node was not lazy-loaded, we need to provide it # when called self._do_compute_node(self._context, primitive, version_manifest) def _do_compute_node(self, context, primitive, version_manifest): try: target_version = version_manifest['ComputeNode'] # NOTE(sbauza): Ironic deployments can have multiple # nodes for the same service, but for keeping same behaviour, # returning only the first elem of the list compute = objects.ComputeNodeList.get_all_by_host( context, primitive['host'])[0] except Exception: return primitive['compute_node'] = compute.obj_to_primitive( target_version=target_version, version_manifest=version_manifest) @staticmethod def _from_db_object(context, service, db_service): allow_missing = ('availability_zone',) for key in service.fields: if key in allow_missing and key not in db_service: continue if key == 'compute_node': # NOTE(sbauza); We want to only lazy-load compute_node continue elif key == 'version': # NOTE(danms): Special handling of the version field, since # it is read_only and set in our init. setattr(service, base.get_attrname(key), db_service[key]) elif key == 'uuid' and not db_service.get(key): # Leave uuid off the object if undefined in the database # so that it will be generated below. 
continue else: service[key] = db_service[key] service._context = context service.obj_reset_changes() return service def obj_load_attr(self, attrname): if not self._context: raise exception.OrphanedObjectError(method='obj_load_attr', objtype=self.obj_name()) LOG.debug("Lazy-loading '%(attr)s' on %(name)s id %(id)s", {'attr': attrname, 'name': self.obj_name(), 'id': self.id, }) if attrname != 'compute_node': raise exception.ObjectActionError( action='obj_load_attr', reason='attribute %s not lazy-loadable' % attrname) if self.binary == 'nova-compute': # Only n-cpu services have attached compute_node(s) compute_nodes = objects.ComputeNodeList.get_all_by_host( self._context, self.host) else: # NOTE(sbauza): Previous behaviour was raising a ServiceNotFound, # we keep it for backwards compatibility raise exception.ServiceNotFound(service_id=self.id) # NOTE(sbauza): Ironic deployments can have multiple nodes # for the same service, but for keeping same behaviour, returning only # the first elem of the list self.compute_node = compute_nodes[0] @base.remotable_classmethod def get_by_id(cls, context, service_id): db_service = db.service_get(context, service_id) return cls._from_db_object(context, cls(), db_service) @base.remotable_classmethod def get_by_uuid(cls, context, service_uuid): db_service = db.service_get_by_uuid(context, service_uuid) return cls._from_db_object(context, cls(), db_service) @base.remotable_classmethod def get_by_host_and_topic(cls, context, host, topic): db_service = db.service_get_by_host_and_topic(context, host, topic) return cls._from_db_object(context, cls(), db_service) @base.remotable_classmethod def get_by_host_and_binary(cls, context, host, binary): try: db_service = db.service_get_by_host_and_binary(context, host, binary) except exception.HostBinaryNotFound: return return cls._from_db_object(context, cls(), db_service) @staticmethod @db.select_db_reader_mode def _db_service_get_by_compute_host(context, host, use_slave=False): return db.service_get_by_compute_host(context, host) @base.remotable_classmethod def get_by_compute_host(cls, context, host, use_slave=False): db_service = cls._db_service_get_by_compute_host(context, host, use_slave=use_slave) return cls._from_db_object(context, cls(), db_service) # NOTE(ndipanov): This is deprecated and should be removed on the next # major version bump @base.remotable_classmethod def get_by_args(cls, context, host, binary): db_service = db.service_get_by_host_and_binary(context, host, binary) return cls._from_db_object(context, cls(), db_service) def _check_minimum_version(self): """Enforce that we are not older than the minimum version. This is a loose check to avoid creating or updating our service record if we would do so with a version that is older than the current minimum of all services. This could happen if we were started with older code by accident, either due to a rollback or an old and un-updated node suddenly coming back onto the network. There is technically a race here between the check and the update, but since the minimum version should always roll forward and never backwards, we don't need to worry about doing it atomically. Further, the consequence for getting this wrong is minor, in that we'll just fail to send messages that other services understand.
""" if not self.obj_attr_is_set('version'): return if not self.obj_attr_is_set('binary'): return minver = self.get_minimum_version(self._context, self.binary) if minver > self.version: raise exception.ServiceTooOld(thisver=self.version, minver=minver) @base.remotable def create(self): if self.obj_attr_is_set('id'): raise exception.ObjectActionError(action='create', reason='already created') self._check_minimum_version() updates = self.obj_get_changes() if 'uuid' not in updates: updates['uuid'] = uuidutils.generate_uuid() self.uuid = updates['uuid'] db_service = db.service_create(self._context, updates) self._from_db_object(self._context, self, db_service) self._send_notification(fields.NotificationAction.CREATE) @base.remotable def save(self): updates = self.obj_get_changes() updates.pop('id', None) self._check_minimum_version() db_service = db.service_update(self._context, self.id, updates) self._from_db_object(self._context, self, db_service) self._send_status_update_notification(updates) def _send_status_update_notification(self, updates): # Note(gibi): We do not trigger notification on version as that field # is always dirty, which would cause that nova sends notification on # every other field change. See the comment in save() too. if set(updates.keys()).intersection( {'disabled', 'disabled_reason', 'forced_down'}): self._send_notification(fields.NotificationAction.UPDATE) def _send_notification(self, action): payload = service_notification.ServiceStatusPayload(self) service_notification.ServiceStatusNotification( publisher=notification.NotificationPublisher.from_service_obj( self), event_type=notification.EventType( object='service', action=action), priority=fields.NotificationPriority.INFO, payload=payload).emit(self._context) @base.remotable def destroy(self): db.service_destroy(self._context, self.id) self._send_notification(fields.NotificationAction.DELETE) @classmethod def enable_min_version_cache(cls): cls.clear_min_version_cache() cls._SERVICE_VERSION_CACHING = True @classmethod def clear_min_version_cache(cls): cls._MIN_VERSION_CACHE = {} @staticmethod @db.select_db_reader_mode def _db_service_get_minimum_version(context, binaries, use_slave=False): return db.service_get_minimum_version(context, binaries) @base.remotable_classmethod def get_minimum_version_multi(cls, context, binaries, use_slave=False): if not all(binary.startswith('nova-') for binary in binaries): LOG.warning('get_minimum_version called with likely-incorrect ' 'binaries `%s\'', ','.join(binaries)) raise exception.ObjectActionError(action='get_minimum_version', reason='Invalid binary prefix') if (not cls._SERVICE_VERSION_CACHING or any(binary not in cls._MIN_VERSION_CACHE for binary in binaries)): min_versions = cls._db_service_get_minimum_version( context, binaries, use_slave=use_slave) if min_versions: min_versions = {binary: version or 0 for binary, version in min_versions.items()} cls._MIN_VERSION_CACHE.update(min_versions) else: min_versions = {binary: cls._MIN_VERSION_CACHE[binary] for binary in binaries} if min_versions: version = min(min_versions.values()) else: version = 0 # NOTE(danms): Since our return value is not controlled by object # schema, be explicit here. version = int(version) return version @base.remotable_classmethod def get_minimum_version(cls, context, binary, use_slave=False): return cls.get_minimum_version_multi(context, [binary], use_slave=use_slave)
class RequestSpec(base.NovaObject): # Version 1.0: Initial version # Version 1.1: ImageMeta version 1.6 # Version 1.2: SchedulerRetries version 1.1 # Version 1.3: InstanceGroup version 1.10 # Version 1.4: ImageMeta version 1.7 # Version 1.5: Added get_by_instance_uuid(), create(), save() # Version 1.6: Added requested_destination # Version 1.7: Added destroy() # Version 1.8: Added security_groups # Version 1.9: Added user_id # Version 1.10: Added network_metadata # Version 1.11: Added is_bfv # Version 1.12: Added requested_resources VERSION = '1.12' fields = { 'id': fields.IntegerField(), 'image': fields.ObjectField('ImageMeta', nullable=True), 'numa_topology': fields.ObjectField('InstanceNUMATopology', nullable=True), 'pci_requests': fields.ObjectField('InstancePCIRequests', nullable=True), # TODO(mriedem): The project_id shouldn't be nullable since the # scheduler relies on it being set. 'project_id': fields.StringField(nullable=True), 'user_id': fields.StringField(nullable=True), 'availability_zone': fields.StringField(nullable=True), 'flavor': fields.ObjectField('Flavor', nullable=False), 'num_instances': fields.IntegerField(default=1), # NOTE(alex_xu): This field won't be persisted. 'ignore_hosts': fields.ListOfStringsField(nullable=True), # NOTE(mriedem): In reality, you can only ever have one # host in the force_hosts list. The fact this is a list # is a mistake perpetuated over time. 'force_hosts': fields.ListOfStringsField(nullable=True), # NOTE(mriedem): In reality, you can only ever have one # node in the force_nodes list. The fact this is a list # is a mistake perpetuated over time. 'force_nodes': fields.ListOfStringsField(nullable=True), # NOTE(alex_xu): This field won't be persisted. 'requested_destination': fields.ObjectField('Destination', nullable=True, default=None), # NOTE(alex_xu): This field won't be persisted. 'retry': fields.ObjectField('SchedulerRetries', nullable=True), 'limits': fields.ObjectField('SchedulerLimits', nullable=True), 'instance_group': fields.ObjectField('InstanceGroup', nullable=True), # NOTE(sbauza): Since hints are depending on running filters, we prefer # to leave the API correctly validating the hints per the filters and # just provide to the RequestSpec object a free-form dictionary 'scheduler_hints': fields.DictOfListOfStringsField(nullable=True), 'instance_uuid': fields.UUIDField(), 'security_groups': fields.ObjectField('SecurityGroupList'), # NOTE(alex_xu): This field won't be persisted. 'network_metadata': fields.ObjectField('NetworkMetadata'), 'is_bfv': fields.BooleanField(), # NOTE(gibi): Eventually we want to store every resource request as # RequestGroup objects here. However currently the flavor based # resources like vcpu, ram, disk, and flavor.extra_spec based resources # are not handled this way. See the Todo in from_components() where # requested_resources are set. # NOTE(alex_xu): This field won't be persisted. 
'requested_resources': fields.ListOfObjectsField('RequestGroup', nullable=True, default=None) } def obj_make_compatible(self, primitive, target_version): super(RequestSpec, self).obj_make_compatible(primitive, target_version) target_version = versionutils.convert_version_to_tuple(target_version) if target_version < (1, 12): if 'requested_resources' in primitive: del primitive['requested_resources'] if target_version < (1, 11) and 'is_bfv' in primitive: del primitive['is_bfv'] if target_version < (1, 10): if 'network_metadata' in primitive: del primitive['network_metadata'] if target_version < (1, 9): if 'user_id' in primitive: del primitive['user_id'] if target_version < (1, 8): if 'security_groups' in primitive: del primitive['security_groups'] if target_version < (1, 6): if 'requested_destination' in primitive: del primitive['requested_destination'] def obj_load_attr(self, attrname): if attrname not in REQUEST_SPEC_OPTIONAL_ATTRS: raise exception.ObjectActionError( action='obj_load_attr', reason='attribute %s not lazy-loadable' % attrname) if attrname == 'security_groups': self.security_groups = objects.SecurityGroupList(objects=[]) return if attrname == 'network_metadata': self.network_metadata = objects.NetworkMetadata(physnets=set(), tunneled=False) return # NOTE(sbauza): In case the primitive was not providing that field # because of a previous RequestSpec version, we want to default # that field in order to have the same behaviour. self.obj_set_defaults(attrname) @property def vcpus(self): return self.flavor.vcpus @property def memory_mb(self): return self.flavor.memory_mb @property def root_gb(self): return self.flavor.root_gb @property def ephemeral_gb(self): return self.flavor.ephemeral_gb @property def swap(self): return self.flavor.swap def _image_meta_from_image(self, image): if isinstance(image, objects.ImageMeta): self.image = image elif isinstance(image, dict): # NOTE(sbauza): Until Nova is fully providing an ImageMeta object # for getting properties, we still need to hydrate it here # TODO(sbauza): To be removed once all RequestSpec hydrations are # done on the conductor side and if the image is an ImageMeta self.image = objects.ImageMeta.from_dict(image) else: self.image = None def _from_instance(self, instance): if isinstance(instance, obj_instance.Instance): # NOTE(sbauza): Instance should normally be a NovaObject... getter = getattr elif isinstance(instance, dict): # NOTE(sbauza): ... 
but there are some cases where request_spec # has an instance key as a dictionary, just because # select_destinations() is getting a request_spec dict made by # sched_utils.build_request_spec() # TODO(sbauza): To be removed once all RequestSpec hydrations are # done on the conductor side getter = lambda x, y: x.get(y) else: # If the instance is None, there is no reason to set the fields return instance_fields = [ 'numa_topology', 'pci_requests', 'uuid', 'project_id', 'user_id', 'availability_zone' ] for field in instance_fields: if field == 'uuid': setattr(self, 'instance_uuid', getter(instance, field)) elif field == 'pci_requests': self._from_instance_pci_requests(getter(instance, field)) elif field == 'numa_topology': self._from_instance_numa_topology(getter(instance, field)) else: setattr(self, field, getter(instance, field)) def _from_instance_pci_requests(self, pci_requests): if isinstance(pci_requests, dict): pci_req_cls = objects.InstancePCIRequests self.pci_requests = pci_req_cls.from_request_spec_instance_props( pci_requests) else: self.pci_requests = pci_requests def _from_instance_numa_topology(self, numa_topology): if isinstance(numa_topology, six.string_types): numa_topology = objects.InstanceNUMATopology.obj_from_primitive( jsonutils.loads(numa_topology)) self.numa_topology = numa_topology def _from_flavor(self, flavor): if isinstance(flavor, objects.Flavor): self.flavor = flavor elif isinstance(flavor, dict): # NOTE(sbauza): Again, request_spec is primitived by # sched_utils.build_request_spec() and passed to # select_destinations() like this # TODO(sbauza): To be removed once all RequestSpec hydrations are # done on the conductor side self.flavor = objects.Flavor(**flavor) def _from_retry(self, retry_dict): self.retry = (SchedulerRetries.from_dict(self._context, retry_dict) if retry_dict else None) def _populate_group_info(self, filter_properties): if filter_properties.get('instance_group'): # New-style group information as a NovaObject, we can directly set # the field self.instance_group = filter_properties.get('instance_group') elif filter_properties.get('group_updated') is True: # Old-style group information having ugly dict keys containing sets # NOTE(sbauza): Can be dropped once select_destinations is removed policies = list(filter_properties.get('group_policies')) hosts = list(filter_properties.get('group_hosts')) members = list(filter_properties.get('group_members')) self.instance_group = objects.InstanceGroup(policy=policies[0], hosts=hosts, members=members) # InstanceGroup.uuid is not nullable so only set it if we got it. group_uuid = filter_properties.get('group_uuid') if group_uuid: self.instance_group.uuid = group_uuid # hosts has to be not part of the updates for saving the object self.instance_group.obj_reset_changes(['hosts']) else: # Set the value anyway to avoid any call to obj_attr_is_set for it self.instance_group = None def _from_limits(self, limits): if isinstance(limits, dict): self.limits = SchedulerLimits.from_dict(limits) else: # Already a SchedulerLimits object. self.limits = limits def _from_hints(self, hints_dict): if hints_dict is None: self.scheduler_hints = None return self.scheduler_hints = { hint: value if isinstance(value, list) else [value] for hint, value in hints_dict.items() } @classmethod def from_primitives(cls, context, request_spec, filter_properties): """Returns a new RequestSpec object by hydrating it from legacy dicts. Deprecated. A RequestSpec object is created early in the boot process using the from_components method. 
        That object will either be passed to places that require it, or it
        can be looked up with get_by_instance_uuid. This method can be
        removed when there are no longer any callers. Because the method is
        not remotable it is not tied to object versioning.

        This helper is not a reason to keep the legacy dicts in the nova
        codebase; it is just a temporary solution for populating the Spec
        object until we get rid of scheduler_utils' build_request_spec() and
        the filter_properties hydration in the conductor.

        :param context: a context object
        :param request_spec: An old-style request_spec dictionary
        :param filter_properties: An old-style filter_properties dictionary
        """
        num_instances = request_spec.get('num_instances', 1)
        spec = cls(context, num_instances=num_instances)
        # Hydrate from request_spec first
        image = request_spec.get('image')
        spec._image_meta_from_image(image)
        instance = request_spec.get('instance_properties')
        spec._from_instance(instance)
        flavor = request_spec.get('instance_type')
        spec._from_flavor(flavor)
        # Hydrate now from filter_properties
        spec.ignore_hosts = filter_properties.get('ignore_hosts')
        spec.force_hosts = filter_properties.get('force_hosts')
        spec.force_nodes = filter_properties.get('force_nodes')
        retry = filter_properties.get('retry', {})
        spec._from_retry(retry)
        limits = filter_properties.get('limits', {})
        spec._from_limits(limits)
        spec._populate_group_info(filter_properties)
        scheduler_hints = filter_properties.get('scheduler_hints', {})
        spec._from_hints(scheduler_hints)
        spec.requested_destination = filter_properties.get(
            'requested_destination')

        # NOTE(sbauza): Default the other fields that are not part of the
        # original contract
        spec.obj_set_defaults()
        return spec

    def get_scheduler_hint(self, hint_name, default=None):
        """Convenience helper for accessing a particular scheduler hint since
        it is hydrated by putting a single item into a list.

        To reduce complexity, this helper returns the single value if the
        requested hint is a one-element list, and otherwise returns the value
        directly (i.e. the list). If the hint does not exist (or
        scheduler_hints is None), it returns the default value.

        :param hint_name: name of the hint
        :param default: the default value if the hint is not there
        """
        if (not self.obj_attr_is_set('scheduler_hints') or
                self.scheduler_hints is None):
            return default
        hint_val = self.scheduler_hints.get(hint_name, default)
        return (hint_val[0] if isinstance(hint_val, list) and
                len(hint_val) == 1 else hint_val)

    def _to_legacy_image(self):
        return base.obj_to_primitive(self.image) if (
            self.obj_attr_is_set('image') and self.image) else {}

    def _to_legacy_instance(self):
        # NOTE(sbauza): Since the RequestSpec only persists a few Instance
        # fields, we can only return a dict.
        instance = {}
        instance_fields = [
            'numa_topology', 'pci_requests', 'project_id', 'user_id',
            'availability_zone', 'instance_uuid'
        ]
        for field in instance_fields:
            if not self.obj_attr_is_set(field):
                continue
            if field == 'instance_uuid':
                instance['uuid'] = getattr(self, field)
            else:
                instance[field] = getattr(self, field)

        flavor_fields = ['root_gb', 'ephemeral_gb', 'memory_mb', 'vcpus']
        if not self.obj_attr_is_set('flavor'):
            return instance
        for field in flavor_fields:
            instance[field] = getattr(self.flavor, field)
        return instance

    def _to_legacy_group_info(self):
        # NOTE(sbauza): Since this is only needed until the AffinityFilters
        # are modified to use the RequestSpec object directly, we need to
        # keep the existing dictionary as a primitive.
        return {
            'group_updated': True,
            'group_hosts': set(self.instance_group.hosts),
            'group_policies': set([self.instance_group.policy]),
            'group_members': set(self.instance_group.members),
            'group_uuid': self.instance_group.uuid,
        }

    def to_legacy_request_spec_dict(self):
        """Returns a legacy request_spec dict from the RequestSpec object.

        Since we need to manage backwards compatibility and rolling upgrades
        within our RPC API, we have to provide a helper that turns the
        RequestSpec object into a legacy dict until we drop support for old
        Scheduler RPC API versions.

        If you don't understand why this method is needed, please don't use
        it.
        """
        req_spec = {}
        if not self.obj_attr_is_set('num_instances'):
            req_spec['num_instances'] = self.fields['num_instances'].default
        else:
            req_spec['num_instances'] = self.num_instances
        req_spec['image'] = self._to_legacy_image()
        req_spec['instance_properties'] = self._to_legacy_instance()
        if self.obj_attr_is_set('flavor'):
            req_spec['instance_type'] = self.flavor
        else:
            req_spec['instance_type'] = {}
        return req_spec

    def to_legacy_filter_properties_dict(self):
        """Returns a legacy filter_properties dict from the RequestSpec
        object.

        Since we need to manage backwards compatibility and rolling upgrades
        within our RPC API, we have to provide a helper that turns the
        RequestSpec object into a legacy dict until we drop support for old
        Scheduler RPC API versions.

        If you don't understand why this method is needed, please don't use
        it.
        """
        filt_props = {}
        if self.obj_attr_is_set('ignore_hosts') and self.ignore_hosts:
            filt_props['ignore_hosts'] = self.ignore_hosts
        if self.obj_attr_is_set('force_hosts') and self.force_hosts:
            filt_props['force_hosts'] = self.force_hosts
        if self.obj_attr_is_set('force_nodes') and self.force_nodes:
            filt_props['force_nodes'] = self.force_nodes
        if self.obj_attr_is_set('retry') and self.retry:
            filt_props['retry'] = self.retry.to_dict()
        if self.obj_attr_is_set('limits') and self.limits:
            filt_props['limits'] = self.limits.to_dict()
        if self.obj_attr_is_set('instance_group') and self.instance_group:
            filt_props.update(self._to_legacy_group_info())
        if self.obj_attr_is_set('scheduler_hints') and self.scheduler_hints:
            # NOTE(sbauza): We need to backport all the hints correctly since
            # we had to hydrate the field by putting a single item into a
            # list.
            filt_props['scheduler_hints'] = {
                hint: self.get_scheduler_hint(hint)
                for hint in self.scheduler_hints
            }
        if self.obj_attr_is_set(
                'requested_destination') and self.requested_destination:
            filt_props['requested_destination'] = self.requested_destination
        return filt_props

    @classmethod
    def from_components(cls, context, instance_uuid, image, flavor,
                        numa_topology, pci_requests, filter_properties,
                        instance_group, availability_zone,
                        security_groups=None, project_id=None, user_id=None,
                        port_resource_requests=None):
        """Returns a new RequestSpec object hydrated by various components.

        This helper is useful in creating the RequestSpec from the various
        objects that are assembled early in the boot process. This method
        creates a complete RequestSpec object with all properties set or
        intentionally left blank.
        :param context: a context object
        :param instance_uuid: the uuid of the instance to schedule
        :param image: a dict of properties for an image or volume
        :param flavor: a flavor NovaObject
        :param numa_topology: InstanceNUMATopology or None
        :param pci_requests: InstancePCIRequests
        :param filter_properties: a dict of properties for scheduling
        :param instance_group: None or an instance group NovaObject
        :param availability_zone: an availability_zone string
        :param security_groups: A SecurityGroupList object. If None, don't
                                set security_groups on the resulting object.
        :param project_id: The project_id for the requestspec (should match
                           the instance project_id).
        :param user_id: The user_id for the requestspec (should match the
                        instance user_id).
        :param port_resource_requests: a list of RequestGroup objects
                                       representing the resource needs of the
                                       neutron ports
        """
        spec_obj = cls(context)
        spec_obj.num_instances = 1
        spec_obj.instance_uuid = instance_uuid
        spec_obj.instance_group = instance_group
        if spec_obj.instance_group is None and filter_properties:
            spec_obj._populate_group_info(filter_properties)
        spec_obj.project_id = project_id or context.project_id
        spec_obj.user_id = user_id or context.user_id
        spec_obj._image_meta_from_image(image)
        spec_obj._from_flavor(flavor)
        spec_obj._from_instance_pci_requests(pci_requests)
        spec_obj._from_instance_numa_topology(numa_topology)
        spec_obj.ignore_hosts = filter_properties.get('ignore_hosts')
        spec_obj.force_hosts = filter_properties.get('force_hosts')
        spec_obj.force_nodes = filter_properties.get('force_nodes')
        spec_obj._from_retry(filter_properties.get('retry', {}))
        spec_obj._from_limits(filter_properties.get('limits', {}))
        spec_obj._from_hints(filter_properties.get('scheduler_hints', {}))
        spec_obj.availability_zone = availability_zone
        if security_groups is not None:
            spec_obj.security_groups = security_groups
        spec_obj.requested_destination = filter_properties.get(
            'requested_destination')

        # TODO(gibi): do the creation of the unnumbered group and any
        # numbered group from the flavor by moving the logic from
        # nova.scheduler.utils.resources_from_request_spec() here. See also
        # the comment in the definition of requested_resources field.
        spec_obj.requested_resources = []
        if port_resource_requests:
            spec_obj.requested_resources.extend(port_resource_requests)

        # NOTE(sbauza): Default the other fields that are not part of the
        # original contract
        spec_obj.obj_set_defaults()
        return spec_obj

    def ensure_project_and_user_id(self, instance):
        if 'project_id' not in self or self.project_id is None:
            self.project_id = instance.project_id
        if 'user_id' not in self or self.user_id is None:
            self.user_id = instance.user_id

    def ensure_network_metadata(self, instance):
        if not (instance.info_cache and instance.info_cache.network_info):
            return

        physnets = set([])
        # Start from False; a single tunneled VIF below is enough to mark
        # the instance as using tunneled networking.
        tunneled = False

        # physical_network and tunneled might not be in the cache for old
        # instances that haven't had their info_cache healed yet
        for vif in instance.info_cache.network_info:
            physnet = vif.get('network', {}).get('meta', {}).get(
                'physical_network', None)
            if physnet:
                physnets.add(physnet)
            tunneled |= vif.get('network', {}).get('meta', {}).get(
                'tunneled', False)

        self.network_metadata = objects.NetworkMetadata(physnets=physnets,
                                                        tunneled=tunneled)

    @staticmethod
    def _from_db_object(context, spec, db_spec):
        spec_obj = spec.obj_from_primitive(jsonutils.loads(db_spec['spec']))
        for key in spec.fields:
            # Load these from the db model not the serialized object within,
            # though they should match.
if key in ['id', 'instance_uuid']: setattr(spec, key, db_spec[key]) elif key in ('requested_destination', 'requested_resources', 'network_metadata'): # Do not override what we already have in the object as this # field is not persisted. If save() is called after # requested_resources, requested_destination or # network_metadata is populated, it will reset the field to # None and we'll lose what is set (but not persisted) on the # object. continue elif key in ('retry', 'ignore_hosts'): # NOTE(takashin): Do not override the 'retry' or 'ignore_hosts' # fields which are not persisted. They are not lazy-loadable # fields. If they are not set, set None. if not spec.obj_attr_is_set(key): setattr(spec, key, None) elif key in spec_obj: setattr(spec, key, getattr(spec_obj, key)) spec._context = context if 'instance_group' in spec and spec.instance_group: # NOTE(mriedem): We could have a half-baked instance group with no # uuid if some legacy translation was performed on this spec in the # past. In that case, try to workaround the issue by getting the # group uuid from the scheduler hint. if 'uuid' not in spec.instance_group: spec.instance_group.uuid = spec.get_scheduler_hint('group') # NOTE(danms): We don't store the full instance group in # the reqspec since it would be stale almost immediately. # Instead, load it by uuid here so it's up-to-date. try: spec.instance_group = objects.InstanceGroup.get_by_uuid( context, spec.instance_group.uuid) except exception.InstanceGroupNotFound: # NOTE(danms): Instance group may have been deleted spec.instance_group = None spec.obj_reset_changes() return spec @staticmethod @db.api_context_manager.reader def _get_by_instance_uuid_from_db(context, instance_uuid): db_spec = context.session.query(api_models.RequestSpec).filter_by( instance_uuid=instance_uuid).first() if not db_spec: raise exception.RequestSpecNotFound(instance_uuid=instance_uuid) return db_spec @base.remotable_classmethod def get_by_instance_uuid(cls, context, instance_uuid): db_spec = cls._get_by_instance_uuid_from_db(context, instance_uuid) return cls._from_db_object(context, cls(), db_spec) @staticmethod @db.api_context_manager.writer def _create_in_db(context, updates): db_spec = api_models.RequestSpec() db_spec.update(updates) db_spec.save(context.session) return db_spec def _get_update_primitives(self): """Serialize object to match the db model. We store copies of embedded objects rather than references to these objects because we want a snapshot of the request at this point. If the references changed or were deleted we would not be able to reschedule this instance under the same conditions as it was originally scheduled with. """ updates = self.obj_get_changes() db_updates = None # NOTE(alaski): The db schema is the full serialized object in a # 'spec' column. If anything has changed we rewrite the full thing. 
if updates: # NOTE(danms): Don't persist the could-be-large and could-be-stale # properties of InstanceGroup spec = self.obj_clone() if 'instance_group' in spec and spec.instance_group: spec.instance_group.members = None spec.instance_group.hosts = None # NOTE(mriedem): Don't persist retries, requested_destination, # requested_resources or ignored hosts since those are per-request for excluded in ('retry', 'requested_destination', 'requested_resources', 'ignore_hosts'): if excluded in spec and getattr(spec, excluded): setattr(spec, excluded, None) # NOTE(stephenfin): Don't persist network metadata since we have # no need for it after scheduling if 'network_metadata' in spec and spec.network_metadata: del spec.network_metadata db_updates = {'spec': jsonutils.dumps(spec.obj_to_primitive())} if 'instance_uuid' in updates: db_updates['instance_uuid'] = updates['instance_uuid'] return db_updates @base.remotable def create(self): if self.obj_attr_is_set('id'): raise exception.ObjectActionError(action='create', reason='already created') updates = self._get_update_primitives() if not updates: raise exception.ObjectActionError(action='create', reason='no fields are set') db_spec = self._create_in_db(self._context, updates) self._from_db_object(self._context, self, db_spec) @staticmethod @db.api_context_manager.writer def _save_in_db(context, instance_uuid, updates): # FIXME(sbauza): Provide a classmethod when oslo.db bug #1520195 is # fixed and released db_spec = RequestSpec._get_by_instance_uuid_from_db( context, instance_uuid) db_spec.update(updates) db_spec.save(context.session) return db_spec @base.remotable def save(self): updates = self._get_update_primitives() if updates: db_spec = self._save_in_db(self._context, self.instance_uuid, updates) self._from_db_object(self._context, self, db_spec) self.obj_reset_changes() @staticmethod @db.api_context_manager.writer def _destroy_in_db(context, instance_uuid): result = context.session.query(api_models.RequestSpec).filter_by( instance_uuid=instance_uuid).delete() if not result: raise exception.RequestSpecNotFound(instance_uuid=instance_uuid) @base.remotable def destroy(self): self._destroy_in_db(self._context, self.instance_uuid) @staticmethod @db.api_context_manager.writer def _destroy_bulk_in_db(context, instance_uuids): return context.session.query(api_models.RequestSpec).filter( api_models.RequestSpec.instance_uuid.in_(instance_uuids)).\ delete(synchronize_session=False) @classmethod def destroy_bulk(cls, context, instance_uuids): return cls._destroy_bulk_in_db(context, instance_uuids) def reset_forced_destinations(self): """Clears the forced destination fields from the RequestSpec object. This method is for making sure we don't ask the scheduler to give us again the same destination(s) without persisting the modifications. """ self.force_hosts = None self.force_nodes = None # NOTE(sbauza): Make sure we don't persist this, we need to keep the # original request for the forced hosts self.obj_reset_changes(['force_hosts', 'force_nodes']) @property def maps_requested_resources(self): """Returns True if this RequestSpec needs to map requested_resources to resource providers, False otherwise. """ return 'requested_resources' in self and self.requested_resources def _is_valid_group_rp_mapping(self, group_rp_mapping, placement_allocations, provider_traits): """Decides if the mapping is valid from resources and traits perspective. 
        :param group_rp_mapping: A list of (RequestGroup, RP UUID) two-tuples
            representing a mapping between request groups in this RequestSpec
            and RPs from the allocation. It contains every RequestGroup in
            this RequestSpec, but the mapping might not be valid from the
            resources and traits perspective.
        :param placement_allocations: The overall allocation made by the
            scheduler for this RequestSpec
        :param provider_traits: A dict keyed by resource provider uuids
            containing the list of traits the given RP has. This dict
            contains info only about RPs appearing in the
            placement_allocations param.
        :return: True if each group's resource and trait request can be
            fulfilled from the RP it is mapped to. False otherwise.
        """
        # Check that the traits are matching for each group - rp pair in
        # this mapping
        for group, rp_uuid in group_rp_mapping:
            if not group.required_traits.issubset(provider_traits[rp_uuid]):
                return False

        # TODO(gibi): add support for groups with forbidden_traits and
        # aggregates

        # Check that each group can consume the requested resources from the
        # rp that it is mapped to in the current mapping. Consume each
        # group's request from the allocation; if anything drops below zero,
        # then this is not a solution.
        rcs = set()
        allocs = copy.deepcopy(placement_allocations)
        for group, rp_uuid in group_rp_mapping:
            rp_allocs = allocs[rp_uuid]['resources']
            for rc, amount in group.resources.items():
                rcs.add(rc)
                if rc in rp_allocs:
                    rp_allocs[rc] -= amount
                    if rp_allocs[rc] < 0:
                        return False
                else:
                    return False

        # Check that all the allocations are consumed from the resource
        # classes that appear in the request groups. It should never happen
        # that we have a match but also have some leftover if placement
        # returns valid allocation candidates. Except if the leftover in the
        # allocation is due to an RC requested only in the unnumbered group.
        for rp_uuid in allocs:
            rp_allocs = allocs[rp_uuid]['resources']
            for rc, amount in rp_allocs.items():
                if rc in rcs and amount != 0:
                    LOG.debug(
                        'Found valid group - RP mapping %s but there are '
                        'allocations leftover in %s from resource class '
                        '%s', group_rp_mapping, allocs, rc)
                    return False

        # If both the traits and the allocations are OK then the mapping is
        # valid
        return True

    def map_requested_resources_to_providers(self, placement_allocations,
                                             provider_traits):
        """Fill the provider_uuids field in each RequestGroup object in the
        requested_resources field.

        The mapping is generated based on the overall allocation made for
        this RequestSpec, the request in each RequestGroup, and the traits
        of the RPs in the allocation.

        Limitations:
        * only groups with use_same_provider = True are mapped; the
          un-numbered group is not supported.
        * the mapping is generated only based on the resource request and
          the required traits; aggregate membership and forbidden traits are
          not supported.
        * requesting the same resource class in numbered and un-numbered
          groups is not supported.

        We can live with these limitations today as Neutron does not use
        forbidden traits and aggregates in the request, each Neutron port is
        mapped to a numbered group, and the resource classes used by Neutron
        ports are never requested through the flavor extra_spec.

        This is a workaround as placement does not return which RP fulfills
        which granular request group in the allocation candidate request.
        There is a spec proposing a solution in placement:
        https://review.opendev.org/#/c/597601/

        :param placement_allocations: The overall allocation made by the
            scheduler for this RequestSpec
        :param provider_traits: A dict keyed by resource provider uuids
            containing the list of traits the given RP has. This dict
            contains info only about RPs appearing in the
            placement_allocations param.
        """
        if not self.maps_requested_resources:
            # Nothing to do, so let's return early
            return

        for group in self.requested_resources:
            # See the limitations in the func doc above
            if (not group.use_same_provider or
                    group.aggregates or group.forbidden_traits):
                raise NotImplementedError()

        # Iterate through every possible group - RP mapping and try to find
        # a valid one. If there is more than one possible solution then it
        # is enough to find one as these solutions are interchangeable from
        # the backend (e.g. Neutron) perspective.
        LOG.debug(
            'Trying to find a valid group - RP mapping for groups %s to '
            'allocations %s with traits %s', self.requested_resources,
            placement_allocations, provider_traits)

        # This generator first creates permutations with repetition of the
        # RPs with length of the number of groups we have. So if there are
        # 2 RPs (rp1, rp2) and
        # 3 groups (g1, g2, g3)
        # then itertools.product(('rp1', 'rp2'), repeat=3) will be:
        # (rp1, rp1, rp1)
        # (rp1, rp1, rp2)
        # (rp1, rp2, rp1)
        # ...
        # (rp2, rp2, rp2)
        # Then we zip each of these permutations to our group list resulting
        # in a list of lists of group - rp pairs:
        # [[('g1', 'rp1'), ('g2', 'rp1'), ('g3', 'rp1')],
        #  [('g1', 'rp1'), ('g2', 'rp1'), ('g3', 'rp2')],
        #  [('g1', 'rp1'), ('g2', 'rp2'), ('g3', 'rp1')],
        #  ...
        #  [('g1', 'rp2'), ('g2', 'rp2'), ('g3', 'rp2')]]
        # NOTE(gibi): the list() around the zip() below is needed as the
        # algorithm looks into the mapping more than once and zip returns an
        # iterator in py3.x. Still we only need to generate each mapping
        # once, hence the generator expression.
        every_possible_mapping = (
            list(zip(self.requested_resources, rps))
            for rps in itertools.product(
                placement_allocations.keys(),
                repeat=len(self.requested_resources)))
        for mapping in every_possible_mapping:
            if self._is_valid_group_rp_mapping(mapping,
                                               placement_allocations,
                                               provider_traits):
                for group, rp in mapping:
                    # NOTE(gibi): un-numbered group might be mapped to more
                    # than one RP but we do not support that yet here.
                    group.provider_uuids = [rp]
                LOG.debug('Found valid group - RP mapping %s', mapping)
                return

        # If we reached this point then none of the possible mappings was
        # valid. This should never happen as Placement returns allocation
        # candidates based on the overall resource request of the server,
        # including the request of the groups.
        raise ValueError(
            'No valid group - RP mapping is found for '
            'groups %s, allocation %s and provider traits %s' %
            (self.requested_resources, placement_allocations,
             provider_traits))
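# What follows is an illustrative, self-contained sketch and not part of the
# Nova API: it mirrors the brute-force group -> resource provider search that
# RequestSpec.map_requested_resources_to_providers() performs above, but on
# plain dicts so it can be read and run in isolation. The provider UUIDs,
# resource classes and trait names are made up, and the leftover-allocation
# check done by _is_valid_group_rp_mapping() is omitted for brevity. It
# reuses this module's existing copy and itertools imports.
def _example_group_rp_mapping_search():
    # Two numbered request groups: (resources, required_traits)
    groups = [
        ({'NET_BW_IGR_KILOBIT_PER_SEC': 1000}, {'CUSTOM_PHYSNET_2'}),
        ({'NET_BW_EGR_KILOBIT_PER_SEC': 2000}, {'CUSTOM_PHYSNET_2'}),
    ]
    # The overall allocation made by the scheduler, keyed by RP UUID
    allocations = {
        'rp-uuid-1': {'resources': {'NET_BW_IGR_KILOBIT_PER_SEC': 1000}},
        'rp-uuid-2': {'resources': {'NET_BW_EGR_KILOBIT_PER_SEC': 2000}},
    }
    # The traits of each RP appearing in the allocation
    provider_traits = {
        'rp-uuid-1': {'CUSTOM_PHYSNET_2'},
        'rp-uuid-2': {'CUSTOM_PHYSNET_2'},
    }

    def is_valid(mapping):
        # Consume every group's request from the RP it is mapped to; the
        # mapping is invalid if a trait is missing or a resource runs out.
        remaining = copy.deepcopy(allocations)
        for (resources, required_traits), rp_uuid in mapping:
            if not required_traits.issubset(provider_traits[rp_uuid]):
                return False
            for rc, amount in resources.items():
                left = remaining[rp_uuid]['resources'].get(rc, 0) - amount
                if left < 0:
                    return False
                remaining[rp_uuid]['resources'][rc] = left
        return True

    # Try every possible group -> RP assignment, exactly like the generator
    # expression in map_requested_resources_to_providers()
    for rps in itertools.product(allocations, repeat=len(groups)):
        mapping = list(zip(groups, rps))
        if is_valid(mapping):
            # Here: group 0 -> rp-uuid-1, group 1 -> rp-uuid-2
            return mapping
    return None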
class ImageMeta(base.NovaObject):
    # Version 1.0: Initial version
    # Version 1.1: updated ImageMetaProps
    # Version 1.2: ImageMetaProps version 1.2
    # Version 1.3: ImageMetaProps version 1.3
    # Version 1.4: ImageMetaProps version 1.4
    # Version 1.5: ImageMetaProps version 1.5
    # Version 1.6: ImageMetaProps version 1.6
    # Version 1.7: ImageMetaProps version 1.7
    VERSION = '1.7'

    # These are driven by what the image client API returns
    # to Nova from Glance. This is defined in the glance
    # code glance/api/v2/images.py get_base_properties()
    # method. A few things are currently left out:
    # self, file, schema - Nova does not appear to ever use
    # these fields; locations - modelling the arbitrary
    # data in the 'metadata' subfield is non-trivial as
    # there's no clear spec.
    #
    # TODO(ft): In version 2.0, these fields should be nullable:
    # name, checksum, owner, size, virtual_size, container_format,
    # disk_format
    #
    fields = {
        'id': fields.UUIDField(),
        'name': fields.StringField(),
        'status': fields.StringField(),
        'visibility': fields.StringField(),
        'protected': fields.FlexibleBooleanField(),
        'checksum': fields.StringField(),
        'owner': fields.StringField(),
        'size': fields.IntegerField(),
        'virtual_size': fields.IntegerField(),
        'container_format': fields.StringField(),
        'disk_format': fields.StringField(),
        'created_at': fields.DateTimeField(nullable=True),
        'updated_at': fields.DateTimeField(nullable=True),
        'tags': fields.ListOfStringsField(),
        'direct_url': fields.StringField(),
        'min_ram': fields.IntegerField(),
        'min_disk': fields.IntegerField(),
        'properties': fields.ObjectField('ImageMetaProps'),
    }

    obj_relationships = {
        'properties': [('1.0', '1.0'), ('1.1', '1.1'), ('1.2', '1.2'),
                       ('1.3', '1.3'), ('1.4', '1.4'), ('1.5', '1.5'),
                       ('1.6', '1.6'), ('1.7', '1.7'),
                       ],
    }

    @classmethod
    def from_dict(cls, image_meta):
        """Create instance from image metadata dict

        :param image_meta: image metadata dictionary

        Creates a new object instance, initializing from the
        properties associated with the image metadata instance

        :returns: an ImageMeta instance
        """
        if image_meta is None:
            image_meta = {}

        # We must turn 'properties' key dict into an object
        # so copy image_meta to avoid changing original
        image_meta = copy.deepcopy(image_meta)
        image_meta["properties"] = \
            objects.ImageMetaProps.from_dict(
                image_meta.get("properties", {}))

        # Some fields are nullable in the Glance DB schema, but were not
        # marked as such in ImageMeta initially by mistake. To keep
        # compatibility with compute nodes running previous versions these
        # fields are still not nullable in ImageMeta, but the code below
        # converts None to appropriate empty values.
        for fld in NULLABLE_STRING_FIELDS:
            if fld in image_meta and image_meta[fld] is None:
                image_meta[fld] = ''
        for fld in NULLABLE_INTEGER_FIELDS:
            if fld in image_meta and image_meta[fld] is None:
                image_meta[fld] = 0

        return cls(**image_meta)

    @classmethod
    def from_instance(cls, instance):
        """Create instance from instance system metadata

        :param instance: Instance object

        Creates a new object instance, initializing from the
        system metadata "image_*" properties associated with
        instance

        :returns: an ImageMeta instance
        """
        sysmeta = utils.instance_sys_meta(instance)
        image_meta = utils.get_image_from_system_metadata(sysmeta)
        return cls.from_dict(image_meta)
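# A minimal usage sketch for ImageMeta.from_dict() above. The property value
# is hypothetical, and the assertions assume 'checksum' and 'size' are listed
# in this module's NULLABLE_STRING_FIELDS and NULLABLE_INTEGER_FIELDS
# respectively; it only illustrates the None -> empty-value coercion for
# Glance fields that this object version keeps non-nullable.
def _example_image_meta_from_dict():
    image_meta = ImageMeta.from_dict({
        'checksum': None,   # nullable in Glance, coerced to ''
        'size': None,       # nullable in Glance, coerced to 0
        'properties': {'hw_disk_bus': 'virtio'},
    })
    assert image_meta.checksum == ''
    assert image_meta.size == 0
    return image_meta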
class FloatingIP(obj_base.NovaPersistentObject, obj_base.NovaObject): # Version 1.0: Initial version # Version 1.1: Added _get_addresses_by_instance_uuid() # Version 1.2: FixedIP <= version 1.2 # Version 1.3: FixedIP <= version 1.3 # Version 1.4: FixedIP <= version 1.4 VERSION = '1.4' fields = { 'id': fields.IntegerField(), 'address': fields.IPAddressField(), 'fixed_ip_id': fields.IntegerField(nullable=True), 'project_id': fields.UUIDField(nullable=True), 'host': fields.StringField(nullable=True), 'auto_assigned': fields.BooleanField(), 'pool': fields.StringField(nullable=True), 'interface': fields.StringField(nullable=True), 'fixed_ip': fields.ObjectField('FixedIP', nullable=True), } def obj_make_compatible(self, primitive, target_version): target_version = utils.convert_version_to_tuple(target_version) if target_version < (1, 2) and 'fixed_ip' in primitive: self.fixed_ip.obj_make_compatible( primitive['fixed_ip']['nova_object.data'], '1.1') primitive['fixed_ip']['nova_object.version'] = '1.1' elif target_version < (1, 3) and self.obj_attr_is_set('fixed_ip'): self.fixed_ip.obj_make_compatible( primitive['fixed_ip']['nova_object.data'], '1.2') primitive['fixed_ip']['nova_object.version'] = '1.2' elif target_version < (1, 4) and self.obj_attr_is_set('fixed_ip'): self.fixed_ip.obj_make_compatible( primitive['fixed_ip']['nova_object.data'], '1.3') primitive['fixed_ip']['nova_object.version'] = '1.3' @staticmethod def _from_db_object(context, floatingip, db_floatingip, expected_attrs=None): if expected_attrs is None: expected_attrs = [] for field in floatingip.fields: if field not in FLOATING_IP_OPTIONAL_ATTRS: floatingip[field] = db_floatingip[field] if ('fixed_ip' in expected_attrs and db_floatingip['fixed_ip'] is not None): floatingip.fixed_ip = objects.FixedIP._from_db_object( context, objects.FixedIP(context), db_floatingip['fixed_ip']) floatingip._context = context floatingip.obj_reset_changes() return floatingip def obj_load_attr(self, attrname): if attrname not in FLOATING_IP_OPTIONAL_ATTRS: raise exception.ObjectActionError( action='obj_load_attr', reason='attribute %s is not lazy-loadable' % attrname) if not self._context: raise exception.OrphanedObjectError(method='obj_load_attr', objtype=self.obj_name()) if self.fixed_ip_id is not None: self.fixed_ip = objects.FixedIP.get_by_id( self._context, self.fixed_ip_id, expected_attrs=['network']) else: self.fixed_ip = None @obj_base.remotable_classmethod def get_by_id(cls, context, id): db_floatingip = db.floating_ip_get(context, id) # XXX joins fixed.instance return cls._from_db_object(context, cls(context), db_floatingip, expected_attrs=['fixed_ip']) @obj_base.remotable_classmethod def get_by_address(cls, context, address): db_floatingip = db.floating_ip_get_by_address(context, str(address)) return cls._from_db_object(context, cls(context), db_floatingip) @obj_base.remotable_classmethod def get_pool_names(cls, context): return [x['name'] for x in db.floating_ip_get_pools(context)] @obj_base.remotable_classmethod def allocate_address(cls, context, project_id, pool, auto_assigned=False): return db.floating_ip_allocate_address(context, project_id, pool, auto_assigned=auto_assigned) @obj_base.remotable_classmethod def associate(cls, context, floating_address, fixed_address, host): db_fixed = db.floating_ip_fixed_ip_associate(context, str(floating_address), str(fixed_address), host) if db_fixed is None: return None floating = FloatingIP( context=context, address=floating_address, host=host, fixed_ip_id=db_fixed['id'], 
fixed_ip=objects.FixedIP._from_db_object( context, objects.FixedIP(context), db_fixed, expected_attrs=['network'])) return floating @obj_base.remotable_classmethod def deallocate(cls, context, address): return db.floating_ip_deallocate(context, str(address)) @obj_base.remotable_classmethod def destroy(cls, context, address): db.floating_ip_destroy(context, str(address)) @obj_base.remotable_classmethod def disassociate(cls, context, address): db_fixed = db.floating_ip_disassociate(context, str(address)) return cls(context=context, address=address, fixed_ip_id=db_fixed['id'], fixed_ip=objects.FixedIP._from_db_object( context, objects.FixedIP(context), db_fixed, expected_attrs=['network'])) @obj_base.remotable_classmethod def _get_addresses_by_instance_uuid(cls, context, instance_uuid): return db.instance_floating_address_get_all(context, instance_uuid) @classmethod def get_addresses_by_instance(cls, context, instance): return cls._get_addresses_by_instance_uuid(context, instance['uuid']) @obj_base.remotable def save(self, context): updates = self.obj_get_changes() if 'address' in updates: raise exception.ObjectActionError(action='save', reason='address is not mutable') if 'fixed_ip_id' in updates: reason = 'fixed_ip_id is not mutable' raise exception.ObjectActionError(action='save', reason=reason) # NOTE(danms): Make sure we don't pass the calculated fixed_ip # relationship to the DB update method updates.pop('fixed_ip', None) db_floatingip = db.floating_ip_update(context, str(self.address), updates) self._from_db_object(context, self, db_floatingip)
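# A hedged, standalone sketch (not used by Nova) of the backlevel pattern in
# FloatingIP.obj_make_compatible() above: given a requested FloatingIP
# version, pick the newest FixedIP version that FloatingIP version may carry,
# per the version history comments on the class. The helper name and the
# tuple-based history table below are illustrative only.
def _example_pick_fixed_ip_version(target_floating_ip_version):
    # (FloatingIP version, newest FixedIP version it can embed)
    history = [((1, 2), '1.2'), ((1, 3), '1.3'), ((1, 4), '1.4')]
    fixed_ip_version = '1.1'
    for parent_version, child_version in history:
        if target_floating_ip_version >= parent_version:
            fixed_ip_version = child_version
    return fixed_ip_version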
class BlockDeviceMapping(base.NovaPersistentObject, base.NovaObject, base.NovaObjectDictCompat): # Version 1.0: Initial version # Version 1.1: Add instance_uuid to get_by_volume_id method # Version 1.2: Instance version 1.14 # Version 1.3: Instance version 1.15 # Version 1.4: Instance version 1.16 # Version 1.5: Instance version 1.17 # Version 1.6: Instance version 1.18 # Version 1.7: Add update_or_create method # Version 1.8: Instance version 1.19 # Version 1.9: Instance version 1.20 # Version 1.10: Changed source_type field to BlockDeviceSourceTypeField. # Version 1.11: Changed destination_type field to # BlockDeviceDestinationTypeField. # Version 1.12: Changed device_type field to BlockDeviceTypeField. # Version 1.13: Instance version 1.21 # Version 1.14: Instance version 1.22 VERSION = '1.14' fields = { 'id': fields.IntegerField(), 'instance_uuid': fields.UUIDField(), 'instance': fields.ObjectField('Instance', nullable=True), 'source_type': fields.BlockDeviceSourceTypeField(nullable=True), 'destination_type': fields.BlockDeviceDestinationTypeField(nullable=True), 'guest_format': fields.StringField(nullable=True), 'device_type': fields.BlockDeviceTypeField(nullable=True), 'disk_bus': fields.StringField(nullable=True), 'boot_index': fields.IntegerField(nullable=True), 'device_name': fields.StringField(nullable=True), 'delete_on_termination': fields.BooleanField(default=False), 'snapshot_id': fields.StringField(nullable=True), 'volume_id': fields.StringField(nullable=True), 'volume_size': fields.IntegerField(nullable=True), 'image_id': fields.StringField(nullable=True), 'no_device': fields.BooleanField(default=False), 'connection_info': fields.StringField(nullable=True), } obj_relationships = { 'instance': [('1.0', '1.13'), ('1.2', '1.14'), ('1.3', '1.15'), ('1.4', '1.16'), ('1.5', '1.17'), ('1.6', '1.18'), ('1.8', '1.19'), ('1.9', '1.20'), ('1.13', '1.21'), ('1.14', '1.22')], } @staticmethod def _from_db_object(context, block_device_obj, db_block_device, expected_attrs=None): if expected_attrs is None: expected_attrs = [] for key in block_device_obj.fields: if key in BLOCK_DEVICE_OPTIONAL_ATTRS: continue block_device_obj[key] = db_block_device[key] if 'instance' in expected_attrs: my_inst = objects.Instance(context) my_inst._from_db_object(context, my_inst, db_block_device['instance']) block_device_obj.instance = my_inst block_device_obj._context = context block_device_obj.obj_reset_changes() return block_device_obj def _create(self, context, update_or_create=False): """Create the block device record in the database. In case the id field is set on the object, and if the instance is set raise an ObjectActionError. Resets all the changes on the object. Returns None :param context: security context used for database calls :param update_or_create: consider existing block devices for the instance based on the device name and swap, and only update the ones that match. Normally only used when creating the instance for the first time. 
""" cell_type = cells_opts.get_cell_type() if cell_type == 'api': raise exception.ObjectActionError( action='create', reason='BlockDeviceMapping cannot be ' 'created in the API cell.') if self.obj_attr_is_set('id'): raise exception.ObjectActionError(action='create', reason='already created') updates = self.obj_get_changes() if 'instance' in updates: raise exception.ObjectActionError(action='create', reason='instance assigned') cells_create = update_or_create or None if update_or_create: db_bdm = db.block_device_mapping_update_or_create(context, updates, legacy=False) else: db_bdm = db.block_device_mapping_create(context, updates, legacy=False) self._from_db_object(context, self, db_bdm) # NOTE(alaski): bdms are looked up by instance uuid and device_name # so if we sync up with no device_name an entry will be created that # will not be found on a later update_or_create call and a second bdm # create will occur. if cell_type == 'compute' and db_bdm.get('device_name') is not None: cells_api = cells_rpcapi.CellsAPI() cells_api.bdm_update_or_create_at_top(context, self, create=cells_create) @base.remotable def create(self): self._create(self._context) @base.remotable def update_or_create(self): self._create(self._context, update_or_create=True) @base.remotable def destroy(self): if not self.obj_attr_is_set('id'): raise exception.ObjectActionError(action='destroy', reason='already destroyed') db.block_device_mapping_destroy(self._context, self.id) delattr(self, base.get_attrname('id')) cell_type = cells_opts.get_cell_type() if cell_type == 'compute': cells_api = cells_rpcapi.CellsAPI() cells_api.bdm_destroy_at_top(self._context, self.instance_uuid, device_name=self.device_name, volume_id=self.volume_id) @base.remotable def save(self): updates = self.obj_get_changes() if 'instance' in updates: raise exception.ObjectActionError(action='save', reason='instance changed') updates.pop('id', None) updated = db.block_device_mapping_update(self._context, self.id, updates, legacy=False) if not updated: raise exception.BDMNotFound(id=self.id) self._from_db_object(self._context, self, updated) cell_type = cells_opts.get_cell_type() if cell_type == 'compute': create = False # NOTE(alaski): If the device name has just been set this bdm # likely does not exist in the parent cell and we should create it. 
            # If this is a modification of the device name we should update
            # rather than create, which is why None is used here instead of
            # True.
            if 'device_name' in updates:
                create = None
            cells_api = cells_rpcapi.CellsAPI()
            cells_api.bdm_update_or_create_at_top(self._context, self,
                                                  create=create)

    @base.remotable_classmethod
    def get_by_volume_id(cls, context, volume_id,
                         instance_uuid=None, expected_attrs=None):
        if expected_attrs is None:
            expected_attrs = []
        db_bdm = db.block_device_mapping_get_by_volume_id(
            context, volume_id, _expected_cols(expected_attrs))
        if not db_bdm:
            raise exception.VolumeBDMNotFound(volume_id=volume_id)
        # NOTE (ndipanov): Move this to the db layer into a
        # get_by_instance_and_volume_id method
        if instance_uuid and instance_uuid != db_bdm['instance_uuid']:
            raise exception.InvalidVolume(
                reason=_("Volume does not belong to the "
                         "requested instance."))
        return cls._from_db_object(context, cls(), db_bdm,
                                   expected_attrs=expected_attrs)

    @property
    def is_root(self):
        return self.boot_index == 0

    @property
    def is_volume(self):
        return (
            self.destination_type == fields.BlockDeviceDestinationType.VOLUME)

    @property
    def is_image(self):
        return self.source_type == fields.BlockDeviceSourceType.IMAGE

    def get_image_mapping(self):
        return block_device.BlockDeviceDict(self).get_image_mapping()

    def obj_load_attr(self, attrname):
        if attrname not in BLOCK_DEVICE_OPTIONAL_ATTRS:
            raise exception.ObjectActionError(
                action='obj_load_attr',
                reason='attribute %s not lazy-loadable' % attrname)
        if not self._context:
            raise exception.OrphanedObjectError(method='obj_load_attr',
                                                objtype=self.obj_name())

        LOG.debug("Lazy-loading `%(attr)s' on %(name)s uuid %(uuid)s", {
            'attr': attrname,
            'name': self.obj_name(),
            # This object version has no 'uuid' field, so log the uuid of
            # the owning instance instead.
            'uuid': self.instance_uuid,
        })
        self.instance = objects.Instance.get_by_uuid(self._context,
                                                     self.instance_uuid)
        self.obj_reset_changes(fields=['instance'])
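# A small usage sketch (field values are hypothetical) for the
# BlockDeviceMapping predicates above: a boot-from-volume root disk is both
# the root device (boot_index == 0) and volume-backed
# (destination_type == 'volume'), and is not image-backed.
def _example_bdm_predicates():
    bdm = BlockDeviceMapping(
        source_type=fields.BlockDeviceSourceType.VOLUME,
        destination_type=fields.BlockDeviceDestinationType.VOLUME,
        boot_index=0,
        volume_id='11111111-2222-3333-4444-555555555555')
    assert bdm.is_root
    assert bdm.is_volume
    assert not bdm.is_image
    return bdm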