class MyObj(base.MagnumPersistentObject, base.MagnumObject):
    VERSION = '1.0'

    fields = {
        'foo': fields.IntegerField(),
        'bar': fields.StringField(),
        'missing': fields.StringField(),
    }

    def obj_load_attr(self, attrname):
        setattr(self, attrname, 'loaded!')

    @base.remotable_classmethod
    def query(cls, context):
        obj = cls(context)
        obj.foo = 1
        obj.bar = 'bar'
        obj.obj_reset_changes()
        return obj

    @base.remotable
    def marco(self, context):
        return 'polo'

    @base.remotable
    def update_test(self, context):
        if context.project_id == 'alternate':
            self.bar = 'alternate-context'
        else:
            self.bar = 'updated'

    @base.remotable
    def save(self, context):
        self.obj_reset_changes()

    @base.remotable
    def refresh(self, context):
        self.foo = 321
        self.bar = 'refreshed'
        self.obj_reset_changes()

    @base.remotable
    def modify_save_modify(self, context):
        self.bar = 'meow'
        self.save(context)
        self.foo = 42
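
# The fixture above relies on oslo.versionedobjects change tracking. A
# minimal standalone sketch of that behavior, assuming only
# oslo.versionedobjects is installed; 'Demo' is an illustrative name, not
# part of the original test module.
from oslo_versionedobjects import base, fields


@base.VersionedObjectRegistry.register
class Demo(base.VersionedObject):
    VERSION = '1.0'
    fields = {'foo': fields.IntegerField(), 'bar': fields.StringField()}


obj = Demo()
obj.foo = 1
assert obj.obj_what_changed() == {'foo'}   # field is dirty until reset
obj.obj_reset_changes()                    # what query()/save() do above
assert obj.obj_what_changed() == set()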
class QosMinimumBandwidthRule(QosRule):

    db_model = qos_db_model.QosMinimumBandwidthRule

    fields = {
        'min_kbps': obj_fields.IntegerField(nullable=True),
        'direction': common_types.FlowDirectionEnumField(),
    }

    rule_type = qos_consts.RULE_TYPE_MINIMUM_BANDWIDTH

    def obj_make_compatible(self, primitive, target_version):
        _target_version = versionutils.convert_version_to_tuple(
            target_version)
        if _target_version < (1, 2):
            raise exception.IncompatibleObjectVersion(
                objver=target_version, objname="QosMinimumBandwidthRule")
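
# For reference, the version gate above is plain tuple comparison. A small
# sketch using only oslo.utils (the rule class itself is not needed);
# 'supports' is an illustrative helper name.
from oslo_utils import versionutils


def supports(target_version, minimum=(1, 2)):
    # convert_version_to_tuple('1.1') -> (1, 1); tuple comparison then
    # implements the "older than minimum" check used by the rule above.
    return versionutils.convert_version_to_tuple(target_version) >= minimum


print(supports('1.1'))  # False: IncompatibleObjectVersion would be raised
print(supports('1.3'))  # True: no downgrade work needed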
class PortBindingLevel(base.NeutronDbObject):
    LOG.info('%s(): caller(): %s',
             log_utils.get_fname(1), log_utils.get_fname(2))

    # Version 1.0: Initial version
    # Version 1.1: Added segment_id
    VERSION = '1.1'

    db_model = ml2_models.PortBindingLevel

    primary_keys = ['port_id', 'host', 'level']

    fields = {
        'port_id': common_types.UUIDField(),
        'host': obj_fields.StringField(),
        'level': obj_fields.IntegerField(),
        'driver': obj_fields.StringField(nullable=True),
        'segment': obj_fields.ObjectField(
            'NetworkSegment', nullable=True),
        # arguably redundant but allows us to define the foreign key for the
        # 'segment' synthetic field inside the NetworkSegment definition
        'segment_id': common_types.UUIDField(nullable=True),
    }

    synthetic_fields = ['segment']

    foreign_keys = {
        'Port': {'port_id': 'id'},
    }

    @classmethod
    def get_objects(cls, context, _pager=None, validate_filters=True,
                    **kwargs):
        LOG.info('%s(): caller(): %s',
                 log_utils.get_fname(1), log_utils.get_fname(2))
        if not _pager:
            _pager = base.Pager()
        if not _pager.sorts:
            # (NOTE) True means ASC, False is DESC
            _pager.sorts = [('port_id', True), ('level', True)]
        return super(PortBindingLevel, cls).get_objects(
            context, _pager, validate_filters, **kwargs)

    def obj_make_compatible(self, primitive, target_version):
        LOG.info('%s(): caller(): %s',
                 log_utils.get_fname(1), log_utils.get_fname(2))
        _target_version = versionutils.convert_version_to_tuple(
            target_version)
        if _target_version < (1, 1):
            primitive.pop('segment_id', None)
class GreAllocation(base.NeutronDbObject, ml2_base.SegmentAllocation):
    # Version 1.0: Initial version
    VERSION = '1.0'

    db_model = gre_model.GreAllocation

    primary_keys = ['gre_id']

    fields = {
        'gre_id': obj_fields.IntegerField(),
        'allocated': obj_fields.BooleanField(default=False),
    }

    network_type = n_const.TYPE_GRE

    @classmethod
    def get_segmentation_id(cls):
        return cls.db_model.get_segmentation_id()
class MyObject(base.VersionedObject):
    fields = {'diglett': fields.IntegerField()}

    @base.remotable
    def remotable_method(self):
        pass

    @classmethod
    @base.remotable
    def remotable_classmethod(cls):
        pass

    def non_remotable_method(self):
        pass

    @classmethod
    def non_remotable_classmethod(cls):
        pass
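
# A hedged sketch of how such an object round-trips through the primitive
# form used for RPC transport, assuming oslo.versionedobjects; 'Diglett'
# is an illustrative class name, not part of the original test.
from oslo_versionedobjects import base, fields


@base.VersionedObjectRegistry.register
class Diglett(base.VersionedObject):
    VERSION = '1.0'
    fields = {'diglett': fields.IntegerField()}


obj = Diglett(diglett=7)
primitive = obj.obj_to_primitive()          # JSON-friendly dict for RPC
clone = Diglett.obj_from_primitive(primitive)
assert clone.diglett == 7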
class RouterL3AgentBinding(base.NeutronDbObject):
    # Version 1.0: Initial version
    VERSION = '1.0'

    db_model = l3agent.RouterL3AgentBinding

    primary_keys = ['router_id', 'l3_agent_id']

    fields = {
        'router_id': common_types.UUIDField(),
        'l3_agent_id': common_types.UUIDField(),
        'binding_index': obj_fields.IntegerField(
            default=l3agent.LOWEST_BINDING_INDEX),
    }

    # TODO(ihrachys) return OVO objects, not models
    # TODO(ihrachys) move under the Agent object class
    @classmethod
    @db_api.CONTEXT_READER
    def get_l3_agents_by_router_ids(cls, context, router_ids):
        query = context.session.query(l3agent.RouterL3AgentBinding)
        query = query.options(joinedload('l3_agent')).filter(
            l3agent.RouterL3AgentBinding.router_id.in_(router_ids))
        return [db_obj.l3_agent for db_obj in query.all()]

    @classmethod
    @db_api.CONTEXT_READER
    def get_down_router_bindings(cls, context, cutoff):
        query = (context.session.query(l3agent.RouterL3AgentBinding)
                 .join(agent_model.Agent)
                 .filter(agent_model.Agent.heartbeat_timestamp < cutoff,
                         agent_model.Agent.admin_state_up)
                 .outerjoin(
                     l3_attrs.RouterExtraAttributes,
                     l3_attrs.RouterExtraAttributes.router_id ==
                     l3agent.RouterL3AgentBinding.router_id)
                 .filter(sa.or_(
                     l3_attrs.RouterExtraAttributes.ha == sql.false(),
                     l3_attrs.RouterExtraAttributes.ha == sql.null())))
        bindings = [cls._load_object(context, db_obj)
                    for db_obj in query.all()]
        return bindings
class WatchData(heat_base.HeatObject, base.VersionedObjectDictCompat):

    fields = {
        'id': fields.IntegerField(),
        'data': heat_fields.JsonField(nullable=True),
        'watch_rule_id': fields.StringField(),
        'watch_rule': fields.ObjectField('WatchRule'),
        'created_at': fields.DateTimeField(read_only=True),
        'updated_at': fields.DateTimeField(nullable=True),
    }

    @staticmethod
    def _from_db_object(context, rule, db_data):
        from conveyor.conveyorheat.objects import watch_rule
        for field in rule.fields:
            if field == 'watch_rule':
                rule[field] = watch_rule.WatchRule._from_db_object(
                    context, watch_rule.WatchRule(), db_data['watch_rule'])
            else:
                rule[field] = db_data[field]
        rule._context = context
        rule.obj_reset_changes()
        return rule

    @classmethod
    def create(cls, context, values):
        db_data = db_api.watch_data_create(context, values)
        return cls._from_db_object(context, cls(), db_data)

    @classmethod
    def get_all(cls, context):
        return [cls._from_db_object(context, cls(), db_data)
                for db_data in db_api.watch_data_get_all(context)]

    @classmethod
    def get_all_by_watch_rule_id(cls, context, watch_rule_id):
        return (cls._from_db_object(context, cls(), db_data)
                for db_data in db_api.watch_data_get_all_by_watch_rule_id(
                    context, watch_rule_id))
class Service(base.OrchestratorObject, base.VersionedObjectDictCompat):
    """DC Orchestrator service object."""

    fields = {
        'id': fields.UUIDField(),
        'host': fields.StringField(),
        'binary': fields.StringField(),
        'topic': fields.StringField(),
        'disabled': fields.BooleanField(),
        'disabled_reason': fields.StringField(nullable=True),
        'created_at': fields.DateTimeField(),
        'updated_at': fields.DateTimeField(),
        'deleted_at': fields.DateTimeField(nullable=True),
        'deleted': fields.IntegerField(nullable=True),
    }

    @classmethod
    def create(cls, context, service_id, host=None, binary=None, topic=None):
        obj = db_api.service_create(context, service_id=service_id,
                                    host=host, binary=binary, topic=topic)
        return cls._from_db_object(context, cls(context), obj)

    @classmethod
    def get(cls, context, service_id):
        obj = db_api.service_get(context, service_id)
        return cls._from_db_object(context, cls(), obj)

    @classmethod
    def get_all(cls, context):
        objs = db_api.service_get_all(context)
        return [cls._from_db_object(context, cls(), obj) for obj in objs]

    @classmethod
    def update(cls, context, obj_id, values=None):
        obj = db_api.service_update(context, obj_id, values=values)
        return cls._from_db_object(context, cls(), obj)

    @classmethod
    def delete(cls, context, obj_id):
        db_api.service_delete(context, obj_id)
class ExtraDhcpOpt(base.NeutronDbObject):
    # Version 1.0: Initial version
    VERSION = '1.0'

    db_model = models.ExtraDhcpOpt

    fields = {
        'id': common_types.UUIDField(),
        'port_id': common_types.UUIDField(),
        'opt_name': obj_fields.StringField(),
        'opt_value': obj_fields.StringField(),
        'ip_version': obj_fields.IntegerField(),
    }

    fields_no_update = ['port_id']

    foreign_keys = {
        'Port': {'port_id': 'id'},
    }
class EthernetClassification(ClassificationBase):
    VERSION = '1.0'

    db_model = models.EthernetClassification

    fields = {
        'ethertype': obj_fields.IntegerField(nullable=True),
        'src_addr': obj_fields.StringField(nullable=True),
        'dst_addr': obj_fields.StringField(nullable=True),
    }

    def create(self):
        with db_api.autonested_transaction(self.obj_context.session):
            super(ClassificationBase, self).create()

    @classmethod
    def get_object(cls, context, **kwargs):
        with db_api.autonested_transaction(context.session):
            obj = super(EthernetClassification, cls).get_object(
                context, c_type='ethernet', **kwargs)
        return obj
class ClusterLock(senlin_base.SenlinObject, base.VersionedObjectDictCompat):
    """Senlin cluster lock object."""

    fields = {
        'cluster_id': fields.UUIDField(),
        'action_ids': fields.ListOfStringsField(),
        'semaphore': fields.IntegerField(),
    }

    @classmethod
    def acquire(cls, cluster_id, action_id, scope):
        return db_api.cluster_lock_acquire(cluster_id, action_id, scope)

    @classmethod
    def release(cls, cluster_id, action_id, scope):
        return db_api.cluster_lock_release(cluster_id, action_id, scope)

    @classmethod
    def steal(cls, cluster_id, action_id):
        return db_api.cluster_lock_steal(cluster_id, action_id)
class ContainerPCIRequest(base.ZunPersistentObject, base.ZunObject):
    # Version 1.0: Initial version
    # Version 1.1: Added request_id
    VERSION = '1.1'

    fields = {
        'count': fields.IntegerField(),
        'spec': fields.ListOfDictOfNullableStringsField(),
        'alias_name': fields.StringField(nullable=True),
        # NOTE(moshele): is_new is deprecated and should be removed
        # on major version bump
        'is_new': fields.BooleanField(default=False),
        'request_id': fields.UUIDField(nullable=True),
    }

    def obj_load_attr(self, attr):
        setattr(self, attr, None)

    def obj_make_compatible(self, primitive, target_version):
        target_version = versionutils.convert_version_to_tuple(
            target_version)
        if target_version < (1, 1) and 'request_id' in primitive:
            del primitive['request_id']
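
# Illustrative check of the downgrade hook above, assuming the class is
# importable; the primitive dict is hand-built here and the uuid value is
# a made-up placeholder.
primitive = {'count': 1, 'spec': [], 'is_new': False,
             'request_id': '9f0c1b2e-0000-0000-0000-000000000000'}
req = ContainerPCIRequest()
req.obj_make_compatible(primitive, '1.0')   # pre-1.1 consumer
assert 'request_id' not in primitive        # stripped for old consumers
req.obj_make_compatible(primitive, '1.1')   # no-op at 1.1 and later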
class CapsuleInitContainer(ContainerBase):
    # Version 1.0: Initial version
    VERSION = '1.0'

    container_type = consts.TYPE_CAPSULE_INIT_CONTAINER

    fields = {
        'capsule_id': fields.IntegerField(nullable=False),
    }

    @base.remotable_classmethod
    def list_by_capsule_id(cls, context, capsule_id):
        """Return a list of Container objects by capsule_id.

        :param context: Security context.
        :param capsule_id: The id of a capsule.
        :returns: a list of :class:`Container` objects.
        """
        db_containers = dbapi.list_containers(
            context, cls.container_type,
            filters={'capsule_id': capsule_id})
        return Container._from_db_object_list(db_containers, cls, context)
class Subnet(base.VersionedObject):
    """Represents a subnet."""
    # Version 1.0: Initial version
    VERSION = '1.0'

    fields = {
        'cidr': fields.StringField(nullable=True),
        'dns': fields.ListOfStringsField(),
        'gateway': fields.StringField(),
        'ips': fields.ListOfStringsField(),
        'routes': fields.ListOfStringsField(),
        'version': fields.IntegerField(nullable=True),
    }

    def __init__(self, cidr=None, dns=None, gateway=None, ips=None,
                 routes=None, **kwargs):
        dns = dns or set()
        ips = ips or set()
        routes = routes or set()
        version = kwargs.pop('version', None)
        if cidr and not version:
            version = netaddr.IPNetwork(cidr).version
        super(Subnet, self).__init__(cidr=cidr, dns=dns, gateway=gateway,
                                     ips=ips, routes=routes, version=version)

    def as_netaddr(self):
        """Convenience function to get cidr as a netaddr object."""
        return netaddr.IPNetwork(self.cidr)
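
# The version inference in __init__ comes straight from netaddr; a quick
# standalone check (netaddr is the only dependency).
import netaddr

print(netaddr.IPNetwork('10.0.0.0/24').version)    # 4
print(netaddr.IPNetwork('2001:db8::/64').version)  # 6
# This is the value Subnet stores when 'version' is not passed explicitly.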
class Network(base.DrydockPersistentObject, base.DrydockObject):
    VERSION = '1.0'

    fields = {
        'name': ovo_fields.StringField(),
        'site': ovo_fields.StringField(),
        'metalabels': ovo_fields.DictOfNullableStringsField(),
        'cidr': ovo_fields.StringField(),
        'vlan_id': ovo_fields.StringField(nullable=True),
        'routedomain': ovo_fields.StringField(nullable=True),
        'mtu': ovo_fields.IntegerField(nullable=True),
        'dns_domain': ovo_fields.StringField(nullable=True),
        'dns_servers': ovo_fields.StringField(nullable=True),
        # Keys of ranges are 'type', 'start', 'end'
        'ranges': ovo_fields.ListOfDictOfNullableStringsField(),
        # Keys of routes are 'subnet', 'routedomain', 'gateway', 'metric'
        'routes': ovo_fields.ListOfDictOfNullableStringsField(),
        'dhcp_relay_self_ip': ovo_fields.StringField(nullable=True),
        'dhcp_relay_upstream_target': ovo_fields.StringField(nullable=True),
    }

    def __init__(self, **kwargs):
        super(Network, self).__init__(**kwargs)

    # Network is keyed on name
    def get_id(self):
        return self.get_name()

    def get_name(self):
        return self.name

    def get_default_gateway(self):
        for r in getattr(self, 'routes', []):
            if r.get('subnet', '') == '0.0.0.0/0':
                return r.get('gateway', None)
        return None
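
# A standalone sketch of the default-route lookup above, with 'routes'
# shaped like the field documented in the class (list of dicts); the
# addresses are illustrative.
routes = [
    {'subnet': '10.1.0.0/16', 'gateway': '10.0.0.3', 'metric': '100'},
    {'subnet': '0.0.0.0/0', 'gateway': '10.0.0.1', 'metric': '10'},
]


def default_gateway(routes):
    # Mirrors Network.get_default_gateway(): the first route whose subnet
    # is the IPv4 default prefix wins.
    for r in routes:
        if r.get('subnet', '') == '0.0.0.0/0':
            return r.get('gateway')
    return None


print(default_gateway(routes))  # 10.0.0.1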
class PortBindingLevel(base.NeutronDbObject):
    # Version 1.0: Initial version
    VERSION = '1.0'

    db_model = ml2_models.PortBindingLevel

    primary_keys = ['port_id', 'host', 'level']

    fields = {
        'port_id': common_types.UUIDField(),
        'host': obj_fields.StringField(),
        'level': obj_fields.IntegerField(),
        'driver': obj_fields.StringField(nullable=True),
        'segment': obj_fields.ObjectField('NetworkSegment', nullable=True),
    }

    synthetic_fields = ['segment']

    foreign_keys = {
        'Port': {'port_id': 'id'},
    }

    @classmethod
    def get_objects(cls, context, _pager=None, validate_filters=True,
                    **kwargs):
        if not _pager:
            _pager = base.Pager()
        if not _pager.sorts:
            # (NOTE) True means ASC, False is DESC
            _pager.sorts = [('port_id', True), ('level', True)]
        return super(PortBindingLevel, cls).get_objects(
            context, _pager, validate_filters, **kwargs)
class Network(osv_base.VersionedObject):
    """Represents a network."""
    # Version 1.0: Initial version
    VERSION = '1.0'

    fields = {
        'id': fields.UUIDField(),
        'bridge': fields.StringField(),
        'label': fields.StringField(),
        'subnets': fields.ObjectField('SubnetList'),
        'multi_host': fields.BooleanField(),
        'should_provide_bridge': fields.BooleanField(),
        'should_provide_vlan': fields.BooleanField(),
        'bridge_interface': fields.StringField(nullable=True),
        'vlan': fields.IntegerField(nullable=True),
    }

    def __init__(self, **kwargs):
        kwargs.setdefault('subnets', objects.subnet.SubnetList(objects=[]))
        kwargs.setdefault('multi_host', False)
        kwargs.setdefault('should_provide_bridge', False)
        kwargs.setdefault('should_provide_vlan', False)
        super(Network, self).__init__(**kwargs)
class VolumeProperties(base.CinderObject, base.CinderObjectDictCompat):
    # Version 1.0: Initial version
    # Version 1.1: Added group_id and group_type_id
    VERSION = '1.1'

    # TODO(dulek): We need this class for the initial move of
    # volume_properties to OVO, but it should be removed as soon as
    # possible. Most of the data here is already in request_spec and its
    # volume; the outstanding items are reservations and qos_specs. The
    # first can be moved into request_spec and the second added as a
    # relationship on the volume_type field, after which volume_properties
    # (and resource_properties) in request_spec won't be needed at all.
    fields = {
        'attach_status': fields.StringField(nullable=True),
        'availability_zone': fields.StringField(nullable=True),
        'cgsnapshot_id': fields.UUIDField(nullable=True),
        'consistencygroup_id': fields.UUIDField(nullable=True),
        'group_id': fields.UUIDField(nullable=True),
        'display_description': fields.StringField(nullable=True),
        'display_name': fields.StringField(nullable=True),
        'encryption_key_id': fields.UUIDField(nullable=True),
        'metadata': fields.DictOfStringsField(nullable=True),
        'multiattach': fields.BooleanField(nullable=True),
        'project_id': fields.StringField(nullable=True),
        'qos_specs': fields.DictOfStringsField(nullable=True),
        'replication_status': fields.StringField(nullable=True),
        'reservations': fields.ListOfStringsField(nullable=True),
        'size': fields.IntegerField(nullable=True),
        'snapshot_id': fields.UUIDField(nullable=True),
        'source_replicaid': fields.UUIDField(nullable=True),
        'source_volid': fields.UUIDField(nullable=True),
        'status': fields.StringField(nullable=True),
        'user_id': fields.StringField(nullable=True),
        'volume_type_id': fields.UUIDField(nullable=True),
        'group_type_id': fields.UUIDField(nullable=True),
    }
class HealthRegistry(senlin_base.SenlinObject,
                     base.VersionedObjectDictCompat):
    """Senlin health registry object."""

    fields = {
        'id': fields.UUIDField(),
        'cluster_id': fields.UUIDField(),
        'check_type': fields.StringField(),
        'interval': fields.IntegerField(nullable=True),
        'params': fields.DictOfStringsField(),
        'engine_id': fields.UUIDField(),
    }

    @staticmethod
    def _from_db_object(context, registry, db_obj):
        for field in registry.fields:
            registry[field] = db_obj[field]
        registry._context = context
        registry.obj_reset_changes()
        return registry

    @classmethod
    def create(cls, context, cluster_id, check_type, interval, params,
               engine_id):
        obj = db_api.registry_create(context, cluster_id, check_type,
                                     interval, params, engine_id)
        return cls._from_db_object(context, cls(context), obj)

    @classmethod
    def claim(cls, context, engine_id):
        objs = db_api.registry_claim(context, engine_id)
        return [cls._from_db_object(context, cls(), obj) for obj in objs]

    @classmethod
    def delete(cls, context, cluster_id):
        db_api.registry_delete(context, cluster_id)
class StackTag(base.VersionedObject,
               base.VersionedObjectDictCompat,
               base.ComparableVersionedObject):

    fields = {
        'id': fields.IntegerField(),
        'tag': fields.StringField(nullable=True),
        'stack_id': fields.StringField(),
        'created_at': fields.DateTimeField(read_only=True),
        'updated_at': fields.DateTimeField(nullable=True),
    }

    @staticmethod
    def _from_db_object(tag, db_tag):
        if db_tag is None:
            return None
        for field in tag.fields:
            tag[field] = db_tag[field]
        tag.obj_reset_changes()
        return tag

    @classmethod
    def get_obj(cls, context, tag):
        tag_obj = cls._from_db_object(cls(context), tag)
        return tag_obj
class NetworkDhcpAgentBinding(base.NeutronDbObject):
    # Version 1.0: Initial version
    # Version 1.1: Added 'binding_index'
    VERSION = '1.1'

    db_model = ndab_models.NetworkDhcpAgentBinding

    primary_keys = ['network_id', 'dhcp_agent_id']

    fields = {
        'network_id': common_types.UUIDField(),
        'dhcp_agent_id': common_types.UUIDField(),
        'binding_index': obj_fields.IntegerField(),
    }

    # NOTE(ndahiwade): The join was implemented this way as get_objects
    # currently doesn't support operators like '<' or '>'
    @classmethod
    def get_down_bindings(cls, context, cutoff):
        agent_objs = agent_obj.Agent.get_objects(context)
        dhcp_agent_ids = [obj.id for obj in agent_objs
                          if obj.heartbeat_timestamp < cutoff]
        return cls.get_objects(context, dhcp_agent_id=dhcp_agent_ids)
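
# A hedged sketch of how a 'cutoff' timestamp for get_down_bindings is
# typically derived; 'agent_down_time' is an assumed configuration value,
# not part of the original code.
import datetime

from oslo_utils import timeutils

agent_down_time = 75  # seconds; assumed config value
cutoff = timeutils.utcnow() - datetime.timedelta(seconds=agent_down_time)
# Agents whose heartbeat_timestamp is older than `cutoff` count as down,
# and their bindings are what get_down_bindings() returns.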
class IPV6Classification(ClassificationBase):
    VERSION = '1.0'

    db_model = models.IPV6Classification

    fields = {
        'dscp': obj_fields.IntegerField(nullable=True),
        'dscp_mask': obj_fields.IntegerField(nullable=True),
        'ecn': obj_fields.EnumField(valid_values=["0", "1", "2", "3"],
                                    nullable=True),
        'length_min': obj_fields.IntegerField(nullable=True),
        'length_max': obj_fields.IntegerField(nullable=True),
        'next_header': obj_fields.IntegerField(nullable=True),
        'hops_min': obj_fields.IntegerField(nullable=True),
        'hops_max': obj_fields.IntegerField(nullable=True),
        'src_addr': obj_fields.StringField(nullable=True),
        'dst_addr': obj_fields.StringField(nullable=True),
    }

    def create(self):
        with db_api.autonested_transaction(self.obj_context.session):
            super(ClassificationBase, self).create()

    @classmethod
    def get_object(cls, context, **kwargs):
        with db_api.autonested_transaction(context.session):
            obj = super(IPV6Classification, cls).get_object(
                context, c_type='ipv6', **kwargs)
        return obj
class UDPClassification(ClassificationBase):
    VERSION = '1.0'

    db_model = models.UDPClassification

    fields = {
        'src_port_min': obj_fields.IntegerField(nullable=True),
        'src_port_max': obj_fields.IntegerField(nullable=True),
        'dst_port_min': obj_fields.IntegerField(nullable=True),
        'dst_port_max': obj_fields.IntegerField(nullable=True),
        'length_min': obj_fields.IntegerField(nullable=True),
        'length_max': obj_fields.IntegerField(nullable=True),
    }

    def create(self):
        with db_api.autonested_transaction(self.obj_context.session):
            super(ClassificationBase, self).create()

    @classmethod
    def get_object(cls, context, **kwargs):
        with db_api.autonested_transaction(context.session):
            obj = super(UDPClassification, cls).get_object(
                context, c_type='udp', **kwargs)
        return obj
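
# Plain-Python sketch of the super(ClassificationBase, self) trick used in
# the three create() methods above: it *skips* the named class's own method
# and dispatches one level further up the MRO. Class names here are
# illustrative.
class Base:
    def create(self):
        return 'Base.create'


class Middle(Base):
    def create(self):
        return 'Middle.create'


class Leaf(Middle):
    def create(self):
        # super(Middle, self) starts the attribute lookup *after* Middle.
        return super(Middle, self).create()


print(Leaf().create())  # Base.create -- Middle.create is bypassed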
class Volume(base.CinderPersistentObject, base.CinderObject,
             base.CinderObjectDictCompat, base.CinderComparableObject):
    # Version 1.0: Initial version
    # Version 1.1: Added metadata, admin_metadata, volume_attachment, and
    #              volume_type
    # Version 1.2: Added glance_metadata, consistencygroup and snapshots
    # Version 1.3: Added finish_volume_migration()
    VERSION = '1.3'

    OPTIONAL_FIELDS = ('metadata', 'admin_metadata', 'glance_metadata',
                       'volume_type', 'volume_attachment',
                       'consistencygroup', 'snapshots')

    fields = {
        'id': fields.UUIDField(),
        '_name_id': fields.UUIDField(nullable=True),
        'ec2_id': fields.UUIDField(nullable=True),
        'user_id': fields.StringField(nullable=True),
        'project_id': fields.StringField(nullable=True),
        'snapshot_id': fields.UUIDField(nullable=True),
        'host': fields.StringField(nullable=True),
        'size': fields.IntegerField(nullable=True),
        'availability_zone': fields.StringField(nullable=True),
        'status': fields.StringField(nullable=True),
        'attach_status': fields.StringField(nullable=True),
        'migration_status': fields.StringField(nullable=True),
        'scheduled_at': fields.DateTimeField(nullable=True),
        'launched_at': fields.DateTimeField(nullable=True),
        'terminated_at': fields.DateTimeField(nullable=True),
        'display_name': fields.StringField(nullable=True),
        'display_description': fields.StringField(nullable=True),
        'provider_id': fields.UUIDField(nullable=True),
        'provider_location': fields.StringField(nullable=True),
        'provider_auth': fields.StringField(nullable=True),
        'provider_geometry': fields.StringField(nullable=True),
        'volume_type_id': fields.UUIDField(nullable=True),
        'source_volid': fields.UUIDField(nullable=True),
        'encryption_key_id': fields.UUIDField(nullable=True),
        'consistencygroup_id': fields.UUIDField(nullable=True),
        'deleted': fields.BooleanField(default=False, nullable=True),
        'bootable': fields.BooleanField(default=False, nullable=True),
        'multiattach': fields.BooleanField(default=False, nullable=True),
        'replication_status': fields.StringField(nullable=True),
        'replication_extended_status': fields.StringField(nullable=True),
        'replication_driver_data': fields.StringField(nullable=True),
        'previous_status': fields.StringField(nullable=True),
        'metadata': fields.DictOfStringsField(nullable=True),
        'admin_metadata': fields.DictOfStringsField(nullable=True),
        'glance_metadata': fields.DictOfStringsField(nullable=True),
        'volume_type': fields.ObjectField('VolumeType', nullable=True),
        'volume_attachment': fields.ObjectField('VolumeAttachmentList',
                                                nullable=True),
        'consistencygroup': fields.ObjectField('ConsistencyGroup',
                                               nullable=True),
        'snapshots': fields.ObjectField('SnapshotList', nullable=True),
    }

    # NOTE(thangp): obj_extra_fields is used to hold properties that are
    # not usually part of the model
    obj_extra_fields = ['name', 'name_id', 'volume_metadata',
                        'volume_admin_metadata', 'volume_glance_metadata']

    @classmethod
    def _get_expected_attrs(cls, context):
        expected_attrs = ['metadata', 'volume_type',
                          'volume_type.extra_specs']
        if context.is_admin:
            expected_attrs.append('admin_metadata')
        return expected_attrs

    @property
    def name_id(self):
        return self.id if not self._name_id else self._name_id

    @name_id.setter
    def name_id(self, value):
        self._name_id = value

    @property
    def name(self):
        return CONF.volume_name_template % self.name_id

    # TODO(dulek): The three properties below exist for compatibility with
    # the dict representation of volume. The format there is different (a
    # list of SQLAlchemy models), so we need a conversion. They should be
    # removed once we stop this class from deriving from DictObjectCompat.
    @property
    def volume_metadata(self):
        md = [MetadataObject(k, v) for k, v in self.metadata.items()]
        return md

    @volume_metadata.setter
    def volume_metadata(self, value):
        md = {d['key']: d['value'] for d in value}
        self.metadata = md

    @property
    def volume_admin_metadata(self):
        md = [MetadataObject(k, v) for k, v in self.admin_metadata.items()]
        return md

    @volume_admin_metadata.setter
    def volume_admin_metadata(self, value):
        md = {d['key']: d['value'] for d in value}
        self.admin_metadata = md

    @property
    def volume_glance_metadata(self):
        md = [MetadataObject(k, v) for k, v in self.glance_metadata.items()]
        return md

    @volume_glance_metadata.setter
    def volume_glance_metadata(self, value):
        md = {d['key']: d['value'] for d in value}
        self.glance_metadata = md

    def __init__(self, *args, **kwargs):
        super(Volume, self).__init__(*args, **kwargs)
        self._orig_metadata = {}
        self._orig_admin_metadata = {}
        self._orig_glance_metadata = {}
        self._reset_metadata_tracking()

    def obj_reset_changes(self, fields=None):
        super(Volume, self).obj_reset_changes(fields)
        self._reset_metadata_tracking(fields=fields)

    @classmethod
    def _obj_from_primitive(cls, context, objver, primitive):
        obj = super(Volume, Volume)._obj_from_primitive(context, objver,
                                                        primitive)
        obj._reset_metadata_tracking()
        return obj

    def _reset_metadata_tracking(self, fields=None):
        if fields is None or 'metadata' in fields:
            self._orig_metadata = (dict(self.metadata)
                                   if 'metadata' in self else {})
        if fields is None or 'admin_metadata' in fields:
            self._orig_admin_metadata = (dict(self.admin_metadata)
                                         if 'admin_metadata' in self
                                         else {})
        if fields is None or 'glance_metadata' in fields:
            self._orig_glance_metadata = (dict(self.glance_metadata)
                                          if 'glance_metadata' in self
                                          else {})

    def obj_what_changed(self):
        changes = super(Volume, self).obj_what_changed()
        if 'metadata' in self and self.metadata != self._orig_metadata:
            changes.add('metadata')
        if ('admin_metadata' in self and
                self.admin_metadata != self._orig_admin_metadata):
            changes.add('admin_metadata')
        if ('glance_metadata' in self and
                self.glance_metadata != self._orig_glance_metadata):
            changes.add('glance_metadata')
        return changes

    def obj_make_compatible(self, primitive, target_version):
        """Make an object representation compatible with a target version."""
        super(Volume, self).obj_make_compatible(primitive, target_version)
        target_version = versionutils.convert_version_to_tuple(
            target_version)

    @staticmethod
    def _from_db_object(context, volume, db_volume, expected_attrs=None):
        if expected_attrs is None:
            expected_attrs = []
        for name, field in volume.fields.items():
            if name in Volume.OPTIONAL_FIELDS:
                continue
            value = db_volume.get(name)
            if isinstance(field, fields.IntegerField):
                value = value or 0
            volume[name] = value

        # Get data from the db_volume object that was queried by a joined
        # query from the DB
        if 'metadata' in expected_attrs:
            metadata = db_volume.get('volume_metadata', [])
            volume.metadata = {item['key']: item['value']
                               for item in metadata}
        if 'admin_metadata' in expected_attrs:
            metadata = db_volume.get('volume_admin_metadata', [])
            volume.admin_metadata = {item['key']: item['value']
                                     for item in metadata}
        if 'glance_metadata' in expected_attrs:
            metadata = db_volume.get('volume_glance_metadata', [])
            volume.glance_metadata = {item['key']: item['value']
                                      for item in metadata}
        if 'volume_type' in expected_attrs:
            db_volume_type = db_volume.get('volume_type')
            if db_volume_type:
                vt_expected_attrs = []
                if 'volume_type.extra_specs' in expected_attrs:
                    vt_expected_attrs.append('extra_specs')
                volume.volume_type = objects.VolumeType._from_db_object(
                    context, objects.VolumeType(), db_volume_type,
                    expected_attrs=vt_expected_attrs)
        if 'volume_attachment' in expected_attrs:
            attachments = base.obj_make_list(
                context, objects.VolumeAttachmentList(context),
                objects.VolumeAttachment,
                db_volume.get('volume_attachment'))
            volume.volume_attachment = attachments
        if 'consistencygroup' in expected_attrs:
            consistencygroup = objects.ConsistencyGroup(context)
            consistencygroup._from_db_object(context, consistencygroup,
                                             db_volume['consistencygroup'])
            volume.consistencygroup = consistencygroup
        if 'snapshots' in expected_attrs:
            snapshots = base.obj_make_list(
                context, objects.SnapshotList(context),
                objects.Snapshot, db_volume['snapshots'])
            volume.snapshots = snapshots

        volume._context = context
        volume.obj_reset_changes()
        return volume

    @base.remotable
    def create(self):
        if self.obj_attr_is_set('id'):
            raise exception.ObjectActionError(action='create',
                                              reason=_('already created'))
        updates = self.cinder_obj_get_changes()
        if 'consistencygroup' in updates:
            raise exception.ObjectActionError(
                action='create', reason=_('consistencygroup assigned'))
        if 'snapshots' in updates:
            raise exception.ObjectActionError(
                action='create', reason=_('snapshots assigned'))

        db_volume = db.volume_create(self._context, updates)
        self._from_db_object(self._context, self, db_volume)

    @base.remotable
    def save(self):
        updates = self.cinder_obj_get_changes()
        if updates:
            if 'consistencygroup' in updates:
                raise exception.ObjectActionError(
                    action='save', reason=_('consistencygroup changed'))
            if 'glance_metadata' in updates:
                raise exception.ObjectActionError(
                    action='save', reason=_('glance_metadata changed'))
            if 'snapshots' in updates:
                raise exception.ObjectActionError(
                    action='save', reason=_('snapshots changed'))
            if 'metadata' in updates:
                # Metadata items that are not specified in self.metadata
                # will be deleted
                metadata = updates.pop('metadata', None)
                self.metadata = db.volume_metadata_update(
                    self._context, self.id, metadata, True)
            if self._context.is_admin and 'admin_metadata' in updates:
                metadata = updates.pop('admin_metadata', None)
                self.admin_metadata = db.volume_admin_metadata_update(
                    self._context, self.id, metadata, True)

            db.volume_update(self._context, self.id, updates)
            self.obj_reset_changes()

    @base.remotable
    def destroy(self):
        with self.obj_as_admin():
            db.volume_destroy(self._context, self.id)

    def obj_load_attr(self, attrname):
        if attrname not in self.OPTIONAL_FIELDS:
            raise exception.ObjectActionError(
                action='obj_load_attr',
                reason=_('attribute %s not lazy-loadable') % attrname)
        if not self._context:
            raise exception.OrphanedObjectError(method='obj_load_attr',
                                                objtype=self.obj_name())

        if attrname == 'metadata':
            self.metadata = db.volume_metadata_get(self._context, self.id)
        elif attrname == 'admin_metadata':
            self.admin_metadata = {}
            if self._context.is_admin:
                self.admin_metadata = db.volume_admin_metadata_get(
                    self._context, self.id)
        elif attrname == 'glance_metadata':
            try:
                # NOTE(dulek): We're using the alias here to have the
                # conversion from list to dict done there.
                self.volume_glance_metadata = db.volume_glance_metadata_get(
                    self._context, self.id)
            except exception.GlanceMetadataNotFound:
                # NOTE(dulek): The DB API raises when a volume has no
                # glance_metadata. Silencing this because at this level no
                # metadata is a completely valid result.
                self.glance_metadata = {}
        elif attrname == 'volume_type':
            # If the volume doesn't have volume_type, VolumeType.get_by_id
            # would trigger a db call which raises VolumeTypeNotFound.
            self.volume_type = (objects.VolumeType.get_by_id(
                self._context, self.volume_type_id)
                if self.volume_type_id else None)
        elif attrname == 'volume_attachment':
            attachments = objects.VolumeAttachmentList.get_all_by_volume_id(
                self._context, self.id)
            self.volume_attachment = attachments
        elif attrname == 'consistencygroup':
            consistencygroup = objects.ConsistencyGroup.get_by_id(
                self._context, self.consistencygroup_id)
            self.consistencygroup = consistencygroup
        elif attrname == 'snapshots':
            self.snapshots = objects.SnapshotList.get_all_for_volume(
                self._context, self.id)

        self.obj_reset_changes(fields=[attrname])

    def delete_metadata_key(self, key):
        db.volume_metadata_delete(self._context, self.id, key)
        md_was_changed = 'metadata' in self.obj_what_changed()
        del self.metadata[key]
        self._orig_metadata.pop(key, None)
        if not md_was_changed:
            self.obj_reset_changes(['metadata'])

    def finish_volume_migration(self, dest_volume):
        # We swap fields between source (i.e. self) and destination at the
        # end of migration because we want to keep the original volume id
        # in the DB but now pointing to the migrated volume.
        skip = ({'id', 'provider_location', 'glance_metadata',
                 'volume_type'} | set(self.obj_extra_fields))
        for key in set(dest_volume.fields.keys()) - skip:
            # Only swap attributes that are already set. We do not want to
            # unexpectedly trigger a lazy-load.
            if not dest_volume.obj_attr_is_set(key):
                continue

            value = getattr(dest_volume, key)
            value_to_dst = getattr(self, key)
            # Destination must have a _name_id since the id no longer
            # matches the volume. If it doesn't have a _name_id we set one.
            if key == '_name_id':
                if not dest_volume._name_id:
                    setattr(dest_volume, key, self.id)
                continue
            elif key == 'migration_status':
                value = None
                value_to_dst = 'deleting'
            elif key == 'display_description':
                value_to_dst = 'migration src for ' + self.id
            elif key == 'status':
                value_to_dst = 'deleting'
            # Because dest_volume will be deleted soon, we can skip copying
            # volume_type_id and volume_type, which are not needed for
            # volume deletion.
            elif key == 'volume_type_id':
                # Initialize volume_type of the source volume using the new
                # volume_type_id.
                self.update({'volume_type_id': value})
                continue

            setattr(self, key, value)
            setattr(dest_volume, key, value_to_dst)

        dest_volume.save()
        return dest_volume
class Backup(base.CinderPersistentObject, base.CinderObject,
             base.CinderObjectDictCompat):
    # Version 1.0: Initial version
    VERSION = '1.0'

    fields = {
        'id': fields.UUIDField(),
        'user_id': fields.UUIDField(),
        'project_id': fields.UUIDField(),
        'volume_id': fields.UUIDField(),
        'host': fields.StringField(nullable=True),
        'availability_zone': fields.StringField(nullable=True),
        'container': fields.StringField(nullable=True),
        'parent_id': fields.StringField(nullable=True),
        'status': fields.StringField(nullable=True),
        'fail_reason': fields.StringField(nullable=True),
        'size': fields.IntegerField(),
        'display_name': fields.StringField(nullable=True),
        'display_description': fields.StringField(nullable=True),
        # NOTE(dulek): The metadata field is used by backup drivers to
        # store arbitrary strings; that's why it can't be a
        # DictOfStringsField.
        'service_metadata': fields.StringField(nullable=True),
        'service': fields.StringField(nullable=True),
        'object_count': fields.IntegerField(),
        'temp_volume_id': fields.StringField(nullable=True),
        'temp_snapshot_id': fields.StringField(nullable=True),
    }

    obj_extra_fields = ['name']

    @property
    def name(self):
        return CONF.backup_name_template % self.id

    def obj_make_compatible(self, primitive, target_version):
        """Make an object representation compatible with a target version."""
        super(Backup, self).obj_make_compatible(primitive, target_version)
        target_version = utils.convert_version_to_tuple(target_version)

    @staticmethod
    def _from_db_object(context, backup, db_backup):
        for name, field in backup.fields.items():
            value = db_backup.get(name)
            if isinstance(field, fields.IntegerField):
                value = value if value is not None else 0
            backup[name] = value

        backup._context = context
        backup.obj_reset_changes()
        return backup

    @base.remotable_classmethod
    def get_by_id(cls, context, id):
        db_backup = db.backup_get(context, id)
        return cls._from_db_object(context, cls(context), db_backup)

    @base.remotable
    def create(self):
        if self.obj_attr_is_set('id'):
            raise exception.ObjectActionError(action='create',
                                              reason='already created')
        updates = self.cinder_obj_get_changes()
        db_backup = db.backup_create(self._context, updates)
        self._from_db_object(self._context, self, db_backup)

    @base.remotable
    def save(self):
        updates = self.cinder_obj_get_changes()
        if updates:
            db.backup_update(self._context, self.id, updates)
        self.obj_reset_changes()

    @base.remotable
    def destroy(self):
        with self.obj_as_admin():
            db.backup_destroy(self._context, self.id)
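
# The `name` property above is a simple template substitution. A sketch
# assuming a backup_name_template of 'backup-%s' (the value of the config
# option is an assumption here); the id is illustrative.
backup_name_template = 'backup-%s'   # assumed CONF.backup_name_template
backup_id = 'b21eda53-4b42-4383-9fbb-b0ec1d5a6e43'
print(backup_name_template % backup_id)
# backup-b21eda53-4b42-4383-9fbb-b0ec1d5a6e43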
class Providerregion(base.ZunPersistentObject, base.ZunObject):
    # Version 1.0: Initial version
    VERSION = '1.0'

    fields = {
        'id': fields.IntegerField(),
        'region': fields.StringField(nullable=True),
        'provider_id': fields.StringField(nullable=True),
        'uuid': fields.UUIDField(nullable=True),
        'displayname': fields.StringField(nullable=True),
    }

    @staticmethod
    def _from_db_object(providerregion, db_providerregion):
        """Converts a database entity to a formal object."""
        for field in providerregion.fields:
            setattr(providerregion, field, db_providerregion[field])
        providerregion.obj_reset_changes()
        return providerregion

    @staticmethod
    def _from_db_object_list(db_objects, cls, context):
        """Converts a list of database entities to a list of formal
        objects."""
        return [Providerregion._from_db_object(cls(context), obj)
                for obj in db_objects]

    @base.remotable_classmethod
    def get_by_uuid(cls, context, uuid):
        """Find a providerregion based on uuid and return a
        :class:`Providerregion` object.

        :param uuid: the uuid of a providerregion.
        :param context: Security context
        :returns: a :class:`Providerregion` object.
        """
        db_providerregion = dbapi.get_providerregion_by_uuid(context, uuid)
        providerregion = Providerregion._from_db_object(
            cls(context), db_providerregion)
        return providerregion

    @base.remotable_classmethod
    def get_by_name(cls, context, name):
        """Find a providerregion based on name and return a Providerregion
        object.

        :param name: the logical name of a providerregion.
        :param context: Security context
        :returns: a :class:`Providerregion` object.
        """
        db_providerregion = dbapi.get_providerregion_by_name(context, name)
        providerregion = Providerregion._from_db_object(
            cls(context), db_providerregion)
        return providerregion

    @base.remotable_classmethod
    def list(cls, context, limit=None, marker=None, sort_key=None,
             sort_dir=None, filters=None):
        """Return a list of Providerregion objects.

        :param context: Security context.
        :param limit: maximum number of resources to return in a single
                      result.
        :param marker: pagination marker for large data sets.
        :param sort_key: column to sort results by.
        :param sort_dir: direction to sort. "asc" or "desc".
        :param filters: filters to apply when listing providerregions;
                        filter names may include 'name', 'image',
                        'project_id', 'user_id' and 'memory'. For example,
                        filters={'image': 'nginx'}.
        :returns: a list of :class:`Providerregion` objects.
        """
        db_providerregions = dbapi.list_providerregions(
            context, limit=limit, marker=marker, sort_key=sort_key,
            sort_dir=sort_dir, filters=filters)
        LOG.debug('Listed providerregions: %s', db_providerregions)
        return Providerregion._from_db_object_list(db_providerregions, cls,
                                                   context)

    @base.remotable_classmethod
    def list_by_host(cls, context, host):
        """Return a list of Providerregion objects by host.

        :param context: Security context.
        :param host: A compute host.
        :returns: a list of :class:`Providerregion` objects.
        """
        db_providerregions = dbapi.list_providerregions(
            context, filters={'host': host})
        return Providerregion._from_db_object_list(db_providerregions, cls,
                                                   context)

    @base.remotable
    def create(self, context):
        """Create a Providerregion record in the DB.

        :param context: Security context. NOTE: This should only be used
                        internally by the indirection_api. Unfortunately,
                        RPC requires context as the first argument, even
                        though we don't use it. A context should be set
                        when instantiating the object, e.g.:
                        Providerregion(context)
        """
        values = self.obj_get_changes()
        db_providerregion = dbapi.create_providerregion(context, values)
        self._from_db_object(self, db_providerregion)

    @base.remotable
    def destroy(self, context=None):
        """Delete the Providerregion from the DB.

        :param context: Security context. NOTE: This should only be used
                        internally by the indirection_api. Unfortunately,
                        RPC requires context as the first argument, even
                        though we don't use it. A context should be set
                        when instantiating the object, e.g.:
                        Providerregion(context)
        """
        dbapi.destroy_providerregion(context, self.uuid)
        self.obj_reset_changes()

    @base.remotable
    def save(self, context=None):
        """Save updates to this Providerregion.

        Updates will be made column by column based on the result of
        self.what_changed().

        :param context: Security context. NOTE: This should only be used
                        internally by the indirection_api. Unfortunately,
                        RPC requires context as the first argument, even
                        though we don't use it. A context should be set
                        when instantiating the object, e.g.:
                        Providerregion(context)
        """
        updates = self.obj_get_changes()
        LOG.debug('Saving providerregion %(uuid)s with updates %(updates)s',
                  {'uuid': self.uuid, 'updates': updates})
        dbapi.update_providerregion(context, self.uuid, updates)
        self.obj_reset_changes()

    @base.remotable
    def refresh(self, context=None):
        """Loads updates for this Providerregion.

        Loads a providerregion with the same uuid from the database and
        checks for updated attributes. Updates are applied from the loaded
        providerregion column by column, if there are any updates.

        :param context: Security context. NOTE: This should only be used
                        internally by the indirection_api. Unfortunately,
                        RPC requires context as the first argument, even
                        though we don't use it. A context should be set
                        when instantiating the object, e.g.:
                        Providerregion(context)
        """
        current = self.__class__.get_by_uuid(self._context, uuid=self.uuid)
        for field in self.fields:
            if (self.obj_attr_is_set(field) and
                    getattr(self, field) != getattr(current, field)):
                setattr(self, field, getattr(current, field))

    def get_sandbox_id(self):
        if self.meta:
            return self.meta.get('sandbox_id', None)
        else:
            return None

    def set_sandbox_id(self, sandbox_id):
        if self.meta is None:
            self.meta = {'sandbox_id': sandbox_id}
        else:
            self.meta['sandbox_id'] = sandbox_id
class Cluster(base.MagnumPersistentObject, base.MagnumObject,
              base.MagnumObjectDictCompat):
    # Version 1.0: Initial version
    # Version 1.1: Added 'bay_create_timeout' field
    # Version 1.2: Added 'registry_trust_id' field
    # Version 1.3: Added 'baymodel' field
    # Version 1.4: Added more types of status to bay's status field
    # Version 1.5: Renamed 'registry_trust_id' to 'trust_id'
    #              Added 'trustee_user_name', 'trustee_password' and
    #              'trustee_user_id' fields
    # Version 1.6: Added rollback support for Bay
    # Version 1.7: Added 'coe_version' and 'container_version' fields
    # Version 1.8: Renamed 'baymodel' to 'cluster_template'
    # Version 1.9: Renamed table name from 'bay' to 'cluster'
    #              Renamed 'baymodel_id' to 'cluster_template_id'
    #              Renamed 'bay_create_timeout' to 'create_timeout'
    # Version 1.10: Added 'keypair' field
    # Version 1.11: Added 'RESUME_FAILED' to the status field
    # Version 1.12: Added 'get_stats' method
    # Version 1.13: Added get_count_all method
    # Version 1.14: Added 'docker_volume_size' field
    # Version 1.15: Added 'labels' field
    # Version 1.16: Added 'master_flavor_id' field
    # Version 1.17: Added 'flavor_id' field
    # Version 1.18: Added 'health_status' and 'health_status_reason' fields
    # Version 1.19: Added nodegroups, default_ng_worker, default_ng_master
    # Version 1.20: Fields node_count, master_count, node_addresses and
    #               master_addresses are now properties.
    VERSION = '1.20'

    dbapi = dbapi.get_instance()

    fields = {
        'id': fields.IntegerField(),
        'uuid': fields.UUIDField(nullable=True),
        'name': fields.StringField(nullable=True),
        'project_id': fields.StringField(nullable=True),
        'user_id': fields.StringField(nullable=True),
        'cluster_template_id': fields.StringField(nullable=True),
        'keypair': fields.StringField(nullable=True),
        'docker_volume_size': fields.IntegerField(nullable=True),
        'labels': fields.DictOfStringsField(nullable=True),
        'master_flavor_id': fields.StringField(nullable=True),
        'flavor_id': fields.StringField(nullable=True),
        'stack_id': fields.StringField(nullable=True),
        'status': m_fields.ClusterStatusField(nullable=True),
        'status_reason': fields.StringField(nullable=True),
        'health_status': m_fields.ClusterHealthStatusField(nullable=True),
        'health_status_reason': fields.DictOfStringsField(nullable=True),
        'create_timeout': fields.IntegerField(nullable=True),
        'api_address': fields.StringField(nullable=True),
        'discovery_url': fields.StringField(nullable=True),
        'ca_cert_ref': fields.StringField(nullable=True),
        'magnum_cert_ref': fields.StringField(nullable=True),
        'cluster_template': fields.ObjectField('ClusterTemplate'),
        'trust_id': fields.StringField(nullable=True),
        'trustee_username': fields.StringField(nullable=True),
        'trustee_password': fields.StringField(nullable=True),
        'trustee_user_id': fields.StringField(nullable=True),
        'coe_version': fields.StringField(nullable=True),
        'container_version': fields.StringField(nullable=True),
    }

    @staticmethod
    def _from_db_object(cluster, db_cluster):
        """Converts a database entity to a formal object."""
        for field in cluster.fields:
            if field != 'cluster_template':
                cluster[field] = db_cluster[field]

        # NOTE(eliqiao): The following line needs to be placed outside the
        # loop because there is a dependency from cluster_template to
        # cluster_template_id: cluster_template_id must be populated in the
        # loop before it can be used to find the cluster_template.
        cluster['cluster_template'] = ClusterTemplate.get_by_uuid(
            cluster._context, cluster.cluster_template_id)

        cluster.obj_reset_changes()
        return cluster

    @property
    def nodegroups(self):
        # Returns all nodegroups that belong to the cluster.
        return NodeGroup.list(self._context, self.uuid)

    @property
    def default_ng_worker(self):
        # Assume that every cluster will have only one default non-master
        # nodegroup. We don't want to limit the roles, so each nodegroup
        # that does not have a master role is considered a worker/minion
        # nodegroup.
        filters = {'is_default': True}
        default_ngs = NodeGroup.list(self._context, self.uuid,
                                     filters=filters)
        return [n for n in default_ngs if n.role != 'master'][0]

    @property
    def default_ng_master(self):
        # Assume that every cluster will have only one default master
        # nodegroup.
        filters = {'role': 'master', 'is_default': True}
        return NodeGroup.list(self._context, self.uuid, filters=filters)[0]

    @property
    def node_count(self):
        return sum(n.node_count for n in self.nodegroups
                   if n.role != 'master')

    @property
    def master_count(self):
        return sum(n.node_count for n in self.nodegroups
                   if n.role == 'master')

    @property
    def node_addresses(self):
        node_addresses = []
        for ng in self.nodegroups:
            if ng.role != 'master':
                node_addresses += ng.node_addresses
        return node_addresses

    @property
    def master_addresses(self):
        master_addresses = []
        for ng in self.nodegroups:
            if ng.role == 'master':
                master_addresses += ng.node_addresses
        return master_addresses

    @staticmethod
    def _from_db_object_list(db_objects, cls, context):
        """Converts a list of database entities to a list of formal
        objects."""
        return [Cluster._from_db_object(cls(context), obj)
                for obj in db_objects]

    @base.remotable_classmethod
    def get(cls, context, cluster_id):
        """Find a cluster based on its id or uuid and return a Cluster
        object.

        :param cluster_id: the id *or* uuid of a cluster.
        :param context: Security context
        :returns: a :class:`Cluster` object.
        """
        if strutils.is_int_like(cluster_id):
            return cls.get_by_id(context, cluster_id)
        elif uuidutils.is_uuid_like(cluster_id):
            return cls.get_by_uuid(context, cluster_id)
        else:
            raise exception.InvalidIdentity(identity=cluster_id)

    @base.remotable_classmethod
    def get_by_id(cls, context, cluster_id):
        """Find a cluster based on its integer id and return a Cluster
        object.

        :param cluster_id: the id of a cluster.
        :param context: Security context
        :returns: a :class:`Cluster` object.
        """
        db_cluster = cls.dbapi.get_cluster_by_id(context, cluster_id)
        cluster = Cluster._from_db_object(cls(context), db_cluster)
        return cluster

    @base.remotable_classmethod
    def get_by_uuid(cls, context, uuid):
        """Find a cluster based on uuid and return a :class:`Cluster`
        object.

        :param uuid: the uuid of a cluster.
        :param context: Security context
        :returns: a :class:`Cluster` object.
        """
        db_cluster = cls.dbapi.get_cluster_by_uuid(context, uuid)
        cluster = Cluster._from_db_object(cls(context), db_cluster)
        return cluster

    @base.remotable_classmethod
    def get_count_all(cls, context, filters=None):
        """Get count of matching clusters.

        :param context: The security context
        :param filters: filter dict; can include 'cluster_template_id',
                        'name', 'node_count', 'stack_id', 'api_address',
                        'node_addresses', 'project_id', 'user_id', 'status'
                        (should be a status list) and 'master_count'.
        :returns: Count of matching clusters.
        """
        return cls.dbapi.get_cluster_count_all(context, filters=filters)

    @base.remotable_classmethod
    def get_by_name(cls, context, name):
        """Find a cluster based on name and return a Cluster object.

        :param name: the logical name of a cluster.
        :param context: Security context
        :returns: a :class:`Cluster` object.
        """
        db_cluster = cls.dbapi.get_cluster_by_name(context, name)
        cluster = Cluster._from_db_object(cls(context), db_cluster)
        return cluster

    @base.remotable_classmethod
    def list(cls, context, limit=None, marker=None, sort_key=None,
             sort_dir=None, filters=None):
        """Return a list of Cluster objects.

        :param context: Security context.
        :param limit: maximum number of resources to return in a single
                      result.
        :param marker: pagination marker for large data sets.
        :param sort_key: column to sort results by.
        :param sort_dir: direction to sort. "asc" or "desc".
        :param filters: filter dict; can include 'cluster_template_id',
                        'name', 'node_count', 'stack_id', 'api_address',
                        'node_addresses', 'project_id', 'user_id', 'status'
                        (should be a status list) and 'master_count'.
        :returns: a list of :class:`Cluster` objects.
        """
        db_clusters = cls.dbapi.get_cluster_list(context, limit=limit,
                                                 marker=marker,
                                                 sort_key=sort_key,
                                                 sort_dir=sort_dir,
                                                 filters=filters)
        return Cluster._from_db_object_list(db_clusters, cls, context)

    @base.remotable_classmethod
    def get_stats(cls, context, project_id=None):
        """Return cluster stats, optionally scoped to a project.

        :param context: Security context.
        :param project_id: project id
        """
        return cls.dbapi.get_cluster_stats(project_id)

    @base.remotable
    def create(self, context=None):
        """Create a Cluster record in the DB.

        :param context: Security context. NOTE: This should only be used
                        internally by the indirection_api. Unfortunately,
                        RPC requires context as the first argument, even
                        though we don't use it. A context should be set
                        when instantiating the object, e.g.:
                        Cluster(context)
        """
        values = self.obj_get_changes()
        db_cluster = self.dbapi.create_cluster(values)
        self._from_db_object(self, db_cluster)

    @base.remotable
    def destroy(self, context=None):
        """Delete the Cluster from the DB.

        :param context: Security context. NOTE: This should only be used
                        internally by the indirection_api. Unfortunately,
                        RPC requires context as the first argument, even
                        though we don't use it. A context should be set
                        when instantiating the object, e.g.:
                        Cluster(context)
        """
        self.dbapi.destroy_cluster(self.uuid)
        self.obj_reset_changes()

    @base.remotable
    def save(self, context=None):
        """Save updates to this Cluster.

        Updates will be made column by column based on the result of
        self.what_changed().

        :param context: Security context. NOTE: This should only be used
                        internally by the indirection_api. Unfortunately,
                        RPC requires context as the first argument, even
                        though we don't use it. A context should be set
                        when instantiating the object, e.g.:
                        Cluster(context)
        """
        updates = self.obj_get_changes()
        self.dbapi.update_cluster(self.uuid, updates)
        self.obj_reset_changes()

    @base.remotable
    def refresh(self, context=None):
        """Loads updates for this Cluster.

        Loads a Cluster with the same uuid from the database and checks for
        updated attributes. Updates are applied from the loaded Cluster
        column by column, if there are any updates.

        :param context: Security context. NOTE: This should only be used
                        internally by the indirection_api. Unfortunately,
                        RPC requires context as the first argument, even
                        though we don't use it. A context should be set
                        when instantiating the object, e.g.:
                        Cluster(context)
        """
        current = self.__class__.get_by_uuid(self._context, uuid=self.uuid)
        for field in self.fields:
            if self.obj_attr_is_set(field) and self[field] != current[field]:
                self[field] = current[field]

    def as_dict(self):
        dict_ = super(Cluster, self).as_dict()
        # Update the dict with the attributes coming from the cluster's
        # nodegroups.
        dict_.update({
            'node_count': self.node_count,
            'master_count': self.master_count,
            'node_addresses': self.node_addresses,
            'master_addresses': self.master_addresses,
        })
        return dict_
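
# A toy illustration of the nodegroup aggregation behind node_count and
# master_count above, with plain objects standing in for NodeGroup.list()
# results; 'FakeNodeGroup' is illustrative.
class FakeNodeGroup:
    def __init__(self, role, node_count):
        self.role = role
        self.node_count = node_count


nodegroups = [FakeNodeGroup('master', 1),
              FakeNodeGroup('worker', 3),
              FakeNodeGroup('worker', 2)]

node_count = sum(n.node_count for n in nodegroups if n.role != 'master')
master_count = sum(n.node_count for n in nodegroups if n.role == 'master')
print(node_count, master_count)  # 5 1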
class Volume(cleanable.CinderCleanableObject, base.CinderObject, base.CinderObjectDictCompat, base.CinderComparableObject, base.ClusteredObject): # Version 1.0: Initial version # Version 1.1: Added metadata, admin_metadata, volume_attachment, and # volume_type # Version 1.2: Added glance_metadata, consistencygroup and snapshots # Version 1.3: Added finish_volume_migration() # Version 1.4: Added cluster fields # Version 1.5: Added group # Version 1.6: This object is now cleanable (adds rows to workers table) # Version 1.7: Added service_uuid # Version 1.8: Added shared_targets VERSION = '1.8' OPTIONAL_FIELDS = ('metadata', 'admin_metadata', 'glance_metadata', 'volume_type', 'volume_attachment', 'consistencygroup', 'snapshots', 'cluster', 'group') fields = { 'id': fields.UUIDField(), '_name_id': fields.UUIDField(nullable=True), 'ec2_id': fields.UUIDField(nullable=True), 'user_id': fields.StringField(nullable=True), 'project_id': fields.StringField(nullable=True), 'snapshot_id': fields.UUIDField(nullable=True), 'cluster_name': fields.StringField(nullable=True), 'cluster': fields.ObjectField('Cluster', nullable=True, read_only=True), 'host': fields.StringField(nullable=True), 'size': fields.IntegerField(nullable=True), 'availability_zone': fields.StringField(nullable=True), 'status': fields.StringField(nullable=True), 'attach_status': c_fields.VolumeAttachStatusField(nullable=True), 'migration_status': fields.StringField(nullable=True), 'scheduled_at': fields.DateTimeField(nullable=True), 'launched_at': fields.DateTimeField(nullable=True), 'terminated_at': fields.DateTimeField(nullable=True), 'display_name': fields.StringField(nullable=True), 'display_description': fields.StringField(nullable=True), 'provider_id': fields.StringField(nullable=True), 'provider_location': fields.StringField(nullable=True), 'provider_auth': fields.StringField(nullable=True), 'provider_geometry': fields.StringField(nullable=True), 'volume_type_id': fields.UUIDField(nullable=True), 'source_volid': fields.UUIDField(nullable=True), 'encryption_key_id': fields.UUIDField(nullable=True), 'consistencygroup_id': fields.UUIDField(nullable=True), 'group_id': fields.UUIDField(nullable=True), 'deleted': fields.BooleanField(default=False, nullable=True), 'bootable': fields.BooleanField(default=False, nullable=True), 'multiattach': fields.BooleanField(default=False, nullable=True), 'replication_status': fields.StringField(nullable=True), 'replication_extended_status': fields.StringField(nullable=True), 'replication_driver_data': fields.StringField(nullable=True), 'previous_status': fields.StringField(nullable=True), 'metadata': fields.DictOfStringsField(nullable=True), 'admin_metadata': fields.DictOfStringsField(nullable=True), 'glance_metadata': fields.DictOfStringsField(nullable=True), 'volume_type': fields.ObjectField('VolumeType', nullable=True), 'volume_attachment': fields.ObjectField('VolumeAttachmentList', nullable=True), 'consistencygroup': fields.ObjectField('ConsistencyGroup', nullable=True), 'snapshots': fields.ObjectField('SnapshotList', nullable=True), 'group': fields.ObjectField('Group', nullable=True), 'service_uuid': fields.StringField(nullable=True), 'shared_targets': fields.BooleanField(default=True, nullable=True), } # NOTE(thangp): obj_extra_fields is used to hold properties that are not # usually part of the model obj_extra_fields = ['name', 'name_id', 'volume_metadata', 'volume_admin_metadata', 'volume_glance_metadata'] @classmethod def _get_expected_attrs(cls, context, *args, **kwargs): expected_attrs = 
['metadata', 'volume_type', 'volume_type.extra_specs'] if context.is_admin: expected_attrs.append('admin_metadata') return expected_attrs @property def name_id(self): return self.id if not self._name_id else self._name_id @name_id.setter def name_id(self, value): self._name_id = value @property def name(self): return CONF.volume_name_template % self.name_id # TODO(dulek): Three properties below are for compatibility with dict # representation of volume. The format there is different (list of # SQLAlchemy models) so we need a conversion. Anyway - these should be # removed when we stop this class from deriving from DictObjectCompat. @property def volume_metadata(self): md = [MetadataObject(k, v) for k, v in self.metadata.items()] return md @volume_metadata.setter def volume_metadata(self, value): md = {d['key']: d['value'] for d in value} self.metadata = md @property def volume_admin_metadata(self): md = [MetadataObject(k, v) for k, v in self.admin_metadata.items()] return md @volume_admin_metadata.setter def volume_admin_metadata(self, value): md = {d['key']: d['value'] for d in value} self.admin_metadata = md @property def volume_glance_metadata(self): md = [MetadataObject(k, v) for k, v in self.glance_metadata.items()] return md @volume_glance_metadata.setter def volume_glance_metadata(self, value): md = {d['key']: d['value'] for d in value} self.glance_metadata = md def __init__(self, *args, **kwargs): super(Volume, self).__init__(*args, **kwargs) self._reset_metadata_tracking() def obj_reset_changes(self, fields=None): super(Volume, self).obj_reset_changes(fields) self._reset_metadata_tracking(fields=fields) @classmethod def _obj_from_primitive(cls, context, objver, primitive): obj = super(Volume, Volume)._obj_from_primitive(context, objver, primitive) obj._reset_metadata_tracking() return obj def _reset_metadata_tracking(self, fields=None): if fields is None or 'metadata' in fields: self._orig_metadata = (dict(self.metadata) if 'metadata' in self else {}) if fields is None or 'admin_metadata' in fields: self._orig_admin_metadata = (dict(self.admin_metadata) if 'admin_metadata' in self else {}) if fields is None or 'glance_metadata' in fields: self._orig_glance_metadata = (dict(self.glance_metadata) if 'glance_metadata' in self else {}) def obj_what_changed(self): changes = super(Volume, self).obj_what_changed() if 'metadata' in self and self.metadata != self._orig_metadata: changes.add('metadata') if ('admin_metadata' in self and self.admin_metadata != self._orig_admin_metadata): changes.add('admin_metadata') if ('glance_metadata' in self and self.glance_metadata != self._orig_glance_metadata): changes.add('glance_metadata') return changes def obj_make_compatible(self, primitive, target_version): """Make a Volume representation compatible with a target version.""" added_fields = (((1, 4), ('cluster', 'cluster_name')), ((1, 5), ('group', 'group_id')), ((1, 7), ('service_uuid'))) # Convert all related objects super(Volume, self).obj_make_compatible(primitive, target_version) target_version = versionutils.convert_version_to_tuple(target_version) for version, remove_fields in added_fields: if target_version < version: for obj_field in remove_fields: primitive.pop(obj_field, None) @classmethod def _from_db_object(cls, context, volume, db_volume, expected_attrs=None): if expected_attrs is None: expected_attrs = [] for name, field in volume.fields.items(): if name in cls.OPTIONAL_FIELDS: continue value = db_volume.get(name) if isinstance(field, fields.IntegerField): value = value or 0 
volume[name] = value # Populate data from the db_volume object that was fetched with a # joined DB query if 'metadata' in expected_attrs: metadata = db_volume.get('volume_metadata', []) volume.metadata = {item['key']: item['value'] for item in metadata} if 'admin_metadata' in expected_attrs: metadata = db_volume.get('volume_admin_metadata', []) volume.admin_metadata = {item['key']: item['value'] for item in metadata} if 'glance_metadata' in expected_attrs: metadata = db_volume.get('volume_glance_metadata', []) volume.glance_metadata = {item['key']: item['value'] for item in metadata} if 'volume_type' in expected_attrs: db_volume_type = db_volume.get('volume_type') if db_volume_type: vt_expected_attrs = [] if 'volume_type.extra_specs' in expected_attrs: vt_expected_attrs.append('extra_specs') volume.volume_type = objects.VolumeType._from_db_object( context, objects.VolumeType(), db_volume_type, expected_attrs=vt_expected_attrs) if 'volume_attachment' in expected_attrs: attachments = base.obj_make_list( context, objects.VolumeAttachmentList(context), objects.VolumeAttachment, db_volume.get('volume_attachment')) volume.volume_attachment = attachments if volume.consistencygroup_id and 'consistencygroup' in expected_attrs: consistencygroup = objects.ConsistencyGroup(context) consistencygroup._from_db_object(context, consistencygroup, db_volume['consistencygroup']) volume.consistencygroup = consistencygroup if 'snapshots' in expected_attrs: snapshots = base.obj_make_list( context, objects.SnapshotList(context), objects.Snapshot, db_volume['snapshots']) volume.snapshots = snapshots if 'cluster' in expected_attrs: db_cluster = db_volume.get('cluster') # If this volume doesn't belong to a cluster, the cluster field in # the ORM instance will be None. if db_cluster: volume.cluster = objects.Cluster(context) objects.Cluster._from_db_object(context, volume.cluster, db_cluster) else: volume.cluster = None if volume.group_id and 'group' in expected_attrs: group = objects.Group(context) group._from_db_object(context, group, db_volume['group']) volume.group = group volume._context = context volume.obj_reset_changes() return volume def create(self): if self.obj_attr_is_set('id'): raise exception.ObjectActionError(action='create', reason=_('already created')) updates = self.cinder_obj_get_changes() if 'consistencygroup' in updates: raise exception.ObjectActionError( action='create', reason=_('consistencygroup assigned')) if 'snapshots' in updates: raise exception.ObjectActionError( action='create', reason=_('snapshots assigned')) if 'cluster' in updates: raise exception.ObjectActionError( action='create', reason=_('cluster assigned')) if 'group' in updates: raise exception.ObjectActionError( action='create', reason=_('group assigned')) if ('volume_type_id' not in updates or updates['volume_type_id'] is None): updates['volume_type_id'] = ( volume_types.get_default_volume_type()['id']) db_volume = db.volume_create(self._context, updates) self._from_db_object(self._context, self, db_volume) def save(self): updates = self.cinder_obj_get_changes() if updates: # NOTE(xyang): Allow this to pass if 'consistencygroup' is # set to None. This is to support backward compatibility. # Also remove 'consistencygroup' from updates because # consistencygroup is the name of a relationship in the ORM # Volume model, so SQLAlchemy would try to update the foreign # key based on the provided updates if 'consistencygroup' # were left in updates.
if updates.pop('consistencygroup', None): raise exception.ObjectActionError( action='save', reason=_('consistencygroup changed')) if 'group' in updates: raise exception.ObjectActionError( action='save', reason=_('group changed')) if 'glance_metadata' in updates: raise exception.ObjectActionError( action='save', reason=_('glance_metadata changed')) if 'snapshots' in updates: raise exception.ObjectActionError( action='save', reason=_('snapshots changed')) if 'cluster' in updates: raise exception.ObjectActionError( action='save', reason=_('cluster changed')) if 'metadata' in updates: # Metadata items that are not specified in self.metadata will # be deleted metadata = updates.pop('metadata', None) self.metadata = db.volume_metadata_update(self._context, self.id, metadata, True) if self._context.is_admin and 'admin_metadata' in updates: metadata = updates.pop('admin_metadata', None) self.admin_metadata = db.volume_admin_metadata_update( self._context, self.id, metadata, True) # When a volume being created changes from 'creating' to # 'downloading' status, we have to update the worker entry in the # DB to reflect this change; otherwise the cleanup will not be # performed, as the volume would be mistaken for one that has been # changed some other way (reset status, forced operation...) if updates.get('status') == 'downloading': self.set_worker() # updates may have been emptied by the metadata handling above, so # re-check before hitting the DB. if updates: db.volume_update(self._context, self.id, updates) self.obj_reset_changes() def destroy(self): with self.obj_as_admin(): updated_values = db.volume_destroy(self._context, self.id) self.update(updated_values) self.obj_reset_changes(updated_values.keys()) def obj_load_attr(self, attrname): if attrname not in self.OPTIONAL_FIELDS: raise exception.ObjectActionError( action='obj_load_attr', reason=_('attribute %s not lazy-loadable') % attrname) if not self._context: raise exception.OrphanedObjectError(method='obj_load_attr', objtype=self.obj_name()) if attrname == 'metadata': self.metadata = db.volume_metadata_get(self._context, self.id) elif attrname == 'admin_metadata': self.admin_metadata = {} if self._context.is_admin: self.admin_metadata = db.volume_admin_metadata_get( self._context, self.id) elif attrname == 'glance_metadata': try: # NOTE(dulek): We're using the alias property here so the # list-to-dict conversion happens there. self.volume_glance_metadata = db.volume_glance_metadata_get( self._context, self.id) except exception.GlanceMetadataNotFound: # NOTE(dulek): The DB API raises when the volume has no # glance_metadata. We silence this because, at this level, # having no metadata is a completely valid result. self.glance_metadata = {} elif attrname == 'volume_type': # If the volume doesn't have a volume_type, VolumeType.get_by_id # would trigger a db call which raises a VolumeTypeNotFound # exception.
self.volume_type = (objects.VolumeType.get_by_id( self._context, self.volume_type_id) if self.volume_type_id else None) elif attrname == 'volume_attachment': attachments = objects.VolumeAttachmentList.get_all_by_volume_id( self._context, self.id) self.volume_attachment = attachments elif attrname == 'consistencygroup': if self.consistencygroup_id is None: self.consistencygroup = None else: consistencygroup = objects.ConsistencyGroup.get_by_id( self._context, self.consistencygroup_id) self.consistencygroup = consistencygroup elif attrname == 'snapshots': self.snapshots = objects.SnapshotList.get_all_for_volume( self._context, self.id) elif attrname == 'cluster': # If this volume doesn't belong to a cluster (cluster_name is # empty), the cluster field will be None. if self.cluster_name: self.cluster = objects.Cluster.get_by_id( self._context, name=self.cluster_name) else: self.cluster = None elif attrname == 'group': if self.group_id is None: self.group = None else: group = objects.Group.get_by_id( self._context, self.group_id) self.group = group self.obj_reset_changes(fields=[attrname]) def delete_metadata_key(self, key): db.volume_metadata_delete(self._context, self.id, key) md_was_changed = 'metadata' in self.obj_what_changed() del self.metadata[key] self._orig_metadata.pop(key, None) if not md_was_changed: self.obj_reset_changes(['metadata']) def finish_volume_migration(self, dest_volume): # We swap fields between source (i.e. self) and destination at the # end of migration because we want to keep the original volume id # in the DB but now pointing to the migrated volume. skip = ({'id', 'provider_location', 'glance_metadata', 'volume_type'} | set(self.obj_extra_fields)) for key in set(dest_volume.fields.keys()) - skip: # Only swap attributes that are already set. We do not want to # unexpectedly trigger a lazy-load. if not dest_volume.obj_attr_is_set(key): continue value = getattr(dest_volume, key) value_to_dst = getattr(self, key) # Destination must have a _name_id since the id no longer matches # the volume. If it doesn't have a _name_id, we set one. if key == '_name_id': if not dest_volume._name_id: setattr(dest_volume, key, self.id) continue elif key == 'migration_status': value = None value_to_dst = 'deleting' elif key == 'display_description': value_to_dst = 'migration src for ' + self.id elif key == 'status': value_to_dst = 'deleting' # Because dest_volume will be deleted soon, we can skip copying # volume_type_id and volume_type, which are not needed for volume # deletion. elif key == 'volume_type_id': # Initialize volume_type of the source volume using the new # volume_type_id. self.update({'volume_type_id': value}) continue setattr(self, key, value) setattr(dest_volume, key, value_to_dst) self.save() dest_volume.save() return dest_volume def get_latest_snapshot(self): """Get the volume's latest snapshot.""" snapshot_db = db.snapshot_get_latest_for_volume(self._context, self.id) snapshot = objects.Snapshot(self._context) return snapshot._from_db_object(self._context, snapshot, snapshot_db) @staticmethod def _is_cleanable(status, obj_version): # Before 1.6 we didn't have the workers table, so cleanup wasn't # supported.
if obj_version and obj_version < 1.6: return False return status in ('creating', 'deleting', 'uploading', 'downloading') def begin_attach(self, attach_mode): attachment = objects.VolumeAttachment( context=self._context, attach_status=c_fields.VolumeAttachStatus.ATTACHING, volume_id=self.id) attachment.create() with self.obj_as_admin(): self.admin_metadata['attached_mode'] = attach_mode self.save() return attachment def finish_detach(self, attachment_id): with self.obj_as_admin(): volume_updates, attachment_updates = ( db.volume_detached(self._context, self.id, attachment_id)) db.volume_admin_metadata_delete(self._context, self.id, 'attached_mode') self.admin_metadata.pop('attached_mode', None) # Remove attachment in volume only when this field is loaded. if attachment_updates and self.obj_attr_is_set('volume_attachment'): for i, attachment in enumerate(self.volume_attachment): if attachment.id == attachment_id: del self.volume_attachment.objects[i] break self.update(volume_updates) self.obj_reset_changes( list(volume_updates.keys()) + ['volume_attachment', 'admin_metadata']) def is_replicated(self): return self.volume_type and self.volume_type.is_replicated() def is_multiattach(self): return self.volume_type and self.volume_type.is_multiattach()
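# Illustrative sketch (standalone, not part of the Cinder object above): the
# "added_fields" downgrade pattern used by Volume.obj_make_compatible().
# The downgrade_primitive() helper and the sample field values are
# assumptions made for this example; only the pattern itself mirrors the
# object above. Each entry maps the version that introduced some fields to
# the fields that must be stripped from the serialized primitive before an
# older consumer can deserialize it.


def downgrade_primitive(primitive, target_version, added_fields):
    """Drop fields that the target version does not know about."""
    for version, new_fields in added_fields:
        if target_version < version:
            for name in new_fields:
                # pop() with a default: the field may not be set at all.
                primitive.pop(name, None)
    return primitive


# Note the trailing commas: ('service_uuid') is just a string, and iterating
# over it would pop single characters instead of the field name.
ADDED_FIELDS = (((1, 4), ('cluster', 'cluster_name')),
                ((1, 5), ('group', 'group_id')),
                ((1, 7), ('service_uuid',)),
                ((1, 8), ('shared_targets',)))

primitive = {'id': 'vol-1', 'group_id': 'grp-1', 'service_uuid': 'svc-1',
             'shared_targets': True}
downgrade_primitive(primitive, (1, 4), ADDED_FIELDS)
assert primitive == {'id': 'vol-1'}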
class Resource( heat_base.HeatObject, base.VersionedObjectDictCompat, base.ComparableVersionedObject, ): fields = { 'id': fields.IntegerField(), 'uuid': fields.StringField(), 'stack_id': fields.StringField(), 'created_at': fields.DateTimeField(read_only=True), 'updated_at': fields.DateTimeField(nullable=True), 'physical_resource_id': fields.StringField(nullable=True), 'name': fields.StringField(nullable=True), 'status': fields.StringField(nullable=True), 'status_reason': fields.StringField(nullable=True), 'action': fields.StringField(nullable=True), 'rsrc_metadata': heat_fields.JsonField(nullable=True), 'properties_data': heat_fields.JsonField(nullable=True), 'properties_data_encrypted': fields.BooleanField(default=False), 'data': fields.ListOfObjectsField(resource_data.ResourceData, nullable=True), 'engine_id': fields.StringField(nullable=True), 'atomic_key': fields.IntegerField(nullable=True), 'current_template_id': fields.IntegerField(), 'needed_by': heat_fields.ListField(nullable=True, default=None), 'requires': heat_fields.ListField(nullable=True, default=None), 'replaces': fields.IntegerField(nullable=True), 'replaced_by': fields.IntegerField(nullable=True), 'root_stack_id': fields.StringField(nullable=True), } @staticmethod def _from_db_object(resource, context, db_resource): if db_resource is None: return None for field in resource.fields: if field == 'data': resource['data'] = [ resource_data.ResourceData._from_db_object( resource_data.ResourceData(context), resd) for resd in db_resource.data ] else: resource[field] = db_resource[field] if resource.properties_data_encrypted and resource.properties_data: properties_data = {} for prop_name, prop_value in resource.properties_data.items(): method, value = prop_value decrypted_value = crypt.decrypt(method, value) prop_string = jsonutils.loads(decrypted_value) properties_data[prop_name] = prop_string resource.properties_data = properties_data resource._context = context resource.obj_reset_changes() return resource @classmethod def get_obj(cls, context, resource_id, refresh=False): resource_db = db_api.resource_get(context, resource_id, refresh=refresh) return cls._from_db_object(cls(context), context, resource_db) @classmethod def get_all(cls, context): resources_db = db_api.resource_get_all(context) resources = [ (resource_name, cls._from_db_object(cls(context), context, resource_db)) for resource_name, resource_db in six.iteritems(resources_db) ] return dict(resources) @classmethod def create(cls, context, values): return cls._from_db_object(cls(context), context, db_api.resource_create(context, values)) @classmethod def delete(cls, context, resource_id): db_api.resource_delete(context, resource_id) @classmethod def exchange_stacks(cls, context, resource_id1, resource_id2): return db_api.resource_exchange_stacks(context, resource_id1, resource_id2) @classmethod def get_all_by_stack(cls, context, stack_id, filters=None): cache = context.cache(ResourceCache) resources = cache.by_stack_id_name.get(stack_id) if resources: return dict(resources) resources_db = db_api.resource_get_all_by_stack( context, stack_id, filters) return cls._resources_to_dict(context, resources_db) @classmethod def _resources_to_dict(cls, context, resources_db): resources = [ (resource_name, cls._from_db_object(cls(context), context, resource_db)) for resource_name, resource_db in six.iteritems(resources_db) ] return dict(resources) @classmethod def get_all_active_by_stack(cls, context, stack_id): resources_db = db_api.resource_get_all_active_by_stack( context, 
stack_id) resources = [ (resource_id, cls._from_db_object(cls(context), context, resource_db)) for resource_id, resource_db in six.iteritems(resources_db) ] return dict(resources) @classmethod def get_all_by_root_stack(cls, context, stack_id, filters, cache=False): resources_db = db_api.resource_get_all_by_root_stack( context, stack_id, filters) all_resources = cls._resources_to_dict(context, resources_db) if cache: context.cache(ResourceCache).set_by_stack_id(all_resources) return all_resources @classmethod def purge_deleted(cls, context, stack_id): return db_api.resource_purge_deleted(context, stack_id) @classmethod def get_by_name_and_stack(cls, context, resource_name, stack_id): resource_db = db_api.resource_get_by_name_and_stack( context, resource_name, stack_id) return cls._from_db_object(cls(context), context, resource_db) @classmethod def get_by_physical_resource_id(cls, context, physical_resource_id): resource_db = db_api.resource_get_by_physical_resource_id( context, physical_resource_id) return cls._from_db_object(cls(context), context, resource_db) @classmethod def update_by_id(cls, context, resource_id, values): db_api.resource_update_and_save(context, resource_id, values) def update_and_save(self, values): db_api.resource_update_and_save(self._context, self.id, values) def select_and_update(self, values, expected_engine_id=None, atomic_key=0): return db_api.resource_update(self._context, self.id, values, atomic_key=atomic_key, expected_engine_id=expected_engine_id) def refresh(self): resource_db = db_api.resource_get(self._context, self.id, refresh=True) return self.__class__._from_db_object(self, self._context, resource_db) @staticmethod def encrypt_properties_data(data): if cfg.CONF.encrypt_parameters_and_properties and data: result = {} for prop_name, prop_value in data.items(): prop_string = jsonutils.dumps(prop_value) encrypted_value = crypt.encrypt(prop_string) result[prop_name] = encrypted_value return (True, result) return (False, data) def update_metadata(self, metadata): if self.rsrc_metadata != metadata: rows_updated = self.select_and_update({'rsrc_metadata': metadata}, self.engine_id, self.atomic_key) if not rows_updated: action = _('metadata setting for resource %s') % self.name raise exception.ConcurrentTransaction(action=action)
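# Illustrative sketch (standalone, with assumed table and column names) of
# the optimistic-locking pattern behind Resource.select_and_update() and
# update_metadata(): the UPDATE is guarded by the atomic_key the caller last
# read, so a writer racing against a concurrent update matches zero rows and
# can raise ConcurrentTransaction instead of silently clobbering data.

import sqlalchemy as sa

metadata_obj = sa.MetaData()
resource_table = sa.Table(
    'resource', metadata_obj,
    sa.Column('id', sa.Integer, primary_key=True),
    sa.Column('atomic_key', sa.Integer, nullable=False),
    sa.Column('rsrc_metadata', sa.Text, nullable=True))


def guarded_update(engine, resource_id, values, atomic_key):
    """Return True iff exactly one row matched (the CAS succeeded)."""
    stmt = (sa.update(resource_table)
            .where(resource_table.c.id == resource_id)
            # Compare: only touch the row if nobody else bumped the key ...
            .where(resource_table.c.atomic_key == atomic_key)
            # ... and swap: bump the key so concurrent writers miss.
            .values(atomic_key=atomic_key + 1, **values))
    with engine.begin() as conn:
        return conn.execute(stmt).rowcount == 1

# A caller that gets False back must re-read the row (and its new
# atomic_key) before retrying, which is exactly what the engine does around
# update_metadata().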
class Stack( heat_base.HeatObject, base.VersionedObjectDictCompat, base.ComparableVersionedObject, ): fields = { 'id': fields.StringField(), 'name': fields.StringField(), 'raw_template_id': fields.IntegerField(), 'backup': fields.BooleanField(), 'created_at': fields.DateTimeField(read_only=True), 'deleted_at': fields.DateTimeField(nullable=True), 'disable_rollback': fields.BooleanField(), 'nested_depth': fields.IntegerField(), 'owner_id': fields.StringField(nullable=True), 'stack_user_project_id': fields.StringField(nullable=True), 'tenant': fields.StringField(nullable=True), 'timeout': fields.IntegerField(nullable=True), 'updated_at': fields.DateTimeField(nullable=True), 'user_creds_id': fields.StringField(nullable=True), 'username': fields.StringField(nullable=True), 'action': fields.StringField(nullable=True), 'status': fields.StringField(nullable=True), 'status_reason': fields.StringField(nullable=True), 'raw_template': fields.ObjectField('RawTemplate'), 'convergence': fields.BooleanField(), 'current_traversal': fields.StringField(), 'current_deps': heat_fields.JsonField(), 'prev_raw_template_id': fields.IntegerField(), 'prev_raw_template': fields.ObjectField('RawTemplate'), 'parent_resource_name': fields.StringField(nullable=True), } @staticmethod def _from_db_object(context, stack, db_stack): for field in stack.fields: if field == 'raw_template': stack['raw_template'] = ( raw_template.RawTemplate.from_db_object( context, raw_template.RawTemplate(), db_stack['raw_template'])) else: stack[field] = db_stack.__dict__.get(field) stack._context = context stack.obj_reset_changes() return stack @classmethod def get_root_id(cls, context, stack_id): return db_api.stack_get_root_id(context, stack_id) @classmethod def get_by_id(cls, context, stack_id, **kwargs): db_stack = db_api.stack_get(context, stack_id, **kwargs) if not db_stack: return None stack = cls._from_db_object(context, cls(context), db_stack) return stack @classmethod def get_by_name_and_owner_id(cls, context, stack_name, owner_id): db_stack = db_api.stack_get_by_name_and_owner_id( context, six.text_type(stack_name), owner_id) if not db_stack: return None stack = cls._from_db_object(context, cls(context), db_stack) return stack @classmethod def get_by_name(cls, context, stack_name): db_stack = db_api.stack_get_by_name(context, six.text_type(stack_name)) if not db_stack: return None stack = cls._from_db_object(context, cls(context), db_stack) return stack @classmethod def get_all(cls, context, limit=None, sort_keys=None, marker=None, sort_dir=None, filters=None, show_deleted=False, show_nested=False, show_hidden=False, tags=None, tags_any=None, not_tags=None, not_tags_any=None): db_stacks = db_api.stack_get_all(context, limit=limit, sort_keys=sort_keys, marker=marker, sort_dir=sort_dir, filters=filters, show_deleted=show_deleted, show_nested=show_nested, show_hidden=show_hidden, tags=tags, tags_any=tags_any, not_tags=not_tags, not_tags_any=not_tags_any) for db_stack in db_stacks: try: yield cls._from_db_object(context, cls(context), db_stack) except exception.NotFound: pass @classmethod def get_all_by_owner_id(cls, context, owner_id): db_stacks = db_api.stack_get_all_by_owner_id(context, owner_id) for db_stack in db_stacks: try: yield cls._from_db_object(context, cls(context), db_stack) except exception.NotFound: pass @classmethod def count_all(cls, context, **kwargs): return db_api.stack_count_all(context, **kwargs) @classmethod def count_total_resources(cls, context, stack_id): return db_api.stack_count_total_resources(context, 
stack_id) @classmethod def create(cls, context, values): return cls._from_db_object(context, cls(context), db_api.stack_create(context, values)) @classmethod def update_by_id(cls, context, stack_id, values): """Update the stack and return a boolean indicating whether it was updated. Note: the underlying stack_update filters by current_traversal and stack_id. """ return db_api.stack_update(context, stack_id, values) @classmethod def select_and_update(cls, context, stack_id, values, exp_trvsl=None): """Update the stack by selecting on traversal ID. Uses UPDATE ... WHERE (compare and swap) to catch any concurrent update problem. If the stack is found with the given traversal, it is updated. If a race occurs while updating, only one writer will succeed and the others will get a return value of False. """ return db_api.stack_update(context, stack_id, values, exp_trvsl=exp_trvsl) @classmethod def persist_state_and_release_lock(cls, context, stack_id, engine_id, values): return db_api.persist_state_and_release_lock(context, stack_id, engine_id, values) @classmethod def delete(cls, context, stack_id): db_api.stack_delete(context, stack_id) def update_and_save(self, values): has_updated = self.__class__.update_by_id(self._context, self.id, values) if not has_updated: raise exception.NotFound( _('Attempt to update a stack with id: ' '%(id)s %(traversal)s %(msg)s') % { 'id': self.id, 'traversal': self.current_traversal, 'msg': 'that does not exist' }) def __eq__(self, another): self.refresh() # to make test object comparison work well return super(Stack, self).__eq__(another) def __ne__(self, other): return not self.__eq__(other) def refresh(self): db_stack = db_api.stack_get(self._context, self.id, show_deleted=True) if db_stack is None: message = _('No stack exists with id "%s"') % str(self.id) raise exception.NotFound(message) return self.__class__._from_db_object(self._context, self, db_stack) @classmethod def encrypt_hidden_parameters(cls, tmpl): raw_template.RawTemplate.encrypt_hidden_parameters(tmpl) @classmethod def get_status(cls, context, stack_id): """Return action and status for the given stack.""" return db_api.stack_get_status(context, stack_id) def identifier(self): """Return an identifier for this stack.""" return identifier.HeatIdentifier(self.tenant, self.name, self.id) @property def tags(self): return stack_tag.StackTagList.get(self._context, self.id)
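# Illustrative sketch (simplified, with assumed names) of the
# _from_db_object() hydration pattern shared by Volume, Resource and Stack
# above: copy every declared field from the DB row into the object, attach
# the request context, then reset the change tracker so a later save()
# writes only the fields the caller actually modified afterwards.


class MiniObject(object):
    fields = ('id', 'name', 'status')

    def __init__(self):
        self._context = None
        self._changed_fields = set()

    def obj_reset_changes(self):
        self._changed_fields.clear()

    @classmethod
    def _from_db_object(cls, context, obj, db_row):
        for field in cls.fields:
            setattr(obj, field, db_row.get(field))
            obj._changed_fields.add(field)
        obj._context = context
        # A freshly hydrated object is clean: nothing is dirty until the
        # caller mutates a field, so save() has nothing to persist yet.
        obj.obj_reset_changes()
        return obj


row = {'id': 'abc123', 'name': 'stack1', 'status': 'COMPLETE'}
obj = MiniObject._from_db_object(None, MiniObject(), row)
assert obj.name == 'stack1' and not obj._changed_fields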