# Example 1
class Router(base.NeutronDbObject):
    """Versioned object wrapping the ``l3.Router`` DB model."""

    # Version 1.0: Initial version
    # Version 1.1: Added "qos_policy_id" field
    VERSION = '1.1'

    db_model = l3.Router

    fields = {
        'id':
        common_types.UUIDField(),
        'project_id':
        obj_fields.StringField(nullable=True),
        'name':
        obj_fields.StringField(nullable=True),
        'status':
        common_types.RouterStatusEnumField(nullable=True),
        'admin_state_up':
        obj_fields.BooleanField(nullable=True),
        'gw_port_id':
        common_types.UUIDField(nullable=True),
        'enable_snat':
        obj_fields.BooleanField(default=True),
        'flavor_id':
        common_types.UUIDField(nullable=True),
        'extra_attributes':
        obj_fields.ObjectField('RouterExtraAttributes', nullable=True),
        'qos_policy_id':
        common_types.UUIDField(nullable=True, default=None),
    }

    # Fields not backed by columns of db_model; they are loaded/stored
    # through related objects instead.
    synthetic_fields = [
        'extra_attributes',
        'qos_policy_id',
    ]

    fields_no_update = ['project_id']

    @classmethod
    @db_api.CONTEXT_READER
    def check_routers_not_owned_by_projects(cls, context, gw_ports, projects):
        """Return True if any router whose gateway port is in ``gw_ports``
        is NOT owned by one of the given ``projects``.
        """

        # TODO(hungpv) We may want to implement NOT semantic in get_object(s)
        query = context.session.query(l3.Router).filter(
            l3.Router.gw_port_id.in_(gw_ports))

        query = query.filter(~l3.Router.project_id.in_(projects))

        return bool(query.count())

    def _attach_qos_policy(self, qos_policy_id):
        """Replace the router's gateway-IP QoS policy binding.

        Any existing binding is removed first; a new one is created only
        when ``qos_policy_id`` is truthy.
        """
        qos_binding.QosPolicyRouterGatewayIPBinding.delete_objects(
            self.obj_context, router_id=self.id)
        if qos_policy_id:
            qos_binding.QosPolicyRouterGatewayIPBinding(
                self.obj_context, policy_id=qos_policy_id,
                router_id=self.id).create()

        self.qos_policy_id = qos_policy_id
        # The binding is already persisted; don't report the field as dirty.
        self.obj_reset_changes(['qos_policy_id'])

    def create(self):
        """Create the router row and, if requested, its QoS binding."""
        fields = self.obj_get_changes()
        with self.db_context_writer(self.obj_context):
            # Capture before super().create() resets change tracking.
            qos_policy_id = self.qos_policy_id
            super().create()
            if 'qos_policy_id' in fields:
                self._attach_qos_policy(qos_policy_id)

    def update(self):
        """Update the router row and re-attach the QoS binding if changed."""
        fields = self.obj_get_changes()
        with self.db_context_writer(self.obj_context):
            super().update()
            if 'qos_policy_id' in fields:
                self._attach_qos_policy(fields['qos_policy_id'])

    def from_db_object(self, db_obj):
        """Populate this object from ``db_obj``, including the synthetic
        ``qos_policy_id`` taken from the gateway QoS policy binding.
        """
        super().from_db_object(db_obj)
        fields_to_change = []
        if db_obj.get('qos_policy_binding'):
            self.qos_policy_id = db_obj.qos_policy_binding.policy_id
            fields_to_change.append('qos_policy_id')

        self.obj_reset_changes(fields_to_change)

    def obj_make_compatible(self, primitive, target_version):
        """Downgrade ``primitive`` for peers older than this version."""
        _target_version = versionutils.convert_version_to_tuple(target_version)
        if _target_version < (1, 1):
            # "qos_policy_id" did not exist before 1.1.
            primitive.pop('qos_policy_id', None)
# Example 2
class Volume(base.CinderPersistentObject, base.CinderObject,
             base.CinderObjectDictCompat):
    """Versioned object representing a Cinder volume DB row."""

    # Version 1.0: Initial version
    VERSION = '1.0'

    fields = {
        'id': fields.UUIDField(),
        '_name_id': fields.UUIDField(nullable=True),
        'ec2_id': fields.UUIDField(nullable=True),
        'user_id': fields.UUIDField(nullable=True),
        'project_id': fields.UUIDField(nullable=True),
        'snapshot_id': fields.UUIDField(nullable=True),
        'host': fields.StringField(nullable=True),
        'size': fields.IntegerField(),
        'availability_zone': fields.StringField(),
        'status': fields.StringField(),
        'attach_status': fields.StringField(),
        'migration_status': fields.StringField(nullable=True),
        'scheduled_at': fields.DateTimeField(nullable=True),
        'launched_at': fields.DateTimeField(nullable=True),
        'terminated_at': fields.DateTimeField(nullable=True),
        'display_name': fields.StringField(nullable=True),
        'display_description': fields.StringField(nullable=True),
        'provider_id': fields.UUIDField(nullable=True),
        'provider_location': fields.StringField(nullable=True),
        'provider_auth': fields.StringField(nullable=True),
        'provider_geometry': fields.StringField(nullable=True),
        'volume_type_id': fields.UUIDField(nullable=True),
        'source_volid': fields.UUIDField(nullable=True),
        'encryption_key_id': fields.UUIDField(nullable=True),
        'consistencygroup_id': fields.UUIDField(nullable=True),
        'deleted': fields.BooleanField(default=False),
        'bootable': fields.BooleanField(default=False),
        'replication_status': fields.StringField(nullable=True),
        'replication_extended_status': fields.StringField(nullable=True),
        'replication_driver_data': fields.StringField(nullable=True),
        'previous_status': fields.StringField(nullable=True),
    }

    # NOTE(thangp): obj_extra_fields is used to hold properties that are not
    # usually part of the model
    obj_extra_fields = ['name', 'name_id']

    @property
    def name_id(self):
        """UUID used to build the volume name; falls back to ``id``."""
        return self.id if not self._name_id else self._name_id

    @name_id.setter
    def name_id(self, value):
        self._name_id = value

    @property
    def name(self):
        """Backend-facing volume name rendered from the configured template."""
        return CONF.volume_name_template % self.name_id

    # NOTE: the previous no-op __init__ that only delegated to super() was
    # removed; plain inheritance does the same thing.

    def obj_make_compatible(self, primitive, target_version):
        """Make an object representation compatible with a target version."""
        # Nothing to strip at version 1.0; parsing still validates the
        # requested target version string.
        target_version = utils.convert_version_to_tuple(target_version)

    @staticmethod
    def _from_db_object(context, volume, db_volume):
        """Copy DB row values into ``volume`` and reset change tracking."""
        for name, field in volume.fields.items():
            value = db_volume[name]
            # Normalize NULL integer columns to 0 for consumers.
            if isinstance(field, fields.IntegerField):
                value = value or 0
            volume[name] = value

        volume._context = context
        volume.obj_reset_changes()
        return volume

    @base.remotable_classmethod
    def get_by_id(cls, context, id):
        """Fetch a volume by ``id`` and return it as a Volume object."""
        db_volume = db.volume_get(context, id)
        return cls._from_db_object(context, cls(context), db_volume)

    @base.remotable
    def create(self):
        """Persist a new volume row.

        :raises ObjectActionError: if the object was already created.
        """
        if self.obj_attr_is_set('id'):
            raise exception.ObjectActionError(action='create',
                                              reason=_('already created'))
        updates = self.cinder_obj_get_changes()
        db_volume = db.volume_create(self._context, updates)
        self._from_db_object(self._context, self, db_volume)

    @base.remotable
    def save(self):
        """Write any pending field changes to the DB."""
        updates = self.cinder_obj_get_changes()
        if updates:
            db.volume_update(self._context, self.id, updates)

        self.obj_reset_changes()

    @base.remotable
    def destroy(self):
        """Delete the volume row from the DB."""
        db.volume_destroy(self._context, self.id)
# Example 3
class NetworkSegment(base.NeutronDbObject):
    """Versioned object wrapping the ``NetworkSegment`` DB model."""

    # Version 1.0: Initial version
    VERSION = '1.0'

    db_model = segment_model.NetworkSegment

    fields = {
        'id': common_types.UUIDField(),
        'network_id': common_types.UUIDField(),
        'name': obj_fields.StringField(nullable=True),
        'network_type': obj_fields.StringField(),
        'physical_network': obj_fields.StringField(nullable=True),
        'segmentation_id': obj_fields.IntegerField(nullable=True),
        'is_dynamic': obj_fields.BooleanField(default=False),
        'segment_index': obj_fields.IntegerField(default=0),
        'hosts': obj_fields.ListOfStringsField(nullable=True)
    }

    # "hosts" is not a column; it is materialized from SegmentHostMapping.
    synthetic_fields = ['hosts']

    fields_no_update = ['network_id']

    foreign_keys = {
        'Network': {
            'network_id': 'id'
        },
        'PortBindingLevel': {
            'id': 'segment_id'
        },
    }

    def create(self):
        """Create the segment and persist any host mappings set on it."""
        fields = self.obj_get_changes()
        with db_api.autonested_transaction(self.obj_context.session):
            hosts = self.hosts
            if hosts is None:
                hosts = []
            super(NetworkSegment, self).create()
            if 'hosts' in fields:
                self._attach_hosts(hosts)

    def update(self):
        """Update the segment and replace host mappings when they changed."""
        fields = self.obj_get_changes()
        with db_api.autonested_transaction(self.obj_context.session):
            super(NetworkSegment, self).update()
            if 'hosts' in fields:
                self._attach_hosts(fields['hosts'])

    def _attach_hosts(self, hosts):
        """Replace all SegmentHostMapping rows for this segment."""
        SegmentHostMapping.delete_objects(
            self.obj_context,
            segment_id=self.id,
        )
        if hosts:
            for host in hosts:
                SegmentHostMapping(self.obj_context,
                                   segment_id=self.id,
                                   host=host).create()
        self.hosts = hosts
        # Mappings are already persisted; don't report "hosts" as dirty.
        self.obj_reset_changes(['hosts'])

    def obj_load_attr(self, attrname):
        """Lazy-load the synthetic "hosts" attribute on first access."""
        if attrname == 'hosts':
            return self._load_hosts()
        super(NetworkSegment, self).obj_load_attr(attrname)

    def _load_hosts(self, db_obj=None):
        """Fill ``self.hosts`` from ``db_obj`` or, failing that, the DB."""
        if db_obj:
            hosts = db_obj.get('segment_host_mapping', [])
        else:
            hosts = SegmentHostMapping.get_objects(self.obj_context,
                                                   segment_id=self.id)

        self.hosts = [host['host'] for host in hosts]
        self.obj_reset_changes(['hosts'])

    def from_db_object(self, db_obj):
        """Populate the object from ``db_obj`` including host mappings."""
        super(NetworkSegment, self).from_db_object(db_obj)
        self._load_hosts(db_obj)

    @classmethod
    def get_objects(cls, context, _pager=None, **kwargs):
        """Fetch segments, sorted by (network_id, segment_index) by default."""
        if not _pager:
            _pager = base.Pager()
        if not _pager.sorts:
            # (NOTE) True means ASC, False is DESC
            _pager.sorts = [(field, True)
                            for field in ('network_id', 'segment_index')]
        return super(NetworkSegment, cls).get_objects(context, _pager,
                                                      **kwargs)
# Example 4
class QosPolicy(rbac_db.NeutronRbacObject):
    """RBAC-aware versioned object for QoS policies and their bindings."""

    # Version 1.0: Initial version
    # Version 1.1: QosDscpMarkingRule introduced
    # Version 1.2: Added QosMinimumBandwidthRule
    # Version 1.3: Added standard attributes (created_at, revision, etc)
    # Version 1.4: Changed tenant_id to project_id
    # Version 1.5: Direction for bandwidth limit rule added
    # Version 1.6: Added "is_default" field
    # Version 1.7: Added floating IP bindings
    # Version 1.8: Added router gateway QoS policy bindings
    VERSION = '1.8'

    # required by RbacNeutronMetaclass
    rbac_db_cls = QosPolicyRBAC
    db_model = qos_db_model.QosPolicy

    fields = {
        'id': common_types.UUIDField(),
        'project_id': obj_fields.StringField(),
        'name': obj_fields.StringField(),
        'shared': obj_fields.BooleanField(default=False),
        'rules': obj_fields.ListOfObjectsField('QosRule', subclasses=True),
        'is_default': obj_fields.BooleanField(default=False),
    }

    fields_no_update = ['id', 'project_id']

    # Loaded from related tables rather than columns of db_model.
    synthetic_fields = ['rules', 'is_default']

    extra_filter_names = {'is_default'}

    # Object types a policy can be bound to; used by delete() to refuse
    # removing a policy that is still in use.
    binding_models = {
        'port': binding.QosPolicyPortBinding,
        'network': binding.QosPolicyNetworkBinding,
        'fip': binding.QosPolicyFloatingIPBinding,
        'router': binding.QosPolicyRouterGatewayIPBinding
    }

    def obj_load_attr(self, attrname):
        """Lazy-load the synthetic "rules"/"is_default" attributes."""
        if attrname == 'rules':
            return self._reload_rules()
        elif attrname == 'is_default':
            return self._reload_is_default()
        return super(QosPolicy, self).obj_load_attr(attrname)

    def _reload_rules(self):
        """Refresh ``self.rules`` from the DB and clear its dirty flag."""
        rules = rule_obj_impl.get_rules(self, self.obj_context, self.id)
        setattr(self, 'rules', rules)
        self.obj_reset_changes(['rules'])

    def _reload_is_default(self):
        """Recompute ``is_default`` by comparing against the project default."""
        if self.get_default() == self.id:
            setattr(self, 'is_default', True)
        else:
            setattr(self, 'is_default', False)
        self.obj_reset_changes(['is_default'])

    def get_rule_by_id(self, rule_id):
        """Return rule specified by rule_id.

        @raise QosRuleNotFound: if there is no such rule in the policy.
        """

        for rule in self.rules:
            if rule_id == rule.id:
                return rule
        raise qos_exc.QosRuleNotFound(policy_id=self.id, rule_id=rule_id)

    # TODO(hichihara): For tag mechanism. This will be removed in bug/1704137
    def to_dict(self):
        """Return the policy as a dict, including standard-attribute tags."""
        _dict = super(QosPolicy, self).to_dict()
        try:
            _dict['tags'] = [t.tag for t in self.db_obj.standard_attr.tags]
        except AttributeError:
            # AttributeError can be raised when accessing self.db_obj
            # or self.db_obj.standard_attr
            pass
        return _dict

    @classmethod
    def get_policy_obj(cls, context, policy_id):
        """Fetch a QoS policy.

        :param context: neutron api request context
        :type context: neutron.context.Context
        :param policy_id: the id of the QosPolicy to fetch
        :type policy_id: str uuid

        :returns: a QosPolicy object
        :raises: n_exc.QosPolicyNotFound
        """

        obj = cls.get_object(context, id=policy_id)
        if obj is None:
            raise qos_exc.QosPolicyNotFound(policy_id=policy_id)
        return obj

    @classmethod
    def get_object(cls, context, **kwargs):
        """Fetch one policy with its synthetic fields populated, or None."""
        policy_obj = super(QosPolicy, cls).get_object(context, **kwargs)
        if not policy_obj:
            return

        policy_obj.obj_load_attr('rules')
        policy_obj.obj_load_attr('is_default')
        return policy_obj

    @classmethod
    def get_objects(cls,
                    context,
                    _pager=None,
                    validate_filters=True,
                    **kwargs):
        """Fetch policies with their synthetic fields populated."""
        objs = super(QosPolicy, cls).get_objects(context, _pager,
                                                 validate_filters, **kwargs)
        result = []
        for obj in objs:
            obj.obj_load_attr('rules')
            obj.obj_load_attr('is_default')
            result.append(obj)
        return result

    @classmethod
    def _get_object_policy(cls, context, binding_cls, **kwargs):
        """Return the policy bound through ``binding_cls``, if any."""
        with cls.db_context_reader(context):
            binding_db_obj = obj_db_api.get_object(binding_cls, context,
                                                   **kwargs)
            if binding_db_obj:
                return cls.get_object(context, id=binding_db_obj['policy_id'])

    @classmethod
    def get_network_policy(cls, context, network_id):
        """Return the policy bound to ``network_id``, if any."""
        return cls._get_object_policy(context,
                                      binding.QosPolicyNetworkBinding,
                                      network_id=network_id)

    @classmethod
    def get_port_policy(cls, context, port_id):
        """Return the policy bound to ``port_id``, if any."""
        return cls._get_object_policy(context,
                                      binding.QosPolicyPortBinding,
                                      port_id=port_id)

    @classmethod
    def get_fip_policy(cls, context, fip_id):
        """Return the policy bound to floating IP ``fip_id``, if any."""
        return cls._get_object_policy(context,
                                      binding.QosPolicyFloatingIPBinding,
                                      fip_id=fip_id)

    @classmethod
    def get_router_policy(cls, context, router_id):
        """Return the policy bound to router gateway ``router_id``, if any."""
        return cls._get_object_policy(context,
                                      binding.QosPolicyRouterGatewayIPBinding,
                                      router_id=router_id)

    # TODO(QoS): Consider extending base to trigger registered methods for us
    def create(self):
        """Create the policy, registering it as project default if asked."""
        with self.db_context_writer(self.obj_context):
            super(QosPolicy, self).create()
            if self.is_default:
                self.set_default()
            self.obj_load_attr('rules')

    def update(self):
        """Update the policy, adjusting the project-default marker first."""
        with self.db_context_writer(self.obj_context):
            if 'is_default' in self.obj_what_changed():
                if self.is_default:
                    self.set_default()
                else:
                    self.unset_default()
            super(QosPolicy, self).update()

    def delete(self):
        """Delete the policy; refuse if it is still bound to any object."""
        with self.db_context_writer(self.obj_context):
            for object_type, obj_class in self.binding_models.items():
                # One binding is enough to prove the policy is in use.
                pager = base_db.Pager(limit=1)
                binding_obj = obj_class.get_objects(self.obj_context,
                                                    policy_id=self.id,
                                                    _pager=pager)
                if binding_obj:
                    raise qos_exc.QosPolicyInUse(
                        policy_id=self.id,
                        object_type=object_type,
                        object_id=binding_obj[0]['%s_id' % object_type])

            super(QosPolicy, self).delete()

    def attach_network(self, network_id):
        """Bind this policy to a network."""
        network_binding = {'policy_id': self.id, 'network_id': network_id}
        network_binding_obj = binding.QosPolicyNetworkBinding(
            self.obj_context, **network_binding)
        try:
            network_binding_obj.create()
        except db_exc.DBReferenceError as e:
            raise qos_exc.NetworkQosBindingError(policy_id=self.id,
                                                 net_id=network_id,
                                                 db_error=e)

    def attach_port(self, port_id):
        """Bind this policy to a port."""
        port_binding_obj = binding.QosPolicyPortBinding(self.obj_context,
                                                        policy_id=self.id,
                                                        port_id=port_id)
        try:
            port_binding_obj.create()
        except db_exc.DBReferenceError as e:
            raise qos_exc.PortQosBindingError(policy_id=self.id,
                                              port_id=port_id,
                                              db_error=e)

    def attach_floatingip(self, fip_id):
        """Bind this policy to a floating IP."""
        fip_binding_obj = binding.QosPolicyFloatingIPBinding(self.obj_context,
                                                             policy_id=self.id,
                                                             fip_id=fip_id)
        try:
            fip_binding_obj.create()
        except db_exc.DBReferenceError as e:
            raise qos_exc.FloatingIPQosBindingError(policy_id=self.id,
                                                    fip_id=fip_id,
                                                    db_error=e)

    def attach_router(self, router_id):
        """Bind this policy to a router gateway."""
        router_binding_obj = binding.QosPolicyRouterGatewayIPBinding(
            self.obj_context, policy_id=self.id, router_id=router_id)
        try:
            router_binding_obj.create()
        except db_exc.DBReferenceError as e:
            raise qos_exc.RouterQosBindingError(policy_id=self.id,
                                                router_id=router_id,
                                                db_error=e)

    def detach_network(self, network_id):
        """Remove the binding between this policy and a network."""
        deleted = binding.QosPolicyNetworkBinding.delete_objects(
            self.obj_context, network_id=network_id)
        if not deleted:
            raise qos_exc.NetworkQosBindingNotFound(net_id=network_id,
                                                    policy_id=self.id)

    def detach_port(self, port_id):
        """Remove the binding between this policy and a port."""
        deleted = binding.QosPolicyPortBinding.delete_objects(self.obj_context,
                                                              port_id=port_id)
        if not deleted:
            raise qos_exc.PortQosBindingNotFound(port_id=port_id,
                                                 policy_id=self.id)

    def detach_floatingip(self, fip_id):
        """Remove the binding between this policy and a floating IP."""
        deleted = binding.QosPolicyFloatingIPBinding.delete_objects(
            self.obj_context, fip_id=fip_id)
        if not deleted:
            raise qos_exc.FloatingIPQosBindingNotFound(fip_id=fip_id,
                                                       policy_id=self.id)

    def detach_router(self, router_id):
        """Remove the binding between this policy and a router gateway."""
        deleted = binding.QosPolicyRouterGatewayIPBinding.delete_objects(
            self.obj_context, router_id=router_id)
        if not deleted:
            raise qos_exc.RouterQosBindingNotFound(router_id=router_id,
                                                   policy_id=self.id)

    def set_default(self):
        """Mark this policy as the project default (at most one allowed)."""
        if not self.get_default():
            qos_default_policy = QosPolicyDefault(self.obj_context,
                                                  qos_policy_id=self.id,
                                                  project_id=self.project_id)
            qos_default_policy.create()
        elif self.get_default() != self.id:
            raise qos_exc.QoSPolicyDefaultAlreadyExists(
                project_id=self.project_id)

    def unset_default(self):
        """Drop the project-default marker if it points at this policy."""
        if self.get_default() == self.id:
            qos_default_policy = QosPolicyDefault.get_object(
                self.obj_context, project_id=self.project_id)
            qos_default_policy.delete()

    def get_default(self):
        """Return the id of the project's default policy, or None."""
        qos_default_policy = QosPolicyDefault.get_object(
            self.obj_context, project_id=self.project_id)
        if qos_default_policy:
            return qos_default_policy.qos_policy_id

    def get_bound_networks(self):
        """Return ids of networks bound to this policy."""
        return [
            nb.network_id
            for nb in binding.QosPolicyNetworkBinding.get_objects(
                self.obj_context, policy_id=self.id)
        ]

    def get_bound_ports(self):
        """Return ids of ports bound to this policy."""
        return [
            pb.port_id for pb in binding.QosPolicyPortBinding.get_objects(
                self.obj_context, policy_id=self.id)
        ]

    def get_bound_floatingips(self):
        """Return ids of floating IPs bound to this policy."""
        return [
            fb.fip_id for fb in binding.QosPolicyFloatingIPBinding.get_objects(
                self.obj_context, policy_id=self.id)
        ]

    def get_bound_routers(self):
        """Return ids of routers whose gateway is bound to this policy."""
        return [
            rb.router_id
            for rb in binding.QosPolicyRouterGatewayIPBinding.get_objects(
                self.obj_context, policy_id=self.id)
        ]

    @classmethod
    def _get_bound_tenant_ids(cls, session, binding_db, bound_db,
                              binding_db_id_column, policy_id):
        """Return tenant ids of ``bound_db`` rows bound to ``policy_id``."""
        return list(
            itertools.chain.from_iterable(
                session.query(bound_db.tenant_id).join(
                    binding_db, bound_db.id == binding_db_id_column).filter(
                        binding_db.policy_id == policy_id).all()))

    @classmethod
    def get_bound_tenant_ids(cls, context, policy_id):
        """Implements RbacNeutronObject.get_bound_tenant_ids.

        :returns: set -- a set of tenants' ids dependent on QosPolicy.
        """
        net = models_v2.Network
        qosnet = qos_db_model.QosNetworkPolicyBinding
        port = models_v2.Port
        qosport = qos_db_model.QosPortPolicyBinding
        fip = l3.FloatingIP
        qosfip = qos_db_model.QosFIPPolicyBinding
        router = l3.Router
        qosrouter = qos_db_model.QosRouterGatewayIPPolicyBinding
        bound_tenants = []
        with cls.db_context_reader(context):
            bound_tenants.extend(
                cls._get_bound_tenant_ids(context.session, qosnet, net,
                                          qosnet.network_id, policy_id))
            bound_tenants.extend(
                cls._get_bound_tenant_ids(context.session, qosport, port,
                                          qosport.port_id, policy_id))
            bound_tenants.extend(
                cls._get_bound_tenant_ids(context.session, qosfip, fip,
                                          qosfip.fip_id, policy_id))
            bound_tenants.extend(
                cls._get_bound_tenant_ids(context.session, qosrouter, router,
                                          qosrouter.router_id, policy_id))
        return set(bound_tenants)

    def obj_make_compatible(self, primitive, target_version):
        """Refuse downgrades below 1.8; older formats are not supported."""
        _target_version = versionutils.convert_version_to_tuple(target_version)
        if _target_version < (1, 8):
            raise exception.IncompatibleObjectVersion(
                objver=target_version, objname=self.__class__.__name__)
# Example 5
class Volume(cleanable.CinderCleanableObject, base.CinderObject,
             base.CinderObjectDictCompat, base.CinderComparableObject,
             base.ClusteredObject):
    # Version 1.0: Initial version
    # Version 1.1: Added metadata, admin_metadata, volume_attachment, and
    #              volume_type
    # Version 1.2: Added glance_metadata, consistencygroup and snapshots
    # Version 1.3: Added finish_volume_migration()
    # Version 1.4: Added cluster fields
    # Version 1.5: Added group
    # Version 1.6: This object is now cleanable (adds rows to workers table)
    # Version 1.7: Added service_uuid
    # Version 1.8: Added shared_targets
    VERSION = '1.8'

    OPTIONAL_FIELDS = ('metadata', 'admin_metadata', 'glance_metadata',
                       'volume_type', 'volume_attachment', 'consistencygroup',
                       'snapshots', 'cluster', 'group')

    fields = {
        'id': fields.UUIDField(),
        '_name_id': fields.UUIDField(nullable=True),
        'ec2_id': fields.UUIDField(nullable=True),
        'user_id': fields.StringField(nullable=True),
        'project_id': fields.StringField(nullable=True),

        'snapshot_id': fields.UUIDField(nullable=True),

        'cluster_name': fields.StringField(nullable=True),
        'cluster': fields.ObjectField('Cluster', nullable=True,
                                      read_only=True),
        'host': fields.StringField(nullable=True),
        'size': fields.IntegerField(nullable=True),
        'availability_zone': fields.StringField(nullable=True),
        'status': fields.StringField(nullable=True),
        'attach_status': c_fields.VolumeAttachStatusField(nullable=True),
        'migration_status': fields.StringField(nullable=True),

        'scheduled_at': fields.DateTimeField(nullable=True),
        'launched_at': fields.DateTimeField(nullable=True),
        'terminated_at': fields.DateTimeField(nullable=True),

        'display_name': fields.StringField(nullable=True),
        'display_description': fields.StringField(nullable=True),

        'provider_id': fields.StringField(nullable=True),
        'provider_location': fields.StringField(nullable=True),
        'provider_auth': fields.StringField(nullable=True),
        'provider_geometry': fields.StringField(nullable=True),

        'volume_type_id': fields.UUIDField(nullable=True),
        'source_volid': fields.UUIDField(nullable=True),
        'encryption_key_id': fields.UUIDField(nullable=True),

        'consistencygroup_id': fields.UUIDField(nullable=True),
        'group_id': fields.UUIDField(nullable=True),

        'deleted': fields.BooleanField(default=False, nullable=True),
        'bootable': fields.BooleanField(default=False, nullable=True),
        'multiattach': fields.BooleanField(default=False, nullable=True),

        'replication_status': fields.StringField(nullable=True),
        'replication_extended_status': fields.StringField(nullable=True),
        'replication_driver_data': fields.StringField(nullable=True),

        'previous_status': fields.StringField(nullable=True),

        'metadata': fields.DictOfStringsField(nullable=True),
        'admin_metadata': fields.DictOfStringsField(nullable=True),
        'glance_metadata': fields.DictOfStringsField(nullable=True),
        'volume_type': fields.ObjectField('VolumeType', nullable=True),
        'volume_attachment': fields.ObjectField('VolumeAttachmentList',
                                                nullable=True),
        'consistencygroup': fields.ObjectField('ConsistencyGroup',
                                               nullable=True),
        'snapshots': fields.ObjectField('SnapshotList', nullable=True),
        'group': fields.ObjectField('Group', nullable=True),
        'service_uuid': fields.StringField(nullable=True),
        'shared_targets': fields.BooleanField(default=True, nullable=True),
    }

    # NOTE(thangp): obj_extra_fields is used to hold properties that are not
    # usually part of the model
    obj_extra_fields = ['name', 'name_id', 'volume_metadata',
                        'volume_admin_metadata', 'volume_glance_metadata']

    @classmethod
    def _get_expected_attrs(cls, context, *args, **kwargs):
        """Return the joined attributes to eager-load for a volume query."""
        attrs = ['metadata', 'volume_type', 'volume_type.extra_specs']
        if context.is_admin:
            attrs += ['admin_metadata']
        return attrs

    @property
    def name_id(self):
        """Effective name id: the override when set, otherwise the volume id."""
        return self._name_id if self._name_id else self.id

    @name_id.setter
    def name_id(self, value):
        self._name_id = value

    @property
    def name(self):
        """Backend-facing volume name built from the configured template."""
        return CONF.volume_name_template % self.name_id

    # TODO(dulek): Three properties below are for compatibility with dict
    # representation of volume. The format there is different (list of
    # SQLAlchemy models) so we need a conversion. Anyway - these should be
    # removed when we stop this class from deriving from
    # CinderObjectDictCompat.
    @property
    def volume_metadata(self):
        """List-of-objects view over ``metadata`` for dict-compat callers."""
        return [MetadataObject(key, value)
                for key, value in self.metadata.items()]

    @volume_metadata.setter
    def volume_metadata(self, value):
        self.metadata = {item['key']: item['value'] for item in value}

    @property
    def volume_admin_metadata(self):
        """List-of-objects view over ``admin_metadata``."""
        return [MetadataObject(key, value)
                for key, value in self.admin_metadata.items()]

    @volume_admin_metadata.setter
    def volume_admin_metadata(self, value):
        self.admin_metadata = {item['key']: item['value'] for item in value}

    @property
    def volume_glance_metadata(self):
        """List-of-objects view over ``glance_metadata``."""
        return [MetadataObject(key, value)
                for key, value in self.glance_metadata.items()]

    @volume_glance_metadata.setter
    def volume_glance_metadata(self, value):
        self.glance_metadata = {item['key']: item['value'] for item in value}

    def __init__(self, *args, **kwargs):
        super(Volume, self).__init__(*args, **kwargs)
        # Baseline snapshots used by obj_what_changed() to detect in-place
        # mutation of the metadata dicts.
        self._orig_metadata = {}
        self._orig_admin_metadata = {}
        self._orig_glance_metadata = {}

        self._reset_metadata_tracking()

    def obj_reset_changes(self, fields=None):
        """Reset field change tracking, including the metadata snapshots."""
        super(Volume, self).obj_reset_changes(fields)
        self._reset_metadata_tracking(fields=fields)

    @classmethod
    def _obj_from_primitive(cls, context, objver, primitive):
        """Deserialize a primitive and rebase the metadata snapshots."""
        obj = super(Volume, Volume)._obj_from_primitive(context, objver,
                                                        primitive)
        # The deserialized metadata becomes the new change-detection baseline.
        obj._reset_metadata_tracking()
        return obj

    def _reset_metadata_tracking(self, fields=None):
        """Snapshot the metadata dicts so later mutations can be detected.

        When ``fields`` is None every metadata dict is snapshotted;
        otherwise only those listed in ``fields``.
        """
        for attr in ('metadata', 'admin_metadata', 'glance_metadata'):
            if fields is None or attr in fields:
                snapshot = dict(getattr(self, attr)) if attr in self else {}
                setattr(self, '_orig_%s' % attr, snapshot)

    def obj_what_changed(self):
        """Extend base change detection with in-place metadata mutations."""
        changes = super(Volume, self).obj_what_changed()
        for attr in ('metadata', 'admin_metadata', 'glance_metadata'):
            if attr not in self:
                continue
            if getattr(self, attr) != getattr(self, '_orig_%s' % attr):
                changes.add(attr)

        return changes

    def obj_make_compatible(self, primitive, target_version):
        """Make a Volume representation compatible with a target version.

        Strips fields the target version does not know about, after letting
        the base class downgrade related objects.

        :param primitive: dict of serialized fields, modified in place
        :param target_version: version string the primitive must conform to
        """
        # (version the fields appeared in) -> fields to strip for older
        # consumers.  NOTE: single-element entries must be real tuples
        # ('x',); the previous bare ('service_uuid') was just a
        # parenthesized string, so the loop below iterated its characters
        # and the field was never removed.
        added_fields = (((1, 4), ('cluster', 'cluster_name')),
                        ((1, 5), ('group', 'group_id')),
                        ((1, 7), ('service_uuid',)))

        # Convert all related objects first.
        super(Volume, self).obj_make_compatible(primitive, target_version)

        target_version = versionutils.convert_version_to_tuple(target_version)
        for version, remove_fields in added_fields:
            if target_version < version:
                for obj_field in remove_fields:
                    primitive.pop(obj_field, None)

    @classmethod
    def _from_db_object(cls, context, volume, db_volume, expected_attrs=None):
        """Populate a Volume object in place from a DB volume row.

        :param context: request context, stored on the resulting object
        :param volume: Volume object to fill
        :param db_volume: DB model instance (dict-like row)
        :param expected_attrs: names of optional attributes that were joined
            into the DB query and should be converted here; attributes not
            listed are left unset for lazy loading via obj_load_attr()
        :returns: the populated ``volume`` with change tracking reset
        """
        if expected_attrs is None:
            expected_attrs = []
        # Copy plain (non-optional) columns straight from the row.
        for name, field in volume.fields.items():
            if name in cls.OPTIONAL_FIELDS:
                continue
            value = db_volume.get(name)
            if isinstance(field, fields.IntegerField):
                # Normalize NULL integer columns to 0.
                value = value or 0
            volume[name] = value

        # Get data from db_volume object that was queried by joined query
        # from DB
        if 'metadata' in expected_attrs:
            metadata = db_volume.get('volume_metadata', [])
            volume.metadata = {item['key']: item['value'] for item in metadata}
        if 'admin_metadata' in expected_attrs:
            metadata = db_volume.get('volume_admin_metadata', [])
            volume.admin_metadata = {item['key']: item['value']
                                     for item in metadata}
        if 'glance_metadata' in expected_attrs:
            metadata = db_volume.get('volume_glance_metadata', [])
            volume.glance_metadata = {item['key']: item['value']
                                      for item in metadata}
        if 'volume_type' in expected_attrs:
            db_volume_type = db_volume.get('volume_type')
            if db_volume_type:
                vt_expected_attrs = []
                if 'volume_type.extra_specs' in expected_attrs:
                    vt_expected_attrs.append('extra_specs')
                volume.volume_type = objects.VolumeType._from_db_object(
                    context, objects.VolumeType(), db_volume_type,
                    expected_attrs=vt_expected_attrs)
        if 'volume_attachment' in expected_attrs:
            attachments = base.obj_make_list(
                context, objects.VolumeAttachmentList(context),
                objects.VolumeAttachment,
                db_volume.get('volume_attachment'))
            volume.volume_attachment = attachments
        # Related single objects are only built when the FK is set, so a
        # volume outside a consistencygroup/group never triggers the join.
        if volume.consistencygroup_id and 'consistencygroup' in expected_attrs:
            consistencygroup = objects.ConsistencyGroup(context)
            consistencygroup._from_db_object(context,
                                             consistencygroup,
                                             db_volume['consistencygroup'])
            volume.consistencygroup = consistencygroup
        if 'snapshots' in expected_attrs:
            snapshots = base.obj_make_list(
                context, objects.SnapshotList(context),
                objects.Snapshot,
                db_volume['snapshots'])
            volume.snapshots = snapshots
        if 'cluster' in expected_attrs:
            db_cluster = db_volume.get('cluster')
            # If this volume doesn't belong to a cluster the cluster field in
            # the ORM instance will have value of None.
            if db_cluster:
                volume.cluster = objects.Cluster(context)
                objects.Cluster._from_db_object(context, volume.cluster,
                                                db_cluster)
            else:
                volume.cluster = None
        if volume.group_id and 'group' in expected_attrs:
            group = objects.Group(context)
            group._from_db_object(context,
                                  group,
                                  db_volume['group'])
            volume.group = group

        volume._context = context
        volume.obj_reset_changes()
        return volume

    def create(self):
        """Persist this volume to the database for the first time.

        Synthetic relationship fields (consistencygroup, snapshots, cluster,
        group) cannot be assigned at creation time and raise
        ObjectActionError.  A missing or None volume_type_id is filled in
        with the deployment's default volume type.

        :raises: exception.ObjectActionError
        """
        if self.obj_attr_is_set('id'):
            raise exception.ObjectActionError(action='create',
                                              reason=_('already created'))
        updates = self.cinder_obj_get_changes()

        if 'consistencygroup' in updates:
            raise exception.ObjectActionError(
                action='create', reason=_('consistencygroup assigned'))
        if 'snapshots' in updates:
            raise exception.ObjectActionError(
                action='create', reason=_('snapshots assigned'))
        if 'cluster' in updates:
            raise exception.ObjectActionError(
                action='create', reason=_('cluster assigned'))
        if 'group' in updates:
            raise exception.ObjectActionError(
                action='create', reason=_('group assigned'))
        if ('volume_type_id' not in updates or
                updates['volume_type_id'] is None):
            updates['volume_type_id'] = (
                volume_types.get_default_volume_type()['id'])

        db_volume = db.volume_create(self._context, updates)
        # Refresh self from the created row so DB-side defaults (timestamps,
        # generated id, ...) are reflected on the object.
        self._from_db_object(self._context, self, db_volume)

    def save(self):
        """Write this object's pending changes to the database.

        Relationship fields cannot be updated here: a truthy
        'consistencygroup' and any change to 'group', 'glance_metadata',
        'snapshots' or 'cluster' raise ObjectActionError.  Changes to
        'metadata' (and, for admin contexts, 'admin_metadata') are applied
        through their dedicated DB APIs and removed from the update dict;
        whatever remains goes through db.volume_update().

        :raises: exception.ObjectActionError
        """
        updates = self.cinder_obj_get_changes()
        if updates:
            # NOTE(xyang): Allow this to pass if 'consistencygroup' is
            # set to None. This is to support backward compatibility.
            # Also remove 'consistencygroup' from updates because
            # consistencygroup is the name of a relationship in the ORM
            # Volume model, so SQLA tries to do some kind of update of
            # the foreign key based on the provided updates if
            # 'consistencygroup' is in updates.
            if updates.pop('consistencygroup', None):
                raise exception.ObjectActionError(
                    action='save', reason=_('consistencygroup changed'))
            if 'group' in updates:
                raise exception.ObjectActionError(
                    action='save', reason=_('group changed'))
            if 'glance_metadata' in updates:
                raise exception.ObjectActionError(
                    action='save', reason=_('glance_metadata changed'))
            if 'snapshots' in updates:
                raise exception.ObjectActionError(
                    action='save', reason=_('snapshots changed'))
            if 'cluster' in updates:
                raise exception.ObjectActionError(
                    action='save', reason=_('cluster changed'))
            if 'metadata' in updates:
                # Metadata items that are not specified in the
                # self.metadata will be deleted
                metadata = updates.pop('metadata', None)
                self.metadata = db.volume_metadata_update(self._context,
                                                          self.id, metadata,
                                                          True)
            if self._context.is_admin and 'admin_metadata' in updates:
                metadata = updates.pop('admin_metadata', None)
                self.admin_metadata = db.volume_admin_metadata_update(
                    self._context, self.id, metadata, True)

            # When we are creating a volume and we change from 'creating'
            # status to 'downloading' status we have to change the worker entry
            # in the DB to reflect this change, otherwise the cleanup will
            # not be performed as it will be mistaken for a volume that has
            # been somehow changed (reset status, forced operation...)
            if updates.get('status') == 'downloading':
                self.set_worker()

            # updates are changed after popping out metadata.
            if updates:
                db.volume_update(self._context, self.id, updates)
            self.obj_reset_changes()

    def destroy(self):
        """Delete the volume row and sync the resulting column values."""
        # Deletion touches admin-only state, so elevate the context.
        with self.obj_as_admin():
            deleted_values = db.volume_destroy(self._context, self.id)
        self.update(deleted_values)
        self.obj_reset_changes(list(deleted_values.keys()))

    def obj_load_attr(self, attrname):
        """Lazy-load an optional attribute from the database on first access.

        Only names in OPTIONAL_FIELDS are loadable; anything else raises
        ObjectActionError.  Loading requires a context on the object,
        otherwise OrphanedObjectError is raised.  Attributes whose FK is
        unset (consistencygroup, group, cluster, volume_type) resolve to
        None without a DB call.

        :param attrname: name of the field to load
        :raises: exception.ObjectActionError, exception.OrphanedObjectError
        """
        if attrname not in self.OPTIONAL_FIELDS:
            raise exception.ObjectActionError(
                action='obj_load_attr',
                reason=_('attribute %s not lazy-loadable') % attrname)
        if not self._context:
            raise exception.OrphanedObjectError(method='obj_load_attr',
                                                objtype=self.obj_name())

        if attrname == 'metadata':
            self.metadata = db.volume_metadata_get(self._context, self.id)
        elif attrname == 'admin_metadata':
            # Non-admin contexts see an empty dict rather than an error.
            self.admin_metadata = {}
            if self._context.is_admin:
                self.admin_metadata = db.volume_admin_metadata_get(
                    self._context, self.id)
        elif attrname == 'glance_metadata':
            try:
                # NOTE(dulek): We're using alias here to have conversion from
                # list to dict done there.
                self.volume_glance_metadata = db.volume_glance_metadata_get(
                    self._context, self.id)
            except exception.GlanceMetadataNotFound:
                # NOTE(dulek): DB API raises when volume has no
                # glance_metadata. Silencing this because at this level no
                # metadata is a completely valid result.
                self.glance_metadata = {}
        elif attrname == 'volume_type':
            # If the volume doesn't have volume_type, VolumeType.get_by_id
            # would trigger a db call which raise VolumeTypeNotFound exception.
            self.volume_type = (objects.VolumeType.get_by_id(
                self._context, self.volume_type_id) if self.volume_type_id
                else None)
        elif attrname == 'volume_attachment':
            attachments = objects.VolumeAttachmentList.get_all_by_volume_id(
                self._context, self.id)
            self.volume_attachment = attachments
        elif attrname == 'consistencygroup':
            if self.consistencygroup_id is None:
                self.consistencygroup = None
            else:
                consistencygroup = objects.ConsistencyGroup.get_by_id(
                    self._context, self.consistencygroup_id)
                self.consistencygroup = consistencygroup
        elif attrname == 'snapshots':
            self.snapshots = objects.SnapshotList.get_all_for_volume(
                self._context, self.id)
        elif attrname == 'cluster':
            # If this volume doesn't belong to a cluster (cluster_name is
            # empty), then cluster field will be None.
            if self.cluster_name:
                self.cluster = objects.Cluster.get_by_id(
                    self._context, name=self.cluster_name)
            else:
                self.cluster = None
        elif attrname == 'group':
            if self.group_id is None:
                self.group = None
            else:
                group = objects.Group.get_by_id(
                    self._context, self.group_id)
                self.group = group

        # The freshly loaded attribute is not a pending change.
        self.obj_reset_changes(fields=[attrname])

    def delete_metadata_key(self, key):
        """Remove one metadata item both from the DB and from this object.

        :param key: metadata key to delete
        """
        db.volume_metadata_delete(self._context, self.id, key)
        was_dirty = 'metadata' in self.obj_what_changed()

        del self.metadata[key]
        self._orig_metadata.pop(key, None)

        # Only re-baseline when metadata was clean before; otherwise other
        # pending metadata edits would be wiped from the change set.
        if not was_dirty:
            self.obj_reset_changes(['metadata'])

    def finish_volume_migration(self, dest_volume):
        """Swap field values with the migration destination volume.

        After migration the original volume id must keep pointing at the
        migrated data, so (almost) every set field is exchanged between
        source (self) and destination.  The destination is left in
        'deleting' state; both objects are saved before returning.

        :param dest_volume: Volume object the data was migrated to
        :returns: the (now doomed) dest_volume
        """
        # We swap fields between source (i.e. self) and destination at the
        # end of migration because we want to keep the original volume id
        # in the DB but now pointing to the migrated volume.
        skip = ({'id', 'provider_location', 'glance_metadata',
                 'volume_type'} | set(self.obj_extra_fields))
        for key in set(dest_volume.fields.keys()) - skip:
            # Only swap attributes that are already set.  We do not want to
            # unexpectedly trigger a lazy-load.
            if not dest_volume.obj_attr_is_set(key):
                continue

            value = getattr(dest_volume, key)
            value_to_dst = getattr(self, key)

            # Destination must have a _name_id since the id no longer matches
            # the volume.  If it doesn't have a _name_id we set one.
            if key == '_name_id':
                if not dest_volume._name_id:
                    setattr(dest_volume, key, self.id)
                continue
            elif key == 'migration_status':
                value = None
                value_to_dst = 'deleting'
            elif key == 'display_description':
                value_to_dst = 'migration src for ' + self.id
            elif key == 'status':
                value_to_dst = 'deleting'
            # Because dest_volume will be deleted soon, we can
            # skip to copy volume_type_id and volume_type which
            # are not keys for volume deletion.
            elif key == 'volume_type_id':
                # Initialize volume_type of source volume using
                # new volume_type_id.
                self.update({'volume_type_id': value})
                continue

            setattr(self, key, value)
            setattr(dest_volume, key, value_to_dst)

        self.save()
        dest_volume.save()
        return dest_volume

    def get_latest_snapshot(self):
        """Return the most recently created snapshot of this volume."""
        db_snap = db.snapshot_get_latest_for_volume(self._context, self.id)
        snap = objects.Snapshot(self._context)
        # _from_db_object fills the object in place and returns it.
        return snap._from_db_object(self._context, snap, db_snap)

    @staticmethod
    def _is_cleanable(status, obj_version):
        # Before 1.6 we didn't have workers table, so cleanup wasn't supported.
        # cleaning.
        if obj_version and obj_version < 1.6:
            return False
        return status in ('creating', 'deleting', 'uploading', 'downloading')

    def begin_attach(self, attach_mode):
        """Create an ATTACHING attachment record for this volume.

        :param attach_mode: mode string recorded in admin metadata as
            'attached_mode'
        :returns: the new VolumeAttachment object
        """
        new_attachment = objects.VolumeAttachment(
            context=self._context,
            volume_id=self.id,
            attach_status=c_fields.VolumeAttachStatus.ATTACHING)
        new_attachment.create()
        # Storing admin metadata requires an elevated context.
        with self.obj_as_admin():
            self.admin_metadata['attached_mode'] = attach_mode
            self.save()
        return new_attachment

    def finish_detach(self, attachment_id):
        """Complete detach: update DB state and prune the attachment locally.

        :param attachment_id: id of the attachment being finalized
        """
        with self.obj_as_admin():
            volume_updates, attachment_updates = (
                db.volume_detached(self._context, self.id, attachment_id))
            db.volume_admin_metadata_delete(self._context, self.id,
                                            'attached_mode')
            self.admin_metadata.pop('attached_mode', None)
        # Remove attachment in volume only when this field is loaded.
        if attachment_updates and self.obj_attr_is_set('volume_attachment'):
            for i, attachment in enumerate(self.volume_attachment):
                if attachment.id == attachment_id:
                    del self.volume_attachment.objects[i]
                    break

        self.update(volume_updates)
        # Everything we just synced from the DB is not a pending change.
        self.obj_reset_changes(
            list(volume_updates.keys()) +
            ['volume_attachment', 'admin_metadata'])

    def is_replicated(self):
        """Return a truthy value when the volume's type enables replication.

        NOTE: mirrors ``self.volume_type and ...`` short-circuiting, so a
        missing volume type yields the falsy type value itself, not False.
        """
        vol_type = self.volume_type
        if not vol_type:
            return vol_type
        return vol_type.is_replicated()

    def is_multiattach(self):
        """Return a truthy value when the volume's type allows multiattach.

        NOTE: mirrors ``self.volume_type and ...`` short-circuiting, so a
        missing volume type yields the falsy type value itself, not False.
        """
        vol_type = self.volume_type
        if not vol_type:
            return vol_type
        return vol_type.is_multiattach()
# Ejemplo n.º 6
class QosPolicy(rbac_db.NeutronRbacObject):
    """Versioned object for a Neutron QoS policy with RBAC sharing support."""

    # Version 1.0: Initial version
    # Version 1.1: QosDscpMarkingRule introduced
    # Version 1.2: Added QosMinimumBandwidthRule
    # Version 1.3: Added standard attributes (created_at, revision, etc)
    # Version 1.4: Changed tenant_id to project_id
    # Version 1.5: Direction for bandwidth limit rule added
    # Version 1.6: Added "is_default" field
    VERSION = '1.6'

    # required by RbacNeutronMetaclass
    rbac_db_model = QosPolicyRBAC
    db_model = qos_db_model.QosPolicy

    port_binding_model = qos_db_model.QosPortPolicyBinding
    network_binding_model = qos_db_model.QosNetworkPolicyBinding

    fields = {
        'id': common_types.UUIDField(),
        'project_id': obj_fields.StringField(),
        'name': obj_fields.StringField(),
        'shared': obj_fields.BooleanField(default=False),
        'rules': obj_fields.ListOfObjectsField('QosRule', subclasses=True),
        'is_default': obj_fields.BooleanField(default=False),
    }

    fields_no_update = ['id', 'project_id']

    synthetic_fields = ['rules', 'is_default']

    extra_filter_names = {'is_default'}

    # Maps the bound object type name to the binding object class; used by
    # delete() to refuse deleting a policy that is still in use.
    binding_models = {
        'port': binding.QosPolicyPortBinding,
        'network': binding.QosPolicyNetworkBinding
    }

    def obj_load_attr(self, attrname):
        """Lazy-load the synthetic 'rules' and 'is_default' fields."""
        if attrname == 'rules':
            return self._reload_rules()
        elif attrname == 'is_default':
            return self._reload_is_default()
        return super(QosPolicy, self).obj_load_attr(attrname)

    def _reload_rules(self):
        """Fetch this policy's rules from the DB and cache them on the
        object, marking the field clean afterwards.
        """
        rules = rule_obj_impl.get_rules(self.obj_context, self.id)
        setattr(self, 'rules', rules)
        self.obj_reset_changes(['rules'])

    def _reload_is_default(self):
        """Recompute 'is_default' by comparing this policy's id with the
        project's registered default policy.
        """
        if self.get_default() == self.id:
            setattr(self, 'is_default', True)
        else:
            setattr(self, 'is_default', False)
        self.obj_reset_changes(['is_default'])

    def get_rule_by_id(self, rule_id):
        """Return rule specified by rule_id.

        @raise QosRuleNotFound: if there is no such rule in the policy.
        """

        for rule in self.rules:
            if rule_id == rule.id:
                return rule
        raise exceptions.QosRuleNotFound(policy_id=self.id, rule_id=rule_id)

    # TODO(hichihara): For tag mechanism. This will be removed in bug/1704137
    def to_dict(self):
        """Return a dict representation including standard-attribute tags."""
        _dict = super(QosPolicy, self).to_dict()
        try:
            _dict['tags'] = [t.tag for t in self.db_obj.standard_attr.tags]
        except AttributeError:
            # AttrtibuteError can be raised when accessing self.db_obj
            # or self.db_obj.standard_attr
            pass
        return _dict

    @classmethod
    def get_object(cls, context, **kwargs):
        """Return a single policy, or None when it is missing or the caller
        is not allowed to access it; synthetic fields are loaded eagerly.
        """
        # We want to get the policy regardless of its tenant id. We'll make
        # sure the tenant has permission to access the policy later on.
        admin_context = context.elevated()
        with db_api.autonested_transaction(admin_context.session):
            policy_obj = super(QosPolicy,
                               cls).get_object(admin_context, **kwargs)
            if (not policy_obj or not cls.is_accessible(context, policy_obj)):
                return

            policy_obj.obj_load_attr('rules')
            policy_obj.obj_load_attr('is_default')
            return policy_obj

    @classmethod
    def get_objects(cls,
                    context,
                    _pager=None,
                    validate_filters=True,
                    **kwargs):
        """Return the policies accessible to the caller, with synthetic
        fields loaded eagerly.
        """
        # We want to get the policy regardless of its tenant id. We'll make
        # sure the tenant has permission to access the policy later on.
        admin_context = context.elevated()
        with db_api.autonested_transaction(admin_context.session):
            objs = super(QosPolicy,
                         cls).get_objects(admin_context, _pager,
                                          validate_filters, **kwargs)
            result = []
            for obj in objs:
                if not cls.is_accessible(context, obj):
                    continue
                obj.obj_load_attr('rules')
                obj.obj_load_attr('is_default')
                result.append(obj)
            return result

    @classmethod
    def _get_object_policy(cls, context, model, **kwargs):
        """Return the policy bound through the given binding model, or None
        when no binding row matches.
        """
        with db_api.autonested_transaction(context.session):
            binding_db_obj = obj_db_api.get_object(context, model, **kwargs)
            if binding_db_obj:
                return cls.get_object(context, id=binding_db_obj['policy_id'])

    @classmethod
    def get_network_policy(cls, context, network_id):
        """Return the policy bound to the given network, if any."""
        return cls._get_object_policy(context,
                                      cls.network_binding_model,
                                      network_id=network_id)

    @classmethod
    def get_port_policy(cls, context, port_id):
        """Return the policy bound to the given port, if any."""
        return cls._get_object_policy(context,
                                      cls.port_binding_model,
                                      port_id=port_id)

    # TODO(QoS): Consider extending base to trigger registered methods for us
    def create(self):
        """Create the policy, registering it as the project default when
        'is_default' is set, and load its rules.
        """
        with db_api.autonested_transaction(self.obj_context.session):
            super(QosPolicy, self).create()
            if self.is_default:
                self.set_default()
            self.obj_load_attr('rules')

    def update(self):
        """Persist changes, toggling the project-default registration when
        'is_default' changed.
        """
        with db_api.autonested_transaction(self.obj_context.session):
            if 'is_default' in self.obj_what_changed():
                if self.is_default:
                    self.set_default()
                else:
                    self.unset_default()
            super(QosPolicy, self).update()

    def delete(self):
        """Delete the policy, refusing when it is still bound to any port or
        network.

        :raises: exceptions.QosPolicyInUse
        """
        with db_api.autonested_transaction(self.obj_context.session):
            for object_type, obj_class in self.binding_models.items():
                # One binding row is enough to prove the policy is in use.
                pager = base_db.Pager(limit=1)
                binding_obj = obj_class.get_objects(self.obj_context,
                                                    policy_id=self.id,
                                                    _pager=pager)
                if binding_obj:
                    raise exceptions.QosPolicyInUse(
                        policy_id=self.id,
                        object_type=object_type,
                        object_id=binding_obj[0]['%s_id' % object_type])

            super(QosPolicy, self).delete()

    def attach_network(self, network_id):
        """Bind this policy to a network.

        :raises: exceptions.NetworkQosBindingError
        """
        network_binding = {'policy_id': self.id, 'network_id': network_id}
        network_binding_obj = binding.QosPolicyNetworkBinding(
            self.obj_context, **network_binding)
        try:
            network_binding_obj.create()
        except db_exc.DBReferenceError as e:
            raise exceptions.NetworkQosBindingError(policy_id=self.id,
                                                    net_id=network_id,
                                                    db_error=e)

    def attach_port(self, port_id):
        """Bind this policy to a port.

        :raises: exceptions.PortQosBindingError
        """
        port_binding_obj = binding.QosPolicyPortBinding(self.obj_context,
                                                        policy_id=self.id,
                                                        port_id=port_id)
        try:
            port_binding_obj.create()
        except db_exc.DBReferenceError as e:
            raise exceptions.PortQosBindingError(policy_id=self.id,
                                                 port_id=port_id,
                                                 db_error=e)

    def detach_network(self, network_id):
        """Remove this policy's binding to a network.

        :raises: exceptions.NetworkQosBindingNotFound
        """
        deleted = binding.QosPolicyNetworkBinding.delete_objects(
            self.obj_context, network_id=network_id)
        if not deleted:
            raise exceptions.NetworkQosBindingNotFound(net_id=network_id,
                                                       policy_id=self.id)

    def detach_port(self, port_id):
        """Remove this policy's binding to a port.

        :raises: exceptions.PortQosBindingNotFound
        """
        deleted = binding.QosPolicyPortBinding.delete_objects(self.obj_context,
                                                              port_id=port_id)
        if not deleted:
            raise exceptions.PortQosBindingNotFound(port_id=port_id,
                                                    policy_id=self.id)

    def set_default(self):
        """Register this policy as the project default.

        :raises: exceptions.QoSPolicyDefaultAlreadyExists when a different
            policy is already the project default
        """
        if not self.get_default():
            qos_default_policy = QosPolicyDefault(self.obj_context,
                                                  qos_policy_id=self.id,
                                                  project_id=self.project_id)
            qos_default_policy.create()
        elif self.get_default() != self.id:
            raise exceptions.QoSPolicyDefaultAlreadyExists(
                project_id=self.project_id)

    def unset_default(self):
        """Drop the project-default registration if this policy holds it."""
        if self.get_default() == self.id:
            qos_default_policy = QosPolicyDefault.get_object(
                self.obj_context, project_id=self.project_id)
            qos_default_policy.delete()

    def get_default(self):
        """Return the project's default policy id, or None when unset."""
        qos_default_policy = QosPolicyDefault.get_object(
            self.obj_context, project_id=self.project_id)
        if qos_default_policy:
            return qos_default_policy.qos_policy_id

    def get_bound_networks(self):
        """Return ids of networks this policy is bound to."""
        return [
            nb.network_id
            for nb in binding.QosPolicyNetworkBinding.get_objects(
                self.obj_context, policy_id=self.id)
        ]

    def get_bound_ports(self):
        """Return ids of ports this policy is bound to."""
        return [
            pb.port_id for pb in binding.QosPolicyPortBinding.get_objects(
                self.obj_context, policy_id=self.id)
        ]

    @classmethod
    def _get_bound_tenant_ids(cls, session, binding_db, bound_db,
                              binding_db_id_column, policy_id):
        """Return tenant ids of resources bound to the policy through the
        given binding table (flattened from single-column query rows).
        """
        return list(
            itertools.chain.from_iterable(
                session.query(bound_db.tenant_id).join(
                    binding_db, bound_db.id == binding_db_id_column).filter(
                        binding_db.policy_id == policy_id).all()))

    @classmethod
    def get_bound_tenant_ids(cls, context, policy_id):
        """Implements RbacNeutronObject.get_bound_tenant_ids.

        :returns: set -- a set of tenants' ids dependant on QosPolicy.
        """
        net = models_v2.Network
        qosnet = qos_db_model.QosNetworkPolicyBinding
        port = models_v2.Port
        qosport = qos_db_model.QosPortPolicyBinding
        bound_tenants = []
        with db_api.autonested_transaction(context.session):
            bound_tenants.extend(
                cls._get_bound_tenant_ids(context.session, qosnet, net,
                                          qosnet.network_id, policy_id))
            bound_tenants.extend(
                cls._get_bound_tenant_ids(context.session, qosport, port,
                                          qosport.port_id, policy_id))
        return set(bound_tenants)

    def obj_make_compatible(self, primitive, target_version):
        """Downgrade a serialized policy to an older object version by
        filtering rules and stripping fields the target doesn't know.

        :param primitive: dict of serialized fields, modified in place
        :param target_version: version string the primitive must conform to
        :raises: exception.IncompatibleObjectVersion
        """
        def filter_rules(obj_names, rules):
            # Keep only rule types the target version understands.
            return [
                rule for rule in rules
                if rule['versioned_object.name'] in obj_names
            ]

        def filter_ingress_bandwidth_limit_rules(rules):
            # Pre-1.5 bandwidth limit rules had no direction; drop ingress
            # rules and strip the direction key from egress ones.
            bwlimit_obj_name = rule_obj_impl.QosBandwidthLimitRule.obj_name()
            filtered_rules = []
            for rule in rules:
                if rule['versioned_object.name'] == bwlimit_obj_name:
                    direction = rule['versioned_object.data'].get("direction")
                    if direction == n_const.EGRESS_DIRECTION:
                        rule['versioned_object.data'].pop('direction')
                        filtered_rules.append(rule)
                else:
                    filtered_rules.append(rule)
            return filtered_rules

        _target_version = versionutils.convert_version_to_tuple(target_version)
        names = []
        if _target_version >= (1, 0):
            names.append(rule_obj_impl.QosBandwidthLimitRule.obj_name())
        if _target_version >= (1, 1):
            names.append(rule_obj_impl.QosDscpMarkingRule.obj_name())
        if _target_version >= (1, 2):
            names.append(rule_obj_impl.QosMinimumBandwidthRule.obj_name())
        if 'rules' in primitive and names:
            primitive['rules'] = filter_rules(names, primitive['rules'])

        if _target_version < (1, 3):
            standard_fields = ['revision_number', 'created_at', 'updated_at']
            for f in standard_fields:
                primitive.pop(f)
            if primitive['description'] is None:
                # description was not nullable before
                raise exception.IncompatibleObjectVersion(
                    objver=target_version, objname='QoSPolicy')

        if _target_version < (1, 4):
            primitive['tenant_id'] = primitive.pop('project_id')

        if _target_version < (1, 5):
            if 'rules' in primitive:
                primitive['rules'] = filter_ingress_bandwidth_limit_rules(
                    primitive['rules'])

        if _target_version < (1, 6):
            primitive.pop('is_default', None)
# Ejemplo n.º 7
class BayModel(base.MagnumPersistentObject, base.MagnumObject,
               base.MagnumObjectDictCompat):
    # Version 1.0: Initial version
    # Version 1.1: Add 'registry_enabled' field
    # Version 1.2: Added 'network_driver' field
    # Version 1.3: Added 'labels' attribute
    # Version 1.4: Added 'insecure' attribute
    # Version 1.5: Changed type of 'coe' from StringField to BayTypeField
    # Version 1.6: Change 'insecure' to 'tls_disabled'
    # Version 1.7: Added 'public' field
    # Version 1.8: Added 'server_type' field
    # Version 1.9: Added 'volume_driver' field
    # Version 1.10: Removed 'ssh_authorized_key' field
    # Version 1.11: Added 'insecure_registry' field
    VERSION = '1.11'

    dbapi = dbapi.get_instance()

    fields = {
        'id': fields.IntegerField(),
        'uuid': fields.StringField(nullable=True),
        'project_id': fields.StringField(nullable=True),
        'user_id': fields.StringField(nullable=True),
        'name': fields.StringField(nullable=True),
        'image_id': fields.StringField(nullable=True),
        'flavor_id': fields.StringField(nullable=True),
        'master_flavor_id': fields.StringField(nullable=True),
        'keypair_id': fields.StringField(nullable=True),
        'dns_nameserver': fields.StringField(nullable=True),
        'external_network_id': fields.StringField(nullable=True),
        'fixed_network': fields.StringField(nullable=True),
        'network_driver': fields.StringField(nullable=True),
        'volume_driver': fields.StringField(nullable=True),
        'apiserver_port': fields.IntegerField(nullable=True),
        'docker_volume_size': fields.IntegerField(nullable=True),
        'cluster_distro': fields.StringField(nullable=True),
        'coe': m_fields.BayTypeField(nullable=True),
        'http_proxy': fields.StringField(nullable=True),
        'https_proxy': fields.StringField(nullable=True),
        'no_proxy': fields.StringField(nullable=True),
        'registry_enabled': fields.BooleanField(default=False),
        'labels': fields.DictOfStringsField(nullable=True),
        'tls_disabled': fields.BooleanField(default=False),
        'public': fields.BooleanField(default=False),
        'server_type': fields.StringField(nullable=True),
        'insecure_registry': fields.StringField(nullable=True),
    }

    @staticmethod
    def _from_db_object(baymodel, db_baymodel):
        """Copy every declared field from a DB record onto *baymodel*.

        :param baymodel: a :class:`BayModel` instance to populate.
        :param db_baymodel: database entity holding the source values.
        :returns: the populated *baymodel*, with change tracking cleared.
        """
        for field_name in baymodel.fields:
            baymodel[field_name] = db_baymodel[field_name]

        # Freshly-loaded values are not "changes" to be saved later.
        baymodel.obj_reset_changes()
        return baymodel

    @staticmethod
    def _from_db_object_list(db_objects, cls, context):
        """Convert database entities into a list of BayModel objects."""
        return [BayModel._from_db_object(cls(context), db_obj)
                for db_obj in db_objects]

    @base.remotable_classmethod
    def get(cls, context, baymodel_id):
        """Look up a baymodel by either integer id or uuid.

        :param context: Security context
        :param baymodel_id: the id *or* uuid of a baymodel.
        :returns: a :class:`BayModel` object.
        :raises: InvalidIdentity when the identifier is neither int-like
                 nor uuid-like.
        """
        if strutils.is_int_like(baymodel_id):
            return cls.get_by_id(context, baymodel_id)
        if uuidutils.is_uuid_like(baymodel_id):
            return cls.get_by_uuid(context, baymodel_id)
        raise exception.InvalidIdentity(identity=baymodel_id)

    @base.remotable_classmethod
    def get_by_id(cls, context, baymodel_id):
        """Look up a baymodel by its integer id.

        :param context: Security context
        :param baymodel_id: the id of a baymodel.
        :returns: a :class:`BayModel` object.
        """
        db_baymodel = cls.dbapi.get_baymodel_by_id(context, baymodel_id)
        return BayModel._from_db_object(cls(context), db_baymodel)

    @base.remotable_classmethod
    def get_by_uuid(cls, context, uuid):
        """Look up a baymodel by its uuid.

        :param context: Security context
        :param uuid: the uuid of a baymodel.
        :returns: a :class:`BayModel` object.
        """
        db_baymodel = cls.dbapi.get_baymodel_by_uuid(context, uuid)
        return BayModel._from_db_object(cls(context), db_baymodel)

    @base.remotable_classmethod
    def get_by_name(cls, context, name):
        """Look up a baymodel by its name.

        :param context: Security context
        :param name: the name of a baymodel.
        :returns: a :class:`BayModel` object.
        """
        db_baymodel = cls.dbapi.get_baymodel_by_name(context, name)
        return BayModel._from_db_object(cls(context), db_baymodel)

    @base.remotable_classmethod
    def list(cls,
             context,
             limit=None,
             marker=None,
             sort_key=None,
             sort_dir=None):
        """Return a page of BayModel objects.

        :param context: Security context.
        :param limit: maximum number of resources to return in a single
                      result.
        :param marker: pagination marker for large data sets.
        :param sort_key: column to sort results by.
        :param sort_dir: direction to sort. "asc" or "desc".
        :returns: a list of :class:`BayModel` objects.
        """
        db_baymodels = cls.dbapi.get_baymodel_list(context,
                                                   limit=limit,
                                                   marker=marker,
                                                   sort_key=sort_key,
                                                   sort_dir=sort_dir)
        return BayModel._from_db_object_list(db_baymodels, cls, context)

    @base.remotable
    def create(self, context=None):
        """Insert this BayModel into the DB using only its dirty fields.

        :param context: Security context. NOTE: This should only
                        be used internally by the indirection_api.
                        Unfortunately, RPC requires context as the first
                        argument, even though we don't use it.
                        A context should be set when instantiating the
                        object, e.g.: BayModel(context)
        """
        db_baymodel = self.dbapi.create_baymodel(self.obj_get_changes())
        # Reload so DB-generated values (id, uuid, ...) land on the object.
        self._from_db_object(self, db_baymodel)

    @base.remotable
    def destroy(self, context=None):
        """Remove this BayModel from the DB, keyed by its uuid.

        :param context: Security context. NOTE: This should only
                        be used internally by the indirection_api.
                        Unfortunately, RPC requires context as the first
                        argument, even though we don't use it.
                        A context should be set when instantiating the
                        object, e.g.: BayModel(context)
        """
        self.dbapi.destroy_baymodel(self.uuid)
        self.obj_reset_changes()

    @base.remotable
    def save(self, context=None):
        """Push this BayModel's pending changes to the DB.

        Only fields reported dirty by obj_get_changes() are written.

        :param context: Security context. NOTE: This should only
                        be used internally by the indirection_api.
                        Unfortunately, RPC requires context as the first
                        argument, even though we don't use it.
                        A context should be set when instantiating the
                        object, e.g.: BayModel(context)
        """
        self.dbapi.update_baymodel(self.uuid, self.obj_get_changes())
        self.obj_reset_changes()

    @base.remotable
    def refresh(self, context=None):
        """Reload this BayModel's set attributes from the DB.

        Fetches the row with the same uuid and copies over any attribute
        whose stored value differs from the in-memory one.

        :param context: Security context. NOTE: This should only
                        be used internally by the indirection_api.
                        Unfortunately, RPC requires context as the first
                        argument, even though we don't use it.
                        A context should be set when instantiating the
                        object, e.g.: BayModel(context)
        """
        current = self.__class__.get_by_uuid(self._context, uuid=self.uuid)
        for field_name in self.fields:
            if (self.obj_attr_is_set(field_name)
                    and self[field_name] != current[field_name]):
                self[field_name] = current[field_name]
Ejemplo n.º 8
0
class Port(base.NeutronDbObject):
    """Versioned object wrapping the Neutron port DB model."""

    # Version 1.0: Initial version
    # Version 1.1: Add data_plane_status field
    # Version 1.2: Added segment_id to binding_levels
    # Version 1.3: distributed_binding -> distributed_bindings
    # Version 1.4: Attribute binding becomes ListOfObjectsField
    # Version 1.5: Added qos_network_policy_id field
    VERSION = '1.5'

    db_model = models_v2.Port

    fields = {
        'id':
        common_types.UUIDField(),
        'project_id':
        obj_fields.StringField(nullable=True),
        'name':
        obj_fields.StringField(nullable=True),
        'network_id':
        common_types.UUIDField(),
        'mac_address':
        common_types.MACAddressField(),
        'admin_state_up':
        obj_fields.BooleanField(),
        'device_id':
        obj_fields.StringField(),
        'device_owner':
        obj_fields.StringField(),
        'status':
        obj_fields.StringField(),
        'allowed_address_pairs':
        obj_fields.ListOfObjectsField('AllowedAddressPair', nullable=True),
        'bindings':
        obj_fields.ListOfObjectsField('PortBinding', nullable=True),
        'data_plane_status':
        obj_fields.ObjectField('PortDataPlaneStatus', nullable=True),
        'dhcp_options':
        obj_fields.ListOfObjectsField('ExtraDhcpOpt', nullable=True),
        'distributed_bindings':
        obj_fields.ListOfObjectsField('DistributedPortBinding', nullable=True),
        'dns':
        obj_fields.ObjectField('PortDNS', nullable=True),
        'fixed_ips':
        obj_fields.ListOfObjectsField('IPAllocation', nullable=True),
        # TODO(ihrachys): consider converting to boolean
        'security':
        obj_fields.ObjectField('PortSecurity', nullable=True),
        'security_group_ids':
        common_types.SetOfUUIDsField(
            nullable=True,
            # TODO(ihrachys): how do we safely pass a mutable default?
            default=None,
        ),
        'qos_policy_id':
        common_types.UUIDField(nullable=True, default=None),
        'qos_network_policy_id':
        common_types.UUIDField(nullable=True, default=None),
        'binding_levels':
        obj_fields.ListOfObjectsField('PortBindingLevel', nullable=True),

        # TODO(ihrachys): consider adding a 'dns_assignment' fully synthetic
        # field in later object iterations
    }

    extra_filter_names = {'security_group_ids'}

    fields_no_update = ['project_id', 'network_id']

    synthetic_fields = [
        'allowed_address_pairs',
        'bindings',
        'binding_levels',
        'data_plane_status',
        'dhcp_options',
        'distributed_bindings',
        'dns',
        'fixed_ips',
        'qos_policy_id',
        'qos_network_policy_id',
        'security',
        'security_group_ids',
    ]

    fields_need_translation = {
        'bindings': 'port_bindings',
        'dhcp_options': 'dhcp_opts',
        'distributed_bindings': 'distributed_port_binding',
        'security': 'port_security',
    }

    def create(self):
        """Persist the port, then attach SG and QoS policy bindings.

        Synthetic fields (security_group_ids, qos_policy_id) are stored in
        separate binding tables, so they are written after the base row
        inside the same writer transaction.
        """
        fields = self.obj_get_changes()
        with self.db_context_writer(self.obj_context):
            sg_ids = self.security_group_ids
            if sg_ids is None:
                sg_ids = set()
            # Capture before create(): the parent call resets change state.
            qos_policy_id = self.qos_policy_id
            super(Port, self).create()
            if 'security_group_ids' in fields:
                self._attach_security_groups(sg_ids)
            if 'qos_policy_id' in fields:
                self._attach_qos_policy(qos_policy_id)

    def update(self):
        """Persist changed columns and refresh SG/QoS bindings if dirty."""
        fields = self.obj_get_changes()
        with self.db_context_writer(self.obj_context):
            super(Port, self).update()
            if 'security_group_ids' in fields:
                self._attach_security_groups(fields['security_group_ids'])
            if 'qos_policy_id' in fields:
                self._attach_qos_policy(fields['qos_policy_id'])

    def _attach_qos_policy(self, qos_policy_id):
        """Replace this port's QoS policy binding with *qos_policy_id*.

        Passing a falsy id simply removes any existing binding.
        """
        binding.QosPolicyPortBinding.delete_objects(self.obj_context,
                                                    port_id=self.id)
        if qos_policy_id:
            port_binding_obj = binding.QosPolicyPortBinding(
                self.obj_context, policy_id=qos_policy_id, port_id=self.id)
            port_binding_obj.create()

        self.qos_policy_id = qos_policy_id
        self.obj_reset_changes(['qos_policy_id'])

    def _attach_security_groups(self, sg_ids):
        """Replace this port's security-group bindings with *sg_ids*."""
        # TODO(ihrachys): consider introducing an (internal) object for the
        # binding to decouple database operations a bit more
        obj_db_api.delete_objects(SecurityGroupPortBinding,
                                  self.obj_context,
                                  port_id=self.id)
        if sg_ids:
            for sg_id in sg_ids:
                self._attach_security_group(sg_id)
        self.security_group_ids = sg_ids
        self.obj_reset_changes(['security_group_ids'])

    def _attach_security_group(self, sg_id):
        """Create a single port <-> security-group binding row."""
        obj_db_api.create_object(SecurityGroupPortBinding, self.obj_context, {
            'port_id': self.id,
            'security_group_id': sg_id
        })

    @classmethod
    def get_objects(cls,
                    context,
                    _pager=None,
                    validate_filters=True,
                    security_group_ids=None,
                    **kwargs):
        """Fetch ports, optionally restricted to given security groups.

        security_group_ids is handled here (it lives in a binding table,
        not on the port row) by pre-resolving it to a port-id filter that
        is intersected with any caller-supplied 'id' filter.
        """
        if security_group_ids:
            ports_with_sg = cls.get_ports_ids_by_security_groups(
                context, security_group_ids)
            port_ids = kwargs.get("id", [])
            if port_ids:
                kwargs['id'] = list(set(port_ids) & set(ports_with_sg))
            else:
                kwargs['id'] = ports_with_sg
        return super(Port, cls).get_objects(context, _pager, validate_filters,
                                            **kwargs)

    @classmethod
    def get_port_ids_filter_by_segment_id(cls, context, segment_id):
        """Return ids of ports bound (at any level) to *segment_id*."""
        query = context.session.query(models_v2.Port.id)
        query = query.join(
            ml2_models.PortBindingLevel,
            ml2_models.PortBindingLevel.port_id == models_v2.Port.id)
        query = query.filter(
            ml2_models.PortBindingLevel.segment_id == segment_id)
        return [p.id for p in query]

    @classmethod
    def modify_fields_to_db(cls, fields):
        """Translate object field values into DB-storable ones."""
        result = super(Port, cls).modify_fields_to_db(fields)

        # TODO(rossella_s): get rid of it once we switch the db model to using
        # custom types.
        if 'mac_address' in result:
            result['mac_address'] = cls.filter_to_str(result['mac_address'])

        # convert None to []
        if 'distributed_port_binding' in result:
            result['distributed_port_binding'] = (
                result['distributed_port_binding'] or [])
        return result

    @classmethod
    def modify_fields_from_db(cls, db_obj):
        """Translate DB values into object field values."""
        fields = super(Port, cls).modify_fields_from_db(db_obj)

        # TODO(rossella_s): get rid of it once we switch the db model to using
        # custom types.
        if 'mac_address' in fields:
            fields['mac_address'] = net_utils.AuthenticEUI(
                fields['mac_address'])

        distributed_port_binding = fields.get('distributed_bindings')
        if distributed_port_binding:
            # TODO(ihrachys) support multiple bindings
            fields['distributed_bindings'] = fields['distributed_bindings'][0]
        else:
            fields['distributed_bindings'] = []
        return fields

    def from_db_object(self, db_obj):
        """Load self from *db_obj*, including SG and QoS binding tables."""
        super(Port, self).from_db_object(db_obj)
        # extract security group bindings
        if db_obj.get('security_groups', []):
            self.security_group_ids = {
                sg.security_group_id
                for sg in db_obj.security_groups
            }
        else:
            self.security_group_ids = set()
        fields_to_change = ['security_group_ids']

        # extract qos policy binding
        if db_obj.get('qos_policy_binding'):
            self.qos_policy_id = db_obj.qos_policy_binding.policy_id
            fields_to_change.append('qos_policy_id')
        if db_obj.get('qos_network_policy_binding'):
            self.qos_network_policy_id = (
                db_obj.qos_network_policy_binding.policy_id)
            # Bug fix: reset the field that was actually populated
            # ('qos_network_policy_id'), not the binding relation name;
            # resetting a nonexistent field name left the populated field
            # marked as changed.
            fields_to_change.append('qos_network_policy_id')

        self.obj_reset_changes(fields_to_change)

    def obj_make_compatible(self, primitive, target_version):
        """Downgrade *primitive* in place for older object consumers."""
        _target_version = versionutils.convert_version_to_tuple(target_version)
        if _target_version < (1, 1):
            primitive.pop('data_plane_status', None)
        if _target_version < (1, 2):
            binding_levels = primitive.get('binding_levels', [])
            for lvl in binding_levels:
                lvl['versioned_object.version'] = '1.0'
                lvl['versioned_object.data'].pop('segment_id', None)
        if _target_version < (1, 3):
            bindings = primitive.pop('distributed_bindings', [])
            primitive['distributed_binding'] = (bindings[0]
                                                if bindings else None)
        if _target_version < (1, 4):
            # In version 1.4 we add support for multiple port bindings.
            # Previous versions only support one port binding. The following
            # lines look for the active port binding, which is the only one
            # needed in previous versions
            if 'bindings' in primitive:
                original_bindings = primitive.pop('bindings')
                primitive['binding'] = None
                for a_binding in original_bindings:
                    if (a_binding['versioned_object.data']['status'] ==
                            constants.ACTIVE):
                        primitive['binding'] = a_binding
                        break
        if _target_version < (1, 5):
            primitive.pop('qos_network_policy_id', None)

    @classmethod
    def get_ports_by_router(cls, context, router_id, owner, subnet):
        """Return router ports of type *owner* on the subnet's network."""
        rport_qry = context.session.query(models_v2.Port).join(l3.RouterPort)
        ports = rport_qry.filter(
            l3.RouterPort.router_id == router_id,
            l3.RouterPort.port_type == owner,
            models_v2.Port.network_id == subnet['network_id'])
        return [cls._load_object(context, db_obj) for db_obj in ports.all()]

    @classmethod
    def get_ports_ids_by_security_groups(cls,
                                         context,
                                         security_group_ids,
                                         excluded_device_owners=None):
        """Return ids of ports bound to any of *security_group_ids*.

        :param excluded_device_owners: if given, ports whose device_owner
                                       is in this list are filtered out.
        """
        query = context.session.query(sg_models.SecurityGroupPortBinding)
        query = query.filter(
            sg_models.SecurityGroupPortBinding.security_group_id.in_(
                security_group_ids))
        if excluded_device_owners:
            query = query.join(models_v2.Port)
            query = query.filter(
                ~models_v2.Port.device_owner.in_(excluded_device_owners))
        return [port_binding['port_id'] for port_binding in query.all()]

    @classmethod
    def get_ports_by_binding_type_and_host(cls, context, binding_type, host):
        """Return ports whose binding has the given vif_type and host."""
        query = context.session.query(models_v2.Port).join(
            ml2_models.PortBinding)
        query = query.filter(ml2_models.PortBinding.vif_type == binding_type,
                             ml2_models.PortBinding.host == host)
        return [cls._load_object(context, db_obj) for db_obj in query.all()]

    @classmethod
    def get_ports_by_vnic_type_and_host(cls, context, vnic_type, host):
        """Return ports whose binding has the given vnic_type and host."""
        query = context.session.query(models_v2.Port).join(
            ml2_models.PortBinding)
        query = query.filter(ml2_models.PortBinding.vnic_type == vnic_type,
                             ml2_models.PortBinding.host == host)
        return [cls._load_object(context, db_obj) for db_obj in query.all()]

    @classmethod
    def check_network_ports_by_binding_types(cls,
                                             context,
                                             network_id,
                                             binding_types,
                                             negative_search=False):
        """This method is to check whether networks have ports with given
        binding_types.

        :param context:
        :param network_id: ID of network to check
        :param binding_types: list of binding types to look for
        :param negative_search: if set to true, ports with binding_type
                                other than "binding_types" will be counted
        :return: True if any port is found, False otherwise
        """
        query = context.session.query(models_v2.Port).join(
            ml2_models.PortBinding)
        query = query.filter(models_v2.Port.network_id == network_id)
        if negative_search:
            query = query.filter(
                ml2_models.PortBinding.vif_type.notin_(binding_types))
        else:
            query = query.filter(
                ml2_models.PortBinding.vif_type.in_(binding_types))
        return bool(query.count())
Ejemplo n.º 9
0
class VolumeType(base.CinderPersistentObject, base.CinderObject,
                 base.CinderObjectDictCompat, base.CinderComparableObject):
    # Version 1.0: Initial version
    VERSION = '1.0'

    fields = {
        'id': fields.UUIDField(),
        'name': fields.StringField(nullable=True),
        'description': fields.StringField(nullable=True),
        'is_public': fields.BooleanField(default=True, nullable=True),
        'projects': fields.ListOfStringsField(nullable=True),
        'extra_specs': fields.DictOfStringsField(nullable=True),
    }

    @classmethod
    def _get_expected_attrs(cls, context):
        """Joined attributes to load when fetching a volume type by id."""
        return 'extra_specs', 'projects'

    @staticmethod
    def _from_db_object(context, type, db_type, expected_attrs=None):
        """Populate *type* from the DB row *db_type* and return it."""
        expected_attrs = [] if expected_attrs is None else expected_attrs
        for name, field in type.fields.items():
            if name in OPTIONAL_FIELDS:
                continue
            value = db_type[name]
            # Integer columns may come back as None; coerce to 0.
            if isinstance(field, fields.IntegerField):
                value = value or 0
            type[name] = value

        # Joined-query data: normalize extra_specs to a dict whether the
        # DB layer returned key/value rows or a ready-made mapping.
        if 'extra_specs' in expected_attrs:
            specs = db_type.get('extra_specs')
            if specs and isinstance(specs, list):
                type.extra_specs = {
                    item['key']: item['value']
                    for item in specs
                }
            elif specs and isinstance(specs, dict):
                type.extra_specs = specs
            else:
                type.extra_specs = {}
        if 'projects' in expected_attrs:
            type.projects = db_type.get('projects', [])

        type._context = context
        type.obj_reset_changes()
        return type

    @base.remotable
    def create(self):
        """Insert this volume type into the DB; refuse if already created."""
        if self.obj_attr_is_set('id'):
            raise exception.ObjectActionError(action='create',
                                              reason=_('already created'))
        db_volume_type = volume_types.create(self._context, self.name,
                                             self.extra_specs, self.is_public,
                                             self.projects, self.description)
        self._from_db_object(self._context, self, db_volume_type)

    @base.remotable
    def save(self):
        """Persist pending changes.

        NOTE: only name and description are pushed to the DB here.
        """
        if self.cinder_obj_get_changes():
            volume_types.update(self._context, self.id, self.name,
                                self.description)
            self.obj_reset_changes()

    @base.remotable
    def destroy(self):
        """Delete this volume type, running under an elevated context."""
        with self.obj_as_admin():
            volume_types.destroy(self._context, self.id)
Ejemplo n.º 10
0
class CinderPersistentObject(object):
    """Mixin class for Persistent objects.

    This adds the fields that we use in common for all persistent objects.
    """
    # Fields excluded from "unchanged in DB" checks in conditional_update;
    # subclasses override with their lazy-loaded attribute names.
    OPTIONAL_FIELDS = []

    # Re-exported DB condition helpers so callers can build
    # conditional_update expressions without importing the db module.
    Not = db.Not
    Case = db.Case

    fields = {
        'created_at': fields.DateTimeField(nullable=True),
        'updated_at': fields.DateTimeField(nullable=True),
        'deleted_at': fields.DateTimeField(nullable=True),
        'deleted': fields.BooleanField(default=False, nullable=True),
    }

    @classmethod
    def cinder_ovo_cls_init(cls):
        """This method is called on OVO registration and sets the DB model."""
        # Persistent Versioned Objects Classes should have a DB model, and if
        # they don't, then we have a problem and we must raise an exception on
        # registration.
        try:
            cls.model = db.get_model_for_versioned_object(cls)
        except (ImportError, AttributeError):
            msg = _("Couldn't find ORM model for Persistent Versioned "
                    "Object %s.") % cls.obj_name()
            raise exception.ProgrammingError(reason=msg)

    @contextlib.contextmanager
    def obj_as_admin(self):
        """Context manager to make an object call as an admin.

        This temporarily modifies the context embedded in an object to
        be elevated() and restores it after the call completes. Example
        usage:

           with obj.obj_as_admin():
               obj.save()

        :raises OrphanedObjectError: if the object has no context set.
        """
        if self._context is None:
            raise exception.OrphanedObjectError(method='obj_as_admin',
                                                objtype=self.obj_name())

        original_context = self._context
        self._context = self._context.elevated()
        try:
            yield
        finally:
            # Always restore the original context, even if the body raised.
            self._context = original_context

    @classmethod
    def _get_expected_attrs(cls, context, *args, **kwargs):
        """Hook for subclasses: names of joined attrs to load in get_by_id.

        Returning None means no extra attributes are requested.
        """
        return None

    @classmethod
    def get_by_id(cls, context, id, *args, **kwargs):
        """Fetch one object by primary key and hydrate it from the DB row.

        :raises NotImplementedError: if the class declares no 'id' field.
        """
        # To get by id we need to have a model and for the model to
        # have an id field
        if 'id' not in cls.fields:
            msg = (_('VersionedObject %s cannot retrieve object by id.') %
                   (cls.obj_name()))
            raise NotImplementedError(msg)

        orm_obj = db.get_by_id(context, cls.model, id, *args, **kwargs)
        # We pass parameters because fields to expect may depend on them
        expected_attrs = cls._get_expected_attrs(context, *args, **kwargs)
        kargs = {}
        if expected_attrs:
            kargs = {'expected_attrs': expected_attrs}
        return cls._from_db_object(context, cls(context), orm_obj, **kargs)

    def update_single_status_where(self,
                                   new_status,
                                   expected_status,
                                   filters=()):
        """Conditionally move this object's status from one value to another.

        Convenience wrapper around conditional_update for the common
        "compare-and-swap on the status column" case.

        :returns: number of DB rows updated (0 or 1), usable as a boolean.
        """
        values = {'status': new_status}
        expected_status = {'status': expected_status}
        return self.conditional_update(values, expected_status, filters)

    def conditional_update(self,
                           values,
                           expected_values=None,
                           filters=(),
                           save_all=False,
                           session=None,
                           reflect_changes=True,
                           order=None):
        """Compare-and-swap update.

        A conditional object update that, unlike normal update, will SAVE the
        contents of the update to the DB.

        Update will only occur in the DB and the object if conditions are met.

        If no expected_values are passed in we will default to make sure that
        all fields have not been changed in the DB. Since we cannot know the
        original value in the DB for dirty fields in the object those will be
        excluded.

        We have 4 different condition types we can use in expected_values:
         - Equality:  {'status': 'available'}
         - Inequality: {'status': vol_obj.Not('deleting')}
         - In range: {'status': ['available', 'error']}
         - Not in range: {'status': vol_obj.Not(['in-use', 'attaching'])}

        Method accepts additional filters, which are basically anything that
        can be passed to a sqlalchemy query's filter method, for example:

        .. code-block:: python

         [~sql.exists().where(models.Volume.id == models.Snapshot.volume_id)]

        We can select values based on conditions using Case objects in the
        'values' argument. For example:

        .. code-block:: python

         has_snapshot_filter = sql.exists().where(
             models.Snapshot.volume_id == models.Volume.id)
         case_values = volume.Case([(has_snapshot_filter, 'has-snapshot')],
                                   else_='no-snapshot')
         volume.conditional_update({'status': case_values},
                                   {'status': 'available'})

        And we can use DB fields using model class attribute for example to
        store previous status in the corresponding field even though we don't
        know which value is in the db from those we allowed:

        .. code-block:: python

         volume.conditional_update({'status': 'deleting',
                                    'previous_status': volume.model.status},
                                   {'status': ('available', 'error')})

        :param values: Dictionary of key-values to update in the DB.
        :param expected_values: Dictionary of conditions that must be met for
                                the update to be executed.
        :param filters: Iterable with additional filters
        :param save_all: Object may have changes that are not in the DB, this
                         will say whether we want those changes saved as well.
        :param session: Session to use for the update
        :param reflect_changes: If we want changes made in the database to be
                                reflected in the versioned object.  This may
                                mean in some cases that we have to reload the
                                object from the database.
        :param order: Specific order of fields in which to update the values
        :returns: number of db rows that were updated, which can be used as a
                  boolean, since it will be 0 if we couldn't update the DB and
                  1 if we could, because we are using unique index id.
        """
        if 'id' not in self.fields:
            msg = (
                _('VersionedObject %s does not support conditional update.') %
                (self.obj_name()))
            raise NotImplementedError(msg)

        # If no conditions are set we will require object in DB to be unchanged
        if expected_values is None:
            changes = self.obj_what_changed()

            # Dirty and optional (lazy-loaded) fields are excluded: we cannot
            # know their current DB value.
            expected = {
                key: getattr(self, key)
                for key in self.fields.keys() if self.obj_attr_is_set(key)
                and key not in changes and key not in self.OPTIONAL_FIELDS
            }
        else:
            # Set the id in expected_values to limit conditional update to only
            # change this object
            expected = expected_values.copy()
            expected['id'] = self.id

        # If we want to save any additional changes the object has besides the
        # ones referred in values
        if save_all:
            changes = self.cinder_obj_get_changes()
            changes.update(values)
            values = changes

        result = db.conditional_update(self._context,
                                       self.model,
                                       values,
                                       expected,
                                       filters,
                                       order=order)

        # If we were able to update the DB then we need to update this object
        # as well to reflect new DB contents and clear the object's dirty flags
        # for those fields.
        if result and reflect_changes:
            # If we have used a Case, a db field or an expression in values we
            # don't know which value was used, so we need to read the object
            # back from the DB
            if any(
                    isinstance(v, self.Case) or db.is_orm_value(v)
                    for v in values.values()):
                # Read back object from DB
                obj = type(self).get_by_id(self._context, self.id)
                db_values = obj.obj_to_primitive()['versioned_object.data']
                # Only update fields where changes were requested
                values = {
                    field: db_values[field]
                    for field, value in values.items()
                }

            # NOTE(geguileo): We don't use update method because our objects
            # will eventually move away from VersionedObjectDictCompat
            for key, value in values.items():
                setattr(self, key, value)
            self.obj_reset_changes(values.keys())
        return result

    def refresh(self):
        """Reload this object's contents from the DB in place.

        :raises NotImplementedError: if the class declares no 'id' field.
        """
        # To refresh we need to have a model and for the model to have an id
        # field
        if 'id' not in self.fields:
            msg = (_('VersionedObject %s cannot retrieve object by id.') %
                   (self.obj_name()))
            raise NotImplementedError(msg)

        current = self.get_by_id(self._context, self.id)

        # Copy contents retrieved from the DB into self
        my_data = vars(self)
        my_data.clear()
        my_data.update(vars(current))

    @classmethod
    def exists(cls, context, id_):
        """Return whether a row with primary key *id_* exists in the DB."""
        return db.resource_exists(context, cls.model, id_)
Ejemplo n.º 11
0
class ResourceData(
    heat_base.HeatObject,
    base.VersionedObjectDictCompat,
    base.ComparableVersionedObject,
):
    """Versioned object wrapping a single resource_data key/value row."""

    fields = {
        'id': fields.IntegerField(),
        'created_at': fields.DateTimeField(read_only=True),
        'updated_at': fields.DateTimeField(nullable=True),
        'key': fields.StringField(nullable=True),
        'value': fields.StringField(nullable=True),
        'redact': fields.BooleanField(nullable=True),
        'resource_id': fields.IntegerField(),
        'decrypt_method': fields.StringField(nullable=True),
    }

    @staticmethod
    def _from_db_object(sdata, db_sdata):
        """Populate ``sdata`` from a DB row; a None row yields None."""
        if db_sdata is None:
            return None
        for name in sdata.fields:
            sdata[name] = db_sdata[name]
        sdata.obj_reset_changes()
        return sdata

    @classmethod
    def get_all(cls, resource, *args, **kwargs):
        """Fetch all data for ``resource``.

        Returns a plain dict, so the objects mechanism is bypassed here.
        """
        return db_api.resource_data_get_all(
            resource.context, resource.id, *args, **kwargs)

    @classmethod
    def get_obj(cls, resource, key):
        raise exception.NotSupported(feature='ResourceData.get_obj')

    @classmethod
    def get_val(cls, resource, key):
        """Return the stored value for ``key`` on ``resource``."""
        return db_api.resource_data_get(resource.context, resource.id, key)

    @classmethod
    def set(cls, resource, key, value, *args, **kwargs):
        """Store ``value`` under ``key`` for ``resource``; return the row."""
        return db_api.resource_data_set(
            resource.context, resource.id, key, value, *args, **kwargs)

    @classmethod
    def get_by_key(cls, context, resource_id, key):
        """Load one row by (resource_id, key) as a ResourceData object."""
        row = db_api.resource_data_get_by_key(context, resource_id, key)
        return cls._from_db_object(cls(context), row)

    @classmethod
    def delete(cls, resource, key):
        """Remove the row stored under ``key`` for ``resource``."""
        db_api.resource_data_delete(resource.context, resource.id, key)
Ejemplo n.º 12
0
class Port(base.NeutronDbObject):
    """Versioned object for a Neutron port backed by models_v2.Port.

    Security group and QoS policy associations are stored in separate
    binding tables; they are exposed here as synthetic fields and kept in
    sync by the create()/update() overrides below.
    """

    # Version 1.0: Initial version
    VERSION = '1.0'

    db_model = models_v2.Port

    fields = {
        'id': obj_fields.UUIDField(),
        'project_id': obj_fields.StringField(nullable=True),
        'name': obj_fields.StringField(nullable=True),
        'network_id': obj_fields.UUIDField(),
        'mac_address': common_types.MACAddressField(),
        'admin_state_up': obj_fields.BooleanField(),
        'device_id': obj_fields.StringField(),
        'device_owner': obj_fields.StringField(),
        'status': obj_fields.StringField(),

        'allowed_address_pairs': obj_fields.ListOfObjectsField(
            'AllowedAddressPair', nullable=True
        ),
        'binding': obj_fields.ObjectField(
            'PortBinding', nullable=True
        ),
        'dhcp_options': obj_fields.ListOfObjectsField(
            'ExtraDhcpOpt', nullable=True
        ),
        'distributed_binding': obj_fields.ObjectField(
            'DistributedPortBinding', nullable=True
        ),
        'dns': obj_fields.ObjectField('PortDNS', nullable=True),
        'fixed_ips': obj_fields.ListOfObjectsField(
            'IPAllocation', nullable=True
        ),
        # TODO(ihrachys): consider converting to boolean
        'security': obj_fields.ObjectField(
            'PortSecurity', nullable=True
        ),
        'security_group_ids': common_types.SetOfUUIDsField(
            nullable=True,
            # TODO(ihrachys): how do we safely pass a mutable default?
            default=None,
        ),
        'qos_policy_id': obj_fields.UUIDField(nullable=True, default=None),

        'binding_levels': obj_fields.ListOfObjectsField(
            'PortBindingLevel', nullable=True
        ),

        # TODO(ihrachys): consider adding a 'dns_assignment' fully synthetic
        # field in later object iterations
    }

    # Fields not present as columns on the Port table itself; loaded from
    # related rows and never written directly through the base class.
    synthetic_fields = [
        'allowed_address_pairs',
        'binding',
        'binding_levels',
        'dhcp_options',
        'distributed_binding',
        'dns',
        'fixed_ips',
        'qos_policy_id',
        'security',
        'security_group_ids',
    ]

    # Object field name -> DB model attribute name.
    fields_need_translation = {
        'binding': 'port_binding',
        'dhcp_options': 'dhcp_opts',
        'distributed_binding': 'distributed_port_binding',
        'security': 'port_security',
    }

    def create(self):
        """Persist the port, then attach SG and QoS bindings atomically."""
        fields = self.obj_get_changes()
        with db_api.autonested_transaction(self.obj_context.session):
            # Capture synthetic values before super().create() resets the
            # object's change tracking.
            sg_ids = self.security_group_ids
            if sg_ids is None:
                sg_ids = set()
            qos_policy_id = self.qos_policy_id
            super(Port, self).create()
            if 'security_group_ids' in fields:
                self._attach_security_groups(sg_ids)
            if 'qos_policy_id' in fields:
                self._attach_qos_policy(qos_policy_id)

    def update(self):
        """Persist column changes, then re-sync SG and QoS bindings."""
        fields = self.obj_get_changes()
        with db_api.autonested_transaction(self.obj_context.session):
            super(Port, self).update()
            if 'security_group_ids' in fields:
                self._attach_security_groups(fields['security_group_ids'])
            if 'qos_policy_id' in fields:
                self._attach_qos_policy(fields['qos_policy_id'])

    def _attach_qos_policy(self, qos_policy_id):
        """Replace this port's QoS policy binding with ``qos_policy_id``.

        Deletes any existing binding first; a falsy id leaves the port
        without a QoS policy.
        """
        # TODO(ihrachys): introduce an object for the binding to isolate
        # database access in a single place, currently scattered between port
        # and policy objects
        obj_db_api.delete_objects(
            self.obj_context, qos_models.QosPortPolicyBinding, port_id=self.id)
        if qos_policy_id:
            obj_db_api.create_object(
                self.obj_context, qos_models.QosPortPolicyBinding,
                {'port_id': self.id, 'policy_id': qos_policy_id}
            )
        self.qos_policy_id = qos_policy_id
        self.obj_reset_changes(['qos_policy_id'])

    def _attach_security_groups(self, sg_ids):
        """Replace this port's security group bindings with ``sg_ids``."""
        # TODO(ihrachys): consider introducing an (internal) object for the
        # binding to decouple database operations a bit more
        obj_db_api.delete_objects(
            self.obj_context, sg_models.SecurityGroupPortBinding,
            port_id=self.id,
        )
        if sg_ids:
            for sg_id in sg_ids:
                self._attach_security_group(sg_id)
        self.security_group_ids = sg_ids
        self.obj_reset_changes(['security_group_ids'])

    def _attach_security_group(self, sg_id):
        """Create one port <-> security group binding row."""
        obj_db_api.create_object(
            self.obj_context, sg_models.SecurityGroupPortBinding,
            {'port_id': self.id, 'security_group_id': sg_id}
        )

    # TODO(rossella_s): get rid of it once we switch the db model to using
    # custom types.
    @classmethod
    def modify_fields_to_db(cls, fields):
        """Convert object field values to their DB string representation."""
        result = super(Port, cls).modify_fields_to_db(fields)
        if 'mac_address' in result:
            result['mac_address'] = cls.filter_to_str(result['mac_address'])
        return result

    # TODO(rossella_s): get rid of it once we switch the db model to using
    # custom types.
    @classmethod
    def modify_fields_from_db(cls, db_obj):
        """Convert DB values to object field types.

        The distributed binding relation comes back as a list; collapse it
        to the single binding or None.
        """
        fields = super(Port, cls).modify_fields_from_db(db_obj)
        if 'mac_address' in fields:
            fields['mac_address'] = utils.AuthenticEUI(fields['mac_address'])
        distributed_port_binding = fields.get('distributed_binding')
        if distributed_port_binding:
            fields['distributed_binding'] = fields['distributed_binding'][0]
        else:
            fields['distributed_binding'] = None
        return fields

    def from_db_object(self, db_obj):
        """Load fields from ``db_obj``, including SG and QoS bindings."""
        super(Port, self).from_db_object(db_obj)
        # extract security group bindings
        if db_obj.get('security_groups', []):
            self.security_group_ids = {
                sg.security_group_id
                for sg in db_obj.security_groups
            }
        else:
            self.security_group_ids = set()
        self.obj_reset_changes(['security_group_ids'])

        # extract qos policy binding
        if db_obj.get('qos_policy_binding'):
            self.qos_policy_id = (
                db_obj.qos_policy_binding.policy_id
            )
        else:
            self.qos_policy_id = None
        self.obj_reset_changes(['qos_policy_id'])
Ejemplo n.º 13
0
class HostInterface(base.DrydockObject):
    """Model of a single network interface on a baremetal node.

    Instances are keyed by ``device_name``. The device attribute may be a
    hardware alias that translates to a physical device address; if it does
    not match an alias it is assumed to directly identify an OS device
    name. When apply_hardware_profile is called on the parent node, the
    selector is decided and applied.
    """

    VERSION = '1.0'

    fields = {
        'device_name': obj_fields.StringField(),
        'primary_network': obj_fields.BooleanField(nullable=False,
                                                   default=False),
        'source': hd_fields.ModelSourceField(),
        'network_link': obj_fields.StringField(nullable=True),
        'hardware_slaves': obj_fields.ListOfStringsField(nullable=True),
        'slave_selectors': obj_fields.ObjectField('HardwareDeviceSelectorList',
                                                  nullable=True),
        'networks': obj_fields.ListOfStringsField(nullable=True),
    }

    # HostInterface is keyed by device_name
    def get_id(self):
        return self.get_name()

    def get_name(self):
        return self.device_name

    def get_hw_slaves(self):
        return self.hardware_slaves

    def get_slave_selectors(self):
        return self.slave_selectors

    def get_slave_count(self):
        """Return the number of hardware slaves for this interface.

        ``hardware_slaves`` is nullable, so treat an unset value as zero
        slaves rather than raising TypeError on len(None).
        """
        return len(self.hardware_slaves or [])

    def add_selector(self, slave_selector):
        """Append a hardware device selector, creating the list lazily."""
        if self.slave_selectors is None:
            self.slave_selectors = objects.HardwareDeviceSelectorList()

        self.slave_selectors.append(slave_selector)

    @staticmethod
    def merge_lists(child_list, parent_list):
        """Merge two lists of HostInterface models.

        ``child_list`` takes priority on conflicts. If a member of
        ``child_list`` has a device_name beginning with '!' it indicates
        that the matching HostInterface should be removed from the merged
        list.
        """
        effective_list = []

        if len(child_list) == 0 and len(parent_list) > 0:
            # Only parent interfaces: inherit all of them as compiled models.
            for p in parent_list:
                pp = deepcopy(p)
                pp.source = hd_fields.ModelSource.Compiled
                effective_list.append(pp)
        elif len(parent_list) == 0 and len(child_list) > 0:
            # Only child interfaces: keep everything except '!' removals.
            for i in child_list:
                if i.get_name().startswith('!'):
                    continue
                ii = deepcopy(i)
                ii.source = hd_fields.ModelSource.Compiled
                effective_list.append(ii)
        elif len(parent_list) > 0 and len(child_list) > 0:
            parent_interfaces = []
            for i in parent_list:
                parent_name = i.get_name()
                parent_interfaces.append(parent_name)
                add = True
                for j in child_list:
                    if j.get_name() == ("!" + parent_name):
                        # Child explicitly removes this parent interface.
                        add = False
                        break
                    elif j.get_name() == parent_name:
                        # Same name on both sides: merge, with child fields
                        # overriding parent fields.
                        m = objects.HostInterface()
                        m.device_name = j.get_name()
                        m.primary_network = \
                            objects.Utils.apply_field_inheritance(
                                getattr(j, 'primary_network', None),
                                getattr(i, 'primary_network', None))
                        m.network_link = \
                            objects.Utils.apply_field_inheritance(
                                getattr(j, 'network_link', None),
                                getattr(i, 'network_link', None))

                        # Parent slaves survive unless negated ('!x') by the
                        # child; child additions (non-'!') are appended.
                        s = [x for x
                             in getattr(i, 'hardware_slaves', [])
                             if ("!" + x) not in getattr(
                                 j, 'hardware_slaves', [])]
                        s.extend(
                            [x for x
                             in getattr(j, 'hardware_slaves', [])
                             if not x.startswith("!")])
                        m.hardware_slaves = s

                        # Same union/negation rules for networks.
                        n = [x for x
                             in getattr(i, 'networks', [])
                             if ("!" + x) not in getattr(j, 'networks', [])]
                        n.extend(
                            [x for x
                             in getattr(j, 'networks', [])
                             if not x.startswith("!")])
                        m.networks = n

                        m.source = hd_fields.ModelSource.Compiled
                        effective_list.append(m)
                        add = False
                        break

                if add:
                    ii = deepcopy(i)
                    ii.source = hd_fields.ModelSource.Compiled
                    effective_list.append(ii)

            # Child-only interfaces (that are not removals) come through
            # verbatim.
            for j in child_list:
                if (j.device_name not in parent_interfaces
                        and not j.get_name().startswith("!")):
                    jj = deepcopy(j)
                    jj.source = hd_fields.ModelSource.Compiled
                    effective_list.append(jj)

        return effective_list
Ejemplo n.º 14
0
class ComputeNode(base.ZunPersistentObject, base.ZunObject):
    """Versioned object tracking per-host compute resources and usage."""

    # Version 1.0: Initial version
    # Version 1.1: Add mem_total, mem_free, mem_available columns
    # Version 1.2: Add total, running, paused, stopped containers columns
    # Version 1.3: Add cpus, cpu_used
    # Version 1.4: Add host operating system info
    # Version 1.5: Add host labels info
    # Version 1.6: Add mem_used to compute node
    # Version 1.7: Change get_by_hostname to get_by_name
    # Version 1.8: Add pci_device_pools to compute node
    # Version 1.9: Change PciDevicePoolList to ObjectField
    # Version 1.10: Add disk_total, disk_used columns
    # Version 1.11: Add disk_quota_supported field
    # Version 1.12: Add runtimes field
    # Version 1.13: Add enable_cpu_pinning field
    VERSION = '1.13'

    fields = {
        'uuid': fields.UUIDField(read_only=True, nullable=False),
        'numa_topology': fields.ObjectField('NUMATopology', nullable=True),
        'hostname': fields.StringField(nullable=False),
        'mem_total': fields.IntegerField(nullable=False),
        'mem_free': fields.IntegerField(nullable=False),
        'mem_available': fields.IntegerField(nullable=False),
        'mem_used': fields.IntegerField(nullable=False),
        'total_containers': fields.IntegerField(nullable=False),
        'running_containers': fields.IntegerField(nullable=False),
        'paused_containers': fields.IntegerField(nullable=False),
        'stopped_containers': fields.IntegerField(nullable=False),
        'cpus': fields.IntegerField(nullable=False),
        'cpu_used': fields.FloatField(nullable=False),
        'architecture': fields.StringField(nullable=True),
        'os_type': fields.StringField(nullable=True),
        'os': fields.StringField(nullable=True),
        'kernel_version': fields.StringField(nullable=True),
        'labels': fields.DictOfStringsField(nullable=True),
        # NOTE(pmurray): the pci_device_pools field maps to the
        # pci_stats field in the database
        'pci_device_pools': fields.ObjectField('PciDevicePoolList',
                                               nullable=True),
        'disk_total': fields.IntegerField(nullable=False),
        'disk_used': fields.IntegerField(nullable=False),
        'disk_quota_supported': fields.BooleanField(nullable=False),
        'runtimes': fields.ListOfStringsField(nullable=True),
        'enable_cpu_pinning': fields.BooleanField(nullable=False),
    }

    @staticmethod
    def _from_db_object(context, compute_node, db_compute_node):
        """Converts a database entity to a formal object."""
        # pci_device_pools is stored as JSON in the pci_stats column and is
        # converted separately below.
        special_cases = set(['pci_device_pools'])
        fields = set(compute_node.fields) - special_cases
        for field in fields:
            if field == 'numa_topology':
                # numa_topology is stored as a dict; rebuild the object.
                numa_obj = NUMATopology._from_dict(
                    db_compute_node['numa_topology'])
                compute_node.numa_topology = numa_obj
            else:
                setattr(compute_node, field, db_compute_node[field])

        pci_stats = db_compute_node.get('pci_stats')
        if pci_stats is not None:
            pci_stats = pci_device_pool.from_pci_stats(pci_stats)
        compute_node.pci_device_pools = pci_stats
        compute_node.obj_reset_changes(recursive=True)
        return compute_node

    @staticmethod
    def _from_db_object_list(db_objects, cls, context):
        """Converts a list of database entities to a list of formal objects."""
        return [
            ComputeNode._from_db_object(context, cls(context), obj)
            for obj in db_objects
        ]

    @staticmethod
    def _convert_pci_stats_to_db_format(updates):
        """Serialize pci_device_pools into the DB's pci_stats JSON column."""
        if 'pci_device_pools' in updates:
            pools = updates.pop('pci_device_pools')
            if pools is not None:
                pools = jsonutils.dumps(pools.obj_to_primitive())
            updates['pci_stats'] = pools

    @base.remotable
    def create(self, context):
        """Create a compute node record in the DB.

        :param context: Security context.

        """
        values = self.obj_get_changes()
        # numa_topology and pci stats need converting before hitting the DB.
        numa_obj = values.pop('numa_topology', None)
        if numa_obj is not None:
            values['numa_topology'] = numa_obj._to_dict()

        self._convert_pci_stats_to_db_format(values)
        db_compute_node = dbapi.create_compute_node(context, values)
        self._from_db_object(context, self, db_compute_node)

    @base.remotable_classmethod
    def get_by_uuid(cls, context, uuid):
        """Find a compute node based on uuid.

        :param uuid: the uuid of a compute node.
        :param context: Security context
        :returns: a :class:`ComputeNode` object.
        """
        db_compute_node = dbapi.get_compute_node(context, uuid)
        compute_node = ComputeNode._from_db_object(context, cls(context),
                                                   db_compute_node)
        return compute_node

    @base.remotable_classmethod
    def get_by_name(cls, context, hostname):
        """Find a compute node based on hostname.

        :param context: Security context.
        :param hostname: the hostname of a compute node.
        :returns: a :class:`ComputeNode` object.
        """
        db_compute_node = dbapi.get_compute_node_by_hostname(context, hostname)
        return cls._from_db_object(context, cls(), db_compute_node)

    @base.remotable_classmethod
    def list(cls,
             context,
             limit=None,
             marker=None,
             sort_key=None,
             sort_dir=None,
             filters=None):
        """Return a list of ComputeNode objects.

        :param context: Security context.
        :param limit: maximum number of resources to return in a single result.
        :param marker: pagination marker for large data sets.
        :param sort_key: column to sort results by.
        :param sort_dir: direction to sort. "asc" or "desc".
        :param filters: filters when list resource providers.
        :returns: a list of :class:`ComputeNode` object.

        """
        db_compute_nodes = dbapi.list_compute_nodes(context,
                                                    limit=limit,
                                                    marker=marker,
                                                    sort_key=sort_key,
                                                    sort_dir=sort_dir,
                                                    filters=filters)
        return ComputeNode._from_db_object_list(db_compute_nodes, cls, context)

    @base.remotable
    def destroy(self, context=None):
        """Delete the ComputeNode from the DB.

        :param context: Security context.
        """
        dbapi.destroy_compute_node(context, self.uuid)
        self.obj_reset_changes(recursive=True)

    @base.remotable
    def save(self, context=None):
        """Save updates to this ComputeNode.

        Updates will be made column by column based on the result
        of self.what_changed().

        :param context: Security context.
        """
        updates = self.obj_get_changes()
        numa_obj = updates.pop('numa_topology', None)
        if numa_obj is not None:
            updates['numa_topology'] = numa_obj._to_dict()
        self._convert_pci_stats_to_db_format(updates)
        dbapi.update_compute_node(context, self.uuid, updates)
        self.obj_reset_changes(recursive=True)

    @base.remotable
    def refresh(self, context=None):
        """Loads updates for this ComputeNode.

        Loads a compute node with the same uuid from the database and
        checks for updated attributes. Updates are applied from
        the loaded compute node column by column, if there are any
        updates.

        :param context: Security context.
        """
        current = self.__class__.get_by_uuid(self._context, uuid=self.uuid)
        for field in self.fields:
            # Only overwrite fields that are set locally and differ from the
            # freshly loaded values.
            if self.obj_attr_is_set(field) and \
               getattr(self, field) != getattr(current, field):
                setattr(self, field, getattr(current, field))
Ejemplo n.º 15
0
class Subnet(base.NeutronDbObject):
    """Versioned object for a Neutron subnet backed by models_v2.Subnet.

    'shared' and 'service_types' are synthetic fields loaded from RBAC
    entries and the service-type table respectively. The classmethods at
    the bottom implement subnet candidate selection for routed (segmented)
    and non-routed networks.
    """

    # Version 1.0: Initial version
    VERSION = '1.0'

    db_model = models_v2.Subnet

    fields = {
        'id':
        common_types.UUIDField(),
        'project_id':
        obj_fields.StringField(nullable=True),
        'name':
        obj_fields.StringField(nullable=True),
        'network_id':
        common_types.UUIDField(),
        'segment_id':
        common_types.UUIDField(nullable=True),
        # NOTE: subnetpool_id can be 'prefix_delegation' string
        # when the IPv6 Prefix Delegation is enabled
        'subnetpool_id':
        obj_fields.StringField(nullable=True),
        'ip_version':
        common_types.IPVersionEnumField(),
        'cidr':
        common_types.IPNetworkField(),
        'gateway_ip':
        obj_fields.IPAddressField(nullable=True),
        'allocation_pools':
        obj_fields.ListOfObjectsField('IPAllocationPool', nullable=True),
        'enable_dhcp':
        obj_fields.BooleanField(nullable=True),
        'shared':
        obj_fields.BooleanField(nullable=True),
        'dns_nameservers':
        obj_fields.ListOfObjectsField('DNSNameServer', nullable=True),
        'host_routes':
        obj_fields.ListOfObjectsField('Route', nullable=True),
        'ipv6_ra_mode':
        common_types.IPV6ModeEnumField(nullable=True),
        'ipv6_address_mode':
        common_types.IPV6ModeEnumField(nullable=True),
        'service_types':
        obj_fields.ListOfStringsField(nullable=True)
    }

    synthetic_fields = [
        'allocation_pools', 'dns_nameservers', 'host_routes', 'service_types',
        'shared'
    ]

    foreign_keys = {'Network': {'network_id': 'id'}}

    fields_no_update = ['project_id', 'network_id']

    # Object field name -> DB model attribute name.
    fields_need_translation = {'host_routes': 'routes'}

    def __init__(self, context=None, **kwargs):
        super(Subnet, self).__init__(context, **kwargs)
        # 'shared' is synthetic, so register it as an extra filter name to
        # allow filtering on it in get_objects().
        self.add_extra_filter_name('shared')

    def obj_load_attr(self, attrname):
        """Lazy-load synthetic attributes not populated from the DB row."""
        if attrname == 'shared':
            return self._load_shared()
        if attrname == 'service_types':
            return self._load_service_types()
        super(Subnet, self).obj_load_attr(attrname)

    def _load_shared(self, db_obj=None):
        """Resolve the 'shared' flag from RBAC entries and set it on self."""
        if db_obj:
            # NOTE(korzen) db_obj is passed when Subnet object is loaded
            # from DB
            rbac_entries = db_obj.get('rbac_entries') or {}
            shared = (rbac_db.RbacNeutronDbObjectMixin.is_network_shared(
                self.obj_context, rbac_entries))
        else:
            # NOTE(korzen) this case is used when Subnet object was
            # instantiated and without DB interaction (get_object(s), update,
            # create), it should be rare case to load 'shared' by that method
            shared = (rbac_db.RbacNeutronDbObjectMixin.get_shared_with_tenant(
                self.obj_context.elevated(), network.NetworkRBAC,
                self.network_id, self.project_id))
        setattr(self, 'shared', shared)
        self.obj_reset_changes(['shared'])

    def _load_service_types(self, db_obj=None):
        """Load this subnet's service types (from db_obj or a DB query)."""
        if db_obj:
            service_types = db_obj.get('service_types', [])
        else:
            service_types = SubnetServiceType.get_objects(self.obj_context,
                                                          subnet_id=self.id)

        self.service_types = [
            service_type['service_type'] for service_type in service_types
        ]
        self.obj_reset_changes(['service_types'])

    def from_db_object(self, db_obj):
        """Populate fields from ``db_obj``, including synthetic ones."""
        super(Subnet, self).from_db_object(db_obj)
        self._load_shared(db_obj)
        self._load_service_types(db_obj)

    @classmethod
    def modify_fields_from_db(cls, db_obj):
        """Convert DB string values to IP/CIDR object field types."""
        # TODO(korzen) remove this method when IP and CIDR decorator ready
        result = super(Subnet, cls).modify_fields_from_db(db_obj)
        if 'cidr' in result:
            result['cidr'] = utils.AuthenticIPNetwork(result['cidr'])
        if 'gateway_ip' in result and result['gateway_ip'] is not None:
            result['gateway_ip'] = netaddr.IPAddress(result['gateway_ip'])
        return result

    @classmethod
    def modify_fields_to_db(cls, fields):
        """Convert IP/CIDR object field values back to DB strings."""
        # TODO(korzen) remove this method when IP and CIDR decorator ready
        result = super(Subnet, cls).modify_fields_to_db(fields)
        if 'cidr' in result:
            result['cidr'] = cls.filter_to_str(result['cidr'])
        if 'gateway_ip' in result and result['gateway_ip'] is not None:
            result['gateway_ip'] = cls.filter_to_str(result['gateway_ip'])
        return result

    @classmethod
    def find_candidate_subnets(cls, context, network_id, host, service_type,
                               fixed_configured):
        """Find candidate subnets for the network, host, and service_type"""
        query = cls.query_subnets_on_network(context, network_id)
        query = SubnetServiceType.query_filter_service_subnets(
            query, service_type)

        # Select candidate subnets and return them
        if not cls.is_host_set(host):
            if fixed_configured:
                # If fixed_ips in request and host is not known all subnets on
                # the network are candidates. Host/Segment will be validated
                # on port update with binding:host_id set. Allocation _cannot_
                # be deferred as requested fixed_ips would then be lost.
                return query.all()
            # If the host isn't known, we can't allocate on a routed network.
            # So, exclude any subnets attached to segments.
            return cls._query_exclude_subnets_on_segments(query).all()

        # The host is known. Consider both routed and non-routed networks
        results = cls._query_filter_by_segment_host_mapping(query, host).all()

        # For now, we're using a simplifying assumption that a host will only
        # touch one segment in a given routed network.  Raise exception
        # otherwise.  This restriction may be relaxed as use cases for multiple
        # mappings are understood.
        segment_ids = {
            subnet.segment_id
            for subnet, mapping in results if mapping
        }
        if 1 < len(segment_ids):
            raise segment_exc.HostConnectedToMultipleSegments(
                host=host, network_id=network_id)

        return [subnet for subnet, _mapping in results]

    @classmethod
    def _query_filter_by_segment_host_mapping(cls, query, host):
        # TODO(tuanvu): find OVO-like solution for handling "join queries" and
        #               write unit test for this function
        """Excludes subnets on segments not reachable by the host

        The query gets two kinds of subnets: those that are on segments that
        the host can reach and those that are not on segments at all (assumed
        reachable by all hosts). Hence, subnets on segments that the host
        *cannot* reach are excluded.
        """
        SegmentHostMapping = segment_model.SegmentHostMapping

        # A host has been provided.  Consider these two scenarios
        # 1. Not a routed network:  subnets are not on segments
        # 2. Is a routed network:  only subnets on segments mapped to host
        # The following join query returns results for either.  The two are
        # guaranteed to be mutually exclusive when subnets are created.
        query = query.add_entity(SegmentHostMapping)
        query = query.outerjoin(
            SegmentHostMapping,
            and_(cls.db_model.segment_id == SegmentHostMapping.segment_id,
                 SegmentHostMapping.host == host))

        # Essentially "segment_id IS NULL XNOR host IS NULL"
        query = query.filter(
            or_(
                and_(cls.db_model.segment_id.isnot(None),
                     SegmentHostMapping.host.isnot(None)),
                and_(cls.db_model.segment_id.is_(None),
                     SegmentHostMapping.host.is_(None))))
        return query

    @classmethod
    def query_subnets_on_network(cls, context, network_id):
        """Return a query of all subnets belonging to ``network_id``."""
        query = model_query.get_collection_query(context, cls.db_model)
        return query.filter(cls.db_model.network_id == network_id)

    @classmethod
    def _query_exclude_subnets_on_segments(cls, query):
        """Excludes all subnets associated with segments

        For the case where the host is not known, we don't consider any subnets
        that are on segments. But, we still consider subnets that are not
        associated with any segment (i.e. for non-routed networks).
        """
        return query.filter(cls.db_model.segment_id.is_(None))

    @classmethod
    def is_host_set(cls, host):
        """Utility to tell if the host is set in the port binding"""
        # This seems redundant, but its not. Host is unset if its None, '',
        # or ATTR_NOT_SPECIFIED due to differences in host binding
        # implementations.
        return host and validators.is_attr_set(host)

    @classmethod
    def network_has_no_subnet(cls, context, network_id, host, service_type):
        """Return True iff the network has no subnets; raise on other causes.

        Distinguishes "no subnets at all" from "no subnet reachable by this
        host" and "only ineligible service subnets" so the caller can raise
        the right error.
        """
        # Determine why we found no subnets to raise the right error
        query = cls.query_subnets_on_network(context, network_id)

        if cls.is_host_set(host):
            # Empty because host isn't mapped to a segment with a subnet?
            s_query = query.filter(cls.db_model.segment_id.isnot(None))
            if s_query.limit(1).count() != 0:
                # It is a routed network but no subnets found for host
                raise segment_exc.HostNotConnectedToAnySegment(
                    host=host, network_id=network_id)

        if not query.limit(1).count():
            # Network has *no* subnets of any kind. This isn't an error.
            return True

        # Does filtering ineligible service subnets makes the list empty?
        query = SubnetServiceType.query_filter_service_subnets(
            query, service_type)
        if query.limit(1).count():
            # No, must be a deferred IP port because there are matching
            # subnets. Happens on routed networks when host isn't known.
            raise ipam_exceptions.DeferIpam()
        return False
Ejemplo n.º 16
0
class NetworkSegmentRange(base.NeutronDbObject):
    """Versioned object for a network segment range DB row.

    A range describes which segmentation IDs (e.g. VLAN IDs or tunnel
    IDs) of a given ``network_type`` are available, either to every
    project (``shared``) or to a single ``project_id``.
    """

    # Version 1.0: Initial version
    VERSION = '1.0'

    db_model = range_model.NetworkSegmentRange

    primary_keys = ['id']

    fields = {
        'id':
        common_types.UUIDField(),
        'name':
        obj_fields.StringField(nullable=True),
        'default':
        obj_fields.BooleanField(nullable=False),
        'shared':
        obj_fields.BooleanField(nullable=False),
        'project_id':
        obj_fields.StringField(nullable=True),
        'network_type':
        common_types.NetworkSegmentRangeNetworkTypeEnumField(nullable=False),
        'physical_network':
        obj_fields.StringField(nullable=True),
        'minimum':
        obj_fields.IntegerField(nullable=True),
        'maximum':
        obj_fields.IntegerField(nullable=True)
    }

    def to_dict(self, fields=None):
        """Return the range as a dict, extended with usage information."""
        _dict = super(NetworkSegmentRange, self).to_dict()
        # extend the network segment range dict with `available` and `used`
        # fields
        _dict.update({'available': self._get_available_allocation()})
        _dict.update({'used': self._get_used_allocation_mapping()})
        # NOTE(ralonsoh): this workaround should be removed once the migration
        # from "tenant_id" to "project_id" is finished.
        _dict = db_utils.resource_fields(_dict, fields)
        _dict.pop('tenant_id', None)
        resource_extend.apply_funcs(nsr_def.COLLECTION_NAME, _dict,
                                    self.db_obj)
        return _dict

    def _check_shared_project_id(self, action):
        # A non-shared range is scoped to a project, so project_id is
        # mandatory in that case.
        if self.shared is False and not self.project_id:
            raise n_exc.ObjectActionError(
                action=action,
                reason='if NetworkSegmentRange is not shared, it must have a '
                'project_id')

    def create(self):
        self._check_shared_project_id('create')
        super(NetworkSegmentRange, self).create()

    def update(self):
        self._check_shared_project_id('update')
        super(NetworkSegmentRange, self).update()

    def _get_allocation_model_details(self):
        """Return (model, segmentation-id column, allocated column).

        :raises n_exc.InvalidInput: when ``network_type`` has no
            registered allocation model.
        """
        model = models_map.get(self.network_type)
        if model is not None:
            alloc_segmentation_id = model.get_segmentation_id()
        else:
            msg = (_("network_type '%s' unknown for getting allocation "
                     "information") % self.network_type)
            raise n_exc.InvalidInput(error_message=msg)
        allocated = model.allocated

        return model, alloc_segmentation_id, allocated

    def _get_available_allocation(self):
        """Return the unallocated segmentation IDs inside this range."""
        with self.db_context_reader(self.obj_context):
            model, alloc_segmentation_id, allocated = (
                self._get_allocation_model_details())

            query = self.obj_context.session.query(alloc_segmentation_id)
            query = query.filter(
                and_(alloc_segmentation_id >= self.minimum,
                     alloc_segmentation_id <= self.maximum), not_(allocated))
            if self.network_type == constants.TYPE_VLAN:
                # VLAN IDs are only unique per physical network.
                alloc_available = query.filter(
                    model.physical_network == self.physical_network).all()
            else:
                alloc_available = query.all()

            return [segmentation_id for (segmentation_id, ) in alloc_available]

    def _get_used_allocation_mapping(self):
        """Map used segmentation IDs in this range to owning project IDs."""
        with self.db_context_reader(self.obj_context):
            query = self.obj_context.session.query(
                segments_model.NetworkSegment.segmentation_id,
                models_v2.Network.project_id)
            alloc_used = (query.filter(
                and_(
                    segments_model.NetworkSegment.network_type ==
                    self.network_type,
                    segments_model.NetworkSegment.physical_network ==
                    self.physical_network,
                    segments_model.NetworkSegment.segmentation_id >=
                    self.minimum, segments_model.NetworkSegment.segmentation_id
                    <= self.maximum)).filter(
                        segments_model.NetworkSegment.network_id ==
                        models_v2.Network.id)).all()
        return dict(alloc_used)

    @classmethod
    def _build_query_segments(cls, context, model, network_type, **filters):
        """Build a query of unallocated *model* rows joined to the ranges.

        Only filters whose names match *model* columns are applied.
        """
        columns = set(dict(model.__table__.columns))
        model_filters = dict(
            (k, filters[k]) for k in columns & set(filters.keys()))
        query = (context.session.query(model).filter_by(
            allocated=False, **model_filters).distinct())
        _and = and_(
            cls.db_model.network_type == network_type,
            model.physical_network == cls.db_model.physical_network
            if network_type == constants.TYPE_VLAN else sql.expression.true())
        return query.join(range_model.NetworkSegmentRange, _and)

    @classmethod
    def get_segments_for_project(cls, context, model, network_type,
                                 model_segmentation_id, **filters):
        """Return unallocated segments inside ranges owned by the project."""
        _filters = copy.deepcopy(filters)
        project_id = _filters.pop('project_id', None)
        if not project_id:
            return []

        with cls.db_context_reader(context):
            query = cls._build_query_segments(context, model, network_type,
                                              **_filters)
            query = query.filter(
                and_(model_segmentation_id >= cls.db_model.minimum,
                     model_segmentation_id <= cls.db_model.maximum,
                     cls.db_model.project_id == project_id))
            return query.limit(common_constants.IDPOOL_SELECT_SIZE).all()

    @classmethod
    def get_segments_shared(cls, context, model, network_type,
                            model_segmentation_id, **filters):
        """Return unallocated segments available from the shared ranges.

        Segment IDs reserved by *other* projects' ranges are excluded
        from the shared pool before querying.
        """
        _filters = copy.deepcopy(filters)
        project_id = _filters.pop('project_id', None)
        with cls.db_context_reader(context):
            # Retrieve all network segment ranges shared.
            shared_ranges = context.session.query(cls.db_model).filter(
                and_(cls.db_model.network_type == network_type,
                     cls.db_model.shared == sql.expression.true()))
            if network_type == constants.TYPE_VLAN:
                # BUG FIX: Query.filter() returns a *new* query object;
                # the result must be reassigned, otherwise the
                # physical_network restriction is silently discarded.
                shared_ranges = shared_ranges.filter(
                    cls.db_model.physical_network ==
                    _filters['physical_network'])
            segment_ids = set([])
            for shared_range in shared_ranges.all():
                segment_ids.update(
                    set(range(shared_range.minimum, shared_range.maximum + 1)))
            if not segment_ids:
                return []

            # Retrieve other project segment ID ranges (not own project, not
            # default range).
            other_project_ranges = context.session.query(cls.db_model).filter(
                and_(cls.db_model.project_id != project_id,
                     cls.db_model.project_id.isnot(None),
                     cls.db_model.network_type == network_type))
            if network_type == constants.TYPE_VLAN:
                other_project_ranges = other_project_ranges.filter(
                    cls.db_model.physical_network ==
                    _filters['physical_network'])

            for other_project_range in other_project_ranges.all():
                _set = set(
                    range(other_project_range.minimum,
                          other_project_range.maximum + 1))
                segment_ids.difference_update(_set)

            # NOTE(ralonsoh): https://stackoverflow.com/questions/4628333/
            # converting-a-list-of-integers-into-range-in-python
            segment_ranges = [[
                t[0][1], t[-1][1]
            ] for t in (tuple(g[1]) for g in itertools.groupby(
                enumerate(segment_ids),
                key=lambda enum_seg: enum_seg[1] - enum_seg[0]))]

            # Retrieve all segments belonging to the default range except those
            # assigned to other projects.
            query = cls._build_query_segments(context, model, network_type,
                                              **_filters)
            clauses = [
                and_(model_segmentation_id >= _range[0],
                     model_segmentation_id <= _range[1])
                for _range in segment_ranges
            ]
            query = query.filter(or_(*clauses))
            return query.limit(common_constants.IDPOOL_SELECT_SIZE).all()
Ejemplo n.º 17
0
class MagnumService(base.MagnumPersistentObject, base.MagnumObject):
    """Versioned object for a row of the magnum_service table.

    Tracks bookkeeping/liveness state of a running Magnum binary
    (host, binary name, disabled flags, heartbeat timestamp and
    report counter). All DB access goes through ``dbapi``.
    """

    # Version 1.0: Initial version
    VERSION = '1.0'

    dbapi = dbapi.get_instance()

    fields = {
        'id': fields.IntegerField(),
        'host': fields.StringField(nullable=True),
        'binary': fields.StringField(nullable=True),
        'disabled': fields.BooleanField(),
        'disabled_reason': fields.StringField(nullable=True),
        'last_seen_up': fields.DateTimeField(nullable=True),
        'forced_down': fields.BooleanField(),
        'report_count': fields.IntegerField(),
    }

    @staticmethod
    def _from_db_object(magnum_service, db_magnum_service):
        """Converts a database entity to a formal object."""
        # Copy every declared field verbatim, then clear the change
        # tracker so the freshly-loaded object reports no dirty fields.
        for field in magnum_service.fields:
            setattr(magnum_service, field, db_magnum_service[field])

        magnum_service.obj_reset_changes()
        return magnum_service

    @staticmethod
    def _from_db_object_list(db_objects, cls, context):
        """Converts a list of database entities to a list of formal objects."""
        return [
            MagnumService._from_db_object(cls(context), obj)
            for obj in db_objects
        ]

    @base.remotable_classmethod
    def get_by_host_and_binary(cls, context, host, binary):
        """Find a magnum_service based on its hostname and binary.

        :param host: The host on which the binary is running.
        :param binary: The name of the binary.
        :param context: Security context.
        :returns: a :class:`MagnumService` object.
        """
        # Returns None (rather than raising) when no matching row exists.
        db_magnum_service = cls.dbapi.get_magnum_service_by_host_and_binary(
            host, binary)
        if db_magnum_service is None:
            return None
        magnum_service = MagnumService._from_db_object(cls(context),
                                                       db_magnum_service)
        return magnum_service

    @base.remotable_classmethod
    def list(cls,
             context,
             limit=None,
             marker=None,
             sort_key=None,
             sort_dir=None):
        """Return a list of MagnumService objects.

        :param context: Security context.
        :param limit: maximum number of resources to return in a single result.
        :param marker: pagination marker for large data sets.
        :param sort_key: column to sort results by.
        :param sort_dir: direction to sort. "asc" or "desc".
        :returns: a list of :class:`MagnumService` object.

        """
        db_magnum_services = cls.dbapi.get_magnum_service_list(
            limit=limit, marker=marker, sort_key=sort_key, sort_dir=sort_dir)
        return MagnumService._from_db_object_list(db_magnum_services, cls,
                                                  context)

    @base.remotable
    def create(self, context=None):
        """Create a MagnumService record in the DB.

        :param context: Security context. NOTE: This should only
                        be used internally by the indirection_api.
                        Unfortunately, RPC requires context as the first
                        argument, even though we don't use it.
                        A context should be set when instantiating the
                        object, e.g.: MagnumService(context)
        """
        # Only the fields modified since instantiation are persisted.
        values = self.obj_get_changes()
        db_magnum_service = self.dbapi.create_magnum_service(values)
        self._from_db_object(self, db_magnum_service)

    @base.remotable
    def destroy(self, context=None):
        """Delete the MagnumService from the DB.

        :param context: Security context. NOTE: This should only
                        be used internally by the indirection_api.
                        Unfortunately, RPC requires context as the first
                        argument, even though we don't use it.
                        A context should be set when instantiating the
                        object, e.g.: MagnumService(context)
        """
        self.dbapi.destroy_magnum_service(self.id)
        self.obj_reset_changes()

    @base.remotable
    def save(self, context=None):
        """Save updates to this MagnumService.

        Updates will be made column by column based on the result
        of self.what_changed().

        :param context: Security context. NOTE: This should only
                        be used internally by the indirection_api.
                        Unfortunately, RPC requires context as the first
                        argument, even though we don't use it.
                        A context should be set when instantiating the
                        object, e.g.: MagnumService(context)
        """
        updates = self.obj_get_changes()
        self.dbapi.update_magnum_service(self.id, updates)
        self.obj_reset_changes()

    @base.remotable
    def report_state_up(self, context=None):
        """Touching the magnum_service record to show aliveness.

        :param context: Security context. NOTE: This should only
                        be used internally by the indirection_api.
                        Unfortunately, RPC requires context as the first
                        argument, even though we don't use it.
                        A context should be set when instantiating the
                        object, e.g.: MagnumService(context)
        """
        # Bumping report_count marks the field dirty, so save() pushes
        # it (and an updated timestamp, presumably set by the DB layer
        # -- TODO confirm) to the magnum_service row.
        self.report_count += 1
        self.save()
Ejemplo n.º 18
0
class Resource(
        heat_base.HeatObject,
        base.VersionedObjectDictCompat,
        base.ComparableVersionedObject,
):
    """Versioned object for a Heat stack resource DB row.

    Thin object layer over ``db_api.resource_*``; also transparently
    decrypts ``properties_data`` on load (see ``_from_db_object``) and
    offers ``encrypt_properties_data`` for the write path.
    """

    fields = {
        'id':
        fields.IntegerField(),
        'uuid':
        fields.StringField(),
        'stack_id':
        fields.StringField(),
        'created_at':
        fields.DateTimeField(read_only=True),
        'updated_at':
        fields.DateTimeField(nullable=True),
        'nova_instance':
        fields.StringField(nullable=True),
        'name':
        fields.StringField(nullable=True),
        'status':
        fields.StringField(nullable=True),
        'status_reason':
        fields.StringField(nullable=True),
        'action':
        fields.StringField(nullable=True),
        'rsrc_metadata':
        heat_fields.JsonField(nullable=True),
        'properties_data':
        heat_fields.JsonField(nullable=True),
        'properties_data_encrypted':
        fields.BooleanField(default=False),
        'data':
        fields.ListOfObjectsField(resource_data.ResourceData, nullable=True),
        'stack':
        fields.ObjectField(stack.Stack),
        'engine_id':
        fields.StringField(nullable=True),
        'atomic_key':
        fields.IntegerField(nullable=True),
        'current_template_id':
        fields.IntegerField(),
        'needed_by':
        heat_fields.ListField(nullable=True, default=None),
        'requires':
        heat_fields.ListField(nullable=True, default=None),
        'replaces':
        fields.IntegerField(nullable=True),
        'replaced_by':
        fields.IntegerField(nullable=True),
        'root_stack_id':
        fields.StringField(nullable=True),
    }

    @staticmethod
    def _from_db_object(resource, context, db_resource):
        """Populate *resource* from *db_resource*; returns None for None.

        'data' rows are converted to ResourceData objects; all other
        fields are copied verbatim. Encrypted properties_data is
        decrypted in place before the object is handed out.
        """
        if db_resource is None:
            return None
        for field in resource.fields:
            if field == 'data':
                resource['data'] = [
                    resource_data.ResourceData._from_db_object(
                        resource_data.ResourceData(context), resd)
                    for resd in db_resource.data
                ]
            else:
                resource[field] = db_resource[field]

        if resource.properties_data_encrypted and resource.properties_data:
            properties_data = {}
            # Each stored value is a (method, ciphertext) pair -- the
            # counterpart of encrypt_properties_data below; crypt.decrypt
            # yields a JSON string of the original property value.
            for prop_name, prop_value in resource.properties_data.items():
                method, value = prop_value
                decrypted_value = crypt.decrypt(method, value)
                prop_string = jsonutils.loads(decrypted_value)
                properties_data[prop_name] = prop_string
            resource.properties_data = properties_data

        resource._context = context
        # Freshly-loaded object: nothing should count as modified.
        resource.obj_reset_changes()
        return resource

    @classmethod
    def get_obj(cls, context, resource_id):
        resource_db = db_api.resource_get(context, resource_id)
        return cls._from_db_object(cls(context), context, resource_db)

    @classmethod
    def get_all(cls, context):
        # NOTE(review): resource_get_all appears to return a mapping of
        # resource name -> DB row -- confirm against db_api.
        resources_db = db_api.resource_get_all(context)
        resources = [
            (resource_name,
             cls._from_db_object(cls(context), context, resource_db))
            for resource_name, resource_db in six.iteritems(resources_db)
        ]
        return dict(resources)

    @classmethod
    def create(cls, context, values):
        return cls._from_db_object(cls(context), context,
                                   db_api.resource_create(context, values))

    @classmethod
    def delete(cls, context, resource_id):
        resource_db = db_api.resource_get(context, resource_id)
        resource_db.delete()

    @classmethod
    def exchange_stacks(cls, context, resource_id1, resource_id2):
        return db_api.resource_exchange_stacks(context, resource_id1,
                                               resource_id2)

    @classmethod
    def get_all_by_stack(cls, context, stack_id, key_id=False, filters=None):
        resources_db = db_api.resource_get_all_by_stack(
            context, stack_id, key_id, filters)
        resources = [
            (resource_key,
             cls._from_db_object(cls(context), context, resource_db))
            for resource_key, resource_db in six.iteritems(resources_db)
        ]
        return dict(resources)

    @classmethod
    def get_by_name_and_stack(cls, context, resource_name, stack_id):
        resource_db = db_api.resource_get_by_name_and_stack(
            context, resource_name, stack_id)
        return cls._from_db_object(cls(context), context, resource_db)

    @classmethod
    def get_by_physical_resource_id(cls, context, physical_resource_id):
        resource_db = db_api.resource_get_by_physical_resource_id(
            context, physical_resource_id)
        return cls._from_db_object(cls(context), context, resource_db)

    @classmethod
    def update_by_id(cls, context, resource_id, values):
        resource_db = db_api.resource_get(context, resource_id)
        resource_db.update_and_save(values)

    def update_and_save(self, values):
        resource_db = db_api.resource_get(self._context, self.id)
        resource_db.update_and_save(values)

    def select_and_update(self, values, expected_engine_id=None, atomic_key=0):
        # Optimistic-concurrency update: only succeeds when the row's
        # atomic_key/engine_id still match the expected values.
        return db_api.resource_update(self._context,
                                      self.id,
                                      values,
                                      atomic_key=atomic_key,
                                      expected_engine_id=expected_engine_id)

    def refresh(self, attrs=None):
        resource_db = db_api.resource_get(self._context, self.id)
        resource_db.refresh(attrs=attrs)
        return self.__class__._from_db_object(self, self._context, resource_db)

    @staticmethod
    def encrypt_properties_data(data):
        """Return (encrypted?, data) -- values encrypted when configured.

        Each property value is JSON-serialized and encrypted; the first
        tuple element records whether encryption was applied so loaders
        know how to interpret the stored data.
        """
        if cfg.CONF.encrypt_parameters_and_properties and data:
            result = {}
            for prop_name, prop_value in data.items():
                prop_string = jsonutils.dumps(prop_value)
                encrypted_value = crypt.encrypt(prop_string)
                result[prop_name] = encrypted_value
            return (True, result)
        return (False, data)

    def update_metadata(self, metadata):
        # No-op when unchanged; otherwise an optimistic update guarded
        # by engine_id/atomic_key -- losing the race raises.
        if self.rsrc_metadata != metadata:
            rows_updated = self.select_and_update({'rsrc_metadata': metadata},
                                                  self.engine_id,
                                                  self.atomic_key)
            if not rows_updated:
                action = _('metadata setting for resource %s') % self.name
                raise exception.ConcurrentTransaction(action=action)
Ejemplo n.º 19
0
class Container(base.ZunPersistentObject, base.ZunObject):
    """Versioned object for a Zun container DB row.

    Wraps ``dbapi`` access and lazy-loads the ``pci_devices`` and
    ``exec_instances`` relations on demand (see ``obj_load_attr``).
    """

    # Version 1.0: Initial version
    # Version 1.1: Add container_id column
    # Version 1.2: Add memory column
    # Version 1.3: Add task_state column
    # Version 1.4: Add cpu, workdir, ports, hostname and labels columns
    # Version 1.5: Add meta column
    # Version 1.6: Add addresses column
    # Version 1.7: Add host column
    # Version 1.8: Add restart_policy
    # Version 1.9: Add status_detail column
    # Version 1.10: Add tty, stdin_open
    # Version 1.11: Add image_driver
    # Version 1.12: Add 'Created' to ContainerStatus
    # Version 1.13: Add more task states for container
    # Version 1.14: Add method 'list_by_host'
    # Version 1.15: Combine tty and stdin_open
    # Version 1.16: Add websocket_url and token
    # Version 1.17: Add security_groups
    # Version 1.18: Add auto_remove
    # Version 1.19: Add runtime column
    # Version 1.20: Change runtime to String type
    # Version 1.21: Add pci_device attribute
    # Version 1.22: Add 'Deleting' to ContainerStatus
    # Version 1.23: Add the missing 'pci_devices' attribute
    # Version 1.24: Add the storage_opt attribute
    # Version 1.25: Change TaskStateField definition
    # Version 1.26:  Add auto_heal
    # Version 1.27: Make auto_heal field nullable
    # Version 1.28: Add 'Dead' to ContainerStatus
    # Version 1.29: Add 'Restarting' to ContainerStatus
    # Version 1.30: Add capsule_id attribute
    # Version 1.31: Add 'started_at' attribute
    # Version 1.32: Add 'exec_instances' attribute
    # Version 1.33: Change 'command' to List type
    # Version 1.34: Add privileged to container
    # Version 1.35: Add 'healthcheck' attribute
    # Version 1.36: Add 'get_count' method
    VERSION = '1.36'

    fields = {
        'id': fields.IntegerField(),
        'container_id': fields.StringField(nullable=True),
        'uuid': fields.UUIDField(nullable=True),
        'name': fields.StringField(nullable=True),
        'project_id': fields.StringField(nullable=True),
        'user_id': fields.StringField(nullable=True),
        'image': fields.StringField(nullable=True),
        'cpu': fields.FloatField(nullable=True),
        'memory': fields.StringField(nullable=True),
        'command': fields.ListOfStringsField(nullable=True),
        'status': z_fields.ContainerStatusField(nullable=True),
        'status_reason': fields.StringField(nullable=True),
        'task_state': z_fields.TaskStateField(nullable=True),
        'environment': fields.DictOfStringsField(nullable=True),
        'workdir': fields.StringField(nullable=True),
        'auto_remove': fields.BooleanField(nullable=True),
        'ports': z_fields.ListOfIntegersField(nullable=True),
        'hostname': fields.StringField(nullable=True),
        'labels': fields.DictOfStringsField(nullable=True),
        'meta': fields.DictOfStringsField(nullable=True),
        'addresses': z_fields.JsonField(nullable=True),
        'image_pull_policy': fields.StringField(nullable=True),
        'host': fields.StringField(nullable=True),
        'restart_policy': fields.DictOfStringsField(nullable=True),
        'status_detail': fields.StringField(nullable=True),
        'interactive': fields.BooleanField(nullable=True),
        'image_driver': fields.StringField(nullable=True),
        'websocket_url': fields.StringField(nullable=True),
        'websocket_token': fields.StringField(nullable=True),
        'security_groups': fields.ListOfStringsField(nullable=True),
        'runtime': fields.StringField(nullable=True),
        'pci_devices': fields.ListOfObjectsField('PciDevice',
                                                 nullable=True),
        'disk': fields.IntegerField(nullable=True),
        'auto_heal': fields.BooleanField(nullable=True),
        'capsule_id': fields.IntegerField(nullable=True),
        'started_at': fields.DateTimeField(tzinfo_aware=False, nullable=True),
        'exec_instances': fields.ListOfObjectsField('ExecInstance',
                                                    nullable=True),
        'privileged': fields.BooleanField(nullable=True),
        'healthcheck': z_fields.JsonField(nullable=True),
    }

    @staticmethod
    def _from_db_object(container, db_container):
        """Converts a database entity to a formal object."""
        for field in container.fields:
            # Relations are lazy-loaded via obj_load_attr, not copied
            # from the row.
            if field in ['pci_devices', 'exec_instances']:
                continue
            setattr(container, field, db_container[field])

        container.obj_reset_changes()
        return container

    @staticmethod
    def _from_db_object_list(db_objects, cls, context):
        """Converts a list of database entities to a list of formal objects."""
        return [Container._from_db_object(cls(context), obj)
                for obj in db_objects]

    @base.remotable_classmethod
    def get_by_uuid(cls, context, uuid):
        """Find a container based on uuid and return a :class:`Container` object.

        :param uuid: the uuid of a container.
        :param context: Security context
        :returns: a :class:`Container` object.
        """
        db_container = dbapi.get_container_by_uuid(context, uuid)
        container = Container._from_db_object(cls(context), db_container)
        return container

    @base.remotable_classmethod
    def get_by_name(cls, context, name):
        """Find a container based on name and return a Container object.

        :param name: the logical name of a container.
        :param context: Security context
        :returns: a :class:`Container` object.
        """
        db_container = dbapi.get_container_by_name(context, name)
        container = Container._from_db_object(cls(context), db_container)
        return container

    @base.remotable_classmethod
    def list(cls, context, limit=None, marker=None,
             sort_key=None, sort_dir=None, filters=None):
        """Return a list of Container objects.

        :param context: Security context.
        :param limit: maximum number of resources to return in a single result.
        :param marker: pagination marker for large data sets.
        :param sort_key: column to sort results by.
        :param sort_dir: direction to sort. "asc" or "desc".
        :param filters: filters when list containers, the filter name could be
                        'name', 'image', 'project_id', 'user_id', 'memory'.
                        For example, filters={'image': 'nginx'}
        :returns: a list of :class:`Container` object.

        """
        db_containers = dbapi.list_containers(
            context, limit=limit, marker=marker, sort_key=sort_key,
            sort_dir=sort_dir, filters=filters)
        return Container._from_db_object_list(db_containers, cls, context)

    @base.remotable_classmethod
    def list_by_host(cls, context, host):
        """Return a list of Container objects by host.

        :param context: Security context.
        :param host: A compute host.
        :returns: a list of :class:`Container` object.

        """
        db_containers = dbapi.list_containers(context, filters={'host': host})
        return Container._from_db_object_list(db_containers, cls, context)

    @base.remotable_classmethod
    def list_by_capsule_id(cls, context, capsule_id):
        """Return a list of Container objects by capsule_id.

        :param context: Security context.
        :param host: A capsule id.
        :returns: a list of :class:`Container` object.

        """
        db_containers = dbapi.list_containers(
            context, filters={'capsule_id': capsule_id})
        return Container._from_db_object_list(db_containers, cls, context)

    @base.remotable
    def create(self, context):
        """Create a Container record in the DB.

        :param context: Security context. NOTE: This should only
                        be used internally by the indirection_api.
                        Unfortunately, RPC requires context as the first
                        argument, even though we don't use it.
                        A context should be set when instantiating the
                        object, e.g.: Container(context)

        """
        # Persist only the fields modified since instantiation.
        values = self.obj_get_changes()
        db_container = dbapi.create_container(context, values)
        self._from_db_object(self, db_container)

    @base.remotable
    def destroy(self, context=None):
        """Delete the Container from the DB.

        :param context: Security context. NOTE: This should only
                        be used internally by the indirection_api.
                        Unfortunately, RPC requires context as the first
                        argument, even though we don't use it.
                        A context should be set when instantiating the
                        object, e.g.: Container(context)
        """
        dbapi.destroy_container(context, self.uuid)
        self.obj_reset_changes()

    @base.remotable
    def save(self, context=None):
        """Save updates to this Container.

        Updates will be made column by column based on the result
        of self.what_changed().

        :param context: Security context. NOTE: This should only
                        be used internally by the indirection_api.
                        Unfortunately, RPC requires context as the first
                        argument, even though we don't use it.
                        A context should be set when instantiating the
                        object, e.g.: Container(context)
        """
        updates = self.obj_get_changes()
        dbapi.update_container(context, self.uuid, updates)

        self.obj_reset_changes()

    @base.remotable
    def refresh(self, context=None):
        """Loads updates for this Container.

        Loads a container with the same uuid from the database and
        checks for updated attributes. Updates are applied from
        the loaded container column by column, if there are any updates.

        :param context: Security context. NOTE: This should only
                        be used internally by the indirection_api.
                        Unfortunately, RPC requires context as the first
                        argument, even though we don't use it.
                        A context should be set when instantiating the
                        object, e.g.: Container(context)
        """
        current = self.__class__.get_by_uuid(self._context, uuid=self.uuid)
        for field in self.fields:
            # Only overwrite fields that are set locally and differ
            # from the freshly loaded copy.
            if self.obj_attr_is_set(field) and \
               getattr(self, field) != getattr(current, field):
                setattr(self, field, getattr(current, field))

    def get_sandbox_id(self):
        # Sandbox id is stashed inside the free-form 'meta' dict.
        if self.meta:
            return self.meta.get('sandbox_id', None)
        else:
            return None

    def set_sandbox_id(self, sandbox_id):
        if self.meta is None:
            self.meta = {'sandbox_id': sandbox_id}
        else:
            # Mutating the dict in place does not trip OVO change
            # tracking, so mark 'meta' dirty explicitly.
            self.meta['sandbox_id'] = sandbox_id
            self._changed_fields.add('meta')

    def obj_load_attr(self, attrname):
        """Lazy-load an optional relation attribute on first access."""
        if attrname not in CONTAINER_OPTIONAL_ATTRS:
            raise exception.ObjectActionError(
                action='obj_load_attr',
                reason=_('attribute %s not lazy-loadable') % attrname)

        if not self._context:
            raise exception.OrphanedObjectError(method='obj_load_attr',
                                                objtype=self.obj_name())

        LOG.debug("Lazy-loading '%(attr)s' on %(name)s uuid %(uuid)s",
                  {'attr': attrname,
                   'name': self.obj_name(),
                   'uuid': self.uuid,
                   })

        # NOTE(danms): We handle some fields differently here so that we
        # can be more efficient
        if attrname == 'pci_devices':
            self._load_pci_devices()

        if attrname == 'exec_instances':
            self._load_exec_instances()

        self.obj_reset_changes([attrname])

    def _load_pci_devices(self):
        # Fetch PCI devices attached to this container (keyed by uuid).
        self.pci_devices = pci_device.PciDevice.list_by_container_uuid(
            self._context, self.uuid)

    def _load_exec_instances(self):
        # Fetch exec instances for this container (keyed by integer id).
        self.exec_instances = exec_inst.ExecInstance.list_by_container_id(
            self._context, self.id)

    @base.remotable_classmethod
    def get_count(cls, context, project_id, flag):
        """Get the counts of Container objects in the database.

        :param context: The request context for database access.
        :param project_id: The project_id to count across.
        :param flag: The name of resource, one of the following options:
                     - containers: Count the number of containers owned by the
                     project.
                     - memory: The sum of containers's memory.
                     - cpu: The sum of container's cpu.
                     - disk: The sum of container's disk size.
        """
        # count_usage returns a single-element row; a NULL aggregate
        # (no containers) is normalized to 0.
        usage = dbapi.count_usage(context, project_id, flag)[0] or 0
        return usage
Ejemplo n.º 20
0
class Cluster(base.CinderPersistentObject, base.CinderObject,
              base.CinderComparableObject):
    """Cluster Versioned Object.

    Method get_by_id supports as additional named arguments:
        - get_services: If we want to load all services from this cluster.
        - services_summary: If we want to load num_nodes and num_down_nodes
                            fields.
        - is_up: Boolean value to filter based on the cluster's up status.
        - read_deleted: Filtering based on delete status. Default value "no".
        - Any other cluster field will be used as a filter.
    """
    # Version 1.0: Initial version
    VERSION = '1.0'
    # Fields that are only loaded when explicitly requested (see
    # _get_expected_attrs) because they are expensive aggregates / joins.
    OPTIONAL_FIELDS = ('num_hosts', 'num_down_hosts', 'services')

    # NOTE(geguileo): We don't want to expose race_preventer field at the OVO
    # layer since it is only meant for the DB layer internal mechanism to
    # prevent races.
    fields = {
        'id': fields.IntegerField(),
        'name': fields.StringField(nullable=False),
        'binary': fields.StringField(nullable=False),
        'disabled': fields.BooleanField(default=False, nullable=True),
        'disabled_reason': fields.StringField(nullable=True),
        'num_hosts': fields.IntegerField(default=0, read_only=True),
        'num_down_hosts': fields.IntegerField(default=0, read_only=True),
        'last_heartbeat': fields.DateTimeField(nullable=True, read_only=True),
        'services': fields.ObjectField('ServiceList', nullable=True,
                                       read_only=True),
    }

    @classmethod
    def _get_expected_attrs(cls, context, *args, **kwargs):
        """Return expected attributes when getting a cluster.

        Expected attributes depend on whether we are retrieving all related
        services as well as if we are getting the services summary.
        """
        expected_attrs = []
        if kwargs.get('get_services'):
            expected_attrs.append('services')
        if kwargs.get('services_summary'):
            expected_attrs.extend(('num_hosts', 'num_down_hosts'))
        return expected_attrs

    @staticmethod
    def _from_db_object(context, cluster, db_cluster, expected_attrs=None):
        """Fill cluster OVO fields from cluster ORM instance."""
        expected_attrs = expected_attrs or tuple()
        for name, field in cluster.fields.items():
            # The only field that cannot be assigned using setattr is services,
            # because it is an ObjectField.   So we don't assign the value if
            # it's a non expected optional field or if it's services field.
            if ((name in Cluster.OPTIONAL_FIELDS
                 and name not in expected_attrs) or name == 'services'):
                continue
            value = getattr(db_cluster, name)
            setattr(cluster, name, value)

        cluster._context = context
        if 'services' in expected_attrs:
            # Build a ServiceList OVO out of the joined ORM service rows.
            cluster.services = base.obj_make_list(
                context,
                objects.ServiceList(context),
                objects.Service,
                db_cluster.services)

        cluster.obj_reset_changes()
        return cluster

    def obj_load_attr(self, attrname):
        """Lazy load services attribute."""
        # NOTE(geguileo): We only allow lazy loading services to raise
        # awareness of the high cost of lazy loading num_hosts and
        # num_down_hosts, so if we are going to need this information we should
        # be certain we really need it and it should loaded when retrieving the
        # data from the DB the first time we read the OVO.
        if attrname != 'services':
            raise exception.ObjectActionError(
                action='obj_load_attr',
                reason=_('attribute %s not lazy-loadable') % attrname)
        if not self._context:
            raise exception.OrphanedObjectError(method='obj_load_attr',
                                                objtype=self.obj_name())

        self.services = objects.ServiceList.get_all(
            self._context, {'cluster_name': self.name})

        self.obj_reset_changes(fields=('services',))

    def create(self):
        """Persist this cluster in the DB.

        :raises ObjectActionError: if the object was already created, or if
            any read-only optional field has been assigned.
        """
        if self.obj_attr_is_set('id'):
            raise exception.ObjectActionError(action='create',
                                              reason=_('already created'))
        updates = self.cinder_obj_get_changes()
        if updates:
            # Optional fields are read-only aggregates; refuse to write them.
            for field in self.OPTIONAL_FIELDS:
                if field in updates:
                    raise exception.ObjectActionError(
                        action='create', reason=_('%s assigned') % field)

        db_cluster = db.cluster_create(self._context, updates)
        self._from_db_object(self._context, self, db_cluster)

    def save(self):
        """Write pending field changes to the DB (no-op if nothing changed).

        :raises ObjectActionError: if a read-only optional field changed.
        """
        updates = self.cinder_obj_get_changes()
        if updates:
            for field in self.OPTIONAL_FIELDS:
                if field in updates:
                    raise exception.ObjectActionError(
                        action='save', reason=_('%s changed') % field)
            db.cluster_update(self._context, self.id, updates)
            self.obj_reset_changes()

    def destroy(self):
        """Delete the cluster row and sync back DB-side updated values."""
        # Destruction requires admin privileges at the DB layer.
        with self.obj_as_admin():
            updated_values = db.cluster_destroy(self._context, self.id)
        for field, value in updated_values.items():
            setattr(self, field, value)
        self.obj_reset_changes(updated_values.keys())

    def is_up(self):
        """Return True if the cluster heartbeated within the expiry window."""
        return (self.last_heartbeat and
                self.last_heartbeat >= utils.service_expired_time(True))
# ---- Example 21 (scraped sample separator) ----
class VolumeMapping(base.ZunPersistentObject, base.ZunObject):
    # Version 1.0: Initial version
    # Version 1.1: Add field "auto_remove"
    # Version 1.2: Add field "host"
    # Version 1.3: Add field "contents"
    # Version 1.4: Rename field "volume_id" to "cinder_volume_id"
    # Version 1.5: Add method "count"
    VERSION = '1.5'

    fields = {
        'id': fields.IntegerField(),
        'uuid': fields.UUIDField(nullable=False),
        'project_id': fields.StringField(nullable=True),
        'user_id': fields.StringField(nullable=True),
        'cinder_volume_id': fields.UUIDField(nullable=True),
        'volume_provider': fields.StringField(nullable=False),
        'container_path': fields.StringField(nullable=True),
        'container_uuid': fields.UUIDField(nullable=True),
        'container': fields.ObjectField('ContainerBase', nullable=True),
        'connection_info': fields.SensitiveStringField(nullable=True),
        'auto_remove': fields.BooleanField(nullable=True),
        'host': fields.StringField(nullable=True),
        'contents': fields.SensitiveStringField(nullable=True),
        # NOTE(review): 'volume_id' appears to reference the internal Volume
        # record loaded into 'volume', distinct from 'cinder_volume_id' —
        # confirm against the DB schema.
        'volume_id': fields.IntegerField(nullable=False),
        'volume': fields.ObjectField('Volume', nullable=True),
    }

    @staticmethod
    def _from_db_object(volume, db_volume):
        """Converts a database entity to a formal object."""
        for field in volume.fields:
            # Optional attrs (defined elsewhere in this module) are lazy
            # loaded via obj_load_attr instead of copied here.
            if field in VOLUME_MAPPING_OPTIONAL_ATTRS:
                continue
            setattr(volume, field, db_volume[field])

        volume.obj_reset_changes()
        return volume

    @staticmethod
    def _from_db_object_list(db_objects, cls, context):
        """Converts a list of database entities to a list of formal objects."""
        return [
            VolumeMapping._from_db_object(cls(context), obj)
            for obj in db_objects
        ]

    @base.remotable_classmethod
    def get_by_uuid(cls, context, uuid):
        """Find a volume mapping based on uuid.

        :param uuid: the uuid of a volume mapping.
        :param context: Security context
        :returns: a :class:`VolumeMapping` object.
        """
        db_volume = dbapi.get_volume_mapping_by_uuid(context, uuid)
        volume = VolumeMapping._from_db_object(cls(context), db_volume)
        return volume

    @base.remotable_classmethod
    def list(cls,
             context,
             limit=None,
             marker=None,
             sort_key=None,
             sort_dir=None,
             filters=None):
        """Return a list of VolumeMapping objects.

        :param context: Security context.
        :param limit: maximum number of resources to return in a single result.
        :param marker: pagination marker for large data sets.
        :param sort_key: column to sort results by.
        :param sort_dir: direction to sort. "asc" or "desc".
        :param filters: filters when list volume mappings.
        :returns: a list of :class:`VolumeMapping` object.

        """
        db_volumes = dbapi.list_volume_mappings(context,
                                                limit=limit,
                                                marker=marker,
                                                sort_key=sort_key,
                                                sort_dir=sort_dir,
                                                filters=filters)
        return VolumeMapping._from_db_object_list(db_volumes, cls, context)

    @base.remotable_classmethod
    def list_by_container(cls, context, container_uuid):
        """Return all VolumeMapping objects attached to a container."""
        filters = {'container_uuid': container_uuid}
        db_volumes = dbapi.list_volume_mappings(context, filters=filters)
        return VolumeMapping._from_db_object_list(db_volumes, cls, context)

    @base.remotable_classmethod
    def list_by_cinder_volume(cls, context, cinder_volume_id):
        """Return all VolumeMapping objects backed by a Cinder volume."""
        filters = {'cinder_volume_id': cinder_volume_id}
        db_volumes = dbapi.list_volume_mappings(context, filters=filters)
        return VolumeMapping._from_db_object_list(db_volumes, cls, context)

    @base.remotable_classmethod
    def count(cls, context, **filters):
        """Return the number of volume mappings matching the filters."""
        return dbapi.count_volume_mappings(context, **filters)

    @base.remotable
    def create(self, context):
        """Create a VolumeMapping record in the DB.

        :param context: Security context. NOTE: This should only
                        be used internally by the indirection_api.
                        Unfortunately, RPC requires context as the first
                        argument, even though we don't use it.
                        A context should be set when instantiating the
                        object.

        """
        if self.obj_attr_is_set('id'):
            raise exception.ObjectActionError(action='create',
                                              reason='already created')
        values = self.obj_get_changes()
        # Related objects cannot be written through this mapping.
        if 'container' in values:
            raise exception.ObjectActionError(action='create',
                                              reason='container assigned')
        if 'volume' in values:
            raise exception.ObjectActionError(action='create',
                                              reason='volume assigned')

        # _create_volume mutates 'values': it pops volume-owned attributes
        # and sets 'volume_id' before the mapping row itself is created.
        self._create_volume(context, values)
        db_volume = dbapi.create_volume_mapping(context, values)
        self._from_db_object(self, db_volume)

    def _create_volume(self, context, values):
        """Split volume-owned attributes out of *values* and create a Volume.

        Pops every VOLUME_ATTRS entry out of *values* (mutating it), and if
        no 'volume_id' was supplied creates a new Volume record and stores
        its id back into *values*.
        """
        volume_values = {}
        for attrname in list(values.keys()):
            if attrname in VOLUME_ATTRS:
                volume_values[attrname] = values.pop(attrname)
        # NOTE(review): assumes 'user_id' and 'project_id' are always present
        # in the changed values — would raise KeyError otherwise; confirm
        # against callers.
        volume_values['user_id'] = values['user_id']
        volume_values['project_id'] = values['project_id']
        if 'volume_id' not in values:
            volume = volume_obj.Volume(context, **volume_values)
            volume.create(context)
            values['volume_id'] = volume.id

    @base.remotable
    def destroy(self, context=None):
        """Delete the VolumeMapping from the DB.

        :param context: Security context. NOTE: This should only
                        be used internally by the indirection_api.
                        Unfortunately, RPC requires context as the first
                        argument, even though we don't use it.
                        A context should be set when instantiating the
                        object.
        """
        context = context or self._context
        if not self.obj_attr_is_set('id'):
            raise exception.ObjectActionError(action='destroy',
                                              reason='already destroyed')
        dbapi.destroy_volume_mapping(context, self.uuid)
        self._destroy_volume(context)
        # Unset 'id' so a subsequent destroy raises 'already destroyed'.
        delattr(self, 'id')
        self.obj_reset_changes()

    def _destroy_volume(self, context):
        # Only remove the backing Volume record once no mapping references
        # it any more.
        if VolumeMapping.count(context, volume_id=self.volume_id) == 0:
            dbapi.destroy_volume(context, self.volume_id)

    @base.remotable
    def save(self, context=None):
        """Save updates to this VolumeMapping.

        Updates will be made column by column based on the result
        of self.what_changed().

        :param context: Security context. NOTE: This should only
                        be used internally by the indirection_api.
                        Unfortunately, RPC requires context as the first
                        argument, even though we don't use it.
                        A context should be set when instantiating the
                        object.
        """
        updates = self.obj_get_changes()
        if 'container' in updates:
            raise exception.ObjectActionError(action='save',
                                              reason='container changed')
        if 'volume' in updates:
            raise exception.ObjectActionError(action='save',
                                              reason='volume changed')
        updates.pop('id', None)
        # Volume-owned attributes are saved through the Volume object;
        # _update_volume pops them out of 'updates'.
        self._update_volume(context, updates)
        dbapi.update_volume_mapping(context, self.uuid, updates)

        self.obj_reset_changes()

    def _update_volume(self, context, values):
        """Move VOLUME_ATTRS out of *values* onto self.volume and save it."""
        volume = self.volume
        for attrname in list(values.keys()):
            if attrname in VOLUME_ATTRS:
                setattr(volume, attrname, values.pop(attrname))
        volume.save(context)

    @base.remotable
    def refresh(self, context=None):
        """Loads updates for this VolumeMapping.

        Loads a volume mapping with the same uuid from the database and
        checks for updated attributes. Updates are applied from
        the loaded volume mapping column by column, if there are any updates.

        :param context: Security context. NOTE: This should only
                        be used internally by the indirection_api.
                        Unfortunately, RPC requires context as the first
                        argument, even though we don't use it.
                        A context should be set when instantiating the
                        object.
        """
        current = self.__class__.get_by_uuid(self._context, uuid=self.uuid)
        for field in self.fields:
            if not self.obj_attr_is_set(field):
                continue
            # Related objects refresh themselves; scalar fields are copied
            # over only when they differ.
            if field == 'volume':
                self.volume.refresh()
            elif field == 'container':
                self.container.refresh()
            elif getattr(self, field) != getattr(current, field):
                setattr(self, field, getattr(current, field))
        self.obj_reset_changes()

    def obj_load_attr(self, attrname):
        """Lazy-load an optional attribute (volume attrs, container, volume)."""
        if attrname not in VOLUME_MAPPING_OPTIONAL_ATTRS:
            raise exception.ObjectActionError(
                action='obj_load_attr',
                reason='attribute %s not lazy-loadable' % attrname)
        if not self._context:
            raise exception.OrphanedObjectError(method='obj_load_attr',
                                                objtype=self.obj_name())

        LOG.debug("Lazy-loading '%(attr)s' on %(name)s", {
            'attr': attrname,
            'name': self.obj_name(),
        })

        # Volume-owned attributes are proxied from the related Volume object.
        if attrname in VOLUME_ATTRS:
            value = getattr(self.volume, attrname)
            setattr(self, attrname, value)
            self.obj_reset_changes(fields=[attrname])
        if attrname == 'container':
            self.container = container.ContainerBase.get_container_any_type(
                self._context, self.container_uuid)
            self.obj_reset_changes(fields=['container'])
        if attrname == 'volume':
            self.volume = volume_obj.Volume.get_by_id(self._context,
                                                      self.volume_id)
            self.obj_reset_changes(fields=['volume'])
# ---- Example 22 (scraped sample separator) ----
class Stack(
        heat_base.HeatObject,
        base.VersionedObjectDictCompat,
        base.ComparableVersionedObject,
):
    """Stack versioned object wrapping the stack DB API."""

    fields = {
        'id': fields.StringField(),
        'name': fields.StringField(),
        'raw_template_id': fields.IntegerField(),
        'backup': fields.BooleanField(),
        'created_at': fields.DateTimeField(read_only=True),
        'deleted_at': fields.DateTimeField(nullable=True),
        'disable_rollback': fields.BooleanField(),
        'nested_depth': fields.IntegerField(),
        'owner_id': fields.StringField(nullable=True),
        'stack_user_project_id': fields.StringField(nullable=True),
        'tenant': fields.StringField(nullable=True),
        'timeout': fields.IntegerField(nullable=True),
        'updated_at': fields.DateTimeField(nullable=True),
        'user_creds_id': fields.StringField(nullable=True),
        'username': fields.StringField(nullable=True),
        'action': fields.StringField(nullable=True),
        'status': fields.StringField(nullable=True),
        'status_reason': fields.StringField(nullable=True),
        'raw_template': fields.ObjectField('RawTemplate'),
        'convergence': fields.BooleanField(),
        'current_traversal': fields.StringField(),
        'current_deps': heat_fields.JsonField(),
        'prev_raw_template_id': fields.IntegerField(),
        'prev_raw_template': fields.ObjectField('RawTemplate'),
        'tags': fields.ObjectField('StackTagList'),
        'parent_resource_name': fields.StringField(nullable=True),
    }

    @staticmethod
    def _from_db_object(context, stack, db_stack):
        """Populate a Stack OVO from a stack DB row.

        'raw_template' and 'tags' are converted to their own OVOs; every
        other field is copied verbatim from the row's attribute dict
        (dict-style item assignment comes from VersionedObjectDictCompat).
        """
        for field in stack.fields:
            if field == 'raw_template':
                stack['raw_template'] = (raw_template.RawTemplate.get_by_id(
                    context, db_stack['raw_template_id']))
            elif field == 'tags':
                stack['tags'] = stack_tag.StackTagList.from_db_object(
                    context, db_stack.get(field))
            else:
                stack[field] = db_stack.__dict__.get(field)
        stack._context = context
        stack.obj_reset_changes()
        return stack

    @classmethod
    def get_root_id(cls, context, stack_id):
        """Return the id of the root stack of the given stack."""
        return db_api.stack_get_root_id(context, stack_id)

    @classmethod
    def get_by_id(cls, context, stack_id, **kwargs):
        """Return the Stack with the given id, or None if not found."""
        db_stack = db_api.stack_get(context, stack_id, **kwargs)
        if not db_stack:
            return None
        stack = cls._from_db_object(context, cls(context), db_stack)
        return stack

    @classmethod
    def get_by_name_and_owner_id(cls, context, stack_name, owner_id):
        """Return the Stack with the given name and owner, or None."""
        db_stack = db_api.stack_get_by_name_and_owner_id(
            context, six.text_type(stack_name), owner_id)
        if not db_stack:
            return None
        stack = cls._from_db_object(context, cls(context), db_stack)
        return stack

    @classmethod
    def get_by_name(cls, context, stack_name):
        """Return the Stack with the given name, or None if not found."""
        db_stack = db_api.stack_get_by_name(context, six.text_type(stack_name))
        if not db_stack:
            return None
        stack = cls._from_db_object(context, cls(context), db_stack)
        return stack

    @classmethod
    def get_all(cls, context, *args, **kwargs):
        """Yield all stacks matching the DB API filters (a generator).

        Stacks whose related objects disappear mid-iteration (NotFound)
        are silently skipped.
        """
        db_stacks = db_api.stack_get_all(context, *args, **kwargs)
        for db_stack in db_stacks:
            try:
                yield cls._from_db_object(context, cls(context), db_stack)
            except exception.NotFound:
                pass

    @classmethod
    def get_all_by_owner_id(cls, context, owner_id):
        """Yield all stacks owned by the given stack id (a generator)."""
        db_stacks = db_api.stack_get_all_by_owner_id(context, owner_id)
        for db_stack in db_stacks:
            try:
                yield cls._from_db_object(context, cls(context), db_stack)
            except exception.NotFound:
                pass

    @classmethod
    def count_all(cls, context, **kwargs):
        """Return the number of stacks matching the filters."""
        return db_api.stack_count_all(context, **kwargs)

    @classmethod
    def count_total_resources(cls, context, stack_id):
        """Return the total resource count for the given stack."""
        return db_api.stack_count_total_resources(context, stack_id)

    @classmethod
    def create(cls, context, values):
        """Create a stack row from *values* and return it as a Stack OVO."""
        return cls._from_db_object(context, cls(context),
                                   db_api.stack_create(context, values))

    @classmethod
    def update_by_id(cls, context, stack_id, values):
        """Update and return (boolean) if it was updated.

        Note: the underlying stack_update filters by current_traversal
        and stack_id.
        """
        return db_api.stack_update(context, stack_id, values)

    @classmethod
    def select_and_update(cls, context, stack_id, values, exp_trvsl=None):
        """Update the stack by selecting on traversal ID.

        Uses UPDATE ... WHERE (compare and swap) to catch any concurrent
        update problem.

        If the stack is found with given traversal, it is updated.

        If there occurs a race while updating, only one will succeed and
        other will get return value of False.
        """
        return db_api.stack_update(context,
                                   stack_id,
                                   values,
                                   exp_trvsl=exp_trvsl)

    @classmethod
    def persist_state_and_release_lock(cls, context, stack_id, engine_id,
                                       values):
        """Atomically persist stack state and release the engine lock."""
        return db_api.persist_state_and_release_lock(context, stack_id,
                                                     engine_id, values)

    @classmethod
    def delete(cls, context, stack_id):
        """Delete the stack row with the given id."""
        db_api.stack_delete(context, stack_id)

    def update_and_save(self, values):
        """Update this stack's row; raise NotFound if nothing was updated."""
        has_updated = self.__class__.update_by_id(self._context, self.id,
                                                  values)
        if not has_updated:
            raise exception.NotFound(
                _('Attempt to update a stack with id: '
                  '%(id)s %(traversal)s %(msg)s') % {
                      'id': self.id,
                      'traversal': self.current_traversal,
                      'msg': 'that does not exist'
                  })

    def __eq__(self, another):
        self.refresh()  # to make test object comparison work well
        return super(Stack, self).__eq__(another)

    def refresh(self):
        """Reload this stack from the DB (including soft-deleted rows)."""
        db_stack = db_api.stack_get(self._context, self.id, show_deleted=True)
        if db_stack is None:
            message = _('No stack exists with id "%s"') % str(self.id)
            raise exception.NotFound(message)
        return self.__class__._from_db_object(self._context, self, db_stack)

    @classmethod
    def encrypt_hidden_parameters(cls, tmpl):
        """Encrypt hidden template parameters in place via RawTemplate."""
        raw_template.RawTemplate.encrypt_hidden_parameters(tmpl)
# ---- Example 23 (scraped sample separator) ----
class Subnet(base.NeutronDbObject):
    """Subnet versioned object with a synthetic RBAC-derived 'shared' flag."""

    # Version 1.0: Initial version
    VERSION = '1.0'

    db_model = models_v2.Subnet

    fields = {
        'id': obj_fields.UUIDField(),
        'project_id': obj_fields.StringField(nullable=True),
        'name': obj_fields.StringField(nullable=True),
        'network_id': obj_fields.UUIDField(),
        'segment_id': obj_fields.UUIDField(nullable=True),
        'subnetpool_id': obj_fields.UUIDField(nullable=True),
        'ip_version': common_types.IPVersionEnumField(),
        'cidr': common_types.IPNetworkField(),
        'gateway_ip': obj_fields.IPAddressField(nullable=True),
        'allocation_pools': obj_fields.ListOfObjectsField('IPAllocationPool',
                                                          nullable=True),
        'enable_dhcp': obj_fields.BooleanField(nullable=True),
        'shared': obj_fields.BooleanField(nullable=True),
        'dns_nameservers': obj_fields.ListOfObjectsField('DNSNameServer',
                                                         nullable=True),
        'host_routes': obj_fields.ListOfObjectsField('Route', nullable=True),
        'ipv6_ra_mode': common_types.IPV6ModeEnumField(nullable=True),
        'ipv6_address_mode': common_types.IPV6ModeEnumField(nullable=True),
    }

    synthetic_fields = ['allocation_pools', 'dns_nameservers',
                        'host_routes', 'shared']

    foreign_keys = {'Network': {'network_id': 'id'}}

    fields_no_update = ['project_id']

    fields_need_translation = {'project_id': 'tenant_id',
                               'host_routes': 'routes'}

    def __init__(self, context=None, **kwargs):
        """Initialize and register 'shared' as an extra filter name."""
        super(Subnet, self).__init__(context, **kwargs)
        self.add_extra_filter_name('shared')

    def obj_load_attr(self, attrname):
        """Lazy-load 'shared'; defer every other attribute to the base."""
        if attrname != 'shared':
            super(Subnet, self).obj_load_attr(attrname)
            return
        return self._load_shared()

    def _load_shared(self, db_obj=None):
        """Compute and cache the synthetic 'shared' flag.

        When a DB object is supplied (the load-from-DB path) the rbac
        entries joined on it are consulted; otherwise the RBAC layer is
        queried directly with an elevated context.
        """
        if db_obj:
            # NOTE(korzen) db_obj is passed when Subnet object is loaded
            # from DB
            entries = db_obj.get('rbac_entries') or {}
            shared = rbac_db.RbacNeutronDbObjectMixin.is_network_shared(
                self.obj_context, entries)
        else:
            # NOTE(korzen) this case is used when Subnet object was
            # instantiated and without DB interaction (get_object(s), update,
            # create), it should be rare case to load 'shared' by that method
            shared = rbac_db.RbacNeutronDbObjectMixin.get_shared_with_tenant(
                self.obj_context.elevated(), rbac_db_models.NetworkRBAC,
                self.network_id, self.project_id)
        self.shared = shared
        self.obj_reset_changes(['shared'])

    def from_db_object(self, *objs):
        """Hydrate from DB objects, then derive 'shared' for each of them."""
        super(Subnet, self).from_db_object(*objs)
        for db_obj in objs:
            self._load_shared(db_obj)

    @classmethod
    def modify_fields_from_db(cls, db_obj):
        """Convert DB string values into IP/CIDR typed objects."""
        # TODO(korzen) remove this method when IP and CIDR decorator ready
        result = super(Subnet, cls).modify_fields_from_db(db_obj)
        if 'cidr' in result:
            result['cidr'] = utils.AuthenticIPNetwork(result['cidr'])
        gateway = result.get('gateway_ip')
        if gateway is not None:
            result['gateway_ip'] = netaddr.IPAddress(gateway)
        return result

    @classmethod
    def modify_fields_to_db(cls, fields):
        """Convert IP/CIDR typed values back into DB string form."""
        # TODO(korzen) remove this method when IP and CIDR decorator ready
        result = super(Subnet, cls).modify_fields_to_db(fields)
        if 'cidr' in result:
            result['cidr'] = cls.filter_to_str(result['cidr'])
        gateway = result.get('gateway_ip')
        if gateway is not None:
            result['gateway_ip'] = cls.filter_to_str(gateway)
        return result
# ---- Example 24 (scraped sample separator) ----
class Service(base.CinderPersistentObject, base.CinderObject,
              base.CinderObjectDictCompat, base.CinderComparableObject,
              base.ClusteredObject):
    """Service versioned object wrapping the service DB API."""

    # Version 1.0: Initial version
    # Version 1.1: Add rpc_current_version and object_current_version fields
    # Version 1.2: Add get_minimum_rpc_version() and get_minimum_obj_version()
    # Version 1.3: Add replication fields
    # Version 1.4: Add cluster fields
    # Version 1.5: Add UUID field
    # Version 1.6: Modify UUID field to be not nullable
    VERSION = '1.6'

    # Fields only loaded when explicitly requested (lazy loaded otherwise).
    OPTIONAL_FIELDS = ('cluster', )

    fields = {
        'id': fields.IntegerField(),
        'host': fields.StringField(nullable=True),
        'binary': fields.StringField(nullable=True),
        'cluster_name': fields.StringField(nullable=True),
        'cluster': fields.ObjectField('Cluster', nullable=True,
                                      read_only=True),
        'topic': fields.StringField(nullable=True),
        'report_count': fields.IntegerField(default=0),
        'disabled': fields.BooleanField(default=False, nullable=True),
        'availability_zone': fields.StringField(nullable=True,
                                                default='cinder'),
        'disabled_reason': fields.StringField(nullable=True),
        'modified_at': fields.DateTimeField(nullable=True),
        'rpc_current_version': fields.StringField(nullable=True),
        'object_current_version': fields.StringField(nullable=True),

        # Replication properties
        'replication_status': c_fields.ReplicationStatusField(nullable=True),
        'frozen': fields.BooleanField(default=False),
        'active_backend_id': fields.StringField(nullable=True),
        'uuid': fields.StringField(),
    }

    def obj_make_compatible(self, primitive, target_version):
        """Make a service representation compatible with a target version."""
        # Convert all related objects
        super(Service, self).obj_make_compatible(primitive, target_version)

        target_version = versionutils.convert_version_to_tuple(target_version)
        # Before v1.4 we didn't have cluster fields so we have to remove them.
        if target_version < (1, 4):
            for obj_field in ('cluster', 'cluster_name'):
                primitive.pop(obj_field, None)
        if target_version < (1, 5) and 'uuid' in primitive:
            del primitive['uuid']

    @staticmethod
    def _from_db_object(context, service, db_service, expected_attrs=None):
        """Populate a Service OVO from a service DB row.

        Optional fields are skipped (lazy loaded), as is a missing uuid.
        Integer fields default to 0 and datetime fields to None when the
        row has no value.  When 'cluster' is expected, the joined cluster
        row (if any) is converted to a Cluster OVO.
        """
        expected_attrs = expected_attrs or []
        for name, field in service.fields.items():
            if ((name == 'uuid' and not db_service.get(name))
                    or name in service.OPTIONAL_FIELDS):
                continue

            value = db_service.get(name)
            if isinstance(field, fields.IntegerField):
                value = value or 0
            elif isinstance(field, fields.DateTimeField):
                value = value or None
            service[name] = value

        service._context = context
        if 'cluster' in expected_attrs:
            db_cluster = db_service.get('cluster')
            # If this service doesn't belong to a cluster the cluster field in
            # the ORM instance will have value of None.
            if db_cluster:
                service.cluster = objects.Cluster(context)
                objects.Cluster._from_db_object(context, service.cluster,
                                                db_cluster)
            else:
                service.cluster = None

        service.obj_reset_changes()

        return service

    def obj_load_attr(self, attrname):
        """Lazy load the 'cluster' field (the only optional field)."""
        if attrname not in self.OPTIONAL_FIELDS:
            raise exception.ObjectActionError(
                action='obj_load_attr',
                reason=_('attribute %s not lazy-loadable') % attrname)
        if not self._context:
            raise exception.OrphanedObjectError(method='obj_load_attr',
                                                objtype=self.obj_name())

        # NOTE(geguileo): We only have 1 optional field, so we don't need to
        # confirm that we are loading the cluster.
        # If this service doesn't belong to a cluster (cluster_name is empty),
        # then cluster field will be None.
        if self.cluster_name:
            self.cluster = objects.Cluster.get_by_id(self._context,
                                                     None,
                                                     name=self.cluster_name)
        else:
            self.cluster = None
        self.obj_reset_changes(fields=(attrname, ))

    @classmethod
    def get_by_host_and_topic(cls, context, host, topic, disabled=False):
        """Return the service with the given host and topic."""
        db_service = db.service_get(context,
                                    disabled=disabled,
                                    host=host,
                                    topic=topic)
        return cls._from_db_object(context, cls(context), db_service)

    @classmethod
    def get_by_args(cls, context, host, binary_key):
        """Return the service with the given host and binary."""
        db_service = db.service_get(context, host=host, binary=binary_key)
        return cls._from_db_object(context, cls(context), db_service)

    @classmethod
    def get_by_uuid(cls, context, service_uuid):
        """Return the service with the given uuid."""
        db_service = db.service_get_by_uuid(context, service_uuid)
        return cls._from_db_object(context, cls(), db_service)

    def create(self):
        """Persist this service in the DB, generating a uuid if needed.

        :raises ObjectActionError: if already created or 'cluster' assigned.
        """
        if self.obj_attr_is_set('id'):
            raise exception.ObjectActionError(action='create',
                                              reason=_('already created'))
        updates = self.cinder_obj_get_changes()
        if 'cluster' in updates:
            raise exception.ObjectActionError(action='create',
                                              reason=_('cluster assigned'))
        if 'uuid' not in updates:
            updates['uuid'] = uuidutils.generate_uuid()
            self.uuid = updates['uuid']

        db_service = db.service_create(self._context, updates)
        self._from_db_object(self._context, self, db_service)

    def save(self):
        """Write pending field changes to the DB.

        :raises ObjectActionError: if the read-only 'cluster' field changed.
        """
        updates = self.cinder_obj_get_changes()
        if 'cluster' in updates:
            raise exception.ObjectActionError(action='save',
                                              reason=_('cluster changed'))
        if updates:
            db.service_update(self._context, self.id, updates)
            self.obj_reset_changes()

    def destroy(self):
        """Delete the service row and sync back DB-side updated values."""
        # Destruction requires admin privileges at the DB layer.
        with self.obj_as_admin():
            updated_values = db.service_destroy(self._context, self.id)
        self.update(updated_values)
        self.obj_reset_changes(updated_values.keys())

    @classmethod
    def _get_minimum_version(cls, attribute, context, binary):
        """Return the lowest version string of *attribute* across services.

        :raises ServiceTooOld: if any service still reports no version
            (a Liberty-era record).
        """
        services = ServiceList.get_all_by_binary(context, binary)
        min_ver = None
        min_ver_str = None
        for s in services:
            ver_str = getattr(s, attribute)
            if ver_str is None:
                # NOTE(dulek) None in *_current_version means that this
                # service is in Liberty version, which we now don't provide
                # backward compatibility to.
                msg = _('Service %s is in Liberty version. We do not provide '
                        'backward compatibility with Liberty now, so you '
                        'need to upgrade it, release by release if live '
                        'upgrade is required. After upgrade you may need to '
                        'remove any stale service records via '
                        '"cinder-manage service remove".') % s.binary
                raise exception.ServiceTooOld(msg)
            ver = versionutils.convert_version_to_int(ver_str)
            if min_ver is None or ver < min_ver:
                min_ver = ver
                min_ver_str = ver_str

        return min_ver_str

    @classmethod
    def get_minimum_rpc_version(cls, context, binary):
        """Return the minimum RPC version across services of *binary*."""
        return cls._get_minimum_version('rpc_current_version', context, binary)

    @classmethod
    def get_minimum_obj_version(cls, context, binary=None):
        """Return the minimum object version across services of *binary*."""
        return cls._get_minimum_version('object_current_version', context,
                                        binary)

    @property
    def is_up(self):
        """Check whether a service is up based on last heartbeat."""
        return (self.updated_at
                and self.updated_at >= utils.service_expired_time(True))
# ---- Example 25 (scraped sample separator) ----
class Container(base.ZunPersistentObject, base.ZunObject):
    """Versioned object wrapping a container row in the Zun database."""
    # Version 1.0: Initial version
    # Version 1.1: Add container_id column
    # Version 1.2: Add memory column
    # Version 1.3: Add task_state column
    # Version 1.4: Add cpu, workdir, ports, hostname and labels columns
    # Version 1.5: Add meta column
    # Version 1.6: Add addresses column
    # Version 1.7: Add host column
    # Version 1.8: Add restart_policy
    # Version 1.9: Add status_detail column
    # Version 1.10: Add tty, stdin_open
    # Version 1.11: Add image_driver
    # Version 1.12: Add 'Created' to ContainerStatus
    # Version 1.13: Add more task states for container
    # Version 1.14: Add method 'list_by_host'
    # Version 1.15: Combine tty and stdin_open
    # Version 1.16: Add websocket_url and token
    # Version 1.17: Add security_groups
    VERSION = '1.17'

    # Every key below is also read straight off the DB row in
    # _from_db_object, so field names must match the database columns.
    fields = {
        'id': fields.IntegerField(),
        'container_id': fields.StringField(nullable=True),
        'uuid': fields.UUIDField(nullable=True),
        'name': fields.StringField(nullable=True),
        'project_id': fields.StringField(nullable=True),
        'user_id': fields.StringField(nullable=True),
        'image': fields.StringField(nullable=True),
        'cpu': fields.FloatField(nullable=True),
        'memory': fields.StringField(nullable=True),
        'command': fields.StringField(nullable=True),
        'status': z_fields.ContainerStatusField(nullable=True),
        'status_reason': fields.StringField(nullable=True),
        'task_state': z_fields.TaskStateField(nullable=True),
        'environment': fields.DictOfStringsField(nullable=True),
        'workdir': fields.StringField(nullable=True),
        'ports': z_fields.ListOfIntegersField(nullable=True),
        'hostname': fields.StringField(nullable=True),
        'labels': fields.DictOfStringsField(nullable=True),
        'meta': fields.DictOfStringsField(nullable=True),
        'addresses': z_fields.JsonField(nullable=True),
        'image_pull_policy': fields.StringField(nullable=True),
        'host': fields.StringField(nullable=True),
        'restart_policy': fields.DictOfStringsField(nullable=True),
        'status_detail': fields.StringField(nullable=True),
        'interactive': fields.BooleanField(nullable=True),
        'image_driver': fields.StringField(nullable=True),
        'websocket_url': fields.StringField(nullable=True),
        'websocket_token': fields.StringField(nullable=True),
        'security_groups': fields.ListOfStringsField(nullable=True),
    }

    @staticmethod
    def _from_db_object(container, db_container):
        """Converts a database entity to a formal object."""
        for field in container.fields:
            setattr(container, field, db_container[field])

        # A freshly loaded object carries no pending changes.
        container.obj_reset_changes()
        return container

    @staticmethod
    def _from_db_object_list(db_objects, cls, context):
        """Converts a list of database entities to a list of formal objects."""
        return [
            Container._from_db_object(cls(context), obj) for obj in db_objects
        ]

    @base.remotable_classmethod
    def get_by_uuid(cls, context, uuid):
        """Find a container based on uuid and return a :class:`Container` object.

        :param uuid: the uuid of a container.
        :param context: Security context
        :returns: a :class:`Container` object.
        """
        db_container = dbapi.get_container_by_uuid(context, uuid)
        container = Container._from_db_object(cls(context), db_container)
        return container

    @base.remotable_classmethod
    def get_by_name(cls, context, name):
        """Find a container based on name and return a Container object.

        :param name: the logical name of a container.
        :param context: Security context
        :returns: a :class:`Container` object.
        """
        db_container = dbapi.get_container_by_name(context, name)
        container = Container._from_db_object(cls(context), db_container)
        return container

    @base.remotable_classmethod
    def list(cls,
             context,
             limit=None,
             marker=None,
             sort_key=None,
             sort_dir=None,
             filters=None):
        """Return a list of Container objects.

        :param context: Security context.
        :param limit: maximum number of resources to return in a single result.
        :param marker: pagination marker for large data sets.
        :param sort_key: column to sort results by.
        :param sort_dir: direction to sort. "asc" or "desc".
        :param filters: filters when list containers, the filter name could be
                        'name', 'image', 'project_id', 'user_id', 'memory'.
                        For example, filters={'image': 'nginx'}
        :returns: a list of :class:`Container` object.

        """
        db_containers = dbapi.list_containers(context,
                                              limit=limit,
                                              marker=marker,
                                              sort_key=sort_key,
                                              sort_dir=sort_dir,
                                              filters=filters)
        return Container._from_db_object_list(db_containers, cls, context)

    @base.remotable_classmethod
    def list_by_host(cls, context, host):
        """Return a list of Container objects by host.

        :param context: Security context.
        :param host: A compute host.
        :returns: a list of :class:`Container` object.

        """
        db_containers = dbapi.list_containers(context, filters={'host': host})
        return Container._from_db_object_list(db_containers, cls, context)

    @base.remotable
    def create(self, context):
        """Create a Container record in the DB.

        :param context: Security context. NOTE: This should only
                        be used internally by the indirection_api.
                        Unfortunately, RPC requires context as the first
                        argument, even though we don't use it.
                        A context should be set when instantiating the
                        object, e.g.: Container(context)

        """
        # Only dirty fields are persisted; the DB result is copied back
        # so defaults filled in by the DB layer become visible here.
        values = self.obj_get_changes()
        db_container = dbapi.create_container(context, values)
        self._from_db_object(self, db_container)

    @base.remotable
    def destroy(self, context=None):
        """Delete the Container from the DB.

        :param context: Security context. NOTE: This should only
                        be used internally by the indirection_api.
                        Unfortunately, RPC requires context as the first
                        argument, even though we don't use it.
                        A context should be set when instantiating the
                        object, e.g.: Container(context)
        """
        dbapi.destroy_container(context, self.uuid)
        self.obj_reset_changes()

    @base.remotable
    def save(self, context=None):
        """Save updates to this Container.

        Updates will be made column by column based on the result
        of self.what_changed().

        :param context: Security context. NOTE: This should only
                        be used internally by the indirection_api.
                        Unfortunately, RPC requires context as the first
                        argument, even though we don't use it.
                        A context should be set when instantiating the
                        object, e.g.: Container(context)
        """
        updates = self.obj_get_changes()
        dbapi.update_container(context, self.uuid, updates)

        self.obj_reset_changes()

    @base.remotable
    def refresh(self, context=None):
        """Loads updates for this Container.

        Loads a container with the same uuid from the database and
        checks for updated attributes. Updates are applied from
        the loaded container column by column, if there are any updates.

        :param context: Security context. NOTE: This should only
                        be used internally by the indirection_api.
                        Unfortunately, RPC requires context as the first
                        argument, even though we don't use it.
                        A context should be set when instantiating the
                        object, e.g.: Container(context)
        """
        current = self.__class__.get_by_uuid(self._context, uuid=self.uuid)
        # Only copy fields that were set locally and actually differ, so
        # unchanged attributes are not marked dirty.
        for field in self.fields:
            if self.obj_attr_is_set(field) and \
               getattr(self, field) != getattr(current, field):
                setattr(self, field, getattr(current, field))
Ejemplo n.º 26
0
class QosPolicy(base.NeutronDbObject):
    """Versioned object for a QoS policy and its attached rules.

    The 'rules' field is synthetic: it is loaded from the rule tables on
    demand (see obj_load_attr/reload_rules) rather than stored on the
    policy row itself.
    """
    # Version 1.0: Initial version
    # Version 1.1: QosDscpMarkingRule introduced
    VERSION = '1.1'

    # required by RbacNeutronMetaclass
    rbac_db_model = QosPolicyRBAC
    db_model = qos_db_model.QosPolicy

    port_binding_model = qos_db_model.QosPortPolicyBinding
    network_binding_model = qos_db_model.QosNetworkPolicyBinding

    fields = {
        'id': obj_fields.UUIDField(),
        'tenant_id': obj_fields.UUIDField(),
        'name': obj_fields.StringField(),
        'description': obj_fields.StringField(),
        'shared': obj_fields.BooleanField(default=False),
        'rules': obj_fields.ListOfObjectsField('QosRule', subclasses=True),
    }

    fields_no_update = ['id', 'tenant_id']

    synthetic_fields = ['rules']

    binding_models = {
        'network': network_binding_model,
        'port': port_binding_model
    }

    def obj_load_attr(self, attrname):
        """Lazy-load 'rules'; no other attribute supports lazy loading."""
        if attrname != 'rules':
            raise exceptions.ObjectActionError(action='obj_load_attr',
                                               reason=_('unable to load %s') %
                                               attrname)

        if not hasattr(self, attrname):
            self.reload_rules()

    def reload_rules(self):
        """Refresh 'rules' from the DB without marking the field dirty."""
        rules = rule_obj_impl.get_rules(self._context, self.id)
        setattr(self, 'rules', rules)
        self.obj_reset_changes(['rules'])

    def get_rule_by_id(self, rule_id):
        """Return rule specified by rule_id.

        @raise QosRuleNotFound: if there is no such rule in the policy.
        """

        for rule in self.rules:
            if rule_id == rule.id:
                return rule
        raise exceptions.QosRuleNotFound(policy_id=self.id, rule_id=rule_id)

    @classmethod
    def get_object(cls, context, **kwargs):
        """Fetch a single policy, enforcing access with the caller context.

        :returns: the policy with rules loaded, or None when not found or
                  not accessible to the caller.
        """
        # We want to get the policy regardless of its tenant id. We'll make
        # sure the tenant has permission to access the policy later on.
        admin_context = context.elevated()
        with db_api.autonested_transaction(admin_context.session):
            policy_obj = super(QosPolicy,
                               cls).get_object(admin_context, **kwargs)
            if (not policy_obj or not cls.is_accessible(context, policy_obj)):
                return

            policy_obj.reload_rules()
            return policy_obj

    @classmethod
    def get_objects(cls, context, **kwargs):
        """Fetch all matching policies visible to the caller context."""
        # We want to get the policy regardless of its tenant id. We'll make
        # sure the tenant has permission to access the policy later on.
        admin_context = context.elevated()
        with db_api.autonested_transaction(admin_context.session):
            objs = super(QosPolicy, cls).get_objects(admin_context, **kwargs)
            result = []
            for obj in objs:
                if not cls.is_accessible(context, obj):
                    continue
                obj.reload_rules()
                result.append(obj)
            return result

    @classmethod
    def _get_object_policy(cls, context, model, **kwargs):
        """Resolve the policy bound via *model* (a binding table), if any."""
        with db_api.autonested_transaction(context.session):
            binding_db_obj = obj_db_api.get_object(context, model, **kwargs)
            if binding_db_obj:
                return cls.get_object(context, id=binding_db_obj['policy_id'])

    @classmethod
    def get_network_policy(cls, context, network_id):
        """Return the policy attached to *network_id*, or None."""
        return cls._get_object_policy(context,
                                      cls.network_binding_model,
                                      network_id=network_id)

    @classmethod
    def get_port_policy(cls, context, port_id):
        """Return the policy attached to *port_id*, or None."""
        return cls._get_object_policy(context,
                                      cls.port_binding_model,
                                      port_id=port_id)

    # TODO(QoS): Consider extending base to trigger registered methods for us
    def create(self):
        """Persist the policy, then populate the synthetic 'rules' field."""
        with db_api.autonested_transaction(self._context.session):
            super(QosPolicy, self).create()
            self.reload_rules()

    def delete(self):
        """Delete the policy; refuse if any network/port is still bound."""
        with db_api.autonested_transaction(self._context.session):
            for object_type, model in self.binding_models.items():
                binding_db_obj = obj_db_api.get_object(self._context,
                                                       model,
                                                       policy_id=self.id)
                if binding_db_obj:
                    raise exceptions.QosPolicyInUse(
                        policy_id=self.id,
                        object_type=object_type,
                        object_id=binding_db_obj['%s_id' % object_type])

            super(QosPolicy, self).delete()

    def attach_network(self, network_id):
        """Bind this policy to a network."""
        qos_db_api.create_policy_network_binding(self._context,
                                                 policy_id=self.id,
                                                 network_id=network_id)

    def attach_port(self, port_id):
        """Bind this policy to a port."""
        qos_db_api.create_policy_port_binding(self._context,
                                              policy_id=self.id,
                                              port_id=port_id)

    def detach_network(self, network_id):
        """Remove this policy's binding to a network."""
        qos_db_api.delete_policy_network_binding(self._context,
                                                 policy_id=self.id,
                                                 network_id=network_id)

    def detach_port(self, port_id):
        """Remove this policy's binding to a port."""
        qos_db_api.delete_policy_port_binding(self._context,
                                              policy_id=self.id,
                                              port_id=port_id)

    @classmethod
    def _get_bound_tenant_ids(cls, session, binding_db, bound_db,
                              binding_db_id_column, policy_id):
        """Return tenant ids of *bound_db* rows bound to *policy_id*."""
        # The query yields single-column tuples; flatten them to plain ids.
        return list(
            itertools.chain.from_iterable(
                session.query(bound_db.tenant_id).join(
                    binding_db, bound_db.id == binding_db_id_column).filter(
                        binding_db.policy_id == policy_id).all()))

    @classmethod
    def get_bound_tenant_ids(cls, context, policy_id):
        """Implements RbacNeutronObject.get_bound_tenant_ids.

        :returns: set -- a set of tenants' ids dependant on QosPolicy.
        """
        net = models_v2.Network
        qosnet = qos_db_model.QosNetworkPolicyBinding
        port = models_v2.Port
        qosport = qos_db_model.QosPortPolicyBinding
        bound_tenants = []
        with db_api.autonested_transaction(context.session):
            bound_tenants.extend(
                cls._get_bound_tenant_ids(context.session, qosnet, net,
                                          qosnet.network_id, policy_id))
            bound_tenants.extend(
                cls._get_bound_tenant_ids(context.session, qosport, port,
                                          qosport.port_id, policy_id))
        return set(bound_tenants)

    def obj_make_compatible(self, primitive, target_version):
        """Downgrade the serialized form for pre-1.1 consumers.

        Version 1.1 introduced QosDscpMarkingRule (see the version log
        above), so strip every rule that is not a bandwidth-limit rule
        when targeting < 1.1.
        """
        _target_version = versionutils.convert_version_to_tuple(target_version)
        if _target_version < (1, 1):
            if 'rules' in primitive:
                bw_obj_name = rule_obj_impl.QosBandwidthLimitRule.obj_name()
                # Use a list comprehension instead of filter(): on
                # Python 3, filter() returns a lazy one-shot iterator,
                # which is not serializable and would be exhausted after
                # a single pass over the primitive.
                primitive['rules'] = [
                    rule for rule in primitive['rules']
                    if rule['versioned_object.name'] == bw_obj_name]
Ejemplo n.º 27
0
class Network(rbac_db.NeutronRbacObject):
    """Versioned object for a Neutron network and its synthetic extensions."""
    # Version 1.0: Initial version
    VERSION = '1.0'

    rbac_db_model = rbac_db_models.NetworkRBAC
    db_model = models_v2.Network

    fields = {
        'id': common_types.UUIDField(),
        'project_id': obj_fields.StringField(nullable=True),
        'name': obj_fields.StringField(nullable=True),
        'status': obj_fields.StringField(nullable=True),
        'admin_state_up': obj_fields.BooleanField(nullable=True),
        'vlan_transparent': obj_fields.BooleanField(nullable=True),
        # TODO(ihrachys): consider converting to a field of stricter type
        'availability_zone_hints':
        obj_fields.ListOfStringsField(nullable=True),
        'shared': obj_fields.BooleanField(default=False),
        'mtu': obj_fields.IntegerField(nullable=True),

        # TODO(ihrachys): consider exposing availability zones

        # TODO(ihrachys): consider converting to boolean
        'security': obj_fields.ObjectField('NetworkPortSecurity',
                                           nullable=True),
        'segments': obj_fields.ListOfObjectsField('NetworkSegment',
                                                  nullable=True),
        'dns_domain': common_types.DomainNameField(nullable=True),
        'qos_policy_id': common_types.UUIDField(nullable=True, default=None),

        # TODO(ihrachys): add support for tags, probably through a base class
        # since it's a feature that will probably later be added for other
        # resources too

        # TODO(ihrachys): expose external network attributes
    }

    synthetic_fields = [
        'dns_domain',
        # MTU is not stored in the database any more, it's a synthetic field
        # that may be used by plugins to provide a canonical representation for
        # the resource
        'mtu',
        'qos_policy_id',
        'security',
        'segments',
    ]

    fields_need_translation = {
        'security': 'port_security',
    }

    def create(self):
        """Persist the network plus any dns_domain/qos_policy bindings."""
        fields = self.obj_get_changes()
        with db_api.autonested_transaction(self.obj_context.session):
            # Capture the synthetic values before calling the base create():
            # from_db_object (below) resets dns_domain/qos_policy_id from the
            # DB row, so the caller-supplied values would otherwise be lost.
            dns_domain = self.dns_domain
            qos_policy_id = self.qos_policy_id
            super(Network, self).create()
            if 'dns_domain' in fields:
                self._set_dns_domain(dns_domain)
            if 'qos_policy_id' in fields:
                self._attach_qos_policy(qos_policy_id)

    def update(self):
        """Persist field changes plus any dns_domain/qos_policy rebinding."""
        fields = self.obj_get_changes()
        with db_api.autonested_transaction(self.obj_context.session):
            super(Network, self).update()
            if 'dns_domain' in fields:
                self._set_dns_domain(fields['dns_domain'])
            if 'qos_policy_id' in fields:
                self._attach_qos_policy(fields['qos_policy_id'])

    def _attach_qos_policy(self, qos_policy_id):
        # TODO(ihrachys): introduce an object for the binding to isolate
        # database access in a single place, currently scattered between port
        # and policy objects
        # Replace-by-delete: drop any existing binding before creating the
        # new one (a falsy qos_policy_id therefore detaches the policy).
        obj_db_api.delete_objects(
            self.obj_context,
            qos_models.QosNetworkPolicyBinding,
            network_id=self.id,
        )
        if qos_policy_id:
            obj_db_api.create_object(self.obj_context,
                                     qos_models.QosNetworkPolicyBinding, {
                                         'network_id': self.id,
                                         'policy_id': qos_policy_id
                                     })
        self.qos_policy_id = qos_policy_id
        self.obj_reset_changes(['qos_policy_id'])

    def _set_dns_domain(self, dns_domain):
        # Same replace-by-delete pattern as _attach_qos_policy.
        NetworkDNSDomain.delete_objects(self.obj_context, network_id=self.id)
        if dns_domain:
            NetworkDNSDomain(self.obj_context,
                             network_id=self.id,
                             dns_domain=dns_domain).create()
        self.dns_domain = dns_domain
        self.obj_reset_changes(['dns_domain'])

    @classmethod
    def modify_fields_from_db(cls, db_obj):
        """Convert the AZ hints DB string into the list form of the field."""
        result = super(Network, cls).modify_fields_from_db(db_obj)
        if az_ext.AZ_HINTS in result:
            result[az_ext.AZ_HINTS] = (az_ext.convert_az_string_to_list(
                result[az_ext.AZ_HINTS]))
        return result

    @classmethod
    def modify_fields_to_db(cls, fields):
        """Convert the AZ hints list back into its DB string form."""
        result = super(Network, cls).modify_fields_to_db(fields)
        if az_ext.AZ_HINTS in result:
            result[az_ext.AZ_HINTS] = (az_ext.convert_az_list_to_string(
                result[az_ext.AZ_HINTS]))
        return result

    def from_db_object(self, *objs):
        """Populate synthetic dns_domain/qos_policy_id from DB relations."""
        super(Network, self).from_db_object(*objs)
        for db_obj in objs:
            # extract domain name
            if db_obj.get('dns_domain'):
                self.dns_domain = (db_obj.dns_domain.dns_domain)
            else:
                self.dns_domain = None
            self.obj_reset_changes(['dns_domain'])

            # extract qos policy binding
            if db_obj.get('qos_policy_binding'):
                self.qos_policy_id = (db_obj.qos_policy_binding.policy_id)
            else:
                self.qos_policy_id = None
            self.obj_reset_changes(['qos_policy_id'])

    @classmethod
    def get_bound_tenant_ids(cls, context, policy_id):
        # TODO(ihrachys): provide actual implementation
        return set()
Ejemplo n.º 28
0
class Service(base.CinderPersistentObject, base.CinderObject,
              base.CinderObjectDictCompat,
              base.CinderComparableObject):
    """Versioned object for a Cinder service record."""
    # Version 1.0: Initial version
    # Version 1.1: Add rpc_current_version and object_current_version fields
    # Version 1.2: Add get_minimum_rpc_version() and get_minimum_obj_version()
    # Version 1.3: Add replication fields
    VERSION = '1.3'

    fields = {
        'id': fields.IntegerField(),
        'host': fields.StringField(nullable=True),
        'binary': fields.StringField(nullable=True),
        'topic': fields.StringField(nullable=True),
        'report_count': fields.IntegerField(default=0),
        'disabled': fields.BooleanField(default=False),
        'availability_zone': fields.StringField(nullable=True,
                                                default='cinder'),
        'disabled_reason': fields.StringField(nullable=True),

        'modified_at': fields.DateTimeField(nullable=True),
        'rpc_current_version': fields.StringField(nullable=True),
        'object_current_version': fields.StringField(nullable=True),

        # Replication properties
        'replication_status': c_fields.ReplicationStatusField(nullable=True),
        'frozen': fields.BooleanField(default=False),
        'active_backend_id': fields.StringField(nullable=True),
    }

    def obj_make_compatible(self, primitive, target_version):
        """Make an object representation compatible with a target version."""
        # NOTE(review): the computed tuple is currently unused — no field
        # stripping appears to be needed for any target version; confirm
        # this is intentional rather than an unfinished downgrade path.
        target_version = utils.convert_version_to_tuple(target_version)

    @staticmethod
    def _from_db_object(context, service, db_service):
        """Copy a DB row into *service*, coercing missing values by type."""
        for name, field in service.fields.items():
            value = db_service.get(name)
            # Missing integers become 0, missing datetimes become None.
            if isinstance(field, fields.IntegerField):
                value = value or 0
            elif isinstance(field, fields.DateTimeField):
                value = value or None
            service[name] = value

        service._context = context
        service.obj_reset_changes()
        return service

    @base.remotable_classmethod
    def get_by_host_and_topic(cls, context, host, topic):
        """Return the service registered for *host* and *topic*."""
        db_service = db.service_get_by_host_and_topic(context, host, topic)
        return cls._from_db_object(context, cls(context), db_service)

    @base.remotable_classmethod
    def get_by_args(cls, context, host, binary_key):
        """Return the service registered for *host* and *binary_key*."""
        db_service = db.service_get_by_args(context, host, binary_key)
        return cls._from_db_object(context, cls(context), db_service)

    @base.remotable
    def create(self):
        """Persist this service; raises if it was already created."""
        if self.obj_attr_is_set('id'):
            raise exception.ObjectActionError(action='create',
                                              reason=_('already created'))
        updates = self.cinder_obj_get_changes()
        db_service = db.service_create(self._context, updates)
        self._from_db_object(self._context, self, db_service)

    @base.remotable
    def save(self):
        """Persist any dirty fields; a no-op when nothing changed."""
        updates = self.cinder_obj_get_changes()
        if updates:
            db.service_update(self._context, self.id, updates)
            self.obj_reset_changes()

    @base.remotable
    def destroy(self):
        """Delete this service record (requires admin privileges)."""
        with self.obj_as_admin():
            db.service_destroy(self._context, self.id)

    @classmethod
    def _get_minimum_version(cls, attribute, context, binary):
        """Return the lowest *attribute* version across services for *binary*.

        Returns the literal token 'liberty' as soon as any service
        reports no version at all (see FIXME below).
        """
        services = ServiceList.get_all_by_binary(context, binary)
        min_ver = None
        min_ver_str = None
        for s in services:
            ver_str = getattr(s, attribute)
            if ver_str is None:
                # FIXME(dulek) None in *_current_version means that this
                # service is in Liberty version, so we must assume this is the
                # lowest one. We use handy and easy to remember token to
                # indicate that. This may go away as soon as we drop
                # compatibility with Liberty, possibly in early N.
                return 'liberty'
            ver = versionutils.convert_version_to_int(ver_str)
            if min_ver is None or ver < min_ver:
                min_ver = ver
                min_ver_str = ver_str

        return min_ver_str

    @base.remotable_classmethod
    def get_minimum_rpc_version(cls, context, binary):
        """Return the lowest RPC version among services running *binary*."""
        return cls._get_minimum_version('rpc_current_version', context, binary)

    @base.remotable_classmethod
    def get_minimum_obj_version(cls, context, binary):
        """Return the lowest object version among services running *binary*."""
        return cls._get_minimum_version('object_current_version', context,
                                        binary)
Ejemplo n.º 29
0
class Agent(base.NeutronDbObject):
    """Versioned object for a Neutron agent row, with scheduling helpers."""
    # Version 1.0: Initial version
    VERSION = '1.0'

    db_model = agent_model.Agent

    fields = {
        'id': common_types.UUIDField(),
        'agent_type': obj_fields.StringField(),
        'binary': obj_fields.StringField(),
        'topic': obj_fields.StringField(),
        'host': obj_fields.StringField(),
        'availability_zone': obj_fields.StringField(nullable=True),
        'admin_state_up': obj_fields.BooleanField(default=True),
        'started_at': obj_fields.DateTimeField(tzinfo_aware=False),
        'created_at': obj_fields.DateTimeField(tzinfo_aware=False),
        'heartbeat_timestamp': obj_fields.DateTimeField(tzinfo_aware=False),
        'description': obj_fields.StringField(nullable=True),
        'configurations': common_types.DictOfMiscValuesField(),
        'resource_versions': common_types.DictOfMiscValuesField(nullable=True),
        'load': obj_fields.IntegerField(default=0),
    }

    @classmethod
    def modify_fields_to_db(cls, fields):
        """Serialize the dict-valued fields to their JSON-string DB form."""
        result = super(Agent, cls).modify_fields_to_db(fields)
        # Filter-match objects are passed through untouched so string
        # matching against the stored JSON still works.
        if ('configurations' in result and
                not isinstance(result['configurations'],
                               obj_utils.StringMatchingFilterObj)):
            # dump configuration into string, set '' if empty '{}'
            result['configurations'] = (
                cls.filter_to_json_str(result['configurations'], default=''))
        if ('resource_versions' in result and
                not isinstance(result['resource_versions'],
                               obj_utils.StringMatchingFilterObj)):
            # dump resource version into string, set None if empty '{}' or None
            result['resource_versions'] = (
                cls.filter_to_json_str(result['resource_versions']))
        return result

    @classmethod
    def modify_fields_from_db(cls, db_obj):
        """Deserialize the JSON-string DB columns back into dicts."""
        fields = super(Agent, cls).modify_fields_from_db(db_obj)
        if 'configurations' in fields:
            # load string from DB, set {} if configuration is ''
            fields['configurations'] = (
                cls.load_json_from_str(fields['configurations'], default={}))
        if 'resource_versions' in fields:
            # load string from DB, set None if resource_version is None or ''
            fields['resource_versions'] = (
                cls.load_json_from_str(fields['resource_versions']))
        return fields

    @property
    def is_active(self):
        """True while the agent's heartbeat is within the liveness window."""
        return not utils.is_agent_down(self.heartbeat_timestamp)

    # TODO(ihrachys) reuse query builder from
    # get_l3_agents_ordered_by_num_routers
    @classmethod
    def get_l3_agent_with_min_routers(cls, context, agent_ids):
        """Return l3 agent with the least number of routers."""
        with cls.db_context_reader(context):
            query = context.session.query(
                agent_model.Agent,
                func.count(
                    rb_model.RouterL3AgentBinding.router_id
                ).label('count')).outerjoin(
                    rb_model.RouterL3AgentBinding).group_by(
                    agent_model.Agent.id,
                    rb_model.RouterL3AgentBinding
                    .l3_agent_id).order_by('count')
            res = query.filter(agent_model.Agent.id.in_(agent_ids)).first()
        # NOTE(review): res is assumed non-None here; if agent_ids matches
        # no rows, res[0] raises — confirm callers guarantee a match.
        agent_obj = cls._load_object(context, res[0])
        return agent_obj

    @classmethod
    def get_l3_agents_ordered_by_num_routers(cls, context, agent_ids):
        """Return the given l3 agents sorted by hosted-router count."""
        with cls.db_context_reader(context):
            query = (context.session.query(agent_model.Agent, func.count(
                rb_model.RouterL3AgentBinding.router_id)
                .label('count')).
                outerjoin(rb_model.RouterL3AgentBinding).
                group_by(agent_model.Agent.id).
                filter(agent_model.Agent.id.in_(agent_ids)).
                order_by('count'))
        agents = [cls._load_object(context, record[0]) for record in query]

        return agents

    @classmethod
    def get_ha_agents(cls, context, network_id=None, router_id=None):
        """Return agents hosting the given HA router or network's routers."""
        if not (network_id or router_id):
            return []
        query = context.session.query(agent_model.Agent.host)
        query = query.join(l3ha_model.L3HARouterAgentPortBinding,
                           l3ha_model.L3HARouterAgentPortBinding.l3_agent_id ==
                           agent_model.Agent.id)
        # NOTE: in both branches .all() is called, so 'query' ends up
        # holding the list of result rows that is iterated below.
        if router_id:
            query = query.filter(
                l3ha_model.L3HARouterAgentPortBinding.router_id ==
                router_id).all()
        elif network_id:
            query = query.join(models_v2.Port, models_v2.Port.device_id ==
                               l3ha_model.L3HARouterAgentPortBinding.router_id)
            query = query.filter(models_v2.Port.network_id == network_id,
                                 models_v2.Port.status ==
                                 const.PORT_STATUS_ACTIVE,
                                 models_v2.Port.device_owner.in_(
                                     (const.DEVICE_OWNER_HA_REPLICATED_INT,
                                      const.DEVICE_OWNER_ROUTER_SNAT))).all()
        # L3HARouterAgentPortBinding will have l3 agent ids of hosting agents.
        # But we need l2 agent(for tunneling ip) while creating FDB entries.
        hosts = [host[0] for host in query]
        agents = cls.get_objects(context, host=hosts)
        return agents

    @classmethod
    def _get_agents_by_availability_zones_and_agent_type(
            cls, context, agent_type, availability_zones):
        """Return one agent of *agent_type* per matching availability zone."""
        query = context.session.query(
            agent_model.Agent).filter_by(
            agent_type=agent_type).group_by(
            agent_model.Agent.availability_zone)
        query = query.filter(
            agent_model.Agent.availability_zone.in_(availability_zones)).all()
        agents = [cls._load_object(context, record) for record in query]
        return agents

    @classmethod
    def get_objects_by_agent_mode(cls, context, agent_mode=None, **kwargs):
        """Return agents whose configurations string contains *agent_mode*."""
        mode_filter = obj_utils.StringContains(agent_mode)
        return cls.get_objects(context, configurations=mode_filter, **kwargs)
Ejemplo n.º 30
0
class NodeGroup(base.MagnumPersistentObject, base.MagnumObject,
                base.MagnumObjectDictCompat):
    """Versioned object representing a group of nodes within a cluster."""

    # Version 1.0: Initial version

    VERSION = '1.0'

    # Database access layer singleton shared by all NodeGroup instances.
    dbapi = dbapi.get_instance()

    # Field registry: name -> oslo.versionedobjects field type.
    fields = {
        'id': fields.IntegerField(),
        'uuid': fields.UUIDField(),
        'name': fields.StringField(),
        'cluster_id': fields.StringField(),
        'project_id': fields.StringField(),
        'docker_volume_size': fields.IntegerField(nullable=True),
        'labels': fields.DictOfStringsField(nullable=True),
        'flavor_id': fields.StringField(nullable=True),
        'image_id': fields.StringField(nullable=True),
        'node_addresses': fields.ListOfStringsField(nullable=True),
        # Defaults to a single node when not specified.
        'node_count': fields.IntegerField(nullable=False, default=1),
        'role': fields.StringField(),
        'max_node_count': fields.IntegerField(nullable=True),
        'min_node_count': fields.IntegerField(nullable=False, default=1),
        'is_default': fields.BooleanField(default=False)
    }

    @staticmethod
    def _from_db_object(nodegroup, db_nodegroup):
        """Populate *nodegroup* from a database entity and return it."""
        # Copy every declared field straight off the DB row, then clear
        # the change tracking so the object starts out "clean".
        for name in nodegroup.fields:
            nodegroup[name] = db_nodegroup[name]
        nodegroup.obj_reset_changes()
        return nodegroup

    @staticmethod
    def _from_db_object_list(db_objects, cls, context):
        """Convert a list of database entities into NodeGroup objects."""
        converted = []
        for db_obj in db_objects:
            converted.append(NodeGroup._from_db_object(cls(context), db_obj))
        return converted

    @base.remotable_classmethod
    def get(cls, context, cluster_id, nodegroup_id):
        """Find a nodegroup by integer id, uuid, or name.

        :param context: Security context
        :param cluster_id: the id of a cluster.
        :param nodegroup_id: the id, uuid or name of a nodegroup.
        :returns: a :class:`NodeGroup` object.
        """
        # Pick the lookup strategy based on what the identifier looks like.
        if strutils.is_int_like(nodegroup_id):
            finder = cls.get_by_id
        elif uuidutils.is_uuid_like(nodegroup_id):
            finder = cls.get_by_uuid
        else:
            finder = cls.get_by_name
        return finder(context, cluster_id, nodegroup_id)

    @base.remotable_classmethod
    def get_by_id(cls, context, cluster, id_):
        """Return the NodeGroup with the given integer id.

        :param context: Security context
        :param cluster: the id of a cluster.
        :param id_: the integer id of a nodegroup.
        :returns: a :class:`NodeGroup` object.
        """
        db_row = cls.dbapi.get_nodegroup_by_id(context, cluster, id_)
        return NodeGroup._from_db_object(cls(context), db_row)

    @base.remotable_classmethod
    def get_by_uuid(cls, context, cluster, uuid):
        """Return the NodeGroup with the given uuid.

        :param context: Security context
        :param cluster: the id of a cluster.
        :param uuid: the uuid of a nodegroup.
        :returns: a :class:`NodeGroup` object.
        """
        db_row = cls.dbapi.get_nodegroup_by_uuid(context, cluster, uuid)
        return NodeGroup._from_db_object(cls(context), db_row)

    @base.remotable_classmethod
    def get_by_name(cls, context, cluster, name):
        """Return the NodeGroup with the given logical name.

        :param context: Security context
        :param cluster: the id of a cluster.
        :param name: the logical name of a nodegroup.
        :returns: a :class:`NodeGroup` object.
        """
        db_row = cls.dbapi.get_nodegroup_by_name(context, cluster, name)
        return NodeGroup._from_db_object(cls(context), db_row)

    @base.remotable_classmethod
    def get_count_all(cls, context, cluster_id):
        """Return how many nodegroups belong to the given cluster.

        :param context: The security context
        :param cluster_id: The uuid of the cluster
        :returns: Count of nodegroups in the cluster.
        """
        count = cls.dbapi.get_cluster_nodegroup_count(context, cluster_id)
        return count

    @base.remotable_classmethod
    def list(cls, context, cluster, limit=None, marker=None,
             sort_key=None, sort_dir=None, filters=None):
        """Return the NodeGroup objects of a cluster, paginated and sorted.

        :param context: Security context.
        :param cluster: The cluster uuid or name
        :param limit: maximum number of resources to return in a single
                      result.
        :param marker: pagination marker for large data sets.
        :param sort_key: column to sort results by.
        :param sort_dir: direction to sort. "asc" or "desc".
        :param filters: filter dict, can include 'name', 'node_count',
                        'stack_id', 'node_addresses',
                        'status' (should be a status list).
        :returns: a list of :class:`NodeGroup` objects.
        """
        db_rows = cls.dbapi.list_cluster_nodegroups(
            context, cluster, limit=limit, marker=marker,
            sort_key=sort_key, sort_dir=sort_dir, filters=filters)
        return NodeGroup._from_db_object_list(db_rows, cls, context)

    @base.remotable
    def create(self, context=None):
        """Persist this nodegroup as a new record in the DB.

        :param context: Security context
        """
        # Only the fields that were actually set are written out.
        db_nodegroup = self.dbapi.create_nodegroup(self.obj_get_changes())
        # Refresh ourselves from the row so DB-generated values are loaded.
        self._from_db_object(self, db_nodegroup)

    @base.remotable
    def destroy(self, context=None):
        """Remove this NodeGroup's record from the DB.

        :param context: Security context.
        """
        self.dbapi.destroy_nodegroup(self.cluster_id, self.uuid)
        # Nothing is pending anymore once the row is gone.
        self.obj_reset_changes()

    @base.remotable
    def save(self, context=None):
        """Write this NodeGroup's pending changes to the DB.

        Only the columns reported changed by obj_get_changes() are
        updated.

        :param context: Security context.
        """
        changes = self.obj_get_changes()
        self.dbapi.update_nodegroup(self.cluster_id, self.uuid, changes)
        self.obj_reset_changes()

    @base.remotable
    def refresh(self, context=None):
        """Reload this NodeGroup's fields from the database.

        Fetches the NodeGroup with the same uuid and, field by field,
        copies over any stored value that differs from the in-memory one.

        :param context: Security context.
        """
        latest = self.__class__.get_by_uuid(self._context,
                                            cluster=self.cluster_id,
                                            uuid=self.uuid)
        for name in self.fields:
            # Skip unset fields; only overwrite when the DB value differs.
            if self.obj_attr_is_set(name) and self[name] != latest[name]:
                self[name] = latest[name]

    @base.remotable_classmethod
    def update_nodegroup(cls, context, cluster_id, nodegroup_id, values):
        """Apply *values* to a nodegroup and return the updated object.

        :param context: Security context.
        :param cluster_id: id of the owning cluster.
        :param nodegroup_id: id, uuid or name of the nodegroup.
        :param values: a dictionary with the changed values
        """
        # Resolve whatever identifier we were given to a concrete row first.
        existing = cls.get(context, cluster_id, nodegroup_id)
        db_row = cls.dbapi.update_nodegroup(cluster_id, existing.uuid, values)
        return NodeGroup._from_db_object(cls(context), db_row)