Example #1
class Stack(
        base.VersionedObject,
        base.VersionedObjectDictCompat,
        base.ComparableVersionedObject,
):
    fields = {
        'id': fields.StringField(),
        'name': fields.StringField(),
        'raw_template_id': fields.IntegerField(),
        'backup': fields.BooleanField(),
        'created_at': fields.DateTimeField(read_only=True),
        'deleted_at': fields.DateTimeField(nullable=True),
        'disable_rollback': fields.BooleanField(),
        'nested_depth': fields.IntegerField(),
        'owner_id': fields.StringField(nullable=True),
        'stack_user_project_id': fields.StringField(nullable=True),
        'tenant': fields.StringField(nullable=True),
        'timeout': fields.IntegerField(nullable=True),
        'updated_at': fields.DateTimeField(nullable=True),
        'user_creds_id': fields.StringField(nullable=True),
        'username': fields.StringField(nullable=True),
        'action': fields.StringField(nullable=True),
        'status': fields.StringField(nullable=True),
        'status_reason': fields.StringField(nullable=True),
        'raw_template': fields.ObjectField('RawTemplate'),
        'convergence': fields.BooleanField(),
        'current_traversal': fields.StringField(),
        'current_deps': heat_fields.JsonField(),
        'prev_raw_template_id': fields.IntegerField(),
        'prev_raw_template': fields.ObjectField('RawTemplate'),
        'tags': fields.ObjectField('StackTagList'),
        'parent_resource_name': fields.StringField(nullable=True),
    }

    @staticmethod
    def _from_db_object(context, stack, db_stack):
        for field in stack.fields:
            if field == 'raw_template':
                stack['raw_template'] = (raw_template.RawTemplate.get_by_id(
                    context, db_stack['raw_template_id']))
            elif field == 'tags':
                if db_stack.get(field) is not None:
                    stack['tags'] = stack_tag.StackTagList.get(
                        context, db_stack['id'])
                else:
                    stack['tags'] = None
            else:
                stack[field] = db_stack.__dict__.get(field)
        stack._context = context
        stack.obj_reset_changes()
        return stack

    @classmethod
    def get_root_id(cls, context, stack_id):
        return db_api.stack_get_root_id(context, stack_id)

    @classmethod
    def get_by_id(cls, context, stack_id, **kwargs):
        db_stack = db_api.stack_get(context, stack_id, **kwargs)
        if not db_stack:
            return None
        stack = cls._from_db_object(context, cls(context), db_stack)
        return stack

    @classmethod
    def get_by_name_and_owner_id(cls, context, stack_name, owner_id):
        db_stack = db_api.stack_get_by_name_and_owner_id(
            context, six.text_type(stack_name), owner_id)
        if not db_stack:
            return None
        stack = cls._from_db_object(context, cls(context), db_stack)
        return stack

    @classmethod
    def get_by_name(cls, context, stack_name):
        db_stack = db_api.stack_get_by_name(context, six.text_type(stack_name))
        if not db_stack:
            return None
        stack = cls._from_db_object(context, cls(context), db_stack)
        return stack

    @classmethod
    def get_all(cls, context, *args, **kwargs):
        db_stacks = db_api.stack_get_all(context, *args, **kwargs)
        for db_stack in db_stacks:
            try:
                yield cls._from_db_object(context, cls(context), db_stack)
            except exception.NotFound:
                pass

    @classmethod
    def get_all_by_owner_id(cls, context, owner_id):
        db_stacks = db_api.stack_get_all_by_owner_id(context, owner_id)
        for db_stack in db_stacks:
            try:
                yield cls._from_db_object(context, cls(context), db_stack)
            except exception.NotFound:
                pass

    @classmethod
    def count_all(cls, context, **kwargs):
        return db_api.stack_count_all(context, **kwargs)

    @classmethod
    def count_total_resources(cls, context, stack_id):
        return db_api.stack_count_total_resources(context, stack_id)

    @classmethod
    def create(cls, context, values):
        return cls._from_db_object(context, cls(context),
                                   db_api.stack_create(context, values))

    @classmethod
    def update_by_id(cls, context, stack_id, values):
        """Update and return (boolean) if it was updated.

        Note: the underlying stack_update filters by current_traversal
        and stack_id.
        """
        return db_api.stack_update(context, stack_id, values)

    @classmethod
    def select_and_update(cls, context, stack_id, values, exp_trvsl=None):
        """Update the stack by selecting on traversal ID.

        Uses UPDATE ... WHERE (compare and swap) to catch any concurrent
        update problem.

        If the stack is found with the given traversal, it is updated.

        If a race occurs during the update, only one writer succeeds and
        the others get a return value of False.
        """
        return db_api.stack_update(context,
                                   stack_id,
                                   values,
                                   exp_trvsl=exp_trvsl)

    @classmethod
    def persist_state_and_release_lock(cls, context, stack_id, engine_id,
                                       values):
        return db_api.persist_state_and_release_lock(context, stack_id,
                                                     engine_id, values)

    @classmethod
    def delete(cls, context, stack_id):
        db_api.stack_delete(context, stack_id)

    def update_and_save(self, values):
        has_updated = self.__class__.update_by_id(self._context, self.id,
                                                  values)
        if not has_updated:
            raise exception.NotFound(
                _('Attempt to update a stack with id: '
                  '%(id)s %(traversal)s %(msg)s') % {
                      'id': self.id,
                      'traversal': self.current_traversal,
                      'msg': 'that does not exist'
                  })

    def __eq__(self, another):
        self.refresh()  # to make test object comparison work well
        return super(Stack, self).__eq__(another)

    def refresh(self):
        db_stack = db_api.stack_get(self._context, self.id, show_deleted=True)
        if db_stack is None:
            message = _('No stack exists with id "%s"') % str(self.id)
            raise exception.NotFound(message)
        db_stack.refresh()
        return self.__class__._from_db_object(self._context, self, db_stack)

    @classmethod
    def encrypt_hidden_parameters(cls, tmpl):
        raw_template.RawTemplate.encrypt_hidden_parameters(tmpl)
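
The compare-and-swap behavior of select_and_update is the part worth exercising: passing exp_trvsl makes the UPDATE conditional on the stored traversal ID, so concurrent engines cannot clobber each other's state transitions. A minimal usage sketch, assuming a Heat request context is available; the stack and traversal IDs are hypothetical placeholders:

from heat.objects import stack as stack_object

def mark_in_progress(context, stack_id, expected_traversal):
    # Returns False when another engine already switched the traversal,
    # in which case this writer must back off.
    return stack_object.Stack.select_and_update(
        context, stack_id,
        {'status': 'IN_PROGRESS'},
        exp_trvsl=expected_traversal)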
Example #2
class Port(base.NeutronDbObject):
    # Version 1.0: Initial version
    # Version 1.1: Add data_plane_status field
    # Version 1.2: Added segment_id to binding_levels
    # Version 1.3: distributed_binding -> distributed_bindings
    # Version 1.4: Attribute binding becomes ListOfObjectsField
    # Version 1.5: Added qos_network_policy_id field
    VERSION = '1.5'

    db_model = models_v2.Port

    fields = {
        'id':
        common_types.UUIDField(),
        'project_id':
        obj_fields.StringField(nullable=True),
        'name':
        obj_fields.StringField(nullable=True),
        'network_id':
        common_types.UUIDField(),
        'mac_address':
        common_types.MACAddressField(),
        'admin_state_up':
        obj_fields.BooleanField(),
        'device_id':
        obj_fields.StringField(),
        'device_owner':
        obj_fields.StringField(),
        'status':
        obj_fields.StringField(),
        'allowed_address_pairs':
        obj_fields.ListOfObjectsField('AllowedAddressPair', nullable=True),
        'bindings':
        obj_fields.ListOfObjectsField('PortBinding', nullable=True),
        'data_plane_status':
        obj_fields.ObjectField('PortDataPlaneStatus', nullable=True),
        'dhcp_options':
        obj_fields.ListOfObjectsField('ExtraDhcpOpt', nullable=True),
        'distributed_bindings':
        obj_fields.ListOfObjectsField('DistributedPortBinding', nullable=True),
        'dns':
        obj_fields.ObjectField('PortDNS', nullable=True),
        'fixed_ips':
        obj_fields.ListOfObjectsField('IPAllocation', nullable=True),
        # TODO(ihrachys): consider converting to boolean
        'security':
        obj_fields.ObjectField('PortSecurity', nullable=True),
        'security_group_ids':
        common_types.SetOfUUIDsField(
            nullable=True,
            # TODO(ihrachys): how do we safely pass a mutable default?
            default=None,
        ),
        'qos_policy_id':
        common_types.UUIDField(nullable=True, default=None),
        'qos_network_policy_id':
        common_types.UUIDField(nullable=True, default=None),
        'binding_levels':
        obj_fields.ListOfObjectsField('PortBindingLevel', nullable=True),

        # TODO(ihrachys): consider adding a 'dns_assignment' fully synthetic
        # field in later object iterations
    }

    extra_filter_names = {'security_group_ids'}

    fields_no_update = ['project_id', 'network_id']

    synthetic_fields = [
        'allowed_address_pairs',
        'bindings',
        'binding_levels',
        'data_plane_status',
        'dhcp_options',
        'distributed_bindings',
        'dns',
        'fixed_ips',
        'qos_policy_id',
        'qos_network_policy_id',
        'security',
        'security_group_ids',
    ]

    fields_need_translation = {
        'bindings': 'port_bindings',
        'dhcp_options': 'dhcp_opts',
        'distributed_bindings': 'distributed_port_binding',
        'security': 'port_security',
    }

    def create(self):
        fields = self.obj_get_changes()
        with self.db_context_writer(self.obj_context):
            sg_ids = self.security_group_ids
            if sg_ids is None:
                sg_ids = set()
            qos_policy_id = self.qos_policy_id
            super(Port, self).create()
            if 'security_group_ids' in fields:
                self._attach_security_groups(sg_ids)
            if 'qos_policy_id' in fields:
                self._attach_qos_policy(qos_policy_id)

    def update(self):
        fields = self.obj_get_changes()
        with self.db_context_writer(self.obj_context):
            super(Port, self).update()
            if 'security_group_ids' in fields:
                self._attach_security_groups(fields['security_group_ids'])
            if 'qos_policy_id' in fields:
                self._attach_qos_policy(fields['qos_policy_id'])

    def _attach_qos_policy(self, qos_policy_id):
        binding.QosPolicyPortBinding.delete_objects(self.obj_context,
                                                    port_id=self.id)
        if qos_policy_id:
            port_binding_obj = binding.QosPolicyPortBinding(
                self.obj_context, policy_id=qos_policy_id, port_id=self.id)
            port_binding_obj.create()

        self.qos_policy_id = qos_policy_id
        self.obj_reset_changes(['qos_policy_id'])

    def _attach_security_groups(self, sg_ids):
        # TODO(ihrachys): consider introducing an (internal) object for the
        # binding to decouple database operations a bit more
        obj_db_api.delete_objects(SecurityGroupPortBinding,
                                  self.obj_context,
                                  port_id=self.id)
        if sg_ids:
            for sg_id in sg_ids:
                self._attach_security_group(sg_id)
        self.security_group_ids = sg_ids
        self.obj_reset_changes(['security_group_ids'])

    def _attach_security_group(self, sg_id):
        obj_db_api.create_object(SecurityGroupPortBinding, self.obj_context, {
            'port_id': self.id,
            'security_group_id': sg_id
        })

    @classmethod
    def get_objects(cls,
                    context,
                    _pager=None,
                    validate_filters=True,
                    security_group_ids=None,
                    **kwargs):
        if security_group_ids:
            ports_with_sg = cls.get_ports_ids_by_security_groups(
                context, security_group_ids)
            port_ids = kwargs.get("id", [])
            if port_ids:
                kwargs['id'] = list(set(port_ids) & set(ports_with_sg))
            else:
                kwargs['id'] = ports_with_sg
        return super(Port, cls).get_objects(context, _pager, validate_filters,
                                            **kwargs)

    @classmethod
    def get_port_ids_filter_by_segment_id(cls, context, segment_id):
        query = context.session.query(models_v2.Port.id)
        query = query.join(
            ml2_models.PortBindingLevel,
            ml2_models.PortBindingLevel.port_id == models_v2.Port.id)
        query = query.filter(
            ml2_models.PortBindingLevel.segment_id == segment_id)
        return [p.id for p in query]

    @classmethod
    def modify_fields_to_db(cls, fields):
        result = super(Port, cls).modify_fields_to_db(fields)

        # TODO(rossella_s): get rid of it once we switch the db model to using
        # custom types.
        if 'mac_address' in result:
            result['mac_address'] = cls.filter_to_str(result['mac_address'])

        # convert None to []
        if 'distributed_port_binding' in result:
            result['distributed_port_binding'] = (
                result['distributed_port_binding'] or [])
        return result

    @classmethod
    def modify_fields_from_db(cls, db_obj):
        fields = super(Port, cls).modify_fields_from_db(db_obj)

        # TODO(rossella_s): get rid of it once we switch the db model to using
        # custom types.
        if 'mac_address' in fields:
            fields['mac_address'] = utils.AuthenticEUI(fields['mac_address'])

        distributed_port_binding = fields.get('distributed_bindings')
        if distributed_port_binding:
            # TODO(ihrachys) support multiple bindings
            fields['distributed_bindings'] = fields['distributed_bindings'][0]
        else:
            fields['distributed_bindings'] = []
        return fields

    def from_db_object(self, db_obj):
        super(Port, self).from_db_object(db_obj)
        # extract security group bindings
        if db_obj.get('security_groups', []):
            self.security_group_ids = {
                sg.security_group_id
                for sg in db_obj.security_groups
            }
        else:
            self.security_group_ids = set()
        fields_to_change = ['security_group_ids']

        # extract qos policy binding
        if db_obj.get('qos_policy_binding'):
            self.qos_policy_id = db_obj.qos_policy_binding.policy_id
            fields_to_change.append('qos_policy_id')
        if db_obj.get('qos_network_policy_binding'):
            self.qos_network_policy_id = (
                db_obj.qos_network_policy_binding.policy_id)
            fields_to_change.append('qos_network_policy_id')

        self.obj_reset_changes(fields_to_change)

    def obj_make_compatible(self, primitive, target_version):
        _target_version = versionutils.convert_version_to_tuple(target_version)
        if _target_version < (1, 1):
            primitive.pop('data_plane_status', None)
        if _target_version < (1, 2):
            binding_levels = primitive.get('binding_levels', [])
            for lvl in binding_levels:
                lvl['versioned_object.version'] = '1.0'
                lvl['versioned_object.data'].pop('segment_id', None)
        if _target_version < (1, 3):
            bindings = primitive.pop('distributed_bindings', [])
            primitive['distributed_binding'] = (bindings[0]
                                                if bindings else None)
        if _target_version < (1, 4):
            # In version 1.4 we add support for multiple port bindings.
            # Previous versions only support one port binding. The following
            # lines look for the active port binding, which is the only one
            # needed in previous versions
            if 'bindings' in primitive:
                original_bindings = primitive.pop('bindings')
                primitive['binding'] = None
                for a_binding in original_bindings:
                    if (a_binding['versioned_object.data']['status'] ==
                            constants.ACTIVE):
                        primitive['binding'] = a_binding
                        break
        if _target_version < (1, 5):
            primitive.pop('qos_network_policy_id', None)

    @classmethod
    def get_ports_by_router_and_network(cls, context, router_id, owner,
                                        network_id):
        """Returns port objects filtering by router ID, owner and network ID"""
        rports_filter = (models_v2.Port.network_id == network_id, )
        router_filter = (models_v2.Port.network_id == network_id, )
        return cls._get_ports_by_router(context, router_id, owner,
                                        rports_filter, router_filter)

    @classmethod
    def get_ports_by_router_and_port(cls, context, router_id, owner, port_id):
        """Returns port objects filtering by router ID, owner and port ID"""
        rports_filter = (l3.RouterPort.port_id == port_id, )
        router_filter = (models_v2.Port.id == port_id, )
        return cls._get_ports_by_router(context, router_id, owner,
                                        rports_filter, router_filter)

    @classmethod
    def _get_ports_by_router(cls, context, router_id, owner, rports_filter,
                             router_filter):
        """Returns port objects filtering by router id and owner

        The method will receive extra filters depending of the caller (filter
        by network or filter by port).

        The ports are retrieved using:
        - The RouterPort registers. Each time a port is assigned to a router,
          a new RouterPort register is added to the DB.
        - The port owner and device_id information.

        Both searches should return the same result. If not, a warning message
        is logged and the port list to be returned is completed with the
        missing ones.
        """
        rports_filter += (l3.RouterPort.router_id == router_id,
                          l3.RouterPort.port_type == owner)
        router_filter += (models_v2.Port.device_id == router_id,
                          models_v2.Port.device_owner == owner)

        ports = context.session.query(models_v2.Port).join(
            l3.RouterPort).filter(*rports_filter)
        ports_rports = [
            cls._load_object(context, db_obj) for db_obj in ports.all()
        ]

        ports = context.session.query(models_v2.Port).filter(*router_filter)
        ports_router = [
            cls._load_object(context, db_obj) for db_obj in ports.all()
        ]

        ports_rports_ids = {p.id for p in ports_rports}
        ports_router_ids = {p.id for p in ports_router}
        missing_port_ids = ports_router_ids - ports_rports_ids
        if missing_port_ids:
            LOG.warning(
                'The following ports, assigned to router '
                '%(router_id)s, do not have a "routerport" register: '
                '%(port_ids)s', {
                    'router_id': router_id,
                    'port_ids': missing_port_ids
                })
            port_objs = [p for p in ports_router if p.id in missing_port_ids]
            ports_rports += port_objs

        return ports_rports

    @classmethod
    def get_ports_ids_by_security_groups(cls,
                                         context,
                                         security_group_ids,
                                         excluded_device_owners=None):
        query = context.session.query(sg_models.SecurityGroupPortBinding)
        query = query.filter(
            sg_models.SecurityGroupPortBinding.security_group_id.in_(
                security_group_ids))
        if excluded_device_owners:
            query = query.join(models_v2.Port)
            query = query.filter(
                ~models_v2.Port.device_owner.in_(excluded_device_owners))
        return [port_binding['port_id'] for port_binding in query.all()]

    @classmethod
    def get_ports_by_binding_type_and_host(cls, context, binding_type, host):
        query = context.session.query(models_v2.Port).join(
            ml2_models.PortBinding)
        query = query.filter(ml2_models.PortBinding.vif_type == binding_type,
                             ml2_models.PortBinding.host == host)
        return [cls._load_object(context, db_obj) for db_obj in query.all()]
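
Note how get_objects intersects any explicit id filter with the ports bound to the requested security groups before delegating to the parent implementation. A short sketch of that call path, assuming an admin context; the UUIDs are placeholders:

from neutron.objects import ports as port_obj

def ports_using_security_group(context, sg_id, candidate_port_ids=None):
    filters = {'security_group_ids': [sg_id]}
    if candidate_port_ids:
        # get_objects narrows this list to ports that also carry the SG.
        filters['id'] = candidate_port_ids
    return port_obj.Port.get_objects(context, **filters)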
Example #3
class Network(rbac_db.NeutronRbacObject):
    # Version 1.0: Initial version
    VERSION = '1.0'

    rbac_db_cls = NetworkRBAC
    db_model = models_v2.Network

    fields = {
        'id': common_types.UUIDField(),
        'project_id': obj_fields.StringField(nullable=True),
        'name': obj_fields.StringField(nullable=True),
        'status': obj_fields.StringField(nullable=True),
        'admin_state_up': obj_fields.BooleanField(nullable=True),
        'vlan_transparent': obj_fields.BooleanField(nullable=True),
        # TODO(ihrachys): consider converting to a field of stricter type
        'availability_zone_hints': obj_fields.ListOfStringsField(
            nullable=True),
        'shared': obj_fields.BooleanField(default=False),

        'mtu': obj_fields.IntegerField(nullable=True),

        # TODO(ihrachys): consider exposing availability zones

        # TODO(ihrachys): consider converting to boolean
        'security': obj_fields.ObjectField(
            'NetworkPortSecurity', nullable=True),
        'segments': obj_fields.ListOfObjectsField(
            'NetworkSegment', nullable=True),
        'dns_domain': common_types.DomainNameField(nullable=True),
        'qos_policy_id': common_types.UUIDField(nullable=True, default=None),

        # TODO(ihrachys): add support for tags, probably through a base class
        # since it's a feature that will probably later be added for other
        # resources too

        # TODO(ihrachys): expose external network attributes
    }

    synthetic_fields = [
        'dns_domain',
        'qos_policy_id',
        'security',
        'segments',
    ]

    fields_need_translation = {
        'security': 'port_security',
    }

    def create(self):
        fields = self.obj_get_changes()
        with self.db_context_writer(self.obj_context):
            dns_domain = self.dns_domain
            qos_policy_id = self.qos_policy_id
            super(Network, self).create()
            if 'dns_domain' in fields:
                self._set_dns_domain(dns_domain)
            if 'qos_policy_id' in fields:
                self._attach_qos_policy(qos_policy_id)

    def update(self):
        fields = self.obj_get_changes()
        with self.db_context_writer(self.obj_context):
            super(Network, self).update()
            if 'dns_domain' in fields:
                self._set_dns_domain(fields['dns_domain'])
            if 'qos_policy_id' in fields:
                self._attach_qos_policy(fields['qos_policy_id'])

    def _attach_qos_policy(self, qos_policy_id):
        binding.QosPolicyNetworkBinding.delete_objects(
            self.obj_context, network_id=self.id)
        if qos_policy_id:
            net_binding_obj = binding.QosPolicyNetworkBinding(
                self.obj_context, policy_id=qos_policy_id, network_id=self.id)
            net_binding_obj.create()

        self.qos_policy_id = qos_policy_id
        self.obj_reset_changes(['qos_policy_id'])

    def _set_dns_domain(self, dns_domain):
        NetworkDNSDomain.delete_objects(self.obj_context, network_id=self.id)
        if dns_domain:
            NetworkDNSDomain(self.obj_context, network_id=self.id,
                             dns_domain=dns_domain).create()
        self.dns_domain = dns_domain
        self.obj_reset_changes(['dns_domain'])

    @classmethod
    def modify_fields_from_db(cls, db_obj):
        result = super(Network, cls).modify_fields_from_db(db_obj)
        if az_def.AZ_HINTS in result:
            result[az_def.AZ_HINTS] = (
                az_validator.convert_az_string_to_list(
                    result[az_def.AZ_HINTS]))
        return result

    @classmethod
    def modify_fields_to_db(cls, fields):
        result = super(Network, cls).modify_fields_to_db(fields)
        if az_def.AZ_HINTS in result:
            result[az_def.AZ_HINTS] = (
                az_validator.convert_az_list_to_string(
                    result[az_def.AZ_HINTS]))
        return result

    def from_db_object(self, *objs):
        super(Network, self).from_db_object(*objs)
        for db_obj in objs:
            # extract domain name
            if db_obj.get('dns_domain'):
                self.dns_domain = (
                    db_obj.dns_domain.dns_domain
                )
            else:
                self.dns_domain = None
            self.obj_reset_changes(['dns_domain'])

            # extract qos policy binding
            if db_obj.get('qos_policy_binding'):
                self.qos_policy_id = (
                    db_obj.qos_policy_binding.policy_id
                )
            else:
                self.qos_policy_id = None
            self.obj_reset_changes(['qos_policy_id'])

    @classmethod
    def get_bound_tenant_ids(cls, context, policy_id):
        # TODO(ihrachys): provide actual implementation
        return set()
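
Because dns_domain and qos_policy_id are synthetic fields, create() writes them through their own binding objects inside the same writer transaction as the network row itself. A hedged sketch of creating a network with both set up front; the context and policy UUID are placeholders:

from neutron.objects import network as network_obj

def make_network(context, qos_policy_id):
    net = network_obj.Network(
        context, name='demo-net', admin_state_up=True,
        dns_domain='example.org.', qos_policy_id=qos_policy_id)
    # create() persists the row, then attaches the DNS domain and the
    # QoS policy binding in the same transaction.
    net.create()
    return net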
Example #4
class Volume(cleanable.CinderCleanableObject, base.CinderObject,
             base.CinderObjectDictCompat, base.CinderComparableObject,
             base.ClusteredObject):
    # Version 1.0: Initial version
    # Version 1.1: Added metadata, admin_metadata, volume_attachment, and
    #              volume_type
    # Version 1.2: Added glance_metadata, consistencygroup and snapshots
    # Version 1.3: Added finish_volume_migration()
    # Version 1.4: Added cluster fields
    # Version 1.5: Added group
    # Version 1.6: This object is now cleanable (adds rows to workers table)
    # Version 1.7: Added service_uuid
    # Version 1.8: Added shared_targets
    # Version 1.9: Added use_quota
    VERSION = '1.9'

    OPTIONAL_FIELDS = ('metadata', 'admin_metadata', 'glance_metadata',
                       'volume_type', 'volume_attachment', 'consistencygroup',
                       'snapshots', 'cluster', 'group')

    # NOTE: When adding a field obj_make_compatible needs to be updated
    fields = {
        'id':
        fields.UUIDField(),
        '_name_id':
        fields.UUIDField(nullable=True),
        'ec2_id':
        fields.UUIDField(nullable=True),
        'user_id':
        fields.StringField(nullable=True),
        'project_id':
        fields.StringField(nullable=True),

        # TODO: (Y release) Change nullable to False
        'use_quota':
        fields.BooleanField(default=True, nullable=True),
        'snapshot_id':
        fields.UUIDField(nullable=True),
        'cluster_name':
        fields.StringField(nullable=True),
        'cluster':
        fields.ObjectField('Cluster', nullable=True, read_only=True),
        'host':
        fields.StringField(nullable=True),
        'size':
        fields.IntegerField(nullable=True),
        'availability_zone':
        fields.StringField(nullable=True),
        'status':
        fields.StringField(nullable=True),
        'attach_status':
        c_fields.VolumeAttachStatusField(nullable=True),
        'migration_status':
        fields.StringField(nullable=True),
        'scheduled_at':
        fields.DateTimeField(nullable=True),
        'launched_at':
        fields.DateTimeField(nullable=True),
        'terminated_at':
        fields.DateTimeField(nullable=True),
        'display_name':
        fields.StringField(nullable=True),
        'display_description':
        fields.StringField(nullable=True),
        'provider_id':
        fields.StringField(nullable=True),
        'provider_location':
        fields.StringField(nullable=True),
        'provider_auth':
        fields.StringField(nullable=True),
        'provider_geometry':
        fields.StringField(nullable=True),
        'volume_type_id':
        fields.UUIDField(nullable=True),
        'source_volid':
        fields.UUIDField(nullable=True),
        'encryption_key_id':
        fields.UUIDField(nullable=True),
        'consistencygroup_id':
        fields.UUIDField(nullable=True),
        'group_id':
        fields.UUIDField(nullable=True),
        'deleted':
        fields.BooleanField(default=False, nullable=True),
        'bootable':
        fields.BooleanField(default=False, nullable=True),
        'multiattach':
        fields.BooleanField(default=False, nullable=True),
        'replication_status':
        fields.StringField(nullable=True),
        'replication_extended_status':
        fields.StringField(nullable=True),
        'replication_driver_data':
        fields.StringField(nullable=True),
        'previous_status':
        fields.StringField(nullable=True),
        'metadata':
        fields.DictOfStringsField(nullable=True),
        'admin_metadata':
        fields.DictOfStringsField(nullable=True),
        'glance_metadata':
        fields.DictOfStringsField(nullable=True),
        'volume_type':
        fields.ObjectField('VolumeType', nullable=True),
        'volume_attachment':
        fields.ObjectField('VolumeAttachmentList', nullable=True),
        'consistencygroup':
        fields.ObjectField('ConsistencyGroup', nullable=True),
        'snapshots':
        fields.ObjectField('SnapshotList', nullable=True),
        'group':
        fields.ObjectField('Group', nullable=True),
        'service_uuid':
        fields.StringField(nullable=True),
        'shared_targets':
        fields.BooleanField(default=True, nullable=True),
    }

    # NOTE(thangp): obj_extra_fields is used to hold properties that are not
    # usually part of the model
    obj_extra_fields = [
        'name', 'name_id', 'volume_metadata', 'volume_admin_metadata',
        'volume_glance_metadata'
    ]

    @classmethod
    def _get_expected_attrs(cls, context, *args, **kwargs):
        expected_attrs = [
            'metadata', 'volume_type', 'volume_type.extra_specs',
            'volume_attachment'
        ]
        if context.is_admin:
            expected_attrs.append('admin_metadata')

        return expected_attrs

    @property
    def name_id(self):
        return self.id if not self._name_id else self._name_id

    @name_id.setter
    def name_id(self, value):
        self._name_id = value

    @property
    def name(self):
        return CONF.volume_name_template % self.name_id

    # TODO(dulek): Three properties below are for compatibility with dict
    # representation of volume. The format there is different (list of
    # SQLAlchemy models) so we need a conversion. Anyway - these should be
    # removed when we stop this class from deriving from DictObjectCompat.
    @property
    def volume_metadata(self):
        md = [MetadataObject(k, v) for k, v in self.metadata.items()]
        return md

    @volume_metadata.setter
    def volume_metadata(self, value):
        md = {d['key']: d['value'] for d in value}
        self.metadata = md

    @property
    def volume_admin_metadata(self):
        md = [MetadataObject(k, v) for k, v in self.admin_metadata.items()]
        return md

    @volume_admin_metadata.setter
    def volume_admin_metadata(self, value):
        md = {d['key']: d['value'] for d in value}
        self.admin_metadata = md

    def admin_metadata_update(self, metadata, delete, add=True, update=True):
        new_metadata = db.volume_admin_metadata_update(self._context, self.id,
                                                       metadata, delete, add,
                                                       update)
        self.admin_metadata = new_metadata
        self._reset_metadata_tracking(fields=('admin_metadata', ))

    @property
    def volume_glance_metadata(self):
        md = [MetadataObject(k, v) for k, v in self.glance_metadata.items()]
        return md

    @volume_glance_metadata.setter
    def volume_glance_metadata(self, value):
        md = {d['key']: d['value'] for d in value}
        self.glance_metadata = md

    def __init__(self, *args, **kwargs):
        super(Volume, self).__init__(*args, **kwargs)

        self._reset_metadata_tracking()

    def obj_reset_changes(self, fields=None):
        super(Volume, self).obj_reset_changes(fields)
        self._reset_metadata_tracking(fields=fields)

    @classmethod
    def _obj_from_primitive(cls, context, objver, primitive):
        # TODO: (Y release) remove next line
        cls._ensure_use_quota_is_set(primitive['versioned_object.data'])
        obj = super(Volume,
                    Volume)._obj_from_primitive(context, objver, primitive)
        obj._reset_metadata_tracking()
        return obj

    def _reset_metadata_tracking(self, fields=None):
        if fields is None or 'metadata' in fields:
            self._orig_metadata = (dict(self.metadata)
                                   if 'metadata' in self else {})
        if fields is None or 'admin_metadata' in fields:
            self._orig_admin_metadata = (dict(self.admin_metadata)
                                         if 'admin_metadata' in self else {})
        if fields is None or 'glance_metadata' in fields:
            self._orig_glance_metadata = (dict(self.glance_metadata)
                                          if 'glance_metadata' in self else {})

    def obj_what_changed(self):
        changes = super(Volume, self).obj_what_changed()
        if 'metadata' in self and self.metadata != self._orig_metadata:
            changes.add('metadata')
        if ('admin_metadata' in self
                and self.admin_metadata != self._orig_admin_metadata):
            changes.add('admin_metadata')
        if ('glance_metadata' in self
                and self.glance_metadata != self._orig_glance_metadata):
            changes.add('glance_metadata')

        return changes

    def obj_make_compatible(self, primitive, target_version):
        """Make a Volume representation compatible with a target version."""
        super(Volume, self).obj_make_compatible(primitive, target_version)
        target_version = versionutils.convert_version_to_tuple(target_version)
        # TODO: (Y release) remove next 2 lines & method if nothing else below
        if target_version < (1, 9):
            primitive.pop('use_quota', None)

    @classmethod
    def _from_db_object(cls, context, volume, db_volume, expected_attrs=None):
        if expected_attrs is None:
            expected_attrs = []
        for name, field in volume.fields.items():
            if name in cls.OPTIONAL_FIELDS:
                continue
            value = db_volume.get(name)
            if isinstance(field, fields.IntegerField):
                value = value or 0
            volume[name] = value

        # Get data from db_volume object that was queried by joined query
        # from DB
        if 'metadata' in expected_attrs:
            metadata = db_volume.get('volume_metadata', [])
            volume.metadata = {item['key']: item['value'] for item in metadata}
        if 'admin_metadata' in expected_attrs:
            metadata = db_volume.get('volume_admin_metadata', [])
            volume.admin_metadata = {
                item['key']: item['value']
                for item in metadata
            }
        if 'glance_metadata' in expected_attrs:
            metadata = db_volume.get('volume_glance_metadata', [])
            volume.glance_metadata = {
                item['key']: item['value']
                for item in metadata
            }
        if 'volume_type' in expected_attrs:
            db_volume_type = db_volume.get('volume_type')
            if db_volume_type:
                vt_expected_attrs = []
                if 'volume_type.extra_specs' in expected_attrs:
                    vt_expected_attrs.append('extra_specs')
                volume.volume_type = objects.VolumeType._from_db_object(
                    context,
                    objects.VolumeType(),
                    db_volume_type,
                    expected_attrs=vt_expected_attrs)
        if 'volume_attachment' in expected_attrs:
            attachments = base.obj_make_list(
                context, objects.VolumeAttachmentList(context),
                objects.VolumeAttachment, db_volume.get('volume_attachment'))
            volume.volume_attachment = attachments
        if volume.consistencygroup_id and 'consistencygroup' in expected_attrs:
            consistencygroup = objects.ConsistencyGroup(context)
            consistencygroup._from_db_object(context, consistencygroup,
                                             db_volume['consistencygroup'])
            volume.consistencygroup = consistencygroup
        if 'snapshots' in expected_attrs:
            snapshots = base.obj_make_list(context,
                                           objects.SnapshotList(context),
                                           objects.Snapshot,
                                           db_volume['snapshots'])
            volume.snapshots = snapshots
        if 'cluster' in expected_attrs:
            db_cluster = db_volume.get('cluster')
            # If this volume doesn't belong to a cluster the cluster field in
            # the ORM instance will have value of None.
            if db_cluster:
                volume.cluster = objects.Cluster(context)
                objects.Cluster._from_db_object(context, volume.cluster,
                                                db_cluster)
            else:
                volume.cluster = None
        if volume.group_id and 'group' in expected_attrs:
            group = objects.Group(context)
            group._from_db_object(context, group, db_volume['group'])
            volume.group = group

        volume._context = context
        volume.obj_reset_changes()
        return volume

    # TODO: (Z release): Remove method and leave the default of False from DB
    @staticmethod
    def _ensure_use_quota_is_set(updates, warning=False):
        if updates.get('use_quota') is None:
            use_quota = not ((updates.get('migration_status')
                              or '').startswith('target:') or
                             (updates.get('admin_metadata')
                              or {}).get('temporary') == 'True')
            if warning and not use_quota:
                LOG.warning('Ooooops, we forgot to set the use_quota field to '
                            'False!!  Fix code here')
            updates['use_quota'] = use_quota

    def create(self):
        if self.obj_attr_is_set('id'):
            raise exception.ObjectActionError(action='create',
                                              reason=_('already created'))
        updates = self.cinder_obj_get_changes()

        if 'consistencygroup' in updates:
            raise exception.ObjectActionError(
                action='create', reason=_('consistencygroup assigned'))
        if 'snapshots' in updates:
            raise exception.ObjectActionError(action='create',
                                              reason=_('snapshots assigned'))
        if 'cluster' in updates:
            raise exception.ObjectActionError(action='create',
                                              reason=_('cluster assigned'))
        if 'group' in updates:
            raise exception.ObjectActionError(action='create',
                                              reason=_('group assigned'))
        if ('volume_type_id' not in updates
                or updates['volume_type_id'] is None):
            updates['volume_type_id'] = (
                volume_types.get_default_volume_type()['id'])

        # TODO: (Y release) Remove this call since we should have already made
        # all methods in Cinder make the call with the right values.
        self._ensure_use_quota_is_set(updates, warning=True)

        db_volume = db.volume_create(self._context, updates)
        expected_attrs = self._get_expected_attrs(self._context)
        self._from_db_object(self._context, self, db_volume, expected_attrs)

    def save(self):
        # TODO: (Y release) Remove this online migration code
        # Pass self directly since it's a CinderObjectDictCompat
        self._ensure_use_quota_is_set(self)

        updates = self.cinder_obj_get_changes()
        if updates:
            # NOTE(xyang): Allow this to pass if 'consistencygroup' is
            # set to None. This is to support backward compatibility.
            # Also remove 'consistencygroup' from updates because
            # consistencygroup is the name of a relationship in the ORM
            # Volume model, so SQLA tries to do some kind of update of
            # the foreign key based on the provided updates if
            # 'consistencygroup' is in updates.
            if updates.pop('consistencygroup', None):
                raise exception.ObjectActionError(
                    action='save', reason=_('consistencygroup changed'))
            if 'group' in updates:
                raise exception.ObjectActionError(action='save',
                                                  reason=_('group changed'))
            if 'glance_metadata' in updates:
                raise exception.ObjectActionError(
                    action='save', reason=_('glance_metadata changed'))
            if 'snapshots' in updates:
                raise exception.ObjectActionError(
                    action='save', reason=_('snapshots changed'))
            if 'cluster' in updates:
                raise exception.ObjectActionError(action='save',
                                                  reason=_('cluster changed'))
            if 'metadata' in updates:
                # Metadata items that are not specified in the
                # self.metadata will be deleted
                metadata = updates.pop('metadata', None)
                self.metadata = db.volume_metadata_update(
                    self._context, self.id, metadata, True)
            if self._context.is_admin and 'admin_metadata' in updates:
                metadata = updates.pop('admin_metadata', None)
                self.admin_metadata = db.volume_admin_metadata_update(
                    self._context, self.id, metadata, True)

            # When we are creating a volume and we change from 'creating'
            # status to 'downloading' status we have to change the worker entry
            # in the DB to reflect this change, otherwise the cleanup will
            # not be performed as it will be mistaken for a volume that has
            # been somehow changed (reset status, forced operation...)
            if updates.get('status') == 'downloading':
                self.set_worker()

            # 'updates' may have shrunk after the metadata keys were popped.
            if updates:
                db.volume_update(self._context, self.id, updates)
            self.obj_reset_changes()

    def destroy(self):
        with self.obj_as_admin():
            updated_values = db.volume_destroy(self._context, self.id)
        self.update(updated_values)
        self.obj_reset_changes(updated_values.keys())

    def obj_load_attr(self, attrname):
        if attrname not in self.OPTIONAL_FIELDS:
            raise exception.ObjectActionError(
                action='obj_load_attr',
                reason=_('attribute %s not lazy-loadable') % attrname)
        if not self._context:
            raise exception.OrphanedObjectError(method='obj_load_attr',
                                                objtype=self.obj_name())

        if attrname == 'metadata':
            self.metadata = db.volume_metadata_get(self._context, self.id)
        elif attrname == 'admin_metadata':
            self.admin_metadata = {}
            if self._context.is_admin:
                self.admin_metadata = db.volume_admin_metadata_get(
                    self._context, self.id)
        elif attrname == 'glance_metadata':
            try:
                # NOTE(dulek): We're using alias here to have conversion from
                # list to dict done there.
                self.volume_glance_metadata = db.volume_glance_metadata_get(
                    self._context, self.id)
            except exception.GlanceMetadataNotFound:
                # NOTE(dulek): DB API raises when volume has no
                # glance_metadata. Silencing this because at this level no
                # metadata is a completely valid result.
                self.glance_metadata = {}
        elif attrname == 'volume_type':
            # If the volume has no volume_type_id, calling
            # VolumeType.get_by_id would raise VolumeTypeNotFound.
            self.volume_type = (objects.VolumeType.get_by_id(
                self._context, self.volume_type_id)
                                if self.volume_type_id else None)
        elif attrname == 'volume_attachment':
            attachments = objects.VolumeAttachmentList.get_all_by_volume_id(
                self._context, self.id)
            self.volume_attachment = attachments
        elif attrname == 'consistencygroup':
            if self.consistencygroup_id is None:
                self.consistencygroup = None
            else:
                consistencygroup = objects.ConsistencyGroup.get_by_id(
                    self._context, self.consistencygroup_id)
                self.consistencygroup = consistencygroup
        elif attrname == 'snapshots':
            self.snapshots = objects.SnapshotList.get_all_for_volume(
                self._context, self.id)
        elif attrname == 'cluster':
            # If this volume doesn't belong to a cluster (cluster_name is
            # empty), then cluster field will be None.
            if self.cluster_name:
                self.cluster = objects.Cluster.get_by_id(
                    self._context, name=self.cluster_name)
            else:
                self.cluster = None
        elif attrname == 'group':
            if self.group_id is None:
                self.group = None
            else:
                group = objects.Group.get_by_id(self._context, self.group_id)
                self.group = group

        self.obj_reset_changes(fields=[attrname])

    def delete_metadata_key(self, key):
        db.volume_metadata_delete(self._context, self.id, key)
        md_was_changed = 'metadata' in self.obj_what_changed()

        del self.metadata[key]
        self._orig_metadata.pop(key, None)

        if not md_was_changed:
            self.obj_reset_changes(['metadata'])

    def finish_volume_migration(self, dest_volume):
        # We swap fields between source (i.e. self) and destination at the
        # end of migration because we want to keep the original volume id
        # in the DB but now pointing to the migrated volume.
        skip = ({
            'id', 'provider_location', 'glance_metadata', 'use_quota',
            'volume_type', 'volume_attachment'
        }
                | set(self.obj_extra_fields))
        for key in set(dest_volume.fields.keys()) - skip:
            # Only swap attributes that are already set.  We do not want to
            # unexpectedly trigger a lazy-load.
            if not dest_volume.obj_attr_is_set(key):
                continue

            value = getattr(dest_volume, key)
            value_to_dst = getattr(self, key)

            # Destination must have a _name_id since the id no longer matches
            # the volume.  If it doesn't have a _name_id we set one.
            if key == '_name_id':
                if not dest_volume._name_id:
                    setattr(dest_volume, key, self.id)
                continue
            elif key == 'migration_status':
                value = None
                value_to_dst = 'deleting'
            elif key == 'display_description':
                value_to_dst = 'migration src for ' + self.id
            elif key == 'status':
                value_to_dst = 'deleting'
            # Because dest_volume will be deleted soon, we can
            # skip to copy volume_type_id and volume_type which
            # are not keys for volume deletion.
            elif key == 'volume_type_id':
                # Initialize volume_type of source volume using
                # new volume_type_id.
                self.update({'volume_type_id': value})
                continue

            setattr(self, key, value)
            setattr(dest_volume, key, value_to_dst)

        self.save()
        dest_volume.save()
        return dest_volume

    def get_latest_snapshot(self):
        """Get volume's latest snapshot"""
        snapshot_db = db.snapshot_get_latest_for_volume(self._context, self.id)
        snapshot = objects.Snapshot(self._context)
        return snapshot._from_db_object(self._context, snapshot, snapshot_db)

    @staticmethod
    def _is_cleanable(status, obj_version):
        # Before 1.6 we didn't have a workers table, so cleanup wasn't
        # supported.
        if obj_version and obj_version < 1.6:
            return False
        return status in ('creating', 'deleting', 'uploading', 'downloading')

    def begin_attach(self, attach_mode):
        attachment = objects.VolumeAttachment(
            context=self._context,
            attach_status=c_fields.VolumeAttachStatus.ATTACHING,
            volume_id=self.id)
        attachment.create()
        with self.obj_as_admin():
            self.admin_metadata['attached_mode'] = attach_mode
            self.save()
        return attachment

    def finish_detach(self, attachment_id):
        with self.obj_as_admin():
            volume_updates, attachment_updates = (db.volume_detached(
                self._context, self.id, attachment_id))
            db.volume_admin_metadata_delete(self._context, self.id,
                                            'attached_mode')
            self.admin_metadata.pop('attached_mode', None)
        # Remove attachment in volume only when this field is loaded.
        if attachment_updates and self.obj_attr_is_set('volume_attachment'):
            for i, attachment in enumerate(self.volume_attachment):
                if attachment.id == attachment_id:
                    del self.volume_attachment.objects[i]
                    break

        self.update(volume_updates)
        self.obj_reset_changes(
            list(volume_updates.keys()) +
            ['volume_attachment', 'admin_metadata'])

    def is_replicated(self):
        return self.volume_type and self.volume_type.is_replicated()

    def is_multiattach(self):
        return self.volume_type and self.volume_type.is_multiattach()

    # Don't add it as a property to avoid having to add it obj_extra_fields,
    # to manager's _VOLUME_CLONE_SKIP_PROPERTIES, etc.
    def is_migration_target(self):
        return (self.migration_status or '').startswith('target:')
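
The _orig_*_metadata bookkeeping is what lets obj_what_changed() notice in-place mutations of the metadata dicts, which save() then routes through db.volume_metadata_update rather than db.volume_update. A minimal sketch, assuming a Cinder context and an existing volume ID (both placeholders):

from cinder import objects

def tag_volume(context, volume_id):
    volume = objects.Volume.get_by_id(context, volume_id)
    volume.metadata['backup_policy'] = 'weekly'  # in-place mutation
    # Detected by comparing against the _orig_metadata snapshot.
    assert 'metadata' in volume.obj_what_changed()
    volume.save()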
Example #5
class BaremetalNode(drydock_provisioner.objects.hostprofile.HostProfile):

    VERSION = '1.0'

    fields = {
        'addressing': ovo_fields.ObjectField('IpAddressAssignmentList'),
        'boot_mac': ovo_fields.StringField(nullable=True),
    }

    # A BaremetalNode is really nothing more than a physical
    # instantiation of a HostProfile, so they both represent
    # the same set of CIs
    def __init__(self, **kwargs):
        super(BaremetalNode, self).__init__(**kwargs)

    # Compile the applied version of this model sourcing referenced
    # data from the passed site design
    def compile_applied_model(self, site_design):
        self.apply_host_profile(site_design)
        self.apply_hardware_profile(site_design)
        self.source = hd_fields.ModelSource.Compiled
        return

    def apply_host_profile(self, site_design):
        self.apply_inheritance(site_design)
        return

    # Translate device aliases to physical selectors and copy
    # other hardware attributes into this object
    def apply_hardware_profile(self, site_design):
        if self.hardware_profile is None:
            raise ValueError("Hardware profile not set")

        hw_profile = site_design.get_hardware_profile(self.hardware_profile)

        for i in getattr(self, 'interfaces', []):
            for s in i.get_hw_slaves():
                selector = hw_profile.resolve_alias("pci", s)
                if selector is None:
                    selector = objects.HardwareDeviceSelector()
                    selector.selector_type = 'name'
                    selector.address = s

                i.add_selector(selector)

        for p in getattr(self, 'partitions', []):
            selector = hw_profile.resolve_alias("scsi", p.get_device())
            if selector is None:
                selector = objects.HardwareDeviceSelector()
                selector.selector_type = 'name'
                selector.address = p.get_device()
            p.set_selector(selector)

        return

    def get_applied_interface(self, iface_name):
        for i in getattr(self, 'interfaces', []):
            if i.get_name() == iface_name:
                return i

        return None

    def get_network_address(self, network_name):
        for a in getattr(self, 'addressing', []):
            if a.network == network_name:
                return a.address

        return None

    def find_fs_block_device(self, fs_mount=None):
        if not fs_mount:
            return (None, None)

        if self.volume_groups is not None:
            for vg in self.volume_groups:
                if vg.logical_volumes is not None:
                    for lv in vg.logical_volumes:
                        if lv.mountpoint is not None and lv.mountpoint == fs_mount:
                            return (vg, lv)
        if self.storage_devices is not None:
            for sd in self.storage_devices:
                if sd.partitions is not None:
                    for p in sd.partitions:
                        if p.mountpoint is not None and p.mountpoint == fs_mount:
                            return (sd, p)
        return (None, None)
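
The resolution order in find_fs_block_device is logical volumes first, then raw partitions, so an LVM-backed mount wins when both exist. A usage sketch; the node and site_design objects are assumed to come from a parsed Drydock site definition:

# node and site_design are hypothetical, produced elsewhere by the parser.
node.compile_applied_model(site_design)
container, device = node.find_fs_block_device(fs_mount='/var/lib/docker')
if container is not None:
    # container is a volume group or storage device; device is the
    # logical volume or partition mounted at the requested path.
    print(container, device)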
Example #6
class Port(base.NeutronDbObject):
    # Version 1.0: Initial version
    # Version 1.1: Add data_plane_status field
    # Version 1.2: Added segment_id to binding_levels
    VERSION = '1.2'

    db_model = models_v2.Port

    fields = {
        'id': common_types.UUIDField(),
        'project_id': obj_fields.StringField(nullable=True),
        'name': obj_fields.StringField(nullable=True),
        'network_id': common_types.UUIDField(),
        'mac_address': common_types.MACAddressField(),
        'admin_state_up': obj_fields.BooleanField(),
        'device_id': obj_fields.StringField(),
        'device_owner': obj_fields.StringField(),
        'status': obj_fields.StringField(),

        'allowed_address_pairs': obj_fields.ListOfObjectsField(
            'AllowedAddressPair', nullable=True
        ),
        'binding': obj_fields.ObjectField(
            'PortBinding', nullable=True
        ),
        'data_plane_status': obj_fields.ObjectField(
            'PortDataPlaneStatus', nullable=True
        ),
        'dhcp_options': obj_fields.ListOfObjectsField(
            'ExtraDhcpOpt', nullable=True
        ),
        'distributed_binding': obj_fields.ObjectField(
            'DistributedPortBinding', nullable=True
        ),
        'dns': obj_fields.ObjectField('PortDNS', nullable=True),
        'fixed_ips': obj_fields.ListOfObjectsField(
            'IPAllocation', nullable=True
        ),
        # TODO(ihrachys): consider converting to boolean
        'security': obj_fields.ObjectField(
            'PortSecurity', nullable=True
        ),
        'security_group_ids': common_types.SetOfUUIDsField(
            nullable=True,
            # TODO(ihrachys): how do we safely pass a mutable default?
            default=None,
        ),
        'qos_policy_id': common_types.UUIDField(nullable=True, default=None),

        'binding_levels': obj_fields.ListOfObjectsField(
            'PortBindingLevel', nullable=True
        ),

        # TODO(ihrachys): consider adding a 'dns_assignment' fully synthetic
        # field in later object iterations
    }

    extra_filter_names = {'security_group_ids'}

    fields_no_update = ['project_id', 'network_id']

    synthetic_fields = [
        'allowed_address_pairs',
        'binding',
        'binding_levels',
        'data_plane_status',
        'dhcp_options',
        'distributed_binding',
        'dns',
        'fixed_ips',
        'qos_policy_id',
        'security',
        'security_group_ids',
    ]

    fields_need_translation = {
        'binding': 'port_binding',
        'dhcp_options': 'dhcp_opts',
        'distributed_binding': 'distributed_port_binding',
        'security': 'port_security',
    }

    def create(self):
        fields = self.obj_get_changes()
        with self.db_context_writer(self.obj_context):
            sg_ids = self.security_group_ids
            if sg_ids is None:
                sg_ids = set()
            qos_policy_id = self.qos_policy_id
            super(Port, self).create()
            if 'security_group_ids' in fields:
                self._attach_security_groups(sg_ids)
            if 'qos_policy_id' in fields:
                self._attach_qos_policy(qos_policy_id)

    def update(self):
        fields = self.obj_get_changes()
        with self.db_context_writer(self.obj_context):
            super(Port, self).update()
            if 'security_group_ids' in fields:
                self._attach_security_groups(fields['security_group_ids'])
            if 'qos_policy_id' in fields:
                self._attach_qos_policy(fields['qos_policy_id'])

    def _attach_qos_policy(self, qos_policy_id):
        binding.QosPolicyPortBinding.delete_objects(
            self.obj_context, port_id=self.id)
        if qos_policy_id:
            port_binding_obj = binding.QosPolicyPortBinding(
                self.obj_context, policy_id=qos_policy_id, port_id=self.id)
            port_binding_obj.create()

        self.qos_policy_id = qos_policy_id
        self.obj_reset_changes(['qos_policy_id'])

    def _attach_security_groups(self, sg_ids):
        # TODO(ihrachys): consider introducing an (internal) object for the
        # binding to decouple database operations a bit more
        obj_db_api.delete_objects(
            SecurityGroupPortBinding, self.obj_context, port_id=self.id)
        if sg_ids:
            for sg_id in sg_ids:
                self._attach_security_group(sg_id)
        self.security_group_ids = sg_ids
        self.obj_reset_changes(['security_group_ids'])

    def _attach_security_group(self, sg_id):
        obj_db_api.create_object(
            SecurityGroupPortBinding, self.obj_context,
            {'port_id': self.id, 'security_group_id': sg_id}
        )

    @classmethod
    def get_objects(cls, context, _pager=None, validate_filters=True,
                    security_group_ids=None, **kwargs):
        if security_group_ids:
            ports_with_sg = cls.get_ports_ids_by_security_groups(
                context, security_group_ids)
            port_ids = kwargs.get("id", [])
            if port_ids:
                kwargs['id'] = list(set(port_ids) & set(ports_with_sg))
            else:
                kwargs['id'] = ports_with_sg
        return super(Port, cls).get_objects(context, _pager, validate_filters,
                                            **kwargs)

    @classmethod
    def modify_fields_to_db(cls, fields):
        result = super(Port, cls).modify_fields_to_db(fields)

        # TODO(rossella_s): get rid of it once we switch the db model to using
        # custom types.
        if 'mac_address' in result:
            result['mac_address'] = cls.filter_to_str(result['mac_address'])

        # convert None to []
        if 'distributed_port_binding' in result:
            result['distributed_port_binding'] = (
                result['distributed_port_binding'] or []
            )
        return result

    @classmethod
    def modify_fields_from_db(cls, db_obj):
        fields = super(Port, cls).modify_fields_from_db(db_obj)

        # TODO(rossella_s): get rid of it once we switch the db model to using
        # custom types.
        if 'mac_address' in fields:
            fields['mac_address'] = utils.AuthenticEUI(fields['mac_address'])

        distributed_port_binding = fields.get('distributed_binding')
        if distributed_port_binding:
            fields['distributed_binding'] = fields['distributed_binding'][0]
        else:
            fields['distributed_binding'] = None
        return fields

    def from_db_object(self, db_obj):
        super(Port, self).from_db_object(db_obj)
        # extract security group bindings
        if db_obj.get('security_groups', []):
            self.security_group_ids = {
                sg.security_group_id
                for sg in db_obj.security_groups
            }
        else:
            self.security_group_ids = set()
        self.obj_reset_changes(['security_group_ids'])

        # extract qos policy binding
        if db_obj.get('qos_policy_binding'):
            self.qos_policy_id = (
                db_obj.qos_policy_binding.policy_id
            )
        else:
            self.qos_policy_id = None
        self.obj_reset_changes(['qos_policy_id'])

    def obj_make_compatible(self, primitive, target_version):
        _target_version = versionutils.convert_version_to_tuple(target_version)
        if _target_version < (1, 1):
            primitive.pop('data_plane_status', None)
        if _target_version < (1, 2):
            binding_levels = primitive.get('binding_levels', [])
            for lvl in binding_levels:
                lvl['versioned_object.version'] = '1.0'
                lvl['versioned_object.data'].pop('segment_id', None)

    @classmethod
    def get_ports_by_router(cls, context, router_id, owner, subnet):
        rport_qry = context.session.query(models_v2.Port).join(
            l3.RouterPort)
        ports = rport_qry.filter(
            l3.RouterPort.router_id == router_id,
            l3.RouterPort.port_type == owner,
            models_v2.Port.network_id == subnet['network_id']
        )
        return [cls._load_object(context, db_obj) for db_obj in ports.all()]

    @classmethod
    def get_ports_ids_by_security_groups(cls, context, security_group_ids):
        query = context.session.query(sg_models.SecurityGroupPortBinding)
        query = query.filter(
            sg_models.SecurityGroupPortBinding.security_group_id.in_(
                security_group_ids))
        return [port_binding['port_id'] for port_binding in query.all()]
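A usage sketch of the security-group filter path above: get_objects() first resolves the port ids bound to the given groups via get_ports_ids_by_security_groups(), intersects them with any explicit 'id' filter, and only then delegates to the generic OVO query. The sketch assumes a populated Neutron database; the context helper is standard neutron-lib, and the UUID is a placeholder.

from neutron_lib import context as n_context

ctx = n_context.get_admin_context()
sg_id = '11111111-2222-3333-4444-555555555555'  # placeholder UUID

ports = Port.get_objects(ctx, security_group_ids=[sg_id])
for port in ports:
    # security_group_ids is rebuilt as a set in from_db_object()
    print(port.id, port.mac_address, sorted(port.security_group_ids))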
Example no. 7
class Backup(base.CinderPersistentObject, base.CinderObject,
             base.CinderObjectDictCompat, base.CinderComparableObject):
    # Version 1.0: Initial version
    # Version 1.1: Add new field num_dependent_backups and extra fields
    #              is_incremental and has_dependent_backups.
    # Version 1.2: Add new field snapshot_id and data_timestamp.
    # Version 1.3: Changed 'status' field to use BackupStatusField
    # Version 1.4: Add restore_volume_id
    # Version 1.5: Add metadata
    # Version 1.6: Add encryption_key_id
    # Version 1.7: Add parent
    VERSION = '1.7'

    OPTIONAL_FIELDS = ('metadata', 'parent')

    # NOTE: When adding a field obj_make_compatible needs to be updated
    fields = {
        'id': fields.UUIDField(),
        'user_id': fields.StringField(),
        'project_id': fields.StringField(),
        'volume_id': fields.UUIDField(),
        'host': fields.StringField(nullable=True),
        'availability_zone': fields.StringField(nullable=True),
        'container': fields.StringField(nullable=True),
        'parent_id': fields.StringField(nullable=True),
        'parent': fields.ObjectField('Backup', nullable=True),
        'status': c_fields.BackupStatusField(nullable=True),
        'fail_reason': fields.StringField(nullable=True),
        'size': fields.IntegerField(nullable=True),
        'display_name': fields.StringField(nullable=True),
        'display_description': fields.StringField(nullable=True),

        # NOTE(dulek): Metadata field is used to store any strings by backup
        # drivers, that's why it can't be DictOfStringsField.
        'service_metadata': fields.StringField(nullable=True),
        'service': fields.StringField(nullable=True),
        'object_count': fields.IntegerField(nullable=True),
        'temp_volume_id': fields.StringField(nullable=True),
        'temp_snapshot_id': fields.StringField(nullable=True),
        'num_dependent_backups': fields.IntegerField(nullable=True),
        'snapshot_id': fields.StringField(nullable=True),
        'data_timestamp': fields.DateTimeField(nullable=True),
        'restore_volume_id': fields.StringField(nullable=True),
        'metadata': fields.DictOfStringsField(nullable=True),
        'encryption_key_id': fields.StringField(nullable=True),
    }

    obj_extra_fields = ['name', 'is_incremental', 'has_dependent_backups']

    def __init__(self, *args, **kwargs):
        super(Backup, self).__init__(*args, **kwargs)
        self._orig_metadata = {}

        self._reset_metadata_tracking()

    def _reset_metadata_tracking(self, fields=None):
        if fields is None or 'metadata' in fields:
            self._orig_metadata = (dict(self.metadata)
                                   if self.obj_attr_is_set('metadata') else {})

    @classmethod
    def _get_expected_attrs(cls, context, *args, **kwargs):
        return 'metadata',

    @property
    def name(self):
        return CONF.backup_name_template % self.id

    @property
    def is_incremental(self) -> bool:
        return bool(self.parent_id)

    @property
    def has_dependent_backups(self) -> bool:
        return bool(self.num_dependent_backups)

    @classmethod
    def _from_db_object(cls,
                        context: context.RequestContext,
                        backup,
                        db_backup,
                        expected_attrs=None) -> 'Backup':
        if expected_attrs is None:
            expected_attrs = []
        for name, field in backup.fields.items():
            if name in cls.OPTIONAL_FIELDS:
                continue
            value = db_backup.get(name)
            if isinstance(field, fields.IntegerField):
                value = value if value is not None else 0
            backup[name] = value

        if 'metadata' in expected_attrs:
            metadata = db_backup.get('backup_metadata')
            if metadata is None:
                raise exception.MetadataAbsent()
            backup.metadata = {item['key']: item['value'] for item in metadata}

        backup._context = context
        backup.obj_reset_changes()
        return backup

    def obj_reset_changes(self, fields=None):
        super(Backup, self).obj_reset_changes(fields)
        self._reset_metadata_tracking(fields=fields)

    def obj_load_attr(self, attrname):
        if attrname not in self.OPTIONAL_FIELDS:
            raise exception.ObjectActionError(
                action='obj_load_attr',
                reason=_('attribute %s not lazy-loadable') % attrname)
        if not self._context:
            raise exception.OrphanedObjectError(method='obj_load_attr',
                                                objtype=self.obj_name())
        if attrname == 'parent':
            if self.parent_id:
                self.parent = self.get_by_id(self._context, self.parent_id)
            else:
                self.parent = None
        self.obj_reset_changes(fields=[attrname])

    def obj_what_changed(self):
        changes = super(Backup, self).obj_what_changed()
        if hasattr(self, 'metadata') and self.metadata != self._orig_metadata:
            changes.add('metadata')

        return changes

    def create(self) -> None:
        if self.obj_attr_is_set('id'):
            raise exception.ObjectActionError(action='create',
                                              reason='already created')
        updates = self.cinder_obj_get_changes()

        db_backup = db.backup_create(self._context, updates)
        self._from_db_object(self._context, self, db_backup)

    def save(self) -> None:
        updates = self.cinder_obj_get_changes()
        if updates:
            if 'metadata' in updates:
                metadata = updates.pop('metadata', None)
                self.metadata = db.backup_metadata_update(
                    self._context, self.id, metadata, True)
            updates.pop('parent', None)
            db.backup_update(self._context, self.id, updates)

        self.obj_reset_changes()

    def destroy(self) -> None:
        with self.obj_as_admin():
            updated_values = db.backup_destroy(self._context, self.id)
        self.update(updated_values)
        self.obj_reset_changes(updated_values.keys())

    @staticmethod
    def decode_record(backup_url) -> dict:
        """Deserialize backup metadata from string into a dictionary.

        :raises InvalidInput:
        """
        try:
            return jsonutils.loads(base64.decode_as_text(backup_url))
        except TypeError:
            msg = _("Can't decode backup record.")
        except ValueError:
            msg = _("Can't parse backup record.")
        raise exception.InvalidInput(reason=msg)

    def encode_record(self, **kwargs) -> str:
        """Serialize backup object, with optional extra info, into a string."""
        # We don't want to export extra fields and we want to force lazy
        # loading, so we can't use dict(self) or self.obj_to_primitive
        record = {
            name: field.to_primitive(self, name, getattr(self, name))
            for name, field in self.fields.items() if name != 'parent'
        }
        # We must update kwargs instead of record to ensure we don't overwrite
        # "real" data from the backup
        kwargs.update(record)
        retval = jsonutils.dump_as_bytes(kwargs)
        return base64.encode_as_text(retval)
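A round-trip sketch for the record helpers above, assuming `backup` is an already-loaded Backup; the driver_info key is illustrative. Because encode_record() applies kwargs.update(record), caller-supplied extras can never overwrite real backup fields.

extra = {'driver_info': {'chunk_size': 65536}}  # illustrative extra data
url = backup.encode_record(**extra)             # base64-encoded JSON text

data = Backup.decode_record(url)
print(data['id'], data['status'], data.get('driver_info'))
# Malformed input is surfaced as exception.InvalidInput rather than a
# bare TypeError/ValueError.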
Example no. 8
class VolumeAttachment(base.CinderPersistentObject, base.CinderObject,
                       base.CinderObjectDictCompat,
                       base.CinderComparableObject):
    # Version 1.0: Initial version
    # Version 1.1: Added volume relationship
    VERSION = '1.1'

    OPTIONAL_FIELDS = ['volume']
    obj_extra_fields = ['project_id', 'volume_host']

    fields = {
        'id': fields.UUIDField(),
        'volume_id': fields.UUIDField(),
        'instance_uuid': fields.UUIDField(nullable=True),
        'attached_host': fields.StringField(nullable=True),
        'mountpoint': fields.StringField(nullable=True),
        'attach_time': fields.DateTimeField(nullable=True),
        'detach_time': fields.DateTimeField(nullable=True),
        'attach_status': c_fields.VolumeAttachStatusField(nullable=True),
        'attach_mode': fields.StringField(nullable=True),
        'volume': fields.ObjectField('Volume', nullable=False),
    }

    @property
    def project_id(self):
        return self.volume.project_id

    @property
    def volume_host(self):
        return self.volume.host

    @classmethod
    def _get_expected_attrs(cls, context, *args, **kwargs):
        return ['volume']

    @classmethod
    def _from_db_object(cls,
                        context,
                        attachment,
                        db_attachment,
                        expected_attrs=None):
        if expected_attrs is None:
            expected_attrs = cls._get_expected_attrs(context)

        for name, field in attachment.fields.items():
            if name in cls.OPTIONAL_FIELDS:
                continue
            value = db_attachment.get(name)
            if isinstance(field, fields.IntegerField):
                value = value or 0
            attachment[name] = value

        if 'volume' in expected_attrs:
            db_volume = db_attachment.get('volume')
            if db_volume:
                attachment.volume = objects.Volume._from_db_object(
                    context, objects.Volume(), db_volume)

        attachment._context = context
        attachment.obj_reset_changes()
        return attachment

    def obj_load_attr(self, attrname):
        if attrname not in self.OPTIONAL_FIELDS:
            raise exception.ObjectActionError(
                action='obj_load_attr',
                reason=_('attribute %s not lazy-loadable') % attrname)
        if not self._context:
            raise exception.OrphanedObjectError(method='obj_load_attr',
                                                objtype=self.obj_name())

        if attrname == 'volume':
            volume = objects.Volume.get_by_id(self._context, self.volume_id)
            self.volume = volume

        self.obj_reset_changes(fields=[attrname])

    def save(self):
        updates = self.cinder_obj_get_changes()
        if updates:
            if 'volume' in updates:
                raise exception.ObjectActionError(action='save',
                                                  reason=_('volume changed'))

            db.volume_attachment_update(self._context, self.id, updates)
            self.obj_reset_changes()

    def finish_attach(self,
                      instance_uuid,
                      host_name,
                      mount_point,
                      attach_mode='rw'):
        with self.obj_as_admin():
            db_volume, updated_values = db.volume_attached(
                self._context, self.id, instance_uuid, host_name, mount_point,
                attach_mode)
        self.update(updated_values)
        self.obj_reset_changes(updated_values.keys())
        return objects.Volume._from_db_object(self._context, objects.Volume(),
                                              db_volume)

    def create(self):
        if self.obj_attr_is_set('id'):
            raise exception.ObjectActionError(action='create',
                                              reason=_('already created'))
        updates = self.cinder_obj_get_changes()
        with self.obj_as_admin():
            db_attachment = db.volume_attach(self._context, updates)
        self._from_db_object(self._context, self, db_attachment)
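A sketch of the attach lifecycle (Cinder context and ids are placeholders): create() inserts the row under admin privileges, and finish_attach() marks it attached while returning the refreshed Volume built from the same db.volume_attached() call.

attachment = VolumeAttachment(context=ctx, volume_id='a-volume-uuid')
attachment.create()

volume = attachment.finish_attach(
    instance_uuid='an-instance-uuid',
    host_name=None,
    mount_point='/dev/vdb',
    attach_mode='rw')
print(volume.status, attachment.attach_status)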
Example no. 9
class WatchRule(base.VersionedObject, base.VersionedObjectDictCompat):

    fields = {
        'id': fields.IntegerField(),
        'name': fields.StringField(nullable=True),
        'rule': heat_fields.JsonField(nullable=True),
        'state': fields.StringField(nullable=True),
        'last_evaluated': fields.DateTimeField(nullable=True),
        'stack_id': fields.StringField(),
        'stack': fields.ObjectField(stack.Stack),
        'watch_data': fields.ListOfObjectsField(watch_data.WatchData),
        'created_at': fields.DateTimeField(read_only=True),
        'updated_at': fields.DateTimeField(nullable=True),
    }

    @staticmethod
    def _from_db_object(context, rule, db_rule):
        for field in rule.fields:
            if field == 'stack':
                rule[field] = stack.Stack._from_db_object(
                    context, stack.Stack(), db_rule[field])
            elif field == 'watch_data':
                rule[field] = watch_data.WatchData.get_all_by_watch_rule_id(
                    context, db_rule['id'])
            else:
                rule[field] = db_rule[field]
        rule._context = context
        rule.obj_reset_changes()
        return rule

    @classmethod
    def get_by_id(cls, context, rule_id):
        db_rule = db_api.watch_rule_get(context, rule_id)
        return cls._from_db_object(context, cls(), db_rule)

    @classmethod
    def get_by_name(cls, context, watch_rule_name):
        db_rule = db_api.watch_rule_get_by_name(context, watch_rule_name)
        return cls._from_db_object(context, cls(), db_rule)

    @classmethod
    def get_all(cls, context):
        return [
            cls._from_db_object(context, cls(), db_rule)
            for db_rule in db_api.watch_rule_get_all(context)
        ]

    @classmethod
    def get_all_by_stack(cls, context, stack_id):
        return [
            cls._from_db_object(context, cls(), db_rule)
            for db_rule in db_api.watch_rule_get_all_by_stack(
                context, stack_id)
        ]

    @classmethod
    def update_by_id(cls, context, watch_id, values):
        db_api.watch_rule_update(context, watch_id, values)

    @classmethod
    def create(cls, context, values):
        return cls._from_db_object(context, cls(),
                                   db_api.watch_rule_create(context, values))

    @classmethod
    def delete(cls, context, watch_id):
        db_api.watch_rule_delete(context, watch_id)
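A sketch of the classmethod-only CRUD surface above (assumes a Heat RequestContext `ctx`; the values dict mirrors the DB columns and is illustrative):

rule = WatchRule.create(ctx, {
    'name': 'cpu-high',
    'rule': {'Statistic': 'Average', 'Threshold': '90'},
    'state': 'NORMAL',
    'stack_id': 'a-stack-uuid',
})
WatchRule.update_by_id(ctx, rule.id, {'state': 'ALARM'})
for r in WatchRule.get_all_by_stack(ctx, 'a-stack-uuid'):
    print(r.name, r.state, r.last_evaluated)
WatchRule.delete(ctx, rule.id)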
Example no. 10
class Snapshot(base.SGServicePersistentObject, base.SGServiceObject,
               base.SGServiceObjectDictCompat):
    # Version 1.0: Initial version
    VERSION = '1.0'

    OPTIONAL_FIELDS = ['volume']

    fields = {
        'id': fields.UUIDField(),
        'user_id': fields.StringField(),
        'project_id': fields.StringField(),
        'host': fields.StringField(nullable=True),
        'status': c_fields.SnapshotStatusField(nullable=True),
        'display_name': fields.StringField(nullable=True),
        'display_description': fields.StringField(nullable=True),
        'checkpoint_id': fields.StringField(nullable=True),
        'destination': fields.StringField(nullable=True),
        'availability_zone': fields.StringField(nullable=True),
        'replication_zone': fields.StringField(nullable=True),
        'volume_id': fields.UUIDField(nullable=True),
        'volume_size': fields.IntegerField(nullable=True),
        'sg_client': fields.StringField(nullable=True),

        'volume': fields.ObjectField('Volume', nullable=True),
    }

    # NOTE(thangp): obj_extra_fields is used to hold properties that are not
    # usually part of the model
    obj_extra_fields = ['name']

    @property
    def name(self):
        if self.display_name:
            return self.display_name
        else:
            return CONF.snapshot_name_template % self.id

    @property
    def volume_name(self):
        return self.volume.name

    @classmethod
    def _from_db_object(cls, context, snapshot, db_snapshot,
                        expected_attrs=None):
        if expected_attrs is None:
            expected_attrs = []
        for name, field in snapshot.fields.items():
            if name in snapshot.OPTIONAL_FIELDS:
                continue
            value = db_snapshot.get(name)
            if isinstance(field, fields.IntegerField):
                value = value if value is not None else 0
            setattr(snapshot, name, value)

        if 'volume' in expected_attrs:
            db_volume = db_snapshot.get('volume', None)
            if db_volume:
                snapshot.volume = objects.Volume._from_db_object(
                    context, objects.Volume(), db_volume)

        snapshot._context = context
        snapshot.obj_reset_changes()
        return snapshot

    def obj_load_attr(self, attrname):
        if attrname not in self.OPTIONAL_FIELDS:
            raise exception.ObjectActionError(
                action='obj_load_attr',
                reason=_('attribute %s not lazy-loadable') % attrname)
        if not self._context:
            raise exception.OrphanedObjectError(method='obj_load_attr',
                                                objtype=self.obj_name())

        if attrname == 'volume':
            volume = objects.Volume.get_by_id(self._context, self.volume_id)
            self.volume = volume

        self.obj_reset_changes(fields=[attrname])

    @base.remotable
    def create(self):
        if self.obj_attr_is_set('id'):
            raise exception.ObjectActionError(action='create',
                                              reason=_('already created'))
        updates = self.sgservice_obj_get_changes()
        db_snapshot = db.snapshot_create(self._context, updates)
        self._from_db_object(self._context, self, db_snapshot)

    @base.remotable
    def save(self):
        updates = self.sgservice_obj_get_changes()
        if updates:
            if 'volume' in updates:
                raise exception.ObjectActionError(action='save',
                                                  reason='volume changed')
            db.snapshot_update(self._context, self.id, updates)
        self.obj_reset_changes()

    @base.remotable
    def destroy(self):
        with self.obj_as_admin():
            updated_values = db.snapshot_destroy(self._context, self.id)
            self.update(updated_values)
            self.obj_reset_changes(updated_values.keys())

    @base.remotable_classmethod
    def get_by_id(cls, context, id):
        db_snapshot = db.snapshot_get(context, id)
        if db_snapshot:
            return cls._from_db_object(context, cls(), db_snapshot)
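'volume' is the only OPTIONAL_FIELDS entry, so _from_db_object() skips it and the first attribute access goes through obj_load_attr(). A sketch of that lazy-load path (context and id are placeholders):

snapshot = Snapshot.get_by_id(ctx, 'a-snapshot-uuid')
print(snapshot.name)       # display_name, or CONF.snapshot_name_template
print(snapshot.volume.id)  # first access lazy-loads objects.Volume
# Accessing any other unset attribute raises ObjectActionError instead
# of silently hitting the database.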
Example no. 11
class ConsistencyGroup(base.CinderPersistentObject, base.CinderObject,
                       base.CinderObjectDictCompat, base.ClusteredObject):
    # Version 1.0: Initial version
    # Version 1.1: Added cgsnapshots and volumes relationships
    # Version 1.2: Changed 'status' field to use ConsistencyGroupStatusField
    # Version 1.3: Added cluster fields
    # Version 1.4: Added from_group
    VERSION = '1.4'

    OPTIONAL_FIELDS = ('cgsnapshots', 'volumes', 'cluster')

    fields = {
        'id': fields.UUIDField(),
        'user_id': fields.StringField(),
        'project_id': fields.StringField(),
        'cluster_name': fields.StringField(nullable=True),
        'cluster': fields.ObjectField('Cluster', nullable=True,
                                      read_only=True),
        'host': fields.StringField(nullable=True),
        'availability_zone': fields.StringField(nullable=True),
        'name': fields.StringField(nullable=True),
        'description': fields.StringField(nullable=True),
        'volume_type_id': fields.StringField(nullable=True),
        'status': c_fields.ConsistencyGroupStatusField(nullable=True),
        'cgsnapshot_id': fields.UUIDField(nullable=True),
        'source_cgid': fields.UUIDField(nullable=True),
        'cgsnapshots': fields.ObjectField('CGSnapshotList', nullable=True),
        'volumes': fields.ObjectField('VolumeList', nullable=True),
    }

    @classmethod
    def _from_db_object(cls, context, consistencygroup, db_consistencygroup,
                        expected_attrs=None):
        if expected_attrs is None:
            expected_attrs = []
        for name, field in consistencygroup.fields.items():
            if name in cls.OPTIONAL_FIELDS:
                continue
            value = db_consistencygroup.get(name)
            setattr(consistencygroup, name, value)

        if 'cgsnapshots' in expected_attrs:
            cgsnapshots = base.obj_make_list(
                context, objects.CGSnapshotList(context),
                objects.CGSnapshot,
                db_consistencygroup['cgsnapshots'])
            consistencygroup.cgsnapshots = cgsnapshots

        if 'volumes' in expected_attrs:
            volumes = base.obj_make_list(
                context, objects.VolumeList(context),
                objects.Volume,
                db_consistencygroup['volumes'])
            consistencygroup.volumes = volumes

        if 'cluster' in expected_attrs:
            db_cluster = db_consistencygroup.get('cluster')
            # If this consistency group doesn't belong to a cluster, the
            # cluster field in the ORM instance will have a value of None.
            if db_cluster:
                consistencygroup.cluster = objects.Cluster(context)
                objects.Cluster._from_db_object(context,
                                                consistencygroup.cluster,
                                                db_cluster)
            else:
                consistencygroup.cluster = None

        consistencygroup._context = context
        consistencygroup.obj_reset_changes()
        return consistencygroup

    def create(self, cg_snap_id=None, cg_id=None):
        """Create a consistency group.

        If cg_snap_id or cg_id are specified then volume_type_id,
        availability_zone, and host will be taken from the source Consistency
        Group.
        """
        if self.obj_attr_is_set('id'):
            raise exception.ObjectActionError(action='create',
                                              reason=_('already_created'))
        updates = self.cinder_obj_get_changes()

        if 'cgsnapshots' in updates:
            raise exception.ObjectActionError(action='create',
                                              reason=_('cgsnapshots assigned'))

        if 'volumes' in updates:
            raise exception.ObjectActionError(action='create',
                                              reason=_('volumes assigned'))

        if 'cluster' in updates:
            raise exception.ObjectActionError(
                action='create', reason=_('cluster assigned'))

        db_consistencygroups = db.consistencygroup_create(self._context,
                                                          updates,
                                                          cg_snap_id,
                                                          cg_id)
        self._from_db_object(self._context, self, db_consistencygroups)

    def from_group(self, group):
        """Convert a generic volume group object to a cg object."""
        self.id = group.id
        self.user_id = group.user_id
        self.project_id = group.project_id
        self.cluster_name = group.cluster_name
        self.host = group.host
        self.availability_zone = group.availability_zone
        self.name = group.name
        self.description = group.description
        self.volume_type_id = ""
        for v_type in group.volume_types:
            self.volume_type_id += v_type.id + ","
        self.status = group.status
        self.cgsnapshot_id = group.group_snapshot_id
        self.source_cgid = group.source_group_id

    def obj_load_attr(self, attrname):
        if attrname not in self.OPTIONAL_FIELDS:
            raise exception.ObjectActionError(
                action='obj_load_attr',
                reason=_('attribute %s not lazy-loadable') % attrname)
        if not self._context:
            raise exception.OrphanedObjectError(method='obj_load_attr',
                                                objtype=self.obj_name())

        if attrname == 'cgsnapshots':
            self.cgsnapshots = objects.CGSnapshotList.get_all_by_group(
                self._context, self.id)

        if attrname == 'volumes':
            self.volumes = objects.VolumeList.get_all_by_group(self._context,
                                                               self.id)

        # If this consistency group doesn't belong to a cluster (cluster_name
        # is empty), then cluster field will be None.
        if attrname == 'cluster':
            if self.cluster_name:
                self.cluster = objects.Cluster.get_by_id(
                    self._context, name=self.cluster_name)
            else:
                self.cluster = None

        self.obj_reset_changes(fields=[attrname])

    def save(self):
        updates = self.cinder_obj_get_changes()
        if updates:
            if 'cgsnapshots' in updates:
                raise exception.ObjectActionError(
                    action='save', reason=_('cgsnapshots changed'))
            if 'volumes' in updates:
                raise exception.ObjectActionError(
                    action='save', reason=_('volumes changed'))
            if 'cluster' in updates:
                raise exception.ObjectActionError(
                    action='save', reason=_('cluster changed'))

            db.consistencygroup_update(self._context, self.id, updates)
            self.obj_reset_changes()

    def destroy(self):
        with self.obj_as_admin():
            updated_values = db.consistencygroup_destroy(self._context,
                                                         self.id)
        self.update(updated_values)
        self.obj_reset_changes(updated_values.keys())
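A sketch of from_group(), which flattens a generic volume group into the legacy consistency-group shape; note that the comma-joined volume_type_id always carries a trailing comma. `group` is assumed to be a loaded objects.Group.

cg = ConsistencyGroup(ctx)
cg.from_group(group)
print(cg.volume_type_id)   # e.g. 'typeA-uuid,typeB-uuid,'
print(cg.cgsnapshot_id)    # mirrored from group.group_snapshot_id
print(cg.source_cgid)      # mirrored from group.source_group_id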
Example no. 12
class Resource(
        base.VersionedObject,
        base.VersionedObjectDictCompat,
        base.ComparableVersionedObject,
):
    fields = {
        'id': fields.IntegerField(),
        'uuid': fields.StringField(),
        'stack_id': fields.StringField(),
        'created_at': fields.DateTimeField(read_only=True),
        'updated_at': fields.DateTimeField(nullable=True),
        'nova_instance': fields.StringField(nullable=True),
        'name': fields.StringField(nullable=True),
        'status': fields.StringField(nullable=True),
        'status_reason': fields.StringField(nullable=True),
        'action': fields.StringField(nullable=True),
        'rsrc_metadata': heat_fields.JsonField(nullable=True),
        'properties_data': heat_fields.JsonField(nullable=True),
        'data': fields.ListOfObjectsField(resource_data.ResourceData,
                                          nullable=True),
        'stack': fields.ObjectField(stack.Stack),
        'engine_id': fields.StringField(nullable=True),
        'atomic_key': fields.IntegerField(nullable=True),
        'current_template_id': fields.IntegerField(),
        'needed_by': heat_fields.ListField(nullable=True, default=None),
        'requires': heat_fields.ListField(nullable=True, default=None),
        'replaces': fields.IntegerField(nullable=True),
        'replaced_by': fields.IntegerField(nullable=True),
    }

    @staticmethod
    def _from_db_object(resource, context, db_resource):
        if db_resource is None:
            return None
        for field in resource.fields:
            if field == 'data':
                # Use a list comprehension so 'data' is a real list on
                # Python 3 (map() would return a lazy iterator here).
                resource['data'] = [
                    resource_data.ResourceData._from_db_object(
                        resource_data.ResourceData(context), resd)
                    for resd in db_resource.data]
            else:
                resource[field] = db_resource[field]
        resource._context = context
        resource.obj_reset_changes()
        return resource

    @classmethod
    def get_obj(cls, context, resource_id):
        resource_db = db_api.resource_get(context, resource_id)
        resource = cls._from_db_object(cls(context), context, resource_db)
        return resource

    @classmethod
    def get_all(cls, context):
        resources_db = db_api.resource_get_all(context)
        resources = [
            (resource_name,
             cls._from_db_object(cls(context), context, resource_db))
            for resource_name, resource_db in resources_db.items()
        ]
        return dict(resources)

    @classmethod
    def create(cls, context, values):
        return db_api.resource_create(context, values)

    @classmethod
    def delete(cls, context, resource_id):
        resource_db = db_api.resource_get(context, resource_id)
        resource_db.delete()

    @classmethod
    def exchange_stacks(cls, context, resource_id1, resource_id2):
        return db_api.resource_exchange_stacks(context, resource_id1,
                                               resource_id2)

    @classmethod
    def get_all_by_stack(cls, context, stack_id):
        resources_db = db_api.resource_get_all_by_stack(context, stack_id)
        resources = [
            (resource_name,
             cls._from_db_object(cls(context), context, resource_db))
            for resource_name, resource_db in resources_db.items()
        ]
        return dict(resources)

    @classmethod
    def get_by_name_and_stack(cls, context, resource_name, stack_id):
        resource_db = db_api.resource_get_by_name_and_stack(
            context, resource_name, stack_id)
        resource = cls._from_db_object(cls(context), context, resource_db)
        return resource

    @classmethod
    def get_by_physical_resource_id(cls, context, physical_resource_id):
        resource_db = db_api.resource_get_by_physical_resource_id(
            context, physical_resource_id)
        resource = cls._from_db_object(cls(context), context, resource_db)
        return resource

    def update_and_save(self, values):
        resource_db = db_api.resource_get(self._context, self.id)
        resource_db.update_and_save(values)
        self._refresh()
        return resource_db

    def _refresh(self):
        return self.__class__._from_db_object(
            self, self._context,
            self.__class__.get_obj(self._context, self.id))

    def refresh(self, attrs=None):
        resource_db = db_api.resource_get(self._context, self.id)
        resource_db.refresh(attrs=attrs)
        return self._refresh()
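A sketch of the dict-shaped accessors above (Heat context and names are placeholders): get_all_by_stack() keys the result by resource name, and update_and_save() re-reads the row afterwards via _refresh() so the in-memory object matches the database.

resources = Resource.get_all_by_stack(ctx, 'a-stack-uuid')
server = resources.get('my_server')
if server is not None:
    server.update_and_save({'status': 'COMPLETE',
                            'status_reason': 'state changed'})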
Example no. 13
class CGSnapshot(base.CinderPersistentObject, base.CinderObject,
                 base.CinderObjectDictCompat):
    VERSION = '1.0'

    # Lazily loaded relationships skipped by _from_db_object().
    OPTIONAL_FIELDS = ('consistencygroup', 'snapshots')

    fields = {
        'id': fields.UUIDField(),
        'consistencygroup_id': fields.UUIDField(nullable=True),
        'project_id': fields.UUIDField(),
        'user_id': fields.UUIDField(),
        'name': fields.StringField(nullable=True),
        'description': fields.StringField(nullable=True),
        'status': fields.StringField(nullable=True),
        'consistencygroup': fields.ObjectField('ConsistencyGroup',
                                               nullable=True),
        'snapshots': fields.ObjectField('SnapshotList', nullable=True),
    }

    @staticmethod
    def _from_db_object(context, cgsnapshot, db_cgsnapshots,
                        expected_attrs=None):
        expected_attrs = expected_attrs or []
        for name, field in cgsnapshot.fields.items():
            if name in cgsnapshot.OPTIONAL_FIELDS:
                continue
            value = db_cgsnapshots.get(name)
            setattr(cgsnapshot, name, value)

        if 'consistencygroup' in expected_attrs:
            consistencygroup = objects.ConsistencyGroup(context)
            consistencygroup._from_db_object(context, consistencygroup,
                                             db_cgsnapshots[
                                                 'consistencygroup'])
            cgsnapshot.consistencygroup = consistencygroup

        if 'snapshots' in expected_attrs:
            snapshots = base.obj_make_list(
                context, objects.SnapshotList(context),
                objects.Snapshot,
                db_cgsnapshots['snapshots'])
            cgsnapshot.snapshots = snapshots

        cgsnapshot._context = context
        cgsnapshot.obj_reset_changes()
        return cgsnapshot

    @base.remotable
    def create(self):
        if self.obj_attr_is_set('id'):
            raise exception.ObjectActionError(action='create',
                                              reason=_('already_created'))
        updates = self.cinder_obj_get_changes()

        if 'consistencygroup' in updates:
            raise exception.ObjectActionError(
                action='create', reason=_('consistencygroup assigned'))

        db_cgsnapshots = db.cgsnapshot_create(self._context, updates)
        self._from_db_object(self._context, self, db_cgsnapshots)

    def obj_load_attr(self, attrname):
        if attrname not in self.OPTIONAL_FIELDS:
            raise exception.ObjectActionError(
                action='obj_load_attr',
                reason=_('attribute %s not lazy-loadable') % attrname)
        if not self._context:
            raise exception.OrphanedObjectError(method='obj_load_attr',
                                                objtype=self.obj_name())

        if attrname == 'consistencygroup':
            self.consistencygroup = objects.ConsistencyGroup.get_by_id(
                self._context, self.consistencygroup_id)

        if attrname == 'snapshots':
            self.snapshots = objects.SnapshotList.get_all_for_cgsnapshot(
                self._context, self.id)

        self.obj_reset_changes(fields=[attrname])

    @base.remotable
    def save(self):
        updates = self.cinder_obj_get_changes()
        if updates:
            if 'consistencygroup' in updates:
                raise exception.ObjectActionError(
                    action='save', reason=_('consistencygroup changed'))
            if 'snapshots' in updates:
                raise exception.ObjectActionError(
                    action='save', reason=_('snapshots changed'))
            db.cgsnapshot_update(self._context, self.id, updates)
            self.obj_reset_changes()

    @base.remotable
    def destroy(self):
        with self.obj_as_admin():
            db.cgsnapshot_destroy(self._context, self.id)
Example no. 14
class VolumeAttachment(base.CinderPersistentObject, base.CinderObject,
                       base.CinderObjectDictCompat,
                       base.CinderComparableObject):
    # Version 1.0: Initial version
    # Version 1.1: Added volume relationship
    # Version 1.2: Added connection_info attribute
    # Version 1.3: Added the connector attribute.
    VERSION = '1.3'

    OPTIONAL_FIELDS = ['volume']
    obj_extra_fields = ['project_id', 'volume_host']

    fields = {
        'id': fields.UUIDField(),
        'volume_id': fields.UUIDField(),
        'instance_uuid': fields.UUIDField(nullable=True),
        'attached_host': fields.StringField(nullable=True),
        'mountpoint': fields.StringField(nullable=True),
        'attach_time': fields.DateTimeField(nullable=True),
        'detach_time': fields.DateTimeField(nullable=True),
        'attach_status': c_fields.VolumeAttachStatusField(nullable=True),
        'attach_mode': fields.StringField(nullable=True),
        'volume': fields.ObjectField('Volume', nullable=False),
        'connection_info': c_fields.DictOfNullableField(nullable=True),
        'connector': c_fields.DictOfNullableField(nullable=True)
    }

    @property
    def project_id(self):
        return self.volume.project_id

    @property
    def volume_host(self):
        return self.volume.host

    @classmethod
    def _get_expected_attrs(cls, context, *args, **kwargs):
        return ['volume']

    def obj_make_compatible(self, primitive, target_version):
        """Make an object representation compatible with target version."""
        super(VolumeAttachment,
              self).obj_make_compatible(primitive, target_version)
        target_version = versionutils.convert_version_to_tuple(target_version)
        if target_version < (1, 3):
            primitive.pop('connector', None)
        if target_version < (1, 2):
            primitive.pop('connection_info', None)

    @classmethod
    def _from_db_object(cls,
                        context,
                        attachment,
                        db_attachment,
                        expected_attrs=None):
        if expected_attrs is None:
            expected_attrs = cls._get_expected_attrs(context)

        for name, field in attachment.fields.items():
            if name in cls.OPTIONAL_FIELDS:
                continue
            value = db_attachment.get(name)
            if isinstance(field, fields.IntegerField):
                value = value or 0
            if name in ('connection_info', 'connector'):
                # Both of these fields are nullable serialized json dicts.
                setattr(attachment, name,
                        jsonutils.loads(value) if value else None)
            else:
                attachment[name] = value
        if 'volume' in expected_attrs:
            db_volume = db_attachment.get('volume')
            if db_volume:
                attachment.volume = objects.Volume._from_db_object(
                    context, objects.Volume(), db_volume)

        attachment._context = context
        attachment.obj_reset_changes()

        # This is an online data migration which we should remove when enough
        # time has passed and we have a blocker schema migration to check to
        # make sure that the attachment_specs table is empty. Operators should
        # run the "cinder-manage db online_data_migrations" CLI to force the
        # migration on-demand.
        connector = db.attachment_specs_get(context, attachment.id)
        if connector:
            # Update ourselves and delete the attachment_specs.
            attachment.connector = connector
            attachment.save()
            # TODO(mriedem): Really need a delete-all method for this.
            for spec_key in connector:
                db.attachment_specs_delete(context, attachment.id, spec_key)

        return attachment

    def obj_load_attr(self, attrname):
        if attrname not in self.OPTIONAL_FIELDS:
            raise exception.ObjectActionError(
                action='obj_load_attr',
                reason=_('attribute %s not lazy-loadable') % attrname)
        if not self._context:
            raise exception.OrphanedObjectError(method='obj_load_attr',
                                                objtype=self.obj_name())

        if attrname == 'volume':
            volume = objects.Volume.get_by_id(self._context, self.volume_id)
            self.volume = volume

        self.obj_reset_changes(fields=[attrname])

    @staticmethod
    def _convert_connection_info_to_db_format(updates):
        properties = updates.pop('connection_info', None)
        if properties is not None:
            updates['connection_info'] = jsonutils.dumps(properties)

    @staticmethod
    def _convert_connector_to_db_format(updates):
        connector = updates.pop('connector', None)
        if connector is not None:
            updates['connector'] = jsonutils.dumps(connector)

    def save(self):
        updates = self.cinder_obj_get_changes()
        if updates:
            if 'connection_info' in updates:
                self._convert_connection_info_to_db_format(updates)
            if 'connector' in updates:
                self._convert_connector_to_db_format(updates)
            if 'volume' in updates:
                raise exception.ObjectActionError(action='save',
                                                  reason=_('volume changed'))

            db.volume_attachment_update(self._context, self.id, updates)
            self.obj_reset_changes()

    def finish_attach(self,
                      instance_uuid,
                      host_name,
                      mount_point,
                      attach_mode='rw'):
        with self.obj_as_admin():
            db_volume, updated_values = db.volume_attached(
                self._context, self.id, instance_uuid, host_name, mount_point,
                attach_mode)
        self.update(updated_values)
        self.obj_reset_changes(updated_values.keys())
        return objects.Volume._from_db_object(self._context, objects.Volume(),
                                              db_volume)

    def create(self):
        if self.obj_attr_is_set('id'):
            raise exception.ObjectActionError(action='create',
                                              reason=_('already created'))
        updates = self.cinder_obj_get_changes()
        if 'connector' in updates:
            self._convert_connector_to_db_format(updates)
        with self.obj_as_admin():
            db_attachment = db.volume_attach(self._context, updates)
        self._from_db_object(self._context, self, db_attachment)

    def destroy(self):
        updated_values = db.attachment_destroy(self._context, self.id)
        self.update(updated_values)
        self.obj_reset_changes(updated_values.keys())
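A sketch of the connector round trip (placeholders throughout, and assuming the usual get_by_id helper on Cinder persistent objects): both connection_info and connector are stored as serialized JSON text, so save() dumps the dicts and _from_db_object() loads them back; re-reading an old-style row also triggers the attachment_specs online migration noted above.

attachment.connector = {
    'initiator': 'iqn.2010-10.org.openstack:demo',  # illustrative value
    'multipath': False,
}
attachment.save()  # dict -> jsonutils.dumps() before the UPDATE

refreshed = VolumeAttachment.get_by_id(ctx, attachment.id)
print(type(refreshed.connector))  # dict again after jsonutils.loads()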
Example no. 15
class TestFoo(TestFakeObject, obj_base.ComparableVersionedObject):
    fields = {
        'name': fields.StringField(),
        'bar': fields.ObjectField('TestBar', nullable=True)
    }
Example no. 16
class Resource(
        heat_base.HeatObject,
        base.VersionedObjectDictCompat,
        base.ComparableVersionedObject,
):
    fields = {
        'id': fields.IntegerField(),
        'uuid': fields.StringField(),
        'stack_id': fields.StringField(),
        'created_at': fields.DateTimeField(read_only=True),
        'updated_at': fields.DateTimeField(nullable=True),
        'physical_resource_id': fields.StringField(nullable=True),
        'name': fields.StringField(nullable=True),
        'status': fields.StringField(nullable=True),
        'status_reason': fields.StringField(nullable=True),
        'action': fields.StringField(nullable=True),
        'attr_data': fields.ObjectField(rpd.ResourcePropertiesData,
                                        nullable=True),
        'attr_data_id': fields.IntegerField(nullable=True),
        'rsrc_metadata': heat_fields.JsonField(nullable=True),
        'data': fields.ListOfObjectsField(resource_data.ResourceData,
                                          nullable=True),
        'rsrc_prop_data_id': fields.ObjectField(
            fields.IntegerField(nullable=True)),
        'engine_id': fields.StringField(nullable=True),
        'atomic_key': fields.IntegerField(nullable=True),
        'current_template_id': fields.IntegerField(),
        'needed_by': heat_fields.ListField(nullable=True, default=None),
        'requires': heat_fields.ListField(nullable=True, default=None),
        'replaces': fields.IntegerField(nullable=True),
        'replaced_by': fields.IntegerField(nullable=True),
        'root_stack_id': fields.StringField(nullable=True),
    }

    @staticmethod
    def _from_db_object(resource, context, db_resource, only_fields=None):
        if db_resource is None:
            return None
        for field in resource.fields:
            if (only_fields is not None and field not in only_fields
                    and field != 'id'):
                continue
            if field == 'data':
                resource['data'] = [
                    resource_data.ResourceData._from_db_object(
                        resource_data.ResourceData(context), resd)
                    for resd in db_resource.data
                ]
            elif field != 'attr_data':
                resource[field] = db_resource[field]

        if db_resource['rsrc_prop_data_id'] is not None:
            if hasattr(db_resource, '__dict__'):
                rpd_obj = db_resource.__dict__.get('rsrc_prop_data')
            else:
                rpd_obj = None
            if rpd_obj is not None:
                # Object is already eager loaded
                rpd_obj = (rpd.ResourcePropertiesData._from_db_object(
                    rpd.ResourcePropertiesData(), context, rpd_obj))
                resource._properties_data = rpd_obj.data
            else:
                resource._properties_data = {}
            if db_resource['properties_data']:
                LOG.error(
                    'Unexpected condition where resource.rsrc_prop_data '
                    'and resource.properties_data are both not null. '
                    'rsrc_prop_data.id: %(rsrc_prop_data_id)s, '
                    'resource id: %(res_id)s', {
                        'rsrc_prop_data_id': db_resource['rsrc_prop_data_id'],
                        'res_id': resource['id']
                    })
        elif db_resource['properties_data']:  # legacy field
            if db_resource['properties_data_encrypted']:
                decrypted_data = crypt.decrypted_dict(
                    db_resource['properties_data'])
                resource._properties_data = decrypted_data
            else:
                resource._properties_data = db_resource['properties_data']
        else:
            resource._properties_data = None

        if db_resource['attr_data'] is not None:
            resource._attr_data = rpd.ResourcePropertiesData._from_db_object(
                rpd.ResourcePropertiesData(context), context,
                db_resource['attr_data']).data
        else:
            resource._attr_data = None

        resource._context = context
        resource.obj_reset_changes()
        return resource

    @property
    def attr_data(self):
        return self._attr_data

    @property
    def properties_data(self):
        if (not self._properties_data and self.rsrc_prop_data_id is not None):
            LOG.info('rsrc_prop_data lazy load')
            rpd_obj = rpd.ResourcePropertiesData.get_by_id(
                self._context, self.rsrc_prop_data_id)
            self._properties_data = rpd_obj.data or {}
        return self._properties_data

    @classmethod
    def get_obj(cls, context, resource_id, refresh=False, fields=None):
        if fields is None or 'data' in fields:
            refresh_data = refresh
        else:
            refresh_data = False
        resource_db = db_api.resource_get(context,
                                          resource_id,
                                          refresh=refresh,
                                          refresh_data=refresh_data)
        return cls._from_db_object(cls(context),
                                   context,
                                   resource_db,
                                   only_fields=fields)

    @classmethod
    def get_all(cls, context):
        resources_db = db_api.resource_get_all(context)
        resources = [(resource_name,
                      cls._from_db_object(cls(context), context, resource_db))
                     for resource_name, resource_db in resources_db.items()]
        return dict(resources)

    @classmethod
    def create(cls, context, values):
        return cls._from_db_object(cls(context), context,
                                   db_api.resource_create(context, values))

    @classmethod
    def replacement(cls,
                    context,
                    existing_res_id,
                    new_res_values,
                    atomic_key=0,
                    expected_engine_id=None):
        replacement = db_api.resource_create_replacement(
            context, existing_res_id, new_res_values, atomic_key,
            expected_engine_id)
        if replacement is None:
            return None
        return cls._from_db_object(cls(context), context, replacement)

    @classmethod
    def delete(cls, context, resource_id):
        db_api.resource_delete(context, resource_id)

    @classmethod
    def attr_data_delete(cls, context, resource_id, attr_id):
        db_api.resource_attr_data_delete(context, resource_id, attr_id)

    @classmethod
    def exchange_stacks(cls, context, resource_id1, resource_id2):
        return db_api.resource_exchange_stacks(context, resource_id1,
                                               resource_id2)

    @classmethod
    def get_all_by_stack(cls, context, stack_id, filters=None):
        cache = context.cache(ResourceCache)
        resources = cache.by_stack_id_name.get(stack_id)
        if resources:
            return dict(resources)
        resources_db = db_api.resource_get_all_by_stack(
            context, stack_id, filters)
        return cls._resources_to_dict(context, resources_db)

    @classmethod
    def _resources_to_dict(cls, context, resources_db):
        resources = [(resource_name,
                      cls._from_db_object(cls(context), context, resource_db))
                     for resource_name, resource_db in resources_db.items()]
        return dict(resources)

    @classmethod
    def get_all_active_by_stack(cls, context, stack_id):
        resources_db = db_api.resource_get_all_active_by_stack(
            context, stack_id)
        resources = [(resource_id,
                      cls._from_db_object(cls(context), context, resource_db))
                     for resource_id, resource_db in resources_db.items()]
        return dict(resources)

    @classmethod
    def get_all_by_root_stack(cls, context, stack_id, filters, cache=False):
        resources_db = db_api.resource_get_all_by_root_stack(
            context, stack_id, filters)
        resources = cls._resources_to_dict(context, resources_db)
        if cache:
            context.cache(ResourceCache).set_by_stack_id(resources)
        return resources

    @classmethod
    def get_all_stack_ids_by_root_stack(cls, context, stack_id):
        resources_db = db_api.resource_get_all_by_root_stack(
            context, stack_id, stack_id_only=True)
        return {db_res.stack_id for db_res in resources_db.values()}

    @classmethod
    def purge_deleted(cls, context, stack_id):
        return db_api.resource_purge_deleted(context, stack_id)

    @classmethod
    def get_by_name_and_stack(cls, context, resource_name, stack_id):
        resource_db = db_api.resource_get_by_name_and_stack(
            context, resource_name, stack_id)
        return cls._from_db_object(cls(context), context, resource_db)

    @classmethod
    def get_all_by_physical_resource_id(cls, context, physical_resource_id):
        matches = db_api.resource_get_all_by_physical_resource_id(
            context, physical_resource_id)
        return [
            cls._from_db_object(cls(context), context, resource_db)
            for resource_db in matches
        ]

    @classmethod
    def update_by_id(cls, context, resource_id, values):
        db_api.resource_update_and_save(context, resource_id, values)

    def update_and_save(self, values):
        db_api.resource_update_and_save(self._context, self.id, values)

    def select_and_update(self, values, expected_engine_id=None, atomic_key=0):
        return db_api.resource_update(self._context,
                                      self.id,
                                      values,
                                      atomic_key=atomic_key,
                                      expected_engine_id=expected_engine_id)

    @classmethod
    def select_and_update_by_id(cls,
                                context,
                                resource_id,
                                values,
                                expected_engine_id=None,
                                atomic_key=0):
        return db_api.resource_update(context,
                                      resource_id,
                                      values,
                                      atomic_key=atomic_key,
                                      expected_engine_id=expected_engine_id)

    @classmethod
    def store_attributes(cls, context, resource_id, atomic_key, attr_data,
                         attr_id):
        attr_id = rpd.ResourcePropertiesData.create_or_update(
            context, attr_data, attr_id).id
        if db_api.resource_attr_id_set(context, resource_id, atomic_key,
                                       attr_id):
            return attr_id
        return None

    def refresh(self):
        resource_db = db_api.resource_get(self._context, self.id, refresh=True)
        return self.__class__._from_db_object(self, self._context, resource_db)

    def convert_to_convergence(self, current_template_id, requires):
        return self.update_and_save({
            'current_template_id': current_template_id,
            'requires': sorted(requires, reverse=True),
        })

    @staticmethod
    def encrypt_properties_data(data):
        if cfg.CONF.encrypt_parameters_and_properties and data:
            result = crypt.encrypted_dict(data)
            return (True, result)
        return (False, data)

    def update_metadata(self, metadata):
        if self.rsrc_metadata != metadata:
            rows_updated = self.select_and_update({'rsrc_metadata': metadata},
                                                  self.engine_id,
                                                  self.atomic_key)
            if not rows_updated:
                action = _('metadata setting for resource %s') % self.name
                raise exception.ConcurrentTransaction(action=action)
            return True
        else:
            return False
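
# A minimal usage sketch (editor's addition, not part of the original
# example). update_metadata() above relies on optimistic locking:
# select_and_update() writes only if the row still carries the expected
# atomic_key, so a ConcurrentTransaction means another engine won the race
# and the resource must be re-read. `resource` is assumed to be a loaded
# Resource instance.
def set_metadata_with_retry(resource, metadata, attempts=3):
    for _ in range(attempts):
        try:
            return resource.update_metadata(metadata)
        except exception.ConcurrentTransaction:
            resource = resource.refresh()  # reload row and atomic_key
    raise exception.ConcurrentTransaction(
        action=_('metadata setting for resource %s') % resource.name)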
Example No. 17
class VolumeMapping(base.ZunPersistentObject, base.ZunObject):
    # Version 1.0: Initial version
    # Version 1.1: Add field "auto_remove"
    # Version 1.2: Add field "host"
    # Version 1.3: Add field "contents"
    VERSION = '1.3'

    fields = {
        'id': fields.IntegerField(),
        'uuid': fields.UUIDField(nullable=False),
        'project_id': fields.StringField(nullable=True),
        'user_id': fields.StringField(nullable=True),
        'volume_id': fields.UUIDField(nullable=True),
        'volume_provider': fields.StringField(nullable=False),
        'container_path': fields.StringField(nullable=True),
        'container_uuid': fields.UUIDField(nullable=True),
        'container': fields.ObjectField('Container', nullable=True),
        'connection_info': fields.SensitiveStringField(nullable=True),
        'auto_remove': fields.BooleanField(nullable=True),
        'host': fields.StringField(nullable=True),
        'contents': fields.SensitiveStringField(nullable=True),
    }

    @staticmethod
    def _from_db_object(volume, db_volume):
        """Converts a database entity to a formal object."""
        for field in volume.fields:
            if field in VOLUME_MAPPING_OPTIONAL_ATTRS:
                continue
            setattr(volume, field, db_volume[field])

        volume.obj_reset_changes()
        return volume

    @staticmethod
    def _from_db_object_list(db_objects, cls, context):
        """Converts a list of database entities to a list of formal objects."""
        return [
            VolumeMapping._from_db_object(cls(context), obj)
            for obj in db_objects
        ]

    @base.remotable_classmethod
    def get_by_uuid(cls, context, uuid):
        """Find a volume mapping based on uuid.

        :param uuid: the uuid of a volume mapping.
        :param context: Security context
        :returns: a :class:`VolumeMapping` object.
        """
        db_volume = dbapi.get_volume_mapping_by_uuid(context, uuid)
        volume = VolumeMapping._from_db_object(cls(context), db_volume)
        return volume

    @base.remotable_classmethod
    def list(cls,
             context,
             limit=None,
             marker=None,
             sort_key=None,
             sort_dir=None,
             filters=None):
        """Return a list of VolumeMapping objects.

        :param context: Security context.
        :param limit: maximum number of resources to return in a single result.
        :param marker: pagination marker for large data sets.
        :param sort_key: column to sort results by.
        :param sort_dir: direction to sort. "asc" or "desc".
        :param filters: filters when list volume mappings.
        :returns: a list of :class:`VolumeMapping` object.

        """
        db_volumes = dbapi.list_volume_mappings(context,
                                                limit=limit,
                                                marker=marker,
                                                sort_key=sort_key,
                                                sort_dir=sort_dir,
                                                filters=filters)
        return VolumeMapping._from_db_object_list(db_volumes, cls, context)

    @base.remotable_classmethod
    def list_by_container(cls, context, container_uuid):
        filters = {'container_uuid': container_uuid}
        db_volumes = dbapi.list_volume_mappings(context, filters=filters)
        return VolumeMapping._from_db_object_list(db_volumes, cls, context)

    @base.remotable_classmethod
    def list_by_volume(cls, context, volume_id):
        filters = {'volume_id': volume_id}
        db_volumes = dbapi.list_volume_mappings(context, filters=filters)
        return VolumeMapping._from_db_object_list(db_volumes, cls, context)

    @base.remotable
    def create(self, context):
        """Create a VolumeMapping record in the DB.

        :param context: Security context. NOTE: This should only
                        be used internally by the indirection_api.
                        Unfortunately, RPC requires context as the first
                        argument, even though we don't use it.
                        A context should be set when instantiating the
                        object.

        """
        if self.obj_attr_is_set('id'):
            raise exception.ObjectActionError(action='create',
                                              reason='already created')
        values = self.obj_get_changes()
        if 'container' in values:
            raise exception.ObjectActionError(action='create',
                                              reason='container assigned')

        db_volume = dbapi.create_volume_mapping(context, values)
        self._from_db_object(self, db_volume)

    @base.remotable
    def destroy(self, context=None):
        """Delete the VolumeMapping from the DB.

        :param context: Security context. NOTE: This should only
                        be used internally by the indirection_api.
                        Unfortunately, RPC requires context as the first
                        argument, even though we don't use it.
                        A context should be set when instantiating the
                        object.
        """
        if not self.obj_attr_is_set('id'):
            raise exception.ObjectActionError(action='destroy',
                                              reason='already destroyed')
        dbapi.destroy_volume_mapping(context, self.uuid)
        delattr(self, 'id')
        self.obj_reset_changes()

    @base.remotable
    def save(self, context=None):
        """Save updates to this VolumeMapping.

        Updates will be made column by column based on the result
        of self.what_changed().

        :param context: Security context. NOTE: This should only
                        be used internally by the indirection_api.
                        Unfortunately, RPC requires context as the first
                        argument, even though we don't use it.
                        A context should be set when instantiating the
                        object.
        """
        updates = self.obj_get_changes()
        if 'container' in updates:
            raise exception.ObjectActionError(action='save',
                                              reason='container changed')
        updates.pop('id', None)
        dbapi.update_volume_mapping(context, self.uuid, updates)

        self.obj_reset_changes()

    @base.remotable
    def refresh(self, context=None):
        """Loads updates for this VolumeMapping.

        Loads a volume mapping with the same uuid from the database and
        checks for updated attributes. Updates are applied from
        the loaded volume mapping column by column, if there are any updates.

        :param context: Security context. NOTE: This should only
                        be used internally by the indirection_api.
                        Unfortunately, RPC requires context as the first
                        argument, even though we don't use it.
                        A context should be set when instantiating the
                        object.
        """
        current = self.__class__.get_by_uuid(self._context, uuid=self.uuid)
        for field in self.fields:
            if (self.obj_attr_is_set(field) and
                    getattr(self, field) != getattr(current, field)):
                setattr(self, field, getattr(current, field))

    def obj_load_attr(self, attrname):
        if attrname not in VOLUME_MAPPING_OPTIONAL_ATTRS:
            raise exception.ObjectActionError(
                action='obj_load_attr',
                reason='attribute %s not lazy-loadable' % attrname)
        if not self._context:
            raise exception.OrphanedObjectError(method='obj_load_attr',
                                                objtype=self.obj_name())

        LOG.debug("Lazy-loading '%(attr)s' on %(name)s uuid %(uuid)s", {
            'attr': attrname,
            'name': self.obj_name(),
            'uuid': self.uuid,
        })
        self.container = container.Container.get_by_uuid(
            self._context, self.container_uuid)
        self.obj_reset_changes(fields=['container'])
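
# A minimal usage sketch (editor's addition): the typical lifecycle of the
# VolumeMapping object above. The context and all field values are
# assumptions for illustration.
vm = VolumeMapping(context, volume_provider='cinder',
                   volume_id='c0ffee00-0000-4000-8000-000000000001')
vm.create(context)   # INSERT; 'id' is now set, so a second create() raises
vm.auto_remove = True
vm.save()            # UPDATE limited to the changed columns
vm.refresh()         # re-read the row and apply any remote updates
vm.destroy()         # DELETE by uuid; clears 'id' again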
Example No. 18
class HostInterface(base.DrydockObject):

    VERSION = '1.0'

    fields = {
        'device_name':
        obj_fields.StringField(),
        'source':
        hd_fields.ModelSourceField(),
        'network_link':
        obj_fields.StringField(nullable=True),
        'hardware_slaves':
        obj_fields.ListOfStringsField(nullable=True),
        'slave_selectors':
        obj_fields.ObjectField('HardwareDeviceSelectorList', nullable=True),
        'networks':
        obj_fields.ListOfStringsField(nullable=True),
        'sriov':
        obj_fields.BooleanField(default=False),
        # SRIOV virtual functions
        'vf_count':
        obj_fields.IntegerField(nullable=True),
        # SRIOV VF trusted mode
        'trustedmode':
        obj_fields.BooleanField(nullable=True),
    }

    def __init__(self, **kwargs):
        super(HostInterface, self).__init__(**kwargs)

    # HostInterface is keyed by device_name
    def get_id(self):
        return self.get_name()

    def get_name(self):
        return self.device_name

    def get_hw_slaves(self):
        return self.hardware_slaves

    def get_slave_selectors(self):
        return self.slave_selectors

    # Return number of slaves for this interface
    def get_slave_count(self):
        return len(self.hardware_slaves)

    # The device attribute may be a hardware alias that translates to a
    # physical device address. If the device attribute does not match an
    # alias, we assume it directly identifies an OS device name. When the
    # apply_hardware_profile method is called on the parent Node of this
    # device, the selector will be decided and applied.

    def add_selector(self, slave_selector):
        if self.slave_selectors is None:
            self.slave_selectors = objects.HardwareDeviceSelectorList()

        self.slave_selectors.append(slave_selector)

    """
    Merge two lists of HostInterface models with child_list taking
    priority when conflicts. If a member of child_list has a device_name
    beginning with '!' it indicates that HostInterface should be
    removed from the merged list
    """

    @staticmethod
    def merge_lists(child_list, parent_list):
        if child_list is None:
            return parent_list

        if parent_list is None:
            return child_list

        effective_list = []

        if len(child_list) == 0 and len(parent_list) > 0:
            for p in parent_list:
                pp = deepcopy(p)
                pp.source = hd_fields.ModelSource.Compiled
                effective_list.append(pp)
        elif len(parent_list) == 0 and len(child_list) > 0:
            for i in child_list:
                if i.get_name().startswith('!'):
                    continue
                else:
                    ii = deepcopy(i)
                    ii.source = hd_fields.ModelSource.Compiled
                    effective_list.append(ii)
        elif len(parent_list) > 0 and len(child_list) > 0:
            parent_interfaces = []
            for i in parent_list:
                parent_name = i.get_name()
                parent_interfaces.append(parent_name)
                add = True
                for j in child_list:
                    if j.get_name() == ("!" + parent_name):
                        add = False
                        break
                    elif j.get_name() == parent_name:
                        m = objects.HostInterface()
                        m.device_name = j.get_name()

                        m.network_link = \
                            objects.Utils.apply_field_inheritance(
                                getattr(j, 'network_link', None),
                                getattr(i, 'network_link', None))

                        m.hardware_slaves = objects.Utils.merge_lists(
                            getattr(j, 'hardware_slaves', []),
                            getattr(i, 'hardware_slaves', []))

                        m.networks = objects.Utils.merge_lists(
                            getattr(j, 'networks', []),
                            getattr(i, 'networks', []))

                        m.source = hd_fields.ModelSource.Compiled

                        effective_list.append(m)
                        add = False
                        break

                if add:
                    ii = deepcopy(i)
                    ii.source = hd_fields.ModelSource.Compiled
                    effective_list.append(ii)

            for j in child_list:
                if (j.device_name not in parent_interfaces
                        and not j.get_name().startswith("!")):
                    jj = deepcopy(j)
                    jj.source = hd_fields.ModelSource.Compiled
                    effective_list.append(jj)

        return effective_list
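
# A minimal sketch (editor's addition) of the merge semantics documented in
# merge_lists() above. make_iface() is a hypothetical helper; only the
# device_name/'!' handling and the list merge come from the code above.
def make_iface(name, networks=None):
    i = objects.HostInterface()
    i.device_name = name
    i.network_link = None
    i.hardware_slaves = []
    i.networks = networks or []
    return i

parent = [make_iface('eth0', ['oob']), make_iface('eth1', ['pxe'])]
child = [make_iface('eth0', ['mgmt']),  # merged with the parent eth0
         make_iface('!eth1')]           # removes the parent eth1
merged = HostInterface.merge_lists(child, parent)
# -> a single 'eth0' entry whose networks combine ['mgmt'] and ['oob'],
#    with source == hd_fields.ModelSource.Compiled; no 'eth1' entry.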
Example No. 19
class Event(
        heat_base.HeatObject,
        base.VersionedObjectDictCompat,
):
    fields = {
        'id': fields.IntegerField(),
        'stack_id': fields.StringField(),
        'uuid': fields.StringField(),
        'resource_action': fields.StringField(nullable=True),
        'resource_status': fields.StringField(nullable=True),
        'resource_name': fields.StringField(nullable=True),
        'physical_resource_id': fields.StringField(nullable=True),
        'resource_status_reason': fields.StringField(nullable=True),
        'resource_type': fields.StringField(nullable=True),
        'rsrc_prop_data': fields.ObjectField(
            rpd.ResourcePropertiesData),
        'created_at': fields.DateTimeField(read_only=True),
        'updated_at': fields.DateTimeField(nullable=True),
    }

    @staticmethod
    def _from_db_object(context, event, db_event):
        for field in event.fields:
            event[field] = db_event[field]
        if db_event['rsrc_prop_data']:
            event['rsrc_prop_data'] = \
                rpd.ResourcePropertiesData._from_db_object(
                    rpd.ResourcePropertiesData(context), context,
                    db_event['rsrc_prop_data'])
            event._resource_properties = event['rsrc_prop_data'].data
        else:
            event._resource_properties = db_event['resource_properties'] or {}
        event._context = context
        event.obj_reset_changes()
        return event

    @property
    def resource_properties(self):
        return self._resource_properties

    @classmethod
    def get_by_id(cls, context, event_id):
        db_event = db_api.event_get(context, event_id)
        return cls._from_db_object(context, cls(context), db_event)

    @classmethod
    def get_all(cls, context):
        return [cls._from_db_object(context, cls(), db_event)
                for db_event in db_api.event_get_all(context)]

    @classmethod
    def get_all_by_tenant(cls, context, **kwargs):
        return [cls._from_db_object(context, cls(), db_event)
                for db_event in db_api.event_get_all_by_tenant(context,
                                                               **kwargs)]

    @classmethod
    def get_all_by_stack(cls, context, stack_id, **kwargs):
        return [cls._from_db_object(context, cls(), db_event)
                for db_event in db_api.event_get_all_by_stack(context,
                                                              stack_id,
                                                              **kwargs)]

    @classmethod
    def count_all_by_stack(cls, context, stack_id):
        return db_api.event_count_all_by_stack(context, stack_id)

    @classmethod
    def create(cls, context, values):
        return cls._from_db_object(context, cls(),
                                   db_api.event_create(context, values))

    def identifier(self, stack_identifier):
        """Return a unique identifier for the event."""

        res_id = identifier.ResourceIdentifier(
            resource_name=self.resource_name, **stack_identifier)

        return identifier.EventIdentifier(event_id=str(self.uuid), **res_id)
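
# A minimal usage sketch (editor's addition): composing an EventIdentifier
# with identifier() above. `event_id` and `stack_identifier` (the dict form
# of a stack identifier: tenant, stack_name, stack_id, path) are assumed.
event = Event.get_by_id(context, event_id)
ev_id = event.identifier(stack_identifier)
# ev_id nests a ResourceIdentifier for event.resource_name and carries the
# event uuid, which can then be used to build links to the event.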
Example No. 20
class HostProfile(base.DrydockPersistentObject, base.DrydockObject):

    VERSION = '1.0'

    fields = {
        'name':
        obj_fields.StringField(nullable=False),
        'site':
        obj_fields.StringField(nullable=False),
        'source':
        hd_fields.ModelSourceField(nullable=False),
        'parent_profile':
        obj_fields.StringField(nullable=True),
        'hardware_profile':
        obj_fields.StringField(nullable=True),
        'oob_type':
        obj_fields.StringField(nullable=True),
        'oob_parameters':
        obj_fields.DictOfStringsField(nullable=True),
        'storage_devices':
        obj_fields.ObjectField('HostStorageDeviceList', nullable=True),
        'volume_groups':
        obj_fields.ObjectField('HostVolumeGroupList', nullable=True),
        'interfaces':
        obj_fields.ObjectField('HostInterfaceList', nullable=True),
        'tags':
        obj_fields.ListOfStringsField(nullable=True),
        'owner_data':
        obj_fields.DictOfStringsField(nullable=True),
        'rack':
        obj_fields.StringField(nullable=True),
        'base_os':
        obj_fields.StringField(nullable=True),
        'image':
        obj_fields.StringField(nullable=True),
        'kernel':
        obj_fields.StringField(nullable=True),
        'kernel_params':
        obj_fields.DictOfStringsField(nullable=True),
        'primary_network':
        obj_fields.StringField(nullable=True),
    }

    def __init__(self, **kwargs):
        super(HostProfile, self).__init__(**kwargs)

    def get_rack(self):
        return self.rack

    # HostProfile is keyed by name
    def get_id(self):
        return self.get_name()

    def get_name(self):
        return self.name

    def has_tag(self, tag):
        return tag in self.tags

    def apply_inheritance(self, site_design):
        # No parent to inherit from, just apply design values
        # and return
        if self.source == hd_fields.ModelSource.Compiled:
            return

        if self.parent_profile is None:
            self.source = hd_fields.ModelSource.Compiled
            return

        parent = site_design.get_host_profile(self.parent_profile)

        if parent is None:
            raise NameError("Cannot find parent profile %s for %s" %
                            (self.parent_profile, self.name))

        parent.apply_inheritance(site_design)

        # First compute inheritance for simple fields
        inheritable_field_list = [
            'hardware_profile', 'oob_type', 'storage_layout',
            'bootdisk_device', 'bootdisk_root_size', 'bootdisk_boot_size',
            'rack', 'base_os', 'image', 'kernel', 'primary_network'
        ]

        # Create applied data from self design values and parent
        # applied values

        for f in inheritable_field_list:
            setattr(
                self, f,
                objects.Utils.apply_field_inheritance(getattr(self, f, None),
                                                      getattr(parent, f,
                                                              None)))

        # Now compute inheritance for complex types
        self.oob_parameters = objects.Utils.merge_dicts(
            self.oob_parameters, parent.oob_parameters)

        self.tags = objects.Utils.merge_lists(self.tags, parent.tags)

        self.owner_data = objects.Utils.merge_dicts(self.owner_data,
                                                    parent.owner_data)

        self.kernel_params = objects.Utils.merge_dicts(self.kernel_params,
                                                       parent.kernel_params)

        self.storage_devices = HostStorageDeviceList.from_basic_list(
            HostStorageDevice.merge_lists(self.storage_devices,
                                          parent.storage_devices))

        self.volume_groups = HostVolumeGroupList.from_basic_list(
            HostVolumeGroup.merge_lists(self.volume_groups,
                                        parent.volume_groups))

        self.interfaces = HostInterfaceList.from_basic_list(
            HostInterface.merge_lists(self.interfaces, parent.interfaces))

        self.source = hd_fields.ModelSource.Compiled

        return
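
# A minimal usage sketch (editor's addition): compiling a profile with
# apply_inheritance() above. The profile name and the site_design object
# are assumptions; the recursive compile order comes from the code above.
profile = site_design.get_host_profile('compute-r01')
profile.apply_inheritance(site_design)   # parents are compiled first
assert profile.source == hd_fields.ModelSource.Compiled
# Scalar fields now hold the nearest non-None value in the parent chain;
# dicts/lists are merged, and the storage, volume-group and interface
# lists go through the merge_lists helpers shown earlier.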
Example No. 21
class Port(base.NeutronDbObject):
    # Version 1.0: Initial version
    VERSION = '1.0'

    db_model = models_v2.Port

    fields = {
        'id': common_types.UUIDField(),
        'project_id': obj_fields.StringField(nullable=True),
        'name': obj_fields.StringField(nullable=True),
        'network_id': common_types.UUIDField(),
        'mac_address': common_types.MACAddressField(),
        'admin_state_up': obj_fields.BooleanField(),
        'device_id': obj_fields.StringField(),
        'device_owner': obj_fields.StringField(),
        'status': obj_fields.StringField(),

        'allowed_address_pairs': obj_fields.ListOfObjectsField(
            'AllowedAddressPair', nullable=True
        ),
        'binding': obj_fields.ObjectField(
            'PortBinding', nullable=True
        ),
        'dhcp_options': obj_fields.ListOfObjectsField(
            'ExtraDhcpOpt', nullable=True
        ),
        'distributed_binding': obj_fields.ObjectField(
            'DistributedPortBinding', nullable=True
        ),
        'dns': obj_fields.ObjectField('PortDNS', nullable=True),
        'fixed_ips': obj_fields.ListOfObjectsField(
            'IPAllocation', nullable=True
        ),
        # TODO(ihrachys): consider converting to boolean
        'security': obj_fields.ObjectField(
            'PortSecurity', nullable=True
        ),
        'security_group_ids': common_types.SetOfUUIDsField(
            nullable=True,
            # TODO(ihrachys): how do we safely pass a mutable default?
            default=None,
        ),
        'qos_policy_id': common_types.UUIDField(nullable=True, default=None),

        'binding_levels': obj_fields.ListOfObjectsField(
            'PortBindingLevel', nullable=True
        ),

        # TODO(ihrachys): consider adding a 'dns_assignment' fully synthetic
        # field in later object iterations
    }

    synthetic_fields = [
        'allowed_address_pairs',
        'binding',
        'binding_levels',
        'dhcp_options',
        'distributed_binding',
        'dns',
        'fixed_ips',
        'qos_policy_id',
        'security',
        'security_group_ids',
    ]

    fields_need_translation = {
        'binding': 'port_binding',
        'dhcp_options': 'dhcp_opts',
        'distributed_binding': 'distributed_port_binding',
        'security': 'port_security',
    }

    def create(self):
        fields = self.obj_get_changes()
        with db_api.autonested_transaction(self.obj_context.session):
            sg_ids = self.security_group_ids
            if sg_ids is None:
                sg_ids = set()
            qos_policy_id = self.qos_policy_id
            super(Port, self).create()
            if 'security_group_ids' in fields:
                self._attach_security_groups(sg_ids)
            if 'qos_policy_id' in fields:
                self._attach_qos_policy(qos_policy_id)

    def update(self):
        fields = self.obj_get_changes()
        with db_api.autonested_transaction(self.obj_context.session):
            super(Port, self).update()
            if 'security_group_ids' in fields:
                self._attach_security_groups(fields['security_group_ids'])
            if 'qos_policy_id' in fields:
                self._attach_qos_policy(fields['qos_policy_id'])

    def _attach_qos_policy(self, qos_policy_id):
        # TODO(ihrachys): introduce an object for the binding to isolate
        # database access in a single place, currently scattered between port
        # and policy objects
        obj_db_api.delete_objects(
            self.obj_context, qos_models.QosPortPolicyBinding, port_id=self.id)
        if qos_policy_id:
            obj_db_api.create_object(
                self.obj_context, qos_models.QosPortPolicyBinding,
                {'port_id': self.id, 'policy_id': qos_policy_id}
            )
        self.qos_policy_id = qos_policy_id
        self.obj_reset_changes(['qos_policy_id'])

    def _attach_security_groups(self, sg_ids):
        # TODO(ihrachys): consider introducing an (internal) object for the
        # binding to decouple database operations a bit more
        obj_db_api.delete_objects(
            self.obj_context, sg_models.SecurityGroupPortBinding,
            port_id=self.id,
        )
        if sg_ids:
            for sg_id in sg_ids:
                self._attach_security_group(sg_id)
        self.security_group_ids = sg_ids
        self.obj_reset_changes(['security_group_ids'])

    def _attach_security_group(self, sg_id):
        obj_db_api.create_object(
            self.obj_context, sg_models.SecurityGroupPortBinding,
            {'port_id': self.id, 'security_group_id': sg_id}
        )

    # TODO(rossella_s): get rid of it once we switch the db model to using
    # custom types.
    @classmethod
    def modify_fields_to_db(cls, fields):
        result = super(Port, cls).modify_fields_to_db(fields)
        if 'mac_address' in result:
            result['mac_address'] = cls.filter_to_str(result['mac_address'])
        return result

    # TODO(rossella_s): get rid of it once we switch the db model to using
    # custom types.
    @classmethod
    def modify_fields_from_db(cls, db_obj):
        fields = super(Port, cls).modify_fields_from_db(db_obj)
        if 'mac_address' in fields:
            fields['mac_address'] = utils.AuthenticEUI(fields['mac_address'])
        distributed_port_binding = fields.get('distributed_binding')
        if distributed_port_binding:
            fields['distributed_binding'] = fields['distributed_binding'][0]
        else:
            fields['distributed_binding'] = None
        return fields

    def from_db_object(self, db_obj):
        super(Port, self).from_db_object(db_obj)
        # extract security group bindings
        if db_obj.get('security_groups', []):
            self.security_group_ids = {
                sg.security_group_id
                for sg in db_obj.security_groups
            }
        else:
            self.security_group_ids = set()
        self.obj_reset_changes(['security_group_ids'])

        # extract qos policy binding
        if db_obj.get('qos_policy_binding'):
            self.qos_policy_id = (
                db_obj.qos_policy_binding.policy_id
            )
        else:
            self.qos_policy_id = None
        self.obj_reset_changes(['qos_policy_id'])
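
# A minimal usage sketch (editor's addition): create() above persists the
# port row and then attaches the synthetic security_group_ids and
# qos_policy_id bindings inside the same transaction. Every identifier
# below is an assumption for illustration.
port = Port(context,
            network_id=net_id,
            mac_address='fa:16:3e:00:00:01',
            admin_state_up=True,
            device_id='', device_owner='', status='DOWN',
            security_group_ids={default_sg_id},
            qos_policy_id=qos_policy_id)
port.create()   # port row + SecurityGroupPortBinding + QosPortPolicyBinding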
Example No. 22
class HostVolumeGroup(base.DrydockObject):
    """Model representing a host volume group."""

    VERSION = '1.0'

    fields = {
        'name': obj_fields.StringField(),
        'vg_uuid': obj_fields.StringField(nullable=True),
        'logical_volumes': obj_fields.ObjectField('HostVolumeList',
                                                  nullable=True),
    }

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.physical_devices = []

    def get_name(self):
        return self.name

    def get_id(self):
        return self.name

    def add_pv(self, pv):
        self.physical_devices.append(pv)

    def is_sys(self):
        """Check if this is the VG for root and/or boot."""
        for lv in getattr(self, 'logical_volumes', None) or []:
            if lv.is_sys():
                return True
        return False

    @staticmethod
    def merge_lists(child_list, parent_list):
        if child_list is None:
            return parent_list

        if parent_list is None:
            return child_list

        effective_list = []

        if len(child_list) == 0 and len(parent_list) > 0:
            for p in parent_list:
                pp = deepcopy(p)
                pp.source = hd_fields.ModelSource.Compiled
                effective_list.append(pp)
        elif len(parent_list) == 0 and len(child_list) > 0:
            for i in child_list:
                if i.get_name().startswith('!'):
                    continue
                else:
                    ii = deepcopy(i)
                    ii.source = hd_fields.ModelSource.Compiled
                    effective_list.append(ii)
        elif len(parent_list) > 0 and len(child_list) > 0:
            parent_devs = []
            for i in parent_list:
                parent_name = i.get_name()
                parent_devs.append(parent_name)
                add = True
                for j in child_list:
                    if j.get_name() == ("!" + parent_name):
                        add = False
                        break
                    elif j.get_name() == parent_name:
                        p = objects.HostVolumeGroup()
                        p.name = j.get_name()

                        inheritable_field_list = ['vg_uuid']

                        for f in inheritable_field_list:
                            setattr(
                                p, f,
                                objects.Utils.apply_field_inheritance(
                                    getattr(j, f, None), getattr(i, f, None)))

                        p.logical_volumes = HostVolumeList.from_basic_list(
                            HostVolume.merge_lists(
                                getattr(j, 'logical_volumes', None),
                                getattr(i, 'logical_volumes', None)))

                        add = False
                        p.source = hd_fields.ModelSource.Compiled
                        effective_list.append(p)
                if add:
                    ii = deepcopy(i)
                    ii.source = hd_fields.ModelSource.Compiled
                    effective_list.append(ii)

            for j in child_list:
                if (j.get_name() not in parent_devs
                        and not j.get_name().startswith("!")):
                    jj = deepcopy(j)
                    jj.source = hd_fields.ModelSource.Compiled
                    effective_list.append(jj)

        return effective_list
Example No. 23
class Cluster(base.CinderPersistentObject, base.CinderObject,
              base.CinderComparableObject):
    """Cluster Versioned Object.

    Method get_by_id supports as additional named arguments:
        - get_services: If we want to load all services from this cluster.
        - services_summary: If we want to load num_nodes and num_down_nodes
                            fields.
        - is_up: Boolean value to filter based on the cluster's up status.
        - read_deleted: Filtering based on delete status. Default value "no".
        - Any other cluster field will be used as a filter.
    """
    # Version 1.0: Initial version
    VERSION = '1.0'
    OPTIONAL_FIELDS = ('num_hosts', 'num_down_hosts', 'services')

    # NOTE(geguileo): We don't want to expose race_preventer field at the OVO
    # layer since it is only meant for the DB layer internal mechanism to
    # prevent races.
    fields = {
        'id':
        fields.IntegerField(),
        'name':
        fields.StringField(nullable=False),
        'binary':
        fields.StringField(nullable=False),
        'disabled':
        fields.BooleanField(default=False, nullable=True),
        'disabled_reason':
        fields.StringField(nullable=True),
        'num_hosts':
        fields.IntegerField(default=0, read_only=True),
        'num_down_hosts':
        fields.IntegerField(default=0, read_only=True),
        'last_heartbeat':
        fields.DateTimeField(nullable=True, read_only=True),
        'services':
        fields.ObjectField('ServiceList', nullable=True, read_only=True),
    }

    @classmethod
    def _get_expected_attrs(cls, context, *args, **kwargs):
        """Return expected attributes when getting a cluster.

        Expected attributes depend on whether we are retrieving all related
        services as well as if we are getting the services summary.
        """
        expected_attrs = []
        if kwargs.get('get_services'):
            expected_attrs.append('services')
        if kwargs.get('services_summary'):
            expected_attrs.extend(('num_hosts', 'num_down_hosts'))
        return expected_attrs

    @staticmethod
    def _from_db_object(context, cluster, db_cluster, expected_attrs=None):
        """Fill cluster OVO fields from cluster ORM instance."""
        expected_attrs = expected_attrs or tuple()
        for name, field in cluster.fields.items():
            # The only field that cannot be assigned using setattr is
            # services, because it is an ObjectField. So we don't assign
            # the value if it's a non-expected optional field or if it's
            # the services field.
            if ((name in Cluster.OPTIONAL_FIELDS
                 and name not in expected_attrs) or name == 'services'):
                continue
            value = getattr(db_cluster, name)
            setattr(cluster, name, value)

        cluster._context = context
        if 'services' in expected_attrs:
            cluster.services = base.obj_make_list(context,
                                                  objects.ServiceList(context),
                                                  objects.Service,
                                                  db_cluster.services)

        cluster.obj_reset_changes()
        return cluster

    def obj_load_attr(self, attrname):
        """Lazy load services attribute."""
        # NOTE(geguileo): We only allow lazy loading services to raise
        # awareness of the high cost of lazy loading num_hosts and
        # num_down_hosts, so if we are going to need this information we should
        # be certain we really need it and it should loaded when retrieving the
        # data from the DB the first time we read the OVO.
        if attrname != 'services':
            raise exception.ObjectActionError(
                action='obj_load_attr',
                reason=_('attribute %s not lazy-loadable') % attrname)
        if not self._context:
            raise exception.OrphanedObjectError(method='obj_load_attr',
                                                objtype=self.obj_name())

        self.services = objects.ServiceList.get_all(
            self._context, {'cluster_name': self.name})

        self.obj_reset_changes(fields=('services', ))

    def create(self):
        if self.obj_attr_is_set('id'):
            raise exception.ObjectActionError(action='create',
                                              reason=_('already created'))
        updates = self.cinder_obj_get_changes()
        if updates:
            for field in self.OPTIONAL_FIELDS:
                if field in updates:
                    raise exception.ObjectActionError(action='create',
                                                      reason=_('%s assigned') %
                                                      field)

        db_cluster = db.cluster_create(self._context, updates)
        self._from_db_object(self._context, self, db_cluster)

    def save(self):
        updates = self.cinder_obj_get_changes()
        if updates:
            for field in self.OPTIONAL_FIELDS:
                if field in updates:
                    raise exception.ObjectActionError(action='save',
                                                      reason=_('%s changed') %
                                                      field)
            db.cluster_update(self._context, self.id, updates)
            self.obj_reset_changes()

    def destroy(self):
        with self.obj_as_admin():
            updated_values = db.cluster_destroy(self._context, self.id)
        for field, value in updated_values.items():
            setattr(self, field, value)
        self.obj_reset_changes(updated_values.keys())

    def is_up(self):
        return (self.last_heartbeat
                and self.last_heartbeat >= utils.service_expired_time(True))
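
# A minimal usage sketch (editor's addition) for the get_by_id() keyword
# arguments described in the class docstring above; names are assumptions.
cluster = objects.Cluster.get_by_id(context, cluster_id,
                                    services_summary=True)
print(cluster.num_hosts, cluster.num_down_hosts)  # loaded eagerly

# Without get_services=True, the first touch of 'services' goes through
# obj_load_attr() above and issues a separate ServiceList query:
for service in cluster.services:
    print(service.host)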
Example No. 24
class HostStorageDevice(base.DrydockObject):
    """Model representing a host physical storage device."""

    VERSION = '1.0'

    fields = {
        'name': obj_fields.StringField(),
        'volume_group': obj_fields.StringField(nullable=True),
        'labels': obj_fields.DictOfStringsField(nullable=True),
        'partitions': obj_fields.ObjectField('HostPartitionList',
                                             nullable=True),
    }

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.physical_devices = []

    def get_name(self):
        return self.name

    def get_id(self):
        return self.name

    def add_partition(self, partition):
        self.partitions.append(partition)

    @staticmethod
    def merge_lists(child_list, parent_list):
        if child_list is None:
            return parent_list

        if parent_list is None:
            return child_list

        effective_list = []

        if len(child_list) == 0 and len(parent_list) > 0:
            for p in parent_list:
                pp = deepcopy(p)
                pp.source = hd_fields.ModelSource.Compiled
                effective_list.append(pp)
        elif len(parent_list) == 0 and len(child_list) > 0:
            for i in child_list:
                if i.get_name().startswith('!'):
                    continue
                else:
                    ii = deepcopy(i)
                    ii.source = hd_fields.ModelSource.Compiled
                    effective_list.append(ii)
        elif len(parent_list) > 0 and len(child_list) > 0:
            parent_devs = []
            for i in parent_list:
                parent_name = i.get_name()
                parent_devs.append(parent_name)
                add = True
                for j in child_list:
                    if j.get_name() == ("!" + parent_name):
                        add = False
                        break
                    elif j.get_name() == parent_name:
                        p = objects.HostStorageDevice()
                        p.name = j.get_name()

                        inherit_field_list = ['volume_group']

                        for f in inherit_field_list:
                            setattr(
                                p, f,
                                objects.Utils.apply_field_inheritance(
                                    getattr(j, f, None), getattr(i, f, None)))

                        p.labels = objects.Utils.merge_dicts(
                            getattr(j, 'labels', None),
                            getattr(i, 'labels', None))
                        p.partitions = HostPartitionList.from_basic_list(
                            HostPartition.merge_lists(
                                getattr(j, 'partitions', None),
                                getattr(i, 'partitions', None)))

                        add = False
                        p.source = hd_fields.ModelSource.Compiled
                        effective_list.append(p)
                if add:
                    ii = deepcopy(i)
                    ii.source = hd_fields.ModelSource.Compiled
                    effective_list.append(ii)

            for j in child_list:
                if (j.get_name() not in parent_devs
                        and not j.get_name().startswith("!")):
                    jj = deepcopy(j)
                    jj.source = hd_fields.ModelSource.Compiled
                    effective_list.append(jj)

        return effective_list
Example No. 25
class GroupSnapshot(base.CinderPersistentObject, base.CinderObject,
                    base.CinderObjectDictCompat):
    VERSION = '1.0'

    OPTIONAL_FIELDS = ['group', 'snapshots']

    fields = {
        'id': fields.UUIDField(),
        'group_id': fields.UUIDField(nullable=False),
        'project_id': fields.StringField(nullable=True),
        'user_id': fields.StringField(nullable=True),
        'name': fields.StringField(nullable=True),
        'description': fields.StringField(nullable=True),
        'status': fields.StringField(nullable=True),
        'group_type_id': fields.UUIDField(nullable=True),
        'group': fields.ObjectField('Group', nullable=True),
        'snapshots': fields.ObjectField('SnapshotList', nullable=True),
    }

    @property
    def service_topic_queue(self):
        return self.group.service_topic_queue

    @classmethod
    def _from_db_object(cls,
                        context,
                        group_snapshot,
                        db_group_snapshots,
                        expected_attrs=None):
        expected_attrs = expected_attrs or []
        for name, field in group_snapshot.fields.items():
            if name in cls.OPTIONAL_FIELDS:
                continue
            value = db_group_snapshots.get(name)
            setattr(group_snapshot, name, value)

        if 'group' in expected_attrs:
            group = objects.Group(context)
            group._from_db_object(context, group, db_group_snapshots['group'])
            group_snapshot.group = group

        if 'snapshots' in expected_attrs:
            snapshots = base.obj_make_list(context,
                                           objects.SnapshotList(context),
                                           objects.Snapshot,
                                           db_group_snapshots['snapshots'])
            group_snapshot.snapshots = snapshots

        group_snapshot._context = context
        group_snapshot.obj_reset_changes()
        return group_snapshot

    def create(self):
        if self.obj_attr_is_set('id'):
            raise exception.ObjectActionError(action='create',
                                              reason=_('already created'))
        updates = self.cinder_obj_get_changes()

        if 'group' in updates:
            raise exception.ObjectActionError(action='create',
                                              reason=_('group assigned'))

        db_group_snapshots = db.group_snapshot_create(self._context, updates)
        self._from_db_object(self._context, self, db_group_snapshots)

    def obj_load_attr(self, attrname):
        if attrname not in self.OPTIONAL_FIELDS:
            raise exception.ObjectActionError(
                action='obj_load_attr',
                reason=_('attribute %s not lazy-loadable') % attrname)
        if not self._context:
            raise exception.OrphanedObjectError(method='obj_load_attr',
                                                objtype=self.obj_name())

        if attrname == 'group':
            self.group = objects.Group.get_by_id(self._context, self.group_id)

        if attrname == 'snapshots':
            self.snapshots = objects.SnapshotList.get_all_for_group_snapshot(
                self._context, self.id)

        self.obj_reset_changes(fields=[attrname])

    def save(self):
        updates = self.cinder_obj_get_changes()
        if updates:
            if 'group' in updates:
                raise exception.ObjectActionError(action='save',
                                                  reason=_('group changed'))
            if 'snapshots' in updates:
                raise exception.ObjectActionError(
                    action='save', reason=_('snapshots changed'))
            db.group_snapshot_update(self._context, self.id, updates)
            self.obj_reset_changes()

    def destroy(self):
        with self.obj_as_admin():
            updated_values = db.group_snapshot_destroy(self._context, self.id)
        self.update(updated_values)
        self.obj_reset_changes(updated_values.keys())
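
# A minimal usage sketch (editor's addition): 'group' and 'snapshots' are
# lazy-loaded by obj_load_attr() above on first access, so neither
# relationship is queried until it is used. get_by_id() is assumed to come
# from the Cinder persistent-object base class.
gs = objects.GroupSnapshot.get_by_id(context, group_snapshot_id)
gs.group      # triggers objects.Group.get_by_id(context, gs.group_id)
gs.snapshots  # triggers SnapshotList.get_all_for_group_snapshot(...)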
Example No. 26
class ConsistencyGroup(base.CinderPersistentObject, base.CinderObject,
                       base.CinderObjectDictCompat):
    # Version 1.0: Initial version
    # Version 1.1: Added cgsnapshots and volumes relationships
    # Version 1.2: Changed 'status' field to use ConsistencyGroupStatusField
    VERSION = '1.2'

    fields = {
        'id': fields.UUIDField(),
        'user_id': fields.StringField(),
        'project_id': fields.StringField(),
        'host': fields.StringField(nullable=True),
        'availability_zone': fields.StringField(nullable=True),
        'name': fields.StringField(nullable=True),
        'description': fields.StringField(nullable=True),
        'volume_type_id': fields.StringField(nullable=True),
        'status': c_fields.ConsistencyGroupStatusField(nullable=True),
        'cgsnapshot_id': fields.UUIDField(nullable=True),
        'source_cgid': fields.UUIDField(nullable=True),
        'cgsnapshots': fields.ObjectField('CGSnapshotList', nullable=True),
        'volumes': fields.ObjectField('VolumeList', nullable=True),
    }

    @staticmethod
    def _from_db_object(context, consistencygroup, db_consistencygroup,
                        expected_attrs=None):
        if expected_attrs is None:
            expected_attrs = []
        for name, field in consistencygroup.fields.items():
            if name in OPTIONAL_FIELDS:
                continue
            value = db_consistencygroup.get(name)
            setattr(consistencygroup, name, value)

        if 'cgsnapshots' in expected_attrs:
            cgsnapshots = base.obj_make_list(
                context, objects.CGSnapshotsList(context),
                objects.CGSnapshot,
                db_consistencygroup['cgsnapshots'])
            consistencygroup.cgsnapshots = cgsnapshots

        if 'volumes' in expected_attrs:
            volumes = base.obj_make_list(
                context, objects.VolumeList(context),
                objects.Volume,
                db_consistencygroup['volumes'])
            consistencygroup.volumes = volumes

        consistencygroup._context = context
        consistencygroup.obj_reset_changes()
        return consistencygroup

    @base.remotable
    def create(self):
        if self.obj_attr_is_set('id'):
            raise exception.ObjectActionError(action='create',
                                              reason=_('already created'))
        updates = self.cinder_obj_get_changes()

        if 'cgsnapshots' in updates:
            raise exception.ObjectActionError(action='create',
                                              reason=_('cgsnapshots assigned'))

        if 'volumes' in updates:
            raise exception.ObjectActionError(action='create',
                                              reason=_('volumes assigned'))

        db_consistencygroups = db.consistencygroup_create(self._context,
                                                          updates)
        self._from_db_object(self._context, self, db_consistencygroups)

    def obj_load_attr(self, attrname):
        if attrname not in OPTIONAL_FIELDS:
            raise exception.ObjectActionError(
                action='obj_load_attr',
                reason=_('attribute %s not lazy-loadable') % attrname)
        if not self._context:
            raise exception.OrphanedObjectError(method='obj_load_attr',
                                                objtype=self.obj_name())

        if attrname == 'cgsnapshots':
            self.cgsnapshots = objects.CGSnapshotList.get_all_by_group(
                self._context, self.id)

        if attrname == 'volumes':
            self.volumes = objects.VolumeList.get_all_by_group(self._context,
                                                               self.id)

        self.obj_reset_changes(fields=[attrname])

    @base.remotable
    def save(self):
        updates = self.cinder_obj_get_changes()
        if updates:
            if 'cgsnapshots' in updates:
                raise exception.ObjectActionError(
                    action='save', reason=_('cgsnapshots changed'))
            if 'volumes' in updates:
                raise exception.ObjectActionError(
                    action='save', reason=_('volumes changed'))

            db.consistencygroup_update(self._context, self.id, updates)
            self.obj_reset_changes()

    @base.remotable
    def destroy(self):
        with self.obj_as_admin():
            db.consistencygroup_destroy(self._context, self.id)
Example No. 27
class ContainerBase(base.ZunPersistentObject, base.ZunObject):

    fields = {
        'id': fields.IntegerField(),
        'container_id': fields.StringField(nullable=True),
        'uuid': fields.UUIDField(nullable=True),
        'name': fields.StringField(nullable=True),
        'project_id': fields.StringField(nullable=True),
        'user_id': fields.StringField(nullable=True),
        'image': fields.StringField(nullable=True),
        'cpu': fields.FloatField(nullable=True),
        'cpu_policy': fields.StringField(nullable=True),
        'cpuset': fields.ObjectField("Cpuset", nullable=True),
        'memory': fields.StringField(nullable=True),
        'command': fields.ListOfStringsField(nullable=True),
        'status': z_fields.ContainerStatusField(nullable=True),
        'status_reason': fields.StringField(nullable=True),
        'task_state': z_fields.TaskStateField(nullable=True),
        'environment': fields.DictOfStringsField(nullable=True),
        'workdir': fields.StringField(nullable=True),
        'auto_remove': fields.BooleanField(nullable=True),
        'ports': z_fields.ListOfIntegersField(nullable=True),
        'hostname': fields.StringField(nullable=True),
        'labels': fields.DictOfStringsField(nullable=True),
        'meta': fields.DictOfStringsField(nullable=True),
        'addresses': z_fields.JsonField(nullable=True),
        'image_pull_policy': fields.StringField(nullable=True),
        'host': fields.StringField(nullable=True),
        'restart_policy': fields.DictOfStringsField(nullable=True),
        'status_detail': fields.StringField(nullable=True),
        'interactive': fields.BooleanField(nullable=True),
        'image_driver': fields.StringField(nullable=True),
        'websocket_url': fields.StringField(nullable=True),
        'websocket_token': fields.StringField(nullable=True),
        'security_groups': fields.ListOfStringsField(nullable=True),
        'runtime': fields.StringField(nullable=True),
        'pci_devices': fields.ListOfObjectsField('PciDevice',
                                                 nullable=True),
        'disk': fields.IntegerField(nullable=True),
        'auto_heal': fields.BooleanField(nullable=True),
        'started_at': fields.DateTimeField(tzinfo_aware=False, nullable=True),
        'exposed_ports': z_fields.JsonField(nullable=True),
        'exec_instances': fields.ListOfObjectsField('ExecInstance',
                                                    nullable=True),
        'privileged': fields.BooleanField(nullable=True),
        'healthcheck': z_fields.JsonField(nullable=True),
        'registry_id': fields.IntegerField(nullable=True),
        'registry': fields.ObjectField("Registry", nullable=True),
    }

    # should be redefined in subclasses
    container_type = None

    @staticmethod
    def _from_db_object(container, db_container):
        """Converts a database entity to a formal object."""
        for field in container.fields:
            if field in ['pci_devices', 'exec_instances', 'registry',
                         'containers', 'init_containers']:
                continue
            if field == 'cpuset':
                container.cpuset = Cpuset._from_dict(
                    db_container['cpuset'])
                continue
            setattr(container, field, db_container[field])

        container.obj_reset_changes()
        return container

    @staticmethod
    def _from_db_object_list(db_objects, cls, context):
        """Converts a list of database entities to a list of formal objects."""
        return [cls._from_db_object(cls(context), obj)
                for obj in db_objects]

    @base.remotable_classmethod
    def get_by_uuid(cls, context, uuid):
        """Find a container based on uuid and return a :class:`Container` object.

        :param uuid: the uuid of a container.
        :param context: Security context
        :returns: a :class:`Container` object.
        """
        db_container = dbapi.get_container_by_uuid(context, cls.container_type,
                                                   uuid)
        container = cls._from_db_object(cls(context), db_container)
        return container

    @base.remotable_classmethod
    def get_by_name(cls, context, name):
        """Find a container based on name and return a Container object.

        :param name: the logical name of a container.
        :param context: Security context
        :returns: a :class:`Container` object.
        """
        db_container = dbapi.get_container_by_name(context, cls.container_type,
                                                   name)
        container = cls._from_db_object(cls(context), db_container)
        return container

    @staticmethod
    def get_container_any_type(context, uuid):
        """Find a container of any type based on uuid.

        :param uuid: the uuid of a container.
        :param context: Security context
        :returns: a :class:`ContainerBase` object.
        """
        db_container = dbapi.get_container_by_uuid(context, consts.TYPE_ANY,
                                                   uuid)
        type = db_container['container_type']
        if type == consts.TYPE_CONTAINER:
            container_cls = Container
        elif type == consts.TYPE_CAPSULE:
            container_cls = Capsule
        elif type == consts.TYPE_CAPSULE_CONTAINER:
            container_cls = CapsuleContainer
        elif type == consts.TYPE_CAPSULE_INIT_CONTAINER:
            container_cls = CapsuleInitContainer
        else:
            raise exception.ZunException(_('Unknown container type: %s') %
                                         type)

        obj = container_cls(context)
        container = container_cls._from_db_object(obj, db_container)
        return container

    @base.remotable_classmethod
    def list(cls, context, limit=None, marker=None,
             sort_key=None, sort_dir=None, filters=None):
        """Return a list of Container objects.

        :param context: Security context.
        :param limit: maximum number of resources to return in a single result.
        :param marker: pagination marker for large data sets.
        :param sort_key: column to sort results by.
        :param sort_dir: direction to sort. "asc" or "desc".
        :param filters: filters to apply when listing containers; supported
                        filter names include 'name', 'image', 'project_id',
                        'user_id' and 'memory'.
                        For example, filters={'image': 'nginx'}
        :returns: a list of :class:`Container` objects.

        """
        db_containers = dbapi.list_containers(
            context, cls.container_type, limit=limit, marker=marker,
            sort_key=sort_key, sort_dir=sort_dir, filters=filters)
        return cls._from_db_object_list(db_containers, cls, context)

    @base.remotable_classmethod
    def list_by_host(cls, context, host):
        """Return a list of Container objects by host.

        :param context: Security context.
        :param host: A compute host.
        :returns: a list of :class:`Container` objects.

        """
        db_containers = dbapi.list_containers(context, cls.container_type,
                                              filters={'host': host})
        return cls._from_db_object_list(db_containers, cls, context)

    @base.remotable
    def create(self, context):
        """Create a Container record in the DB.

        :param context: Security context. NOTE: This should only
                        be used internally by the indirection_api.
                        Unfortunately, RPC requires context as the first
                        argument, even though we don't use it.
                        A context should be set when instantiating the
                        object, e.g.: Container(context)

        """
        values = self.obj_get_changes()
        cpuset_obj = values.pop('cpuset', None)
        if cpuset_obj is not None:
            values['cpuset'] = cpuset_obj._to_dict()
        values['container_type'] = self.container_type
        db_container = dbapi.create_container(context, values)
        self._from_db_object(self, db_container)

    @base.remotable
    def destroy(self, context=None):
        """Delete the Container from the DB.

        :param context: Security context. NOTE: This should only
                        be used internally by the indirection_api.
                        Unfortunately, RPC requires context as the first
                        argument, even though we don't use it.
                        A context should be set when instantiating the
                        object, e.g.: Container(context)
        """
        dbapi.destroy_container(context, self.container_type, self.uuid)
        self.obj_reset_changes()

    @base.remotable
    def save(self, context=None):
        """Save updates to this Container.

        Updates will be made column by column based on the result
        of self.what_changed().

        :param context: Security context. NOTE: This should only
                        be used internally by the indirection_api.
                        Unfortunately, RPC requires context as the first
                        argument, even though we don't use it.
                        A context should be set when instantiating the
                        object, e.g.: Container(context)
        """
        updates = self.obj_get_changes()
        cpuset_obj = updates.pop('cpuset', None)
        if cpuset_obj is not None:
            updates['cpuset'] = cpuset_obj._to_dict()
        dbapi.update_container(context, self.container_type, self.uuid,
                               updates)

        self.obj_reset_changes()

    @base.remotable
    def refresh(self, context=None):
        """Loads updates for this Container.

        Loads a container with the same uuid from the database and
        checks for updated attributes. Updates are applied from
        the loaded container column by column, if there are any updates.

        :param context: Security context. NOTE: This should only
                        be used internally by the indirection_api.
                        Unfortunately, RPC requires context as the first
                        argument, even though we don't use it.
                        A context should be set when instantiating the
                        object, e.g.: Container(context)
        """
        current = self.__class__.get_by_uuid(self._context, uuid=self.uuid)
        for field in self.fields:
            if (self.obj_attr_is_set(field) and
                    getattr(self, field) != getattr(current, field)):
                setattr(self, field, getattr(current, field))

    def obj_load_attr(self, attrname):
        if attrname not in CONTAINER_OPTIONAL_ATTRS:
            raise exception.ObjectActionError(
                action='obj_load_attr',
                reason=_('attribute %s not lazy-loadable') % attrname)

        if not self._context:
            raise exception.OrphanedObjectError(method='obj_load_attr',
                                                objtype=self.obj_name())

        LOG.debug("Lazy-loading '%(attr)s' on %(name)s uuid %(uuid)s",
                  {'attr': attrname,
                   'name': self.obj_name(),
                   'uuid': self.uuid,
                   })

        # NOTE(danms): We handle some fields differently here so that we
        # can be more efficient
        if attrname == 'pci_devices':
            self._load_pci_devices()

        if attrname == 'exec_instances':
            self._load_exec_instances()

        if attrname == 'registry':
            self._load_registry()

        self.obj_reset_changes([attrname])

    def _load_pci_devices(self):
        self.pci_devices = pci_device.PciDevice.list_by_container_uuid(
            self._context, self.uuid)

    def _load_exec_instances(self):
        self.exec_instances = exec_inst.ExecInstance.list_by_container_id(
            self._context, self.id)

    def _load_registry(self):
        self.registry = None
        if self.registry_id:
            self.registry = registry.Registry.get_by_id(
                self._context, self.registry_id)

    @base.remotable_classmethod
    def get_count(cls, context, project_id, flag):
        """Get the counts of Container objects in the database.

        :param context: The request context for database access.
        :param project_id: The project_id to count across.
        :param flag: The name of the resource, one of the following options:
                     - containers: the number of containers owned by the
                       project.
                     - memory: the sum of the containers' memory.
                     - cpu: the sum of the containers' CPU.
                     - disk: the sum of the containers' disk size.
        :returns: the requested count or sum.
        """
        usage = dbapi.count_usage(context, cls.container_type, project_id,
                                  flag)[0] or 0
        return usage
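
A brief usage sketch of the lazy-load path above (hedged: the context helper, module paths and UUID are illustrative assumptions, not taken from this snippet). Fields such as 'registry' are skipped by _from_db_object, so the first attribute access falls through to obj_load_attr:

# Hedged sketch -- module paths, context helper and UUID are placeholders.
from zun.common import context as zun_context
from zun import objects

ctx = zun_context.get_admin_context()
container = objects.Container.get_by_uuid(
    ctx, '11111111-2222-3333-4444-555555555555')

# 'registry' is skipped in _from_db_object, so this access triggers
# obj_load_attr -> _load_registry, which then clears the change flag so
# the lazy-loaded value is not written back on a later save().
print(container.registry)
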
Example No. 28
class RequestSpec(base.CinderObject, base.CinderObjectDictCompat,
                  base.CinderComparableObject):
    # Version 1.0: Initial version
    # Version 1.1: Added group_id and group_backend
    # Version 1.2: Added resource_backend
    # Version 1.3: Added backup_id
    VERSION = '1.3'

    fields = {
        'consistencygroup_id': fields.UUIDField(nullable=True),
        'group_id': fields.UUIDField(nullable=True),
        'cgsnapshot_id': fields.UUIDField(nullable=True),
        'image_id': fields.UUIDField(nullable=True),
        'snapshot_id': fields.UUIDField(nullable=True),
        'source_replicaid': fields.UUIDField(nullable=True),
        'source_volid': fields.UUIDField(nullable=True),
        'volume_id': fields.UUIDField(nullable=True),
        'volume': fields.ObjectField('Volume', nullable=True),
        'volume_type': fields.ObjectField('VolumeType', nullable=True),
        'volume_properties': fields.ObjectField('VolumeProperties',
                                                nullable=True),
        'CG_backend': fields.StringField(nullable=True),
        'group_backend': fields.StringField(nullable=True),
        'resource_backend': fields.StringField(nullable=True),
        'backup_id': fields.UUIDField(nullable=True),
    }

    obj_extra_fields = ['resource_properties']

    @property
    def resource_properties(self):
        # TODO(dulek): This is to maintain compatibility with filters from
        # oslo-incubator. As we've moved them into our codebase we should adapt
        # them to use volume_properties and remove this shim.
        return self.volume_properties

    @classmethod
    def from_primitives(cls, spec):
        """Returns RequestSpec object creating it from legacy dictionary.

        FIXME(dulek): This should go away in early O as we stop supporting
        backward compatibility with M.
        """
        spec = spec.copy()
        spec_obj = cls()

        vol_props = spec.pop('volume_properties', {})
        if vol_props is not None:
            vol_props = VolumeProperties(**vol_props)
        spec_obj.volume_properties = vol_props

        if 'volume' in spec:
            vol = spec.pop('volume', {})
            vol.pop('name', None)
            if vol is not None:
                vol = objects.Volume(**vol)
            spec_obj.volume = vol

        if 'volume_type' in spec:
            vol_type = spec.pop('volume_type', {})
            if vol_type is not None:
                vol_type = objects.VolumeType(**vol_type)
            spec_obj.volume_type = vol_type

        spec.pop('resource_properties', None)

        for k, v in spec.items():
            setattr(spec_obj, k, v)

        return spec_obj

    def obj_make_compatible(self, primitive, target_version):
        """Make an object representation compatible with target version."""
        super(RequestSpec, self).obj_make_compatible(primitive, target_version)
        target_version = versionutils.convert_version_to_tuple(target_version)
        added_fields = (((1, 1), ('group_id', 'group_backend')),
                        ((1, 2), ('resource_backend',)),
                        ((1, 3), ('backup_id',)))
        for version, remove_fields in added_fields:
            if target_version < version:
                for obj_field in remove_fields:
                    primitive.pop(obj_field, None)
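
A minimal backlevelling sketch for the method above (hedged: the primitive dict is hand-built for illustration; real primitives come from obj_to_primitive() and nest the fields under 'versioned_object.data'):

# Hedged sketch: obj_make_compatible drops fields unknown to the target.
spec = RequestSpec()
primitive = {'volume_id': 'vol-1',
             'group_id': 'grp-1',
             'group_backend': 'backend-a',
             'resource_backend': 'backend-b'}
spec.obj_make_compatible(primitive, '1.0')
# group_id/group_backend (added in 1.1) and resource_backend (added in
# 1.2) are popped for a 1.0 consumer; volume_id survives.
assert primitive == {'volume_id': 'vol-1'}
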
Example No. 29
class Service(base.CinderPersistentObject, base.CinderObject,
              base.CinderObjectDictCompat, base.CinderComparableObject,
              base.ClusteredObject):
    # Version 1.0: Initial version
    # Version 1.1: Add rpc_current_version and object_current_version fields
    # Version 1.2: Add get_minimum_rpc_version() and get_minimum_obj_version()
    # Version 1.3: Add replication fields
    # Version 1.4: Add cluster fields
    VERSION = '1.4'

    OPTIONAL_FIELDS = ('cluster', )

    fields = {
        'id': fields.IntegerField(),
        'host': fields.StringField(nullable=True),
        'binary': fields.StringField(nullable=True),
        'cluster_name': fields.StringField(nullable=True),
        'cluster': fields.ObjectField('Cluster', nullable=True,
                                      read_only=True),
        'topic': fields.StringField(nullable=True),
        'report_count': fields.IntegerField(default=0),
        'disabled': fields.BooleanField(default=False, nullable=True),
        'availability_zone': fields.StringField(nullable=True,
                                                default='cinder'),
        'disabled_reason': fields.StringField(nullable=True),
        'modified_at': fields.DateTimeField(nullable=True),
        'rpc_current_version': fields.StringField(nullable=True),
        'object_current_version': fields.StringField(nullable=True),

        # Replication properties
        'replication_status': c_fields.ReplicationStatusField(nullable=True),
        'frozen': fields.BooleanField(default=False),
        'active_backend_id': fields.StringField(nullable=True),
    }

    def obj_make_compatible(self, primitive, target_version):
        """Make a service representation compatible with a target version."""
        # Convert all related objects
        super(Service, self).obj_make_compatible(primitive, target_version)

        target_version = versionutils.convert_version_to_tuple(target_version)
        # Before v1.4 we didn't have cluster fields so we have to remove them.
        if target_version < (1, 4):
            for obj_field in ('cluster', 'cluster_name'):
                primitive.pop(obj_field, None)

    @staticmethod
    def _from_db_object(context, service, db_service, expected_attrs=None):
        expected_attrs = expected_attrs or []
        for name, field in service.fields.items():
            if name in Service.OPTIONAL_FIELDS:
                continue
            value = db_service.get(name)
            if isinstance(field, fields.IntegerField):
                value = value or 0
            elif isinstance(field, fields.DateTimeField):
                value = value or None
            service[name] = value

        service._context = context
        if 'cluster' in expected_attrs:
            db_cluster = db_service.get('cluster')
            # If this service doesn't belong to a cluster the cluster field in
            # the ORM instance will have value of None.
            if db_cluster:
                service.cluster = objects.Cluster(context)
                objects.Cluster._from_db_object(context, service.cluster,
                                                db_cluster)
            else:
                service.cluster = None

        service.obj_reset_changes()
        return service

    def obj_load_attr(self, attrname):
        if attrname not in self.OPTIONAL_FIELDS:
            raise exception.ObjectActionError(
                action='obj_load_attr',
                reason=_('attribute %s not lazy-loadable') % attrname)
        if not self._context:
            raise exception.OrphanedObjectError(method='obj_load_attr',
                                                objtype=self.obj_name())

        # NOTE(geguileo): We only have 1 optional field, so we don't need to
        # confirm that we are loading the cluster.
        # If this service doesn't belong to a cluster (cluster_name is empty),
        # then cluster field will be None.
        if self.cluster_name:
            self.cluster = objects.Cluster.get_by_id(self._context,
                                                     None,
                                                     name=self.cluster_name)
        else:
            self.cluster = None
        self.obj_reset_changes(fields=(attrname, ))

    @classmethod
    def get_by_host_and_topic(cls, context, host, topic):
        db_service = db.service_get(context,
                                    disabled=False,
                                    host=host,
                                    topic=topic)
        return cls._from_db_object(context, cls(context), db_service)

    @classmethod
    def get_by_args(cls, context, host, binary_key):
        db_service = db.service_get(context, host=host, binary=binary_key)
        return cls._from_db_object(context, cls(context), db_service)

    def create(self):
        if self.obj_attr_is_set('id'):
            raise exception.ObjectActionError(action='create',
                                              reason=_('already created'))
        updates = self.cinder_obj_get_changes()
        if 'cluster' in updates:
            raise exception.ObjectActionError(action='create',
                                              reason=_('cluster assigned'))
        db_service = db.service_create(self._context, updates)
        self._from_db_object(self._context, self, db_service)

    def save(self):
        updates = self.cinder_obj_get_changes()
        if 'cluster' in updates:
            raise exception.ObjectActionError(action='save',
                                              reason=_('cluster changed'))
        if updates:
            db.service_update(self._context, self.id, updates)
            self.obj_reset_changes()

    def destroy(self):
        with self.obj_as_admin():
            updated_values = db.service_destroy(self._context, self.id)
        self.update(updated_values)
        self.obj_reset_changes(updated_values.keys())

    @classmethod
    def _get_minimum_version(cls, attribute, context, binary):
        services = ServiceList.get_all_by_binary(context, binary)
        min_ver = None
        min_ver_str = None
        for s in services:
            ver_str = getattr(s, attribute)
            if ver_str is None:
                # NOTE(dulek) None in *_current_version means that this
                # service is in Liberty version, which we now don't provide
                # backward compatibility to.
                msg = _('One of the services is in Liberty version. We do not '
                        'provide backward compatibility with Liberty now, you '
                        'need to upgrade to Mitaka first.')
                raise exception.ServiceTooOld(msg)
            ver = versionutils.convert_version_to_int(ver_str)
            if min_ver is None or ver < min_ver:
                min_ver = ver
                min_ver_str = ver_str

        return min_ver_str

    @classmethod
    def get_minimum_rpc_version(cls, context, binary):
        return cls._get_minimum_version('rpc_current_version', context, binary)

    @classmethod
    def get_minimum_obj_version(cls, context, binary=None):
        return cls._get_minimum_version('object_current_version', context,
                                        binary)
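
A short sketch of how the minimum-version helpers are typically consumed during rolling upgrades (hedged: the admin-context helper and the binary name are assumptions for illustration):

# Hedged sketch -- the binary name is just an example.
from cinder import context as cinder_context

ctx = cinder_context.get_admin_context()
min_rpc = Service.get_minimum_rpc_version(ctx, 'cinder-volume')
min_obj = Service.get_minimum_obj_version(ctx, 'cinder-volume')

# The oldest version reported by any running service wins, so an RPC
# client can be capped to what every service understands, e.g.:
#     client = rpc.get_client(target, version_cap=min_rpc)
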
Example No. 30
class Volume(base.CinderPersistentObject, base.CinderObject,
             base.CinderObjectDictCompat, base.CinderComparableObject):
    # Version 1.0: Initial version
    # Version 1.1: Added metadata, admin_metadata, volume_attachment, and
    #              volume_type
    # Version 1.2: Added glance_metadata, consistencygroup and snapshots
    # Version 1.3: Added finish_volume_migration()
    VERSION = '1.3'

    OPTIONAL_FIELDS = ('metadata', 'admin_metadata', 'glance_metadata',
                       'volume_type', 'volume_attachment', 'consistencygroup',
                       'snapshots')

    fields = {
        'id': fields.UUIDField(),
        '_name_id': fields.UUIDField(nullable=True),
        'ec2_id': fields.UUIDField(nullable=True),
        'user_id': fields.StringField(nullable=True),
        'project_id': fields.StringField(nullable=True),
        'snapshot_id': fields.UUIDField(nullable=True),
        'host': fields.StringField(nullable=True),
        'size': fields.IntegerField(nullable=True),
        'availability_zone': fields.StringField(nullable=True),
        'status': fields.StringField(nullable=True),
        'attach_status': fields.StringField(nullable=True),
        'migration_status': fields.StringField(nullable=True),
        'scheduled_at': fields.DateTimeField(nullable=True),
        'launched_at': fields.DateTimeField(nullable=True),
        'terminated_at': fields.DateTimeField(nullable=True),
        'display_name': fields.StringField(nullable=True),
        'display_description': fields.StringField(nullable=True),
        'provider_id': fields.StringField(nullable=True),
        'provider_location': fields.StringField(nullable=True),
        'provider_auth': fields.StringField(nullable=True),
        'provider_geometry': fields.StringField(nullable=True),
        'volume_type_id': fields.UUIDField(nullable=True),
        'source_volid': fields.UUIDField(nullable=True),
        'encryption_key_id': fields.UUIDField(nullable=True),
        'consistencygroup_id': fields.UUIDField(nullable=True),
        'deleted': fields.BooleanField(default=False, nullable=True),
        'bootable': fields.BooleanField(default=False, nullable=True),
        'multiattach': fields.BooleanField(default=False, nullable=True),
        'replication_status': fields.StringField(nullable=True),
        'replication_extended_status': fields.StringField(nullable=True),
        'replication_driver_data': fields.StringField(nullable=True),
        'previous_status': fields.StringField(nullable=True),
        'metadata': fields.DictOfStringsField(nullable=True),
        'admin_metadata': fields.DictOfStringsField(nullable=True),
        'glance_metadata': fields.DictOfStringsField(nullable=True),
        'volume_type': fields.ObjectField('VolumeType', nullable=True),
        'volume_attachment': fields.ObjectField('VolumeAttachmentList',
                                                nullable=True),
        'consistencygroup': fields.ObjectField('ConsistencyGroup',
                                               nullable=True),
        'snapshots': fields.ObjectField('SnapshotList', nullable=True),
    }

    # NOTE(thangp): obj_extra_fields is used to hold properties that are not
    # usually part of the model
    obj_extra_fields = [
        'name', 'name_id', 'volume_metadata', 'volume_admin_metadata',
        'volume_glance_metadata'
    ]

    @classmethod
    def _get_expected_attrs(cls, context):
        expected_attrs = ['metadata', 'volume_type', 'volume_type.extra_specs']
        if context.is_admin:
            expected_attrs.append('admin_metadata')

        return expected_attrs

    @property
    def name_id(self):
        return self.id if not self._name_id else self._name_id

    @name_id.setter
    def name_id(self, value):
        self._name_id = value

    @property
    def name(self):
        return CONF.volume_name_template % self.name_id

    # TODO(dulek): Three properties below are for compatibility with dict
    # representation of volume. The format there is different (list of
    # SQLAlchemy models) so we need a conversion. Anyway - these should be
    # removed when we stop this class from deriving from DictObjectCompat.
    @property
    def volume_metadata(self):
        md = [MetadataObject(k, v) for k, v in self.metadata.items()]
        return md

    @volume_metadata.setter
    def volume_metadata(self, value):
        md = {d['key']: d['value'] for d in value}
        self.metadata = md

    @property
    def volume_admin_metadata(self):
        md = [MetadataObject(k, v) for k, v in self.admin_metadata.items()]
        return md

    @volume_admin_metadata.setter
    def volume_admin_metadata(self, value):
        md = {d['key']: d['value'] for d in value}
        self.admin_metadata = md

    @property
    def volume_glance_metadata(self):
        md = [MetadataObject(k, v) for k, v in self.glance_metadata.items()]
        return md

    @volume_glance_metadata.setter
    def volume_glance_metadata(self, value):
        md = {d['key']: d['value'] for d in value}
        self.glance_metadata = md

    def __init__(self, *args, **kwargs):
        super(Volume, self).__init__(*args, **kwargs)
        self._orig_metadata = {}
        self._orig_admin_metadata = {}
        self._orig_glance_metadata = {}

        self._reset_metadata_tracking()

    def obj_reset_changes(self, fields=None):
        super(Volume, self).obj_reset_changes(fields)
        self._reset_metadata_tracking(fields=fields)

    @classmethod
    def _obj_from_primitive(cls, context, objver, primitive):
        obj = super(Volume,
                    Volume)._obj_from_primitive(context, objver, primitive)
        obj._reset_metadata_tracking()
        return obj

    def _reset_metadata_tracking(self, fields=None):
        if fields is None or 'metadata' in fields:
            self._orig_metadata = (dict(self.metadata)
                                   if 'metadata' in self else {})
        if fields is None or 'admin_metadata' in fields:
            self._orig_admin_metadata = (dict(self.admin_metadata)
                                         if 'admin_metadata' in self else {})
        if fields is None or 'glance_metadata' in fields:
            self._orig_glance_metadata = (dict(self.glance_metadata)
                                          if 'glance_metadata' in self else {})

    def obj_what_changed(self):
        changes = super(Volume, self).obj_what_changed()
        if 'metadata' in self and self.metadata != self._orig_metadata:
            changes.add('metadata')
        if ('admin_metadata' in self
                and self.admin_metadata != self._orig_admin_metadata):
            changes.add('admin_metadata')
        if ('glance_metadata' in self
                and self.glance_metadata != self._orig_glance_metadata):
            changes.add('glance_metadata')

        return changes

    def obj_make_compatible(self, primitive, target_version):
        """Make an object representation compatible with a target version."""
        super(Volume, self).obj_make_compatible(primitive, target_version)
        target_version = versionutils.convert_version_to_tuple(target_version)

    @staticmethod
    def _from_db_object(context, volume, db_volume, expected_attrs=None):
        if expected_attrs is None:
            expected_attrs = []
        for name, field in volume.fields.items():
            if name in Volume.OPTIONAL_FIELDS:
                continue
            value = db_volume.get(name)
            if isinstance(field, fields.IntegerField):
                value = value or 0
            volume[name] = value

        # Get data from db_volume object that was queried by joined query
        # from DB
        if 'metadata' in expected_attrs:
            metadata = db_volume.get('volume_metadata', [])
            volume.metadata = {item['key']: item['value'] for item in metadata}
        if 'admin_metadata' in expected_attrs:
            metadata = db_volume.get('volume_admin_metadata', [])
            volume.admin_metadata = {
                item['key']: item['value']
                for item in metadata
            }
        if 'glance_metadata' in expected_attrs:
            metadata = db_volume.get('volume_glance_metadata', [])
            volume.glance_metadata = {
                item['key']: item['value']
                for item in metadata
            }
        if 'volume_type' in expected_attrs:
            db_volume_type = db_volume.get('volume_type')
            if db_volume_type:
                vt_expected_attrs = []
                if 'volume_type.extra_specs' in expected_attrs:
                    vt_expected_attrs.append('extra_specs')
                volume.volume_type = objects.VolumeType._from_db_object(
                    context,
                    objects.VolumeType(),
                    db_volume_type,
                    expected_attrs=vt_expected_attrs)
        if 'volume_attachment' in expected_attrs:
            attachments = base.obj_make_list(
                context, objects.VolumeAttachmentList(context),
                objects.VolumeAttachment, db_volume.get('volume_attachment'))
            volume.volume_attachment = attachments
        if 'consistencygroup' in expected_attrs:
            consistencygroup = objects.ConsistencyGroup(context)
            consistencygroup._from_db_object(context, consistencygroup,
                                             db_volume['consistencygroup'])
            volume.consistencygroup = consistencygroup
        if 'snapshots' in expected_attrs:
            snapshots = base.obj_make_list(context,
                                           objects.SnapshotList(context),
                                           objects.Snapshot,
                                           db_volume['snapshots'])
            volume.snapshots = snapshots

        volume._context = context
        volume.obj_reset_changes()
        return volume

    def create(self):
        if self.obj_attr_is_set('id'):
            raise exception.ObjectActionError(action='create',
                                              reason=_('already created'))
        updates = self.cinder_obj_get_changes()

        if 'consistencygroup' in updates:
            raise exception.ObjectActionError(
                action='create', reason=_('consistencygroup assigned'))
        if 'snapshots' in updates:
            raise exception.ObjectActionError(action='create',
                                              reason=_('snapshots assigned'))

        db_volume = db.volume_create(self._context, updates)
        self._from_db_object(self._context, self, db_volume)

    def save(self):
        updates = self.cinder_obj_get_changes()
        if updates:
            if 'consistencygroup' in updates:
                raise exception.ObjectActionError(
                    action='save', reason=_('consistencygroup changed'))
            if 'glance_metadata' in updates:
                raise exception.ObjectActionError(
                    action='save', reason=_('glance_metadata changed'))
            if 'snapshots' in updates:
                raise exception.ObjectActionError(
                    action='save', reason=_('snapshots changed'))
            if 'metadata' in updates:
                # Metadata items that are not specified in the
                # self.metadata will be deleted
                metadata = updates.pop('metadata', None)
                self.metadata = db.volume_metadata_update(
                    self._context, self.id, metadata, True)
            if self._context.is_admin and 'admin_metadata' in updates:
                metadata = updates.pop('admin_metadata', None)
                self.admin_metadata = db.volume_admin_metadata_update(
                    self._context, self.id, metadata, True)

            db.volume_update(self._context, self.id, updates)
            self.obj_reset_changes()

    def destroy(self):
        with self.obj_as_admin():
            db.volume_destroy(self._context, self.id)

    def obj_load_attr(self, attrname):
        if attrname not in self.OPTIONAL_FIELDS:
            raise exception.ObjectActionError(
                action='obj_load_attr',
                reason=_('attribute %s not lazy-loadable') % attrname)
        if not self._context:
            raise exception.OrphanedObjectError(method='obj_load_attr',
                                                objtype=self.obj_name())

        if attrname == 'metadata':
            self.metadata = db.volume_metadata_get(self._context, self.id)
        elif attrname == 'admin_metadata':
            self.admin_metadata = {}
            if self._context.is_admin:
                self.admin_metadata = db.volume_admin_metadata_get(
                    self._context, self.id)
        elif attrname == 'glance_metadata':
            try:
                # NOTE(dulek): We're using alias here to have conversion from
                # list to dict done there.
                self.volume_glance_metadata = db.volume_glance_metadata_get(
                    self._context, self.id)
            except exception.GlanceMetadataNotFound:
                # NOTE(dulek): DB API raises when volume has no
                # glance_metadata. Silencing this because at this level no
                # metadata is a completely valid result.
                self.glance_metadata = {}
        elif attrname == 'volume_type':
            # If the volume doesn't have volume_type, VolumeType.get_by_id
            # would trigger a db call which raise VolumeTypeNotFound exception.
            self.volume_type = (objects.VolumeType.get_by_id(
                self._context, self.volume_type_id)
                                if self.volume_type_id else None)
        elif attrname == 'volume_attachment':
            attachments = objects.VolumeAttachmentList.get_all_by_volume_id(
                self._context, self.id)
            self.volume_attachment = attachments
        elif attrname == 'consistencygroup':
            consistencygroup = objects.ConsistencyGroup.get_by_id(
                self._context, self.consistencygroup_id)
            self.consistencygroup = consistencygroup
        elif attrname == 'snapshots':
            self.snapshots = objects.SnapshotList.get_all_for_volume(
                self._context, self.id)

        self.obj_reset_changes(fields=[attrname])

    def delete_metadata_key(self, key):
        db.volume_metadata_delete(self._context, self.id, key)
        md_was_changed = 'metadata' in self.obj_what_changed()

        del self.metadata[key]
        self._orig_metadata.pop(key, None)

        if not md_was_changed:
            self.obj_reset_changes(['metadata'])

    def finish_volume_migration(self, dest_volume):
        # We swap fields between source (i.e. self) and destination at the
        # end of migration because we want to keep the original volume id
        # in the DB but now pointing to the migrated volume.
        skip = ({'id', 'provider_location', 'glance_metadata', 'volume_type'}
                | set(self.obj_extra_fields))
        for key in set(dest_volume.fields.keys()) - skip:
            # Only swap attributes that are already set.  We do not want to
            # unexpectedly trigger a lazy-load.
            if not dest_volume.obj_attr_is_set(key):
                continue

            value = getattr(dest_volume, key)
            value_to_dst = getattr(self, key)

            # Destination must have a _name_id since the id no longer matches
            # the volume.  If it doesn't have a _name_id we set one.
            if key == '_name_id':
                if not dest_volume._name_id:
                    setattr(dest_volume, key, self.id)
                continue
            elif key == 'migration_status':
                value = None
                value_to_dst = 'deleting'
            elif key == 'display_description':
                value_to_dst = 'migration src for ' + self.id
            elif key == 'status':
                value_to_dst = 'deleting'
            # Because dest_volume will be deleted soon, we can
            # skip to copy volume_type_id and volume_type which
            # are not keys for volume deletion.
            elif key == 'volume_type_id':
                # Initialize volume_type of source volume using
                # new volume_type_id.
                self.update({'volume_type_id': value})
                continue

            setattr(self, key, value)
            setattr(dest_volume, key, value_to_dst)

        dest_volume.save()
        return dest_volume
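
A closing sketch of the metadata change tracking implemented above (hedged: in-memory only, no database round trip). obj_what_changed() diffs the current dicts against the snapshots taken by _reset_metadata_tracking(), so even in-place mutations are detected:

# Hedged sketch -- in-memory only.
vol = Volume()
vol.metadata = {'tier': 'gold'}
vol.obj_reset_changes()            # also snapshots _orig_metadata

vol.metadata['tier'] = 'silver'    # in-place mutation, no setattr
# The base implementation sees no change, but the override also compares
# the dict against its snapshot, so the edit is still reported:
assert 'metadata' in vol.obj_what_changed()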