Example #1
    def get_permission_object_id(self, request, object_id=None):
        """Returns the permission object id for the current request.
        In the nominal case, it is just the current URI without the version prefix.
        For collections, it is the related record URI using the specified
        `object_id`.

        See :meth:`kinto.core.resource.model.SharableModel` and
        :meth:`kinto.core.authorization.RouteFactory.__init__`
        """
        object_uri = utils.strip_uri_prefix(request.path)

        if self.on_collection and object_id is not None:
            # With the current request on a collection, the record URI must
            # be determined by inspecting the collection service and its sibling
            # record service.
            matchdict = request.matchdict.copy()
            matchdict['id'] = object_id
            try:
                object_uri = utils.instance_uri(request,
                                                self.resource_name,
                                                **matchdict)
                if object_id == '*':
                    object_uri = object_uri.replace('%2A', '*')
            except KeyError:
                # Maybe the resource has no single record endpoint.
                # We consider that object URIs in the permissions backend will
                # be stored naively:
                object_uri = object_uri + '/' + object_id

        return object_uri
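
A quick, runnable sketch of the KeyError fallback above (ids are illustrative, not from kinto): when a resource exposes no single-object endpoint, the permission object URI is simply the collection URI with the object id appended.

def naive_object_uri(collection_uri, object_id):
    # Mirror of the fallback branch: permission URIs are stored "naively".
    return collection_uri + '/' + object_id

assert naive_object_uri('/buckets/blog/collections/articles', '*') == \
       '/buckets/blog/collections/articles/*'
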
Example #2
def create_collection(request, bucket_id):
    # Do nothing if current request does not involve a collection.
    subpath = request.matchdict.get('subpath')
    if not (subpath and subpath.rstrip('/').startswith('collections/')):
        return

    collection_id = subpath.split('/')[1]
    collection_uri = instance_uri(request, 'collection',
                                  bucket_id=bucket_id,
                                  id=collection_id)

    # Do not attempt to create it multiple times per request (e.g. in batch).
    already_created = request.bound_data.setdefault('collections', {})
    if collection_uri in already_created:
        return

    # Do nothing if current request will already create the collection.
    collection_put = (request.method.lower() == 'put' and
                      request.path.endswith(collection_id))
    if collection_put:
        return

    collection = resource_create_object(request=request,
                                        resource_cls=Collection,
                                        uri=collection_uri)
    already_created[collection_uri] = collection
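
The guard at the top is easy to check in isolation. A runnable sketch with illustrative sub-paths (only those starting with ``collections/`` trigger the implicit creation):

def involves_collection(subpath):
    # Same test as above: a collection is addressed below the default bucket.
    return bool(subpath and subpath.rstrip('/').startswith('collections/'))

assert involves_collection('collections/articles')
assert involves_collection('collections/articles/records')
assert not involves_collection('groups/editors')
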
Example #3
def sign_collection_data(event, resources):
    """
    Listen to resource change events to check whether a new signature is
    requested.

    When a source collection specified in settings is modified, and its
    new metadata ``status`` is set to ``"to-sign"``, then sign the data
    and update the destination.
    """
    payload = event.payload

    current_user_id = event.request.prefixed_userid
    if current_user_id == _PLUGIN_USERID:
        # Ignore changes made by plugin.
        return

    for impacted in event.impacted_records:
        new_collection = impacted['new']

        key = instance_uri(event.request, "collection",
                           bucket_id=payload['bucket_id'],
                           id=new_collection['id'])
        resource = resources.get(key)

        # Only sign the configured resources.
        if resource is None:
            continue

        registry = event.request.registry
        updater = LocalUpdater(signer=registry.signers[key],
                               storage=registry.storage,
                               permission=registry.permission,
                               source=resource['source'],
                               destination=resource['destination'])

        try:
            new_status = new_collection.get("status")
            if new_status == STATUS.TO_SIGN:
                # Run signature process (will set `last_reviewer` field).
                updater.sign_and_update_destination(event.request)

            elif new_status == STATUS.TO_REVIEW:
                if 'preview' in resource:
                    # If preview collection: update and sign preview collection
                    updater.destination = resource['preview']
                    updater.sign_and_update_destination(event.request,
                                                        next_source_status=STATUS.TO_REVIEW)
                else:
                    # If no preview collection: just track `last_editor`
                    updater.update_source_editor(event.request)

        except Exception:
            logger.exception("Could not sign '{0}'".format(key))
            event.request.response.status = 503
Example #4
def on_groups_deleted(event):
    """Some groups were deleted, remove them from users principals.
    """
    permission_backend = event.request.registry.permission

    for change in event.impacted_records:
        group = change["old"]
        bucket_id = event.payload["bucket_id"]
        group_uri = utils.instance_uri(event.request, "group", bucket_id=bucket_id, id=group["id"])

        permission_backend.remove_principal(group_uri)
Example #5
def changes_record(request, bucket_id, collection_id, timestamp):
    """Generate a record for /buckets/monitor/collections/changes."""
    http_host = request.registry.settings.get('http_host') or ''
    collection_uri = core_utils.instance_uri(
        request, 'collection', bucket_id=bucket_id, id=collection_id)
    uniqueid = http_host + collection_uri
    identifier = hashlib.md5(uniqueid.encode('utf-8')).hexdigest()
    entry_id = str(UUID(identifier))

    return dict(id=entry_id,
                last_modified=timestamp,
                bucket=bucket_id,
                collection=collection_id,
                host=http_host)
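
The entry id is deterministic: hashing the host plus the collection URI always yields the same UUID-shaped string, so a monitor entry keeps a stable id across requests. A runnable sketch with illustrative values:

import hashlib
from uuid import UUID

uniqueid = 'demo.example.com' + '/buckets/blog/collections/articles'
identifier = hashlib.md5(uniqueid.encode('utf-8')).hexdigest()
print(str(UUID(identifier)))  # same host + URI -> same id, every time
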
Example #6
def on_collections_deleted(event):
    """Some collections were deleted, delete records.
    """
    storage = event.request.registry.storage
    permission = event.request.registry.permission

    for change in event.impacted_records:
        collection = change["old"]
        bucket_id = event.payload["bucket_id"]
        parent_id = utils.instance_uri(
            event.request, "collection", bucket_id=bucket_id, id=collection["id"]
        )
        storage.delete_all(collection_id=None, parent_id=parent_id, with_deleted=False)
        storage.purge_deleted(collection_id=None, parent_id=parent_id)
        permission.delete_object_permissions(parent_id + "/*")
Example #7
def create_bucket(request, bucket_id):
    """Create a bucket if it doesn't exists."""
    bucket_put = request.method.lower() == "put" and request.path.endswith("buckets/default")
    # Do nothing if current request will already create the bucket.
    if bucket_put:
        return

    # Do not attempt to create it multiple times per request (e.g. in batch).
    already_created = request.bound_data.setdefault("buckets", {})
    if bucket_id in already_created:
        return

    bucket_uri = instance_uri(request, "bucket", id=bucket_id)
    bucket = resource_create_object(request=request, resource_cls=Bucket, uri=bucket_uri)
    already_created[bucket_id] = bucket
Example #8
def on_collections_deleted(event):
    """Some collections were deleted, delete records.
    """
    storage = event.request.registry.storage

    for change in event.impacted_records:
        collection = change['old']
        bucket_id = event.payload['bucket_id']
        parent_id = utils.instance_uri(event.request, 'collection',
                                       bucket_id=bucket_id,
                                       id=collection['id'])
        storage.delete_all(collection_id='record',
                           parent_id=parent_id,
                           with_deleted=False)
        storage.purge_deleted(collection_id='record',
                              parent_id=parent_id)
Example #9
def on_buckets_deleted(event):
    """Some buckets were deleted, delete sub-resources.
    """
    storage = event.request.registry.storage

    for change in event.impacted_records:
        bucket = change['old']
        bucket_uri = instance_uri(event.request, 'bucket', id=bucket['id'])
        # Delete everything whose parent_id starts with bucket_uri.
        parent_pattern = bucket_uri + '*'
        storage.delete_all(parent_id=parent_pattern,
                           collection_id=None,
                           with_deleted=False)
        # Remove remaining tombstones too.
        storage.purge_deleted(parent_id=parent_pattern,
                              collection_id=None)
Example #10
    def __init__(self, request, **kwargs):
        # Before anything else, check that the parent collection exists.
        # Check if already fetched before (in batch).
        collections = request.bound_data.setdefault('collections', {})
        collection_uri = self.get_parent_id(request)
        if collection_uri not in collections:
            # Unknown yet, fetch from storage.
            bucket_uri = utils.instance_uri(request, 'bucket', id=self.bucket_id)
            collection = object_exists_or_404(request,
                                              collection_id='collection',
                                              parent_id=bucket_uri,
                                              object_id=self.collection_id)
            collections[collection_uri] = collection
        self._collection = collections[collection_uri]

        super().__init__(request, **kwargs)
Example #11
def on_buckets_deleted(event):
    """Some buckets were deleted, delete sub-resources.
    """
    storage = event.request.registry.storage
    permission = event.request.registry.permission

    for change in event.impacted_records:
        bucket = change["old"]
        bucket_uri = instance_uri(event.request, "bucket", id=bucket["id"])

        # Delete everything with the current parent id (e.g. collections,
        # groups) and descendant objects (e.g. records).
        for pattern in (bucket_uri, bucket_uri + "/*"):
            storage.delete_all(parent_id=pattern, collection_id=None, with_deleted=False)
            # Remove remaining tombstones too.
            storage.purge_deleted(parent_id=pattern, collection_id=None)
            # Remove related permissions
            permission.delete_object_permissions(pattern)
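
The two patterns cover both nesting levels: ``bucket_uri`` matches objects whose parent is the bucket itself (collections, groups), while ``bucket_uri + "/*"`` matches deeper descendants such as records. A sketch using fnmatch to emulate the trailing wildcard (an assumption about the backend's matching, for illustration only):

from fnmatch import fnmatch

bucket_uri = '/buckets/blog'
parent_ids = ['/buckets/blog',                       # collections, groups
              '/buckets/blog/collections/articles']  # records
for pattern in (bucket_uri, bucket_uri + '/*'):
    print(pattern, '->', [p for p in parent_ids if fnmatch(p, pattern)])
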
Example #12
    def _entries(self):
        if self.__entries is None:
            self.__entries = {}

            for (bucket_id, collection_id) in monitored_collections(self.request.registry):
                collection_uri = core_utils.instance_uri(self.request,
                                                         'collection',
                                                         bucket_id=bucket_id,
                                                         id=collection_id)
                timestamp = self.storage.collection_timestamp(parent_id=collection_uri,
                                                              collection_id='record')

                entry = changes_record(self.request,
                                       bucket_id, collection_id, timestamp)

                self.__entries[entry[self.id_field]] = entry

        return self.__entries.values()
Example #13
def ensure_resource_exists(request, resource_name, parent_id, obj,
                           permissions, matchdict):
    storage = request.registry.storage
    permission = request.registry.permission
    try:
        created = storage.create(resource_name=resource_name,
                                 parent_id=parent_id,
                                 obj=obj)
        object_uri = instance_uri(request, resource_name, **matchdict)
        permission.replace_object_permissions(object_uri, permissions)
        notify_resource_event(request,
                              {'method': 'PUT', 'path': object_uri},
                              matchdict=matchdict,
                              resource_name=resource_name,
                              parent_id=parent_id,
                              obj=created,
                              action=ACTIONS.CREATE)
    except UnicityError:
        pass
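
Swallowing UnicityError is what makes this helper idempotent: the storage backend is expected to raise it when an object with the same id already exists, so a second call (e.g. within a batch) becomes a no-op. A self-contained sketch of the pattern, with a fake in-memory store standing in for the backend:

class UnicityError(Exception):
    """Stand-in for the storage backend's unicity exception."""

store = {}

def create(obj_id, obj):
    if obj_id in store:
        raise UnicityError(obj_id)
    store[obj_id] = obj

def ensure_exists(obj_id, obj):
    try:
        create(obj_id, obj)
    except UnicityError:
        pass  # already created: nothing to do

ensure_exists('blog', {'id': 'blog'})
ensure_exists('blog', {'id': 'blog'})  # silent no-op the second time
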
Example #14
def set_work_in_progress_status(event, resources):
    """Put the status in work-in-progress if was signed.
    """
    payload = event.payload

    key = instance_uri(event.request, "collection",
                       bucket_id=payload["bucket_id"],
                       id=payload["collection_id"])
    resource = resources.get(key)

    # Skip if resource is not configured.
    if resource is None:
        return

    registry = event.request.registry
    updater = LocalUpdater(signer=registry.signers[key],
                           storage=registry.storage,
                           permission=registry.permission,
                           source=resource['source'],
                           destination=resource['destination'])
    updater.update_source_status(STATUS.WORK_IN_PROGRESS, event.request)
Example #15
def set_work_in_progress_status(event, resources):
    """Put the status in work-in-progress if was signed.
    """
    payload = event.payload

    key = instance_uri(event.request,
                       "collection",
                       bucket_id=payload["bucket_id"],
                       id=payload["collection_id"])
    resource = resources.get(key)

    # Skip if resource is not configured.
    if resource is None:
        return

    registry = event.request.registry
    updater = LocalUpdater(signer=registry.signers[key],
                           storage=registry.storage,
                           permission=registry.permission,
                           source=resource['source'],
                           destination=resource['destination'])
    updater.update_source_status(STATUS.WORK_IN_PROGRESS, event.request)
Example #16
def create_collection(request, bucket_id):
    # Do nothing if current request does not involve a collection.
    subpath = request.matchdict.get("subpath")
    if not (subpath and subpath.rstrip("/").startswith("collections/")):
        return

    collection_id = subpath.split("/")[1]
    collection_uri = instance_uri(request, "collection", bucket_id=bucket_id, id=collection_id)

    # Do not attempt to create it multiple times per request (e.g. in batch).
    already_created = request.bound_data.setdefault("collections", {})
    if collection_uri in already_created:
        return

    # Do nothing if current request will already create the collection.
    collection_put = request.method.lower() == "put" and request.path.endswith(collection_id)
    if collection_put:
        return

    collection = resource_create_object(
        request=request, resource_cls=Collection, uri=collection_uri
    )
    already_created[collection_uri] = collection
Example #17
def validate_from_bucket_schema_or_400(data, resource_name, request, ignore_fields=[]):
    """Lookup in the parent objects if a schema was defined for this resource.

    If the schema validation feature is enabled, one or more schemas are
    defined, and the data does not validate against them, then a 400
    exception is raised.
    """
    settings = request.registry.settings
    schema_validation = 'experimental_collection_schema_validation'
    # If disabled from settings, do nothing.
    if not asbool(settings.get(schema_validation)):
        return

    bucket_id = request.matchdict["bucket_id"]
    bucket_uri = utils.instance_uri(request, 'bucket', id=bucket_id)
    buckets = request.bound_data.setdefault('buckets', {})
    if bucket_uri not in buckets:
        # Unknown yet, fetch from storage.
        bucket = object_exists_or_404(request,
                                      collection_id='bucket',
                                      parent_id='',
                                      object_id=bucket_id)
        buckets[bucket_uri] = bucket

    # Let's see if the bucket defines a schema for this resource.
    metadata_field = "{}:schema".format(resource_name)
    bucket = buckets[bucket_uri]
    if metadata_field not in bucket:
        return

    # Validate or fail with 400.
    schema = bucket[metadata_field]
    try:
        validate_schema(data, schema, ignore_fields=ignore_fields)
    except ValidationError as e:
        raise_invalid(request, name=e.field, description=e.message)
    except RefResolutionError as e:
        raise_invalid(request, name='schema', description=str(e))
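
The lookup key is simply ``<resource_name>:schema`` in the parent bucket's metadata. A runnable sketch with an illustrative bucket object:

bucket = {'id': 'blog', 'collection:schema': {'type': 'object'}}
metadata_field = '{}:schema'.format('collection')
assert metadata_field in bucket
schema = bucket[metadata_field]  # would be passed to validate_schema()
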
Example #18
def ensure_resource_exists(request, resource_name, parent_id, obj, permissions,
                           matchdict):
    storage = request.registry.storage
    permission = request.registry.permission
    try:
        created = storage.create(resource_name=resource_name,
                                 parent_id=parent_id,
                                 obj=obj)
        object_uri = instance_uri(request, resource_name, **matchdict)
        permission.replace_object_permissions(object_uri, permissions)
        notify_resource_event(
            request,
            {
                "method": "PUT",
                "path": object_uri
            },
            matchdict=matchdict,
            resource_name=resource_name,
            parent_id=parent_id,
            obj=created,
            action=ACTIONS.CREATE,
        )
    except UnicityError:
        pass
Example #19
def check_collection_tracking(event, resources):
    """Make sure tracking fields are not changed manually/removed.
    """
    if event.request.prefixed_userid == _PLUGIN_USERID:
        return

    tracking_fields = (FIELD_LAST_AUTHOR, FIELD_LAST_EDITOR, FIELD_LAST_REVIEWER)

    for impacted in event.impacted_records:
        old_collection = impacted.get("old", {})
        new_collection = impacted["new"]

        # Skip if resource is not configured.
        key = instance_uri(event.request, "collection",
                           bucket_id=event.payload["bucket_id"],
                           id=new_collection["id"])
        if key not in resources:
            continue

        for field in tracking_fields:
            old = old_collection.get(field)
            new = new_collection.get(field)
            if old != new:
                raise_invalid(message="Cannot change %r" % field)
Example #20
 def get_parent_id(self, request):
     self.bucket_id = request.matchdict['bucket_id']
     self.collection_id = request.matchdict['collection_id']
     return utils.instance_uri(request, 'collection',
                               bucket_id=self.bucket_id,
                               id=self.collection_id)
Example #21
def on_resource_changed(event):
    """
    Every time an object is created/changed/deleted, we update the
    bucket counters.

    If a new object exceeds the quotas, we reject the request.
    """
    payload = event.payload
    action = payload['action']
    resource_name = payload['resource_name']
    event_uri = payload['uri']

    settings = event.request.registry.settings

    bucket_id = payload['bucket_id']
    bucket_uri = instance_uri(event.request, 'bucket', id=bucket_id)
    collection_id = None
    collection_uri = None
    if 'collection_id' in payload:
        collection_id = payload['collection_id']
        collection_uri = instance_uri(event.request,
                                      'collection',
                                      bucket_id=bucket_id,
                                      id=collection_id)

    bucket_max_bytes = get_bucket_settings(settings, bucket_id, 'max_bytes')
    bucket_max_items = get_bucket_settings(settings, bucket_id, 'max_items')
    bucket_max_bytes_per_item = get_bucket_settings(settings, bucket_id,
                                                    'max_bytes_per_item')
    collection_max_bytes = get_collection_settings(settings, bucket_id,
                                                   collection_id, 'max_bytes')
    collection_max_items = get_collection_settings(settings, bucket_id,
                                                   collection_id, 'max_items')
    collection_max_bytes_per_item = get_collection_settings(
        settings, bucket_id, collection_id, 'max_bytes_per_item')

    max_bytes_per_item = (collection_max_bytes_per_item or
                          bucket_max_bytes_per_item)

    storage = event.request.registry.storage

    if action == 'delete' and resource_name == 'bucket':
        try:
            storage.delete(parent_id=bucket_uri,
                           collection_id=QUOTA_RESOURCE_NAME,
                           object_id=QUOTA_BUCKET_ID)
        except RecordNotFoundError:
            pass

        collection_pattern = instance_uri(event.request, 'collection',
                                          bucket_id=bucket_id, id='*')
        storage.delete_all(parent_id=collection_pattern,
                           collection_id=QUOTA_RESOURCE_NAME)
        return

    targets = []
    for impacted in event.impacted_records:
        target = impacted['new' if action != 'delete' else 'old']
        # On POST .../records, the URI does not contain the newly created
        # record id.
        obj_id = target['id']
        parts = event_uri.split('/')
        if resource_name in parts[-1]:
            parts.append(obj_id)
        else:
            # Make sure the id is correct on grouped events.
            parts[-1] = obj_id
        uri = '/'.join(parts)

        old = impacted.get('old', {})
        new = impacted.get('new', {})

        targets.append((uri, obj_id, old, new))

    try:
        bucket_info = copy.deepcopy(
            storage.get(parent_id=bucket_uri,
                        collection_id=QUOTA_RESOURCE_NAME,
                        object_id=QUOTA_BUCKET_ID))
    except RecordNotFoundError:
        bucket_info = {
            "collection_count": 0,
            "record_count": 0,
            "storage_size": 0,
        }

    collection_info = {
        "record_count": 0,
        "storage_size": 0,
    }
    if collection_id:
        try:
            collection_info = copy.deepcopy(
                storage.get(parent_id=collection_uri,
                            collection_id=QUOTA_RESOURCE_NAME,
                            object_id=QUOTA_COLLECTION_ID))
        except RecordNotFoundError:
            pass

    # Update the bucket quotas values for each impacted record.
    for (uri, obj_id, old, new) in targets:
        old_size = record_size(old)
        new_size = record_size(new)

        if max_bytes_per_item is not None and action != "delete":
            if new_size > max_bytes_per_item:
                message = ("Maximum bytes per object exceeded "
                           "(%d > %d Bytes." % (new_size, max_bytes_per_item))
                raise http_error(HTTPInsufficientStorage(),
                                 errno=ERRORS.FORBIDDEN.value,
                                 message=message)

        if action == 'create':
            bucket_info['storage_size'] += new_size
            if resource_name == 'collection':
                bucket_info['collection_count'] += 1
                collection_info['storage_size'] += new_size
            if resource_name == 'record':
                bucket_info['record_count'] += 1
                collection_info['record_count'] += 1
                collection_info['storage_size'] += new_size
        elif action == 'update':
            bucket_info['storage_size'] -= old_size
            bucket_info['storage_size'] += new_size
            if resource_name in ('collection', 'record'):
                collection_info['storage_size'] -= old_size
                collection_info['storage_size'] += new_size
        elif action == 'delete':
            bucket_info['storage_size'] -= old_size
            if resource_name == 'collection':
                collection_uri = uri
                bucket_info['collection_count'] -= 1
                # When we delete the collection all the records in it
                # are deleted without notification.
                collection_records, _ = storage.get_all(
                    collection_id='record',
                    parent_id=collection_uri)
                for r in collection_records:
                    old_record_size = record_size(r)
                    bucket_info['record_count'] -= 1
                    bucket_info['storage_size'] -= old_record_size
                    collection_info['record_count'] -= 1
                    collection_info['storage_size'] -= old_record_size
                collection_info['storage_size'] -= old_size

            if resource_name == 'record':
                bucket_info['record_count'] -= 1
                collection_info['record_count'] -= 1
                collection_info['storage_size'] -= old_size

    if bucket_max_bytes is not None:
        if bucket_info['storage_size'] > bucket_max_bytes:
            message = ("Bucket maximum total size exceeded "
                       "(%d > %d Bytes). " % (bucket_info['storage_size'],
                                              bucket_max_bytes))
            raise http_error(HTTPInsufficientStorage(),
                             errno=ERRORS.FORBIDDEN.value,
                             message=message)

    if bucket_max_items is not None:
        if bucket_info['record_count'] > bucket_max_items:
            message = ("Bucket maximum number of objects exceeded "
                       "(%d > %d objects)." % (bucket_info['record_count'],
                                               bucket_max_items))
            raise http_error(HTTPInsufficientStorage(),
                             errno=ERRORS.FORBIDDEN.value,
                             message=message)

    if collection_max_bytes is not None:
        if collection_info['storage_size'] > collection_max_bytes:
            message = ("Collection maximum size exceeded "
                       "(%d > %d Bytes)." % (collection_info['storage_size'],
                                             collection_max_bytes))
            raise http_error(HTTPInsufficientStorage(),
                             errno=ERRORS.FORBIDDEN.value,
                             message=message)

    if collection_max_items is not None:
        if collection_info['record_count'] > collection_max_items:
            message = ("Collection maximum number of objects exceeded "
                       "(%d > %d objects)." % (collection_info['record_count'],
                                               collection_max_items))
            raise http_error(HTTPInsufficientStorage(),
                             errno=ERRORS.FORBIDDEN.value,
                             message=message)

    storage.update(parent_id=bucket_uri,
                   collection_id=QUOTA_RESOURCE_NAME,
                   object_id=QUOTA_BUCKET_ID,
                   record=bucket_info)

    if collection_id:
        if action == 'delete' and resource_name == 'collection':
            try:
                storage.delete(parent_id=collection_uri,
                               collection_id=QUOTA_RESOURCE_NAME,
                               object_id=QUOTA_COLLECTION_ID)
            except RecordNotFoundError:
                pass
            return
        else:
            storage.update(parent_id=collection_uri,
                           collection_id=QUOTA_RESOURCE_NAME,
                           object_id=QUOTA_COLLECTION_ID,
                           record=collection_info)
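
record_size() is not shown in this listing; a plausible stand-in (an assumption, not necessarily kinto's implementation) is the length of the object's JSON serialization, which is enough to experiment with the quota arithmetic above:

import json

def record_size(record):
    # Measure an object as the byte length of its JSON text.
    return len(json.dumps(record))

print(record_size({'id': 'abc', 'title': 'hello'}))  # a small integer
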
Example #22
 def get_parent_id(self, request):
     self.bucket_id = request.matchdict["bucket_id"]
     return instance_uri(request, "bucket", id=self.bucket_id)
Example #23
def on_resource_changed(event):
    """
    Every time an object is created/changed/deleted, we create an entry in the
    ``history`` resource. The entries are served as read-only in the
    :mod:`kinto.plugins.history.views` module.
    """
    payload = copy.deepcopy(event.payload)
    action = payload['action']
    resource_name = payload['resource_name']
    event_uri = payload['uri']

    bucket_id = payload.pop('bucket_id')
    bucket_uri = instance_uri(event.request, 'bucket', id=bucket_id)
    collection_id = None
    collection_uri = None
    if 'collection_id' in payload:
        collection_id = payload['collection_id']
        collection_uri = instance_uri(event.request,
                                      'collection',
                                      bucket_id=bucket_id,
                                      id=collection_id)

    storage = event.request.registry.storage
    permission = event.request.registry.permission

    targets = []
    for impacted in event.impacted_records:
        target = impacted['new' if action != 'delete' else 'old']
        obj_id = target['id']
        # On POST .../records, the URI does not contain the newly created
        # record id. Make sure it does:
        if event_uri.endswith(obj_id):
            uri = event_uri
        else:
            uri = event_uri + '/' + obj_id
        targets.append((uri, target))

    # Prepare a list of object ids to be fetched from permission backend,
    # and fetch them all at once. Use a mapping for later convenience.
    all_perms_objects_ids = [oid for (oid, _) in targets]
    all_perms_objects_ids.append(bucket_uri)
    if collection_uri is not None:
        all_perms_objects_ids.append(collection_uri)
    all_perms_objects_ids = list(set(all_perms_objects_ids))
    all_permissions = permission.get_objects_permissions(all_perms_objects_ids)
    perms_by_object_id = dict(zip(all_perms_objects_ids, all_permissions))

    bucket_perms = perms_by_object_id[bucket_uri]
    collection_perms = {}
    if collection_uri is not None:
        collection_perms = perms_by_object_id[collection_uri]

    # The principals allowed to read the bucket and collection.
    # (Note: ``write`` means ``read``)
    read_principals = set(bucket_perms.get('read', []))
    read_principals.update(bucket_perms.get('write', []))
    read_principals.update(collection_perms.get('read', []))
    read_principals.update(collection_perms.get('write', []))

    # Create a history entry for each impacted record.
    for (uri, target) in targets:
        obj_id = target['id']
        # Prepare the history entry attributes.
        perms = {k: list(v) for k, v in perms_by_object_id[uri].items()}
        eventattrs = dict(**payload)
        eventattrs.setdefault('%s_id' % resource_name, obj_id)
        eventattrs['uri'] = uri
        attrs = dict(date=datetime.now().isoformat(),
                     target={'data': target, 'permissions': perms},
                     **eventattrs)

        # Create a record for the 'history' resource, whose parent_id is
        # the bucket URI (c.f. views.py).
        # Note: this will be rolled back if the transaction is rolled back.
        entry = storage.create(parent_id=bucket_uri,
                               collection_id='history',
                               record=attrs)

        # The read permission on the newly created history entry is the union
        # of the record permissions with the one from bucket and collection.
        entry_principals = set(read_principals)
        entry_principals.update(perms.get('read', []))
        entry_principals.update(perms.get('write', []))
        entry_perms = {'read': list(entry_principals)}
        # /buckets/{id}/history is the URI for the list of history entries.
        entry_perm_id = '/buckets/%s/history/%s' % (bucket_id, entry['id'])
        permission.replace_object_permissions(entry_perm_id, entry_perms)
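
The URI completion for grouped events is worth isolating: on POST to a plural endpoint the event URI lacks the new object id, so it is appended. A runnable sketch with illustrative values:

def full_uri(event_uri, obj_id):
    return event_uri if event_uri.endswith(obj_id) else event_uri + '/' + obj_id

assert full_uri('/buckets/blog/collections/articles/records', '42') == \
       '/buckets/blog/collections/articles/records/42'
assert full_uri('/buckets/blog/collections/articles/records/42', '42') == \
       '/buckets/blog/collections/articles/records/42'
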
Example #24
def sign_collection_data(event, resources, to_review_enabled, **kwargs):
    """
    Listen to resource change events to check whether a new signature is
    requested.

    When a source collection specified in settings is modified, and its
    new metadata ``status`` is set to ``"to-sign"``, then sign the data
    and update the destination.
    """
    payload = event.payload

    is_new_collection = payload['action'] == ACTIONS.CREATE.value

    current_user_id = event.request.prefixed_userid
    if current_user_id == PLUGIN_USERID:
        # Ignore changes made by plugin.
        return

    # Prevent recursion, since the following operations will alter the current collection.
    impacted_records = list(event.impacted_records)

    for impacted in impacted_records:
        new_collection = impacted['new']
        old_collection = impacted.get('old', {})

        # Only sign the configured resources.
        resource, signer = pick_resource_and_signer(event.request, resources,
                                                    bucket_id=payload['bucket_id'],
                                                    collection_id=new_collection['id'])
        if resource is None:
            continue

        updater = LocalUpdater(signer=signer,
                               storage=event.request.registry.storage,
                               permission=event.request.registry.permission,
                               source=resource['source'],
                               destination=resource['destination'])

        uri = instance_uri(event.request, "collection", bucket_id=payload['bucket_id'],
                           id=new_collection['id'])

        has_review_enabled = ('preview' in resource and
                              resource.get('to_review_enabled', to_review_enabled))

        review_event_cls = None

        new_status = new_collection.get("status")
        old_status = old_collection.get("status")

        # Authorize kinto-attachment metadata write access. #190
        event.request._attachment_auto_save = True

        try:
            if is_new_collection:
                if has_review_enabled:
                    updater.destination = resource['preview']
                    updater.sign_and_update_destination(event.request,
                                                        source_attributes=new_collection,
                                                        next_source_status=None)
                updater.destination = resource['destination']
                updater.sign_and_update_destination(event.request,
                                                    source_attributes=new_collection,
                                                    next_source_status=None)

            if old_status == new_status:
                continue

            if new_status == STATUS.TO_SIGN:
                # Run signature process (will set `last_reviewer` field).
                updater.destination = resource['destination']
                updater.sign_and_update_destination(event.request,
                                                    source_attributes=new_collection,
                                                    previous_source_status=old_status)

                if old_status == STATUS.SIGNED:
                    # When we refresh the signature, it is mainly in order to make sure that
                    # the latest signer certificate was used. When a preview collection
                    # is configured, we also want to refresh its signature.
                    if has_review_enabled:
                        updater.destination = resource['preview']
                        updater.sign_and_update_destination(event.request,
                                                            source_attributes=new_collection,
                                                            previous_source_status=old_status)
                else:
                    review_event_cls = signer_events.ReviewApproved

            elif new_status == STATUS.TO_REVIEW:
                if has_review_enabled:
                    # If preview collection: update and sign preview collection
                    updater.destination = resource['preview']
                    updater.sign_and_update_destination(event.request,
                                                        source_attributes=new_collection,
                                                        next_source_status=STATUS.TO_REVIEW)
                else:
                    # If no preview collection: just track `last_editor`
                    updater.update_source_review_request_by(event.request)
                review_event_cls = signer_events.ReviewRequested

            elif old_status == STATUS.TO_REVIEW and new_status == STATUS.WORK_IN_PROGRESS:
                review_event_cls = signer_events.ReviewRejected

            elif new_status == STATUS.TO_REFRESH:
                updater.refresh_signature(event.request, next_source_status=old_status)
                if has_review_enabled:
                    updater.destination = resource['preview']
                    updater.refresh_signature(event.request, next_source_status=old_status)

        except Exception:
            logger.exception("Could not sign '{0}'".format(uri))
            event.request.response.status = 503

        # Notify request of review.
        if review_event_cls:
            payload = payload.copy()
            payload["uri"] = uri
            payload["collection_id"] = new_collection['id']
            review_event = review_event_cls(request=event.request,
                                            payload=payload,
                                            impacted_records=[impacted],
                                            resource=resource,
                                            original_event=event)
            event.request.bound_data.setdefault('kinto_signer.events', []).append(review_event)
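
The last lines rely on ``request.bound_data`` as a per-request scratch space: setdefault() creates the event list once, and every impacted collection appends to the same list, presumably to be emitted once the request completes. The accumulation pattern itself is plain Python:

bound_data = {}
bound_data.setdefault('kinto_signer.events', []).append('review-requested')
bound_data.setdefault('kinto_signer.events', []).append('review-approved')
assert bound_data['kinto_signer.events'] == ['review-requested',
                                             'review-approved']
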
Example #25
 def get_parent_id(self, request):
     bucket_id = request.matchdict["bucket_id"]
     parent_id = utils.instance_uri(request, "bucket", id=bucket_id)
     return parent_id
Example #26
 def get_parent_id(self, request):
     self.bucket_id = request.matchdict['bucket_id']
     return instance_uri(request, 'bucket', id=self.bucket_id)
Example #27
 def get_parent_id(self, request):
     self.bucket_id = request.matchdict["bucket_id"]
     self.collection_id = request.matchdict["collection_id"]
     return utils.instance_uri(
         request, "collection", bucket_id=self.bucket_id, id=self.collection_id
     )
Example #28
 def get_parent_id(self, request):
     bucket_id = request.matchdict["bucket_id"]
     parent_id = utils.instance_uri(request, "bucket", id=bucket_id)
     return parent_id
Example #29
def check_collection_status(event, resources, group_check_enabled,
                            to_review_enabled, editors_group, reviewers_group):
    """Make sure status changes are allowed.
    """
    payload = event.payload

    current_user_id = event.request.prefixed_userid
    if current_user_id == _PLUGIN_USERID:
        # Ignore changes made by plugin.
        return

    user_principals = event.request.effective_principals

    for impacted in event.impacted_records:
        old_collection = impacted.get("old", {})
        old_status = old_collection.get("status")
        new_collection = impacted["new"]
        new_status = new_collection.get("status")

        # Skip if resource is not configured.
        key = instance_uri(event.request, "collection",
                           bucket_id=payload["bucket_id"],
                           id=new_collection["id"])
        resource = resources.get(key)
        if resource is None:
            continue

        _to_review_enabled = resource.get("to_review_enabled", to_review_enabled)
        _group_check_enabled = resource.get("group_check_enabled", group_check_enabled)

        _editors_group = resource.get("editors_group", editors_group)
        editors_group_uri = instance_uri(event.request, "group",
                                         bucket_id=payload["bucket_id"],
                                         id=_editors_group)
        _reviewers_group = resource.get("reviewers_group", reviewers_group)
        reviewers_group_uri = instance_uri(event.request, "group",
                                           bucket_id=payload["bucket_id"],
                                           id=_reviewers_group)

        if old_status == new_status:
            continue

        # 1. None -> work-in-progress
        if new_status == STATUS.WORK_IN_PROGRESS:
            pass

        # 2. work-in-progress -> to-review
        elif new_status == STATUS.TO_REVIEW:
            if editors_group_uri not in user_principals and _group_check_enabled:
                raise_forbidden(message="Not in %s group" % _editors_group)

        # 3. to-review -> work-in-progress
        # 3. to-review -> to-sign
        elif new_status == STATUS.TO_SIGN:
            # Only allow to-sign from to-review if the user is a reviewer and not the editor.
            if reviewers_group_uri not in user_principals and _group_check_enabled:
                raise_forbidden(message="Not in %s group" % _reviewers_group)

            requires_review = old_status not in (STATUS.TO_REVIEW,
                                                 STATUS.SIGNED)
            if requires_review and _to_review_enabled:
                raise_invalid(message="Collection not reviewed")

            is_same_editor = old_collection.get(FIELD_LAST_EDITOR) == current_user_id
            if _to_review_enabled and is_same_editor and old_status != STATUS.SIGNED:
                raise_forbidden(message="Editor cannot review")

        # 4. to-sign -> signed
        elif new_status == STATUS.SIGNED:
            raise_invalid(message="Cannot set status to '%s'" % new_status)

        # Nobody can remove the status
        elif new_status is None:
            raise_invalid(message="Cannot remove status")
        # Unknown manual status
        else:
            raise_invalid(message="Invalid status '%s'" % new_status)
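
Read off the numbered branches, only three status values can be set by a regular user; everything else is rejected. A compact restatement (derived from this function, not an official kinto-signer API):

USER_SETTABLE_STATUSES = {
    'work-in-progress',  # 1. always allowed
    'to-review',         # 2. requires membership in the editors group
    'to-sign',           # 3. requires membership in the reviewers group
}
# 'signed' is reserved for the plugin, and removing the status is invalid.
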
Example #30
def on_resource_changed(event):
    """
    Every time an object is created/changed/deleted, we update the
    bucket counters.

    If a new object exceeds the quotas, we reject the request.
    """
    payload = event.payload
    action = payload['action']
    resource_name = payload['resource_name']

    if action == 'delete' and resource_name == 'bucket':
        # Deleting a bucket already deletes everything underneath (including
        # quotas info). See kinto/views/bucket.
        return

    settings = event.request.registry.settings

    event_uri = payload['uri']
    bucket_id = payload['bucket_id']
    bucket_uri = instance_uri(event.request, 'bucket', id=bucket_id)
    collection_id = None
    collection_uri = None
    if 'collection_id' in payload:
        collection_id = payload['collection_id']
        collection_uri = instance_uri(event.request,
                                      'collection',
                                      bucket_id=bucket_id,
                                      id=collection_id)

    bucket_max_bytes = get_bucket_settings(settings, bucket_id, 'max_bytes')
    bucket_max_items = get_bucket_settings(settings, bucket_id, 'max_items')
    bucket_max_bytes_per_item = get_bucket_settings(settings, bucket_id,
                                                    'max_bytes_per_item')
    collection_max_bytes = get_collection_settings(settings, bucket_id,
                                                   collection_id, 'max_bytes')
    collection_max_items = get_collection_settings(settings, bucket_id,
                                                   collection_id, 'max_items')
    collection_max_bytes_per_item = get_collection_settings(
        settings, bucket_id, collection_id, 'max_bytes_per_item')

    max_bytes_per_item = (collection_max_bytes_per_item or
                          bucket_max_bytes_per_item)

    storage = event.request.registry.storage

    targets = []
    for impacted in event.impacted_records:
        target = impacted['new' if action != 'delete' else 'old']
        # On POST .../records, the URI does not contain the newly created
        # record id.
        obj_id = target['id']
        parts = event_uri.split('/')
        if resource_name in parts[-1]:
            parts.append(obj_id)
        else:
            # Make sure the id is correct on grouped events.
            parts[-1] = obj_id
        uri = '/'.join(parts)

        old = impacted.get('old', {})
        new = impacted.get('new', {})

        targets.append((uri, obj_id, old, new))

    try:
        bucket_info = copy.deepcopy(
            storage.get(parent_id=bucket_uri,
                        collection_id=QUOTA_RESOURCE_NAME,
                        object_id=BUCKET_QUOTA_OBJECT_ID))
    except RecordNotFoundError:
        bucket_info = {
            'collection_count': 0,
            'record_count': 0,
            'storage_size': 0,
        }

    collection_info = {
        'record_count': 0,
        'storage_size': 0,
    }
    if collection_id:
        try:
            collection_info = copy.deepcopy(
                storage.get(parent_id=collection_uri,
                            collection_id=QUOTA_RESOURCE_NAME,
                            object_id=COLLECTION_QUOTA_OBJECT_ID))
        except RecordNotFoundError:
            pass

    # Update the bucket quotas values for each impacted record.
    for (uri, obj_id, old, new) in targets:
        old_size = record_size(old)
        new_size = record_size(new)

        if max_bytes_per_item is not None and action != 'delete':
            if new_size > max_bytes_per_item:
                message = ('Maximum bytes per object exceeded '
                           '({} > {} Bytes).'.format(new_size, max_bytes_per_item))
                raise_insufficient_storage(message)

        if action == 'create':
            bucket_info['storage_size'] += new_size
            if resource_name == 'collection':
                bucket_info['collection_count'] += 1
                collection_info['storage_size'] += new_size
            if resource_name == 'record':
                bucket_info['record_count'] += 1
                collection_info['record_count'] += 1
                collection_info['storage_size'] += new_size
        elif action == 'update':
            bucket_info['storage_size'] -= old_size
            bucket_info['storage_size'] += new_size
            if resource_name in ('collection', 'record'):
                collection_info['storage_size'] -= old_size
                collection_info['storage_size'] += new_size
        else:   # action == 'delete':
            bucket_info['storage_size'] -= old_size
            if resource_name == 'collection':
                collection_uri = uri
                bucket_info['collection_count'] -= 1
                # When we delete the collection all the records in it
                # are deleted without notification.
                collection_records, _ = storage.get_all(
                    collection_id='record',
                    parent_id=collection_uri)
                for r in collection_records:
                    old_record_size = record_size(r)
                    bucket_info['record_count'] -= 1
                    bucket_info['storage_size'] -= old_record_size
                    collection_info['record_count'] -= 1
                    collection_info['storage_size'] -= old_record_size
                collection_info['storage_size'] -= old_size

            if resource_name == 'record':
                bucket_info['record_count'] -= 1
                collection_info['record_count'] -= 1
                collection_info['storage_size'] -= old_size

    if bucket_max_bytes is not None:
        if bucket_info['storage_size'] > bucket_max_bytes:
            message = ('Bucket maximum total size exceeded '
                       '({} > {} Bytes).'.format(bucket_info['storage_size'],
                                                  bucket_max_bytes))
            raise_insufficient_storage(message)

    if bucket_max_items is not None:
        if bucket_info['record_count'] > bucket_max_items:
            message = ('Bucket maximum number of objects exceeded '
                       '({} > {} objects).'.format(bucket_info['record_count'],
                                                   bucket_max_items))
            raise_insufficient_storage(message)

    if collection_max_bytes is not None:
        if collection_info['storage_size'] > collection_max_bytes:
            message = ('Collection maximum size exceeded '
                       '({} > {} Bytes).'.format(collection_info['storage_size'],
                                                 collection_max_bytes))
            raise_insufficient_storage(message)

    if collection_max_items is not None:
        if collection_info['record_count'] > collection_max_items:
            message = ('Collection maximum number of objects exceeded '
                       '({} > {} objects).'.format(collection_info['record_count'],
                                                   collection_max_items))
            raise_insufficient_storage(message)

    storage.update(parent_id=bucket_uri,
                   collection_id=QUOTA_RESOURCE_NAME,
                   object_id=BUCKET_QUOTA_OBJECT_ID,
                   record=bucket_info)

    if collection_id:
        if action == 'delete' and resource_name == 'collection':
            # Deleting a collection already deletes everything underneath
            # (including quotas info). See kinto/views/collection.
            return
        else:
            storage.update(parent_id=collection_uri,
                           collection_id=QUOTA_RESOURCE_NAME,
                           object_id=COLLECTION_QUOTA_OBJECT_ID,
                           record=collection_info)
Example #31
def sign_collection_data(event, resources):
    """
    Listen to resource change events to check whether a new signature is
    requested.

    When a source collection specified in settings is modified, and its
    new metadata ``status`` is set to ``"to-sign"``, then sign the data
    and update the destination.
    """
    payload = event.payload

    current_user_id = event.request.prefixed_userid
    if current_user_id == _PLUGIN_USERID:
        # Ignore changes made by plugin.
        return

    # Prevent recursion, since the following operations will alter the current collection.
    impacted_records = list(event.impacted_records)

    for impacted in impacted_records:
        new_collection = impacted['new']
        old_collection = impacted.get('old', {})

        uri = instance_uri(event.request,
                           "collection",
                           bucket_id=payload['bucket_id'],
                           id=new_collection['id'])
        resource = resources.get(uri)

        # Only sign the configured resources.
        if resource is None:
            continue

        registry = event.request.registry
        updater = LocalUpdater(signer=registry.signers[uri],
                               storage=registry.storage,
                               permission=registry.permission,
                               source=resource['source'],
                               destination=resource['destination'])

        review_event_cls = None
        try:
            new_status = new_collection.get("status")
            old_status = old_collection.get("status")

            # Authorize kinto-attachment metadata write access. #190
            event.request._attachment_auto_save = True

            if new_status == STATUS.TO_SIGN:
                # Run signature process (will set `last_reviewer` field).
                updater.sign_and_update_destination(event.request,
                                                    source=new_collection)
                if old_status != STATUS.SIGNED:
                    review_event_cls = signer_events.ReviewApproved

            elif new_status == STATUS.TO_REVIEW:
                if 'preview' in resource:
                    # If preview collection: update and sign preview collection
                    updater.destination = resource['preview']
                    updater.sign_and_update_destination(
                        event.request,
                        source=new_collection,
                        next_source_status=STATUS.TO_REVIEW)
                else:
                    # If no preview collection: just track `last_editor`
                    with updater.send_events(event.request):
                        updater.update_source_editor(event.request)
                review_event_cls = signer_events.ReviewRequested

            elif old_status == STATUS.TO_REVIEW and new_status == STATUS.WORK_IN_PROGRESS:
                review_event_cls = signer_events.ReviewRejected

        except Exception:
            logger.exception("Could not sign '{0}'".format(uri))
            event.request.response.status = 503

        # Notify request of review.
        if review_event_cls:
            payload = payload.copy()
            payload["uri"] = uri
            payload["collection_id"] = new_collection['id']
            review_event = review_event_cls(request=event.request,
                                            payload=payload,
                                            impacted_records=[impacted],
                                            resource=resource,
                                            original_event=event)
            event.request.bound_data.setdefault('kinto_signer.events',
                                                []).append(review_event)
Example #32
def check_collection_status(event, resources, group_check_enabled,
                            to_review_enabled, editors_group, reviewers_group):
    """Make sure status changes are allowed.
    """
    payload = event.payload

    current_user_id = event.request.prefixed_userid
    if current_user_id == _PLUGIN_USERID:
        # Ignore changes made by plugin.
        return

    user_principals = event.request.effective_principals

    for impacted in event.impacted_records:
        old_collection = impacted.get("old", {})
        old_status = old_collection.get("status")
        new_collection = impacted["new"]
        new_status = new_collection.get("status")

        # Skip if resource is not configured.
        key = instance_uri(event.request,
                           "collection",
                           bucket_id=payload["bucket_id"],
                           id=new_collection["id"])
        resource = resources.get(key)
        if resource is None:
            continue

        _to_review_enabled = resource.get("to_review_enabled",
                                          to_review_enabled)
        _group_check_enabled = resource.get("group_check_enabled",
                                            group_check_enabled)

        _editors_group = resource.get("editors_group", editors_group)
        editors_group_uri = instance_uri(event.request,
                                         "group",
                                         bucket_id=payload["bucket_id"],
                                         id=_editors_group)
        _reviewers_group = resource.get("reviewers_group", reviewers_group)
        reviewers_group_uri = instance_uri(event.request,
                                           "group",
                                           bucket_id=payload["bucket_id"],
                                           id=_reviewers_group)

        if old_status == new_status:
            continue

        # 1. None -> work-in-progress
        if new_status == STATUS.WORK_IN_PROGRESS:
            pass

        # 2. work-in-progress -> to-review
        elif new_status == STATUS.TO_REVIEW:
            if editors_group_uri not in user_principals and _group_check_enabled:
                raise_forbidden(message="Not in %s group" % _editors_group)

        # 3. to-review -> work-in-progress
        # 3. to-review -> to-sign
        elif new_status == STATUS.TO_SIGN:
            # Only allow to-sign from to-review if the user is a reviewer and not the editor.
            if reviewers_group_uri not in user_principals and _group_check_enabled:
                raise_forbidden(message="Not in %s group" % _reviewers_group)

            requires_review = old_status not in (STATUS.TO_REVIEW,
                                                 STATUS.SIGNED)
            if requires_review and _to_review_enabled:
                raise_invalid(message="Collection not reviewed")

            is_same_editor = old_collection.get(
                FIELD_LAST_EDITOR) == current_user_id
            if _to_review_enabled and is_same_editor and old_status != STATUS.SIGNED:
                raise_forbidden(message="Editor cannot review")

        # 4. to-sign -> signed
        elif new_status == STATUS.SIGNED:
            raise_invalid(message="Cannot set status to '%s'" % new_status)

        # Nobody can remove the status
        elif new_status is None:
            raise_invalid(message="Cannot remove status")
        # Unknown manual status
        else:
            raise_invalid(message="Invalid status '%s'" % new_status)
Example No. 33
def on_resource_changed(event):
    """
    Every time an object is created/changed/deleted, we update the
    bucket counters.

    If a new object exceeds the quotas, we reject the request.
    """
    payload = event.payload
    action = payload["action"]
    resource_name = payload["resource_name"]

    if action == "delete" and resource_name == "bucket":
        # Deleting a bucket already deletes everything underneath (including
        # quotas info). See kinto/views/bucket.
        return

    settings = event.request.registry.settings

    event_uri = payload["uri"]
    bucket_id = payload["bucket_id"]
    bucket_uri = instance_uri(event.request, "bucket", id=bucket_id)
    collection_id = None
    collection_uri = None
    if "collection_id" in payload:
        collection_id = payload["collection_id"]
        collection_uri = instance_uri(
            event.request, "collection", bucket_id=bucket_id, id=collection_id
        )

    bucket_max_bytes = get_bucket_settings(settings, bucket_id, "max_bytes")
    bucket_max_items = get_bucket_settings(settings, bucket_id, "max_items")
    bucket_max_bytes_per_item = get_bucket_settings(settings, bucket_id, "max_bytes_per_item")
    collection_max_bytes = get_collection_settings(settings, bucket_id, collection_id, "max_bytes")
    collection_max_items = get_collection_settings(settings, bucket_id, collection_id, "max_items")
    collection_max_bytes_per_item = get_collection_settings(
        settings, bucket_id, collection_id, "max_bytes_per_item"
    )

    max_bytes_per_item = collection_max_bytes_per_item or bucket_max_bytes_per_item

    storage = event.request.registry.storage

    targets = []
    for impacted in event.impacted_records:
        target = impacted["new" if action != "delete" else "old"]
        # On POST .../records, the URI does not contain the newly created
        # record id.
        obj_id = target["id"]
        parts = event_uri.split("/")
        if resource_name in parts[-1]:
            parts.append(obj_id)
        else:
            # Make sure the id is correct on grouped events.
            parts[-1] = obj_id
        uri = "/".join(parts)

        old = impacted.get("old", {})
        new = impacted.get("new", {})

        targets.append((uri, obj_id, old, new))

    try:
        bucket_info = copy.deepcopy(
            storage.get(
                parent_id=bucket_uri,
                collection_id=QUOTA_RESOURCE_NAME,
                object_id=BUCKET_QUOTA_OBJECT_ID,
            )
        )
    except RecordNotFoundError:
        bucket_info = {"collection_count": 0, "record_count": 0, "storage_size": 0}

    collection_info = {"record_count": 0, "storage_size": 0}
    if collection_id:
        try:
            collection_info = copy.deepcopy(
                storage.get(
                    parent_id=collection_uri,
                    collection_id=QUOTA_RESOURCE_NAME,
                    object_id=COLLECTION_QUOTA_OBJECT_ID,
                )
            )
        except RecordNotFoundError:
            pass

    # Update the bucket quotas values for each impacted record.
    for (uri, obj_id, old, new) in targets:
        old_size = record_size(old)
        new_size = record_size(new)

        if max_bytes_per_item is not None and action != "delete":
            if new_size > max_bytes_per_item:
                message = "Maximum bytes per object exceeded " "({} > {} Bytes.".format(
                    new_size, max_bytes_per_item
                )
                raise_insufficient_storage(message)

        if action == "create":
            bucket_info["storage_size"] += new_size
            if resource_name == "collection":
                bucket_info["collection_count"] += 1
                collection_info["storage_size"] += new_size
            if resource_name == "record":
                bucket_info["record_count"] += 1
                collection_info["record_count"] += 1
                collection_info["storage_size"] += new_size
        elif action == "update":
            bucket_info["storage_size"] -= old_size
            bucket_info["storage_size"] += new_size
            if resource_name in ("collection", "record"):
                collection_info["storage_size"] -= old_size
                collection_info["storage_size"] += new_size
        else:  # action == 'delete':
            bucket_info["storage_size"] -= old_size
            if resource_name == "collection":
                collection_uri = uri
                bucket_info["collection_count"] -= 1
                # When we delete the collection all the records in it
                # are deleted without notification.
                collection_records, _ = storage.get_all(
                    collection_id="record", parent_id=collection_uri
                )
                for r in collection_records:
                    old_record_size = record_size(r)
                    bucket_info["record_count"] -= 1
                    bucket_info["storage_size"] -= old_record_size
                    collection_info["record_count"] -= 1
                    collection_info["storage_size"] -= old_record_size
                collection_info["storage_size"] -= old_size

            if resource_name == "record":
                bucket_info["record_count"] -= 1
                collection_info["record_count"] -= 1
                collection_info["storage_size"] -= old_size

    if bucket_max_bytes is not None:
        if bucket_info["storage_size"] > bucket_max_bytes:
            message = "Bucket maximum total size exceeded " "({} > {} Bytes). ".format(
                bucket_info["storage_size"], bucket_max_bytes
            )
            raise_insufficient_storage(message)

    if bucket_max_items is not None:
        if bucket_info["record_count"] > bucket_max_items:
            message = "Bucket maximum number of objects exceeded " "({} > {} objects).".format(
                bucket_info["record_count"], bucket_max_items
            )
            raise_insufficient_storage(message)

    if collection_max_bytes is not None:
        if collection_info["storage_size"] > collection_max_bytes:
            message = "Collection maximum size exceeded " "({} > {} Bytes).".format(
                collection_info["storage_size"], collection_max_bytes
            )
            raise_insufficient_storage(message)

    if collection_max_items is not None:
        if collection_info["record_count"] > collection_max_items:
            message = "Collection maximum number of objects exceeded " "({} > {} objects).".format(
                collection_info["record_count"], collection_max_items
            )
            raise_insufficient_storage(message)

    storage.update(
        parent_id=bucket_uri,
        collection_id=QUOTA_RESOURCE_NAME,
        object_id=BUCKET_QUOTA_OBJECT_ID,
        record=bucket_info,
    )

    if collection_id:
        if action == "delete" and resource_name == "collection":
            # Deleting a collection already deletes everything underneath
            # (including quotas info). See kinto/views/collection.
            return
        else:
            storage.update(
                parent_id=collection_uri,
                collection_id=QUOTA_RESOURCE_NAME,
                object_id=COLLECTION_QUOTA_OBJECT_ID,
                record=collection_info,
            )
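
The listener above relies on helpers that are not shown: record_size(), get_bucket_settings() and get_collection_settings(). Here is a minimal sketch of plausible implementations, assuming quota settings follow a "quotas.<scope>_<name>" key pattern (both the key names and the size metric are assumptions):

import json


def record_size(obj):
    # Assumed metric: the length of the JSON-serialized object.
    return len(json.dumps(obj)) if obj else 0


def get_bucket_settings(settings, bucket_id, name):
    # Bucket-specific value first, then the global default.
    return settings.get(
        "quotas.bucket_{}_{}".format(bucket_id, name),
        settings.get("quotas.bucket_{}".format(name)),
    )


def get_collection_settings(settings, bucket_id, collection_id, name):
    return settings.get(
        "quotas.collection_{}_{}_{}".format(bucket_id, collection_id, name),
        settings.get("quotas.collection_{}".format(name)),
    )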
Example No. 34
def get_changeset(request):
    bid = request.matchdict["bid"]
    cid = request.matchdict["cid"]

    storage = request.registry.storage

    queryparams = request.validated["querystring"]
    limit = queryparams.get("_limit")
    filters = []
    include_deleted = False
    if "_since" in queryparams:
        filters = [
            Filter("last_modified", queryparams["_since"], COMPARISON.GT)
        ]
        # Include tombstones when querying with _since
        include_deleted = True

    if (bid, cid) == (MONITOR_BUCKET, CHANGES_COLLECTION):
        # Redirect old _since values (on monitor/changes only).
        _handle_old_since_redirect(request)

        if "bucket" in queryparams:
            filters.append(
                Filter("bucket", queryparams["bucket"], COMPARISON.EQ))

        if "collection" in queryparams:
            filters.append(
                Filter("collection", queryparams["collection"], COMPARISON.EQ))

        model = ChangesModel(request)
        metadata = {}
        timestamp = model.timestamp()
        changes = model.get_objects(filters=filters,
                                    limit=limit,
                                    include_deleted=include_deleted)

    else:
        bucket_uri = instance_uri(request, "bucket", id=bid)
        collection_uri = instance_uri(request,
                                      "collection",
                                      bucket_id=bid,
                                      id=cid)

        try:
            # We'll make sure that data isn't changed while we read metadata, changes,
            # etc.
            before = storage.resource_timestamp(resource_name="record",
                                                parent_id=collection_uri)
            # Fetch collection metadata.
            metadata = storage.get(resource_name="collection",
                                   parent_id=bucket_uri,
                                   object_id=cid)

        except storage_exceptions.ObjectNotFoundError:
            raise httpexceptions.HTTPNotFound()

        except storage_exceptions.BackendError as e:
            # The call to `resource_timestamp()` on an empty collection will
            # try to initialize it. If the instance is read-only, it fails with
            # a backend error. Raise 404 in that case, otherwise re-raise the
            # original backend error.
            if "when running in readonly" in str(e):
                raise httpexceptions.HTTPNotFound()
            raise

        # Fetch list of changes.
        changes = storage.list_all(
            resource_name="record",
            parent_id=collection_uri,
            filters=filters,
            limit=limit,
            id_field="id",
            modified_field="last_modified",
            deleted_field="deleted",
            sorting=[Sort("last_modified", -1)],
            include_deleted=include_deleted,
        )
        # Fetch current collection timestamp.
        timestamp = storage.resource_timestamp(resource_name="record",
                                               parent_id=collection_uri)

        # Do not serve inconsistent data.
        if before != timestamp:  # pragma: no cover
            raise storage_exceptions.IntegrityError(
                message="Inconsistent data. Retry.")

    # Cache control.
    _handle_cache_expires(request, bid, cid)

    # Set Last-Modified response header (Pyramid takes care of converting).
    request.response.last_modified = timestamp / 1000.0

    data = {
        "metadata": metadata,
        "timestamp": timestamp,
        "changes": changes,
    }
    return data
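
Seen from a client, the changeset view returns the collection metadata, the current timestamp and the list of changes in a single payload. A hedged example request follows (server URL, bucket and collection ids are invented; the quoted, ETag-style _since value mirrors how Kinto usually expects timestamps):

import requests

url = "https://kinto.example.com/v1/buckets/main/collections/articles/changeset"
resp = requests.get(url, params={"_since": '"1634911487123"', "_limit": 100})
resp.raise_for_status()
body = resp.json()
print(body["timestamp"])     # current collection timestamp
print(body["metadata"])      # collection metadata
print(len(body["changes"]))  # records (tombstones included when _since is used)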
Example No. 35
def on_resource_changed(event):
    """
    Every time an object is created/changed/deleted, we update the
    bucket counters.

    If a new object exceeds the quotas, we reject the request.
    """
    payload = event.payload
    action = payload['action']
    resource_name = payload['resource_name']
    event_uri = payload['uri']

    settings = event.request.registry.settings

    bucket_id = payload['bucket_id']
    bucket_uri = instance_uri(event.request, 'bucket', id=bucket_id)
    collection_id = None
    collection_uri = None
    if 'collection_id' in payload:
        collection_id = payload['collection_id']
        collection_uri = instance_uri(event.request,
                                      'collection',
                                      bucket_id=bucket_id,
                                      id=collection_id)

    bucket_max_bytes = get_bucket_settings(settings, bucket_id, 'max_bytes')
    bucket_max_items = get_bucket_settings(settings, bucket_id, 'max_items')
    bucket_max_bytes_per_item = get_bucket_settings(settings, bucket_id,
                                                    'max_bytes_per_item')
    collection_max_bytes = get_collection_settings(settings, bucket_id,
                                                   collection_id, 'max_bytes')
    collection_max_items = get_collection_settings(settings, bucket_id,
                                                   collection_id, 'max_items')
    collection_max_bytes_per_item = get_collection_settings(
        settings, bucket_id, collection_id, 'max_bytes_per_item')

    max_bytes_per_item = (collection_max_bytes_per_item
                          or bucket_max_bytes_per_item)

    storage = event.request.registry.storage

    if action == 'delete' and resource_name == 'bucket':
        # Deleting a bucket already deletes everything underneath (including
        # quotas info). See kinto/views/bucket.
        return

    targets = []
    for impacted in event.impacted_records:
        target = impacted['new' if action != 'delete' else 'old']
        # On POST .../records, the URI does not contain the newly created
        # record id.
        obj_id = target['id']
        parts = event_uri.split('/')
        if resource_name in parts[-1]:
            parts.append(obj_id)
        else:
            # Make sure the id is correct on grouped events.
            parts[-1] = obj_id
        uri = '/'.join(parts)

        old = impacted.get('old', {})
        new = impacted.get('new', {})

        targets.append((uri, obj_id, old, new))

    try:
        bucket_info = copy.deepcopy(
            storage.get(parent_id=bucket_uri,
                        collection_id=QUOTA_RESOURCE_NAME,
                        object_id=BUCKET_QUOTA_OBJECT_ID))
    except RecordNotFoundError:
        bucket_info = {
            "collection_count": 0,
            "record_count": 0,
            "storage_size": 0,
        }

    collection_info = {
        "record_count": 0,
        "storage_size": 0,
    }
    if collection_id:
        try:
            collection_info = copy.deepcopy(
                storage.get(parent_id=collection_uri,
                            collection_id=QUOTA_RESOURCE_NAME,
                            object_id=COLLECTION_QUOTA_OBJECT_ID))
        except RecordNotFoundError:
            pass

    # Update the bucket quotas values for each impacted record.
    for (uri, obj_id, old, new) in targets:
        old_size = record_size(old)
        new_size = record_size(new)

        if max_bytes_per_item is not None and action != "delete":
            if new_size > max_bytes_per_item:
                message = ("Maximum bytes per object exceeded "
                           "({} > {} Bytes.".format(new_size,
                                                    max_bytes_per_item))
                raise http_error(HTTPInsufficientStorage(),
                                 errno=ERRORS.FORBIDDEN.value,
                                 message=message)

        if action == 'create':
            bucket_info['storage_size'] += new_size
            if resource_name == 'collection':
                bucket_info['collection_count'] += 1
                collection_info['storage_size'] += new_size
            if resource_name == 'record':
                bucket_info['record_count'] += 1
                collection_info['record_count'] += 1
                collection_info['storage_size'] += new_size
        elif action == 'update':
            bucket_info['storage_size'] -= old_size
            bucket_info['storage_size'] += new_size
            if resource_name in ('collection', 'record'):
                collection_info['storage_size'] -= old_size
                collection_info['storage_size'] += new_size
        else:  # action == 'delete':
            bucket_info['storage_size'] -= old_size
            if resource_name == 'collection':
                collection_uri = uri
                bucket_info['collection_count'] -= 1
                # When we delete the collection all the records in it
                # are deleted without notification.
                collection_records, _ = storage.get_all(
                    collection_id='record', parent_id=collection_uri)
                for r in collection_records:
                    old_record_size = record_size(r)
                    bucket_info['record_count'] -= 1
                    bucket_info['storage_size'] -= old_record_size
                    collection_info['record_count'] -= 1
                    collection_info['storage_size'] -= old_record_size
                collection_info['storage_size'] -= old_size

            if resource_name == 'record':
                bucket_info['record_count'] -= 1
                collection_info['record_count'] -= 1
                collection_info['storage_size'] -= old_size

    if bucket_max_bytes is not None:
        if bucket_info['storage_size'] > bucket_max_bytes:
            message = ("Bucket maximum total size exceeded "
                       "({} > {} Bytes). ".format(bucket_info['storage_size'],
                                                  bucket_max_bytes))
            raise http_error(HTTPInsufficientStorage(),
                             errno=ERRORS.FORBIDDEN.value,
                             message=message)

    if bucket_max_items is not None:
        if bucket_info['record_count'] > bucket_max_items:
            message = ("Bucket maximum number of objects exceeded "
                       "({} > {} objects).".format(bucket_info['record_count'],
                                                   bucket_max_items))
            raise http_error(HTTPInsufficientStorage(),
                             errno=ERRORS.FORBIDDEN.value,
                             message=message)

    if collection_max_bytes is not None:
        if collection_info['storage_size'] > collection_max_bytes:
            message = ("Collection maximum size exceeded "
                       "({} > {} Bytes).".format(
                           collection_info['storage_size'],
                           collection_max_bytes))
            raise http_error(HTTPInsufficientStorage(),
                             errno=ERRORS.FORBIDDEN.value,
                             message=message)

    if collection_max_items is not None:
        if collection_info['record_count'] > collection_max_items:
            message = ("Collection maximum number of objects exceeded "
                       "({} > {} objects).".format(
                           collection_info['record_count'],
                           collection_max_items))
            raise http_error(HTTPInsufficientStorage(),
                             errno=ERRORS.FORBIDDEN.value,
                             message=message)

    storage.update(parent_id=bucket_uri,
                   collection_id=QUOTA_RESOURCE_NAME,
                   object_id=BUCKET_QUOTA_OBJECT_ID,
                   record=bucket_info)

    if collection_id:
        if action == 'delete' and resource_name == 'collection':
            # Deleting a collection already deletes everything underneath
            # (including quotas info). See kinto/views/collection.
            return
        else:
            storage.update(parent_id=collection_uri,
                           collection_id=QUOTA_RESOURCE_NAME,
                           object_id=COLLECTION_QUOTA_OBJECT_ID,
                           record=collection_info)
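
Examples 33 and 35 differ mainly in how they raise the HTTP 507 error: the former calls a raise_insufficient_storage() helper, while this one inlines http_error(HTTPInsufficientStorage(), ...). A plausible definition of the helper, reconstructed from the inline pattern (a sketch, not necessarily the library's exact code):

from kinto.core.errors import ERRORS, http_error
from pyramid.httpexceptions import HTTPInsufficientStorage


def raise_insufficient_storage(message):
    # Mirrors the inline pattern above: HTTP 507 with a FORBIDDEN errno.
    raise http_error(
        HTTPInsufficientStorage(),
        errno=ERRORS.FORBIDDEN.value,
        message=message,
    )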
Example No. 36
def get_parent_id(self, request):
    bucket_id = request.matchdict['bucket_id']
    parent_id = utils.instance_uri(request, 'bucket', id=bucket_id)
    return parent_id
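
A get_parent_id() override like this only makes sense on a resource class. Below is a hedged sketch of a surrounding declaration; the resource name and paths are invented, and the registration parameter and base-class names vary across Kinto versions:

from kinto.core import resource, utils


@resource.register(
    name="example",
    collection_path="/buckets/{bucket_id}/examples",
    record_path="/buckets/{bucket_id}/examples/{id}",
)
class Example(resource.Resource):
    def get_parent_id(self, request):
        bucket_id = request.matchdict["bucket_id"]
        return utils.instance_uri(request, "bucket", id=bucket_id)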
Example No. 37
def on_resource_changed(event):
    """
    Every time an object is created/changed/deleted, we create an entry in the
    ``history`` resource. The entries are served as read-only in the
    :mod:`kinto.plugins.history.views` module.
    """
    payload = event.payload
    resource_name = payload['resource_name']
    event_uri = payload['uri']

    bucket_id = None
    bucket_uri = None
    collection_uri = None

    storage = event.request.registry.storage
    permission = event.request.registry.permission

    targets = []
    for impacted in event.impacted_records:
        target = impacted['new']
        obj_id = target['id']

        try:
            bucket_id = payload['bucket_id']
        except KeyError:
            # e.g. DELETE /buckets
            bucket_id = obj_id
        bucket_uri = instance_uri(event.request, 'bucket', id=bucket_id)

        if 'collection_id' in payload:
            collection_id = payload['collection_id']
            collection_uri = instance_uri(event.request,
                                          'collection',
                                          bucket_id=bucket_id,
                                          id=collection_id)

        # On POST .../records, the URI does not contain the newly created
        # record id.
        parts = event_uri.split('/')
        if resource_name in parts[-1]:
            parts.append(obj_id)
        else:
            # Make sure the id is correct on grouped events.
            parts[-1] = obj_id
        uri = '/'.join(parts)
        targets.append((uri, target))

    # Prepare a list of object ids to be fetched from permission backend,
    # and fetch them all at once. Use a mapping for later convenience.
    all_perms_objects_ids = [oid for (oid, _) in targets]
    all_perms_objects_ids.append(bucket_uri)
    if collection_uri is not None:
        all_perms_objects_ids.append(collection_uri)
    all_perms_objects_ids = list(set(all_perms_objects_ids))
    all_permissions = permission.get_objects_permissions(all_perms_objects_ids)
    perms_by_object_id = dict(zip(all_perms_objects_ids, all_permissions))

    bucket_perms = perms_by_object_id[bucket_uri]
    collection_perms = {}
    if collection_uri is not None:
        collection_perms = perms_by_object_id[collection_uri]

    # The principals allowed to read the bucket and collection.
    # (Note: ``write`` means ``read``)
    read_principals = set(bucket_perms.get('read', []))
    read_principals.update(bucket_perms.get('write', []))
    read_principals.update(collection_perms.get('read', []))
    read_principals.update(collection_perms.get('write', []))

    # Create a history entry for each impacted record.
    for (uri, target) in targets:
        obj_id = target['id']
        # Prepare the history entry attributes.
        perms = {k: list(v) for k, v in perms_by_object_id[uri].items()}
        eventattrs = dict(**payload)
        eventattrs.pop('bucket_id', None)
        eventattrs.setdefault('%s_id' % resource_name, obj_id)
        eventattrs['uri'] = uri
        attrs = dict(date=datetime.now().isoformat(),
                     target={
                         'data': target,
                         'permissions': perms
                     },
                     **eventattrs)

        # Create a record for the 'history' resource, whose parent_id is
        # the bucket URI (c.f. views.py).
        # Note: this will be rolled back if the transaction is rolled back.
        entry = storage.create(parent_id=bucket_uri,
                               collection_id='history',
                               record=attrs)

        # The read permission on the newly created history entry is the union
        # of the record permissions with the one from bucket and collection.
        entry_principals = set(read_principals)
        entry_principals.update(perms.get('read', []))
        entry_principals.update(perms.get('write', []))
        entry_perms = {'read': list(entry_principals)}
        # /buckets/{id}/history is the URI for the list of history entries.
        entry_perm_id = '/buckets/%s/history/%s' % (bucket_id, entry['id'])
        permission.replace_object_permissions(entry_perm_id, entry_perms)
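
Concretely, a single history entry built by this listener might look like the following (all ids and values are invented for illustration):

sample_entry = {
    "date": "2021-10-22T14:05:12.428000",
    "action": "update",
    "resource_name": "record",
    "uri": "/buckets/blog/collections/articles/records/abc123",
    "collection_id": "articles",
    "record_id": "abc123",
    "target": {
        "data": {"id": "abc123", "title": "Hello", "last_modified": 1634911512428},
        "permissions": {"write": ["account:alice"]},
    },
}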
Example No. 38
def check_collection_status(
    event,
    resources,
    to_review_enabled,
    editors_group,
    reviewers_group,
):
    """Make sure status changes are allowed."""
    payload = event.payload

    current_user_id = event.request.prefixed_userid
    if current_user_id == PLUGIN_USERID:
        # Ignore changes made by plugin.
        return

    user_principals = event.request.effective_principals

    for impacted in event.impacted_objects:
        old_collection = impacted.get("old", {})
        old_status = old_collection.get("status")
        new_collection = impacted["new"]
        new_status = new_collection.get("status")

        # Skip if collection is not configured for review.
        resource, _ = pick_resource_and_signer(
            event.request,
            resources,
            bucket_id=payload["bucket_id"],
            collection_id=new_collection["id"],
        )
        if resource is None:
            continue

        # Is review enabled for this resource?
        _to_review_enabled = resource.get("to_review_enabled", to_review_enabled)
        # Determine its related groups names.
        source_collection = resource["source"]["collection"]
        _editors_group = editors_group.format(collection_id=source_collection)
        _reviewers_group = reviewers_group.format(collection_id=source_collection)
        # Members of groups have the group URIs among their principals.
        editors_group_uri = instance_uri(
            event.request, "group", bucket_id=payload["bucket_id"], id=_editors_group
        )
        reviewers_group_uri = instance_uri(
            event.request, "group", bucket_id=payload["bucket_id"], id=_reviewers_group
        )

        if old_status == new_status:
            # When collection is created old_status == new_status == None.
            continue

        # 0. Nobody can remove the status
        if new_status is None:
            raise_invalid(message="Cannot remove status")

        # 1. None -> work-in-progress
        elif new_status == STATUS.WORK_IN_PROGRESS:
            pass

        # 2. work-in-progress -> to-review
        elif new_status == STATUS.TO_REVIEW:
            if editors_group_uri not in user_principals and _to_review_enabled:
                raise_forbidden(message="Not in %s group" % _editors_group)

        # 3. to-review -> work-in-progress
        # 3. to-review -> to-sign
        # 3. signed -> to-sign
        elif new_status == STATUS.TO_SIGN:
            # A collection that is already signed cannot go straight back to to-sign.
            if old_status == STATUS.SIGNED:
                raise_invalid(message="Collection already signed")

            if _to_review_enabled:
                # Only allow to-sign from to-review if the user is a reviewer and not the last editor.
                if reviewers_group_uri not in user_principals:
                    raise_forbidden(message="Not in %s group" % _reviewers_group)

                if old_status != STATUS.TO_REVIEW:
                    raise_invalid(message="Collection not under review")

                field_last_requester = TRACKING_FIELDS.LAST_REVIEW_REQUEST_BY.value
                is_same_editor = (
                    old_collection.get(field_last_requester) == current_user_id
                )
                if is_same_editor:
                    raise_forbidden(message="Last editor cannot review")

        # 4. to-sign -> signed
        elif new_status == STATUS.SIGNED:
            raise_invalid(message="Cannot set status to '%s'" % new_status)

        # 5. Refresh signature
        elif new_status == STATUS.TO_REFRESH:
            # This used to raise a 400 if the collection had never been signed,
            # but on reflection that restriction did not make sense.
            pass

        # Rollback changes
        elif new_status == STATUS.TO_ROLLBACK:
            if old_status == STATUS.SIGNED:
                raise_invalid(message="Collection has no work-in-progress")

        # Unknown manual status
        else:
            raise_invalid(message="Invalid status '%s'" % new_status)
Example No. 39
def on_resource_changed(event):
    """
    Every time an object is created/changed/deleted, we create an entry in the
    ``history`` resource. The entries are served as read-only in the
    :mod:`kinto.plugins.history.views` module.
    """
    payload = event.payload
    resource_name = payload["resource_name"]
    event_uri = payload["uri"]

    bucket_id = None
    bucket_uri = None
    collection_uri = None

    storage = event.request.registry.storage
    permission = event.request.registry.permission
    settings = event.request.registry.settings

    excluded_resources = aslist(settings.get("history.exclude_resources", ""))

    targets = []
    for impacted in event.impacted_objects:
        target = impacted["new"]
        obj_id = target["id"]

        try:
            bucket_id = payload["bucket_id"]
        except KeyError:
            # e.g. DELETE /buckets
            bucket_id = obj_id
        bucket_uri = instance_uri(event.request, "bucket", id=bucket_id)

        if bucket_uri in excluded_resources:
            continue

        if "collection_id" in payload:
            collection_id = payload["collection_id"]
            collection_uri = instance_uri(event.request,
                                          "collection",
                                          bucket_id=bucket_id,
                                          id=collection_id)
            if collection_uri in excluded_resources:
                continue

        # On POST .../records, the URI does not contain the newly created
        # record id.
        parts = event_uri.split("/")
        if resource_name in parts[-1]:
            parts.append(obj_id)
        else:
            # Make sure the id is correct on grouped events.
            parts[-1] = obj_id
        uri = "/".join(parts)

        if uri in excluded_resources:
            continue

        targets.append((uri, target))

    if not targets:
        return  # Nothing to do.

    # Prepare a list of object ids to be fetched from permission backend,
    # and fetch them all at once. Use a mapping for later convenience.
    all_perms_objects_ids = [oid for (oid, _) in targets]
    all_perms_objects_ids.append(bucket_uri)
    if collection_uri is not None:
        all_perms_objects_ids.append(collection_uri)
    all_perms_objects_ids = list(set(all_perms_objects_ids))
    all_permissions = permission.get_objects_permissions(all_perms_objects_ids)
    perms_by_object_id = dict(zip(all_perms_objects_ids, all_permissions))

    bucket_perms = perms_by_object_id[bucket_uri]
    collection_perms = {}
    if collection_uri is not None:
        collection_perms = perms_by_object_id[collection_uri]

    # The principals allowed to read the bucket and collection.
    # (Note: ``write`` means ``read``)
    read_principals = set(bucket_perms.get("read", []))
    read_principals.update(bucket_perms.get("write", []))
    read_principals.update(collection_perms.get("read", []))
    read_principals.update(collection_perms.get("write", []))

    # Create a history entry for each impacted object.
    for (uri, target) in targets:
        obj_id = target["id"]
        # Prepare the history entry attributes.
        perms = {k: list(v) for k, v in perms_by_object_id[uri].items()}
        eventattrs = dict(**payload)
        eventattrs.pop("timestamp", None)  # Already in target `last_modified`.
        eventattrs.pop("bucket_id", None)
        eventattrs[f"{resource_name}_id"] = obj_id
        eventattrs["uri"] = uri
        attrs = dict(
            date=datetime.now().isoformat(),
            target={
                "data": target,
                "permissions": perms
            },
            **eventattrs,
        )

        # Create an entry for the 'history' resource, whose parent_id is
        # the bucket URI (c.f. views.py).
        # Note: this will be rolled back if the transaction is rolled back.
        entry = storage.create(parent_id=bucket_uri,
                               resource_name="history",
                               obj=attrs)

        # The read permission on the newly created history entry is the union
        # of the object permissions with the one from bucket and collection.
        entry_principals = set(read_principals)
        entry_principals.update(perms.get("read", []))
        entry_principals.update(perms.get("write", []))
        entry_perms = {"read": list(entry_principals)}
        # /buckets/{id}/history is the URI for the list of history entries.
        entry_perm_id = f"/buckets/{bucket_id}/history/{entry['id']}"
        permission.replace_object_permissions(entry_perm_id, entry_perms)
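
The history.exclude_resources setting read above is a whitespace-separated list of URIs (hence aslist). A small hedged illustration of how it resolves (bucket and collection names invented):

from pyramid.settings import aslist

# Hypothetical settings, e.g. parsed from an .ini file:
settings = {
    "history.exclude_resources": "/buckets/preview /buckets/blog/collections/scratch",
}
print(aslist(settings["history.exclude_resources"]))
# -> ['/buckets/preview', '/buckets/blog/collections/scratch']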
Example No. 40
def prevent_collection_delete(event, resources):
    request = event.request
    bid = event.payload["bucket_id"]
    for impacted in event.impacted_objects:
        cid = impacted["old"]["id"]

        # Locate any collections that imply usage of this collection.
        # If there's some path s -> p -> d for which this collection
        # corresponds to p or d, we forbid deletion of this collection
        # (it's "in use").
        in_use = None

        # The most obvious path is if there is a signer that mentions
        # this collection explicitly in p or d.
        specific_signers = [
            v
            for v in resources.values()
            if v["source"]["collection"] is not None
            and signer_impacts_resource(v, bid, cid)
        ]

        if specific_signers:
            assert (
                len(specific_signers) == 1
            ), f"Inconsistent signers: multiple signers touch {bid} and {cid}"
            in_use = specific_signers[0]

        if not in_use:
            # We identify bucket-wide signers for which p or d matches
            # this collection -- in this case, editing the collection of
            # the same name in s could trigger writes to p or d.
            bucket_signers = [
                v
                for v in resources.values()
                if v["source"]["collection"] is None
                and signer_impacts_resource(v, bid, cid)
            ]
            if bucket_signers:
                assert (
                    len(bucket_signers) == 1
                ), f"Inconsistent signers: multiple signers touch {bid}"
                in_use = bucket_signers[0]

            if in_use:
                # See if this bucket-wide signer is superseded by any
                # specific-collection signers. A specific-collection
                # signer counts as superseding a bucket-wide signer if
                # the specific collection is in the same bucket as the
                # bucket-wide signer, and the specific-collection
                # signer has the same collection ID as the collection
                # being deleted. In this case, we can ignore the
                # bucket-wide s -> p -> d because the
                # collection-specific signer specifies a different
                # workflow for the collection that we thought to
                # impact this one.
                #
                # Specific-collection signers that point *from* other
                # collections to this one are handled explicitly, above.
                #
                # N.B. We can't use signer_impacts_resource here
                # because we want to detect a signer for a
                # specific source collection, regardless of whether it
                # impacts the collection to be deleted or not. A good
                # example where this comes up is where a
                # specific-collection signer disables preview. We want
                # to find this signer even though the preview
                # collection is no longer being impacted.
                for signer in resources.values():
                    same_bucket = (
                        signer["source"]["bucket"] == in_use["source"]["bucket"]
                    )
                    this_collection = signer["source"]["collection"] == cid
                    if same_bucket and this_collection:
                        # Clear the bucket-wide signer.
                        # This signer either named this collection
                        # explicitly (in which case it was handled
                        # above), or it didn't (in which case the
                        # collection is safe to be deleted).
                        in_use = None
                        break

        if in_use is None:
            # Can delete!
            continue

        source_bucket_uri = instance_uri(
            event.request, "bucket", id=in_use["source"]["bucket"]
        )
        source_collection_id = in_use["source"]["collection"] or cid
        try:
            request.registry.storage.get(
                resource_name="collection",
                parent_id=source_bucket_uri,
                object_id=source_collection_id,
            )
            raise_forbidden(message="Collection is in use.")
        except ObjectNotFoundError:
            # Do not prevent delete of preview/destination if source does not exist.
            pass
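
The helper signer_impacts_resource() does the heavy lifting here. Below is a minimal sketch of what it plausibly checks, assuming each signer resource carries source/preview/destination entries with bucket and collection keys (a reconstruction, not the actual helper):

def signer_impacts_resource(signer, bucket_id, collection_id):
    # True if the signer writes to (bucket_id, collection_id) through its
    # preview or destination. Bucket-wide signers (collection is None)
    # inherit the collection id under consideration.
    for side in ("preview", "destination"):
        target = signer.get(side)
        if target is None:
            continue
        target_collection = target["collection"] or collection_id
        if target["bucket"] == bucket_id and target_collection == collection_id:
            return True
    return False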
Example No. 41
def get_parent_id(self, request):
    bucket_id = request.matchdict['bucket_id']
    parent_id = utils.instance_uri(request, 'bucket', id=bucket_id)
    return parent_id
Example No. 42
def sign_collection_data(event, resources, **kwargs):
    """
    Listen to resource change events, to check if a new signature is
    requested.

    When a source collection specified in settings is modified, and its
    new metadata ``status`` is set to ``"to-sign"``, then sign the data
    and update the destination.
    """
    payload = event.payload

    is_new_collection = payload["action"] == ACTIONS.CREATE.value

    current_user_id = event.request.prefixed_userid
    if current_user_id == PLUGIN_USERID:
        # Ignore changes made by plugin.
        return

    # Prevent recursion, since the following operations will alter the current
    # collection.
    impacted_objects = list(event.impacted_objects)

    for impacted in impacted_objects:
        new_collection = impacted["new"]
        old_collection = impacted.get("old", {})

        # Only sign the configured resources.
        resource, signer = pick_resource_and_signer(
            event.request,
            resources,
            bucket_id=payload["bucket_id"],
            collection_id=new_collection["id"],
        )
        if resource is None:
            continue

        updater = LocalUpdater(
            signer=signer,
            storage=event.request.registry.storage,
            permission=event.request.registry.permission,
            source=resource["source"],
            destination=resource["destination"],
        )

        uri = instance_uri(
            event.request,
            "collection",
            bucket_id=payload["bucket_id"],
            id=new_collection["id"],
        )

        has_preview_collection = "preview" in resource

        payload = payload.copy()
        payload["uri"] = uri
        payload["collection_id"] = new_collection["id"]

        review_event_cls = None
        review_event_kw = dict(
            request=event.request,
            payload=payload,
            impacted_objects=[impacted],
            resource=resource,
            original_event=event,
        )

        new_status = new_collection.get("status")
        old_status = old_collection.get("status")

        # Authorize kinto-attachment metadata write access. #190
        event.request._attachment_auto_save = True

        if is_new_collection:
            if has_preview_collection:
                updater.destination = resource["preview"]
                updater.sign_and_update_destination(
                    event.request,
                    source_attributes=new_collection,
                    # Do not update source attributes (done below).
                    next_source_status=None,
                )
            updater.destination = resource["destination"]
            updater.sign_and_update_destination(
                event.request,
                source_attributes=new_collection,
                # Prevents last_review_date from being set.
                previous_source_status=STATUS.SIGNED,
                # Signed by default.
                next_source_status=STATUS.SIGNED,
            )

        elif old_status == new_status:
            continue

        elif new_status == STATUS.TO_SIGN:
            # Run signature process (will set `last_reviewer` field).
            if has_preview_collection:
                updater.destination = resource["preview"]
                updater.sign_and_update_destination(
                    event.request,
                    source_attributes=new_collection,
                    previous_source_status=old_status,
                )

            updater.destination = resource["destination"]
            review_event_cls = signer_events.ReviewApproved
            changes_count = updater.sign_and_update_destination(
                event.request,
                source_attributes=new_collection,
                previous_source_status=old_status,
            )
            review_event_kw["changes_count"] = changes_count

        elif new_status == STATUS.TO_REVIEW:
            if has_preview_collection:
                # If preview collection: update and sign preview collection
                updater.destination = resource["preview"]
                changes_count = updater.sign_and_update_destination(
                    event.request,
                    source_attributes=new_collection,
                    next_source_status=STATUS.TO_REVIEW,
                )
            else:
                # If no preview collection: just track `last_editor`
                updater.update_source_review_request_by(event.request)
                changes_count = None
            review_event_cls = signer_events.ReviewRequested
            review_event_kw["changes_count"] = changes_count
            review_event_kw["comment"] = new_collection.get("last_editor_comment", "")

        elif old_status == STATUS.TO_REVIEW and new_status == STATUS.WORK_IN_PROGRESS:
            review_event_cls = signer_events.ReviewRejected
            review_event_kw["comment"] = new_collection.get("last_reviewer_comment", "")

        elif new_status == STATUS.TO_REFRESH:
            updater.refresh_signature(event.request, next_source_status=old_status)
            if has_preview_collection:
                updater.destination = resource["preview"]
                updater.refresh_signature(event.request, next_source_status=old_status)

        elif new_status == STATUS.TO_ROLLBACK:
            # Reset source with destination content, and set status to SIGNED.
            changes_count = updater.rollback_changes(event.request)
            if has_preview_collection:
                # Reset preview with destination content.
                updater.source = resource["preview"]
                changes_count += updater.rollback_changes(
                    event.request, refresh_last_edit=False
                )
                # Refresh signature for this new preview collection content.
                updater.destination = resource["preview"]
                # Without refreshing the source attributes.
                updater.refresh_signature(event.request, next_source_status=None)
            # If some changes were effectively rolled back, send an event.
            if changes_count > 0:
                review_event_cls = signer_events.ReviewCanceled
                review_event_kw["changes_count"] = changes_count

        # Notify request of review.
        if review_event_cls:
            review_event = review_event_cls(**review_event_kw)
            event.request.bound_data.setdefault(
                "kinto_remote_settings.signer.events", []
            ).append(review_event)
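
Throughout this example, resources maps a source collection URI to its source, preview and destination locations. For illustration, one entry might look like this (bucket and collection names invented):

resources = {
    "/buckets/main-workspace/collections/articles": {
        "source": {"bucket": "main-workspace", "collection": "articles"},
        "preview": {"bucket": "main-preview", "collection": "articles"},
        "destination": {"bucket": "main", "collection": "articles"},
    },
}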
Example No. 43
def get_parent_id(self, request):
    self.bucket_id = request.matchdict["bucket_id"]
    return instance_uri(request, "bucket", id=self.bucket_id)
Example No. 44
def create_editors_reviewers_groups(event, resources, editors_group, reviewers_group):
    if event.request.prefixed_userid == PLUGIN_USERID:
        return

    bid = event.payload["bucket_id"]
    bucket_uri = instance_uri(event.request, "bucket", id=bid)

    current_user_id = event.request.prefixed_userid
    principals = event.request.prefixed_principals

    authz = event.request.registry.getUtility(IAuthorizationPolicy)

    for impacted in event.impacted_objects:
        new_collection = impacted["new"]

        # Skip if collection is not configured for review.
        resource, _ = pick_resource_and_signer(
            event.request,
            resources,
            bucket_id=event.payload["bucket_id"],
            collection_id=new_collection["id"],
        )
        if resource is None:
            continue

        source_collection = resource["source"]["collection"]
        _editors_group = editors_group.format(collection_id=source_collection)
        _reviewers_group = reviewers_group.format(collection_id=source_collection)

        required_perms = authz.get_bound_permissions(bucket_uri, "group:create")
        permission = event.request.registry.permission
        if not permission.check_permission(principals, required_perms):
            return

        group_perms = {"write": [current_user_id]}
        for group, members in (
            (_editors_group, [current_user_id]),
            (_reviewers_group, []),
        ):
            ensure_resource_exists(
                request=event.request,
                resource_name="group",
                parent_id=bucket_uri,
                obj={"id": group, "members": members},
                permissions=group_perms,
                matchdict={"bucket_id": bid, "id": group},
            )

        # Allow those groups to write to the source collection.
        permission = event.request.registry.permission
        collection_uri = instance_uri(
            event.request,
            "collection",
            bucket_id=bid,
            id=resource["source"]["collection"],
        )
        for group in (_editors_group, _reviewers_group):
            group_principal = instance_uri(
                event.request, "group", bucket_id=bid, id=group
            )
            permission.add_principal_to_ace(collection_uri, "write", group_principal)
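
The editors_group and reviewers_group arguments are templates that may reference the source collection id, expanded with .format(collection_id=...). A small hedged illustration (the template values are assumptions):

editors_group_template = "{collection_id}-editors"
reviewers_group_template = "{collection_id}-reviewers"

source_collection = "articles"
print(editors_group_template.format(collection_id=source_collection))
# -> 'articles-editors'
print(reviewers_group_template.format(collection_id=source_collection))
# -> 'articles-reviewers'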
Example No. 45
def on_resource_changed(event):
    """
    Every time an object is created/changed/deleted, we update the
    bucket counters.

    If a new object exceeds the quotas, we reject the request.
    """
    payload = event.payload
    action = payload["action"]
    resource_name = payload["resource_name"]

    if action == "delete" and resource_name == "bucket":
        # Deleting a bucket already deletes everything underneath (including
        # quotas info). See kinto/views/bucket.
        return

    settings = event.request.registry.settings

    event_uri = payload["uri"]
    bucket_id = payload["bucket_id"]
    bucket_uri = instance_uri(event.request, "bucket", id=bucket_id)
    collection_id = None
    collection_uri = None
    if "collection_id" in payload:
        collection_id = payload["collection_id"]
        collection_uri = instance_uri(event.request,
                                      "collection",
                                      bucket_id=bucket_id,
                                      id=collection_id)

    bucket_max_bytes = get_bucket_settings(settings, bucket_id, "max_bytes")
    bucket_max_items = get_bucket_settings(settings, bucket_id, "max_items")
    bucket_max_bytes_per_item = get_bucket_settings(settings, bucket_id,
                                                    "max_bytes_per_item")
    collection_max_bytes = get_collection_settings(settings, bucket_id,
                                                   collection_id, "max_bytes")
    collection_max_items = get_collection_settings(settings, bucket_id,
                                                   collection_id, "max_items")
    collection_max_bytes_per_item = get_collection_settings(
        settings, bucket_id, collection_id, "max_bytes_per_item")

    max_bytes_per_item = collection_max_bytes_per_item or bucket_max_bytes_per_item

    storage = event.request.registry.storage

    targets = []
    for impacted in event.impacted_objects:
        target = impacted["new" if action != "delete" else "old"]
        # On POST .../records, the URI does not contain the newly created
        # record id.
        obj_id = target["id"]
        parts = event_uri.split("/")
        if resource_name in parts[-1]:
            parts.append(obj_id)
        else:
            # Make sure the id is correct on grouped events.
            parts[-1] = obj_id
        uri = "/".join(parts)

        old = impacted.get("old", {})
        new = impacted.get("new", {})

        targets.append((uri, obj_id, old, new))

    try:
        bucket_info = copy.deepcopy(
            storage.get(
                parent_id=bucket_uri,
                resource_name=QUOTA_RESOURCE_NAME,
                object_id=BUCKET_QUOTA_OBJECT_ID,
            ))
    except ObjectNotFoundError:
        bucket_info = {
            "collection_count": 0,
            "record_count": 0,
            "storage_size": 0
        }

    collection_info = {"record_count": 0, "storage_size": 0}
    if collection_id:
        try:
            collection_info = copy.deepcopy(
                storage.get(
                    parent_id=collection_uri,
                    resource_name=QUOTA_RESOURCE_NAME,
                    object_id=COLLECTION_QUOTA_OBJECT_ID,
                ))
        except ObjectNotFoundError:
            pass

    # Update the bucket quotas values for each impacted record.
    for (uri, obj_id, old, new) in targets:
        old_size = record_size(old)
        new_size = record_size(new)

        if max_bytes_per_item is not None and action != "delete":
            if new_size > max_bytes_per_item:
                message = (
                    "Maximum bytes per object exceeded "
                    f"({new_size} > {max_bytes_per_item} Bytes)."
                )
                raise_insufficient_storage(message)

        if action == "create":
            bucket_info["storage_size"] += new_size
            if resource_name == "collection":
                bucket_info["collection_count"] += 1
                collection_info["storage_size"] += new_size
            if resource_name == "record":
                bucket_info["record_count"] += 1
                collection_info["record_count"] += 1
                collection_info["storage_size"] += new_size
        elif action == "update":
            bucket_info["storage_size"] -= old_size
            bucket_info["storage_size"] += new_size
            if resource_name in ("collection", "record"):
                collection_info["storage_size"] -= old_size
                collection_info["storage_size"] += new_size
        else:  # action == 'delete':
            bucket_info["storage_size"] -= old_size
            if resource_name == "collection":
                collection_uri = uri
                bucket_info["collection_count"] -= 1
                # When we delete the collection all the records in it
                # are deleted without notification.
                collection_records, _ = storage.get_all(
                    resource_name="record", parent_id=collection_uri)
                for r in collection_records:
                    old_record_size = record_size(r)
                    bucket_info["record_count"] -= 1
                    bucket_info["storage_size"] -= old_record_size
                    collection_info["record_count"] -= 1
                    collection_info["storage_size"] -= old_record_size
                collection_info["storage_size"] -= old_size

            if resource_name == "record":
                bucket_info["record_count"] -= 1
                collection_info["record_count"] -= 1
                collection_info["storage_size"] -= old_size

    if bucket_max_bytes is not None:
        if bucket_info["storage_size"] > bucket_max_bytes:
            message = (
                "Bucket maximum total size exceeded "
                f"({bucket_info['storage_size']} > {bucket_max_bytes} Bytes). "
            )
            raise_insufficient_storage(message)

    if bucket_max_items is not None:
        if bucket_info["record_count"] > bucket_max_items:
            message = (
                "Bucket maximum number of objects exceeded "
                f"({bucket_info['record_count']} > {bucket_max_items} objects)."
            )
            raise_insufficient_storage(message)

    if collection_max_bytes is not None:
        if collection_info["storage_size"] > collection_max_bytes:
            message = (
                "Collection maximum size exceeded "
                f"({collection_info['storage_size']} > {collection_max_bytes} Bytes)."
            )
            raise_insufficient_storage(message)

    if collection_max_items is not None:
        if collection_info["record_count"] > collection_max_items:
            message = (
                "Collection maximum number of objects exceeded "
                f"({collection_info['record_count']} > {collection_max_items} objects)."
            )
            raise_insufficient_storage(message)

    storage.update(
        parent_id=bucket_uri,
        resource_name=QUOTA_RESOURCE_NAME,
        object_id=BUCKET_QUOTA_OBJECT_ID,
        obj=bucket_info,
    )

    if collection_id:
        if action == "delete" and resource_name == "collection":
            # Deleting a collection already deletes everything underneath
            # (including quotas info). See kinto/views/collection.
            return
        else:
            storage.update(
                parent_id=collection_uri,
                resource_name=QUOTA_RESOURCE_NAME,
                object_id=COLLECTION_QUOTA_OBJECT_ID,
                obj=collection_info,
            )
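
Quota limits come from configuration; here is a hedged example of values that get_bucket_settings() and get_collection_settings() could resolve, reusing the assumed key pattern from the sketch after Example No. 33:

settings = {
    # Global per-bucket default (key names assumed):
    "quotas.bucket_max_bytes": 1_000_000,
    # Overrides for a specific bucket / collection:
    "quotas.bucket_blog_max_items": 500,
    "quotas.collection_blog_articles_max_bytes_per_item": 10_000,
}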
Example No. 46
def get_parent_id(self, request):
    self.bucket_id = request.matchdict["bucket_id"]
    self.collection_id = request.matchdict["collection_id"]
    return utils.instance_uri(
        request, "collection", bucket_id=self.bucket_id, id=self.collection_id
    )