Example #1
def oplog_wrapper(c_app, ref=None, updates=None, action=None, resource=None, **extra):
    """ A simple activity logger wrapping eve's push_oplog
    @todo: Testing and validating of the oplog_push from eve
    @todo: Implement workflow, watchers etc as op (operations)
    """

    raise NotImplementedError

    # def oplog_push(resource, updates, op, id=None):

    r = None
    if ref is None:
        if request.method == 'POST':  # No ref exists yet on POST; it should be sent in by the caller
            pass
        else:
            r = request.get_json()  # Carries _id and _etag
            ref = r.get('_id')

    # request.endpoint is a string with 'endpoint|resource' syntax
    if resource is None:
        if request.endpoint:
            resource = request.endpoint.split('|')[0]
        else:
            resource = 'Unknown'

    # This is NOT tested yet
    # @todo: need to replicate the updates dict, which is the payload (r?)
    # prefer the explicit updates argument, fall back to the request payload
    oplog_push(resource=resource, updates=updates or r, op=request.method, id=ref)
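
The wrapper above is still a stub, so the sketch below shows, untested, the pattern it is meant to automate: pushing an activity entry from an Eve event hook by calling oplog_push directly. The 'contacts' resource, the settings.py module and the hook body are illustrative assumptions; oplog_push is assumed to be importable from eve.methods.common with the signature noted in the comment above.

from eve import Eve
from eve.methods.common import oplog_push  # assumed module path

app = Eve(settings='settings.py')  # hypothetical settings module


def log_inserts(resource, documents):
    # Push one oplog entry per freshly inserted document. Eve fires this
    # hook during request handling, so a request context is available.
    for document in documents:
        oplog_push(resource, document, 'POST', document.get('_id'))


app.on_inserted += log_inserts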
Example #2
def deleteitem_internal(resource,
                        concurrency_check=False,
                        suppress_callbacks=False,
                        original=None,
                        **lookup):
    """ Intended for internal delete calls, this method is not rate limited,
    authentication is not checked, pre-request events are not raised, and
    concurrency checking is optional. Deletes a resource item.

    :param resource: name of the resource to which the item(s) belong.
    :param concurrency_check: concurrency check switch (bool)
    :param suppress_callbacks: when True, the 'on_delete_item' and
                               'on_deleted_item' callbacks are not invoked (bool)
    :param original: original document if already fetched from the database
    :param **lookup: item lookup query.

    .. versionchanged:: 0.6
       Support for soft delete.

    .. versionchanged:: 0.5
       Return 204 NoContent instead of 200.
       Push updates to OpLog.
       Original deleteitem() has been split into deleteitem() and
       deleteitem_internal().

    .. versionchanged:: 0.4
       Fix #284: If you have a media field, and set datasource projection to
       0 for that field, the media will not be deleted.
       Support for document versioning.
       'on_delete_item' events raised before performing the delete.
       'on_deleted_item' events raised after performing the delete.

    .. versionchanged:: 0.3
       Delete media files as needed.
       Pass the explicit query filter to the data driver, as it does not
       support the id argument anymore.

    .. versionchanged:: 0.2
       Raise pre_<method> event.

    .. versionchanged:: 0.0.7
       Support for Rate-Limiting.

    .. versionchanged:: 0.0.5
      Pass current resource to ``parse_request``, allowing for proper
      processing of new configuration settings: `filters`, `sorting`, `paging`.

    .. versionchanged:: 0.0.4
       Added the ``requires_auth`` decorator.
    """
    resource_def = config.DOMAIN[resource]
    soft_delete_enabled = resource_def["soft_delete"]
    original = get_document(resource,
                            concurrency_check,
                            original,
                            force_auth_field_projection=soft_delete_enabled,
                            **lookup)
    if not original or (soft_delete_enabled
                        and original.get(config.DELETED) is True):
        return all_done()

    # notify callbacks
    if not suppress_callbacks:
        getattr(app, "on_delete_item")(resource, original)
        getattr(app, "on_delete_item_%s" % resource)(original)

    if soft_delete_enabled:
        # Instead of removing the document from the db, just mark it as deleted
        marked_document = copy.deepcopy(original)

        # Set DELETED flag and update metadata
        last_modified = datetime.utcnow().replace(microsecond=0)
        marked_document[config.DELETED] = True
        marked_document[config.LAST_UPDATED] = last_modified

        if config.IF_MATCH:
            resolve_document_etag(marked_document, resource)

        resolve_document_version(marked_document, resource, "DELETE", original)

        # Update document in database (including version collection if needed)
        id = original[resource_def["id_field"]]
        try:
            app.data.replace(resource, id, marked_document, original)
        except app.data.OriginalChangedError:
            if concurrency_check:
                abort(412, description="Client and server etags don't match")

        # create previous version if it wasn't already there
        late_versioning_catch(original, resource)
        # and add deleted version
        insert_versioning_documents(resource, marked_document)
        # update oplog if needed
        oplog_push(resource, marked_document, "DELETE", id)

    else:
        # Delete the document for real

        # media cleanup
        media_fields = app.config["DOMAIN"][resource]["_media"]

        # document might miss one or more media fields because of datasource
        # and/or client projection.
        missing_media_fields = [f for f in media_fields if f not in original]
        if missing_media_fields:
            # retrieve the whole document so we have all media fields available
            # Should be a very rare occurrence. We can't get rid of the
            # get_document() call since it also deals with etag matching, which
            # is still needed. Also, this lookup should never fail.
            # TODO not happy with this hack. Not at all. Is there a better way?
            original = app.data.find_one_raw(resource, **lookup)

        for field in media_fields:
            if field in original:
                media_field = original[field]
                if isinstance(media_field, list):
                    for file_id in media_field:
                        app.media.delete(file_id, resource)
                else:
                    app.media.delete(original[field], resource)

        id = original[resource_def["id_field"]]
        app.data.remove(resource, lookup)

        # TODO: should attempt to delete version collection even if setting is
        # off
        if app.config["DOMAIN"][resource]["versioning"] is True:
            app.data.remove(
                resource + config.VERSIONS,
                {
                    versioned_id_field(resource_def):
                    original[resource_def["id_field"]]
                },
            )

        # update oplog if needed
        oplog_push(resource, original, "DELETE", id)

    if not suppress_callbacks:
        getattr(app, "on_deleted_item")(resource, original)
        getattr(app, "on_deleted_item_%s" % resource)(original)

    return all_done()
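
Taken together with the docstring, a typical internal call passes the item lookup as keyword arguments. Below is a hedged usage sketch, not part of the source: removing an item from a hypothetical 'contacts' resource without firing the delete callbacks, inside a pushed request context since the internal helpers may rely on request state.

from eve.methods.delete import deleteitem_internal  # assumed module path


def purge_contact(app, contact_id):
    # 'contacts' and the '_id' lookup key are illustrative assumptions
    with app.test_request_context(method='DELETE'):
        deleteitem_internal('contacts',
                            concurrency_check=False,
                            suppress_callbacks=True,
                            **{'_id': contact_id})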
Example #3
def patch_internal(resource,
                   payload=None,
                   concurrency_check=False,
                   skip_validation=False,
                   mongo_options=None,
                   **lookup):
    """Intended for internal patch calls, this method is not rate limited,
    authentication is not checked, pre-request events are not raised, and
    concurrency checking is optional. Performs a document patch/update.
    Updates are first validated against the resource schema. If validation
    passes, the document is updated and an OK status update is returned.
    If validation fails, a set of validation issues is returned.

    :param resource: the name of the resource to which the document belongs.
    :param payload: alternative payload. When calling patch() from your own
                    code you can provide an alternative payload. This can be
                    useful, for example, when you have a callback function
                    hooked to a certain endpoint, and want to perform
                    additional patch() calls from there.

                    Please be advised that in order to successfully use this
                    option, a request context must be available.
    :param concurrency_check: concurrency check switch (bool)
    :param skip_validation: skip payload validation before write (bool)
    :param mongo_options: options to pass to PyMongo. e.g. ReadConcern of the initial get.
    :param **lookup: document lookup query.

    .. versionchanged:: 0.6.2
       Fix: validator is not set when skip_validation is true.

    .. versionchanged:: 0.6
       on_updated returns the updated document (#682).
       Allow restoring soft deleted documents via PATCH

    .. versionchanged:: 0.5
       Updating nested document fields does not overwrite the nested document
       itself (#519).
       Push updates to the OpLog.
       Original patch() has been split into patch() and patch_internal().
       You can now pass a pre-defined custom payload to the function.
       ETAG is now stored with the document (#369).
       Catching all HTTPExceptions and returning them to the caller, allowing
       for eventual flask.abort() invocations in callback functions to go
       through. Fixes #395.

    .. versionchanged:: 0.4
       Allow abort() to be invoked by callback functions.
       'on_update' raised before performing the update on the database.
       Support for document versioning.
       'on_updated' raised after performing the update on the database.

    .. versionchanged:: 0.3
       Support for media fields.
       When IF_MATCH is disabled, no etag is included in the payload.
       Support for new validation format introduced with Cerberus v0.5.

    .. versionchanged:: 0.2
       Use the new STATUS setting.
       Use the new ISSUES setting.
       Raise 'on_pre_<method>' event.

    .. versionchanged:: 0.1.1
       Item-identifier wrapper stripped from both request and response payload.

    .. versionchanged:: 0.1.0
       Support for optional HATEOAS.
       Re-raises `exceptions.Unauthorized`, this could occur if the
       `auth_field` condition fails

    .. versionchanged:: 0.0.9
       More informative error messages.
       Support for Python 3.3.

    .. versionchanged:: 0.0.8
       Let ``werkzeug.exceptions.InternalServerError`` go through as they have
       probably been explicitly raised by the data driver.

    .. versionchanged:: 0.0.7
       Support for Rate-Limiting.

    .. versionchanged:: 0.0.6
       ETag is now computed without the need of an additional db lookup

    .. versionchanged:: 0.0.5
       Support for 'application/json' Content-Type.

    .. versionchanged:: 0.0.4
       Added the ``requires_auth`` decorator.

    .. versionchanged:: 0.0.3
       JSON links. Superfluous ``response`` container removed.
    """
    if payload is None:
        payload = payload_()

    original = get_document(resource, concurrency_check, mongo_options,
                            **lookup)
    if not original:
        # not found
        abort(404)

    resource_def = app.config["DOMAIN"][resource]
    schema = resource_def["schema"]
    normalize_document = resource_def.get("normalize_on_patch")
    validator = app.validator(schema,
                              resource=resource,
                              allow_unknown=resource_def["allow_unknown"])

    object_id = original[resource_def["id_field"]]
    last_modified = None
    etag = None

    issues = {}
    response = {}

    if config.BANDWIDTH_SAVER is True:
        embedded_fields = []
    else:
        req = parse_request(resource)
        embedded_fields = resolve_embedded_fields(resource, req)

    try:
        updates = parse(payload, resource)
        if skip_validation:
            validation = True
        else:
            validation = validator.validate_update(updates, object_id,
                                                   original,
                                                   normalize_document)
            updates = validator.document

        if validation:
            # Apply coerced values

            # sneak in a shadow copy if it wasn't already there
            late_versioning_catch(original, resource)

            store_media_files(updates, resource, original)
            resolve_document_version(updates, resource, "PATCH", original)

            # some datetime precision magic
            updates[config.LAST_UPDATED] = datetime.utcnow().replace(
                microsecond=0)

            if resource_def["soft_delete"] is True:
                # PATCH with soft delete enabled should always set the DELETED
                # field to False. We are either carrying through un-deleted
                # status, or restoring a soft deleted document
                updates[config.DELETED] = False

            # the mongo driver has a different precision than the python
            # datetime. since we don't want to reload the document once it
            # has been updated, and we still have to provide an updated
            # etag, we're going to update the local version of the
            # 'original' document, and we will use it for the etag
            # computation.
            updated = deepcopy(original)

            # notify callbacks
            getattr(app, "on_update")(resource, updates, original)
            getattr(app, "on_update_%s" % resource)(updates, original)

            if resource_def["merge_nested_documents"]:
                updates = resolve_nested_documents(updates, updated)

            if mongo_options:
                updated.with_options(mongo_options).update(updates)
            else:
                updated.update(updates)

            if config.IF_MATCH:
                resolve_document_etag(updated, resource)
                # now storing the (updated) ETAG with every document (#453)
                updates[config.ETAG] = updated[config.ETAG]
            try:
                app.data.update(resource, object_id, updates, original)
            except app.data.OriginalChangedError:
                if concurrency_check:
                    abort(412,
                          description="Client and server etags don't match")

            # update oplog if needed
            oplog_push(resource, updates, "PATCH", object_id)

            insert_versioning_documents(resource, updated)

            # notify callbacks
            getattr(app, "on_updated")(resource, updates, original)
            getattr(app, "on_updated_%s" % resource)(updates, original)

            updated.update(updates)

            # build the full response document
            build_response_document(updated, resource, embedded_fields,
                                    updated)
            response = updated
            if config.IF_MATCH:
                etag = response[config.ETAG]
        else:
            issues = validator.errors
    except DocumentError as e:
        # TODO should probably log the error and abort 400 instead
        # (once we have logging in place)
        issues["validator exception"] = str(e)
    except exceptions.HTTPException as e:
        raise e
    except Exception as e:
        # consider all other exceptions as Bad Requests
        app.logger.exception(e)
        abort(400,
              description=debug_error_message("An exception occurred: %s" % e))

    if len(issues):
        response[config.ISSUES] = issues
        response[config.STATUS] = config.STATUS_ERR
        status = config.VALIDATION_ERROR_STATUS
    else:
        response[config.STATUS] = config.STATUS_OK
        status = 200

    # limit what actually gets sent to minimize bandwidth usage
    response = marshal_write_response(response, resource)

    return response, last_modified, etag, status
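
The alternative-payload option described above can be exercised from custom code roughly as follows. This is a sketch under assumptions: the 'contacts' resource and the 'active' field are made up, and, as the docstring warns, a request context must be available, here provided via test_request_context.

from eve.methods.patch import patch_internal  # assumed module path


def deactivate_contact(app, contact_id):
    with app.test_request_context(method='PATCH',
                                  content_type='application/json'):
        response, last_modified, etag, status = patch_internal(
            'contacts',
            payload={'active': False},  # hypothetical field
            concurrency_check=False,
            **{'_id': contact_id})
    # status is 200 on success, VALIDATION_ERROR_STATUS (422 by default)
    # when the payload fails validation
    return response, status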
Example #4
def post_internal(resource, payl=None, skip_validation=False):
    """
    Intended for internal post calls, this method is not rate limited,
    authentication is not checked and pre-request events are not raised.
    Adds one or more documents to a resource. Each document is validated
    against the domain schema. If validation passes the document is inserted
    and ID_FIELD, LAST_UPDATED and DATE_CREATED along with a link to the
    document are returned. If validation fails, a list of validation issues
    is returned.

    :param resource: name of the resource involved.
    :param payl: alternative payload. When calling post() from your own code
                 you can provide an alternative payload. This can be useful,
                 for example, when you have a callback function hooked to a
                 certain endpoint, and want to perform additional post() calls
                 from there.

                 Please be advised that in order to successfully use this
                 option, a request context must be available.

                 See https://github.com/pyeve/eve/issues/74 for a
                 discussion, and a typical use case.
    :param skip_validation: skip payload validation before write (bool)

    .. versionchanged:: 0.7
       Add support for Location header. Closes #795.

    .. versionchanged:: 0.6
       Fix: since v0.6, skip_validation = True causes a 422 response (#726).

    .. versionchanged:: 0.6
       Initialize DELETED field when soft_delete is enabled.

    .. versionchanged:: 0.5
       Back to resolving default values after validation as now the validator
       can properly validate dependency even when some have default values. See
       #353.
       Push updates to the OpLog.
       Original post() has been split into post() and post_internal().
       ETAGS are now stored with documents (#369).

    .. versionchanged:: 0.4
       Resolve default values before validation is performed. See #353.
       Support for document versioning.

    .. versionchanged:: 0.3
       Return 201 if at least one document has been successfully inserted.
       Fix #231 auth field not set if resource level authentication is set.
       Support for media fields.
       When IF_MATCH is disabled, no etag is included in the payload.
       Support for new validation format introduced with Cerberus v0.5.

    .. versionchanged:: 0.2
       Use the new STATUS setting.
       Use the new ISSUES setting.
       Raise 'on_pre_<method>' event.
       Explicitly resolve default values instead of letting them be resolved
       by common.parse. This avoids a validation error when a read-only field
       also has a default value.
       Added ``on_inserted*`` events after the database insert

    .. versionchanged:: 0.1.1
       auth.request_auth_value is now used to store the auth_field value.

    .. versionchanged:: 0.1.0
       More robust handling of auth_field.
       Support for optional HATEOAS.

    .. versionchanged:: 0.0.9
       Event hooks renamed to be more robust and consistent: 'on_posting'
       renamed to 'on_insert'.
       You can now pass a pre-defined custom payload to the function.

    .. versionchanged:: 0.0.9
       Storing self.app.auth.userid in auth_field when 'user-restricted
       resource access' is enabled.

    .. versionchanged:: 0.0.7
       Support for Rate-Limiting.
       Support for 'extra_response_fields'.

       'on_posting' and 'on_posting_<resource>' events are raised before the
       documents are inserted into the database. This allows callback functions
       to arbitrarily edit/update the documents being stored.

    .. versionchanged:: 0.0.6
       Support for bulk inserts.

       Please note: validation constraints are checked against the database,
       and not between the payload documents themselves. This causes an
       interesting corner case: in the event of a multiple documents payload
       where two or more documents carry the same value for a field where the
       'unique' constraint is set, the payload will validate successfully, as
       there are no duplicates in the database (yet). If this is an issue, the
       client can always send the documents one at a time for insertion, or
       validate locally before submitting the payload to the API.

    .. versionchanged:: 0.0.5
       Support for 'application/json' Content-Type.
       Support for 'user-restricted resource access'.

    .. versionchanged:: 0.0.4
       Added the ``requires_auth`` decorator.

    .. versionchanged:: 0.0.3
       JSON links. Superfluous ``response`` container removed.
    """

    date_utc = datetime.utcnow().replace(microsecond=0)
    resource_def = app.config["DOMAIN"][resource]
    schema = resource_def["schema"]
    validator = (
        None
        if skip_validation
        else app.validator(
            schema, resource=resource, allow_unknown=resource_def["allow_unknown"]
        )
    )

    documents = []
    results = []
    failures = 0
    id_field = resource_def["id_field"]

    if config.BANDWIDTH_SAVER is True:
        embedded_fields = []
    else:
        req = parse_request(resource)
        embedded_fields = resolve_embedded_fields(resource, req)

    # validation, and additional fields
    if payl is None:
        payl = payload()

    if isinstance(payl, dict):
        payl = [payl]

    if not payl:
        # empty bulk insert
        abort(400, description=debug_error_message("Empty bulk insert"))

    if len(payl) > 1 and not config.DOMAIN[resource]["bulk_enabled"]:
        abort(400, description=debug_error_message("Bulk insert not allowed"))

    for value in payl:
        document = []
        doc_issues = {}
        try:
            document = parse(value, resource)
            resolve_sub_resource_path(document, resource)
            if skip_validation:
                validation = True
            else:
                validation = validator.validate(document)
            if validation:  # validation is successful
                # validator might not be available if skip_validation. #726.
                if validator:
                    # Apply coerced values
                    document = validator.document

                # Populate meta and default fields
                document[config.LAST_UPDATED] = document[config.DATE_CREATED] = date_utc

                if config.DOMAIN[resource]["soft_delete"] is True:
                    document[config.DELETED] = False

                resolve_user_restricted_access(document, resource)
                store_media_files(document, resource)
                resolve_document_version(document, resource, "POST")
            else:
                # validation errors added to list of document issues
                doc_issues = validator.errors
        except DocumentError as e:
            doc_issues["validation exception"] = str(e)
        except Exception as e:
            # most likely a problem with the incoming payload, report back to
            # the client as if it was a validation issue
            app.logger.exception(e)
            doc_issues["exception"] = str(e)

        if len(doc_issues):
            document = {config.STATUS: config.STATUS_ERR, config.ISSUES: doc_issues}
            failures += 1

        documents.append(document)

    if failures:
        # If at least one document got issues, the whole request fails and a
        # ``422 Unprocessable Entity`` status is returned.
        for document in documents:
            if (
                config.STATUS in document
                and document[config.STATUS] == config.STATUS_ERR
            ):
                results.append(document)
            else:
                results.append({config.STATUS: config.STATUS_OK})

        return_code = config.VALIDATION_ERROR_STATUS
    else:
        # notify callbacks
        getattr(app, "on_insert")(resource, documents)
        getattr(app, "on_insert_%s" % resource)(documents)

        # compute etags here as documents might have been updated by callbacks.
        resolve_document_etag(documents, resource)

        # bulk insert
        ids = app.data.insert(resource, documents)

        # update oplog if needed
        oplog_push(resource, documents, "POST")

        # assign document ids
        for document in documents:
            # either return the custom ID_FIELD or the id returned by
            # data.insert().
            id_ = document.get(id_field, ids.pop(0))
            document[id_field] = id_

            # build the full response document
            result = document
            build_response_document(result, resource, embedded_fields, document)

            # add extra write meta data
            result[config.STATUS] = config.STATUS_OK

            # limit what actually gets sent to minimize bandwidth usage
            result = marshal_write_response(result, resource)
            results.append(result)

        # insert versioning docs
        insert_versioning_documents(resource, documents)

        # notify callbacks
        getattr(app, "on_inserted")(resource, documents)
        getattr(app, "on_inserted_%s" % resource)(documents)
        # request was received and accepted; at least one document passed
        # validation and was accepted for insertion.

        return_code = 201

    if len(results) == 1:
        response = results.pop(0)
    else:
        response = {
            config.STATUS: config.STATUS_ERR if failures else config.STATUS_OK,
            config.ITEMS: results,
        }

    if failures:
        response[config.ERROR] = {
            "code": return_code,
            "message": "Insertion failure: %d document(s) contain(s) error(s)"
            % failures,
        }

    location_header = (
        None
        if return_code != 201 or not documents
        else [("Location", "%s/%s" % (resource_link(), documents[0][id_field]))]
    )

    return response, None, None, return_code, location_header
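
A hedged sketch of an internal insert, unpacking the five-element return value built above. The 'contacts' resource is hypothetical; note that location_header is only populated on a 201 and only for the first inserted document.

from eve.methods.post import post_internal  # assumed module path


def create_contact(app, doc):
    with app.test_request_context(method='POST',
                                  content_type='application/json'):
        response, _, _, return_code, location_header = post_internal(
            'contacts', payl=doc)
    if return_code == 201:
        # location_header is a list such as [('Location', '<link>/<id>')]
        return response, dict(location_header or [])
    return response, None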
Example #5
def patch_internal(resource, payload=None, concurrency_check=False,
                   skip_validation=False, **lookup):
    """ Intended for internal patch calls, this method is not rate limited,
    authentication is not checked, pre-request events are not raised, and
    concurrency checking is optional. Performs a document patch/update.
    Updates are first validated against the resource schema. If validation
    passes, the document is updated and an OK status update is returned.
    If validation fails, a set of validation issues is returned.

    :param resource: the name of the resource to which the document belongs.
    :param payload: alternative payload. When calling patch() from your own
                    code you can provide an alternative payload. This can be
                    useful, for example, when you have a callback function
                    hooked to a certain endpoint, and want to perform
                    additional patch() calls from there.

                    Please be advised that in order to successfully use this
                    option, a request context must be available.
    :param concurrency_check: concurrency check switch (bool)
    :param skip_validation: skip payload validation before write (bool)
    :param **lookup: document lookup query.

    .. versionchanged:: 0.6.2
       Fix: validator is not set when skip_validation is true.

    .. versionchanged:: 0.6
       on_updated returns the updated document (#682).
       Allow restoring soft deleted documents via PATCH

    .. versionchanged:: 0.5
       Updating nested document fields does not overwrite the nested document
       itself (#519).
       Push updates to the OpLog.
       Original patch() has been split into patch() and patch_internal().
       You can now pass a pre-defined custom payload to the function.
       ETAG is now stored with the document (#369).
       Catching all HTTPExceptions and returning them to the caller, allowing
       for eventual flask.abort() invocations in callback functions to go
       through. Fixes #395.

    .. versionchanged:: 0.4
       Allow abort() to be invoked by callback functions.
       'on_update' raised before performing the update on the database.
       Support for document versioning.
       'on_updated' raised after performing the update on the database.

    .. versionchanged:: 0.3
       Support for media fields.
       When IF_MATCH is disabled, no etag is included in the payload.
       Support for new validation format introduced with Cerberus v0.5.

    .. versionchanged:: 0.2
       Use the new STATUS setting.
       Use the new ISSUES setting.
       Raise 'on_pre_<method>' event.

    .. versionchanged:: 0.1.1
       Item-identifier wrapper stripped from both request and response payload.

    .. versionchanged:: 0.1.0
       Support for optional HATEOAS.
       Re-raises `exceptions.Unauthorized`, this could occur if the
       `auth_field` condition fails

    .. versionchanged:: 0.0.9
       More informative error messages.
       Support for Python 3.3.

    .. versionchanged:: 0.0.8
       Let ``werkzeug.exceptions.InternalServerError`` go through as they have
       probably been explicitly raised by the data driver.

    .. versionchanged:: 0.0.7
       Support for Rate-Limiting.

    .. versionchanged:: 0.0.6
       ETag is now computed without the need of an additional db lookup

    .. versionchanged:: 0.0.5
       Support for 'application/json' Content-Type.

    .. versionchanged:: 0.0.4
       Added the ``requires_auth`` decorator.

    .. versionchanged:: 0.0.3
       JSON links. Superfluous ``response`` container removed.
    """
    if payload is None:
        payload = payload_()

    original = get_document(resource, concurrency_check, **lookup)
    if not original:
        # not found
        abort(404)

    resource_def = app.config['DOMAIN'][resource]
    schema = resource_def['schema']
    validator = app.validator(schema, resource)

    object_id = original[resource_def['id_field']]
    last_modified = None
    etag = None

    issues = {}
    response = {}

    if config.BANDWIDTH_SAVER is True:
        embedded_fields = []
    else:
        req = parse_request(resource)
        embedded_fields = resolve_embedded_fields(resource, req)

    try:
        updates = parse(payload, resource)
        if skip_validation:
            validation = True
        else:
            validation = validator.validate_update(updates, object_id,
                                                   original)
            updates = validator.document

        if validation:
            # Apply coerced values

            # sneak in a shadow copy if it wasn't already there
            late_versioning_catch(original, resource)

            store_media_files(updates, resource, original)
            resolve_document_version(updates, resource, 'PATCH', original)

            # some datetime precision magic
            updates[config.LAST_UPDATED] = \
                datetime.utcnow().replace(microsecond=0)

            if resource_def['soft_delete'] is True:
                # PATCH with soft delete enabled should always set the DELETED
                # field to False. We are either carrying through un-deleted
                # status, or restoring a soft deleted document
                updates[config.DELETED] = False

            # the mongo driver has a different precision than the python
            # datetime. since we don't want to reload the document once it
            # has been updated, and we still have to provide an updated
            # etag, we're going to update the local version of the
            # 'original' document, and we will use it for the etag
            # computation.
            updated = deepcopy(original)

            # notify callbacks
            getattr(app, "on_update")(resource, updates, original)
            getattr(app, "on_update_%s" % resource)(updates, original)

            updates = resolve_nested_documents(updates, updated)
            updated.update(updates)

            if config.IF_MATCH:
                resolve_document_etag(updated, resource)
                # now storing the (updated) ETAG with every document (#453)
                updates[config.ETAG] = updated[config.ETAG]

            try:
                app.data.update(
                    resource, object_id, updates, original)
            except app.data.OriginalChangedError:
                if concurrency_check:
                    abort(412,
                          description='Client and server etags don\'t match')

            # update oplog if needed
            oplog_push(resource, updates, 'PATCH', object_id)

            insert_versioning_documents(resource, updated)

            # notify callbacks
            getattr(app, "on_updated")(resource, updates, original)
            getattr(app, "on_updated_%s" % resource)(updates, original)

            updated.update(updates)

            # build the full response document
            build_response_document(
                updated, resource, embedded_fields, updated)
            response = updated
            if config.IF_MATCH:
                etag = response[config.ETAG]
        else:
            issues = validator.errors
    except ValidationError as e:
        # TODO should probably log the error and abort 400 instead
        # (once we have logging in place)
        issues['validator exception'] = str(e)
    except exceptions.HTTPException as e:
        raise e
    except Exception as e:
        # consider all other exceptions as Bad Requests
        app.logger.exception(e)
        abort(400, description=debug_error_message(
            'An exception occurred: %s' % e
        ))

    if len(issues):
        response[config.ISSUES] = issues
        response[config.STATUS] = config.STATUS_ERR
        status = config.VALIDATION_ERROR_STATUS
    else:
        response[config.STATUS] = config.STATUS_OK
        status = 200

    # limit what actually gets sent to minimize bandwidth usage
    response = marshal_write_response(response, resource)

    return response, last_modified, etag, status
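
Since this variant reports validation problems through the STATUS and ISSUES keys seen above rather than by raising, a caller usually inspects the returned document. A short sketch, assuming the config proxy is importable from eve.utils and reusing the hypothetical 'contacts' resource:

from eve.methods.patch import patch_internal  # assumed module path
from eve.utils import config                  # assumed location of the config proxy


def patch_and_report(app, contact_id, changes):
    with app.test_request_context(method='PATCH',
                                  content_type='application/json'):
        response, _, _, status = patch_internal(
            'contacts', payload=changes, **{'_id': contact_id})
    if response.get(config.STATUS) == config.STATUS_ERR:
        # per-field problems are collected under the ISSUES key
        return status, response.get(config.ISSUES)
    return status, None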
Example #6
File: post.py  Project: idserge7/eve
def post_internal(resource, payl=None, skip_validation=False):
    """
    Intended for internal post calls, this method is not rate limited,
    authentication is not checked and pre-request events are not raised.
    Adds one or more documents to a resource. Each document is validated
    against the domain schema. If validation passes the document is inserted
    and ID_FIELD, LAST_UPDATED and DATE_CREATED along with a link to the
    document are returned. If validation fails, a list of validation issues
    is returned.

    :param resource: name of the resource involved.
    :param payl: alternative payload. When calling post() from your own code
                 you can provide an alternative payload. This can be useful,
                 for example, when you have a callback function hooked to a
                 certain endpoint, and want to perform additional post() calls
                 from there.

                 Please be advised that in order to successfully use this
                 option, a request context must be available.

                 See https://github.com/pyeve/eve/issues/74 for a
                 discussion, and a typical use case.
    :param skip_validation: skip payload validation before write (bool)

    .. versionchanged:: 0.7
       Add support for Location header. Closes #795.

    .. versionchanged:: 0.6
       Fix: since v0.6, skip_validation = True causes a 422 response (#726).

    .. versionchanged:: 0.6
       Initialize DELETED field when soft_delete is enabled.

    .. versionchanged:: 0.5
       Back to resolving default values after validation as now the validator
       can properly validate dependency even when some have default values. See
       #353.
       Push updates to the OpLog.
       Original post() has been split into post() and post_internal().
       ETAGS are now stored with documents (#369).

    .. versionchanged:: 0.4
       Resolve default values before validation is performed. See #353.
       Support for document versioning.

    .. versionchanged:: 0.3
       Return 201 if at least one document has been successfully inserted.
       Fix #231 auth field not set if resource level authentication is set.
       Support for media fields.
       When IF_MATCH is disabled, no etag is included in the payload.
       Support for new validation format introduced with Cerberus v0.5.

    .. versionchanged:: 0.2
       Use the new STATUS setting.
       Use the new ISSUES setting.
       Raise 'on_pre_<method>' event.
       Explicitly resolve default values instead of letting them be resolved
       by common.parse. This avoids a validation error when a read-only field
       also has a default value.
       Added ``on_inserted*`` events after the database insert

    .. versionchanged:: 0.1.1
       auth.request_auth_value is now used to store the auth_field value.

    .. versionchanged:: 0.1.0
       More robust handling of auth_field.
       Support for optional HATEOAS.

    .. versionchanged:: 0.0.9
       Event hooks renamed to be more robust and consistent: 'on_posting'
       renamed to 'on_insert'.
       You can now pass a pre-defined custom payload to the function.

    .. versionchanged:: 0.0.9
       Storing self.app.auth.userid in auth_field when 'user-restricted
       resource access' is enabled.

    .. versionchanged:: 0.0.7
       Support for Rate-Limiting.
       Support for 'extra_response_fields'.

       'on_posting' and 'on_posting_<resource>' events are raised before the
       documents are inserted into the database. This allows callback functions
       to arbitrarily edit/update the documents being stored.

    .. versionchanged:: 0.0.6
       Support for bulk inserts.

       Please note: validation constraints are checked against the database,
       and not between the payload documents themselves. This causes an
       interesting corner case: in the event of a multiple documents payload
       where two or more documents carry the same value for a field where the
       'unique' constraint is set, the payload will validate successfully, as
       there are no duplicates in the database (yet). If this is an issue, the
       client can always send the documents one at a time for insertion, or
       validate locally before submitting the payload to the API.

    .. versionchanged:: 0.0.5
       Support for 'application/json' Content-Type.
       Support for 'user-restricted resource access'.

    .. versionchanged:: 0.0.4
       Added the ``requires_auth`` decorator.

    .. versionchanged:: 0.0.3
       JSON links. Superfluous ``response`` container removed.
    """

    date_utc = datetime.utcnow().replace(microsecond=0)
    resource_def = app.config['DOMAIN'][resource]
    schema = resource_def['schema']
    validator = None if skip_validation \
        else app.validator(schema, resource=resource)
    documents = []
    results = []
    failures = 0
    id_field = resource_def['id_field']

    if config.BANDWIDTH_SAVER is True:
        embedded_fields = []
    else:
        req = parse_request(resource)
        embedded_fields = resolve_embedded_fields(resource, req)

    # validation, and additional fields
    if payl is None:
        payl = payload()

    if isinstance(payl, dict):
        payl = [payl]

    if not payl:
        # empty bulk insert
        abort(400, description=debug_error_message(
            'Empty bulk insert'
        ))

    if len(payl) > 1 and not config.DOMAIN[resource]['bulk_enabled']:
        abort(400, description=debug_error_message(
            'Bulk insert not allowed'
        ))

    for value in payl:
        document = []
        doc_issues = {}
        try:
            document = parse(value, resource)
            resolve_sub_resource_path(document, resource)
            if skip_validation:
                validation = True
            else:
                validation = validator.validate(document)
            if validation:  # validation is successful
                # validator might not be available if skip_validation. #726.
                if validator:
                    # Apply coerced values
                    document = validator.document

                # Populate meta and default fields
                document[config.LAST_UPDATED] = \
                    document[config.DATE_CREATED] = date_utc

                if config.DOMAIN[resource]['soft_delete'] is True:
                    document[config.DELETED] = False

                resolve_user_restricted_access(document, resource)
                store_media_files(document, resource)
                resolve_document_version(document, resource, 'POST')
            else:
                # validation errors added to list of document issues
                doc_issues = validator.errors
        except DocumentError as e:
            doc_issues['validation exception'] = str(e)
        except Exception as e:
            # most likely a problem with the incoming payload, report back to
            # the client as if it was a validation issue
            app.logger.exception(e)
            doc_issues['exception'] = str(e)

        if len(doc_issues):
            document = {
                config.STATUS: config.STATUS_ERR,
                config.ISSUES: doc_issues,
            }
            failures += 1

        documents.append(document)

    if failures:
        # If at least one document got issues, the whole request fails and a
        # ``422 Unprocessable Entity`` status is returned.
        for document in documents:
            if config.STATUS in document \
               and document[config.STATUS] == config.STATUS_ERR:
                results.append(document)
            else:
                results.append({config.STATUS: config.STATUS_OK})

        return_code = config.VALIDATION_ERROR_STATUS
    else:
        # notify callbacks
        getattr(app, "on_insert")(resource, documents)
        getattr(app, "on_insert_%s" % resource)(documents)

        # compute etags here as documents might have been updated by callbacks.
        resolve_document_etag(documents, resource)

        # bulk insert
        ids = app.data.insert(resource, documents)

        # update oplog if needed
        oplog_push(resource, documents, 'POST')

        # assign document ids
        for document in documents:
            # either return the custom ID_FIELD or the id returned by
            # data.insert().
            id_ = document.get(id_field, ids.pop(0))
            document[id_field] = id_

            # build the full response document
            result = document
            build_response_document(
                result, resource, embedded_fields, document)

            # add extra write meta data
            result[config.STATUS] = config.STATUS_OK

            # limit what actually gets sent to minimize bandwidth usage
            result = marshal_write_response(result, resource)
            results.append(result)

        # insert versioning docs
        insert_versioning_documents(resource, documents)

        # notify callbacks
        getattr(app, "on_inserted")(resource, documents)
        getattr(app, "on_inserted_%s" % resource)(documents)
        # request was received and accepted; at least one document passed
        # validation and was accepted for insertion.

        return_code = 201

    if len(results) == 1:
        response = results.pop(0)
    else:
        response = {
            config.STATUS: config.STATUS_ERR if failures else config.STATUS_OK,
            config.ITEMS: results,
        }

    if failures:
        response[config.ERROR] = {
            "code": return_code,
            "message": "Insertion failure: %d document(s) contain(s) error(s)"
            % failures,
        }

    location_header = None if return_code != 201 or not documents else \
        [('Location', '%s/%s' % (resource_link(), documents[0][id_field]))]

    return response, None, None, return_code, location_header
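
The bulk-insert branch above can be driven by simply passing a list as the payload. A hedged sketch, reusing the hypothetical 'contacts' resource, which also shows where the failure summary ends up when any document has issues (bulk_enabled must be on for the resource):

from eve.methods.post import post_internal  # assumed module path
from eve.utils import config                # assumed location of the config proxy


def bulk_create(app, docs):
    with app.test_request_context(method='POST',
                                  content_type='application/json'):
        response, _, _, return_code, _ = post_internal('contacts', payl=docs)
    if return_code == 201:
        # a multi-document response wraps per-document results in ITEMS
        return response.get(config.ITEMS, [response])
    # otherwise the summary built above is available under ERROR
    return response.get(config.ERROR)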
Example #7
def put_internal(resource,
                 payload=None,
                 concurrency_check=False,
                 skip_validation=False,
                 **lookup):
    """ Intended for internal put calls, this method is not rate limited,
    authentication is not checked, pre-request events are not raised, and
    concurrency checking is optional. Performs a document replacement.
    Updates are first validated against the resource schema. If validation
    passes, the document is replaced and an OK status update is returned.
    If validation fails, a set of validation issues is returned.

    :param resource: the name of the resource to which the document belongs.
    :param payload: alternative payload. When calling put() from your own code
                    you can provide an alternative payload. This can be useful,
                    for example, when you have a callback function hooked to a
                    certain endpoint, and want to perform additional put()
                    calls from there.

                    Please be advised that in order to successfully use this
                    option, a request context must be available.
    :param concurrency_check: concurrency check switch (bool)
    :param skip_validation: skip payload validation before write (bool)
    :param **lookup: document lookup query.

    .. versionchanged:: 0.6
       Create document if it does not exist. Closes #634.
       Allow restoring soft deleted documents via PUT

    .. versionchanged:: 0.5
       Back to resolving default values after validation as now the validator
       can properly validate dependency even when some have default values. See
       #353.
       Original put() has been split into put() and put_internal().
       You can now pass a pre-defined custom payload to the function.
       ETAG is now stored with the document (#369).
       Catching all HTTPExceptions and returning them to the caller, allowing
       for eventual flask.abort() invocations in callback functions to go
       through. Fixes #395.

    .. versionchanged:: 0.4
       Allow abort() to be invoked by callback functions.
       Resolve default values before validation is performed. See #353.
       Raise 'on_replace' instead of 'on_insert'. The callback function gets
       the document (as opposed to a list of just 1 document) as an argument.
       Support for document versioning.
       Raise `on_replaced` after the document has been replaced

    .. versionchanged:: 0.3
       Support for media fields.
       When IF_MATCH is disabled, no etag is included in the payload.
       Support for new validation format introduced with Cerberus v0.5.

    .. versionchanged:: 0.2
       Use the new STATUS setting.
       Use the new ISSUES setting.
       Raise pre_<method> event.
       Explicitly resolve default values instead of letting them be resolved
       by common.parse. This avoids a validation error when a read-only field
       also has a default value.

    .. versionchanged:: 0.1.1
       auth.request_auth_value is now used to store the auth_field value.
       Item-identifier wrapper stripped from both request and response payload.

    .. versionadded:: 0.1.0
    """
    resource_def = app.config['DOMAIN'][resource]
    schema = resource_def['schema']
    validator = app.validator(schema, resource)

    if payload is None:
        payload = payload_()

    original = get_document(resource, concurrency_check, **lookup)
    if not original:
        if config.UPSERT_ON_PUT:
            id = lookup[resource_def['id_field']]
            # this guard avoids a bson dependency, which would be needed if we
            # wanted to use 'isinstance'. Should also be slightly faster.
            if schema[resource_def['id_field']].get('type', '') == 'objectid':
                id = str(id)
            payload[resource_def['id_field']] = id
            return post_internal(resource, payl=payload)
        else:
            abort(404)

    last_modified = None
    etag = None
    issues = {}
    object_id = original[resource_def['id_field']]

    response = {}

    if config.BANDWIDTH_SAVER is True:
        embedded_fields = []
    else:
        req = parse_request(resource)
        embedded_fields = resolve_embedded_fields(resource, req)

    try:
        document = parse(payload, resource)
        resolve_sub_resource_path(document, resource)
        if skip_validation:
            validation = True
        else:
            validation = validator.validate_replace(document, object_id,
                                                    original)
            # Apply coerced values
            document = validator.document

        if validation:
            # sneak in a shadow copy if it wasn't already there
            late_versioning_catch(original, resource)

            # update meta
            last_modified = datetime.utcnow().replace(microsecond=0)
            document[config.LAST_UPDATED] = last_modified
            document[config.DATE_CREATED] = original[config.DATE_CREATED]
            if resource_def['soft_delete'] is True:
                # PUT with soft delete enabled should always set the DELETED
                # field to False. We are either carrying through un-deleted
                # status, or restoring a soft deleted document
                document[config.DELETED] = False

            # id_field not in document means it is not being automatically
            # handled (it has been set to a field which exists in the
            # resource schema).
            if resource_def['id_field'] not in document:
                document[resource_def['id_field']] = object_id

            resolve_user_restricted_access(document, resource)
            resolve_default_values(document, resource_def['defaults'])
            store_media_files(document, resource, original)
            resolve_document_version(document, resource, 'PUT', original)

            # notify callbacks
            getattr(app, "on_replace")(resource, document, original)
            getattr(app, "on_replace_%s" % resource)(document, original)

            resolve_document_etag(document, resource)

            # write to db
            try:
                app.data.replace(resource, object_id, document, original)
            except app.data.OriginalChangedError:
                if concurrency_check:
                    abort(412,
                          description='Client and server etags don\'t match')

            # update oplog if needed
            oplog_push(resource, document, 'PUT')

            insert_versioning_documents(resource, document)

            # notify callbacks
            getattr(app, "on_replaced")(resource, document, original)
            getattr(app, "on_replaced_%s" % resource)(document, original)

            # build the full response document
            build_response_document(document, resource, embedded_fields,
                                    document)
            response = document
            if config.IF_MATCH:
                etag = response[config.ETAG]
        else:
            issues = validator.errors
    except ValidationError as e:
        # TODO should probably log the error and abort 400 instead
        # (once we have logging in place)
        issues['validator exception'] = str(e)
    except exceptions.HTTPException as e:
        raise e
    except Exception as e:
        # consider all other exceptions as Bad Requests
        app.logger.exception(e)
        abort(400,
              description=debug_error_message('An exception occurred: %s' % e))

    if len(issues):
        response[config.ISSUES] = issues
        response[config.STATUS] = config.STATUS_ERR
        status = config.VALIDATION_ERROR_STATUS
    else:
        response[config.STATUS] = config.STATUS_OK
        status = 200

    # limit what actually gets sent to minimize bandwidth usage
    response = marshal_write_response(response, resource)

    return response, last_modified, etag, status
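
The upsert branch introduced in 0.6 means a PUT against a missing item can fall through to post_internal when UPSERT_ON_PUT is enabled. A minimal sketch under the same assumptions as before (hypothetical 'contacts' resource, '_id' as the id field):

from eve.methods.put import put_internal  # assumed module path


def upsert_contact(app, contact_id, doc):
    with app.test_request_context(method='PUT',
                                  content_type='application/json'):
        # the id field in the lookup drives both the replace and the upsert
        response, last_modified, etag, status = put_internal(
            'contacts', payload=doc, concurrency_check=False,
            **{'_id': contact_id})
    return response, status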
Example #8
File: put.py  Project: pfreixes/eve
def put_internal(resource, payload=None, concurrency_check=False,
                 skip_validation=False, **lookup):
    """ Intended for internal put calls, this method is not rate limited,
    authentication is not checked, pre-request events are not raised, and
    concurrency checking is optional. Performs a document replacement.
    Updates are first validated against the resource schema. If validation
    passes, the document is replaced and an OK status update is returned.
    If validation fails, a set of validation issues is returned.

    :param resource: the name of the resource to which the document belongs.
    :param payload: alternative payload. When calling put() from your own code
                    you can provide an alternative payload. This can be useful,
                    for example, when you have a callback function hooked to a
                    certain endpoint, and want to perform additional put()
                    calls from there.

                    Please be advised that in order to successfully use this
                    option, a request context must be available.
    :param concurrency_check: concurrency check switch (bool)
    :param skip_validation: skip payload validation before write (bool)
    :param **lookup: document lookup query.

    .. versionchanged:: 0.5
       Back to resolving default values after validation as now the validator
       can properly validate dependency even when some have default values. See
       #353.
       Original put() has been split into put() and put_internal().
       You can now pass a pre-defined custom payload to the function.
       ETAG is now stored with the document (#369).
       Catching all HTTPExceptions and returning them to the caller, allowing
       for eventual flask.abort() invocations in callback functions to go
       through. Fixes #395.

    .. versionchanged:: 0.4
       Allow abort() to be invoked by callback functions.
       Resolve default values before validation is performed. See #353.
       Raise 'on_replace' instead of 'on_insert'. The callback function gets
       the document (as opposed to a list of just 1 document) as an argument.
       Support for document versioning.
       Raise `on_replaced` after the document has been replaced

    .. versionchanged:: 0.3
       Support for media fields.
       When IF_MATCH is disabled, no etag is included in the payload.
       Support for new validation format introduced with Cerberus v0.5.

    .. versionchanged:: 0.2
       Use the new STATUS setting.
       Use the new ISSUES setting.
       Raise pre_<method> event.
       Explicitly resolve default values instead of letting them be resolved
       by common.parse. This avoids a validation error when a read-only field
       also has a default value.

    .. versionchanged:: 0.1.1
       auth.request_auth_value is now used to store the auth_field value.
       Item-identifier wrapper stripped from both request and response payload.

    .. versionadded:: 0.1.0
    """
    resource_def = app.config['DOMAIN'][resource]
    schema = resource_def['schema']
    if not skip_validation:
        validator = app.validator(schema, resource)

    if payload is None:
        payload = payload_()

    original = get_document(resource, concurrency_check, **lookup)
    if not original:
        # not found
        abort(404)

    last_modified = None
    etag = None
    issues = {}
    object_id = original[config.ID_FIELD]

    response = {}

    if config.BANDWIDTH_SAVER is True:
        embedded_fields = []
    else:
        req = parse_request(resource)
        embedded_fields = resolve_embedded_fields(resource, req)

    try:
        document = parse(payload, resource)
        if skip_validation:
            validation = True
        else:
            validation = validator.validate_replace(document, object_id)
        if validation:
            # sneak in a shadow copy if it wasn't already there
            late_versioning_catch(original, resource)

            # update meta
            last_modified = datetime.utcnow().replace(microsecond=0)
            document[config.LAST_UPDATED] = last_modified
            document[config.DATE_CREATED] = original[config.DATE_CREATED]

            # ID_FIELD not in document means it is not being automatically
            # handled (it has been set to a field which exists in the resource
            # schema).
            if config.ID_FIELD not in document:
                document[config.ID_FIELD] = object_id

            resolve_user_restricted_access(document, resource)
            resolve_default_values(document, resource_def['defaults'])
            store_media_files(document, resource, original)
            resolve_document_version(document, resource, 'PUT', original)

            # notify callbacks
            getattr(app, "on_replace")(resource, document, original)
            getattr(app, "on_replace_%s" % resource)(document, original)

            resolve_document_etag(document)

            # write to db
            app.data.replace(resource, object_id, document)

            # update oplog if needed
            oplog_push(resource, document, 'PUT')

            insert_versioning_documents(resource, document)

            # notify callbacks
            getattr(app, "on_replaced")(resource, document, original)
            getattr(app, "on_replaced_%s" % resource)(document, original)

            # build the full response document
            build_response_document(
                document, resource, embedded_fields, document)
            response = document
        else:
            issues = validator.errors
    except ValidationError as e:
        # TODO should probably log the error and abort 400 instead (once we
        # have logging)
        issues['validator exception'] = str(e)
    except exceptions.HTTPException as e:
        raise e
    except Exception as e:
        # consider all other exceptions as Bad Requests
        abort(400, description=debug_error_message(
            'An exception occurred: %s' % e
        ))

    if len(issues):
        response[config.ISSUES] = issues
        response[config.STATUS] = config.STATUS_ERR
        status = config.VALIDATION_ERROR_STATUS
    else:
        response[config.STATUS] = config.STATUS_OK
        status = 200

    # limit what actually gets sent to minimize bandwidth usage
    response = marshal_write_response(response, resource)

    return response, last_modified, etag, status
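A minimal usage sketch, not part of the original listing: since put_internal() skips authentication and rate limiting but still reads request-level settings, the custom-payload option needs an active request context, as the docstring notes. The 'contacts' resource, the settings file and the document id below are illustrative assumptions.

from eve import Eve
from eve.methods.put import put_internal

app = Eve()  # assumes a settings file defining a 'contacts' resource

with app.test_request_context(method='PUT'):
    # replace an existing document identified by the lookup kwargs
    response, last_modified, etag, status = put_internal(
        'contacts',
        payload={'name': 'John Doe'},
        concurrency_check=False,
        _id='54f53e901d41c804fbad1f25')
    print(status, response)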
Example #9
def deleteitem_internal(
        resource, concurrency_check=False, suppress_callbacks=False, **lookup):
    """ Intended for internal delete calls, this method is not rate limited,
    authentication is not checked, pre-request events are not raised, and
    concurrency checking is optional. Deletes a resource item.

    :param resource: name of the resource to which the item(s) belong.
    :param concurrency_check: concurrency check switch (bool)
    :param **lookup: item lookup query.

    .. versionchanged:: 0.6
       Support for soft delete.

    .. versionchanged:: 0.5
       Return 204 NoContent instead of 200.
       Push updates to OpLog.
       Original deleteitem() has been split into deleteitem() and
       deleteitem_internal().

    .. versionchanged:: 0.4
       Fix #284: If you have a media field, and set datasource projection to
       0 for that field, the media will not be deleted.
       Support for document versioning.
       'on_delete_item' events raised before performing the delete.
       'on_deleted_item' events raised after performing the delete.

    .. versionchanged:: 0.3
       Delete media files as needed.
       Pass the explicit query filter to the data driver, as it does not
       support the id argument anymore.

    .. versionchanged:: 0.2
       Raise pre_<method> event.

    .. versionchanged:: 0.0.7
       Support for Rate-Limiting.

    .. versionchanged:: 0.0.5
      Pass current resource to ``parse_request``, allowing for proper
      processing of new configuration settings: `filters`, `sorting`, `paging`.

    .. versionchanged:: 0.0.4
       Added the ``requires_auth`` decorator.
    """
    resource_def = config.DOMAIN[resource]
    soft_delete_enabled = resource_def['soft_delete']
    original = get_document(resource, concurrency_check, **lookup)
    if not original or (soft_delete_enabled and
                        original.get(config.DELETED) is True):
        abort(404)

    # notify callbacks
    if suppress_callbacks is not True:
        getattr(app, "on_delete_item")(resource, original)
        getattr(app, "on_delete_item_%s" % resource)(original)

    if soft_delete_enabled:
        # Instead of removing the document from the db, just mark it as deleted
        marked_document = copy.deepcopy(original)

        # Set DELETED flag and update metadata
        last_modified = datetime.utcnow().replace(microsecond=0)
        marked_document[config.DELETED] = True
        marked_document[config.LAST_UPDATED] = last_modified

        if config.IF_MATCH:
            resolve_document_etag(marked_document, resource)

        resolve_document_version(marked_document, resource, 'DELETE', original)

        # Update document in database (including version collection if needed)
        id = original[resource_def['id_field']]
        try:
            app.data.replace(resource, id, marked_document, original)
        except app.data.OriginalChangedError:
            if concurrency_check:
                abort(412, description='Client and server etags don\'t match')

        # create previous version if it wasn't already there
        late_versioning_catch(original, resource)
        # and add deleted version
        insert_versioning_documents(resource, marked_document)
        # update oplog if needed
        oplog_push(resource, marked_document, 'DELETE', id)

    else:
        # Delete the document for real

        # media cleanup
        media_fields = app.config['DOMAIN'][resource]['_media']

        # document might miss one or more media fields because of datasource
        # and/or client projection.
        missing_media_fields = [f for f in media_fields if f not in original]
        if len(missing_media_fields):
            # retrieve the whole document so we have all media fields available
            # Should be a very rare occurrence. We can't get rid of the
            # get_document() call since it also deals with etag matching, which
            # is still needed. Also, this lookup should never fail.
            # TODO not happy with this hack. Not at all. Is there a better way?
            original = app.data.find_one_raw(
                resource, original[resource_def['id_field']])

        for field in media_fields:
            if field in original:
                app.media.delete(original[field], resource)

        id = original[resource_def['id_field']]
        app.data.remove(resource, {resource_def['id_field']: id})

        # TODO: should attempt to delete version collection even if setting is
        # off
        if app.config['DOMAIN'][resource]['versioning'] is True:
            app.data.remove(
                resource + config.VERSIONS,
                {versioned_id_field(resource_def):
                 original[resource_def['id_field']]})

        # update oplog if needed
        oplog_push(resource, original, 'DELETE', id)

    if suppress_callbacks is not True:
        getattr(app, "on_deleted_item")(resource, original)
        getattr(app, "on_deleted_item_%s" % resource)(original)

    return {}, None, None, 204
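A hedged sketch of how this internal delete could be driven from application code; the 'contacts' resource (declared with 'soft_delete': True in its DOMAIN entry), the settings file and the document id are assumptions.

from eve import Eve
from eve.methods.delete import deleteitem_internal

app = Eve()  # settings assumed to enable 'soft_delete' for 'contacts'

with app.test_request_context(method='DELETE'):
    # with soft delete enabled the document stays in the collection,
    # flagged as deleted, versioned and pushed to the oplog
    deleteitem_internal('contacts', concurrency_check=False,
                        _id='54f53e901d41c804fbad1f25')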
Example #10
def deleteitem_internal(resource, concurrency_check=False, **lookup):
    """ Intended for internal delete calls, this method is not rate limited,
    authentication is not checked, pre-request events are not raised, and
    concurrency checking is optional. Deletes a resource item.

    :param resource: name of the resource to which the item(s) belong.
    :param concurrency_check: concurrency check switch (bool)
    :param **lookup: item lookup query.

    .. versionchanged:: 0.5
       Return 204 NoContent instead of 200.
       Push updates to OpLog.
       Original deleteitem() has been split into deleteitem() and
       deleteitem_internal().

    .. versionchanged:: 0.4
       Fix #284: If you have a media field, and set datasource projection to
       0 for that field, the media will not be deleted.
       Support for document versioning.
       'on_delete_item' events raised before performing the delete.
       'on_deleted_item' events raised after performing the delete.

    .. versionchanged:: 0.3
       Delete media files as needed.
       Pass the explicit query filter to the data driver, as it does not
       support the id argument anymore.

    .. versionchanged:: 0.2
       Raise pre_<method> event.

    .. versionchanged:: 0.0.7
       Support for Rate-Limiting.

    .. versionchanged:: 0.0.5
      Pass current resource to ``parse_request``, allowing for proper
      processing of new configuration settings: `filters`, `sorting`, `paging`.

    .. versionchanged:: 0.0.4
       Added the ``requires_auth`` decorator.
    """
    original = get_document(resource, concurrency_check, **lookup)
    if not original:
        abort(404)

    # notify callbacks
    getattr(app, "on_delete_item")(resource, original)
    getattr(app, "on_delete_item_%s" % resource)(original)

    # media cleanup
    media_fields = app.config['DOMAIN'][resource]['_media']

    # document might miss one or more media fields because of datasource and/or
    # client projection.
    missing_media_fields = [f for f in media_fields if f not in original]
    if len(missing_media_fields):
        # retrieve the whole document so we have all media fields available.
        # Should be a very rare occurrence. We can't get rid of the
        # get_document() call since it also deals with etag matching, which is
        # still needed. Also, this lookup should never fail.
        # TODO not happy with this hack. Not at all. Is there a better way?
        original = app.data.find_one_raw(resource, original[config.ID_FIELD])

    for field in media_fields:
        if field in original:
            app.media.delete(original[field])

    id = original[config.ID_FIELD]
    app.data.remove(resource, {config.ID_FIELD: id})

    # update oplog if needed
    oplog_push(resource, original, 'DELETE', id)

    # TODO: should attempt to delete version collection even if setting is off
    if app.config['DOMAIN'][resource]['versioning'] is True:
        app.data.remove(
            resource + config.VERSIONS,
            {versioned_id_field(): original[config.ID_FIELD]})

    getattr(app, "on_deleted_item")(resource, original)
    getattr(app, "on_deleted_item_%s" % resource)(original)

    return {}, None, None, 204
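Both variants fire the on_delete_item / on_deleted_item events, so a caller can hook into the deletion. A minimal sketch follows; the callback bodies are placeholders and the settings file is assumed.

from eve import Eve


def before_delete(resource, item):
    # fired right before the document is removed (or soft-deleted)
    print('about to delete %s from %s' % (item.get('_id'), resource))


def after_delete(resource, item):
    # fired once the document, its media and versions are gone
    print('deleted %s from %s' % (item.get('_id'), resource))


app = Eve()
app.on_delete_item += before_delete
app.on_deleted_item += after_delete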
Example #11
File: put.py Project: sunbit/eve
def put_internal(
    resource, payload=None, concurrency_check=False, skip_validation=False, **lookup
):
    """ Intended for internal put calls, this method is not rate limited,
    authentication is not checked, pre-request events are not raised, and
    concurrency checking is optional. Performs a document replacement.
    Updates are first validated against the resource schema. If validation
    passes, the document is replaced and an OK status update is returned.
    If validation fails a set of validation issues is returned.

    :param resource: the name of the resource to which the document belongs.
    :param payload: alternative payload. When calling put() from your own code
                    you can provide an alternative payload. This can be useful,
                    for example, when you have a callback function hooked to a
                    certain endpoint, and want to perform additional put()
                    calls from there.

                    Please be advised that in order to successfully use this
                    option, a request context must be available.
    :param concurrency_check: concurrency check switch (bool)
    :param skip_validation: skip payload validation before write (bool)
    :param **lookup: document lookup query.

    .. versionchanged:: 0.6
       Create document if it does not exist. Closes #634.
       Allow restoring soft deleted documents via PUT

    .. versionchanged:: 0.5
       Back to resolving default values after validation as now the validator
       can properly validate dependency even when some have default values. See
       #353.
       Original put() has been split into put() and put_internal().
       You can now pass a pre-defined custom payload to the function.
       ETAG is now stored with the document (#369).
       Catching all HTTPExceptions and returning them to the caller, allowing
       for eventual flask.abort() invocations in callback functions to go
       through. Fixes #395.

    .. versionchanged:: 0.4
       Allow abort() to be invoked by callback functions.
       Resolve default values before validation is performed. See #353.
       Raise 'on_replace' instead of 'on_insert'. The callback function gets
       the document (as opposed to a list of just 1 document) as an argument.
       Support for document versioning.
       Raise `on_replaced` after the document has been replaced

    .. versionchanged:: 0.3
       Support for media fields.
       When IF_MATCH is disabled, no etag is included in the payload.
       Support for new validation format introduced with Cerberus v0.5.

    .. versionchanged:: 0.2
       Use the new STATUS setting.
       Use the new ISSUES setting.
       Raise pre_<method> event.
       explicitly resolve default values instead of letting them be resolved
       by common.parse. This avoids a validation error when a read-only field
       also has a default value.

    .. versionchanged:: 0.1.1
       auth.request_auth_value is now used to store the auth_field value.
       Item-identifier wrapper stripped from both request and response payload.

    .. versionadded:: 0.1.0
    """
    resource_def = app.config["DOMAIN"][resource]
    schema = resource_def["schema"]
    validator = app.validator(
        schema, resource=resource, allow_unknown=resource_def["allow_unknown"]
    )

    if payload is None:
        payload = payload_()

    # Retrieve the original document without checking user-restricted access,
    # but returning the document owner in the projection. This allows us to
    # prevent PUT if the document exists, but is owned by a different user
    # than the currently authenticated one.
    original = get_document(
        resource,
        concurrency_check,
        check_auth_value=False,
        force_auth_field_projection=True,
        **lookup
    )
    if not original:
        if config.UPSERT_ON_PUT:
            id = lookup[resource_def["id_field"]]
            # this guard avoids a bson dependency, which would be needed if we
            # wanted to use 'isinstance'. Should also be slightly faster.
            if schema[resource_def["id_field"]].get("type", "") == "objectid":
                id = str(id)
            payload[resource_def["id_field"]] = id
            return post_internal(resource, payl=payload)
        else:
            abort(404)

    # If the document exists, but is owned by someone else, return
    # 403 Forbidden
    auth_field, request_auth_value = auth_field_and_value(resource)
    if auth_field and original.get(auth_field) != request_auth_value:
        abort(403)

    last_modified = None
    etag = None
    issues = {}
    object_id = original[resource_def["id_field"]]

    response = {}

    if config.BANDWIDTH_SAVER is True:
        embedded_fields = []
    else:
        req = parse_request(resource)
        embedded_fields = resolve_embedded_fields(resource, req)

    try:
        document = parse(payload, resource)
        resolve_sub_resource_path(document, resource)
        if skip_validation:
            validation = True
        else:
            validation = validator.validate_replace(document, object_id, original)
            # Apply coerced values
            document = validator.document

        if validation:
            # sneak in a shadow copy if it wasn't already there
            late_versioning_catch(original, resource)

            # update meta
            last_modified = datetime.utcnow().replace(microsecond=0)
            document[config.LAST_UPDATED] = last_modified
            document[config.DATE_CREATED] = original[config.DATE_CREATED]
            if resource_def["soft_delete"] is True:
                # PUT with soft delete enabled should always set the DELETED
                # field to False. We are either carrying through un-deleted
                # status, or restoring a soft deleted document
                document[config.DELETED] = False

            # id_field not in document means it is not being automatically
            # handled (it has been set to a field which exists in the
            # resource schema).
            if resource_def["id_field"] not in document:
                document[resource_def["id_field"]] = object_id

            resolve_user_restricted_access(document, resource)
            store_media_files(document, resource, original)
            resolve_document_version(document, resource, "PUT", original)

            # notify callbacks
            getattr(app, "on_replace")(resource, document, original)
            getattr(app, "on_replace_%s" % resource)(document, original)

            resolve_document_etag(document, resource)

            # write to db
            try:
                app.data.replace(resource, object_id, document, original)
            except app.data.OriginalChangedError:
                if concurrency_check:
                    abort(412, description="Client and server etags don't match")

            # update oplog if needed
            oplog_push(resource, document, "PUT")

            insert_versioning_documents(resource, document)

            # notify callbacks
            getattr(app, "on_replaced")(resource, document, original)
            getattr(app, "on_replaced_%s" % resource)(document, original)

            # build the full response document
            build_response_document(document, resource, embedded_fields, document)
            response = document
            if config.IF_MATCH:
                etag = response[config.ETAG]
        else:
            issues = validator.errors
    except DocumentError as e:
        # TODO should probably log the error and abort 400 instead (once we
        # have logging)
        issues["validator exception"] = str(e)
    except exceptions.HTTPException as e:
        raise e
    except Exception as e:
        # consider all other exceptions as Bad Requests
        app.logger.exception(e)
        abort(400, description=debug_error_message("An exception occurred: %s" % e))

    if len(issues):
        response[config.ISSUES] = issues
        response[config.STATUS] = config.STATUS_ERR
        status = config.VALIDATION_ERROR_STATUS
    else:
        response[config.STATUS] = config.STATUS_OK
        status = 200

    # limit what actually gets sent to minimize bandwidth usage
    response = marshal_write_response(response, resource)

    return response, last_modified, etag, status
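When the lookup misses and UPSERT_ON_PUT is enabled, the code above injects the id into the payload and delegates to post_internal(), creating the document instead of aborting with 404. A rough sketch of that path; the resource name, document id and settings are assumptions.

from eve import Eve
from eve.methods.put import put_internal

app = Eve()  # settings assumed to define 'contacts' and leave UPSERT_ON_PUT on

with app.test_request_context(method='PUT'):
    response, _, _, status = put_internal(
        'contacts',
        payload={'name': 'Jane Doe'},
        _id='54f53e901d41c804fbad1f25',
    )
    print(status)  # 201 when the document was created, 200 when it was replaced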
def pre_get_callback(resource, request, lookup):
    app.logger.info("pre get fired")
    if resource:
        oplog_push(resource, None, "GET")


def post_get_callback(resource, request, lookup):
    events = []
    if resource:
        oplog_push(resource, events, "GET")
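These two callbacks only log and push to the oplog; to take effect they have to be registered on the Eve event hooks, roughly as below (settings file assumed).

from eve import Eve

app = Eve()
app.on_pre_GET += pre_get_callback
app.on_post_GET += post_get_callback

if __name__ == '__main__':
    app.run()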