Example no. 1
def convert_storage_errors(viewfunc, request):
    """View decorator to convert storage errors into HTTP error responses.

    This decorator intercepts any storage-backend exceptions and translates
    them into a matching HTTPError subclass.
    """
    try:
        return viewfunc(request)
    except ConflictError:
        # NOTE:  the protocol specification states that we should return
        # a "409 Conflict" response here, but clients currently do not
        # handle these responses very well:
        #   * desktop bug: https://bugzilla.mozilla.org/show_bug.cgi?id=959034
        #   * android bug: https://bugzilla.mozilla.org/show_bug.cgi?id=959032
        headers = {"Retry-After": str(RETRY_AFTER)}
        raise HTTPServiceUnavailable(headers=headers)
    except NotFoundError:
        raise HTTPNotFound
    except InvalidOffsetError:
        raise json_error(400, "error",
                         [{
                             "location": "querystring",
                             "name": "offset",
                             "description": "Invalid value for offset",
                         }])
    except InvalidBatch as e:
        raise HTTPBadRequest("Invalid batch: %s" % e)
def convert_storage_errors(viewfunc, request):
    """View decorator to convert storage errors into HTTP error responses.

    This decorator intercepts any storage-backend exceptions and translates
    them into a matching HTTPError subclass.
    """
    try:
        return viewfunc(request)
    except ConflictError:
        # NOTE:  the protocol specification states that we should return
        # a "409 Conflict" response here, but clients currently do not
        # handle these responses very well:
        #   * desktop bug: https://bugzilla.mozilla.org/show_bug.cgi?id=959034
        #   * android bug: https://bugzilla.mozilla.org/show_bug.cgi?id=959032
        headers = {"Retry-After": str(RETRY_AFTER)}
        raise HTTPServiceUnavailable(headers=headers)
    except NotFoundError:
        raise HTTPNotFound
    except InvalidOffsetError:
        raise json_error(400, "error", [{
            "location": "querystring",
            "name": "offset",
            "description": "Invalid value for offset",
        }])
    except InvalidBatch as e:
        raise HTTPBadRequest("Invalid batch: %s" % e)
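
The two variants above take (viewfunc, request) rather than the usual single-argument view signature, so they are normally wired up by a small wrapping helper rather than Python's @ syntax. A minimal sketch of such a helper follows, assuming a plain function wrapper; the name wrap_view and the commented usage line are illustrative, not the project's actual registration code.

import functools

def wrap_view(wrapper, viewfunc):
    """Return a view callable that routes every request through `wrapper`.

    `wrapper` is expected to have the (viewfunc, request) signature used by
    convert_storage_errors above.
    """
    @functools.wraps(viewfunc)
    def wrapped(request):
        return wrapper(viewfunc, request)
    return wrapped

# Hypothetical usage: wrap a plain view before registering it with the framework.
# get_collection = wrap_view(convert_storage_errors, get_collection)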
Example no. 3
def extract_batch_state(request):
    """Validator to extract the batch state of a request for slightly
    tidier code in the views.

    If the "batch" parameter is has no value or has a value of "true" then
    a new batch will be created.

    If the "commit" parameter is has a value of "true", this batch
    is to be committed and deleted.
    """
    # Don't extract or validate any of these params
    # if the batch-upload feature is disabled.
    settings = request.registry.settings
    if not settings.get("storage.batch_upload_enabled", False):
        return

    request.validated["batch"] = False
    batch_id = request.GET.get("batch")
    if batch_id is not None:
        if TRUE_REGEX.match(batch_id):
            batch_id = True
        else:
            try:
                batch_id = int(b64decode(batch_id))
            except TypeError:
                try:
                    batch_id = int(batch_id)
                except ValueError:
                    msg = "Invalid batch ID: \"%s\"" % (batch_id,)
                    request.errors.add("batch", "id", msg)
        request.validated["batch"] = batch_id
    elif batch_id is None and "batch" in request.GET:
        request.validated["batch"] = True

    request.validated["commit"] = False
    commit = request.GET.get("commit")
    if commit is not None:
        if TRUE_REGEX.match(commit):
            request.validated["commit"] = True
        else:
            msg = "commit parameter must be \"true\" to apply batches"
            request.errors.add("batch", "commit", msg)

    LIMITS = (
      ("X-Weave-Records", "max_post_records"),
      ("X-Weave-Bytes", "max_post_bytes"),
      ("X-Weave-Total-Records", "max_total_records"),
      ("X-Weave-Total-Bytes", "max_total_bytes"),
    )
    for (header, setting) in LIMITS:
        try:
            count = int(request.headers[header])
        except ValueError:
            msg = "Invalid integer value: %s" % (request.headers[header],)
            request.errors.add("header", header, msg)
            continue
        except KeyError:
            continue
        if count > get_limit_config(request, setting):
            raise json_error(400, "size-limit-exceeded")
def convert_storage_errors(viewfunc, request):
    """View decorator to convert storage errors into HTTP error responses.

    This decorator intercepts any storage-backend exceptions and translates
    them into a matching HTTPError subclass.
    """
    try:
        return viewfunc(request)
    except ConflictError:
        # NOTE:  the protocol specification states that we should return
        # a "409 Conflict" response here, but clients currently do not
        # handle these responses very well:
        #   * desktop bug: https://bugzilla.mozilla.org/show_bug.cgi?id=959034
        #   * android bug: https://bugzilla.mozilla.org/show_bug.cgi?id=959032
        if request.method != "POST" or "bsos" not in request.validated:
            # For most requests we instead return a "503 Service Unavailable",
            # which gives approximately the right client retry behaviour.
            headers = {"Retry-After": str(RETRY_AFTER)}
            raise HTTPServiceUnavailable(headers=headers)
        else:
            # For bulk POST operations we can report the error in the response
            # body, and let the client continue with the rest of its sync.
            logger.error("ConflictError on POST request")
            res = {'success': [], 'failed': {}}
            for bso in request.validated["bsos"]:
                res["failed"][bso["id"]] = "conflict"
            return res
    except NotFoundError:
        raise HTTPNotFound
    except InvalidOffsetError:
        raise json_error(400, "error", [{
            "location": "querystring",
            "name": "offset",
            "description": "Invalid value for offset",
        }])
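
extract_batch_state above accepts the batch parameter either as the literal "true" (start a new batch) or as an integer ID that clients may send base64-encoded or as a plain decimal string. The standalone sketch below mirrors that parsing; the TRUE_REGEX pattern is an assumption about the real constant, and parse_batch_param is an illustrative helper, not part of the codebase.

import re
from base64 import b64decode, b64encode

# Assumed to behave like the TRUE_REGEX constant used by the validator above.
TRUE_REGEX = re.compile(r"^true$", re.IGNORECASE)

def parse_batch_param(value):
    """Return True for "true", or the integer ID of an existing batch."""
    if TRUE_REGEX.match(value):
        return True
    try:
        return int(b64decode(value))
    except (TypeError, ValueError):
        return int(value)

# Round-trip for an existing batch ID, plus the "start a new batch" form.
assert parse_batch_param(b64encode(b"1234").decode()) == 1234
assert parse_batch_param("1234") == 1234
assert parse_batch_param("TRUE") is True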
Example no. 5
def check_storage_quota(viewfunc, request):
    """View decorator to check the user's quota.

    This decorator checks if a write request (PUT or POST) would cause the
    user's storage quota to be exceeded.  If it would, then an appropriate
    error response is returned.

    In addition, if the user has less than one meg of quota remaining then
    it will include an "X-Weave-Quota-Remaining" header in the response.
    """
    # This only applies to write requests.
    if request.method not in ("PUT", "POST"):
        return viewfunc(request)

    storage = request.validated["storage"]
    userid = request.validated["userid"]
    quota_size = request.registry.settings.get("storage.quota_size")

    # Don't do anything if quotas are not enabled.
    if quota_size is None:
        return viewfunc(request)

    # Get the total size used from the underlying store, which may be cached.
    # If we're close to going over quota, ask it to recalculate fresher info.
    used = storage.get_total_size(userid)
    left = quota_size - used
    if left < ONE_MB:
        used = storage.get_total_size(userid, recalculate=True)
        left = quota_size - used

    # Look for new items that will be written by this request,
    # and subtract them from the remaining quota.
    new_bsos = request.validated.get("bsos")
    if new_bsos is None:
        new_bso = request.validated.get("bso")
        if new_bso is None:
            new_bsos = ()
        else:
            new_bsos = (new_bso, )

    for bso in new_bsos:
        left -= len(bso.get("payload", ""))

    # Report errors/warnings as appropriate.
    if left <= 0:  # no space left
        raise json_error(403, "quota-exceeded")
    if left < ONE_MB:
        left_kb = round(float(left) / ONE_KB, 2)
        request.response.headers["X-Weave-Quota-Remaining"] = str(left_kb)

    return viewfunc(request)
def check_storage_quota(viewfunc, request):
    """View decorator to check the user's quota.

    This decorator checks if a write request (PUT or POST) would cause the
    user's storage quota to be exceeded.  If it would, then an appropriate
    error response is returned.

    In addition, if the user has less than one meg of quota remaining then
    it will include an "X-Weave-Quota-Remaining" header in the response.
    """
    # This only applies to write requests.
    if request.method not in ("PUT", "POST"):
        return viewfunc(request)

    storage = request.validated["storage"]
    user = request.user
    quota_size = request.registry.settings.get("storage.quota_size")

    # Don't do anything if quotas are not enabled.
    if quota_size is None:
        return viewfunc(request)

    # Get the total size used from the underlying store, which may be cached.
    # If we're close to going over quota, ask it to recalculate fresher info.
    used = storage.get_total_size(user)
    left = quota_size - used
    if left < ONE_MB:
        used = storage.get_total_size(user, recalculate=True)
        left = quota_size - used

    # Look for new items that will be written by this request,
    # and subtract them from the remaining quota.
    new_bsos = request.validated.get("bsos")
    if new_bsos is None:
        new_bso = request.validated.get("bso")
        if new_bso is None:
            new_bsos = ()
        else:
            new_bsos = (new_bso,)

    for bso in new_bsos:
        left -= len(bso.get("payload", ""))

    # Report errors/warnings as appropriate.
    if left <= 0:  # no space left
        raise json_error(403, "quota-exceeded")
    if left < ONE_MB:
        left_kb = round(float(left) / ONE_KB, 2)
        request.response.headers["X-Weave-Quota-Remaining"] = str(left_kb)

    return viewfunc(request)
def convert_storage_errors(viewfunc, request):
    """View decorator to convert storage errors into HTTP error responses.

    This decorator intercepts any storage-backend exceptions and translates
    them into a matching HTTPError subclass.
    """
    try:
        return viewfunc(request)
    except ConflictError:
        headers = {"Retry-After": str(RETRY_AFTER)}
        raise HTTPConflict(headers=headers)
    except NotFoundError:
        raise HTTPNotFound
    except InvalidOffsetError:
        raise json_error(400, "error", [{
            "location": "querystring",
            "name": "offset",
            "description": "Invalid value for offset",
        }])
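
check_storage_quota above boils down to simple arithmetic: remaining quota equals quota_size minus the bytes already used, minus the payload bytes about to be written, with a 403 when nothing is left and an X-Weave-Quota-Remaining header (in KB) when less than a megabyte remains. Below is a standalone sketch of that arithmetic; quota_check is an illustrative helper name, not part of the codebase.

ONE_KB = 1024
ONE_MB = 1024 * 1024

def quota_check(quota_size, used, payload_sizes):
    """Standalone sketch of the quota arithmetic above.

    Returns (allowed, remaining_kb_header) where `remaining_kb_header` is the
    value that would go in X-Weave-Quota-Remaining, or None if more than one
    megabyte of quota is left.
    """
    left = quota_size - used
    for size in payload_sizes:
        left -= size
    if left <= 0:
        return False, None          # would trigger the 403 "quota-exceeded"
    if left < ONE_MB:
        return True, str(round(float(left) / ONE_KB, 2))
    return True, None

# A user with 25 MB of quota, 24.5 MB used, uploading a 100 KB payload:
allowed, header = quota_check(25 * ONE_MB, int(24.5 * ONE_MB), [100 * ONE_KB])
assert allowed and header == "412.0"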
Example no. 8
def convert_storage_errors(viewfunc, request):
    """View decorator to convert storage errors into HTTP error responses.

    This decorator intercepts any storage-backend exceptions and translates
    them into a matching HTTPError subclass.
    """
    try:
        return viewfunc(request)
    except ConflictError:
        headers = {"Retry-After": str(RETRY_AFTER)}
        raise HTTPConflict(headers=headers)
    except NotFoundError:
        raise HTTPNotFound
    except InvalidOffsetError:
        raise json_error(400, "error",
                         [{
                             "location": "querystring",
                             "name": "offset",
                             "description": "Invalid value for offset",
                         }])
Example no. 9
def put_item(request):
    storage = request.validated["storage"]
    userid = request.validated["userid"]
    collection = request.validated["collection"]
    item = request.validated["item"]
    bso = request.validated["bso"]

    # A PUT request is a complete re-write of the item.
    # A payload must be specified, and any other missing fields are
    # explicitly set to their default value.
    if "payload" not in bso:
        raise json_error(400, "error", [{
            "location": "body",
            "name": "bso",
            "description": "BSO must specify a payload",
        }])

    for field in FIELD_DEFAULTS:
        if field not in bso:
            bso[field] = FIELD_DEFAULTS[field]

    res = storage.set_item(userid, collection, item, bso)
    request.response.headers["X-Last-Modified-Version"] = str(res["version"])
    return res
Example no. 10
def put_item(request):
    storage = request.validated["storage"]
    userid = request.validated["userid"]
    collection = request.validated["collection"]
    item = request.validated["item"]
    bso = request.validated["bso"]

    # A PUT request is a complete re-write of the item.
    # A payload must be specified, and any other missing fields are
    # explicitly set to their default value.
    if "payload" not in bso:
        raise json_error(400, "error", [{
            "location": "body",
            "name": "bso",
            "description": "BSO must specify a payload",
        }])

    for field in FIELD_DEFAULTS:
        if field not in bso:
            bso[field] = FIELD_DEFAULTS[field]

    res = storage.set_item(userid, collection, item, bso)
    request.response.headers["X-Last-Modified-Version"] = str(res["version"])
    return res
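
put_item treats a PUT as a complete rewrite of the item, so any optional field missing from the incoming BSO is reset via the FIELD_DEFAULTS loop. The snippet below isolates that behaviour; the default values shown are placeholders for illustration, not the real FIELD_DEFAULTS mapping.

# Placeholder defaults, only to illustrate the fill-in loop above.
FIELD_DEFAULTS = {
    "sortindex": 0,
    "ttl": None,
}

def apply_field_defaults(bso):
    """Return a copy of `bso` with any missing optional fields defaulted."""
    filled = dict(bso)
    for field, default in FIELD_DEFAULTS.items():
        if field not in filled:
            filled[field] = default
    return filled

# A PUT body that only carries a payload gets the other fields reset.
assert apply_field_defaults({"payload": "xyz"}) == {
    "payload": "xyz",
    "sortindex": 0,
    "ttl": None,
}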
Example no. 11
def extract_batch_state(request):
    """Validator to extract the batch state of a request for slightly
    tidier code in the views.

    If the "batch" parameter is has no value or has a value of "true" then
    a new batch will be created.

    If the "commit" parameter is has a value of "true", this batch
    is to be committed and deleted.
    """
    request.validated["batch"] = False
    batch_id = request.GET.get("batch")
    if batch_id is not None:
        if TRUE_REGEX.match(batch_id):
            batch_id = True
        else:
            try:
                batch_id = int(b64decode(batch_id))
            except TypeError:
                try:
                    batch_id = int(batch_id)
                except ValueError:
                    msg = "Invalid batch ID: \"%s\"" % (batch_id, )
                    request.errors.add("batch", "id", msg)
        request.validated["batch"] = batch_id
    elif batch_id is None and "batch" in request.GET:
        request.validated["batch"] = True

    request.validated["commit"] = False
    commit = request.GET.get("commit")
    if commit is not None:
        if TRUE_REGEX.match(commit):
            request.validated["commit"] = True
        else:
            msg = "commit parameter must be \"true\" to apply batches"
            request.errors.add("batch", "commit", msg)

    # If batch uploads are not enabled in the config then
    # we want to:
    #  * silently ignore attempts to start a new batch, which
    #    will cause clients to fall back to non-batch mode.
    #  * error out on attempts to continue an existing batch,
    #    since we can't possibly do what the client expects.
    settings = request.registry.settings
    if not settings.get("storage.batch_upload_enabled", False):
        if request.validated["batch"]:
            if request.validated["batch"] is not True:
                request.errors.add("batch", "id", "Batch uploads disabled")

    LIMITS = (
        ("X-Weave-Records", "max_post_records"),
        ("X-Weave-Bytes", "max_post_bytes"),
        ("X-Weave-Total-Records", "max_total_records"),
        ("X-Weave-Total-Bytes", "max_total_bytes"),
    )
    for (header, setting) in LIMITS:
        try:
            count = int(request.headers[header])
        except ValueError:
            msg = "Invalid integer value: %s" % (request.headers[header], )
            request.errors.add("header", header, msg)
            continue
        except KeyError:
            continue
        if count > get_limit_config(request, setting):
            raise json_error(400, "size-limit-exceeded")
def extract_batch_state(request):
    """Validator to extract the batch state of a request for slightly
    tidier code in the views.

    If the "batch" parameter is has no value or has a value of "true" then
    a new batch will be created.

    If the "commit" parameter is has a value of "true", this batch
    is to be committed and deleted.
    """
    request.validated["batch"] = False
    batch_id = request.GET.get("batch")
    if batch_id is not None:
        if TRUE_REGEX.match(batch_id):
            batch_id = True
        else:
            try:
                batch_id = int(b64decode(batch_id))
            except TypeError:
                try:
                    batch_id = int(batch_id)
                except ValueError:
                    msg = "Invalid batch ID: \"%s\"" % (batch_id,)
                    request.errors.add("batch", "id", msg)
        request.validated["batch"] = batch_id
    elif batch_id is None and "batch" in request.GET:
        request.validated["batch"] = True

    request.validated["commit"] = False
    commit = request.GET.get("commit")
    if commit is not None:
        if TRUE_REGEX.match(commit):
            request.validated["commit"] = True
        else:
            msg = "commit parameter must be \"true\" to apply batches"
            request.errors.add("batch", "commit", msg)

    # If batch uploads are not enabled in the config then
    # we want to:
    #  * silently ignore attempts to start a new batch, which
    #    will cause clients to fall back to non-batch mode.
    #  * error out on attempts to continue an existing batch,
    #    since we can't possibly do what the client expects.
    settings = request.registry.settings
    if not settings.get("storage.batch_upload_enabled", False):
        if request.validated["batch"]:
            if request.validated["batch"] is not True:
                request.errors.add("batch", "id", "Batch uploads disabled")

    LIMITS = (
      ("X-Weave-Records", "max_post_records"),
      ("X-Weave-Bytes", "max_post_bytes"),
      ("X-Weave-Total-Records", "max_total_records"),
      ("X-Weave-Total-Bytes", "max_total_bytes"),
    )
    for (header, setting) in LIMITS:
        try:
            count = int(request.headers[header])
        except ValueError:
            msg = "Invalid integer value: %s" % (request.headers[header],)
            request.errors.add("header", header, msg)
            continue
        except KeyError:
            continue
        if count > get_limit_config(request, setting):
            raise json_error(400, "size-limit-exceeded")
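
Both copies of extract_batch_state finish by checking the client-declared X-Weave-* size headers against configured caps before any upload data is processed. The sketch below replays that loop in isolation; the plain settings dict stands in for whatever get_limit_config consults in the real code, which is an assumption made for illustration.

LIMITS = (
    ("X-Weave-Records", "max_post_records"),
    ("X-Weave-Bytes", "max_post_bytes"),
    ("X-Weave-Total-Records", "max_total_records"),
    ("X-Weave-Total-Bytes", "max_total_bytes"),
)

def check_declared_limits(headers, settings):
    """Standalone sketch of the header/limit loop above.

    Returns a list of (header, problem) pairs; an empty list means the
    declared sizes are acceptable.
    """
    problems = []
    for header, setting in LIMITS:
        if header not in headers:
            continue
        try:
            count = int(headers[header])
        except ValueError:
            problems.append((header, "invalid-integer"))
            continue
        if count > settings[setting]:
            problems.append((header, "size-limit-exceeded"))
    return problems

# A POST declaring more records than the configured cap:
settings = {"max_post_records": 100, "max_post_bytes": 2 * 1024 * 1024,
            "max_total_records": 10000, "max_total_bytes": 100 * 1024 * 1024}
assert check_declared_limits({"X-Weave-Records": "250"}, settings) == \
    [("X-Weave-Records", "size-limit-exceeded")]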