def is_retryable_db_error(engine, exc):
    """Check whether we can safely retry in response to the given db error."""
    # Errors that invalidated the connection are always safe to retry.
    if exc.connection_invalidated:
        return True
    # Pull the MySQL error number via a private SQLAlchemy dialect API.
    # The AttributeError fallback also covers non-MySQL backends.
    try:
        code = engine.dialect._extract_error_code(exc.orig)
    except AttributeError:
        # Not MySQL (or the private API changed); assume non-retryable.
        # Better safe than sorry.
        return False
    if code == 1032:
        # Sporadic production bug with TokuDB: an INSERT ON DUPLICATE KEY
        # UPDATE sometimes errors out with:
        #    1032: could not find record in table
        # Flag such errors in the request metrics log so we can easily
        # track which requests are triggering the error.
        # See https://bugzilla.mozilla.org/show_bug.cgi?id=1057892
        annotate_request(None, "syncstorage.storage.sql.tokudb_error", 1)
    # MySQL lock-related errors that can be safely retried:
    #    1205: lock wait timeout exceeded
    #    1206: lock table full
    #    1213: deadlock found when trying to get lock
    #    1689: lock aborted
    # The TokuDB ON DUPLICATE KEY error noted above is also retried.
    # Anything else is assumed not to be retryable.
    return code in (1205, 1206, 1213, 1689, 1032)
示例#2
0
def is_retryable_db_error(engine, exc):
    """Check whether we can safely retry in response to the given db error."""
    # Any connection-related errors can be safely retried.
    if exc.connection_invalidated:
        return True
    # Try to get the MySQL error number through a private dialect API.
    # The AttributeError handler doubles as the "not running MySQL" path.
    retryable = False
    try:
        mysql_code = engine.dialect._extract_error_code(exc.orig)
    except AttributeError:
        pass
    else:
        if mysql_code == 1032:
            # We're seeing a sporadic bug in production with TokuDB, where
            # an INSERT ON DUPLICATE KEY UPDATE will sometimes error out
            # with "1032: could not find record in table".  Annotate the
            # request metrics log so we can easily track which requests
            # are triggering the error.
            # See https://bugzilla.mozilla.org/show_bug.cgi?id=1057892
            annotate_request(None, "syncstorage.storage.sql.tokudb_error", 1)
        # Lock-related MySQL errors that can be safely retried:
        #    1205: lock wait timeout exceeded
        #    1206: lock table full
        #    1213: deadlock found when trying to get lock
        #    1689: lock aborted
        # plus the TokuDB ON DUPLICATE KEY error described above.
        retryable = mysql_code in (1205, 1206, 1213, 1689, 1032)
    # Any other error is assumed not to be retryable.  Better safe than sorry.
    return retryable
示例#3
0
def check_for_known_bad_payloads(request):
    """Reject specific payloads known to indicate client issues."""
    try:
        # Some clients are not as good at crypto as we'd like.  Look for
        # signals that they might have messed it up, and reject attempts
        # to set /crypto/keys in that case.
        if request.validated.get("collection") == "crypto":
            candidates = []
            if "bsos" in request.validated:
                candidates += request.validated["bsos"]
            if "bso" in request.validated:
                candidates += [request.validated["bso"]]
            for candidate in candidates:
                data = candidate.get("payload")
                if data and KNOWN_BAD_PAYLOAD_REGEX.search(data):
                    raise KnownBadPayloadError
    except KnownBadPayloadError:
        annotate_request(request, __name__ + ".known_bad_payload", 1)
        request.errors.add("body", "bso", "Known-bad BSO payload")
def check_for_known_bad_payloads(request):
    """Reject specific payloads known to indicate client issues."""
    try:
        # Turns out some clients are not as good at crypto as we'd like.
        # Look for any signals that they might have messed it up and
        # reject attempts to set /crypto/keys in that case.
        validated = request.validated
        if validated.get("collection") == "crypto":
            incoming = list(validated["bsos"]) if "bsos" in validated else []
            if "bso" in validated:
                incoming.append(validated["bso"])
            for entry in incoming:
                text = entry.get("payload")
                if text and KNOWN_BAD_PAYLOAD_REGEX.search(text):
                    raise KnownBadPayloadError
    except KnownBadPayloadError:
        annotate_request(request, __name__ + ".known_bad_payload", 1)
        request.errors.add("body", "bso", "Known-bad BSO payload")
示例#5
0
def gethash_view(request):
    """Handle a gethash request; presumably returns full hashes for
    the submitted prefixes — confirm against parse_gethash/lookup_prefixes."""
    try:
        parsed = parse_gethash(request)
    except ParseError as exc:
        annotate_request(request, "shavar.gethash.unknown.format", 1)
        raise HTTPBadRequest(str(exc))

    full = lookup_prefixes(request, parsed)

    # Nothing found?  Return a 204
    if not full:
        return HTTPNoContent()

    # FIXME MAC handling
    pieces = []
    for lname, chunk_data in full.items():
        for chunk_num, hashes in sorted(chunk_data.items()):
            joined = b''.join(hashes)
            header = '{list_name}:{chunk_number}:{data_len}\n'.format(
                list_name=lname, chunk_number=chunk_num,
                data_len=len(joined)).encode()
            pieces.append(header)
            pieces.append(joined)

    return HTTPOk(content_type="application/octet-stream",
                  body=b''.join(pieces))
示例#6
0
def gethash_view(request):
    """Handle a gethash request, rejecting malformed input with a 400."""
    try:
        parsed = parse_gethash(request)
    # Fixed: "except ParseError, e:" is Python 2-only syntax and is a
    # SyntaxError under Python 3; the rest of the file uses "as e".
    except ParseError as e:
        annotate_request(request, "shavar.gethash.unknown.format", 1)
        raise HTTPBadRequest(str(e))
示例#7
0
    resp_payload = {'interval': delay, 'lists': {}}

    try:
        parsed = parse_downloads(request)
    except ParseError, e:
        logger.error(e)
        raise HTTPBadRequest(e)

    for list_info in parsed:
        # Do we even serve that list?
        if list_info.name not in _setting(request, 'shavar', 'lists_served',
                                          tuple()):
            logger.warn('Unknown list "%s" reported; ignoring'
                        % list_info.name)
            annotate_request(request, "shavar.downloads.unknown.list", 1)
            continue
        provider, type_, format_ = list_info.name.split('-', 2)
        if not provider or not type_ or not format_:
            s = 'Unknown list format for "%s"; ignoring' % list_info.name
            logger.error(s)
            annotate_request(request, "shavar.downloads.unknown.format", 1)
            raise HTTPBadRequest(s)

        sblist = get_list(request, list_info.name)

        # Calculate delta
        to_add, to_sub = sblist.delta(list_info.adds, list_info.subs)

        # No delta?  No response, I think.  Spec doesn't actually say.
        if not to_add and not to_sub:
示例#8
0
def downloads_view(request):
    """Serve the downloads response for the lists a client reports.

    Parses the client's downloads request, computes the add/sub chunk
    delta for each served list, and returns the formatted payload as an
    application/octet-stream response.  Raises HTTPBadRequest on
    malformed input and HTTPInternalServerError on configuration errors.
    """
    # Use the new config variable name but support the old one for
    # backwards compatibility.
    default_interval = _setting(request, 'shavar', 'default_interval', None)
    backoff_delay = _setting(request, 'shavar', 'client_backoff_delay', None)

    # Throw a fit if both are specified
    if default_interval is not None and backoff_delay is not None:
        raise ConfigurationError("Specify either default_interval or "
                                 "client_backoff_delay in the [shavar] "
                                 "section of your config but not both.\n"
                                 "client_backoff_delay is preferred.")

    # Default client polling interval: 30 minutes.
    delay = backoff_delay or default_interval or 30 * 60

    resp_payload = {'interval': delay, 'lists': {}}

    try:
        parsed = parse_downloads(request)
    except ParseError as e:
        capture_exception(e)
        # str(e) for consistency with the gethash view's error handling.
        raise HTTPBadRequest(str(e))

    for list_info in parsed:
        # Do we even serve that list?
        if list_info.name not in _setting(request, 'shavar',
                                          'list_names_served', tuple()):
            # logger.warn is a deprecated alias for logger.warning; use
            # lazy %-args instead of eager string interpolation.
            logger.warning('Unknown list "%s" reported; ignoring',
                           list_info.name)
            annotate_request(request, "shavar.downloads.unknown.list", 1)
            continue
        provider, type_, format_ = list_info.name.split('-', 2)
        if not provider or not type_ or not format_:
            s = 'Unknown list format for "%s"; ignoring' % list_info.name
            logger.error(s)
            annotate_request(request, "shavar.downloads.unknown.format", 1)
            raise HTTPBadRequest(s)

        app_ver = str(request.GET['appver'])
        sblist, list_ver = get_list(request, list_info.name, app_ver)

        # Calculate delta
        to_add, to_sub = sblist.delta(list_info.adds, list_info.subs)

        # No delta?  No response, I think.  Spec doesn't actually say.
        if not to_add and not to_sub:
            continue

        # Fetch the appropriate chunks
        resp_payload['lists'][list_info.name] = {
            'sblist': sblist,
            'ldata': sblist.fetch(to_add, to_sub),
            'list_ver': list_ver
        }

        # Not publishing deltas for this list?  Delete all previous chunks to
        # make way for the new corpus
        if sblist.settings.get('not_publishing_deltas'):
            # Raise hell if we have suspicious data with this flag set.
            if len(to_add) != 1 or len(to_sub) != 0:
                # Fixed garbled message ("Mismatch between X's configuration
                # has ...") and the missing space that joined "data"+"file"
                # into "datafile".
                logger.error("Configuration error!  {0} has "
                             "'not_publishing_deltas' enabled but its data "
                             "file has more than one chunk to serve."
                             .format(list_info.name))
                raise HTTPInternalServerError()
            resp_payload['lists'][list_info.name]['adddels'] = list_info.adds

    return HTTPOk(content_type="application/octet-stream",
                  body=format_downloads(request, resp_payload))
示例#9
0
def gethash_view(request):
    """Handle a gethash request, rejecting malformed input with a 400."""
    try:
        parsed = parse_gethash(request)
    # Fixed: "except ParseError, e:" is Python 2-only syntax and is a
    # SyntaxError under Python 3; the rest of the file uses "as e".
    except ParseError as e:
        annotate_request(request, "shavar.gethash.unknown.format", 1)
        raise HTTPBadRequest(str(e))
示例#10
0
    resp_payload = {'interval': delay, 'lists': {}}

    try:
        parsed = parse_downloads(request)
    # Fixed: "except ParseError, e:" is Python 2-only syntax (SyntaxError
    # under Python 3); the sibling snippets in this file use "as e".
    except ParseError as e:
        logger.error(e)
        raise HTTPBadRequest(e)

    for list_info in parsed:
        # Do we even serve that list?
        if list_info.name not in _setting(
            request, 'shavar', 'list_names_served', tuple()
        ):
            logger.warn('Unknown list "%s" reported; ignoring'
                        % list_info.name)
            annotate_request(request, "shavar.downloads.unknown.list", 1)
            continue
        provider, type_, format_ = list_info.name.split('-', 2)
        if not provider or not type_ or not format_:
            s = 'Unknown list format for "%s"; ignoring' % list_info.name
            logger.error(s)
            annotate_request(request, "shavar.downloads.unknown.format", 1)
            raise HTTPBadRequest(s)

        app_ver = str(request.GET['appver'])
        sblist, list_ver = get_list(request, list_info.name, app_ver)

        # Calculate delta
        to_add, to_sub = sblist.delta(list_info.adds, list_info.subs)

        # No delta?  No response, I think.  Spec doesn't actually say.