Exemplo n.º 1
0
 def _purge_cache(self, user, collection):
     """Drop the cached metadata entry for this user/collection, if any.

     A missing cache backend is a no-op; cache failures are logged and
     swallowed so a purge never propagates an error to the caller.
     """
     if self.cache is None:
         return
     try:
         self.cache.delete(_key(user, collection, "meta"))
     except CacheError:
         logger.error("Unable to delete a cache entry")
 def _purge_cache(self, user, collection):
     """Remove the 'meta' cache entry for (user, collection).

     Does nothing when no cache is configured; a CacheError from the
     backend is logged rather than raised.
     """
     if self.cache is not None:
         cache_key = _key(user, collection, 'meta')
         try:
             self.cache.delete(cache_key)
         except CacheError:
             logger.error('Unable to delete a cache entry')
Exemplo n.º 3
0
    def _set_cached_metadata(self, session, user, collection, data, etag):
        """Persist the collection metadata and mirror it into the cache.

        The document is written through the session with an *etag* guard
        (conditional update); the cache write is best-effort and a
        CacheError is only logged, never raised.
        """
        doc = session.set(collection + "::meta", json.dumps(data),
                          if_match=etag)

        if self.cache is None:
            return
        try:
            self.cache.set(_key(user, collection, "meta"), doc,
                           self.cache_ttl)
        except CacheError:
            logger.error("Unable to write the metadata in the cache.")
    def _set_cached_metadata(self, session, user, collection, data, etag):
        """Write the collection metadata document and refresh the cache.

        The session write is guarded by *etag* (conditional update); the
        subsequent cache write is best-effort and never raises.
        """
        key = collection + "::meta"
        doc = session.set(key, json.dumps(data), if_match=etag)

        if self.cache is not None:
            cache_key = _key(user, collection, 'meta')
            try:
                # Cache the stored document for self.cache_ttl seconds.
                self.cache.set(cache_key, doc, self.cache_ttl)
            except CacheError:
                # Cache failures must not break the write path.
                logger.error('Unable to write the metadata in the cache.')
    def add_applications(self, user, collection, applications, token):
        """Insert or update *applications* in the user's collection.

        Checks the token, clears any pending deletion marker on the
        collection, ensures the collection has a uuid, then upserts each
        application keyed by its 'origin'.
        """
        self._check_token(token)
        # Drop the "deleted" tombstone if the collection was marked deleted.
        res = self._execute(queries.IS_DEL, user=user, collection=collection)
        deleted = res.fetchone()
        res.close()
        if deleted is not None:
            self._execute(queries.REMOVE_DEL, user=user, collection=collection)

        now = int(round_time() * 100)

        # let's see if we have an uuid
        res = self._execute(queries.GET_UUID, user=user, collection=collection)
        res = res.fetchone()
        if res is None:
            # we need to create one
            uuid = '%s-%s' % (now, collection)
            self._execute(queries.ADD_UUID,
                          user=user,
                          collection=collection,
                          uuid=uuid)
        else:
            uuid = res.uuid

        # the *real* storage will do bulk inserts of course
        for app in applications:
            origin = app['origin']
            # Serialize once per application instead of up to three times.
            app_data = json.dumps(app)
            res = self._execute(queries.GET_BY_ORIGIN_QUERY,
                                user=user,
                                collection=collection,
                                origin=origin)
            res = res.fetchone()
            if res is None:
                self._execute(queries.PUT_QUERY,
                              user=user,
                              collection=collection,
                              last_modified=now,
                              data=app_data,
                              origin=origin)
            else:
                ## FIXME: for debugging
                if res.data == app_data:
                    ## This is a logic error on the client:
                    logger.error(('Bad attempt to update an application '
                                  ' to overwrite itself: %r') % origin)

                self._execute(queries.UPDATE_BY_ORIGIN_QUERY,
                              user=user,
                              collection=collection,
                              id=res.id,
                              data=app_data,
                              last_modified=now)
Exemplo n.º 6
0
    def add_applications(self, user, collection, applications, token):
        """Insert or update *applications* in the user's collection.

        Checks the token, clears any pending deletion marker, ensures the
        collection has a uuid, then upserts each application by 'origin'.
        """
        self._check_token(token)
        # Drop the "deleted" tombstone if the collection was marked deleted.
        res = self._execute(queries.IS_DEL, user=user, collection=collection)
        deleted = res.fetchone()
        res.close()
        if deleted is not None:
            self._execute(queries.REMOVE_DEL, user=user, collection=collection)

        now = int(round_time() * 100)

        # let's see if we have an uuid
        res = self._execute(queries.GET_UUID, user=user,
                            collection=collection)
        res = res.fetchone()
        if res is None:
            # we need to create one
            uuid = '%s-%s' % (now, collection)
            self._execute(queries.ADD_UUID, user=user,
                          collection=collection, uuid=uuid)
        else:
            uuid = res.uuid

        # the *real* storage will do bulk inserts of course
        for app in applications:
            origin = app['origin']
            # Upsert: update when an app with this origin exists, else insert.
            res = self._execute(queries.GET_BY_ORIGIN_QUERY, user=user,
                                collection=collection, origin=origin)
            res = res.fetchone()
            if res is None:
                self._execute(queries.PUT_QUERY, user=user,
                              collection=collection,
                              last_modified=now, data=json.dumps(app),
                              origin=app['origin'])
            else:
                ## FIXME: for debugging
                if res.data == json.dumps(app):
                    ## This is a logic error on the client:
                    logger.error(('Bad attempt to update an application '
                                  ' to overwrite itself: %r') % app['origin'])

                self._execute(queries.UPDATE_BY_ORIGIN_QUERY, user=user,
                              collection=collection,
                              id=res.id, data=json.dumps(app),
                              last_modified=now)
Exemplo n.º 7
0
    def _get_cached_metadata(self, session, user, collection):
        """Return the collection metadata document, preferring the cache.

        A cache read error is treated as a miss; on a miss the document
        is fetched from the session and written back to the cache.
        Cache errors are logged and never raised.
        """
        have_cache = self.cache is not None
        if have_cache:
            cache_key = _key(user, collection, "meta")
            cached = None
            try:
                cached = self.cache.get(cache_key)
            except CacheError:
                logger.error("Unable to read the metadata in the cache.")
            if cached is not None:
                return cached

        # Cache miss (or no cache) -- read the meta document directly.
        doc = session.getitem(collection + "::meta")

        if have_cache:
            try:
                self.cache.set(cache_key, doc, time=self.cache_ttl)
            except CacheError:
                logger.error("Was unable to cache the metadata.")

        return doc
    def _get_cached_metadata(self, session, user, collection):
        """Return the collection metadata document, cache first.

        Cache read/write failures are logged and treated as misses; the
        session-stored document is the source of truth.
        """
        # getting the cached value if possible
        if self.cache is not None:
            cache_key = _key(user, collection, 'meta')
            try:
                cached = self.cache.get(cache_key)
            except CacheError:
                logger.error('Unable to read the metadata in the cache.')
                cached = None

            if cached is not None:
                return cached

        # getting the meta document.
        doc = session.getitem(collection + "::meta")

        # set the cache
        if self.cache is not None:
            # NOTE: cache_key is only bound when self.cache is not None,
            # which is exactly the condition for reaching this branch.
            try:
                self.cache.set(cache_key, doc, time=self.cache_ttl)
            except CacheError:
                logger.error('Was unable to cache the metadata.')

        return doc
def check_auth(request):
    """Controls the Authorization header and returns the username and the
    collection.

    Raises a 401 in these cases:

    - If the header is not present or unrecognized
    - If the request path is not *owned* by that user
    - the database token

    The header is of the form:

        AppSync b64(assertion):b64(username):b64(token)

    """
    user = request.matchdict['user']
    collection = request.matchdict['collection']
    auth = request.environ.get('HTTP_AUTHORIZATION')
    mock_browserid = request.registry.get('mock_browserid')
    if mock_browserid:
        # Testing shortcut: trust the URL without verifying anything.
        return user, collection, None

    if auth is None:
        raise HTTPUnauthorized('No authorization provided')

    if not auth.startswith('AppSync '):
        logger.error('Attempted auth with bad type (not AppSync): %r' % auth)
        raise HTTPUnauthorized('Invalid token; expected Authorization type '
                               'AppSync')

    # Split the "b64:b64:b64" payload and decode it exactly once.
    # (A previous revision split and decoded the header twice.)
    auth = auth[len('AppSync '):].strip()
    auth_part = auth.split(':')
    if len(auth_part) != 3:
        logger.error('Attempted auth with bad value (not x:y:z): %r' % auth)
        raise HTTPUnauthorized('Invalid token; invalid format')

    try:
        auth_part = [b64dec(part) for part in auth_part]
    except (binascii.Error, ValueError), e:
        logger.error('Attempted auth with invalid base64 content'
                     ': %r (%s)' % (auth, e))
        raise HTTPUnauthorized('Invalid token: invalid base64 encoding')

    assertion, username, dbtoken = auth_part

    # let's reject the call if the url is not owned by the user
    if user != username:
        logger.error('Attempted auth for user=%r for collection '
                     'user=%r' % (username, user))
        raise HTTPUnauthorized('Invalid user')

    # need to verify the user signature here
    # XXX
    return user, collection, dbtoken


def create_auth(assertion, username, token):
    """Build the 'AppSync' Authorization header value.

    Each component is base64-encoded and the three parts are joined
    with ':' -- the inverse of what check_auth expects.
    """
    encoded = (b64enc(assertion), b64enc(username), b64enc(token))
    return 'AppSync %s:%s:%s' % encoded
def get_data(request):
    """After authenticating with the server and getting back the URL of the
    collection, request::

        GET /collections/{user}/{collection}?since=timestamp

    `since` is optional; on first sync is should be empty or left off. The
    server will return an object::

        {until: timestamp,
         uuid: someuniquevalue,  # using a timestamp XXXX
         incomplete: bool, applications: {origin: {...},
                                                   ...} }

    The response may not be complete if there are too many applications.
    If this is the case then `incomplete` will be true (it may be left out
    if
    the response is complete).  Another request using `since={until}` will
    get further applications (this may need to be repeated many times).

    The client should save the value of `until` and use it for subsequent
    requests.

    In the case of no new items the response will be only::

        {until: timestamp}

    The client should always start with this GET request and only then send
    its own updates.  It should ensure that its local timestamp is
    sensible in comparison to the value of `until`.

    Applications returned may be older than the local applications, in that
    case then the client should ignore the server's application and use
    its local copy, causing an overwrite.  The same is true for deleted
    applications; if the local installed copy has a `last_modified` date
    newer than the deleted server instance then the server instance
    should be ignored (the user reinstalled an application).

    **NOTE:** there are some conflicts that may occur, specifically
    receipts should be merged.

    When an application is added from the server the client should
    *locally* set `app.sync` to true (this is in the [application
    representation]
    (https://developer.mozilla.org/en/OpenWebApps/The_JavaScript_API
    #Application_Representation), not the manifest).

    You must always retain `last_modified` as you received it from
    the server (unless you ignore the application in favor of a
    newer local version).
    """
    user, collection, dbtoken = check_auth(request)

    try:
        since = request.GET.get('since', '0')
        since = round_time(since)
    except TypeError:
        raise bad_request(INVALID_SINCE_VALUE)
    except ValueError:
        logger.error('Bad since %r' % since)
        raise bad_request(INVALID_SINCE_VALUE,
                          'Invalid value for since: %r' % since)

    if since.is_nan():
        raise bad_request(INVALID_SINCE_VALUE, 'Got NaN value for since')

    storage = get_storage(request)

    res = {'since': since, 'uuid': storage.get_uuid(user, collection, dbtoken)}
    until = 0
    apps = []
    try:
        for index, (last_modified, app) in enumerate(
                storage.get_applications(user,
                                         collection,
                                         since,
                                         token=dbtoken)):
            if last_modified > until:
                until = last_modified

            apps.append(app)

        res['applications'] = apps
        if not until:
            until = since
        res['until'] = until

    except CollectionDeletedError, e:
        return {
            'collection_deleted': {
                'reason': e.reason,
                'client_id': e.client_id
            }
        }
    except CollectionDeletedError, e:
        return {
            'collection_deleted': {
                'reason': e.reason,
                'client_id': e.client_id
            }
        }

    # do we want to add a X-Sync-Poll ?
    cache = get_cache(request)
    if cache is not None:
        try:
            poll_interval = cache.get('X-Sync-Poll')
        except CacheError, e:
            # a well, nevermind then
            logger.error(str(e))
        else:
            if poll_interval is not None:
                request.response.headers['X-Sync-Poll'] = str(poll_interval)

    return res


@data.post()
def post_data(request):
    """The client should keep track of the last time it sent updates to the
    server, and send updates when there are newer applications.

    **NOTE:** there is a case when an update might be lost because of an
    update from another device; this would be okay except that the client
    doesn't know it needs to re-send that update.  How do we confirm that ?

def execute_retry(engine, *args, **kwargs):
    # Execute a statement, retrying once when the error message contains
    # '2013' -- presumably MySQL's "Lost connection to MySQL server during
    # query" error; TODO confirm against the deployed backend.
    try:
        return engine.execute(*args, **kwargs)
    except (OperationalError, TimeoutError), exc:
        # Decide whether this failure is worth one retry.
        retry = '2013' in str(exc)
    try:
        if retry:
            # Second (and last) attempt.
            return engine.execute(*args, **kwargs)
        else:
            # re-raise
            raise exc
    except (OperationalError, TimeoutError), exc:
        # Both paths failed: log the traceback and surface a BackendError.
        err = traceback.format_exc()
        logger.error(err)
        raise BackendError(str(exc))


class SQLDatabase(object):
    # SQL-backed implementation of the IAppSyncDatabase interface.
    implements(IAppSyncDatabase)

    def __init__(self, **options):
        # Resolve the assertion verifier: default to a remote verifier
        # (vep.RemoteVerifier -- presumably BrowserID/VEP; confirm),
        # otherwise accept a dotted name, a factory, or an instance.
        verifier = options.pop("verifier", None)
        if verifier is None:
            verifier = vep.RemoteVerifier()
        else:
            verifier = maybe_resolve_name(verifier)
            if callable(verifier):
                # A class/factory was configured -- instantiate it.
                verifier = verifier()
        self._verifier = verifier
Exemplo n.º 14
0
def get_data(request):
    """After authenticating with the server and getting back the URL of the
    collection, request::

        GET /collections/{user}/{collection}?since=timestamp

    `since` is optional; on first sync is should be empty or left off. The
    server will return an object::

        {until: timestamp,
         uuid: someuniquevalue,  # using a timestamp XXXX
         incomplete: bool, applications: {origin: {...},
                                                   ...} }

    The response may not be complete if there are too many applications.
    If this is the case then `incomplete` will be true (it may be left out
    if
    the response is complete).  Another request using `since={until}` will
    get further applications (this may need to be repeated many times).

    The client should save the value of `until` and use it for subsequent
    requests.

    In the case of no new items the response will be only::

        {until: timestamp}

    The client should always start with this GET request and only then send
    its own updates.  It should ensure that its local timestamp is
    sensible in comparison to the value of `until`.

    Applications returned may be older than the local applications, in that
    case then the client should ignore the server's application and use
    its local copy, causing an overwrite.  The same is true for deleted
    applications; if the local installed copy has a `last_modified` date
    newer than the deleted server instance then the server instance
    should be ignored (the user reinstalled an application).

    **NOTE:** there are some conflicts that may occur, specifically
    receipts should be merged.

    When an application is added from the server the client should
    *locally* set `app.sync` to true (this is in the [application
    representation]
    (https://developer.mozilla.org/en/OpenWebApps/The_JavaScript_API
    #Application_Representation), not the manifest).

    You must always retain `last_modified` as you received it from
    the server (unless you ignore the application in favor of a
    newer local version).
    """
    # check_auth raises HTTPUnauthorized on any auth failure.
    user, collection, dbtoken = check_auth(request)

    # 'since' defaults to '0' (full sync) and must parse as a time value.
    try:
        since = request.GET.get('since', '0')
        since = round_time(since)
    except TypeError:
        raise bad_request(INVALID_SINCE_VALUE)
    except ValueError:
        logger.error('Bad since %r' % since)
        raise bad_request(INVALID_SINCE_VALUE,
                          'Invalid value for since: %r' % since)

    if since.is_nan():
        raise bad_request(INVALID_SINCE_VALUE,
                          'Got NaN value for since')

    storage = get_storage(request)

    res = {'since': since,
           'uuid': storage.get_uuid(user, collection, dbtoken)}
    until = 0
    apps = []
    try:
        # 'until' tracks the newest last_modified among returned apps.
        for index, (last_modified, app) in enumerate(
                storage.get_applications(user, collection, since,
                                         token=dbtoken)):
            if last_modified > until:
                until = last_modified

            apps.append(app)

        res['applications'] = apps
        if not until:
            # No new applications: echo back the client's 'since'.
            until = since
        res['until'] = until

    except CollectionDeletedError, e:
        return {'collection_deleted': {'reason': e.reason,
                                       'client_id': e.client_id}}
Exemplo n.º 15
0
        if not until:
            until = since
        res['until'] = until

    except CollectionDeletedError, e:
        return {'collection_deleted': {'reason': e.reason,
                                       'client_id': e.client_id}}

    # do we want to add a X-Sync-Poll ?
    cache = get_cache(request)
    if cache is not None:
        try:
            poll_interval = cache.get('X-Sync-Poll')
        except CacheError, e:
            # a well, nevermind then
            logger.error(str(e))
        else:
            if poll_interval is not None:
                request.response.headers['X-Sync-Poll'] = str(poll_interval)

    return res


@data.post()
def post_data(request):
    """The client should keep track of the last time it sent updates to the
    server, and send updates when there are newer applications.

    **NOTE:** there is a case when an update might be lost because of an
    update from another device; this would be okay except that the client
    doesn't know it needs to re-send that update.  How do we confirm that ?
Exemplo n.º 16
0

def execute_retry(engine, *args, **kwargs):
    try:
        return engine.execute(*args, **kwargs)
    except (OperationalError, TimeoutError), exc:
        retry = '2013' in str(exc)
    try:
        if retry:
            return engine.execute(*args, **kwargs)
        else:
            # re-raise
            raise exc
    except (OperationalError, TimeoutError), exc:
        err = traceback.format_exc()
        logger.error(err)
        raise BackendError(str(exc))


class SQLDatabase(object):
    """SQL-backed implementation of the app-sync database interface."""
    implements(IAppSyncDatabase)

    def __init__(self, **options):
        # "verifier" may be absent (use the default remote verifier), a
        # dotted name, a class/factory, or a ready-made instance.
        configured = options.pop("verifier", None)
        if configured is None:
            self._verifier = vep.RemoteVerifier()
        else:
            resolved = maybe_resolve_name(configured)
            if callable(resolved):
                # A class/factory was configured -- instantiate it.
                resolved = resolved()
            self._verifier = resolved