Example #1
    from google.appengine.api import memcache
    from google.appengine.ext import ndb

    import json


    def matching(klass, query, offset=0, limit=None):
        """Yields entities matching the given search query, newest first."""

        # With no usable search terms, walk every entity in posted order.
        words = tokenize(query)
        if not words:
            keys = klass.query().order(-klass.posted_at).iter(keys_only=True)
            for batch in grouper(keys, n=12):
                for entity in utils.get_keys([key for key in batch if key]):
                    if entity and entity._keywords():
                        yield entity
            return

        # Check the cache for each keyword's list of matching keys.
        results = memcache.get_multi(words, key_prefix=FTS)
        matches = None
        writeback = {}

        # Fill anything missing from the initial fetch.
        for word in words:
            if word in results:
                urlsafe = json.loads(results[word])
                keys = [ndb.Key(urlsafe=val) for val in urlsafe]
            else:
                word_query = klass.query(klass.keywords == word)
                keys = word_query.order(-klass.posted_at).fetch(keys_only=True)
                writeback[word] = json.dumps([key.urlsafe() for key in keys])

            # Intersect with the running match set. Compare against None:
            # an empty intersection is falsy and must not restart the set.
            matches = (matches & set(keys)) if matches is not None else set(keys)

        # Write back any freshly computed entries; cache for one week.
        memcache.set_multi(writeback, key_prefix=FTS, time=(3600 * 24 * 7))

        # Keep only keys present in every word's results, preserving the
        # posted_at ordering of the last word's key list.
        keys = [key for key in keys if key in matches]

        for batch in grouper(keys, n=12):
            for entity in utils.get_keys([key for key in batch if key]):
                # Re-check live keywords to drop entries stale in the cache.
                if entity and set(words).issubset(set(entity.keywords)):
                    yield entity
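
The `matching` generator assumes a few helpers that are not part of the snippet: `tokenize`, `grouper`, and the `FTS` key prefix. A minimal sketch of what they might look like, assuming the classic Python 2 App Engine runtime (hence `itertools.izip_longest`) and that `FTS` is simply a memcache key-prefix string; the names and signatures are inferred from the call sites, not confirmed by the source:

    import itertools
    import re

    # Assumed: prefix that namespaces the full-text-search cache entries.
    FTS = 'FTS:'

    def tokenize(query):
        """Splits a free-text query into lower-cased keyword tokens."""
        return [word for word in re.split(r'\W+', query.lower()) if word]

    def grouper(iterable, n):
        """Yields n-sized tuples from iterable, padding the last with None."""
        args = [iter(iterable)] * n
        return itertools.izip_longest(*args)

The `None` padding from `izip_longest` is why the caller filters with `[key for key in batch if key]` before fetching each batch.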
Example #2
    def get_by_id(klass, key_name):
        """Fetches an entity by key name, bypassing the unreliable NDB cache."""
        return utils.get_keys([ndb.Key(klass.__name__, key_name)])[0]
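
`utils.get_keys` itself is not shown. One plausible sketch, assuming its whole purpose is to skip NDB's in-context and memcache layers (both are standard `ndb.get_multi` context options); treat this as an illustration, not the author's actual helper:

    from google.appengine.ext import ndb

    def get_keys(keys):
        """Fetches entities for the given keys, bypassing NDB's caches."""
        # use_cache / use_memcache are standard NDB context options.
        return ndb.get_multi(keys, use_cache=False, use_memcache=False)

Missing entities come back as `None`, which is why callers such as `matching` check `if entity` before yielding.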