Code example #1
File: commands.py Project: biospin/papaya
    def Run(self, limit=None, offset=None):
        assert not self.query._Query__ancestor_pb #FIXME: We don't handle this yet

        opts = self.query._Query__query_options

        results = None

        #If we have a single key lookup going on, just hit the cache
        if len(self.keys) == 1:
            ret = caching.get_from_cache_by_key(self.keys[0])
            if ret is not None:
                results = [ret]

        #If there was nothing in the cache, or we had more than one key, then use Get()
        if results is None:
            results = sorted((x for x in datastore.Get(self.keys) if x is not None), cmp=partial(utils.django_ordering_comparison, self.ordering))

        results = [
            _convert_entity_based_on_query_options(x, opts)
            for x in results if utils.entity_matches_query(x, self.query)
        ]

        if offset:
            results = results[offset:]

        if limit is not None:
            results = results[:limit]

        return iter(results)
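
Note that these snippets are Python 2 code: sorted() is given a cmp= comparator built with partial(utils.django_ordering_comparison, self.ordering), a keyword that no longer exists in Python 3. Below is a minimal, self-contained sketch of the Python 3 spelling via functools.cmp_to_key; the comparator here is a stand-in, not the djangae helper.

from functools import cmp_to_key

def ordering_comparison(lhs, rhs):
    # Stand-in for partial(utils.django_ordering_comparison, self.ordering):
    # any function returning negative / zero / positive works here.
    return (lhs > rhs) - (lhs < rhs)

entities = [3, 1, 2]
# Python 3 equivalent of sorted(entities, cmp=ordering_comparison)
print(sorted(entities, key=cmp_to_key(ordering_comparison)))  # [1, 2, 3]
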
Code example #2
    def Run(self, limit=None, offset=None):
        assert not self.queries[
            0]._Query__ancestor_pb  #FIXME: We don't handle this yet

        # FIXME: What if the query options differ?
        opts = self.queries[0]._Query__query_options

        results = None

        # If we have a single key lookup going on, just hit the cache
        if len(self.queries_by_key) == 1:
            keys = self.queries_by_key.keys()
            ret = caching.get_from_cache_by_key(keys[0])
            if ret is not None:
                results = [ret]

        # If there was nothing in the cache, or we had more than one key, then use Get()
        if results is None:
            keys = self.queries_by_key.keys()
            results = datastore.Get(keys)
            for result in results:
                if result is None:
                    continue
                caching.add_entity_to_cache(
                    self.model, result, caching.CachingSituation.DATASTORE_GET)
            results = sorted((x for x in results if x is not None),
                             cmp=partial(utils.django_ordering_comparison,
                                         self.ordering))

        results = [
            _convert_entity_based_on_query_options(x, opts) for x in results
            if any([
                utils.entity_matches_query(x, qry)
                for qry in self.queries_by_key[x.key()]
            ])
        ]

        if offset:
            results = results[offset:]

        if limit is not None:
            results = results[:limit]

        return iter(results)
Code example #3
File: commands.py Project: awizzard/djangae
    def Run(self, limit=None, offset=None):
        assert not self.queries[0]._Query__ancestor_pb #FIXME: We don't handle this yet

        # FIXME: What if the query options differ?
        opts = self.queries[0]._Query__query_options

        results = None

        # If we have a single key lookup going on, just hit the cache
        if len(self.queries_by_key) == 1:
            keys = self.queries_by_key.keys()
            ret = caching.get_from_cache_by_key(keys[0])
            if ret is not None:
                results = [ret]

        # If there was nothing in the cache, or we had more than one key, then use Get()
        if results is None:
            keys = self.queries_by_key.keys()
            results = datastore.Get(keys)
            for result in results:
                if result is None:
                    continue
                caching.add_entity_to_cache(self.model, result, caching.CachingSituation.DATASTORE_GET)
            results = sorted((x for x in results if x is not None), cmp=partial(utils.django_ordering_comparison, self.ordering))

        results = [
            _convert_entity_based_on_query_options(x, opts)
            for x in results if any([ utils.entity_matches_query(x, qry) for qry in self.queries_by_key[x.key()]])
        ]

        if offset:
            results = results[offset:]

        if limit is not None:
            results = results[:limit]

        return iter(results)
Code example #4
File: commands.py Project: jscissr/djangae
    def Run(self, limit=None, offset=None):
        """
            Here are the options:

            1. Single key, hit memcache
            2. Multikey projection, async MultiQueries with ancestors chained
            3. Full select, datastore get
        """

        opts = self.queries[0]._Query__query_options
        key_count = len(self.queries_by_key)

        cache = True

        results = None
        if key_count == 1:
            # FIXME: Potentially could use get_multi in memcache and then make a query
            # for whatever remains
            key = self.queries_by_key.keys()[0]
            result = caching.get_from_cache_by_key(key)
            if result is not None:
                results = [result]
                cache = False  # Don't update cache, we just got it from there

        if results is None:
            if opts.projection:
                cache = False  # Don't cache projection results!

                # Assumes projection ancestor queries are faster than a datastore Get
                # due to lower traffic over the RPC. This should be faster for queries with
                # < 30 keys (which is the most common case), and faster if the entities are
                # larger and there are many results, but there is probably a slower middle ground
                # because of the larger number of RPC calls. Still, if performance is an issue the
                # user can just do a normal get() rather than values/values_list/only/defer

                to_fetch = (offset or 0) + limit if limit else None
                additional_cols = set([x[0] for x in self.ordering if x[0] not in opts.projection])

                multi_query = []
                final_queries = []
                orderings = self.queries[0]._Query__orderings
                for key, queries in self.queries_by_key.iteritems():
                    for query in queries:
                        if additional_cols:
                            # We need to include additional orderings in the projection so that we can
                            # sort them in memory. Annoyingly that means reinstantiating the queries
                            query = Query(
                                kind=query._Query__kind,
                                filters=query,
                                projection=list(opts.projection) + list(additional_cols),
                            )

                        query.Ancestor(key)  # Make this an ancestor query
                        multi_query.append(query)
                        if len(multi_query) == 30:
                            final_queries.append(datastore.MultiQuery(multi_query, orderings).Run(limit=to_fetch))
                            multi_query = []
                else:
                    if len(multi_query) == 1:
                        final_queries.append(multi_query[0].Run(limit=to_fetch))
                    elif multi_query:
                        final_queries.append(datastore.MultiQuery(multi_query, orderings).Run(limit=to_fetch))

                results = chain(*final_queries)
            else:
                results = datastore.Get(self.queries_by_key.keys())

        def iter_results(results):
            returned = 0
            for result in sorted(results, cmp=partial(utils.django_ordering_comparison, self.ordering)):
                if result is None:
                    continue

                if not any([utils.entity_matches_query(result, qry) for qry in self.queries_by_key[result.key()]]):
                    continue

                if offset and returned < offset:
                    # Skip entities based on offset
                    returned += 1
                    continue
                else:
                    if cache:
                        caching.add_entity_to_cache(self.model, result, caching.CachingSituation.DATASTORE_GET)

                    yield _convert_entity_based_on_query_options(result, opts)

                    returned += 1

                    # If there is a limit, we might be done!
                    if limit is not None and returned == (offset or 0) + limit:
                        break

        return iter_results(results)
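
The projection branch above flushes multi_query every time it reaches 30 sub-queries and runs each batch through datastore.MultiQuery. Below is a minimal, self-contained sketch of that chunk-and-run pattern; run_batch is a stand-in for datastore.MultiQuery(batch, orderings).Run(limit=to_fetch).

from itertools import chain, islice

def batches(items, size=30):
    # Yield successive lists of at most `size` items, mirroring the
    # len(multi_query) == 30 flush in the snippet above.
    it = iter(items)
    while True:
        batch = list(islice(it, size))
        if not batch:
            return
        yield batch

def run_all(queries, run_batch):
    # run_batch stands in for datastore.MultiQuery(batch, orderings).Run(...)
    return chain.from_iterable(run_batch(batch) for batch in batches(queries))

# Trivial run_batch that just echoes each batch:
print(list(run_all(range(70), run_batch=list)))  # 0 .. 69, fetched in 3 batches
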
Code example #5
File: meta_queries.py Project: grzes/djangae
    def Run(self, limit=None, offset=None):
        """
            Here are the options:

            1. Single key, hit memcache
            2. Multikey projection, async MultiQueries with ancestors chained
            3. Full select, datastore get
        """

        opts = self.queries[0]._Query__query_options
        key_count = len(self.queries_by_key)

        is_projection = False

        max_cache_count = getattr(settings, "DJANGAE_CACHE_MAX_ENTITY_COUNT", DEFAULT_MAX_ENTITY_COUNT)

        cache_results = True
        results = None
        if key_count == 1:
            # FIXME: Potentially could use get_multi in memcache and then make a query
            # for whatever remains
            key = self.queries_by_key.keys()[0]
            result = caching.get_from_cache_by_key(key)
            if result is not None:
                results = [result]
                cache_results = False # Don't update cache, we just got it from there

        if results is None:
            if opts.projection and self.can_multi_query:
                is_projection = True
                cache_results = False # Don't cache projection results!

                # If we can multi-query in a single query, we do so using a number of
                # ancestor queries (to stay consistent); otherwise, we just do a
                # datastore Get, but this will return extra data over the RPC
                to_fetch = (offset or 0) + limit if limit else None
                additional_cols = set([ x[0] for x in self.ordering if x[0] not in opts.projection])

                multi_query = []
                orderings = self.queries[0]._Query__orderings
                for key, queries in self.queries_by_key.iteritems():
                    for query in queries:
                        if additional_cols:
                            # We need to include additional orderings in the projection so that we can
                            # sort them in memory. Annoyingly that means reinstantiating the queries
                            query = Query(
                                kind=query._Query__kind,
                                filters=query,
                                projection=list(opts.projection) + list(additional_cols),
                                namespace=self.namespace,
                            )

                        query.Ancestor(key) # Make this an ancestor query
                        multi_query.append(query)

                if len(multi_query) == 1:
                    results = multi_query[0].Run(limit=to_fetch)
                else:
                    results = AsyncMultiQuery(multi_query, orderings).Run(limit=to_fetch)
            else:
                results = datastore.Get(self.queries_by_key.keys())

        def iter_results(results):
            returned = 0
            # This is safe, because Django is fetching all results anyway :(
            sorted_results = sorted(results, cmp=partial(utils.django_ordering_comparison, self.ordering))
            sorted_results = [result for result in sorted_results if result is not None]
            if cache_results and sorted_results:
                caching.add_entities_to_cache(
                    self.model,
                    sorted_results[:max_cache_count],
                    caching.CachingSituation.DATASTORE_GET,
                    self.namespace,
                )

            for result in sorted_results:
                if is_projection:
                    entity_matches_query = True
                else:
                    entity_matches_query = any(
                        utils.entity_matches_query(result, qry) for qry in self.queries_by_key[result.key()]
                    )

                if not entity_matches_query:
                    continue

                if offset and returned < offset:
                    # Skip entities based on offset
                    returned += 1
                    continue
                else:

                    yield _convert_entity_based_on_query_options(result, opts)

                    returned += 1

                    # If there is a limit, we might be done!
                    if limit is not None and returned == (offset or 0) + limit:
                        break

        return iter_results(results)
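
This version caps how many entities are written back to the cache by reading DJANGAE_CACHE_MAX_ENTITY_COUNT from Django settings, falling back to DEFAULT_MAX_ENTITY_COUNT. An illustrative override in settings.py (the value is arbitrary, not a documented default):

# settings.py
# Limits how many of the sorted results add_entities_to_cache() receives
# after a datastore Get in this Run() implementation.
DJANGAE_CACHE_MAX_ENTITY_COUNT = 25  # illustrative value
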
Code example #6
    def Run(self, limit=None, offset=None):
        """
            Here are the options:

            1. Single key, hit memcache
            2. Multikey projection, async MultiQueries with ancestors chained
            3. Full select, datastore get
        """
        opts = self.queries[0]._Query__query_options
        key_count = len(self.queries_by_key)

        is_projection = False

        max_cache_count = getattr(settings, "DJANGAE_CACHE_MAX_ENTITY_COUNT",
                                  DEFAULT_MAX_ENTITY_COUNT)

        cache_results = True
        results = None
        if key_count == 1:
            # FIXME: Potentially could use get_multi in memcache and then make a query
            # for whatever remains
            key = self.queries_by_key.keys()[0]
            result = caching.get_from_cache_by_key(key)
            if result is not None:
                results = [result]
                cache_results = False  # Don't update cache, we just got it from there

        if results is None:
            if opts.projection and self.can_multi_query:
                is_projection = True
                cache_results = False  # Don't cache projection results!

                # If we can multi-query in a single query, we do so using a number of
                # ancestor queries (to stay consistent); otherwise, we just do a
                # datastore Get, but this will return extra data over the RPC
                to_fetch = (offset or 0) + limit if limit else None
                additional_cols = set([
                    x[0] for x in self.ordering if x[0] not in opts.projection
                ])

                multi_query = []
                orderings = self.queries[0]._Query__orderings
                for key, queries in self.queries_by_key.items():
                    for query in queries:
                        if additional_cols:
                            # We need to include additional orderings in the projection so that we can
                            # sort them in memory. Annoyingly that means reinstantiating the queries
                            query = rpc.Query(
                                kind=query._Query__kind,
                                filters=query,
                                projection=list(opts.projection) + list(additional_cols),
                                namespace=self.namespace,
                            )

                        query.Ancestor(key)  # Make this an ancestor query
                        multi_query.append(query)

                if len(multi_query) == 1:
                    results = multi_query[0].Run(limit=to_fetch)
                else:
                    results = AsyncMultiQuery(multi_query,
                                              orderings).Run(limit=to_fetch)
            else:
                results = rpc.Get(self.queries_by_key.keys())

        def iter_results(results):
            returned = 0
            # This is safe, because Django is fetching all results anyway :(
            sorted_results = sorted(results,
                                    cmp=partial(
                                        utils.django_ordering_comparison,
                                        self.ordering))
            sorted_results = [
                result for result in sorted_results if result is not None
            ]
            if cache_results and sorted_results:
                caching.add_entities_to_cache(
                    self.model,
                    sorted_results[:max_cache_count],
                    caching.CachingSituation.DATASTORE_GET,
                    self.namespace,
                )

            for result in sorted_results:
                if is_projection:
                    entity_matches_query = True
                else:
                    entity_matches_query = any(
                        utils.entity_matches_query(result, qry)
                        for qry in self.queries_by_key[result.key()])

                if not entity_matches_query:
                    continue

                if offset and returned < offset:
                    # Skip entities based on offset
                    returned += 1
                    continue
                else:

                    yield _convert_entity_based_on_query_options(result, opts)

                    returned += 1

                    # If there is a limit, we might be done!
                    if limit is not None and returned == (offset or 0) + limit:
                        break

        return iter_results(results)
Code example #7
    def Run(self, limit=None, offset=None):
        """
            Here are the options:

            1. Single key, hit memcache
            2. Multikey projection, async MultiQueries with ancestors chained
            3. Full select, datastore get
        """

        opts = self.queries[0]._Query__query_options
        key_count = len(self.queries_by_key)

        is_projection = False

        results = None
        if key_count == 1:
            # FIXME: Potentially could use get_multi in memcache and then make a query
            # for whatever remains
            key = self.queries_by_key.keys()[0]
            result = caching.get_from_cache_by_key(key)
            if result is not None:
                results = [result]
                cache = False  # Don't update cache, we just got it from there

        if results is None:
            if opts.projection:
                is_projection = True  # Don't cache projection results!

                # Assumes projection ancestor queries are faster than a datastore Get
                # due to lower traffic over the RPC. This should be faster for queries with
                # < 30 keys (which is the most common case), and faster if the entities are
                # larger and there are many results, but there is probably a slower middle ground
                # because of the larger number of RPC calls. Still, if performance is an issue the
                # user can just do a normal get() rather than values/values_list/only/defer

                to_fetch = (offset or 0) + limit if limit else None
                additional_cols = set([
                    x[0] for x in self.ordering if x[0] not in opts.projection
                ])

                multi_query = []
                final_queries = []
                orderings = self.queries[0]._Query__orderings
                for key, queries in self.queries_by_key.iteritems():
                    for query in queries:
                        if additional_cols:
                            # We need to include additional orderings in the projection so that we can
                            # sort them in memory. Annoyingly that means reinstantiating the queries
                            query = Query(
                                kind=query._Query__kind,
                                filters=query,
                                projection=list(opts.projection) + list(additional_cols),
                                namespace=self.namespace,
                            )

                        query.Ancestor(key)  # Make this an ancestor query
                        multi_query.append(query)
                        if len(multi_query) == 30:
                            final_queries.append(
                                datastore.MultiQuery(
                                    multi_query,
                                    orderings).Run(limit=to_fetch))
                            multi_query = []
                else:
                    if len(multi_query) == 1:
                        final_queries.append(
                            multi_query[0].Run(limit=to_fetch))
                    elif multi_query:
                        final_queries.append(
                            datastore.MultiQuery(
                                multi_query, orderings).Run(limit=to_fetch))

                results = chain(*final_queries)
            else:
                results = datastore.Get(self.queries_by_key.keys())

        def iter_results(results):
            returned = 0
            # This is safe, because Django is fetching all results anyway :(
            sorted_results = sorted(results,
                                    cmp=partial(
                                        utils.django_ordering_comparison,
                                        self.ordering))
            sorted_results = [
                result for result in sorted_results if result is not None
            ]
            if not is_projection and sorted_results:
                caching.add_entities_to_cache(
                    self.model,
                    sorted_results,
                    caching.CachingSituation.DATASTORE_GET,
                    self.namespace,
                )

            for result in sorted_results:
                if is_projection:
                    entity_matches_query = True
                else:
                    entity_matches_query = any(
                        utils.entity_matches_query(result, qry)
                        for qry in self.queries_by_key[result.key()])

                if not entity_matches_query:
                    continue

                if offset and returned < offset:
                    # Skip entities based on offset
                    returned += 1
                    continue
                else:

                    yield _convert_entity_based_on_query_options(result, opts)

                    returned += 1

                    # If there is a limit, we might be done!
                    if limit is not None and returned == (offset or 0) + limit:
                        break

        return iter_results(results)
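
In every variant, iter_results applies offset and limit by counting entities it has skipped or yielded. For the entities that pass the match filter, the window it produces is the same as an itertools.islice over the filtered, sorted stream; a minimal sketch, with filtered standing in for that iterable:

from itertools import islice

def window(filtered, limit=None, offset=None):
    # Same offset/limit behaviour as the manual counting in iter_results.
    start = offset or 0
    stop = start + limit if limit is not None else None
    return islice(filtered, start, stop)

print(list(window(range(10), limit=3, offset=2)))  # [2, 3, 4]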