예제 #1
0
    def cleanup(cls, datastore_key):
        """Remove every descendant marker entity named ``cls.OPERATOR``.

        We can't filter by kind here — any field using ``contains`` may have
        created markers, and we don't know all of those kinds. What we do
        know about every entity we need to delete:
        a.) it is a descendant of ``datastore_key``
        b.) its key name equals ``cls.OPERATOR``
        """
        # Kindless, keys-only ancestor query in the key's own namespace
        ancestor_qry = Query(keys_only=True, namespace=datastore_key.namespace())
        ancestor_qry = ancestor_qry.Ancestor(datastore_key)

        # Collect the matching keys, then delete them in a single call
        doomed = []
        for candidate in ancestor_qry.Run():
            if candidate.name() == cls.OPERATOR:
                doomed.append(candidate)
        Delete(doomed)
예제 #2
0
def key_exists(key):
    """Return True if an entity with the given datastore key exists.

    Uses a keys-only ancestor query (limit 1) in the key's own namespace,
    which avoids fetching the entity itself.
    """
    existence_qry = Query(keys_only=True, namespace=key.namespace())
    existence_qry.Ancestor(key)
    count = existence_qry.Count(limit=1)
    return count > 0
예제 #3
0
def key_exists(key):
    """Return True if an entity with the given datastore key exists.

    Uses a keys-only ancestor query (limit 1), which avoids fetching the
    entity itself.
    """
    # Run the query in the key's own namespace. Without this the query runs
    # in the default namespace and can miss entities in apps that use
    # multiple namespaces.
    qry = Query(keys_only=True, namespace=key.namespace())
    qry.Ancestor(key)
    return qry.Count(limit=1) > 0
예제 #4
0
    def Run(self, limit=None, offset=None):
        """Execute this key-based query and return an iterator of entities.

        Strategy, in order of preference:

            1. Single key: try the cache first.
            2. Projection query: ancestor queries chained through
               MultiQuery, in batches of 30 (the MultiQuery subquery limit).
            3. Anything else: a plain datastore Get on all the keys.

        Args:
            limit: maximum number of matching results to yield, or None.
            offset: number of matching results to skip before yielding.

        Returns:
            A generator yielding entities converted according to the
            query options.
        """

        opts = self.queries[0]._Query__query_options
        key_count = len(self.queries_by_key)

        is_projection = False
        # BUG FIX: the previous version assigned a ``cache`` flag on a cache
        # hit but never read it, so a result just fetched from the cache was
        # immediately written back to the cache. ``cache_results`` is now
        # actually consulted below.
        cache_results = True

        results = None
        if key_count == 1:
            # FIXME: Potentially could use get_multi in memcache and then make
            # a query for whatever remains
            key = self.queries_by_key.keys()[0]
            result = caching.get_from_cache_by_key(key)
            if result is not None:
                results = [result]
                cache_results = False  # Don't update cache, we just got it from there

        if results is None:
            if opts.projection:
                is_projection = True
                cache_results = False  # Don't cache projection results!

                # Assumes projection ancestor queries are faster than a datastore Get
                # due to lower traffic over the RPC. This should be faster for queries with
                # < 30 keys (which is the most common case), and faster if the entities are
                # larger and there are many results, but there is probably a slower middle ground
                # because the larger number of RPC calls. Still, if performance is an issue the
                # user can just do a normal get() rather than values/values_list/only/defer

                to_fetch = (offset or 0) + limit if limit else None
                # Ordering columns not already in the projection; they must be
                # fetched too so we can sort in memory below.
                additional_cols = set([
                    x[0] for x in self.ordering if x[0] not in opts.projection
                ])

                multi_query = []
                final_queries = []
                orderings = self.queries[0]._Query__orderings
                for key, queries in self.queries_by_key.iteritems():
                    for query in queries:
                        if additional_cols:
                            # We need to include additional orderings in the projection so that we can
                            # sort them in memory. Annoyingly that means reinstantiating the queries.
                            # BUG FIX: ``list(...).extend(...)`` returns None, so the
                            # previous code silently passed projection=None.
                            query = Query(
                                kind=query._Query__kind,
                                filters=query,
                                projection=list(opts.projection) + list(additional_cols),
                                namespace=self.namespace,
                            )

                        query.Ancestor(key)  # Make this an ancestor query
                        multi_query.append(query)
                        if len(multi_query) == 30:
                            # MultiQuery allows at most 30 subqueries; flush a batch
                            final_queries.append(
                                datastore.MultiQuery(
                                    multi_query,
                                    orderings).Run(limit=to_fetch))
                            multi_query = []

                # Flush whatever remains after the loop. (The previous version
                # used a for-else for this; since the loop never breaks that is
                # equivalent to plain post-loop code, written here for clarity.)
                if len(multi_query) == 1:
                    final_queries.append(
                        multi_query[0].Run(limit=to_fetch))
                elif multi_query:
                    final_queries.append(
                        datastore.MultiQuery(
                            multi_query, orderings).Run(limit=to_fetch))

                results = chain(*final_queries)
            else:
                results = datastore.Get(self.queries_by_key.keys())

        def iter_results(results):
            """Sort, cache, filter and convert the raw results lazily."""
            returned = 0
            # This is safe, because Django is fetching all results any way :(
            sorted_results = sorted(results,
                                    cmp=partial(
                                        utils.django_ordering_comparison,
                                        self.ordering))
            sorted_results = [
                result for result in sorted_results if result is not None
            ]
            if cache_results and sorted_results:
                caching.add_entities_to_cache(
                    self.model,
                    sorted_results,
                    caching.CachingSituation.DATASTORE_GET,
                    self.namespace,
                )

            for result in sorted_results:
                if is_projection:
                    # Projection entities lack the fields needed to re-check
                    # the filters, so trust the datastore's matching
                    entity_matches_query = True
                else:
                    entity_matches_query = any(
                        utils.entity_matches_query(result, qry)
                        for qry in self.queries_by_key[result.key()])

                if not entity_matches_query:
                    continue

                if offset and returned < offset:
                    # Skip entities based on offset
                    returned += 1
                    continue
                else:

                    yield _convert_entity_based_on_query_options(result, opts)

                    returned += 1

                    # If there is a limit, we might be done!
                    if limit is not None and returned == (offset or 0) + limit:
                        break

        return iter_results(results)
예제 #5
0
    def Run(self, limit=None, offset=None):
        """Execute this key-based query and return an iterator of entities.

        Strategy, in order of preference:

            1. Single key: try the cache first.
            2. Projection query (when multi-querying is possible): ancestor
               queries combined via AsyncMultiQuery, to stay consistent.
            3. Anything else: a plain datastore Get on all the keys.

        Args:
            limit: maximum number of matching results to yield, or None.
            offset: number of matching results to skip before yielding.

        Returns:
            A generator yielding entities converted according to the
            query options.
        """
        opts = self.queries[0]._Query__query_options
        key_count = len(self.queries_by_key)

        is_projection = False

        # Cap on how many entities we will push into the cache in one go
        max_cache_count = getattr(settings, "DJANGAE_CACHE_MAX_ENTITY_COUNT",
                                  DEFAULT_MAX_ENTITY_COUNT)

        cache_results = True
        results = None
        if key_count == 1:
            # FIXME: Potentially could use get_multi in memcache and the make a query
            # for whatever remains
            key = self.queries_by_key.keys()[0]
            result = caching.get_from_cache_by_key(key)
            if result is not None:
                results = [result]
                cache_results = False  # Don't update cache, we just got it from there

        if results is None:
            if opts.projection and self.can_multi_query:
                is_projection = True
                cache_results = False  # Don't cache projection results!

                # If we can multi-query in a single query, we do so using a number of
                # ancestor queries (to stay consistent) otherwise, we just do a
                # datastore Get, but this will return extra data over the RPC
                to_fetch = (offset or 0) + limit if limit else None
                # Ordering columns not already in the projection; they must be
                # fetched too so we can sort in memory below.
                additional_cols = set([
                    x[0] for x in self.ordering if x[0] not in opts.projection
                ])

                multi_query = []
                orderings = self.queries[0]._Query__orderings
                for key, queries in self.queries_by_key.iteritems():
                    for query in queries:
                        if additional_cols:
                            # We need to include additional orderings in the projection so that we can
                            # sort them in memory. Annoyingly that means reinstantiating the queries.
                            # BUG FIX: ``list(...).extend(...)`` returns None, so the
                            # previous code silently passed projection=None.
                            query = Query(
                                kind=query._Query__kind,
                                filters=query,
                                projection=list(opts.projection) + list(additional_cols),
                                namespace=self.namespace,
                            )

                        query.Ancestor(key)  # Make this an ancestor query
                        multi_query.append(query)

                if len(multi_query) == 1:
                    results = multi_query[0].Run(limit=to_fetch)
                else:
                    results = AsyncMultiQuery(multi_query,
                                              orderings).Run(limit=to_fetch)
            else:
                results = datastore.Get(self.queries_by_key.keys())

        def iter_results(results):
            """Sort, cache, filter and convert the raw results lazily."""
            returned = 0
            # This is safe, because Django is fetching all results any way :(
            sorted_results = sorted(results,
                                    cmp=partial(
                                        utils.django_ordering_comparison,
                                        self.ordering))
            sorted_results = [
                result for result in sorted_results if result is not None
            ]
            if cache_results and sorted_results:
                caching.add_entities_to_cache(
                    self.model,
                    sorted_results[:max_cache_count],
                    caching.CachingSituation.DATASTORE_GET,
                    self.namespace,
                )

            for result in sorted_results:
                if is_projection:
                    # Projection entities lack the fields needed to re-check
                    # the filters, so trust the datastore's matching
                    entity_matches_query = True
                else:
                    entity_matches_query = any(
                        utils.entity_matches_query(result, qry)
                        for qry in self.queries_by_key[result.key()])

                if not entity_matches_query:
                    continue

                if offset and returned < offset:
                    # Skip entities based on offset
                    returned += 1
                    continue
                else:

                    yield _convert_entity_based_on_query_options(result, opts)

                    returned += 1

                    # If there is a limit, we might be done!
                    if limit is not None and returned == (offset or 0) + limit:
                        break

        return iter_results(results)