def _validate_query_is_possible(self, query):
    """ Given the *django* query, check the following:
        - The query only has one inequality filter
        - The query does no joins
        - The query ordering is compatible with the filters
    """
    # Work out which tables the query genuinely references. Select-related
    # tables aren't actually used (the connector marks select_related as
    # unsupported in its features) and concrete-parent tables are handled
    # by the backend, so both are discounted before the JOIN check.
    referenced = {alias for alias, refcount in query.alias_refcount.items() if refcount}
    parent_tables = {parent._meta.db_table for parent in query.model._meta.parents}
    related_tables = {select_col[0][0] for select_col in query.related_select_cols}

    remaining = referenced - parent_tables - related_tables
    if len(remaining) > 1:
        raise NotSupportedError(""" The appengine database connector does not support JOINs. The requested join map follows\n %s """ % query.join_map)

    if not query.aggregates:
        return

    # py2: dict.keys() returns a list, so this equality test is meaningful
    if query.aggregates.keys() != [None]:
        raise NotSupportedError("Unsupported aggregate query")

    # Only COUNT(*) / COUNT(pk) can be run natively on the datastore
    agg_col = query.aggregates[None].col
    opts = self.model._meta
    if agg_col != "*" and agg_col != (opts.db_table, opts.pk.column):
        raise NotSupportedError("Counting anything other than '*' or the primary key is not supported")
def process_and_branch(query, and_branch):
    # Apply every literal of one AND branch of the DNF where-tree as a filter
    # on `query` (a dict-like datastore Query, keyed "column op").
    # NOTE(review): closure — `self`, `get_datastore_key`, `coerce_unicode`,
    # `datastore`, `datastore_errors` come from the enclosing scope.
    for child in and_branch[-1]:
        # Each child is ('LIT', (column, op, value))
        column, op, value = child[1]

        # for column, op, value in and_branch[-1]:
        if column == self.pk_col:
            # Primary-key filters become datastore __key__ filters
            column = "__key__"

            #FIXME: This EmptyResultSet check should happen during normalization so that Django doesn't count it as a query
            if op == "=" and "__key__ =" in query and query["__key__ ="] != value:
                # We've already done an exact lookup on a key, this query can't return anything!
                raise EmptyResultSet()

            if not isinstance(value, datastore.Key):
                value = get_datastore_key(self.model, value)

        key = "%s %s" % (column, op)
        try:
            if isinstance(value, basestring):
                value = coerce_unicode(value)

            # Merge repeated filters on the same column/op into a list,
            # avoiding duplicate values
            if key in query:
                if type(query[key]) == list:
                    if value not in query[key]:
                        query[key].append(value)
                else:
                    if query[key] != value:
                        query[key] = [ query[key], value ]
            else:
                query[key] = value
        except datastore_errors.BadFilterError as e:
            # Surface datastore filter rejections as NotSupportedError
            raise NotSupportedError(str(e))
def txn():
    # Bulk-insert body — presumably run inside a datastore transaction at the
    # call site (TODO confirm). Closure over `keys`, `entities`,
    # `check_existence`, `markers` and `self` from the enclosing scope.
    for key in keys:
        if check_existence and key is not None:
            if utils.key_exists(key):
                raise IntegrityError("Tried to INSERT with existing key")

            # Ids beginning with "__" are reserved by the datastore
            id_or_name = key.id_or_name()
            if isinstance(id_or_name, six.string_types) and id_or_name.startswith("__"):
                raise NotSupportedError("Datastore ids cannot start with __. Id was %s" % id_or_name)

            # Notify App Engine of any keys we're specifying intentionally
            reserve_id(key.kind(), key.id_or_name(), self.namespace)

    results = perform_insert(entities)

    # Acquire unique-constraint markers for every inserted entity; the caller
    # owns `markers` and can release them on failure
    for entity, _ in entities:
        markers.extend(constraints.acquire(self.model, entity))

    caching.add_entities_to_cache(
        self.model,
        [x[0] for x in entities],
        caching.CachingSituation.DATASTORE_GET_PUT,
        self.namespace,
        skip_memcache=True
    )

    return results
def txn():
    # Single-entity insert body — presumably run inside a datastore
    # transaction at the call site (TODO confirm). Closure over `key`, `ent`,
    # `results`, `was_in_transaction` and `self`.
    if key is not None:
        if utils.key_exists(key):
            raise IntegrityError(
                "Tried to INSERT with existing key")

        # Ids beginning with "__" are reserved by the datastore
        id_or_name = key.id_or_name()
        if isinstance(id_or_name, basestring) and id_or_name.startswith("__"):
            raise NotSupportedError(
                "Datastore ids cannot start with __. Id was %s" % id_or_name)

    if not constraints.constraint_checks_enabled(self.model):
        # Fast path, just insert
        results.append(datastore.Put(ent))
    else:
        markers = constraints.acquire(self.model, ent)
        try:
            results.append(datastore.Put(ent))
            if not was_in_transaction:
                # We can cache if we weren't in a transaction before this little nested one
                caching.add_entity_to_cache(
                    self.model, ent,
                    caching.CachingSituation.DATASTORE_GET_PUT)
        except:
            # Bare except is deliberate: whatever went wrong, make sure we
            # delete any created markers before we re-raise
            constraints.release_markers(markers)
            raise
def execute(self):
    """Run the query: bail out if it was flagged unsupported, otherwise
    build the GAE query, reset result state and fetch."""
    message = self.unsupported_query_message
    if message:
        raise NotSupportedError(message)

    self.gae_query = self._build_gae_query()

    # Reset per-execution state before fetching
    self.results = None
    self.query_done = False
    if self.is_count:
        self.aggregate_type = "count"
    else:
        self.aggregate_type = None

    self._do_fetch()
def process_literal(node, is_pk_filter, excluded_pks, filtered_columns=None, negated=False):
    # Convert a single ('LIT', (column, op, value)) node into a
    # datastore-compatible tree fragment, exploding `in`, `isnull` and
    # negated equality into OR branches of range filters.
    # Returns (tree_or_None, filtered_columns); also records the column in
    # `filtered_columns` and negated pk-equality values in `excluded_pks`.
    column, op, value = node[1]
    if filtered_columns is not None:
        assert isinstance(filtered_columns, set)
        filtered_columns.add(column)
    if op == 'in': # Explode INs into OR
        if not isinstance(value, (list, tuple, set)):
            raise ValueError("IN queries must be supplied a list of values")
        if negated:
            if len(value) == 0:
                # NOT IN an empty list matches everything: no filter needed
                return None, filtered_columns

            # Each excluded value becomes a (>, <) pair of range filters
            lits = []
            for x in value:
                lits.append(('LIT', (column, '>', x)))
                lits.append(('LIT', (column, '<', x)))
            return ('OR', lits), filtered_columns
        else:
            if not value:
                # Add an impossible filter when someone queries on an empty list, which should never return anything for
                # this branch. We can't just raise an EmptyResultSet because another branch might return something
                return ('AND', [('LIT', IMPOSSIBLE_FILTER)]), filtered_columns

            return ('OR', [('LIT', (column, '=', x)) for x in value]), filtered_columns
    elif op == "isnull":
        # NOT isnull=X is the same as isnull=(not X)
        if negated:
            value = not value
            negated = not negated

        if not value:
            # isnull=False -> any value strictly above or below None
            lits = []
            lits.append(('LIT', (column, '>', None)))
            lits.append(('LIT', (column, '<', None)))
            return ('OR', lits), filtered_columns
        else:
            # isnull=True falls through as an exact match on None
            op = "exact"
            value = None

    if not OPERATORS_MAP.get(op):
        raise NotSupportedError("Unsupported operator %s" % op)
    _op = OPERATORS_MAP[op]

    if negated and _op == '=': # Explode
        if is_pk_filter and excluded_pks is not None:
            # Excluded pks is a set() if we should be using it; exclusion is
            # then done in memory rather than via range filters
            excluded_pks.add(value)
            return None, filtered_columns

        return ('OR', [('LIT', (column, '>', value)), ('LIT', (column, '<', value))]), filtered_columns
    return ('LIT', (column, _op, value)), filtered_columns
def parse_dnf(node, connection, ordering=None):
    """ Parse a Django where-node into a DNF tree — an ('OR', [...]) of
        ('AND', [...]) / ('LIT', (column, op, value)) fragments — that the
        datastore can execute.

        Returns (tree, filtered_columns, excluded_pks). `excluded_pks` is
        only populated when pk exclusion should happen in memory.
        Raises EmptyResultSet when every branch is provably impossible, and
        NotSupportedError when more than 30 non-key filters are required.
    """
    should_in_memory_exclude = should_exclude_pks_in_memory(node, ordering)

    tree, filtered_columns, excluded_pks = parse_tree(
        node, connection,
        excluded_pks=set() if should_in_memory_exclude else None
    )

    # BUGFIX: this previously read `if not should_exclude_pks_in_memory:`,
    # testing the *function object* (always truthy), so the sanity assert
    # below could never execute. Test the computed flag instead.
    if not should_in_memory_exclude:
        assert excluded_pks is None

    if tree:
        tree = tripled(tree)

    # Normalize so the root is always an OR of AND/LIT branches
    if tree and tree[0] != 'OR':
        tree = ('OR', [tree])

    # Filter out impossible branches of the where, if that then results in an empty tree then
    # raise an EmptyResultSet, otherwise replace the tree with the now simpler query
    if tree:
        final = []
        for and_branch in tree[-1]:
            if and_branch[0] == 'LIT' and and_branch[-1] == IMPOSSIBLE_FILTER:
                continue
            elif and_branch[0] == 'AND' and IMPOSSIBLE_FILTER in [x[-1] for x in and_branch[-1]]:
                continue
            final.append(and_branch)

        if not final:
            raise EmptyResultSet()
        else:
            tree = (tree[0], final)

    # If there are more than 30 filters, and not all filters are PK filters,
    # the datastore can't run the query (30 subquery limit)
    if tree and len(tree[-1]) > 30:
        for and_branch in tree[-1]:
            if and_branch[0] == 'LIT':
                and_branch = [and_branch]
            # Go through each literal tuple
            for lit in (and_branch[-1] if and_branch[0] == 'AND' else and_branch):
                if isinstance(lit[-1], datastore.Key) or isinstance(lit[-1][-1], datastore.Key):
                    # If the value is a key, then break the loop
                    break
            else:
                # If we didn't find a literal with a datastore Key, then raise unsupported
                raise NotSupportedError(
                    "The datastore doesn't support this query, more than 30 filters were needed"
                )

    return tree, filtered_columns, excluded_pks or set()
def _apply_extra_to_entity(extra_select, entity, pk_col):
    """ Obviously the datastore doesn't support extra columns, but we can
        emulate simple extra selects as we iterate the results. This
        function does that!

        `extra_select` maps column name -> (select expression, _); the
        computed values are written straight onto `entity`, which is
        returned for convenience.
    """
    def prep_value(attr):
        # Resolve `attr`: the pk column becomes the entity key id/name, an
        # existing entity property is read, otherwise the token itself is
        # used; ints and quoted string literals are then unwrapped.
        if attr == pk_col:
            attr = entity.key().id_or_name()
        else:
            attr = entity[attr] if attr in entity else attr

        try:
            attr = int(attr)
        except (TypeError, ValueError):
            pass

        # BUGFIX: guard against an empty string before indexing
        # attr[0]/attr[-1], which previously raised IndexError
        if isinstance(attr, basestring) and attr:
            # Strip matching surrounding quotes from string literals
            if (attr[0], attr[-1]) == ("'", "'"):
                attr = attr[1:-1]
            elif (attr[0], attr[-1]) == ('"', '"'):
                attr = attr[1:-1]
        return attr

    for column, (select, _) in extra_select.iteritems():
        # Recognise simple binary expressions, e.g. "a + 1" or "a = 'x'"
        arithmetic_regex = "(\w+)\s?([+|-|/|*|\=])\s?([\w|'|\"]+)"
        match = re.match(arithmetic_regex, select)
        if match:
            lhs = match.group(1)
            op = match.group(2)
            rhs = match.group(3)

            lhs = prep_value(lhs)
            rhs = prep_value(rhs)

            fun = EXTRA_SELECT_FUNCTIONS.get(op)
            if not fun:
                raise NotSupportedError("Unimplemented extra select operation: '%s'" % op)
            entity[column] = fun(lhs, rhs)
        else:
            # Not an expression: treat the whole select as a single
            # column reference or literal value
            rhs = prep_value(select)
            entity[column] = rhs

    return entity
def _convert_ordering(query):
    # Resolve the effective ordering for `query`: an explicit order_by wins;
    # otherwise fall back to the model's Meta.ordering unless default
    # ordering has been disabled. Returns a list of column names, each
    # optionally prefixed with "-" for descending.
    if not query.default_ordering:
        result = query.order_by
    else:
        result = query.order_by or query.get_meta().ordering

    if result:
        # We factor out cross-table orderings (rather than raising NotSupportedError) otherwise we'll break
        # the admin which uses them. We log a warning when this happens though
        try:
            ordering = []
            for name in result:
                if name == "?":
                    raise NotSupportedError(
                        "Random ordering is not supported on the datastore")

                # Skip anything containing "__" (a cross-table ordering)
                if not (isinstance(name, basestring) and "__" in name):
                    if isinstance(name, basestring):
                        # Map the field name (or "pk") to its db column,
                        # preserving the "-" direction prefix
                        if name.lstrip("-") == "pk":
                            field_column = query.model._meta.pk.column
                        else:
                            field_column = query.model._meta.get_field(
                                name.lstrip("-")).column
                        ordering.append(field_column if not name.startswith(
                            "-") else "-{}".format(field_column))
                    else:
                        # Non-string orderings (presumably ints) pass through
                        ordering.append(name)
        except FieldDoesNotExist:
            # `name` here is the field that failed to resolve in the loop
            opts = query.model._meta
            available = opts.get_all_field_names()
            raise FieldError("Cannot resolve keyword %r into field. "
                             "Choices are: %s" % (name, ", ".join(available)))

        if len(ordering) < len(result):
            # Some orderings were dropped; warn (debug-level on production)
            diff = set(result) - set(ordering)
            log_once(
                DJANGAE_LOG.warning if not on_production() else DJANGAE_LOG.debug,
                "The following orderings were ignored as cross-table orderings are not supported on the datastore: %s",
                diff)

        result = ordering

    return result
def _build_gae_query(self):
    """ Build and return the Datastore Query object. """
    query_kwargs = {
        "kind": str(self.db_table)
    }

    # Distinct is only possible on projection queries
    if self.distinct:
        if self.projection:
            query_kwargs["distinct"] = True
        else:
            logging.warning("Ignoring distinct on a query where a projection wasn't possible")

    if self.keys_only:
        query_kwargs["keys_only"] = self.keys_only
    elif self.projection:
        query_kwargs["projection"] = self.projection

    query = Query(
        **query_kwargs
    )

    # Polymodel-style filtering: restrict to this concrete subclass
    if has_concrete_parents(self.model) and not self.model._meta.proxy:
        query["class ="] = self.model._meta.db_table

    # Convert self.ordering into (column, direction) pairs
    ordering = []
    for order in self.ordering:
        if isinstance(order, (long, int)):
            # Numeric ordering: 1 == ascending on the first queried field
            direction = datastore.Query.ASCENDING if order == 1 else datastore.Query.DESCENDING
            order = self.queried_fields[0]
        else:
            direction = datastore.Query.DESCENDING if order.startswith("-") else datastore.Query.ASCENDING
            order = order.lstrip("-")

        if order == self.model._meta.pk.column or order == "pk":
            order = "__key__"

        #Flip the ordering if someone called reverse() on the queryset
        if not self.original_query.standard_ordering:
            direction = datastore.Query.DESCENDING if direction == datastore.Query.ASCENDING else datastore.Query.ASCENDING

        ordering.append((order, direction))

    def process_and_branch(query, and_branch):
        # Apply each literal of one AND branch as a filter on `query`
        for child in and_branch[-1]:
            column, op, value = child[1]

            # for column, op, value in and_branch[-1]:
            if column == self.pk_col:
                column = "__key__"

                #FIXME: This EmptyResultSet check should happen during normalization so that Django doesn't count it as a query
                if op == "=" and "__key__ =" in query and query["__key__ ="] != value:
                    # We've already done an exact lookup on a key, this query can't return anything!
                    raise EmptyResultSet()

                if not isinstance(value, datastore.Key):
                    value = get_datastore_key(self.model, value)

            key = "%s %s" % (column, op)
            try:
                if isinstance(value, basestring):
                    value = coerce_unicode(value)

                # Merge repeated filters on the same column/op into a list
                if key in query:
                    if type(query[key]) == list:
                        if value not in query[key]:
                            query[key].append(value)
                    else:
                        if query[key] != value:
                            query[key] = [ query[key], value ]
                else:
                    query[key] = value
            except datastore_errors.BadFilterError as e:
                raise NotSupportedError(str(e))

    if self.where:
        queries = []

        # print query._Query__kind, self.where

        for and_branch in self.where[1]:
            # Duplicate the query for all the "OR"s
            queries.append(Query(**query_kwargs))
            queries[-1].update(query)  # Make sure we copy across filters (e.g. class =)

            try:
                # A lone literal at the root is wrapped as a one-element AND
                if and_branch[0] == "LIT":
                    and_branch = ("AND", [and_branch])

                process_and_branch(queries[-1], and_branch)
            except EmptyResultSet:
                # This is a little hacky but basically if there is only one branch in the or, and it raises
                # and EmptyResultSet, then we just bail, however if there is more than one branch the query the
                # query might still return something. This logic needs cleaning up and moving to the DNF phase
                if len(self.where[1]) == 1:
                    return NoOpQuery()
                else:
                    queries.pop()

        if not queries:
            return NoOpQuery()

        included_pks = [ qry["__key__ ="] for qry in queries if "__key__ =" in qry ]
        if len(included_pks) == len(queries):  # If all queries have a key, we can perform a Get
            return QueryByKeys(self.model, queries, ordering)  # Just use whatever query to determine the matches
        else:
            if len(queries) > 1:
                # Disable keys only queries for MultiQuery
                new_queries = []
                for i, query in enumerate(queries):
                    # Datastore MultiQuery limit is 30 subqueries
                    if i > 30:
                        raise NotSupportedError("Too many subqueries (max: 30, got {}). Probably cause too many IN/!= filters".format(
                            len(queries)
                        ))
                    qry = Query(query._Query__kind, projection=query._Query__query_options.projection)
                    qry.update(query)
                    try:
                        qry.Order(*ordering)
                    except datastore_errors.BadArgumentError as e:
                        raise NotSupportedError(e)

                    new_queries.append(qry)

                query = datastore.MultiQuery(new_queries, ordering)
            else:
                query = queries[0]
                try:
                    query.Order(*ordering)
                except datastore_errors.BadArgumentError as e:
                    raise NotSupportedError(e)
    else:
        # No where clause: just apply the ordering to the base query
        try:
            query.Order(*ordering)
        except datastore_errors.BadArgumentError as e:
            raise NotSupportedError(e)

    # If the resulting query was unique, then wrap as a unique query which
    # will hit the cache first
    unique_identifier = query_is_unique(self.model, query)
    if unique_identifier:
        return UniqueQuery(unique_identifier, query, self.model)

    DJANGAE_LOG.debug("Select query: {0}, {1}".format(self.model.__name__, self.where))

    return query
def _convert_ordering(query):
    # Resolve the effective ordering for `query` (explicit order_by, else
    # Meta.ordering unless default ordering is disabled), with a best-effort
    # translation of extra_order_by expressions. Returns column names, each
    # optionally prefixed with "-" for descending.
    if not query.default_ordering:
        result = query.order_by
    else:
        result = query.order_by or query.get_meta().ordering

    if query.extra_order_by:
        # This is a best attempt at ordering by extra select, it covers the cases
        # in the Django tests, but use this functionality with care
        all_fields = query.get_meta().get_all_field_names()
        new_ordering = []
        for col in query.extra_order_by:
            # If the query in the extra order by is part of the extra select
            # and the extra select is just an alias, then use the original column
            if col in query.extra_select:
                if query.extra_select[col][0] in all_fields:
                    new_ordering.append(query.extra_select[col][0])
                else:
                    # It wasn't an alias, probably can't support it
                    raise NotSupportedError("Unsupported extra_order_by: {}".format(query.extra_order_by))
            else:
                # Not in the extra select, probably just a column so use it if it is
                if col in all_fields:
                    new_ordering.append(col)
                else:
                    raise NotSupportedError("Unsupported extra_order_by: {}".format(query.extra_order_by))

        result = tuple(new_ordering)

    if result:
        # We factor out cross-table orderings (rather than raising NotSupportedError) otherwise we'll break
        # the admin which uses them. We log a warning when this happens though
        try:
            ordering = []
            for name in result:
                if name == "?":
                    raise NotSupportedError("Random ordering is not supported on the datastore")

                # Skip anything containing "__" (a cross-table ordering)
                if not (isinstance(name, basestring) and "__" in name):
                    if isinstance(name, basestring):
                        # Map the field name (or "pk") to its db column,
                        # preserving the "-" direction prefix
                        if name.lstrip("-") == "pk":
                            field_column = query.model._meta.pk.column
                        else:
                            field = query.model._meta.get_field_by_name(name.lstrip("-"))[0]
                            field_column = field.column
                        ordering.append(field_column if not name.startswith("-") else "-{}".format(field_column))
                    else:
                        # Non-string orderings (presumably ints) pass through
                        ordering.append(name)
        except FieldDoesNotExist:
            # `name` here is the field that failed to resolve in the loop
            opts = query.model._meta
            available = opts.get_all_field_names()
            raise FieldError("Cannot resolve keyword %r into field. "
                             "Choices are: %s" % (name, ", ".join(available))
            )

        if len(ordering) < len(result):
            # Some orderings were dropped; warn (debug-level on production)
            diff = set(result) - set(ordering)
            log_once(
                DJANGAE_LOG.warning if not on_production() else DJANGAE_LOG.debug,
                "The following orderings were ignored as cross-table orderings are not supported on the datastore: %s",
                diff
            )

        result = ordering

    return result
def parse_constraint(child, connection, negated=False):
    # Convert a Django where-node child into a (column, op, value) triple the
    # datastore can use, applying special indexes where required. Handles
    # both the old tuple form and the Django 1.7+ lookup-object form.
    if isinstance(child, tuple):
        # First, unpack the constraint
        constraint, op, annotation, value = child
        was_list = isinstance(value, (list, tuple))
        if isinstance(value, query.Query):
            # Subquery: evaluate it eagerly via the compiler
            value = value.get_compiler(connection.alias).as_sql()[0].execute()
        else:
            packed, value = constraint.process(op, value, connection)
            alias, column, db_type = packed
            field = constraint.field
    else:
        # Django 1.7+
        field = child.lhs.target
        column = child.lhs.target.column
        op = child.lookup_name
        value = child.rhs
        annotation = value
        was_list = isinstance(value, (list, tuple))

        if isinstance(value, query.Query):
            # Subquery: evaluate it eagerly via the compiler
            value = value.get_compiler(connection.alias).as_sql()[0].execute()
        elif value != []:
            value = child.lhs.output_field.get_db_prep_lookup(
                child.lookup_name, child.rhs, connection, prepared=True)

    is_pk = field and field.primary_key

    if column == "id" and op == "iexact" and is_pk and isinstance(field, AutoField):
        # When new instance is created, automatic primary key 'id' does not generate '_idx_iexact_id'.
        # As the primary key 'id' (AutoField) is integer and is always case insensitive, we can deal with 'id_iexact=' query by using 'exact' rather than 'iexact'.
        op = "exact"

    if field and field.db_type(connection) in ("bytes", "text"):
        raise NotSupportedError("Text and Blob fields are not indexed by the datastore, so you can't filter on them")

    if op not in REQUIRES_SPECIAL_INDEXES:
        # Don't convert if this op requires special indexes, it will be handled there
        if field:
            value = [ connection.ops.prep_lookup_value(field.model, x, field, column=column) for x in value]

        # Don't ask me why, but on Django 1.6 constraint.process on isnull wipes out the value (it returns an empty list)
        # so we have to special case this to use the annotation value instead
        if op == "isnull":
            if annotation is not None:
                value = [ annotation ]

            # pk isnull=True can never match anything: pks are never null
            if is_pk and value[0]:
                raise EmptyResultSet()

        if not was_list:
            value = value[0]
    else:
        if negated:
            raise CouldBeSupportedError("Special indexing does not currently supported negated queries. See #80")

        if not was_list:
            value = value[0]

        add_special_index(field.model, column, op)  # Add the index if we can (e.g. on dev_appserver)
        if op not in special_indexes_for_column(field.model, column):
            raise RuntimeError("There is a missing index in your djangaeidx.yaml - \n\n{0}:\n\t{1}: [{2}]".format(
                field.model, column, op)
            )

        # Rewrite column/op/value through the special indexer
        indexer = REQUIRES_SPECIAL_INDEXES[op]
        value = indexer.prep_value_for_query(value)
        column = indexer.indexed_column_name(column, value=value)
        op = indexer.prep_query_operator(op)

    return column, op, value
def _perform_unique_checks(self, unique_checks):
    """ Datastore-friendly replacement for Django's unique checks.

        Builds the lookups manually (using `__overlap` when the value is an
        iterable) and splits over-long list lookups into several queries to
        stay within App Engine's multi-query limit. Returns the same
        errors dict shape as Django's native implementation.
    """
    errors = {}

    for model_class, unique_check in unique_checks:
        lookup_kwargs = {}
        for field_name in unique_check:
            f = self._meta.get_field(field_name)
            lookup_value = getattr(self, f.attname)
            if lookup_value is None:
                continue
            if f.primary_key and not self._state.adding:
                continue

            ##########################################################################
            # This is a modification to Django's native implementation of this method;
            # we conditionally build a __in lookup if the value is an iterable.
            lookup = str(field_name)
            if isinstance(lookup_value, (list, set, tuple)):
                lookup = "%s__overlap" % lookup
            lookup_kwargs[lookup] = lookup_value
            ##########################################################################
            # / end of changes

        if len(unique_check) != len(lookup_kwargs):
            continue

        #######################################################
        # Deal with long __in lookups by doing multiple queries in that case
        # This is a bit hacky, but we really have no choice due to App Engine's
        # 30 multi-query limit. This also means we can't support multiple list fields in
        # a unique combination
        #######################################################

        # BUGFIX: iterable values produce "__overlap" lookups (see above), so
        # the original "__in"-only test here was dead code and two list
        # fields slipped through; check both suffixes.
        if len([x for x in lookup_kwargs if x.endswith("__in") or x.endswith("__overlap")]) > 1:
            raise NotSupportedError(
                "You cannot currently have two list fields in a unique combination"
            )

        # Split IN queries into multiple lookups if they are too long
        lookups = []
        for k, v in lookup_kwargs.iteritems():
            if (k.endswith("__in") or k.endswith("__overlap")) and len(v) > MAX_ALLOWABLE_QUERIES:
                v = list(v)
                while v:
                    new_lookup = lookup_kwargs.copy()
                    # BUGFIX: chunk by MAX_ALLOWABLE_QUERIES rather than a
                    # hard-coded 30, so the chunk size always matches the
                    # threshold tested above
                    new_lookup[k] = v[:MAX_ALLOWABLE_QUERIES]
                    v = v[MAX_ALLOWABLE_QUERIES:]
                    lookups.append(new_lookup)
                break
        else:
            # Otherwise just use the one lookup
            lookups = [lookup_kwargs]

        for lookup_kwargs in lookups:
            qs = model_class._default_manager.filter(
                **lookup_kwargs).values_list("pk", flat=True)
            model_class_pk = self._get_pk_val(model_class._meta)
            result = list(qs)

            if not self._state.adding and model_class_pk is not None:
                # If we are saving an instance, we ignore it's PK in the result
                try:
                    result.remove(model_class_pk)
                except ValueError:
                    pass

            if result:
                if len(unique_check) == 1:
                    key = unique_check[0]
                else:
                    key = NON_FIELD_ERRORS
                errors.setdefault(key, []).append(
                    self.unique_error_message(model_class, unique_check))
                break

    return errors
def _build_query(self):
    # Build the datastore query (or wrapper: QueryByKeys / UniqueQuery /
    # MultiQuery) from the normalized self.query tree — one datastore Query
    # per OR branch of the where tree.
    self._sanity_check()

    queries = []

    projection = self._exclude_pk(self.query.columns) or None

    query_kwargs = {
        "kind": self.query.concrete_model._meta.db_table,
        "distinct": self.query.distinct or None,
        "keys_only": self.keys_only or None,
        "projection": projection
    }

    ordering = convert_django_ordering_to_gae(self.query.order_by)

    if self.query.distinct and not ordering:
        # If we specified we wanted a distinct query, but we didn't specify
        # an ordering, we must set the ordering to the distinct columns, otherwise
        # App Engine shouts at us. Nastily. And without remorse.
        ordering = self.query.columns[:]

    # Deal with the no filters case
    if self.query.where is None:
        query = Query(**query_kwargs)
        try:
            query.Order(*ordering)
        except datastore_errors.BadArgumentError as e:
            raise NotSupportedError(e)
        return query

    assert self.query.where

    # Go through the normalized query tree
    for and_branch in self.query.where.children:
        query = Query(**query_kwargs)

        # This deals with the oddity that the root of the tree may well be a leaf
        filters = [and_branch] if and_branch.is_leaf else and_branch.children

        for filter_node in filters:
            lookup = "{} {}".format(filter_node.column, filter_node.operator)

            value = filter_node.value
            # This is a special case. Annoyingly Django's decimal field doesn't
            # ever call ops.get_prep_save or lookup or whatever when you are filtering
            # on a query. It *does* do it on a save, so we basically need to do a
            # conversion here, when really it should be handled elsewhere
            if isinstance(value, decimal.Decimal):
                field = get_field_from_column(self.query.model, filter_node.column)
                value = self.connection.ops.value_to_db_decimal(
                    value, field.max_digits, field.decimal_places)
            elif isinstance(value, basestring):
                value = unicode(value)

            # If there is already a value for this lookup, we need to make the
            # value a list and append the new entry
            if lookup in query and not isinstance(
                    query[lookup], (list, tuple)) and query[lookup] != value:
                query[lookup] = [query[lookup]] + [value]
            else:
                # If the value is a list, we can't just assign it to the query
                # which will treat each element as its own value. So in this
                # case we nest it. This has the side effect of throwing a BadValueError
                # which we could throw ourselves, but the datastore might start supporting
                # list values in lookups.. you never know!
                if isinstance(value, (list, tuple)):
                    query[lookup] = [value]
                else:
                    # Common case: just add the raw where constraint
                    query[lookup] = value

        if ordering:
            try:
                query.Order(*ordering)
            except datastore_errors.BadArgumentError as e:
                # This is the easiest way to detect unsupported orderings
                # ideally we'd detect this at the query normalization stage
                # but it's a lot of hassle, this is much easier and seems to work OK
                raise NotSupportedError(e)

        queries.append(query)

    if can_perform_datastore_get(self.query):
        # Yay for optimizations!
        return QueryByKeys(self.query.model, queries, ordering)

    if len(queries) == 1:
        identifier = query_is_unique(self.query.model, queries[0])
        if identifier:
            # Yay for optimizations!
            return UniqueQuery(identifier, queries[0], self.query.model)

        return queries[0]
    else:
        return datastore.MultiQuery(queries, ordering)
def _sanity_check(self):
    """Reject distinct queries for which no projection columns exist."""
    distinct_without_projection = self.query.distinct and not self.query.columns
    if distinct_without_projection:
        raise NotSupportedError("Tried to perform distinct query when projection wasn't possible")
def __init__(self, connection, query, keys_only=False):
    """ Translate a Django query into the state needed to run it on the
        datastore: queried fields, ordering, projection, keys_only flag and
        the DNF-normalized where tree.

        Raises EmptyResultSet for a provably-empty where, and
        NotSupportedError for cross-join WHERE constraints.
    """
    self.original_query = query
    self.connection = connection
    self.limits = (query.low_mark, query.high_mark)
    self.results_returned = 0

    opts = query.get_meta()

    self.distinct = query.distinct
    self.distinct_values = set()
    self.distinct_on_field = None
    self.distinct_field_convertor = None
    self.queried_fields = []
    self.model = query.model
    self.pk_col = opts.pk.column
    self.is_count = query.aggregates
    self.extra_select = query.extra_select
    self._set_db_table()
    self._validate_query_is_possible(query)
    self.ordering = _convert_ordering(query)

    # If the query uses defer()/only() then we need to process deferred. We have to get all deferred columns
    # for all (concrete) inherited models and then only include columns if they appear in that list
    deferred_columns = {}
    query.deferred_to_data(deferred_columns, query.deferred_to_columns_cb)
    inherited_db_tables = [x._meta.db_table for x in get_concrete_parents(self.model)]
    only_load = list(chain(*[list(deferred_columns.get(x, [])) for x in inherited_db_tables]))

    if query.select:
        for x in query.select:
            if hasattr(x, "field"):
                # In Django 1.6+ 'x' above is a SelectInfo (which is a tuple subclass), whereas in 1.5 it's a tuple
                # in 1.6 x[1] == Field, but 1.5 x[1] == unicode (column name)
                if x.field is None:
                    column = x.col.col[1]  # This is the column we are getting
                    lookup_type = x.col.lookup_type

                    self.distinct_on_field = column

                    # This whole section of code is weird, and is probably better implemented as a custom Query type (like QueryByKeys)
                    # basically, appengine gives back dates as a time since the epoch, we convert it to a date, then floor it, then convert it back
                    # in our transform function. The transform is applied when the results are read back so that only distinct values are returned.
                    # this is very hacky...
                    if lookup_type in DATE_TRANSFORMS:
                        self.distinct_field_convertor = lambda value: DATE_TRANSFORMS[lookup_type](self.connection, value)
                    else:
                        raise CouldBeSupportedError("Unhandled lookup_type %s" % lookup_type)
                else:
                    column = x.field.column
            else:
                column = x[1]

            if only_load and column not in only_load:
                continue

            self.queried_fields.append(column)
    else:
        # If no specific fields were specified, select all fields if the query is distinct (as App Engine only supports
        # distinct on projection queries) or the ones specified by only_load
        self.queried_fields = [x.column for x in opts.fields if (x.column in only_load) or self.distinct]

    self.keys_only = keys_only or self.queried_fields == [opts.pk.column]

    # Projection queries don't return results unless all projected fields are
    # indexed on the model. This means if you add a field, and all fields on the model
    # are projectable, you will never get any results until you've resaved all of them.
    # Because it's not possible to detect this situation, we only try a projection query if a
    # subset of fields was specified (e.g. values_list('bananas')) which makes the behaviour a
    # bit more predictable. It would be nice at some point to add some kind of force_projection()
    # thing on a queryset that would do this whenever possible, but that's for the future, maybe.
    try_projection = (self.keys_only is False) and bool(self.queried_fields)

    if not self.queried_fields:
        # If we don't have any queried fields yet, it must have been an empty select and not a distinct
        # and not an only/defer, so get all the fields
        self.queried_fields = [x.column for x in opts.fields]

    self.excluded_pks = set()

    self.has_inequality_filter = False
    self.all_filters = []
    self.results = None

    self.gae_query = None

    projection_fields = []

    if try_projection:
        for field in self.queried_fields:
            # We don't include the primary key in projection queries...
            if field == self.pk_col:
                order_fields = set([x.strip("-") for x in self.ordering])

                if self.pk_col in order_fields or "pk" in order_fields:
                    # If we were ordering on __key__ we can't do a projection at all.
                    # BUGFIX: this previously assigned `self.projection_fields = []`
                    # (a never-read attribute), leaving the local list populated
                    # and the projection wrongly enabled; clear the local instead.
                    projection_fields = []
                    break
                continue

            # Text and byte fields aren't indexed, so we can't do a projection query
            f = get_field_from_column(self.model, field)
            if not f:
                raise CouldBeSupportedError("Attempting a cross-table select or dates query, or something?!")
            assert f  # If this happens, we have a cross-table select going on!  #FIXME
            db_type = f.db_type(connection)

            if db_type in ("bytes", "text", "list", "set"):
                projection_fields = []
                break

            projection_fields.append(field)

    self.projection = list(set(projection_fields)) or None
    if opts.parents:
        # Multi-table inheritance: projections are unsafe, disable them
        self.projection = None

    if isinstance(query.where, EmptyWhere):
        # Empty where means return nothing!
        raise EmptyResultSet()
    else:
        where_tables = list(set([x[0] for x in query.where.get_cols() if x[0]]))
        if where_tables and where_tables != [query.model._meta.db_table]:
            raise NotSupportedError("Cross-join WHERE constraints aren't supported: %s" % query.where.get_cols())

    from dnf import parse_dnf
    self.where, columns, self.excluded_pks = parse_dnf(query.where, self.connection, ordering=self.ordering)

    # DISABLE PROJECTION IF WE ARE FILTERING ON ONE OF THE PROJECTION_FIELDS
    for field in self.projection or []:
        if field in columns:
            self.projection = None
            break

    try:
        # If the PK was queried, we switch it in our queried
        # fields store with __key__
        pk_index = self.queried_fields.index(self.pk_col)
        self.queried_fields[pk_index] = "__key__"
    except ValueError:
        pass