def __init__(self, connection, query, keys_only=False):
    self.where = None
    self.original_query = query
    self.connection = connection
    self.limits = (query.low_mark, query.high_mark)
    self.results_returned = 0

    opts = query.get_meta()

    self.distinct = query.distinct
    self.distinct_values = set()
    self.distinct_on_field = None
    self.distinct_field_convertor = None
    self.queried_fields = []
    self.model = query.model
    self.pk_col = opts.pk.column
    self.is_count = query.aggregates
    self.extra_select = query.extra_select
    self._set_db_table()

    try:
        self._validate_query_is_possible(query)
        self.ordering = _convert_ordering(query)
    except NotSupportedError as e:
        # If we can detect here, or when parsing the WHERE tree, that a query is unsupported,
        # we set this flag and then throw NotSupportedError when execute is called.
        # This will then wrap the exception in Django's NotSupportedError, meaning users
        # only need to catch that one, and not both Django's and ours.
        self.unsupported_query_message = str(e)
        return
    else:
        self.unsupported_query_message = ""

    # If the query uses defer()/only() then we need to process the deferred columns. We have to
    # get all deferred columns for all (concrete) inherited models and then only include columns
    # if they appear in that list.
    deferred_columns = {}
    query.deferred_to_data(deferred_columns, query.deferred_to_columns_cb)
    inherited_db_tables = [x._meta.db_table for x in get_concrete_parents(self.model)]
    only_load = list(chain(*[list(deferred_columns.get(x, [])) for x in inherited_db_tables]))

    if query.select:
        for x in query.select:
            if hasattr(x, "field"):
                # In Django 1.6+ 'x' is a SelectInfo (which is a tuple subclass), whereas in 1.5
                # it's a plain tuple. In 1.6 x[1] is a Field, but in 1.5 x[1] is a unicode column name.
                if x.field is None:
                    column = x.col.col[1]  # This is the column we are getting
                    lookup_type = x.col.lookup_type

                    self.distinct_on_field = column

                    # This whole section of code is weird, and is probably better implemented as a
                    # custom Query type (like QueryByKeys). Basically, App Engine gives back dates
                    # as a time since the epoch; we convert that to a date, floor it, then convert
                    # it back in our transform function. The transform is applied when the results
                    # are read back, so that only distinct values are returned. This is very hacky...
                    if lookup_type in DATE_TRANSFORMS:
                        self.distinct_field_convertor = lambda value: DATE_TRANSFORMS[lookup_type](self.connection, value)
                    else:
                        raise CouldBeSupportedError("Unhandled lookup_type %s" % lookup_type)
                else:
                    column = x.field.column
            else:
                column = x[1]

            if only_load and column not in only_load:
                continue

            self.queried_fields.append(column)
    else:
        # If no specific fields were specified, select all fields if the query is distinct (as App
        # Engine only supports distinct on projection queries), otherwise only the ones in only_load.
        self.queried_fields = [x.column for x in opts.fields if (x.column in only_load) or self.distinct]

    self.keys_only = keys_only or self.queried_fields == [opts.pk.column]

    # Projection queries don't return results unless all projected fields are indexed on the
    # model. This means that if you add a field, and all fields on the model are projectable,
    # you will never get any results until you've resaved all of them.
    #
    # Because it's not possible to detect this situation, we only try a projection query if a
    # subset of fields was specified (e.g. values_list('bananas')), which makes the behaviour a
    # bit more predictable. It would be nice at some point to add some kind of force_projection()
    # thing on a queryset that would do this whenever possible, but that's for the future, maybe.
    try_projection = (self.keys_only is False) and bool(self.queried_fields)

    if not self.queried_fields:
        # If we don't have any queried fields yet, it must have been an empty select, not a
        # distinct and not an only()/defer(), so get all the fields.
        self.queried_fields = [x.column for x in opts.fields]

    self.excluded_pks = set()

    self.has_inequality_filter = False
    self.all_filters = []
    self.results = None

    self.gae_query = None

    projection_fields = []

    if try_projection:
        for field in self.queried_fields:
            # We don't include the primary key in projection queries...
            if field == self.pk_col:
                order_fields = set([x.strip("-") for x in self.ordering])

                if self.pk_col in order_fields or "pk" in order_fields:
                    # If we were ordering on __key__ we can't do a projection at all
                    projection_fields = []
                    break
                continue

            # Text and byte fields aren't indexed, so we can't do a projection query
            f = get_field_from_column(self.model, field)
            if not f:
                # If this happens, we have a cross-table select going on!  FIXME
                raise CouldBeSupportedError("Attempting a cross-table select or dates query, or something?!")

            db_type = f.db_type(connection)

            if db_type in ("bytes", "text", "list", "set"):
                projection_fields = []
                break

            projection_fields.append(field)

    self.projection = list(set(projection_fields)) or None
    if opts.parents:
        self.projection = None

    if isinstance(query.where, EmptyWhere):
        # An empty where means return nothing!
        raise EmptyResultSet()
    else:
        try:
            where_tables = _get_tables_from_where(query.where)
        except TypeError:
            # This exception is thrown by get_group_by_cols if one of the constraints is a
            # SubQueryConstraint. Yeah, we can't do that.
            self.unsupported_query_message = "Subquery WHERE constraints aren't supported"
            return

        if where_tables and where_tables != [query.model._meta.db_table]:
            # Mark this query as unsupported and return
            self.unsupported_query_message = "Cross-join WHERE constraints aren't supported: %s" % _cols_from_where_node(query.where)
            return

        from dnf import parse_dnf
        try:
            self.where, columns, self.excluded_pks = parse_dnf(query.where, self.connection, ordering=self.ordering)
        except NotSupportedError as e:
            # Mark this query as unsupported and return
            self.unsupported_query_message = str(e)
            return

    # Disable the projection if we are filtering on one of the projected fields
    for field in self.projection or []:
        if field in columns:
            self.projection = None
            break

    try:
        # If the PK was queried, we switch it in our queried fields store with __key__
        pk_index = self.queried_fields.index(self.pk_col)
        self.queried_fields[pk_index] = "__key__"
    except ValueError:
        pass
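# A minimal sketch (purely illustrative, not part of this backend: the real
# DATE_TRANSFORMS table is defined elsewhere and may differ) of the kind of
# transform that self.distinct_field_convertor applies above. The datastore hands
# dates back as microseconds since the epoch, so a transform converts that to a
# datetime and floors it to the requested precision before de-duplication.
def _example_month_transform(connection, value):
    """Illustrative only: floor an epoch-microseconds value to the start of its month."""
    from datetime import datetime
    dt = datetime.utcfromtimestamp(value / 1000000)
    return datetime(dt.year, dt.month, 1)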
def entity_matches_query(entity, query):
    """
        Return True if the entity would potentially be returned by the datastore query
    """
    from djangae.db.backends.appengine.dbapi import CouldBeSupportedError

    OPERATORS = {
        "=": lambda x, y: x == y,
        "<": lt,
        ">": gt,
        "<=": lte,
        ">=": gte
    }

    queries = [query]
    if isinstance(query, datastore.MultiQuery):
        raise CouldBeSupportedError(
            "We just need to separate the multiquery into 'queries' then everything should work"
        )

    for query in queries:
        comparisons = chain(
            [("_Query__kind", "=", "_Query__kind")],
            [tuple(x.split(" ") + [x]) for x in query.keys()]
        )

        for ent_attr, op, query_attr in comparisons:
            if ent_attr == "__key__":
                continue

            op = OPERATORS[op]  # We want this to throw if there's some op we don't know about

            if ent_attr == "_Query__kind":
                ent_attr = entity.kind()
            else:
                ent_attr = entity.get(ent_attr)

            if callable(ent_attr):
                # entity.kind() is a callable, so we need this to save special-casing it in a
                # more ugly way
                ent_attr = ent_attr()

            if not isinstance(query_attr, (list, tuple)):
                query_attrs = [query_attr]
            else:
                # The query value can be a list of ANDed values
                query_attrs = query_attr

            query_attrs = (
                getattr(query, x) if x == "_Query__kind" else query.get(x)
                for x in query_attrs
            )

            if not isinstance(ent_attr, (list, tuple)):
                ent_attr = [ent_attr]

            matches = False
            for query_attr in query_attrs:  # e.g. [22, 23]
                # If any of the values don't match then this query doesn't match
                if not any(op(attr, query_attr) for attr in ent_attr):
                    matches = False
                    break
                else:
                    # One of the ent_attrs matches the query_attrs
                    matches = True

            if not matches:
                # One of the AND values didn't match
                break
        else:
            # If we got through the loop without breaking, then the entity matches
            return True

    return False
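# Usage sketch (the kind name and property values here are invented, and this helper
# is illustrative rather than part of the backend): entity_matches_query lets us
# check, without a datastore RPC, whether an entity we already hold (e.g. one pulled
# from a cache) would be returned by a single, non-Multi datastore query.
def _example_entity_matches_query_usage():
    """Illustrative only; assumes a datastore environment (e.g. a test bed) is active."""
    entity = datastore.Entity("TestUser")
    entity["username"] = "jbloggs"

    query = datastore.Query("TestUser")
    query["username ="] = "jbloggs"

    return entity_matches_query(entity, query)  # True for this entity/query pair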
def parse_constraint(child, connection, negated=False):
    if isinstance(child, tuple):
        # First, unpack the constraint
        constraint, op, annotation, value = child
        was_list = isinstance(value, (list, tuple))
        if isinstance(value, query.Query):
            value = value.get_compiler(connection.alias).as_sql()[0].execute()
        else:
            packed, value = constraint.process(op, value, connection)
            alias, column, db_type = packed
            field = constraint.field
    else:
        # Django 1.7+
        field = child.lhs.target
        column = child.lhs.target.column
        op = child.lookup_name
        value = child.rhs
        annotation = value

        was_list = isinstance(value, (list, tuple))

        if isinstance(value, query.Query):
            value = value.get_compiler(connection.alias).as_sql()[0].execute()
        elif value != []:
            value = child.lhs.output_field.get_db_prep_lookup(
                child.lookup_name, child.rhs, connection, prepared=True)

    is_pk = field and field.primary_key

    if column == "id" and op == "iexact" and is_pk and isinstance(field, AutoField):
        # When a new instance is created, the automatic primary key 'id' does not generate
        # '_idx_iexact_id'. As the primary key 'id' (an AutoField) is an integer and is always
        # case-insensitive, we can handle an 'id__iexact=' query by using 'exact' rather
        # than 'iexact'.
        op = "exact"

    if field and field.db_type(connection) in ("bytes", "text"):
        raise NotSupportedError("Text and Blob fields are not indexed by the datastore, so you can't filter on them")

    if op not in REQUIRES_SPECIAL_INDEXES:
        # Don't convert if this op requires special indexes; it will be handled there
        if field:
            value = [connection.ops.prep_lookup_value(field.model, x, field, column=column) for x in value]

        # Don't ask me why, but on Django 1.6 constraint.process on isnull wipes out the value
        # (it returns an empty list), so we have to special-case this to use the annotation
        # value instead
        if op == "isnull":
            if annotation is not None:
                value = [annotation]

            if is_pk and value[0]:
                raise EmptyResultSet()

        if not was_list:
            value = value[0]
    else:
        if negated:
            raise CouldBeSupportedError("Special indexing does not currently support negated queries. See #80")

        if not was_list:
            value = value[0]

        add_special_index(field.model, column, op)  # Add the index if we can (e.g. on dev_appserver)
        if op not in special_indexes_for_column(field.model, column):
            raise RuntimeError(
                "There is a missing index in your djangaeidx.yaml - \n\n{0}:\n\t{1}: [{2}]".format(
                    field.model, column, op
                )
            )

        indexer = REQUIRES_SPECIAL_INDEXES[op]
        value = indexer.prep_value_for_query(value)
        column = indexer.indexed_column_name(column, value=value)
        op = indexer.prep_query_operator(op)

    return column, op, value
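# A minimal sketch (hypothetical: not one of the real indexers, which live in
# djangae's special-indexing module) of the three-method interface the special-index
# branch above relies on. Each op in REQUIRES_SPECIAL_INDEXES maps to an indexer
# that rewrites the value, column and operator so the filter runs as a plain lookup
# against a pre-computed index column; the "_idx_length_*" column name below is an
# invented example.
class ExampleLengthIndexer(object):
    """Illustrative only: turn a string-length filter into an exact match on an index column."""

    def prep_value_for_query(self, value):
        return int(value)  # the length being queried

    def indexed_column_name(self, column, value=None):
        return "_idx_length_{0}".format(column)  # hypothetical generated column

    def prep_query_operator(self, op):
        return "exact"  # a length filter becomes equality on the index column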
def alter_db_tablespace(self, model, old_db_tablespace, new_db_tablespace):
    """
        Moves a model's table between tablespaces.
    """
    # Say what? I guess this is the nearest equivalent to Datastore namespaces.
    raise CouldBeSupportedError()