def window_frame_range_start_end(self, start=None, end=None):
    start_, end_ = super().window_frame_range_start_end(start, end)
    if (start and start < 0) or (end and end > 0):
        raise NotSupportedError(
            'PostgreSQL only supports UNBOUNDED together with PRECEDING '
            'and FOLLOWING.'
        )
    return start_, end_
def as_sql(self, compiler, connection):
    if not connection.features.supports_json_field_contains:
        raise NotSupportedError(
            'contains lookup is not supported on this database backend.'
        )
    lhs, lhs_params = self.process_lhs(compiler, connection)
    rhs, rhs_params = self.process_rhs(compiler, connection)
    params = tuple(lhs_params) + tuple(rhs_params)
    return 'JSON_CONTAINS(%s, %s)' % (lhs, rhs), params
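# Hedged usage sketch for the contains lookup compiled above. Dog and its
# JSONField "data" are illustrative names, not taken from the snippet.
from django.db import NotSupportedError

try:
    collies = Dog.objects.filter(data__contains={"breed": "collie"})
    list(collies)  # the lookup is compiled (and may raise) at execution time
except NotSupportedError:
    # Raised on backends where features.supports_json_field_contains is
    # False (e.g. Oracle or SQLite), exactly the check in as_sql() above.
    pass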
def as_postgresql(self, compiler, connection, **extra_context):
    function = None
    if self.geo_field.geodetic(connection) and not self.source_is_geography():
        raise NotSupportedError(
            "ST_Perimeter cannot use a non-projected non-geography field."
        )
    dim = min(f.dim for f in self.get_source_fields())
    if dim > 2:
        function = connection.ops.perimeter3d
    return super().as_sql(compiler, connection, function=function, **extra_context)
def window_frame_rows_start_end(self, start=None, end=None):
    """
    Return SQL for start and end points in an OVER clause window frame.
    """
    if not self.connection.features.supports_over_clause:
        raise NotSupportedError('This backend does not support window expressions.')
    return self.window_frame_start(start), self.window_frame_end(end)
def subtract_temporals(self, internal_type, lhs, rhs):
    if self.connection.features.supports_temporal_subtraction:
        lhs_sql, lhs_params = lhs
        rhs_sql, rhs_params = rhs
        return "(%s - %s)" % (lhs_sql, rhs_sql), (*lhs_params, *rhs_params)
    raise NotSupportedError(
        "This backend does not support %s subtraction." % internal_type
    )
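# Hedged sketch of the ORM expression that ends up in subtract_temporals().
# The Event model and its starts_at/ends_at fields are assumptions for
# illustration, not part of the snippet above.
from django.db.models import DurationField, ExpressionWrapper, F

elapsed = ExpressionWrapper(
    F("ends_at") - F("starts_at"),  # hypothetical DateTimeField columns
    output_field=DurationField(),
)
# Backends whose features.supports_temporal_subtraction is False raise
# NotSupportedError via the hook above when this annotation is compiled.
qs = Event.objects.annotate(elapsed=elapsed)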
def as_sql(self, compiler, connection, **extra_context):
    if not connection.features.supports_area_geodetic and self.geo_field.geodetic(
        connection
    ):
        raise NotSupportedError(
            "Area on geodetic coordinate systems not supported."
        )
    return super().as_sql(compiler, connection, **extra_context)
def simple_select_related(self, *fields: str) -> 'SalesforceQuerySet[_T]':
    if DJANGO_20_PLUS:
        raise NotSupportedError(
            "Obsoleted method .simple_select_related(), use .select_related() instead"
        )
    warnings.warn(
        "Obsoleted method .simple_select_related(), use .select_related() instead"
    )
    return self.select_related(*fields)
def as_sql(self, compiler, connection, **extra_context):
    if (
        self.geo_field.geodetic(connection)
        and not connection.features.supports_length_geodetic
    ):
        raise NotSupportedError(
            "This backend doesn't support Length on geodetic fields"
        )
    return super().as_sql(compiler, connection, **extra_context)
def process_distance(self, compiler, connection):
    dist_param = self.rhs_params[0]
    if (
        not connection.features.supports_dwithin_distance_expr
        and hasattr(dist_param, 'resolve_expression')
        and not isinstance(dist_param, Distance)
    ):
        raise NotSupportedError(
            'This backend does not support expressions for specifying '
            'distance in the dwithin lookup.'
        )
    return super().process_distance(compiler, connection)
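# Hedged sketch of the dwithin usage the check above guards. The City model,
# its point field, the reference geometry, and the radius column are invented
# for illustration.
from django.contrib.gis.geos import Point
from django.contrib.gis.measure import D
from django.db.models import F

ref = Point(2.35, 48.85, srid=4326)  # hypothetical reference point
# A plain Distance value as the tolerance is handled by every GIS backend:
City.objects.filter(point__dwithin=(ref, D(km=5)))
# A column expression as the distance only works when
# features.supports_dwithin_distance_expr is True; otherwise
# process_distance() above raises NotSupportedError.
City.objects.filter(point__dwithin=(ref, F("radius")))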
def __init__(self, backend, db_alias=None):
    self.backend = backend
    self.name = self.backend.index_name
    self.db_alias = DEFAULT_DB_ALIAS if db_alias is None else db_alias
    self.connection = connections[self.db_alias]
    if self.connection.vendor != 'postgresql':
        raise NotSupportedError(
            'You must select a PostgreSQL database to use PostgreSQL search.'
        )
    self.entries = IndexEntry._default_manager.using(self.db_alias)
def window_frame_range_start_end(self, start=None, end=None):
    start_, end_ = self.window_frame_rows_start_end(start, end)
    if (
        self.connection.features.only_supports_unbounded_with_preceding_and_following
        and ((start and start < 0) or (end and end > 0))
    ):
        raise NotSupportedError(
            '%s only supports UNBOUNDED together with PRECEDING and '
            'FOLLOWING.' % self.connection.display_name
        )
    return start_, end_
def simple_select_related(self, *fields):
    if DJANGO_20_PLUS:
        raise NotSupportedError(
            "Obsoleted method .simple_select_related(), use .select_related() instead"
        )
    warnings.warn(
        "Obsoleted method .simple_select_related(), use .select_related() instead"
    )
    return self.select_related(*fields)
def _determine_query_kind_17(query):
    from django.db.models.sql.aggregates import Count
    if query.aggregates:
        if None in query.aggregates and isinstance(query.aggregates[None], Count):
            return "COUNT"
        else:
            raise NotSupportedError(
                "Unsupported aggregate: {}".format(query.aggregates)
            )
    return "SELECT"
def distinct_sql(self, fields, params):
    """
    Return an SQL DISTINCT clause which removes duplicate rows from the
    result set. If any fields are given, only check the given fields for
    duplicates.
    """
    if fields:
        raise NotSupportedError(
            'DISTINCT ON fields is not supported by this database backend'
        )
    else:
        return ['DISTINCT'], []
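# Hedged sketch of what reaches the base distinct_sql() above; Author is a
# hypothetical model used only for illustration.
from django.db import NotSupportedError

Author.objects.distinct()  # compiles to a plain DISTINCT on every backend
try:
    list(Author.objects.distinct("name"))  # DISTINCT ON, PostgreSQL-only
except NotSupportedError:
    # Raised by distinct_sql() on backends that don't override it to
    # support field arguments.
    pass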
def check_supported(self, schema_editor):
    if (
        self.include
        and self.index_type.lower() == "spgist"
        and not schema_editor.connection.features.supports_covering_spgist_indexes
    ):
        raise NotSupportedError(
            "Covering exclusion constraints using an SP-GiST index "
            "require PostgreSQL 14+."
        )
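# Hedged sketch of a constraint definition that would reach check_supported()
# above; the Reservation model and its fields are invented for illustration.
from django.contrib.postgres.constraints import ExclusionConstraint
from django.contrib.postgres.fields import DateTimeRangeField, RangeOperators
from django.db import models

class Reservation(models.Model):
    room = models.CharField(max_length=50)
    period = DateTimeRangeField()  # hypothetical booking window

    class Meta:
        constraints = [
            # include=[...] combined with an SP-GiST index needs
            # PostgreSQL 14+, which is what check_supported() enforces.
            ExclusionConstraint(
                name="exclude_overlapping_reservations",
                index_type="SPGiST",
                expressions=[("period", RangeOperators.OVERLAPS)],
                include=["room"],
            ),
        ]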
def __init__(self, backend, model, db_alias=None):
    self.backend = backend
    self.model = model
    if db_alias is None:
        db_alias = DEFAULT_DB_ALIAS
    if connections[db_alias].vendor != 'postgresql':
        raise NotSupportedError(
            'You must select a PostgreSQL database to use PostgreSQL search.'
        )
    self.db_alias = db_alias
    self.name = model._meta.label
    self.search_fields = self.model.get_search_fields()
def __enter__(self):
    # Some SQLite schema alterations need foreign key constraints to be
    # disabled. Enforce it here for the duration of the schema edition.
    if not self.connection.disable_constraint_checking():
        raise NotSupportedError(
            'SQLite schema editor cannot be used while foreign key '
            'constraint checks are enabled. Make sure to disable them '
            'before entering a transaction.atomic() context because '
            'SQLite does not support disabling them in the middle of '
            'a multi-statement transaction.'
        )
    return super().__enter__()
def as_sql(self, qn, connection):
    if '.postgresql' in connection.settings_dict['ENGINE']:
        return super().as_sql(qn, connection)
    if '.mysql' in connection.settings_dict['ENGINE']:
        lhs, lhs_params = self.process_lhs(qn, connection)
        key_name = self.rhs
        path = '$.{}'.format(json.dumps(key_name))
        params = lhs_params + [path]
        return "JSON_CONTAINS_PATH({}, 'one', %s)".format(lhs), params
    raise NotSupportedError(
        'Lookup not supported for %s' % connection.settings_dict['ENGINE']
    )
def get_prep_lookup(self):
    field_name = self.lhs.field.name
    if field_name not in ["host", "network"]:
        raise NotSupportedError(
            f"Lookup only provided on the host and network fields, not {field_name}."
        )
    if field_name == "network" and self.lookup_name in [
        "net_host", "net_host_contained", "net_in"
    ]:
        raise NotSupportedError(
            f"Lookup for network field does not include the {self.lookup_name} lookup."
        )
    if field_name == "host" and self.lookup_name not in [
        "net_host", "net_host_contained", "net_in"
    ]:
        raise NotSupportedError(
            f"Lookup for host field does not include the {self.lookup_name} lookup."
        )
    self.ip = get_ip_info(field_name, self.rhs)
    return str(self.ip.ip)
def __init__(self, backend, db_alias=None): self.backend = backend self.name = self.backend.index_name self.db_alias = DEFAULT_DB_ALIAS if db_alias is None else db_alias self.connection = connections[self.db_alias] if self.connection.vendor != "sqlite": raise NotSupportedError( "You must select a SQLite database " "to use the SQLite search backend." ) self.entries = IndexEntry._default_manager.using(self.db_alias)
def _alter_field(self, model, old_field, new_field, old_type, new_type,
                 old_db_params, new_db_params, strict=False):
    # Spanner requires dropping indexes before changing the nullability
    # of a column.
    nullability_changed = old_field.null != new_field.null
    if nullability_changed:
        index_names = self._constraint_names(
            model, [old_field.column], index=True,
        )
        if index_names and not old_field.db_index:
            raise NotSupportedError(
                "Changing nullability of a field with an index other than "
                "Field(db_index=True) isn't yet supported.")
        if len(index_names) > 1:
            raise NotSupportedError(
                "Changing nullability of a field with more than one "
                "index isn't yet supported.")
        for index_name in index_names:
            self.execute(self._delete_index_sql(model, index_name))
    super()._alter_field(
        model, old_field, new_field, old_type, new_type, old_db_params,
        new_db_params, strict=False,
    )
    # Recreate the index that was dropped earlier.
    if nullability_changed and new_field.db_index:
        self.execute(self._create_index_sql(model, [new_field]))
def alter_field(self, model, old_field, new_field, strict=False):
    if not self._field_should_be_altered(old_field, new_field):
        return
    old_field_name = old_field.name
    table_name = model._meta.db_table
    _, old_column_name = old_field.get_attname_column()
    if (
        new_field.name != old_field_name
        and not self.connection.features.supports_atomic_references_rename
        and self._is_referenced_by_fk_constraint(
            table_name, old_column_name, ignore_self=True
        )
    ):
        if self.connection.in_atomic_block:
            raise NotSupportedError(
                (
                    "Renaming the %r.%r column while in a transaction is not "
                    "supported on SQLite < 3.26 because it would break referential "
                    "integrity. Try adding `atomic = False` to the Migration class."
                )
                % (model._meta.db_table, old_field_name)
            )
        with atomic(self.connection.alias):
            super().alter_field(model, old_field, new_field, strict=strict)
            # Follow SQLite's documented procedure for performing changes
            # that don't affect the on-disk content.
            # https://sqlite.org/lang_altertable.html#otheralter
            with self.connection.cursor() as cursor:
                schema_version = cursor.execute("PRAGMA schema_version").fetchone()[0]
                cursor.execute("PRAGMA writable_schema = 1")
                references_template = ' REFERENCES "%s" ("%%s") ' % table_name
                new_column_name = new_field.get_attname_column()[1]
                search = references_template % old_column_name
                replacement = references_template % new_column_name
                cursor.execute(
                    "UPDATE sqlite_master SET sql = replace(sql, %s, %s)",
                    (search, replacement),
                )
                cursor.execute("PRAGMA schema_version = %d" % (schema_version + 1))
                cursor.execute("PRAGMA writable_schema = 0")
                # The integrity check will raise an exception and rollback
                # the transaction if the sqlite_master updates corrupt the
                # database.
                cursor.execute("PRAGMA integrity_check")
        # Perform a VACUUM to refresh the database representation from
        # the sqlite_master table.
        with self.connection.cursor() as cursor:
            cursor.execute("VACUUM")
    else:
        super().alter_field(model, old_field, new_field, strict=strict)
def __init__(self, backend, db_alias=None):
    self.backend = backend
    self.name = self.backend.index_name
    self.db_alias = DEFAULT_DB_ALIAS if db_alias is None else db_alias
    self.connection = connections[self.db_alias]
    if self.connection.vendor != 'postgresql':
        raise NotSupportedError(
            'You must select a PostgreSQL database to use PostgreSQL search.'
        )
    # Whether to allow adding items via the faster upsert method available
    # in PostgreSQL >= 9.5.
    self._enable_upsert = (self.connection.pg_version >= 90500)
    self.entries = IndexEntry._default_manager.using(self.db_alias)
def as_sql(self, qn, connection):
    if '.postgresql' in connection.settings_dict['ENGINE']:
        return super().as_sql(qn, connection)
    if '.mysql' in connection.settings_dict['ENGINE']:
        lhs, lhs_params = self.process_lhs(qn, connection)
        rhs, rhs_params = self.process_rhs(qn, connection)
        for i, p in enumerate(rhs_params):
            rhs_params[i] = p.dumps(p.adapted)  # Convert JSONAdapter to str
        params = rhs_params + lhs_params
        return 'JSON_CONTAINS({}, {})'.format(rhs, lhs), params
    raise NotSupportedError(
        'Lookup not supported for %s' % connection.settings_dict['ENGINE']
    )
def create_collation(self, schema_editor):
    if (
        self.deterministic is False
        and not schema_editor.connection.features.supports_non_deterministic_collations
    ):
        raise NotSupportedError(
            'Non-deterministic collations require PostgreSQL 12+.'
        )
    if (
        self.provider != 'libc'
        and not schema_editor.connection.features.supports_alternate_collation_providers
    ):
        raise NotSupportedError('Non-libc providers require PostgreSQL 10+.')
    args = {'locale': schema_editor.quote_name(self.locale)}
    if self.provider != 'libc':
        args['provider'] = schema_editor.quote_name(self.provider)
    if self.deterministic is False:
        args['deterministic'] = 'false'
    schema_editor.execute('CREATE COLLATION %(name)s (%(args)s)' % {
        'name': schema_editor.quote_name(self.name),
        'args': ', '.join(f'{option}={value}' for option, value in args.items()),
    })
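# Hedged sketch of the migration operation that would invoke create_collation()
# above; the app label, dependency, collation name, and locale are illustrative.
from django.contrib.postgres.operations import CreateCollation
from django.db import migrations

class Migration(migrations.Migration):
    dependencies = [("myapp", "0001_initial")]  # hypothetical dependency

    operations = [
        # deterministic=False hits the first check above (PostgreSQL 12+);
        # provider="icu" hits the second (PostgreSQL 10+).
        CreateCollation(
            "case_insensitive",
            provider="icu",
            locale="und-u-ks-level2",
            deterministic=False,
        ),
    ]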
def add_annotation(self, column, annotation):
    # The Trunc annotation class doesn't exist in Django 1.8, hence we
    # compare by strings rather than importing the class to compare it.
    name = annotation.__class__.__name__
    if name == "Count":
        return  # Handled elsewhere
    if name not in ("Trunc", "Col", "Date", "DateTime"):
        raise NotSupportedError("Unsupported annotation %s" % name)

    def process_date(value, lookup_type):
        value = ensure_datetime(value)
        ret = datetime.datetime.utcfromtimestamp(0)
        POSSIBLE_LOOKUPS = ("year", "month", "day", "hour", "minute", "second")
        ret = ret.replace(
            value.year,
            value.month if lookup_type in POSSIBLE_LOOKUPS[1:] else ret.month,
            value.day if lookup_type in POSSIBLE_LOOKUPS[2:] else ret.day,
            value.hour if lookup_type in POSSIBLE_LOOKUPS[3:] else ret.hour,
            value.minute if lookup_type in POSSIBLE_LOOKUPS[4:] else ret.minute,
            value.second if lookup_type in POSSIBLE_LOOKUPS[5:] else ret.second,
        )
        return ret

    # Abuse the extra_select functionality.
    if name == "Col":
        self.extra_selects.append((column, (lambda x: x, [column])))
    elif name in ("Trunc", "Date", "DateTime"):
        # Trunc stores the source column and the lookup type differently
        # from Date, which is why the getattr() fallbacks are needed here.
        lookup_column = (
            annotation.lhs.output_field.column
            if name == "Trunc"
            else getattr(annotation, "lookup", column)
        )
        lookup_type = getattr(annotation, "lookup_type", getattr(annotation, "kind", None))
        assert lookup_type
        self.extra_selects.append(
            (column, (lambda x: process_date(x, lookup_type), [lookup_column]))
        )
        # Override the projection so that we only get this column.
        self.columns = set([lookup_column])
def alter_db_table(self, model, old_db_table, new_db_table,
                   disable_constraints=True):
    if (
        not self.connection.features.supports_atomic_references_rename
        and disable_constraints
        and self._is_referenced_by_fk_constraint(old_db_table)
    ):
        if self.connection.in_atomic_block:
            raise NotSupportedError((
                'Renaming the %r table while in a transaction is not '
                'supported on SQLite < 3.26 because it would break referential '
                'integrity. Try adding `atomic = False` to the Migration class.'
            ) % old_db_table)
        self.connection.enable_constraint_checking()
        super().alter_db_table(model, old_db_table, new_db_table)
        self.connection.disable_constraint_checking()
    else:
        super().alter_db_table(model, old_db_table, new_db_table)
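# The error message above points at the usual workaround. A hedged sketch of
# a migration that opts out of the transaction; app, model, and dependency
# names are invented for illustration.
from django.db import migrations

class Migration(migrations.Migration):
    # Run outside a transaction so the SQLite < 3.26 rename path above can
    # toggle foreign key constraint checking.
    atomic = False

    dependencies = [("library", "0002_auto")]  # hypothetical dependency

    operations = [
        migrations.RenameModel(old_name="Book", new_name="Publication"),
    ]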
def check_expression_support(self, expression):
    bad_fields = (models.DateField, models.DateTimeField, models.TimeField)
    bad_aggregates = (models.Sum, models.Avg, models.Variance, models.StdDev)
    if isinstance(expression, bad_aggregates):
        for expr in expression.get_source_expressions():
            try:
                output_field = expr.output_field
            except (AttributeError, FieldError):
                # Not every subexpression has an output_field which is fine
                # to ignore.
                pass
            else:
                if isinstance(output_field, bad_fields):
                    raise NotSupportedError(
                        'You cannot use Sum, Avg, StdDev, and Variance '
                        'aggregations on date/time fields in sqlite3 '
                        'since date/time is saved as text.'
                    )
    if (
        isinstance(expression, models.Aggregate)
        and expression.distinct
        and len(expression.source_expressions) > 1
    ):
        raise NotSupportedError(
            "SQLite doesn't support DISTINCT on aggregate functions "
            "accepting multiple arguments."
        )
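# Hedged sketch of a query the first check above rejects on SQLite; the Event
# model and its starts_at column are invented for illustration.
from django.db import NotSupportedError
from django.db.models import Sum

try:
    Event.objects.aggregate(total=Sum("starts_at"))  # hypothetical DateTimeField
except NotSupportedError:
    # SQLite stores date/time values as text, so Sum/Avg/StdDev/Variance
    # over them is refused by check_expression_support() above.
    pass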
def explain_query_prefix(self, format=None, **options):
    if not self.connection.features.supports_explaining_query_execution:
        raise NotSupportedError(
            'This backend does not support explaining query execution.'
        )
    if format:
        supported_formats = self.connection.features.supported_explain_formats
        normalized_format = format.upper()
        if normalized_format not in supported_formats:
            msg = '%s is not a recognized format.' % normalized_format
            if supported_formats:
                msg += ' Allowed formats: %s' % ', '.join(sorted(supported_formats))
            raise ValueError(msg)
    if options:
        raise ValueError('Unknown options: %s' % ', '.join(sorted(options.keys())))
    return self.explain_prefix
def walk(node, negated):
    if node.negated:
        negated = not negated
    for child in node.children[:]:
        if (negated and child.operator == "=") or child.operator in (">", "<", ">=", "<="):
            inequality_fields.add(child.column)
        walk(child, negated)
    if len(inequality_fields) > 1:
        raise NotSupportedError(
            "You can only have one inequality filter per query on the datastore. "
            "Filters were: %s" % ' '.join(inequality_fields)
        )