def __init__(
    self,
    query_string: str,
    tables: typing.Optional[typing.List[sql_table.Table]] = None,
    distinct: bool = False,
    order_by: typing.Optional[OrderBy] = None,
    limit: typing.Optional[int] = None,
):
    self.distinct = distinct
    self._tables = tables or []
    self._order_by = order_by
    self.limit = limit
    self._filter_groups: typing.List[sql_table.FilterGroup] = []
    self._query_string = query_string

    assert len({col.position for col in self.columns}) == len(self.columns), (
        "All columns in an SQLQuery must have unique position values to avoid "
        "ambiguity.")

    queried_or_modified_tables = [
        table for table in self.tables if table.has_columns
    ]
    if len(queried_or_modified_tables) > 1:
        raise exceptions.NotSupportedError(
            "Due to limitations in how Fauna joins document sets, only queries that select "
            "or modify one table at a time are supported. Cross-table 'where' clauses "
            "via joins are still supported.")
def from_comparison_group(cls, comparison_group: token_groups.Comparison,
                          position: int = 0) -> Column:
    """Create a column from a Comparison token group.

    Params:
    -------
    comparison_group: Token group that contains the column identifier and updated value.

    Returns:
    --------
    A Column object with its value attribute set.
    """
    _, column_identifier = comparison_group.token_next_by(
        i=token_groups.Identifier)

    _, value_literal = comparison_group.token_next_by(t=token_types.Literal)
    if value_literal is None:
        raise exceptions.NotSupportedError(
            "Only updating to literal values is currently supported "
            "(e.g. can't assign one column's value to another column "
            "in a single UPDATE query)")

    column_value = common.extract_value(value_literal)

    column = cls.from_identifier(column_identifier, position)
    column.value = column_value

    return column
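
# Illustrative usage sketch (not part of the library): shows roughly how a
# SET-clause Comparison group could be extracted with sqlparse and turned into
# a Column. The expected values in the trailing comments are assumptions based
# on the parsing logic above.
def _example_column_from_set_clause():
    import sqlparse

    statement = sqlparse.parse("UPDATE users SET name = 'Bob'")[0]
    _, comparison = statement.token_next_by(i=token_groups.Comparison)

    column = Column.from_comparison_group(comparison)
    # Expected (roughly): column.name == "name", column.value == "Bob",
    # column.position == 0
    return column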
def _sort_document_set(document_set: QueryExpression,
                       order_by: typing.Optional[sql.OrderBy]):
    if order_by is None:
        return q.paginate(document_set, size=common.MAX_PAGE_SIZE)

    if len(order_by.columns) > 1:
        raise exceptions.NotSupportedError(
            "Ordering by multiple columns is not yet supported.")

    ordered_column = order_by.columns[0]
    assert ordered_column.table_name is not None

    ordered_document_set = q.join(
        document_set,
        q.index(
            common.index_name(
                ordered_column.table_name,
                column_name=ordered_column.name,
                index_type=common.IndexType.SORT,
            )),
    )
    if order_by.direction == sql.OrderDirection.DESC:
        ordered_document_set = q.reverse(ordered_document_set)

    return q.map_(
        q.lambda_(["_", "ref"], q.var("ref")),
        q.paginate(ordered_document_set, size=common.MAX_PAGE_SIZE),
    )
def add_join(
    self,
    foreign_table: Table,
    comparison_group: token_groups.Comparison,
    direction: JoinDirection,
):
    """Add a foreign reference via join."""
    setattr(self, f"{direction.value}_join_table", foreign_table)
    setattr(foreign_table, f"{REVERSE_JOIN[direction].value}_join_table", self)

    join_columns = Column.from_identifier_group(comparison_group)

    join_on_id = functools.reduce(
        lambda has_id, column: has_id or column.name == "ref", join_columns,
        False)
    if not join_on_id:
        raise exceptions.NotSupportedError(
            "Table joins are only permitted on IDs and foreign keys "
            f"that refer to IDs, but tried to join on {comparison_group.value}."
        )

    join_key = next(join_column for join_column in join_columns
                    if join_column.belongs_to_table(self))
    setattr(self, f"{direction.value}_join_key", join_key)

    foreign_join_key = next(join_column for join_column in join_columns
                            if join_column.belongs_to_table(foreign_table))
    setattr(
        foreign_table,
        f"{REVERSE_JOIN[direction].value}_join_key",
        foreign_join_key,
    )
def translate_alter(
        statement: token_groups.Statement) -> typing.List[QueryExpression]:
    """Translate an ALTER SQL query into an equivalent FQL query.

    Params:
    -------
    statement: An SQL statement returned by sqlparse.

    Returns:
    --------
    A list of FQL query expressions.
    """
    idx, table_keyword = statement.token_next_by(m=(token_types.Keyword,
                                                    "TABLE"))
    assert table_keyword is not None

    idx, table_identifier = statement.token_next_by(i=token_groups.Identifier,
                                                    idx=idx)
    table = sql.Table.from_identifier(table_identifier)

    _, second_alter = statement.token_next_by(m=(token_types.DDL, "ALTER"),
                                              idx=idx)
    _, column_keyword = statement.token_next_by(m=(token_types.Keyword,
                                                   "COLUMN"),
                                                idx=idx)

    if second_alter and column_keyword:
        return [_translate_alter_column(statement, table, idx)]

    raise exceptions.NotSupportedError(
        "For ALTER TABLE queries, only ALTER COLUMN is currently supported.")
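
# Illustrative usage sketch (not part of the library): the only ALTER form the
# translator currently accepts, per the logic above. The statement text is an
# assumption for demonstration purposes.
def _example_translate_alter():
    import sqlparse

    statement = sqlparse.parse(
        "ALTER TABLE users ALTER COLUMN name DROP DEFAULT")[0]
    # Expected: a single-element list wrapping the FQL produced by
    # _translate_alter_column; any other ALTER form raises NotSupportedError.
    return translate_alter(statement)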
def from_where_group(
    cls, where_group: typing.Optional[token_groups.Where]
) -> typing.List[FilterGroup]:
    """Parse a WHERE token to extract all filter groups contained therein.

    Params:
    -------
    where_group: A Where SQL token from sqlparse.

    Returns:
    --------
    A list of FilterGroup instances based on all conditions contained
    within the WHERE clause.
    """
    if where_group is None:
        return []

    _, between_keyword = where_group.token_next_by(m=(token_types.Keyword,
                                                      "BETWEEN"))
    if between_keyword is not None:
        raise exceptions.NotSupportedError(
            "BETWEEN not yet supported in WHERE clauses.")

    filter_groups = []
    where_filters: typing.List[Filter] = []
    idx = 0

    while True:
        idx, comparison = where_group.token_next_by(
            i=(token_groups.Comparison, token_groups.Identifier), idx=idx)
        if comparison is None:
            filter_groups.append(cls(filters=where_filters))
            break

        next_comparison_idx, next_comparison_keyword = where_group.token_next_by(
            m=[(token_types.Keyword, "AND"), (token_types.Keyword, "OR")],
            idx=idx)

        # I'm not sure what the exact cause is, but sometimes sqlparse has trouble
        # with grouping tokens into Comparison groups (seems to mostly be an issue
        # after the AND keyword, but not always).
        if isinstance(comparison, token_groups.Identifier):
            comparison = token_groups.Comparison(
                where_group.tokens[idx:next_comparison_idx])

        where_filter = Filter.from_comparison_group(comparison)
        where_filters.append(where_filter)

        if next_comparison_idx is None:
            filter_groups.append(cls(filters=where_filters))
            break

        if next_comparison_keyword.match(token_types.Keyword, "OR"):
            filter_groups.append(cls(filters=where_filters))
            where_filters = []

        idx = next_comparison_idx

    return filter_groups
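
# Illustrative sketch (not part of the library): how AND/OR conditions are
# expected to be grouped by the parsing loop above. The grouping described in
# the comments is an assumption based on that logic.
def _example_filter_groups_from_where():
    import sqlparse

    statement = sqlparse.parse(
        "SELECT users.name FROM users "
        "WHERE users.age = 30 AND users.job = 'cook' OR users.age = 40")[0]
    _, where_group = statement.token_next_by(i=token_groups.Where)

    filter_groups = FilterGroup.from_where_group(where_group)
    # Expected: two FilterGroup instances -- one containing the two AND-ed
    # filters (age = 30, job = 'cook') and one containing the filter that
    # follows OR (age = 40).
    return filter_groups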
def join_collections(sql_query: sql.SQLQuery) -> QueryExpression:
    """Join together multiple collections to return their documents in the response.

    Params:
    -------
    sql_query: SQLQuery object with information about the query params.

    Returns:
    --------
    An FQL query expression for joined and filtered documents.
    """
    tables = sql_query.tables
    order_by = sql_query.order_by
    from_table = tables[0]
    to_table = tables[-1]
    table_with_columns = next(table for table in tables if table.has_columns)

    if (order_by is not None
            and order_by.columns[0].table_name != table_with_columns.name):
        raise exceptions.NotSupportedError(
            "Fauna uses indexes for both joining and ordering of results, "
            "and we currently can only sort the principal table "
            "(i.e. the one whose columns are being selected or modified) in the query. "
            "You can sort on a column from the principal table, query one table at a time, "
            "or remove the ordering constraint.")

    if not any(sql_query.filter_groups):
        raise exceptions.NotSupportedError(
            "Joining tables without cross-table filters via the WHERE clause is not supported. "
            "Selecting columns from multiple tables is not supported either, "
            "so there's no performance gain from joining tables without cross-table conditions "
            "for filtering query results.")

    assert from_table.left_join_table is None

    intersection_queries = []

    for filter_group in sql_query.filter_groups:
        intersection_query = q.intersection(*[
            _build_intersecting_query(filter_group, None, table, direction)
            for table, direction in [(from_table, "right"), (to_table, "left")]
        ])
        intersection_queries.append(intersection_query)

    return q.union(*intersection_queries)
def _define_foreign_key_constraint(
    metadata: AllFieldMetadata, column_definition_group: token_groups.TokenList
) -> typing.Optional[AllFieldMetadata]:
    idx, foreign_keyword = column_definition_group.token_next_by(
        m=(token_types.Keyword, "FOREIGN"))
    if foreign_keyword is None:
        return None

    idx, _ = column_definition_group.token_next_by(m=(token_types.Name, "KEY"),
                                                   idx=idx)
    idx, foreign_key_column = column_definition_group.token_next_by(
        t=token_types.Name, idx=idx)
    column_name = foreign_key_column.value

    idx, _ = column_definition_group.token_next_by(m=(token_types.Keyword,
                                                      "REFERENCES"),
                                                   idx=idx)
    idx, reference_table = column_definition_group.token_next_by(
        t=token_types.Name, idx=idx)
    reference_table_name = reference_table.value

    idx, reference_column = column_definition_group.token_next_by(
        t=token_types.Name, idx=idx)
    reference_column_name = reference_column.value

    if any(
            metadata.get(column_name, EMPTY_DICT).get("references",
                                                      EMPTY_DICT)):
        raise exceptions.NotSupportedError(
            "Foreign keys with multiple references are not currently supported."
        )

    if reference_column_name != "id":
        raise exceptions.NotSupportedError(
            "Foreign keys referring to fields other than ID are not currently supported."
        )

    return {
        **metadata,
        column_name: {
            **DEFAULT_FIELD_METADATA,  # type: ignore
            **metadata.get(column_name, EMPTY_DICT),
            "references": {
                reference_table_name: reference_column_name
            },
        },
    }
def _build_insert_query(cls, statement: token_groups.Statement,
                        table: sql_table.Table) -> SQLQuery:
    _, function_group = statement.token_next_by(i=token_groups.Function)

    if function_group is None:
        raise exceptions.NotSupportedError(
            "INSERT INTO statements without column names are not currently supported."
        )

    _, column_name_group = function_group.token_next_by(
        i=token_groups.Parenthesis)
    _, column_name_identifiers = column_name_group.token_next_by(
        i=(token_groups.IdentifierList, token_groups.Identifier))

    _, value_group = statement.token_next_by(i=token_groups.Values)
    val_idx, column_value_group = value_group.token_next_by(
        i=token_groups.Parenthesis)

    _, additional_parenthesis_group = value_group.token_next_by(
        i=token_groups.Parenthesis, idx=val_idx)
    if additional_parenthesis_group is not None:
        raise exceptions.NotSupportedError(
            "INSERT for multiple rows is not supported yet.")

    _, column_value_identifiers = column_value_group.token_next_by(
        i=(token_groups.IdentifierList, token_groups.Identifier),
    )
    # If there's just one value in the VALUES clause, it doesn't get wrapped in an Identifier
    column_value_identifiers = column_value_identifiers or column_value_group

    idx = -1

    for column in sql_table.Column.from_identifier_group(
            column_name_identifiers):
        idx, column_value = column_value_identifiers.token_next_by(
            t=[token_types.Literal, token_types.Keyword], idx=idx)

        if column_value is None:
            raise exceptions.NotSupportedError(
                "Assigning values dynamically is not supported. "
                "You must use literal values only in INSERT statements.")

        column.value = common.extract_value(column_value)
        table.add_column(column)

    return cls(str(statement), tables=[table])
def from_identifier(cls, identifier: token_groups.Identifier,
                    position: int = 0) -> Column:
    """Create a column from an SQL identifier token.

    Params:
    -------
    identifier: SQL token with column label.

    Returns:
    --------
    A Column object based on the given identifier token.
    """
    idx, identifier_name = identifier.token_next_by(t=token_types.Name,
                                                    i=token_groups.Function)

    _, maybe_dot = identifier.token_next(idx, skip_ws=True, skip_cm=True)
    if maybe_dot is None or not maybe_dot.match(token_types.Punctuation, "."):
        table_name = None
        name = identifier_name.value
    else:
        table_name = identifier_name.value
        idx, column_name_token = identifier.token_next_by(t=token_types.Name,
                                                          idx=idx)
        # Fauna doesn't have an 'id' field, so we extract the ID value from the 'ref' included
        # in query responses, but we still want to map the field name to aliases as with other
        # fields for consistency when passing results to SQLAlchemy
        name = ("ref" if column_name_token.value == "id" else
                column_name_token.value)

    idx, as_keyword = identifier.token_next_by(m=(token_types.Keyword, "AS"),
                                               idx=idx)

    if as_keyword is None:
        alias = "id" if name == "ref" else name
    else:
        _, alias_identifier = identifier.token_next_by(
            i=token_groups.Identifier, idx=idx)
        alias = alias_identifier.value

    function_name: typing.Optional[Function] = None
    if re.match(COUNT_REGEX, name):
        function_name = Function.COUNT
    elif re.match(NOT_SUPPORTED_FUNCTION_REGEX, name):
        raise exceptions.NotSupportedError(
            "MIN, MAX, AVG, and SUM functions are not yet supported.")

    column_params: ColumnParams = {
        "table_name": table_name,
        "name": name,
        "alias": alias,
        "function_name": function_name,
        "position": position,
    }

    return Column(**column_params)
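
# Illustrative sketch (not part of the library): how a qualified, aliased column
# identifier is expected to map onto Column attributes, based on the logic
# above. The expected values in the comment are assumptions.
def _example_column_from_identifier():
    import sqlparse

    statement = sqlparse.parse("SELECT users.id AS user_id FROM users")[0]
    _, identifier = statement.token_next_by(i=token_groups.Identifier)

    column = Column.from_identifier(identifier)
    # Expected (roughly): table_name == "users", name == "ref" (Fauna has no
    # 'id' field), alias == "user_id", function_name is None.
    return column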
def _translate_create_index(statement: token_groups.Statement,
                            idx: int) -> typing.List[QueryExpression]:
    _, unique = statement.token_next_by(m=(token_types.Keyword, "UNIQUE"),
                                        idx=idx)
    idx, _ = statement.token_next_by(m=(token_types.Keyword, "ON"), idx=idx)
    _, index_params = statement.token_next_by(i=token_groups.Function, idx=idx)

    params_idx, table_identifier = index_params.token_next_by(
        i=token_groups.Identifier)
    table_name = table_identifier.value

    params_idx, column_identifiers = index_params.token_next_by(
        i=token_groups.Parenthesis, idx=params_idx)

    index_fields = [
        token.value for token in column_identifiers.flatten()
        if token.ttype == token_types.Name
    ]

    if len(index_fields) > 1:
        raise exceptions.NotSupportedError(
            "Creating indexes for multiple columns is not currently supported."
        )

    index_terms = [{
        "field": ["data", index_field]
    } for index_field in index_fields]
    index_name = fql.index_name(table_name,
                                column_name=index_fields[0],
                                index_type=fql.IndexType.TERM)

    return [
        q.do(
            q.if_(
                # We automatically create indices for some fields on collection creation,
                # so we can skip explicit index creation if it already exists.
                q.exists(q.index(index_name)),
                None,
                q.create_index({
                    "name": index_name,
                    "source": q.collection(table_name),
                    "terms": index_terms,
                    # 'unique' is a sqlparse token (or None), so convert it to a
                    # plain boolean before handing it to Fauna.
                    "unique": unique is not None,
                }),
            ),
            q.let(
                {"collection": q.collection(table_name)},
                {"data": [{
                    "id": q.var("collection")
                }]},
            ),
        )
    ]
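
# Illustrative usage sketch (not part of the library): the CREATE INDEX form
# handled above, with the starting index derived the same way translate_create
# does. The exact FQL emitted depends on fql.index_name, so the comment only
# describes the expected shape.
def _example_translate_create_index():
    import sqlparse

    statement = sqlparse.parse(
        "CREATE UNIQUE INDEX ix_users_name ON users (name)")[0]
    idx, _ = statement.token_next_by(m=(token_types.Keyword, "INDEX"))
    # Expected: a single q.do(...) expression that creates a term index on
    # users.name (unless an index with that name already exists) and then
    # returns the collection ref.
    return _translate_create_index(statement, idx)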
def translate_sql_to_fql(sql_string: str) -> typing.List[QueryExpression]:
    """Translate from an SQL string to an FQL query"""
    sql_statements = sqlparse.parse(sql_string)

    if len(sql_statements) > 1:
        raise exceptions.NotSupportedError(
            "Only one SQL statement at a time is currently supported. "
            f"The following query has more than one:\n{sql_string}")

    sql_statement = sql_statements[0]

    if sql_statement.token_first().match(token_types.DDL, "CREATE"):
        return translate_create(sql_statement)

    if sql_statement.token_first().match(token_types.DDL, "DROP"):
        return translate_drop(sql_statement)

    if sql_statement.token_first().match(token_types.DDL, "ALTER"):
        return translate_alter(sql_statement)

    if sql_statement.token_first().match(token_types.DML, "SELECT"):
        sql_query = sql.SQLQuery.from_statement(sql_statement)
        return [fql.translate_select(sql_query)]

    if sql_statement.token_first().match(token_types.DML, "INSERT"):
        sql_query = sql.SQLQuery.from_statement(sql_statement)
        return [fql.translate_insert(sql_query)]

    if sql_statement.token_first().match(token_types.DML, "DELETE"):
        sql_query = sql.SQLQuery.from_statement(sql_statement)
        return [fql.translate_delete(sql_query)]

    if sql_statement.token_first().match(token_types.DML, "UPDATE"):
        sql_query = sql.SQLQuery.from_statement(sql_statement)
        return [fql.update_documents(sql_query)]

    raise exceptions.NotSupportedError(
        f"Unsupported statement type in query:\n{sql_string}")
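
# Illustrative usage sketch (not part of the library): the top-level entry
# point takes a single SQL string and returns a list of FQL expressions that
# could then be passed to a Fauna client. The client call is only shown as a
# comment because no client setup is part of this excerpt.
def _example_translate_sql_to_fql():
    fql_queries = translate_sql_to_fql("SELECT users.name FROM users")
    # for fql_query in fql_queries:
    #     fauna_client.query(fql_query)  # hypothetical client usage
    return fql_queries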
def _define_column(
    metadata: AllFieldMetadata,
    column_definition_group: token_groups.TokenList,
) -> AllFieldMetadata:
    idx, column = column_definition_group.token_next_by(t=token_types.Name)
    column_name = column.value

    # "id" is auto-generated by Fauna, so we ignore it in SQL column definitions
    if column_name == "id":
        return metadata

    idx, data_type = column_definition_group.token_next_by(t=token_types.Name,
                                                           idx=idx)

    _, not_null_keyword = column_definition_group.token_next_by(
        m=(token_types.Keyword, "NOT NULL"))
    _, unique_keyword = column_definition_group.token_next_by(
        m=(token_types.Keyword, "UNIQUE"))
    _, primary_key_keyword = column_definition_group.token_next_by(
        m=(token_types.Keyword, "PRIMARY KEY"))
    _, default_keyword = column_definition_group.token_next_by(
        m=(token_types.Keyword, "DEFAULT"))
    _, check_keyword = column_definition_group.token_next_by(
        m=(token_types.Keyword, "CHECK"))

    if check_keyword is not None:
        raise exceptions.NotSupportedError("CHECK keyword is not supported.")

    column_metadata: typing.Union[FieldMetadata,
                                  typing.Dict[str, str]] = metadata.get(
                                      column_name, {})

    is_primary_key = primary_key_keyword is not None
    is_not_null = (not_null_keyword is not None or is_primary_key
                   or column_metadata.get("not_null") or False)
    is_unique = (unique_keyword is not None or is_primary_key
                 or column_metadata.get("unique") or False)
    default_value = (None if default_keyword is None else
                     sql.extract_value(default_keyword.value))

    return {
        **metadata,
        column_name: {
            **DEFAULT_FIELD_METADATA,  # type: ignore
            **metadata.get(column_name, EMPTY_DICT),  # type: ignore
            "unique": is_unique,
            "not_null": is_not_null,
            "default": default_value,
            "type": DATA_TYPE_MAP[data_type.value],
        },
    }
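
# Illustrative sketch (not part of the library): the rough shape of the field
# metadata that _define_column builds for a definition such as
# "name VARCHAR NOT NULL UNIQUE". The keys shown and the VARCHAR mapping are
# assumptions based on the merging logic above.
def _example_column_metadata_shape():
    return {
        "name": {
            "unique": True,
            "not_null": True,
            "default": None,
            "type": "String",  # assumed result of DATA_TYPE_MAP["VARCHAR"]
        },
    }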
def _collect_tables(
        cls,
        statement: token_groups.Statement) -> typing.List[sql_table.Table]:
    idx, _ = statement.token_next_by(m=[
        (token_types.Keyword, "FROM"),
        (token_types.Keyword, "INTO"),
        (token_types.DML, "UPDATE"),
    ])

    _, maybe_table_identifier = statement.token_next(idx=idx,
                                                     skip_cm=True,
                                                     skip_ws=True)

    if isinstance(maybe_table_identifier, token_groups.Function):
        maybe_table_identifier = maybe_table_identifier.token_first(
            skip_cm=True, skip_ws=True)

    # If we can't find a single table identifier, it means that multiple tables
    # are referenced in the FROM/INTO clause, which isn't supported.
    if not isinstance(maybe_table_identifier, token_groups.Identifier):
        raise exceptions.NotSupportedError(
            "In order to query multiple tables at a time, you must join them "
            "together with a JOIN clause.")

    table_identifier = maybe_table_identifier
    tables = [sql_table.Table.from_identifier(table_identifier)]

    while True:
        idx, join_kw = statement.token_next_by(m=(token_types.Keyword, "JOIN"),
                                               idx=idx)
        if join_kw is None:
            break

        idx, table_identifier = statement.token_next(idx,
                                                     skip_ws=True,
                                                     skip_cm=True)
        table = sql_table.Table.from_identifier(table_identifier)

        idx, comparison_group = statement.token_next_by(
            i=token_groups.Comparison, idx=idx)
        table.add_join(tables[-1], comparison_group,
                       sql_table.JoinDirection.LEFT)
        tables.append(table)

    return tables
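
# Illustrative sketch (not part of the library): how the FROM/JOIN clauses of a
# two-table query are expected to be collected, based on the loop above. The
# result described in the comment is an assumption.
def _example_collect_joined_tables():
    import sqlparse

    statement = sqlparse.parse(
        "SELECT users.name FROM users "
        "JOIN accounts ON users.id = accounts.user_id")[0]
    tables = SQLQuery._collect_tables(statement)
    # Expected (roughly): [Table('users'), Table('accounts')], with a LEFT join
    # recorded between them via Table.add_join.
    return tables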
def _define_primary_key(
    metadata: AllFieldMetadata,
    column_definition_group: token_groups.TokenList,
) -> typing.Optional[AllFieldMetadata]:
    idx, constraint_keyword = column_definition_group.token_next_by(
        m=(token_types.Keyword, "CONSTRAINT"))
    idx, primary_keyword = column_definition_group.token_next_by(
        m=(token_types.Keyword, "PRIMARY"), idx=(idx or -1))

    if constraint_keyword is not None and primary_keyword is None:
        raise exceptions.NotSupportedError(
            "When a column definition clause begins with CONSTRAINT, "
            "only a PRIMARY KEY constraint is supported")

    if primary_keyword is None:
        return None

    # If the keyword isn't followed by column name(s), then it's part of
    # a regular column definition and should be handled by _define_column
    if not _contains_column_name(column_definition_group, idx):
        return None

    new_metadata: AllFieldMetadata = deepcopy(metadata)

    while True:
        idx, primary_key_column = column_definition_group.token_next_by(
            t=token_types.Name, idx=idx)

        # 'id' is defined and managed by Fauna, so we ignore any attempts
        # to manage it from SQLAlchemy
        if primary_key_column is None or primary_key_column.value == "id":
            break

        primary_key_column_name = primary_key_column.value
        new_metadata[primary_key_column_name] = {
            **DEFAULT_FIELD_METADATA,  # type: ignore
            **new_metadata.get(primary_key_column_name, {}),  # type: ignore
            "unique": True,
            "not_null": True,
        }

    return new_metadata
def from_comparison_group(
        cls, comparison_group: token_groups.Comparison) -> Comparison:
    """Create a Comparison object based on an SQL Comparison token group.

    Params:
    -------
    comparison_group: An SQL token group representing a comparison.

    Returns:
    --------
    A Comparison object.
    """
    _, comparison_token = comparison_group.token_next_by(
        t=token_types.Comparison, m=(token_types.Keyword, "IS"))
    assert comparison_token is not None

    comparison_operator = cls.OPERATOR_MAP.get(comparison_token.value)
    if comparison_operator is None:
        raise exceptions.NotSupportedError(
            "Only the following comparisons are supported in WHERE clauses: "
            + ", ".join(cls.OPERATOR_MAP.keys()))

    # We're enforcing the convention of <column name> <operator> <value> for WHERE
    # clauses here to simplify later query translation.
    # Unfortunately, FQL generation depends on this convention without that dependency
    # being explicit, which increases the likelihood of future bugs. However, I can't
    # think of a good way to centralize the knowledge of this convention across all
    # query translation, so I'm leaving this note as a warning.
    id_idx, _ = comparison_group.token_next_by(i=token_groups.Identifier)
    value_idx, _ = comparison_group.token_next_by(
        t=token_types.Literal,
        m=[
            (token_types.Keyword, "NULL"),
            (token_types.Keyword, "TRUE"),
            (token_types.Keyword, "FALSE"),
        ],
    )
    identifier_comes_before_value = id_idx < value_idx

    if identifier_comes_before_value:
        return cls(operator=comparison_operator)

    # The value precedes the identifier (e.g. "5 < age"), so any directional
    # operator has to be reversed to preserve the intended comparison.
    return cls(operator=cls.REVERSE_MAP.get(comparison_operator,
                                            comparison_operator))
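
# Illustrative sketch (not part of the library): when the literal precedes the
# column (e.g. "30 <= users.age"), the directional operator is expected to be
# reversed so the Comparison still reads as <column> <operator> <value>. The
# behaviour described in the comment is an assumption based on the logic above.
def _example_reversed_comparison():
    import sqlparse

    statement = sqlparse.parse(
        "SELECT users.name FROM users WHERE 30 <= users.age")[0]
    _, where_group = statement.token_next_by(i=token_groups.Where)
    _, comparison_group = where_group.token_next_by(i=token_groups.Comparison)

    comparison = Comparison.from_comparison_group(comparison_group)
    # Expected: the '<=' operator is mapped and then flipped via REVERSE_MAP,
    # i.e. the filter behaves like "users.age >= 30".
    return comparison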
def _translate_alter_column(
    statement: token_groups.Statement,
    table: sql.Table,
    starting_idx: int,
) -> QueryExpression:
    idx, column_identifier = statement.token_next_by(i=token_groups.Identifier,
                                                     idx=starting_idx)
    column = sql.Column.from_identifier(column_identifier)
    table.add_column(column)

    _, drop = statement.token_next_by(m=(token_types.DDL, "DROP"), idx=idx)
    _, default = statement.token_next_by(m=(token_types.Keyword, "DEFAULT"))

    if drop and default:
        return _translate_drop_default(table.name, table.columns[0].name)

    raise exceptions.NotSupportedError(
        "For statements with ALTER COLUMN, only DROP DEFAULT is currently supported."
    )
def from_statement(cls, statement: token_groups.Statement) -> SQLQuery:
    """Extract an SQLQuery object from an SQL statement token.

    Params:
    -------
    statement: SQL token that contains the entire query.

    Returns:
    --------
    A new SQLQuery object.
    """
    first_token = statement.token_first(skip_cm=True, skip_ws=True)
    tables = cls._collect_tables(statement)
    sql_instance = None

    if first_token.match(token_types.DML, "SELECT"):
        sql_instance = cls._build_select_query(statement, tables)

    if first_token.match(token_types.DML, "UPDATE"):
        assert len(tables) == 1
        table = tables[0]
        sql_instance = cls._build_update_query(statement, table)

    if first_token.match(token_types.DML, "INSERT"):
        assert len(tables) == 1
        table = tables[0]
        sql_instance = cls._build_insert_query(statement, table)

    if first_token.match(token_types.DML, "DELETE"):
        assert len(tables) == 1
        table = tables[0]
        sql_instance = cls._build_delete_query(statement, table)

    if sql_instance is None:
        raise exceptions.NotSupportedError(
            f"Unsupported query type {first_token}")

    _, where_group = statement.token_next_by(i=token_groups.Where)
    filter_groups = sql_table.FilterGroup.from_where_group(where_group)
    for filter_group in filter_groups:
        sql_instance.add_filter_group(filter_group)

    return sql_instance
def _build_select_query(cls, statement: token_groups.Statement,
                        tables: typing.List[sql_table.Table]) -> SQLQuery:
    _, wildcard = statement.token_next_by(t=token_types.Wildcard)

    if wildcard is not None:
        raise exceptions.NotSupportedError(
            "Wildcards ('*') are not yet supported")

    _, identifiers = statement.token_next_by(i=(
        token_groups.Identifier,
        token_groups.IdentifierList,
        token_groups.Function,
    ))

    for column in sql_table.Column.from_identifier_group(identifiers):
        try:
            table = next(table for table in tables
                         if table.name == column.table_name)
        except StopIteration:
            table = tables[0]

        table.add_column(column)

    _, distinct = statement.token_next_by(m=(token_types.Keyword, "DISTINCT"))

    idx, _ = statement.token_next_by(m=(token_types.Keyword, "LIMIT"))
    _, limit = statement.token_next(skip_cm=True, skip_ws=True, idx=idx)
    limit_value = None if limit is None else int(limit.value)

    order_by = OrderBy.from_statement(statement)

    return cls(
        str(statement),
        tables=tables,
        distinct=bool(distinct),
        order_by=order_by,
        limit=limit_value,
    )
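
# Illustrative sketch (not part of the library): the SELECT features recognised
# above (qualified columns, DISTINCT, ORDER BY, LIMIT) in a single statement.
# The attribute values in the comment are assumptions based on that parsing
# logic.
def _example_build_select_query():
    import sqlparse

    statement = sqlparse.parse(
        "SELECT DISTINCT users.name FROM users ORDER BY users.name LIMIT 5")[0]
    sql_query = SQLQuery.from_statement(statement)
    # Expected (roughly): sql_query.distinct is True, sql_query.limit == 5, and
    # the order_by covers users.name in the default (ascending) direction.
    return sql_query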
def translate_create(
        statement: token_groups.Statement) -> typing.List[QueryExpression]:
    """Translate a CREATE SQL query into an equivalent FQL query.

    Params:
    -------
    statement: An SQL statement returned by sqlparse.

    Returns:
    --------
    A list of FQL query expressions.
    """
    idx, keyword = statement.token_next_by(
        m=[(token_types.Keyword, "TABLE"), (token_types.Keyword, "INDEX")])

    if keyword.value == "TABLE":
        return _translate_create_table(statement, idx)

    if keyword.value == "INDEX":
        return _translate_create_index(statement, idx)

    raise exceptions.NotSupportedError(
        "Only TABLE and INDEX are supported in CREATE statements.")
def _define_match_set(query_filter: sql.Filter) -> QueryExpression:
    field_name = query_filter.column.name
    comparison_value = query_filter.value
    index_name_for_collection = functools.partial(index_name,
                                                  query_filter.table_name)
    convert_to_collection_ref_set = functools.partial(convert_to_ref_set,
                                                      query_filter.table_name)

    get_info_indexes_with_references = lambda collection_name, field_name: q.map_(
        q.lambda_("info_index_ref", q.get(q.var("info_index_ref"))),
        q.paginate(
            q.match(
                q.index(
                    index_name(
                        "information_schema_indexes_",
                        column_name="name_",
                        index_type=IndexType.TERM,
                    )),
                index_name(
                    collection_name,
                    column_name=field_name,
                    index_type=IndexType.REF,
                ),
            ),
        ),
    )

    index_name_for_field = functools.partial(index_name_for_collection,
                                             field_name)
    equality_range = q.range(
        q.match(q.index(index_name_for_field(IndexType.VALUE))),
        [comparison_value],
        [comparison_value],
    )

    if query_filter.checks_whether_equal:
        if field_name == "ref":
            assert isinstance(comparison_value, str)
            return q.singleton(
                q.ref(q.collection(query_filter.table_name), comparison_value))

        return q.let(
            {
                "ref_index": q.index(index_name_for_field(IndexType.REF)),
                "term_index": q.index(index_name_for_field(IndexType.TERM)),
                "info_indexes": get_info_indexes_with_references(
                    query_filter.table_name, field_name),
                "comparison_value": comparison_value,
            },
            q.if_(
                q.exists(q.var("ref_index")),
                q.match(
                    q.var("ref_index"),
                    get_foreign_key_ref(
                        q.var("comparison_value"),
                        # Assumes that there is only one reference per foreign key
                        # and that it refers to the associated collection's ID field
                        # (e.g. {'associated_table': 'id'}).
                        # This is enforced via NotSupported errors when creating collections.
                        q.select([0, DATA, "referred_table_"],
                                 q.var("info_indexes")),
                    ),
                ),
                q.if_(
                    q.exists(q.var("term_index")),
                    q.match(
                        q.var("term_index"),
                        q.var("comparison_value"),
                    ),
                    convert_to_collection_ref_set(equality_range),
                ),
            ),
        )

    # In the building of Filter objects from SQL tokens, we enforce the convention
    # of <column name> <operator> <value> for WHERE clauses, so we build the FQL queries
    # assuming that '>' means 'column value greater than literal value'. I can't think
    # of a good way to centralize the knowledge of this convention across
    # all query translation, so I'm leaving this note as a warning.
    if query_filter.checks_whether_greater_than:
        inclusive_comparison_range = q.range(
            q.match(q.index(index_name_for_field(IndexType.VALUE))),
            [comparison_value],
            [],
        )
        return convert_to_collection_ref_set(
            q.difference(inclusive_comparison_range, equality_range))

    if query_filter.checks_whether_greater_than_or_equal:
        inclusive_comparison_range = q.range(
            q.match(q.index(index_name_for_field(IndexType.VALUE))),
            [comparison_value],
            [],
        )
        return convert_to_collection_ref_set(inclusive_comparison_range)

    if query_filter.checks_whether_less_than:
        inclusive_comparison_range = q.range(
            q.match(q.index(index_name_for_field(IndexType.VALUE))),
            [],
            [comparison_value],
        )
        return convert_to_collection_ref_set(
            q.difference(inclusive_comparison_range, equality_range))

    if query_filter.checks_whether_less_than_or_equal:
        inclusive_comparison_range = q.range(
            q.match(q.index(index_name_for_field(IndexType.VALUE))),
            [],
            [comparison_value],
        )
        return convert_to_collection_ref_set(inclusive_comparison_range)

    raise exceptions.NotSupportedError(
        f"Unsupported comparison {query_filter.comparison} was received.")