def test_union(self):
    """Serialization of query.union with zero, one, and two set arguments."""
    # No arguments serializes to an empty array.
    self.assertJson(query.union(), '{"union":[]}')
    # A single set serializes without an enclosing array.
    widget_index = query.index("widget")
    self.assertJson(query.union(widget_index), '{"union":{"index":"widget"}}')
    # Multiple sets serialize as an array of sets.
    two_set_union = query.union(widget_index, query.index("things"))
    self.assertJson(two_set_union, '{"union":[{"index":"widget"},{"index":"things"}]}')
def questions():
    """Render the questions page for the logged-in user.

    The optional ``?type=`` query parameter selects which questions to show:
    ``answered``, ``unanswered``, or ``all`` (the default). Any other value
    redirects back to the default view.
    """
    username = session["user"]["username"]
    question_type = request.args.get("type", "all").lower()

    def match_questions(answered):
        # questions_index is matched on (answered_flag, username) — assumed
        # from the call shape; TODO confirm against the index definition.
        return q.match(q.index("questions_index"), answered, username)

    if question_type == "answered":
        source_set = match_questions(True)
    elif question_type == "unanswered":
        source_set = match_questions(False)
    elif question_type == "all":
        source_set = q.union(match_questions(True), match_questions(False))
    else:
        # Unknown filter value: fall back to the default view.
        return redirect(url_for("questions"))

    question_indexes = client.query(q.paginate(source_set, size=100_000))

    # Build one Get per matched ref; renamed from `questions` so the local
    # no longer shadows this route function's own name.
    question_reads = [
        q.get(q.ref(q.collection("questions"), match.id()))
        for match in question_indexes["data"]
    ]
    # Reversed so the most recently created questions are listed first.
    return render_template("questions.html",
                           questions=client.query(question_reads)[::-1])
def test_union(self):
    """A union of two index matches returns documents from either set."""
    first_value = 101
    second_value = 201
    only_n = self._create(n=first_value)["ref"]
    only_m = self._create(m=second_value)["ref"]
    both = self._create(n=first_value, m=second_value)["ref"]
    union_set = query.union(
        query.match(self.n_index_ref, first_value),
        query.match(self.m_index_ref, second_value))
    # The document matching both indexes appears once, not twice.
    self.assertEqual(self._set_to_list(union_set), [only_n, only_m, both])
def read_list_of_customers(client, cust_list):
    """Fetch and print the data for an arbitrary list of customer ids.

    The most general read case: any number of id values may be supplied and
    the underlying data for each is returned.
    """
    def match_id(customer_id):
        return q.match(q.index("customer_by_id"), customer_id)

    # Union the per-id match sets, page through the resulting refs, and
    # select the "data" field out of each referenced document.
    res = client.query(
        q.map_(
            lambda ref: q.select("data", q.get(ref)),
            q.paginate(q.union(q.map_(match_id, cust_list)))))
    print('Union variable \'customer\' {0}: {1}'.format(cust_list, res))
def read_three_customers(client, cust_id1, cust_id2, cust_id3):
    """Fetch and print the data for three specific customer ids.

    Retrieves multiple class references by id and returns the actual data
    underlying them.
    """
    customer_index = q.index("customer_by_id")
    id_matches = [
        q.match(customer_index, cust_id)
        for cust_id in (cust_id1, cust_id2, cust_id3)
    ]
    res = client.query(
        q.map_(
            lambda ref: q.select("data", q.get(ref)),
            q.paginate(q.union(*id_matches))))
    print('Union specific \'customer\' 1, 3, 8: {0}'.format(res))
def join_collections(sql_query: sql.SQLQuery) -> QueryExpression:
    """Join together multiple collections to return their documents in the response.

    Params:
    -------
    sql_query: SQLQuery object with information about the query params.

    Returns:
    --------
    An FQL query expression for joined and filtered documents.
    """
    tables = sql_query.tables
    order_by = sql_query.order_by
    from_table = tables[0]
    to_table = tables[-1]
    table_with_columns = next(table for table in tables if table.has_columns)

    # Ordering is only supported on the principal table (the one whose
    # columns are selected/modified), because Fauna indexes drive both
    # joining and sorting.
    if (order_by is not None
            and order_by.columns[0].table_name != table_with_columns.name):
        raise exceptions.NotSupportedError(
            "Fauna uses indexes for both joining and ordering of results, "
            "and we currently can only sort the principal table "
            "(i.e. the one whose columns are being selected or modified) in the query. "
            "You can sort on a column from the principal table, query one table at a time, "
            "or remove the ordering constraint.")

    if not any(sql_query.filter_groups):
        raise exceptions.NotSupportedError(
            "Joining tables without cross-table filters via the WHERE clause is not supported. "
            "Selecting columns from multiple tables is not supported either, "
            "so there's no performance gain from joining tables without cross-table conditions "
            "for filtering query results.")

    assert from_table.left_join_table is None

    # Each filter group becomes an intersection of the two directional
    # join queries; the groups are then unioned together.
    join_endpoints = [(from_table, "right"), (to_table, "left")]
    intersection_queries = [
        q.intersection(*(
            _build_intersecting_query(filter_group, None, table, direction)
            for table, direction in join_endpoints))
        for filter_group in sql_query.filter_groups
    ]
    return q.union(*intersection_queries)
def wordPartsGenerator(word):
    """Build an FQL expression producing the distinct ngrams of a word.

    Ngram lengths are derived from the word length minus each offset in
    maxNgrams; lengths shorter than 3 are discarded before generation.
    """
    return q.let(
        {
            # Candidate ngram lengths: word length minus each offset.
            # Reduce the maxNgrams array for fewer ngrams per word — [0]
            # yields only the word itself, [0, 1] adds ngrams one character
            # shorter, and so on.
            "indexes": q.map_(
                lambda offset: q.subtract(q.length(word), offset),
                maxNgrams),
            # Keep only lengths of at least 3 (minimum part length).
            "indexesFiltered": q.filter_(
                lambda length: q.gte(length, 3),
                q.var('indexes')),
            # Lowercase once, generate ngrams per kept length, then dedupe.
            "ngramsArray": q.distinct(
                q.union(
                    q.map_(
                        lambda length: q.ngram(q.lowercase(word), length, length),
                        q.var('indexesFiltered'))))
        },
        q.var('ngramsArray'))
def build_document_set_union(
    table: sql.Table,
    filter_groups: typing.List[sql.FilterGroup]) -> QueryExpression:
    """Build an FQL match query that joins results from different filter groups.

    Params:
    -------
    table: A Table object associated with a Fauna collection.
    filter_groups: A list of groups of filters representing, each one
        an intersection of filtered results.

    Returns:
    --------
    FQL query expression that is a union of each filter group's intersection
    of results, all associated with the given table's filters.
    """
    # With no filter groups, fall back to the unfiltered intersection.
    if not any(filter_groups):
        return build_document_set_intersection(table, None)

    group_intersections = (
        build_document_set_intersection(table, filter_group)
        for filter_group in filter_groups)
    return q.union(*group_intersections)
def _metadata_ref_set(collection_name: str, table_name: str) -> QueryExpression:
    """Build the ref set of metadata documents for table_name in collection_name.

    Both information_schema_columns_ and information_schema_indexes_ carry a
    VALUE index on their 'table_name_' column, so the range-match-to-ref-set
    construction is identical for both — extracted here to avoid duplicating
    the deeply nested expression.
    """
    return fql.convert_to_ref_set(
        collection_name,
        q.range(
            q.match(
                q.index(
                    fql.index_name(
                        collection_name,
                        column_name="table_name_",
                        index_type=fql.IndexType.VALUE,
                    ))),
            # Inclusive single-value range: only rows for this table.
            [table_name],
            [table_name],
        ),
    )


def translate_drop(
    statement: token_groups.Statement) -> typing.List[QueryExpression]:
    """Translate a DROP SQL query into an equivalent FQL query.

    Params:
    -------
    statement: An SQL statement returned by sqlparse.

    Returns:
    --------
    An FQL query expression.
    """
    # The table name is the identifier following the TABLE keyword.
    idx, _ = statement.token_next_by(m=(token_types.Keyword, "TABLE"))
    _, table_identifier = statement.token_next_by(i=token_groups.Identifier,
                                                  idx=idx)
    table_name = table_identifier.value

    # Deleting the collection returns the deleted document; select its ref
    # so it can be surfaced in the response payload below.
    deleted_collection = q.select("ref", q.delete(q.collection(table_name)))

    # The table's own row in information_schema_tables_ (TERM index on name_).
    table_metadata = q.match(
        q.index(
            fql.index_name(
                "information_schema_tables_",
                column_name="name_",
                index_type=fql.IndexType.TERM,
            )),
        table_name,
    )

    return [
        q.do(
            # First delete every metadata document referencing the table
            # (its table row, column rows, and index rows)...
            q.map_(
                q.lambda_("ref", q.delete(q.var("ref"))),
                q.paginate(
                    q.union(
                        table_metadata,
                        _metadata_ref_set("information_schema_columns_",
                                          table_name),
                        _metadata_ref_set("information_schema_indexes_",
                                          table_name),
                    ),
                ),
            ),
            # ...then drop the collection itself and report its id.
            q.let(
                {"collection": deleted_collection},
                {"data": [{
                    "id": q.var("collection")
                }]},
            ),
        )
    ]
def test_typecheckfns(self):
    """Exercise every is_* type-check predicate against shared sample values.

    Creates one resource of each kind (collection, index, document, database,
    function, key, token, credentials, role), builds a list of sample values
    covering every checkable type, then counts how many values each predicate
    matches and compares the tallies against `expected`.
    """
    coll = query.collection("typecheck_coll")
    db = query.database("typecheck_db")
    index = query.index("typecheck_index")
    # NOTE: the original also bound query.function("typecheck_fn") to an
    # unused local `fn`; removed — function refs come from create_function.

    # Create one resource of each kind so every ref type is represented.
    self.admin_client.query(query.create_collection({"name": "typecheck_coll"}))
    self.admin_client.query(query.create_index(
        {"name": "typecheck_index", "source": coll, "active": True}))
    doc = self.admin_client.query(query.create(
        coll, {"data": {}, "credentials": {"password": "******"}}))
    self.admin_client.query(query.create_database({"name": "typecheck_db"}))
    function = self._q(query.create_function(
        {"name": "typecheck_fn",
         "body": query.query(query.lambda_("x", query.now()))}))
    key = self.admin_client.query(
        query.create_key({"database": db, "role": "admin"}))
    token = self._q(query.login(doc["ref"], {"password": "******"}))
    credentials = self._q(
        query.select(['data', 0], query.paginate(query.credentials())))
    role = self.admin_client.query(query.create_role(
        {"name": "typecheck_role", "membership": [], "privileges": []}))

    # Sample values: at least one per type the predicates below can match.
    values = [
        None,
        bytearray([12,3,4,5]),
        credentials,
        90,
        3.14,
        True,
        query.to_date(query.now()),
        query.date("1970-01-01"),
        query.now(),
        query.epoch(1, "second"),
        query.time("1970-01-01T00:00:00Z"),
        {"x": 10},
        query.get(doc["ref"]),
        query.paginate(query.collections()),
        [1, 2, 3],
        "a string",
        coll,
        query.collections(),
        query.match(index),
        query.union(query.match(index)),
        doc["ref"],
        query.get(doc["ref"]),
        index,
        db,
        coll,
        token["ref"],
        role["ref"],
        key["ref"],
        function["ref"],
        query.get(function["ref"]),
        query.query(query.lambda_("x", query.var("x"))),
    ]
    # (type name, predicate) pairs; names key into `expected` below.
    # "collection" appears twice in the original; kept — the later merge
    # simply overwrites the identical count.
    pairs = [
        ["array", query.is_array],
        ["object", query.is_object],
        ["string", query.is_string],
        ["null", query.is_null],
        ["number", query.is_number],
        ["bytes", query.is_bytes],
        ["date", query.is_date],
        ["timestamp", query.is_timestamp],
        ["set", query.is_set],
        ["ref", query.is_ref],
        ["boolean", query.is_boolean],
        ["double", query.is_double],
        ["integer", query.is_integer],
        ["database", query.is_database],
        ["index", query.is_index],
        ["collection", query.is_collection],
        ["token", query.is_token],
        ["function", query.is_function],
        ["collection", query.is_collection],
        ["role", query.is_role],
        ["credentials", query.is_credentials],
        ["key", query.is_key],
    ]
    # Expected match count per predicate for `values` above.
    expected = {
        "array": 1,
        "boolean": 1,
        "bytes": 1,
        "collection": 3,
        "credentials": 1,
        "database": 1,
        "date": 2,
        "double": 1,
        "function": 2,
        "integer": 1,
        "index": 1,
        "key": 1,
        "null": 1,
        "number": 2,
        "object": 5,
        "ref": 11,
        "role": 1,
        "set": 3,
        "string": 1,
        "timestamp": 3,
        "token": 1,
    }
    # One {name: Count(Filter(...))} sub-query per predicate; renamed from
    # `q` (which shadowed the common faunadb alias) and built as a
    # comprehension instead of a dict()/append loop.
    counts = [
        {name: query.count(query.filter_(
            query.lambda_("v", predicate(query.var("v"))),
            query.var("vals")))}
        for name, predicate in pairs
    ]
    actual = self._q(query.let({"vals": values}, query.merge({}, counts)))
    self.assertEqual(actual, expected)