def unsubscribe(update: Update, context: CallbackContext):
    user = User(update.effective_chat.id)
    try:
        animes_watched = client.query(
            q.let(
                {'bot_user': q.ref(q.collection(users), user.chat_id)},
                q.if_(
                    q.exists(q.var('bot_user')),
                    q.map_(
                        q.lambda_('doc_ref', q.get(q.var('doc_ref'))),
                        q.select(['data', 'animes_watching'],
                                 q.get(q.var('bot_user')))),
                    [])))
        for anime in animes_watched:
            markup = [[
                InlineKeyboardButton('Unsubscribe',
                                     callback_data='unsubscribe=' +
                                     anime['ref'].id())
            ]]
            context.bot.send_message(chat_id=user.chat_id,
                                     text=anime['data']['title'],
                                     reply_markup=InlineKeyboardMarkup(markup))
        # update last command
        user.last_command = ''
        if not animes_watched:
            context.bot.send_message(
                chat_id=user.chat_id,
                text='You are currently not subscribed to any anime')
    except Exception as err:
        log_error(err)
def get_foreign_key_ref(
    foreign_value: QueryExpression,
    reference_collection_name: QueryExpression,
) -> QueryExpression:
    """Get the Ref to a document associated with a foreign key value.

    Params:
    -------
    foreign_value: The value to look up, usually an ID.
    reference_collection_name: The name of the collection that the
        foreign key refers to.

    Returns:
    --------
    Fauna query expression that returns the Ref for the associated
    document, or None if the foreign key or collection name is blank.
    """
    return q.let(
        {
            "is_blank_reference": q.or_(
                q.is_null(foreign_value),
                q.equals(foreign_value, NULL),
                q.equals(reference_collection_name, NULL),
            ),
        },
        q.if_(
            q.var("is_blank_reference"),
            None,
            q.ref(q.collection(reference_collection_name), foreign_value),
        ),
    )
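A minimal usage sketch, assuming a FaunaClient with a valid secret and an existing "users" collection; the document ID and the binding name are illustrative, not from the original code:

from faunadb import query as q
from faunadb.client import FaunaClient

client = FaunaClient(secret="your-secret")  # assumption: a valid server key

# Hypothetical lookup: resolve a foreign-key value into a Ref in "users".
user_ref = client.query(
    q.let(
        {"owner_id": "1234567890"},  # illustrative document ID
        get_foreign_key_ref(q.var("owner_id"), "users"),
    ))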
def update_documents(sql_query: sql.SQLQuery) -> QueryExpression:
    """Update document fields with the given values.

    Params:
    -------
    sql_query: SQLQuery object that contains the parameters for building
        an update query in FQL.

    Returns:
    --------
    An FQL update query for the given collection and documents.
    """
    assert len(sql_query.tables) == 1
    table = sql_query.tables[0]

    assert len(sql_query.filter_groups) <= 1
    filter_group = (None if not any(sql_query.filter_groups) else
                    sql_query.filter_groups[0])

    field_updates = {column.name: column.value for column in table.columns}

    return q.let(
        {"document_set": build_document_set_intersection(table, filter_group)},
        q.do(
            q.update(
                q.select(
                    "ref",
                    q.get(q.var("document_set")),
                ),
                {"data": field_updates},
            ),
            {"data": [{
                "count": q.count(q.var("document_set"))
            }]},
        ),
    )
def _create_documents(let_params, records, build_document):
    results = []
    idx = 0

    while True:
        # Use >= so we don't send one final query for an empty batch
        # once all records have been processed.
        if idx >= len(records):
            break

        end_idx = idx + BATCH_LIMIT
        batch = _execute_with_retries(
            q.let(
                let_params,
                q.map_(
                    q.lambda_(
                        "document",
                        q.create(
                            q.var("collection"),
                            {"data": build_document(q.var("document"))},
                        ),
                    ),
                    records[idx:end_idx],
                ),
            ))
        results.extend(batch)
        idx = end_idx

    return results
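A hedged call sketch; BATCH_LIMIT, the collection name, and the record shape are assumptions, and build_document is reduced to the identity function:

from faunadb import query as q

BATCH_LIMIT = 100  # assumption: mirrors the module-level constant used above

records = [{"name": "Alice"}, {"name": "Bob"}]
created = _create_documents(
    {"collection": q.collection("users")},  # bound once per batch via q.let
    records,
    lambda document: document,  # build_document: pass data through unchanged
)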
def test_varargs(self):
    # Works for lists too
    self.assertEqual(self._q(query.add([2, 3, 5])), 10)
    # Works for a variable equal to a list
    self.assertEqual(
        self._q(query.let({"x": [2, 3, 5]}, query.add(query.var("x")))), 10)
def login(data):
    try:
        return current_app.fauna_client.query(
            q.let(
                {
                    'response': q.login(
                        q.match(q.index('unique_account_username_type'),
                                [data.get('username'), 'EMAIL']),
                        {'password': data.get('password')}),
                    'user': q.select_with_default(
                        ['data', 'user'],
                        q.get(q.select(['instance'], q.var('response'))),
                        None)
                },
                {
                    'data': {
                        'token': q.select('secret', q.var('response')),
                        'user': q.if_(
                            q.is_ref(q.var('user')),
                            q.select(['data', 'alias'], q.get(q.var('user'))),
                            None)
                    }
                }))
    except Exception as e:
        print(e)
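For context, q.login returns a token object whose 'secret' field holds the new token and whose 'instance' field holds the Ref of the matched credentials document, which is what the q.select calls above read. A hypothetical call, with placeholder payload values:

session = login({"username": "user@example.com", "password": "hunter2"})
# Expected shape, per the query above:
# {"data": {"token": "<token secret>", "user": "<alias or None>"}}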
def create_user(data):
    try:
        current_identity = get_current_identity()
        email_hash = md5(
            current_identity['data']['username'].encode('utf-8')).hexdigest()
        return current_app.fauna_client.query(
            q.if_(
                q.is_ref(
                    q.select_with_default(['data', 'user'],
                                          q.get(q.current_identity()), None)),
                q.abort('exists'),
                q.let(
                    {
                        'userMetaRef': q.new_id(),
                        'userRef': q.new_id()
                    },
                    q.do(
                        q.create(
                            q.ref(q.collection('user_metas'),
                                  q.var('userMetaRef')),
                            {
                                'data': {
                                    'name': data.get('name'),
                                    'email': q.select(
                                        ['data', 'username'],
                                        q.get(q.current_identity())),
                                    'dob': parser.parse(data.get('dob')).date()
                                }
                            }),
                        q.create(
                            q.ref(q.collection('users'), q.var('userRef')),
                            {
                                'data': {
                                    'alias': data.get('alias'),
                                    'avatar': f'https://www.gravatar.com/avatar/{email_hash}',
                                    'public': False,
                                    'meta': q.ref(q.collection('user_metas'),
                                                  q.var('userMetaRef')),
                                }
                            }),
                        q.update(
                            q.current_identity(),
                            {
                                'data': {
                                    'user': q.ref(q.collection('users'),
                                                  q.var('userRef'))
                                }
                            }),
                        q.call('current_user', [])))))
    except Exception as e:
        if str(e) == 'exists':
            abort(409, 'User for current identity already exists.')
        print(e)
def test_get_foreign_key_ref():
    fql_query = q.let(
        {
            "references": {},
            "foreign_key": Fake.credit_card_number()
        },
        common.get_foreign_key_ref(q.var("foreign_key"), q.var("references")),
    )

    assert isinstance(fql_query, QueryExpression)
def create_transaction(client, num_customers, max_txn_amount):
    #
    # This method creates a random transaction that moves a random amount
    # from a source customer to a destination customer. Prior to committing
    # the transaction, a check is performed to ensure that the source customer
    # has a sufficient balance to cover the amount and will not go into an
    # overdrawn state.
    #
    uuid = uuid4().urn[9:]

    source_id = randint(1, num_customers)
    dest_id = randint(1, num_customers)
    while dest_id == source_id:
        dest_id = randint(1, num_customers)
    amount = randint(1, max_txn_amount)

    transaction = {"uuid": uuid, "sourceCust": source_id,
                   "destCust": dest_id, "amount": amount}

    res = client.query(
        q.let(
            {"source_customer": q.get(q.match(q.index("customer_by_id"), source_id)),
             "dest_customer": q.get(q.match(q.index("customer_by_id"), dest_id))},
            q.let(
                {"source_balance": q.select(["data", "balance"], q.var("source_customer")),
                 "dest_balance": q.select(["data", "balance"], q.var("dest_customer"))},
                q.let(
                    {"new_source_balance": q.subtract(q.var("source_balance"), amount),
                     "new_dest_balance": q.add(q.var("dest_balance"), amount)},
                    q.if_(
                        q.gte(q.var("new_source_balance"), 0),
                        q.do(
                            q.create(q.class_("transactions"), {"data": transaction}),
                            q.update(q.select("ref", q.var("source_customer")),
                                     {"data": {"txnID": uuid,
                                               "balance": q.var("new_source_balance")}}),
                            q.update(q.select("ref", q.var("dest_customer")),
                                     {"data": {"txnID": uuid,
                                               "balance": q.var("new_dest_balance")}})
                        ),
                        "Error. Insufficient funds."
                    )
                )
            )
        )
    )
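Since FQL evaluates let bindings in order and later bindings may reference earlier ones (and the Python driver preserves dict insertion order on Python 3.7+), the three nested q.let calls above could be collapsed into one. A sketch of the idea, not the original author's code, with illustrative literal values:

from faunadb import query as q

# Illustrative values; in the function above these come from randint().
source_id, amount = 1, 25

balance_check = q.let(
    {
        "source_customer": q.get(q.match(q.index("customer_by_id"), source_id)),
        "source_balance": q.select(["data", "balance"], q.var("source_customer")),
        "new_source_balance": q.subtract(q.var("source_balance"), amount),
    },
    q.if_(q.gte(q.var("new_source_balance"), 0), "ok",
          "Error. Insufficient funds."),
)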
def _translate_create_index(statement: token_groups.Statement,
                            idx: int) -> typing.List[QueryExpression]:
    _, unique = statement.token_next_by(m=(token_types.Keyword, "UNIQUE"),
                                        idx=idx)
    idx, _ = statement.token_next_by(m=(token_types.Keyword, "ON"), idx=idx)
    _, index_params = statement.token_next_by(i=token_groups.Function, idx=idx)

    params_idx, table_identifier = index_params.token_next_by(
        i=token_groups.Identifier)
    table_name = table_identifier.value

    params_idx, column_identifiers = index_params.token_next_by(
        i=token_groups.Parenthesis, idx=params_idx)

    index_fields = [
        token.value for token in column_identifiers.flatten()
        if token.ttype == token_types.Name
    ]

    if len(index_fields) > 1:
        raise exceptions.NotSupportedError(
            "Creating indexes for multiple columns is not currently supported.")

    index_terms = [{
        "field": ["data", index_field]
    } for index_field in index_fields]
    index_name = fql.index_name(table_name,
                                column_name=index_fields[0],
                                index_type=fql.IndexType.TERM)

    return [
        q.do(
            q.if_(
                # We automatically create indices for some fields on collection
                # creation, so we can skip explicit index creation if it
                # already exists.
                q.exists(q.index(index_name)),
                None,
                q.create_index({
                    "name": index_name,
                    "source": q.collection(table_name),
                    "terms": index_terms,
                    "unique": unique,
                }),
            ),
            q.let(
                {"collection": q.collection(table_name)},
                {"data": [{
                    "id": q.var("collection")
                }]},
            ),
        )
    ]
def find_order(secret, order_ref):
    client = FaunaClient(secret=secret)
    return client.query(
        q.let(
            {
                "order": q.get(q.ref(q.collection("orders"), order_ref)),
                "status_history": q.call(
                    "get_order_status_history",
                    q.select(["ref"], q.var("order")))
            },
            {
                "ref": q.select(["ref"], q.var("order")),
                "data": q.merge(
                    q.select(["data"], q.var("order")),
                    {"status_history": q.var("status_history")})
            }
        )
    )
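A hypothetical invocation; the secret and document ID are placeholders, and the get_order_status_history UDF must already exist in the database:

order = find_order("your-secret", "1234567890")  # placeholder values
print(order["data"]["status_history"])  # merged in by q.merge above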
def unsubscribe_from_anime(self, anime_doc_id: str):
    try:
        anime = client.query(
            q.get(q.ref(q.collection(animes), anime_doc_id)))
        client.query(
            q.let(
                {
                    'anime_ref': q.ref(q.collection(animes), anime_doc_id),
                    'bot_user': q.ref(q.collection(users), self.chat_id),
                    'followers': q.select(['data', 'followers'],
                                          q.get(q.var('anime_ref'))),
                },
                q.do(
                    q.update(
                        q.var('anime_ref'),
                        {'data': {
                            'followers': q.subtract(q.var('followers'), 1)
                        }}),
                    q.update(
                        q.var('bot_user'),
                        {
                            'data': {
                                'animes_watching': q.filter_(
                                    q.lambda_(
                                        'watched_anime_ref',
                                        q.not_(
                                            q.equals(
                                                q.var('watched_anime_ref'),
                                                q.var('anime_ref')))),
                                    q.select(['data', 'animes_watching'],
                                             q.get(q.var('bot_user'))))
                            }
                        }),
                    q.if_(q.equals(q.var('followers'), 1),
                          q.delete(q.var('anime_ref')),
                          'successful!'))))
        updater.bot.send_message(chat_id=self.chat_id,
                                 text='You have stopped following ' +
                                 anime['data']['title'])
    except errors.NotFound:
        logger.info(
            'Somehow, a user {0} almost unsubscribed from an anime that did not exist'
            .format(self.chat_id))
    except Exception as err:
        log_error(err)
def donate(update, context):
    try:
        for message in config['message']['donate']:
            context.bot.send_message(chat_id=update.effective_chat.id,
                                     text=message)
        client.query(
            q.let(
                {'user': q.ref(q.collection(users), update.effective_chat.id)},
                q.if_(
                    q.exists(q.var('user')),
                    q.update(q.var('user'),
                             {'data': {
                                 'last_command': '',
                             }}),
                    'Success!')))
    except Exception as err:
        log_error(err)
def help_user(update, context):
    user = User(update.effective_chat.id)
    if str(user.chat_id) == str(os.getenv('ADMIN_CHAT_ID')):
        message = config['message']['help_admin']
    else:
        message = config['message']['help']
    context.bot.send_message(chat_id=user.chat_id, text=message)
    try:
        client.query(
            q.let(
                {'user': q.ref(q.collection(users), user.chat_id)},
                q.if_(
                    q.exists(q.var('user')),
                    q.update(q.var('user'),
                             {'data': {
                                 'last_command': '',
                             }}),
                    'Success!')))
    except Exception as err:
        log_error(err)
def processMatchCounter():
    resp = client.query(
        q.let(
            {'currVal': q.select(
                ['data', 'data'],
                q.get(q.ref(q.collection('match_aggregate_info'),
                            getIntValue('match_count'))))},
            q.update(
                q.ref(
                    q.collection('match_aggregate_info'),
                    getIntValue('match_count')
                ),
                {
                    "data": {
                        "data": q.add(1, q.var('currVal'))
                    }
                }
            )
        )
    )
    return getDataFromRef(resp)
def wordPartsGenerator(word):
    return q.let(
        {
            "indexes": q.map_(
                # Reduce this array if you want fewer ngrams per word.
                # Setting it to [0] would only create the word itself;
                # setting it to [0, 1] would result in the word itself
                # and all ngrams that are one character shorter, etc.
                lambda index: q.subtract(q.length(word), index),
                maxNgrams),
            "indexesFiltered": q.filter_(
                # Keep a minimum part length of 3 characters.
                lambda l: q.gte(l, 3),
                q.var('indexes')),
            "ngramsArray": q.distinct(
                q.union(
                    q.map_(lambda l: q.ngram(q.lowercase(word), l, l),
                           q.var('indexesFiltered'))))
        },
        q.var('ngramsArray'))
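A hedged usage sketch; maxNgrams is assumed to be a module-level list of offsets as the comments describe (e.g. [0, 1, 2]), and the client secret is a placeholder:

from faunadb import query as q
from faunadb.client import FaunaClient

maxNgrams = [0, 1, 2]  # assumption: word itself plus ngrams 1-2 chars shorter

client = FaunaClient(secret="your-secret")  # placeholder
parts = client.query(wordPartsGenerator("Fauna"))
# e.g. ["fauna", "faun", "auna", "fau", "aun", "una"]
# (the 3-character minimum is enforced by the filter above)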
def _translate_drop_default(table_name: str,
                            column_name: str) -> QueryExpression:
    drop_default = q.map_(
        q.lambda_(
            "column_info_ref",
            q.update(q.var("column_info_ref"),
                     {"data": {
                         "default_": None
                     }}),
        ),
        q.paginate(_fetch_column_info_refs(table_name, column_name)),
    )

    return q.let(
        {
            "altered_docs": drop_default,
            # Should only be one document that matches the unique combination
            # of collection and field name, so we just select the first.
            "altered_ref": q.select([0, "ref"], q.var("altered_docs")),
        },
        {"data": [{
            "id": q.var("altered_ref")
        }]},
    )
def _translate_create_table(
        statement: token_groups.Statement,
        table_token_idx: int) -> typing.List[QueryExpression]:
    idx, table_identifier = statement.token_next_by(i=token_groups.Identifier,
                                                    idx=table_token_idx)
    table_name = table_identifier.value

    idx, column_identifiers = statement.token_next_by(
        i=token_groups.Parenthesis, idx=idx)

    field_metadata = _extract_column_definitions(column_identifiers)
    index_queries = _create_table_indices(table_name, field_metadata)

    collection_metadata: CollectionMetadata = {
        "fields": field_metadata,
        "indexes": _create_index_metadata(table_name, field_metadata),
    }
    information_metadata_query = _update_information_metadata(
        table_name, collection_metadata)

    # Fauna creates resources asynchronously, so we cannot create and use a
    # collection in the same transaction; we have to run the expressions that
    # create the collection and the indices that depend on it separately.
    return [
        *_make_sure_information_schema_exists(),
        q.create_collection({"name": table_name}),
        q.do(
            *index_queries,
            information_metadata_query,
            q.let(
                {"collection": q.collection(table_name)},
                {"data": [{
                    "id": q.var("collection")
                }]},
            ),
        ),
    ]
def record(self, data: Data) -> QueryResult:
    """In one query, update the existing topic document with data if the
    document already exists, else create a new document.

    Parameters
    ----------
    data : Data
        Parsed message from the MQTT broker.

    Returns
    -------
    QueryResult
        See src.types.QueryResult for its signature.
    """
    topic = q.match(self.index, data.get("topic", ""))
    update_record = q.update(q.select(["ref"], q.get(topic)), {"data": data})
    create_record = q.create(self.collection, {"data": data})
    result = self.db.query(
        q.let(
            {"topic_exists": q.exists(topic)},
            q.if_(q.var("topic_exists"), update_record, create_record),
        ))
    return result
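The same create-or-update (upsert) pattern outside the class, as a standalone sketch; the index and collection names here are hypothetical stand-ins for self.index and self.collection:

from faunadb import query as q
from faunadb.client import FaunaClient

client = FaunaClient(secret="your-secret")  # placeholder
data = {"topic": "sensors/temperature", "value": 21.5}
topic = q.match(q.index("topic_by_name"), data["topic"])  # hypothetical index

result = client.query(
    q.let(
        {"topic_exists": q.exists(topic)},
        q.if_(
            q.var("topic_exists"),
            q.update(q.select(["ref"], q.get(topic)), {"data": data}),
            q.create(q.collection("topics"), {"data": data}),
        ),
    ))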
def translate_drop(
        statement: token_groups.Statement) -> typing.List[QueryExpression]:
    """Translate a DROP SQL query into an equivalent FQL query.

    Params:
    -------
    statement: An SQL statement returned by sqlparse.

    Returns:
    --------
    A list of FQL query expressions.
    """
    idx, _ = statement.token_next_by(m=(token_types.Keyword, "TABLE"))
    _, table_identifier = statement.token_next_by(i=token_groups.Identifier,
                                                  idx=idx)
    table_name = table_identifier.value
    deleted_collection = q.select("ref", q.delete(q.collection(table_name)))

    return [
        q.do(
            q.map_(
                q.lambda_("ref", q.delete(q.var("ref"))),
                q.paginate(
                    q.union(
                        q.match(
                            q.index(
                                fql.index_name(
                                    "information_schema_tables_",
                                    column_name="name_",
                                    index_type=fql.IndexType.TERM,
                                )),
                            table_name,
                        ),
                        fql.convert_to_ref_set(
                            "information_schema_columns_",
                            q.range(
                                q.match(
                                    q.index(
                                        fql.index_name(
                                            "information_schema_columns_",
                                            column_name="table_name_",
                                            index_type=fql.IndexType.VALUE,
                                        ))),
                                [table_name],
                                [table_name],
                            ),
                        ),
                        fql.convert_to_ref_set(
                            "information_schema_indexes_",
                            q.range(
                                q.match(
                                    q.index(
                                        fql.index_name(
                                            "information_schema_indexes_",
                                            column_name="table_name_",
                                            index_type=fql.IndexType.VALUE,
                                        ))),
                                [table_name],
                                [table_name],
                            ),
                        ),
                    ),
                ),
            ),
            q.let(
                {"collection": deleted_collection},
                {"data": [{
                    "id": q.var("collection")
                }]},
            ),
        )
    ]
def test_let(self):
    self.assertJson(query.let({"x": 1}, 1), '{"in":1,"let":[{"x":1}]}')
def test_let(self):
    self.assertEqual(
        self._q(query.let({"x": 1, "y": 2}, query.var("x"))), 1)
    self.assertEqual(self._q(query.let(x=1, y=2).in_(query.var("x"))), 1)
def test_object(self):
    self.assertEqual(self._q({"x": query.let({"x": 1}, query.var("x"))}),
                     {"x": 1})
def _define_match_set(query_filter: sql.Filter) -> QueryExpression:
    field_name = query_filter.column.name
    comparison_value = query_filter.value
    index_name_for_collection = functools.partial(index_name,
                                                  query_filter.table_name)
    convert_to_collection_ref_set = functools.partial(convert_to_ref_set,
                                                      query_filter.table_name)

    get_info_indexes_with_references = lambda collection_name, field_name: q.map_(
        q.lambda_("info_index_ref", q.get(q.var("info_index_ref"))),
        q.paginate(
            q.match(
                q.index(
                    index_name(
                        "information_schema_indexes_",
                        column_name="name_",
                        index_type=IndexType.TERM,
                    )),
                index_name(
                    collection_name,
                    column_name=field_name,
                    index_type=IndexType.REF,
                ),
            ),
        ),
    )

    index_name_for_field = functools.partial(index_name_for_collection,
                                             field_name)
    equality_range = q.range(
        q.match(q.index(index_name_for_field(IndexType.VALUE))),
        [comparison_value],
        [comparison_value],
    )

    if query_filter.checks_whether_equal:
        if field_name == "ref":
            assert isinstance(comparison_value, str)
            return q.singleton(
                q.ref(q.collection(query_filter.table_name), comparison_value))

        return q.let(
            {
                "ref_index": q.index(index_name_for_field(IndexType.REF)),
                "term_index": q.index(index_name_for_field(IndexType.TERM)),
                "info_indexes": get_info_indexes_with_references(
                    query_filter.table_name, field_name),
                "comparison_value": comparison_value,
            },
            q.if_(
                q.exists(q.var("ref_index")),
                q.match(
                    q.var("ref_index"),
                    get_foreign_key_ref(
                        q.var("comparison_value"),
                        # Assumes that there is only one reference per foreign key
                        # and that it refers to the associated collection's ID field
                        # (e.g. {'associated_table': 'id'}).
                        # This is enforced via NotSupported errors when creating collections.
                        q.select([0, DATA, "referred_table_"],
                                 q.var("info_indexes")),
                    ),
                ),
                q.if_(
                    q.exists(q.var("term_index")),
                    q.match(
                        q.var("term_index"),
                        q.var("comparison_value"),
                    ),
                    convert_to_collection_ref_set(equality_range),
                ),
            ),
        )

    # In the building of Filter objects from SQL tokens, we enforce the
    # convention of <column name> <operator> <value> for WHERE clauses, so we
    # build the FQL queries assuming that '>' means 'column value greater than
    # literal value'. I can't think of a good way to centralize the knowledge
    # of this convention across all query translation, so I'm leaving this
    # note as a warning.
    if query_filter.checks_whether_greater_than:
        inclusive_comparison_range = q.range(
            q.match(q.index(index_name_for_field(IndexType.VALUE))),
            [comparison_value],
            [],
        )
        return convert_to_collection_ref_set(
            q.difference(inclusive_comparison_range, equality_range))

    if query_filter.checks_whether_greater_than_or_equal:
        inclusive_comparison_range = q.range(
            q.match(q.index(index_name_for_field(IndexType.VALUE))),
            [comparison_value],
            [],
        )
        return convert_to_collection_ref_set(inclusive_comparison_range)

    if query_filter.checks_whether_less_than:
        inclusive_comparison_range = q.range(
            q.match(q.index(index_name_for_field(IndexType.VALUE))),
            [],
            [comparison_value],
        )
        return convert_to_collection_ref_set(
            q.difference(inclusive_comparison_range, equality_range))

    if query_filter.checks_whether_less_than_or_equal:
        inclusive_comparison_range = q.range(
            q.match(q.index(index_name_for_field(IndexType.VALUE))),
            [],
            [comparison_value],
        )
        return convert_to_collection_ref_set(inclusive_comparison_range)

    raise exceptions.NotSupportedError(
        f"Unsupported comparison {query_filter.comparison} was received.")
def callback_handler_func(update: Update, context: CallbackContext):
    user = User(update.effective_chat.id)
    callback_message = update.callback_query.message.reply_markup.inline_keyboard[
        0][0].callback_data
    [command, payload] = callback_message.split(sep='=')
    if command == 'subscribe':
        user.subscribe_to_anime(payload)
    elif command == 'unsubscribe':
        user.unsubscribe_from_anime(payload)
    elif command == 'getlatest':
        try:
            anime_info = scraper.get_anime_info(payload)
            latest_episode_download_link = shorten(
                scraper.get_download_link(anime_info['latest_episode_link'],
                                          resolution=user.resolution))
            markup = [[
                InlineKeyboardButton(text='Download',
                                     url=latest_episode_download_link)
            ]]
            context.bot.send_message(chat_id=user.chat_id,
                                     text=anime_info['latest_episode_title'],
                                     reply_markup=InlineKeyboardMarkup(markup))
        except CannotDownloadAnimeException as err:
            log_error(err)
            context.bot.send_message(chat_id=user.chat_id,
                                     text='Sorry, ' + payload +
                                     ' could not be downloaded at this '
                                     'time!')
            context.bot.send_message(chat_id=os.getenv('ADMIN_CHAT_ID'),
                                     text='A user tried to download ' +
                                     payload +
                                     ' but could not due to error: ' +
                                     str(err))
            return
        except Exception as err:
            log_error(err)
            return
        else:
            # check if anime is in our anime registry
            try:
                anime_from_db = client.query(
                    q.if_(
                        q.exists(
                            q.match(q.index(anime_by_id),
                                    anime_info['anime_id'])),
                        q.let(
                            {
                                'anime': q.get(
                                    q.match(q.index(anime_by_id),
                                            anime_info['anime_id']))
                            },
                            q.if_(
                                q.gt(
                                    anime_info['number_of_episodes'],
                                    q.select(['data', 'episodes'],
                                             q.var('anime'))),
                                q.var('anime'), None)),
                        None))
            except errors.NotFound:
                anime_from_db = None
            if anime_from_db is not None:
                send_update_to_subscribed_users(
                    anime_from_db,
                    download_links={
                        user.resolution: latest_episode_download_link
                    },
                    anime_info=anime_info)
    elif command == 'set_resolution':
        try:
            new_res = Resolution(payload)
            user.resolution = new_res
            context.bot.send_message(
                chat_id=user.chat_id,
                text=f'Your desired resolution has been set to '
                f'{new_res.value} ({resolutions[new_res]}).\n'
                'This resolution will be used for your future /subscribe '
                'and /latest commands.')
        except ValueError:
            context.bot.send_message(chat_id=user.chat_id,
                                     text='Unidentified resolution level!')
            context.bot.send_message(chat_id=os.getenv('ADMIN_CHAT_ID'),
                                     text='Unidentified resolution level!')
    else:
        pass
def translate_select(sql_query: sql.SQLQuery) -> QueryExpression:
    """Translate a SELECT SQL query into an equivalent FQL query.

    Params:
    -------
    sql_query: An SQLQuery instance.

    Returns:
    --------
    An FQL query expression based on the SQL query.
    """
    document_pages = _define_document_pages(sql_query)
    selected_table = next(table for table in sql_query.tables
                          if table.has_columns)

    get_field_value = lambda function_value, raw_value: q.if_(
        q.equals(function_value, common.NULL),
        q.if_(q.equals(raw_value, common.NULL), None, raw_value),
        q.select([common.DATA, 0], function_value),
    )

    calculate_function_value = lambda document_set, function_name: q.if_(
        q.is_null(function_name),
        common.NULL,
        q.if_(
            q.equals(function_name, sql.Function.COUNT.value),
            q.count(document_set),
            common.NULL,
        ),
    )

    # With aggregation functions, standard behaviour is to include the first
    # value if any column selections are part of the query, at least until we
    # add support for GROUP BY.
    get_first_document = lambda documents: q.if_(q.is_empty(documents), [{}],
                                                 q.take(1, documents))

    translate_document_fields = lambda maybe_documents: q.let(
        {
            # We map over selected_fields to build the document object
            # to maintain the order of fields as queried. Otherwise,
            # SQLAlchemy gets confused and assigns values to the incorrect keys.
            "selected_column_info":
                [[col.table_name, col.name, col.function_name]
                 for col in sql_query.columns],
            "has_functions":
                any(col.function_name for col in sql_query.columns),
            "maybe_document_set": q.if_(
                q.var("has_functions"),
                get_first_document(maybe_documents),
                maybe_documents,
            ),
            "field_alias_map": sql_query.alias_map,
        },
        q.map_(
            q.lambda_(
                "maybe_document",
                q.let(
                    {
                        "document": q.if_(
                            q.is_ref(q.var("maybe_document")),
                            {
                                # We use the selected table name here instead
                                # of deriving the collection name from the
                                # document ref in order to save a 'get' call
                                # from inside of a map, which could get
                                # expensive.
                                selected_table.name: q.merge(
                                    q.select(
                                        common.DATA,
                                        q.get(q.var("maybe_document")),
                                    ),
                                    {"ref": q.var("maybe_document")},
                                ),
                            },
                            q.var("maybe_document"),
                        ),
                    },
                    q.to_object(
                        q.map_(
                            q.lambda_(
                                ["collection_name", "field_name",
                                 "function_name"],
                                q.let(
                                    {
                                        "function_value":
                                            calculate_function_value(
                                                maybe_documents,
                                                q.var("function_name")),
                                        "raw_value": q.select(
                                            [
                                                q.var("collection_name"),
                                                q.var("field_name"),
                                            ],
                                            q.var("document"),
                                            default=common.NULL,
                                        ),
                                    },
                                    [
                                        q.select(
                                            [
                                                q.var("collection_name"),
                                                q.var("field_name"),
                                            ],
                                            q.var("field_alias_map"),
                                        ),
                                        get_field_value(
                                            q.var("function_value"),
                                            q.var("raw_value")),
                                    ],
                                ),
                            ),
                            q.var("selected_column_info"),
                        )),
                ),
            ),
            q.var("maybe_document_set"),
        ),
    )

    return q.let(
        {
            "maybe_documents": document_pages,
            "translated_documents":
                translate_document_fields(q.var("maybe_documents")),
            "result":
                q.distinct(q.var("translated_documents"))
                if sql_query.distinct else q.var("translated_documents"),
        },
        # Paginated sets hold an array of results in a 'data' field, so we try
        # to flatten it in case we're dealing with pages instead of an array
        # of results, which doesn't have such nesting.
        {common.DATA: q.select(common.DATA, q.var("result"), q.var("result"))},
    )
def test_typecheckfns(self):
    coll = query.collection("typecheck_coll")
    db = query.database("typecheck_db")
    fn = query.function("typecheck_fn")
    index = query.index("typecheck_index")
    self.admin_client.query(
        query.create_collection({"name": "typecheck_coll"}))
    self.admin_client.query(query.create_index(
        {"name": "typecheck_index", "source": coll, "active": True}))
    doc = self.admin_client.query(query.create(
        coll, {"data": {}, "credentials": {"password": "******"}}))
    self.admin_client.query(query.create_database({"name": "typecheck_db"}))
    function = self._q(query.create_function(
        {"name": "typecheck_fn",
         "body": query.query(query.lambda_("x", query.now()))}))
    key = self.admin_client.query(
        query.create_key({"database": db, "role": "admin"}))
    token = self._q(query.login(doc["ref"], {"password": "******"}))
    credentials = self._q(
        query.select(['data', 0], query.paginate(query.credentials())))
    role = self.admin_client.query(query.create_role(
        {"name": "typecheck_role", "membership": [], "privileges": []}))

    values = [
        None,
        bytearray([12, 3, 4, 5]),
        credentials,
        90,
        3.14,
        True,
        query.to_date(query.now()),
        query.date("1970-01-01"),
        query.now(),
        query.epoch(1, "second"),
        query.time("1970-01-01T00:00:00Z"),
        {"x": 10},
        query.get(doc["ref"]),
        query.paginate(query.collections()),
        [1, 2, 3],
        "a string",
        coll,
        query.collections(),
        query.match(index),
        query.union(query.match(index)),
        doc["ref"],
        query.get(doc["ref"]),
        index,
        db,
        coll,
        token["ref"],
        role["ref"],
        key["ref"],
        function["ref"],
        query.get(function["ref"]),
        query.query(query.lambda_("x", query.var("x"))),
    ]
    pairs = [
        ["array", query.is_array],
        ["object", query.is_object],
        ["string", query.is_string],
        ["null", query.is_null],
        ["number", query.is_number],
        ["bytes", query.is_bytes],
        ["date", query.is_date],
        ["timestamp", query.is_timestamp],
        ["set", query.is_set],
        ["ref", query.is_ref],
        ["boolean", query.is_boolean],
        ["double", query.is_double],
        ["integer", query.is_integer],
        ["database", query.is_database],
        ["index", query.is_index],
        ["collection", query.is_collection],
        ["token", query.is_token],
        ["function", query.is_function],
        ["collection", query.is_collection],
        ["role", query.is_role],
        ["credentials", query.is_credentials],
        ["key", query.is_key],
    ]
    expected = {
        "array": 1,
        "boolean": 1,
        "bytes": 1,
        "collection": 3,
        "credentials": 1,
        "database": 1,
        "date": 2,
        "double": 1,
        "function": 2,
        "integer": 1,
        "index": 1,
        "key": 1,
        "null": 1,
        "number": 2,
        "object": 5,
        "ref": 11,
        "role": 1,
        "set": 3,
        "string": 1,
        "timestamp": 3,
        "token": 1,
    }

    q = []
    for p in pairs:
        d = dict()
        d[p[0]] = query.count(
            query.filter_(query.lambda_("v", p[1](query.var("v"))),
                          query.var("vals")))
        q.append(d)

    actual = self._q(query.let({"vals": values}, query.merge({}, q)))
    self.assertEqual(actual, expected)
def subscribe_to_anime(self, anime_link: str):
    try:
        # create a new anime document
        anime_info = anime_alarm.utils.GGAScraper().get_anime_info(anime_link)
        print(anime_info['anime_id'])
        result = client.query(
            q.let(
                {
                    'user_anime_list': q.select(
                        ['data', 'animes_watching'],
                        q.get(q.ref(q.collection(users), self.chat_id))),
                },
                q.if_(
                    # check if this anime exists in the db
                    q.exists(
                        q.match(q.index(anime_by_id), anime_info['anime_id'])),
                    # if it exists...
                    q.let(
                        {
                            'anime_ref': q.select(
                                'ref',
                                q.get(
                                    q.match(q.index(anime_by_id),
                                            anime_info['anime_id'])))
                        },
                        q.if_(
                            # check if user has subscribed to this anime already
                            q.contains_value(q.var('anime_ref'),
                                             q.var('user_anime_list')),
                            'This anime is already on your watch list!',
                            q.do(
                                q.update(
                                    q.ref(q.collection(users), self.chat_id),
                                    {
                                        'data': {
                                            'animes_watching': q.append(
                                                q.var('user_anime_list'),
                                                [q.var('anime_ref')])
                                        }
                                    }),
                                q.update(
                                    q.var('anime_ref'),
                                    {
                                        'data': {
                                            'followers': q.add(
                                                q.select(
                                                    ['data', 'followers'],
                                                    q.get(q.var('anime_ref'))),
                                                1)
                                        }
                                    }),
                            ))),
                    q.let(
                        {'new_anime_id': q.new_id()},
                        q.do(
                            # create new anime document
                            q.create(
                                q.ref(q.collection(animes),
                                      q.var('new_anime_id')),
                                {
                                    'data': {
                                        'title': anime_info['title'],
                                        'followers': 1,
                                        'link': anime_link,
                                        'anime_id': anime_info['anime_id'],
                                        'anime_alias': anime_info['anime_alias'],
                                        'episodes': anime_info['number_of_episodes'],
                                        'last_episode': {
                                            'link': anime_info['latest_episode_link'],
                                            'title': anime_info['latest_episode_title'],
                                        },
                                    }
                                }),
                            # add to user's list of subscribed animes
                            q.update(
                                q.ref(q.collection(users), self.chat_id),
                                {
                                    'data': {
                                        'animes_watching': q.append(
                                            q.var('user_anime_list'),
                                            [q.ref(q.collection(animes),
                                                   q.var('new_anime_id'))])
                                    }
                                }),
                        )))))
        if isinstance(result, str):
            updater.bot.send_message(chat_id=self.chat_id, text=result)
        else:
            updater.bot.send_message(
                chat_id=self.chat_id,
                text='You are now listening for updates on ' +
                anime_info['title'])
    except Exception as err:
        log_error(err)