def _load_predictions(data):
    """Load prediction records into the 'predictions' collection.

    Params:
    -------
    data: Dict keyed by collection name. Must contain "predictions" plus
        the "teams", "matches", and "ml_models" ID maps that _assign_ref
        uses to translate raw foreign IDs into document Refs.

    Side effects: writes the Fauna ID of each created document back onto
    the corresponding record under the "id" key.
    """
    predictions = data["predictions"]
    records = list(predictions.values())
    # Builds one FQL document per record: iterate the record's key/value
    # pairs and, for the three foreign-key fields, swap the raw ID for a
    # Ref into the corresponding collection; all other values pass through.
    build_document = lambda prediction: q.to_object(
        q.map_(
            q.lambda_(
                ["key", "value"],
                [
                    q.var("key"),
                    q.if_(
                        q.equals(q.var("key"), "predicted_winner_id"),
                        _assign_ref(q.var("teams"), q.var("team_map"), q.var("value")),
                        q.if_(
                            q.equals(q.var("key"), "match_id"),
                            _assign_ref(
                                q.var("matches"), q.var("match_map"), q.var("value")
                            ),
                            q.if_(
                                q.equals(q.var("key"), "ml_model_id"),
                                _assign_ref(
                                    q.var("ml_models"),
                                    q.var("ml_model_map"),
                                    q.var("value"),
                                ),
                                q.var("value"),
                            ),
                        ),
                    ),
                ],
            ),
            q.to_array(prediction),
        )
    )
    # Bindings referenced by build_document via q.var(...); presumably
    # installed as Let bindings by _create_documents — confirm against
    # its definition.
    let_params = {
        "collection": q.collection("predictions"),
        "teams": q.collection("teams"),
        "team_map": data["teams"],
        "matches": q.collection("matches"),
        "match_map": data["matches"],
        "ml_models": q.collection("ml_models"),
        "ml_model_map": data["ml_models"],
    }
    documents = _create_documents(let_params, records, build_document)
    # Record the generated Fauna IDs so later loads can reference them.
    for record, document in zip(records, documents):
        record["id"] = document["ref"].id()
def create_database(scheme, domain, port, secret, db_name):
    """Create (or recreate) a database and return a server-key secret for it.

    Params:
    -------
    scheme, domain, port, secret: Admin connection details. For
        FaunaDB-Cloud, replace 'secret' with your own admin secret.
    db_name: Name of the database to (re)create.

    Returns:
    --------
    The secret of a freshly minted server-role key scoped to db_name.
    """
    admin_client = FaunaClient(secret=secret, domain=domain, scheme=scheme, port=port)
    print("Connected to FaunaDB as admin!")

    # Drop any pre-existing database of the same name and recreate it,
    # all in a single query to Fauna.
    recreate_db = q.if_(
        q.exists(q.database(db_name)),
        [q.delete(q.database(db_name)), q.create_database({"name": db_name})],
        q.create_database({"name": db_name}),
    )
    res = admin_client.query(recreate_db)
    print('DB {0} created:'.format(db_name))
    pprint.pprint(res)

    # Mint a server-role key for the new database; its secret is what the
    # remaining examples use to connect.
    res = admin_client.query(
        q.select(
            ["secret"],
            q.create_key({"database": q.database(db_name), "role": "server"}),
        )
    )
    print('DB {0} secret: {1}'.format(db_name, res))
    return res
def get_latest(update: Update, context: CallbackContext):
    """Handle /getlatest: upsert the user's document and prompt for an anime."""
    chat_id = update.effective_chat.id
    try:
        user_ref = q.ref(q.collection(users), chat_id)
        # First-time users get a full default profile; known users only
        # have their pending command recorded.
        default_profile = {
            'data': {
                'name': update.message.chat.first_name,
                'last_command': 'getlatest',
                'animes_watching': [],
                'config': {
                    'resolution': Resolution.MEDIUM.value
                }
            }
        }
        client.query(
            q.if_(
                q.exists(user_ref),
                q.update(user_ref,
                         {'data': {
                             'last_command': 'getlatest'
                         }}),
                q.create(user_ref, default_profile)))
        context.bot.send_message(chat_id=chat_id,
                                 text='Enter the anime you want to get!')
    except Exception as err:
        log_error(err)
def create_collection(client):
    """Idempotently create the example app's collections.

    Params:
    -------
    client: A FaunaClient connected to the target database.

    Each collection is created only if it does not already exist, all in
    a single query.
    """
    # dict.fromkeys dedupes while preserving order; the original list
    # contained "users" twice, which issued a redundant exists/create
    # step for that collection inside the query.
    collections = list(dict.fromkeys(
        ["products", "users", "orders", "categories", "users"]))
    client.query(
        q.map_(
            lambda collection_name: q.if_(
                q.exists(q.collection(collection_name)),
                True,
                q.create_collection({"name": collection_name})),
            collections))
def login(data):
    """Authenticate an account and return its token plus the user's alias.

    Params:
    -------
    data: Mapping with 'username' and 'password' keys.

    Returns a dict of the form {'data': {'token': ..., 'user': ...}},
    where 'user' is the linked user document's alias or None when no user
    is linked. On any failure the exception is printed and None is
    returned implicitly.
    """
    try:
        return current_app.fauna_client.query(
            q.let(
                {
                    # Login against the unique (username, type) index; the
                    # response holds the new token's secret and the account
                    # instance it belongs to.
                    'response': q.login(
                        q.match(q.index('unique_account_username_type'),
                                [data.get('username'), 'EMAIL']),
                        {'password': data.get('password')}),
                    # The account document may reference a user document;
                    # default to None when that link is absent.
                    'user': q.select_with_default(
                        ['data', 'user'],
                        q.get(q.select(['instance'], q.var('response'))),
                        None)
                },
                {
                    'data': {
                        'token': q.select('secret', q.var('response')),
                        # Only dereference 'user' when it is a real Ref.
                        'user': q.if_(
                            q.is_ref(q.var('user')),
                            q.select(['data', 'alias'], q.get(q.var('user'))),
                            None)
                    }
                }))
    except Exception as e:
        print(e)
def unsubscribe(update: Update, context: CallbackContext):
    """Send the user one 'Unsubscribe' button per anime they follow."""
    user = User(update.effective_chat.id)
    try:
        # Fetch the full document for every anime on the user's watch
        # list; an unknown user simply yields an empty list.
        animes_watched = client.query(
            q.let(
                {'bot_user': q.ref(q.collection(users), user.chat_id)},
                q.if_(
                    q.exists(q.var('bot_user')),
                    q.map_(
                        q.lambda_('doc_ref', q.get(q.var('doc_ref'))),
                        q.select(['data', 'animes_watching'],
                                 q.get(q.var('bot_user')))),
                    [])))
        for anime in animes_watched:
            unsubscribe_button = InlineKeyboardButton(
                'Unsubscribe',
                callback_data='unsubscribe=' + anime['ref'].id())
            context.bot.send_message(
                chat_id=user.chat_id,
                text=anime['data']['title'],
                reply_markup=InlineKeyboardMarkup([[unsubscribe_button]]))
        # update last command
        user.last_command = ''
        if not animes_watched:
            context.bot.send_message(
                chat_id=user.chat_id,
                text='You are currently not subscribed to any anime')
    except Exception as err:
        log_error(err)
def subscribe(update, context):
    """Handle /subscribe: upsert the user's document and prompt for an anime."""
    chat_id = update.effective_chat.id
    try:
        user_ref = q.ref(q.collection(users), chat_id)
        # First-time users get a full default profile; known users only
        # have their pending command recorded.
        default_profile = {
            'data': {
                'name': update.message.chat.first_name,
                'is_admin': False,
                'last_command': 'subscribe',
                'animes_watching': [],
                'config': {
                    'resolution': Resolution.MEDIUM.value
                }
            }
        }
        client.query(
            q.if_(
                q.exists(user_ref),
                q.update(user_ref,
                         {'data': {
                             'last_command': 'subscribe'
                         }}),
                q.create(user_ref, default_profile)))
        context.bot.send_message(
            chat_id=chat_id,
            text='Enter the anime you want to get notifications for!')
    except Exception as err:
        log_error(err)
def main(argv):
    """Example: create a database, then conditionally delete it."""
    # Connection settings for a local FaunaDB instance.
    scheme = "http"
    domain = "127.0.0.1"
    port = "8443"
    secret = "secret"
    admin_client = FaunaClient(secret=secret, domain=domain, scheme=scheme,
                               port=port)

    # If you are using FaunaDB-Cloud, use these lines to create the
    # connection instead (set your own secret, comment out the above):
    # secret = "Your Secret Goes here"
    # admin_client = FaunaClient(secret=secret)

    db_name = "TestDB"

    # Create the database.
    res = admin_client.query(q.create_database({"name": db_name}))
    print('DB {0} created: {1}'.format(db_name, res))

    # Delete the database if it exists (otherwise evaluate to True).
    res = admin_client.query(
        q.if_(q.exists(q.database(db_name)), q.delete(q.database(db_name)),
              True))
    print('DB {0} deleted: {1}'.format(db_name, res))
def get_foreign_key_ref(
    foreign_value: QueryExpression,
    reference_collection_name: QueryExpression,
) -> QueryExpression:
    """Build a query resolving a foreign-key value to a document Ref.

    Params:
    -------
    foreign_value: The value to look up, usually an ID.
    reference_collection_name: Name of the collection the foreign key
        refers to.

    Returns:
    --------
    Fauna query expression evaluating to the referenced document's Ref,
    or None when either the value or the collection name is blank.
    """
    # Blank means: Python None, or the project's NULL sentinel in either slot.
    blank_reference = q.or_(
        q.is_null(foreign_value),
        q.equals(foreign_value, NULL),
        q.equals(reference_collection_name, NULL),
    )
    return q.let(
        {"is_blank_reference": blank_reference},
        q.if_(
            q.var("is_blank_reference"),
            None,
            q.ref(q.collection(reference_collection_name), foreign_value),
        ),
    )
def create_user(data):
    """Create a user (plus its private user_meta) for the current identity.

    Params:
    -------
    data: Mapping with 'name', 'dob', and 'alias' keys.

    Aborts with HTTP 409 when the identity already has a linked user.
    Returns the result of Fauna's 'current_user' function on success;
    other failures are printed and return None implicitly.
    """
    try:
        current_identity = get_current_identity()
        # Gravatar URLs are keyed by an MD5 hex digest of the address;
        # the raw username is hashed as-is here.
        email_hash = md5(
            current_identity['data']['username'].encode('utf-8')).hexdigest()
        return current_app.fauna_client.query(
            q.if_(
                # A Ref in data.user means this identity already has a user.
                q.is_ref(
                    q.select_with_default(['data', 'user'],
                                          q.get(q.current_identity()), None)),
                q.abort('exists'),
                q.let(
                    {
                        'userMetaRef': q.new_id(),
                        'userRef': q.new_id()
                    },
                    q.do(
                        # Private metadata document (name, email, dob).
                        q.create(
                            q.ref(q.collection('user_metas'),
                                  q.var('userMetaRef')),
                            {
                                'data': {
                                    'name': data.get('name'),
                                    'email': q.select(
                                        ['data', 'username'],
                                        q.get(q.current_identity())),
                                    'dob': parser.parse(data.get('dob')).date()
                                }
                            }),
                        # Public user document linking back to the metadata.
                        q.create(
                            q.ref(q.collection('users'), q.var('userRef')),
                            {
                                'data': {
                                    'alias': data.get('alias'),
                                    'avatar': f'https://www.gravatar.com/avatar/{email_hash}',
                                    'public': False,
                                    'meta': q.ref(q.collection('user_metas'),
                                                  q.var('userMetaRef')),
                                }
                            }),
                        # Link the new user back onto the identity document.
                        q.update(
                            q.current_identity(),
                            {
                                'data': {
                                    'user': q.ref(q.collection('users'),
                                                  q.var('userRef'))
                                }
                            }),
                        q.call('current_user', [])))))
    except Exception as e:
        # q.abort surfaces as an exception whose message is 'exists'.
        if str(e) == 'exists':
            abort(409, 'User for current identity already exists.')
        print(e)
def _translate_create_index(statement: token_groups.Statement,
                            idx: int) -> typing.List[QueryExpression]:
    """Translate a CREATE [UNIQUE] INDEX statement into FQL queries.

    Params:
    -------
    statement: Parsed sqlparse statement for the CREATE INDEX.
    idx: Token index at which to start scanning.

    Returns:
    --------
    A single-element list containing a Do query that creates the index
    (unless it already exists) and then yields the collection in a
    SQL-response-like shape.

    Raises NotSupportedError for multi-column indexes.
    """
    _, unique = statement.token_next_by(m=(token_types.Keyword, "UNIQUE"),
                                        idx=idx)
    idx, _ = statement.token_next_by(m=(token_types.Keyword, "ON"), idx=idx)
    # The 'ON table_name(column, ...)' clause parses as a Function group.
    _, index_params = statement.token_next_by(i=token_groups.Function, idx=idx)
    params_idx, table_identifier = index_params.token_next_by(
        i=token_groups.Identifier)
    table_name = table_identifier.value
    params_idx, column_identifiers = index_params.token_next_by(
        i=token_groups.Parenthesis, idx=params_idx)
    index_fields = [
        token.value for token in column_identifiers.flatten()
        if token.ttype == token_types.Name
    ]
    if len(index_fields) > 1:
        raise exceptions.NotSupportedError(
            "Creating indexes for multiple columns is not currently supported."
        )
    index_terms = [{
        "field": ["data", index_field]
    } for index_field in index_fields]
    index_name = fql.index_name(table_name,
                                column_name=index_fields[0],
                                index_type=fql.IndexType.TERM)
    return [
        q.do(
            q.if_(
                # We automatically create indices for some fields on collection creation,
                # so we can skip explicit index creation if it already exists.
                q.exists(q.index(index_name)),
                None,
                # NOTE(review): 'unique' here is a sqlparse Token (or None),
                # not a bool — confirm the driver serializes it as intended.
                q.create_index({
                    "name": index_name,
                    "source": q.collection(table_name),
                    "terms": index_terms,
                    "unique": unique,
                }),
            ),
            q.let(
                {"collection": q.collection(table_name)},
                {"data": [{
                    "id": q.var("collection")
                }]},
            ),
        )
    ]
def _make_sure_information_schema_exists() -> typing.List[QueryExpression]:
    """Return queries that lazily create the information-schema collections
    and their indexes, skipping whatever already exists."""
    create_collection_queries = [
        q.create_collection({"name": collection_name})
        for collection_name in INFORMATION_SCHEMA_COLLECTIONS
    ]
    index_queries = [
        _create_table_indices(collection_name, field_metadata)
        for collection_name, field_metadata in
        INFORMATION_SCHEMA_COLLECTIONS.items()
    ]
    # The '_tables_' collection/index stands in for the whole schema:
    # either everything was created on a previous run or nothing was.
    collections_query = q.if_(
        q.exists(q.collection("information_schema_tables_")),
        None,
        q.do(*create_collection_queries),
    )
    indexes_query = q.if_(
        q.exists(q.index(fql.index_name("information_schema_tables_"))),
        None,
        q.do(*index_queries),
    )
    return [collections_query, indexes_query]
def unsubscribe_from_anime(self, anime_doc_id: str):
    """Remove this user from an anime's followers.

    Decrements the anime's follower count, filters the anime out of the
    user's watch list and, when this user was the last follower, deletes
    the anime document entirely. Notifies the user via Telegram.

    Params:
    -------
    anime_doc_id: Fauna document ID of the anime to unfollow.
    """
    try:
        # Fetched separately so the title is available for the
        # confirmation message below; raises NotFound if absent.
        anime = client.query(
            q.get(q.ref(q.collection(animes), anime_doc_id)))
        client.query(
            q.let(
                {
                    'anime_ref': q.ref(q.collection(animes), anime_doc_id),
                    'bot_user': q.ref(q.collection(users), self.chat_id),
                    # Follower count as of the start of the transaction.
                    'followers': q.select(['data', 'followers'],
                                          q.get(q.var('anime_ref'))),
                },
                q.do(
                    q.update(
                        q.var('anime_ref'),
                        {'data': {
                            'followers': q.subtract(
                                q.var('followers'), 1)
                        }}),
                    # Drop this anime's ref from the user's watch list.
                    q.update(
                        q.var('bot_user'),
                        {
                            'data': {
                                'animes_watching': q.filter_(
                                    q.lambda_(
                                        'watched_anime_ref',
                                        q.not_(
                                            q.equals(
                                                q.var('watched_anime_ref'),
                                                q.var('anime_ref')))),
                                    q.select(['data', 'animes_watching'],
                                             q.get(q.var('bot_user'))))
                            }
                        }),
                    # Last follower gone: delete the anime document itself.
                    q.if_(q.equals(q.var('followers'), 1),
                          q.delete(q.var('anime_ref')),
                          'successful!'))))
        updater.bot.send_message(chat_id=self.chat_id,
                                 text='You have stopped following ' +
                                 anime['data']['title'])
    except errors.NotFound:
        logger.info(
            'Somehow, a user {0} almost unsubscribed from an anime that did not exist'
            .format(self.chat_id))
    except Exception as err:
        log_error(err)
def donate(update, context):
    """Send the donation messages and clear the user's pending command."""
    try:
        for message in config['message']['donate']:
            context.bot.send_message(chat_id=update.effective_chat.id,
                                     text=message)
        # Reset last_command for known users; unknown users are a no-op.
        clear_last_command = q.let(
            {'user': q.ref(q.collection(users), update.effective_chat.id)},
            q.if_(
                q.exists(q.var('user')),
                q.update(q.var('user'), {'data': {
                    'last_command': '',
                }}),
                'Success!'))
        client.query(clear_last_command)
    except Exception as err:
        log_error(err)
def help_user(update, context):
    """Send the help text (admin variant for the admin) and clear last_command."""
    user = User(update.effective_chat.id)
    is_admin = str(user.chat_id) == str(os.getenv('ADMIN_CHAT_ID'))
    message = (config['message']['help_admin']
               if is_admin else config['message']['help'])
    context.bot.send_message(chat_id=user.chat_id, text=message)
    try:
        # Reset last_command for known users; unknown users are a no-op.
        client.query(
            q.let(
                {'user': q.ref(q.collection(users), user.chat_id)},
                q.if_(
                    q.exists(q.var('user')),
                    q.update(q.var('user'), {'data': {
                        'last_command': '',
                    }}),
                    'Success!')))
    except Exception as err:
        log_error(err)
def create_product(secret, name, price, quantity, categories):
    """Create a product document after validating its categories exist.

    Aborts the query with 'Categories not found' when any category ID is
    unknown (per the 'check_if_categories_exists' Fauna function).
    """
    client = FaunaClient(secret=secret)
    category_refs = [
        q.ref(q.collection("categories"), category) for category in categories
    ]
    product_data = {
        "name": name,
        "price": price,
        "quantity": quantity,
        "createdAt": q.now(),  # server-side timestamp
        "categories": category_refs,
    }
    return client.query(
        q.if_(
            q.call('check_if_categories_exists', categories),
            q.create(q.collection('products'), {"data": product_data}),
            q.abort('Categories not found'),
        )
    )
def create_transaction(client, num_customers, max_txn_amount):
    """Move a random amount between two distinct random customers.

    The transfer only commits when the source customer's balance covers
    the amount; otherwise the query evaluates to an error string instead
    of going overdrawn.
    """
    txn_id = uuid4().urn[9:]
    source_id = randint(1, num_customers)
    dest_id = randint(1, num_customers)
    # Re-roll until source and destination differ.
    while dest_id == source_id:
        dest_id = randint(1, num_customers)
    amount = randint(1, max_txn_amount)
    transaction = {
        "uuid": txn_id,
        "sourceCust": source_id,
        "destCust": dest_id,
        "amount": amount,
    }
    # Record the transaction and the two balance updates atomically.
    apply_transfer = q.do(
        q.create(q.class_("transactions"), {"data": transaction}),
        q.update(q.select("ref", q.var("source_customer")),
                 {"data": {"txnID": txn_id,
                           "balance": q.var("new_source_balance")}}),
        q.update(q.select("ref", q.var("dest_customer")),
                 {"data": {"txnID": txn_id,
                           "balance": q.var("new_dest_balance")}}),
    )
    res = client.query(
        q.let(
            {"source_customer":
             q.get(q.match(q.index("customer_by_id"), source_id)),
             "dest_customer":
             q.get(q.match(q.index("customer_by_id"), dest_id))},
            q.let(
                {"source_balance":
                 q.select(["data", "balance"], q.var("source_customer")),
                 "dest_balance":
                 q.select(["data", "balance"], q.var("dest_customer"))},
                q.let(
                    {"new_source_balance":
                     q.subtract(q.var("source_balance"), amount),
                     "new_dest_balance":
                     q.add(q.var("dest_balance"), amount)},
                    q.if_(
                        q.gte(q.var("new_source_balance"), 0),
                        apply_transfer,
                        "Error. Insufficient funds.")))))
def record(self, data: Data) -> QueryResult:
    """Upsert a topic document in a single query.

    Updates the existing document for the message's topic when one
    exists, otherwise creates a new one.

    Parameters
    ----------
    data : Data
        Parsed message from the MQTT broker.

    Returns
    -------
    QueryResult
        See src.types.QueryResult for its signature.
    """
    topic_match = q.match(self.index, data.get("topic", ""))
    return self.db.query(
        q.let(
            {"topic_exists": q.exists(topic_match)},
            q.if_(
                q.var("topic_exists"),
                q.update(q.select(["ref"], q.get(topic_match)),
                         {"data": data}),
                q.create(self.collection, {"data": data}),
            ),
        ))
def update_product(secret, ref, data):
    """Update a product document, validating any supplied categories.

    Params:
    -------
    secret: Fauna secret used to build the client.
    ref: Document ID of the product to update.
    data: Field values to merge into the product's data.

    When data contains a 'categories' list, the query aborts with
    'Categories not found' unless every category exists; otherwise the
    IDs are rewritten into Refs before updating.
    """
    client = FaunaClient(secret=secret)
    categories = data.get("categories")
    # No category change requested: a plain update suffices.
    # (Idiom fix: was `if(data.get("categories") == None)`.)
    if categories is None:
        return client.query(
            q.update(q.ref(q.collection('products'), ref), {"data": data}))
    return client.query(
        q.if_(
            q.call('check_if_categories_exists', categories),
            q.update(
                q.ref(q.collection('products'), ref),
                {
                    "data": {
                        **data,
                        "categories": [
                            q.ref(q.collection("categories"), category)
                            for category in categories
                        ],
                    }
                },
            ),
            q.abort('Categories not found'),
        )
    )
def callback_handler_func(update: Update, context: CallbackContext):
    """Dispatch inline-keyboard callbacks (subscribe/unsubscribe/getlatest/
    set_resolution) for the pressed button's 'command=payload' data."""
    user = User(update.effective_chat.id)
    # The callback data lives on the first button of the pressed message.
    callback_message = update.callback_query.message.reply_markup.inline_keyboard[
        0][0].callback_data
    [command, payload] = callback_message.split(sep='=')
    if command == 'subscribe':
        user.subscribe_to_anime(payload)
    elif command == 'unsubscribe':
        user.unsubscribe_from_anime(payload)
    elif command == 'getlatest':
        try:
            anime_info = scraper.get_anime_info(payload)
            latest_episode_download_link = shorten(
                scraper.get_download_link(anime_info['latest_episode_link'],
                                          resolution=user.resolution))
            markup = [[
                InlineKeyboardButton(text='Download',
                                     url=latest_episode_download_link)
            ]]
            context.bot.send_message(chat_id=user.chat_id,
                                     text=anime_info['latest_episode_title'],
                                     reply_markup=InlineKeyboardMarkup(markup))
        except CannotDownloadAnimeException as err:
            # Tell both the user and the admin about the failed download.
            log_error(err)
            context.bot.send_message(chat_id=user.chat_id,
                                     text="Sorry," + payload +
                                     "could not be downloaded at this "
                                     "time!")
            context.bot.send_message(chat_id=os.getenv('ADMIN_CHAT_ID'),
                                     text='A user tried to download ' +
                                     payload + "but could not due to error: " +
                                     str(err))
            return
        except Exception as err:
            log_error(err)
            return
        else:
            # check if anime is in our anime registry
            try:
                # Returns the anime document only when the scraped episode
                # count exceeds the stored one; otherwise None.
                anime_from_db = client.query(
                    q.if_(
                        q.exists(
                            q.match(q.index(anime_by_id),
                                    anime_info['anime_id'])),
                        q.let(
                            {
                                'anime':
                                q.get(
                                    q.match(q.index(anime_by_id),
                                            anime_info['anime_id']))
                            },
                            q.if_(
                                q.gt(
                                    anime_info['number_of_episodes'],
                                    q.select(['data', 'episodes'],
                                             q.var('anime'))),
                                q.var('anime'), None)),
                        None))
            except errors.NotFound:
                anime_from_db = None
            if anime_from_db is not None:
                # A newer episode exists: fan the link out to subscribers.
                send_update_to_subscribed_users(
                    anime_from_db,
                    download_links={
                        user.resolution: latest_episode_download_link
                    },
                    anime_info=anime_info)
    elif command == 'set_resolution':
        try:
            new_res = Resolution(payload)
            user.resolution = new_res
            context.bot.send_message(
                chat_id=user.chat_id,
                text=
                f'Your desired resolution has been set to {new_res.value}({resolutions[new_res]}).\nThis resolution will be used for your future /subscribe and /latest commands.'
            )
        except ValueError:
            # Payload did not map to a known Resolution value.
            context.bot.send_message(chat_id=user.chat_id,
                                     text='Unidentified resolution level!')
            context.bot.send_message(chat_id=os.getenv('ADMIN_CHAT_ID'),
                                     text='Unidentified resolution level!')
    else:
        pass
def _define_match_set(query_filter: sql.Filter) -> QueryExpression:
    """Build the FQL set of documents matching a single WHERE filter.

    Selects the appropriate index (ref/term/value) for the filtered
    column and translates =, >, >=, <, <= comparisons into match/range
    expressions over it.

    Raises NotSupportedError for any other comparison operator.
    """
    field_name = query_filter.column.name
    comparison_value = query_filter.value
    index_name_for_collection = functools.partial(index_name,
                                                  query_filter.table_name)
    convert_to_collection_ref_set = functools.partial(
        convert_to_ref_set, query_filter.table_name)
    # Looks up information-schema index metadata so we can tell which
    # collection a foreign-key (REF) index for this field points at.
    get_info_indexes_with_references = lambda collection_name, field_name: q.map_(
        q.lambda_("info_index_ref", q.get(q.var("info_index_ref"))),
        q.paginate(
            q.match(
                q.index(
                    index_name(
                        "information_schema_indexes_",
                        column_name="name_",
                        index_type=IndexType.TERM,
                    )),
                index_name(
                    collection_name,
                    column_name=field_name,
                    index_type=IndexType.REF,
                ),
            ),
        ),
    )
    index_name_for_field = functools.partial(index_name_for_collection,
                                             field_name)
    # A value-index range pinned to a single value == equality on that field.
    equality_range = q.range(
        q.match(q.index(index_name_for_field(IndexType.VALUE))),
        [comparison_value],
        [comparison_value],
    )
    if query_filter.checks_whether_equal:
        if field_name == "ref":
            assert isinstance(comparison_value, str)
            return q.singleton(
                q.ref(q.collection(query_filter.table_name), comparison_value))
        # Prefer a REF index (foreign keys), then a TERM index, then fall
        # back to the value-index equality range.
        return q.let(
            {
                "ref_index": q.index(index_name_for_field(IndexType.REF)),
                "term_index": q.index(index_name_for_field(IndexType.TERM)),
                "info_indexes":
                get_info_indexes_with_references(query_filter.table_name,
                                                 field_name),
                "comparison_value": comparison_value,
            },
            q.if_(
                q.exists(q.var("ref_index")),
                q.match(
                    q.var("ref_index"),
                    get_foreign_key_ref(
                        q.var("comparison_value"),
                        # Assumes that there is only one reference per foreign key
                        # and that it refers to the associated collection's ID field
                        # (e.g. {'associated_table': 'id'}).
                        # This is enforced via NotSupported errors when creating collections.
                        q.select([0, DATA, "referred_table_"],
                                 q.var("info_indexes")),
                    ),
                ),
                q.if_(
                    q.exists(q.var("term_index")),
                    q.match(
                        q.var("term_index"),
                        q.var("comparison_value"),
                    ),
                    convert_to_collection_ref_set(equality_range),
                ),
            ),
        )
    # In the building of Filter objects from SQL tokens, we enforce the convention
    # of <column name> <operator> <value> for WHERE clauses, so we build the FQL queries
    # assuming that '>' means 'column value greater than literal value'. I can't think
    # of a good way to centralize the knowledge of this convention across
    # all query translation, so I'm leaving this note as a warning.
    if query_filter.checks_whether_greater_than:
        inclusive_comparison_range = q.range(
            q.match(q.index(index_name_for_field(IndexType.VALUE))),
            [comparison_value],
            [],
        )
        # Strict '>' == inclusive range minus the equality row(s).
        return convert_to_collection_ref_set(
            q.difference(inclusive_comparison_range, equality_range))
    if query_filter.checks_whether_greater_than_or_equal:
        inclusive_comparison_range = q.range(
            q.match(q.index(index_name_for_field(IndexType.VALUE))),
            [comparison_value],
            [],
        )
        return convert_to_collection_ref_set(inclusive_comparison_range)
    if query_filter.checks_whether_less_than:
        inclusive_comparison_range = q.range(
            q.match(q.index(index_name_for_field(IndexType.VALUE))),
            [],
            [comparison_value],
        )
        # Strict '<' == inclusive range minus the equality row(s).
        return convert_to_collection_ref_set(
            q.difference(inclusive_comparison_range, equality_range))
    if query_filter.checks_whether_less_than_or_equal:
        inclusive_comparison_range = q.range(
            q.match(q.index(index_name_for_field(IndexType.VALUE))),
            [],
            [comparison_value],
        )
        return convert_to_collection_ref_set(inclusive_comparison_range)
    raise exceptions.NotSupportedError(
        f"Unsupported comparison {query_filter.comparison} was received.")
def _update_information_metadata(
        table_name: str,
        collection_metadata: CollectionMetadata) -> QueryExpression:
    """Build the query that records a new collection in the information schema.

    Params:
    -------
    table_name: Name of the newly created table/collection.
    collection_metadata: Dict with 'fields' (column metadata keyed by
        column name) and 'indexes' (index metadata keyed by index name).

    Returns:
    --------
    An FQL expression creating one information_schema_tables_ document
    plus one columns/indexes document per column/index — or None (no-op)
    when the table is itself an information-schema collection.
    """
    # Every collection implicitly has a non-nullable integer 'id' column.
    # Bug fix: this entry previously used the key 'nullable', inconsistent
    # with the trailing-underscore 'nullable_' key used for every other
    # column entry (and with the rest of the keys in this dict).
    column_metadata = [{
        "name_": "id",
        "table_name_": table_name,
        "type_": "Integer",
        "nullable_": False,
        "default_": None,
    }]
    column_metadata.extend([
        {
            "name_": name,
            "table_name_": table_name,
            "type_": metadata["type"],
            # A bit awkward, but SQL uses the 'NOT NULL' keyword, while SQLAlchemy
            # uses 'nullable' when returning metadata
            "nullable_": not metadata["not_null"],
            "default_": metadata["default"],
        } for name, metadata in collection_metadata["fields"].items()
    ])
    index_metadata = [{
        "name_": index_name,
        "table_name_": table_name,
        **typing.cast(typing.Dict[str, typing.Any], metadata),
    } for index_name, metadata in collection_metadata["indexes"].items()]
    return q.if_(
        # We don't want to update information schema collections with information schema info,
        # because that would cause some weird inception-type stuff.
        q.contains_str_regex(
            table_name, r"^information_schema_(?:tables|columns|indexes)_$"),
        None,
        q.do(
            q.create(
                q.collection("information_schema_tables_"),
                {"data": {
                    "name_": table_name
                }},
            ),
            q.foreach(
                q.lambda_(
                    "column_metadata",
                    q.create(
                        q.collection("information_schema_columns_"),
                        {"data": q.var("column_metadata")},
                    ),
                ),
                column_metadata,
            ),
            q.foreach(
                q.lambda_(
                    "index_metadata",
                    q.create(
                        q.collection("information_schema_indexes_"),
                        {"data": q.var("index_metadata")},
                    ),
                ),
                index_metadata,
            ),
        ),
    )
def _assign_ref(ref_collection, ref_map, record_id):
    """Build a Ref into ref_collection for record_id, or None for null IDs.

    The Fauna document ID is looked up in ref_map under the stringified
    record_id's "id" entry.
    """
    looked_up_id = q.select([q.to_string(record_id), "id"], ref_map)
    return q.if_(
        q.is_null(record_id),
        None,
        q.ref(ref_collection, looked_up_id),
    )
def create_indexes(client):
    """Idempotently create the indexes the example app queries against.

    Covers user lookup by email, product search (by name word-parts and
    by category), and product sort orders (name asc, price asc/desc,
    created-at asc).
    """
    indexes = [{
        "name": "user_by_email",
        "source": q.collection("users"),
        "terms": [{
            "field": ["data", "email"]
        }],
    }, {
        # Binding index: product names are expanded into word parts by
        # wordPartsGenerator so partial names can be matched.
        "name": "products_search_by_name",
        "source": {
            "collection": q.collection('products'),
            "fields": {
                "wordparts": q.query(lambda product: wordPartsGenerator(
                    q.select(['data', 'name'], product)))
            }
        },
        "terms": [{
            "binding": 'wordparts'
        }],
    }, {
        "name": "products_search_by_category",
        "source": q.collection('products'),
        "terms": [{
            "field": ["data", "categories"]
        }],
    }, {
        # The sort indexes include the ref as a value so documents can be
        # fetched from the sorted result tuples.
        "name": "products_sort_by_name_asc",
        "source": q.collection('products'),
        "terms": [{
            "field": ["ref"]
        }],
        "values": [
            {
                "field": ["data", "name"]
            },
            {
                "field": ["ref"]
            },
        ]
    }, {
        "name": "products_sort_by_price_asc",
        "source": q.collection('products'),
        "terms": [{
            "field": ["ref"]
        }],
        "values": [
            {
                "field": ["data", "price"]
            },
            {
                "field": ["ref"]
            },
        ]
    }, {
        "name": "products_sort_by_price_desc",
        "source": q.collection('products'),
        "terms": [{
            "field": ["ref"]
        }],
        "values": [
            {
                # 'reverse' yields descending price order.
                "field": ["data", "price"],
                "reverse": True
            },
            {
                "field": ["ref"]
            },
        ]
    }, {
        "name": "products_sort_by_created_asc",
        "source": q.collection('products'),
        "values": [
            {
                "field": ["data", "createdAt"]
            },
            {
                "field": ["ref"]
            },
        ]
    }]
    # Create each index only when no index with that name exists yet.
    client.query(
        q.map_(
            lambda index: q.if_(q.exists(q.index(q.select(["name"], index))),
                                True, q.create_index(index)),
            indexes))
def subscribe_to_anime(self, anime_link: str):
    """Subscribe this user to an anime given its page link.

    Creates the anime document on first subscription (followers=1),
    otherwise bumps the follower count, and appends the anime's ref to
    the user's watch list — unless it is already there, in which case
    the query evaluates to an informational string. Sends a Telegram
    confirmation either way.
    """
    try:
        # create a new anime document
        anime_info = anime_alarm.utils.GGAScraper().get_anime_info(
            anime_link)
        print(anime_info['anime_id'])
        result = client.query(
            q.let(
                {
                    # The user's current watch list (list of anime refs).
                    'user_anime_list': q.select(
                        ['data', 'animes_watching'],
                        q.get(q.ref(q.collection(users), self.chat_id))),
                },
                q.if_(
                    # check if this anime exists in the db
                    q.exists(
                        q.match(q.index(anime_by_id),
                                anime_info['anime_id'])),
                    # if it exists...
                    q.let(
                        {
                            'anime_ref': q.select(
                                'ref',
                                q.get(
                                    q.match(q.index(anime_by_id),
                                            anime_info['anime_id'])))
                        },
                        q.if_(
                            # check if user has subscribed to this anime already
                            q.contains_value(q.var('anime_ref'),
                                             q.var('user_anime_list')),
                            'This anime is already on your watch list!',
                            q.do(
                                # Append the anime to the watch list...
                                q.update(
                                    q.ref(q.collection(users), self.chat_id),
                                    {
                                        'data': {
                                            'animes_watching': q.append(
                                                q.var('user_anime_list'),
                                                [q.var('anime_ref')])
                                        }
                                    }),
                                # ...and bump the follower count by one.
                                q.update(
                                    q.var('anime_ref'),
                                    {
                                        'data': {
                                            'followers': q.add(
                                                q.select(
                                                    ['data', 'followers'],
                                                    q.get(q.var('anime_ref'))),
                                                1)
                                        }
                                    }),
                            ))),
                    q.let(
                        {'new_anime_id': q.new_id()},
                        q.do(
                            # create new anime document
                            q.create(
                                q.ref(q.collection(animes),
                                      q.var('new_anime_id')),
                                {
                                    'data': {
                                        'title': anime_info['title'],
                                        'followers': 1,
                                        'link': anime_link,
                                        'anime_id': anime_info['anime_id'],
                                        'anime_alias':
                                        anime_info['anime_alias'],
                                        'episodes':
                                        anime_info['number_of_episodes'],
                                        'last_episode': {
                                            'link': anime_info[
                                                'latest_episode_link'],
                                            'title': anime_info[
                                                'latest_episode_title'],
                                        },
                                    }
                                }),
                            # add to user's list of subscribed animes
                            q.update(
                                q.ref(q.collection(users), self.chat_id),
                                {
                                    'data': {
                                        'animes_watching': q.append(
                                            q.var('user_anime_list'), [
                                                q.ref(
                                                    q.collection(animes),
                                                    q.var('new_anime_id'))
                                            ])
                                    }
                                }),
                        )))))
        # A string result means the user was already subscribed.
        if isinstance(result, str):
            updater.bot.send_message(chat_id=self.chat_id, text=result)
        else:
            updater.bot.send_message(
                chat_id=self.chat_id,
                text='You are now listening for updates on ' +
                anime_info['title'])
    except Exception as err:
        log_error(err)
def test_if(self):
    """if_ evaluates to the 'then' branch on true, 'else' branch on false."""
    for condition, expected in ((True, "t"), (False, "f")):
        self.assertEqual(self._q(query.if_(condition, "t", "f")), expected)
def test_if_expr(self):
    """if_ serializes to JSON with 'if', 'then', and 'else' keys."""
    expression = query.if_(True, "true", "false")
    self.assertJson(expression, '{"else":"false","if":true,"then":"true"}')
def translate_select(sql_query: sql.SQLQuery) -> QueryExpression:
    """Translate a SELECT SQL query into an equivalent FQL query.

    Params:
    -------
    sql_query: An SQLQuery instance.

    Returns:
    --------
    An FQL query expression based on the SQL query.
    """
    document_pages = _define_document_pages(sql_query)
    selected_table = next(table for table in sql_query.tables
                          if table.has_columns)
    # Resolves a column's output: the aggregate result when a function was
    # applied, otherwise the raw value (NULL sentinel -> Python None).
    get_field_value = lambda function_value, raw_value: q.if_(
        q.equals(function_value, common.NULL),
        q.if_(q.equals(raw_value, common.NULL), None, raw_value),
        q.select([common.DATA, 0], function_value),
    )
    # Only COUNT is supported; any other function name yields NULL.
    calculate_function_value = lambda document_set, function_name: q.if_(
        q.is_null(function_name),
        common.NULL,
        q.if_(
            q.equals(function_name, sql.Function.COUNT.value),
            q.count(document_set),
            common.NULL,
        ),
    )
    # With aggregation functions, standard behaviour is to include the first value
    # if any column selections are part of the query, at least until we add support
    # for GROUP BY
    get_first_document = lambda documents: q.if_(q.is_empty(documents), [{}],
                                                 q.take(1, documents))
    translate_document_fields = lambda maybe_documents: q.let(
        {
            # We map over selected_fields to build document object
            # to maintain the order of fields as queried. Otherwise,
            # SQLAlchemy gets confused and assigns values to the incorrect keys.
            "selected_column_info":
            [[col.table_name, col.name, col.function_name]
             for col in sql_query.columns],
            "has_functions":
            any(col.function_name for col in sql_query.columns),
            "maybe_document_set":
            q.if_(
                q.var("has_functions"),
                get_first_document(maybe_documents),
                maybe_documents,
            ),
            "field_alias_map": sql_query.alias_map,
        },
        q.map_(
            q.lambda_(
                "maybe_document",
                q.let(
                    {
                        # Refs are dereferenced into {table_name: data+ref};
                        # anything else passes through unchanged.
                        "document":
                        q.if_(
                            q.is_ref(q.var("maybe_document")),
                            {
                                # We use the selected table name here instead of deriving
                                # the collection name from the document ref in order to
                                # save a 'get' call from inside of a map, which could get
                                # expensive.
                                selected_table.name:
                                q.merge(
                                    q.select(
                                        common.DATA,
                                        q.get(q.var("maybe_document")),
                                    ),
                                    {"ref": q.var("maybe_document")},
                                ),
                            },
                            q.var("maybe_document"),
                        ),
                    },
                    # Build [alias, value] pairs per selected column, then
                    # fold them into a single result object.
                    q.to_object(
                        q.map_(
                            q.lambda_(
                                [
                                    "collection_name", "field_name",
                                    "function_name"
                                ],
                                q.let(
                                    {
                                        "function_value":
                                        calculate_function_value(
                                            maybe_documents,
                                            q.var("function_name")),
                                        "raw_value":
                                        q.select(
                                            [
                                                q.var("collection_name"),
                                                q.var("field_name"),
                                            ],
                                            q.var("document"),
                                            default=common.NULL,
                                        ),
                                    },
                                    [
                                        q.select(
                                            [
                                                q.var("collection_name"),
                                                q.var("field_name"),
                                            ],
                                            q.var("field_alias_map"),
                                        ),
                                        get_field_value(
                                            q.var("function_value"),
                                            q.var("raw_value")),
                                    ],
                                ),
                            ),
                            q.var("selected_column_info"),
                        )),
                ),
            ),
            q.var("maybe_document_set"),
        ),
    )
    return q.let(
        {
            "maybe_documents": document_pages,
            "translated_documents":
            translate_document_fields(q.var("maybe_documents")),
            "result":
            q.distinct(q.var("translated_documents"))
            if sql_query.distinct else q.var("translated_documents"),
        },
        # Paginated sets hold an array of results in a 'data' field, so we try to flatten it
        # in case we're dealing with pages instead of an array of results which doesn't
        # have such nesting
        {common.DATA: q.select(common.DATA, q.var("result"), q.var("result"))},
    )