def execute_get_missing_initial_alerts(itgs):
    """Like `execute_get_missing_alerts`, but selects usage by users who
    have never been sent an alert for the endpoint at all (i.e., no
    corresponding row in endpoint_alerts exists). The result is sorted
    by user id.
    """
    endpoint_users = Table('endpoint_users')
    endpoint_alerts = Table('endpoint_alerts')
    users = Table('users')
    usage_after_filters = Table('usage_after_filters')
    query = (
        Query.with_(
            Query.from_(endpoint_users).where(
                Not(
                    Exists(
                        Query.from_(endpoint_alerts)
                        .where(endpoint_alerts.endpoint_id == endpoint_users.endpoint_id)
                        .where(endpoint_alerts.user_id == endpoint_users.user_id)
                    )
                )
            ).select(
                endpoint_users.endpoint_id.as_('endpoint_id'),
                endpoint_users.user_id.as_('user_id'),
                Min(endpoint_users.created_at).as_('first_usage'),
                Max(endpoint_users.created_at).as_('last_usage'),
                Count(endpoint_users.id).as_('count_usage')
            ).groupby(
                endpoint_users.endpoint_id,
                endpoint_users.user_id
            ),
            'usage_after_filters'
        )
        .from_(usage_after_filters)
        .join(users).on(users.id == usage_after_filters.user_id)
        .select(
            usage_after_filters.user_id,
            users.username,
            usage_after_filters.endpoint_id,
            usage_after_filters.first_usage,
            usage_after_filters.last_usage,
            usage_after_filters.count_usage
        )
        .orderby(usage_after_filters.user_id)
    )
    sql = query.get_sql()
    itgs.read_cursor.execute(sql)
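
# For reference, the query above renders to SQL of roughly this shape
# (abbreviated; exact quoting and column lists depend on the pypika
# dialect in use):
#
#   WITH usage_after_filters AS (
#     SELECT endpoint_id, user_id,
#            MIN(created_at) first_usage,
#            MAX(created_at) last_usage,
#            COUNT(id) count_usage
#     FROM endpoint_users
#     WHERE NOT EXISTS (
#       SELECT ... FROM endpoint_alerts
#       WHERE endpoint_alerts.endpoint_id = endpoint_users.endpoint_id
#         AND endpoint_alerts.user_id = endpoint_users.user_id
#     )
#     GROUP BY endpoint_id, user_id
#   )
#   SELECT ... FROM usage_after_filters
#   JOIN users ON users.id = usage_after_filters.user_id
#   ORDER BY usage_after_filters.user_id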
async def test_true_aggregate_max(self):
    await testmodels.DecimalFields.create(decimal=Decimal("0"), decimal_nodec=1)
    await testmodels.DecimalFields.create(decimal=Decimal("9.99"), decimal_nodec=1)
    await testmodels.DecimalFields.create(decimal=Decimal("27.27"), decimal_nodec=1)
    values = await testmodels.DecimalFields.all().aggregate(
        max_decimal=Max("decimal"))
    self.assertEqual(values, {"max_decimal": Decimal("27.27")})
def get_max_rv_point_list(self, point_table: str) -> int:
    rv = 0
    try:
        p = Table(point_table)
        q = Q.from_(p).select(Max(p.rv))
        sql = q.get_sql()
        self._logger.debug(f'SQL: {sql}')
        rv = engine_mart.scalar(sql)
        if rv is None:
            rv = 0
    except Exception as e:
        self._logger.error(e)
    return rv
def read_serial_id(self, find_id: int) -> int:
    ds = Table('DataString', alias='ds')
    # Order by the aggregated write time so LIMIT 1 picks the most
    # recently written value rather than an arbitrary group.
    query = (
        Query.from_(ds)
        .select(ds.Data, Max(ds.TimeWrite))
        .where(ds.IdTagDef == find_id)
        .groupby(ds.Data)
        .orderby(Max(ds.TimeWrite), order=Order.desc)
        .limit(1)
    )
    sql = query.get_sql()
    ret_value: int = 0
    try:
        self._logger.debug(f'SQL: {sql}')
        result = self._engine_data.execute(sql)
        for row in result:
            ret_value = row[0]
    except Exception as e:
        self._logger.error(e)
    return ret_value
def get_csv_dump(alt_authorization: str = None, authorization=Header(None)):
    """Get a csv of all loans where the columns are id, lender_id,
    borrower_id, currency, principal_minor, principal_cents,
    principal_repayment_minor, principal_repayment_cents, created_at,
    last_repayment_at, repaid_at, unpaid_at.

    This endpoint is _very_ expensive for us. Without a user's ratelimit
    being increased they will almost certainly not be able to use this
    endpoint even once. We charge 5 * rows * log(rows) toward the quota
    and do not allow users who contribute to the global ratelimit. It is
    NOT cheaper to use this endpoint compared to just walking the index
    endpoint; it mainly exists for users who are willing to pay for a
    csv dump.

    You may use a query parameter for authorization instead of a header.
    """
    if authorization is None and alt_authorization is None:
        return Response(status_code=401)
    if authorization is None:
        authorization = f'bearer {alt_authorization}'

    attempt_request_cost = 1
    check_request_cost_cost = 10
    headers = {'x-request-cost': str(attempt_request_cost)}
    with LazyItgs() as itgs:
        user_id, _, perms = users.helper.get_permissions_from_header(
            itgs, authorization, ratelimit_helper.RATELIMIT_PERMISSIONS
        )
        settings = (
            ratelimit_helper.USER_RATELIMITS
            if user_id is None
            else get_settings(itgs, user_id)
        )
        if not ratelimit_helper.check_ratelimit(
                itgs, user_id, perms, attempt_request_cost, settings=settings):
            return Response(status_code=429, headers=headers)
        if user_id is None:
            return Response(status_code=403, headers=headers)
        if settings.global_ratelimit_applies:
            return Response(status_code=403, headers=headers)

        headers['x-request-cost'] = str(
            attempt_request_cost + check_request_cost_cost
        )
        if not ratelimit_helper.check_ratelimit(
                itgs, user_id, perms, check_request_cost_cost, settings=settings):
            return Response(status_code=429, headers=headers)

        loans = Table('loans')
        itgs.read_cursor.execute(
            Query.from_(loans).select(Count(Star())).get_sql()
        )
        (cnt_loans,) = itgs.read_cursor.fetchone()

        real_request_cost = (
            5 * cnt_loans * max(1, math.ceil(math.log(cnt_loans)))
        )
        headers['x-request-cost'] = str(
            attempt_request_cost + check_request_cost_cost + real_request_cost
        )
        if not ratelimit_helper.check_ratelimit(
                itgs, user_id, perms, real_request_cost, settings=settings):
            return Response(status_code=429, headers=headers)

        moneys = Table('moneys')
        currencies = Table('currencies')
        principals = moneys.as_('principals')
        principal_repayments = moneys.as_('principal_repayments')
        repayment_events = Table('loan_repayment_events')
        query = (
            Query.from_(loans)
            .select(
                loans.id,
                loans.lender_id,
                loans.borrower_id,
                currencies.code,
                principals.amount,
                principals.amount_usd_cents,
                principal_repayments.amount,
                principal_repayments.amount_usd_cents,
                loans.created_at,
                Max(repayment_events.created_at),
                loans.repaid_at,
                loans.unpaid_at
            )
            .join(principals).on(principals.id == loans.principal_id)
            .join(currencies).on(currencies.id == principals.currency_id)
            .join(principal_repayments).on(
                principal_repayments.id == loans.principal_repayment_id)
            .left_join(repayment_events).on(repayment_events.loan_id == loans.id)
            .groupby(
                loans.id,
                currencies.id,
                principals.id,
                principal_repayments.id
            )
        )
        headers['Content-Type'] = 'text/csv'
        headers['Content-Disposition'] = 'attachment; filename="loans.csv"'
        headers['Cache-Control'] = 'public, max-age=86400'
        return StreamingResponse(
            query_generator(
                query.get_sql(),
                ','.join((
                    'id', 'lender_id', 'borrower_id', 'currency',
                    'principal_minor', 'principal_cents',
                    'principal_repayment_minor', 'principal_repayment_cents',
                    'created_at', 'last_repayment_at', 'repaid_at', 'unpaid_at'
                ))
            ),
            status_code=200,
            headers=headers
        )
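
# A minimal sketch of what the `query_generator` helper used above might
# look like, assuming it receives the raw SQL plus a pre-joined header
# row, opens its own integrations (the response can outlive the handler's
# LazyItgs), and yields CSV lines. The name and body here are assumptions
# for illustration; the real helper may differ, e.g. in escaping.
def query_generator_sketch(sql, header):
    yield header + '\n'
    with LazyItgs() as itgs:
        itgs.read_cursor.execute(sql)
        row = itgs.read_cursor.fetchone()
        while row is not None:
            # Simplified CSV join; the selected columns are numeric or
            # timestamps, so no quoting is performed in this sketch.
            yield ','.join('' if v is None else str(v) for v in row) + '\n'
            row = itgs.read_cursor.fetchone()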
def send_messages(version):
    with LazyIntegrations(logger_iden=LOGGER_IDEN) as itgs:
        itgs.logger.print(Level.TRACE, 'Sending moderator onboarding messages...')
        mod_onboarding_messages = Table('mod_onboarding_messages')
        itgs.read_cursor.execute(
            Query.from_(mod_onboarding_messages)
            .select(Max(mod_onboarding_messages.msg_order))
            .get_sql()
        )
        (max_msg_order,) = itgs.read_cursor.fetchone()
        if max_msg_order is None:
            itgs.logger.print(Level.DEBUG, 'There are no moderator onboarding messages.')
            return

        mod_onboarding_progress = Table('mod_onboarding_progress')
        moderators = Table('moderators')
        users = Table('users')
        itgs.read_cursor.execute(
            Query.from_(moderators)
            .join(users).on(users.id == moderators.user_id)
            .left_join(mod_onboarding_progress).on(
                mod_onboarding_progress.moderator_id == moderators.id)
            .select(
                users.id,
                moderators.id,
                users.username,
                mod_onboarding_progress.msg_order
            )
            .where(
                mod_onboarding_progress.msg_order.isnull()
                | (mod_onboarding_progress.msg_order < Parameter('%s'))
            )
            .get_sql(),
            (max_msg_order,)
        )
        rows = itgs.read_cursor.fetchall()

        responses = Table('responses')
        titles = responses.as_('titles')
        bodies = responses.as_('bodies')
        for (user_id, mod_id, username, cur_msg_order) in rows:
            itgs.read_cursor.execute(
                Query.from_(mod_onboarding_messages)
                .join(titles).on(titles.id == mod_onboarding_messages.title_id)
                .join(bodies).on(bodies.id == mod_onboarding_messages.body_id)
                .select(
                    mod_onboarding_messages.msg_order,
                    titles.id,
                    titles.name,
                    bodies.id,
                    bodies.name
                )
                .where(
                    Parameter('%s').isnull()
                    | (mod_onboarding_messages.msg_order > Parameter('%s'))
                )
                .orderby(mod_onboarding_messages.msg_order, order=Order.asc)
                .limit(1)
                .get_sql(),
                (cur_msg_order, cur_msg_order)
            )
            (new_msg_order, title_id, title_name, body_id, body_name) = (
                itgs.read_cursor.fetchone()
            )
            title_formatted = get_response(itgs, title_name, username=username)
            body_formatted = get_response(itgs, body_name, username=username)
            utils.reddit_proxy.send_request(
                itgs, 'mod_onboarding_messages', version, 'compose',
                {
                    'recipient': username,
                    'subject': title_formatted,
                    'body': body_formatted
                }
            )
            utils.mod_onboarding_utils.store_letter_message_with_id_and_names(
                itgs, user_id, title_id, title_name, body_id, body_name
            )
            if cur_msg_order is None:
                itgs.write_cursor.execute(
                    Query.into(mod_onboarding_progress)
                    .columns(
                        mod_onboarding_progress.moderator_id,
                        mod_onboarding_progress.msg_order
                    )
                    .insert(*(Parameter('%s') for _ in range(2)))
                    .get_sql(),
                    (mod_id, new_msg_order)
                )
            else:
                itgs.write_cursor.execute(
                    Query.update(mod_onboarding_progress)
                    .set(mod_onboarding_progress.msg_order, Parameter('%s'))
                    .set(mod_onboarding_progress.updated_at, CurTimestamp())
                    .where(mod_onboarding_progress.moderator_id == Parameter('%s'))
                    .get_sql(),
                    (new_msg_order, mod_id)
                )
            itgs.write_conn.commit()
            itgs.logger.print(
                Level.INFO,
                'Successfully sent moderator onboarding message (msg_order={}) to /u/{}',
                new_msg_order, username
            )
def execute_get_missing_alerts(itgs, bonus_filters):
    """Executes the read to get all alerts which should have been sent
    out already for endpoint usage. The endpoint usage is filtered using
    `bonus_filters`. If `bonus_filters` is a no-op then this function
    returns one row for each use of an endpoint by any user for which
    there is no later alert. The result is sorted by user id.

    Arguments:
    - `itgs (LazyIntegrations)`: The integrations to use for sending alerts.
    - `bonus_filters (callable)`: A callable which accepts the query, a
      callable which accepts an argument and returns the Parameter which
      will refer to that argument, and keyword arguments for each Table
      reference we have. This should return the new Query to use after
      filtering the results.

    Returns (via `itgs.read_cursor.fetchall()`):
    - `rows (list)`: A list of lists, where each inner list has the
      following elements:
      - `user_id (int)`: The id of the user which should be sent an alert.
      - `username (str)`: The username of the user with id `user_id`.
      - `endpoint_id (int)`: The id of the endpoint the user used.
      - `first_use_in_interval (datetime)`: The earliest time within the
        interval that the user used the endpoint.
      - `last_use_in_interval (datetime)`: The latest time within the
        interval that the user used the endpoint.
      - `count_in_interval (int)`: The number of times the user used the
        endpoint within the interval.
    """
    endpoint_users = Table('endpoint_users')
    endpoint_alerts = Table('endpoint_alerts')
    most_recent_alerts = Table('most_recent_alerts')
    usage_after_filters = Table('usage_after_filters')
    users = Table('users')
    args = []

    def add_param(arg):
        args.append(arg)
        return Parameter(f'${len(args)}')

    query = (
        Query.with_(
            Query.from_(endpoint_alerts).select(
                endpoint_alerts.endpoint_id.as_('endpoint_id'),
                endpoint_alerts.user_id.as_('user_id'),
                Max(endpoint_alerts.sent_at).as_('max_sent_at')
            ).groupby(
                endpoint_alerts.endpoint_id,
                endpoint_alerts.user_id
            ),
            'most_recent_alerts'
        )
        .with_(
            bonus_filters(
                Query.from_(endpoint_users)
                .join(most_recent_alerts).on(
                    (most_recent_alerts.endpoint_id == endpoint_users.endpoint_id)
                    & (most_recent_alerts.user_id == endpoint_users.user_id)
                )
                .select(
                    endpoint_users.endpoint_id.as_('endpoint_id'),
                    endpoint_users.user_id.as_('user_id'),
                    Min(endpoint_users.created_at).as_('first_usage'),
                    Max(endpoint_users.created_at).as_('last_usage'),
                    Count(endpoint_users.id).as_('count_usage')
                )
                .where(endpoint_users.user_id.notnull())
                .where(endpoint_users.created_at > most_recent_alerts.max_sent_at)
                .groupby(endpoint_users.endpoint_id, endpoint_users.user_id),
                add_param,
                endpoint_users=endpoint_users,
                endpoint_alerts=endpoint_alerts,
                most_recent_alerts=most_recent_alerts
            ),
            'usage_after_filters'
        )
        .from_(usage_after_filters)
        .join(users).on(users.id == usage_after_filters.user_id)
        .select(
            usage_after_filters.user_id,
            users.username,
            usage_after_filters.endpoint_id,
            usage_after_filters.first_usage,
            usage_after_filters.last_usage,
            usage_after_filters.count_usage
        )
        .orderby(usage_after_filters.user_id)
    )
    (sql, ordered_args) = convert_numbered_args(query.get_sql(), args)
    itgs.read_cursor.execute(sql, ordered_args)
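
# For illustration, a hypothetical `bonus_filters` implementation that
# matches the contract documented above: it receives the usage CTE, the
# `add_param` helper, and the table references as keyword arguments, and
# returns the narrowed query. The name and the 24-hour window below are
# assumptions chosen for this sketch, not something the module prescribes.
from datetime import datetime, timedelta

def example_bonus_filters(query, add_param, *, endpoint_users, **_tables):
    # Only consider usage recorded in the last 24 hours; `add_param`
    # registers the cutoff and hands back the Parameter referring to it.
    cutoff = datetime.utcnow() - timedelta(hours=24)
    return query.where(endpoint_users.created_at > add_param(cutoff))

# Usage: execute_get_missing_alerts(itgs, example_bonus_filters)
# A no-op callable (lambda query, add_param, **tables: query) would
# instead return one row per (endpoint, user) pair with usage newer
# than the most recent alert.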