def get_associated_user_wallet(
        args: AssociatedUserWalletArgs) -> AssociatedUserWallet:
    """
    Returns the user's associated wallets, grouped by chain

    Args:
        args: dict The parsed args from the request
        args.user_id: number The blockchain user id

    Returns:
        Dict with keys "eth" and "sol", each mapping to a list of
        wallet-address strings for that chain
    """
    db = db_session.get_db_read_replica()
    with db.scoped_session() as session:
        # Pull (wallet, chain) pairs for all live, non-deleted associations
        user_wallet: List[Tuple[str, str]] = (session.query(
            AssociatedWallet.wallet, AssociatedWallet.chain).filter(
                AssociatedWallet.is_current == True).filter(
                    AssociatedWallet.is_delete == False).filter(
                        AssociatedWallet.user_id == args.get("user_id")).all())
        eth_wallets: List[str] = []
        sol_wallets: List[str] = []
        # Bucket wallets by chain; rows with any other chain value are dropped
        for wallet, chain in user_wallet:
            if chain == "eth":
                eth_wallets.append(wallet)
            elif chain == "sol":
                sol_wallets.append(wallet)
        return {"eth": eth_wallets, "sol": sol_wallets}
def get_previously_private_playlists(args):
    """Return ids of playlists that flipped from private to public after a date.

    Args:
        args: dict of request args; must contain "date".

    Returns:
        Dict with key 'ids' mapping to a list of playlist ids.

    Raises:
        exceptions.ArgumentError: if "date" is missing from args.
    """
    db = get_db_read_replica()
    with db.scoped_session() as session:
        if "date" not in args:
            raise exceptions.ArgumentError(
                "'date' required to query for retrieving previously private playlists"
            )

        date = args.get("date")

        # Playlists that are public at or after the cutoff date
        public_after = (
            session.query(Playlist.playlist_id, Playlist.updated_at)
            .distinct(Playlist.playlist_id)
            .filter(Playlist.is_private == False, Playlist.updated_at >= date)
            .subquery()
        )

        # Playlists that were private before the cutoff date
        private_before = (
            session.query(Playlist.playlist_id, Playlist.updated_at)
            .distinct(Playlist.playlist_id)
            .filter(Playlist.is_private == True, Playlist.updated_at < date)
            .subquery()
        )

        # Intersection: private before AND public after => previously private
        rows = (
            session.query(private_before.c['playlist_id'])
            .join(
                public_after,
                public_after.c['playlist_id'] == private_before.c['playlist_id'],
            )
            .all()
        )

        playlist_ids = [row[0] for row in rows]

    return {'ids': playlist_ids}
Exemplo n.º 3
0
def get_app_names(args):
    """Fetch the distinct application names seen in app-name metrics.

    Args:
        args: dict of parsed request args
        args.offset: number of rows to skip
        args.limit: maximum number of rows to return

    Returns:
        List of {'name': <application name>} dicts in ascending name order.
    """
    db = db_session.get_db_read_replica()
    with db.scoped_session() as session:
        query = session.query(AppNameMetrics.application_name)
        query = query.group_by(AppNameMetrics.application_name)
        query = query.order_by(asc(AppNameMetrics.application_name))
        query = query.limit(args.get('limit')).offset(args.get('offset'))
        rows = query.all()

        # Each row is a one-element tuple holding the application name
        return [{'name': row[0]} for row in rows]
Exemplo n.º 4
0
        def inner_wrap(*args, **kwargs):
            # Authenticate the request by recovering the signer's wallet from
            # the signed-message headers, then resolve it to a user id which
            # is forwarded to the wrapped view as `authed_user_id`.
            message = request.headers.get(MESSAGE_HEADER)
            signature = request.headers.get(SIGNATURE_HEADER)

            # Default: request is unauthenticated
            authed_user_id = None
            if message and signature:
                web3 = web3_provider.get_web3()
                # EIP-191 "defunct" encoding, required by recover_message
                encoded_to_recover = encode_defunct(text=message)
                wallet = web3.eth.account.recover_message(
                    encoded_to_recover, signature=signature
                )
                db = db_session.get_db_read_replica()
                with db.scoped_session() as session:
                    user = (
                        session.query(User.user_id)
                        .filter(
                            # Convert checksum wallet to lowercase
                            User.wallet == wallet.lower(),
                            User.is_current == True,
                        )
                        # In the case that multiple wallets match (not enforced on the data layer),
                        # pick the user id that is lowest (created first).
                        .order_by(User.user_id.asc())
                        .first()
                    )
                    if user:
                        authed_user_id = user.user_id
                        logger.info(
                            f"auth_middleware.py | authed_user_id: {authed_user_id}"
                        )
            # Always call through, even when unauthenticated (id stays None)
            return func(*args, **kwargs, authed_user_id=authed_user_id)
Exemplo n.º 5
0
def trending(time):
    """Serve trending listen counts for a time window, using a Redis cache.

    Genre-filtered requests always bypass the cache; cache is only used when
    it holds enough entries to cover the requested page.
    """
    (limit, offset) = get_pagination_vars()
    # Increment total trending count
    REDIS.incr(trending_cache_total_key, 1)

    genre = request.args.get("genre", default=None, type=str)
    if genre is None:
        # Only un-genre'd trending is cached, keyed by time window
        redis_key = f"trending-{time}"
        redis_cache_value = REDIS.get(redis_key)
        if redis_cache_value is not None:
            json_cache = json.loads(redis_cache_value.decode('utf-8'))
            if json_cache is not None:
                num_cached_entries = len(json_cache['listen_counts'])
                logger.info(
                    f'Cache for {redis_key}, {num_cached_entries} entries, request limit {limit}'
                )
                # Serve from cache only if it covers the requested page
                if offset + limit <= num_cached_entries:
                    json_cache['listen_counts'] = json_cache['listen_counts'][
                        offset:offset + limit]
                    logger.info(f'Returning cache for {redis_key}')
                    # Increment cache hit count
                    REDIS.incr(trending_cache_hits_key, 1)
                    return api_helpers.success_response(json_cache)
    # Increment cache miss count
    REDIS.incr(trending_cache_miss_key, 1)
    # Recalculate trending values if necessary
    final_resp = generate_trending(get_db_read_replica(), time, genre, limit,
                                   offset)
    return api_helpers.success_response(final_resp)
def get_previously_unlisted_tracks(args):
    """Return ids of tracks that flipped from unlisted to listed after a date.

    Args:
        args: dict of request args; must contain "date".

    Returns:
        Dict with key "ids" mapping to a list of track ids.

    Raises:
        exceptions.ArgumentError: if "date" is missing from args.
    """
    db = get_db_read_replica()
    with db.scoped_session() as session:
        if "date" not in args:
            raise exceptions.ArgumentError(
                "'date' required to query for retrieving previously unlisted tracks"
            )

        date = args.get("date")

        # Tracks that are listed at or after the cutoff date
        listed_after = (
            session.query(Track.track_id, Track.updated_at)
            .distinct(Track.track_id)
            .filter(Track.is_unlisted == False, Track.updated_at >= date)
            .subquery()
        )

        # Tracks that were unlisted before the cutoff date
        unlisted_before = (
            session.query(Track.track_id, Track.updated_at)
            .distinct(Track.track_id)
            .filter(Track.is_unlisted == True, Track.updated_at < date)
            .subquery()
        )

        # Intersection: unlisted before AND listed after => previously unlisted
        rows = (
            session.query(unlisted_before.c["track_id"])
            .join(
                listed_after,
                listed_after.c["track_id"] == unlisted_before.c["track_id"],
            )
            .all()
        )

        track_ids = [row[0] for row in rows]

    return {"ids": track_ids}
def get_follow_intersection_users(followee_user_id, follower_user_id):
    """Return users who follow `followee_user_id` AND are followed by
    `follower_user_id`, with metadata populated, sorted by follower count desc.
    """
    users = []
    db = get_db_read_replica()
    with db.scoped_session() as session:
        # Intersect (followers of followee) with (followees of follower)
        query = session.query(User).filter(
            User.is_current == True,
            User.user_id.in_(
                session.query(Follow.follower_user_id).filter(
                    Follow.followee_user_id == followee_user_id,
                    Follow.is_current == True,
                    Follow.is_delete == False,
                ).intersect(
                    session.query(Follow.followee_user_id).filter(
                        Follow.follower_user_id == follower_user_id,
                        Follow.is_current == True,
                        Follow.is_delete == False,
                    ))),
        )
        users = paginate_query(query).all()
        users = helpers.query_result_to_list(users)
        user_ids = [user[response_name_constants.user_id] for user in users]

        current_user_id = get_current_user_id(required=False)

        # bundle peripheral info into user results
        users = populate_user_metadata(session, user_ids, users,
                                       current_user_id)

        # order by follower_count desc
        users.sort(
            key=lambda user: user[response_name_constants.follower_count],
            reverse=True)

    return users
Exemplo n.º 8
0
def milestones_followers():
    """Return follower counts for the user ids supplied in the request.

    Query params:
        user_id: repeatable query arg; each value must parse as an int.

    Returns:
        Success response mapping followee user id -> follower count
        (users with zero followers are simply absent from the map), or an
        error response when user_id params are missing or non-numeric.
    """
    db = get_db_read_replica()
    if "user_id" not in request.args:
        # Fixed message typo: "provider" -> "provide"
        return api_helpers.error_response({'msg': 'Please provide user ids'},
                                          500)

    try:
        user_id_str_list = request.args.getlist("user_id")
        user_ids = [int(y) for y in user_id_str_list]
    except ValueError as e:
        # BUGFIX: was `esc_info=True`, a silently-ignored kwarg; `exc_info`
        # is what attaches the traceback to the log record.
        logger.error("Invalid value found in user id list", exc_info=True)
        return api_helpers.error_response({'msg': e}, 500)

    with db.scoped_session() as session:
        # Count current, non-deleted follows per requested followee
        follower_counts = (session.query(
            Follow.followee_user_id,
            func.count(Follow.followee_user_id)).filter(
                Follow.is_current == True, Follow.is_delete == False,
                Follow.followee_user_id.in_(user_ids)).group_by(
                    Follow.followee_user_id).all())
        follower_count_dict = {
            user_id: follower_count
            for (user_id, follower_count) in follower_counts
        }

    return api_helpers.success_response(follower_count_dict)
Exemplo n.º 9
0
def search(isAutocomplete):
    """Search tracks/users/playlists/albums for the request's `query` param.

    Args:
        isAutocomplete: flag forwarded unchanged to every *_search_query call
            — presumably toggles a cheaper autocomplete matching mode; verify
            against those helpers.

    Returns:
        Success response mapping result-category names to result lists, or an
        error response for a missing `query` or invalid `kind` param.
    """
    searchStr = request.args.get("query", type=str)
    if not searchStr:
        return api_helpers.error_response("Invalid value for parameter 'query'")
    searchStr = searchStr.replace('&', 'and')  # when creating query table, we substitute this too

    # `kind` restricts which categories are searched; defaults to all
    kind = request.args.get("kind", type=str, default="all")
    if kind not in SearchKind.__members__:
        return api_helpers.error_response(
            "Invalid value for parameter 'kind' must be in %s" % [k.name for k in SearchKind]
        )
    searchKind = SearchKind[kind]

    (limit, offset) = get_pagination_vars()

    results = {}
    if searchStr:
        db = get_db_read_replica()
        with db.scoped_session() as session:
            # Set similarity threshold to be used by % operator in queries.
            session.execute(sqlalchemy.text(f"select set_limit({minSearchSimilarity});"))

            # Each category is queried twice with a different boolean flag —
            # result keys suggest global vs. saved/followed scope; confirm
            # against the *_search_query signatures.
            if (searchKind in [SearchKind.all, SearchKind.tracks]):
                results['tracks'] = track_search_query(session, searchStr, limit, offset, False, isAutocomplete)
                results['saved_tracks'] = track_search_query(session, searchStr, limit, offset, True, isAutocomplete)
            if (searchKind in [SearchKind.all, SearchKind.users]):
                results['users'] = user_search_query(session, searchStr, limit, offset, False, isAutocomplete)
                results['followed_users'] = user_search_query(session, searchStr, limit, offset, True, isAutocomplete)
            if (searchKind in [SearchKind.all, SearchKind.playlists]):
                results['playlists'] = playlist_search_query(
                    session,
                    searchStr,
                    limit,
                    offset,
                    False,
                    False,
                    isAutocomplete
                )
                results['saved_playlists'] = playlist_search_query(
                    session,
                    searchStr,
                    limit,
                    offset,
                    False,
                    True,
                    isAutocomplete
                )
            if (searchKind in [SearchKind.all, SearchKind.albums]):
                results['albums'] = playlist_search_query(session, searchStr, limit, offset, True, False, isAutocomplete)
                results['saved_albums'] = playlist_search_query(
                    session,
                    searchStr,
                    limit,
                    offset,
                    True,
                    True,
                    isAutocomplete
                )

    return api_helpers.success_response(results)
Exemplo n.º 10
0
def _get_db_block_state(latest_blocknum, latest_blockhash):
    """Build a health report comparing the chain head to the DB's current block.

    Args:
        latest_blocknum: block number reported by the web/chain side.
        latest_blockhash: block hash reported by the web/chain side.

    Returns:
        Dict with "web", "db", "git", "block_difference",
        "maximum_healthy_block_difference" keys plus version info.
    """
    db = get_db_read_replica()
    with db.scoped_session() as session:
        # Exactly one block row should be flagged as current
        current_blocks = session.query(Block).filter(
            Block.is_current == True).all()
        assert len(current_blocks) == 1, "Expected SINGLE row marked as current"

        health_results = {
            "web": {
                "blocknumber": latest_blocknum,
                "blockhash": latest_blockhash,
            },
            "db": helpers.model_to_dictionary(current_blocks[0]),
            "git": os.getenv("GIT_SHA"),
        }

        # How far the DB lags (or leads) the reported chain head
        diff = abs(health_results["web"]["blocknumber"]
                   - health_results["db"]["number"])
        health_results["block_difference"] = diff
        health_results["maximum_healthy_block_difference"] = \
            default_healthy_block_diff
        health_results.update(disc_prov_version)

        return health_results
Exemplo n.º 11
0
def get_trending_tracks(args, strategy):
    """Gets trending by getting the currently cached tracks and then populating them."""
    db = get_db_read_replica()
    with db.scoped_session() as session:
        current_user_id, genre, time = args.get("current_user_id"), args.get(
            "genre"), args.get("time", "week")
        # Clamp unrecognized time windows to "week"
        time_range = "week" if time not in ["week", "month", "year"] else time
        key = make_trending_cache_key(time_range, genre, strategy.version)

        # Will try to hit cached trending from task, falling back
        # to generating it here if necessary and storing it with no TTL
        (tracks, track_ids) = use_redis_cache(
            key, None,
            make_generate_unpopulated_trending(session, genre, time_range,
                                               strategy))

        # populate track metadata
        tracks = populate_track_metadata(session, track_ids, tracks,
                                         current_user_id)
        tracks_map = {track['track_id']: track for track in tracks}

        # Re-sort the populated tracks b/c it loses sort order in sql query
        sorted_tracks = [tracks_map[track_id] for track_id in track_ids]

        # Optionally attach each track's owning user object
        if args.get("with_users", False):
            user_id_list = get_users_ids(sorted_tracks)
            users = get_users_by_id(session, user_id_list, current_user_id)
            for track in sorted_tracks:
                user = users[track['owner_id']]
                if user:
                    track['user'] = user
        return sorted_tracks
Exemplo n.º 12
0
def get_latest_spl_audio() -> Optional[Dict]:
    """Return the first SPL token transaction row as a dict, or None if absent."""
    db = get_db_read_replica()
    with db.scoped_session() as session:
        tx = session.query(SPLTokenTransaction).first()
        if tx is None:
            return None
        return helpers.model_to_dictionary(tx)
def _get_underground_trending(args, strategy):
    """Fetch underground trending tracks from cache, populate metadata and users.

    Args:
        args: dict with optional "current_user_id", "limit", "offset".
        strategy: trending strategy object; its version keys the cache entry.

    Returns:
        List of extended, populated track dicts in cached trending order.
    """
    db = get_db_read_replica()
    with db.scoped_session() as session:
        current_user_id = args.get("current_user_id", None)
        limit, offset = args.get("limit"), args.get("offset")
        key = make_underground_trending_cache_key(strategy.version)

        # Cached unpopulated tracks; computed and stored on a cache miss
        (tracks, track_ids) = use_redis_cache(
            key, None, make_get_unpopulated_tracks(session, redis, strategy))

        # Apply limit + offset early to reduce the amount of
        # population work we have to do
        if limit is not None and offset is not None:
            track_ids = track_ids[offset:limit + offset]

        tracks = populate_track_metadata(session, track_ids, tracks,
                                         current_user_id)

        tracks_map = {track['track_id']: track for track in tracks}

        # Re-sort the populated tracks b/c it loses sort order in sql query
        sorted_tracks = [tracks_map[track_id] for track_id in track_ids]
        # Attach the owning user object to every track
        user_id_list = get_users_ids(sorted_tracks)
        users = get_users_by_id(session, user_id_list, current_user_id)
        for track in sorted_tracks:
            user = users[track['owner_id']]
            if user:
                track['user'] = user
        sorted_tracks = list(map(extend_track, sorted_tracks))
        return sorted_tracks
def get_followers_for_user(args):
    """Fetch a user's followers via the module-level raw `sql` statement,
    then populate user metadata onto the results.

    Args:
        args: dict with "followee_user_id", "current_user_id",
            "limit" and "offset".

    Returns:
        List of populated user dicts.
    """
    users = []
    followee_user_id = args.get("followee_user_id")
    current_user_id = args.get("current_user_id")
    limit = args.get("limit")
    offset = args.get("offset")

    db = get_db_read_replica()
    with db.scoped_session() as session:
        # The raw SQL returns follower user ids as the first column
        params = {
            "followee_user_id": followee_user_id,
            "limit": limit,
            "offset": offset,
        }
        rows = session.execute(sql, params)
        user_ids = [row[0] for row in rows]

        # get all users for above user_ids
        users = get_unpopulated_users(session, user_ids)

        # bundle peripheral info into user results
        users = populate_user_metadata(session, user_ids, users,
                                       current_user_id)

    return users
Exemplo n.º 15
0
def get_playlist_repost_intersection_users(repost_playlist_id,
                                           follower_user_id):
    """Return users who reposted the playlist AND are followed by
    `follower_user_id`.

    Raises:
        exceptions.NotFoundError: if no current playlist has the given id.
    """
    users = []
    db = get_db_read_replica()
    with db.scoped_session() as session:
        # ensure playlist_id exists
        playlist_entry = (session.query(Playlist).filter(
            Playlist.playlist_id == repost_playlist_id,
            Playlist.is_current == True).first())
        if playlist_entry is None:
            raise exceptions.NotFoundError(
                "Resource not found for provided playlist id")

        # Intersect the item's reposters with the follower's followees;
        # repost_type != track admits both playlist and album reposts.
        query = session.query(User).filter(
            User.is_current == True,
            User.user_id.in_(
                session.query(Repost.user_id).filter(
                    Repost.repost_item_id == repost_playlist_id,
                    Repost.repost_type != RepostType.track,
                    Repost.is_current == True,
                    Repost.is_delete == False,
                ).intersect(
                    session.query(Follow.followee_user_id).filter(
                        Follow.follower_user_id == follower_user_id,
                        Follow.is_current == True,
                        Follow.is_delete == False,
                    ))),
        )
        users = paginate_query(query).all()
        users = helpers.query_result_to_list(users)

    return users
def get_app_name_metrics(app_name, args):
    """
    Fetch usage metrics recorded for a single app name.

    Args:
        app_name: string name of the app whose metrics are requested
        args: dict of parsed request args
        args.start_time: date lower bound (exclusive) for metric timestamps
        args.limit: number maximum number of rows to return

    Returns:
        List of {'timestamp': unix seconds, 'count': number} dicts,
        newest first.
    """
    db = db_session.get_db_read_replica()
    with db.scoped_session() as session:
        rows = (
            session.query(AppNameMetrics.timestamp, AppNameMetrics.count)
            .filter(
                AppNameMetrics.application_name == app_name,
                AppNameMetrics.timestamp > args.get('start_time'),
            )
            .order_by(desc(AppNameMetrics.timestamp))
            .limit(args.get('limit'))
            .all()
        )

        # Convert each DB timestamp to unix seconds alongside its count
        return [
            {'timestamp': int(time.mktime(ts.timetuple())), 'count': count}
            for (ts, count) in rows
        ]
def get_trending_tracks(args):
    """Compute trending tracks for a time window, score + sort, and populate.

    Args:
        args: dict with 'time' window, optional 'genre' and 'with_users'.

    Returns:
        List of populated track dicts sorted by trending score descending.
    """
    (limit, offset) = get_pagination_vars()
    current_user_id = get_current_user_id(required=False)

    db = get_db_read_replica()

    time = args.get('time')
    # Identity understands allTime as millennium.
    # TODO: Change this in https://github.com/AudiusProject/audius-protocol/pull/768/files
    query_time = time
    if time == 'allTime':
        query_time = 'millennium'

    with db.scoped_session() as session:
        trending_tracks = generate_trending(get_db_read_replica(), query_time,
                                            args.get('genre', None), limit,
                                            offset)

        # Score each listen-count entry with z(), then sort by score desc
        track_scores = [
            z(time, track) for track in trending_tracks['listen_counts']
        ]
        sorted_track_scores = sorted(track_scores,
                                     key=lambda k: k['score'],
                                     reverse=True)

        track_ids = [track['track_id'] for track in sorted_track_scores]

        # Fetch only current, listed, non-stem tracks for the scored ids
        tracks = session.query(Track).filter(
            Track.is_current == True, Track.is_unlisted == False,
            Track.stem_of == None, Track.track_id.in_(track_ids)).all()
        tracks = helpers.query_result_to_list(tracks)

        tracks = populate_track_metadata(session, track_ids, tracks,
                                         current_user_id)
        tracks_map = {track['track_id']: track for track in tracks}

        # Re-sort the populated tracks b/c it loses sort order in sql query
        # NOTE(review): assumes every scored track_id survives the filter
        # above — a filtered-out id would KeyError here; confirm upstream
        # guarantees.
        sorted_tracks = [tracks_map[track_id] for track_id in track_ids]

        if args.get("with_users", False):
            user_id_list = get_users_ids(sorted_tracks)
            users = get_users_by_id(session, user_id_list)
            for track in sorted_tracks:
                user = users[track['owner_id']]
                if user:
                    track['user'] = user
        return sorted_tracks
def get_reposters_for_track(args):
    """Return users who reposted a track, ordered by follower count desc.

    Args:
        args: dict with 'repost_track_id', optional 'current_user_id',
            'limit' and 'offset'.

    Raises:
        exceptions.NotFoundError: if no current track has repost_track_id.
    """
    user_results = []
    current_user_id = args.get('current_user_id')
    repost_track_id = args.get('repost_track_id')
    limit = args.get('limit')
    offset = args.get('offset')

    db = get_db_read_replica()
    with db.scoped_session() as session:
        # Ensure Track exists for provided repost_track_id.
        track_entry = session.query(Track).filter(
            Track.track_id == repost_track_id,
            Track.is_current == True).first()
        if track_entry is None:
            raise exceptions.NotFoundError(
                'Resource not found for provided track id')

        # Subquery to get all (user_id, follower_count) entries from Follows table.
        follower_count_subquery = (session.query(
            Follow.followee_user_id,
            func.count(Follow.followee_user_id).label(
                response_name_constants.follower_count)).filter(
                    Follow.is_current == True,
                    Follow.is_delete == False).group_by(
                        Follow.followee_user_id).subquery())

        # Get all Users that reposted track, ordered by follower_count desc & paginated.
        query = (
            session.query(
                User,
                # Replace null values from left outer join with 0 to ensure sort works correctly.
                (func.coalesce(follower_count_subquery.c.follower_count, 0)
                 ).label(response_name_constants.follower_count))
            # Left outer join to associate users with their follower count.
            .outerjoin(
                follower_count_subquery,
                follower_count_subquery.c.followee_user_id ==
                User.user_id).filter(
                    User.is_current == True,
                    # Only select users that reposted given track.
                    User.user_id.in_(
                        session.query(Repost.user_id).filter(
                            Repost.repost_item_id == repost_track_id,
                            Repost.repost_type == RepostType.track,
                            Repost.is_current == True,
                            Repost.is_delete == False)
                    )).order_by(desc(response_name_constants.follower_count)))
        user_results = add_query_pagination(query, limit, offset).all()

        # Fix format to return only Users objects with follower_count field.
        if user_results:
            users, _ = zip(*user_results)
            user_results = helpers.query_result_to_list(users)
            # bundle peripheral info into user results
            user_ids = [user['user_id'] for user in user_results]
            user_results = populate_user_metadata(session, user_ids,
                                                  user_results,
                                                  current_user_id)
    return user_results
def get_top_followee_windowed(type, window, args):
    """Return top tracks created within `window` by users that
    args["user_id"] follows, ranked by repost + save count.

    Args:
        type: must be "track" (the only supported type).
            NOTE(review): shadows the builtin `type`; renaming would break
            keyword callers, so it is kept.
        window: one of "week", "month", "year".
        args: dict with "user_id", optional "limit" (default 25) and
            "with_users".

    Raises:
        exceptions.ArgumentError: on an unsupported type or window.
    """
    if type != "track":
        raise exceptions.ArgumentError(
            "Invalid type provided, must be one of 'track'")

    valid_windows = ["week", "month", "year"]
    if not window or window not in valid_windows:
        raise exceptions.ArgumentError(
            f"Invalid window provided, must be one of {valid_windows}")

    limit = args.get("limit", 25)

    current_user_id = args.get("user_id")
    db = get_db_read_replica()
    with db.scoped_session() as session:

        # Users the requesting user currently follows
        followee_user_ids = session.query(Follow.followee_user_id).filter(
            Follow.follower_user_id == current_user_id,
            Follow.is_current == True,
            Follow.is_delete == False,
        )
        followee_user_ids_subquery = followee_user_ids.subquery()

        # Queries for tracks joined against followed users and counts
        tracks_query = (
            session.query(Track, ).join(
                followee_user_ids_subquery,
                Track.owner_id ==
                followee_user_ids_subquery.c.followee_user_id,
            ).join(AggregateTrack, Track.track_id == AggregateTrack.track_id).
            filter(
                Track.is_current == True,
                Track.is_delete == False,
                Track.is_unlisted == False,
                Track.stem_of == None,
                # Query only tracks created `window` time ago (week, month, etc.)
                Track.created_at >= text(f"NOW() - interval '1 {window}'"),
            ).order_by(
                # Rank by engagement, tie-broken by newest track id
                desc(AggregateTrack.repost_count + AggregateTrack.save_count),
                desc(Track.track_id),
            ).limit(limit))

        tracks_query_results = tracks_query.all()
        tracks = helpers.query_result_to_list(tracks_query_results)
        track_ids = list(map(lambda track: track["track_id"], tracks))

        # Bundle peripheral info into track results
        tracks = populate_track_metadata(session, track_ids, tracks,
                                         current_user_id)

        # Optionally attach each track's owning user object
        if args.get("with_users", False):
            user_id_list = get_users_ids(tracks)
            users = get_users_by_id(session, user_id_list)
            for track in tracks:
                user = users[track["owner_id"]]
                if user:
                    track["user"] = user

    return tracks
Exemplo n.º 20
0
 def get(self):
     # Fetch reactions for the requested transaction signatures and type,
     # extend each for the API response, and return a success payload.
     args = get_reactions_parser.parse_args()
     tx_ids, type = args.get("tx_signatures"), args.get("type")
     db = get_db_read_replica()
     with db.scoped_session() as session:
         reactions = get_reactions(session, tx_ids, type)
         reactions = list(map(extend_reaction, reactions))
         return success_response(reactions)
def get_latest_sol_user_bank() -> Optional[Dict]:
    """Return the most recent (highest slot) user bank transaction as a dict,
    or None when the table is empty."""
    db = get_db_read_replica()
    with db.scoped_session() as session:
        latest = (
            session.query(UserBankTransaction)
            .order_by(desc(UserBankTransaction.slot))
            .first()
        )
        if latest is None:
            return None
        return helpers.model_to_dictionary(latest)
Exemplo n.º 22
0
def _get_db_block_state():
    """Return the DB's current block row as a dictionary."""
    db = db_session.get_db_read_replica()
    with db.scoped_session() as session:
        # There must be exactly one block flagged as current
        current_blocks = (
            session.query(Block).filter(Block.is_current == True).all()
        )
        assert len(current_blocks) == 1, "Expected SINGLE row marked as current"
        return helpers.model_to_dictionary(current_blocks[0])
def get_followers_for_user(args):
    """Return the followers of a user, sorted by their own follower count desc
    (user_id asc as tie-break), paginated, with user metadata populated.

    Args:
        args: dict with 'followee_user_id', 'current_user_id',
            'limit' and 'offset'.
    """
    users = []
    followee_user_id = args.get('followee_user_id')
    current_user_id = args.get('current_user_id')
    limit = args.get('limit')
    offset = args.get('offset')

    db = get_db_read_replica()
    with db.scoped_session() as session:
        # correlated subquery sqlalchemy code:
        # https://groups.google.com/forum/#!topic/sqlalchemy/WLIy8jxD7qg
        inner_follow = aliased(Follow)
        outer_follow = aliased(Follow)

        # subquery to get a user's follower count
        inner_select = (session.query(func.count(
            inner_follow.followee_user_id)).filter(
                inner_follow.is_current == True,
                inner_follow.is_delete == False, inner_follow.followee_user_id
                == outer_follow.follower_user_id).correlate(outer_follow))

        # get all users that follow input user, sorted by their follower count desc
        outer_select = (
            session.query(
                outer_follow.follower_user_id,
                inner_select.as_scalar().label(
                    response_name_constants.follower_count)).filter(
                        outer_follow.followee_user_id == followee_user_id,
                        outer_follow.is_current == True,
                        outer_follow.is_delete == False).
            group_by(outer_follow.follower_user_id).order_by(
                desc(response_name_constants.follower_count),
                # secondary sort to guarantee determinism as explained here:
                # https://stackoverflow.com/questions/13580826/postgresql-repeating-rows-from-limit-offset
                asc(outer_follow.follower_user_id)))
        follower_user_ids_by_follower_count = add_query_pagination(
            outer_select, limit, offset).all()

        user_ids = [
            user_id for (user_id,
                         follower_count) in follower_user_ids_by_follower_count
        ]

        # get all users for above user_ids
        users = get_unpopulated_users(session, user_ids)

        # bundle peripheral info into user results
        users = populate_user_metadata(session, user_ids, users,
                                       current_user_id)

        # order by (follower_count desc, user_id asc) to match query sorting
        # tuple key syntax from: https://stackoverflow.com/a/4233482/8414360
        users.sort(key=lambda user:
                   (user[response_name_constants.follower_count],
                    (user['user_id']) * (-1)),
                   reverse=True)
    return users
def get_oldest_unarchived_play():
    """
    Gets the oldest unarchived play in the database

    Returns the minimum `created_at` over the plays table, or None when the
    table is empty.
    """
    db = db_session.get_db_read_replica()
    with db.scoped_session() as session:
        # scalar() on min() yields None for an empty table
        oldest = session.query(func.min(Play.created_at)).scalar()
        return oldest
def get_top_followee_saves(saveType, args):
    """
    Returns the tracks most saved by users the current user follows.

    Args:
        saveType: str Must be 'track'; any other value raises ArgumentError
        args: dict Parsed request args
        args.limit: number Max results to return (default 25)
        args.with_users: bool Whether to attach owner user objects to tracks

    Returns:
        List of track dicts (with populated metadata), ordered by
        followee save count descending.

    Raises:
        exceptions.ArgumentError: if saveType is not 'track'
    """
    if saveType != 'track':
        raise exceptions.ArgumentError(
            "Invalid type provided, must be one of 'track'")

    limit = args.get('limit', 25)
    current_user_id = get_current_user_id()

    db = get_db_read_replica()
    with db.scoped_session() as session:
        # Subquery: ids of everyone the current user follows.
        followees = (
            session.query(Follow.followee_user_id)
            .filter(
                Follow.follower_user_id == current_user_id,
                Follow.is_current == True,
                Follow.is_delete == False)
            .subquery())

        # Subquery: followee saves aggregated per item, top `limit`
        # by save count.
        followee_saves = (
            session.query(
                Save.save_item_id,
                func.count(Save.save_item_id).label(
                    response_name_constants.save_count))
            .join(followees, Save.user_id == followees.c.followee_user_id)
            .filter(
                Save.is_current == True,
                Save.is_delete == False,
                Save.save_type == saveType)
            .group_by(Save.save_item_id)
            .order_by(desc(response_name_constants.save_count))
            .limit(limit)
            .subquery())

        # Join visible tracks against the aggregated followee saves.
        track_rows = (
            session.query(Track)
            .join(
                followee_saves,
                Track.track_id == followee_saves.c.save_item_id)
            .filter(
                Track.is_current == True,
                Track.is_delete == False,
                Track.is_unlisted == False,
                Track.stem_of == None)
            .all())

        tracks = helpers.query_result_to_list(track_rows)
        track_ids = [track['track_id'] for track in tracks]

        # Bundle peripheral info into track results.
        tracks = populate_track_metadata(
            session, track_ids, tracks, current_user_id)

        if args.get('with_users', False):
            users = get_users_by_id(session, get_users_ids(tracks))
            for track in tracks:
                owner = users[track['owner_id']]
                if owner:
                    track['user'] = owner

    return tracks
# Exemplo n.º 26 ("Example no. 26" — scraping artifact; commented out, was not valid Python)
def get_latest_play():
    """
    Returns the `created_at` timestamp of the most recent play in the
    database, or None if there are no plays.
    """
    db = db_session.get_db_read_replica()
    with db.scoped_session() as session:
        latest = (
            session.query(Play.created_at)
            .order_by(Play.created_at.desc())
            .limit(1)
            .scalar())
        return latest
# Exemplo n.º 27 ("Example no. 27" — scraping artifact; commented out, was not valid Python)
def get_aggregate_route_metrics_trailing_month():
    """
    Returns trailing count and unique count for all routes in the last trailing 30 days

    Returns:
        { unique_count, total_count }
    """
    # Thin wrapper: open a read-replica session and delegate to the
    # session-level implementation.
    read_replica = db_session.get_db_read_replica()
    with read_replica.scoped_session() as session:
        return _get_aggregate_route_metrics_trailing_month(session)
# Exemplo n.º 28 ("Example no. 28" — scraping artifact; commented out, was not valid Python)
def get_users_cnode(cnode_endpoint_string, replica_type=ReplicaType.PRIMARY):
    '''
    Query all users with `cnode_endpoint_string` in replica set
    If replica_type=ReplicaType.PRIMARY -> returns users with `cnode_endpoint_string` as primary
    Else if replica_type=ReplicaType.SECONDARY -> returns users with `cnode_endpoint_string` as secondary1 or secondary2
    Else (only other option is replica_type=ReplicaType.ALL)

    Only returns values where 1/2 secondaries are non-null
    '''
    # Build the replica-position filter up front instead of inlining a
    # conditional expression inside the f-string; the resulting SQL text
    # is identical. Only the bound parameter placeholder is interpolated,
    # so there is no injection surface here.
    if replica_type == ReplicaType.PRIMARY:
        replica_filter = "t.primary = :cnode_endpoint_string AND"
    elif replica_type == ReplicaType.SECONDARY:
        replica_filter = (
            '(t.secondary1 = :cnode_endpoint_string OR t.secondary2 = :cnode_endpoint_string) AND')
    else:
        # ReplicaType.ALL: match any position in the replica set.
        replica_filter = (
            '(t.primary = :cnode_endpoint_string OR '
            't.secondary1 = :cnode_endpoint_string OR '
            't.secondary2 = :cnode_endpoint_string) AND')

    db = get_db_read_replica()
    with db.scoped_session() as session:
        query = sqlalchemy.text(f"""
            SELECT
            *
            FROM
            (
                SELECT
                "user_id",
                "wallet",
                ("creator_node_endpoints") [1] as "primary",
                ("creator_node_endpoints") [2] as "secondary1",
                ("creator_node_endpoints") [3] as "secondary2"
                FROM
                (
                    SELECT
                    "user_id",
                    "wallet",
                    string_to_array("creator_node_endpoint", ',') as "creator_node_endpoints"
                    FROM
                    "users"
                    WHERE
                    "creator_node_endpoint" IS NOT NULL
                    AND "is_current" IS TRUE
                    ORDER BY
                    "user_id" ASC
                ) as "s"
            ) as "t"
            WHERE
            {replica_filter}
            t.secondary1 is not NULL;
            """)
        rows = session.execute(query, {
            "cnode_endpoint_string": cnode_endpoint_string
        }).fetchall()
        users_dict = [dict(row) for row in rows]
    return users_dict
# Exemplo n.º 29 ("Example no. 29" — scraping artifact; commented out, was not valid Python)
def get_app_names(args):
    """
    Returns a list of app names

    Args:
        args: dict The parsed args from the request
        args.offset: number The offset to start querying from
        args.limit: number The max number of queries to return
        args.start_time: date The start of the query
        args.include_unknown: bool Whether or not to include a line item for unknown

    Returns:
        Array of dictionaries with name, count, and unique_count fields
    """
    db = db_session.get_db_read_replica()
    with db.scoped_session() as session:
        app_names = (session.query(
            AppNameMetrics.application_name,
            func.sum(AppNameMetrics.count).label('count'),
            func.count(AppNameMetrics.ip.distinct())).filter(
                AppNameMetrics.timestamp > args.get('start_time')).group_by(
                    AppNameMetrics.application_name).order_by(
                        desc('count'),
                        asc(AppNameMetrics.application_name)).limit(
                            args.get('limit')).offset(
                                args.get('offset')).all())

        names = [{
            'name': app_name[0],
            'count': app_name[1],
            'unique_count': app_name[2]
        } for app_name in app_names]

        if args.get('include_unknown', False):
            existing_count = sum(name['count'] for name in names)
            existing_unique_count = sum(
                name['unique_count'] for name in names)
            total_requests = (session.query(
                func.sum(RouteMetrics.count).label('count'),
                func.count(RouteMetrics.ip.distinct())).filter(
                    RouteMetrics.timestamp > args.get('start_time')).first())
            # Aggregates are NULL when no route metrics exist in the
            # window; treat that as zero rather than raising TypeError.
            unknown_count = (total_requests[0] or 0) - existing_count
            unique_count = (total_requests[1] or 0) - existing_unique_count
            unknown_entry = {
                'name': 'unknown',
                'count': unknown_count,
                'unique_count': unique_count
            }
            # Insert the unknown line item "in order" (desc by count).
            # BUG FIX: the original terminated on `i == len(names)`,
            # which enumerate can never yield, so the unknown entry was
            # silently dropped whenever it had the smallest count. The
            # for/else appends it at the end in that case.
            for i, name in enumerate(names):
                if unknown_count > name['count']:
                    names.insert(i, unknown_entry)
                    break
            else:
                names.append(unknown_entry)

        return names
def get_playlist_tracks(args):
    """
    Returns the tracks of a playlist, in playlist order.

    Args:
        args: dict The parsed args from the request
        args.playlist_id: number The playlist to fetch tracks for
        args.current_user_id: number|None Viewing user, for metadata population
        args.limit: number|None Max tracks to return
        args.offset: number|None Offset into the playlist's track list
        args.with_users: bool Whether to attach user objects to tracks

    Returns:
        List of track dicts in playlist order, None if the playlist does
        not exist, or [] on a NoResultFound error.
    """
    current_user_id = args.get("current_user_id")
    limit = args.get("limit")
    offset = args.get("offset")

    db = get_db_read_replica()
    with db.scoped_session() as session:
        try:
            playlist_id = args.get("playlist_id")
            playlist = (
                session
                .query(Playlist)
                .filter(
                    Playlist.is_current == True,
                    Playlist.playlist_id == playlist_id
                )
                .first()
            )
            if playlist is None:
                return None

            playlist_track_ids = [
                track_id['track']
                for track_id in playlist.playlist_contents['track_ids']
            ]
            # BUG FIX: the original `if limit and offset:` skipped
            # pagination when offset == 0, returning the whole playlist
            # for the first page. Check for presence explicitly.
            if limit is not None and offset is not None:
                playlist_track_ids = playlist_track_ids[offset:offset + limit]

            playlist_tracks = (
                session
                .query(Track)
                .filter(
                    Track.is_current == True,
                    Track.track_id.in_(playlist_track_ids)
                )
                .all()
            )

            tracks = helpers.query_result_to_list(playlist_tracks)
            tracks = populate_track_metadata(
                session, playlist_track_ids, tracks, current_user_id)

            if args.get("with_users", False):
                add_users_to_tracks(session, tracks, current_user_id)

            tracks_dict = {track['track_id']: track for track in tracks}

            # Reorder to match the playlist's track order.
            # BUG FIX: skip ids with no current Track row (e.g. deleted
            # tracks still referenced by the playlist) instead of
            # raising KeyError.
            return [
                tracks_dict[track_id]
                for track_id in playlist_track_ids
                if track_id in tracks_dict
            ]

        except sqlalchemy.orm.exc.NoResultFound:
            pass
    return []