def get_tracks_and_ids():
    """Fetch tracks (plus their ids) per the enclosing `args` filters.

    Resolves an optional `handle` arg to a user id, then either serves the
    request from the shared unpopulated-track cache (plain id lookup) or
    delegates to `_get_tracks` with pagination applied.

    Returns:
        (tracks, track_ids): list of unpopulated track dicts and their ids.
    """
    if "handle" in args:
        handle = args.get("handle")
        # Fix: Query(...).first() returns a Row (or None), not a scalar id.
        # The previous code stored the Row itself in args["user_id"], which
        # breaks any downstream owner_id comparison. Unpack the scalar.
        user = session.query(User.user_id).filter(
            User.handle_lc == handle.lower()).first()
        args["user_id"] = user.user_id if user else None

    # The shared cache can only serve plain id lookups — any filter or sort
    # that changes the result set forces the full query path.
    can_use_shared_cache = ("id" in args
                            and "min_block_number" not in args
                            and "sort" not in args
                            and "user_id" not in args)

    if can_use_shared_cache:
        should_filter_deleted = args.get("filter_deleted", False)
        tracks = get_unpopulated_tracks(session, args["id"],
                                        should_filter_deleted)
        track_ids = [track["track_id"] for track in tracks]
        return (tracks, track_ids)

    (limit, offset) = get_pagination_vars()
    args["limit"] = limit
    args["offset"] = offset

    tracks = _get_tracks(session, args)
    track_ids = [track["track_id"] for track in tracks]
    return (tracks, track_ids)
def generate_unpopulated_trending_from_mat_views(session, genre, time_range, strategy, limit=TRENDING_LIMIT):
    """Read precomputed trending scores from the materialized score table.

    Args:
        session: DB session.
        genre: optional genre filter.
        time_range: requested trending window ("week"/"month"/"year"/"allTime").
        strategy: trending strategy (provides type/version for score lookup).
        limit: max number of tracks to return.

    Returns:
        (tracks, track_ids): unpopulated track dicts and ids, best score first.
    """
    # Version EJ57D uses "allTime" where other versions use "year" — remap
    # the requested range to whichever one this version actually stores.
    is_ej57d = strategy.version == TrendingVersion.EJ57D
    if is_ej57d and time_range == "year":
        time_range = "allTime"
    elif not is_ej57d and time_range == "allTime":
        time_range = "year"

    score_query = session.query(
        TrackTrendingScore.track_id, TrackTrendingScore.score
    ).filter(
        TrackTrendingScore.type == strategy.trending_type.name,
        TrackTrendingScore.version == strategy.version.name,
        TrackTrendingScore.time_range == time_range,
    )
    if genre:
        score_query = score_query.filter(TrackTrendingScore.genre == genre)

    # Highest score first; track_id (desc) breaks ties deterministically.
    scored_rows = (
        score_query
        .order_by(desc(TrackTrendingScore.score),
                  desc(TrackTrendingScore.track_id))
        .limit(limit)
        .all()
    )

    track_ids = [row[0] for row in scored_rows]
    tracks = get_unpopulated_tracks(session, track_ids)
    return (tracks, track_ids)
def get_tracks_and_ids():
    """Fetch tracks (plus their ids) per the enclosing `args` filters.

    Resolves `handle` and `routes` args to user ids (mutating `args` in
    place), then serves from the shared unpopulated-track cache when the
    request is a plain id lookup, otherwise delegates to `_get_tracks`
    with pagination.

    Returns:
        (tracks, track_ids): unpopulated track dicts and their ids; empty
        lists when no referenced handle resolves to a user.
    """
    if "handle" in args:
        handle = args.get("handle")
        user = (session.query(User.user_id).filter(
            User.handle_lc == handle.lower()).first())
        # Fix: an unknown handle previously raised AttributeError on
        # `user.user_id` (.first() returns None). Mirror the routes branch
        # and return empty results instead.
        if not user:
            return ([], [])
        args["user_id"] = user.user_id

    if "routes" in args:
        # Convert the handles to user_ids
        routes = args.get("routes")
        handles = [route["handle"].lower() for route in routes]
        user_id_tuples = (session.query(
            User.user_id, User.handle_lc).filter(
                User.handle_lc.in_(handles),
                User.is_current == True).all())
        user_id_map = {
            handle: user_id
            for (user_id, handle) in user_id_tuples
        }
        args["routes"] = []
        for route in routes:
            if route["handle"].lower() in user_id_map:
                args["routes"].append({
                    "slug": route["slug"],
                    "owner_id": user_id_map[route["handle"].lower()],
                })
        # If none of the handles were found, return empty lists
        if not args["routes"]:
            return ([], [])

    # The shared cache can only serve plain id lookups with no filters/sorts.
    can_use_shared_cache = ("id" in args
                            and "min_block_number" not in args
                            and "sort" not in args
                            and "user_id" not in args)

    if can_use_shared_cache:
        should_filter_deleted = args.get("filter_deleted", False)
        tracks = get_unpopulated_tracks(session, args["id"],
                                        should_filter_deleted)
        track_ids = [track["track_id"] for track in tracks]
        return (tracks, track_ids)

    (limit, offset) = get_pagination_vars()
    args["limit"] = limit
    args["offset"] = offset

    tracks = _get_tracks(session, args)
    track_ids = [track["track_id"] for track in tracks]
    return (tracks, track_ids)
def wrapped():
    """Score candidates for underground trending.

    Returns:
        (tracks, track_ids): unpopulated track dicts and ids for the top
        UNDERGROUND_TRENDING_LENGTH tracks, best weekly score first.
    """
    # Score every scorable candidate with the weekly strategy.
    candidates = get_scorable_track_data(session, redis_instance, strategy)
    scored = [strategy.get_track_score("week", entry) for entry in candidates]

    # Highest score first, then truncate to the underground trending length.
    scored.sort(key=lambda entry: entry["score"], reverse=True)
    top_entries = scored[:UNDERGROUND_TRENDING_LENGTH]

    # Get unpopulated metadata
    track_ids = [entry["track_id"] for entry in top_entries]
    tracks = get_unpopulated_tracks(session, track_ids)
    return (tracks, track_ids)
def generate_unpopulated_trending(session, genre, time_range):
    """Compute trending tracks for (genre, time_range).

    Returns:
        (tracks, track_ids): unpopulated track dicts and ids, best score first.
    """
    listen_data = generate_trending(session, time_range, genre,
                                    TRENDING_LIMIT, 0)

    # Score each listen-count entry, then order by score descending.
    scored_entries = sorted(
        (z(time_range, entry) for entry in listen_data['listen_counts']),
        key=lambda entry: entry['score'],
        reverse=True,
    )

    track_ids = [entry['track_id'] for entry in scored_entries]
    tracks = get_unpopulated_tracks(session, track_ids)
    return (tracks, track_ids)
def get_unpopulated_trending():
    """Compute trending tracks from listen counts (closure over request args).

    Returns:
        (tracks, track_ids): unpopulated track dicts and ids, best score first.
    """
    listen_data = generate_trending(session, query_time,
                                    args.get('genre', None), limit, offset)

    # Score each listen-count entry, then order by score descending.
    scored_entries = [z(time, entry) for entry in listen_data['listen_counts']]
    scored_entries.sort(key=lambda entry: entry['score'], reverse=True)

    track_ids = [entry['track_id'] for entry in scored_entries]
    tracks = get_unpopulated_tracks(session, track_ids)
    return (tracks, track_ids)
def generate_unpopulated_trending(session, genre, time_range, strategy, limit=TRENDING_LIMIT):
    """Compute trending tracks for (genre, time_range) via `strategy`.

    Returns:
        (tracks, track_ids): unpopulated track dicts and ids, ordered by
        (score, track_id) descending, truncated to `limit`.
    """
    listen_data = generate_trending(session, time_range, genre, limit, 0,
                                    strategy)

    scored_entries = [
        strategy.get_track_score(time_range, entry)
        for entry in listen_data["listen_counts"]
    ]

    # Sort by (score, track_id) desc, then re-apply the limit in case the
    # scoring pass considered more tracks than we want to return.
    scored_entries.sort(
        key=lambda entry: (entry["score"], entry["track_id"]),
        reverse=True,
    )
    top_entries = scored_entries[:limit]

    track_ids = [entry["track_id"] for entry in top_entries]
    tracks = get_unpopulated_tracks(session, track_ids)
    return (tracks, track_ids)
def track_search_query(
    session,
    search_str,
    limit,
    offset,
    is_auto_complete,
    current_user_id,
    only_downloadable,
):
    """Full search over tracks via the track_lexeme_dict materialized view.

    Scores lexeme matches by trigram similarity plus title/user-name
    similarity, repost-count, exact-match and (when a current user is
    present) saved-by-user boosts, keeping one best row per owner.

    Args:
        session: DB session.
        search_str: raw query string.
        limit, offset: pagination applied in SQL.
        is_auto_complete: when truthy, attach user objects (with balances)
            instead of running full metadata population.
        current_user_id: optional viewer id; enables the saved-track boost.
        only_downloadable: restrict to tracks marked downloadable.

    Returns:
        dict with "all" (ordered result tracks) and "saved" (subset saved
        by current_user_id).
    """
    res = sqlalchemy.text(
        # pylint: disable=C0301
        # NOTE: the conditional f-string fragments only splice static SQL
        # chosen by local flags; user input flows in via bound :params.
        f"""
        select track_id, b.balance, b.associated_wallets_balance, u.is_saved
        from (
            select distinct on (owner_id) track_id, owner_id, is_saved, total_score
            from (
                select
                    track_id,
                    owner_id,
                    is_saved,
                    (
                        (:similarity_weight * sum(score)) +
                        (:title_weight * similarity(coalesce(title, ''), query)) +
                        (:user_name_weight * similarity(coalesce(user_name, ''), query)) +
                        (:repost_weight * log(case when (repost_count = 0) then 1 else repost_count end)) +
                        (case when (lower(query) = coalesce(title, '')) then :title_match_boost else 0 end) +
                        (case when (lower(query) = handle) then :handle_match_boost else 0 end) +
                        (case when (lower(query) = user_name) then :user_name_match_boost else 0 end)
                        {
                            '+ (case when (is_saved) then :current_user_saved_match_boost else 0 end)'
                            if current_user_id
                            else ""
                        }
                    ) as total_score
                from (
                    select
                        d."track_id" as track_id,
                        d."word" as word,
                        similarity(d."word", :query) as score,
                        d."track_title" as title,
                        :query as query,
                        d."user_name" as user_name,
                        d."handle" as handle,
                        d."repost_count" as repost_count,
                        d."owner_id" as owner_id
                        {
                            ',s."user_id" is not null as is_saved'
                            if current_user_id
                            else ", false as is_saved"
                        }
                    from "track_lexeme_dict" d
                    {
                        "left outer join (select save_item_id, user_id from saves where saves.save_type = 'track' "
                        + "and saves.is_current = true "
                        + "and saves.is_delete = false and saves.user_id = :current_user_id )"
                        + " s on s.save_item_id = d.track_id"
                        if current_user_id
                        else ""
                    }
                    {
                        'inner join "tracks" t on t.track_id = d.track_id'
                        if only_downloadable
                        else ""
                    }
                    where (d."word" % lower(:query) or d."handle" = lower(:query) or d."user_name" % lower(:query))
                    {
                        "and (t.download->>'is_downloadable')::boolean is True"
                        if only_downloadable
                        else ""
                    }
                ) as results
                group by track_id, title, query, user_name, handle, repost_count, owner_id, is_saved
            ) as results2
            order by owner_id, total_score desc
        ) as u left join user_balances b on u.owner_id = b.user_id
        order by total_score desc
        limit :limit
        offset :offset;
        """
    )

    track_result_proxy = session.execute(
        res,
        params={
            "query": search_str,
            "limit": limit,
            "offset": offset,
            "title_weight": search_title_weight,
            "repost_weight": search_repost_weight,
            "similarity_weight": search_similarity_weight,
            "current_user_id": current_user_id,
            "user_name_weight": search_user_name_weight,
            "title_match_boost": search_title_exact_match_boost,
            "handle_match_boost": search_handle_exact_match_boost,
            "user_name_match_boost": search_user_name_exact_match_boost,
            "current_user_saved_match_boost": current_user_saved_match_boost,
        },
    )
    track_data = track_result_proxy.fetchall()
    track_cols = track_result_proxy.keys()

    # track_ids is list of tuples - simplify to 1-D list
    track_ids = [track[track_cols.index("track_id")] for track in track_data]
    # ids the current user has saved, per the is_saved column computed in SQL
    saved_tracks = {
        track[0]
        for track in track_data
        if track[track_cols.index("is_saved")]
    }

    tracks = get_unpopulated_tracks(session, track_ids, True)

    # TODO: Populate track metadata should be sped up to be able to be
    # used in search autocomplete as that'll give us better results.
    if is_auto_complete:
        # fetch users for tracks
        track_owner_ids = list(map(lambda track: track["owner_id"], tracks))
        users = get_unpopulated_users(session, track_owner_ids)
        users_dict = {user["user_id"]: user for user in users}

        # attach user objects to track objects
        # NOTE(review): indexing track_data[i] assumes get_unpopulated_tracks
        # preserved one row per queried id in order — confirm.
        for i, track in enumerate(tracks):
            user = users_dict[track["owner_id"]]
            # Add user balance (columns 1 and 2 of the SQL result row)
            balance = track_data[i][1]
            associated_balance = track_data[i][2]
            user[response_name_constants.balance] = balance
            user[
                response_name_constants.associated_wallets_balance
            ] = associated_balance
            track["user"] = user
    else:
        # bundle peripheral info into track results
        tracks = populate_track_metadata(session, track_ids, tracks,
                                         current_user_id)

    # Preserve order from track_ids above
    tracks_map = {}
    for t in tracks:
        tracks_map[t["track_id"]] = t
    tracks = [tracks_map[track_id] for track_id in track_ids]

    tracks_response = {
        "all": tracks,
        "saved": list(filter(lambda track: track["track_id"] in saved_tracks,
                             tracks)),
    }

    return tracks_response
def get_unpopulated_remixes():
    """Fetch remix "children" of `track_id` (closure over enclosing scope).

    Orders results so remixes "co-signed" by the parent track's owner (a
    repost or save from that owner) come first, then by aggregate
    save + repost counts, then latest track id.

    Returns:
        (tracks, track_ids, count): unpopulated track dicts, their ids, and
        the total (unpaginated) count from add_query_pagination.

    Raises:
        exceptions.ArgumentError: if track_id does not resolve to a track.
    """
    # Fetch the parent track to get the track's owner id
    parent_track_res = get_unpopulated_tracks(session, [track_id], False, False)

    if not parent_track_res or parent_track_res[0] is None:
        raise exceptions.ArgumentError("Invalid track_id provided")

    parent_track = parent_track_res[0]
    track_owner_id = parent_track['owner_id']

    # Create subquery for save counts for sorting
    save_count_subquery = create_save_count_subquery(
        session, SaveType.track)

    # Create subquery for repost counts for sorting
    repost_count_subquery = create_repost_count_subquery(
        session, RepostType.track)

    # Get the 'children' remix tracks
    # Use the track owner id to fetch reposted/saved tracks returned first
    base_query = (
        session.query(
            Track
        )
        .join(
            Remix,
            and_(
                Remix.child_track_id == Track.track_id,
                Remix.parent_track_id == track_id
            )
        ).outerjoin(
            # Outer join: a matching Save row marks a co-sign (save by the
            # parent owner); absence must not drop the remix.
            Save,
            and_(
                Save.save_item_id == Track.track_id,
                Save.save_type == SaveType.track,
                Save.is_current == True,
                Save.is_delete == False,
                Save.user_id == track_owner_id
            )
        ).outerjoin(
            # Likewise for a repost by the parent owner.
            Repost,
            and_(
                Repost.repost_item_id == Track.track_id,
                Repost.user_id == track_owner_id,
                Repost.repost_type == RepostType.track,
                Repost.is_current == True,
                Repost.is_delete == False
            )
        ).outerjoin(
            repost_count_subquery,
            repost_count_subquery.c['id'] == Track.track_id
        ).outerjoin(
            save_count_subquery,
            save_count_subquery.c['id'] == Track.track_id
        )
        .filter(
            Track.is_current == True,
            Track.is_delete == False,
            Track.is_unlisted == False
        )
        # 1. Co-signed tracks ordered by save + repost count
        # 2. Other tracks ordered by save + repost count
        .order_by(
            desc(
                # If there is no "co-sign" for the track (no repost or save
                # from the parent owner), defer to secondary sort
                case(
                    [
                        (and_(Repost.created_at == None,
                              Save.created_at == None), 0),
                    ],
                    else_=(
                        func.coalesce(repost_count_subquery.c.repost_count, 0) +
                        func.coalesce(save_count_subquery.c.save_count, 0)
                    )
                )
            ),
            # Order by saves + reposts
            desc(
                func.coalesce(repost_count_subquery.c.repost_count, 0) +
                func.coalesce(save_count_subquery.c.save_count, 0)
            ),
            # Ties, pick latest track id
            desc(Track.track_id)
        )
    )

    (tracks, count) = add_query_pagination(base_query, limit, offset, True, True)
    tracks = tracks.all()
    tracks = helpers.query_result_to_list(tracks)
    track_ids = list(map(lambda track: track["track_id"], tracks))
    return (tracks, track_ids, count)
def track_search_query(session, searchStr, limit, offset, personalized,
                       is_auto_complete, current_user_id, only_downloadable):
    """Search tracks by trigram similarity over track_lexeme_dict.

    Args:
        session: DB session.
        searchStr: raw query string (bound as a SQL parameter).
        limit, offset: pagination applied in SQL.
        personalized: restrict to tracks saved by current_user_id.
        is_auto_complete: when truthy, attach user objects instead of full
            metadata population.
        current_user_id: optional viewer id.
        only_downloadable: restrict to downloadable tracks.

    Returns:
        List of track dicts in relevance order. Positions whose track was
        dropped by get_unpopulated_tracks (e.g. deleted) are None —
        NOTE(review): this matches prior behavior; confirm callers expect it.
    """
    if personalized and not current_user_id:
        return []

    res = sqlalchemy.text(
        # pylint: disable=C0301
        f"""
        select track_id from (
            select track_id, (sum(score) + (:title_weight * similarity(coalesce(title, ''), query))) as total_score from (
                select
                    d."track_id" as track_id,
                    d."word" as word,
                    similarity(d."word", :query) as score,
                    d."track_title" as title,
                    :query as query
                from "track_lexeme_dict" d
                {
                    'inner join "saves" s on s.save_item_id = d.track_id'
                    if personalized and current_user_id
                    else ""
                }
                {
                    'inner join "tracks" t on t.track_id = d.track_id'
                    if only_downloadable
                    else ""
                }
                where d."word" % :query
                {
                    "and s.save_type='track' and s.is_current=true and " +
                    "s.is_delete=false and s.user_id = :current_user_id"
                    if personalized and current_user_id
                    else ""
                }
                {
                    "and (t.download->>'is_downloadable')::boolean is True"
                    if only_downloadable
                    else ""
                }
            ) as results
            group by track_id, title, query
        ) as results2
        order by total_score desc, track_id asc
        limit :limit offset :offset;
        """)
    track_ids = session.execute(
        res,
        {
            "query": searchStr,
            "limit": limit,
            "offset": offset,
            "title_weight": trackTitleWeight,
            "current_user_id": current_user_id
        },
    ).fetchall()

    # track_ids is list of tuples - simplify to 1-D list
    track_ids = [i[0] for i in track_ids]

    tracks = get_unpopulated_tracks(session, track_ids, True)

    # Idiom fix: truthiness test instead of `== True`.
    if is_auto_complete:
        # fetch users for tracks
        track_owner_ids = list(map(lambda track: track["owner_id"], tracks))
        users = get_unpopulated_users(session, track_owner_ids)
        users_dict = {user["user_id"]: user for user in users}

        # attach user objects to track objects
        for track in tracks:
            track["user"] = users_dict[track["owner_id"]]
    else:
        # bundle peripheral info into track results
        tracks = populate_track_metadata(session, track_ids, tracks,
                                         current_user_id)

    # Preserve order from track_ids above. Perf fix: build one lookup dict
    # instead of a linear `next(...)` scan per id (was O(n^2)); .get() keeps
    # the original None-for-missing behavior.
    tracks_map = {track["track_id"]: track for track in tracks}
    tracks = [tracks_map.get(track_id) for track_id in track_ids]

    return tracks
def track_search_query(session, searchStr, limit, offset, personalized,
                       is_auto_complete, current_user_id, only_downloadable):
    """Search tracks by trigram similarity, then re-sort for "best match".

    Over-fetches lexeme matches (MIN_SEARCH_LEXEME_LIMIT), populates
    metadata, re-sorts with compare_tracks, and truncates to `limit`.

    Args:
        session: DB session.
        searchStr: raw query string (bound as a SQL parameter).
        limit, offset: pagination; `limit` is applied after the re-sort.
        personalized: restrict to tracks saved by current_user_id.
        is_auto_complete: when truthy, attach user objects + repost counts
            instead of full metadata population.
        current_user_id: optional viewer id.
        only_downloadable: restrict to downloadable tracks.

    Returns:
        List of at most `limit` track dicts in "best match" order.
    """
    if personalized and not current_user_id:
        return []

    res = sqlalchemy.text(
        # pylint: disable=C0301
        f"""
        select track_id from (
            select track_id, (sum(score) + (:title_weight * similarity(coalesce(title, ''), query))) as total_score from (
                select
                    d."track_id" as track_id,
                    d."word" as word,
                    similarity(d."word", :query) as score,
                    d."track_title" as title,
                    :query as query
                from "track_lexeme_dict" d
                {
                    'inner join "saves" s on s.save_item_id = d.track_id'
                    if personalized and current_user_id
                    else ""
                }
                {
                    'inner join "tracks" t on t.track_id = d.track_id'
                    if only_downloadable
                    else ""
                }
                where d."word" % :query
                {
                    "and s.save_type='track' and s.is_current=true and " +
                    "s.is_delete=false and s.user_id = :current_user_id"
                    if personalized and current_user_id
                    else ""
                }
                {
                    "and (t.download->>'is_downloadable')::boolean is True"
                    if only_downloadable
                    else ""
                }
            ) as results
            group by track_id, title, query
        ) as results2
        order by total_score desc, track_id asc
        limit :limit offset :offset;
        """)
    track_ids = session.execute(
        res,
        {
            "query": searchStr,
            # over-fetch so the best-match re-sort below has enough candidates
            "limit": max(limit, MIN_SEARCH_LEXEME_LIMIT),
            "offset": offset,
            "title_weight": trackTitleWeight,
            "current_user_id": current_user_id
        },
    ).fetchall()

    # track_ids is list of tuples - simplify to 1-D list
    track_ids = [i[0] for i in track_ids]

    tracks = get_unpopulated_tracks(session, track_ids, True)

    # TODO: Populate track metadata should be sped up to be able to be
    # used in search autocomplete as that'll give us better results.
    # Idiom fix: truthiness test instead of `== True`.
    if is_auto_complete:
        # fetch users for tracks
        track_owner_ids = list(map(lambda track: track["owner_id"], tracks))
        users = get_unpopulated_users(session, track_owner_ids)
        users_dict = {user["user_id"]: user for user in users}

        # attach user objects to track objects
        for track in tracks:
            track["user"] = users_dict[track["owner_id"]]
        tracks = populate_track_repost_counts(session, track_ids, tracks)
    else:
        # bundle peripheral info into track results
        tracks = populate_track_metadata(session, track_ids, tracks,
                                         current_user_id)

    # Preserve order from track_ids above. Robustness fix: ids dropped by
    # get_unpopulated_tracks (filter_deleted=True removes deleted tracks)
    # previously raised KeyError here; skip them instead.
    tracks_map = {track["track_id"]: track for track in tracks}
    tracks = [
        tracks_map[track_id]
        for track_id in track_ids
        if track_id in tracks_map
    ]

    # Sort tracks by extra criteria for "best match"
    tracks.sort(key=cmp_to_key(compare_tracks))

    return tracks[0:limit]
def get_tracks_and_ids():
    """Fetch tracks (plus their ids) per the enclosing `args` filters.

    Resolves an optional `handle` to a user id, serves plain id lookups
    from the shared unpopulated-track cache, and otherwise builds a
    filtered/sorted/paginated Track query.

    Returns:
        (tracks, track_ids): track dicts and their ids.
    """
    if "handle" in args:
        handle = args.get("handle")
        # Fix: Query(...).first() returns a Row (or None), not a scalar.
        # The previous code stored the Row in args["user_id"], so the
        # Track.owner_id == user_id filter below compared against a tuple.
        user = session.query(User.user_id).filter(
            User.handle_lc == handle.lower()).first()
        args["user_id"] = user.user_id if user else None

    # The shared cache can only serve plain id lookups with no filters/sorts.
    can_use_shared_cache = ("id" in args
                            and "min_block_number" not in args
                            and "sort" not in args
                            and "user_id" not in args)

    if can_use_shared_cache:
        should_filter_deleted = args.get("filter_deleted", False)
        tracks = get_unpopulated_tracks(session, args["id"],
                                        should_filter_deleted)
        track_ids = [track["track_id"] for track in tracks]
        return (tracks, track_ids)

    # Create initial query
    base_query = session.query(Track)
    base_query = base_query.filter(Track.is_current == True,
                                   Track.is_unlisted == False,
                                   Track.stem_of == None)

    # Conditionally process an array of tracks
    if "id" in args:
        track_id_list = args.get("id")
        try:
            # Update query with track_id list
            base_query = base_query.filter(Track.track_id.in_(track_id_list))
        except ValueError as e:
            logger.error("Invalid value found in track id list",
                         exc_info=True)
            raise e

    # Allow filtering of tracks by a certain creator
    if "user_id" in args:
        user_id = args.get("user_id")
        base_query = base_query.filter(Track.owner_id == user_id)

    # Allow filtering of deletes
    if "filter_deleted" in args:
        filter_deleted = args.get("filter_deleted")
        if filter_deleted:
            base_query = base_query.filter(Track.is_delete == False)

    if "min_block_number" in args:
        min_block_number = args.get("min_block_number")
        base_query = base_query.filter(Track.blocknumber >= min_block_number)

    if "sort" in args:
        if args["sort"] == "date":
            base_query = base_query.order_by(Track.created_at.desc(),
                                             Track.track_id.desc())
        elif args["sort"] == "plays":
            # Join against aggregate play counts to sort by play volume.
            base_query = base_query.join(
                AggregatePlays,
                AggregatePlays.play_item_id == Track.track_id).order_by(
                    AggregatePlays.count.desc())
        else:
            # Fall back to a whitelisted column sort parsed from args.
            whitelist_params = [
                'created_at', 'create_date', 'release_date', 'blocknumber',
                'track_id'
            ]
            base_query = parse_sort_param(base_query, Track, whitelist_params)

    query_results = paginate_query(base_query).all()
    tracks = helpers.query_result_to_list(query_results)
    track_ids = [track["track_id"] for track in tracks]
    return (tracks, track_ids)
def get_feed(args):
    """Build the social feed for the current user.

    Combines tracks/playlists created by followed users ("original") and/or
    reposted by them ("repost"), per args["filter"] ("original" | "repost" |
    "all"), sorts by activity timestamp, and truncates to the page limit.

    Args:
        args: request args dict; reads "filter", "tracks_only", and
            optionally "with_users".

    Returns:
        List of feed entries (track and playlist dicts), newest activity
        first, truncated to the pagination limit.
    """
    feed_results = []
    db = get_db_read_replica()
    feed_filter = args.get("filter")
    # Allow for fetching only tracks
    tracks_only = args.get('tracks_only', False)
    # Current user - user for whom feed is being generated
    current_user_id = get_current_user_id()
    with db.scoped_session() as session:
        # Generate list of users followed by current user, i.e. 'followees'
        followee_user_ids = (session.query(Follow.followee_user_id).filter(
            Follow.follower_user_id == current_user_id,
            Follow.is_current == True,
            Follow.is_delete == False).all())
        followee_user_ids = [f[0] for f in followee_user_ids]

        # Fetch followee creations if requested
        if feed_filter in ["original", "all"]:
            if not tracks_only:
                # Query playlists posted by followees, sorted and paginated by created_at desc
                created_playlists_query = (session.query(Playlist).filter(
                    Playlist.is_current == True,
                    Playlist.is_delete == False,
                    Playlist.is_private == False,
                    Playlist.playlist_owner_id.in_(
                        followee_user_ids)).order_by(desc(
                            Playlist.created_at)))
                created_playlists = paginate_query(created_playlists_query,
                                                   False).all()

                # get track ids for all tracks in playlists
                playlist_track_ids = set()
                for playlist in created_playlists:
                    for track in playlist.playlist_contents["track_ids"]:
                        playlist_track_ids.add(track["track"])

                # get all track objects for track ids
                playlist_tracks = get_unpopulated_tracks(
                    session, playlist_track_ids)
                playlist_tracks_dict = {
                    track["track_id"]: track
                    for track in playlist_tracks
                }

                # get all track ids that have same owner as playlist and created in "same action"
                # "same action": track created within [x time] before playlist creation
                tracks_to_dedupe = set()
                for playlist in created_playlists:
                    for track_entry in playlist.playlist_contents["track_ids"]:
                        track = playlist_tracks_dict.get(track_entry["track"])
                        # A playlist referencing a track we couldn't fetch is
                        # treated as an internal error.
                        if not track:
                            return api_helpers.error_response(
                                "Something caused the server to crash.")
                        max_timedelta = datetime.timedelta(
                            minutes=trackDedupeMaxMinutes)
                        if (track["owner_id"] == playlist.playlist_owner_id) and \
                           (track["created_at"] <= playlist.created_at) and \
                           (playlist.created_at - track["created_at"] <= max_timedelta):
                            tracks_to_dedupe.add(track["track_id"])

                tracks_to_dedupe = list(tracks_to_dedupe)
            else:
                # No playlists to consider
                tracks_to_dedupe = []
                created_playlists = []

            # Query tracks posted by followees, sorted & paginated by created_at desc
            # exclude tracks that were posted in "same action" as playlist
            created_tracks_query = (session.query(Track).filter(
                Track.is_current == True,
                Track.is_delete == False,
                Track.is_unlisted == False,
                Track.stem_of == None,
                Track.owner_id.in_(followee_user_ids),
                Track.track_id.notin_(tracks_to_dedupe)).order_by(
                    desc(Track.created_at)))
            created_tracks = paginate_query(created_tracks_query, False).all()

            # extract created_track_ids and created_playlist_ids
            created_track_ids = [track.track_id for track in created_tracks]
            created_playlist_ids = [
                playlist.playlist_id for playlist in created_playlists
            ]

        # Fetch followee reposts if requested
        if feed_filter in ["repost", "all"]:
            # query items reposted by followees, sorted by oldest followee repost of item;
            # paginated by most recent repost timestamp
            repost_subquery = (session.query(Repost).filter(
                Repost.is_current == True,
                Repost.is_delete == False,
                Repost.user_id.in_(followee_user_ids)))
            # exclude items also created by followees to guarantee order determinism, in case of "all" filter
            if feed_filter == "all":
                repost_subquery = (repost_subquery.filter(
                    or_(
                        and_(Repost.repost_type == RepostType.track,
                             Repost.repost_item_id.notin_(created_track_ids)),
                        and_(
                            Repost.repost_type != RepostType.track,
                            Repost.repost_item_id.notin_(
                                created_playlist_ids)))))
            repost_subquery = repost_subquery.subquery()

            repost_query = (session.query(
                repost_subquery.c.repost_item_id,
                repost_subquery.c.repost_type,
                func.min(repost_subquery.c.created_at).label(
                    "min_created_at")).group_by(
                        repost_subquery.c.repost_item_id,
                        repost_subquery.c.repost_type).order_by(
                            desc("min_created_at")))
            followee_reposts = paginate_query(repost_query, False).all()

            # build dict of track_id / playlist_id -> oldest followee repost timestamp from followee_reposts above
            track_repost_timestamp_dict = {}
            playlist_repost_timestamp_dict = {}
            for (repost_item_id, repost_type,
                 oldest_followee_repost_timestamp) in followee_reposts:
                if repost_type == RepostType.track:
                    track_repost_timestamp_dict[
                        repost_item_id] = oldest_followee_repost_timestamp
                elif repost_type in (RepostType.playlist, RepostType.album):
                    playlist_repost_timestamp_dict[
                        repost_item_id] = oldest_followee_repost_timestamp

            # extract reposted_track_ids and reposted_playlist_ids
            reposted_track_ids = list(track_repost_timestamp_dict.keys())
            reposted_playlist_ids = list(playlist_repost_timestamp_dict.keys())

            # Query tracks reposted by followees
            reposted_tracks = session.query(Track).filter(
                Track.is_current == True,
                Track.is_delete == False,
                Track.is_unlisted == False,
                Track.stem_of == None,
                Track.track_id.in_(reposted_track_ids))
            # exclude tracks already fetched from above, in case of "all" filter
            if feed_filter == "all":
                reposted_tracks = reposted_tracks.filter(
                    Track.track_id.notin_(created_track_ids))
            reposted_tracks = reposted_tracks.order_by(desc(
                Track.created_at)).all()

            if not tracks_only:
                # Query playlists reposted by followees, excluding playlists already fetched from above
                reposted_playlists = session.query(Playlist).filter(
                    Playlist.is_current == True,
                    Playlist.is_delete == False,
                    Playlist.is_private == False,
                    Playlist.playlist_id.in_(reposted_playlist_ids))
                # exclude playlists already fetched from above, in case of "all" filter
                if feed_filter == "all":
                    reposted_playlists = reposted_playlists.filter(
                        Playlist.playlist_id.notin_(created_playlist_ids))
                reposted_playlists = reposted_playlists.order_by(
                    desc(Playlist.created_at)).all()
            else:
                reposted_playlists = []

        # Select the item sets matching the requested filter.
        if feed_filter == "original":
            tracks_to_process = created_tracks
            playlists_to_process = created_playlists
        elif feed_filter == "repost":
            tracks_to_process = reposted_tracks
            playlists_to_process = reposted_playlists
        else:
            tracks_to_process = created_tracks + reposted_tracks
            playlists_to_process = created_playlists + reposted_playlists

        tracks = helpers.query_result_to_list(tracks_to_process)
        playlists = helpers.query_result_to_list(playlists_to_process)

        # define top level feed activity_timestamp to enable sorting
        # activity_timestamp: created_at if item created by followee, else reposted_at
        for track in tracks:
            if track["owner_id"] in followee_user_ids:
                track[response_name_constants.
                      activity_timestamp] = track["created_at"]
            else:
                track[response_name_constants.
                      activity_timestamp] = track_repost_timestamp_dict[
                          track["track_id"]]
        for playlist in playlists:
            if playlist["playlist_owner_id"] in followee_user_ids:
                playlist[response_name_constants.
                         activity_timestamp] = playlist["created_at"]
            else:
                playlist[response_name_constants.activity_timestamp] = \
                    playlist_repost_timestamp_dict[playlist["playlist_id"]]

        # bundle peripheral info into track and playlist objects
        track_ids = list(map(lambda track: track["track_id"], tracks))
        playlist_ids = list(
            map(lambda playlist: playlist["playlist_id"], playlists))
        tracks = populate_track_metadata(session, track_ids, tracks,
                                         current_user_id)
        playlists = populate_playlist_metadata(
            session, playlist_ids, playlists,
            [RepostType.playlist, RepostType.album],
            [SaveType.playlist, SaveType.album], current_user_id)

        # build combined feed of tracks and playlists
        unsorted_feed = tracks + playlists

        # sort feed based on activity_timestamp
        sorted_feed = sorted(
            unsorted_feed,
            key=lambda entry: entry[response_name_constants.
                                    activity_timestamp],
            reverse=True)

        # truncate feed to requested limit
        (limit, _) = get_pagination_vars()
        feed_results = sorted_feed[0:limit]
        # Optionally attach full user objects to each feed entry.
        if "with_users" in args and args.get("with_users") != 'false':
            user_id_list = get_users_ids(feed_results)
            users = get_users_by_id(session, user_id_list)
            for result in feed_results:
                if 'playlist_owner_id' in result:
                    user = users[result['playlist_owner_id']]
                    if user:
                        result['user'] = user
                elif 'owner_id' in result:
                    user = users[result['owner_id']]
                    if user:
                        result['user'] = user

    return feed_results