def get_hot(srs, only_fullnames=False):
    """Get the (fullname, hotness, epoch_seconds) for the hottest links
    in each subreddit in srs.

    Use the query-cache to avoid some lookups if we can.

    srs: sequence of subreddit objects.
    only_fullnames: if True, each result item is just the fullname
        instead of a (fullname, hotness, epoch_seconds) tuple.

    Returns a list parallel to srs, one result-list per subreddit.
    """
    from r2.lib.db.thing import Query
    from r2.lib.db.queries import CachedResults

    queries = [sr.get_links("hot", "all") for sr in srs]

    # fetch these all in one go
    cachedresults = filter(lambda q: isinstance(q, CachedResults), queries)
    CachedResults.fetch_multi(cachedresults)

    # the age cutoff is loop-invariant; compute it once instead of per-sr
    age_limit = epoch_seconds(utils.timeago("%d days" % g.HOT_PAGE_AGE))

    ret = []
    # BUG FIX: the original read the leaked comprehension variable `sr`
    # inside this loop, so every uncached Query was resolved against the
    # *last* subreddit in srs.  Pair each query with its own subreddit.
    for sr, q in zip(srs, queries):
        if isinstance(q, Query):
            links = cached_query(q, sr)
            res = [(link._fullname, link._hot, epoch_seconds(link._date))
                   for link in links]
        elif isinstance(q, CachedResults):
            # we're relying on an implementation detail of
            # CachedResults here, where it's storing tuples that look
            # exactly like the return-type we want, to make our
            # sorting a bit cheaper
            res = list(q.data)
        else:
            # BUG FIX: the original left `res` unbound (or stale from the
            # previous iteration) for unknown query types; return an
            # empty result for that subreddit instead
            res = []

        # remove any that are too old
        res = [(fname if only_fullnames else (fname, hot, date))
               for (fname, hot, date) in res
               if date > age_limit]
        ret.append(res)

    return ret
def get_hot(srs, only_fullnames=False):
    """Get the (fullname, hotness, epoch_seconds) for the hottest links
    in each subreddit in srs.

    Use the query-cache to avoid some lookups if we can.

    srs: sequence of subreddit objects.
    only_fullnames: if True, each result item is just the fullname
        instead of a (fullname, hotness, epoch_seconds) tuple.

    Returns a list parallel to srs, one result-list per subreddit.
    """
    from r2.lib.db.thing import Query
    from r2.lib.db.queries import CachedResults

    queries = [sr.get_links('hot', 'all') for sr in srs]

    # fetch these all in one go
    cachedresults = filter(lambda q: isinstance(q, CachedResults), queries)
    CachedResults.fetch_multi(cachedresults)

    # the age cutoff is loop-invariant; compute it once instead of per-sr
    age_limit = epoch_seconds(utils.timeago('%d days' % g.HOT_PAGE_AGE))

    ret = []
    # BUG FIX: the original read the leaked comprehension variable `sr`
    # inside this loop, so every uncached Query was resolved against the
    # *last* subreddit in srs.  Pair each query with its own subreddit.
    for sr, q in zip(srs, queries):
        if isinstance(q, Query):
            links = cached_query(q, sr)
            res = [(link._fullname, link._hot, epoch_seconds(link._date))
                   for link in links]
        elif isinstance(q, CachedResults):
            # we're relying on an implementation detail of
            # CachedResults here, where it's storing tuples that look
            # exactly like the return-type we want, to make our
            # sorting a bit cheaper
            res = list(q.data)
        else:
            # BUG FIX: the original left `res` unbound (or stale from the
            # previous iteration) for unknown query types; return an
            # empty result for that subreddit instead
            res = []

        # remove any that are too old
        res = [(fname if only_fullnames else (fname, hot, date))
               for (fname, hot, date) in res
               if date > age_limit]
        ret.append(res)

    return ret
def get_hot_tuples(sr_ids):
    """Build {sr_id: [(-ehot, -hot, link_fullname, timestamp), ...]}.

    Each subreddit's hottest link is normalized to ehot == 1. (negated to
    -1.), and the rest of its top MAX_PER_SUBREDDIT links get ehot scaled
    relative to that top score, so links from differently-sized subreddits
    can be interleaved fairly.
    """
    queries_by_sr_id = {}
    for sr_id in sr_ids:
        queries_by_sr_id[sr_id] = _get_links(sr_id, sort='hot', time='all')
    CachedResults.fetch_multi(queries_by_sr_id.values())

    tuples_by_srid = dict((sr_id, []) for sr_id in sr_ids)

    for sr_id, q in queries_by_sr_id.iteritems():
        data = q.data
        if not data:
            continue

        top_name, top_hot, top_timestamp = data[0]
        normalizer = max(top_hot, 1.)
        bucket = tuples_by_srid[sr_id]

        # heapq.merge sorts from smallest to largest, so ehot and hot are
        # negated to put the hottest links first
        bucket.append((-1., -top_hot, top_name, top_timestamp))
        for name, hot, timestamp in data[1:MAX_PER_SUBREDDIT]:
            bucket.append((-(hot / normalizer), -hot, name, timestamp))

    return tuples_by_srid
def get_hot_tuples(sr_ids, ageweight=None):
    """Build {sr_id: [(-effective_hot, -hot, link_fullname, timestamp), ...]}.

    For each subreddit, the top MAX_PER_SUBREDDIT links are scaled by a
    per-subreddit hot factor (derived from its hottest link, the current
    time, and ageweight) so links from differently-sized subreddits can
    be interleaved fairly.
    """
    queries_by_sr_id = dict(
        (sr_id, _get_links(sr_id, sort='hot', time='all'))
        for sr_id in sr_ids)
    CachedResults.fetch_multi(queries_by_sr_id.values(), stale=True)

    tuples_by_srid = dict((sr_id, []) for sr_id in sr_ids)
    now_seconds = epoch_seconds(datetime.now(g.tz))

    for sr_id, q in queries_by_sr_id.iteritems():
        if not q.data:
            continue

        hot_factor = get_hot_factor(q.data[0], now_seconds, ageweight)
        # heapq.merge sorts from smallest to largest, so effective_hot
        # and hot are negated to put the hottest links first
        tuples_by_srid[sr_id] = [
            (-(hot / hot_factor), -hot, link_name, timestamp)
            for link_name, hot, timestamp in q.data[:MAX_PER_SUBREDDIT]
        ]

    return tuples_by_srid
def get_hot_tuples(sr_ids):
    """Build {sr_id: [(-ehot, -hot, link_fullname, timestamp), ...]}.

    The hottest link of each subreddit is pinned at ehot 1. (stored as
    -1.), and the remaining top MAX_PER_SUBREDDIT links are scaled
    against that top score, making links from subreddits of different
    sizes comparable when merged.
    """
    queries_by_sr_id = {sr_id: _get_links(sr_id, sort='hot', time='all')
                        for sr_id in sr_ids}
    CachedResults.fetch_multi(queries_by_sr_id.values())

    tuples_by_srid = {sr_id: [] for sr_id in sr_ids}

    for sr_id, q in queries_by_sr_id.iteritems():
        if not q.data:
            continue

        head = q.data[0]
        rest = q.data[1:MAX_PER_SUBREDDIT]
        link_name, hot, timestamp = head
        thot = max(hot, 1.)

        out = tuples_by_srid[sr_id]
        # heapq.merge sorts from smallest to largest, so ehot and hot
        # are stored negated to surface the hottest links first
        out.append((-1., -hot, link_name, timestamp))
        out.extend((-(h / thot), -h, name, ts) for name, h, ts in rest)

    return tuples_by_srid