Example #1
db_sorts = dict(hot=(desc, '_hot'),
                new=(desc, '_date'),
                top=(desc, '_score'),
                controversial=(desc, '_controversy'))


def db_sort(sort):
    cls, col = db_sorts[sort]
    return cls(col)
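db_sort is just a table lookup: it resolves a listing sort name to an ordering clause over the matching Thing column. A minimal self-contained sketch, with desc stubbed out as a namedtuple (in reddit it is the SQL layer's descending-sort constructor):

from collections import namedtuple

desc = namedtuple('desc', 'col')   # stand-in for the real sort constructor

db_sorts = dict(hot=(desc, '_hot'), top=(desc, '_score'))

def db_sort(sort):
    cls, col = db_sorts[sort]
    return cls(col)

print(db_sort('top'))   # desc(col='_score'), i.e. order by _score descending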


search_sort = dict(hot='hot desc',
                   new='date desc',
                   top='points desc',
                   controversial='controversy desc',
                   old='date asc')
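The values read as '<field> <direction>' clauses for the search backend. A hypothetical resolver sketch using the search_sort dict above (the unknown-name fallback is an assumption, not something in this file):

def search_sort_clause(sort, default='hot'):
    # hypothetical helper: fall back to the default sort for unknown names
    return search_sort.get(sort, search_sort[default])

assert search_sort_clause('top') == 'points desc'
assert search_sort_clause('bogus') == 'hot desc'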

db_times = dict(all=None,
                hour=Thing.c._date >= timeago('1 hour'),
                day=Thing.c._date >= timeago('1 day'),
                week=Thing.c._date >= timeago('1 week'),
                month=Thing.c._date >= timeago('1 month'),
                year=Thing.c._date >= timeago('1 year'))
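timeago comes from reddit's utility layer and turns an interval string into a cutoff datetime, so each db_times entry is a reusable column filter (the None for 'all' means no time restriction). A rough, timezone-naive sketch of the idea, not the real implementation:

from datetime import datetime, timedelta

def timeago_sketch(interval):
    count, unit = interval.split()
    deltas = {'hour': timedelta(hours=1), 'day': timedelta(days=1),
              'week': timedelta(weeks=1), 'month': timedelta(days=30),
              'year': timedelta(days=365)}   # month/year approximated
    return datetime.utcnow() - int(count) * deltas[unit]

# e.g. Thing.c._date >= timeago('1 week') keeps things from the last week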

# batched_time_sorts/batched_time_times: top and controversial
# listings with a time-component are really expensive, and for the
# ones that span more than a day they don't change much (if at all)
# within that time. So we have some hacks to avoid re-running these
# queries against the precomputer except up to once per day
# * To get the results of the queries, we return the results of the
#   (potentially stale) query, merged with the query by 'day' (see
#   get_links)
# * When we are adding the special queries to the queue, we add them
#   with a preflight check to determine if they are runnable and a
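A hedged sketch of the merge described in the first bullet: combine the possibly-stale time-filtered listing with the always-fresh 'day' listing, dedupe, and re-rank. The function name and the (fullname, sort_value) item shape are illustrative, not reddit's get_links API:

def merged_time_listing(stale_items, day_items, limit=25):
    # items are (fullname, sort_value) pairs; keep the best value per
    # fullname so last-day entries can override stale ones
    best = {}
    for name, value in stale_items + day_items:
        if name not in best or value > best[name]:
            best[name] = value
    ranked = sorted(best.items(), key=lambda kv: kv[1], reverse=True)
    return [name for name, _ in ranked[:limit]]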
Example #2
File: queries.py Project: rram/reddit
db_sorts = dict(hot = (desc, '_hot'),
                new = (desc, '_date'),
                top = (desc, '_score'),
                controversial = (desc, '_controversy'))

def db_sort(sort):
    cls, col = db_sorts[sort]
    return cls(col)

search_sort = dict(hot = 'hot desc',
                   new = 'date desc',
                   top = 'points desc',
                   controversial = 'controversy desc',
                   old = 'date asc')

db_times = dict(all = None,
                hour = Thing.c._date >= timeago('1 hour'),
                day = Thing.c._date >= timeago('1 day'),
                week = Thing.c._date >= timeago('1 week'),
                month = Thing.c._date >= timeago('1 month'),
                year = Thing.c._date >= timeago('1 year'))

# sorts for which there can be a time filter (by day, by week,
# etc). All of these but 'all' are done in mr_top, who knows about the
# structure of the stored CachedResults (so changes here may warrant
# changes there)
time_filtered_sorts = set(('top', 'controversial'))
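Illustratively, this set is what tells callers which (sort, time) listing variants exist as separate precomputed CachedResults; enumerating them would look roughly like this (the times tuple mirrors db_times above):

times = ('hour', 'day', 'week', 'month', 'year', 'all')
variants = [(sort, time) for sort in time_filtered_sorts for time in times]
# e.g. ('top', 'week') names the top-this-week listing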

# we need to define the filter functions here so CachedResults can be pickled
def filter_identity(x):
    return x
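The comment above points at a real pickle constraint: pickle serializes functions by reference (module path plus name), so only module-level defs like filter_identity round-trip, while lambdas and nested functions fail. A quick demonstration:

import pickle

def filter_identity(x):
    return x

pickle.dumps(filter_identity)      # works: found by module + name

try:
    pickle.dumps(lambda x: x)      # fails: no importable name
except (pickle.PicklingError, AttributeError) as exc:
    print('cannot pickle:', exc)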
Example #3
precompute_limit = 1000

db_sorts = dict(hot=(desc, "_hot"), new=(desc, "_date"), top=(desc, "_score"), controversial=(desc, "_controversy"))


def db_sort(sort):
    cls, col = db_sorts[sort]
    return cls(col)


search_sort = dict(hot="hot desc", new="date desc", top="points desc", controversial="controversy desc", old="date asc")

db_times = dict(
    all=None,
    hour=Thing.c._date >= timeago("1 hour"),
    day=Thing.c._date >= timeago("1 day"),
    week=Thing.c._date >= timeago("1 week"),
    month=Thing.c._date >= timeago("1 month"),
    year=Thing.c._date >= timeago("1 year"),
)

# batched_time_sorts/batched_time_times: top and controversial
# listings with a time-component are really expensive, and for the
# ones that span more than a day they don't change much (if at all)
# within that time. So we have some hacks to avoid re-running these
# queries against the precomputer except up to once per day
# * To get the results of the queries, we return the results of the
#   (potentially stale) query, merged with the query by 'day' (see
#   get_links)
# * When we are adding the special queries to the queue, we add them
#   with a preflight check to determine if they are runnable and a
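A hedged sketch of the once-per-day preflight described above: record when each special query last ran and skip re-queueing it until a day has passed. The bookkeeping here is illustrative; the real check lives in reddit's query queue:

from datetime import datetime, timedelta

last_run = {}   # (sort, time) -> when the precompute last ran

def preflight_ok(query_key, min_interval=timedelta(days=1)):
    ran = last_run.get(query_key)
    return ran is None or datetime.utcnow() - ran >= min_interval

def mark_run(query_key):
    last_run[query_key] = datetime.utcnow()

if preflight_ok(('top', 'month')):
    # ... run the expensive precomputer query, then:
    mark_run(('top', 'month'))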