Example #1
def _series_index(query,
                  sort=None,
                  subsection=None,
                  conference=True,
                  past=False,
                  keywords=""):
    search_array = SeriesSearchArray(conference=conference, past=past)
    info = to_dict(read_search_cookie(search_array), search_array=search_array)
    info.update(request.args)
    if keywords:
        info["keywords"] = keywords
    query = dict(query)
    if conference:
        # Be permissive on end-date since we don't want to miss ongoing conferences, and we could have time zone differences.
        # Ignore the possibility that the user/conference is in Kiribati.
        recent = datetime.now().date() - timedelta(days=1)
        query["end_date"] = {"$lt" if past else "$gte": recent}
    query["visibility"] = 2  # only show public talks
    query["display"] = True  # don't show talks created by users who have not been endorsed
    kw_query = query.copy()
    parse_substring(info, kw_query, "keywords", series_keyword_columns())
    org_query, more = {}, {}
    # we will be selecting series satisfying the query and recording whether they satisfy the "more" query
    seminars_parser(info, more, org_query)
    results = list(
        seminars_search(kw_query,
                        organizer_dict=all_organizers(org_query),
                        more=more))
    if info.get("keywords", ""):
        parse_substring(info, org_query, "keywords",
                        organizers_keyword_columns())
        results += list(
            seminars_search(query,
                            organizer_dict=all_organizers(org_query),
                            more=more))
        unique = {s.shortname: s for s in results}
        results = [unique[key] for key in unique]
    series = series_sorted(results, conference=conference, reverse=past)
    counters = _get_counters(series)
    row_attributes = _get_row_attributes(series)
    response = make_response(
        render_template("browse_series.html",
                        title="Browse " + ("past " if past else "") +
                        ("conferences" if conference else "seminar series"),
                        section="Browse",
                        subsection=subsection,
                        info=info,
                        series_row_attributes=zip(series, row_attributes),
                        is_conference=conference,
                        past=past,
                        **counters))
    if request.cookies.get("topics", ""):
        # TODO: when we move cookie data to server with ajax calls, this will need to get updated again
        # For now we set the max_age to 30 years
        response.set_cookie("topics_dict",
                            topic_dag.port_cookie(),
                            max_age=60 * 60 * 24 * 365 * 30)
        response.set_cookie("topics", "", max_age=0)
    return response
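
A minimal sketch of how a helper like this might be exposed through Flask routes, assuming a hypothetical blueprint; the blueprint name, URL rules and subsection labels below are illustrative assumptions, not taken from the example above.

from flask import Blueprint

browse_page = Blueprint("browse", __name__)  # hypothetical blueprint, for this sketch only

@browse_page.route("/conferences")
def index_conferences():
    # upcoming or ongoing conferences: end_date is constrained to be >= yesterday
    return _series_index({}, subsection="conferences", conference=True)

@browse_page.route("/past_conferences")
def index_past_conferences():
    # past=True flips the end_date comparison to "$lt" and reverses the sort order
    return _series_index({}, subsection="past_conferences", conference=True, past=True)

@browse_page.route("/seminar_series")
def index_seminar_series():
    # conference=False skips the end_date constraint entirely
    return _series_index({}, subsection="semseries", conference=False)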
Example #2
def _series_index(query, sort=None, subsection=None, conference=True, past=False):
    search_array = SeriesSearchArray(conference=conference, past=past)
    info = to_dict(read_search_cookie(search_array), search_array=search_array)
    info.update(request.args)
    query = dict(query)
    parse_substring(info, query, "keywords",
                    ["name",
                     "description",
                     "homepage",
                     "shortname",
                     "comments"])
    more = {}  # we will be selecting series satisfying the query and recording whether they satisfy the "more" query
    seminars_parser(info, more)
    query["visibility"] = 2
    if conference:
        # Be permissive on end-date since we don't want to miss ongoing conferences, and we could have time zone differences.  Ignore the possibility that the user/conference is in Kiribati.
        recent = datetime.now().date() - timedelta(days=1)
        if past:
            query["end_date"] = {"$lt": recent}
        else:
            query["end_date"] = {"$gte": recent}
        if sort is None:
            if past:
                sort = [("end_date", -1), ("start_date", -1), "name"]
            else:
                sort = ["start_date", "end_date", "name"]
    if sort is None: # not conferences
        # We don't currently call this case in the past, but if we add it we probably
        # need a last_talk_sorted that sorts by end time of last talk in reverse order
        series = next_talk_sorted(seminars_search(query, organizer_dict=all_organizers(), more=more))
    else:
        series = list(seminars_search(query, sort=sort, organizer_dict=all_organizers(), more=more))
    counters = _get_counters(series)
    row_attributes = _get_row_attributes(series)
    title = "Browse conferences" if conference else "Browse seminar series"
    response = make_response(render_template(
        "browse_series.html",
        title=title,
        section="Browse",
        subsection=subsection,
        info=info,
        series_row_attributes=zip(series, row_attributes),
        is_conference=conference,
        past=past,
        **counters
    ))
    if request.cookies.get("topics", ""):
        # TODO: when we move cookie data to server with ajax calls, this will need to get updated again
        # For now we set the max_age to 30 years
        response.set_cookie("topics_dict", topic_dag.port_cookie(), max_age=60*60*24*365*30)
        response.set_cookie("topics", "", max_age=0)
    return response
Example #3
def _talks_index(
    query={},
    sort=None,
    subsection=None,
    past=False,
    keywords="",
    limit=None,  # an upper bound on the desired number of talks; some may still be filtered out afterward
    limitbuffer=1000,  # the number of extra talks that we fetch to try to get the limit right
    asblock=False,  # the number of talks returned is based on start time blocks
    getcounters=True,  # don't limit the SQL search, so that the counters reflect the full result set
    visible_counter=0,
    fully_filtered=True,
):
    # Eventually want some kind of cutoff on which talks are included.
    search_array = TalkSearchArray(past=past)
    info = to_dict(read_search_cookie(search_array), search_array=search_array)
    info.update(request.args)
    if keywords:
        info["keywords"] = keywords
    keywords = info.get("keywords", "")
    query = dict(query)
    parse_substring(info, query, "keywords", [
        "title", "abstract", "speaker", "speaker_affiliation", "seminar_id",
        "comments", "speaker_homepage", "paper_link"
    ])
    more = {}  # we will be selecting talks satisfying the query and recording whether they satisfy the "more" query
    # Note that talks_parser ignores the "time" field at the moment; see below for workaround
    talks_parser(info, more)
    if topdomain() == "mathseminars.org":
        query["topics"] = {"$contains": "math"}
    query["display"] = True
    query["hidden"] = {"$or": [False, {"$exists": False}]}
    query["audience"] = {"$lte": DEFAULT_AUDIENCE}
    now = datetime.now(pytz.UTC)
    if past:
        query["end_time"] = {"$lt": now}
        query["seminar_ctr"] = {"$gt": 0}  # don't show rescheduled talks
        if sort is None:
            sort = [("start_time", -1), "seminar_id"]
    else:
        query["end_time"] = {"$gte": now}
        if sort is None:
            sort = ["start_time", "seminar_id"]

    def dosearch(limit=limit, limitbuffer=limitbuffer):
        if limit and not getcounters:
            # we fetch extra talks to account for filtering
            talks = talks_search(query,
                                 sort=sort,
                                 seminar_dict=all_seminars(),
                                 more=more,
                                 limit=limit + limitbuffer)
        else:
            talks = talks_search(query,
                                 sort=sort,
                                 seminar_dict=all_seminars(),
                                 more=more)
        # Filtering on display and hidden isn't sufficient since the seminar could be private
        talks = [talk for talk in talks if talk.searchable()]
        return talks

    def truncateasblock(talks, retry=True):
        if not talks:
            return talks
        last_time = None
        # find enough talks such that the next talk has a different starting time
        for i, t in enumerate(talks):
            if last_time is None:
                last_time = t.start_time
                continue
            if t.start_time != last_time:
                if i > limit:
                    return talks[:i - 1]
                else:
                    last_time = t.start_time
        else:
            if retry and limit and not getcounters:
                # redo the search without limits
                talks = dosearch(limit=None)
                return truncateasblock(talks, retry=False)
            return talks

    def truncate(talks):
        if asblock and limit:
            return truncateasblock(talks)
        elif limit:
            return talks[:limit]
        else:
            return talks

    talks = dosearch()
    if getcounters:
        counters = _get_counters(talks)
    else:
        counters = _get_counters([])

    if getcounters and fully_filtered:
        # the initial query was not limited as getcounters = True
        # we will first filter after figuring out the more attribute
        # and then truncate
        pass
    else:
        # we are not going to filter or the query was already limited, so we can truncate
        talks = truncate(talks)
    # While we may be able to write a query specifying inequalities on the timestamp in the user's timezone, it's not easily supported by talks_search.  So we filter afterward
    timerange = info.get("timerange", "").strip()
    if timerange:
        tz = current_user.tz
        try:
            timerange = process_user_input(timerange,
                                           col="search",
                                           typ="daytimes")
        except ValueError:
            try:
                onetime = process_user_input(timerange,
                                             col="search",
                                             typ="daytime")
            except ValueError:
                flash_error("Invalid time range input: %s", timerange)
            else:
                for talk in talks:
                    if talk.more:
                        talkstart = adapt_datetime(talk.start_time, tz)
                        t = date_and_daytime_to_time(talkstart.date(), onetime,
                                                     tz)
                        talk.more = (t == talkstart)
        else:
            for talk in talks:
                if talk.more:
                    talkstart = adapt_datetime(talk.start_time, tz)
                    talkend = adapt_datetime(talk.end_time, tz)
                    t0, t1 = date_and_daytimes_to_times(
                        talkstart.date(), timerange, tz)
                    talk.more = (t0 <= talkstart) and (talkend <= t1)

    # get last_time before potential filtering
    last_time = int(talks[-1].start_time.timestamp()) if talks else None
    if fully_filtered:  # first filter then truncate
        row_attributes, talks = _get_row_attributes(talks, visible_counter,
                                                    fully_filtered)
        if getcounters:  # we have not yet truncated the results
            if limit and len(talks) > limit:
                talks = truncate(talks)
                row_attributes = row_attributes[:len(talks)]
                last_time = int(
                    talks[-1].start_time.timestamp()) if talks else None
    else:
        row_attributes = _get_row_attributes(talks, visible_counter)

    response = make_response(
        render_template("browse_talks.html",
                        title="Browse past talks" if past else "Browse talks",
                        section="Browse",
                        info=info,
                        subsection=subsection,
                        talk_row_attributes=zip(talks, row_attributes),
                        past=past,
                        last_time=last_time,
                        extraargs=urlencode({'keywords': keywords}),
                        **counters))
    if request.cookies.get("topics", ""):
        # TODO: when we move cookie data to server with ajax calls, this will need to get updated again
        # For now we set the max_age to 30 years
        response.set_cookie("topics_dict",
                            topic_dag.port_cookie(),
                            max_age=60 * 60 * 24 * 365 * 30)
        response.set_cookie("topics", "", max_age=0)

    # disable cache
    response.headers['Cache-Control'] = 'no-cache, no-store, must-revalidate'
    response.headers['Pragma'] = 'no-cache'
    return response
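
For illustration, a sketch of routes that might drive this helper, reusing the hypothetical browse_page blueprint from the sketch after Example #1; the URL rules and the limit of 50 are assumptions, and only parameters from the signature above are passed.

@browse_page.route("/talks")
def index_talks():
    # upcoming talks; asblock=True truncates at a boundary between start-time blocks
    return _talks_index(subsection="talks", limit=50, asblock=True)

@browse_page.route("/past_talks")
def index_past_talks():
    # past=True filters end_time < now and sorts by start_time in descending order
    return _talks_index(subsection="past_talks", past=True, limit=50, asblock=True)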
Example #4
def _talks_index(query={}, sort=None, subsection=None, past=False):
    # Eventually want some kind of cutoff on which talks are included.
    search_array = TalkSearchArray(past=past)
    info = to_dict(read_search_cookie(search_array), search_array=search_array)
    info.update(request.args)
    query = dict(query)
    parse_substring(info, query, "keywords",
                    ["title",
                     "abstract",
                     "speaker",
                     "speaker_affiliation",
                     "seminar_id",
                     "comments",
                     "speaker_homepage",
                     "paper_link"])
    more = {}  # we will be selecting talks satisfying the query and recording whether they satisfy the "more" query
    # Note that talks_parser ignores the "time" field at the moment; see below for workaround
    talks_parser(info, more)
    if topdomain() == "mathseminars.org":
        query["topics"] = {"$contains": "math"}
    query["hidden"] = {"$or": [False, {"$exists": False}]}
    if past:
        query["end_time"] = {"$lt": datetime.now()}
        if sort is None:
            sort = [("start_time", -1), "seminar_id"]
    else:
        query["end_time"] = {"$gte": datetime.now()}
        if sort is None:
            sort = ["start_time", "seminar_id"]
    talks = list(talks_search(query, sort=sort, seminar_dict=all_seminars(), more=more))
    # Filtering on display and hidden isn't sufficient since the seminar could be private
    talks = [talk for talk in talks if talk.searchable()]
    # While we may be able to write a query specifying inequalities on the timestamp in the user's timezone, it's not easily supported by talks_search.  So we filter afterward
    timerange = info.get("timerange", "").strip()
    if timerange:
        tz = current_user.tz
        try:
            timerange = process_user_input(timerange, col="search", typ="daytimes")
        except ValueError:
            try:
                onetime = process_user_input(timerange, col="search", typ="daytime")
            except ValueError:
                flash_error("Invalid time range input: %s", timerange)
            else:
                for talk in talks:
                    if talk.more:
                        talkstart = adapt_datetime(talk.start_time, tz)
                        t = date_and_daytime_to_time(talkstart.date(), onetime, tz)
                        talk.more = (t == talkstart)
        else:
            for talk in talks:
                if talk.more:
                    talkstart = adapt_datetime(talk.start_time, tz)
                    talkend = adapt_datetime(talk.end_time, tz)
                    t0, t1 = date_and_daytimes_to_times(talkstart.date(), timerange, tz)
                    talk.more = (t0 <= talkstart) and (talkend <= t1)
    counters = _get_counters(talks)
    row_attributes = _get_row_attributes(talks)
    response = make_response(render_template(
        "browse_talks.html",
        title="Browse talks",
        section="Browse",
        info=info,
        subsection=subsection,
        talk_row_attributes=zip(talks, row_attributes),
        past=past,
        **counters
    ))
    if request.cookies.get("topics", ""):
        # TODO: when we move cookie data to server with ajax calls, this will need to get updated again
        # For now we set the max_age to 30 years
        response.set_cookie("topics_dict", topic_dag.port_cookie(), max_age=60*60*24*365*30)
        response.set_cookie("topics", "", max_age=0)
    return response