Example 1
    def siteadmin_comments(self, query='', page=None, per_page=100):
        if not (current_auth.user.is_comment_moderator
                or current_auth.user.is_user_moderator):
            return abort(403)

        comments = Comment.query.filter(~(Comment.state.REMOVED)).order_by(
            Comment.created_at.desc())
        if query:
            comments = comments.join(User).filter(
                db.or_(
                    Comment.search_vector.match(for_tsquery(query)),
                    User.search_vector.match(for_tsquery(query)),
                ))

        pagination = comments.paginate(page=page, per_page=per_page)

        return {
            'query': query,
            'comments': pagination.items,
            'total_comments': pagination.total,
            'pages': list(range(1,
                                pagination.pages + 1)),  # list of page numbers
            'current_page': pagination.page,
            'comment_spam_form': Form(),
        }
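
The handler returns plain pagination values (`items`, `total`, `pages`, `page`) taken from the object that `query.paginate()` yields. A minimal, dependency-free sketch of the same arithmetic, with a hypothetical `paginate` helper standing in for Flask-SQLAlchemy's Pagination object, shows what each value means:

import math

def paginate(items, page=1, per_page=100):
    # Hypothetical stand-in; exposes the same values the view above returns
    total = len(items)
    pages = int(max(math.ceil(total / float(per_page)), 1))
    start = (page - 1) * per_page
    return {
        'items': items[start:start + per_page],
        'total': total,
        'pages': pages,
        'page': page,
    }

window = paginate(list(range(250)), page=2, per_page=100)
assert window['total'] == 250 and window['pages'] == 3
assert window['items'][0] == 100  # second page starts at offset 100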
Example 2
def index(basequery=None,
          md5sum=None,
          tag=None,
          domain=None,
          location=None,
          title=None,
          showall=True,
          statuses=None,
          batched=True,
          ageless=False,
          template_vars=None):

    # Guard against a shared mutable default argument
    if template_vars is None:
        template_vars = {}
    is_siteadmin = lastuser.has_permission('siteadmin')
    is_index = basequery is None
    now = datetime.utcnow()
    if basequery is None and not (g.user or g.kiosk or
                                  (g.board and not g.board.require_login)):
        showall = False
        batched = False

    if basequery is None:
        basequery = JobPost.query

    # Apply request.args filters
    data_filters = {}
    f_types = request.args.getlist('t')
    f_types = [t for t in f_types if t]  # drop empty values
    if f_types:
        data_filters['types'] = f_types
        basequery = basequery.join(JobType).filter(JobType.name.in_(f_types))
    f_categories = request.args.getlist('c')
    f_categories = [c for c in f_categories if c]  # drop empty values
    if f_categories:
        data_filters['categories'] = f_categories
        basequery = basequery.join(JobCategory).filter(
            JobCategory.name.in_(f_categories))
    r_locations = request.args.getlist('l')
    if location:
        r_locations.append(location['geonameid'])
    f_locations = []
    remote_location = getbool(request.args.get('anywhere')) or False
    for rl in r_locations:
        if isinstance(rl, int) and rl > 0:
            f_locations.append(rl)
        elif rl == 'anywhere':
            remote_location = True
        elif rl.isdigit():
            f_locations.append(int(rl))
        elif rl:
            ld = location_geodata(rl)
            if ld:
                f_locations.append(ld['geonameid'])
    remote_location_query = basequery.filter(
        JobPost.remote_location == True)  # NOQA
    if f_locations:
        locations_query = basequery.join(JobLocation).filter(
            JobLocation.geonameid.in_(f_locations))
    else:
        locations_query = basequery.join(JobLocation)
    if f_locations and remote_location:
        data_filters['locations'] = f_locations
        data_filters['anywhere'] = True
        recency = JobPost.datetime > datetime.utcnow() - agelimit
        basequery = locations_query.filter(recency).union(
            remote_location_query.filter(recency))
    elif f_locations:
        data_filters['locations'] = f_locations
        basequery = locations_query
    elif remote_location:
        data_filters['anywhere'] = True
        # Only works as a positive filter: you can't search for jobs that are NOT anywhere
        basequery = remote_location_query
    if 'currency' in request.args and request.args['currency'] in CURRENCY:
        currency = request.args['currency']
        data_filters['currency'] = currency
        basequery = basequery.filter(JobPost.pay_currency == currency)
        pay_graph = currency
    else:
        pay_graph = False
    if getbool(request.args.get('equity')):
        # Only works as a positive filter: you can't search for jobs that DON'T pay in equity
        data_filters['equity'] = True
        basequery = basequery.filter(JobPost.pay_equity_min != None)  # NOQA
    if 'pay' in request.args or ('pmin' in request.args
                                 and 'pmax' in request.args):
        if 'pay' in request.args:
            f_pay = string_to_number(request.args['pay'])
            f_min = int(f_pay * 0.90)
            f_max = int(f_pay * 1.30)
        else:
            # Legacy URL with min/max values
            f_min = string_to_number(request.args['pmin'])
            f_max = string_to_number(request.args['pmax'])
            f_pay = f_min  # Use min for pay now
        if f_pay is not None and f_min is not None and f_max is not None:
            data_filters['pay'] = f_pay
            basequery = basequery.filter(JobPost.pay_cash_min < f_max,
                                         JobPost.pay_cash_max >= f_min)
    else:
        f_pay = None
        f_min = None
        f_max = None

    if getbool(request.args.get('archive')):
        ageless = True
        data_filters['archive'] = True
        statuses = POSTSTATUS.ARCHIVED

    search_domains = None
    if request.args.get('q'):
        q = for_tsquery(request.args['q'])
        try:
            # TODO: Can we do syntax validation without a database roundtrip?
            db.session.query(db.func.to_tsquery(q)).all()
        except ProgrammingError:
            db.session.rollback()
            g.event_data['search_syntax_error'] = (request.args['q'], q)
            if not request.is_xhr:
                flash(
                    _(u"Search terms ignored because this didn’t parse: {query}"
                      ).format(query=q), 'danger')
        else:
            # Query's good? Use it.
            data_filters['query'] = q
            search_domains = Domain.query.filter(
                Domain.search_vector.match(q, postgresql_regconfig='english'),
                Domain.is_banned == False).options(
                    db.load_only('name', 'title', 'logo_url')).all()  # NOQA
            basequery = basequery.filter(
                JobPost.search_vector.match(q, postgresql_regconfig='english'))

    if data_filters:
        g.event_data['filters'] = data_filters
        showall = True
        batched = True

    # getposts sets g.board_jobs, used below
    posts = getposts(basequery,
                     pinned=True,
                     showall=showall,
                     statuses=statuses,
                     ageless=ageless).all()

    if is_siteadmin or (g.user and g.user.flags.get('is_employer_month')):
        cache_viewcounts(posts)

    if posts:
        employer_name = posts[0].company_name
    else:
        employer_name = u'a single employer'

    if g.user:
        g.starred_ids = set(
            g.user.starred_job_ids(agelimit if not ageless else None))
    else:
        g.starred_ids = set()

    jobpost_ab = session_jobpost_ab()

    # Make lookup slightly faster in the loop below since 'g' is a proxy
    board = g.board
    if board:
        board_jobs = g.board_jobs
    else:
        board_jobs = {}

    if is_index and posts and not g.kiosk:
        # Group posts by email_domain on index page only, when not in kiosk mode
        grouped = OrderedDict()
        for post in posts:
            pinned = post.pinned
            if board is not None:
                blink = board_jobs.get(
                    post.id
                )  # board_jobs only contains the last 30 days, no archive
                if blink is not None:
                    pinned = blink.pinned
            if pinned:
                # Make pinned posts appear in a group of one
                grouped.setdefault(('s', post.hashid), []).append(
                    (pinned, post, bgroup(jobpost_ab, post)))
            elif post.status == POSTSTATUS.ANNOUNCEMENT:
                # Make announcements also appear in a group of one
                grouped.setdefault(('a', post.hashid), []).append(
                    (pinned, post, bgroup(jobpost_ab, post)))
            elif post.domain.is_webmail:
                grouped.setdefault(('ne', post.md5sum), []).append(
                    (pinned, post, bgroup(jobpost_ab, post)))
            else:
                grouped.setdefault(('nd', post.email_domain), []).append(
                    (pinned, post, bgroup(jobpost_ab, post)))
        pinsandposts = None
    else:
        grouped = None
        if g.board:
            pinsandposts = []
            for post in posts:
                pinned = post.pinned
                if board is not None:
                    blink = board_jobs.get(
                        post.id
                    )  # board_jobs only contains the last 30 days, no archive
                    if blink is not None:
                        pinned = blink.pinned
                pinsandposts.append((pinned, post, bgroup(jobpost_ab, post)))
        else:
            pinsandposts = [(post.pinned, post, bgroup(jobpost_ab, post))
                            for post in posts]

    # Pick a header campaign (only if not kiosk or an XHR reload)
    pay_graph_data = None
    if not g.kiosk:
        if g.preview_campaign:
            header_campaign = g.preview_campaign
        else:
            geonameids = g.user_geonameids + f_locations
            header_campaign = Campaign.for_context(CAMPAIGN_POSITION.HEADER,
                                                   board=g.board,
                                                   user=g.user,
                                                   anon_user=g.anon_user,
                                                   geonameids=geonameids)
        if pay_graph:
            pay_graph_data = make_pay_graph(pay_graph,
                                            posts,
                                            rmin=f_min,
                                            rmax=f_max)
    else:
        header_campaign = None

    loadmore = False
    if batched:
        # Figure out where the batch should start from
        startdate = None
        if 'startdate' in request.values:
            try:
                startdate = parse_isoformat(request.values['startdate'])
            except ValueError:
                pass

        batchsize = 32

        # list of posts that were pinned at the time of first load
        pinned_hashids = request.args.getlist('ph')
        # Depending on the display mechanism (grouped or ungrouped), extract the batch
        if grouped:
            if not startdate:
                startindex = 0
                for row in grouped.values():
                    # break when a non-pinned post is encountered
                    if not row[0][0]:
                        break
                    pinned_hashids.append(row[0][1].hashid)
            else:
                # Loop through group looking for start of next batch. See below to understand the
                # nesting structure of 'grouped'
                for startindex, row in enumerate(grouped.values()):
                    # Skip pinned posts when looking for starting index
                    if (row[0][1].hashid not in pinned_hashids
                            and row[0][1].datetime < startdate):
                        break

            batch = grouped.items()[startindex:startindex + batchsize]
            if startindex + batchsize < len(grouped):
                # Get the datetime of the last group's first item
                # batch = [((type, domain), [(pinned, post, bgroup), ...])]
                # batch[-1] = ((type, domain), [(pinned, post, bgroup), ...])
                # batch[-1][1] = [(pinned, post, bgroup), ...]
                # batch[-1][1][0] = (pinned, post, bgroup)
                # batch[-1][1][0][1] = post
                loadmore = batch[-1][1][0][1].datetime
            grouped = OrderedDict(batch)
        elif pinsandposts:
            if not startdate:
                startindex = 0
                for row in pinsandposts:
                    # break when a non-pinned post is encountered
                    if not row[0]:
                        break
                    pinned_hashids.append(row[1].hashid)
            else:
                for startindex, row in enumerate(pinsandposts):
                    # Skip pinned posts when looking for starting index
                    if (row[1].hashid not in pinned_hashids
                            and row[1].datetime < startdate):
                        break

            batch = pinsandposts[startindex:startindex + batchsize]
            if startindex + batchsize < len(pinsandposts):
                # batch = [(pinned, post), ...]
                loadmore = batch[-1][1].datetime
            pinsandposts = batch
    if grouped:
        g.impressions = {
            post.id: (pinflag, post.id, is_bgroup)
            for group in grouped.itervalues()
            for pinflag, post, is_bgroup in group
        }
    elif pinsandposts:
        g.impressions = {
            post.id: (pinflag, post.id, is_bgroup)
            for pinflag, post, is_bgroup in pinsandposts
        }

    # Test values for development:
    # if not g.user_geonameids:
    #     g.user_geonameids = [1277333, 1277331, 1269750]
    if not location and 'l' not in request.args and g.user_geonameids and (
            g.user or g.anon_user) and (
                (not g.board.auto_locations) if g.board else True):
        # No location filters? Prompt the user
        ldata = location_geodata(g.user_geonameids)
        location_prompts = [
            ldata[geonameid] for geonameid in g.user_geonameids
            if geonameid in ldata
        ]
    else:
        location_prompts = []

    query_params = request.args.to_dict(flat=False)
    if loadmore:
        query_params.update({
            'startdate': loadmore.isoformat() + 'Z',
            'ph': pinned_hashids
        })
    return dict(pinsandposts=pinsandposts,
                grouped=grouped,
                now=now,
                newlimit=newlimit,
                title=title,
                md5sum=md5sum,
                domain=domain,
                location=location,
                employer_name=employer_name,
                showall=showall,
                is_index=is_index,
                header_campaign=header_campaign,
                loadmore=loadmore,
                location_prompts=location_prompts,
                search_domains=search_domains,
                query_params=query_params,
                is_siteadmin=is_siteadmin,
                pay_graph_data=pay_graph_data,
                paginated=index_is_paginated(),
                template_vars=template_vars)
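
The `pay` filter in this example widens a single requested figure into a 90%-130% window and keeps any post whose cash range overlaps it. A small sketch of that interval test (pure Python; the helper names are assumptions, the constants and comparison come from the view above):

def pay_window(pay):
    # The view derives a fuzzy window from one figure: f_min/f_max
    return int(pay * 0.90), int(pay * 1.30)

def pay_overlaps(post_min, post_max, f_min, f_max):
    # Mirrors the filter: JobPost.pay_cash_min < f_max,
    #                     JobPost.pay_cash_max >= f_min
    return post_min < f_max and post_max >= f_min

f_min, f_max = pay_window(100000)  # -> (90000, 130000)
assert pay_overlaps(80000, 95000, f_min, f_max)        # partial overlap matches
assert not pay_overlaps(130000, 200000, f_min, f_max)  # starts past the window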
Example 3
def index(basequery=None, filters=None, md5sum=None, tag=None, domain=None,
          location=None, title=None, showall=True, statusfilter=None,
          batched=True, ageless=False, cached=False, query_string=None,
          filterset=None, template_vars=None):
    # Guard against shared mutable default arguments
    if filters is None:
        filters = {}
    if template_vars is None:
        template_vars = {}
    now = datetime.utcnow()
    is_siteadmin = lastuser.has_permission('siteadmin')
    board = g.board

    if board:
        board_jobs = {
            r.jobpost_id: r
            for r in BoardJobPost.query.join(BoardJobPost.jobpost).filter(
                BoardJobPost.board == g.board, JobPost.state.LISTED
            ).options(db.load_only('jobpost_id', 'pinned')).all()
        }
    else:
        board_jobs = {}

    is_index = basequery is None
    if basequery is None and not (g.user or g.kiosk or (board and not board.require_login)):
        showall = False
        batched = False

    # `query_string` is user-supplied
    # `search_query` is PostgreSQL syntax
    if not query_string:
        query_string = request.args.get('q')
    if query_string:
        search_query = for_tsquery(query_string)
        try:
            # TODO: Can we do syntax validation without a database roundtrip?
            db.session.query(db.func.to_tsquery(search_query)).all()
        except ProgrammingError:
            db.session.rollback()
            g.event_data['search_syntax_error'] = (query_string, search_query)
            if not request.is_xhr:
                flash(_(u"Search terms ignored because this didn’t parse: {query}").format(query=search_query), 'danger')
            search_query = None
    else:
        search_query = None

    fetch = fetch_cached_jobposts if cached else fetch_jobposts
    data = fetch(request.args, request.values, filters, is_index, board,
                 board_jobs, g.kiosk, basequery, md5sum, domain, location,
                 title, showall, statusfilter, batched, ageless,
                 template_vars, search_query, query_string)

    if data['data_filters']:
        # For logging
        g.event_data['filters'] = data['data_filters']

    if g.user:
        g.starred_ids = set(g.user.starred_job_ids(agelimit if not ageless else None))
    else:
        g.starred_ids = set()

    if is_siteadmin or (g.user and g.user.flags.get('is_employer_month')):
        load_viewcounts(data['posts'])
        show_viewcounts = True
    else:
        show_viewcounts = False

    if data['grouped']:
        g.impressions = {
            post.id: (pinflag, post.id, is_bgroup)
            for group in data['grouped'].itervalues()
            for pinflag, post, is_bgroup in group
        }
    elif data['pinsandposts']:
        g.impressions = {
            post.id: (pinflag, post.id, is_bgroup)
            for pinflag, post, is_bgroup in data['pinsandposts']
        }

    if not g.kiosk:
        if g.preview_campaign:
            header_campaign = g.preview_campaign
        else:
            geonameids = g.user_geonameids + data['f_locations']
            header_campaign = Campaign.for_context(CAMPAIGN_POSITION.HEADER, board=g.board, user=g.user,
                anon_user=g.anon_user, geonameids=geonameids)
    else:
        header_campaign = None

    # Test values for development:
    # if not g.user_geonameids:
    #     g.user_geonameids = [1277333, 1277331, 1269750]
    if not location and 'l' not in request.args and g.user_geonameids and (g.user or g.anon_user) and (
            (not g.board.auto_locations) if g.board else True):
        # No location filters? Prompt the user
        ldata = location_geodata(g.user_geonameids)
        location_prompts = [ldata[geonameid] for geonameid in g.user_geonameids if geonameid in ldata]
    else:
        location_prompts = []

    data['header_campaign'] = header_campaign
    data['now'] = now
    data['is_siteadmin'] = is_siteadmin
    data['location_prompts'] = location_prompts
    if data['domain'] and data['domain'] not in db.session:
        data['domain'] = db.session.merge(data['domain'])
    data['show_viewcounts'] = show_viewcounts

    max_counts = get_max_counts()
    data['max_impressions'] = max_counts['max_impressions']
    data['max_views'] = max_counts['max_views']
    data['max_opens'] = max_counts['max_opens']
    data['max_applied'] = max_counts['max_applied']

    if filterset:
        data['filterset'] = filterset

    return data
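
The search-query validation block recurs verbatim in Examples 2, 3 and 4: convert user input with `for_tsquery`, then let PostgreSQL parse it and treat a ProgrammingError as "invalid syntax". If that pattern were factored out, it might look like the sketch below (`db` and `for_tsquery` follow the examples; `parse_search_query` itself is a hypothetical helper):

from sqlalchemy.exc import ProgrammingError

def parse_search_query(db, for_tsquery, query_string):
    """Return PostgreSQL tsquery syntax for user input, or None if it
    does not parse. Validation still costs one database roundtrip."""
    search_query = for_tsquery(query_string)
    try:
        # SELECT to_tsquery(:q) raises ProgrammingError on bad syntax
        db.session.query(db.func.to_tsquery(search_query)).all()
    except ProgrammingError:
        db.session.rollback()
        return None
    return search_query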
Example 4
def index(basequery=None, type=None, category=None, md5sum=None, domain=None,
        location=None, title=None, showall=True, statuses=None, tag=None, batched=True, ageless=False):

    is_index = basequery is None

    now = datetime.utcnow()
    if basequery is None and not (g.user or g.kiosk or (g.board and not g.board.require_login)):
        showall = False
        batched = False

    if basequery is None:
        basequery = JobPost.query

    # Apply request.args filters
    data_filters = {}
    f_types = request.args.getlist('t')
    f_types = [t for t in f_types if t]  # drop empty values
    if f_types:
        data_filters['types'] = f_types
        basequery = basequery.join(JobType).filter(JobType.name.in_(f_types))
    f_categories = request.args.getlist('c')
    f_categories = [c for c in f_categories if c]  # drop empty values
    if f_categories:
        data_filters['categories'] = f_categories
        basequery = basequery.join(JobCategory).filter(JobCategory.name.in_(f_categories))
    r_locations = request.args.getlist('l')
    f_locations = []
    remote_location = getbool(request.args.get('anywhere')) or False
    for rl in r_locations:
        if rl == 'anywhere':
            remote_location = True
        elif rl.isdigit():
            f_locations.append(int(rl))
        elif rl:
            ld = location_geodata(rl)
            if ld:
                f_locations.append(ld['geonameid'])
    remote_location_query = basequery.filter(JobPost.remote_location == True)  # NOQA
    locations_query = basequery.join(JobLocation).filter(JobLocation.geonameid.in_(f_locations))
    if f_locations and remote_location:
        data_filters['locations'] = f_locations
        data_filters['anywhere'] = True
        recency = JobPost.datetime > datetime.utcnow() - agelimit
        basequery = locations_query.filter(recency).union(remote_location_query.filter(recency))
    elif f_locations:
        data_filters['locations'] = f_locations
        basequery = locations_query
    elif remote_location:
        data_filters['anywhere'] = True
        # Only works as a positive filter: you can't search for jobs that are NOT anywhere
        basequery = remote_location_query
    if 'currency' in request.args and request.args['currency'] in CURRENCY:
        currency = request.args['currency']
        data_filters['currency'] = currency
        basequery = basequery.filter(JobPost.pay_currency == currency)
        pay_graph = currency
    else:
        pay_graph = False
    if getbool(request.args.get('equity')):
        # Only works as a positive filter: you can't search for jobs that DON'T pay in equity
        data_filters['equity'] = True
        basequery = basequery.filter(JobPost.pay_equity_min != None)  # NOQA
    if 'pmin' in request.args and 'pmax' in request.args:
        f_min = string_to_number(request.args['pmin'])
        f_max = string_to_number(request.args['pmax'])
        if f_min is not None and f_max is not None:
            data_filters['pay_min'] = f_min
            data_filters['pay_max'] = f_max
            basequery = basequery.filter(JobPost.pay_cash_min < f_max, JobPost.pay_cash_max >= f_min)
    else:
        f_min = None
        f_max = None

    if getbool(request.args.get('archive')):
        ageless = True
        data_filters['archive'] = True
        statuses = POSTSTATUS.ARCHIVED

    search_domains = None
    if request.args.get('q'):
        q = for_tsquery(request.args['q'])
        try:
            # TODO: Can we do syntax validation without a database roundtrip?
            db.session.query(db.func.to_tsquery(q)).all()
        except ProgrammingError:
            db.session.rollback()
            g.event_data['search_syntax_error'] = (request.args['q'], q)
            if not request.is_xhr:
                flash(_(u"Search terms ignored because this didn’t parse: {query}").format(query=q), 'danger')
        else:
            # Query's good? Use it.
            data_filters['query'] = q
            search_domains = Domain.query.filter(
                Domain.search_vector.match(q, postgresql_regconfig='english'), Domain.is_banned == False).options(
                db.load_only('name', 'title', 'logo_url')).all()  # NOQA
            basequery = basequery.filter(JobPost.search_vector.match(q, postgresql_regconfig='english'))

    if data_filters:
        g.event_data['filters'] = data_filters
        showall = True
        batched = True

    # getposts sets g.board_jobs, used below
    posts = getposts(basequery, pinned=True, showall=showall, statuses=statuses, ageless=ageless).all()

    # Cache viewcounts (admin view or not)
    cache_viewcounts(posts)

    if posts:
        employer_name = posts[0].company_name
    else:
        employer_name = u'a single employer'

    if g.user:
        g.starred_ids = set(g.user.starred_job_ids(agelimit))
    else:
        g.starred_ids = set()

    jobpost_ab = session_jobpost_ab()

    # Make lookup slightly faster in the loop below since 'g' is a proxy
    board = g.board
    if board:
        board_jobs = g.board_jobs
    else:
        board_jobs = {}

    if is_index and posts and not g.kiosk:
        # Group posts by email_domain on index page only, when not in kiosk mode
        grouped = OrderedDict()
        for post in posts:
            pinned = post.pinned
            if board is not None:
                blink = board_jobs.get(post.id)  # board_jobs only contains the last 30 days, no archive
                if blink is not None:
                    pinned = blink.pinned
            if pinned:
                # Make pinned posts appear in a group of one
                grouped.setdefault(('s', post.hashid), []).append(
                    (pinned, post, bgroup(jobpost_ab, post)))
            elif post.status == POSTSTATUS.ANNOUNCEMENT:
                # Make announcements also appear in a group of one
                grouped.setdefault(('a', post.hashid), []).append(
                    (pinned, post, bgroup(jobpost_ab, post)))
            elif post.domain.is_webmail:
                grouped.setdefault(('ne', post.md5sum), []).append(
                    (pinned, post, bgroup(jobpost_ab, post)))
            else:
                grouped.setdefault(('nd', post.email_domain), []).append(
                    (pinned, post, bgroup(jobpost_ab, post)))
        pinsandposts = None
    else:
        grouped = None
        if g.board:
            pinsandposts = []
            for post in posts:
                pinned = post.pinned
                if board is not None:
                    blink = board_jobs.get(post.id)  # board_jobs only contains the last 30 days, no archive
                    if blink is not None:
                        pinned = blink.pinned
                pinsandposts.append((pinned, post, bgroup(jobpost_ab, post)))
        else:
            pinsandposts = [(post.pinned, post, bgroup(jobpost_ab, post)) for post in posts]

    # Pick a header campaign (only if not kiosk or an XHR reload)
    pay_graph_data = None
    if not g.kiosk and not request.is_xhr:
        if g.preview_campaign:
            header_campaign = g.preview_campaign
        else:
            if location:
                geonameids = g.user_geonameids + [location['geonameid']]
            else:
                geonameids = g.user_geonameids
            header_campaign = Campaign.for_context(CAMPAIGN_POSITION.HEADER, board=g.board, user=g.user,
                anon_user=g.anon_user, geonameids=geonameids)
        if pay_graph:
            pay_graph_data = make_pay_graph(pay_graph, posts, rmin=f_min, rmax=f_max)
    else:
        header_campaign = None

    loadmore = False
    if batched:
        # Figure out where the batch should start from
        startdate = None
        if 'startdate' in request.values:
            try:
                startdate = parse_isoformat(request.values['startdate'])
            except ValueError:
                pass

        if request.method == 'GET':
            batchsize = 31  # Skipping one for the special stickie that's on all pages
        else:
            batchsize = 32

        # Depending on the display mechanism (grouped or ungrouped), extract the batch
        if grouped:
            if not startdate:
                startindex = 0
            else:
                # Loop through group looking for start of next batch. See below to understand the
                # nesting structure of 'grouped'
                for startindex, row in enumerate(grouped.values()):
                    # Skip examination of pinned listings (having row[0][0] = True)
                    if (not row[0][0]) and row[0][1].datetime < startdate:
                        break

            batch = grouped.items()[startindex:startindex + batchsize]
            if startindex + batchsize < len(grouped):
                # Get the datetime of the last group's first item
                # batch = [((type, domain), [(pinned, post, bgroup), ...])]
                # batch[-1] = ((type, domain), [(pinned, post, bgroup), ...])
                # batch[-1][1] = [(pinned, post, bgroup), ...]
                # batch[-1][1][0] = (pinned, post, bgroup)
                # batch[-1][1][0][1] = post
                loadmore = batch[-1][1][0][1].datetime
            grouped = OrderedDict(batch)
        elif pinsandposts:
            if not startdate:
                startindex = 0
            else:
                for startindex, row in enumerate(pinsandposts):
                    # Skip pinned posts when looking for starting index
                    if (not row[0]) and row[1].datetime < startdate:
                        break

            batch = pinsandposts[startindex:startindex + batchsize]
            if startindex + batchsize < len(pinsandposts):
                # batch = [(pinned, post), ...]
                loadmore = batch[-1][1].datetime
            pinsandposts = batch

    if grouped:
        g.impressions = {
            post.id: (pinflag, post.id, is_bgroup)
            for group in grouped.itervalues()
            for pinflag, post, is_bgroup in group
        }
    elif pinsandposts:
        g.impressions = {
            post.id: (pinflag, post.id, is_bgroup)
            for pinflag, post, is_bgroup in pinsandposts
        }

    return render_template('index.html', pinsandposts=pinsandposts, grouped=grouped, now=now,
                           newlimit=newlimit, jobtype=type, jobcategory=category, title=title,
                           md5sum=md5sum, domain=domain, employer_name=employer_name,
                           location=location, showall=showall, tag=tag, is_index=is_index,
                           header_campaign=header_campaign, loadmore=loadmore,
                           search_domains=search_domains,
                           is_siteadmin=lastuser.has_permission('siteadmin'),
                           pay_graph_data=pay_graph_data)
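
The grouped display in Examples 2 and 4 keys each post with a composite tuple: pinned posts and announcements become singleton groups, webmail posters group by `md5sum`, and company posters by email domain. A self-contained sketch of that keying (the `Post` stand-in and its boolean fields are assumptions; the real code reads `post.status` and `post.domain.is_webmail`):

from collections import OrderedDict, namedtuple

Post = namedtuple('Post', 'hashid md5sum email_domain is_webmail pinned is_announcement')

def group_key(post):
    if post.pinned:
        return ('s', post.hashid)        # singleton group: pinned
    if post.is_announcement:
        return ('a', post.hashid)        # singleton group: announcement
    if post.is_webmail:
        return ('ne', post.md5sum)       # webmail poster: group by hash
    return ('nd', post.email_domain)     # company poster: group by domain

posts = [
    Post('p1', 'm1', 'gmail.com', True, False, False),
    Post('p2', 'm1', 'gmail.com', True, False, False),
    Post('p3', 'm3', 'acme.example', False, False, False),
]
grouped = OrderedDict()
for post in posts:
    grouped.setdefault(group_key(post), []).append(post)
assert len(grouped[('ne', 'm1')]) == 2  # same webmail poster grouped together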