def ajax_place_date_chart(request):
    """
    Returns HTML fragment containing a chart of how many news items
    were added for each day over a short period (length defined by
    constants.DAYS_SHORT_AGGREGATE_TIMEDELTA).

    Expects request.GET['pid'] and request.GET['s'] (a Schema ID).
    """
    manager = get_schema_manager(request)
    try:
        schema = manager.get(id=int(request.GET['s']))
    except (KeyError, ValueError, Schema.DoesNotExist):
        raise Http404('Invalid Schema')
    filters = FilterChain(request=request, schema=schema)
    filters.add_by_place_id(request.GET.get('pid', ''))
    qs = filters.apply()

    # These charts are used on, e.g., the place overview page, where
    # they should be smaller than the ones on the schema_detail view;
    # we don't have room for a full 30 days.
    date_span = constants.DAYS_SHORT_AGGREGATE_TIMEDELTA
    if schema.is_event:
        # Soonest span that includes some items.
        try:
            qs = qs.filter(item_date__gte=today()).order_by('item_date', 'id')
            first_item = qs.values('item_date')[0]
            start_date = first_item['item_date']
        except IndexError:  # No matching items.
            start_date = today()
        end_date = today() + date_span
    else:
        # Most recent span that includes some items.
        try:
            qs = qs.filter(item_date__lte=today()).order_by('-item_date', '-id')
            last_item = qs.values('item_date')[0]
            end_date = last_item['item_date']
        except IndexError:  # No matching items.
            end_date = today()
        start_date = end_date - date_span

    filters.add('date', start_date, end_date)
    counts = filters.apply().date_counts()
    date_chart = get_date_chart([schema], start_date, end_date, {schema.id: counts})[0]
    return render_to_response('db/snippets/date_chart.html', {
        'schema': schema,
        'date_chart': date_chart,
        'filters': filters,
    })
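A minimal sketch of how this view might be exercised, assuming a URL pattern such as /ajax/place-date-chart/ is wired to it (the path and parameter values here are placeholders, not the project's actual routing):

from django.test.client import Client

def fetch_place_date_chart(schema_id, pid):
    # Hypothetical helper: issues the GET request the view expects,
    # passing 's' (a Schema ID) and 'pid' (a place ID).
    client = Client()
    return client.get('/ajax/place-date-chart/',
                      {'s': schema_id, 'pid': pid})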
def ajax_place_lookup_chart(request):
    """
    Returns HTML fragment -- expects request.GET['pid'] and request.GET['sf'] (a SchemaField ID).
    """
    try:
        sf = SchemaField.objects.select_related().get(id=int(request.GET['sf']), schema__is_public=True)
    except (KeyError, ValueError, SchemaField.DoesNotExist):
        raise Http404('Invalid SchemaField')
    filters = FilterChain(request=request, schema=sf.schema)
    filters.add_by_place_id(request.GET.get('pid', ''))
    qs = filters.apply()
    total_count = qs.count()
    top_values = qs.top_lookups(sf, 10)
    return render_to_response('db/snippets/lookup_chart.html', {
        'lookup': {'sf': sf, 'top_values': top_values},
        'total_count': total_count,
        'schema': sf.schema,
        'filters': filters,
    })
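The top_lookups() call above is a project helper that performs a top-N aggregation; a generic sketch of that pattern with the plain Django ORM, against a hypothetical field name (the real helper is more involved than this):

from django.db.models import Count

def top_values(qs, field_name, n=10):
    # Group by the field, count rows per distinct value, and keep
    # the n most frequent -- a stand-in for qs.top_lookups(sf, n).
    return (qs.values(field_name)
              .annotate(total=Count('id'))
              .order_by('-total')[:n])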
def place_detail_timeline(request, *args, **kwargs):
    """
    Recent news OR upcoming events for the given Location or Block.
    """
    context, response = _place_detail_normalize_url(request, *args, **kwargs)
    if response is not None:
        return response

    show_upcoming = kwargs.get('show_upcoming')
    schema_manager = get_schema_manager(request)

    if show_upcoming:
        context['breadcrumbs'] = breadcrumbs.place_detail_upcoming(context)
    else:
        context['breadcrumbs'] = breadcrumbs.place_detail_timeline(context)

    is_latest_page = True
    # Check the query string for the max date to use. Otherwise, fall
    # back to today.
    end_date = today()
    if 'start' in request.GET:
        try:
            end_date = parse_date(request.GET['start'], '%m/%d/%Y')
            is_latest_page = False
        except ValueError:
            raise Http404('Invalid date %s' % request.GET['start'])

    filterchain = FilterChain(request=request, context=context)
    filterchain.add('location', context['place'])
    # As an optimization, limit the NewsItems to those on the
    # last (or next) few days.
    # And only fetch for relevant schemas - either event-ish or not.
    if show_upcoming:
        s_list = schema_manager.filter(is_event=True)
        start_date = end_date
        end_date = start_date + datetime.timedelta(days=settings.DEFAULT_DAYS)
        order_by = 'item_date_date'
    else:
        s_list = schema_manager.filter(is_event=False)
        start_date = end_date - datetime.timedelta(days=settings.DEFAULT_DAYS)
        order_by = '-item_date_date'

    filterchain.add('schema', list(s_list))
    filterchain.add('date', start_date, end_date)
    newsitem_qs = filterchain.apply().select_related()
    # TODO: can this really only be done via extra()?
    newsitem_qs = newsitem_qs.extra(
        select={'item_date_date': 'date(db_newsitem.item_date)'},
        order_by=(order_by, '-schema__importance', 'schema')
    )[:constants.NUM_NEWS_ITEMS_PLACE_DETAIL]

    # We're done filtering, so go ahead and run the query now to avoid
    # executing it multiple times; see
    # http://docs.djangoproject.com/en/dev/topics/db/optimization
    ni_list = list(newsitem_qs)
    schemas_used = list(set([ni.schema for ni in ni_list]))
    s_list = s_list.filter(is_special_report=False, allow_charting=True).order_by('plural_name')
    populate_attributes_if_needed(ni_list, schemas_used)
    if ni_list:
        # Date used as the 'start' query parameter when linking to the
        # next page of results.
        next_day = ni_list[-1].item_date - datetime.timedelta(days=1)
    else:
        next_day = None

    hidden_schema_list = []
    if not request.user.is_anonymous():
        hidden_schema_list = [o.schema for o in HiddenSchema.objects.filter(user_id=request.user.id)]

    context.update({
        'newsitem_list': ni_list,
        'next_day': next_day,
        'is_latest_page': is_latest_page,
        'hidden_schema_list': hidden_schema_list,
        'bodyclass': 'place-detail-timeline',
        'bodyid': context.get('place_type') or '',
        'filters': filterchain,
        'show_upcoming': show_upcoming,
    })
    context['filtered_schema_list'] = s_list
    context['map_configuration'] = _preconfigured_map(context)
    response = eb_render(request, 'db/place_detail.html', context)
    for k, v in context['cookies_to_set'].items():
        response.set_cookie(k, v)
    return response
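Regarding the TODO above: on the Django versions this code targets, extra() was the practical way to order by the date part of a datetime column. A hedged sketch of the equivalent using TruncDate, which is available from Django 1.10 onward and is not used by this codebase:

from django.db.models.functions import TruncDate

newsitem_qs = (filterchain.apply()
               .select_related()
               .annotate(item_date_date=TruncDate('item_date'))
               .order_by(order_by, '-schema__importance', 'schema')
               )[:constants.NUM_NEWS_ITEMS_PLACE_DETAIL]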
def newsitems_geojson(request):
    """Get a list of newsitems, optionally filtered for one place ID
    and/or one schema slug.

    The response body is a GeoJSON string.
    """
    # Note: can't use @cache_page here because that ignores all requests
    # with query parameters (in FetchFromCacheMiddleware.process_request).
    # So, we'll use the low-level cache API.

    # Copy-pasted code from ajax_place_newsitems.  Refactoring target:
    # Seems like there are a number of similar code blocks in
    # ebpub.db.views?

    pid = request.GET.get('pid', '')
    schema = request.GET.get('schema', None)
    if schema is not None:
        schema = get_object_or_404(Schema, slug=schema)

    nid = request.GET.get('newsitem', '')

    newsitem_qs = NewsItem.objects.all()
    if nid:
        newsitem_qs = newsitem_qs.filter(id=nid)
    else:
        filters = FilterChain(request=request, queryset=newsitem_qs, schema=schema)
        if pid:
            filters.add_by_place_id(pid)
        else:
            # Whole city!
            pass

        # More copy/paste from ebpub.db.views...
        # As an optimization, limit the NewsItems to those published in the
        # last few days.
        filters.update_from_query_params(request)
        if 'date' not in filters:
            end_date = today()
            start_date = end_date - datetime.timedelta(days=settings.DEFAULT_DAYS)
            filters.add('date', start_date, end_date)
        newsitem_qs = filters.apply()
        if not has_staff_cookie(request):
            newsitem_qs = newsitem_qs.filter(schema__is_public=True)

        # Put a hard limit on the number of newsitems, and throw away
        # older items.
        newsitem_qs = newsitem_qs.select_related().order_by('-item_date', '-id')
        newsitem_qs = newsitem_qs[:constants.NUM_NEWS_ITEMS_PLACE_DETAIL]

    # Done preparing the query; cache based on the raw SQL
    # to be sure we capture everything that matters.
    cache_seconds = 60 * 5
    cache_key = 'newsitem_geojson:' + _make_cache_key_from_queryset(newsitem_qs)
    output = cache.get(cache_key, None)
    if output is None:
        newsitem_list = list(newsitem_qs)
        output = api_items_geojson(newsitem_list)
        cache.set(cache_key, output, cache_seconds)

    response = HttpResponse(output, mimetype="application/javascript")
    patch_response_headers(response, cache_timeout=cache_seconds)
    return response
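The _make_cache_key_from_queryset() helper is not shown in this section. A minimal sketch of one plausible implementation, assuming the intent is to hash the queryset's compiled SQL so that any change in filters yields a different key (a guess at the helper's intent, not the project's actual code):

import hashlib

def _make_cache_key_from_queryset(qs):
    # str(qs.query) renders the SQL with parameters interpolated, so
    # two querysets share a cache entry only if they would run the
    # same query.
    return hashlib.md5(str(qs.query).encode('utf-8')).hexdigest()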