def ajax_place_date_chart(request):
    """
    Returns HTML fragment containing a chart of how many news items
    were added for each day over a short period (length defined by
    constants.DAYS_SHORT_AGGREGATE_TIMEDELTA).

    Expects request.GET['pid'] and request.GET['s'] (a Schema ID).
    """
    manager = get_schema_manager(request)
    try:
        schema = manager.get(id=int(request.GET['s']))
    except (KeyError, ValueError, Schema.DoesNotExist):
        raise Http404('Invalid Schema')
    filters = FilterChain(request=request, schema=schema)
    filters.add_by_place_id(request.GET.get('pid', ''))
    qs = filters.apply()

    # These charts are used on, e.g., the place overview page; there,
    # they should be smaller than the ones on the schema_detail view;
    # we don't have room for a full 30 days.
    date_span = constants.DAYS_SHORT_AGGREGATE_TIMEDELTA
    if schema.is_event:
        # Soonest span that includes some items.
        try:
            qs = qs.filter(item_date__gte=today()).order_by('item_date', 'id')
            first_item = qs.values('item_date')[0]
            start_date = first_item['item_date']
        except IndexError:  # No matching items.
            start_date = today()
        end_date = today() + date_span
    else:
        # Most recent span that includes some items.
        try:
            qs = qs.filter(item_date__lte=today()).order_by('-item_date', '-id')
            last_item = qs.values('item_date')[0]
            end_date = last_item['item_date']
        except IndexError:  # No matching items.
            end_date = today()
        start_date = end_date - date_span

    filters.add('date', start_date, end_date)
    counts = filters.apply().date_counts()
    date_chart = get_date_chart([schema], start_date, end_date, {schema.id: counts})[0]
    return render_to_response('db/snippets/date_chart.html', {
        'schema': schema,
        'date_chart': date_chart,
        'filters': filters,
    })
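A quick way to exercise this view is Django's test client; a minimal sketch, assuming a hypothetical URL mapping and an 'l:<location-id>' place-id format:

# Sketch: calling ajax_place_date_chart via Django's test client.
# The URL path is hypothetical -- substitute whatever urls.py maps to the
# view. 'pid' identifies the place and 's' is a Schema ID, per the docstring.
from django.test.client import Client

client = Client()
response = client.get('/ajax/place-date-chart/', {'pid': 'l:12', 's': '3'})
assert response.status_code == 200  # 404 would mean an invalid Schema ID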
Example #2
    def items(self, obj):
        # Note that items() returns "packed" tuples instead of objects.
        # This is necessary because we return NewsItems and blog entries,
        # plus different types of NewsItems (bunched vs. unbunched).

        # Limit the feed to all NewsItems published in the last four days.
        # We *do* include items from today in this query, but we'll filter
        # those later in this method so that only today's *uncollapsed* items
        # (schema.can_collapse=False) will be included in the feed. We don't
        # want today's *collapsed* items to be included, because more items
        # might be added to the database before the day is finished, and
        # that would result in the RSS item being updated multiple times, which
        # is annoying.
        today_value = today()
        start_date = today_value - datetime.timedelta(days=4)
        end_date = today_value
        # Note: The pub_date__lt=end_date+(1 day) ensures that we don't miss
        # stuff that has a pub_date of the afternoon of end_date. A straight
        # pub_date__range would miss those items.
        qs = NewsItem.objects.select_related().filter(
            schema__is_public=True,
            pub_date__gte=start_date,
            pub_date__lt=end_date + datetime.timedelta(days=1)).extra(
                select={
                    'pub_date_date': 'date(db_newsitem.pub_date)'
                }).order_by('-pub_date_date', 'schema__id', 'id')

        # Filter out ignored schemas -- those whose slugs are specified in
        # the "ignore" query-string parameter.
        if 'ignore' in self.request.GET:
            schema_slugs = self.request.GET['ignore'].split(',')
            qs = qs.exclude(schema__slug__in=schema_slugs)

        # Filter wanted schemas -- those whose slugs are specified in the
        # "only" query-string parameter.
        if 'only' in self.request.GET:
            schema_slugs = self.request.GET['only'].split(',')
            qs = qs.filter(schema__slug__in=schema_slugs)

        block_radius = self.request.GET.get('radius', BLOCK_RADIUS_DEFAULT)
        if block_radius not in BLOCK_RADIUS_CHOICES:
            raise Http404('Invalid radius')
        ni_list = list(self.newsitems_for_obj(obj, qs, block_radius))
        schema_list = list(set([ni.schema for ni in ni_list]))
        populate_attributes_if_needed(ni_list, schema_list)

        is_block = isinstance(obj, Block)

        # Note that this decorates the results by returning tuples instead of
        # NewsItems. This is necessary because we're bunching.
        for schema_group in bunch_by_date_and_schema(ni_list, today_value):
            schema = schema_group[0].schema
            if schema.can_collapse:
                yield ('newsitem', obj, schema, schema_group, is_block,
                       block_radius)
            else:
                for newsitem in schema_group:
                    yield ('newsitem', obj, schema, newsitem, is_block,
                           block_radius)
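Consumers of the feed unpack those packed tuples; a minimal sketch (the feed and obj objects are assumed, not shown in this snippet):

# Sketch: unpacking the tuples yielded by items(). When schema.can_collapse
# is true the fourth element is a bunched list of NewsItems; otherwise it
# is a single NewsItem.
for item_type, place, schema, payload, is_block, radius in feed.items(obj):
    if schema.can_collapse:
        headline = '%d %s' % (len(payload), schema.plural_name)  # bunched list
    else:
        newsitem = payload  # a single NewsItem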
Example #3
    def items(self, obj):
        # Note that items() returns "packed" tuples instead of objects.
        # This is necessary because we return NewsItems and blog entries,
        # plus different types of NewsItems (bunched vs. unbunched).

        # Limit the feed to all NewsItems dated within five days of today.
        # We *do* include items from today in this query, but we'll filter
        # those later in this method so that only today's *uncollapsed* items
        # (schema.can_collapse=False) will be included in the feed. We don't
        # want today's *collapsed* items to be included, because more items
        # might be added to the database before the day is finished, and
        # that would result in the RSS item being updated multiple times, which
        # is annoying.

        # TODO: re-use ebpub.db.schemafilters for filtering here.

        # TODO: allow user control over date range
        today_value = today()
        start_date = today_value - datetime.timedelta(days=5)
        # Include future stuff, useful for events
        end_date = today_value + datetime.timedelta(days=5)

        qs = (
            NewsItem.objects.select_related()
            .by_request(self.request)
            .filter(item_date__gte=start_date, item_date__lte=end_date)
            .order_by("-item_date", "schema__id", "id")
        )

        # Filter out ignored schemas -- those whose slugs are specified in
        # the "ignore" query-string parameter.
        if "ignore" in self.request.GET:
            schema_slugs = self.request.GET["ignore"].split(",")
            qs = qs.exclude(schema__slug__in=schema_slugs)

        # Filter wanted schemas -- those whose slugs are specified in the
        # "only" query-string parameter.
        if "only" in self.request.GET:
            schema_slugs = self.request.GET["only"].split(",")
            qs = qs.filter(schema__slug__in=schema_slugs)

        block_radius = self.request.GET.get("radius", BLOCK_RADIUS_DEFAULT)
        if block_radius not in BLOCK_RADIUS_CHOICES:
            raise Http404("Invalid radius")
        ni_list = list(self.newsitems_for_obj(obj, qs, block_radius))
        schema_list = list(set([ni.schema for ni in ni_list]))
        populate_attributes_if_needed(ni_list, schema_list)

        is_block = isinstance(obj, Block)

        # Note that this decorates the results by returning tuples instead of
        # NewsItems. This is necessary because we're bunching.
        for schema_group in bunch_by_date_and_schema(ni_list, today_value):
            schema = schema_group[0].schema
            if schema.can_collapse:
                yield ("newsitem", obj, schema, schema_group, is_block, block_radius)
            else:
                for newsitem in schema_group:
                    yield ("newsitem", obj, schema, newsitem, is_block, block_radius)
Example #4
def _default_date_filtering(filterchain):
    """
    Make sure we do some date limiting, but don't force a
    DateFilter into the filterchain, because that would prevent
    users from choosing dates.
    """
    schema = filterchain['schema'].schema
    date_filter = filterchain.get('date') or filterchain.get('pubdate')
    qs = filterchain.apply()
    if date_filter:
        start_date = date_filter.start_date
        end_date = date_filter.end_date
    else:
        if schema.is_event:
            start_date = today()
            end_date = start_date + datetime.timedelta(days=30)
        else:
            start_date = schema.min_date
            end_date = today()
        qs = qs.filter(item_date__gte=start_date,
                       item_date__lte=end_date)
    return qs, start_date, end_date
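A sketch of how a view might call this helper, mirroring the FilterChain usage in the views above (assuming, as those views suggest, that constructing the chain with schema= populates its 'schema' entry, which the helper reads):

# Sketch: applying default date limits from a view.
filterchain = FilterChain(request=request, schema=schema)
qs, start_date, end_date = _default_date_filtering(filterchain)
recent_count = qs.count()  # only items inside the effective date window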
Example #5
    def items(self, obj):
        # Note that items() returns "packed" tuples instead of objects.
        # This is necessary because we return NewsItems and blog entries,
        # plus different types of NewsItems (bunched vs. unbunched).

        # Limit the feed to all NewsItems published in the last four days.
        # We *do* include items from today in this query, but we'll filter
        # those later in this method so that only today's *uncollapsed* items
        # (schema.can_collapse=False) will be included in the feed. We don't
        # want today's *collapsed* items to be included, because more items
        # might be added to the database before the day is finished, and
        # that would result in the RSS item being updated multiple times, which
        # is annoying.
        today_value = today()
        start_date = today_value - datetime.timedelta(days=4)
        end_date = today_value
        # Note: The pub_date__lt=end_date+(1 day) ensures that we don't miss
        # stuff that has a pub_date of the afternoon of end_date. A straight
        # pub_date__range would miss those items.
        qs = NewsItem.objects.select_related().filter(
            schema__is_public=True,
            pub_date__gte=start_date,
            pub_date__lt=end_date + datetime.timedelta(days=1)).extra(
                select={
                    'pub_date_date': 'date(db_newsitem.pub_date)'
                }).order_by('-pub_date_date', 'schema__id', 'id')

        # Filter out ignored schemas -- those whose slugs are specified in
        # the "ignore" query-string parameter.
        if 'ignore' in self.request.GET:
            schema_slugs = self.request.GET['ignore'].split(',')
            qs = qs.exclude(schema__slug__in=schema_slugs)

        # Filter wanted schemas -- those whose slugs are specified in the
        # "only" query-string parameter.
        if 'only' in self.request.GET:
            schema_slugs = self.request.GET['only'].split(',')
            qs = qs.filter(schema__slug__in=schema_slugs)

        block_radius = self.request.GET.get('radius', BLOCK_RADIUS_DEFAULT)
        if block_radius not in BLOCK_RADIUS_CHOICES:
            raise Http404('Invalid radius')
        ni_list = list(self.newsitems_for_obj(obj, qs, block_radius))
        schema_list = list(set([ni.schema for ni in ni_list]))
        populate_attributes_if_needed(ni_list, schema_list)

        is_block = isinstance(obj, Block)

        # Note that this decorates the results by returning tuples instead of
        # NewsItems. This is necessary because we're bunching.
        for schema_group in bunch_by_date_and_schema(ni_list, today_value):
            schema = schema_group[0].schema
            if schema.can_collapse:
                yield ('newsitem', obj, schema, schema_group, is_block, block_radius)
            else:
                for newsitem in schema_group:
                    yield ('newsitem', obj, schema, newsitem, is_block, block_radius)
Example #6
def map_context(request):
    """
    Context variables needed on pages that use maps.
    """
    # XXX TODO: can we slim or at least version the olwidget JS & CSS?
    # note they are set as settings.OLWIDGET_JS and settings.OLWIDGET_CSS,
    # could possibly munge those?
    return {'OPENLAYERS_IMG_PATH': settings.OPENLAYERS_IMG_PATH,
            'JQUERY_URL': settings.JQUERY_URL,
            'MAP_MEDIA_HTML': _get_map_media,
            'MAP_CUSTOM_BASE_LAYERS': _get_extra_layers,
            'MAP_BASELAYER_TYPE': settings.MAP_BASELAYER_TYPE,
            'alerts_installed': 'ebpub.alerts' in settings.INSTALLED_APPS,
            'today': today(),
            }
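map_context is a Django template context processor, so it only takes effect once listed in settings; a sketch, with a hypothetical dotted path:

# Sketch: enabling the context processor (Django 1.x-era setting name).
# The dotted path to map_context is an assumption about this module's location.
TEMPLATE_CONTEXT_PROCESSORS = (
    'django.core.context_processors.request',
    'ebpub.db.context_processors.map_context',  # hypothetical path
)
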
def homepage(request):
    """Front page of the default OpenBlock theme.
    """

    end_date = today()
    start_date = end_date - datetime.timedelta(days=settings.DEFAULT_DAYS)
    end_date += datetime.timedelta(days=1)

    manager = get_schema_manager(request)
    sparkline_schemas = list(manager.filter(allow_charting=True, is_special_report=False))

    # Order by slug to ensure case-insensitive ordering. (Kind of hackish.)
    lt_list = LocationType.objects.filter(is_significant=True).order_by('slug').extra(
        select={'count': 'select count(*) from db_location '
                         'where is_public=True and location_type_id=db_locationtype.id'})
    street_count = Street.objects.count()
    more_schemas = manager.filter(allow_charting=False).order_by('name')

    # Get the public records.
    date_charts = get_date_chart_agg_model(sparkline_schemas, start_date, end_date, AggregateDay)
    empty_date_charts, non_empty_date_charts = [], []
    for chart in date_charts:
        if chart['total_count']:
            non_empty_date_charts.append(chart)
        else:
            empty_date_charts.append(chart)
    non_empty_date_charts.sort(key=lambda chart: chart['total_count'], reverse=True)
    empty_date_charts.sort(key=lambda chart: chart['schema'].plural_name)

    return eb_render(request, 'homepage.html', {
        'location_type_list': lt_list,
        'street_count': street_count,
        'more_schemas': more_schemas,
        'non_empty_date_charts': non_empty_date_charts,
        'empty_date_charts': empty_date_charts,
        'num_days': settings.DEFAULT_DAYS,
        'default_lon': settings.DEFAULT_MAP_CENTER_LON,
        'default_lat': settings.DEFAULT_MAP_CENTER_LAT,
        'default_zoom': settings.DEFAULT_MAP_ZOOM,
        'bodyclass': 'homepage',
        'breadcrumbs': breadcrumbs.home({}),
        'map_configuration': _preconfigured_map({})
    })
Example #8
def friendlydate(value):
    """
    A date string that includes 'Today' or 'Yesterday' if relevant,
    or the day of the week if it's within the past week,
    otherwise just the date.

    Examples:

    >>> import mock, datetime
    >>> with mock.patch('ebpub.db.templatetags.eb.today', lambda: datetime.date(2011, 8, 15)):
    ...     print friendlydate(datetime.date(2011, 8, 15))
    ...     print friendlydate(datetime.date(2011, 8, 14))
    ...     print friendlydate(datetime.date(2011, 8, 13))
    ...     print friendlydate(datetime.date(2011, 8, 9))
    ...     print friendlydate(datetime.date(2011, 8, 8))
    ...
    Today August 15, 2011
    Yesterday August 14, 2011
    Saturday August 13, 2011
    Tuesday August 9, 2011
    August 8, 2011
    """
    try:  # Convert to a datetime.date, if it's a datetime.datetime.
        value = value.date()
    except AttributeError:
        pass
    # Using value.day because strftime('%d') is zero-padded and we don't want that.
    # TODO: parameterize format to allow i18n?
    formatted_date = value.strftime('%B ') + unicode(
        value.day) + value.strftime(', %Y')
    _today = today()
    if value == _today:
        return 'Today %s' % formatted_date
    elif value == _today - datetime.timedelta(1):
        return 'Yesterday %s' % formatted_date
    elif _today - value <= datetime.timedelta(6):
        return '%s %s' % (value.strftime('%A'), formatted_date)
    return formatted_date
Example #9
def friendlydate(value):
    """
    A date string that includes 'Today' or 'Yesterday' if relevant,
    or the day of the week if it's within the past week,
    otherwise just the date.

    Examples:

    >>> import mock, datetime
    >>> with mock.patch('ebpub.db.templatetags.eb.today', lambda: datetime.date(2011, 8, 15)):
    ...     print friendlydate(datetime.date(2011, 8, 15))
    ...     print friendlydate(datetime.date(2011, 8, 14))
    ...     print friendlydate(datetime.date(2011, 8, 13))
    ...     print friendlydate(datetime.date(2011, 8, 9))
    ...     print friendlydate(datetime.date(2011, 8, 8))
    ...
    Today August 15, 2011
    Yesterday August 14, 2011
    Saturday August 13, 2011
    Tuesday August 9, 2011
    August 8, 2011
    """
    try:  # Convert to a datetime.date, if it's a datetime.datetime.
        value = value.date()
    except AttributeError:
        pass
    # Using value.day because strftime('%d') is zero-padded and we don't want that.
    # TODO: parameterize format to allow i18n?
    formatted_date = value.strftime('%B ') + unicode(value.day) + value.strftime(', %Y')
    _today = today()
    if value == _today:
        return 'Today %s' % formatted_date
    elif value == _today - datetime.timedelta(1):
        return 'Yesterday %s' % formatted_date
    elif _today - value <= datetime.timedelta(6):
        return '%s %s' % (value.strftime('%A'), formatted_date)
    return formatted_date
Example #10
def update_aggregates(schema_id_or_slug, dry_run=False):
    """
    Updates all Aggregate* tables for the given schema_id/slug,
    deleting/updating the existing records if necessary.

    If dry_run is True, then the records won't be updated -- only the SQL
    will be output.
    """
    if not str(schema_id_or_slug).isdigit():
        schema_id = Schema.objects.get(slug=schema_id_or_slug).id
    else:
        schema_id = schema_id_or_slug
    cursor = connection.cursor()

    # AggregateAll
    cursor.execute("SELECT COUNT(*) FROM db_newsitem WHERE schema_id = %s", (schema_id,))
    new_values = [{'total': row[0]} for row in cursor.fetchall()]
    smart_update(cursor, new_values, AggregateAll._meta.db_table, ('total',), (), {'schema_id': schema_id}, dry_run=dry_run)

    # AggregateDay
    cursor.execute("""
        SELECT item_date, COUNT(*)
        FROM db_newsitem
        WHERE schema_id = %s
        GROUP BY 1""", (schema_id,))
    new_values = [{'date_part': row[0], 'total': row[1]} for row in cursor.fetchall()]
    smart_update(cursor, new_values, AggregateDay._meta.db_table, ('date_part', 'total'), ('date_part',), {'schema_id': schema_id}, dry_run=dry_run)

    # AggregateLocationDay
    cursor.execute("""
        SELECT nl.location_id, ni.item_date, loc.location_type_id, COUNT(*)
        FROM db_newsitemlocation nl, db_newsitem ni, db_location loc
        WHERE nl.news_item_id = ni.id
            AND ni.schema_id = %s
            AND nl.location_id = loc.id
        GROUP BY 1, 2, 3""", (schema_id,))
    new_values = [{'location_id': row[0], 'date_part': row[1], 'location_type_id': row[2], 'total': row[3]} for row in cursor.fetchall()]
    smart_update(cursor, new_values, AggregateLocationDay._meta.db_table, ('location_id', 'date_part', 'location_type_id', 'total'), ('location_id', 'date_part', 'location_type_id'), {'schema_id': schema_id}, dry_run=dry_run)

    # AggregateLocation
    # This query is a bit clever -- we just sum up the totals created in a
    # previous aggregate. It's a helpful optimization, because otherwise
    # the location query is way too slow.
    # Note that we calculate the total for the last 30 days that had at least
    # one news item -- *NOT* the last 30 days, period.
    # We add date_part <= current_date here to keep sparse items in the future
    # from throwing off counts for the previous 30 days.
    cursor.execute("SELECT date_part FROM %s WHERE schema_id = %%s AND date_part <= current_date ORDER BY date_part DESC LIMIT 1" % \
        AggregateLocationDay._meta.db_table, (schema_id,))
    try:
        end_date = cursor.fetchone()[0]
    except TypeError: # if cursor.fetchone() is None, there are no records.
        pass
    else:
        start_date = end_date - datetime.timedelta(days=settings.DEFAULT_DAYS)
        cursor.execute("""
            SELECT location_id, location_type_id, SUM(total)
            FROM %s
            WHERE schema_id = %%s
                AND date_part BETWEEN %%s AND %%s
            GROUP BY 1, 2""" % AggregateLocationDay._meta.db_table,
                (schema_id, start_date, end_date))
        new_values = [{'location_id': row[0], 'location_type_id': row[1], 'total': row[2]} for row in cursor.fetchall()]
        smart_update(cursor, new_values, AggregateLocation._meta.db_table, ('location_id', 'location_type_id', 'total'), ('location_id', 'location_type_id'), {'schema_id': schema_id}, dry_run=dry_run)

    for sf in SchemaField.objects.filter(schema__id=schema_id, is_filter=True, is_lookup=True):
        try:
            end_date = NewsItem.objects.filter(schema__id=schema_id, item_date__lte=today()).values_list('item_date', flat=True).order_by('-item_date')[0]
        except IndexError:
            continue # There have been no NewsItems in the given date range.
        start_date = end_date - datetime.timedelta(days=constants.NUM_DAYS_AGGREGATE)

        if sf.is_many_to_many_lookup():
            # AggregateFieldLookup
            cursor.execute("""
                SELECT id, (
                    SELECT COUNT(*) FROM db_attribute a, db_newsitem ni
                    WHERE a.news_item_id = ni.id
                        AND a.schema_id = %%s
                        AND ni.schema_id = %%s
                        AND a.%s ~ ('[[:<:]]' || db_lookup.id || '[[:>:]]')
                        AND ni.item_date BETWEEN %%s AND %%s
                )
                FROM db_lookup
                WHERE schema_field_id = %%s""" % sf.real_name, (schema_id, schema_id, start_date, end_date, sf.id))
            new_values = [{'lookup_id': row[0], 'total': row[1]} for row in cursor.fetchall()]
            smart_update(cursor, new_values, AggregateFieldLookup._meta.db_table, ('lookup_id', 'total'), ('lookup_id',), {'schema_id': schema_id, 'schema_field_id': sf.id}, dry_run=dry_run)
        else:
            # AggregateFieldLookup
            cursor.execute("""
                SELECT a.%s, COUNT(*)
                FROM db_attribute a, db_newsitem ni
                WHERE a.news_item_id = ni.id
                    AND a.schema_id = %%s
                    AND ni.schema_id = %%s
                    AND %s IS NOT NULL
                    AND ni.item_date BETWEEN %%s AND %%s
                GROUP BY 1""" % (sf.real_name, sf.real_name), (schema_id, schema_id, start_date, end_date))
            new_values = [{'lookup_id': row[0], 'total': row[1]} for row in cursor.fetchall()]
            smart_update(cursor, new_values, AggregateFieldLookup._meta.db_table, ('lookup_id', 'total'), ('lookup_id',), {'schema_id': schema_id, 'schema_field_id': sf.id}, dry_run=dry_run)

    transaction.commit_unless_managed()
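Per the docstring, dry_run lets you preview the generated SQL without touching the Aggregate* tables; the slug below is just an example:

# Sketch: previewing vs. applying aggregate updates for one schema.
update_aggregates('crime', dry_run=True)  # by slug; prints SQL, writes nothing
update_aggregates(3)                      # by numeric schema id; updates tables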
Example #11
def update_aggregates(schema_id_or_slug, dry_run=False):
    """
    Updates all Aggregate* tables for the given schema_id/slug,
    deleting/updating the existing records if necessary.

    If dry_run is True, then the records won't be updated -- only the SQL
    will be output.
    """
    if not str(schema_id_or_slug).isdigit():
        schema_id = Schema.objects.get(slug=schema_id_or_slug).id
    else:
        schema_id = schema_id_or_slug
    cursor = connection.cursor()

    # AggregateAll
    cursor.execute("SELECT COUNT(*) FROM db_newsitem WHERE schema_id = %s",
                   (schema_id, ))
    new_values = [{'total': row[0]} for row in cursor.fetchall()]
    smart_update(cursor,
                 new_values,
                 AggregateAll._meta.db_table, ('total', ), (),
                 {'schema_id': schema_id},
                 dry_run=dry_run)

    # AggregateDay
    cursor.execute(
        """
        SELECT item_date, COUNT(*)
        FROM db_newsitem
        WHERE schema_id = %s
        GROUP BY 1""", (schema_id, ))
    new_values = [{
        'date_part': row[0],
        'total': row[1]
    } for row in cursor.fetchall()]
    smart_update(cursor,
                 new_values,
                 AggregateDay._meta.db_table, ('date_part', 'total'),
                 ('date_part', ), {'schema_id': schema_id},
                 dry_run=dry_run)

    # AggregateLocationDay
    cursor.execute(
        """
        SELECT nl.location_id, ni.item_date, loc.location_type_id, COUNT(*)
        FROM db_newsitemlocation nl, db_newsitem ni, db_location loc
        WHERE nl.news_item_id = ni.id
            AND ni.schema_id = %s
            AND nl.location_id = loc.id
        GROUP BY 1, 2, 3""", (schema_id, ))
    new_values = [{
        'location_id': row[0],
        'date_part': row[1],
        'location_type_id': row[2],
        'total': row[3]
    } for row in cursor.fetchall()]
    smart_update(cursor,
                 new_values,
                 AggregateLocationDay._meta.db_table,
                 ('location_id', 'date_part', 'location_type_id', 'total'),
                 ('location_id', 'date_part', 'location_type_id'),
                 {'schema_id': schema_id},
                 dry_run=dry_run)

    # AggregateLocation
    # This query is a bit clever -- we just sum up the totals created in a
    # previous aggregate. It's a helpful optimization, because otherwise
    # the location query is way too slow.
    # Note that we calculate the total for the last 30 days that had at least
    # one news item -- *NOT* the last 30 days, period.
    # We add date_part <= current_date here to keep sparse items in the future
    # from throwing off counts for the previous 30 days.
    cursor.execute("SELECT date_part FROM %s WHERE schema_id = %%s AND date_part <= current_date ORDER BY date_part DESC LIMIT 1" % \
        AggregateLocationDay._meta.db_table, (schema_id,))
    try:
        end_date = cursor.fetchone()[0]
    except TypeError:  # if cursor.fetchone() is None, there are no records.
        pass
    else:
        start_date = end_date - datetime.timedelta(days=settings.DEFAULT_DAYS)
        cursor.execute(
            """
            SELECT location_id, location_type_id, SUM(total)
            FROM %s
            WHERE schema_id = %%s
                AND date_part BETWEEN %%s AND %%s
            GROUP BY 1, 2""" % AggregateLocationDay._meta.db_table,
            (schema_id, start_date, end_date))
        new_values = [{
            'location_id': row[0],
            'location_type_id': row[1],
            'total': row[2]
        } for row in cursor.fetchall()]
        smart_update(cursor,
                     new_values,
                     AggregateLocation._meta.db_table,
                     ('location_id', 'location_type_id', 'total'),
                     ('location_id', 'location_type_id'),
                     {'schema_id': schema_id},
                     dry_run=dry_run)

    for sf in SchemaField.objects.filter(schema__id=schema_id,
                                         is_filter=True,
                                         is_lookup=True):
        try:
            end_date = NewsItem.objects.filter(
                schema__id=schema_id, item_date__lte=today()).values_list(
                    'item_date', flat=True).order_by('-item_date')[0]
        except IndexError:
            continue  # There have been no NewsItems in the given date range.
        start_date = end_date - datetime.timedelta(
            days=constants.NUM_DAYS_AGGREGATE)

        if sf.is_many_to_many_lookup():
            # AggregateFieldLookup
            cursor.execute(
                """
                SELECT id, (
                    SELECT COUNT(*) FROM db_attribute a, db_newsitem ni
                    WHERE a.news_item_id = ni.id
                        AND a.schema_id = %%s
                        AND ni.schema_id = %%s
                        AND a.%s ~ ('[[:<:]]' || db_lookup.id || '[[:>:]]')
                        AND ni.item_date BETWEEN %%s AND %%s
                )
                FROM db_lookup
                WHERE schema_field_id = %%s""" % sf.real_name,
                (schema_id, schema_id, start_date, end_date, sf.id))
            new_values = [{
                'lookup_id': row[0],
                'total': row[1]
            } for row in cursor.fetchall()]
            smart_update(cursor,
                         new_values,
                         AggregateFieldLookup._meta.db_table,
                         ('lookup_id', 'total'), ('lookup_id', ), {
                             'schema_id': schema_id,
                             'schema_field_id': sf.id
                         },
                         dry_run=dry_run)
        else:
            # AggregateFieldLookup
            cursor.execute(
                """
                SELECT a.%s, COUNT(*)
                FROM db_attribute a, db_newsitem ni
                WHERE a.news_item_id = ni.id
                    AND a.schema_id = %%s
                    AND ni.schema_id = %%s
                    AND %s IS NOT NULL
                    AND ni.item_date BETWEEN %%s AND %%s
                GROUP BY 1""" % (sf.real_name, sf.real_name),
                (schema_id, schema_id, start_date, end_date))
            new_values = [{
                'lookup_id': row[0],
                'total': row[1]
            } for row in cursor.fetchall()]
            smart_update(cursor,
                         new_values,
                         AggregateFieldLookup._meta.db_table,
                         ('lookup_id', 'total'), ('lookup_id', ), {
                             'schema_id': schema_id,
                             'schema_field_id': sf.id
                         },
                         dry_run=dry_run)

    transaction.commit_unless_managed()
Example #12
def place_detail_timeline(request, *args, **kwargs):
    """
    Recent news OR upcoming events for the given Location or Block.
    """
    context, response = _place_detail_normalize_url(request, *args, **kwargs)
    if response is not None:
        return response

    show_upcoming = kwargs.get('show_upcoming')
    schema_manager = get_schema_manager(request)

    if show_upcoming:
        context['breadcrumbs'] = breadcrumbs.place_detail_upcoming(context)
    else:
        context['breadcrumbs'] = breadcrumbs.place_detail_timeline(context)

    is_latest_page = True
    # Check the query string for the max date to use. Otherwise, fall
    # back to today.
    end_date = today()
    if 'start' in request.GET:
        try:
            end_date = parse_date(request.GET['start'], '%m/%d/%Y')
            is_latest_page = False
        except ValueError:
            raise Http404('Invalid date %s' % request.GET['start'])

    filterchain = FilterChain(request=request, context=context)
    filterchain.add('location', context['place'])
    # As an optimization, limit the NewsItems to those on the
    # last (or next) few days.
    # And only fetch for relevant schemas - either event-ish or not.
    if show_upcoming:
        s_list = schema_manager.filter(is_event=True)
        start_date = end_date
        end_date = start_date + datetime.timedelta(days=settings.DEFAULT_DAYS)
        order_by = 'item_date_date'
    else:
        s_list = schema_manager.filter(is_event=False)
        start_date = end_date - datetime.timedelta(days=settings.DEFAULT_DAYS)
        order_by = '-item_date_date'

    filterchain.add('schema', list(s_list))
    filterchain.add('date', start_date, end_date)
    newsitem_qs = filterchain.apply().select_related()
    # TODO: can this really only be done via extra()?
    newsitem_qs = newsitem_qs.extra(
        select={'item_date_date': 'date(db_newsitem.item_date)'},
        order_by=(order_by, '-schema__importance', 'schema')
    )[:constants.NUM_NEWS_ITEMS_PLACE_DETAIL]

    # We're done filtering, so go ahead and do the query, to
    # avoid running it multiple times,
    # per http://docs.djangoproject.com/en/dev/topics/db/optimization
    ni_list = list(newsitem_qs)
    schemas_used = list(set([ni.schema for ni in ni_list]))
    s_list = s_list.filter(is_special_report=False, allow_charting=True).order_by('plural_name')
    populate_attributes_if_needed(ni_list, schemas_used)
    if ni_list:
        next_day = ni_list[-1].item_date - datetime.timedelta(days=1)
    else:
        next_day = None

    hidden_schema_list = []
    if not request.user.is_anonymous():
        hidden_schema_list = [o.schema for o in HiddenSchema.objects.filter(user_id=request.user.id)]

    context.update({
        'newsitem_list': ni_list,
        'next_day': next_day,
        'is_latest_page': is_latest_page,
        'hidden_schema_list': hidden_schema_list,
        'bodyclass': 'place-detail-timeline',
        'bodyid': context.get('place_type') or '',
        'filters': filterchain,
        'show_upcoming': show_upcoming,
    })

    context['filtered_schema_list'] = s_list
    context['map_configuration'] = _preconfigured_map(context)
    response = eb_render(request, 'db/place_detail.html', context)
    for k, v in context['cookies_to_set'].items():
        response.set_cookie(k, v)
    return response
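Paging backward through the timeline works via the 'start' query parameter, which the view parses with '%m/%d/%Y'; a sketch with a hypothetical URL:

# Sketch: requesting an older page of the timeline.
from django.test.client import Client

response = Client().get('/locations/neighborhoods/downtown/',  # hypothetical path
                        {'start': '08/15/2011'})
assert response.status_code == 200  # a malformed date would 404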
Example #13
def schema_detail(request, slug):
    s = get_object_or_404(get_schema_manager(request), slug=slug)
    if s.is_special_report:
        return schema_detail_special_report(request, s)

    location_type_list = LocationType.objects.filter(is_significant=True).order_by('slug')
    if s.allow_charting:
        # For the date range, the end_date is the last non-future date
        # with at least one NewsItem.
        try:
            end_date = NewsItem.objects.filter(
                schema__id=s.id, item_date__lte=today()).values_list(
                    'item_date', flat=True).order_by('-item_date')[0]
        except IndexError:
            latest_dates = ()
            date_chart = {}
            start_date = end_date = None
        else:
            start_date = end_date - constants.DAYS_AGGREGATE_TIMEDELTA
            date_chart = get_date_chart_agg_model([s], start_date, end_date, AggregateDay)[0]
            latest_dates = [date['date'] for date in date_chart['dates'] if date['count']]

        # Populate schemafield_list and lookup_list.
        schemafield_list = list(s.schemafield_set.filter(is_filter=True).order_by('display_order'))
        # XXX this duplicates part of schema_filter()
        LOOKUP_MIN_DISPLAYED = 7
        LOOKUP_BUFFER = 4
        lookup_list = []
        for sf in schemafield_list:
            if not (sf.is_charted and sf.is_lookup):
                continue
            top_values = list(
                AggregateFieldLookup.objects.filter(schema_field__id=sf.id)
                .select_related('lookup')
                .order_by('-total')[:LOOKUP_MIN_DISPLAYED + LOOKUP_BUFFER])
            if len(top_values) == LOOKUP_MIN_DISPLAYED + LOOKUP_BUFFER:
                top_values = top_values[:LOOKUP_MIN_DISPLAYED]
                has_more = True
            else:
                has_more = False
            lookup_list.append({'sf': sf, 'top_values': top_values, 'has_more': has_more})

        location_chartfield_list = []

        # Populate location_chartfield_list.
        for lt in location_type_list:
            # Collect the locations in the location_type here so we don't have
            # to query them again in the select_related() below.
            locations = dict([(loc.id, loc) for loc in lt.location_set.iterator()])

            ni_totals = AggregateLocation.objects.filter(
                schema__id=s.id,
                location_type__id=lt.id,
                location__is_public=True).select_related('location').order_by('-total')

            if ni_totals:  # This runs the query.
                known_count = reduce(operator.add, (n.total for n in ni_totals))
                total_count = date_chart.get('total_count', 0)
                unknown_count = max(0, total_count - known_count)
                location_chartfield_list.append({'location_type': lt, 'locations': ni_totals[:9], 'unknown': unknown_count})
        ni_list = ()
    else:
        date_chart = {}
        latest_dates = schemafield_list = lookup_list = location_chartfield_list = ()
        ni_list = list(NewsItem.objects.filter(schema__id=s.id).order_by('-item_date', '-id')[:30])
        populate_schema(ni_list, s)
        populate_attributes_if_needed(ni_list, [s])

    textsearch_sf_list = list(SchemaField.objects.filter(schema__id=s.id, is_searchable=True).order_by('display_order'))
    boolean_lookup_list = [sf for sf in SchemaField.objects.filter(schema__id=s.id, is_filter=True, is_lookup=False).order_by('display_order') if sf.is_type('bool')]

    templates_to_try = ('db/schema_detail/%s.html' % s.slug, 'db/schema_detail.html')

    # The HIDE_SCHEMA_INTRO_COOKIE_NAME cookie is a comma-separated list of
    # schema IDs for schemas whose intro text should *not* be displayed.
    hide_intro = str(s.id) in request.COOKIES.get(HIDE_SCHEMA_INTRO_COOKIE_NAME, '').split(',')

    context = {
        'schema': s,
        'schemafield_list': schemafield_list,
        'location_type_list': location_type_list,
        'date_chart': date_chart,
        'lookup_list': lookup_list,
        'location_chartfield_list': location_chartfield_list,
        'boolean_lookup_list': boolean_lookup_list,
        'search_list': textsearch_sf_list,
        'newsitem_list': ni_list,
        'latest_dates': latest_dates[-3:],
        'hide_intro': hide_intro,
        'hide_intro_cookie_name': HIDE_SCHEMA_INTRO_COOKIE_NAME,
        'start_date': s.min_date,
        'end_date': today(),
        'bodyclass': 'schema-detail',
        'bodyid': slug,
        'filters': FilterChain(schema=s),
    }
    context['breadcrumbs'] = breadcrumbs.schema_detail(context)
    return eb_render(request, templates_to_try, context)
Example #14
def newsitems_geojson(request):
    """Get a list of newsitems, optionally filtered for one place ID
    and/or one schema slug.

    Response is a geojson string.
    """
    # Note: can't use @cache_page here because that ignores all requests
    # with query parameters (in FetchFromCacheMiddleware.process_request).
    # So, we'll use the low-level cache API.

    # Copy-pasted code from ajax_place_newsitems.  Refactoring target:
    # Seems like there are a number of similar code blocks in
    # ebpub.db.views?

    pid = request.GET.get('pid', '')
    schema = request.GET.get('schema', None)
    if schema is not None:
        schema = get_object_or_404(Schema, slug=schema)

    nid = request.GET.get('newsitem', '')

    newsitem_qs = NewsItem.objects.all()
    if nid:
        newsitem_qs = newsitem_qs.filter(id=nid)
    else:
        filters = FilterChain(request=request, queryset=newsitem_qs, schema=schema)
        if pid:
            filters.add_by_place_id(pid)
        else:
            # Whole city!
            pass

        # More copy/paste from ebpub.db.views...
        # As an optimization, limit the NewsItems to those published in the
        # last few days.
        filters.update_from_query_params(request)
        if 'date' not in filters:
            end_date = today()
            start_date = end_date - datetime.timedelta(days=settings.DEFAULT_DAYS)
            filters.add('date', start_date, end_date)
        newsitem_qs = filters.apply()
        if not has_staff_cookie(request):
            newsitem_qs = newsitem_qs.filter(schema__is_public=True)

        # Put a hard limit on the number of newsitems, and throw away
        # older items.
        newsitem_qs = newsitem_qs.select_related().order_by('-item_date', '-id')
        newsitem_qs = newsitem_qs[:constants.NUM_NEWS_ITEMS_PLACE_DETAIL]

    # Done preparing the query; cache based on the raw SQL
    # to be sure we capture everything that matters.
    cache_seconds = 60 * 5
    cache_key = 'newsitem_geojson:' + _make_cache_key_from_queryset(newsitem_qs)
    output = cache.get(cache_key, None)
    if output is None:
        newsitem_list = list(newsitem_qs)
        output = api_items_geojson(newsitem_list)
        cache.set(cache_key, output, cache_seconds)

    response = HttpResponse(output, mimetype="application/javascript")
    patch_response_headers(response, cache_timeout=cache_seconds)
    return response
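A sketch of fetching the GeoJSON feed filtered to one place and schema (hypothetical URL path; the query parameters match what the view reads above):

# Sketch: requesting filtered GeoJSON.
from django.test.client import Client

response = Client().get('/api/newsitems.geojson',  # hypothetical path
                        {'pid': 'l:12', 'schema': 'crime'})
assert response['Content-Type'].startswith('application/javascript')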
Example #15
def place_detail_overview(request, *args, **kwargs):
    context, response = _place_detail_normalize_url(request, *args, **kwargs)
    if response is not None:
        return response
    schema_manager = get_schema_manager(request)
    context['breadcrumbs'] = breadcrumbs.place_detail_overview(context)

    schema_list = SortedDict([(s.id, s) for s in schema_manager.filter(is_special_report=False).order_by('plural_name')])

    # We actually want two lists of schemas, since we care whether
    # they are news-like or future-event-like.
    import copy
    eventish_schema_list = copy.deepcopy(schema_list)
    newsish_schema_list = copy.deepcopy(schema_list)
    for s_id, schema in schema_list.items():
        if schema.is_event:
            del(newsish_schema_list[s_id])
        else:
            del(eventish_schema_list[s_id])

    filterchain = FilterChain(request=request, context=context)
    filterchain.add('location', context['place'])

    # Distinguish between past news and upcoming events.
    # With some preliminary date limiting too.
    filterchain_news = filterchain.copy()
    filterchain_news.add('date',
                         today() - datetime.timedelta(days=90),
                         today())

    filterchain_events = filterchain.copy()
    filterchain_events.add('date',
                           today(),
                           today() + datetime.timedelta(days=60))

    # Ordering by ID ensures consistency across page views.
    newsitem_qs = filterchain_news.apply().order_by('-item_date', '-id')
    events_qs = filterchain_events.apply().order_by('item_date', 'id')

    # Mapping of schema id -> [schemafields], for building Lookup charts.
    sf_dict = {}
    charted_lookups = SchemaField.objects.filter(
        is_lookup=True, is_charted=True, schema__is_public=True,
        schema__is_special_report=False)
    charted_lookups = charted_lookups.values('id', 'schema_id', 'pretty_name')
    for sf in charted_lookups.order_by('schema__id', 'display_order'):
        sf_dict.setdefault(sf['schema_id'], []).append(sf)

    # Now retrieve newsitems per schema.
    schema_groups, all_newsitems = [], []
    for schema in schema_list.values():
        if schema.id in newsish_schema_list:
            newsitems = newsitem_qs.filter(schema__id=schema.id)
        elif schema.id in eventish_schema_list:
            newsitems = events_qs.filter(schema__id=schema.id)
        else:
            raise RuntimeError("should never get here")
        newsitems = list(newsitems[:schema.number_in_overview])
        populate_schema(newsitems, schema)
        schema_groups.append({
            'schema': schema,
            'latest_newsitems': newsitems,
            'has_newsitems': bool(newsitems),
            'lookup_charts': sf_dict.get(schema.id),
        })
        all_newsitems.extend(newsitems)
    schema_list = schema_list.values()
    populate_attributes_if_needed(all_newsitems, schema_list)
    schema_list = [s for s in schema_list if s.allow_charting]

    context['schema_groups'] = schema_groups
    context['filtered_schema_list'] = schema_list
    context['bodyclass'] = 'place-detail-overview'
    if context['is_block']:
        context['bodyid'] = '%s-%s-%s' % (context['place'].street_slug,
                                          context['place'].number(),
                                          context['place'].dir_url_bit())
    else:
        context['bodyid'] = context['location'].slug
    response = eb_render(request, 'db/place_overview.html', context)
    for k, v in context['cookies_to_set'].items():
        response.set_cookie(k, v)
    return response