Code example #1
File: run.py Project: serchaos/openspending
    def view(self, dataset, source, id, format='html'):
        # Load the dataset, source and run into the template context
        self._get_run(dataset, source, id)
        # Paginate the system log records, 10 per page
        system = c.run.records.filter_by(category=LogRecord.CATEGORY_SYSTEM)
        c.num_system = system.count()
        c.system_page = templating.Page(
            system.order_by(LogRecord.timestamp.asc()),
            page=self._get_page('system_page'),
            items_per_page=10)
        # Paginate the data log records, 20 per page
        data = c.run.records.filter_by(category=LogRecord.CATEGORY_DATA)
        c.num_data = data.count()
        c.data_page = templating.Page(data.order_by(LogRecord.timestamp.asc()),
                                      page=self._get_page('data_page'),
                                      items_per_page=20)
        return templating.render('run/view.html')
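Both Page calls above follow the same query-pagination pattern: count the filtered records, then slice an ordered query by page number and page size. A minimal sketch of that pattern in plain SQLAlchemy; the paginate helper below is hypothetical (not part of OpenSpending) and only illustrates the offset/limit arithmetic a pager needs:

    def paginate(query, page=1, items_per_page=10):
        # count() issues a COUNT for the total number of records
        total = query.count()
        # offset/limit fetch only the requested page, mirroring the
        # page and items_per_page arguments templating.Page receives
        items = (query.offset((page - 1) * items_per_page)
                      .limit(items_per_page)
                      .all())
        return items, total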
Code example #2
File: account.py Project: serchaos/openspending
    def scoreboard(self, format='html'):
        """
        A list of users ordered by their score. The score is computed by
        assigning every dataset a score (10 divided by no. of maintainers)
        and then adding that score up for all maintainers.

        This does give users who maintain a single dataset a higher score than
        those who are a part of a maintenance team, which is not really what
        we want (since that rewards single points of failure in the system).

        But this is an adequate initial score and this will only be accessible
        to administrators (who may be interested in finding these single
        points of failure).
        """

        # If the user is not an administrator we abort
        if not (c.account and c.account.admin):
            abort(403, _("You are not authorized to view this page"))

        # Assign scores to each dataset based on number of maintainers
        score = db.session.query(Dataset.id,
                                 (10 / db.func.count(Account.id)).label('sum'))
        score = score.join('managers').group_by(Dataset.id).subquery()

        # Order users based on their score, which is the sum of the scores
        # of the datasets they maintain
        user_score = db.session.query(
            Account.name, Account.email,
            db.func.coalesce(db.func.sum(score.c.sum), 0).label('score'))
        user_score = user_score.outerjoin(Account.datasets).outerjoin(score)
        user_score = user_score.group_by(Account.name, Account.email)
        # We exclude the system user
        user_score = user_score.filter(Account.name != 'system')
        user_score = user_score.order_by(desc('score'))

        # Fetch all of the scores and paginate them into a template context
        # variable, 42 users per page (just because that's an awesome number)
        scores = user_score.all()
        c.page = templating.Page(scores,
                                 items_per_page=42,
                                 item_count=len(scores),
                                 **request.params)

        return templating.render('account/scoreboard.html')
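The scoring rule described in the docstring can be checked by hand. A worked sketch in plain Python with made-up maintainer data (illustrative names only), applying the same 10-divided-by-maintainer-count rule the query builds in SQL:

    # Illustrative data only: each dataset maps to its maintainers.
    datasets = {
        'budget': ['alice'],           # alice alone gets 10 / 1 = 10
        'spending': ['alice', 'bob'],  # alice and bob each get 10 / 2 = 5
    }
    scores = {}
    for maintainers in datasets.values():
        share = 10.0 / len(maintainers)
        for name in maintainers:
            scores[name] = scores.get(name, 0) + share
    print(scores)  # {'alice': 15.0, 'bob': 5.0}

This also makes the bias the docstring warns about concrete: alice, sole maintainer of 'budget', outscores bob even though both co-maintain 'spending'.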
Code example #3
File: dataset.py Project: serchaos/openspending
    def index(self, format='html'):
        """
        Get a list of all datasets along with territory, language, and
        category counts (amount of datasets for each).
        """

        # Create facet filters (so we can look at a single country,
        # language etc.)
        c.query = request.params.items()
        c.add_filter = lambda f, v: \
            '?' + urlencode(c.query +
                            [(f, v)] if (f, v) not in c.query else c.query)
        c.del_filter = lambda f, v: \
            '?' + urlencode([(k, x) for k, x in
                             c.query if (k, x) != (f, v)])

        # Parse the request parameters to get them into the right format
        parser = DatasetIndexParamParser(request.params)
        params, errors = parser.parse()
        if errors:
            concatenated_errors = ', '.join(errors)
            abort(400,
                  _('Parameter values not supported: %s') %
                  concatenated_errors)

        # We need to pop the page and pagesize parameters since they're not
        # used for the cache: we have to fetch all of the datasets to do the
        # language, territory, and category counts (these are only used for
        # the html response).
        params.pop('page')
        pagesize = params.pop('pagesize')

        # Get the cached index (this will also generate it if there are no
        # cached results; the cache is invalidated when a dataset is
        # published or retracted).
        cache = DatasetIndexCache()
        results = cache.index(**params)

        # Generate the ETag from the last modified timestamp of the first
        # dataset (they are ordered in descending order by last modified).
        # It doesn't matter if the call above just (re)generated the index:
        # if it wasn't cached, the ETag has definitely changed anyway. We
        # wrap this in a try clause since an empty list of public datasets
        # raises an IndexError. We also don't set c._must_revalidate to True
        # since we don't care if the index needs a hard refresh.
        try:
            etag_cache_keygen(
                results['datasets'][0]['timestamps']['last_modified'])
        except IndexError:
            etag_cache_keygen(None)

        # Assign the results to template context variables
        c.language_options = results['languages']
        c.territory_options = results['territories']
        c.category_options = results['categories']

        if format == 'json':
            # Apply links to the dataset lists before returning the json
            results['datasets'] = [dataset_apply_links(r)
                                   for r in results['datasets']]
            return to_jsonp(results)
        elif format == 'csv':
            # The CSV response only shows datasets, not languages,
            # territories, etc.
            return write_csv(results['datasets'], response)

        # If we get here the format is html, so we show the rss link, do
        # the pagination and render the template
        c.show_rss = True
        # The page parameter we popped earlier is still present in
        # request.params, which we pass through so the pager retains any
        # parameters already supplied (e.g. filters)
        c.page = templating.Page(results['datasets'], items_per_page=pagesize,
                                 item_count=len(results['datasets']),
                                 **request.params)
        return templating.render('dataset/index.html')
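The add_filter and del_filter lambdas near the top of this action only manipulate a list of (key, value) tuples before re-encoding it. The same behaviour in isolation, assuming Python 2's urllib (the codebase predates Python 3) and a stand-in for c.query:

    from urllib import urlencode  # urllib.parse.urlencode on Python 3

    query = [('territory', 'GB')]  # stand-in for c.query

    add_filter = lambda f, v: '?' + urlencode(
        query + [(f, v)] if (f, v) not in query else query)
    del_filter = lambda f, v: '?' + urlencode(
        [(k, x) for k, x in query if (k, x) != (f, v)])

    print(add_filter('languages', 'en'))  # ?territory=GB&languages=en
    print(del_filter('territory', 'GB'))  # ? (the filter is dropped)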
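The ETag logic is also easier to see on its own: since the cached datasets are ordered newest-first, the first entry's last_modified timestamp identifies the whole listing, and an empty listing raises IndexError. A sketch of just that key selection, with a hypothetical make_etag standing in for the etag_cache_keygen calls:

    def make_etag(datasets):
        try:
            # newest-first ordering: the first timestamp covers the listing
            return str(datasets[0]['timestamps']['last_modified'])
        except IndexError:
            # no public datasets yet; fall back to a constant key
            return str(None)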
Code example #4
File: entry.py Project: smellman/openspending
    def index(self, dataset, format='html'):
        # Get the dataset into the context variable 'c'
        self._get_dataset(dataset)

        # If the format is either json or csv we direct the user to the search
        # API instead
        if format in ['json', 'csv']:
            return redirect(
                h.url_for(controller='api/version2',
                          action='search',
                          format=format,
                          dataset=dataset,
                          **request.params))

        # Get the default view
        handle_request(request, c, c.dataset)

        # Parse the parameters using the SearchParamParser (used by the API)
        parser = EntryIndexParamParser(request.params)
        params, errors = parser.parse()

        # We have to remove page from the parameters because it is also
        # used by the Solr browser (which executes the queries)
        params.pop('page')

        # We limit ourselves to this dataset only
        params['filter']['dataset'] = [c.dataset.name]
        facet_dimensions = {
            field.name: field
            for field in c.dataset.dimensions if field.facet
        }
        params['facet_field'] = facet_dimensions.keys()

        # Create a Solr browser and execute it
        b = Browser(**params)
        try:
            b.execute()
        except SolrException as e:
            return {'errors': [unicode(e)]}

        # Get the entries, each item is a tuple of (dataset, entry)
        solr_entries = b.get_entries()
        # We are only interested in the entry part of each tuple since we
        # already know the dataset
        entries = [entry[1] for entry in solr_entries]

        # Get the expanded facets for this dataset
        c.facets = b.get_expanded_facets(c.dataset)

        # Create a pager for the entries
        c.entries = templating.Page(entries, **request.params)

        # Set the search term, defaulting to the empty string
        c.search = params.get('q', '')

        # Set filters (but remove the dataset as we don't need it)
        c.filters = params['filter']
        del c.filters['dataset']

        # We also make the facet dimensions and dimension names available
        c.facet_dimensions = facet_dimensions
        c.dimensions = [dimension.name for dimension in c.dataset.dimensions]

        # Render the entries page
        return templating.render('entry/index.html')
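The facet setup above reduces to a dict comprehension over the dataset's dimensions. A self-contained sketch of that selection, with a hypothetical Dimension class standing in for the objects in c.dataset.dimensions:

    class Dimension(object):
        def __init__(self, name, facet):
            self.name, self.facet = name, facet

    dimensions = [Dimension('year', True), Dimension('payee', False)]

    # Keep only the facetable dimensions; the keys become Solr's
    # facet_field parameter, as in the action above.
    facet_dimensions = {d.name: d for d in dimensions if d.facet}
    print(sorted(facet_dimensions.keys()))  # ['year']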