Code Example #1
File: dimension.py Project: serchaos/openspending
 def member(self, dataset, dimension, name, format="html"):
     self._get_member(dataset, dimension, name)
     handle_request(request, c, c.member, c.dimension.name)
     member = [member_apply_links(dataset, dimension, c.member)]
     if format == 'json':
         return write_json(member, response)
     elif format == 'csv':
         return write_csv(member, response)
     else:
         # If there are no views set up, then go direct to the entries
         # search page
         if c.view is None:
             return redirect(
                 url_for(controller='dimension',
                         action='entries',
                         dataset=c.dataset.name,
                         dimension=dimension,
                         name=name))
         if 'embed' in request.params:
             return redirect(
                 url_for(controller='view',
                         action='embed',
                         dataset=c.dataset.name,
                         widget=c.view.vis_widget.get('name'),
                         state=json.dumps(c.view.vis_state)))
         return templating.render('dimension/member.html')
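A note on the shared helper: every example on this page hands its rows to write_csv(rows, response, ...), whose definition is not shown here. Below is a minimal sketch of the contract the call sites appear to assume (an iterable of dict-like rows, a file-like Pylons-style response with a write() method, and an optional attachment filename); deriving the column set from the first row is also an assumption, not something the examples guarantee.

import csv

def write_csv(rows, response, filename=None):
    # Sketch only: CSV content type plus an optional download filename.
    response.content_type = 'text/csv'
    if filename is not None:
        response.headers['Content-Disposition'] = \
            'attachment; filename=%s' % filename

    rows = iter(rows)
    try:
        first = next(rows)
    except StopIteration:
        return response  # nothing to write
    # Assumption: the first row's keys cover the columns of all rows.
    writer = csv.DictWriter(response, fieldnames=sorted(first.keys()))
    writer.writeheader()
    writer.writerow(first)
    for row in rows:
        writer.writerow(row)
    return response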
Code Example #2
File: dataset.py Project: asuffield/openspending
    def index(self, format='html'):
        for item in self.extensions:
            item.index(c, request, response, c.results)

        if format == 'json':
            return to_jsonp(map(lambda d: d.as_dict(),
                                c.datasets))
        elif format == 'csv':
            results = map(lambda d: d.as_dict(), c.datasets)
            return write_csv(results, response)
        else:
            c.query = request.params.items()
            c.add_filter = lambda f, v: '?' + urlencode(c.query +
                    [(f, v)] if (f, v) not in c.query else c.query)
            c.del_filter = lambda f, v: '?' + urlencode([(k,x) for k, x in
                c.query if (k,x) != (f,v)])
            c.results = c.datasets
            for language in request.params.getall('languages'):
                l = db.aliased(DatasetLanguage)
                c.results = c.results.join(l, Dataset._languages)
                c.results = c.results.filter(l.code==language)
            for territory in request.params.getall('territories'):
                t = db.aliased(DatasetTerritory)
                c.results = c.results.join(t, Dataset._territories)
                c.results = c.results.filter(t.code==territory)
            c.results = list(c.results)
            c.territory_options = DatasetTerritory.dataset_counts(c.results)
            c.language_options = DatasetLanguage.dataset_counts(c.results)
            return render('dataset/index.html')
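The add_filter and del_filter lambdas above build the query strings for faceted navigation. Note that the conditional expression binds around the whole concatenation, so add_filter returns the query unchanged when the pair is already present. A standalone demonstration of the same logic with a hypothetical query list (plain Python 2, no Pylons context object):

from urllib import urlencode  # Python 2, as in the examples on this page

query = [('languages', 'en'), ('territories', 'GB')]

add_filter = lambda f, v: '?' + urlencode(
    query + [(f, v)] if (f, v) not in query else query)
del_filter = lambda f, v: '?' + urlencode(
    [(k, x) for k, x in query if (k, x) != (f, v)])

print add_filter('languages', 'de')    # ?languages=en&territories=GB&languages=de
print add_filter('languages', 'en')    # already present: ?languages=en&territories=GB
print del_filter('territories', 'GB')  # ?languages=en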
Code Example #3
File: dimension.py Project: mihi-tr/openspending
 def member(self, dataset, dimension, name, format="html"):
     self._get_member(dataset, dimension, name)
     handle_request(request, c, c.member, c.dimension.name)
     member = [member_apply_links(dataset, dimension, c.member)]
     if format == "json":
         return write_json(member, response)
     elif format == "csv":
         return write_csv(member, response)
     else:
         # If there are no views set up, then go direct to the entries
         # search page
         if c.view is None:
             return redirect(
                 url_for(
                     controller="dimension", action="entries", dataset=c.dataset.name, dimension=dimension, name=name
                 )
             )
         if "embed" in request.params:
             return redirect(
                 url_for(
                     controller="view",
                     action="embed",
                     dataset=c.dataset.name,
                     widget=c.view.vis_widget.get("name"),
                     state=json.dumps(c.view.vis_state),
                 )
             )
         return templating.render("dimension/member.html")
Code Example #4
File: entry.py Project: tomjrees/openspending
    def view(self, dataset, id, format='html'):
        self._get_dataset(dataset)
        entries = list(c.dataset.entries(c.dataset.alias.c.id == id))
        if not len(entries) == 1:
            abort(404, _('Sorry, there is no entry %r') % id)
        c.entry = entry_apply_links(dataset, entries.pop())

        c.id = c.entry.get('id')
        c.from_ = c.entry.get('from')
        c.to = c.entry.get('to')
        c.currency = c.entry.get('currency', c.dataset.currency).upper()
        c.amount = c.entry.get('amount')
        c.time = c.entry.get('time')

        c.custom_html = h.render_entry_custom_html(c.dataset, c.entry)

        excluded_keys = ('time', 'amount', 'currency', 'from', 'to', 'dataset',
                         'id', 'name', 'description')

        c.extras = {}
        if c.dataset:
            c.desc = dict([(d.name, d) for d in c.dataset.dimensions])
            for key in c.entry:
                if key in c.desc and \
                        not key in excluded_keys:
                    c.extras[key] = c.entry[key]

        if format == 'json':
            return to_jsonp(c.entry)
        elif format == 'csv':
            return write_csv([c.entry], response)
        else:
            return templating.render('entry/view.html')
Code Example #5
    def index(self, format='html'):
        for item in self.extensions:
            item.index(c, request, response, c.results)

        if format == 'json':
            return to_jsonp(map(lambda d: d.as_dict(), c.datasets))
        elif format == 'csv':
            results = map(lambda d: d.as_dict(), c.datasets)
            return write_csv(results, response)
        else:
            c.query = request.params.items()
            c.add_filter = lambda f, v: '?' + urlencode(c.query + [(f, v)] if (
                f, v) not in c.query else c.query)
            c.del_filter = lambda f, v: '?' + urlencode([(k, x)
                                                         for k, x in c.query
                                                         if (k, x) != (f, v)])
            c.results = c.datasets
            for language in request.params.getall('languages'):
                l = db.aliased(DatasetLanguage)
                c.results = c.results.join(l, Dataset._languages)
                c.results = c.results.filter(l.code == language)
            for territory in request.params.getall('territories'):
                t = db.aliased(DatasetTerritory)
                c.results = c.results.join(t, Dataset._territories)
                c.results = c.results.filter(t.code == territory)
            c.results = list(c.results)
            c.territory_options = DatasetTerritory.dataset_counts(c.results)
            c.language_options = DatasetLanguage.dataset_counts(c.results)
            return render('dataset/index.html')
Code Example #6
File: entry.py Project: vitorbaptista/openspending
    def view(self, dataset, id, format="html"):
        self._get_dataset(dataset)
        entries = list(c.dataset.entries(c.dataset.alias.c.id == id))
        if not len(entries) == 1:
            abort(404, _("Sorry, there is no entry %r") % id)
        c.entry = entry_apply_links(dataset, entries.pop())

        c.id = c.entry.get("id")
        c.from_ = c.entry.get("from")
        c.to = c.entry.get("to")
        c.currency = c.entry.get("currency", c.dataset.currency).upper()
        c.amount = c.entry.get("amount")
        c.time = c.entry.get("time")

        c.custom_html = h.render_entry_custom_html(c.dataset, c.entry)

        excluded_keys = ("time", "amount", "currency", "from", "to", "dataset", "id", "name", "description")

        c.extras = {}
        if c.dataset:
            c.desc = dict([(d.name, d) for d in c.dataset.dimensions])
            for key in c.entry:
                if key in c.desc and not key in excluded_keys:
                    c.extras[key] = c.entry[key]

        if format == "json":
            return to_jsonp(c.entry)
        elif format == "csv":
            return write_csv([c.entry], response)
        else:
            return render("entry/view.html")
Code Example #7
File: api2.py Project: openstate/openspending
    def aggregate(self):
        parser = AggregateParamParser(request.params)
        params, errors = parser.parse()

        if errors:
            response.status = 400
            return {'errors': errors}

        params['cuts'] = params.pop('cut')
        params['drilldowns'] = params.pop('drilldown')
        dataset = params.pop('dataset')
        format = params.pop('format')
        require.dataset.read(dataset)

        try:
            cache = AggregationCache(dataset)
            result = cache.aggregate(**params)
            if 'drilldown' in result:
                result['drilldown'] = drilldowns_apply_links(
                    dataset.name, result['drilldown'])

            response.last_modified = dataset.updated_at
            if cache.cache_enabled and 'cache_key' in result['summary']:
                etag_cache(result['summary']['cache_key'])

        except (KeyError, ValueError) as ve:
            log.exception(ve)
            response.status = 400
            return {'errors': ['Invalid aggregation query: %r' % ve]}

        if format == 'csv':
            return write_csv(result['drilldown'],
                             response,
                             filename=dataset.name + '.csv')
        return to_jsonp(result)
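For orientation, this controller implements the server side of the version 2 aggregation API. The following client-side sketch is hedged: the /api/2/aggregate route, the host, the dataset name, and the drilldown/cut value syntax are illustrative assumptions; only the dataset, drilldown, cut, and format parameter names come from the parser logic above.

import requests  # third-party HTTP client, not part of openspending

# Hypothetical deployment; the /api/2/aggregate route is an assumption.
url = 'http://spending.example.org/api/2/aggregate'
params = {
    'dataset': 'ukgov-finances-cra',  # illustrative dataset name
    'drilldown': 'time|cofog1',       # assumed pipe-separated dimensions
    'cut': 'time.year:2010',          # assumed dimension:value filter
    'format': 'csv',                  # omit to get the default jsonp result
}
resp = requests.get(url, params=params)
resp.raise_for_status()
with open('aggregate.csv', 'wb') as fh:
    fh.write(resp.content)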
Code Example #8
File: entry.py Project: openstate/openspending
    def view(self, dataset, id, format='html'):
        self._get_dataset(dataset)
        entries = list(c.dataset.entries(c.dataset.alias.c.id == id))
        if not len(entries) == 1:
            abort(404, _('Sorry, there is no entry %r') % id)
        c.entry = entry_apply_links(dataset, entries.pop())

        c.id = c.entry.get('id')
        c.from_ = c.entry.get('from')
        c.to = c.entry.get('to')
        c.currency = c.entry.get('currency', c.dataset.currency).upper()
        c.amount = c.entry.get('amount')
        c.time = c.entry.get('time')

        c.custom_html = h.render_entry_custom_html(c.dataset,
                                                   c.entry)

        excluded_keys = ('time', 'amount', 'currency', 'from',
                         'to', 'dataset', 'id', 'name', 'description')

        c.extras = {}
        if c.dataset:
            c.desc = dict([(d.name, d) for d in c.dataset.dimensions])
            for key in c.entry:
                if key in c.desc and \
                        not key in excluded_keys:
                    c.extras[key] = c.entry[key]

        if format == 'json':
            return to_jsonp(c.entry)
        elif format == 'csv':
            return write_csv([c.entry], response)
        else:
            return render('entry/view.html')
Code Example #9
File: api2.py Project: openstate/openspending
    def aggregate(self):
        parser = AggregateParamParser(request.params)
        params, errors = parser.parse()

        if errors:
            response.status = 400
            return {'errors': errors}

        params['cuts'] = params.pop('cut')
        params['drilldowns'] = params.pop('drilldown')
        dataset = params.pop('dataset')
        format = params.pop('format')
        require.dataset.read(dataset)

        try:
            cache = AggregationCache(dataset)
            result = cache.aggregate(**params)
            if 'drilldown' in result:
                result['drilldown'] = drilldowns_apply_links(dataset.name,
                    result['drilldown'])

            response.last_modified = dataset.updated_at
            if cache.cache_enabled and 'cache_key' in result['summary']:
                etag_cache(result['summary']['cache_key'])

        except (KeyError, ValueError) as ve:
            log.exception(ve)
            response.status = 400
            return {'errors': ['Invalid aggregation query: %r' % ve]}

        if format == 'csv':
            return write_csv(result['drilldown'], response,
                filename=dataset.name + '.csv')
        return to_jsonp(result)
Code Example #10
File: entry.py Project: openstate/openspending
    def index_export(self, dataset, format):
        self._get_dataset(dataset)

        # TODO include html_urls in dumps.
        #processor = lambda e: entry_apply_links(c.dataset.name, e)

        if format == 'json':
            return write_json(c.dataset.entries(), response,
                filename=c.dataset.name + '.json')
        if format == 'csv':
            return write_csv(c.dataset.entries(), response,
                filename=c.dataset.name + '.csv')
        else:
            return redirect(h.url_for(controller='entry', action='index'))
Code Example #11
File: dimension.py Project: openstate/openspending
    def entries(self, dataset, dimension, name, format="html"):
        self._get_member(dataset, dimension, name)

        handle_request(request, c, c.member, c.dimension.name)

        entries = c.dataset.entries(c.dimension.alias.c.name == c.member["name"])
        entries = (entry_apply_links(dataset, e) for e in entries)
        attachment_name = "__".join([dataset, dimension, name])

        if format == "json":
            return write_json(entries, response, filename=attachment_name + ".json")
        elif format == "csv":
            return write_csv(entries, response, filename=attachment_name + ".csv")
        else:
            return render("dimension/entries.html")
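write_json is used here in the same way as write_csv, receiving a generator of entries and an attachment filename. A minimal sketch of that counterpart, again an assumption since the helper itself is not shown; it streams the entries as a JSON list so the generator never has to be materialised:

import json

def write_json(rows, response, filename=None):
    # Sketch only: JSON media type plus an optional download filename.
    response.content_type = 'application/json'
    if filename is not None:
        response.headers['Content-Disposition'] = \
            'attachment; filename=%s' % filename

    def stream():
        yield '['
        for i, row in enumerate(rows):
            if i:
                yield ',\n'
            # default=unicode: stringify dates and other non-JSON values
            yield json.dumps(row, default=unicode)
        yield ']'

    # Assumption: the framework accepts an iterable response body.
    return stream()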
Code Example #12
File: dimension.py Project: citizennerd/openspending
    def entries(self, dataset, dimension, name, format='html'):
        self._get_member(dataset, dimension, name)

        handle_request(request, c, c.member, c.dimension.name)

        entries = c.dataset.entries(c.dimension.alias.c.name == c.member['name'])
        entries = (entry_apply_links(dataset, e) for e in entries)
        attachment_name = '__'.join([dataset, dimension, name])

        if format == 'json':
            return write_json(entries, response, filename=attachment_name + '.json')
        elif format == 'csv':
            return write_csv(entries, response, filename=attachment_name + '.csv')
        else:
            return render('dimension/entries.html')
Code Example #13
File: dimension.py Project: asuffield/openspending
    def member(self, dataset, dimension, name, format="html"):
        self._get_member(dataset, dimension, name)

        handle_request(request, c, c.member, c.dimension)
        if c.view is None:
            self._make_browser()

        for item in self.extensions:
            item.read(c, request, response, c.member)

        if format == 'json':
            return to_jsonp(c.member)
        elif format == 'csv':
            return write_csv([c.member], response)
        else:
            return render('dimension/member.html')
Code Example #14
File: entry.py Project: openstate/openspending
    def index_export(self, dataset, format):
        self._get_dataset(dataset)

        # TODO include html_urls in dumps.
        #processor = lambda e: entry_apply_links(c.dataset.name, e)

        if format == 'json':
            return write_json(c.dataset.entries(),
                              response,
                              filename=c.dataset.name + '.json')
        if format == 'csv':
            return write_csv(c.dataset.entries(),
                             response,
                             filename=c.dataset.name + '.csv')
        else:
            return redirect(h.url_for(controller='entry', action='index'))
Code Example #15
    def member(self, dataset, dimension, name, format="html"):
        self._get_member(dataset, dimension, name)

        handle_request(request, c, c.member, c.dimension)
        if c.view is None:
            self._make_browser()

        for item in self.extensions:
            item.read(c, request, response, c.member)

        if format == 'json':
            return to_jsonp(c.member)
        elif format == 'csv':
            return write_csv([c.member], response)
        else:
            return render('dimension/member.html')
Code Example #16
File: dimension.py Project: citizennerd/openspending
    def member(self, dataset, dimension, name, format="html"):
        self._get_member(dataset, dimension, name)

        handle_request(request, c, c.member, c.dimension.name)

        # If there are no views set up, then go direct to the entries search page
        if c.view is None and format == "html":
            return redirect(url_for(controller='dimension', action='entries',
                dataset=c.dataset.name, dimension=dimension, name=name))

        member = [member_apply_links(dataset, dimension, c.member)]
        if format == 'json':
            return write_json(member, response)
        elif format == 'csv':
            return write_csv(member, response)
        else:
            return render('dimension/member.html')
Code Example #17
    def view(self, dataset, format='html'):
        self._get_dataset(dataset)
        c.num_entries = len(c.dataset)

        handle_request(request, c, c.dataset)

        if c.view is None and format == 'html':
            return EntryController().index(dataset, format)

        for item in self.extensions:
            item.read(c, request, response, c.dataset)

        if format == 'json':
            return to_jsonp(c.dataset.as_dict())
        elif format == 'csv':
            return write_csv([c.dataset.as_dict()], response)
        else:
            return render('dataset/view.html')
Code Example #18
File: dataset.py Project: asuffield/openspending
    def view(self, dataset, format='html'):
        self._get_dataset(dataset)
        c.num_entries = len(c.dataset)

        handle_request(request, c, c.dataset)

        if c.view is None and format == 'html':
            return EntryController().index(dataset, format)

        for item in self.extensions:
            item.read(c, request, response, c.dataset)

        if format == 'json':
            return to_jsonp(c.dataset.as_dict())
        elif format == 'csv':
            return write_csv([c.dataset.as_dict()], response)
        else:
            return render('dataset/view.html')
Code Example #19
File: dimension.py Project: openstate/openspending
    def entries(self, dataset, dimension, name, format='html'):
        self._get_member(dataset, dimension, name)

        handle_request(request, c, c.member, c.dimension.name)

        entries = c.dataset.entries(
            c.dimension.alias.c.name == c.member['name'])
        entries = (entry_apply_links(dataset, e) for e in entries)
        attachment_name = '__'.join([dataset, dimension, name])

        if format == 'json':
            return write_json(entries,
                              response,
                              filename=attachment_name + '.json')
        elif format == 'csv':
            return write_csv(entries,
                             response,
                             filename=attachment_name + '.csv')
        else:
            return render('dimension/entries.html')
Code Example #20
File: dimension.py Project: Web5design/openspending
 def member(self, dataset, dimension, name, format="html"):
     self._get_member(dataset, dimension, name)
     handle_request(request, c, c.member, c.dimension.name)
     member = [member_apply_links(dataset, dimension, c.member)]
     if format == 'json':
         return write_json(member, response)
     elif format == 'csv':
         return write_csv(member, response)
     else:
         # If there are no views set up, then go direct to the entries
         # search page
         if c.view is None:
             return redirect(url_for(controller='dimension', action='entries',
                 dataset=c.dataset.name, dimension=dimension, name=name))
         if 'embed' in request.params:
             return redirect(url_for(controller='view',
                 action='embed', dataset=c.dataset.name,
                 widget=c.view.vis_widget.get('name'),
                 state=json.dumps(c.view.vis_state)))
         return render('dimension/member.html')
Code Example #21
File: dataset.py Project: citizennerd/openspending
    def index(self, format='html'):
        c.query = request.params.items()
        c.add_filter = lambda f, v: '?' + urlencode(c.query +
                [(f, v)] if (f, v) not in c.query else c.query)
        c.del_filter = lambda f, v: '?' + urlencode([(k, x) for k, x in
            c.query if (k, x) != (f, v)])
        c.results = c.datasets
        for language in request.params.getall('languages'):
            l = db.aliased(DatasetLanguage)
            c.results = c.results.join(l, Dataset._languages)
            c.results = c.results.filter(l.code == language)
        for territory in request.params.getall('territories'):
            t = db.aliased(DatasetTerritory)
            c.results = c.results.join(t, Dataset._territories)
            c.results = c.results.filter(t.code == territory)
        c.results = list(c.results)
        c.territory_options = [{'code': code,
                                'count': count,
                                'url': h.url_for(controller='dataset',
                                    action='index', territories=code),
                                'label': COUNTRIES.get(code, code)} \
            for (code, count) in DatasetTerritory.dataset_counts(c.results)]
        c.language_options = [{'code': code,
                               'count': count,
                               'url': h.url_for(controller='dataset',
                                    action='index', languages=code),
                               'label': LANGUAGES.get(code, code)} \
            for (code, count) in DatasetLanguage.dataset_counts(c.results)]

        if format == 'json':
            results = map(lambda d: d.as_dict(), c.results)
            results = [dataset_apply_links(r) for r in results]
            return to_jsonp({
                'datasets': results,
                'territories': c.territory_options,
                'languages': c.language_options
                })
        elif format == 'csv':
            results = map(lambda d: d.as_dict(), c.results)
            return write_csv(results, response)
        return render('dataset/index.html')
Code Example #22
File: restapi.py Project: rgrp/openspending
 def _view_csv(self, result):
     if not isinstance(result, list):
         result = [result]
     write_csv(result, response)
     return
Code Example #23
 def to_csv(self):
     return write_csv(self.all_entries, response)
Code Example #24
File: browser.py Project: nomed/openspending
 def to_csv(self):
     from pylons import response
     return write_csv(self.entities, response)
Code Example #25
File: version2.py Project: RandyMoore/openspending
    def aggregate(self):
        """
        Aggregation of a dataset based on URL parameters. It serves the
        aggregation from a cache if possible; if not, it computes it (for
        some reason the computation itself is performed in the aggregation
        cache).
        """

        # Parse the aggregation parameters to get them into the right format
        parser = AggregateParamParser(request.params)
        params, errors = parser.parse()

        # If there were parsing errors we return them with status code 400
        # as jsonp, irrespective of what format was asked for.
        if errors:
            response.status = 400
            return to_jsonp({'errors': errors})

        # URL parameters are always singular nouns but we work with some
        # as plural nouns so we pop them into the plural version
        params['cuts'] = params.pop('cut')
        params['drilldowns'] = params.pop('drilldown')
        params['measures'] = params.pop('measure')

        # Get the dataset and the format and remove from the parameters
        dataset = params.pop('dataset')
        format = params.pop('format')

        # User must have the right to read the dataset to perform aggregation
        require.dataset.read(dataset)

        # Create response headers from the parameters
        self._response_params(params)

        try:
            # Create an aggregation cache for the dataset and aggregate its
            # results. The cache will perform the aggregation if it doesn't
            # have a cached result
            cache = AggregationCache(dataset)
            result = cache.aggregate(**params)

            # If the result has drilldown we create html_url values for its
            # dimensions (linked data).
            if 'drilldown' in result:
                result['drilldown'] = drilldowns_apply_links(dataset.name,
                    result['drilldown'])

            # Do the ETag caching based on the cache_key in the summary.
            # This is a weird place to do it since the heavy lifting has
            # already been performed above. TODO: Needs rethinking.
            response.last_modified = dataset.updated_at
            if cache.cache_enabled and 'cache_key' in result['summary']:
                etag_cache(result['summary']['cache_key'])

        except (KeyError, ValueError) as ve:
            # We log possible errors and return them with status code 400
            log.exception(ve)
            response.status = 400
            return to_jsonp({'errors': [unicode(ve)]})

        # If the requested format is csv we write the drilldown results into
        # a csv file and return it; if not, we return a jsonp result (default)
        if format == 'csv':
            return write_csv(result['drilldown'], response,
                filename=dataset.name + '.csv')
        return to_jsonp(result)
Code Example #26
File: dataset.py Project: serchaos/openspending
    def index(self, format='html'):
        """
        Get a list of all datasets along with territory, language, and
        category counts (amount of datasets for each).
        """

        # Create facet filters (so we can look at a single country,
        # language etc.)
        c.query = request.params.items()
        c.add_filter = lambda f, v: \
            '?' + urlencode(c.query +
                            [(f, v)] if (f, v) not in c.query else c.query)
        c.del_filter = lambda f, v: \
            '?' + urlencode([(k, x) for k, x in
                             c.query if (k, x) != (f, v)])

        # Parse the request parameters to get them into the right format
        parser = DatasetIndexParamParser(request.params)
        params, errors = parser.parse()
        if errors:
            concatenated_errors = ', '.join(errors)
            abort(400,
                  _('Parameter values not supported: %s') %
                  concatenated_errors)

        # We need to pop the page and pagesize parameters since they're not
        # used for the cache: we have to get all of the datasets to do the
        # language, territory, and category counts. Page and pagesize are
        # then only used for the html response.
        params.pop('page')
        pagesize = params.pop('pagesize')

        # Get cached indices. This will also generate them if there are no
        # cached results; the cache is invalidated when a dataset is
        # published or retracted.
        cache = DatasetIndexCache()
        results = cache.index(**params)

        # Generate the ETag from the last modified timestamp of the first
        # dataset (they are ordered in descending order by last modified).
        # It doesn't matter that the index may just have been generated
        # above: if it wasn't cached, the ETag has certainly changed. We
        # wrap this in a try clause since an empty index (no public
        # datasets) would raise an IndexError. We also don't set
        # c._must_revalidate to True since we don't care if the index needs
        # a hard refresh.
        try:
            etag_cache_keygen(
                results['datasets'][0]['timestamps']['last_modified'])
        except IndexError:
            etag_cache_keygen(None)

        # Assign the results to template context variables
        c.language_options = results['languages']
        c.territory_options = results['territories']
        c.category_options = results['categories']

        if format == 'json':
            # Apply links to the dataset lists before returning the json
            results['datasets'] = [dataset_apply_links(r)
                                   for r in results['datasets']]
            return to_jsonp(results)
        elif format == 'csv':
            # The CSV response only shows datasets, not languages,
            # territories, etc.
            return write_csv(results['datasets'], response)

        # If we're here then it's an html format so we show rss, do the
        # pagination and render the template
        c.show_rss = True
        # The page parameter we popped earlier is part of request.params but
        # we now know it was parsed. We have to send in request.params to
        # retain any parameters already supplied (filters)
        c.page = templating.Page(results['datasets'], items_per_page=pagesize,
                                 item_count=len(results['datasets']),
                                 **request.params)
        return templating.render('dataset/index.html')
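etag_cache_keygen appears here and in several of the index actions on this page, but is not defined anywhere in these listings. A plausible sketch, assuming it only derives a stable ETag from its arguments and then delegates the 304 handling to Pylons' etag_cache:

import hashlib

from pylons.controllers.util import etag_cache

def etag_cache_keygen(*keys):
    # Hash the arguments (timestamps, parser keys, or None) into a stable
    # ETag and let Pylons decide whether to answer 304 Not Modified.
    etag_cache(hashlib.sha1(repr(keys)).hexdigest())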
Code Example #27
File: dataset.py Project: vitorbaptista/openspending
    def index(self, format="html"):
        c.query = request.params.items()
        c.add_filter = lambda f, v: "?" + urlencode(c.query + [(f, v)] if (f, v) not in c.query else c.query)
        c.del_filter = lambda f, v: "?" + urlencode([(k, x) for k, x in c.query if (k, x) != (f, v)])
        c.results = c.datasets
        for language in request.params.getall("languages"):
            l = db.aliased(DatasetLanguage)
            c.results = c.results.join(l, Dataset._languages)
            c.results = c.results.filter(l.code == language)
        for territory in request.params.getall("territories"):
            t = db.aliased(DatasetTerritory)
            c.results = c.results.join(t, Dataset._territories)
            c.results = c.results.filter(t.code == territory)
        category = request.params.get("category")
        if category:
            c.results = c.results.filter(Dataset.category == category)

        c.results = list(c.results)
        c.territory_options = [
            {
                "code": code,
                "count": count,
                "url": h.url_for(controller="dataset", action="index", territories=code),
                "label": COUNTRIES.get(code, code),
            }
            for (code, count) in DatasetTerritory.dataset_counts(c.results)
        ]
        c.language_options = [
            {
                "code": code,
                "count": count,
                "url": h.url_for(controller="dataset", action="index", languages=code),
                "label": LANGUAGES.get(code, code),
            }
            for (code, count) in DatasetLanguage.dataset_counts(c.results)
        ]

        # TODO: figure out where to put this:
        ds_ids = [d.id for d in c.results]
        if len(ds_ids):
            q = db.select(
                [Dataset.category, db.func.count(Dataset.id)],
                Dataset.id.in_(ds_ids),
                group_by=Dataset.category,
                order_by=db.func.count(Dataset.id).desc(),
            )
            c.category_options = [
                {
                    "category": category,
                    "count": count,
                    "url": h.url_for(controller="dataset", action="index", category=category),
                    "label": CATEGORIES.get(category, category),
                }
                for (category, count) in db.session.bind.execute(q).fetchall()
                if category is not None
            ]
        else:
            c.category_options = []

        c._must_revalidate = True
        if len(c.results):
            dt = max([r.updated_at for r in c.results])
            etag_cache_keygen(dt)

        if format == "json":
            results = map(lambda d: d.as_dict(), c.results)
            results = [dataset_apply_links(r) for r in results]
            return to_jsonp(
                {
                    "datasets": results,
                    "categories": c.category_options,
                    "territories": c.territory_options,
                    "languages": c.language_options,
                }
            )
        elif format == "csv":
            results = map(lambda d: d.as_dict(), c.results)
            return write_csv(results, response)
        return render("dataset/index.html")
Code Example #28
File: entry.py Project: nickstenning/openspending
    def view(self, dataset, id, format='html'):
        """
        Get a specific entry in the dataset, identified by the id. The entry
        can be returned as html (default), json or csv.
        """

        # Generate the dataset
        self._get_dataset(dataset)
        # Get the entry that matches the given id. c.dataset.entries is
        # a generator, so we create a list from its results based on the
        # given constraint
        entries = list(c.dataset.entries(c.dataset.alias.c.id == id))
        # Since we're trying to get a single entry the list should only
        # contain one entry; if not, we return an error
        if not len(entries) == 1:
            abort(404, _('Sorry, there is no entry %r') % id)
        # Add urls to the entry and assign it as a context variable
        c.entry = entry_apply_links(dataset, entries.pop())

        # Get and set some context variables from the entry
        # This shouldn't really be necessary but it's here so nothing gets
        # broken
        c.id = c.entry.get('id')
        c.from_ = c.entry.get('from')
        c.to = c.entry.get('to')
        c.currency = c.entry.get('currency', c.dataset.currency).upper()
        c.time = c.entry.get('time')

        # Get the amount for the entry
        amount = c.entry.get('amount')
        # We adjust for inflation if the user has asked for this to be inflated
        if 'inflate' in request.params:
            try:
                # Inflate the amount. Target date is provided in request.params
                # as value for inflate and reference date is the date of the
                # entry. We also provide a list of the territories to extract
                # a single country for which to do the inflation
                c.inflation = h.inflate(amount, request.params['inflate'],
                                        c.time, c.dataset.territories)

                # The amount to show should be the inflated amount
                # and overwrite the entry's amount as well
                c.amount = c.inflation['inflated']
                c.entry['amount'] = c.inflation['inflated']

                # We include the inflation response in the entry's dict
                # HTML description assumes every dict value for the entry
                # includes a label so we include a default "Inflation
                # adjustment" for it to work.
                c.inflation['label'] = 'Inflation adjustment'
                c.entry['inflation_adjustment'] = c.inflation
            except Exception:
                # If anything goes wrong in the try clause (and there's a lot
                # that can go wrong), we just say that we can't adjust for
                # inflation and set the context amount to the original amount
                h.flash_notice(_('Unable to adjust for inflation'))
                c.amount = amount
        else:
            # If we haven't been asked to inflate then we just use the
            # original amount
            c.amount = amount

        # Add custom html for the dataset entry if the dataset has some
        # custom html
        # 2013-11-17 disabled this as part of removal of genshi as depended on
        # a genshi specific helper.
        # TODO: reinstate if important
        # c.custom_html = h.render_entry_custom_html(c.dataset, c.entry)

        # Add the rest of the dimensions relating to this entry into an
        # extras dictionary. We first need to exclude all dimensions that
        # are already shown and then we can loop through the dimensions
        excluded_keys = ('time', 'amount', 'currency', 'from',
                         'to', 'dataset', 'id', 'name', 'description')

        c.extras = {}
        if c.dataset:
            # Create a dictionary of the dataset dimensions
            c.desc = dict([(d.name, d) for d in c.dataset.dimensions])
            # Loop through dimensions of the entry
            for key in c.entry:
                # Entry dimension must be a dataset dimension and not in
                # the predefined excluded keys
                if key in c.desc and \
                        not key in excluded_keys:
                    c.extras[key] = c.entry[key]

        # Return the entry in the requested format
        if format == 'json':
            return to_jsonp(c.entry)
        elif format == 'csv':
            return write_csv([c.entry], response)
        else:
            return templating.render('entry/view.html')
Code Example #29
File: version2.py Project: hagino3000/openspending
    def search(self):
        parser = SearchParamParser(request.params)
        params, errors = parser.parse()

        if errors:
            response.status = 400
            return to_jsonp({'errors': errors})

        expand_facets = params.pop('expand_facet_dimensions')

        format = params.pop('format')
        if format == 'csv':
            params['stats'] = False
            params['facet_field'] = None

        datasets = params.pop('dataset', None)
        if datasets is None or not datasets:
            q = Dataset.all_by_account(c.account)
            if params.get('category'):
                q = q.filter_by(category=params.pop('category'))
            datasets = q.all()
            expand_facets = False

        if not datasets:
            return {'errors': ["No dataset available."]}

        params['filter']['dataset'] = []
        for dataset in datasets:
            require.dataset.read(dataset)
            params['filter']['dataset'].append(dataset.name)

        response.last_modified = max([d.updated_at for d in datasets])
        etag_cache_keygen(parser.key(), response.last_modified)

        if params['pagesize'] > parser.defaults['pagesize']:

            # http://wiki.nginx.org/X-accel#X-Accel-Buffering
            response.headers['X-Accel-Buffering'] = 'no'

            if format == 'csv':
                csv_headers(response, 'entries.csv')
                streamer = CSVStreamingResponse(
                    datasets,
                    params,
                    pagesize=parser.defaults['pagesize']
                )
                return streamer.response()
            else:
                json_headers(filename='entries.json')
                streamer = JSONStreamingResponse(
                    datasets,
                    params,
                    pagesize=parser.defaults['pagesize'],
                    expand_facets=util.expand_facets
                    if expand_facets else None,
                    callback=request.params.get('callback')
                )
                return streamer.response()

        solr_browser = Browser(**params)
        try:
            solr_browser.execute()
        except SolrException as e:
            return {'errors': [unicode(e)]}

        entries = []
        for dataset, entry in solr_browser.get_entries():
            entry = entry_apply_links(dataset.name, entry)
            entry['dataset'] = dataset_apply_links(dataset.as_dict())
            entries.append(entry)

        if format == 'csv':
            return write_csv(entries, response,
                             filename='entries.csv')

        if expand_facets and len(datasets) == 1:
            facets = solr_browser.get_expanded_facets(datasets[0])
        else:
            facets = solr_browser.get_facets()

        return to_jsonp({
            'stats': solr_browser.get_stats(),
            'facets': facets,
            'results': entries
        })
Code Example #30
    def index(self, format='html'):
        c.query = request.params.items()
        c.add_filter = lambda f, v: '?' + urlencode(c.query + [(f, v)] if (
            f, v) not in c.query else c.query)
        c.del_filter = lambda f, v: '?' + urlencode([(k, x) for k, x in c.query
                                                     if (k, x) != (f, v)])
        c.results = c.datasets
        for language in request.params.getall('languages'):
            l = db.aliased(DatasetLanguage)
            c.results = c.results.join(l, Dataset._languages)
            c.results = c.results.filter(l.code == language)
        for territory in request.params.getall('territories'):
            t = db.aliased(DatasetTerritory)
            c.results = c.results.join(t, Dataset._territories)
            c.results = c.results.filter(t.code == territory)
        category = request.params.get('category')
        if category:
            c.results = c.results.filter(Dataset.category == category)

        c.results = list(c.results)
        c.territory_options = [{'code': code,
                                'count': count,
                                'url': h.url_for(controller='dataset',
                                    action='index', territories=code),
                                'label': COUNTRIES.get(code, code)} \
            for (code, count) in DatasetTerritory.dataset_counts(c.results)]
        c.language_options = [{'code': code,
                               'count': count,
                               'url': h.url_for(controller='dataset',
                                    action='index', languages=code),
                               'label': LANGUAGES.get(code, code)} \
            for (code, count) in DatasetLanguage.dataset_counts(c.results)]

        # TODO: figure out where to put this:
        ds_ids = [d.id for d in c.results]
        if len(ds_ids):
            q = db.select(
                [Dataset.category, db.func.count(Dataset.id)],
                Dataset.id.in_(ds_ids),
                group_by=Dataset.category,
                order_by=db.func.count(Dataset.id).desc())
            c.category_options = [{'category': category,
                                   'count': count,
                                   'url': h.url_for(controller='dataset',
                                        action='index', category=category),
                                   'label': CATEGORIES.get(category, category)} \
                for (category, count) in db.session.bind.execute(q).fetchall() \
                if category is not None]
        else:
            c.category_options = []

        c._must_revalidate = True
        if len(c.results):
            dt = max([r.updated_at for r in c.results])
            etag_cache_keygen(dt)

        if format == 'json':
            results = map(lambda d: d.as_dict(), c.results)
            results = [dataset_apply_links(r) for r in results]
            return to_jsonp({
                'datasets': results,
                'categories': c.category_options,
                'territories': c.territory_options,
                'languages': c.language_options
            })
        elif format == 'csv':
            results = map(lambda d: d.as_dict(), c.results)
            return write_csv(results, response)
        c.show_rss = True
        return templating.render('dataset/index.html')
Code Example #31
    def search(self):
        parser = SearchParamParser(request.params)
        params, errors = parser.parse()

        if errors:
            response.status = 400
            return to_jsonp({'errors': errors})

        expand_facets = params.pop('expand_facet_dimensions')

        format = params.pop('format')
        if format == 'csv':
            params['stats'] = False
            params['facet_field'] = None

        datasets = params.pop('dataset', None)
        if datasets is None or not datasets:
            q = Dataset.all_by_account(c.account)
            if params.get('category'):
                q = q.filter_by(category=params.pop('category'))
            datasets = q.all()
            expand_facets = False

        if not datasets:
            return {'errors': ["No dataset available."]}

        params['filter']['dataset'] = []
        for dataset in datasets:
            require.dataset.read(dataset)
            params['filter']['dataset'].append(dataset.name)

        response.last_modified = max([d.updated_at for d in datasets])
        etag_cache_keygen(parser.key(), response.last_modified)

        if params['pagesize'] > parser.defaults['pagesize']:

            # http://wiki.nginx.org/X-accel#X-Accel-Buffering
            response.headers['X-Accel-Buffering'] = 'no'

            if format == 'csv':
                csv_headers(response, 'entries.csv')
                streamer = CSVStreamingResponse(
                    datasets, params, pagesize=parser.defaults['pagesize'])
                return streamer.response()
            else:
                json_headers(filename='entries.json')
                streamer = JSONStreamingResponse(
                    datasets,
                    params,
                    pagesize=parser.defaults['pagesize'],
                    expand_facets=util.expand_facets
                    if expand_facets else None,
                    callback=request.params.get('callback'))
                return streamer.response()

        solr_browser = Browser(**params)
        try:
            solr_browser.execute()
        except SolrException as e:
            return {'errors': [unicode(e)]}

        entries = []
        for dataset, entry in solr_browser.get_entries():
            entry = entry_apply_links(dataset.name, entry)
            entry['dataset'] = dataset_apply_links(dataset.as_dict())
            entries.append(entry)

        if format == 'csv':
            return write_csv(entries, response, filename='entries.csv')

        if expand_facets and len(datasets) == 1:
            facets = solr_browser.get_expanded_facets(datasets[0])
        else:
            facets = solr_browser.get_facets()

        return to_jsonp({
            'stats': solr_browser.get_stats(),
            'facets': facets,
            'results': entries
        })
Code Example #32
    def aggregate(self):
        """
        Aggregation of a dataset based on URL parameters. It serves the
        aggregation from a cache if possible; if not, it computes it (for
        some reason the computation itself is performed in the aggregation
        cache).
        """

        # Parse the aggregation parameters to get them into the right format
        parser = AggregateParamParser(request.params)
        params, errors = parser.parse()

        # If there were parsing errors we return them with status code 400
        # as jsonp, irrespective of what format was asked for.
        if errors:
            response.status = 400
            return to_jsonp({'errors': errors})

        # URL parameters are always singular nouns but we work with some
        # as plural nouns so we pop them into the plural version
        params['cuts'] = params.pop('cut')
        params['drilldowns'] = params.pop('drilldown')
        params['measures'] = params.pop('measure')

        # Get the dataset and the format and remove from the parameters
        dataset = params.pop('dataset')
        format = params.pop('format')

        # User must have the right to read the dataset to perform aggregation
        require.dataset.read(dataset)

        # Create response headers from the parameters
        self._response_params(params)

        try:
            # Create an aggregation cache for the dataset and aggregate its
            # results. The cache will perform the aggregation if it doesn't
            # have a cached result
            cache = AggregationCache(dataset)
            result = cache.aggregate(**params)

            # If the result has drilldown we create html_url values for its
            # dimensions (linked data).
            if 'drilldown' in result:
                result['drilldown'] = drilldowns_apply_links(
                    dataset.name, result['drilldown'])

            # Do the ETag caching based on the cache_key in the summary.
            # This is a weird place to do it since the heavy lifting has
            # already been performed above. TODO: Needs rethinking.
            response.last_modified = dataset.updated_at
            if cache.cache_enabled and 'cache_key' in result['summary']:
                etag_cache(result['summary']['cache_key'])

        except (KeyError, ValueError) as ve:
            # We log possible errors and return them with status code 400
            log.exception(ve)
            response.status = 400
            return to_jsonp({'errors': [unicode(ve)]})

        # If the requested format is csv we write the drilldown results into
        # a csv file and return it; if not, we return a jsonp result (default)
        if format == 'csv':
            return write_csv(result['drilldown'],
                             response,
                             filename=dataset.name + '.csv')
        return to_jsonp(result)
Code Example #33
File: dataset.py Project: AlbertoPeon/openspending
    def index(self, format='html'):
        c.query = request.params.items()
        c.add_filter = lambda f, v: '?' + urlencode(c.query +
                [(f, v)] if (f, v) not in c.query else c.query)
        c.del_filter = lambda f, v: '?' + urlencode([(k, x) for k, x in
            c.query if (k, x) != (f, v)])
        c.results = c.datasets
        for language in request.params.getall('languages'):
            l = db.aliased(DatasetLanguage)
            c.results = c.results.join(l, Dataset._languages)
            c.results = c.results.filter(l.code == language)
        for territory in request.params.getall('territories'):
            t = db.aliased(DatasetTerritory)
            c.results = c.results.join(t, Dataset._territories)
            c.results = c.results.filter(t.code == territory)
        category = request.params.get('category')
        if category:
            c.results = c.results.filter(Dataset.category == category)

        c.results = list(c.results)
        c.territory_options = [{'code': code,
                                'count': count,
                                'url': h.url_for(controller='dataset',
                                    action='index', territories=code),
                                'label': COUNTRIES.get(code, code)} \
            for (code, count) in DatasetTerritory.dataset_counts(c.results)]
        c.language_options = [{'code': code,
                               'count': count,
                               'url': h.url_for(controller='dataset',
                                    action='index', languages=code),
                               'label': LANGUAGES.get(code, code)} \
            for (code, count) in DatasetLanguage.dataset_counts(c.results)]

        # TODO: figure out where to put this:
        ds_ids = [d.id for d in c.results]
        if len(ds_ids):
            q = db.select([Dataset.category, db.func.count(Dataset.id)],
                Dataset.id.in_(ds_ids), group_by=Dataset.category,
                order_by=db.func.count(Dataset.id).desc())
            c.category_options = [{'category': category,
                                   'count': count,
                                   'url': h.url_for(controller='dataset',
                                        action='index', category=category),
                                   'label': CATEGORIES.get(category, category)} \
                for (category, count) in db.session.bind.execute(q).fetchall() \
                if category is not None]
        else:
            c.category_options = []

        c._must_revalidate = True
        if len(c.results):
            dt = max([r.updated_at for r in c.results])
            etag_cache_keygen(dt)

        if format == 'json':
            results = map(lambda d: d.as_dict(), c.results)
            results = [dataset_apply_links(r) for r in results]
            return to_jsonp({
                'datasets': results,
                'categories': c.category_options,
                'territories': c.territory_options,
                'languages': c.language_options
                })
        elif format == 'csv':
            results = map(lambda d: d.as_dict(), c.results)
            return write_csv(results, response)
        c.show_rss = True
        return templating.render('dataset/index.html')
Code Example #34
File: api2.py Project: tomjrees/openspending
class Api2Controller(BaseController):
    def _response_params(self, params):
        for k, v in params.items():
            k = k.replace('_', ' ').replace('-', ' ').split()
            k = '-'.join(['X'] + [l.capitalize() for l in k])
            response.headers[k] = unicode(v).encode('ascii', 'ignore')

    def aggregate(self):
        parser = AggregateParamParser(request.params)
        params, errors = parser.parse()

        if errors:
            response.status = 400
            return to_jsonp({'errors': errors})

        params['cuts'] = params.pop('cut')
        params['drilldowns'] = params.pop('drilldown')
        dataset = params.pop('dataset')
        format = params.pop('format')
        require.dataset.read(dataset)
        self._response_params(params)

        try:
            cache = AggregationCache(dataset)
            result = cache.aggregate(**params)
            if 'drilldown' in result:
                result['drilldown'] = drilldowns_apply_links(
                    dataset.name, result['drilldown'])

            response.last_modified = dataset.updated_at
            if cache.cache_enabled and 'cache_key' in result['summary']:
                etag_cache(result['summary']['cache_key'])

        except (KeyError, ValueError) as ve:
            log.exception(ve)
            response.status = 400
            return to_jsonp({'errors': [unicode(ve)]})

        if format == 'csv':
            return write_csv(result['drilldown'],
                             response,
                             filename=dataset.name + '.csv')
        return to_jsonp(result)

    def search(self):
        parser = SearchParamParser(request.params)
        params, errors = parser.parse()

        if errors:
            response.status = 400
            return to_jsonp({'errors': errors})

        expand_facets = params.pop('expand_facet_dimensions')

        format = params.pop('format')
        if format == 'csv':
            params['stats'] = False
            params['facet_field'] = None

        datasets = params.pop('dataset', None)
        if datasets is None or not datasets:
            q = model.Dataset.all_by_account(c.account)
            if params.get('category'):
                q = q.filter_by(category=params.pop('category'))
            datasets = q.all()
            expand_facets = False

        if not datasets:
            return {'errors': ["No dataset available."]}

        params['filter']['dataset'] = []
        for dataset in datasets:
            require.dataset.read(dataset)
            params['filter']['dataset'].append(dataset.name)

        response.last_modified = max([d.updated_at for d in datasets])
        etag_cache_keygen(parser.key(), response.last_modified)

        self._response_params(params)

        if params['pagesize'] > parser.defaults['pagesize']:

            # http://wiki.nginx.org/X-accel#X-Accel-Buffering
            response.headers['X-Accel-Buffering'] = 'no'

            if format == 'csv':
                csv_headers(response, 'entries.csv')
                streamer = CSVStreamingResponse(
                    datasets, params, pagesize=parser.defaults['pagesize'])
                return streamer.response()
            else:
                json_headers(filename='entries.json')
                streamer = JSONStreamingResponse(
                    datasets,
                    params,
                    pagesize=parser.defaults['pagesize'],
                    expand_facets=_expand_facets if expand_facets else None,
                    callback=request.params.get('callback'))
                return streamer.response()

        b = Browser(**params)
        try:
            b.execute()
        except SolrException as e:
            return {'errors': [unicode(e)]}

        stats, facets, entries = b.get_stats(), b.get_facets(), b.get_entries()

        _entries = []
        for dataset, entry in entries:
            entry = entry_apply_links(dataset.name, entry)
            entry['dataset'] = dataset_apply_links(dataset.as_dict())
            _entries.append(entry)

        if format == 'csv':
            return write_csv(_entries, response, filename='entries.csv')

        if expand_facets and len(datasets) == 1:
            _expand_facets(facets, datasets[0])

        return to_jsonp({
            'stats': stats,
            'facets': facets,
            'results': _entries
        })
Code Example #35
File: entry.py Project: smellman/openspending
    def view(self, dataset, id, format='html'):
        """
        Get a specific entry in the dataset, identified by the id. The entry
        can be returned as html (default), json or csv.
        """

        # Generate the dataset
        self._get_dataset(dataset)
        # Get the entry that matches the given id. c.dataset.entries is
        # a generator, so we create a list from its results based on the
        # given constraint
        entries = list(c.dataset.entries(c.dataset.alias.c.id == id))
        # Since we're trying to get a single entry the list should only
        # contain one entry; if not, we return an error
        if not len(entries) == 1:
            abort(404, _('Sorry, there is no entry %r') % id)
        # Add urls to the entry and assign it as a context variable
        c.entry = entry_apply_links(dataset, entries.pop())

        # Get and set some context variables from the entry
        # This shouldn't really be necessary but it's here so nothing gets
        # broken
        c.id = c.entry.get('id')
        c.from_ = c.entry.get('from')
        c.to = c.entry.get('to')
        c.currency = c.entry.get('currency', c.dataset.currency).upper()
        c.time = c.entry.get('time')

        # Get the amount for the entry
        amount = c.entry.get('amount')
        # We adjust for inflation if the user has asked for this to be inflated
        if 'inflate' in request.params:
            try:
                # Inflate the amount. Target date is provided in request.params
                # as value for inflate and reference date is the date of the
                # entry. We also provide a list of the territories to extract
                # a single country for which to do the inflation
                c.inflation = h.inflate(amount, request.params['inflate'],
                                        c.time, c.dataset.territories)

                # The amount to show should be the inflated amount
                # and overwrite the entry's amount as well
                c.amount = c.inflation['inflated']
                c.entry['amount'] = c.inflation['inflated']

                # We include the inflation response in the entry's dict
                # HTML description assumes every dict value for the entry
                # includes a label so we include a default "Inflation
                # adjustment" for it to work.
                c.inflation['label'] = 'Inflation adjustment'
                c.entry['inflation_adjustment'] = c.inflation
            except Exception:
                # If anything goes wrong in the try clause (and there's a lot
                # that can go wrong), we just say that we can't adjust for
                # inflation and set the context amount to the original amount
                h.flash_notice(_('Unable to adjust for inflation'))
                c.amount = amount
        else:
            # If we haven't been asked to inflate then we just use the
            # original amount
            c.amount = amount

        # Add custom html for the dataset entry if the dataset has some
        # custom html
        # 2013-11-17 disabled this as part of removal of genshi as depended on
        # a genshi specific helper.
        # TODO: reinstate if important
        # c.custom_html = h.render_entry_custom_html(c.dataset, c.entry)

        # Add the rest of the dimensions relating to this entry into an
        # extras dictionary. We first need to exclude all dimensions that
        # are already shown and then we can loop through the dimensions
        excluded_keys = ('time', 'amount', 'currency', 'from', 'to', 'dataset',
                         'id', 'name', 'description')

        c.extras = {}
        if c.dataset:
            # Create a dictionary of the dataset dimensions
            c.desc = dict([(d.name, d) for d in c.dataset.dimensions])
            # Loop through dimensions of the entry
            for key in c.entry:
                # Entry dimension must be a dataset dimension and not in
                # the predefined excluded keys
                if key in c.desc and \
                        key not in excluded_keys:
                    c.extras[key] = c.entry[key]

        # Return the entry in the requested format
        if format == 'json':
            return to_jsonp(c.entry)
        elif format == 'csv':
            return write_csv([c.entry], response)
        else:
            return templating.render('entry/view.html')