Example #1
    def view(self, dataset, id, format='html'):
        self._get_dataset(dataset)
        entries = list(c.dataset.entries(c.dataset.alias.c.id == id))
        if len(entries) != 1:
            abort(404, _('Sorry, there is no entry %r') % id)
        c.entry = entry_apply_links(dataset, entries.pop())

        c.id = c.entry.get('id')
        c.from_ = c.entry.get('from')
        c.to = c.entry.get('to')
        c.currency = c.entry.get('currency', c.dataset.currency).upper()
        c.amount = c.entry.get('amount')
        c.time = c.entry.get('time')

        c.custom_html = h.render_entry_custom_html(c.dataset,
                                                   c.entry)

        excluded_keys = ('time', 'amount', 'currency', 'from',
                         'to', 'dataset', 'id', 'name', 'description')

        c.extras = {}
        if c.dataset:
            c.desc = dict([(d.name, d) for d in c.dataset.dimensions])
            for key in c.entry:
                if key in c.desc and key not in excluded_keys:
                    c.extras[key] = c.entry[key]

        if format == 'json':
            return to_jsonp(c.entry)
        elif format == 'csv':
            return write_csv([c.entry], response)
        else:
            return render('entry/view.html')
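
The extras block above is easiest to read as a small filter over the entry dict. A minimal standalone sketch under that reading; collect_extras is a hypothetical name, not part of the source:

    # Keep only entry keys that are dataset dimensions and are not
    # already shown elsewhere on the page; collect_extras is a
    # hypothetical helper name, not part of the source.
    def collect_extras(entry, dimensions, excluded_keys):
        desc = dict([(d.name, d) for d in dimensions])
        return dict([(k, entry[k]) for k in entry
                     if k in desc and k not in excluded_keys])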
Example #2
    def view(self, dataset, id, format="html"):
        self._get_dataset(dataset)
        entries = list(c.dataset.entries(c.dataset.alias.c.id == id))
        if len(entries) != 1:
            abort(404, _("Sorry, there is no entry %r") % id)
        c.entry = entry_apply_links(dataset, entries.pop())

        c.id = c.entry.get("id")
        c.from_ = c.entry.get("from")
        c.to = c.entry.get("to")
        c.currency = c.entry.get("currency", c.dataset.currency).upper()
        c.amount = c.entry.get("amount")
        c.time = c.entry.get("time")

        c.custom_html = h.render_entry_custom_html(c.dataset, c.entry)

        excluded_keys = ("time", "amount", "currency", "from", "to", "dataset", "id", "name", "description")

        c.extras = {}
        if c.dataset:
            c.desc = dict([(d.name, d) for d in c.dataset.dimensions])
            for key in c.entry:
                if key in c.desc and key not in excluded_keys:
                    c.extras[key] = c.entry[key]

        if format == "json":
            return to_jsonp(c.entry)
        elif format == "csv":
            return write_csv([c.entry], response)
        else:
            return render("entry/view.html")
Example #3
    def view(self, dataset, id, format='html'):
        self._get_dataset(dataset)
        entries = list(c.dataset.entries(c.dataset.alias.c.id == id))
        if len(entries) != 1:
            abort(404, _('Sorry, there is no entry %r') % id)
        c.entry = entry_apply_links(dataset, entries.pop())

        c.id = c.entry.get('id')
        c.from_ = c.entry.get('from')
        c.to = c.entry.get('to')
        c.currency = c.entry.get('currency', c.dataset.currency).upper()
        c.amount = c.entry.get('amount')
        c.time = c.entry.get('time')

        c.custom_html = h.render_entry_custom_html(c.dataset, c.entry)

        excluded_keys = ('time', 'amount', 'currency', 'from', 'to', 'dataset',
                         'id', 'name', 'description')

        c.extras = {}
        if c.dataset:
            c.desc = dict([(d.name, d) for d in c.dataset.dimensions])
            for key in c.entry:
                if key in c.desc and key not in excluded_keys:
                    c.extras[key] = c.entry[key]

        if format == 'json':
            return to_jsonp(c.entry)
        elif format == 'csv':
            return write_csv([c.entry], response)
        else:
            return templating.render('entry/view.html')
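
Examples #1 to #3 repeat the same fetch-one-or-404 pattern. If that pattern were factored out it could look like the sketch below; get_entry_or_abort is a hypothetical name, while abort() and _() are the Pylons helpers already used above:

    # Hypothetical shared helper for the lookup-or-404 pattern repeated
    # in Examples #1-#3; abort() and _() are the Pylons helpers above.
    def get_entry_or_abort(dataset_obj, id):
        entries = list(dataset_obj.entries(dataset_obj.alias.c.id == id))
        if len(entries) != 1:
            abort(404, _('Sorry, there is no entry %r') % id)
        return entries.pop()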
Example #4
    def search(self):
        parser = SearchParamParser(request.params)
        params, errors = parser.parse()

        if errors:
            response.status = 400
            return {'errors': errors}

        expand_facets = params.pop('expand_facet_dimensions')

        datasets = params.pop('dataset', None)
        if datasets is None:
            datasets = model.Dataset.all_by_account(c.account)
            expand_facets = False

        for dataset in datasets:
            require.dataset.read(dataset)

        b = Browser(**params)
        stats, facets, entries = b.execute()
        entries = [entry_apply_links(d.name, e) for d, e in entries]

        if expand_facets and len(datasets) == 1:
            _expand_facets(facets, datasets[0])

        return {
            'stats': stats,
            'facets': facets,
            'results': entries
        }
Example #5
    def entries(self, dataset, dimension, name, format='html'):
        self._get_member(dataset, dimension, name)
        if format in ['json', 'csv']:
            return redirect(url_for(controller='api2', action='search',
                format=format, dataset=dataset,
                filter='%s.name:%s' % (dimension, name),
                **request.params))

        handle_request(request, c, c.member, c.dimension.name)
        entries = c.dataset.entries(c.dimension.alias.c.name == c.member['name'])
        entries = (entry_apply_links(dataset, e) for e in entries)
        return render('dimension/entries.html')
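
For json and csv this method renders nothing itself; it redirects to the search API with a per-dimension filter string. With illustrative values (the real ones come from the request path), the filter argument works out as:

    # Illustrative values only; the real names come from the request.
    dimension, name = 'region', 'north-west'
    assert '%s.name:%s' % (dimension, name) == 'region.name:north-west'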
Example #6
    def entries(self, dataset, dimension, name, format="html"):
        self._get_member(dataset, dimension, name)

        handle_request(request, c, c.member, c.dimension.name)

        entries = c.dataset.entries(c.dimension.alias.c.name == c.member["name"])
        entries = (entry_apply_links(dataset, e) for e in entries)
        attachment_name = "__".join([dataset, dimension, name])

        if format == "json":
            return write_json(entries, response, filename=attachment_name + ".json")
        elif format == "csv":
            return write_csv(entries, response, filename=attachment_name + ".csv")
        else:
            return render("dimension/entries.html")
Example #7
    def entries(self, dataset, dimension, name, format='html'):
        self._get_member(dataset, dimension, name)

        handle_request(request, c, c.member, c.dimension.name)

        entries = c.dataset.entries(c.dimension.alias.c.name == c.member['name'])
        entries = (entry_apply_links(dataset, e) for e in entries)
        attachment_name = '__'.join([dataset, dimension, name])

        if format == 'json':
            return write_json(entries, response, filename=attachment_name + '.json')
        elif format == 'csv':
            return write_csv(entries, response, filename=attachment_name + '.csv')
        else:
            return render('dimension/entries.html')
Example #8
    def entries(self, dataset, dimension, name, format='html'):
        self._get_member(dataset, dimension, name)
        if format in ['json', 'csv']:
            return redirect(
                url_for(controller='api/version2',
                        action='search',
                        format=format,
                        dataset=dataset,
                        filter='%s.name:%s' % (dimension, name),
                        **request.params))

        handle_request(request, c, c.member, c.dimension.name)
        entries = c.dataset.entries(
            c.dimension.alias.c.name == c.member['name'])
        entries = (entry_apply_links(dataset, e) for e in entries)
        return templating.render('dimension/entries.html')
Example #9
    def entries(self, dataset, dimension, name, format="html"):
        self._get_member(dataset, dimension, name)
        if format in ["json", "csv"]:
            return redirect(
                url_for(
                    controller="api2",
                    action="search",
                    format=format,
                    dataset=dataset,
                    filter="%s.name:%s" % (dimension, name),
                    **request.params
                )
            )

        handle_request(request, c, c.member, c.dimension.name)
        entries = c.dataset.entries(c.dimension.alias.c.name == c.member["name"])
        entries = (entry_apply_links(dataset, e) for e in entries)
        return templating.render("dimension/entries.html")
Example #10
    def entries(self, dataset, dimension, name, format='html'):
        self._get_member(dataset, dimension, name)

        handle_request(request, c, c.member, c.dimension.name)

        entries = c.dataset.entries(
            c.dimension.alias.c.name == c.member['name'])
        entries = (entry_apply_links(dataset, e) for e in entries)
        attachment_name = '__'.join([dataset, dimension, name])

        if format == 'json':
            return write_json(entries,
                              response,
                              filename=attachment_name + '.json')
        elif format == 'csv':
            return write_csv(entries,
                             response,
                             filename=attachment_name + '.csv')
        else:
            return render('dimension/entries.html')
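
Examples #6, #7 and #10 derive the download filename by joining the path pieces with double underscores. With illustrative values:

    # Illustrative values; the real ones are the request's path parts.
    attachment_name = '__'.join(['cra', 'region', 'north-west'])
    assert attachment_name + '.csv' == 'cra__region__north-west.csv'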
Example #11
class Api2Controller(BaseController):
    def _response_params(self, params):
        for k, v in params.items():
            k = k.replace('_', ' ').replace('-', ' ').split()
            k = '-'.join(['X'] + [l.capitalize() for l in k])
            response.headers[k] = unicode(v).encode('ascii', 'ignore')

    def aggregate(self):
        parser = AggregateParamParser(request.params)
        params, errors = parser.parse()

        if errors:
            response.status = 400
            return to_jsonp({'errors': errors})

        params['cuts'] = params.pop('cut')
        params['drilldowns'] = params.pop('drilldown')
        dataset = params.pop('dataset')
        format = params.pop('format')
        require.dataset.read(dataset)
        self._response_params(params)

        try:
            cache = AggregationCache(dataset)
            result = cache.aggregate(**params)
            if 'drilldown' in result:
                result['drilldown'] = drilldowns_apply_links(
                    dataset.name, result['drilldown'])

            response.last_modified = dataset.updated_at
            if cache.cache_enabled and 'cache_key' in result['summary']:
                etag_cache(result['summary']['cache_key'])

        except (KeyError, ValueError) as ve:
            log.exception(ve)
            response.status = 400
            return to_jsonp({'errors': [unicode(ve)]})

        if format == 'csv':
            return write_csv(result['drilldown'],
                             response,
                             filename=dataset.name + '.csv')
        return to_jsonp(result)

    def search(self):
        parser = SearchParamParser(request.params)
        params, errors = parser.parse()

        if errors:
            response.status = 400
            return to_jsonp({'errors': errors})

        expand_facets = params.pop('expand_facet_dimensions')

        format = params.pop('format')
        if format == 'csv':
            params['stats'] = False
            params['facet_field'] = None

        datasets = params.pop('dataset', None)
        if not datasets:
            q = model.Dataset.all_by_account(c.account)
            if params.get('category'):
                q = q.filter_by(category=params.pop('category'))
            datasets = q.all()
            expand_facets = False

        if not datasets:
            return {'errors': ["No dataset available."]}

        params['filter']['dataset'] = []
        for dataset in datasets:
            require.dataset.read(dataset)
            params['filter']['dataset'].append(dataset.name)

        response.last_modified = max([d.updated_at for d in datasets])
        etag_cache_keygen(parser.key(), response.last_modified)

        self._response_params(params)

        if params['pagesize'] > parser.defaults['pagesize']:

            # http://wiki.nginx.org/X-accel#X-Accel-Buffering
            response.headers['X-Accel-Buffering'] = 'no'

            if format == 'csv':
                csv_headers(response, 'entries.csv')
                streamer = CSVStreamingResponse(
                    datasets, params, pagesize=parser.defaults['pagesize'])
                return streamer.response()
            else:
                json_headers(filename='entries.json')
                streamer = JSONStreamingResponse(
                    datasets,
                    params,
                    pagesize=parser.defaults['pagesize'],
                    expand_facets=_expand_facets if expand_facets else None,
                    callback=request.params.get('callback'))
                return streamer.response()

        b = Browser(**params)
        try:
            b.execute()
        except SolrException as e:
            return {'errors': [unicode(e)]}

        stats, facets, entries = b.get_stats(), b.get_facets(), b.get_entries()

        _entries = []
        for dataset, entry in entries:
            entry = entry_apply_links(dataset.name, entry)
            entry['dataset'] = dataset_apply_links(dataset.as_dict())
            _entries.append(entry)

        if format == 'csv':
            return write_csv(_entries, response, filename='entries.csv')

        if expand_facets and len(datasets) == 1:
            _expand_facets(facets, datasets[0])

        return to_jsonp({
            'stats': stats,
            'facets': facets,
            'results': _entries
        })
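
The _response_params helper at the top of Example #11 echoes query parameters back as X- response headers. Its name mangling can be checked in isolation; param_to_header below is a hypothetical standalone version (the unicode() call in the original marks this as Python 2 code):

    # Hypothetical standalone version of the header-name mangling in
    # _response_params: 'expand_facet_dimensions' -> 'X-Expand-Facet-Dimensions'.
    def param_to_header(k):
        parts = k.replace('_', ' ').replace('-', ' ').split()
        return '-'.join(['X'] + [p.capitalize() for p in parts])

    assert param_to_header('expand_facet_dimensions') == 'X-Expand-Facet-Dimensions'
    assert param_to_header('page-size') == 'X-Page-Size'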
Example #12
class Api2Controller(BaseController):
    def aggregate(self):
        parser = AggregateParamParser(request.params)
        params, errors = parser.parse()

        if errors:
            response.status = 400
            return {'errors': errors}

        params['cuts'] = params.pop('cut')
        params['drilldowns'] = params.pop('drilldown')
        dataset = params.pop('dataset')
        format = params.pop('format')
        require.dataset.read(dataset)

        try:
            cache = AggregationCache(dataset)
            result = cache.aggregate(**params)
            if 'drilldown' in result:
                result['drilldown'] = drilldowns_apply_links(
                    dataset.name, result['drilldown'])

            response.last_modified = dataset.updated_at
            if cache.cache_enabled and 'cache_key' in result['summary']:
                etag_cache(result['summary']['cache_key'])

        except (KeyError, ValueError) as ve:
            log.exception(ve)
            response.status = 400
            return {'errors': ['Invalid aggregation query: %r' % ve]}

        if format == 'csv':
            return write_csv(result['drilldown'],
                             response,
                             filename=dataset.name + '.csv')
        return to_jsonp(result)

    @jsonpify
    def search(self):
        parser = SearchParamParser(request.params)
        params, errors = parser.parse()

        if errors:
            response.status = 400
            return {'errors': errors}

        expand_facets = params.pop('expand_facet_dimensions')

        datasets = params.pop('dataset', None)
        if not datasets:
            q = model.Dataset.all_by_account(c.account)
            if params.get('category'):
                q = q.filter_by(category=params.pop('category'))
            datasets = q.all()
            expand_facets = False

        if not datasets:
            return {'errors': [_("No dataset available.")]}

        params['filter']['dataset'] = []
        for dataset in datasets:
            require.dataset.read(dataset)
            params['filter']['dataset'].append(dataset.name)

        response.last_modified = max([d.updated_at for d in datasets])
        etag_cache_keygen(parser.key(), response.last_modified)

        b = Browser(**params)
        try:
            stats, facets, entries = b.execute()
        except SolrException as e:
            return {'errors': [unicode(e)]}

        _entries = []
        for dataset, entry in entries:
            entry = entry_apply_links(dataset.name, entry)
            entry['dataset'] = dataset_apply_links(dataset.as_dict())
            _entries.append(entry)

        if expand_facets and len(datasets) == 1:
            _expand_facets(facets, datasets[0])

        return {'stats': stats, 'facets': facets, 'results': _entries}
Example #13
    def search(self):
        parser = SearchParamParser(request.params)
        params, errors = parser.parse()

        if errors:
            response.status = 400
            return to_jsonp({'errors': errors})

        expand_facets = params.pop('expand_facet_dimensions')

        format = params.pop('format')
        if format == 'csv':
            params['stats'] = False
            params['facet_field'] = None

        datasets = params.pop('dataset', None)
        if not datasets:
            q = Dataset.all_by_account(c.account)
            if params.get('category'):
                q = q.filter_by(category=params.pop('category'))
            datasets = q.all()
            expand_facets = False

        if not datasets:
            return {'errors': ["No dataset available."]}

        params['filter']['dataset'] = []
        for dataset in datasets:
            require.dataset.read(dataset)
            params['filter']['dataset'].append(dataset.name)

        response.last_modified = max([d.updated_at for d in datasets])
        etag_cache_keygen(parser.key(), response.last_modified)

        if params['pagesize'] > parser.defaults['pagesize']:

            # http://wiki.nginx.org/X-accel#X-Accel-Buffering
            response.headers['X-Accel-Buffering'] = 'no'

            if format == 'csv':
                csv_headers(response, 'entries.csv')
                streamer = CSVStreamingResponse(
                    datasets, params, pagesize=parser.defaults['pagesize'])
                return streamer.response()
            else:
                json_headers(filename='entries.json')
                streamer = JSONStreamingResponse(
                    datasets,
                    params,
                    pagesize=parser.defaults['pagesize'],
                    expand_facets=util.expand_facets
                    if expand_facets else None,
                    callback=request.params.get('callback'))
                return streamer.response()

        solr_browser = Browser(**params)
        try:
            solr_browser.execute()
        except SolrException as e:
            return {'errors': [unicode(e)]}

        entries = []
        for dataset, entry in solr_browser.get_entries():
            entry = entry_apply_links(dataset.name, entry)
            entry['dataset'] = dataset_apply_links(dataset.as_dict())
            entries.append(entry)

        if format == 'csv':
            return write_csv(entries, response, filename='entries.csv')

        if expand_facets and len(datasets) == 1:
            facets = solr_browser.get_expanded_facets(datasets[0])
        else:
            facets = solr_browser.get_facets()

        return to_jsonp({
            'stats': solr_browser.get_stats(),
            'facets': facets,
            'results': entries
        })
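
For the non-streaming path, the JSON body assembled at the end of Example #13 has a fixed envelope. A sketch of its shape, with placeholder values rather than real output:

    # Illustrative envelope of the non-streaming response; values are
    # placeholders, not real output.
    example_response = {
        'stats': {},    # aggregate statistics from the Solr query
        'facets': {},   # facet counts, expanded when a single dataset is queried
        'results': [],  # entries with links and dataset metadata applied
    }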
Example #14
    def view(self, dataset, id, format='html'):
        """
        Get a specific entry in the dataset, identified by the id. The
        entry can be returned as html (default), json or csv.
        """

        # Generate the dataset
        self._get_dataset(dataset)
        # Get the entry that matches the given id. c.dataset.entries is
        # a generator, so we create a list from its results based on the
        # given constraint.
        entries = list(c.dataset.entries(c.dataset.alias.c.id == id))
        # Since we're trying to get a single entry, the list should
        # contain exactly one item; if not, we return an error.
        if len(entries) != 1:
            abort(404, _('Sorry, there is no entry %r') % id)
        # Add urls to the entry and assign it as a context variable
        c.entry = entry_apply_links(dataset, entries.pop())

        # Get and set some context variables from the entry
        # This shouldn't really be necessary but it's here so nothing gets
        # broken
        c.id = c.entry.get('id')
        c.from_ = c.entry.get('from')
        c.to = c.entry.get('to')
        c.currency = c.entry.get('currency', c.dataset.currency).upper()
        c.time = c.entry.get('time')

        # Get the amount for the entry
        amount = c.entry.get('amount')
        # We adjust for inflation if the user has asked for it
        if 'inflate' in request.params:
            try:
                # Inflate the amount. Target date is provided in request.params
                # as value for inflate and reference date is the date of the
                # entry. We also provide a list of the territories to extract
                # a single country for which to do the inflation
                c.inflation = h.inflate(amount, request.params['inflate'],
                                        c.time, c.dataset.territories)

                # The amount to show should be the inflated amount
                # and overwrite the entry's amount as well
                c.amount = c.inflation['inflated']
                c.entry['amount'] = c.inflation['inflated']

                # We include the inflation response in the entry's dict
                # HTML description assumes every dict value for the entry
                # includes a label so we include a default "Inflation
                # adjustment" for it to work.
                c.inflation['label'] = 'Inflation adjustment'
                c.entry['inflation_adjustment'] = c.inflation
            except Exception:
                # If anything goes wrong in the try clause (and there's a lot
                # that can go wrong), we just say that we can't adjust for
                # inflation and set the context amount to the original amount.
                h.flash_notice(_('Unable to adjust for inflation'))
                c.amount = amount
        else:
            # If we haven't been asked to inflate then we just use the
            # original amount
            c.amount = amount

        # Add custom html for the dataset entry if the dataset has some
        # custom html
        # 2013-11-17 disabled this as part of removal of genshi as depended on
        # a genshi specific helper.
        # TODO: reinstate if important
        # c.custom_html = h.render_entry_custom_html(c.dataset, c.entry)

        # Add the rest of the dimensions relating to this entry into an
        # extras dictionary. We first need to exclude all dimensions that
        # are already shown and then we can loop through the dimensions
        excluded_keys = ('time', 'amount', 'currency', 'from',
                         'to', 'dataset', 'id', 'name', 'description')

        c.extras = {}
        if c.dataset:
            # Create a dictionary of the dataset dimensions
            c.desc = dict([(d.name, d) for d in c.dataset.dimensions])
            # Loop through dimensions of the entry
            for key in c.entry:
                # Entry dimension must be a dataset dimension and not in
                # the predefined excluded keys
                if key in c.desc and key not in excluded_keys:
                    c.extras[key] = c.entry[key]

        # Return the entry in the requested format
        if format == 'json':
            return to_jsonp(c.entry)
        elif format == 'csv':
            return write_csv([c.entry], response)
        else:
            return templating.render('entry/view.html')
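
The source of h.inflate is not shown here; from its use in Example #14 it evidently takes (amount, target date, reference date, territories) and returns a dict carrying at least an 'inflated' key, to which the controller then attaches a 'label'. A stub that mirrors only that observed shape:

    # Stub mirroring only the observed call shape of h.inflate in
    # Example #14; the real helper derives the factor from price data.
    def inflate_stub(amount, target_date, reference_date, territories):
        factor = 1.0  # placeholder rate; the real derivation is unknown here
        return {'inflated': amount * factor}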
Example #15
    def search(self):
        parser = SearchParamParser(request.params)
        params, errors = parser.parse()

        if errors:
            response.status = 400
            return to_jsonp({'errors': errors})

        expand_facets = params.pop('expand_facet_dimensions')

        format = params.pop('format')
        if format == 'csv':
            params['stats'] = False
            params['facet_field'] = None

        datasets = params.pop('dataset', None)
        if not datasets:
            q = Dataset.all_by_account(c.account)
            if params.get('category'):
                q = q.filter_by(category=params.pop('category'))
            datasets = q.all()
            expand_facets = False

        if not datasets:
            return {'errors': ["No dataset available."]}

        params['filter']['dataset'] = []
        for dataset in datasets:
            require.dataset.read(dataset)
            params['filter']['dataset'].append(dataset.name)

        response.last_modified = max([d.updated_at for d in datasets])
        etag_cache_keygen(parser.key(), response.last_modified)

        if params['pagesize'] > parser.defaults['pagesize']:

            # http://wiki.nginx.org/X-accel#X-Accel-Buffering
            response.headers['X-Accel-Buffering'] = 'no'

            if format == 'csv':
                csv_headers(response, 'entries.csv')
                streamer = CSVStreamingResponse(
                    datasets,
                    params,
                    pagesize=parser.defaults['pagesize']
                )
                return streamer.response()
            else:
                json_headers(filename='entries.json')
                streamer = JSONStreamingResponse(
                    datasets,
                    params,
                    pagesize=parser.defaults['pagesize'],
                    expand_facets=util.expand_facets
                    if expand_facets else None,
                    callback=request.params.get('callback')
                )
                return streamer.response()

        solr_browser = Browser(**params)
        try:
            solr_browser.execute()
        except SolrException as e:
            return {'errors': [unicode(e)]}

        entries = []
        for dataset, entry in solr_browser.get_entries():
            entry = entry_apply_links(dataset.name, entry)
            entry['dataset'] = dataset_apply_links(dataset.as_dict())
            entries.append(entry)

        if format == 'csv':
            return write_csv(entries, response,
                             filename='entries.csv')

        if expand_facets and len(datasets) == 1:
            facets = solr_browser.get_expanded_facets(datasets[0])
        else:
            facets = solr_browser.get_facets()

        return to_jsonp({
            'stats': solr_browser.get_stats(),
            'facets': facets,
            'results': entries
        })
Example #16
    def make_entries(self, entries):
        for dataset, entry in entries:
            entry = entry_apply_links(dataset.name, entry)
            entry['dataset'] = dataset_apply_links(dataset.as_dict())
            yield entry
Example #17
    def view(self, dataset, id, format='html'):
        """
        Get a specific entry in the dataset, identified by the id. The
        entry can be returned as html (default), json or csv.
        """

        # Generate the dataset
        self._get_dataset(dataset)
        # Get the entry that matches the given id. c.dataset.entries is
        # a generator, so we create a list from its results based on the
        # given constraint.
        entries = list(c.dataset.entries(c.dataset.alias.c.id == id))
        # Since we're trying to get a single entry, the list should
        # contain exactly one item; if not, we return an error.
        if len(entries) != 1:
            abort(404, _('Sorry, there is no entry %r') % id)
        # Add urls to the entry and assign it as a context variable
        c.entry = entry_apply_links(dataset, entries.pop())

        # Get and set some context variables from the entry
        # This shouldn't really be necessary but it's here so nothing gets
        # broken
        c.id = c.entry.get('id')
        c.from_ = c.entry.get('from')
        c.to = c.entry.get('to')
        c.currency = c.entry.get('currency', c.dataset.currency).upper()
        c.time = c.entry.get('time')

        # Get the amount for the entry
        amount = c.entry.get('amount')
        # We adjust for inflation if the user has asked for it
        if 'inflate' in request.params:
            try:
                # Inflate the amount. Target date is provided in request.params
                # as value for inflate and reference date is the date of the
                # entry. We also provide a list of the territories to extract
                # a single country for which to do the inflation
                c.inflation = h.inflate(amount, request.params['inflate'],
                                        c.time, c.dataset.territories)

                # The amount to show should be the inflated amount
                # and overwrite the entry's amount as well
                c.amount = c.inflation['inflated']
                c.entry['amount'] = c.inflation['inflated']

                # We include the inflation response in the entry's dict
                # HTML description assumes every dict value for the entry
                # includes a label so we include a default "Inflation
                # adjustment" for it to work.
                c.inflation['label'] = 'Inflation adjustment'
                c.entry['inflation_adjustment'] = c.inflation
            except Exception:
                # If anything goes wrong in the try clause (and there's a lot
                # that can go wrong), we just say that we can't adjust for
                # inflation and set the context amount to the original amount.
                h.flash_notice(_('Unable to adjust for inflation'))
                c.amount = amount
        else:
            # If we haven't been asked to inflate then we just use the
            # original amount
            c.amount = amount

        # Add custom html for the dataset entry if the dataset has some
        # custom html
        # 2013-11-17 disabled this as part of removal of genshi as depended on
        # a genshi specific helper.
        # TODO: reinstate if important
        # c.custom_html = h.render_entry_custom_html(c.dataset, c.entry)

        # Add the rest of the dimensions relating to this entry into an
        # extras dictionary. We first need to exclude all dimensions that
        # are already shown and then we can loop through the dimensions
        excluded_keys = ('time', 'amount', 'currency', 'from', 'to', 'dataset',
                         'id', 'name', 'description')

        c.extras = {}
        if c.dataset:
            # Create a dictionary of the dataset dimensions
            c.desc = dict([(d.name, d) for d in c.dataset.dimensions])
            # Loop through dimensions of the entry
            for key in c.entry:
                # Entry dimension must be a dataset dimension and not in
                # the predefined excluded keys
                if key in c.desc and key not in excluded_keys:
                    c.extras[key] = c.entry[key]

        # Return the entry in the requested format
        if format == 'json':
            return to_jsonp(c.entry)
        elif format == 'csv':
            return write_csv([c.entry], response)
        else:
            return templating.render('entry/view.html')
Example #18
    def make_entries(self, entries):
        for dataset, entry in entries:
            entry = entry_apply_links(dataset.name, entry)
            entry['dataset'] = dataset_apply_links(dataset.as_dict())
            yield entry
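
make_entries (Examples #16 and #18) is the lazy counterpart of the eager _entries loop in Examples #11 and #12: being a generator, it can feed write_csv or a streaming response without materialising every entry first. An illustrative call site, not taken from the source:

    # Illustrative use inside a search() method like the ones above:
    #     entries = self.make_entries(solr_browser.get_entries())
    #     return write_csv(entries, response, filename='entries.csv')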