def aggregate(self):
    """Run an aggregation query over a dataset; emit JSONP or CSV."""
    parser = AggregateParamParser(request.params)
    params, errors = parser.parse()
    if errors:
        response.status = 400
        return to_jsonp({'errors': errors})

    # The parser yields singular keys; the cache expects plural ones.
    params['cuts'] = params.pop('cut')
    params['drilldowns'] = params.pop('drilldown')
    dataset = params.pop('dataset')
    format = params.pop('format')
    require.dataset.read(dataset)
    self._response_params(params)

    try:
        cache = AggregationCache(dataset)
        result = cache.aggregate(**params)
        if 'drilldown' in result:
            result['drilldown'] = drilldowns_apply_links(
                dataset.name, result['drilldown'])
        response.last_modified = dataset.updated_at
        if cache.cache_enabled and 'cache_key' in result['summary']:
            etag_cache(result['summary']['cache_key'])
    except (KeyError, ValueError) as ve:
        log.exception(ve)
        response.status = 400
        return to_jsonp({'errors': [unicode(ve)]})

    if format == 'csv':
        return write_csv(result['drilldown'], response,
                         filename=dataset.name + '.csv')
    return to_jsonp(result)
def permissions(self):
    """Check the current user's permissions for a given dataset.

    This could also be done via a request to the user, but since this
    is not a truly RESTful service it is exposed through the api.
    """
    # Only a single parameter is used, so it is validated inline
    # instead of through a dedicated parameter parser.
    if len(request.params) != 1 or 'dataset' not in request.params:
        return to_jsonp({'error': 'Parameter dataset missing'})

    # Look up the dataset whose permissions are being checked.
    dataset = Dataset.by_name(request.params['dataset'])

    return to_jsonp({
        "create": can.dataset.create() and dataset is None,
        "read": False if dataset is None else can.dataset.read(dataset),
        "update": False if dataset is None else can.dataset.update(dataset),
        "delete": False if dataset is None else can.dataset.delete(dataset),
    })
def complete(self, format='json'):
    """Autocomplete account names; requires a signed-in account."""
    self._disable_cache()
    parser = DistinctParamParser(request.params)
    params, errors = parser.parse()
    if errors:
        response.status = 400
        return {'errors': errors}
    if not c.account:
        response.status = 403
        return to_jsonp({'errors': _("You are not authorized to see that "
                                     "page")})

    # Prefix match against both the short and the full account name.
    pattern = params.get('q') + '%'
    query = db.session.query(Account)
    query = query.filter(db.or_(Account.name.ilike(pattern),
                                Account.fullname.ilike(pattern)))
    count = query.count()
    query = query.limit(params.get('pagesize'))
    query = query.offset(int((params.get('page') - 1) *
                             params.get('pagesize')))
    results = [dict(fullname=x.fullname, name=x.name) for x in list(query)]
    return to_jsonp({'results': results, 'count': count})
def complete(self, format='json'):
    """Suggest accounts whose name or full name starts with ``q``."""
    self._disable_cache()
    parser = DistinctParamParser(request.params)
    params, errors = parser.parse()
    if errors:
        response.status = 400
        return {'errors': errors}

    # Completion is restricted to logged-in users.
    if not c.account:
        response.status = 403
        return to_jsonp(
            {'errors': _("You are not authorized to see that "
                         "page")})

    like_expr = params.get('q') + '%'
    base = db.session.query(Account).filter(
        db.or_(Account.name.ilike(like_expr),
               Account.fullname.ilike(like_expr)))
    count = base.count()
    page_offset = int((params.get('page') - 1) * params.get('pagesize'))
    page = base.limit(params.get('pagesize')).offset(page_offset)
    results = [dict(fullname=acct.fullname, name=acct.name)
               for acct in list(page)]
    return to_jsonp({'results': results, 'count': count})
def permissions(self):
    """Report create/read/update/delete permissions on one dataset.

    Kept as an api call rather than a RESTful resource since only a
    single ``dataset`` parameter is involved.
    """
    # Validate inline -- a full parameter parser is overkill here.
    if len(request.params) != 1 or 'dataset' not in request.params:
        return to_jsonp({'error': 'Parameter dataset missing'})

    dataset = Dataset.by_name(request.params['dataset'])

    if dataset is None:
        # Unknown dataset: only creation may be permitted.
        return to_jsonp({"create": can.dataset.create(),
                         "read": False,
                         "update": False,
                         "delete": False})
    return to_jsonp({"create": False,
                     "read": can.dataset.read(dataset),
                     "update": can.dataset.update(dataset),
                     "delete": can.dataset.delete(dataset)})
def load_with_model_and_csv(self, metadata, csv_file, private):
    """ Load a dataset using a metadata model file and a csv file.

    ``metadata`` is a URL to a JSON model, ``csv_file`` a URL to the
    data; ``private`` sets the visibility of a newly created dataset.
    Returns the linked dataset dict as JSONP, or an error payload with
    HTTP 400 on invalid input.
    """
    if metadata is None:
        response.status = 400
        return to_jsonp({'errors': 'metadata is missing'})

    if csv_file is None:
        response.status = 400
        return to_jsonp({'errors': 'csv_file is missing'})

    # Fetch and parse the model. Catch Exception instead of a bare
    # except so SystemExit/KeyboardInterrupt still propagate, and close
    # the HTTP response handle even if JSON parsing fails.
    try:
        fh = urllib2.urlopen(metadata)
        try:
            model = json.load(fh)
        finally:
            fh.close()
    except Exception:
        response.status = 400
        return to_jsonp({'errors': 'JSON model could not be parsed'})

    try:
        log.info("Validating model")
        model = validate_model(model)
    except Invalid as i:
        log.error("Errors occured during model validation:")
        for field, error in i.asdict().items():
            log.error("%s: %s", field, error)
        response.status = 400
        return to_jsonp({'errors': 'Model is not well formed'})

    dataset = Dataset.by_name(model['dataset']['name'])
    if dataset is None:
        # New dataset: requires create permission; the caller becomes
        # a manager and controls visibility via ``private``.
        dataset = Dataset(model)
        require.dataset.create()
        dataset.managers.append(c.account)
        dataset.private = private
        db.session.add(dataset)
    else:
        require.dataset.update(dataset)

    log.info("Dataset: %s", dataset.name)
    source = Source(dataset=dataset, creator=c.account, url=csv_file)
    log.info(source)

    # Reuse an existing source with the same URL instead of duplicating.
    for source_ in dataset.sources:
        if source_.url == csv_file:
            source = source_
            break
    db.session.add(source)
    db.session.commit()

    # Hand loading of the source off to the celery queue.
    load_source.delay(source.id)
    return to_jsonp(dataset_apply_links(dataset.as_dict()))
def view(self, dataset, dimension, format='html'):
    """Show one dimension of a dataset with its aggregated amounts."""
    self._get_dataset(dataset)
    try:
        c.dimension = c.dataset[dimension]
    except KeyError:
        abort(404, _('This is not a dimension'))
    if not isinstance(c.dimension, model.Dimension):
        abort(404, _('This is not a dimension'))

    page = self._get_page('page')
    cache = AggregationCache(c.dataset)
    result = cache.aggregate(drilldowns=[dimension], page=page,
                             pagesize=PAGE_SIZE)
    items = result.get('drilldown', [])
    c.values = [(row.get(dimension), row.get('amount')) for row in items]

    if format == 'json':
        return to_jsonp({"values": c.values,
                         "meta": c.dimension.as_dict()})

    c.page = Page(c.values, page=page,
                  item_count=result['summary']['num_drilldowns'],
                  items_per_page=PAGE_SIZE,
                  presliced_list=True)
    return render('dimension/view.html')
def index(self, format='html'):
    # Dataset index: json/csv export, or an html page filtered by the
    # requested languages and territories.
    # NOTE(review): c.results is handed to the extensions before it is
    # assigned below -- presumably populated by a before-filter or a
    # base-controller hook; confirm against the enclosing class.
    for item in self.extensions:
        item.index(c, request, response, c.results)
    if format == 'json':
        return to_jsonp(map(lambda d: d.as_dict(), c.datasets))
    elif format == 'csv':
        results = map(lambda d: d.as_dict(), c.datasets)
        return write_csv(results, response)
    else:
        c.query = request.params.items()
        # Template helpers: build query strings that add or remove a
        # single (field, value) filter pair.
        c.add_filter = lambda f, v: '?' + urlencode(
            c.query + [(f, v)] if (f, v) not in c.query else c.query)
        c.del_filter = lambda f, v: '?' + urlencode(
            [(k, x) for k, x in c.query if (k, x) != (f, v)])
        c.results = c.datasets
        # Each requested language/territory narrows the query via a
        # separate aliased join.
        for language in request.params.getall('languages'):
            l = db.aliased(DatasetLanguage)
            c.results = c.results.join(l, Dataset._languages)
            c.results = c.results.filter(l.code == language)
        for territory in request.params.getall('territories'):
            t = db.aliased(DatasetTerritory)
            c.results = c.results.join(t, Dataset._territories)
            c.results = c.results.filter(t.code == territory)
        c.results = list(c.results)
        # Facet counts for the filter sidebar.
        c.territory_options = DatasetTerritory.dataset_counts(c.results)
        c.language_options = DatasetLanguage.dataset_counts(c.results)
        return render('dataset/index.html')
def index(self, format='html'):
    # Dataset index: json/csv export, or an html page filtered by the
    # requested languages and territories.
    # NOTE(review): c.results is passed to the extensions before being
    # assigned in this function -- looks like it is set by a hook
    # elsewhere; verify before relying on it.
    for item in self.extensions:
        item.index(c, request, response, c.results)
    if format == 'json':
        return to_jsonp(map(lambda d: d.as_dict(), c.datasets))
    elif format == 'csv':
        results = map(lambda d: d.as_dict(), c.datasets)
        return write_csv(results, response)
    else:
        c.query = request.params.items()
        # Template helpers to add/remove a (field, value) filter pair
        # from the current query string.
        c.add_filter = lambda f, v: '?' + urlencode(
            c.query + [(f, v)] if (f, v) not in c.query else c.query)
        c.del_filter = lambda f, v: '?' + urlencode(
            [(k, x) for k, x in c.query if (k, x) != (f, v)])
        c.results = c.datasets
        for language in request.params.getall('languages'):
            l = db.aliased(DatasetLanguage)
            c.results = c.results.join(l, Dataset._languages)
            c.results = c.results.filter(l.code == language)
        for territory in request.params.getall('territories'):
            t = db.aliased(DatasetTerritory)
            c.results = c.results.join(t, Dataset._territories)
            c.results = c.results.filter(t.code == territory)
        c.results = list(c.results)
        # Facet counts for the filter sidebar.
        c.territory_options = DatasetTerritory.dataset_counts(c.results)
        c.language_options = DatasetLanguage.dataset_counts(c.results)
        return render('dataset/index.html')
def create(self):
    """ Adds a new dataset dynamically through a POST request """
    # Loading requires an authenticated account.
    if not c.account:
        abort(status_code=400, detail='user not authenticated')

    # Normalize the loading api parameters.
    parser = LoadingAPIParamParser(request.params)
    params, errors = parser.parse()
    if errors:
        response.status = 400
        return to_jsonp({'errors': errors})

    # A budget data package takes precedence over metadata + csv.
    if 'budget_data_package' in params:
        return self.load_with_budget_data_package(
            params['budget_data_package'], params['private'])
    return self.load_with_model_and_csv(
        params['metadata'], params['csv_file'], params['private'])
def create(self):
    """ Adds a new dataset dynamically through a POST request """
    # A user object must be present in c.account; bail out otherwise.
    if not c.account:
        abort(status_code=400, detail='user not authenticated')

    parser = LoadingAPIParamParser(request.params)
    params, errors = parser.parse()
    if errors:
        response.status = 400
        return to_jsonp({'errors': errors})

    # Budget data packages win over the metadata/csv loading path.
    if 'budget_data_package' in params:
        output = self.load_with_budget_data_package(
            params['budget_data_package'], params['private'])
    else:
        output = self.load_with_model_and_csv(params['metadata'],
                                              params['csv_file'],
                                              params['private'])
    return output
def view(self, dataset, id, format='html'):
    """Show a single dataset entry as html, json or csv.

    Looks up the entry by id, exposes its core fields on the template
    context and collects the remaining dimension values as extras.
    """
    self._get_dataset(dataset)
    entries = list(c.dataset.entries(c.dataset.alias.c.id == id))
    if not len(entries) == 1:
        abort(404, _('Sorry, there is no entry %r') % id)

    c.entry = entry_apply_links(dataset, entries.pop())
    c.id = c.entry.get('id')
    c.from_ = c.entry.get('from')
    c.to = c.entry.get('to')
    # Fall back to the dataset currency when the entry has none.
    c.currency = c.entry.get('currency', c.dataset.currency).upper()
    c.amount = c.entry.get('amount')
    c.time = c.entry.get('time')
    c.custom_html = h.render_entry_custom_html(c.dataset, c.entry)

    # Core fields shown above are excluded from the extras table.
    excluded_keys = ('time', 'amount', 'currency', 'from', 'to',
                     'dataset', 'id', 'name', 'description')
    c.extras = {}
    if c.dataset:
        c.desc = dict([(d.name, d) for d in c.dataset.dimensions])
        for key in c.entry:
            # Idiom fix: 'key not in' instead of 'not key in'.
            if key in c.desc and key not in excluded_keys:
                c.extras[key] = c.entry[key]

    if format == 'json':
        return to_jsonp(c.entry)
    elif format == 'csv':
        return write_csv([c.entry], response)
    else:
        return templating.render('entry/view.html')
def view(self, dataset, id, format='html'):
    """Show a single dataset entry as html, json or csv.

    The entry's core fields are placed on the template context; any
    other dimension values are gathered into ``c.extras``.
    """
    self._get_dataset(dataset)
    entries = list(c.dataset.entries(c.dataset.alias.c.id == id))
    if not len(entries) == 1:
        abort(404, _('Sorry, there is no entry %r') % id)

    c.entry = entry_apply_links(dataset, entries.pop())
    c.id = c.entry.get('id')
    c.from_ = c.entry.get('from')
    c.to = c.entry.get('to')
    # Fall back to the dataset-level currency if the entry has none.
    c.currency = c.entry.get('currency', c.dataset.currency).upper()
    c.amount = c.entry.get('amount')
    c.time = c.entry.get('time')
    c.custom_html = h.render_entry_custom_html(c.dataset, c.entry)

    excluded_keys = ('time', 'amount', 'currency', 'from', 'to',
                     'dataset', 'id', 'name', 'description')
    c.extras = {}
    if c.dataset:
        c.desc = dict([(d.name, d) for d in c.dataset.dimensions])
        for key in c.entry:
            # Idiom fix: 'key not in' instead of 'not key in'.
            if key in c.desc and key not in excluded_keys:
                c.extras[key] = c.entry[key]

    if format == 'json':
        return to_jsonp(c.entry)
    elif format == 'csv':
        return write_csv([c.entry], response)
    else:
        return render('entry/view.html')
def view(self, dataset, id, format="html"):
    """Show a single dataset entry as html, json or csv.

    Core fields go on the template context; remaining dimension values
    are collected into ``c.extras``.
    """
    self._get_dataset(dataset)
    entries = list(c.dataset.entries(c.dataset.alias.c.id == id))
    if not len(entries) == 1:
        abort(404, _("Sorry, there is no entry %r") % id)

    c.entry = entry_apply_links(dataset, entries.pop())
    c.id = c.entry.get("id")
    c.from_ = c.entry.get("from")
    c.to = c.entry.get("to")
    # Entry currency overrides the dataset default.
    c.currency = c.entry.get("currency", c.dataset.currency).upper()
    c.amount = c.entry.get("amount")
    c.time = c.entry.get("time")
    c.custom_html = h.render_entry_custom_html(c.dataset, c.entry)

    excluded_keys = ("time", "amount", "currency", "from", "to",
                     "dataset", "id", "name", "description")
    c.extras = {}
    if c.dataset:
        c.desc = dict([(d.name, d) for d in c.dataset.dimensions])
        for key in c.entry:
            # Idiom fix: 'key not in' instead of 'not key in'.
            if key in c.desc and key not in excluded_keys:
                c.extras[key] = c.entry[key]

    if format == "json":
        return to_jsonp(c.entry)
    elif format == "csv":
        return write_csv([c.entry], response)
    else:
        return render("entry/view.html")
def aggregate(self):
    """Aggregate a dataset; respond with JSONP, or CSV for format=csv."""
    parser = AggregateParamParser(request.params)
    params, errors = parser.parse()
    if errors:
        response.status = 400
        return {'errors': errors}

    # Map the parser's singular keys onto the cache's plural ones.
    params['cuts'] = params.pop('cut')
    params['drilldowns'] = params.pop('drilldown')
    dataset = params.pop('dataset')
    format = params.pop('format')
    require.dataset.read(dataset)

    try:
        cache = AggregationCache(dataset)
        result = cache.aggregate(**params)
        if 'drilldown' in result:
            result['drilldown'] = drilldowns_apply_links(
                dataset.name, result['drilldown'])
        response.last_modified = dataset.updated_at
        if cache.cache_enabled and 'cache_key' in result['summary']:
            etag_cache(result['summary']['cache_key'])
    except (KeyError, ValueError) as ve:
        log.exception(ve)
        response.status = 400
        return {'errors': ['Invalid aggregation query: %r' % ve]}

    if format == 'csv':
        return write_csv(result['drilldown'], response,
                         filename=dataset.name + '.csv')
    return to_jsonp(result)
def create(self):
    """ Adds a new dataset dynamically through a POST request """
    # Loading requires an authenticated account.
    if not c.account:
        abort(status_code=400, detail='user not authenticated')

    # Exactly the two expected parameters must be present.
    if len(request.params) != 2:
        abort(status_code=400, detail='incorrect number of params')

    metadata = request.params['metadata'] \
        if 'metadata' in request.params \
        else abort(status_code=400, detail='metadata is missing')

    csv_file = request.params['csv_file'] \
        if 'csv_file' in request.params \
        else abort(status_code=400, detail='csv_file is missing')

    # Fetch and parse the JSON model. Catch Exception instead of a
    # bare except so SystemExit/KeyboardInterrupt still propagate, and
    # close the HTTP handle even when parsing fails.
    try:
        fh = urllib2.urlopen(metadata)
        try:
            model = json.load(fh)
        finally:
            fh.close()
    except Exception:
        abort(status_code=400, detail='JSON model could not be parsed')

    try:
        log.info("Validating model")
        model = validate_model(model)
    except Invalid as i:
        log.error("Errors occured during model validation:")
        for field, error in i.asdict().items():
            log.error("%s: %s", field, error)
        abort(status_code=400, detail='Model is not well formed')

    dataset = Dataset.by_name(model['dataset']['name'])
    if dataset is None:
        # New dataset: caller needs create permission and becomes a
        # manager; datasets start private by default.
        dataset = Dataset(model)
        require.dataset.create()
        dataset.managers.append(c.account)
        dataset.private = True  # Default value
        db.session.add(dataset)
    else:
        require.dataset.update(dataset)

    log.info("Dataset: %s", dataset.name)
    source = Source(dataset=dataset, creator=c.account, url=csv_file)
    log.info(source)

    # Reuse an existing source with the same URL rather than duplicate.
    for source_ in dataset.sources:
        if source_.url == csv_file:
            source = source_
            break
    db.session.add(source)
    db.session.commit()

    # Hand loading of the source off to the celery queue.
    load_source.delay(source.id)
    return to_jsonp(dataset_apply_links(dataset.as_dict()))
def index(self, dataset, format='html'):
    """List all saved views of a dataset as html or json."""
    self._get_dataset(dataset)
    handle_request(request, c, c.dataset)
    c.views = View.all_by_dataset(c.dataset)
    if format == 'json':
        return to_jsonp([v.as_dict() for v in c.views])
    return templating.render('view/index.html')
def view(self, dataset, name, format='html'):
    """Show a single named view of a dataset as html or json."""
    self._get_named_view(dataset, name)
    handle_request(request, c, c.dataset)
    c.widget = widgets.get_widget(c.named_view.widget)
    if format == 'json':
        return to_jsonp(c.named_view.as_dict())
    return templating.render('view/view.html')
def index(self, dataset, format='html'):
    """List the views stored for a dataset; json returns their dicts."""
    self._get_dataset(dataset)
    handle_request(request, c, c.dataset)
    c.views = View.all_by_dataset(c.dataset)
    if format == 'json':
        return to_jsonp([view.as_dict() for view in c.views])
    return render('view/index.html')
def view(self, dataset, name, format='html'):
    """Render one named view; json yields its serialized form."""
    self._get_named_view(dataset, name)
    handle_request(request, c, c.dataset)
    c.widget = widgets.get_widget(c.named_view.widget)
    if format == 'json':
        return to_jsonp(c.named_view.as_dict())
    return render('view/view.html')
def index(self, dataset, format="html"):
    """List the dimensions of a dataset as html or linked json."""
    self._get_dataset(dataset)
    # Cache key varies with dataset freshness and response format.
    etag_cache_keygen(c.dataset.updated_at, format)
    if format == "json":
        dimensions = [dimension_apply_links(dataset, dim.as_dict())
                      for dim in c.dataset.dimensions]
        return to_jsonp(dimensions)
    return templating.render("dimension/index.html")
def index(self, dataset, format='html'):
    """List a dataset's dimensions; json gives linked dicts."""
    self._get_dataset(dataset)
    if format == 'json':
        dimensions = [dimension_apply_links(dataset, dim.as_dict())
                      for dim in c.dataset.dimensions]
        return to_jsonp(dimensions)
    return render('dimension/index.html')
def view(self, dataset, dimension, format='html'):
    """Show one dimension; html embeds an aggregate-table widget."""
    self._get_dimension(dataset, dimension)
    if format == 'json':
        dimension = dimension_apply_links(dataset, c.dimension.as_dict())
        return to_jsonp(dimension)
    # Configure the widget used by the html template.
    c.widget = get_widget('aggregate_table')
    c.widget_state = {'drilldowns': [c.dimension.name]}
    return render('dimension/view.html')
def view(self, dataset, dimension, format="html"):
    """Show one dimension; html embeds an aggregate-table widget."""
    self._get_dimension(dataset, dimension)
    # Cache key varies with dataset freshness and response format.
    etag_cache_keygen(c.dataset.updated_at, format)
    if format == "json":
        dimension = dimension_apply_links(dataset, c.dimension.as_dict())
        return to_jsonp(dimension)
    c.widget = get_widget("aggregate_table")
    c.widget_state = {"drilldowns": [c.dimension.name]}
    return templating.render("dimension/view.html")
def view(self, dataset, dimension, format='html'):
    """Render a dimension page, or its linked dict for format=json."""
    self._get_dimension(dataset, dimension)
    etag_cache_keygen(c.dataset.updated_at, format)
    if format == 'json':
        dimension = dimension_apply_links(dataset, c.dimension.as_dict())
        return to_jsonp(dimension)
    # The html page hosts an aggregate table drilling down on this
    # dimension.
    c.widget = get_widget('aggregate_table')
    c.widget_state = {'drilldowns': [c.dimension.name]}
    return templating.render('dimension/view.html')
def index(self, dataset, format='html'):
    """List the dimensions of a dataset as html or linked json."""
    self._get_dataset(dataset)
    etag_cache_keygen(c.dataset.updated_at, format)
    if format == 'json':
        payload = [dimension_apply_links(dataset, d.as_dict())
                   for d in c.dataset.dimensions]
        return to_jsonp(payload)
    return render('dimension/index.html')
def territories(self):
    """Return per-territory dataset counts with labels and index URLs."""
    count_col = db.func.count(DatasetTerritory.dataset_id)
    q = db.select([DatasetTerritory.code, count_col],
                  group_by=DatasetTerritory.code,
                  order_by=count_col.desc())
    result = {}
    for territory, count in db.session.bind.execute(q).fetchall():
        result[territory] = {
            'count': count,
            'label': h.COUNTRIES[territory],
            'url': h.url_for(controller='dataset', action='index',
                             territories=territory),
        }
    return to_jsonp(result)
def distinct(self, dataset, dimension, format='json'):
    """List distinct members of a dimension matching a prefix query."""
    self._get_dimension(dataset, dimension)
    parser = DistinctParamParser(c.dimension, request.params)
    params, errors = parser.parse()
    if errors:
        response.status = 400
        return {'errors': errors}

    # Case-insensitive prefix match on the chosen attribute.
    prefix_filter = params.get('attribute').column_alias.ilike(
        params.get('q') + '%')
    offset = int((params.get('page') - 1) * params.get('pagesize'))
    members = c.dimension.members(prefix_filter, offset=offset,
                                  limit=params.get('pagesize'))
    return to_jsonp(list(members))
def view(self, dataset, format='html'):
    """Dataset page; falls back to the entry index when no view exists."""
    self._get_dataset(dataset)
    c.num_entries = len(c.dataset)
    handle_request(request, c, c.dataset)
    if c.view is None and format == 'html':
        return EntryController().index(dataset, format)
    if format == 'json':
        return to_jsonp(dataset_apply_links(c.dataset.as_dict()))
    return render('dataset/view.html')
def distinct(self, dataset, dimension, format="json"):
    """Prefix-search distinct members of a dimension, with a count."""
    self._get_dimension(dataset, dimension)
    parser = DistinctFieldParamParser(c.dimension, request.params)
    params, errors = parser.parse()
    # Cache key covers dataset freshness, format and the query itself.
    etag_cache_keygen(c.dataset.updated_at, format, parser.key())
    if errors:
        response.status = 400
        return {"errors": errors}

    prefix_filter = params.get("attribute").column_alias.ilike(
        params.get("q") + "%")
    offset = int((params.get("page") - 1) * params.get("pagesize"))
    members = c.dimension.members(prefix_filter, offset=offset,
                                  limit=params.get("pagesize"))
    return to_jsonp({"results": list(members),
                     "count": c.dimension.num_entries(prefix_filter)})
def view(self, dataset, format='html'):
    """ Dataset viewer. Default format is html. This will return
    either an entry index if there is no default view or the defaul
    view. If a request parameter embed is given the default view is
    returned as an embeddable page. If json is provided as a format
    the json representation of the dataset is returned.
    """
    # Fetch the dataset (placed in c.dataset) and derive the etag
    # from its last update.
    self._get_dataset(dataset)
    etag_cache_keygen(c.dataset.updated_at)
    c.num_entries = len(c.dataset)

    # May populate c.view with a default view for this dataset.
    handle_request(request, c, c.dataset)

    if format == 'json':
        return to_jsonp(dataset_apply_links(c.dataset.as_dict()))

    # html path: expose the dataset's time range to the template.
    (earliest_timestamp, latest_timestamp) = c.dataset.timerange()
    if earliest_timestamp is not None:
        c.timerange = {'from': earliest_timestamp,
                       'to': latest_timestamp}

    if c.view is None:
        # No default view -- show the entry index instead.
        return EntryController().index(dataset, format)

    if 'embed' in request.params:
        # Redirect to the embeddable page for the default view.
        return redirect(
            h.url_for(controller='view',
                      action='embed', dataset=c.dataset.name,
                      widget=c.view.vis_widget.get('name'),
                      state=json.dumps(c.view.vis_state)))

    return templating.render('dataset/view.html')
def index(self, format='html'):
    """ List all badges in the system. Default is to present the
    user with an html site, but the user can request a json list
    of badges.
    """
    c.badges = Badge.all()
    if format == 'json':
        badge_dicts = [b.as_dict() for b in c.badges]
        return to_jsonp({"badges": badges_apply_links(badge_dicts)})
    return templating.render('badge/index.html')
def member(self, dataset, dimension, name, format="html"):
    """Show a single member of a dimension as html, json or csv."""
    self._get_member(dataset, dimension, name)
    handle_request(request, c, c.member, c.dimension)
    if c.view is None:
        self._make_browser()
    # Let extensions observe/augment the member before rendering.
    for item in self.extensions:
        item.read(c, request, response, c.member)
    if format == 'json':
        return to_jsonp(c.member)
    if format == 'csv':
        return write_csv([c.member], response)
    return render('dimension/member.html')
def information(self, id, format='html'):
    """ Show information about the badge. Default is to present the
    user with the badge on an html site, but the user can request a
    json representation of the badge.
    """
    c.badge = Badge.by_id(id=id)
    if format == 'json':
        return to_jsonp({"badge": badge_apply_links(c.badge.as_dict())})
    return templating.render('badge/information.html')
def information(self, id, format='html'):
    """ Show information about the badge. Default is to present the
    user with the badge on an html site, but the user can request a
    json representation of the badge.
    """
    # Look the badge up by its id.
    c.badge = Badge.by_id(id=id)
    if format == 'json':
        badge_dict = badge_apply_links(c.badge.as_dict())
        return to_jsonp({"badge": badge_dict})
    return templating.render('badge/information.html')
def view(self, dataset, format='html'):
    """ Dataset viewer. Default format is html. This will return
    either an entry index if there is no default view or the defaul
    view. If a request parameter embed is given the default view is
    returned as an embeddable page. If json is provided as a format
    the json representation of the dataset is returned.
    """
    # Fetch the dataset into c.dataset and key the cache on its
    # last-updated timestamp.
    self._get_dataset(dataset)
    etag_cache_keygen(c.dataset.updated_at)
    c.num_entries = len(c.dataset)

    # May set c.view to the dataset's default view.
    handle_request(request, c, c.dataset)

    if format == 'json':
        return to_jsonp(dataset_apply_links(c.dataset.as_dict()))

    # html path: publish the dataset's time range for the template.
    (earliest_timestamp, latest_timestamp) = c.dataset.timerange()
    if earliest_timestamp is not None:
        c.timerange = {'from': earliest_timestamp,
                       'to': latest_timestamp}

    if c.view is None:
        # No default view: show the entry index instead.
        return EntryController().index(dataset, format)

    if 'embed' in request.params:
        # Redirect to the embeddable page of the default view.
        return redirect(
            h.url_for(controller='view',
                      action='embed', dataset=c.dataset.name,
                      widget=c.view.vis_widget.get('name'),
                      state=json.dumps(c.view.vis_state)))

    return templating.render('dataset/view.html')
def distinct(self, dataset, dimension, format='json'):
    """Prefix-search distinct dimension members and report the count."""
    self._get_dimension(dataset, dimension)
    parser = DistinctFieldParamParser(c.dimension, request.params)
    params, errors = parser.parse()
    etag_cache_keygen(c.dataset.updated_at, format, parser.key())
    if errors:
        response.status = 400
        return {'errors': errors}

    # Case-insensitive prefix match on the selected attribute.
    member_filter = params.get('attribute').column_alias.ilike(
        params.get('q') + '%')
    offset = int((params.get('page') - 1) * params.get('pagesize'))
    members = c.dimension.members(member_filter, offset=offset,
                                  limit=params.get('pagesize'))
    return to_jsonp({'results': list(members),
                     'count': c.dimension.num_entries(member_filter)})
def view(self, dataset, format='html'):
    """Dataset page as html, json or csv; entry index if no view set."""
    self._get_dataset(dataset)
    c.num_entries = len(c.dataset)
    handle_request(request, c, c.dataset)
    if c.view is None and format == 'html':
        return EntryController().index(dataset, format)
    # Give extensions a chance to act on the dataset being read.
    for item in self.extensions:
        item.read(c, request, response, c.dataset)
    if format == 'json':
        return to_jsonp(c.dataset.as_dict())
    if format == 'csv':
        return write_csv([c.dataset.as_dict()], response)
    return render('dataset/view.html')
def view(self, dataset, format='html'):
    """Dataset page; json gives the linked dict, embed redirects."""
    self._get_dataset(dataset)
    etag_cache_keygen(c.dataset.updated_at)
    c.num_entries = len(c.dataset)
    handle_request(request, c, c.dataset)
    if format == 'json':
        return to_jsonp(dataset_apply_links(c.dataset.as_dict()))
    # html path.
    if c.view is None:
        return EntryController().index(dataset, format)
    if 'embed' in request.params:
        return redirect(h.url_for(controller='view',
                                  action='embed',
                                  dataset=c.dataset.name,
                                  widget=c.view.vis_widget.get('name'),
                                  state=json.dumps(c.view.vis_state)))
    return templating.render('dataset/view.html')
def view(self, dataset, format='html'):
    """Render a dataset: linked json, embed redirect, or html page."""
    self._get_dataset(dataset)
    etag_cache_keygen(c.dataset.updated_at)
    c.num_entries = len(c.dataset)
    handle_request(request, c, c.dataset)

    if format == 'json':
        return to_jsonp(dataset_apply_links(c.dataset.as_dict()))

    if c.view is None:
        # No default view configured: show the entry index.
        return EntryController().index(dataset, format)
    if 'embed' in request.params:
        # Redirect to the embeddable version of the default view.
        return redirect(
            h.url_for(controller='view', action='embed',
                      dataset=c.dataset.name,
                      widget=c.view.vis_widget.get('name'),
                      state=json.dumps(c.view.vis_state)))
    return templating.render('dataset/view.html')
def complete(self, format='json'):
    """Autocomplete accounts by name or full-name prefix.

    Returns a JSONP payload of ``results`` (list of name/fullname
    dicts) and the total ``count``.
    """
    self._disable_cache()
    parser = DistinctParamParser(request.params)
    params, errors = parser.parse()
    if errors:
        response.status = 400
        return {'errors': errors}

    query = db.session.query(Account)
    filter_string = params.get('q') + '%'
    query = query.filter(db.or_(Account.name.ilike(filter_string),
                                Account.fullname.ilike(filter_string)))
    count = query.count()
    query = query.limit(params.get('pagesize'))
    query = query.offset(int((params.get('page') - 1) *
                             params.get('pagesize')))
    # Fix: list(query) yielded raw ORM Account instances in the JSON
    # payload; serialize the public fields instead, matching the other
    # account-completion endpoints in this codebase.
    results = [dict(fullname=x.fullname, name=x.name) for x in query]
    return to_jsonp({
        'results': results,
        'count': count
    })
def index(self, format='html'):
    """Dataset index with language/territory facets: html, json, csv."""
    c.query = request.params.items()
    # Template helpers to add or drop a (field, value) filter pair.
    c.add_filter = lambda f, v: '?' + urlencode(
        c.query + [(f, v)] if (f, v) not in c.query else c.query)
    c.del_filter = lambda f, v: '?' + urlencode(
        [(k, x) for k, x in c.query if (k, x) != (f, v)])

    c.results = c.datasets
    for language in request.params.getall('languages'):
        l = db.aliased(DatasetLanguage)
        c.results = c.results.join(l, Dataset._languages)
        c.results = c.results.filter(l.code == language)
    for territory in request.params.getall('territories'):
        t = db.aliased(DatasetTerritory)
        c.results = c.results.join(t, Dataset._territories)
        c.results = c.results.filter(t.code == territory)
    c.results = list(c.results)

    # Facet options with labels and pre-built filter URLs.
    c.territory_options = [
        {'code': code,
         'count': count,
         'url': h.url_for(controller='dataset', action='index',
                          territories=code),
         'label': COUNTRIES.get(code, code)}
        for (code, count) in DatasetTerritory.dataset_counts(c.results)]
    c.language_options = [
        {'code': code,
         'count': count,
         'url': h.url_for(controller='dataset', action='index',
                          languages=code),
         'label': LANGUAGES.get(code, code)}
        for (code, count) in DatasetLanguage.dataset_counts(c.results)]

    if format == 'json':
        results = map(lambda d: d.as_dict(), c.results)
        results = [dataset_apply_links(r) for r in results]
        return to_jsonp({'datasets': results,
                         'territories': c.territory_options,
                         'languages': c.language_options})
    elif format == 'csv':
        results = map(lambda d: d.as_dict(), c.results)
        return write_csv(results, response)
    return render('dataset/index.html')
def index(self, format='html'):
    """Dataset index with language, territory and category facets."""
    c.query = request.params.items()
    # Template helpers to add or drop a (field, value) filter pair.
    c.add_filter = lambda f, v: '?' + urlencode(
        c.query + [(f, v)] if (f, v) not in c.query else c.query)
    c.del_filter = lambda f, v: '?' + urlencode(
        [(k, x) for k, x in c.query if (k, x) != (f, v)])

    c.results = c.datasets
    for language in request.params.getall('languages'):
        l = db.aliased(DatasetLanguage)
        c.results = c.results.join(l, Dataset._languages)
        c.results = c.results.filter(l.code == language)
    for territory in request.params.getall('territories'):
        t = db.aliased(DatasetTerritory)
        c.results = c.results.join(t, Dataset._territories)
        c.results = c.results.filter(t.code == territory)
    category = request.params.get('category')
    if category:
        c.results = c.results.filter(Dataset.category == category)
    c.results = list(c.results)

    # Facet options with labels and pre-built filter URLs.
    c.territory_options = [
        {'code': code,
         'count': count,
         'url': h.url_for(controller='dataset', action='index',
                          territories=code),
         'label': COUNTRIES.get(code, code)}
        for (code, count) in DatasetTerritory.dataset_counts(c.results)]
    c.language_options = [
        {'code': code,
         'count': count,
         'url': h.url_for(controller='dataset', action='index',
                          languages=code),
         'label': LANGUAGES.get(code, code)}
        for (code, count) in DatasetLanguage.dataset_counts(c.results)]

    # TODO: figure out where to put this:
    ds_ids = [d.id for d in c.results]
    if len(ds_ids):
        q = db.select(
            [Dataset.category, db.func.count(Dataset.id)],
            Dataset.id.in_(ds_ids), group_by=Dataset.category,
            order_by=db.func.count(Dataset.id).desc())
        c.category_options = [
            {'category': category,
             'count': count,
             'url': h.url_for(controller='dataset', action='index',
                              category=category),
             'label': CATEGORIES.get(category, category)}
            for (category, count) in db.session.bind.execute(q).fetchall()
            if category is not None]
    else:
        c.category_options = []

    c._must_revalidate = True
    if len(c.results):
        dt = max([r.updated_at for r in c.results])
        etag_cache_keygen(dt)

    if format == 'json':
        results = map(lambda d: d.as_dict(), c.results)
        results = [dataset_apply_links(r) for r in results]
        return to_jsonp({'datasets': results,
                         'categories': c.category_options,
                         'territories': c.territory_options,
                         'languages': c.language_options})
    elif format == 'csv':
        results = map(lambda d: d.as_dict(), c.results)
        return write_csv(results, response)
    c.show_rss = True
    return templating.render('dataset/index.html')
def model(self, dataset, format='json'):
    """Return the dataset's model (decorated with linked-data URLs) as JSONP."""
    self._get_dataset(dataset)
    # ETag keyed on the dataset's last modification time.
    etag_cache_keygen(c.dataset.updated_at)
    dataset_model = c.dataset.model
    dataset_model['dataset'] = dataset_apply_links(dataset_model['dataset'])
    return to_jsonp(dataset_model)
def search(self):
    """
    Stream entries matching the given search parameters as CSV or
    JSON (default).  Parse errors are returned as JSONP with HTTP
    status 400.

    Fix: removed the trailing Solr ``Browser`` code — it sat after an
    if/else in which BOTH branches return, so it was unreachable dead
    code (a leftover of the pre-streaming implementation, and the only
    remaining use of the Python-2-only ``except SolrException, e``
    syntax in this block).
    """
    parser = SearchParamParser(request.params)
    params, errors = parser.parse()
    if errors:
        response.status = 400
        return to_jsonp({'errors': errors})

    expand_facets = params.pop('expand_facet_dimensions')
    format = params.pop('format')

    # Statistics and facets cannot be represented in CSV output.
    if format == 'csv':
        params['stats'] = False
        params['facet_field'] = None

    # No explicit dataset requested: search every dataset readable by
    # the current account, optionally narrowed down by category.
    # (`not datasets` also covers the None case.)
    datasets = params.pop('dataset', None)
    if not datasets:
        q = model.Dataset.all_by_account(c.account)
        if params.get('category'):
            q = q.filter_by(category=params.pop('category'))
        datasets = q.all()
        expand_facets = False

    if not datasets:
        return {'errors': ["No dataset available."]}

    # The caller must be allowed to read every dataset searched.
    params['filter']['dataset'] = []
    for dataset in datasets:
        require.dataset.read(dataset)
        params['filter']['dataset'].append(dataset.name)

    # HTTP caching: newest dataset timestamp + parser key drive the ETag.
    response.last_modified = max([d.updated_at for d in datasets])
    etag_cache_keygen(parser.key(), response.last_modified)

    # Echo the effective parameters back as X-* response headers.
    self._response_params(params)

    if params['pagesize'] > parser.defaults['pagesize']:
        # http://wiki.nginx.org/X-accel#X-Accel-Buffering
        response.headers['X-Accel-Buffering'] = 'no'

    if format == 'csv':
        csv_headers(response, 'entries.csv')
        streamer = CSVStreamingResponse(
            datasets,
            params,
            pagesize=parser.defaults['pagesize'])
        return streamer.response()
    else:
        json_headers(filename='entries.json')
        streamer = JSONStreamingResponse(
            datasets,
            params,
            pagesize=parser.defaults['pagesize'],
            expand_facets=_expand_facets if expand_facets else None,
            callback=request.params.get('callback'))
        return streamer.response()
class Api2Controller(BaseController):
    """Version 2 of the JSON/CSV API: dataset aggregation and entry search."""

    def _response_params(self, params):
        # Echo each parsed parameter back as an 'X-<Name>' response
        # header, e.g. 'page_size' -> 'X-Page-Size'.  Values are forced
        # to ASCII because HTTP headers cannot carry unicode.
        for k, v in params.items():
            k = k.replace('_', ' ').replace('-', ' ').split()
            k = '-'.join(['X'] + [l.capitalize() for l in k])
            response.headers[k] = unicode(v).encode('ascii', 'ignore')

    def aggregate(self):
        # Aggregate a dataset according to the URL parameters; results
        # are served from (and computed by) the aggregation cache.
        parser = AggregateParamParser(request.params)
        params, errors = parser.parse()
        if errors:
            response.status = 400
            return to_jsonp({'errors': errors})

        # URL parameters are singular nouns; the aggregation layer
        # expects the plural forms.
        params['cuts'] = params.pop('cut')
        params['drilldowns'] = params.pop('drilldown')
        dataset = params.pop('dataset')
        format = params.pop('format')

        # Aggregation requires read access on the dataset.
        require.dataset.read(dataset)
        self._response_params(params)

        try:
            cache = AggregationCache(dataset)
            result = cache.aggregate(**params)

            # Attach html_url links to any drilldown dimensions.
            if 'drilldown' in result:
                result['drilldown'] = drilldowns_apply_links(
                    dataset.name, result['drilldown'])

            # HTTP caching: last-modified from the dataset, ETag from
            # the aggregation cache key (when caching is enabled).
            response.last_modified = dataset.updated_at
            if cache.cache_enabled and 'cache_key' in result['summary']:
                etag_cache(result['summary']['cache_key'])
        except (KeyError, ValueError) as ve:
            # Invalid cuts/drilldowns end up here; report them as a 400
            # rather than a server error.
            log.exception(ve)
            response.status = 400
            return to_jsonp({'errors': [unicode(ve)]})

        if format == 'csv':
            return write_csv(result['drilldown'], response,
                             filename=dataset.name + '.csv')
        return to_jsonp(result)

    def search(self):
        # Search entries across one or more datasets and stream the
        # results as CSV or JSON.
        parser = SearchParamParser(request.params)
        params, errors = parser.parse()
        if errors:
            response.status = 400
            return to_jsonp({'errors': errors})

        expand_facets = params.pop('expand_facet_dimensions')
        format = params.pop('format')

        # Statistics and facets cannot be represented in CSV output.
        if format == 'csv':
            params['stats'] = False
            params['facet_field'] = None

        datasets = params.pop('dataset', None)
        if datasets is None or not datasets:
            # No explicit dataset requested: search every dataset the
            # current account may read, optionally filtered by category.
            q = model.Dataset.all_by_account(c.account)
            if params.get('category'):
                q = q.filter_by(category=params.pop('category'))
            datasets = q.all()
            expand_facets = False

        if not datasets:
            return {'errors': ["No dataset available."]}

        # The caller must be allowed to read every dataset searched.
        params['filter']['dataset'] = []
        for dataset in datasets:
            require.dataset.read(dataset)
            params['filter']['dataset'].append(dataset.name)

        # HTTP caching: newest dataset timestamp + parser key -> ETag.
        response.last_modified = max([d.updated_at for d in datasets])
        etag_cache_keygen(parser.key(), response.last_modified)

        self._response_params(params)

        if params['pagesize'] > parser.defaults['pagesize']:
            # http://wiki.nginx.org/X-accel#X-Accel-Buffering
            response.headers['X-Accel-Buffering'] = 'no'

        if format == 'csv':
            csv_headers(response, 'entries.csv')
            streamer = CSVStreamingResponse(
                datasets,
                params,
                pagesize=parser.defaults['pagesize'])
            return streamer.response()
        else:
            json_headers(filename='entries.json')
            streamer = JSONStreamingResponse(
                datasets,
                params,
                pagesize=parser.defaults['pagesize'],
                expand_facets=_expand_facets if expand_facets else None,
                callback=request.params.get('callback'))
            return streamer.response()

        # NOTE(review): everything below is UNREACHABLE -- both branches
        # above return.  It looks like a leftover of the pre-streaming
        # Solr-based implementation (and uses Python-2-only `except X, e`
        # syntax); candidate for removal.
        b = Browser(**params)
        try:
            b.execute()
        except SolrException, e:
            return {'errors': [unicode(e)]}

        stats, facets, entries = b.get_stats(), b.get_facets(), \
            b.get_entries()

        _entries = []
        for dataset, entry in entries:
            entry = entry_apply_links(dataset.name, entry)
            entry['dataset'] = dataset_apply_links(dataset.as_dict())
            _entries.append(entry)

        if format == 'csv':
            return write_csv(_entries, response,
                             filename='entries.csv')

        if expand_facets and len(datasets) == 1:
            _expand_facets(facets, datasets[0])

        return to_jsonp({
            'stats': stats,
            'facets': facets,
            'results': _entries
        })
def index(self, format='html'):
    """
    Get a list of all datasets along with territory, language, and
    category counts (amount of datasets for each).
    """
    # Facet-filter helpers: hrefs that add a single (field, value)
    # filter to the current query string, or remove one from it.
    c.query = request.params.items()
    c.add_filter = lambda f, v: \
        '?' + urlencode(c.query
                        if (f, v) in c.query else c.query + [(f, v)])
    c.del_filter = lambda f, v: \
        '?' + urlencode([(k, x) for k, x in c.query
                         if (k, x) != (f, v)])

    # Parse and validate the request parameters.
    query_params, parse_errors = \
        DatasetIndexParamParser(request.params).parse()
    if parse_errors:
        abort(400, _('Parameter values not supported: %s') %
              ', '.join(parse_errors))

    # page/pagesize are not part of the cache key: the cache always
    # holds the full dataset listing (needed for the language,
    # territory and category counts); pagination only matters for the
    # HTML response at the bottom.
    query_params.pop('page')
    items_per_page = query_params.pop('pagesize')

    # Fetch the cached index (the cache generates it on a miss; it is
    # invalidated whenever a dataset is published or retracted).
    index_data = DatasetIndexCache().index(**query_params)

    # Datasets arrive ordered by last-modified descending, so the first
    # entry keys the ETag.  An empty listing raises IndexError, in
    # which case the ETag is keyed on None.  We deliberately do not set
    # c._must_revalidate here -- a hard refresh is not required.
    try:
        newest = index_data['datasets'][0]['timestamps']['last_modified']
        etag_cache_keygen(newest)
    except IndexError:
        etag_cache_keygen(None)

    # Expose the facet options to the template context.
    c.language_options = index_data['languages']
    c.territory_options = index_data['territories']
    c.category_options = index_data['categories']

    if format == 'json':
        # Decorate each dataset with its linked-data URLs first.
        index_data['datasets'] = [dataset_apply_links(d)
                                  for d in index_data['datasets']]
        return to_jsonp(index_data)
    elif format == 'csv':
        # CSV output carries only the datasets, no facet counts.
        return write_csv(index_data['datasets'], response)

    # HTML: enable the RSS link and paginate.  request.params is passed
    # through so any filters already supplied survive pagination links.
    c.show_rss = True
    c.page = templating.Page(index_data['datasets'],
                             items_per_page=items_per_page,
                             item_count=len(index_data['datasets']),
                             **request.params)
    return templating.render('dataset/index.html')
def view(self, dataset, id, format='html'):
    """
    Get a specific entry in the dataset, identified by the id.  The
    entry can be returned as html (default), json or csv.

    Fix: the inflation fallback used a bare ``except:``, which also
    swallows SystemExit/KeyboardInterrupt; narrowed to
    ``except Exception:`` while keeping the best-effort behaviour.
    """
    # Generate the dataset
    self._get_dataset(dataset)

    # Get the entry that matches the given id.  c.dataset.entries is
    # a generator, so we build a list from its responses under the
    # given constraint.
    entries = list(c.dataset.entries(c.dataset.alias.c.id == id))

    # Since we're trying to get a single entry the list should contain
    # exactly one element; otherwise return a 404.
    if not len(entries) == 1:
        abort(404, _('Sorry, there is no entry %r') % id)

    # Add urls to the entry and assign it as a context variable
    c.entry = entry_apply_links(dataset, entries.pop())

    # Get and set some context variables from the entry.
    # This shouldn't really be necessary but it's here so nothing gets
    # broken.
    c.id = c.entry.get('id')
    c.from_ = c.entry.get('from')
    c.to = c.entry.get('to')
    c.currency = c.entry.get('currency', c.dataset.currency).upper()
    c.time = c.entry.get('time')

    # Get the amount for the entry
    amount = c.entry.get('amount')

    # We adjust for inflation if the user has asked for this
    if 'inflate' in request.params:
        try:
            # Inflate the amount.  Target date is provided in
            # request.params as the value for 'inflate' and the
            # reference date is the date of the entry.  We also provide
            # the list of territories from which a single country is
            # extracted for the inflation.
            c.inflation = h.inflate(amount, request.params['inflate'],
                                    c.time, c.dataset.territories)

            # The amount to show should be the inflated amount,
            # and it overwrites the entry's amount as well.
            c.amount = c.inflation['inflated']
            c.entry['amount'] = c.inflation['inflated']

            # We include the inflation response in the entry's dict.
            # The HTML description assumes every dict value for the
            # entry includes a label, so we provide a default
            # "Inflation adjustment" for it to work.
            c.inflation['label'] = 'Inflation adjustment'
            c.entry['inflation_adjustment'] = c.inflation
        except Exception:
            # A lot can go wrong inside h.inflate (missing rates,
            # unparsable dates, ...).  Best effort: report that we
            # cannot adjust and fall back to the original amount.
            h.flash_notice(_('Unable to adjust for inflation'))
            c.amount = amount
    else:
        # No inflation requested: use the original amount.
        c.amount = amount

    # Add custom html for the dataset entry if the dataset has some
    # custom html
    # 2013-11-17 disabled this as part of removal of genshi as depended on
    # a genshi specific helper.
    # TODO: reinstate if important
    # c.custom_html = h.render_entry_custom_html(c.dataset, c.entry)

    # Collect the remaining dimensions of the entry into an extras
    # dictionary, excluding all dimensions that are already shown.
    excluded_keys = ('time', 'amount', 'currency', 'from', 'to',
                     'dataset', 'id', 'name', 'description')

    c.extras = {}
    if c.dataset:
        # Create a dictionary of the dataset dimensions
        c.desc = dict([(d.name, d) for d in c.dataset.dimensions])

        # Loop through dimensions of the entry
        for key in c.entry:
            # Entry dimension must be a dataset dimension and not in
            # the predefined excluded keys
            if key in c.desc and \
                    key not in excluded_keys:
                c.extras[key] = c.entry[key]

    # Return the entry in the requested format.
    if format == 'json':
        return to_jsonp(c.entry)
    elif format == 'csv':
        return write_csv([c.entry], response)
    else:
        return templating.render('entry/view.html')
def index(self, dataset, format='json'):
    """List all sources of the given dataset as JSONP."""
    self._get_dataset(dataset)
    source_dicts = [source.as_dict() for source in c.dataset.sources]
    return to_jsonp(source_dicts)
def aggregate(self):
    """
    Aggregate a dataset according to the URL parameters.  The result
    is served from the aggregation cache when possible; on a miss the
    cache computes the aggregate itself.

    Parse failures and invalid cut/drilldown/measure values are
    reported as JSONP with HTTP status 400.  Otherwise the aggregate
    is returned as JSONP, or as a CSV of the drilldown rows when
    format=csv.
    """
    # Parse the aggregation parameters into their canonical form.
    query, parse_errors = AggregateParamParser(request.params).parse()
    if parse_errors:
        response.status = 400
        return to_jsonp({'errors': parse_errors})

    # URL parameters use singular nouns; the aggregation layer works
    # with the plural forms.
    for singular, plural in (('cut', 'cuts'),
                             ('drilldown', 'drilldowns'),
                             ('measure', 'measures')):
        query[plural] = query.pop(singular)

    # Dataset and output format are not aggregation parameters.
    target = query.pop('dataset')
    output_format = query.pop('format')

    # Aggregation requires read access on the dataset.
    require.dataset.read(target)

    # Echo the effective parameters back as X-* response headers.
    self._response_params(query)

    try:
        agg_cache = AggregationCache(target)
        result = agg_cache.aggregate(**query)

        # Attach html_url values (linked data) to any drilldown
        # dimensions of the result.
        if 'drilldown' in result:
            result['drilldown'] = drilldowns_apply_links(
                target.name, result['drilldown'])

        # HTTP caching: last-modified from the dataset, ETag from the
        # aggregation cache key when caching is enabled.  (Odd place to
        # do this since the heavy lifting already happened above.
        # TODO: Needs rethinking.)
        response.last_modified = target.updated_at
        if agg_cache.cache_enabled and 'cache_key' in result['summary']:
            etag_cache(result['summary']['cache_key'])
    except (KeyError, ValueError) as ve:
        # Log invalid aggregation parameters and report them as a 400.
        log.exception(ve)
        response.status = 400
        return to_jsonp({'errors': [unicode(ve)]})

    # CSV carries only the drilldown rows; JSONP is the default.
    if output_format == 'csv':
        return write_csv(result['drilldown'], response,
                         filename=target.name + '.csv')
    return to_jsonp(result)
def analysis(self, dataset, source, format='json'):
    """Return the analysis of a single source of the dataset as JSONP."""
    self._get_source(dataset, source)
    source_analysis = c.source.analysis
    return to_jsonp(source_analysis)