def match():
    """Match candidate entities against a name within one dataset.

    Query args: ``dataset``, ``name``, ``filter``, ``exclude``.
    Returns a paged JSON response of the matches.
    """
    dataset = Dataset.find(request.args.get('dataset'))
    candidates = find_matches(dataset,
                              request.args.get('name'),
                              filter=request.args.get('filter'),
                              exclude=arg_int('exclude'))
    return jsonify(Pager(candidates).to_dict())
def match():
    """Find matches for a name inside the requested dataset.

    Reads ``dataset``, ``name``, ``filter`` and ``exclude`` from the
    query string and returns the matches as a paged JSON document.
    """
    requested = request.args.get('dataset')
    dataset = Dataset.find(requested)
    hits = find_matches(dataset,
                        request.args.get('name'),
                        filter=request.args.get('filter'),
                        exclude=arg_int('exclude'))
    pager = Pager(hits)
    return jsonify(pager.to_dict())
def query():
    """Search documents readable by the current user.

    Builds a query from the request arguments restricted to the lists
    and sources the user may read, then returns a paged JSON response
    with URL-decorated documents and the search facets attached.
    """
    etag_cache_keygen()
    # NOTE: avoid shadowing the view name with the query object.
    search_q = document_query(request.args,
                              lists=authz.authz_lists('read'),
                              sources=authz.authz_sources('read'))
    results = search_documents(search_q)
    pager = Pager(results,
                  results_converter=lambda docs: [add_urls(doc) for doc in docs])
    data = pager.to_dict()
    data['facets'] = transform_facets(results.result.get('aggregations', {}))
    return jsonify(data)
def _query():
    """Run a highlighted document search shared by the internal and the
    public API.

    Unlike the public view functions this returns the plain result dict
    (not a response object) so callers can post-process it.
    """
    etag_cache_keygen()
    query = document_query(request.args,
                           lists=authz.authz_lists('read'),
                           sources=authz.authz_sources('read'),
                           highlights=True)
    results = search_documents(query)
    pager = Pager(results,
                  results_converter=lambda ds: [add_urls(d) for d in ds])
    data = pager.to_dict()
    # fix: removed leftover debugger remnant (`import ipdb; ipdb.set_trace()`)
    data['facets'] = transform_facets(results.result.get('aggregations', {}))
    return data
def _query():
    """Shared search helper for the internal and public APIs.

    Builds a highlighted document query from the request arguments,
    limited to lists/sources the user can read, and returns the paged
    result dict with facets — callers wrap it in a response themselves.
    """
    etag_cache_keygen()
    query = document_query(request.args,
                           lists=authz.authz_lists('read'),
                           sources=authz.authz_sources('read'),
                           highlights=True)
    results = search_documents(query)
    pager = Pager(results,
                  results_converter=lambda ds: [add_urls(d) for d in ds])
    data = pager.to_dict()
    # fix: dropped commented-out ipdb breakpoint left over from debugging
    data['facets'] = transform_facets(results.result.get('aggregations', {}))
    return data
def index():
    """List entities, optionally filtered, as paged JSON or as CSV.

    Query args: ``dataset`` (restrict to one dataset), ``filter_name``
    (substring match on the entity name), ``format`` (``json``/``csv``),
    ``download`` (serve as attachment).
    """
    entities = Entity.all()
    # fix: `dataset` was previously unbound (NameError) when ?download
    # was requested without a ?dataset argument.
    dataset = None
    dataset_arg = request.args.get('dataset')
    if dataset_arg is not None:
        dataset = Dataset.find(dataset_arg)
        entities = entities.filter_by(dataset=dataset)
    filter_name = request.args.get('filter_name', '')
    if filter_name:  # idiomatic truthiness instead of len()
        query = '%' + filter_name + '%'
        entities = entities.filter(Entity.name.ilike(query))
    # TODO: other filters.
    format = request.args.get('format', 'json').lower().strip()
    if format == 'csv':
        res = csvify(entities)
    else:
        pager = Pager(entities)
        res = jsonify(pager.to_dict())
    if arg_bool('download'):
        fn = dataset_filename(dataset, format)
        res.headers['Content-Disposition'] = 'attachment; filename=' + fn
    return res
def index():
    """List entities with optional dataset/name filters.

    Supports ``format=json`` (paged) or ``format=csv``, and a
    ``download`` flag that sets a Content-Disposition attachment header.
    """
    entities = Entity.all()
    # fix: ensure `dataset` is always bound; it was a NameError when
    # ?download was set but no ?dataset argument was supplied.
    dataset = None
    dataset_arg = request.args.get("dataset")
    if dataset_arg is not None:
        dataset = Dataset.find(dataset_arg)
        entities = entities.filter_by(dataset=dataset)
    filter_name = request.args.get("filter_name", "")
    if filter_name:  # idiomatic truthiness instead of len()
        query = "%" + filter_name + "%"
        entities = entities.filter(Entity.name.ilike(query))
    # TODO: other filters.
    format = request.args.get("format", "json").lower().strip()
    if format == "csv":
        res = csvify(entities)
    else:
        pager = Pager(entities)
        res = jsonify(pager.to_dict())
    if arg_bool("download"):
        fn = dataset_filename(dataset, format)
        res.headers["Content-Disposition"] = "attachment; filename=" + fn
    return res
def index():
    """Return all datasets as a paged JSON response."""
    return jsonify(Pager(Dataset.all()).to_dict())
def index():
    """List every dataset, paginated, as JSON."""
    all_datasets = Dataset.all()
    page = Pager(all_datasets)
    return jsonify(page.to_dict())
def aliases(id):
    """Return the aliases of the entity ``id`` as a paged JSON list."""
    alias_pager = Pager(Entity.by_id(id).aliases, id=id)
    return jsonify(alias_pager.to_dict())
def aliases(id):
    """Paged JSON listing of an entity's alias records.

    NOTE(review): assumes ``Entity.by_id`` returns an object (not None)
    for the given id — confirm upstream handling of unknown ids.
    """
    entity = Entity.by_id(id)
    page = Pager(entity.aliases, id=id)
    return jsonify(page.to_dict())