import inspect
from datetime import datetime, timedelta

from flask import request, url_for

# Application-level helpers referenced below (filter_query, response_format,
# stream_csv, jsonify, paged_url, get_limit, get_offset, make_facets, arg_int,
# validate_cache, NotFound, cls, name, REPORTS) are imported or defined
# elsewhere in the original module.


def index(format=None):
    request.cache_key['args'] = request.args.items()
    q = filter_query(cls.all())
    if 'update' in request.args:
        # FIXME set time correctly!
        lastupdate = datetime.now().replace(hour=1, minute=0, second=0,
                                            microsecond=0) \
            - timedelta(days=arg_int('update', 1))
        q = q.filter(cls.updated_at > lastupdate)
    format = response_format(request)
    if format == 'csv':
        # Stream the full, unpaginated result set as CSV.
        def generate():
            for entity in q:
                if hasattr(entity, 'as_shallow'):
                    yield entity.as_shallow()
                else:
                    yield entity.as_dict()
        return stream_csv(generate(), filename="%s.csv" % name)
    count = q.count()
    if q.has_fts:
        # Rank full-text search matches by relevance.
        q = q.order_by('ts_rank_cd(entity.full_text, '
                       'plainto_tsquery(:ftsq)) DESC')
    limit = get_limit()
    q = q.limit(limit)
    offset = get_offset()
    q = q.offset(offset)
    return jsonify({
        'count': count,
        'next': paged_url('.index', limit, offset + limit)
            if count > offset + limit else False,
        'previous': paged_url('.index', limit, offset - limit)
            if offset > 0 else False,
        'limit': limit,
        'offset': offset,
        'results': q,
        'facets': make_facets()
    }, shallow=True)
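# The pagination helpers called above are not part of this listing. The sketch
# below is an assumption inferred from the call sites, not the project's
# actual implementation; defaults and the maximum limit are made up.
def get_limit(default=50, max_limit=1000):
    # Read ?limit= from the query string, clamped to [0, max_limit].
    try:
        limit = int(request.args.get('limit', default))
    except (ValueError, TypeError):
        limit = default
    return max(0, min(limit, max_limit))


def get_offset(default=0):
    # Read ?offset= from the query string; never negative.
    try:
        offset = int(request.args.get('offset', default))
    except (ValueError, TypeError):
        offset = default
    return max(0, offset)


def paged_url(endpoint, limit, offset, **kwargs):
    # Absolute URL for the same endpoint with new paging parameters.
    return url_for(endpoint, limit=limit, offset=max(0, offset),
                   _external=True, **kwargs)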
def handle_exceptions(exc):
    """ Re-format exceptions to JSON if accept requires that. """
    format = response_format(request)
    if format == 'json':
        body = {'status': exc.code,
                'name': exc.name,
                'description': exc.get_description(request.environ)}
        return jsonify(body, status=exc.code,
                       headers=exc.get_headers(request.environ))
    return exc
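# handle_exceptions only takes effect once it is registered with the
# application; a minimal sketch of that wiring (the function and the list of
# status codes are illustrative, not the project's actual setup):
def register_error_handlers(app):
    for code in (400, 401, 403, 404, 405, 409, 410, 500):
        app.errorhandler(code)(handle_exceptions)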
def view(id):
    obj = cls.by_id(id)
    if obj is None:
        return NotFound(id)
    # check this before lazy-loading during serialization
    for v in ['updated_at', 'created_at']:
        if hasattr(obj, v):
            request.cache_key['modified'] = getattr(obj, v)
    validate_cache(request)
    return jsonify(obj)
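# validate_cache is assumed to short-circuit the request when the client's
# cached copy is still current, based on the cache_key['modified'] value set
# above. An illustrative sketch; NotModified and its handling are assumptions,
# not the project's actual code:
class NotModified(Exception):
    # An error handler elsewhere would turn this into an empty 304 response.
    pass


def validate_cache(request):
    modified = request.cache_key.get('modified')
    ims = request.if_modified_since
    if modified is not None and ims is not None \
            and modified.replace(microsecond=0) <= ims:
        raise NotModified()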
def report_index():
    reports = []
    for name, report in REPORTS.items():
        # Expose each report's signature so clients can see which
        # query parameters it accepts.
        argspec = inspect.getargspec(report)
        r = {
            'name': name,
            'description': report.__doc__,
            'params': argspec.args,
            'uri': url_for('.report', name=name, _external=True)
        }
        reports.append(r)
    return jsonify({'count': len(REPORTS), 'results': reports})
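# REPORTS is assumed to be a name -> function registry populated elsewhere.
# One plausible shape (the decorator and the sample report are hypothetical;
# the only constraint visible here is that reports return a filterable query):
REPORTS = {}


def register_report(func):
    REPORTS[func.__name__] = func
    return func


@register_report
def recently_updated(days=7):
    """ Entities updated within the last ``days`` days. """
    since = datetime.utcnow() - timedelta(days=int(days))
    return cls.all().filter(cls.updated_at > since)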
def report(name, format=None):
    if name not in REPORTS:
        return NotFound(name)
    func = REPORTS[name]
    argspec = inspect.getargspec(func)
    args = {}
    for k, v in request.args.items():
        # Only pass through query arguments the report actually accepts.
        if k in argspec.args:
            args[k] = v
    try:
        q = func(**args)
    except Exception, exc:
        return jsonify({'error': unicode(exc)}, status=400)
    format = response_format(request)
    if format == 'csv':
        return stream_csv(get_results(q), filename="%s.csv" % name)
    count = q.count()
    limit = get_limit()
    q = q.limit(limit)
    offset = get_offset()
    q = q.offset(offset)
    return jsonify({
        'count': count,
        'next': paged_url('.report', limit, offset + limit, name=name),
        'previous': paged_url('.report', limit, offset - limit, name=name),
        'limit': limit,
        'offset': offset,
        'query': str(q),
        'results': get_results(q)
    }, shallow=True)
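# get_results is assumed to turn report query rows into serializable dicts;
# an illustrative sketch, not the project's actual helper:
def get_results(q):
    for row in q:
        if hasattr(row, '_asdict'):
            # SQLAlchemy keyed tuples from column-level queries.
            yield row._asdict()
        elif hasattr(row, 'as_dict'):
            # Full model instances.
            yield row.as_dict()
        else:
            yield dict(row)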
def trail(id):
    obj = cls.by_id(id)
    if obj is None:
        return NotFound(id)
    return jsonify(obj.trail())
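# The url_for('.index') / url_for('.report') calls imply these handlers live
# on a blueprint; a sketch of plausible wiring (the blueprint name and URL
# rules are assumptions, not the project's actual routes):
from flask import Blueprint

blueprint = Blueprint('api', __name__)
blueprint.add_url_rule('/api/1/entities', view_func=index)
blueprint.add_url_rule('/api/1/entities/<int:id>', view_func=view)
blueprint.add_url_rule('/api/1/entities/<int:id>/trail', view_func=trail)
blueprint.add_url_rule('/api/1/reports', view_func=report_index)
blueprint.add_url_rule('/api/1/reports/<name>', view_func=report)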