def search(self):
    parser = SearchParamParser(request.params)
    params, errors = parser.parse()
    if errors:
        response.status = 400
        return {'errors': errors}

    expand_facets = params.pop('expand_facet_dimensions')

    # Default to all datasets visible to the current account, optionally
    # narrowed by category; facet expansion only applies to an explicit
    # dataset selection.
    datasets = params.pop('dataset', None)
    if datasets is None or not len(datasets):
        q = model.Dataset.all_by_account(c.account)
        if params.get('category'):
            q = q.filter_by(category=params.pop('category'))
        datasets = q.all()
        expand_facets = False

    if not len(datasets):
        return {'errors': [_("No dataset available.")]}

    # Enforce read authorization and restrict the query to these datasets.
    params['filter']['dataset'] = []
    for dataset in datasets:
        require.dataset.read(dataset)
        params['filter']['dataset'].append(dataset.name)

    # Support conditional requests via Last-Modified / ETag.
    response.last_modified = max([d.updated_at for d in datasets])
    etag_cache_keygen(parser.key(), response.last_modified)

    b = Browser(**params)
    try:
        stats, facets, entries = b.execute()
    except SolrException as e:
        return {'errors': [unicode(e)]}
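
# A minimal client-side sketch of the caching behaviour above: because the
# handler sets Last-Modified and an ETag (via etag_cache_keygen), a client
# can revalidate instead of re-running the Solr query. The endpoint URL and
# dataset name below are illustrative assumptions, not taken from the code
# above.
import requests

BASE = 'http://example.org/api/2/search'  # hypothetical route
first = requests.get(BASE, params={'dataset': 'my-dataset'})
again = requests.get(BASE, params={'dataset': 'my-dataset'},
                     headers={'If-None-Match': first.headers.get('ETag', '')})
# again.status_code == 304 would mean the datasets' updated_at timestamps
# are unchanged and the cached representation is still valid.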

def search(self):
    parser = SearchParamParser(request.params)
    params, errors = parser.parse()
    if errors:
        response.status = 400
        return {'errors': errors}

    expand_facets = params.pop('expand_facet_dimensions')

    datasets = params.pop('dataset', None)
    if datasets is None:
        datasets = model.Dataset.all_by_account(c.account)
        expand_facets = False

    for dataset in datasets:
        require.dataset.read(dataset)

    b = Browser(**params)
    stats, facets, entries = b.execute()
    # Decorate each entry with API links for its dataset.
    entries = [entry_apply_links(d.name, e) for d, e in entries]

    if expand_facets and len(datasets) == 1:
        _expand_facets(facets, datasets[0])

    return {
        'stats': stats,
        'facets': facets,
        'results': entries
    }
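
# Sketch of consuming the JSON payload returned by this version; the route,
# dataset name and parameter value are assumptions for illustration. The keys
# mirror the dict literally returned above: aggregate 'stats', Solr 'facets',
# and the link-decorated 'results' entries.
import requests

BASE = 'http://example.org/api/2/search'  # hypothetical route
body = requests.get(BASE, params={'dataset': 'my-dataset',
                                  'expand_facet_dimensions': 1}).json()
stats, facets, results = body['stats'], body['facets'], body['results']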

def search(self):
    parser = SearchParamParser(request.params)
    params, errors = parser.parse()
    if errors:
        response.status = 400
        return to_jsonp({'errors': errors})

    expand_facets = params.pop('expand_facet_dimensions')
    format = params.pop('format')
    if format == 'csv':
        # CSV output is row-oriented: drop stats and facets.
        params['stats'] = False
        params['facet_field'] = None

    datasets = params.pop('dataset', None)
    if datasets is None or not datasets:
        q = model.Dataset.all_by_account(c.account)
        if params.get('category'):
            q = q.filter_by(category=params.pop('category'))
        datasets = q.all()
        expand_facets = False

    if not datasets:
        return {'errors': ["No dataset available."]}

    params['filter']['dataset'] = []
    for dataset in datasets:
        require.dataset.read(dataset)
        params['filter']['dataset'].append(dataset.name)

    response.last_modified = max([d.updated_at for d in datasets])
    etag_cache_keygen(parser.key(), response.last_modified)

    self._response_params(params)

    if params['pagesize'] > parser.defaults['pagesize']:
        # Stream over-sized result sets page by page;
        # http://wiki.nginx.org/X-accel#X-Accel-Buffering
        response.headers['X-Accel-Buffering'] = 'no'
        if format == 'csv':
            csv_headers(response, 'entries.csv')
            streamer = CSVStreamingResponse(
                datasets, params,
                pagesize=parser.defaults['pagesize'])
            return streamer.response()
        else:
            json_headers(filename='entries.json')
            streamer = JSONStreamingResponse(
                datasets, params,
                pagesize=parser.defaults['pagesize'],
                expand_facets=_expand_facets if expand_facets else None,
                callback=request.params.get('callback'))
            return streamer.response()

    b = Browser(**params)
    try:
        b.execute()
    except SolrException as e:
        return {'errors': [unicode(e)]}
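
# Sketch of the large-result path above: requesting a pagesize above the
# parser default is what triggers streaming and the X-Accel-Buffering
# opt-out; format=csv then streams entries.csv (stats and facets disabled),
# while the JSON path can additionally wrap the stream in a JSONP callback.
# URL, dataset name and pagesize are illustrative assumptions.
import requests

BASE = 'http://example.org/api/2/search'  # hypothetical route
resp = requests.get(BASE, stream=True,
                    params={'dataset': 'my-dataset', 'format': 'csv',
                            'pagesize': 100000})
with open('entries.csv', 'wb') as fh:
    for chunk in resp.iter_content(chunk_size=8192):
        fh.write(chunk)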