Example #1
    def index(self, dataset, format='html'):
        # Get the dataset into the context variable 'c'
        self._get_dataset(dataset)

        # If the format is either json or csv we direct the user to the search
        # API instead
        if format in ['json', 'csv']:
            return redirect(h.url_for(controller='api/version2',
                                      action='search',
                                      format=format, dataset=dataset,
                                      **request.params))

        # Get the default view
        handle_request(request, c, c.dataset)

        # Parse the parameters using the SearchParamParser (used by the API)
        parser = EntryIndexParamParser(request.params)
        params, errors = parser.parse()

        # We have to remove page from the parameters because that's also
        # used in the Solr browser (which fetches the queries)
        params.pop('page')

        # We limit ourselves to only our dataset
        params['filter']['dataset'] = [c.dataset.name]
        facet_dimensions = {field.name: field
                            for field in c.dataset.dimensions
                            if field.facet}
        params['facet_field'] = facet_dimensions.keys()

        # Create a Solr browser and execute it
        b = Browser(**params)
        try:
            b.execute()
        except SolrException as e:
            return {'errors': [unicode(e)]}

        # Get the entries, each item is a tuple of the dataset and entry
        solr_entries = b.get_entries()
        entries = [entry for (dataset, entry) in solr_entries]

        # Get expanded facets for this dataset.
        c.facets = b.get_expanded_facets(c.dataset)

        # Create a pager for the entries
        c.entries = templating.Page(entries, **request.params)

        # Set the search word and default to empty string
        c.search = params.get('q', '')

        # Set filters (but remove the dataset as we don't need it)
        c.filters = params['filter']
        del c.filters['dataset']

        # We also make the facet dimensions and dimension names available
        c.facet_dimensions = facet_dimensions
        c.dimensions = [dimension.name for dimension in c.dataset.dimensions]

        # Render the entries page
        return templating.render('entry/index.html')
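
The controller above leans on a small Browser API: construct it from the parsed params, execute() it, then read entries and facets. A minimal sketch of that pattern, using only calls that appear in these examples; the parameter values are illustrative:

    # Sketch of the Browser query pattern used by the controller above.
    # Only calls visible in these examples are used; values are made up.
    params = {
        'q': 'health',                          # free-text query
        'filter': {'dataset': ['my-dataset']},  # limit to one dataset
        'facet_field': ['region'],              # dimensions to facet on
    }
    b = Browser(**params)
    try:
        b.execute()
    except SolrException as e:
        errors = [unicode(e)]
    else:
        # get_entries() yields (dataset, entry) tuples
        entries = [entry for (dataset, entry) in b.get_entries()]
        facets = b.get_facets()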
Example #2
    def test_entries_order(self):
        self.conn.raw_query.return_value = make_response([1, 2, 3])
        self.dataset.entries.return_value = make_entries([3, 1, 2])

        b = Browser()
        b.execute()
        entries = b.get_entries()

        h.assert_equal(map(lambda (a, b): b, entries), make_entries([1, 2, 3]))
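
Note: lambda (a, b): b relies on Python 2 tuple parameter unpacking, which was removed in Python 3 (PEP 3113); the next example is the same test rewritten without it.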
Example #3
    def test_entries_order(self):
        self.conn.raw_query.return_value = make_response([1, 2, 3])
        self.dataset.entries.return_value = make_entries([3, 1, 2])

        b = Browser()
        b.execute()
        entries = b.get_entries()

        assert map(lambda a_b: a_b[1], entries) == make_entries([1, 2, 3])
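
Projecting the entry out of each (dataset, entry) tuple can also be written with operator.itemgetter or an unpacking comprehension. A small self-contained sketch; the pairs below are stand-ins for what Browser.get_entries() returns:

    from operator import itemgetter

    pairs = [('ds', 'entry-a'), ('ds', 'entry-b')]
    # map with itemgetter(1) projects out the second element of each pair
    assert list(map(itemgetter(1), pairs)) == ['entry-a', 'entry-b']
    # an unpacking comprehension reads the same in Python 2 and 3
    assert [entry for _, entry in pairs] == ['entry-a', 'entry-b']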
Example #4
    def search(self):
        parser = SearchParamParser(request.params)
        params, errors = parser.parse()

        if errors:
            response.status = 400
            return to_jsonp({'errors': errors})

        expand_facets = params.pop('expand_facet_dimensions')

        format = params.pop('format')
        if format == 'csv':
            params['stats'] = False
            params['facet_field'] = None

        datasets = params.pop('dataset', None)
        if datasets is None or not datasets:
            q = Dataset.all_by_account(c.account)
            if params.get('category'):
                q = q.filter_by(category=params.pop('category'))
            datasets = q.all()
            expand_facets = False

        if not datasets:
            return {'errors': ["No dataset available."]}

        params['filter']['dataset'] = []
        for dataset in datasets:
            require.dataset.read(dataset)
            params['filter']['dataset'].append(dataset.name)

        response.last_modified = max([d.updated_at for d in datasets])
        etag_cache_keygen(parser.key(), response.last_modified)

        if params['pagesize'] > parser.defaults['pagesize']:

            # http://wiki.nginx.org/X-accel#X-Accel-Buffering
            response.headers['X-Accel-Buffering'] = 'no'

            if format == 'csv':
                csv_headers(response, 'entries.csv')
                streamer = CSVStreamingResponse(
                    datasets,
                    params,
                    pagesize=parser.defaults['pagesize']
                )
                return streamer.response()
            else:
                json_headers(filename='entries.json')
                streamer = JSONStreamingResponse(
                    datasets,
                    params,
                    pagesize=parser.defaults['pagesize'],
                    expand_facets=util.expand_facets
                    if expand_facets else None,
                    callback=request.params.get('callback')
                )
                return streamer.response()

        solr_browser = Browser(**params)
        try:
            solr_browser.execute()
        except SolrException as e:
            return {'errors': [unicode(e)]}

        entries = []
        for dataset, entry in solr_browser.get_entries():
            entry = entry_apply_links(dataset.name, entry)
            entry['dataset'] = dataset_apply_links(dataset.as_dict())
            entries.append(entry)

        if format == 'csv':
            return write_csv(entries, response,
                             filename='entries.csv')

        if expand_facets and len(datasets) == 1:
            facets = solr_browser.get_expanded_facets(datasets[0])
        else:
            facets = solr_browser.get_facets()

        return to_jsonp({
            'stats': solr_browser.get_stats(),
            'facets': facets,
            'results': entries
        })
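
For context, a hypothetical client call against this endpoint; the route (/api/2/search, inferred from the controller='api/version2' redirect in Example #1) and the host are assumptions, not part of the source:

    import requests

    # Hypothetical host and route; the response body mirrors the
    # to_jsonp() payload built at the end of search() above.
    resp = requests.get('http://example.com/api/2/search',
                        params={'q': 'education',
                                'dataset': 'my-dataset',
                                'pagesize': 50})
    resp.raise_for_status()
    data = resp.json()
    stats, facets, results = data['stats'], data['facets'], data['results']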
Example #5
    def index(self, dataset, format='html'):
        # Get the dataset into the context variable 'c'
        self._get_dataset(dataset)

        # If the format is either json or csv we direct the user to the search
        # API instead
        if format in ['json', 'csv']:
            return redirect(
                h.url_for(controller='api/version2',
                          action='search',
                          format=format,
                          dataset=dataset,
                          **request.params))

        # Get the default view
        handle_request(request, c, c.dataset)

        # Parse the parameters using the SearchParamParser (used by the API)
        parser = EntryIndexParamParser(request.params)
        params, errors = parser.parse()

        # We have to remove page from the parameters because that's also
        # used in the Solr browser (which fetches the queries)
        params.pop('page')

        # We limit ourselves to only our dataset
        params['filter']['dataset'] = [c.dataset.name]
        facet_dimensions = {
            field.name: field
            for field in c.dataset.dimensions if field.facet
        }
        params['facet_field'] = facet_dimensions.keys()

        # Create a Solr browser and execute it
        b = Browser(**params)
        try:
            b.execute()
        except SolrException as e:
            return {'errors': [unicode(e)]}

        # Get the entries, each item is a tuple of (dataset, entry)
        solr_entries = b.get_entries()
        # We are only interested in the entry in the tuple since we know
        # the dataset
        entries = [entry[1] for entry in solr_entries]

        # Get expanded facets for this dataset.
        c.facets = b.get_expanded_facets(c.dataset)

        # Create a pager for the entries
        c.entries = templating.Page(entries, **request.params)

        # Set the search word and default to empty string
        c.search = params.get('q', '')

        # Set filters (but remove the dataset as we don't need it)
        c.filters = params['filter']
        del c.filters['dataset']

        # We also make the facet dimensions and dimension names available
        c.facet_dimensions = facet_dimensions
        c.dimensions = [dimension.name for dimension in c.dataset.dimensions]

        # Render the entries page
        return templating.render('entry/index.html')
Example #6
    def search(self):
        parser = SearchParamParser(request.params)
        params, errors = parser.parse()

        if errors:
            response.status = 400
            return to_jsonp({'errors': errors})

        expand_facets = params.pop('expand_facet_dimensions')

        format = params.pop('format')
        if format == 'csv':
            params['stats'] = False
            params['facet_field'] = None

        datasets = params.pop('dataset', None)
        if datasets is None or not datasets:
            q = Dataset.all_by_account(c.account)
            if params.get('category'):
                q = q.filter_by(category=params.pop('category'))
            datasets = q.all()
            expand_facets = False

        if not datasets:
            return {'errors': ["No dataset available."]}

        params['filter']['dataset'] = []
        for dataset in datasets:
            require.dataset.read(dataset)
            params['filter']['dataset'].append(dataset.name)

        response.last_modified = max([d.updated_at for d in datasets])
        etag_cache_keygen(parser.key(), response.last_modified)

        if params['pagesize'] > parser.defaults['pagesize']:

            # http://wiki.nginx.org/X-accel#X-Accel-Buffering
            response.headers['X-Accel-Buffering'] = 'no'

            if format == 'csv':
                csv_headers(response, 'entries.csv')
                streamer = CSVStreamingResponse(
                    datasets, params, pagesize=parser.defaults['pagesize'])
                return streamer.response()
            else:
                json_headers(filename='entries.json')
                streamer = JSONStreamingResponse(
                    datasets,
                    params,
                    pagesize=parser.defaults['pagesize'],
                    expand_facets=util.expand_facets
                    if expand_facets else None,
                    callback=request.params.get('callback'))
                return streamer.response()

        solr_browser = Browser(**params)
        try:
            solr_browser.execute()
        except SolrException as e:
            return {'errors': [unicode(e)]}

        entries = []
        for dataset, entry in solr_browser.get_entries():
            entry = entry_apply_links(dataset.name, entry)
            entry['dataset'] = dataset_apply_links(dataset.as_dict())
            entries.append(entry)

        if format == 'csv':
            return write_csv(entries, response, filename='entries.csv')

        if expand_facets and len(datasets) == 1:
            facets = solr_browser.get_expanded_facets(datasets[0])
        else:
            facets = solr_browser.get_facets()

        return to_jsonp({
            'stats': solr_browser.get_stats(),
            'facets': facets,
            'results': entries
        })
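
When the requested pagesize exceeds the parser default, both search variants switch to a streaming response and send X-Accel-Buffering: no so nginx does not buffer the body. A hypothetical client that would take the streaming CSV branch; the URL and the threshold value are assumptions:

    import requests

    # A pagesize above the parser default routes the request through
    # CSVStreamingResponse; stream=True lets the client consume the
    # body incrementally instead of loading it all at once.
    resp = requests.get('http://example.com/api/2/search',
                        params={'format': 'csv', 'pagesize': 10000},
                        stream=True)
    with open('entries.csv', 'wb') as out:
        for chunk in resp.iter_content(chunk_size=8192):
            out.write(chunk)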