def _prepare_and_render(self,
                        save_type='',
                        data=None,
                        errors=None,
                        error_summary=None):
        """Render the contribute-flow create/edit page or emit its state as JSON.

        Builds the template context (form data, analytics payload, validation
        errors). When ``save_type`` contains ``-json`` the context is returned
        as a JSON body with the appropriate Content-Type header; otherwise the
        HTML template is rendered with the same context.
        """
        template_data = {
            'data': data,
            'analytics': analytics.generate_analytics_data(data),
            'errors': errors,
            'error_summary': error_summary,
            'aborted': False,
        }

        # Guard: a None save_type must not blow up the substring check below.
        if '-json' in (save_type or ''):
            response.headers['Content-Type'] = CONTENT_TYPES['json']
            return json.dumps(template_data)

        return base.render('contribute_flow/create_edit.html',
                           extra_vars=template_data)
# Example 2
    def _performing_search(self,
                           q,
                           fq,
                           facet_keys,
                           limit,
                           page,
                           sort_by,
                           search_extras,
                           pager_url,
                           context,
                           fq_list=None,
                           expand='false',
                           enable_update_status_facet=False):
        """Run a ``package_search`` and populate the template context ``c``.

        Builds the search data dict (query, filters, facets, paging, sort),
        executes the search action, post-processes the found packages, and
        fills ``c`` (facets, pager, counts, per-dataset batch/analytics info)
        for the search-results template.

        :param q: free-text query string
        :param fq: filter query string (whitespace is stripped)
        :param facet_keys: facet field names to request
        :param limit: page size (rows per page)
        :param page: 1-based page number
        :param sort_by: solr sort expression
        :param search_extras: extra params forwarded to ``package_search``
        :param pager_url: callable used by ``h.Page`` to build page links
        :param context: CKAN action context; ``ignore_capacity_check`` is
            popped from it and forwarded as ``include_private``
        :param fq_list: optional list of additional filter queries
        :param expand: solr collapse/expand flag (string ``'true'``/``'false'``)
        :param enable_update_status_facet: kept for interface compatibility
        :return: the raw ``package_search`` result dict
        """
        data_dict = {
            'q': q,
            'fq_list': fq_list if fq_list else [],
            'expand': expand,
            # we anyway don't show the expanded datasets, but doesn't work with 0
            'expand.rows': 1,
            'fq': fq.strip(),
            'facet.field': facet_keys,
            # added for https://github.com/OCHA-DAP/hdx-ckan/issues/3340
            'facet.limit': 2000,
            'rows': limit,
            'start': (page - 1) * limit,
            'sort': sort_by,
            'extras': search_extras,
            'ext_compute_freshness': 'true'
        }

        self._add_additional_faceting_queries(data_dict)

        # 'ignore_capacity_check' is consumed here so the action doesn't see
        # it on the context; it becomes an explicit search parameter instead.
        include_private = context.pop('ignore_capacity_check', None)
        if include_private:
            data_dict['include_private'] = include_private

        query = get_action('package_search')(context, data_dict)

        if not query.get('results', None):
            # log.warn() is deprecated since Python 3.3; use log.warning()
            log.warning(
                'No query results found for data_dict: {}. Query dict is: {}. Query time {}'
                .format(str(data_dict), str(query), datetime.datetime.now()))

        self._process_found_package_list(query['results'])

        c.facets = query['facets']
        c.search_facets = query['search_facets']

        # if we're using collapse/expand/batch then take total count from facet site_id
        # NOTE(review): 'expand' is a string, so even the default 'false' is
        # truthy and this branch always runs — confirm whether that's intended.
        if expand:
            site_id_items = query['search_facets'].get('site_id',
                                                       {}).get('items', [])
            c.batch_total_items = sum(
                (item.get('count', 0) for item in site_id_items))

        get_action('populate_showcase_items_count')(
            context, {
                'pkg_dict_list': query['results']
            })

        c.page = h.Page(collection=query['results'],
                        page=page,
                        url=pager_url,
                        item_count=query['count'],
                        items_per_page=limit)

        # Annotate each found dataset with download/batch/analytics info used
        # by the results template.
        for dataset in query['results']:
            dataset['approx_total_downloads'] = find_approx_download(
                dataset.get('total_res_downloads', 0))

            dataset['batch_length'] = query['expanded'].get(
                dataset.get('batch', ''), {}).get('numFound', 0)
            if dataset.get('organization'):
                dataset['batch_url'] = h.url_for(
                    'organization_read',
                    id=dataset['organization'].get('name'),
                    ext_batch=dataset.get('batch'))

            dataset['hdx_analytics'] = json.dumps(
                generate_analytics_data(dataset))

        c.page.items = query['results']
        c.sort_by_selected = query['sort']

        c.count = c.item_count = query['count']

        return query