def prepare_context(self, request):
    """Load entity metadata for the requested cycle and build the context.

    Sets self.metadata, self.cycle, self.standardized_name and
    self.external_ids as side effects, then returns the template context
    from the parent view augmented with entity/cycle information.

    Raises Http404 when the metadata backend reports a 404.
    """
    try:
        self.metadata, self.cycle = get_metadata(self.entity_id, request,
                                                 self.type)
    except Exception as e:
        # Translate a backend "not found" into Django's Http404;
        # anything else propagates unchanged.
        if getattr(e, 'code', None) == 404:
            raise Http404
        raise

    self.check_metadata()

    entity_info = self.metadata['entity_info']
    self.standardized_name = standardize_name(entity_info['name'], self.type)
    self.external_ids = entity_info['external_ids']

    context = super(EntityView, self).prepare_context(request)
    context['available_cycles'] = self.metadata['available_cycles']
    context['entity_id'] = self.entity_id
    context['cycle'] = self.cycle
    context['entity_info'] = entity_info
    entity_info['metadata']['source_display_name'] = \
        get_source_display_name(entity_info['metadata'])

    cycle_key = unicode(str(self.cycle))
    if self.cycle != DEFAULT_CYCLE and cycle_key in entity_info['metadata']:
        # copy the current cycle's metadata into the generic metadata spot
        entity_info['metadata'].update(entity_info['metadata'][cycle_key])

    return context
Beispiel #2
0
    def prepare_context(self, request):
        """Fetch metadata for this entity/cycle and assemble the context.

        Populates self.metadata, self.cycle, self.standardized_name and
        self.external_ids, then extends the superclass context with the
        entity's cycle and metadata details.

        Raises Http404 when the lookup fails with a 404 code.
        """
        try:
            self.metadata, self.cycle = get_metadata(
                self.entity_id, request, self.type)
        except Exception as e:
            # Map backend 404s to Django's Http404; re-raise everything else.
            if hasattr(e, "code") and e.code == 404:
                raise Http404
            raise

        self.check_metadata()

        info = self.metadata["entity_info"]
        self.standardized_name = standardize_name(info["name"], self.type)
        self.external_ids = info["external_ids"]

        context = super(EntityView, self).prepare_context(request)
        context.update({
            "available_cycles": self.metadata["available_cycles"],
            "entity_id": self.entity_id,
            "cycle": self.cycle,
            "entity_info": info,
        })
        info["metadata"]["source_display_name"] = get_source_display_name(
            info["metadata"])

        key = unicode(str(self.cycle))
        if self.cycle != DEFAULT_CYCLE and key in info["metadata"]:
            # fold the selected cycle's metadata into the generic slot
            info["metadata"].update(info["metadata"][key])

        return context
Beispiel #3
0
def search(request, search_type, search_subtype):
    """Run an entity search and render the results page.

    Redirects home when the query is missing or the form is invalid,
    straight to an entity page when exactly one result matches, and to a
    type-specific sub-search when every result shares one type.
    """
    if not request.GET.get('query', None):
        # BUG FIX: the redirect was constructed but never returned, so an
        # empty query fell through into form validation instead of
        # short-circuiting back to the home page.
        return HttpResponseRedirect('/')

    submitted_form = SearchForm(request.GET)
    if submitted_form.is_valid():
        query = submitted_form.cleaned_data['query'].strip()

        # see ticket #545
        query = query.replace(u"’", "'")

        # strip accents/diacritics so ASCII-only backends match
        query = unicodedata.normalize('NFKD', query).encode('ascii', 'ignore')

        # if a user submitted the search value from the form, then
        # treat the hyphens as intentional. if it was from a url, then
        # the name has probably been slug-ized and we need to remove
        # any single occurences of hyphens.
        if not request.GET.get('from_form', None):
            query = query.replace('-', ' ')

        per_page = 5 if search_type == 'all' else 10
        page = 1 if search_type == 'all' else request.GET.get('page', 1)

        results = {'per_page_slice': ":%s" % per_page}

        # per-result-set keyword arguments for _cached_search
        search_kwargs = defaultdict(dict)
        if search_subtype:
            search_kwargs[search_type]['subtype'] = search_subtype
            if search_subtype == 'politicians':
                state = request.GET.get('state', None)
                seat = request.GET.get('seat', None)
                party = request.GET.get('party', None)

                if state:
                    results['state_filter'] = state
                    search_kwargs[search_type]['state'] = state
                if seat:
                    results['seat_filter'] = seat
                    search_kwargs[search_type]['seat'] = seat
                if party:
                    results['party_filter'] = party
                    search_kwargs[search_type]['party'] = party

        results['result_sets'] = OrderedDict([
            ('groups', _cached_search(query, per_page=10, page=page, type=('organization', 'industry'), **(search_kwargs['groups']))),
            ('people', _cached_search(query, per_page=10, page=page, type=('individual', 'politician'), **(search_kwargs['people'])))
        ])

        all_results = reduce(operator.add, [t['results'] for t in results['result_sets'].values()])

        if len(all_results) == 1:
            # if there's just one result, redirect to that entity's page
            result_type = all_results[0]['type']
            name = slugify(standardize_name(all_results[0]['name'], result_type))
            _id = all_results[0]['id']
            return HttpResponseRedirect('/%s/%s/%s' % (result_type, name, _id))
        elif len(all_results) > 0 and search_type == "all":
            # if there's only one type of result, redirect to a sub-search
            for result_type, result_set in results['result_sets'].items():
                if len(result_set['results']) == len(all_results):
                    return HttpResponseRedirect('/search/%s?%s' % (result_type, urllib.urlencode(request.GET)))

        # do a tiny bit of regulations-specific hackery: if there are org results, stash a thread-local copy of the Docket Wrench entity list so it doesn't have to be recreated for each result
        dw_entity_list = None
        if results['result_sets']['groups']['results']:
            external_sites._dw_local.dw_entity_list = dw_entity_list = external_sites.get_docketwrench_entity_list()

        for result in (all_results if search_type == 'all' else results['result_sets'][search_type]['results']):
            result['url'] = "/%s/%s/%s" % (result['type'], slugify(standardize_name(result['name'], result['type'])), result['id'])

            if result['type'] == 'organization':
                result['has_fec_id'] = len([eid for eid in result['external_ids'] if eid['namespace'] == "urn:fec:committee"]) > 0

            # munge results a bit to handle available sections
            result.update(get_data_types(result['type'], result['totals']))
            result['sections'] = []
            # simulate an entity view so we can query said entity view's sections to see if they're availble
            dummy_view = DummyEntity(result)

            view_type = entity_views[result['type']]
            for section_type in view_type.sections:
                dummy_section = section_type(dummy_view)
                if dummy_section.should_fetch():
                    result['sections'].append(dummy_section)

        # clean up after DW hackery
        if dw_entity_list:
            del external_sites._dw_local.dw_entity_list

        results['total_results'] = sum([result.get('total', 0) for result in results['result_sets'].values()])
        results['has_results'] = (results['total_results'] if search_type == 'all' else results['result_sets'][search_type]['total']) > 0
        results['query'] = query
        results['search_type'] = search_type
        results['total'] = len(all_results)
        for result_set in results['result_sets'].values():
            result_set['start'] = (result_set['page'] - 1) * per_page + 1
            result_set['end'] = result_set['start'] + min(per_page, len(result_set['results'])) - 1

        results['search_subtype'] = search_subtype
        results['search_subtypes'] = {
            'people': [('all', 'All people'), ('contributors', 'Contributors'), ('lobbyists', 'Lobbyists'), ('politicians', 'Politicians')],
            'groups': [('all', 'All groups'), ('industries', 'Industries'), ('lobbying_firms', 'Lobbying organizations'), ('political_groups', 'Political groups'), ('other_orgs', 'Businesses and other organizations')]
        }

        # rebuild the querystring without the page number for pagination links
        qs_attrs = request.GET.copy()
        if 'page' in qs_attrs:
            del qs_attrs['page']
        results['qs'] = urllib.urlencode(qs_attrs)

        if search_subtype == 'politicians':
            results['states'] = POL_STATES
            results['seats'] = ["federal:president", "federal:senate", "federal:house", "state:governor", "state:judicial", "state:lower", "state:upper", "state:office"]
            results['parties'] = [('D', 'Democrat'), ('R', 'Republican'), ('O', 'Other')]

        return render_to_response('search/results.html', results, RequestContext(request))
    else:
        return HttpResponseRedirect('/')
def standardize_name_filter(name, type):
    """Template-filter shim: delegate name standardization to the helper."""
    standardized = standardize_name(name, type)
    return standardized
def standardize_name_filter(name, type):
    # Thin wrapper exposing standardize_name as a template filter.
    result = standardize_name(name, type)
    return result
Beispiel #6
0
def search(request, search_type, search_subtype):
    """Run an entity search and render the results page.

    Redirects home when the query is missing or the form is invalid,
    straight to an entity page when exactly one result matches, and to a
    type-specific sub-search when every result shares one type.
    """
    if not request.GET.get('query', None):
        # BUG FIX: the redirect was created but never returned, so an
        # empty query fell through into form validation instead of
        # short-circuiting back to the home page.
        return HttpResponseRedirect('/')

    submitted_form = SearchForm(request.GET)
    if submitted_form.is_valid():
        query = submitted_form.cleaned_data['query'].strip()

        # see ticket #545
        query = query.replace(u"’", "'")

        # strip accents/diacritics so ASCII-only backends match
        query = unicodedata.normalize('NFKD', query).encode('ascii', 'ignore')

        # if a user submitted the search value from the form, then
        # treat the hyphens as intentional. if it was from a url, then
        # the name has probably been slug-ized and we need to remove
        # any single occurences of hyphens.
        if not request.GET.get('from_form', None):
            query = query.replace('-', ' ')

        per_page = 5 if search_type == 'all' else 10
        page = 1 if search_type == 'all' else request.GET.get('page', 1)

        results = {'per_page_slice': ":%s" % per_page}

        # per-result-set keyword arguments for _cached_search
        search_kwargs = defaultdict(dict)
        if search_subtype:
            search_kwargs[search_type]['subtype'] = search_subtype
            if search_subtype == 'politicians':
                state = request.GET.get('state', None)
                seat = request.GET.get('seat', None)
                party = request.GET.get('party', None)

                if state:
                    results['state_filter'] = state
                    search_kwargs[search_type]['state'] = state
                if seat:
                    results['seat_filter'] = seat
                    search_kwargs[search_type]['seat'] = seat
                if party:
                    results['party_filter'] = party
                    search_kwargs[search_type]['party'] = party

        results['result_sets'] = OrderedDict([
            ('groups',
             _cached_search(query,
                            per_page=10,
                            page=page,
                            type=('organization', 'industry'),
                            **(search_kwargs['groups']))),
            ('people',
             _cached_search(query,
                            per_page=10,
                            page=page,
                            type=('individual', 'politician'),
                            **(search_kwargs['people'])))
        ])

        all_results = reduce(
            operator.add,
            [t['results'] for t in results['result_sets'].values()])

        if len(all_results) == 1:
            # if there's just one result, redirect to that entity's page
            result_type = all_results[0]['type']
            name = slugify(
                standardize_name(all_results[0]['name'], result_type))
            _id = all_results[0]['id']
            return HttpResponseRedirect('/%s/%s/%s' % (result_type, name, _id))
        elif len(all_results) > 0 and search_type == "all":
            # if there's only one type of result, redirect to a sub-search
            for result_type, result_set in results['result_sets'].items():
                if len(result_set['results']) == len(all_results):
                    return HttpResponseRedirect(
                        '/search/%s?%s' %
                        (result_type, urllib.urlencode(request.GET)))

        # do a tiny bit of regulations-specific hackery: if there are org results, stash a thread-local copy of the Docket Wrench entity list so it doesn't have to be recreated for each result
        dw_entity_list = None
        if results['result_sets']['groups']['results']:
            external_sites._dw_local.dw_entity_list = dw_entity_list = external_sites.get_docketwrench_entity_list(
            )

        for result in (all_results if search_type == 'all' else
                       results['result_sets'][search_type]['results']):
            result['url'] = "/%s/%s/%s" % (
                result['type'],
                slugify(standardize_name(result['name'],
                                         result['type'])), result['id'])

            if result['type'] == 'organization':
                result['has_fec_id'] = len([
                    eid for eid in result['external_ids']
                    if eid['namespace'] == "urn:fec:committee"
                ]) > 0

            # munge results a bit to handle available sections
            result.update(get_data_types(result['type'], result['totals']))
            result['sections'] = []
            # simulate an entity view so we can query said entity view's sections to see if they're availble
            dummy_view = DummyEntity(result)

            view_type = entity_views[result['type']]
            for section_type in view_type.sections:
                dummy_section = section_type(dummy_view)
                if dummy_section.should_fetch():
                    result['sections'].append(dummy_section)

        # clean up after DW hackery
        if dw_entity_list:
            del external_sites._dw_local.dw_entity_list

        results['total_results'] = sum([
            result.get('total', 0)
            for result in results['result_sets'].values()
        ])
        results['has_results'] = (
            results['total_results'] if search_type == 'all' else
            results['result_sets'][search_type]['total']) > 0
        results['query'] = query
        results['search_type'] = search_type
        results['total'] = len(all_results)
        for result_set in results['result_sets'].values():
            result_set['start'] = (result_set['page'] - 1) * per_page + 1
            result_set['end'] = result_set['start'] + min(
                per_page, len(result_set['results'])) - 1

        results['search_subtype'] = search_subtype
        results['search_subtypes'] = {
            'people': [('all', 'All people'), ('contributors', 'Contributors'),
                       ('lobbyists', 'Lobbyists'),
                       ('politicians', 'Politicians')],
            'groups': [('all', 'All groups'), ('industries', 'Industries'),
                       ('lobbying_firms', 'Lobbying organizations'),
                       ('political_groups', 'Political groups'),
                       ('other_orgs', 'Businesses and other organizations')]
        }

        # rebuild the querystring without the page number for pagination links
        qs_attrs = request.GET.copy()
        if 'page' in qs_attrs:
            del qs_attrs['page']
        results['qs'] = urllib.urlencode(qs_attrs)

        if search_subtype == 'politicians':
            results['states'] = POL_STATES
            results['seats'] = [
                "federal:president", "federal:senate", "federal:house",
                "state:governor", "state:judicial", "state:lower",
                "state:upper", "state:office"
            ]
            results['parties'] = [('D', 'Democrat'), ('R', 'Republican'),
                                  ('O', 'Other')]

        return render_to_response('search/results.html', results,
                                  RequestContext(request))
    else:
        return HttpResponseRedirect('/')
Beispiel #7
0
 def fix_name(n):
     """Standardize a politician name; return display name, last name and slug."""
     name = standardize_name(n, 'politician')
     # str() is the idiomatic spelling of the former name.__str__() call
     str_name = str(name)
     return {'name': str_name, 'last_name': name.last, 'slug': slugify(str_name)}