Example 1
def custom_context(request):
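    # context processor: exposes the search form, API settings/URLs, and a
    # FIRST_VIEW flag (true when the referrer host differs from the current host)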
    return {
        'search_form': SearchForm(),
        'API_KEY': getattr(settings, 'CLIENT_API_KEY', settings.API_KEY),
        'AGGREGATES_API_BASE_URL': getattr(settings, 'CLIENT_AGGREGATES_API_BASE_URL', settings.AGGREGATES_API_BASE_URL),
        'DOCKETWRENCH_URL': getattr(settings, 'DOCKETWRENCH_URL', 'http://docketwrench.sunlightfoundation.com'),
        'FIRST_VIEW': urlparse.urlparse(request.META.get('HTTP_REFERER', '')).netloc != request.META.get('HTTP_HOST', ''),
        'CHART_FALLBACK_URL': getattr(settings, 'CHART_FALLBACK_URL', '/chart/'),
    }
Example 2
def page_not_found(request, template_name='404.html'):
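    # custom 404 handler: when the requested path looks like an entity URL,
    # pre-fill the search form with a de-slugified version of the entity name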
    t = loader.get_template(template_name)
    search = SearchForm()
    matches = re.findall(
        r'^/(organization|individual|politician)/([a-zA-Z0-9\-]*?)/',
        request.path)
    if matches:
        search.fields['query'].initial = matches[0][1].replace('-', ' ').title()
    return http.HttpResponseNotFound(
        t.render(
            RequestContext(request, {
                'request_page': request.path,
                'search_form_ext': search
            })))
Example 3
def search(request, search_type, search_subtype):
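    # combined search view: 'all' shows both result sets, while the
    # 'groups'/'people' sub-searches paginate a single set and accept
    # optional politician filters (state, seat, party)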
    if not request.GET.get('query', None):
        return HttpResponseRedirect('/')

    submitted_form = SearchForm(request.GET)
    if submitted_form.is_valid():
        query = submitted_form.cleaned_data['query'].strip()

        # see ticket #545
        query = query.replace(u"’", "'")

        query = unicodedata.normalize('NFKD', query).encode('ascii', 'ignore')

        # if a user submitted the search value from the form, then
        # treat the hyphens as intentional. if it was from a url, then
        # the name has probably been slug-ized and we need to remove
        # any single occurrences of hyphens.
        if not request.GET.get('from_form', None):
            query = query.replace('-', ' ')

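        # the combined ('all') page shows a short slice of each result set;
        # sub-searches use the full page size and honor the page parameter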
        per_page = 5 if search_type == 'all' else 10
        page = 1 if search_type == 'all' else request.GET.get('page', 1)

        results = {'per_page_slice': ":%s" % per_page}

        search_kwargs = defaultdict(dict)
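        # extra filter kwargs passed through to the search backend, keyed by result set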
        if search_subtype:
            search_kwargs[search_type]['subtype'] = search_subtype
            if search_subtype == 'politicians':
                state = request.GET.get('state', None)
                seat = request.GET.get('seat', None)
                party = request.GET.get('party', None)

                if state:
                    results['state_filter'] = state
                    search_kwargs[search_type]['state'] = state
                if seat:
                    results['seat_filter'] = seat
                    search_kwargs[search_type]['seat'] = seat
                if party:
                    results['party_filter'] = party
                    search_kwargs[search_type]['party'] = party

        results['result_sets'] = OrderedDict([
            ('groups', _cached_search(query, per_page=10, page=page, type=('organization', 'industry'), **(search_kwargs['groups']))),
            ('people', _cached_search(query, per_page=10, page=page, type=('individual', 'politician'), **(search_kwargs['people'])))
        ])

        all_results = reduce(operator.add, [t['results'] for t in results['result_sets'].values()])

        if len(all_results) == 1:
            # if there's just one result, redirect to that entity's page
            result_type = all_results[0]['type']
            name = slugify(standardize_name(all_results[0]['name'], result_type))
            _id = all_results[0]['id']
            return HttpResponseRedirect('/%s/%s/%s' % (result_type, name, _id))
        elif len(all_results) > 0 and search_type == "all":
            # if there's only one type of result, redirect to a sub-search
            for result_type, result_set in results['result_sets'].items():
                if len(result_set['results']) == len(all_results):
                    return HttpResponseRedirect('/search/%s?%s' % (result_type, urllib.urlencode(request.GET)))


        # do a tiny bit of regulations-specific hackery: if there are org results, stash
        # a thread-local copy of the Docket Wrench entity list so it doesn't have to be
        # recreated for each result
        dw_entity_list = None
        if results['result_sets']['groups']['results']:
            external_sites._dw_local.dw_entity_list = dw_entity_list = external_sites.get_docketwrench_entity_list()

        for result in (all_results if search_type == 'all' else results['result_sets'][search_type]['results']):
            result['url'] = "/%s/%s/%s" % (result['type'], slugify(standardize_name(result['name'], result['type'])), result['id'])

            if result['type'] == 'organization':
                result['has_fec_id'] = len([eid for eid in result['external_ids'] if eid['namespace'] == "urn:fec:committee"]) > 0

            # munge results a bit to handle available sections
            result.update(get_data_types(result['type'], result['totals']))
            result['sections'] = []
            # simulate an entity view so we can query said entity view's sections
            # to see if they're available
            dummy_view = DummyEntity(result)

            view_type = entity_views[result['type']]
            for section_type in view_type.sections:
                dummy_section = section_type(dummy_view)
                if dummy_section.should_fetch():
                    result['sections'].append(dummy_section)

        # clean up after DW hackery
        if dw_entity_list:
            del external_sites._dw_local.dw_entity_list

        results['total_results'] = sum([result.get('total', 0) for result in results['result_sets'].values()])
        results['has_results'] = (results['total_results'] if search_type == 'all' else results['result_sets'][search_type]['total']) > 0
        results['query'] = query
        results['search_type'] = search_type
        results['total'] = len(all_results)
        for result_set in results['result_sets'].values():
            result_set['start'] = (result_set['page'] - 1) * per_page + 1
            result_set['end'] = result_set['start'] + min(per_page, len(result_set['results'])) - 1

        results['search_subtype'] = search_subtype
        results['search_subtypes'] = {
            'people': [('all', 'All people'), ('contributors', 'Contributors'), ('lobbyists', 'Lobbyists'), ('politicians', 'Politicians')],
            'groups': [('all', 'All groups'), ('industries', 'Industries'), ('lobbying_firms', 'Lobbying organizations'), ('political_groups', 'Political groups'), ('other_orgs', 'Businesses and other organizations')]
        }

        qs_attrs = request.GET.copy()
        if 'page' in qs_attrs:
            del qs_attrs['page']
        results['qs'] = urllib.urlencode(qs_attrs)

        if search_subtype == 'politicians':
            results['states'] = POL_STATES
            results['seats'] = ["federal:president", "federal:senate", "federal:house", "state:governor", "state:judicial", "state:lower", "state:upper", "state:office"]
            results['parties'] = [('D', 'Democrat'), ('R', 'Republican'), ('O', 'Other')]
        
        return render_to_response('search/results.html', results, RequestContext(request))
    else:
        return HttpResponseRedirect('/')
Example 4
def search(request):
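    # search view against the entities API: buckets results by type and keeps
    # track of the selected election cycle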
    if not request.GET.get('query', None):
        return HttpResponseRedirect('/')

    submitted_form = SearchForm(request.GET)
    if submitted_form.is_valid():
        kwargs = {}
        query = submitted_form.cleaned_data['query'].strip()
        cycle = request.GET.get('cycle', DEFAULT_CYCLE)

        # see ticket #545
        query = query.replace(u"’", "'")

        # if a user submitted the search value from the form, then
        # treat the hyphens as intentional. if it was from a url, then
        # the name has probably been slug-ized and we need to remove
        # any single occurrences of hyphens.
        if not request.GET.get('from_form', None):
            query = query.replace('-', ' ')

        results = api.entities.search(query)

        # limit the results to only those entities with an ID.
        entity_results = [result for result in results if result['id']]

        # if there's just one result, redirect to that entity's page
        if len(entity_results) == 1:
            result_type = entity_results[0]['type']
            name = slugify(entity_results[0]['name'])
            _id = entity_results[0]['id']
            return HttpResponseRedirect('/%s/%s/%s%s' % (result_type, name, _id, "?cycle=" + cycle if cycle != "-1" else ""))

        kwargs['query'] = query

        if len(entity_results) == 0:
            kwargs['sorted_results'] = None
        else:
            # sort the results by type
            sorted_results = {'organization': [], 'politician': [], 'individual': [], 'lobbying_firm': [], 'industry': []}
            for result in entity_results:
                if result['type'] == 'organization' and result['lobbying_firm'] == True:
                    sorted_results['lobbying_firm'].append(result)
                else:
                    sorted_results[result['type']].append(result)

            # sort each type by amount
            sorted_results['industry']      = sorted(sorted_results['industry'],      key=lambda x: float(x['total_given']), reverse=True)
            sorted_results['organization']  = sorted(sorted_results['organization'],  key=lambda x: float(x['total_given']), reverse=True)
            sorted_results['individual']    = sorted(sorted_results['individual'],    key=lambda x: float(x['total_given']), reverse=True)
            sorted_results['politician']    = sorted(sorted_results['politician'],    key=lambda x: float(x['total_received']), reverse=True)
            sorted_results['lobbying_firm'] = sorted(sorted_results['lobbying_firm'], key=lambda x: float(x['firm_income']), reverse=True)

            # keep track of how many there are of each type of result
            kwargs['num_industries']   = len(sorted_results['industry'])
            kwargs['num_orgs']   = len(sorted_results['organization'])
            kwargs['num_pols']   = len(sorted_results['politician'])
            kwargs['num_indivs'] = len(sorted_results['individual'])
            kwargs['num_firms']  = len(sorted_results['lobbying_firm'])
            kwargs['cycle'] = cycle
            kwargs['sorted_results'] = sorted_results
        return render_to_response('results.html', kwargs, brisket_context(request))
    else:
        return HttpResponseRedirect('/')
Example 5
def brisket_context(request):
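    # RequestContext preloaded with the site-wide search form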
    return RequestContext(request, {'search_form': SearchForm()})
Example 6
def search(request, search_type, search_subtype):
    if not request.GET.get('query', None):
        return HttpResponseRedirect('/')

    submitted_form = SearchForm(request.GET)
    if submitted_form.is_valid():
        query = submitted_form.cleaned_data['query'].strip()

        # see ticket #545
        query = query.replace(u"’", "'")

        query = unicodedata.normalize('NFKD', query).encode('ascii', 'ignore')

        # if a user submitted the search value from the form, then
        # treat the hyphens as intentional. if it was from a url, then
        # the name has probably been slug-ized and we need to remove
        # any single occurrences of hyphens.
        if not request.GET.get('from_form', None):
            query = query.replace('-', ' ')

        per_page = 5 if search_type == 'all' else 10
        page = 1 if search_type == 'all' else request.GET.get('page', 1)

        results = {'per_page_slice': ":%s" % per_page}

        search_kwargs = defaultdict(dict)
        if search_subtype:
            search_kwargs[search_type]['subtype'] = search_subtype
            if search_subtype == 'politicians':
                state = request.GET.get('state', None)
                seat = request.GET.get('seat', None)
                party = request.GET.get('party', None)

                if state:
                    results['state_filter'] = state
                    search_kwargs[search_type]['state'] = state
                if seat:
                    results['seat_filter'] = seat
                    search_kwargs[search_type]['seat'] = seat
                if party:
                    results['party_filter'] = party
                    search_kwargs[search_type]['party'] = party

        results['result_sets'] = OrderedDict([
            ('groups',
             _cached_search(query,
                            per_page=10,
                            page=page,
                            type=('organization', 'industry'),
                            **(search_kwargs['groups']))),
            ('people',
             _cached_search(query,
                            per_page=10,
                            page=page,
                            type=('individual', 'politician'),
                            **(search_kwargs['people'])))
        ])

        all_results = reduce(
            operator.add,
            [t['results'] for t in results['result_sets'].values()])

        if len(all_results) == 1:
            # if there's just one result, redirect to that entity's page
            result_type = all_results[0]['type']
            name = slugify(
                standardize_name(all_results[0]['name'], result_type))
            _id = all_results[0]['id']
            return HttpResponseRedirect('/%s/%s/%s' % (result_type, name, _id))
        elif len(all_results) > 0 and search_type == "all":
            # if there's only one type of result, redirect to a sub-search
            for result_type, result_set in results['result_sets'].items():
                if len(result_set['results']) == len(all_results):
                    return HttpResponseRedirect(
                        '/search/%s?%s' %
                        (result_type, urllib.urlencode(request.GET)))

        # do a tiny bit of regulations-specific hackery: if there are org results, stash
        # a thread-local copy of the Docket Wrench entity list so it doesn't have to be
        # recreated for each result
        dw_entity_list = None
        if results['result_sets']['groups']['results']:
            external_sites._dw_local.dw_entity_list = dw_entity_list = external_sites.get_docketwrench_entity_list()

        for result in (all_results if search_type == 'all' else
                       results['result_sets'][search_type]['results']):
            result['url'] = "/%s/%s/%s" % (
                result['type'],
                slugify(standardize_name(result['name'],
                                         result['type'])), result['id'])

            if result['type'] == 'organization':
                result['has_fec_id'] = len([
                    eid for eid in result['external_ids']
                    if eid['namespace'] == "urn:fec:committee"
                ]) > 0

            # munge results a bit to handle available sections
            result.update(get_data_types(result['type'], result['totals']))
            result['sections'] = []
            # simulate an entity view so we can query said entity view's sections
            # to see if they're available
            dummy_view = DummyEntity(result)

            view_type = entity_views[result['type']]
            for section_type in view_type.sections:
                dummy_section = section_type(dummy_view)
                if dummy_section.should_fetch():
                    result['sections'].append(dummy_section)

        # clean up after DW hackery
        if dw_entity_list:
            del external_sites._dw_local.dw_entity_list

        results['total_results'] = sum([
            result.get('total', 0)
            for result in results['result_sets'].values()
        ])
        results['has_results'] = (
            results['total_results'] if search_type == 'all' else
            results['result_sets'][search_type]['total']) > 0
        results['query'] = query
        results['search_type'] = search_type
        results['total'] = len(all_results)
        for result_set in results['result_sets'].values():
            result_set['start'] = (result_set['page'] - 1) * per_page + 1
            result_set['end'] = result_set['start'] + min(
                per_page, len(result_set['results'])) - 1

        results['search_subtype'] = search_subtype
        results['search_subtypes'] = {
            'people': [('all', 'All people'), ('contributors', 'Contributors'),
                       ('lobbyists', 'Lobbyists'),
                       ('politicians', 'Politicians')],
            'groups': [('all', 'All groups'), ('industries', 'Industries'),
                       ('lobbying_firms', 'Lobbying organizations'),
                       ('political_groups', 'Political groups'),
                       ('other_orgs', 'Businesses and other organizations')]
        }

        qs_attrs = request.GET.copy()
        if 'page' in qs_attrs:
            del qs_attrs['page']
        results['qs'] = urllib.urlencode(qs_attrs)

        if search_subtype == 'politicians':
            results['states'] = POL_STATES
            results['seats'] = [
                "federal:president", "federal:senate", "federal:house",
                "state:governor", "state:judicial", "state:lower",
                "state:upper", "state:office"
            ]
            results['parties'] = [('D', 'Democrat'), ('R', 'Republican'),
                                  ('O', 'Other')]

        return render_to_response('search/results.html', results,
                                  RequestContext(request))
    else:
        return HttpResponseRedirect('/')
Example 7
def search(request):
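    # search view: redirects straight to the entity page on a single match,
    # otherwise buckets results by type for the results template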
    if not request.GET.get("query", None):
        HttpResponseRedirect("/")

    submitted_form = SearchForm(request.GET)
    if submitted_form.is_valid():
        kwargs = {}
        query = submitted_form.cleaned_data["query"].strip()
        cycle = request.GET.get("cycle", DEFAULT_CYCLE)

        # see ticket #545
        query = query.replace(u"’", "'")

        # if a user submitted the search value from the form, then
        # treat the hyphens as intentional. if it was from a url, then
        # the name has probably been slug-ized and we need to remove
        # any single occurrences of hyphens.
        if not request.GET.get("from_form", None):
            query = query.replace("-", " ")

        results = api.entity_search(query)

        # limit the results to only those entities with an ID.
        entity_results = [result for result in results if result["id"]]

        # if there's just one result, redirect to that entity's page
        if len(entity_results) == 1:
            result_type = entity_results[0]["type"]
            name = slugify(entity_results[0]["name"])
            _id = entity_results[0]["id"]
            return HttpResponseRedirect(
                "/%s/%s/%s%s" % (result_type, name, _id, "?cycle=" + cycle if cycle != "-1" else "")
            )

        kwargs["query"] = query

        if len(entity_results) == 0:
            kwargs["sorted_results"] = None
        else:
            # sort the results by type
            sorted_results = {"organization": [], "politician": [], "individual": [], "lobbying_firm": []}
            for result in entity_results:
                if result["type"] == "organization" and result["lobbying_firm"] == True:
                    sorted_results["lobbying_firm"].append(result)
                else:
                    sorted_results[result["type"]].append(result)

            # sort each type by amount
            sorted_results["organization"] = sorted(
                sorted_results["organization"], key=lambda x: float(x["total_given"]), reverse=True
            )
            sorted_results["individual"] = sorted(
                sorted_results["individual"], key=lambda x: float(x["total_given"]), reverse=True
            )
            sorted_results["politician"] = sorted(
                sorted_results["politician"], key=lambda x: float(x["total_received"]), reverse=True
            )
            sorted_results["lobbying_firm"] = sorted(
                sorted_results["lobbying_firm"], key=lambda x: float(x["firm_income"]), reverse=True
            )

            # keep track of how many there are of each type of result
            kwargs["num_orgs"] = len(sorted_results["organization"])
            kwargs["num_pols"] = len(sorted_results["politician"])
            kwargs["num_indivs"] = len(sorted_results["individual"])
            kwargs["num_firms"] = len(sorted_results["lobbying_firm"])
            kwargs["cycle"] = cycle
            kwargs["sorted_results"] = sorted_results
        return render_to_response("results.html", kwargs, brisket_context(request))
    else:
        return HttpResponseRedirect("/")