Example #1
def search_titles_results(request):
    page_title = "US Newspaper Directory Search Results"
    crumbs = list(settings.BASE_CRUMBS)
    crumbs.extend([{"label": "Search Newspaper Directory", "href": reverse("chronam_search_titles")}])

    def prep_title_for_return(t):
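        # flatten a Solr title hit into a plain dict and attach its OCLC number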
        title = {}
        title.update(t.solr_doc)
        title["oclc"] = t.oclc
        return title

    format = request.GET.get("format")

    # Check whether the requested format is CSV before building pages for the
    # response. The CSV response does not use pagination; all matching titles
    # from Solr are returned at once.
    if format == "csv":
        query = request.GET.copy()
        q, fields, sort_field, sort_order = index.get_solr_request_params_from_query(query)

        # Return all titles in CSV format. This may hurt performance; the
        # assumption is that this request is not made often.
        # TODO: revisit if the assumption turns out to be incorrect
        solr_response = index.execute_solr_query(q, fields, sort_field, sort_order, index.title_count(), 0)
        titles = index.get_titles_from_solr_documents(solr_response)

        csv_header_labels = (
            "lccn",
            "title",
            "place_of_publication",
            "start_year",
            "end_year",
            "publisher",
            "edition",
            "frequency",
            "subject",
            "state",
            "city",
            "country",
            "language",
            "oclc",
            "holding_type",
        )
        response = HttpResponse(content_type="text/csv")
        response["Content-Disposition"] = 'attachment; filename="chronam_titles.csv"'
        writer = csv.writer(response)
        writer.writerow(csv_header_labels)
        for title in titles:
            writer.writerow(
                map(
                    lambda val: smart_str(val or "--"),
                    (
                        title.lccn,
                        title.name,
                        title.place_of_publication,
                        title.start_year,
                        title.end_year,
                        title.publisher,
                        title.edition,
                        title.frequency,
                        [str(s) for s in title.subjects.all()],
                        {p.state for p in title.places.all()},
                        [p.city for p in title.places.all()],
                        str(title.country),
                        [str(lang) for lang in title.languages.all()],
                        title.oclc,
                        title.holding_types,
                    ),
                )
            )
        return response

    try:
        curr_page = int(request.GET.get("page", 1))
    except ValueError:
        curr_page = 1

    paginator = index.SolrTitlesPaginator(request.GET)

    try:
        page = paginator.page(curr_page)
    except Exception:
        # an invalid or out-of-range page number becomes a 404
        raise Http404

    page_range_short = list(_page_range_short(paginator, page))

    try:
        rows = int(request.GET.get("rows", "20"))
    except ValueError:
        rows = 20

    query = request.GET.copy()
    query["rows"] = rows
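    # NOTE: several locals assigned below (next_url, previous_url, host,
    # page_list) look unused, but they reach the template through the
    # locals() context passed to render_to_response at the end of the view.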
    if page.has_next():
        query["page"] = curr_page + 1
        next_url = "?" + query.urlencode()
    if page.has_previous():
        query["page"] = curr_page - 1
        previous_url = "?" + query.urlencode()
    start = page.start_index()
    end = page.end_index()
    host = request.get_host()
    page_list = []
    for offset, result in enumerate(page.object_list):
        page_list.append((start + offset, result))

    if format == "atom":
        feed_url = request.build_absolute_uri()
        updated = rfc3339(datetime.datetime.now())
        return render_to_response(
            "search_titles_results.xml",
            dictionary=locals(),
            context_instance=RequestContext(request),
            content_type="application/atom+xml",
        )

    elif format == "json":
        results = {
            "startIndex": start,
            "endIndex": end,
            "totalItems": paginator.count,
            "itemsPerPage": rows,
            "items": [prep_title_for_return(t) for t in page.object_list],
        }
        # add url for the json view
        for i in results["items"]:
            i["url"] = request.build_absolute_uri(i["id"].rstrip("/") + ".json")
        json_text = json.dumps(results)
        # jsonp?
        callback = request.GET.get("callback")
        if callback and is_valid_jsonp_callback(callback):
            json_text = "%s(%s);" % ("callback", json_text)
        return HttpResponse(json_text, content_type="application/json")

    sort = request.GET.get("sort", "relevance")

    q = request.GET.copy()
    if "page" in q:
        del q["page"]
    if "sort" in q:
        del q["sort"]
    q = q.urlencode()
    collapse_search_tab = True
    return render_to_response(
        "search_titles_results.html", dictionary=locals(), context_instance=RequestContext(request)
    )
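
A note on the JSONP guard: the view depends on an is_valid_jsonp_callback
helper that these examples never define. A minimal sketch of such a validator,
assuming a plain dotted-JavaScript-identifier rule (the regex and function body
are illustrative, not chronam's actual implementation):

import re

# Illustrative sketch: accept only dotted JavaScript identifiers, which keeps
# attacker-controlled script out of the JSONP callback query parameter.
_CALLBACK_RE = re.compile(r'^[A-Za-z_$][A-Za-z0-9_$]*(\.[A-Za-z_$][A-Za-z0-9_$]*)*$')

def is_valid_jsonp_callback(callback):
    return bool(_CALLBACK_RE.match(callback))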
Example #2
def search_titles_results(request):
    page_title = 'US Newspaper Directory Search Results'
    crumbs = list(settings.BASE_CRUMBS)
    crumbs.extend([{'label': 'Search Newspaper Directory',
                    'href': reverse('chronam_search_titles')},
                   ])

    def prep_title_for_return(t):
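        # flatten a Solr title hit into a plain dict and attach its OCLC number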
        title = {}
        title.update(t.solr_doc)
        title['oclc'] = t.oclc
        return title

    format = request.GET.get('format')

    # Check whether the requested format is CSV before building pages for the
    # response. The CSV response does not use pagination; all matching titles
    # from Solr are returned at once.
    if format == 'csv':
        query = request.GET.copy()
        q, fields, sort_field, sort_order = index.get_solr_request_params_from_query(query)

        # Return all titles in CSV format. This may hurt performance; the
        # assumption is that this request is not made often.
        # TODO: revisit if the assumption turns out to be incorrect
        solr_response = index.execute_solr_query(q, fields, sort_field,
                                                 sort_order, index.title_count(), 0)
        titles = index.get_titles_from_solr_documents(solr_response)

        csv_header_labels = ('lccn', 'title', 'place_of_publication', 'start_year',
                             'end_year', 'publisher', 'edition', 'frequency', 'subject',
                             'state', 'city', 'country', 'language', 'oclc',
                             'holding_type',)
        response = HttpResponse(content_type='text/csv')
        response['Content-Disposition'] = 'attachment; filename="chronam_titles.csv"'
        writer = csv.writer(response)
        writer.writerow(csv_header_labels)
        for title in titles:
            writer.writerow(map(lambda val: smart_str(val or '--'),
                                (title.lccn, title.name, title.place_of_publication,
                                 title.start_year, title.end_year, title.publisher,
                                 title.edition, title.frequency,
                                 map(str, title.subjects.all()),
                                 set(map(lambda p: p.state, title.places.all())),
                                 map(lambda p: p.city, title.places.all()),
                                 str(title.country), map(str, title.languages.all()),
                                 title.oclc, title.holding_types)))
        return response

    try:
        curr_page = int(request.GET.get('page', 1))
    except ValueError:
        curr_page = 1

    paginator = index.SolrTitlesPaginator(request.GET)

    try:
        page = paginator.page(curr_page)
    except Exception:
        # an invalid or out-of-range page number becomes a 404
        raise Http404

    page_range_short = list(_page_range_short(paginator, page))

    try:
        rows = int(request.GET.get('rows', '20'))
    except ValueError:
        rows = 20

    query = request.GET.copy()
    query['rows'] = rows
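    # as in the first example, the locals assigned below feed the template
    # context via the locals() call at the end of the view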
    if page.has_next():
        query['page'] = curr_page + 1
        next_url = '?' + query.urlencode()
    if page.has_previous():
        query['page'] = curr_page - 1
        previous_url = '?' + query.urlencode()
    start = page.start_index()
    end = page.end_index()
    host = request.get_host()
    page_list = []
    for offset, result in enumerate(page.object_list):
        page_list.append((start + offset, result))

    if format == 'atom':
        feed_url = 'http://' + host + request.get_full_path()
        updated = rfc3339(datetime.datetime.now())
        return render_to_response('search_titles_results.xml',
                                  dictionary=locals(),
                                  context_instance=RequestContext(request),
                                  content_type='application/atom+xml')

    elif format == 'json':
        results = {
            'startIndex': start,
            'endIndex': end,
            'totalItems': paginator.count,
            'itemsPerPage': rows,
            'items': [prep_title_for_return(t) for t in page.object_list]
        }
        # add url for the json view
        for i in results['items']:
            i['url'] = 'http://' + request.get_host() + i['id'].rstrip("/") + ".json"
        json_text = json.dumps(results, indent=2)
        # jsonp?
        callback = request.GET.get('callback')
        if callback and is_valid_jsonp_callback(callback):
            json_text = "%s(%s);" % ('callback', json_text)
        return HttpResponse(json_text, content_type='application/json')

    sort = request.GET.get('sort', 'relevance')

    q = request.GET.copy()
    if 'page' in q:
        del q['page']
    if 'sort' in q:
        del q['sort']
    q = q.urlencode()
    collapse_search_tab = True
    return render_to_response('search_titles_results.html',
                              dictionary=locals(),
                              context_instance=RequestContext(request))
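
Both versions flag the CSV branch as a potential performance problem, since
every matching title is buffered into a single HttpResponse. If the "this
request is not made often" assumption stops holding, Django's documented
streaming-response pattern is a natural replacement. A sketch under that
assumption (the stream_csv helper and _Echo class are illustrative names, not
part of chronam):

import csv

from django.http import StreamingHttpResponse


class _Echo(object):
    """File-like stub whose write() returns the value it is given, letting
    csv.writer format one row at a time without buffering anything."""

    def write(self, value):
        return value


def stream_csv(header, rows, filename="chronam_titles.csv"):
    """Stream CSV rows as they are produced instead of building the whole
    response body in memory; rows may be any iterable of sequences."""
    writer = csv.writer(_Echo())

    def generate():
        yield writer.writerow(header)
        for row in rows:
            yield writer.writerow(row)

    response = StreamingHttpResponse(generate(), content_type="text/csv")
    response["Content-Disposition"] = 'attachment; filename="%s"' % filename
    return response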