Code Example #1
def issue_pages(request, lccn, date, edition, page_number=1):
    title = get_object_or_404(models.Title, lccn=lccn)
    _year, _month, _day = date.split("-")
    try:
        _date = datetime.date(int(_year), int(_month), int(_day))
    except ValueError:
        raise Http404
    try:
        issue = title.issues.filter(date_issued=_date,
                                    edition=edition).order_by("-created")[0]
    except IndexError:
        raise Http404
    paginator = Paginator(issue.pages.all(), 20)
    try:
        page = paginator.page(page_number)
    except InvalidPage:
        page = paginator.page(1)
    page_range_short = list(_page_range_short(paginator, page))
    if not page.object_list:
        notes = issue.notes.filter(type="noteAboutReproduction")
        num_notes = notes.count()
        if num_notes >= 1:
            display_label = notes[0].label
            explanation = notes[0].text
    page_title = 'All Pages: %s, %s' % (label(title), label(issue))
    page_head_heading = "All Pages: %s, %s" % (title.display_name,
                                               label(issue))
    page_head_subheading = label(title)
    crumbs = create_crumbs(title, issue, date, edition)
    profile_uri = 'http://www.openarchives.org/ore/html/'
    response = render_to_response('issue_pages.html',
                                  dictionary=locals(),
                                  context_instance=RequestContext(request))
    return add_cache_tag(response, "lccn=%s" % lccn)
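Nearly every example on this page repeats the same pagination idiom: build a Paginator, fall back to page 1 on InvalidPage, and hand a condensed page range to the template via the project helper _page_range_short. A minimal sketch of that shared pattern, factored into a standalone helper (the name paginate_with_fallback is hypothetical, not part of chronam):

from django.core.paginator import InvalidPage, Paginator


def paginate_with_fallback(object_list, page_number, per_page=20):
    # Hypothetical helper capturing the pattern used in the views on this page:
    # paginate object_list and quietly fall back to the first page when the
    # requested page number is missing, non-numeric, or out of range
    # (PageNotAnInteger and EmptyPage are both subclasses of InvalidPage).
    paginator = Paginator(object_list, per_page)
    try:
        page = paginator.page(page_number)
    except InvalidPage:
        page = paginator.page(1)
    return paginator, page

The views would then still compute page_range_short = list(_page_range_short(paginator, page)) as before; only the try/except boilerplate is factored out.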
Code Example #2
File: reports.py Project: dbrunton/chronam
def language_pages(request, language, batch, title=None, page_number=1):
    language_name = models.Language.objects.get(code=language).name
    page_title = "Pages with %s text" % (language_name)
    path = "reports/language_title_pages.html"
    if language != "eng":
        if title:
            pages = (
                models.Page.objects.filter(ocr__language_texts__language__code=language, issue__title__lccn=title)
                .values("reel__number", "issue__date_issued", "issue__title__lccn", "issue__edition", "sequence")
                .order_by("reel__number", "issue__date_issued", "sequence")
            )
        else:
            pages = (
                models.Page.objects.filter(ocr__language_texts__language__code=language, issue__batch__name=batch)
                .values("reel__number", "issue__date_issued", "issue__title__lccn", "issue__edition", "sequence")
                .order_by("reel__number", "issue__title__lccn", "issue__date_issued", "sequence")
            )
            path = "reports/language_batch_pages.html"
        paginator = Paginator(pages, 25)
        try:
            page = paginator.page(page_number)
        except InvalidPage:
            page = paginator.page(1)
        page_range_short = list(_page_range_short(paginator, page))
    return render_to_response(path, dictionary=locals(), context_instance=RequestContext(request))
Code Example #3
def issues_first_pages(request, lccn, page_number=1):
    title = get_object_or_404(models.Title, lccn=lccn)
    issues = title.issues.all()
    if not issues.exists():
        raise Http404("No issues for %s" % title.display_name)

    paginator = Paginator(issues, 20)
    try:
        page = paginator.page(page_number)
    except InvalidPage:
        page = paginator.page(1)

    response = render(
        request,
        "issue_pages.html",
        context={
            "title": title,
            "issues": issues,
            "page_title": "Browse Issues: %s" % label(title),
            "page_head_heading": "Browse Issues: %s" % title.display_name,
            "page_head_subheading": label(title),
            "crumbs": create_crumbs(title),
            "paginator": paginator,
            # To avoid confusing aliasing, we use unambiguous variable names
            # in the templates:
            "paginator_page": page,
            "newspaper_pages": [i.first_page for i in page.object_list],
            "page_range_short": list(_page_range_short(paginator, page)),
        },
    )
    return add_cache_tag(response, "lccn=%s" % lccn)
Code Example #4
File: browse.py Project: REAP720801/chronam
def titles_in_city(request, state, county, city,
                   page_number=1, order='name_normal'):
    state, county, city = map(unpack_url_path, (state, county, city))
    page_title = "Titles in City: %s, %s" % (city, state)
    titles = models.Title.objects.all()
    if city:
        titles = titles.filter(places__city__iexact=city)
    if county:
        titles = titles.filter(places__county__iexact=county)
    if state:
        titles = titles.filter(places__state__iexact=state)
    titles = titles.order_by(order)
    titles = titles.distinct()

    if titles.count() == 0:
        raise Http404

    paginator = Paginator(titles, 50)
    try:
        page = paginator.page(page_number)
    except InvalidPage:
        page = paginator.page(1)
    page_range_short = list(_page_range_short(paginator, page))

    return render_to_response('reports/city.html', dictionary=locals(),
                              context_instance=RequestContext(request))
Code Example #5
def issues_first_pages(request, lccn, page_num=1):
    title = get_object_or_404(models.Title, lccn=lccn)
    crumbs = create_crumbs(title)
    crumbs.extend([{'label': 'All Front Pages'}])
    issues = title.issues.all()
    if not issues.count() > 0:
        raise Http404("No issues for %s" % title.display_name)
    first_pages = []
    for issue in issues:
        first_pages.append(issue.first_page)
    paginator = Paginator(first_pages, 12)
    try:
        page = paginator.page(page_num)
    except InvalidPage:
        page = paginator.page(1)
    page_range_short = list(_page_range_short(paginator, page))
    page_title = 'All Front Pages: %s' % label(title)
    total_items = len(first_pages)
    if (int(page_num) - 1) > 1:
        prev_page_num = int(page_num) - 1
    else:
        prev_page_num = 1
    next_url = urlresolvers.reverse('chronam_issues_first_pages_page_number', args=(title.lccn, int(page_num) + 1))
    previous_url = urlresolvers.reverse('chronam_issues_first_pages_page_number', args=(title.lccn, prev_page_num))
    # title_lccn = lccn
    return render_to_response('issue_first_pages.html', dictionary=locals(),
                              context_instance=RequestContext(request))
Code Example #6
File: reports.py Project: sshyran/chronam
def language_pages(request, language, batch, title=None, page_number=1):
    language_name = models.Language.objects.get(code=language).name
    page_title = "Pages with %s text" % (language_name)
    path = "reports/language_title_pages.html"
    if language != "eng":
        if title:
            pages = (models.Page.objects.filter(
                ocr__language_texts__language__code=language,
                issue__title__lccn=title).values(
                    "reel__number", "issue__date_issued", "issue__title__lccn",
                    "issue__edition",
                    "sequence").order_by("reel__number", "issue__date_issued",
                                         "sequence"))
        else:
            pages = (models.Page.objects.filter(
                ocr__language_texts__language__code=language,
                issue__batch__name=batch).values(
                    "reel__number", "issue__date_issued", "issue__title__lccn",
                    "issue__edition",
                    "sequence").order_by("reel__number", "issue__title__lccn",
                                         "issue__date_issued", "sequence"))
            path = "reports/language_batch_pages.html"
        paginator = Paginator(pages, 25)
        try:
            page = paginator.page(page_number)
        except InvalidPage:
            page = paginator.page(1)
        page_range_short = list(_page_range_short(paginator, page))
    return render_to_response(path,
                              dictionary=locals(),
                              context_instance=RequestContext(request))
Code Example #7
def titles_in_county(request,
                     state,
                     county,
                     page_number=1,
                     order='name_normal'):
    state, county = map(unpack_url_path, (state, county))
    page_title = "Titles in County: %s, %s" % (county, state)
    titles = models.Title.objects.all()
    if county:
        titles = titles.filter(places__county__iexact=county)
    if state:
        titles = titles.filter(places__state__iexact=state)
    titles = titles.order_by(order)
    titles = titles.distinct()

    if titles.count() == 0:
        raise Http404

    paginator = Paginator(titles, 50)
    try:
        page = paginator.page(page_number)
    except InvalidPage:
        page = paginator.page(1)
    page_range_short = list(_page_range_short(paginator, page))

    return render_to_response('reports/county.html',
                              dictionary=locals(),
                              context_instance=RequestContext(request))
Code Example #8
File: browse.py Project: LibraryOfCongress/chronam
def issue_pages(request, lccn, date, edition, page_number=1):
    title = get_object_or_404(models.Title, lccn=lccn)
    _year, _month, _day = date.split("-")
    try:
        _date = datetime.date(int(_year), int(_month), int(_day))
    except ValueError:
        raise Http404
    try:
        issue = title.issues.filter(date_issued=_date,
                                    edition=edition).order_by("-created")[0]
    except IndexError:
        raise Http404
    paginator = Paginator(issue.pages.all(), 20)
    try:
        page = paginator.page(page_number)
    except InvalidPage:
        page = paginator.page(1)
    page_range_short = list(_page_range_short(paginator, page))
    if not page.object_list:
        notes = issue.notes.filter(type="noteAboutReproduction")
        num_notes = notes.count()
        if num_notes >= 1:
            display_label = notes[0].label
            explanation = notes[0].text
    page_title = 'All Pages: %s, %s' % (label(title), label(issue))
    page_head_heading = "All Pages: %s, %s" % (title.display_name, label(issue))
    page_head_subheading = label(title)
    crumbs = create_crumbs(title, issue, date, edition)
    profile_uri = 'http://www.openarchives.org/ore/html/'
    response = render_to_response('issue_pages.html', dictionary=locals(),
                                  context_instance=RequestContext(request))
    return add_cache_tag(response, "lccn=%s" % lccn)
Code Example #9
def titles_in_state(request, state, page_number=1, order="name_normal"):
    if not any(i for i in models.Title._meta.local_fields if i.name == order):
        return HttpResponseRedirect(
            urlresolvers.reverse("chronam_state_page_number",
                                 kwargs={
                                     "state": state,
                                     "page_number": page_number
                                 }))

    state = unpack_url_path(state)
    page_title = "Titles in State: %s" % state
    titles = models.Title.objects.all()
    if state:
        titles = titles.filter(places__state__iexact=state)
    titles = titles.order_by(order)
    titles = titles.distinct()

    if titles.count() == 0:
        raise Http404

    paginator = Paginator(titles, 50)
    try:
        page = paginator.page(page_number)
    except InvalidPage:
        page = paginator.page(1)
    page_range_short = list(_page_range_short(paginator, page))

    return render_to_response("reports/state.html",
                              dictionary=locals(),
                              context_instance=RequestContext(request))
Code Example #10
def issues_first_pages(request, lccn, page_number=1):
    title = get_object_or_404(models.Title, lccn=lccn)
    issues = title.issues.all()
    if not issues.count() > 0:
        raise Http404("No issues for %s" % title.display_name)

    first_pages = []
    for issue in issues:
        first_pages.append(issue.first_page)

    paginator = Paginator(first_pages, 20)
    try:
        page = paginator.page(page_number)
    except InvalidPage:
        page = paginator.page(1)
    page_range_short = list(_page_range_short(paginator, page))

    page_title = 'Browse Issues: %s' % label(title)
    page_head_heading = "Browse Issues: %s" % title.display_name
    page_head_subheading = label(title)
    crumbs = create_crumbs(title)
    response = render_to_response('issue_pages.html',
                                  dictionary=locals(),
                                  context_instance=RequestContext(request))
    return add_cache_tag(response, "lccn=%s" % lccn)
Code Example #11
File: reports.py Project: dbrunton/chronam
def reels(request, page_number=1):
    page_title = "Reels"
    reels = models.Reel.objects.all().order_by("number")
    paginator = Paginator(reels, 25)
    page = paginator.page(page_number)
    page_range_short = list(_page_range_short(paginator, page))

    return render_to_response("reports/reels.html", dictionary=locals(), context_instance=RequestContext(request))
Code Example #12
File: reports.py Project: dbrunton/chronam
def batches(request, page_number=1):
    page_title = "Batches"
    batches = models.Batch.viewable_batches()
    paginator = Paginator(batches, 25)
    page = paginator.page(page_number)
    page_range_short = list(_page_range_short(paginator, page))

    return render_to_response("reports/batches.html", dictionary=locals(), context_instance=RequestContext(request))
Code Example #13
File: reports.py Project: dbrunton/chronam
def events(request, page_number=1):
    page_title = "Events"
    events = models.LoadBatchEvent.objects.all().order_by("-created")
    paginator = Paginator(events, 25)
    page = paginator.page(page_number)
    page_range_short = list(_page_range_short(paginator, page))

    return render_to_response("reports/events.html", dictionary=locals(), context_instance=RequestContext(request))
Code Example #14
def events_atom(request, page_number=1):
    events = models.LoadBatchEvent.objects.all().order_by('-created')
    paginator = Paginator(events, 25)
    page = paginator.page(page_number)
    page_range_short = list(_page_range_short(paginator, page))
    return render_to_response('reports/events.xml', dictionary=locals(),
                              context_instance=RequestContext(request),
                              mimetype='application/atom+xml')
Code Example #15
File: reports.py Project: phonedude/chronam
def events_atom(request, page_number=1):
    events = models.LoadBatchEvent.objects.all().order_by('-created')
    paginator = Paginator(events, 25)
    page = paginator.page(page_number)
    page_range_short = list(_page_range_short(paginator, page))
    return render_to_response('reports/events.xml', dictionary=locals(),
                              context_instance=RequestContext(request),
                              content_type='application/atom+xml')
Code Example #16
File: reports.py Project: phonedude/chronam
def reels(request, page_number=1):
    page_title = 'Reels'
    reels = models.Reel.objects.all().order_by('number')
    paginator = Paginator(reels, 25)
    page = paginator.page(page_number)
    page_range_short = list(_page_range_short(paginator, page))

    return render_to_response('reports/reels.html', dictionary=locals(),
                              context_instance=RequestContext(request))
Code Example #17
def issue_pages(request, lccn, date, edition, page_number=1):
    title = get_object_or_404(models.Title, lccn=lccn)

    _year, _month, _day = date.split("-")
    try:
        _date = datetime.date(int(_year), int(_month), int(_day))
    except ValueError:
        raise Http404

    try:
        issue = title.issues.filter(date_issued=_date,
                                    edition=edition).order_by("-created")[0]
    except IndexError:
        raise Http404

    paginator = Paginator(issue.pages.all(), 20)
    try:
        page = paginator.page(page_number)
    except InvalidPage:
        page = paginator.page(1)

    context = {
        "page_title": "All Pages: %s, %s" % (label(title), label(issue)),
        "page_head_heading": "All Pages: %s, %s" % (title.display_name, label(issue)),
        "page_head_subheading": label(title),
        "crumbs": create_crumbs(title, issue, date, edition),
        "title": title,
        "issue": issue,
        "paginator": paginator,
        "paginator_page": page,
        "page_range_short": list(_page_range_short(paginator, page)),
        # This name allows the same template to be used as in the
        # issues_first_pages view, where the paginator is *issues* rather than
        # pages, and for clarity we give it a name which is obviously not
        # paginator pages:
        "newspaper_pages": page.object_list,
    }

    if not page.object_list:
        note = issue.notes.filter(type="noteAboutReproduction").first()
        if note:
            context["display_label"] = note.label
            context["explanation"] = note.text

    response = render(request, "issue_pages.html", context=context)
    return add_cache_tag(response, "lccn=%s" % lccn)
Code Example #18
File: reports.py Project: sshyran/chronam
def events(request, page_number=1):
    page_title = "Events"
    events = models.LoadBatchEvent.objects.all().order_by("-created")
    paginator = Paginator(events, 25)
    page = paginator.page(page_number)
    page_range_short = list(_page_range_short(paginator, page))

    return render_to_response("reports/events.html",
                              dictionary=locals(),
                              context_instance=RequestContext(request))
Code Example #19
def batches(request, page_number=1):
    page_title = 'Batches'
    batches = models.Batch.viewable_batches()
    paginator = Paginator(batches, 25)
    page = paginator.page(page_number)
    page_range_short = list(_page_range_short(paginator, page))

    return render_to_response('reports/batches.html',
                              dictionary=locals(),
                              context_instance=RequestContext(request))
Code Example #20
File: reports.py Project: sshyran/chronam
def batches(request, page_number=1):
    page_title = "Batches"
    batches = models.Batch.viewable_batches()
    batches = batches.prefetch_related("awardee")
    paginator = Paginator(batches, 25)
    page = paginator.page(page_number)
    page_range_short = list(_page_range_short(paginator, page))

    return render_to_response("reports/batches.html",
                              dictionary=locals(),
                              context_instance=RequestContext(request))
Code Example #21
File: reports.py Project: phonedude/chronam
def institutions(request, page_number=1):
    page_title = 'Institutions'
    institutions = models.Institution.objects.all()
    paginator = Paginator(institutions, 50)
    try:
        page = paginator.page(page_number)
    except InvalidPage:
        page = paginator.page(1)
    page_range_short = list(_page_range_short(paginator, page))
    return render_to_response('reports/institutions.html', dictionary=locals(),
                              context_instance=RequestContext(request))
Code Example #22
def institutions(request, page_number=1):
    page_title = 'Institutions'
    institutions = models.Institution.objects.all()
    paginator = Paginator(institutions, 50)
    try:
        page = paginator.page(page_number)
    except InvalidPage:
        page = paginator.page(1)
    page_range_short = list(_page_range_short(paginator, page))
    return render_to_response('reports/institutions.html', dictionary=locals(),
                              context_instance=RequestContext(request))
Code Example #23
File: reports.py Project: phonedude/chronam
def institution_titles(request, code, page_number=1):
    institution = get_object_or_404(models.Institution, code=code)
    page_title = 'Titles held by %s' % institution
    titles = models.Title.objects.filter(
        holdings__institution=institution).distinct()
    paginator = Paginator(titles, 50)
    try:
        page = paginator.page(page_number)
    except InvalidPage:
        page = paginator.page(1)
    page_range_short = list(_page_range_short(paginator, page))
    return render_to_response('reports/institution_titles.html', dictionary=locals(),
                              context_instance=RequestContext(request))
Code Example #24
def language_titles(request, language, page_number=1):
    language_name = models.Language.objects.get(code=language).name
    page_title = 'Titles with %s text' % (language_name)
    if language != "eng":
        titles = models.Title.objects.filter(
            issues__pages__ocr__language_texts__language__code=language
        ).values('lccn', 'issues__batch__name').annotate(count=Count('lccn'))
        paginator = Paginator(titles, 25)
        try:
            page = paginator.page(page_number)
        except InvalidPage:
            page = paginator.page(1)
        page_range_short = list(_page_range_short(paginator, page))
    return render_to_response('reports/language_titles.html', dictionary=locals(),
                              context_instance=RequestContext(request))
Code Example #25
def institution_titles(request, code, page_number=1):
    institution = get_object_or_404(models.Institution, code=code)
    page_title = 'Titles held by %s' % institution
    titles = models.Title.objects.filter(
        holdings__institution=institution).distinct()
    paginator = Paginator(titles, 50)
    try:
        page = paginator.page(page_number)
    except InvalidPage:
        page = paginator.page(1)
    page_range_short = list(_page_range_short(paginator, page))
    return render_to_response('reports/institution_titles.html', dictionary=locals(),
                              context_instance=RequestContext(request))
Code Example #26
File: reports.py Project: phonedude/chronam
def language_titles(request, language, page_number=1):
    language_name = models.Language.objects.get(code=language).name
    page_title = 'Titles with %s text' % (language_name)
    if language != "eng":
        titles = models.Title.objects.filter(
            issues__pages__ocr__language_texts__language__code=language
        ).values('lccn', 'issues__batch__name').annotate(count=Count('lccn'))
        paginator = Paginator(titles, 25)
        try:
            page = paginator.page(page_number)
        except InvalidPage:
            page = paginator.page(1)
        page_range_short = list(_page_range_short(paginator, page))
    return render_to_response('reports/language_titles.html', dictionary=locals(),
                              context_instance=RequestContext(request))
Code Example #27
def titles(request, start=None, page_number=1):
    page_title = "Newspaper Titles"
    if start:
        page_title += " Starting With %s" % start
        titles = models.Title.objects.order_by("name_normal")
        titles = titles.filter(name_normal__istartswith=start.upper())
    else:
        titles = models.Title.objects.all().order_by("name_normal")
    paginator = Paginator(titles, 50)
    try:
        page = paginator.page(page_number)
    except InvalidPage:
        page = paginator.page(1)
    page_start = page.start_index()
    page_end = page.end_index()
    page_range_short = list(_page_range_short(paginator, page))
    browse_val = [chr(n) for n in range(65, 91)]
    browse_val.extend([str(i) for i in range(10)])
    collapse_search_tab = True
    crumbs = list(settings.BASE_CRUMBS)
    return render_to_response("titles.html", dictionary=locals(), context_instance=RequestContext(request))
Code Example #28
File: browse.py Project: REAP720801/chronam
def titles(request, start=None, page_number=1):
    page_title = 'Newspaper Titles'
    if start:
        page_title += ' Starting With %s' % start
        titles = models.Title.objects.order_by('name_normal')
        titles = titles.filter(name_normal__istartswith=start.upper())
    else:
        titles = models.Title.objects.all().order_by('name_normal')
    paginator = Paginator(titles, 50)
    try:
        page = paginator.page(page_number)
    except InvalidPage:
        page = paginator.page(1)
    page_start = page.start_index()
    page_end = page.end_index()
    page_range_short = list(_page_range_short(paginator, page))
    browse_val = [chr(n) for n in range(65, 91)]
    browse_val.extend([str(i) for i in range(10)])
    collapse_search_tab = True
    crumbs = list(settings.BASE_CRUMBS)
    return render_to_response('titles.html', dictionary=locals(),
                              context_instance=RequestContext(request))
Code Example #29
File: browse.py Project: REAP720801/chronam
def issues_first_pages(request, lccn, page_number=1):
    title = get_object_or_404(models.Title, lccn=lccn)
    issues = title.issues.all()
    if not issues.count() > 0:
        raise Http404("No issues for %s" % title.display_name)

    first_pages = []
    for issue in issues:
        first_pages.append(issue.first_page)

    paginator = Paginator(first_pages, 20)
    try:
        page = paginator.page(page_number)
    except InvalidPage:
        page = paginator.page(1)
    page_range_short = list(_page_range_short(paginator, page))

    page_title = 'Browse Issues: %s' % label(title)
    page_head_heading = "Browse Issues: %s" % title.display_name
    page_head_subheading = label(title)
    crumbs = create_crumbs(title)
    return render_to_response('issue_pages.html', dictionary=locals(),
                              context_instance=RequestContext(request))
Code Example #30
File: search.py Project: CDRH/nebnews
def search_pages_results(request, view_type='gallery'):
    page_title = "Search Results"
    paginator = search_pages_paginator(request)
    q = paginator.query
    try:
        page = paginator.page(paginator._cur_page)
    except InvalidPage:
        url = urlresolvers.reverse('chronam_search_pages_results')
        # Set the page to the first page
        q['page'] = 1
        return HttpResponseRedirect('%s?%s' % (url, q.urlencode()))
    start = page.start_index()
    end = page.end_index()

    # figure out the next page number
    query = request.GET.copy()
    if page.has_next():
        query['page'] = paginator._cur_page + 1
        next_url = '?' + query.urlencode()
        # and the previous page number
    if page.has_previous():
        query['page'] = paginator._cur_page - 1
        previous_url = '?' + query.urlencode()

    rows = q["rows"] if "rows" in q else 20
    crumbs = list(settings.BASE_CRUMBS)

    host = request.get_host()
    format = request.GET.get('format', None)
    if format == 'atom':
        feed_url = 'http://' + host + request.get_full_path()
        updated = rfc3339(datetime.datetime.now())
        return render_to_response('search_pages_results.xml',
                                  dictionary=locals(),
                                  context_instance=RequestContext(request),
                                  mimetype='application/atom+xml')
    elif format == 'json':
        results = {
            'startIndex': start,
            'endIndex': end,
            'totalItems': paginator.count,
            'itemsPerPage': rows,
            'items': [p.solr_doc for p in page.object_list],
        }
        for i in results['items']:
            i['url'] = 'http://' + request.get_host() + i['id'].rstrip('/') + '.json'
        json_text = json.dumps(results, indent=2)
        # jsonp?
        if request.GET.get('callback') is not None:
            json_text = "%s(%s);" % (request.GET.get('callback'), json_text)
        return HttpResponse(json_text, mimetype='application/json')
    page_range_short = list(_page_range_short(paginator, page))
    # copy the current request query without the page and sort
    # query params so we can construct links with it in the template
    q = request.GET.copy()
    for i in ('page', 'sort'):
        if i in q:
            q.pop(i)
    q = q.urlencode()

    # get a pseudo-English version of the query
    english_search = paginator.englishify()

    # get some stuff from the query string for use in the form
    lccns = query.getlist('lccn')
    states = query.getlist('state')

    # figure out the sort that's in use
    sort = query.get('sort', 'relevance')
    if view_type == "list":
        template = "search_pages_results_list.html"
    else:
        template = "search_pages_results.html"
    page_list = []
    for count in range(len(page.object_list)):
        page_list.append((count + start, page.object_list[count]))
    return render_to_response(template, dictionary=locals(),
                              context_instance=RequestContext(request))
Code Example #31
def search_pages_results(request, view_type='gallery'):
    page_title = "Search Results"
    paginator = search_pages_paginator(request)
    q = paginator.query
    try:
        page = paginator.page(paginator._cur_page)
    except InvalidPage:
        url = urlresolvers.reverse('chronam_search_pages_results')
        # Set the page to the first page
        q['page'] = 1
        return HttpResponseRedirect('%s?%s' % (url, q.urlencode()))
    start = page.start_index()
    end = page.end_index()

    # figure out the next page number
    query = request.GET.copy()
    if page.has_next():
        query['page'] = paginator._cur_page + 1
        next_url = '?' + query.urlencode()
        # and the previous page number
    if page.has_previous():
        query['page'] = paginator._cur_page - 1
        previous_url = '?' + query.urlencode()

    rows = q["rows"] if "rows" in q else 20
    #crumbs = list(settings.BASE_CRUMBS)

    host = request.get_host()
    format = request.GET.get('format', None)
    if format == 'atom':
        feed_url = 'http://' + host + request.get_full_path()
        updated = rfc3339(datetime.datetime.now())
        return render_to_response('search_pages_results.xml',
                                  dictionary=locals(),
                                  context_instance=RequestContext(request),
                                  mimetype='application/atom+xml')
    elif format == 'json':
        results = {
            'startIndex': start,
            'endIndex': end,
            'totalItems': paginator.count,
            'itemsPerPage': rows,
            'items': [p.solr_doc for p in page.object_list],
        }
        for i in results['items']:
            i['url'] = 'http://' + request.get_host() + i['id'].rstrip(
                '/') + '.json'
        json_text = json.dumps(results, indent=2)
        # jsonp?
        if request.GET.get('callback') is not None:
            json_text = "%s(%s);" % (request.GET.get('callback'), json_text)
        return HttpResponse(json_text, mimetype='application/json')
    page_range_short = list(_page_range_short(paginator, page))
    # copy the current request query without the page and sort
    # query params so we can construct links with it in the template
    q = request.GET.copy()
    for i in ('page', 'sort'):
        if i in q:
            q.pop(i)
    q = q.urlencode()

    # get a pseudo-English version of the query
    english_search = paginator.englishify()

    # get some stuff from the query string for use in the form
    lccns = query.getlist('lccn')
    states = query.getlist('state')

    # figure out the sort that's in use
    sort = query.get('sort', 'relevance')
    if view_type == "list":
        template = "search_pages_results_list.html"
    else:
        template = "search_pages_results.html"
    page_list = []
    for count in range(len(page.object_list)):
        page_list.append((count + start, page.object_list[count]))
    return render_to_response(template,
                              dictionary=locals(),
                              context_instance=RequestContext(request))
Code Example #32
def search_titles_results(request):
    page_title = "US Newspaper Directory Search Results"
    crumbs = list(settings.BASE_CRUMBS)
    crumbs.extend([{"label": "Search Newspaper Directory", "href": reverse("chronam_search_titles")}])

    def prep_title_for_return(t):
        title = {}
        title.update(t.solr_doc)
        title["oclc"] = t.oclc
        return title

    format = request.GET.get("format")

    # Check whether the requested format is CSV before building pages for the
    # response. The CSV response does not use pagination; instead, all matching
    # titles from Solr are returned at once.
    if format == "csv":
        query = request.GET.copy()
        q, fields, sort_field, sort_order = index.get_solr_request_params_from_query(query)

        # Return all titles in CSV format. May hurt performance; the assumption
        # is that this request is not made often.
        # TODO: revisit if the assumption is incorrect
        solr_response = index.execute_solr_query(q, fields, sort_field, sort_order, index.title_count(), 0)
        titles = index.get_titles_from_solr_documents(solr_response)

        csv_header_labels = (
            "lccn",
            "title",
            "place_of_publication",
            "start_year",
            "end_year",
            "publisher",
            "edition",
            "frequency",
            "subject",
            "state",
            "city",
            "country",
            "language",
            "oclc",
            "holding_type",
        )
        response = HttpResponse(content_type="text/csv")
        response["Content-Disposition"] = 'attachment; filename="chronam_titles.csv"'
        writer = csv.writer(response)
        writer.writerow(csv_header_labels)
        for title in titles:
            writer.writerow(
                map(
                    lambda val: smart_str(val or "--"),
                    (
                        title.lccn,
                        title.name,
                        title.place_of_publication,
                        title.start_year,
                        title.end_year,
                        title.publisher,
                        title.edition,
                        title.frequency,
                        map(str, title.subjects.all()),
                        set(map(lambda p: p.state, title.places.all())),
                        map(lambda p: p.city, title.places.all()),
                        str(title.country),
                        map(str, title.languages.all()),
                        title.oclc,
                        title.holding_types,
                    ),
                )
            )
        return response

    try:
        curr_page = int(request.GET.get("page", 1))
    except ValueError as e:
        curr_page = 1

    paginator = index.SolrTitlesPaginator(request.GET)

    try:
        page = paginator.page(curr_page)
    except Exception:
        raise Http404

    page_range_short = list(_page_range_short(paginator, page))

    try:
        rows = int(request.GET.get("rows", "20"))
    except ValueError as e:
        rows = 20

    query = request.GET.copy()
    query.rows = rows
    if page.has_next():
        query["page"] = curr_page + 1
        next_url = "?" + query.urlencode()
    if page.has_previous():
        query["page"] = curr_page - 1
        previous_url = "?" + query.urlencode()
    start = page.start_index()
    end = page.end_index()
    host = request.get_host()
    page_list = []
    for p in range(len(page.object_list)):
        page_start = start + p
        page_list.append((page_start, page.object_list[p]))

    if format == "atom":
        feed_url = request.build_absolute_uri()
        updated = rfc3339(datetime.datetime.now())
        return render_to_response(
            "search_titles_results.xml",
            dictionary=locals(),
            context_instance=RequestContext(request),
            content_type="application/atom+xml",
        )

    elif format == "json":
        results = {
            "startIndex": start,
            "endIndex": end,
            "totalItems": paginator.count,
            "itemsPerPage": rows,
            "items": [prep_title_for_return(t) for t in page.object_list],
        }
        # add url for the json view
        for i in results["items"]:
            i["url"] = request.build_absolute_uri(i["id"].rstrip("/") + ".json")
        json_text = json.dumps(results)
        # jsonp?
        callback = request.GET.get("callback")
        if callback and is_valid_jsonp_callback(callback):
            json_text = "%s(%s);" % ("callback", json_text)
        return HttpResponse(json_text, content_type="application/json")

    sort = request.GET.get("sort", "relevance")

    q = request.GET.copy()
    if "page" in q:
        del q["page"]
    if "sort" in q:
        del q["sort"]
    q = q.urlencode()
    collapse_search_tab = True
    return render_to_response(
        "search_titles_results.html", dictionary=locals(), context_instance=RequestContext(request)
    )
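A note on the CSV branch above: the nested map(...) calls inside writerow rely on Python 2 semantics, where map returns a list whose repr reads tolerably in a CSV cell; on Python 3 they are lazy iterators and would be written out as something like <map object at 0x...>. A hedged, Python 3-oriented sketch of the same row construction (field order as in the example; joining multi-valued fields with "; " is an assumption, not chronam's format):

from django.utils.encoding import smart_str


def title_csv_row(title):
    # Sketch: build one CSV row for a Title object, covering the same fields
    # as csv_header_labels above, with explicit joins instead of map().
    def fmt(value):
        return smart_str(value or "--")

    def join(values):
        return "; ".join(str(v) for v in values) or "--"

    return [
        fmt(title.lccn),
        fmt(title.name),
        fmt(title.place_of_publication),
        fmt(title.start_year),
        fmt(title.end_year),
        fmt(title.publisher),
        fmt(title.edition),
        fmt(title.frequency),
        join(title.subjects.all()),
        join(sorted({p.state for p in title.places.all() if p.state})),
        join(p.city for p in title.places.all() if p.city),
        fmt(title.country),
        join(title.languages.all()),
        fmt(title.oclc),
        fmt(title.holding_types),
    ]

The loop in the view would then read writer.writerow(title_csv_row(title)).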
Code Example #33
File: directory.py Project: phonedude/chronam
def search_titles_results(request):
    page_title = 'US Newspaper Directory Search Results'
    crumbs = list(settings.BASE_CRUMBS)
    crumbs.extend([{'label': 'Search Newspaper Directory',
                    'href': reverse('chronam_search_titles')},
                   ])

    def prep_title_for_return(t):
        title = {}
        title.update(t.solr_doc)
        title['oclc'] = t.oclc
        return title

    format = request.GET.get('format')

    # Check whether the requested format is CSV before building pages for the
    # response. The CSV response does not use pagination; instead, all matching
    # titles from Solr are returned at once.
    if format == 'csv':
        query = request.GET.copy()
        q, fields, sort_field, sort_order = index.get_solr_request_params_from_query(query)

        # Return all titles in CSV format. May hurt performance; the assumption
        # is that this request is not made often.
        # TODO: revisit if the assumption is incorrect
        solr_response = index.execute_solr_query(q, fields, sort_field,
                                                 sort_order, index.title_count(), 0)
        titles = index.get_titles_from_solr_documents(solr_response)

        csv_header_labels = ('lccn', 'title', 'place_of_publication', 'start_year',
                             'end_year', 'publisher', 'edition', 'frequency', 'subject',
                             'state', 'city', 'country', 'language', 'oclc',
                             'holding_type',)
        response = HttpResponse(content_type='text/csv')
        response['Content-Disposition'] = 'attachment; filename="chronam_titles.csv"'
        writer = csv.writer(response)
        writer.writerow(csv_header_labels)
        for title in titles:
            writer.writerow(map(lambda val: smart_str(val or '--'),
                                (title.lccn, title.name, title.place_of_publication,
                                 title.start_year, title.end_year, title.publisher,
                                 title.edition, title.frequency,
                                 map(str, title.subjects.all()),
                                 set(map(lambda p: p.state, title.places.all())),
                                 map(lambda p: p.city, title.places.all()),
                                 str(title.country), map(str, title.languages.all()),
                                 title.oclc, title.holding_types)))
        return response

    try:
        curr_page = int(request.GET.get('page', 1))
    except ValueError as e:
        curr_page = 1

    paginator = index.SolrTitlesPaginator(request.GET)

    try:
        page = paginator.page(curr_page)
    except Exception:
        raise Http404

    page_range_short = list(_page_range_short(paginator, page))

    try:
        rows = int(request.GET.get('rows', '20'))
    except ValueError as e:
        rows = 20

    query = request.GET.copy()
    query.rows = rows
    if page.has_next():
        query['page'] = curr_page + 1
        next_url = '?' + query.urlencode()
    if page.has_previous():
        query['page'] = curr_page - 1
        previous_url = '?' + query.urlencode()
    start = page.start_index()
    end = page.end_index()
    host = request.get_host()
    page_list = []
    for p in range(len(page.object_list)):
        page_start = start + p
        page_list.append((page_start, page.object_list[p]))

    if format == 'atom':
        feed_url = 'http://' + host + request.get_full_path()
        updated = rfc3339(datetime.datetime.now())
        return render_to_response('search_titles_results.xml',
                                  dictionary=locals(),
                                  context_instance=RequestContext(request),
                                  content_type='application/atom+xml')

    elif format == 'json':
        results = {
            'startIndex': start,
            'endIndex': end,
            'totalItems': paginator.count,
            'itemsPerPage': rows,
            'items': [prep_title_for_return(t) for t in page.object_list]
        }
        # add url for the json view
        for i in results['items']:
            i['url'] = 'http://' + request.get_host() + i['id'].rstrip("/") + ".json"
        json_text = json.dumps(results, indent=2)
        # jsonp?
        callback = request.GET.get('callback')
        if callback and is_valid_jsonp_callback(callback):
            json_text = "%s(%s);" % ('callback', json_text)
        return HttpResponse(json_text, content_type='application/json')

    sort = request.GET.get('sort', 'relevance')

    q = request.GET.copy()
    if 'page' in q:
        del q['page']
    if 'sort' in q:
        del q['sort']
    q = q.urlencode()
    collapse_search_tab = True
    return render_to_response('search_titles_results.html',
                              dictionary=locals(),
                              context_instance=RequestContext(request))
Code Example #34
(Excerpt: the tail end of a search_titles_results view; see Code Example #33 for the full function.)
                                title.oclc, title.holding_types)))
        return response
 
    try:
        curr_page = int(request.GET.get('page', 1))
    except ValueError as e:
        curr_page = 1

    paginator = index.SolrTitlesPaginator(request.GET)

    try:
        page = paginator.page(curr_page)
    except Exception:
        raise Http404

    page_range_short = list(_page_range_short(paginator, page))

    try:
        rows = int(request.GET.get('rows', '20'))
    except ValueError as e:
        rows = 20

    query = request.GET.copy()
    query.rows = rows
    if page.has_next():
        query['page'] = curr_page + 1
        next_url = '?' + query.urlencode()
    if page.has_previous():
        query['page'] = curr_page - 1
        previous_url = '?' + query.urlencode()
    start = page.start_index()
Code Example #35
File: browse.py Project: REAP720801/chronam
(Excerpt: the body of an issue_pages view; see Code Examples #1 and #8 for the full function.)
    _year, _month, _day = date.split("-")
    try:
        _date = datetime.date(int(_year), int(_month), int(_day))
    except ValueError:
        raise Http404
    try:
        issue = title.issues.filter(date_issued=_date,
                                    edition=edition).order_by("-created")[0]
    except IndexError:
        raise Http404
    paginator = Paginator(issue.pages.all(), 20)
    try:
        page = paginator.page(page_number)
    except InvalidPage:
        page = paginator.page(1)
    page_range_short = list(_page_range_short(paginator, page))
    if not page.object_list:
        notes = issue.notes.filter(type="noteAboutReproduction")
        num_notes = notes.count()
        if num_notes >= 1:
            display_label = notes[0].label
            explanation = notes[0].text
    page_title = 'All Pages: %s, %s' % (label(title), label(issue))
    page_head_heading = "All Pages: %s, %s" % (title.display_name, label(issue))
    page_head_subheading = label(title)
    crumbs = create_crumbs(title, issue, date, edition)
    profile_uri = 'http://www.openarchives.org/ore/html/'
    response = render_to_response('issue_pages.html', dictionary=locals(),
                                  context_instance=RequestContext(request))
    return response
Code Example #36
File: search.py Project: sshyran/chronam
def search_pages_results(request, view_type="gallery"):
    page_title = "Search Results"
    paginator = search_pages_paginator(request)
    q = paginator.query

    try:
        page = paginator.page(paginator._cur_page)
    except InvalidPage:
        url = urlresolvers.reverse("chronam_search_pages_results")
        # Set the page to the first page
        q["page"] = 1
        return HttpResponseRedirect("%s?%s" % (url, q.urlencode()))
    except Exception as exc:
        logging.exception(
            "Unable to paginate search results",
            extra={"data": {
                "q": q,
                "page": paginator._cur_page
            }})

        if getattr(exc, "httpcode", None) == 400:
            return HttpResponseBadRequest()
        else:
            raise
    start = page.start_index()
    end = page.end_index()

    # figure out the next page number
    query = request.GET.copy()
    if page.has_next():
        query["page"] = paginator._cur_page + 1
        next_url = "?" + query.urlencode()
        # and the previous page number
    if page.has_previous():
        query["page"] = paginator._cur_page - 1
        previous_url = "?" + query.urlencode()

    rows = q["rows"] if "rows" in q else 20
    crumbs = list(settings.BASE_CRUMBS)

    host = request.get_host()
    response_format = request.GET.get("format")
    if response_format == "atom":
        feed_url = request.build_absolute_uri()
        updated = rfc3339(datetime.datetime.now())
        return render_to_response(
            "search_pages_results.xml",
            dictionary=locals(),
            context_instance=RequestContext(request),
            content_type="application/atom+xml",
        )
    elif response_format == "json":
        results = {
            "startIndex": start,
            "endIndex": end,
            "totalItems": paginator.count,
            "itemsPerPage": rows,
            "items": [p.solr_doc for p in page.object_list],
        }
        for i in results["items"]:
            i["url"] = request.build_absolute_uri(i["id"].rstrip("/") +
                                                  ".json")
        json_text = json.dumps(results)
        # jsonp?
        callback = request.GET.get("callback")
        if callback and is_valid_jsonp_callback(callback):
            json_text = "%s(%s);" % (callback, json_text)
        return HttpResponse(json_text, content_type="application/json")
    page_range_short = list(_page_range_short(paginator, page))
    # copy the current request query without the page and sort
    # query params so we can construct links with it in the template
    q = request.GET.copy()
    for i in ("page", "sort"):
        if i in q:
            q.pop(i)
    q = q.urlencode()

    # get a pseudo-English version of the query
    english_search = paginator.englishify()

    # get some stuff from the query string for use in the form
    lccns = query.getlist("lccn")
    states = query.getlist("state")

    # figure out the sort that's in use
    sort = query.get("sort", "relevance")
    if view_type == "list":
        template = "search_pages_results_list.html"
    else:
        template = "search_pages_results.html"
    page_list = []
    for count in range(len(page.object_list)):
        page_list.append((count + start, page.object_list[count]))
    return render_to_response(template,
                              dictionary=locals(),
                              context_instance=RequestContext(request))
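Several of the JSON branches above guard JSONP output with is_valid_jsonp_callback, a chronam helper whose implementation is not shown on this page. A minimal sketch of what such a validator commonly looks like (the regex below is an assumption about typical JSONP rules, not chronam's actual check):

import re

# Assumption: accept a dotted chain of JavaScript identifiers such as
# "cb", "window.cb" or "my_ns.$handler", and nothing else.
_JSONP_CALLBACK_RE = re.compile(r"^[A-Za-z_$][\w$]*(\.[A-Za-z_$][\w$]*)*$")


def is_valid_jsonp_callback_sketch(callback):
    # Sketch only; chronam's real helper may be stricter or looser.
    return bool(callback) and _JSONP_CALLBACK_RE.match(callback) is not None

The JSON branch then wraps the payload as "%s(%s);" % (callback, json_text) only when the callback passes this check, which keeps arbitrary script from being injected through the callback query parameter.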