Ejemplo n.º 1
0
def issue_pages(request, lccn, date, edition, page_number=1):
    """Paginated list of all pages in one issue of a newspaper title.

    ``date`` is a "YYYY-MM-DD" string; an unparseable date or a missing
    issue raises Http404.  Renders issue_pages.html with ``locals()`` as
    the template context, so every local name below is a template
    variable -- do not rename locals.
    """
    title = get_object_or_404(models.Title, lccn=lccn)
    _year, _month, _day = date.split("-")
    try:
        _date = datetime.date(int(_year), int(_month), int(_day))
    except ValueError:
        raise Http404
    try:
        # If duplicate issue records exist for this date/edition, use the
        # most recently created one.
        issue = title.issues.filter(date_issued=_date,
                                    edition=edition).order_by("-created")[0]
    except IndexError:
        raise Http404
    paginator = Paginator(issue.pages.all(), 20)
    try:
        page = paginator.page(page_number)
    except InvalidPage:
        # Out-of-range page numbers fall back to the first page.
        page = paginator.page(1)
    page_range_short = list(_page_range_short(paginator, page))
    # When the issue has no digitized pages, surface the first
    # reproduction note (if any) so the template can explain why.
    if not page.object_list:
        notes = issue.notes.filter(type="noteAboutReproduction")
        num_notes = notes.count()
        if num_notes >= 1:
            display_label = notes[0].label
            explanation = notes[0].text
    page_title = 'All Pages: %s, %s' % (label(title), label(issue))
    page_head_heading = "All Pages: %s, %s" % (title.display_name, label(issue))
    page_head_subheading = label(title)
    crumbs = create_crumbs(title, issue, date, edition)
    profile_uri = 'http://www.openarchives.org/ore/html/'
    response = render_to_response('issue_pages.html', dictionary=locals(),
                                  context_instance=RequestContext(request))
    return add_cache_tag(response, "lccn=%s" % lccn)
Ejemplo n.º 2
0
def page_print(request, lccn, date, edition, sequence,
               width, height, x1, y1, x2, y2):
    """Printable crop of a newspaper page image served via an IIIF server.

    ``width``/``height`` are the requested output size and
    ``(x1, y1)-(x2, y2)`` the crop rectangle; all arrive as URL strings.
    Renders page_print.html with ``locals()`` as the template context, so
    every local name below is a template variable -- do not rename locals.
    """
    page = get_page(lccn, date, edition, sequence)
    title = get_object_or_404(models.Title, lccn=lccn)
    issue = page.issue
    page_title = "%s, %s, %s" % (label(title), label(issue), label(page))
    crumbs = create_crumbs(title, issue, date, edition, page)
    host = request.get_host()
    image_credit = page.issue.batch.awardee.name
    # Reverse with the original string parts so the URL round-trips.
    path_parts = dict(lccn=lccn, date=date, edition=edition,
                      sequence=sequence,
                      width=width, height=height,
                      x1=x1, y1=y1, x2=x2, y2=y2)
    url = urlresolvers.reverse('chronam_page_print',
                               kwargs=path_parts)
    width, height = int(width), int(height)
    x1, y1, x2, y2 = int(x1), int(y1), int(x2), int(y2)
    # Never scale the output larger than the selected region itself.
    width = min(width, (x2-x1))
    height = min(height, (y2-y1))
    # IIIF Image API URL: {identifier}/{x,y,w,h}/{w,h}/0/default.jpg, with
    # '/' in the identifier percent-encoded as %2F.
    # NOTE(review): assumes batch.path ends with '/' so the JP2 filename
    # concatenates cleanly, and that settings.IIIF has no trailing
    # separator -- confirm against the deployed settings.
    image_url = settings.IIIF + '%2F' \
           + page.issue.batch.path.replace('/opt/chronam/data/dlg_batches/','').replace('/','%2F') \
           + page.jp2_filename.replace('/','%2F') + '/' \
           + str(x1) + ',' + str(y1) + ',' + str(x2 - x1) + ',' + str(y2 - y1) \
           + '/' + str(width) + ',' + str(height) + '/0/default.jpg'
    return render_to_response('page_print.html', dictionary=locals(),
                              context_instance=RequestContext(request))
Ejemplo n.º 3
0
def page_ocr(request, lccn, date, edition, sequence):
    """Display the OCR text for a single newspaper page.

    Renders page_text.html with ``locals()`` as the template context, so
    every local name below is a template variable.
    """
    title, issue, page = _get_tip(lccn, date, edition, sequence)
    page_title = "%s, %s, %s" % (label(title), label(issue), label(page))
    crumbs = create_crumbs(title, issue, date, edition, page)
    host = request.get_host()
    return render_to_response('page_text.html', dictionary=locals(),
                              context_instance=RequestContext(request))
Ejemplo n.º 4
0
def issues_first_pages(request, lccn, page_number=1):
    """Browse a title's issues, one entry per issue showing its first page.

    Raises Http404 when the title has no issues.  Responses carry a cache
    tag keyed on the LCCN.
    """
    title = get_object_or_404(models.Title, lccn=lccn)
    issues = title.issues.all()
    if not issues.exists():
        raise Http404("No issues for %s" % title.display_name)

    paginator = Paginator(issues, 20)
    try:
        current = paginator.page(page_number)
    except InvalidPage:
        # Out-of-range page numbers fall back to the first page.
        current = paginator.page(1)

    context = {
        "title": title,
        "issues": issues,
        "page_title": "Browse Issues: %s" % label(title),
        "page_head_heading": "Browse Issues: %s" % title.display_name,
        "page_head_subheading": label(title),
        "crumbs": create_crumbs(title),
        "paginator": paginator,
        # To avoid confusing aliasing in the templates, we use unambiguous
        # variable names in the templates:
        "paginator_page": current,
        "newspaper_pages": [issue.first_page for issue in current.object_list],
        "page_range_short": list(_page_range_short(paginator, current)),
    }
    response = render(request, "issue_pages.html", context=context)
    return add_cache_tag(response, "lccn=%s" % lccn)
Ejemplo n.º 5
0
def page_ocr(request, lccn, date, edition, sequence):
    """Display the OCR text for a single newspaper page.

    (Duplicate of the earlier page_ocr example.)  Renders page_text.html
    with ``locals()`` as the template context.
    """
    title, issue, page = _get_tip(lccn, date, edition, sequence)
    page_title = "%s, %s, %s" % (label(title), label(issue), label(page))
    crumbs = create_crumbs(title, issue, date, edition, page)
    host = request.get_host()
    return render_to_response('page_text.html', dictionary=locals(),
                              context_instance=RequestContext(request))
Ejemplo n.º 6
0
def issue_pages(request, lccn, date, edition, page_number=1):
    """Paginated list of all pages in one issue of a newspaper title.

    ``date`` is a "YYYY-MM-DD" string; an unparseable date or a missing
    issue raises Http404.  Renders issue_pages.html with ``locals()`` as
    the template context, so every local name below is a template
    variable -- do not rename locals.
    """
    title = get_object_or_404(models.Title, lccn=lccn)
    _year, _month, _day = date.split("-")
    try:
        _date = datetime.date(int(_year), int(_month), int(_day))
    except ValueError:
        raise Http404
    try:
        # If duplicate issue records exist, use the most recently created.
        issue = title.issues.filter(date_issued=_date,
                                    edition=edition).order_by("-created")[0]
    except IndexError:
        raise Http404
    paginator = Paginator(issue.pages.all(), 20)
    try:
        page = paginator.page(page_number)
    except InvalidPage:
        # Out-of-range page numbers fall back to the first page.
        page = paginator.page(1)
    page_range_short = list(_page_range_short(paginator, page))
    # Surface a reproduction note when the issue has no digitized pages.
    if not page.object_list:
        notes = issue.notes.filter(type="noteAboutReproduction")
        num_notes = notes.count()
        if num_notes >= 1:
            display_label = notes[0].label
            explanation = notes[0].text
    page_title = 'All Pages: %s, %s' % (label(title), label(issue))
    page_head_heading = "All Pages: %s, %s" % (title.display_name,
                                               label(issue))
    page_head_subheading = label(title)
    crumbs = create_crumbs(title, issue, date, edition)
    profile_uri = 'http://www.openarchives.org/ore/html/'
    response = render_to_response('issue_pages.html',
                                  dictionary=locals(),
                                  context_instance=RequestContext(request))
    return add_cache_tag(response, "lccn=%s" % lccn)
Ejemplo n.º 7
0
def page_print(request, lccn, date, edition, sequence, width, height, x1, y1,
               x2, y2):
    """Printable view of a newspaper page crop.

    This variant performs no cropping itself -- the size/crop parameters
    are only echoed into ``path_parts``/``url`` for the template.
    Renders page_print.html with ``locals()`` as the template context, so
    every local name below is a template variable.
    """
    page = get_page(lccn, date, edition, sequence)
    title = get_object_or_404(models.Title, lccn=lccn)
    issue = page.issue
    page_title = "%s, %s, %s" % (label(title), label(issue), label(page))
    crumbs = create_crumbs(title, issue, date, edition, page)
    host = request.get_host()
    image_credit = page.issue.batch.awardee.name
    path_parts = dict(lccn=lccn,
                      date=date,
                      edition=edition,
                      sequence=sequence,
                      width=width,
                      height=height,
                      x1=x1,
                      y1=y1,
                      x2=x2,
                      y2=y2)
    url = urlresolvers.reverse('chronam_page_print', kwargs=path_parts)

    response = render_to_response('page_print.html',
                                  dictionary=locals(),
                                  context_instance=RequestContext(request))
    return add_cache_tag(response, "lccn=%s" % lccn)
Ejemplo n.º 8
0
def issues_first_pages(request, lccn, page_number=1):
    """Paginated browse of the first page of each issue for a title.

    Raises Http404 when the title has no issues.  Renders
    issue_pages.html with ``locals()`` as the template context, so every
    local name below is a template variable -- do not rename locals.
    """
    title = get_object_or_404(models.Title, lccn=lccn)
    issues = title.issues.all()
    # exists() issues a cheap LIMIT 1 query instead of a full COUNT(*)
    # just to test for emptiness.
    if not issues.exists():
        raise Http404("No issues for %s" % title.display_name)

    first_pages = []
    for issue in issues:
        first_pages.append(issue.first_page)

    paginator = Paginator(first_pages, 20)
    try:
        page = paginator.page(page_number)
    except InvalidPage:
        # Out-of-range page numbers fall back to the first page.
        page = paginator.page(1)
    page_range_short = list(_page_range_short(paginator, page))

    page_title = 'Browse Issues: %s' % label(title)
    page_head_heading = "Browse Issues: %s" % title.display_name
    page_head_subheading = label(title)
    crumbs = create_crumbs(title)
    response = render_to_response('issue_pages.html',
                                  dictionary=locals(),
                                  context_instance=RequestContext(request))
    return add_cache_tag(response, "lccn=%s" % lccn)
Ejemplo n.º 9
0
def issue_pages(request, lccn, date, edition, page_number=1):
    """Paginated listing of every page in a single issue of a title.

    ``date`` is a "YYYY-MM-DD" string; an unparseable date or a missing
    issue raises Http404.  Responses carry a cache tag keyed on the LCCN.
    """
    title = get_object_or_404(models.Title, lccn=lccn)

    _year, _month, _day = date.split("-")
    try:
        _date = datetime.date(int(_year), int(_month), int(_day))
    except ValueError:
        raise Http404

    try:
        # If duplicate issue records exist, use the most recently created.
        issue = title.issues.filter(
            date_issued=_date, edition=edition).order_by("-created")[0]
    except IndexError:
        raise Http404

    paginator = Paginator(issue.pages.all(), 20)
    try:
        paginator_page = paginator.page(page_number)
    except InvalidPage:
        # Out-of-range page numbers fall back to the first page.
        paginator_page = paginator.page(1)

    context = {
        "page_title": "All Pages: %s, %s" % (label(title), label(issue)),
        "page_head_heading": "All Pages: %s, %s" % (title.display_name,
                                                    label(issue)),
        "page_head_subheading": label(title),
        "crumbs": create_crumbs(title, issue, date, edition),
        "title": title,
        "issue": issue,
        "paginator": paginator,
        "paginator_page": paginator_page,
        "page_range_short": list(_page_range_short(paginator, paginator_page)),
        # This name allows the same template to be used as in the
        # issues_first_pages view, where the paginator is *issues* rather than
        # pages, and for clarity we give it a name which is obviously not
        # paginator pages:
        "newspaper_pages": paginator_page.object_list,
    }

    # Surface a reproduction note when the issue has no digitized pages.
    if not paginator_page.object_list:
        note = issue.notes.filter(type="noteAboutReproduction").first()
        if note:
            context["display_label"] = note.label
            context["explanation"] = note.text

    response = render(request, "issue_pages.html", context=context)
    return add_cache_tag(response, "lccn=%s" % lccn)
Ejemplo n.º 10
0
def page_print(request, lccn, date, edition, sequence, width, height, x1, y1,
               x2, y2):
    """Printable crop of a newspaper page image.

    ``width``/``height`` are the requested output size and
    ``(x1, y1)-(x2, y2)`` the crop rectangle; all arrive as URL strings
    and are converted to ints up front.  Renders page_print.html with
    ``locals()`` as the template context, so every local name below is a
    template variable -- do not rename locals.
    """
    width, height, x1, y1, x2, y2 = map(int, (width, height, x1, y1, x2, y2))
    page = get_page(lccn, date, edition, sequence)
    title = get_object_or_404(models.Title, lccn=lccn)
    issue = page.issue
    page_title = "%s, %s, %s" % (label(title), label(issue), label(page))
    crumbs = create_crumbs(title, issue, date, edition, page)
    host = request.get_host()
    image_credit = page.issue.batch.awardee.name
    path_parts = {
        "lccn": lccn,
        "date": date,
        "edition": edition,
        "sequence": sequence,
        "width": width,
        "height": height,
        "x1": x1,
        "y1": y1,
        "x2": x2,
        "y2": y2,
    }
    url = urlresolvers.reverse("chronam_page_print", kwargs=path_parts)

    # Suggested filename for a "save image" action in the template.
    download_filename = "%s %s %s %s image %dx%d from %dx%d to %dx%d.jpg" % (
        lccn,
        date,
        edition,
        sequence,
        width,
        height,
        x1,
        y1,
        x2,
        y2,
    )

    # Prefer the IIIF image server for the crop when available; otherwise
    # fall back to the local tile view for both URLs.
    if page.iiif_client:
        download_url = page.iiif_client.region(x=x1,
                                               y=y1,
                                               width=x2 - x1,
                                               height=y2 - y1)
        image_url = download_url.size(width=width, height=height)
    else:
        download_url = urlresolvers.reverse("chronam_page_image_tile",
                                            kwargs=path_parts)
        image_url = urlresolvers.reverse("chronam_page_image_tile",
                                         kwargs=path_parts)

    response = render_to_response("page_print.html",
                                  dictionary=locals(),
                                  context_instance=RequestContext(request))
    return add_cache_tag(response, "lccn=%s" % lccn)
Ejemplo n.º 11
0
def title_marc(request, lccn):
    """Display the MARC bibliographic record for a newspaper title.

    Renders marc.html with ``locals()`` as the template context.
    """
    title = get_object_or_404(models.Title, lccn=lccn)
    page_title = "MARC Bibliographic Record: %s" % label(title)
    page_name = "marc"
    crumbs = create_crumbs(title)
    return render_to_response('marc.html', dictionary=locals(),
                              context_instance=RequestContext(request))
Ejemplo n.º 12
0
def title_marc(request, lccn):
    """Display the MARC bibliographic record for a newspaper title.

    (Duplicate of the earlier title_marc example.)  Renders marc.html
    with ``locals()`` as the template context.
    """
    title = get_object_or_404(models.Title, lccn=lccn)
    page_title = "MARC Bibliographic Record: %s" % label(title)
    page_name = "marc"
    crumbs = create_crumbs(title)
    return render_to_response('marc.html', dictionary=locals(),
                              context_instance=RequestContext(request))
Ejemplo n.º 13
0
def issues_first_pages(request, lccn, page_num=1):
    """Paginated 'All Front Pages' browse for a title (12 per page).

    Renders issue_first_pages.html with ``locals()`` as the template
    context, so every local name below is a template variable.
    """
    title = get_object_or_404(models.Title, lccn=lccn)
    crumbs = create_crumbs(title)
    crumbs.extend([{'label': 'All Front Pages'}])
    issues = title.issues.all()
    if not issues.count() > 0:
        raise Http404("No issues for %s" % title.display_name)
    first_pages = []
    for issue in issues:
        first_pages.append(issue.first_page)
    paginator = Paginator(first_pages, 12)
    try:
        page = paginator.page(page_num)
    except InvalidPage:
        # Out-of-range page numbers fall back to the first page.
        page = paginator.page(1)
    page_range_short = list(_page_range_short(paginator, page))
    page_title = 'All Front Pages: %s' % label(title)
    total_items = len(first_pages)
    # Previous page number, clamped at 1.  (The condition only takes the
    # decrement branch when page_num > 2; for page_num <= 2 the else
    # branch yields 1, which is the same result.)
    if (int(page_num) - 1) > 1:
        prev_page_num = int(page_num) - 1
    else:
        prev_page_num = 1
    # NOTE(review): next_url is built unconditionally, even past the last
    # page -- presumably the template guards it; confirm.
    next_url = urlresolvers.reverse('chronam_issues_first_pages_page_number', args=(title.lccn, int(page_num) + 1))
    previous_url = urlresolvers.reverse('chronam_issues_first_pages_page_number', args=(title.lccn, prev_page_num))
    # title_lccn = lccn
    return render_to_response('issue_first_pages.html', dictionary=locals(),
                              context_instance=RequestContext(request))
Ejemplo n.º 14
0
def page_print(request, lccn, date, edition, sequence,
               width, height, x1, y1, x2, y2):
    """Printable view of a newspaper page crop.

    This variant performs no cropping itself -- the size/crop parameters
    are only echoed into ``path_parts``/``url`` for the template.
    Renders page_print.html with ``locals()`` as the template context.
    """
    page = get_page(lccn, date, edition, sequence)
    title = get_object_or_404(models.Title, lccn=lccn)
    issue = page.issue
    page_title = "%s, %s, %s" % (label(title), label(issue), label(page))
    crumbs = create_crumbs(title, issue, date, edition, page)
    host = request.get_host()
    image_credit = page.issue.batch.awardee.name
    path_parts = dict(lccn=lccn, date=date, edition=edition,
                      sequence=sequence,
                      width=width, height=height,
                      x1=x1, y1=y1, x2=x2, y2=y2)
    url = urlresolvers.reverse('chronam_page_print',
                               kwargs=path_parts)

    return render_to_response('page_print.html', dictionary=locals(),
                              context_instance=RequestContext(request))
Ejemplo n.º 15
0
def title_holdings(request, lccn):
    """List libraries that hold physical copies of a newspaper title.

    Renders holdings.html with ``locals()`` as the template context, so
    every local name below is a template variable.
    """
    title = get_object_or_404(models.Title, lccn=lccn)
    page_title = "Libraries that Have It: %s" % label(title)
    page_name = "holdings"
    crumbs = create_crumbs(title)
    # select_related avoids one extra query per holding when the template
    # accesses holding.institution (matches the other title_holdings
    # variants in this file).
    holdings = title.holdings.select_related('institution')

    return render_to_response('holdings.html', dictionary=locals(),
                              context_instance=RequestContext(request))
Ejemplo n.º 16
0
def title_holdings(request, lccn):
    """List libraries that hold physical copies of a newspaper title.

    Renders holdings.html with ``locals()`` as the template context.
    """
    title = get_object_or_404(models.Title, lccn=lccn)
    page_title = "Libraries that Have It: %s" % label(title)
    page_name = "holdings"
    crumbs = create_crumbs(title)

    # select_related avoids one extra query per holding when the template
    # accesses holding.institution.
    holdings = title.holdings.select_related('institution').order_by('institution__name')

    return render_to_response('holdings.html', dictionary=locals(),
                              context_instance=RequestContext(request))
Ejemplo n.º 17
0
def title_holdings(request, lccn):
    """List libraries that hold physical copies of a newspaper title.

    Same as the previous variant but tags the response for cache
    invalidation by LCCN.  Renders holdings.html with ``locals()`` as the
    template context.
    """
    title = get_object_or_404(models.Title, lccn=lccn)
    page_title = "Libraries that Have It: %s" % label(title)
    page_name = "holdings"
    crumbs = create_crumbs(title)

    # select_related avoids one extra query per holding when the template
    # accesses holding.institution.
    holdings = title.holdings.select_related('institution').order_by('institution__name')

    response = render_to_response('holdings.html', dictionary=locals(),
                                  context_instance=RequestContext(request))
    return add_cache_tag(response, "lccn=%s" % lccn)
Ejemplo n.º 18
0
def issues_first_pages(request, lccn, page_number=1):
    """Paginated browse of the first page of each issue for a title.

    Renders issue_pages.html with ``locals()`` as the template context,
    so every local name below is a template variable.
    """
    title = get_object_or_404(models.Title, lccn=lccn)
    issues = title.issues.all()
    # NOTE(review): issues.exists() would avoid a full COUNT(*) here.
    if not issues.count() > 0:
        raise Http404("No issues for %s" % title.display_name)

    first_pages = []
    for issue in issues:
        first_pages.append(issue.first_page)

    paginator = Paginator(first_pages, 20)
    try:
        page = paginator.page(page_number)
    except InvalidPage:
        # Out-of-range page numbers fall back to the first page.
        page = paginator.page(1)
    page_range_short = list(_page_range_short(paginator, page))

    page_title = 'Browse Issues: %s' % label(title)
    page_head_heading = "Browse Issues: %s" % title.display_name
    page_head_subheading = label(title)
    crumbs = create_crumbs(title)
    return render_to_response('issue_pages.html', dictionary=locals(),
                              context_instance=RequestContext(request))
Ejemplo n.º 19
0
def title(request, lccn):
    """About page for a single newspaper title.

    Escapes title notes and turns bare URLs in them into external links.
    Renders title.html with ``locals()`` as the template context, so
    every local name below is a template variable -- do not rename locals.
    """
    title = get_object_or_404(models.Title, lccn=lccn)
    page_title = "%s" % label(title)
    page_name = "title"
    # we call these here, because the query the db, they are not
    # cached by django's ORM, and we have some conditional logic
    # in the template that would result in them getting called more
    # than once. Short story: minimize database hits...
    related_titles = title.related_titles()
    succeeding_titles = title.succeeding_titles()
    preceeding_titles = title.preceeding_titles()
    profile_uri = 'http://www.openarchives.org/ore/html/'
    notes = []
    has_external_link = False
    for note in title.notes.all():
        org_text = html.escape(note.text)
        # Raw string: '\s' and '\.' are regex escapes, not string escapes.
        # (The non-raw form triggers invalid-escape warnings on Python 3;
        # the pattern itself is unchanged.)
        text = re.sub(r'(http(s)?://[^\s]+[^\.])',
                      r'<a class="external" href="\1">\1</a>', org_text)
        if text != org_text:
            has_external_link = True
        notes.append(text)

    if title.has_issues:
        rep_notes = title.first_issue.notes.filter(type="noteAboutReproduction")
        num_notes = rep_notes.count()
        if num_notes >= 1:
            explanation = rep_notes[0].text

    # adding essay info on this page if it exists
    first_essay = title.first_essay
    first_issue = title.first_issue
    if first_issue:
        issue_date = first_issue.date_issued

    crumbs = list(settings.BASE_CRUMBS)
    crumbs.extend([{
        'label': 'Titles',
        'href': urlresolvers.reverse('chronam_newspapers')
    },
    {
        'label': title.name
    }
    ])

    response = render_to_response('title.html', dictionary=locals(),
                                  context_instance=RequestContext(request))
    return response
Ejemplo n.º 20
0
def title(request, lccn):
    """About page for a single newspaper title.

    Prefetches the title's related collections, escapes its notes (turning
    bare URLs into external links), and pulls in first/last issue details
    when available.  Responses carry a cache tag keyed on the LCCN.
    """
    title = get_object_or_404(
        models.Title.objects.prefetch_related("subjects", "languages",
                                              "places", "publication_dates"),
        lccn=lccn,
    )

    context = {
        "title": title,
        "page_title": "About %s" % label(title),
        "page_name": "title",
        "crumbs": create_crumbs(title),
        "related_titles": title.related_titles(),
        "succeeding_titles": title.succeeding_titles(),
        "preceeding_titles": title.preceeding_titles(),
    }

    # HTML-escape each note and linkify any bare URLs it contains.
    escaped_notes = []
    for note in title.notes.all():
        escaped = html.escape(note.text)
        linked = re.sub(r"(http(s)?://[^\s]+[^\.])",
                        r'<a class="external" href="\1">\1</a>', escaped)
        escaped_notes.append(linked)
    context["notes"] = escaped_notes

    if title.has_issues:
        rep_note = title.first_issue.notes.filter(
            type="noteAboutReproduction").first()
        if rep_note:
            context["explanation"] = rep_note.text

    # adding essay info on this page if it exists
    context["first_essay"] = title.first_essay
    first_issue = title.first_issue
    context["first_issue"] = first_issue

    if first_issue:
        context["issue_date"] = first_issue.date_issued
        context["first_page_with_image"] = first_issue.first_page_with_image
        context["first_page_of_first_issue"] = first_issue.first_page

    last_issue = title.last_issue
    context["last_issue"] = last_issue
    if last_issue:
        context["first_page_of_last_issue"] = last_issue.first_page

    response = render(request, "title.html", context)
    return add_cache_tag(response, "lccn=%s" % lccn)
Ejemplo n.º 21
0
def title(request, lccn):
    """About page for a single newspaper title.

    Escapes title notes and turns bare URLs in them into external links.
    Renders title.html with ``locals()`` as the template context, so
    every local name below is a template variable.
    """
    title = get_object_or_404(models.Title, lccn=lccn)
    page_title = "About %s" % label(title)
    page_name = "title"
    # we call these here, because the query the db, they are not
    # cached by django's ORM, and we have some conditional logic
    # in the template that would result in them getting called more
    # than once. Short story: minimize database hits...
    related_titles = title.related_titles()
    succeeding_titles = title.succeeding_titles()
    preceeding_titles = title.preceeding_titles()
    profile_uri = 'http://www.openarchives.org/ore/html/'
    notes = []
    has_external_link = False
    for note in title.notes.all():
        org_text = html.escape(note.text)
        # NOTE(review): non-raw pattern string relies on '\s'/'\.' passing
        # through unchanged; a raw string (r'...') would silence Python 3
        # invalid-escape warnings without changing the pattern.
        text = re.sub('(http(s)?://[^\s]+[^\.])',
                      r'<a class="external" href="\1">\1</a>', org_text)
        if text != org_text:
            has_external_link = True
        notes.append(text)

    if title.has_issues:
        rep_notes = title.first_issue.notes.filter(type="noteAboutReproduction")
        num_notes = rep_notes.count()
        if num_notes >= 1:
            explanation = rep_notes[0].text

    # adding essay info on this page if it exists
    first_essay = title.first_essay
    first_issue = title.first_issue
    if first_issue:
        issue_date = first_issue.date_issued

    crumbs = create_crumbs(title)
    response = render_to_response('title.html', dictionary=locals(),
                                  context_instance=RequestContext(request))
    return response
Ejemplo n.º 22
0
def page(request, lccn, date, edition, sequence):
    """Main viewer for a single newspaper page.

    May redirect to the word-highlight URL when the visitor arrived from
    a search engine.  Renders page.html with ``locals()`` as the template
    context, so every local name below is a template variable -- do not
    rename locals.
    """
    title, issue, page = _get_tip(lccn, date, edition, sequence)

    # No digitized image: surface a reproduction note explaining why.
    if not page.jp2_filename:
        notes = page.notes.filter(type="noteAboutReproduction")
        num_notes = notes.count()
        if num_notes >= 1:
            explanation = notes[0].text
        else:
            explanation = ""

    # see if the user came from search engine results and attempt to
    # highlight words from their query by redirecting to a url that
    # has the highlighted words in it
    try:
        words = _search_engine_words(request)
        words = '+'.join(words)
        if len(words) > 0:
            path_parts = dict(lccn=lccn,
                              date=date,
                              edition=edition,
                              sequence=sequence)
            url = '%s?%s#%s' % (urlresolvers.reverse('chronam_page_words',
                                                     kwargs=path_parts),
                                request.GET.urlencode(), words)
            response = HttpResponseRedirect(url)
            return add_cache_tag(response, "lccn=%s" % lccn)
    except Exception as exc:
        LOGGER.error(
            "Failed to add search highlighting based on the referred search engine query: %s",
            exc,
            exc_info=True)
        if settings.DEBUG:
            raise
        # else squish the exception so the page will still get
        # served up minus the highlights

    # Calculate the previous_issue_first_page. Note: it was decided
    # that we want to skip over issues with missing pages. See ticket
    # #383.
    _issue = issue
    while True:
        previous_issue_first_page = None
        _issue = _issue.previous
        if not _issue:
            break
        previous_issue_first_page = _issue.first_page
        if previous_issue_first_page:
            break

    # do the same as above but for next_issue this time.
    _issue = issue
    while True:
        next_issue_first_page = None
        _issue = _issue.next
        if not _issue:
            break
        next_issue_first_page = _issue.first_page
        if next_issue_first_page:
            break

    page_title = "%s, %s, %s" % (label(title), label(issue), label(page))
    page_head_heading = "%s, %s, %s" % (title.display_name, label(issue),
                                        label(page))
    page_head_subheading = label(title)
    crumbs = create_crumbs(title, issue, date, edition, page)

    # Human-readable size of the JP2 image file, if it exists on disk.
    # NOTE(review): image_size is only defined when filename is set; the
    # template must tolerate its absence -- confirm.
    filename = page.jp2_abs_filename
    if filename:
        try:
            im = os.path.getsize(filename)
            image_size = filesizeformat(im)
        except OSError:
            image_size = "Unknown"

    image_credit = issue.batch.awardee.name
    host = request.get_host()
    profile_uri = 'http://www.openarchives.org/ore/html/'

    template = "page.html"
    text = get_page_text(page)
    response = render_to_response(template,
                                  dictionary=locals(),
                                  context_instance=RequestContext(request))
    return add_cache_tag(response, "lccn=%s" % lccn)
Ejemplo n.º 23
0
                                    edition=edition).order_by("-created")[0]
    except IndexError, e:
        raise Http404
    paginator = Paginator(issue.pages.all(), 20)
    try:
        page = paginator.page(page_number)
    except InvalidPage:
        page = paginator.page(1)
    page_range_short = list(_page_range_short(paginator, page))
    if not page.object_list:
        notes = issue.notes.filter(type="noteAboutReproduction")
        num_notes = notes.count()
        if num_notes >= 1:
            display_label = notes[0].label
            explanation = notes[0].text
    page_title = 'All Pages: %s, %s' % (label(title), label(issue))
    page_head_heading = "All Pages: %s, %s" % (title.display_name, label(issue))
    page_head_subheading = label(title)
    crumbs = create_crumbs(title, issue, date, edition)
    profile_uri = 'http://www.openarchives.org/ore/html/'
    response = render_to_response('issue_pages.html', dictionary=locals(),
                                  context_instance=RequestContext(request))
    return response


@cache_page(settings.DEFAULT_TTL_SECONDS)
@rdf_view
def issue_pages_rdf(request, lccn, date, edition):
    title, issue, page = _get_tip(lccn, date, edition)
    graph = issue_to_graph(issue)
    response = HttpResponse(graph.serialize(base=_rdf_base(request),
Ejemplo n.º 24
0
def page(request, lccn, date, edition, sequence):
    """Main viewer for a single newspaper page.

    (Duplicate of the earlier page example, with different line wrapping.)
    May redirect to the word-highlight URL when the visitor arrived from
    a search engine.  Renders page.html with ``locals()`` as the template
    context, so every local name below is a template variable.
    """
    title, issue, page = _get_tip(lccn, date, edition, sequence)

    # No digitized image: surface a reproduction note explaining why.
    if not page.jp2_filename:
        notes = page.notes.filter(type="noteAboutReproduction")
        num_notes = notes.count()
        if num_notes >= 1:
            explanation = notes[0].text
        else:
            explanation = ""

    # see if the user came from search engine results and attempt to
    # highlight words from their query by redirecting to a url that
    # has the highlighted words in it
    try:
        words = _search_engine_words(request)
        words = '+'.join(words)
        if len(words) > 0:
            path_parts = dict(lccn=lccn, date=date, edition=edition, sequence=sequence)
            url = '%s?%s#%s' % (urlresolvers.reverse('chronam_page_words',
                                                     kwargs=path_parts), request.GET.urlencode(), words)
            response = HttpResponseRedirect(url)
            return add_cache_tag(response, "lccn=%s" % lccn)
    except Exception as exc:
        LOGGER.error("Failed to add search highlighting based on the referred search engine query: %s",
                     exc, exc_info=True)
        if settings.DEBUG:
            raise
        # else squish the exception so the page will still get
        # served up minus the highlights

    # Calculate the previous_issue_first_page. Note: it was decided
    # that we want to skip over issues with missing pages. See ticket
    # #383.
    _issue = issue
    while True:
        previous_issue_first_page = None
        _issue = _issue.previous
        if not _issue:
            break
        previous_issue_first_page = _issue.first_page
        if previous_issue_first_page:
            break

    # do the same as above but for next_issue this time.
    _issue = issue
    while True:
        next_issue_first_page = None
        _issue = _issue.next
        if not _issue:
            break
        next_issue_first_page = _issue.first_page
        if next_issue_first_page:
            break

    page_title = "%s, %s, %s" % (label(title), label(issue), label(page))
    page_head_heading = "%s, %s, %s" % (title.display_name, label(issue), label(page))
    page_head_subheading = label(title)
    crumbs = create_crumbs(title, issue, date, edition, page)

    # Human-readable size of the JP2 image file, if it exists on disk.
    filename = page.jp2_abs_filename
    if filename:
        try:
            im = os.path.getsize(filename)
            image_size = filesizeformat(im)
        except OSError:
            image_size = "Unknown"

    image_credit = issue.batch.awardee.name
    host = request.get_host()
    profile_uri = 'http://www.openarchives.org/ore/html/'

    template = "page.html"
    text = get_page_text(page)
    response = render_to_response(template, dictionary=locals(),
                                  context_instance=RequestContext(request))
    return add_cache_tag(response, "lccn=%s" % lccn)
Ejemplo n.º 25
0
def _label(value):
    """Thin wrapper delegating to the shared label() display helper."""
    display = label(value)
    return display
Ejemplo n.º 26
0
def _label(value):
    """Return the display label for *value* via the shared label() helper."""
    return label(value)
Ejemplo n.º 27
0
                                    edition=edition).order_by("-created")[0]
    except IndexError, e:
        raise Http404
    paginator = Paginator(issue.pages.all(), 20)
    try:
        page = paginator.page(page_number)
    except InvalidPage:
        page = paginator.page(1)
    page_range_short = list(_page_range_short(paginator, page))
    if not page.object_list:
        notes = issue.notes.filter(type="noteAboutReproduction")
        num_notes = notes.count()
        if num_notes >= 1:
            display_label = notes[0].label
            explanation = notes[0].text
    page_title = 'All Pages: %s, %s' % (label(title), label(issue))
    page_head_heading = "All Pages: %s, %s" % (title.display_name, label(issue))
    page_head_subheading = label(title)
    crumbs = create_crumbs(title, issue, date, edition)
    profile_uri = 'http://www.openarchives.org/ore/html/'
    response = render_to_response('issue_pages.html', dictionary=locals(),
                                  context_instance=RequestContext(request))
    return response


@cache_page(settings.DEFAULT_TTL_SECONDS)
@rdf_view
def issue_pages_rdf(request, lccn, date, edition):
    title, issue, page = _get_tip(lccn, date, edition)
    graph = issue_to_graph(issue)
    response = HttpResponse(graph.serialize(base=_rdf_base(request),