def chronam_topic(request, topic_id):
    """Display a recommended-topic page, with breadcrumbs based on the referrer.

    If the visitor arrived from the recommended-topics listing, show
    listing-style crumbs; if they arrived from a newspaper page URL, rebuild
    that page's crumb trail and append the topic; otherwise keep BASE_CRUMBS.
    """
    topic = get_object_or_404(models.Topic, pk=topic_id)
    page_title = topic.name
    crumbs = list(settings.BASE_CRUMBS)
    # HTTP_REFERER may be absent from the request; treat a missing header as
    # an empty string instead of raising TypeError on `in` / re.sub below.
    if urlresolvers.reverse('recommended_topics') in (request.META.get('HTTP_REFERER') or ''):
        crumbs.extend([{'label': 'Recommended Topics',
                        'href': urlresolvers.reverse('recommended_topics')},
                       {'label': topic.name,
                        'href': urlresolvers.reverse('chronam_topic',
                                                     kwargs={'topic_id': topic.pk})}])
    else:
        referer = re.sub('^https?:\/\/', '', request.META.get('HTTP_REFERER') or '').split('/')
        try:
            # Referrer path shape is /lccn/<lccn>/<date>/ed-N/seq-N/...;
            # the [-1] grabs the trailing digit of the ed-N / seq-N parts.
            lccn, date, edition, sequence = referer[2], referer[3], referer[4][-1], referer[5][-1]
            page = get_page(lccn, date, edition, sequence)
            if page:
                title, issue, page = _get_tip(lccn, date, edition, sequence)
                crumbs = create_crumbs(title, issue, date, edition, page)
                crumbs.extend([{'label': topic.name,
                                'href': urlresolvers.reverse('chronam_topic',
                                                             kwargs={'topic_id': topic.pk})}])
        except Exception:
            # Best effort only: if the referrer does not parse as a page URL,
            # fall back to the default crumbs rather than failing the request.
            pass
    important_dates = [s for s in topic.important_dates.split('\n ') if not s.isspace()]
    search_suggestions = topic.suggested_search_terms.split('\t')
    # str.lstrip(t.title) would strip any *characters* appearing in the title,
    # not the title prefix itself; remove the exact prefix instead.
    chronam_pages = [{'title': t.title,
                      'description': t.description[len(t.title):] if t.description.startswith(t.title) else t.description,
                      'url': t.url}
                     for t in topic.topicpages_set.all()]
    # dictionary=locals() exposes every local above to the template context.
    return render_to_response('topic.html', dictionary=locals(),
                              context_instance=RequestContext(request))
def issues_first_pages(request, lccn, page_number=1):
    """Paginated browse of the first page of every issue of a title.

    Raises Http404 when the title has no issues; out-of-range page numbers
    fall back to the first result page.
    """
    title = get_object_or_404(models.Title, lccn=lccn)
    issues = title.issues.all()
    # exists() lets the database stop at the first matching row instead of
    # running a full COUNT(*) just to learn whether any issues are present.
    if not issues.exists():
        raise Http404("No issues for %s" % title.display_name)
    first_pages = []
    for issue in issues:
        first_pages.append(issue.first_page)
    paginator = Paginator(first_pages, 20)
    try:
        page = paginator.page(page_number)
    except InvalidPage:
        page = paginator.page(1)
    page_range_short = list(_page_range_short(paginator, page))
    page_title = 'Browse Issues: %s' % label(title)
    page_head_heading = "Browse Issues: %s" % title.display_name
    page_head_subheading = label(title)
    crumbs = create_crumbs(title)
    # dictionary=locals() exposes every local above to the template context.
    response = render_to_response('issue_pages.html', dictionary=locals(),
                                  context_instance=RequestContext(request))
    return add_cache_tag(response, "lccn=%s" % lccn)
def page_print(request, lccn, date, edition, sequence, width, height, x1, y1, x2, y2):
    """Render a printable crop of a page image served by the IIIF image server.

    width/height bound the output size; (x1, y1)-(x2, y2) is the crop
    rectangle in source-image coordinates. All parameters arrive as strings
    from the URL and are converted to int below.
    """
    page = get_page(lccn, date, edition, sequence)
    title = get_object_or_404(models.Title, lccn=lccn)
    issue = page.issue
    page_title = "%s, %s, %s" % (label(title), label(issue), label(page))
    crumbs = create_crumbs(title, issue, date, edition, page)
    host = request.get_host()
    image_credit = page.issue.batch.awardee.name
    # reverse() is called with the original string values, before conversion
    path_parts = dict(lccn=lccn, date=date, edition=edition, sequence=sequence,
                      width=width, height=height, x1=x1, y1=y1, x2=x2, y2=y2)
    url = urlresolvers.reverse('chronam_page_print', kwargs=path_parts)
    width, height = int(width), int(height)
    x1, y1, x2, y2 = int(x1), int(y1), int(x2), int(y2)
    # never request an image larger than the selected region itself
    width = min(width, (x2-x1))
    height = min(height, (y2-y1))
    # Assemble an IIIF Image API URL ({region}/{size}/0/default.jpg); '/'
    # inside the identifier is percent-encoded as %2F, and the local batch
    # filesystem prefix is stripped from the batch path first.
    image_url = settings.IIIF + '%2F' \
        + page.issue.batch.path.replace('/opt/chronam/data/dlg_batches/','').replace('/','%2F') \
        + page.jp2_filename.replace('/','%2F') + '/' \
        + str(x1) + ',' + str(y1) + ',' + str(x2 - x1) + ',' + str(y2 - y1) \
        + '/' + str(width) + ',' + str(height) + '/0/default.jpg'
    # dictionary=locals() exposes every local above to the template context
    return render_to_response('page_print.html', dictionary=locals(),
                              context_instance=RequestContext(request))
def page_ocr(request, lccn, date, edition, sequence):
    """Display the OCR text for a single newspaper page."""
    title, issue, page = _get_tip(lccn, date, edition, sequence)
    page_title = "%s, %s, %s" % (label(title), label(issue), label(page))
    crumbs = create_crumbs(title, issue, date, edition, page)
    host = request.get_host()
    # dictionary=locals() exposes every local above to the template context
    return render_to_response('page_text.html', dictionary=locals(),
                              context_instance=RequestContext(request))
def title_marc(request, lccn):
    """Display the MARC bibliographic record for a title."""
    title = get_object_or_404(models.Title, lccn=lccn)
    page_title = "MARC Bibliographic Record: %s" % label(title)
    page_name = "marc"
    crumbs = create_crumbs(title)
    # dictionary=locals() exposes every local above to the template context
    return render_to_response('marc.html', dictionary=locals(),
                              context_instance=RequestContext(request))
def issues(request, lccn, year=None):
    """Calendar view of a title's issues for the given (or default) year."""
    title = get_object_or_404(models.Title, lccn=lccn)
    if year is not None:
        _year = int(year)
    else:
        # default to the year of the earliest issue; 1900 when none exist
        issue_stats = title.issues.aggregate(first_issued=Min("date_issued"))
        first_issued = issue_stats.get("first_issued")
        if first_issued:
            _year = first_issued.year
        else:
            _year = 1900
    year_view = HTMLCalendar(firstweekday=6, issues=title.issues).formatyear(_year)

    class SelectYearForm(django_forms.Form):
        # one dropdown choice per distinct publication year
        year = fields.ChoiceField(
            choices=((d.year, d.year) for d in title.issues.dates("date_issued", "year")),
            initial=_year)
    select_year_form = SelectYearForm()
    page_title = "Browse Issues: %s" % title.display_name
    page_name = "issues"
    crumbs = create_crumbs(title)
    # dictionary=locals() exposes every local above to the template context
    response = render_to_response("issues.html", dictionary=locals(),
                                  context_instance=RequestContext(request))
    return add_cache_tag(response, "lccn=%s" % lccn)
def issues_first_pages(request, lccn, page_num=1):
    """Paginated "All Front Pages" browse for a title, 12 per page."""
    title = get_object_or_404(models.Title, lccn=lccn)
    crumbs = create_crumbs(title)
    crumbs.extend([{'label': 'All Front Pages'}])
    issues = title.issues.all()
    if not issues.count() > 0:
        raise Http404("No issues for %s" % title.display_name)
    first_pages = []
    for issue in issues:
        first_pages.append(issue.first_page)
    paginator = Paginator(first_pages, 12)
    try:
        page = paginator.page(page_num)
    except InvalidPage:
        # out-of-range page numbers fall back to the first page
        page = paginator.page(1)
    page_range_short = list(_page_range_short(paginator, page))
    page_title = 'All Front Pages: %s' % label(title)
    total_items = len(first_pages)
    # previous page number, clamped to 1 (page_num arrives as a string)
    if (int(page_num) - 1) > 1:
        prev_page_num = int(page_num) - 1
    else:
        prev_page_num = 1
    # NOTE(review): next_url is built unconditionally, even beyond the last
    # page; presumably the template guards against following it — confirm.
    next_url = urlresolvers.reverse('chronam_issues_first_pages_page_number',
                                    args=(title.lccn, int(page_num) + 1))
    previous_url = urlresolvers.reverse('chronam_issues_first_pages_page_number',
                                        args=(title.lccn, prev_page_num))
    # dictionary=locals() exposes every local above to the template context
    return render_to_response('issue_first_pages.html', dictionary=locals(),
                              context_instance=RequestContext(request))
def issues(request, lccn, year=None):
    """Calendar view of a title's issues for the given (or default) year."""
    title = get_object_or_404(models.Title, lccn=lccn)
    issues = title.issues.all()
    if issues.count() > 0:
        if year is None:
            # default to the year of the first issue in the queryset
            _year = issues[0].date_issued.year
        else:
            _year = int(year)
    else:
        _year = 1900  # no issues available
    year_view = HTMLCalendar(firstweekday=6, issues=issues).formatyear(_year)
    dates = issues.dates('date_issued', 'year')

    class SelectYearForm(django_forms.Form):
        # one dropdown choice per distinct publication year
        year = fields.ChoiceField(choices=((d.year, d.year) for d in dates),
                                  initial=_year)
    select_year_form = SelectYearForm()
    page_title = "Browse Issues: %s" % title.display_name
    page_name = "issues"
    crumbs = create_crumbs(title)
    # dictionary=locals() exposes every local above to the template context
    response = render_to_response('issues.html', dictionary=locals(),
                                  context_instance=RequestContext(request))
    return add_cache_tag(response, "lccn=%s" % lccn)
def issue_pages(request, lccn, date, edition, page_number=1):
    """List all pages of one issue, paginated 20 per page.

    `date` is YYYY-MM-DD; a malformed date or missing issue raises Http404.
    """
    title = get_object_or_404(models.Title, lccn=lccn)
    _year, _month, _day = date.split("-")
    try:
        _date = datetime.date(int(_year), int(_month), int(_day))
    except ValueError:
        raise Http404
    try:
        # most recently loaded copy of the issue wins
        issue = title.issues.filter(date_issued=_date, edition=edition).order_by("-created")[0]
    except IndexError:
        raise Http404
    paginator = Paginator(issue.pages.all(), 20)
    try:
        page = paginator.page(page_number)
    except InvalidPage:
        page = paginator.page(1)
    page_range_short = list(_page_range_short(paginator, page))
    # with no digitized pages, surface the "note about reproduction" if any
    if not page.object_list:
        notes = issue.notes.filter(type="noteAboutReproduction")
        num_notes = notes.count()
        if num_notes >= 1:
            display_label = notes[0].label
            explanation = notes[0].text
    page_title = 'All Pages: %s, %s' % (label(title), label(issue))
    page_head_heading = "All Pages: %s, %s" % (title.display_name, label(issue))
    page_head_subheading = label(title)
    crumbs = create_crumbs(title, issue, date, edition)
    profile_uri = 'http://www.openarchives.org/ore/html/'
    # dictionary=locals() exposes every local above to the template context
    response = render_to_response('issue_pages.html', dictionary=locals(),
                                  context_instance=RequestContext(request))
    return add_cache_tag(response, "lccn=%s" % lccn)
def page_print(request, lccn, date, edition, sequence, width, height, x1, y1, x2, y2):
    """Render the printable view of a page crop (coordinates passed through)."""
    page = get_page(lccn, date, edition, sequence)
    title = get_object_or_404(models.Title, lccn=lccn)
    issue = page.issue
    page_title = "%s, %s, %s" % (label(title), label(issue), label(page))
    crumbs = create_crumbs(title, issue, date, edition, page)
    host = request.get_host()
    image_credit = page.issue.batch.awardee.name
    path_parts = dict(lccn=lccn, date=date, edition=edition, sequence=sequence,
                      width=width, height=height, x1=x1, y1=y1, x2=x2, y2=y2)
    url = urlresolvers.reverse('chronam_page_print', kwargs=path_parts)
    # dictionary=locals() exposes every local above to the template context
    response = render_to_response('page_print.html', dictionary=locals(),
                                  context_instance=RequestContext(request))
    return add_cache_tag(response, "lccn=%s" % lccn)
def issues_first_pages(request, lccn, page_number=1):
    """Paginated browse of the first page of each issue of a title.

    Raises Http404 for titles with no issues; an out-of-range page number
    falls back to the first result page.
    """
    title = get_object_or_404(models.Title, lccn=lccn)
    issues = title.issues.all()
    if not issues.exists():
        raise Http404("No issues for %s" % title.display_name)

    issue_paginator = Paginator(issues, 20)
    try:
        current = issue_paginator.page(page_number)
    except InvalidPage:
        current = issue_paginator.page(1)

    context = {
        "title": title,
        "issues": issues,
        "page_title": "Browse Issues: %s" % label(title),
        "page_head_heading": "Browse Issues: %s" % title.display_name,
        "page_head_subheading": label(title),
        "crumbs": create_crumbs(title),
        "paginator": issue_paginator,
        # Unambiguous template names: the paginator's page object versus the
        # newspaper pages it contains.
        "paginator_page": current,
        "newspaper_pages": [item.first_page for item in current.object_list],
        "page_range_short": list(_page_range_short(issue_paginator, current)),
    }
    response = render(request, "issue_pages.html", context=context)
    return add_cache_tag(response, "lccn=%s" % lccn)
def title_holdings(request, lccn):
    """List the library holdings records for a title."""
    title = get_object_or_404(models.Title, lccn=lccn)
    page_title = "Libraries that Have It: %s" % label(title)
    page_name = "holdings"
    crumbs = create_crumbs(title)
    holdings = title.holdings.all()
    # dictionary=locals() exposes every local above to the template context
    return render_to_response('holdings.html', dictionary=locals(),
                              context_instance=RequestContext(request))
def issue_pages(request, lccn, date, edition, page_number=1):
    """List all pages of one issue, paginated 20 per page.

    `date` is YYYY-MM-DD; a malformed date or a missing issue raises Http404.
    """
    title = get_object_or_404(models.Title, lccn=lccn)
    year_str, month_str, day_str = date.split("-")
    try:
        issue_date = datetime.date(int(year_str), int(month_str), int(day_str))
    except ValueError:
        raise Http404

    # most recently loaded copy of the issue wins
    candidates = title.issues.filter(date_issued=issue_date, edition=edition).order_by("-created")
    try:
        issue = candidates[0]
    except IndexError:
        raise Http404

    paginator = Paginator(issue.pages.all(), 20)
    try:
        current = paginator.page(page_number)
    except InvalidPage:
        current = paginator.page(1)

    context = {
        "page_title": "All Pages: %s, %s" % (label(title), label(issue)),
        "page_head_heading": "All Pages: %s, %s" % (title.display_name, label(issue)),
        "page_head_subheading": label(title),
        "crumbs": create_crumbs(title, issue, date, edition),
        "title": title,
        "issue": issue,
        "paginator": paginator,
        "paginator_page": current,
        "page_range_short": list(_page_range_short(paginator, current)),
        # Named "newspaper_pages" (not "pages") so the same template also
        # serves issues_first_pages, where the paginator iterates issues.
        "newspaper_pages": current.object_list,
    }

    # With no digitized pages, surface the "note about reproduction" if any.
    if not current.object_list:
        note = issue.notes.filter(type="noteAboutReproduction").first()
        if note:
            context["display_label"] = note.label
            context["explanation"] = note.text

    response = render(request, "issue_pages.html", context=context)
    return add_cache_tag(response, "lccn=%s" % lccn)
def title_holdings(request, lccn):
    """List library holdings for a title, ordered by institution name."""
    title = get_object_or_404(models.Title, lccn=lccn)
    page_title = "Libraries that Have It: %s" % label(title)
    page_name = "holdings"
    crumbs = create_crumbs(title)
    # select_related avoids one query per holding when the template reads
    # holding.institution
    holdings = title.holdings.select_related('institution').order_by('institution__name')
    # dictionary=locals() exposes every local above to the template context
    return render_to_response('holdings.html', dictionary=locals(),
                              context_instance=RequestContext(request))
def title_holdings(request, lccn):
    """List library holdings for a title, ordered by institution name."""
    title = get_object_or_404(models.Title, lccn=lccn)
    page_title = "Libraries that Have It: %s" % label(title)
    page_name = "holdings"
    crumbs = create_crumbs(title)
    # select_related avoids one query per holding when the template reads
    # holding.institution
    holdings = title.holdings.select_related('institution').order_by('institution__name')
    # dictionary=locals() exposes every local above to the template context
    response = render_to_response('holdings.html', dictionary=locals(),
                                  context_instance=RequestContext(request))
    return add_cache_tag(response, "lccn=%s" % lccn)
def page_print(request, lccn, date, edition, sequence, width, height, x1, y1, x2, y2):
    """Render a printable crop of a page image.

    width/height bound the output size; (x1, y1)-(x2, y2) is the crop
    rectangle in source-image coordinates. Uses the page's IIIF client when
    available, otherwise falls back to the local tile view.
    """
    width, height, x1, y1, x2, y2 = map(int, (width, height, x1, y1, x2, y2))
    page = get_page(lccn, date, edition, sequence)
    title = get_object_or_404(models.Title, lccn=lccn)
    issue = page.issue
    page_title = "%s, %s, %s" % (label(title), label(issue), label(page))
    crumbs = create_crumbs(title, issue, date, edition, page)
    host = request.get_host()
    image_credit = page.issue.batch.awardee.name
    path_parts = {
        "lccn": lccn,
        "date": date,
        "edition": edition,
        "sequence": sequence,
        "width": width,
        "height": height,
        "x1": x1,
        "y1": y1,
        "x2": x2,
        "y2": y2,
    }
    url = urlresolvers.reverse("chronam_page_print", kwargs=path_parts)
    download_filename = "%s %s %s %s image %dx%d from %dx%d to %dx%d.jpg" % (
        lccn,
        date,
        edition,
        sequence,
        width,
        height,
        x1,
        y1,
        x2,
        y2,
    )
    if page.iiif_client:
        # region() selects the crop; size() scales it to the requested output
        download_url = page.iiif_client.region(x=x1, y=y1, width=x2 - x1, height=y2 - y1)
        image_url = download_url.size(width=width, height=height)
    else:
        # no IIIF endpoint: both URLs point at the local tile view
        download_url = urlresolvers.reverse("chronam_page_image_tile", kwargs=path_parts)
        image_url = urlresolvers.reverse("chronam_page_image_tile", kwargs=path_parts)
    # dictionary=locals() exposes every local above to the template context
    response = render_to_response("page_print.html", dictionary=locals(),
                                  context_instance=RequestContext(request))
    return add_cache_tag(response, "lccn=%s" % lccn)
def title(request, lccn):
    """About page for a newspaper title: notes, essay, first/last issues."""
    title = get_object_or_404(
        models.Title.objects.prefetch_related("subjects", "languages", "places", "publication_dates"),
        lccn=lccn,
    )

    # HTML-escape each note, then wrap any bare URL in an external link.
    linkified_notes = [
        re.sub(r"(http(s)?://[^\s]+[^\.])", r'<a class="external" href="\1">\1</a>', html.escape(note.text))
        for note in title.notes.all()
    ]

    context = {
        "title": title,
        "page_title": "About %s" % label(title),
        "page_name": "title",
        "crumbs": create_crumbs(title),
        "related_titles": title.related_titles(),
        "succeeding_titles": title.succeeding_titles(),
        "preceeding_titles": title.preceeding_titles(),
        "notes": linkified_notes,
    }

    if title.has_issues:
        reproduction_note = title.first_issue.notes.filter(
            type="noteAboutReproduction").first()
        if reproduction_note:
            context["explanation"] = reproduction_note.text

    # adding essay info on this page if it exists
    context["first_essay"] = title.first_essay

    first_issue = title.first_issue
    context["first_issue"] = first_issue
    if first_issue:
        context["issue_date"] = first_issue.date_issued
        context["first_page_with_image"] = first_issue.first_page_with_image
        context["first_page_of_first_issue"] = title.first_issue.first_page

    last_issue = title.last_issue
    context["last_issue"] = last_issue
    if last_issue:
        context["first_page_of_last_issue"] = last_issue.first_page

    response = render(request, "title.html", context)
    return add_cache_tag(response, "lccn=%s" % lccn)
def page_print(request, lccn, date, edition, sequence, width, height, x1, y1, x2, y2):
    """Render the printable view of a page crop (coordinates passed through)."""
    page = get_page(lccn, date, edition, sequence)
    title = get_object_or_404(models.Title, lccn=lccn)
    issue = page.issue
    page_title = "%s, %s, %s" % (label(title), label(issue), label(page))
    crumbs = create_crumbs(title, issue, date, edition, page)
    host = request.get_host()
    image_credit = page.issue.batch.awardee.name
    path_parts = dict(lccn=lccn, date=date, edition=edition, sequence=sequence,
                      width=width, height=height, x1=x1, y1=y1, x2=x2, y2=y2)
    url = urlresolvers.reverse('chronam_page_print', kwargs=path_parts)
    # dictionary=locals() exposes every local above to the template context
    return render_to_response('page_print.html', dictionary=locals(),
                              context_instance=RequestContext(request))
def title(request, lccn):
    """About page for a newspaper title (locals()-based template context)."""
    title = get_object_or_404(models.Title, lccn=lccn)
    page_title = "About %s" % label(title)
    page_name = "title"
    # we call these here, because the query the db, they are not
    # cached by django's ORM, and we have some conditional logic
    # in the template that would result in them getting called more
    # than once. Short story: minimize database hits...
    related_titles = title.related_titles()
    succeeding_titles = title.succeeding_titles()
    preceeding_titles = title.preceeding_titles()
    profile_uri = 'http://www.openarchives.org/ore/html/'
    notes = []
    has_external_link = False
    # HTML-escape each note, then wrap any bare URL in an external link;
    # has_external_link records whether any note was linkified.
    for note in title.notes.all():
        org_text = html.escape(note.text)
        text = re.sub('(http(s)?://[^\s]+[^\.])',
                      r'<a class="external" href="\1">\1</a>', org_text)
        if text != org_text:
            has_external_link = True
        notes.append(text)
    if title.has_issues:
        rep_notes = title.first_issue.notes.filter(
            type="noteAboutReproduction")
        num_notes = rep_notes.count()
        if num_notes >= 1:
            explanation = rep_notes[0].text
    # adding essay info on this page if it exists
    first_essay = title.first_essay
    first_issue = title.first_issue
    if first_issue:
        issue_date = first_issue.date_issued
    crumbs = create_crumbs(title)
    # dictionary=locals() exposes every local above to the template context
    response = render_to_response('title.html', dictionary=locals(),
                                  context_instance=RequestContext(request))
    return add_cache_tag(response, "lccn=%s" % lccn)
def title(request, lccn):
    """About page for a newspaper title (variant without a cache tag)."""
    title = get_object_or_404(models.Title, lccn=lccn)
    page_title = "About %s" % label(title)
    page_name = "title"
    # we call these here, because the query the db, they are not
    # cached by django's ORM, and we have some conditional logic
    # in the template that would result in them getting called more
    # than once. Short story: minimize database hits...
    related_titles = title.related_titles()
    succeeding_titles = title.succeeding_titles()
    preceeding_titles = title.preceeding_titles()
    profile_uri = 'http://www.openarchives.org/ore/html/'
    notes = []
    has_external_link = False
    # HTML-escape each note, then wrap any bare URL in an external link;
    # has_external_link records whether any note was linkified.
    for note in title.notes.all():
        org_text = html.escape(note.text)
        text = re.sub('(http(s)?://[^\s]+[^\.])',
                      r'<a class="external" href="\1">\1</a>', org_text)
        if text != org_text:
            has_external_link = True
        notes.append(text)
    if title.has_issues:
        rep_notes = title.first_issue.notes.filter(type="noteAboutReproduction")
        num_notes = rep_notes.count()
        if num_notes >= 1:
            explanation = rep_notes[0].text
    # adding essay info on this page if it exists
    first_essay = title.first_essay
    first_issue = title.first_issue
    if first_issue:
        issue_date = first_issue.date_issued
    crumbs = create_crumbs(title)
    # dictionary=locals() exposes every local above to the template context
    response = render_to_response('title.html', dictionary=locals(),
                                  context_instance=RequestContext(request))
    return response
def issues_first_pages(request, lccn, page_number=1):
    """Paginated browse of the first page of every issue of a title.

    Variant that returns the rendered response without a cache tag.
    """
    title = get_object_or_404(models.Title, lccn=lccn)
    issues = title.issues.all()
    # exists() lets the database stop at the first matching row instead of
    # running a full COUNT(*) just to learn whether any issues are present.
    if not issues.exists():
        raise Http404("No issues for %s" % title.display_name)
    first_pages = []
    for issue in issues:
        first_pages.append(issue.first_page)
    paginator = Paginator(first_pages, 20)
    try:
        page = paginator.page(page_number)
    except InvalidPage:
        page = paginator.page(1)
    page_range_short = list(_page_range_short(paginator, page))
    page_title = 'Browse Issues: %s' % label(title)
    page_head_heading = "Browse Issues: %s" % title.display_name
    page_head_subheading = label(title)
    crumbs = create_crumbs(title)
    # dictionary=locals() exposes every local above to the template context.
    return render_to_response('issue_pages.html', dictionary=locals(),
                              context_instance=RequestContext(request))
def issues(request, lccn, year=None):
    """Calendar view of a title's issues (variant without a cache tag)."""
    title = get_object_or_404(models.Title, lccn=lccn)
    issues = title.issues.all()
    if issues.count() > 0:
        if year is None:
            # default to the year of the first issue in the queryset
            _year = issues[0].date_issued.year
        else:
            _year = int(year)
    else:
        _year = 1900  # no issues available
    year_view = HTMLCalendar(firstweekday=6, issues=issues).formatyear(_year)
    dates = issues.dates('date_issued', 'year')

    class SelectYearForm(django_forms.Form):
        # one dropdown choice per distinct publication year
        year = fields.ChoiceField(choices=((d.year, d.year) for d in dates),
                                  initial=_year)
    select_year_form = SelectYearForm()
    page_title = "Browse Issues: %s" % title.display_name
    page_name = "issues"
    crumbs = create_crumbs(title)
    # dictionary=locals() exposes every local above to the template context
    return render_to_response('issues.html', dictionary=locals(),
                              context_instance=RequestContext(request))
# NOTE(review): the statements below are an orphaned tail of an issue_pages
# view whose `def` line is missing from this slice of the file; the code is
# kept byte-identical (it references `issue`, `title`, `date`, `edition`,
# `page_number` and `request` from the missing enclosing scope).
paginator = Paginator(issue.pages.all(), 20)
try:
    page = paginator.page(page_number)
except InvalidPage:
    page = paginator.page(1)
page_range_short = list(_page_range_short(paginator, page))
# with no digitized pages, surface the "note about reproduction" if any
if not page.object_list:
    notes = issue.notes.filter(type="noteAboutReproduction")
    num_notes = notes.count()
    if num_notes >= 1:
        display_label = notes[0].label
        explanation = notes[0].text
page_title = 'All Pages: %s, %s' % (label(title), label(issue))
page_head_heading = "All Pages: %s, %s" % (title.display_name, label(issue))
page_head_subheading = label(title)
crumbs = create_crumbs(title, issue, date, edition)
profile_uri = 'http://www.openarchives.org/ore/html/'
response = render_to_response('issue_pages.html', dictionary=locals(),
                              context_instance=RequestContext(request))
return response


@cache_page(settings.DEFAULT_TTL_SECONDS)
@rdf_view
def issue_pages_rdf(request, lccn, date, edition):
    """Serialize an issue as RDF/XML for the linked-data view."""
    title, issue, page = _get_tip(lccn, date, edition)
    graph = issue_to_graph(issue)
    response = HttpResponse(graph.serialize(base=_rdf_base(request), include_base=True),
                            mimetype='application/rdf+xml')
    return response
def page(request, lccn, date, edition, sequence):
    """Display a single newspaper page, with prev/next issue navigation.

    May instead redirect to the highlighted-words URL when the visitor
    arrived from a search engine results page.
    """
    title, issue, page = _get_tip(lccn, date, edition, sequence)
    # no page image: surface the "note about reproduction" explanation if any
    if not page.jp2_filename:
        notes = page.notes.filter(type="noteAboutReproduction")
        num_notes = notes.count()
        if num_notes >= 1:
            explanation = notes[0].text
        else:
            explanation = ""
    # see if the user came from search engine results and attempt to
    # highlight words from their query by redirecting to a url that
    # has the highlighted words in it
    try:
        words = _search_engine_words(request)
        words = '+'.join(words)
        if len(words) > 0:
            path_parts = dict(lccn=lccn, date=date, edition=edition,
                              sequence=sequence)
            url = '%s?%s#%s' % (urlresolvers.reverse('chronam_page_words',
                                                     kwargs=path_parts),
                                request.GET.urlencode(), words)
            response = HttpResponseRedirect(url)
            return add_cache_tag(response, "lccn=%s" % lccn)
    except Exception as exc:
        LOGGER.error("Failed to add search highlighting based on the referred search engine query: %s",
                     exc, exc_info=True)
        if settings.DEBUG:
            raise
        # else squish the exception so the page will still get
        # served up minus the highlights
    # Calculate the previous_issue_first_page. Note: it was decided
    # that we want to skip over issues with missing pages. See ticket
    # #383.
    _issue = issue
    while True:
        previous_issue_first_page = None
        _issue = _issue.previous
        if not _issue:
            break
        previous_issue_first_page = _issue.first_page
        if previous_issue_first_page:
            break
    # do the same as above but for next_issue this time.
    _issue = issue
    while True:
        next_issue_first_page = None
        _issue = _issue.next
        if not _issue:
            break
        next_issue_first_page = _issue.first_page
        if next_issue_first_page:
            break
    page_title = "%s, %s, %s" % (label(title), label(issue), label(page))
    page_head_heading = "%s, %s, %s" % (title.display_name, label(issue), label(page))
    page_head_subheading = label(title)
    crumbs = create_crumbs(title, issue, date, edition, page)
    filename = page.jp2_abs_filename
    if filename:
        try:
            im = os.path.getsize(filename)
            image_size = filesizeformat(im)
        except OSError:
            # image file missing on disk
            image_size = "Unknown"
    image_credit = issue.batch.awardee.name
    host = request.get_host()
    profile_uri = 'http://www.openarchives.org/ore/html/'
    template = "page.html"
    text = get_page_text(page)
    # dictionary=locals() exposes every local above to the template context
    response = render_to_response(template, dictionary=locals(),
                                  context_instance=RequestContext(request))
    return add_cache_tag(response, "lccn=%s" % lccn)
def issues(request, lccn, year=None):
    """Browse a title's issues as a yearly/monthly/daily calendar or a list.

    The display style is inferred from the issue dates: if every issue falls
    on the first day of the month (or year) the view collapses to a monthly
    (or yearly) list; settings.BROWSE_VIEW == "list" forces a flat list.
    """
    title = get_object_or_404(models.Title, lccn=lccn)
    issues = title.issues.all()
    # determine whether issue is annual, monthly or daily and display
    # calendar or list appropriately
    days = []
    months = []
    years = []
    for issue in issues:
        tmpyear, tmpmonth, tmpday = str(issue.date_issued).split('-')
        days.append(tmpday)
        months.append(tmpmonth)
        years.append(tmpyear)
    date_frequency = "yearly"
    for month in months:
        if month != "01":
            date_frequency = "monthly"
    for day in days:
        if day != "01":
            date_frequency = "daily"
    if issues.count() > 0:
        if year is None:
            _year = issues[0].date_issued.year
        else:
            _year = int(year)
    else:
        _year = 1900  # no issues available
    # BUG FIX: `dates` was previously assigned only in the "daily" branch,
    # but the SelectYearForm choices generator below evaluates `dates` as
    # soon as the class body runs, raising NameError for the yearly/
    # monthly/list views. Compute it unconditionally here instead.
    dates = issues.dates('date_issued', 'year')
    if settings.BROWSE_VIEW == "list":
        date_frequency = "list"
    if date_frequency == "list":
        cal_view = get_all_dates_list(issues, lccn)
    if date_frequency == "daily":
        cal_view = HTMLCalendar(firstweekday=6, issues=issues).formatyear(_year)
    if date_frequency == "monthly":
        cal_view = get_month_list(issues, lccn)
    if date_frequency == "yearly":
        cal_view = get_year_list(issues, lccn)
    year_view = cal_view

    class SelectYearForm(django_forms.Form):
        # one dropdown choice per distinct publication year
        year = fields.ChoiceField(choices=((d.year, d.year) for d in dates),
                                  initial=_year)
    select_year_form = SelectYearForm()
    page_title = "Browse Issues: %s" % title.display_name
    page_name = "issues"
    crumbs = create_crumbs(title)
    # dictionary=locals() exposes every local above to the template context
    return render_to_response('issues.html', dictionary=locals(),
                              context_instance=RequestContext(request))
def page(request, lccn, date, edition, sequence):
    """Display a single newspaper page, with prev/next issue navigation.

    May instead redirect to the highlighted-words URL when the visitor
    arrived from a search engine results page.
    """
    title, issue, page = _get_tip(lccn, date, edition, sequence)
    # no page image: surface the "note about reproduction" explanation if any
    if not page.jp2_filename:
        notes = page.notes.filter(type="noteAboutReproduction")
        num_notes = notes.count()
        if num_notes >= 1:
            explanation = notes[0].text
        else:
            explanation = ""
    # see if the user came from search engine results and attempt to
    # highlight words from their query by redirecting to a url that
    # has the highlighted words in it
    try:
        words = _search_engine_words(request)
        words = '+'.join(words)
        if len(words) > 0:
            path_parts = dict(lccn=lccn, date=date, edition=edition,
                              sequence=sequence)
            url = '%s?%s#%s' % (urlresolvers.reverse('chronam_page_words',
                                                     kwargs=path_parts),
                                request.GET.urlencode(), words)
            response = HttpResponseRedirect(url)
            return add_cache_tag(response, "lccn=%s" % lccn)
    except Exception as exc:
        LOGGER.error(
            "Failed to add search highlighting based on the referred search engine query: %s",
            exc, exc_info=True)
        if settings.DEBUG:
            raise
        # else squish the exception so the page will still get
        # served up minus the highlights
    # Calculate the previous_issue_first_page. Note: it was decided
    # that we want to skip over issues with missing pages. See ticket
    # #383.
    _issue = issue
    while True:
        previous_issue_first_page = None
        _issue = _issue.previous
        if not _issue:
            break
        previous_issue_first_page = _issue.first_page
        if previous_issue_first_page:
            break
    # do the same as above but for next_issue this time.
    _issue = issue
    while True:
        next_issue_first_page = None
        _issue = _issue.next
        if not _issue:
            break
        next_issue_first_page = _issue.first_page
        if next_issue_first_page:
            break
    page_title = "%s, %s, %s" % (label(title), label(issue), label(page))
    page_head_heading = "%s, %s, %s" % (title.display_name, label(issue), label(page))
    page_head_subheading = label(title)
    crumbs = create_crumbs(title, issue, date, edition, page)
    filename = page.jp2_abs_filename
    if filename:
        try:
            im = os.path.getsize(filename)
            image_size = filesizeformat(im)
        except OSError:
            # image file missing on disk
            image_size = "Unknown"
    image_credit = issue.batch.awardee.name
    host = request.get_host()
    profile_uri = 'http://www.openarchives.org/ore/html/'
    template = "page.html"
    text = get_page_text(page)
    # dictionary=locals() exposes every local above to the template context
    response = render_to_response(template, dictionary=locals(),
                                  context_instance=RequestContext(request))
    return add_cache_tag(response, "lccn=%s" % lccn)