def test_pages_to_show(self):
    """Verify that pages_to_show returns a 7-item window of page numbers,
    anchored correctly at the beginning, middle, and end of the result set.

    Fix: use ``assertTrue`` instead of the deprecated ``assert_`` alias
    (deprecated since Python 2.7, removed in Python 3).
    """
    paginator = Paginator(range(300), 10)
    # range of pages at the beginning
    pages = pages_to_show(paginator, 1)
    self.assertEqual(7, len(pages), "show pages returns 7 items for first page")
    self.assertTrue(1 in pages, "show pages includes 1 for first page")
    self.assertTrue(6 in pages, "show pages includes 6 for first page")
    pages = pages_to_show(paginator, 2)
    self.assertTrue(1 in pages, "show pages for page 2 includes 1")
    self.assertTrue(2 in pages, "show pages for page 2 includes 2")
    self.assertTrue(3 in pages, "show pages for page 2 includes 3")
    # range of pages in the middle
    pages = pages_to_show(paginator, 15)
    self.assertEqual(7, len(pages), "show pages returns 7 items for middle of page result")
    self.assertTrue(15 in pages, "show pages includes current page for middle of page result")
    self.assertTrue(12 in pages, "show pages includes third page before current page for middle of page result")
    self.assertTrue(18 in pages, "show pages includes third page after current page for middle of page result")
    # range of pages at the end
    pages = pages_to_show(paginator, 30)
    self.assertEqual(7, len(pages), "show pages returns 7 items for last page")
    self.assertTrue(30 in pages, "show pages includes last page for last page of results")
    self.assertTrue(24 in pages, "show pages includes 6 pages before last page for last page of results")
def list_published(request, archive=None):
    """List all published EADs, optionally restricted to a single archive."""
    # resolve the archive slug up front; 404 on an unknown slug
    arch = get_object_or_404(Archive, slug=archive) if archive is not None else None

    findingaids = FindingAid.objects.order_by('eadid').only(
        'document_name', 'eadid', 'last_modified')
    if arch is not None:
        # match repository via a quoted full-text phrase rather than an
        # exact field comparison on the archive name
        findingaids = findingaids.filter(
            repository__fulltext_terms='"%s"' % arch.name)

    fa_subset, paginator = paginate_queryset(request, findingaids,
                                             per_page=30, orphans=5)
    context = {
        'findingaids': fa_subset,
        'querytime': [findingaids.queryTime()],
        'show_pages': pages_to_show(paginator, fa_subset.number),
        'archive': arch,
    }
    return render(request, 'fa_admin/published_list.html', context)
def list_files(request, archive):
    '''List files associated with an archive to be prepped and previewed
    for publication.  Expected to be retrieved via ajax and loaded in a
    jquery ui tab, so only returns a partial html page without site theme.

    Fix: compute the page-number window (``show_pages``) from the page
    actually delivered, instead of the raw requested page number — an
    out-of-range request (e.g. ?page=9999) falls back to the last page of
    results, and the pagination links must be anchored around that page,
    not around the nonexistent requested one.
    '''
    archive = get_object_or_404(Archive, slug=archive)
    files = files_to_publish(archive)
    # sort by last modified time, most recent first
    files = sorted(files, key=lambda file: file.mtime, reverse=True)
    paginator = Paginator(files, 30, orphans=5)
    try:
        page = int(request.GET.get('page', '1'))
    except ValueError:
        # non-numeric page parameter: fall back to the first page
        page = 1
    # If page request (9999) is out of range, deliver last page of results.
    try:
        recent_files = paginator.page(page)
    except (EmptyPage, InvalidPage):
        recent_files = paginator.page(paginator.num_pages)
    # anchor the pagination window on the page actually delivered
    show_pages = pages_to_show(paginator, recent_files.number)

    # query for publish modification time all at once
    # (more efficient than individual queries for each file)
    published = FindingAid.objects.only('document_name', 'last_modified') \
        .filter(document_name__in=[f.filename for f in recent_files.object_list])
    pubinfo = {r.document_name: r.last_modified for r in published}
    # NOTE: if needed, we can also load preview info like this:
    # preview = published.using(settings.EXISTDB_PREVIEW_COLLECTION)
    for f in recent_files.object_list:
        f.published = pubinfo.get(f.filename, None)
    return render(request, 'fa_admin/snippets/list_files_tab.html', {
        'files': recent_files,
        'show_pages': show_pages})
def search(request):
    "Simple keyword search - runs exist full-text terms query on all terms included."
    # NOTE: this view supports combined subject / keyword / repository / dao
    # filters; on success it renders search results, on a query-parse error
    # it re-renders the search form with HTTP 400.
    form = AdvancedSearchForm(request.GET)
    query_error = False
    if form.is_valid():
        # form validation requires that at least one of subject & keyword is not empty
        subject = form.cleaned_data['subject']
        keywords = form.cleaned_data['keywords']
        repository = form.cleaned_data['repository']
        dao = form.cleaned_data['dao']
        page = request.GET.get('page', 1)
        # initialize findingaid queryset - filters will be added based on search terms
        findingaids = FindingAid.objects
        # local copy of return fields (fulltext-score may be added-- don't modify master copy!)
        return_fields = fa_listfields[:]
        try:
            if subject:
                # if a subject was specified, filter on subject
                findingaids = findingaids.filter(subject__fulltext_terms=subject).order_by('list_title')
                # order by list title when searching by subject only
                # (if keywords are specified, fulltext score ordering will override this)
            if repository:
                # if repository is set, filter finding aids by requested repository
                # expecting repository value to come in as exact phrase
                findingaids = findingaids.filter(repository__fulltext_terms=repository).order_by('list_title')
            if keywords:
                # if keywords were specified, do a fulltext search
                return_fields.append('fulltext_score')
                findingaids = findingaids.filter(
                    # first do a full-text search to restrict to relevant documents
                    fulltext_terms=keywords
                ).or_filter(
                    # do an OR search on boosted fields, so that relevance score
                    # will be calculated based on boosted field values
                    fulltext_terms=keywords,
                    boostfields__fulltext_terms=keywords,
                    highlight=False,  # disable highlighting in search results list
                ).order_by('-fulltext_score')
            # optional filter: restrict to items with digital archival objects
            if dao:
                findingaids = findingaids.filter(daos__exists=True)
                # if user does not have permission to view internal daos,
                # restrict to public daos only
                if not request.user.has_perm('fa_admin.can_view_internal_dao'):
                    findingaids = findingaids.filter(public_dao_count__gte=1)
                    # NOTE: using >= filter to force a where clause because this works
                    # when what seems to be the same filter on the xpath does not
                    # (possibly an indexing issue?)
            # restrict returned fields to the (possibly extended) list
            findingaids = findingaids.only(*return_fields)
            result_subset, paginator = paginate_queryset(request, findingaids, per_page=10, orphans=5)
            # when searching by subject only, use alpha pagination
            if subject and not keywords:
                page_labels = alpha_pagelabels(paginator, findingaids, label_attribute='list_title')
            else:
                page_labels = {}
            show_pages = pages_to_show(paginator, result_subset.number, page_labels)
            # query_times = findingaids.queryTime()
            # select non-empty form values for use in template
            search_params = dict((key, value) for key, value in form.cleaned_data.iteritems() if value)
            # set query and last page in session and set it to expire on browser close
            for key, val in search_params.iteritems():
                if key == 'dao':
                    # boolean flag - no encoding needed
                    search_params[key] = val
                else:
                    # encode text values for use in urlencode below
                    # (Python 2 str/unicode handling - presumably form values
                    # are unicode here; confirm before porting to Python 3)
                    search_params[key] = val.encode('utf-8')
            last_search = search_params.copy()
            # pagination url params should NOT include page
            if 'page' in last_search:
                del(last_search['page'])
            url_params = urlencode(last_search)
            # store the current page (even if not specified in URL) for saved search
            last_search['page'] = page
            last_search = "%s?%s" % (reverse("fa:search"), urlencode(last_search))
            last_search = {"url": last_search, "txt": "Return to Search Results"}
            request.session["last_search"] = last_search
            request.session.set_expiry(0)  # set to expire when browser closes
            # ONLY keywords - not page or subject - should be included in
            # document url for search term highlighting
            if 'keywords' in search_params:
                highlight_params = urlencode({'keywords': search_params['keywords']})
            else:
                highlight_params = None
            response_context = {
                'findingaids': result_subset,
                'search_params': search_params,  # actual search terms, for display
                'url_params': url_params,  # url opts for pagination
                'highlight_params': highlight_params,  # keyword highlighting
                # 'querytime': [query_times],
                'show_pages': show_pages
            }
            if page_labels:
                # if there are page labels to show, add to context
                # other page labels handled by show_pages, but first & last are special
                response_context['first_page_label'] = page_labels[1]
                response_context['last_page_label'] = page_labels[paginator.num_pages]
            return render(request, 'fa/search_results.html', response_context)
        except ExistDBException as e:
            # for an invalid full-text query (e.g., missing close quote), eXist
            # error reports 'Cannot parse' and 'Lexical error'
            # FIXME: could/should this be a custom eXist exception class?
            query_error = True
            if 'Cannot parse' in e.message():
                messages.error(request, 'Your search query could not be parsed. ' +
                               'Please revise your search and try again.')
            else:
                # generic error message for any other exception
                messages.error(request, 'There was an error processing your search.')
    elif 'keywords' not in request.GET and 'subject' not in request.GET:
        # if form was not valid and nothing was submitted, re-initialize
        # don't tell the user that fields are required if they haven't submitted anything!
        form = AdvancedSearchForm()
    # if form is invalid (no search terms) or there was an error, display search form
    response = render(request, 'fa/search_form.html', {'form': form, 'request': request})
    # if query could not be parsed, set a 'Bad Request' status code on the response
    if query_error:
        response.status_code = 400
    return response