def list_published(request, archive=None):
    """Display all published EADs, optionally limited to a single archive.

    :param archive: optional archive slug; when supplied, results are
        restricted to finding aids whose repository matches the name of
        the :class:`Archive` with that slug (404 if no such archive)
    """
    published = FindingAid.objects.order_by('eadid') \
                                  .only('document_name', 'eadid', 'last_modified')
    repo = None
    if archive is not None:
        repo = get_object_or_404(Archive, slug=archive)
        # match the repository by full-text phrase search on the archive name
        # (a plain equality filter on repository is not used here)
        published = published.filter(repository__fulltext_terms='"%s"' % repo.name)

    page_of_results, paginator = paginate_queryset(request, published,
                                                   per_page=30, orphans=5)
    pages = pages_to_show(paginator, page_of_results.number)
    context = {
        'findingaids': page_of_results,
        'querytime': [published.queryTime()],
        'show_pages': pages,
        'archive': repo,
    }
    return render(request, 'fa_admin/published_list.html', context)
def titles_by_letter(request, letter):
    """Paginated list of finding aids by first letter in list title.
    Includes list of browse first-letters as in :meth:`browse_titles`.

    :param letter: first letter of the list titles to display
    """
    # set last browse letter and page in session so the user can return
    # to their browse results from a document page.
    # NOTE: use request.GET (as the search view does) instead of the
    # deprecated request.REQUEST, which was removed in Django 1.9
    page = request.GET.get('page', 1)
    last_search = "%s?page=%s" % (reverse("fa:titles-by-letter",
                                          kwargs={'letter': letter}), page)
    last_search = {"url": last_search, "txt": "Return to Browse Results"}
    request.session['last_search'] = last_search
    request.session.set_expiry(0)   # set to expire when browser closes

    # using ~ to do case-insensitive ordering
    fa = FindingAid.objects.filter(list_title__startswith=letter) \
                           .order_by('~list_title').only(*fa_listfields)
    fa_subset, paginator = paginate_queryset(request, fa, per_page=10, orphans=5)
    page_labels = alpha_pagelabels(paginator, fa, label_attribute='list_title')
    # No longer restricting the number of page labels shown using pages_to_show
    # (like we do for numeric pages).  That doesn't make sense here, since the
    # alpha range labels should ideally allow anyone to jump directly to the
    # section they want based on the labels.

    response_context = {
        'findingaids': fa_subset,
        'querytime': [fa.queryTime()],
        'letters': title_letters(),
        'current_letter': letter,
        'show_pages': page_labels,
    }
    if page_labels:
        # current page range label is displayed in the title bar of the
        # browser to indicate which page you are on, e.g. (Cua - Cut)
        response_context['title_range'] = page_labels[fa_subset.number]
        # no special first/last page label is required, since we are
        # displaying all labels (not limiting to 9)

    # use render() for consistency with the other views in this module,
    # instead of the removed render_to_response/context_instance pattern
    return render(request, 'fa/titles_list.html', response_context)
def search(request):
    "Simple keyword search - runs exist full-text terms query on all terms included."
    # bind the GET parameters to the advanced search form; validation
    # requires at least one of subject/keywords (see elif branch below)
    form = AdvancedSearchForm(request.GET)
    query_error = False
    if form.is_valid():
        # form validation requires that at least one of subject & keyword is not empty
        subject = form.cleaned_data['subject']
        keywords = form.cleaned_data['keywords']
        repository = form.cleaned_data['repository']
        dao = form.cleaned_data['dao']
        page = request.GET.get('page', 1)
        # initialize findingaid queryset - filters will be added based on search terms
        findingaids = FindingAid.objects
        # local copy of return fields (fulltext-score may be added-- don't modify master copy!)
        return_fields = fa_listfields[:]
        try:
            if subject:
                # if a subject was specified, filter on subject
                findingaids = findingaids.filter(subject__fulltext_terms=subject).order_by('list_title')
                # order by list title when searching by subject only
                # (if keywords are specified, fulltext score ordering will override this)
            if repository:
                # if repository is set, filter finding aids by requested repository
                # expecting repository value to come in as exact phrase
                findingaids = findingaids.filter(repository__fulltext_terms=repository).order_by('list_title')
            if keywords:
                # if keywords were specified, do a fulltext search
                return_fields.append('fulltext_score')
                findingaids = findingaids.filter(
                    # first do a full-text search to restrict to relevant documents
                    fulltext_terms=keywords
                ).or_filter(
                    # do an OR search on boosted fields, so that relevance score
                    # will be calculated based on boosted field values
                    fulltext_terms=keywords,
                    boostfields__fulltext_terms=keywords,
                    highlight=False,    # disable highlighting in search results list
                ).order_by('-fulltext_score')
            # optional filter: restrict to items with digital archival objects
            if dao:
                findingaids = findingaids.filter(daos__exists=True)
                # if user does not have permission to view internal daos,
                # restrict to public daos only
                if not request.user.has_perm('fa_admin.can_view_internal_dao'):
                    findingaids = findingaids.filter(public_dao_count__gte=1)
                    # NOTE: using >= filter to force a where clause because this works
                    # when what seems to be the same filter on the xpath does not
                    # (possibly an indexing issue?)
            # limit returned fields to the list view needs (plus fulltext_score
            # when a keyword search added it above)
            findingaids = findingaids.only(*return_fields)
            result_subset, paginator = paginate_queryset(request, findingaids, per_page=10, orphans=5)
            # when searching by subject only, use alpha pagination
            if subject and not keywords:
                page_labels = alpha_pagelabels(paginator, findingaids, label_attribute='list_title')
            else:
                page_labels = {}
            show_pages = pages_to_show(paginator, result_subset.number, page_labels)
            # query_times = findingaids.queryTime()
            # select non-empty form values for use in template
            search_params = dict((key, value) for key, value in form.cleaned_data.iteritems() if value)
            # set query and last page in session and set it to expire on browser close
            # NOTE: 'dao' is a boolean and cannot be utf-8 encoded; all other
            # form values are text, so they are encoded for the session/urls
            for key, val in search_params.iteritems():
                if key == 'dao':
                    search_params[key] = val
                else:
                    search_params[key] = val.encode('utf-8')
            last_search = search_params.copy()
            # pagination url params should NOT include page
            if 'page' in last_search:
                del(last_search['page'])
            url_params = urlencode(last_search)
            # store the current page (even if not specified in URL) for saved search
            last_search['page'] = page
            last_search = "%s?%s" % (reverse("fa:search"), urlencode(last_search))
            last_search = {"url": last_search, "txt": "Return to Search Results"}
            request.session["last_search"] = last_search
            request.session.set_expiry(0)   # set to expire when browser closes
            # ONLY keywords - not page or subject - should be included in
            # document url for search term highlighting
            if 'keywords' in search_params:
                highlight_params = urlencode({'keywords': search_params['keywords']})
            else:
                highlight_params = None
            response_context = {
                'findingaids': result_subset,
                'search_params': search_params,         # actual search terms, for display
                'url_params': url_params,               # url opts for pagination
                'highlight_params': highlight_params,   # keyword highlighting
                # 'querytime': [query_times],
                'show_pages': show_pages
            }
            if page_labels:
                # if there are page labels to show, add to context
                # other page labels handled by show_pages, but first & last are special
                response_context['first_page_label'] = page_labels[1]
                response_context['last_page_label'] = page_labels[paginator.num_pages]
            return render(request, 'fa/search_results.html', response_context)
        except ExistDBException as e:
            # for an invalid full-text query (e.g., missing close quote), eXist
            # error reports 'Cannot parse' and 'Lexical error'
            # FIXME: could/should this be a custom eXist exception class?
            query_error = True
            if 'Cannot parse' in e.message():
                messages.error(request, 'Your search query could not be parsed. ' +
                               'Please revise your search and try again.')
            else:
                # generic error message for any other exception
                messages.error(request, 'There was an error processing your search.')
    elif 'keywords' not in request.GET and 'subject' not in request.GET:
        # if form was not valid and nothing was submitted, re-initialize
        # don't tell the user that fields are required if they haven't submitted anything!
        form = AdvancedSearchForm()
    # if form is invalid (no search terms) or there was an error, display search form
    response = render(request, 'fa/search_form.html', {'form': form, 'request': request})
    # if query could not be parsed, set a 'Bad Request' status code on the response
    if query_error:
        response.status_code = 400
    return response