from glob import glob
from optparse import make_option
from os import path
from sys import stdout

from django.conf import settings
from django.core.management.base import BaseCommand


class Command(BaseCommand):
    help = 'Reindex everything.'
    args = ''

    option_list = BaseCommand.option_list + (
        make_option('-C', '--check-just-read', action='store_true', dest='check', default=False,
                    help='Check snippets utf-8'),
        make_option('-c', '--check', action='store_true', dest='check2', default=False,
                    help='Check snippets utf-8 by walking through index'),
    )

    def handle(self, *args, **opts):
        from search.index import Search, Snippets

        if opts['check']:
            # Read every snippet file directly and make sure it decodes as utf-8.
            for fn in glob(settings.SEARCH_INDEX + 'snippets/*'):
                print fn
                bkid = int(path.basename(fn))
                with open(fn) as f:
                    cont = f.read()
                try:
                    cont.decode('utf-8')
                except UnicodeDecodeError:
                    print "error in snippets %d" % bkid

        if opts['check2']:
            # Walk the whole index and verify every snippet it references.
            s = Search()
            reader = s.searcher.getIndexReader()
            numdocs = reader.numDocs()
            for did in range(numdocs):
                doc = reader.document(did)
                if doc and doc.get('book_id'):
                    bkid = int(doc.get('book_id'))
                    stdout.write("\r%d / %d" % (did, numdocs))
                    stdout.flush()
                    ss = doc.get('snippet_position')
                    sl = doc.get('snippet_length')
                    if ss and sl:
                        snips = Snippets(bkid)
                        try:
                            txt = snips.get((ss, sl))
                            assert len(txt) == sl
                        except UnicodeDecodeError:
                            stdout.write("\nerror in snippets %d\n" % bkid)
                            raise
            stdout.write("\ndone.\n")
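# Usage sketch (assumption: this module is saved as
# search/management/commands/reindex.py, so the command is named "reindex"):
#
#     ./manage.py reindex -C    # decode every snippet file as utf-8
#     ./manage.py reindex -c    # walk the index and verify each snippet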
# NOTE: import paths are assumed from the surrounding project layout.
from catalogue.models import Book
from catalogue.test_utils import WLTestCase, get_fixture
from search.index import Index, Search
import catalogue
import opds


class BookSearchTests(WLTestCase):
    def setUp(self):
        WLTestCase.setUp(self)
        index = Index()
        self.search = Search()
        # Start from an empty index.
        index.delete_query(self.search.index.query(uid="*"))
        index.index.commit()

        self.do_doktora = Book.from_xml_file(
            get_fixture('do-doktora.xml', opds))
        self.do_anusie = Book.from_xml_file(
            get_fixture('fraszka-do-anusie.xml', catalogue))

    def test_search_perfect_book_author(self):
        books = self.search.search_books(
            self.search.index.query(authors=u"sęp szarzyński"))
        assert len(books) == 1
        assert books[0].id == self.do_anusie.id

    # Slop (word-distance) matching is not available here yet either.
    def test_search_perfect_book_title(self):
        books = self.search.search_books(
            self.search.index.query(title=u"fraszka do anusie"))
        assert len(books) == 1
        assert books[0].id == self.do_anusie.id
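# The tests above run under Django's test runner (assumption: the module
# belongs to an app named "search"):
#
#     ./manage.py test search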
# NOTE: import paths are assumed from the surrounding project layout;
# remove_query_syntax_chars is a helper defined elsewhere in this module.
import json

from django.http import HttpResponse, JsonResponse
from django.shortcuts import render_to_response
from django.template import RequestContext
from django.utils.translation import ugettext as _

from catalogue.models import Book
from catalogue.utils import split_tags
from pdcounter.models import Author as PDCounterAuthor, BookStub as PDCounterBook
from search.index import Search, SearchResult
from suggest.forms import PublishingSuggestForm


def hint(request):
    prefix = request.GET.get('term', '')
    if len(prefix) < 2:
        return JsonResponse([], safe=False)
    prefix = remove_query_syntax_chars(prefix)

    search = Search()
    # Tags will constrain the results here, but a tag may live on a book or
    # on fragments. If the tags concern only the book, new ones must be on
    # the same book; if they concern themes, they must be in the same
    # fragment.

    def is_dupe(tag):
        if isinstance(tag, PDCounterAuthor):
            if filter(lambda t: t.slug == tag.slug and t != tag, tags):
                return True
        elif isinstance(tag, PDCounterBook):
            if filter(lambda b: b.slug == tag.slug, tags):
                return True
        return False

    def category_name(c):
        if c.startswith('pd_'):
            c = c[len('pd_'):]
        return _(c)

    try:
        limit = int(request.GET.get('max', ''))
    except ValueError:
        limit = -1
    else:
        if limit < 1:
            limit = -1

    data = []

    tags = search.hint_tags(prefix, pdcounter=True)
    tags = filter(lambda t: not is_dupe(t), tags)
    for t in tags:
        if not limit:
            break
        limit -= 1
        data.append({
            'label': t.name,
            'category': category_name(t.category),
            'id': t.id,
            'url': t.get_absolute_url()
        })
    if limit:
        books = search.hint_books(prefix)
        for b in books:
            if not limit:
                break
            limit -= 1
            data.append({
                'label': b.title,
                'category': _('book'),
                'id': b.id,
                'url': b.get_absolute_url()
            })

    callback = request.GET.get('callback', None)
    if callback:
        return HttpResponse("%s(%s);" % (callback, json.dumps(data)),
                            content_type="application/json; charset=utf-8")
    else:
        return JsonResponse(data, safe=False)
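# Shape of the hint() payload, with illustrative values only (the keys match
# the dicts built above):
#
#     [{"label": "Jan Kochanowski", "category": "author",
#       "id": 42, "url": "/autor/jan-kochanowski/"},
#      {"label": "Fraszki", "category": "book",
#       "id": 7, "url": "/katalog/lektura/fraszki/"}]
#
# With ?callback=cb the same payload is wrapped as cb([...]); for JSONP
# clients; otherwise it is returned as plain JSON.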
def main(request):
    query = request.GET.get('q', '')

    if len(query) < 2:
        return render_to_response(
            'catalogue/search_too_short.html', {'prefix': query},
            context_instance=RequestContext(request))
    elif len(query) > 256:
        return render_to_response(
            'catalogue/search_too_long.html', {'prefix': query},
            context_instance=RequestContext(request))

    query = remove_query_syntax_chars(query)

    search = Search()

    theme_terms = search.index.analyze(text=query, field="themes_pl") \
        + search.index.analyze(text=query, field="themes")

    # Collect tag hints to display alongside the results.
    tags = search.hint_tags(query, pdcounter=True, prefix=False)
    tags = split_tags(tags)

    author_results = search.search_phrase(query, 'authors', book=True)
    translator_results = search.search_phrase(query, 'translators', book=True)
    title_results = search.search_phrase(query, 'title', book=True)

    # Boost the main author/title results with a mixed search, and save the
    # mixed results that matched nothing else for the end of the list.
    author_title_mixed = search.search_some(
        query, ['authors', 'translators', 'title', 'tags'], query_terms=theme_terms)
    author_title_rest = []

    for b in author_title_mixed:
        also_in_mixed = filter(lambda ba: ba.book_id == b.book_id,
                               author_results + translator_results + title_results)
        for b2 in also_in_mixed:
            b2.boost *= 1.1
        if not also_in_mixed:
            author_title_rest.append(b)

    # Do a phrase search but a term search as well; this can give us better
    # snippets than search_everywhere, because the query uses only one field.
    text_phrase = SearchResult.aggregate(
        search.search_phrase(query, 'text', snippets=True, book=False),
        search.search_some(query, ['text'], snippets=True, book=False, query_terms=theme_terms))

    everywhere = search.search_everywhere(query, query_terms=theme_terms)

    def already_found(results):
        def f(e):
            for r in results:
                if e.book_id == r.book_id:
                    e.boost = 0.9
                    # Appending to the temporary concatenated list only
                    # affects subsequent duplicate checks.
                    results.append(e)
                    return True
            return False
        return f
    f = already_found(author_results + translator_results + title_results + text_phrase)
    everywhere = filter(lambda x: not f(x), everywhere)

    author_results = SearchResult.aggregate(author_results)
    translator_results = SearchResult.aggregate(translator_results)
    title_results = SearchResult.aggregate(title_results)

    everywhere = SearchResult.aggregate(everywhere, author_title_rest)

    for field, res in [('authors', author_results),
                       ('translators', translator_results),
                       ('title', title_results),
                       ('text', text_phrase),
                       ('text', everywhere)]:
        res.sort(reverse=True)
        for r in res:
            search.get_snippets(r, query, field, 3)

    suggestion = u''

    def ensure_exists(r):
        try:
            return r.book
        except Book.DoesNotExist:
            return False

    # Make sure the books still exist, then re-sort each result group.
    author_results = filter(ensure_exists, author_results)
    translator_results = filter(ensure_exists, translator_results)
    title_results = filter(ensure_exists, title_results)
    text_phrase = filter(ensure_exists, text_phrase)
    everywhere = filter(ensure_exists, everywhere)

    results = author_results + translator_results + title_results + text_phrase + everywhere

    for res in (author_results, translator_results, title_results, text_phrase, everywhere):
        res.sort(reverse=True)

    # We don't want to redirect to the book text; display the results page
    # even for a single hit.
#    if len(results) == 1:
#        fragment_hits = filter(lambda h: 'fragment' in h, results[0].hits)
#        if len(fragment_hits) == 1:
#            #anchor = fragment_hits[0]['fragment']
#            #frag = Fragment.objects.get(anchor=anchor)
#            return HttpResponseRedirect(fragment_hits[0]['fragment'].get_absolute_url())
#        return HttpResponseRedirect(results[0].book.get_absolute_url())

    if len(results) == 0:
        form = PublishingSuggestForm(initial={"books": query + ", "})
        return render_to_response(
            'catalogue/search_no_hits.html',
            {
                'tags': tags,
                'prefix': query,
                'form': form,
                'did_you_mean': suggestion
            },
            context_instance=RequestContext(request))

    return render_to_response(
        'catalogue/search_multiple_hits.html',
        {
            'tags': tags,
            'prefix': query,
            'results': {
                'author': author_results,
                'translator': translator_results,
                'title': title_results,
                'content': text_phrase,
                'other': everywhere
            },
            'did_you_mean': suggestion
        },
        context_instance=RequestContext(request))
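# Condensed view of the boosting scheme in main(), numbers as in the code:
#
#     hit.boost *= 1.1   # book also matched the mixed author/title search
#     hit.boost = 0.9    # "everywhere" hit for a book already found
#
# Already-found books are then filtered out of the "everywhere" group, so
# each book surfaces only in its highest-priority result group.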