def get_section_data(self, section):
    """Run the configured search for *section* and package the results.

    Looks up the section's configuration in ``self.search_sections``,
    applies its filter/exclude rules plus the global query and optional
    date window, and returns a copy of the configuration augmented with
    highlighted results, a result count, and section identifiers.
    """
    config = self.search_sections[section]
    filter_spec = config.get("filter", {})
    exclude_spec = config.get("exclude", {})

    sqs = SearchQuerySet().models(config["model"])
    if exclude_spec:
        sqs = sqs.exclude(**exclude_spec)
    sqs = sqs.filter(
        content=AutoQuery(self.query),
        *filter_spec.get("args", []),
        **filter_spec.get("kwargs", {})
    )

    # Optional inclusive date window over start_date.
    if self.start_date_range:
        sqs = sqs.filter(start_date__gte=self.start_date_range)
    if self.end_date_range:
        sqs = sqs.filter(start_date__lte=self.end_date_range)
    # Newest first when ordering by date.
    if self.order == "date":
        sqs = sqs.order_by("-start_date")

    data = config.copy()
    data["results"] = sqs.highlight()
    data["results_count"] = data["results"].count()
    data["section"] = section
    data["section_dashes"] = section.replace("_", "-")
    return data
def get_section_data(self, section):
    '''Build the result bundle for one search section.

    Applies the section's configured filters/excludes plus the current
    query and optional start_date window, then returns a copy of the
    section configuration with highlighted results and counts added.
    '''
    section_conf = self.search_sections[section]
    filter_conf = section_conf.get('filter', {})
    exclude_conf = section_conf.get('exclude', {})

    results = SearchQuerySet().models(section_conf['model'])
    if exclude_conf:
        results = results.exclude(**exclude_conf)
    results = results.filter(
        content=AutoQuery(self.query),
        *filter_conf.get('args', []),
        **filter_conf.get('kwargs', {})
    )

    # Optional inclusive date window over start_date.
    if self.start_date_range:
        results = results.filter(start_date__gte=self.start_date_range)
    if self.end_date_range:
        results = results.filter(start_date__lte=self.end_date_range)
    # Newest first when ordering by date.
    if self.order == 'date':
        results = results.order_by('-start_date')

    payload = section_conf.copy()
    payload['results'] = results.highlight()
    payload['results_count'] = payload['results'].count()
    payload['section'] = section
    payload['section_dashes'] = section.replace('_', '-')
    return payload
def get_object_list(self, request, **kwargs):
    """Return a highlighted, eagerly-loaded SearchQuerySet for ``query``.

    Supports an optional ``ct`` GET parameter of colon-separated
    ``app_label.ModelName`` labels restricting which models are
    searched, and hides 'basket' playlists from everyone but their
    owner.
    """
    q = kwargs.get('query', '')
    sqs = SearchQuerySet().filter(text_auto=AutoQuery(q))

    # Optionally restrict to the models named in ?ct=app.Model:app.Model
    limit_models = request.GET.get('ct', None)
    if limit_models:
        search_models = []
        for model in limit_models.split(':'):
            try:
                search_models.append(apps.get_model(*model.split('.')))
            except LookupError:
                # Silently skip unknown model labels rather than
                # failing the whole search.
                pass
        sqs = sqs.models(*search_models)

    # TODO: nasty hack here. filter out 'basket' playlists that do not
    # belong to the authenticated user
    if request.user.is_authenticated():
        sqs = sqs.exclude(Q(type='basket') | ~Q(user_pk=request.user.pk))
    else:
        sqs = sqs.exclude(type='basket')

    sqs = sqs.highlight().load_all()
    return sqs
def get_section_data(self, section):
    '''Return search results and metadata for a single section.

    Same contract as the sibling implementations, but matches the
    query fuzzily via ``self.generate_fuzzy_query_object``.
    '''
    conf = self.search_sections[section]
    extra_filter = conf.get('filter', {})
    extra_exclude = conf.get('exclude', {})

    sqs = SearchQuerySet().models(conf['model'])
    if extra_exclude:
        sqs = sqs.exclude(**extra_exclude)
    # Fuzzy-match the user's query, plus any section-specific filters.
    sqs = sqs.filter(
        content=self.generate_fuzzy_query_object(self.query),
        *extra_filter.get('args', []),
        **extra_filter.get('kwargs', {})
    )

    if self.start_date_range:
        sqs = sqs.filter(start_date__gte=self.start_date_range)
    if self.end_date_range:
        sqs = sqs.filter(start_date__lte=self.end_date_range)
    if self.order == 'date':
        sqs = sqs.order_by('-start_date')

    section_data = conf.copy()
    section_data['results'] = sqs.highlight()
    section_data['results_count'] = section_data['results'].count()
    section_data['section'] = section
    section_data['section_dashes'] = section.replace('_', '-')
    return section_data
def search(query, limit=20, models=None, as_list=False):
    """Run a title search for ``query`` and return up to ``limit`` hits.

    Returns an empty list when the query is blank or when the search
    backend raises.  ``models`` may be a single model or a list/tuple of
    models to restrict the search.
    """
    print("Searching for: %s" % query)
    if not query:
        return []
    try:
        sqs = SearchQuerySet()
        # Clean the raw query for the backend.
        q = sqs.query.clean(query)
        # Optionally restrict the searched models.
        if models is not None:
            if not isinstance(models, (list, tuple)):
                models = (models,)
            sqs = sqs.models(*models)
        # Ignore single-letter tokens.
        keywords = [k for k in q.split() if len(k) > 1]
        # Filter word by word, boosting title matches.
        for keyword in keywords:
            sqs.query.add_filter(SQ(title="%s^2.0" % keyword))
        # Order by score and popularity.
        sqs = sqs.order_by('-score', '-popularity')
        # Add highlighting and cap the result count.
        sqs = sqs.highlight()
        sqs = sqs[:limit]
    except Exception as e:
        # On error show an empty result.
        print(e)  # TODO: log error
        sqs = []
    # BUG FIX: the original fell off the end and implicitly returned None.
    return sqs
def search(query, limit=20, models=None, as_list=False):
    """Search the index by title for ``query``, returning ``limit`` hits.

    Blank queries and backend errors both yield an empty list.
    ``models`` may be one model or a list/tuple of models to search.
    """
    print("Searching for: %s" % query)
    if not query:
        return []
    try:
        sqs = SearchQuerySet()
        # Clean the raw query for the backend.
        q = sqs.query.clean(query)
        # Optionally restrict the searched models.
        if models is not None:
            if not isinstance(models, (list, tuple)):
                models = (models,)
            sqs = sqs.models(*models)
        # Ignore single-letter tokens.
        keywords = [k for k in q.split() if len(k) > 1]
        # Filter word by word, boosting title matches.
        for keyword in keywords:
            sqs.query.add_filter(SQ(title="%s^2.0" % keyword))
        # Order by score and popularity, then highlight and cap.
        sqs = sqs.order_by('-score', '-popularity')
        sqs = sqs.highlight()
        sqs = sqs[:limit]
    except Exception as e:
        # On error show an empty result.
        print(e)  # TODO: log error
        sqs = []
    # BUG FIX: the original never returned sqs (implicitly returned None).
    return sqs
def parse_query(self):
    """Build a SearchQuerySet from the ``q`` request parameter.

    Each whitespace-separated statement is interpreted by prefix:
    ``o:`` ordering, ``n:`` part name, ``t:`` part type, ``a:`` author,
    ``h:`` enable highlighting; anything else is a free-text keyword.
    Per-field clauses are OR-ed together, then AND-ed across fields.
    """
    queryset = SearchQuerySet()
    statements = self.request.query_params.get('q', '').split()
    keywords = []
    types = []
    names = []
    orderings = []
    authors = []
    highlight = False
    for statement in statements:
        if statement.startswith('o:'):
            orderings.append(statement[2:])
        # BUG FIX: this branch was a separate `if`, so an `o:` statement
        # also fell through the second chain's `else` and was appended
        # to `keywords` as well.
        elif statement.startswith('n:'):
            names.append(statement[2:])
        elif statement.startswith('t:'):
            types.append(statement[2:])
        elif statement.startswith('h:'):
            highlight = True
        elif statement.startswith('a:'):
            authors.append(statement[2:])
        else:
            keywords.append(statement)

    # AND together one OR-clause per populated field.
    condition = None
    for field, items in (('text', keywords), ('part_name', names),
                         ('part_type', types), ('author', authors)):
        if items:
            clause = reduce(
                or_,
                map(lambda kw: SQ(**{field + '__contains': kw}), items))
            if condition is not None:
                condition &= clause
            else:
                condition = clause
    if condition is not None:
        queryset = queryset.filter(condition)

    # Fall back to the default ordering, dropping duplicate keys.
    if not orderings:
        orderings = ['-weight', '-creation_date']
    orderings = unique(orderings)
    queryset = queryset.order_by(*orderings)

    if highlight:
        queryset = queryset.highlight(
            pre_tags=['<span class="highlight">'],
            post_tags=['</span>'])
    return queryset
def autocomplete(request, limit=10):
    """JSON autocomplete endpoint over the Film and Person indexes.

    Returns up to ``limit`` highlighted matches for the ``term`` GET
    parameter, optionally wrapped in a JSONP ``callback``.
    """
    try:
        sqs = SearchQuerySet()
        # Clean the raw term for the search backend.
        q = sqs.query.clean(request.GET.get('term', ''))
        sqs = sqs.models(Film, Person)
        # Ignore single-letter tokens.
        keywords = [k for k in q.split() if len(k) > 1]
        # Filter word by word, boosting title matches.
        for keyword in keywords:
            sqs.query.add_filter(SQ(title="%s^2.0" % keyword))
        # Order by score and popularity, then highlight and cap.
        sqs = sqs.order_by('-score', '-popularity')
        sqs = sqs.highlight()
        sqs = sqs[:limit]
    except Exception:
        # BUG FIX: was a bare `except:`, which also swallowed
        # SystemExit/KeyboardInterrupt. On error show an empty result.
        sqs = []

    # Map each hit to the payload shape the autocomplete widget expects.
    mp = {
        'film': lambda x: {
            'category': 'film',
            'url': x.get_absolute_url(),
            'value': "%s (%s)" % (x.get_title(), x.release_year),
            'description': ','.join([str(t) for t in x.top_directors()[:2]]),
            'image': x.hires_image.url if x.hires_image
                     else x.image.url if x.image
                     else urls['DEFAULT_POSTER'],
        },
        'person': lambda x: {
            'category': 'person',
            'url': x.get_absolute_url(),
            'value': '%s %s' % (x.name, x.surname),
            'description': '',
            'image': x.image.url if x.image else urls['DEFAULT_ACTOR'],
        },
    }
    content = simplejson.dumps([mp[x.model_name](x.object) for x in sqs])
    # JSONP support for cross-domain widget use.
    if 'callback' in request.GET:
        content = "%s(%s)" % (request.GET['callback'], content)
    return http.HttpResponse(content, mimetype='application/json')
def get_data(self):
    """Return highlighted Hansard entries matching the current query."""
    from pombola.hansard.models import Entry

    section_info = {"model": Entry, "title": "Hansard"}

    entries = SearchQuerySet().models(section_info["model"])
    entries = entries.filter(content=AutoQuery(self.query))
    # "date" = newest first, "adate" = oldest first.
    if self.order == "date":
        entries = entries.order_by("-start_date")
    elif self.order == "adate":
        entries = entries.order_by("start_date")

    data = section_info.copy()
    data["results"] = entries.highlight()
    data["results_count"] = data["results"].count()
    return data
def get_data(self):
    '''Search Hansard entries for self.query and package the results.'''
    from pombola.hansard.models import Entry

    result = {
        'model': Entry,
        'title': 'Hansard',
    }

    sqs = SearchQuerySet().models(result['model']).filter(
        content=AutoQuery(self.query))

    # 'date' orders newest first; 'adate' oldest first.
    order_map = {'date': '-start_date', 'adate': 'start_date'}
    if self.order in order_map:
        sqs = sqs.order_by(order_map[self.order])

    result['results'] = sqs.highlight()
    result['results_count'] = result['results'].count()
    return result
def get_data(self):
    '''Return Hansard search results for the current query.

    Orders newest-first for 'date' and oldest-first for 'adate'.
    '''
    from pombola.hansard.models import Entry

    base = {
        'model': Entry,
        'title': 'Hansard',
    }

    hits = (SearchQuerySet()
            .models(base['model'])
            .filter(content=AutoQuery(self.query)))
    if self.order == 'date':
        hits = hits.order_by('-start_date')
    if self.order == 'adate':
        hits = hits.order_by('start_date')

    out = base.copy()
    out['results'] = hits.highlight()
    out['results_count'] = out['results'].count()
    return out
def __init__(self, *args, **kwargs):
    """Set up the faceted speech search view.

    Dynamically computes one date-range facet per year from
    ``settings.OM_START_YEAR`` up to the current year, and installs a
    highlighted SearchQuerySet plus the speech search form before
    delegating to the parent view.
    """
    # Dynamically compute date ranges for faceted search.
    this_year = datetime.today().year
    for year in xrange(settings.OM_START_YEAR, this_year + 1):
        self.DATE_INTERVALS_RANGES[year] = self._build_date_range(year)

    sqs = SearchQuerySet().filter(django_ct='acts.speech').facet('month')
    # One query facet per computed yearly date range.
    # (Loop variable renamed from `range`, which shadowed the builtin.)
    for year, date_range in self.DATE_INTERVALS_RANGES.items():
        sqs = sqs.query_facet('date', date_range['qrange'])
    kwargs['searchqueryset'] = sqs.highlight()

    # Needed to switch out the default form class.
    if kwargs.get('form_class') is None:
        kwargs['form_class'] = SpeechSearchForm

    super(SpeechSearchView, self).__init__(*args, **kwargs)
def search(self):
    """Run the search, highlighting matches with <mark> tags.

    When "exact" is requested, queries the unstemmed ``text_exact``
    field directly; otherwise defers to the parent form's search.
    Results may additionally be narrowed to councils similar to a
    chosen one, or to a single council by name.
    """
    highlight_opts = {
        "hl.simple.pre": "<mark>",
        "hl.simple.post": "</mark>",
        "hl.fragsize": 400,
        "hl.snippets": 3,
    }

    data = self.cleaned_data
    sqs = self.no_query_found()

    if data["q"] and data["exact"] is True:
        # Use the text_exact field in solr which doesn't stem.
        highlight_opts["hl.fl"] = "text_exact"
        sqs = SearchQuerySet().filter(text_exact=Exact(data["q"]))
        if self.load_all:
            sqs = sqs.load_all()
    else:
        sqs = super(HighlightedSearchForm, self).search()

    if data["similar_type"]:
        # Restrict to the councils ranked closest (top 15) to the
        # selected one under the chosen comparison type.
        cut_off = 15
        similar_names = Distance.objects.filter(
            council_a=data["similar_council"],
            type=data["similar_type"],
            position__lte=cut_off,
        ).values_list("council_b__name", flat=True)
        name_clauses = [f'council_name:"{name}"' for name in similar_names]
        sqs = sqs.narrow(" OR ".join(name_clauses))

    if data["council_name"]:
        # narrow makes use of fq rather than q
        sqs = sqs.narrow("council_name:%s" % data["council_name"])

    return sqs.highlight(**highlight_opts)
def __init__(self, *args, **kwargs):
    """Set up the faceted act search view.

    Dynamically computes one publication-date facet per year from
    ``settings.OM_START_YEAR`` up to the current year, and installs a
    highlighted, heavily faceted SearchQuerySet plus the act search
    form before delegating to the parent view.
    """
    # Dynamically compute date ranges for faceted search.
    this_year = datetime.today().year
    for year in xrange(settings.OM_START_YEAR, this_year + 1):
        self.DATE_INTERVALS_RANGES[year] = self._build_date_range(year)

    sqs = SearchQuerySet().filter(django_ct='acts.act')\
        .facet('act_type').facet('is_key').facet('is_proposal').facet('has_locations')\
        .facet('initiative').facet('organ').facet('month').facet('status')\
        .facet('iter_duration').facet('iter_duration_bin').facet('multiple_supporters')\
        .facet('category').facet('tag').facet('location')
    # One query facet per computed yearly publication-date range.
    # (Loop variable renamed from `range`, which shadowed the builtin.)
    for year, date_range in self.DATE_INTERVALS_RANGES.items():
        sqs = sqs.query_facet('pub_date', date_range['qrange'])
    kwargs['searchqueryset'] = sqs.highlight()

    # Needed to switch out the default form class.
    if kwargs.get('form_class') is None:
        kwargs['form_class'] = ActSearchForm

    super(ActSearchView, self).__init__(*args, **kwargs)
def regulation_results_page(self, request):
    """Render the regulation search results page.

    Reads ``q`` (search terms), ``regs`` (part numbers to restrict to)
    and ``order`` ('relevance' by default, or 'regulation') from the
    query string, runs a haystack search over SectionParagraph
    documents, stores the assembled payload on ``self.results``, and
    renders the page template with a paginated result list.
    """
    all_regs = Part.objects.order_by('part_number')
    regs = []
    order = request.GET.get('order', 'relevance')
    if 'regs' in request.GET and request.GET.get('regs'):
        regs = request.GET.getlist('regs')
    search_query = request.GET.get('q', '')  # haystack cleans this string
    payload = {
        'search_query': search_query,
        'results': [],
        'total_results': 0,
        'regs': regs,
        'all_regs': [],
    }
    # No query: render immediately with the empty payload.
    if not search_query:
        self.results = payload
        return TemplateResponse(
            request, self.get_template(request), self.get_context(request))
    sqs = SearchQuerySet().filter(content=search_query)
    # Per-regulation hit counts (computed before any part filtering)
    # drive the filter sidebar.
    payload.update({
        'all_regs': [{
            'letter_code': reg.letter_code,
            'id': reg.part_number,
            'num_results': sqs.filter(
                part=reg.part_number).models(SectionParagraph).count(),
            'selected': reg.part_number in regs
        } for reg in all_regs]
    })
    payload.update({
        'total_count': sum(
            [reg['num_results'] for reg in payload['all_regs']])
    })
    # Narrow the result set to the selected regulation part(s).
    if len(regs) == 1:
        sqs = sqs.filter(part=regs[0])
    elif regs:
        sqs = sqs.filter(part__in=regs)
    if order == 'regulation':
        sqs = sqs.order_by('part', 'section_order')
    sqs = sqs.highlight(
        pre_tags=['<strong>'],
        post_tags=['</strong>']).models(SectionParagraph)
    # Flatten each hit into the template-friendly dict the page expects.
    for hit in sqs:
        letter_code = LETTER_CODES.get(hit.part)
        snippet = Markup(" ".join(hit.highlighted))
        hit_payload = {
            'id': hit.paragraph_id,
            'part': hit.part,
            'reg': 'Regulation {}'.format(letter_code),
            'label': hit.title,
            'snippet': snippet,
            'url': "{}{}/{}/#{}".format(
                self.parent().url, hit.part, hit.section_label,
                hit.paragraph_id),
        }
        payload['results'].append(hit_payload)
    payload.update({'current_count': sqs.count()})
    self.results = payload
    context = self.get_context(request)
    # Paginate using validated GET parameters.
    num_results = validate_num_results(request)
    paginator = Paginator(payload['results'], num_results)
    page_number = validate_page_number(request, paginator)
    paginated_page = paginator.page(page_number)
    context.update({
        'current_count': payload['current_count'],
        'total_count': payload['total_count'],
        'paginator': paginator,
        'current_page': page_number,
        'num_results': num_results,
        'order': order,
        'results': paginated_page,
        'show_filters': any(
            reg['selected'] is True for reg in payload['all_regs'])
    })
    return TemplateResponse(request, self.get_template(request), context)
class SearchQuerySetTestCase(TestCase):
    """Exercise SearchQuerySet against the mock search backend.

    setUp swaps the default connection's UnifiedIndex for one built
    from mock indexes and fills the backend from MockModel, so every
    test sees exactly 23 indexed results unless it rebuilds the index
    itself.
    """

    fixtures = ["base_data.json", "bulk_data.json"]

    def setUp(self):
        super(SearchQuerySetTestCase, self).setUp()

        # Stow.
        self.old_unified_index = connections["default"]._index
        self.ui = UnifiedIndex()
        self.bmmsi = BasicMockModelSearchIndex()
        self.cpkmmsi = CharPKMockModelSearchIndex()
        self.ui.build(indexes=[self.bmmsi, self.cpkmmsi])
        connections["default"]._index = self.ui

        # Update the "index".
        backend = connections["default"].get_backend()
        backend.clear()
        backend.update(self.bmmsi, MockModel.objects.all())

        self.msqs = SearchQuerySet()

        # Stow.
        reset_search_queries()

    def tearDown(self):
        # Restore.
        connections["default"]._index = self.old_unified_index
        super(SearchQuerySetTestCase, self).tearDown()

    def test_len(self):
        self.assertEqual(len(self.msqs), 23)

    def test_repr(self):
        reset_search_queries()
        self.assertEqual(len(connections["default"].queries), 0)
        self.assertRegexpMatches(
            repr(self.msqs),
            r"^<SearchQuerySet: query=<test_haystack.mocks.MockSearchQuery object"
            r" at 0x[0-9A-Fa-f]+>, using=None>$",
        )

    def test_iter(self):
        reset_search_queries()
        self.assertEqual(len(connections["default"].queries), 0)
        msqs = self.msqs.all()
        results = [int(res.pk) for res in iter(msqs)]
        self.assertEqual(results, [res.pk for res in MOCK_SEARCH_RESULTS[:23]])
        self.assertEqual(len(connections["default"].queries), 3)

    def test_slice(self):
        reset_search_queries()
        self.assertEqual(len(connections["default"].queries), 0)
        results = self.msqs.all()
        self.assertEqual([int(res.pk) for res in results[1:11]], [res.pk for res in MOCK_SEARCH_RESULTS[1:11]])
        self.assertEqual(len(connections["default"].queries), 1)

        reset_search_queries()
        self.assertEqual(len(connections["default"].queries), 0)
        results = self.msqs.all()
        self.assertEqual(int(results[22].pk), MOCK_SEARCH_RESULTS[22].pk)
        self.assertEqual(len(connections["default"].queries), 1)

    def test_manual_iter(self):
        results = self.msqs.all()

        reset_search_queries()
        self.assertEqual(len(connections["default"].queries), 0)

        check = [result.pk for result in results._manual_iter()]
        self.assertEqual(
            check,
            [
                "1",
                "2",
                "3",
                "4",
                "5",
                "6",
                "7",
                "8",
                "9",
                "10",
                "11",
                "12",
                "13",
                "14",
                "15",
                "16",
                "17",
                "18",
                "19",
                "20",
                "21",
                "22",
                "23",
            ],
        )

        self.assertEqual(len(connections["default"].queries), 3)

        reset_search_queries()
        self.assertEqual(len(connections["default"].queries), 0)

        # Test to ensure we properly fill the cache, even if we get fewer
        # results back (not a handled model) than the hit count indicates.
        # This will hang indefinitely if broken.
        old_ui = self.ui
        self.ui.build(indexes=[self.cpkmmsi])
        connections["default"]._index = self.ui
        self.cpkmmsi.update()

        results = self.msqs.all()
        loaded = [result.pk for result in results._manual_iter()]
        self.assertEqual(loaded, ["sometext", "1234"])
        self.assertEqual(len(connections["default"].queries), 1)

        connections["default"]._index = old_ui

    def test_cache_is_full(self):
        reset_search_queries()
        self.assertEqual(len(connections["default"].queries), 0)
        self.assertEqual(self.msqs._cache_is_full(), False)
        results = self.msqs.all()
        # Consuming the iterator should fully populate the result cache.
        fire_the_iterator_and_fill_cache = list(results)
        self.assertEqual(23, len(fire_the_iterator_and_fill_cache))
        self.assertEqual(results._cache_is_full(), True)
        self.assertEqual(len(connections["default"].queries), 4)

    def test_all(self):
        sqs = self.msqs.all()
        self.assertTrue(isinstance(sqs, SearchQuerySet))

    def test_filter(self):
        sqs = self.msqs.filter(content="foo")
        self.assertTrue(isinstance(sqs, SearchQuerySet))
        self.assertEqual(len(sqs.query.query_filter), 1)

    def test_exclude(self):
        sqs = self.msqs.exclude(content="foo")
        self.assertTrue(isinstance(sqs, SearchQuerySet))
        self.assertEqual(len(sqs.query.query_filter), 1)

    def test_order_by(self):
        sqs = self.msqs.order_by("foo")
        self.assertTrue(isinstance(sqs, SearchQuerySet))
        self.assertTrue("foo" in sqs.query.order_by)

    def test_models(self):
        # Stow.
        old_unified_index = connections["default"]._index
        ui = UnifiedIndex()
        bmmsi = BasicMockModelSearchIndex()
        bammsi = BasicAnotherMockModelSearchIndex()
        ui.build(indexes=[bmmsi, bammsi])
        connections["default"]._index = ui

        msqs = SearchQuerySet()

        sqs = msqs.all()
        self.assertTrue(isinstance(sqs, SearchQuerySet))
        self.assertEqual(len(sqs.query.models), 0)

        sqs = msqs.models(MockModel)
        self.assertTrue(isinstance(sqs, SearchQuerySet))
        self.assertEqual(len(sqs.query.models), 1)

        sqs = msqs.models(MockModel, AnotherMockModel)
        self.assertTrue(isinstance(sqs, SearchQuerySet))
        self.assertEqual(len(sqs.query.models), 2)

        # This will produce a warning.
        ui.build(indexes=[bmmsi])
        sqs = msqs.models(AnotherMockModel)
        self.assertTrue(isinstance(sqs, SearchQuerySet))
        self.assertEqual(len(sqs.query.models), 1)

    def test_result_class(self):
        sqs = self.msqs.all()
        self.assertTrue(issubclass(sqs.query.result_class, SearchResult))

        # Custom class.
        class IttyBittyResult(object):
            pass

        sqs = self.msqs.result_class(IttyBittyResult)
        self.assertTrue(issubclass(sqs.query.result_class, IttyBittyResult))

        # Reset to default.
        sqs = self.msqs.result_class(None)
        self.assertTrue(issubclass(sqs.query.result_class, SearchResult))

    def test_boost(self):
        sqs = self.msqs.boost("foo", 10)
        self.assertTrue(isinstance(sqs, SearchQuerySet))
        self.assertEqual(len(sqs.query.boost.keys()), 1)

    def test_highlight(self):
        sqs = self.msqs.highlight()
        self.assertTrue(isinstance(sqs, SearchQuerySet))
        self.assertEqual(sqs.query.highlight, True)

    def test_spelling_override(self):
        sqs = self.msqs.filter(content="not the spellchecking query")
        self.assertEqual(sqs.query.spelling_query, None)
        sqs = self.msqs.set_spelling_query("override")
        self.assertTrue(isinstance(sqs, SearchQuerySet))
        self.assertEqual(sqs.query.spelling_query, "override")

    def test_spelling_suggestions(self):
        # Test the case where spelling support is disabled.
        sqs = self.msqs.filter(content="Indx")
        self.assertTrue(isinstance(sqs, SearchQuerySet))
        self.assertEqual(sqs.spelling_suggestion(), None)
        self.assertEqual(sqs.spelling_suggestion("indexy"), None)

    def test_raw_search(self):
        self.assertEqual(len(self.msqs.raw_search("foo")), 23)
        self.assertEqual(len(self.msqs.raw_search("(content__exact:hello AND content__exact:world)")), 23)

    def test_load_all(self):
        # Models with character primary keys.
        sqs = SearchQuerySet()
        sqs.query.backend = CharPKMockSearchBackend("charpk")
        results = sqs.load_all().all()
        self.assertEqual(len(results._result_cache), 0)
        results._fill_cache(0, 2)
        self.assertEqual(len([result for result in results._result_cache if result is not None]), 2)

        # If nothing is handled, you get nothing.
        old_ui = connections["default"]._index
        ui = UnifiedIndex()
        ui.build(indexes=[])
        connections["default"]._index = ui

        sqs = self.msqs.load_all()
        self.assertTrue(isinstance(sqs, SearchQuerySet))
        self.assertEqual(len(sqs), 0)

        connections["default"]._index = old_ui

        # For full tests, see the solr_backend.

    def test_load_all_read_queryset(self):
        # Stow.
        old_ui = connections["default"]._index
        ui = UnifiedIndex()
        gafmmsi = GhettoAFifthMockModelSearchIndex()
        ui.build(indexes=[gafmmsi])
        connections["default"]._index = ui
        gafmmsi.update()

        sqs = SearchQuerySet()
        results = sqs.load_all().all()
        results.query.backend = ReadQuerySetMockSearchBackend("default")
        results._fill_cache(0, 2)

        # The deleted result isn't returned
        self.assertEqual(len([result for result in results._result_cache if result is not None]), 1)

        # Register a SearchIndex with a read_queryset that returns deleted items
        rqstsi = TextReadQuerySetTestSearchIndex()
        ui.build(indexes=[rqstsi])
        rqstsi.update()

        sqs = SearchQuerySet()
        results = sqs.load_all().all()
        results.query.backend = ReadQuerySetMockSearchBackend("default")
        results._fill_cache(0, 2)

        # Both the deleted and not deleted items are returned
        self.assertEqual(len([result for result in results._result_cache if result is not None]), 2)

        # Restore.
        connections["default"]._index = old_ui

    def test_auto_query(self):
        sqs = self.msqs.auto_query("test search -stuff")
        self.assertTrue(isinstance(sqs, SearchQuerySet))
        self.assertEqual(repr(sqs.query.query_filter), "<SQ: AND content__content=test search -stuff>")

        sqs = self.msqs.auto_query('test "my thing" search -stuff')
        self.assertTrue(isinstance(sqs, SearchQuerySet))
        self.assertEqual(repr(sqs.query.query_filter), '<SQ: AND content__content=test "my thing" search -stuff>')

        sqs = self.msqs.auto_query("test \"my thing\" search 'moar quotes' -stuff")
        self.assertTrue(isinstance(sqs, SearchQuerySet))
        self.assertEqual(
            repr(sqs.query.query_filter),
            "<SQ: AND content__content=test \"my thing\" search 'moar quotes' -stuff>"
        )

        sqs = self.msqs.auto_query('test "my thing" search \'moar quotes\' "foo -stuff')
        self.assertTrue(isinstance(sqs, SearchQuerySet))
        self.assertEqual(
            repr(sqs.query.query_filter),
            '<SQ: AND content__content=test "my thing" search \'moar quotes\' "foo -stuff>',
        )

        sqs = self.msqs.auto_query("test - stuff")
        self.assertTrue(isinstance(sqs, SearchQuerySet))
        self.assertEqual(repr(sqs.query.query_filter), "<SQ: AND content__content=test - stuff>")

        # Ensure bits in exact matches get escaped properly as well.
        sqs = self.msqs.auto_query('"pants:rule"')
        self.assertTrue(isinstance(sqs, SearchQuerySet))
        self.assertEqual(repr(sqs.query.query_filter), '<SQ: AND content__content="pants:rule">')

        # Now with a different fieldname
        sqs = self.msqs.auto_query("test search -stuff", fieldname="title")
        self.assertTrue(isinstance(sqs, SearchQuerySet))
        self.assertEqual(repr(sqs.query.query_filter), "<SQ: AND title__content=test search -stuff>")

        sqs = self.msqs.auto_query('test "my thing" search -stuff', fieldname="title")
        self.assertTrue(isinstance(sqs, SearchQuerySet))
        self.assertEqual(repr(sqs.query.query_filter), '<SQ: AND title__content=test "my thing" search -stuff>')

    def test_count(self):
        self.assertEqual(self.msqs.count(), 23)

    def test_facet_counts(self):
        self.assertEqual(self.msqs.facet_counts(), {})

    def test_best_match(self):
        self.assertTrue(isinstance(self.msqs.best_match(), SearchResult))

    def test_latest(self):
        self.assertTrue(isinstance(self.msqs.latest("pub_date"), SearchResult))

    def test_more_like_this(self):
        mock = MockModel()
        mock.id = 1

        self.assertEqual(len(self.msqs.more_like_this(mock)), 23)

    def test_facets(self):
        sqs = self.msqs.facet("foo")
        self.assertTrue(isinstance(sqs, SearchQuerySet))
        self.assertEqual(len(sqs.query.facets), 1)

        sqs2 = self.msqs.facet("foo").facet("bar")
        self.assertTrue(isinstance(sqs2, SearchQuerySet))
        self.assertEqual(len(sqs2.query.facets), 2)

    def test_date_facets(self):
        # An invalid gap_by should raise with a descriptive message.
        try:
            sqs = self.msqs.date_facet(
                "foo", start_date=datetime.date(2008, 2, 25), end_date=datetime.date(2009, 2, 25), gap_by="smarblaph"
            )
            self.fail()
        except FacetingError as e:
            self.assertEqual(
                str(e),
                "The gap_by ('smarblaph') must be one of the following: year, month, day, hour, minute, second."
            )

        sqs = self.msqs.date_facet(
            "foo", start_date=datetime.date(2008, 2, 25), end_date=datetime.date(2009, 2, 25), gap_by="month"
        )
        self.assertTrue(isinstance(sqs, SearchQuerySet))
        self.assertEqual(len(sqs.query.date_facets), 1)

        sqs2 = self.msqs.date_facet(
            "foo", start_date=datetime.date(2008, 2, 25), end_date=datetime.date(2009, 2, 25), gap_by="month"
        ).date_facet("bar", start_date=datetime.date(2007, 2, 25), end_date=datetime.date(2009, 2, 25), gap_by="year")
        self.assertTrue(isinstance(sqs2, SearchQuerySet))
        self.assertEqual(len(sqs2.query.date_facets), 2)

    def test_query_facets(self):
        sqs = self.msqs.query_facet("foo", "[bar TO *]")
        self.assertTrue(isinstance(sqs, SearchQuerySet))
        self.assertEqual(len(sqs.query.query_facets), 1)

        sqs2 = self.msqs.query_facet("foo", "[bar TO *]").query_facet("bar", "[100 TO 499]")
        self.assertTrue(isinstance(sqs2, SearchQuerySet))
        self.assertEqual(len(sqs2.query.query_facets), 2)

        # Test multiple query facets on a single field
        sqs3 = (
            self.msqs.query_facet("foo", "[bar TO *]")
            .query_facet("bar", "[100 TO 499]")
            .query_facet("foo", "[1000 TO 1499]")
        )
        self.assertTrue(isinstance(sqs3, SearchQuerySet))
        self.assertEqual(len(sqs3.query.query_facets), 3)

    def test_stats(self):
        sqs = self.msqs.stats_facet("foo", "bar")
        self.assertTrue(isinstance(sqs, SearchQuerySet))
        self.assertEqual(len(sqs.query.stats), 1)

        # A second facet on the same field replaces the first.
        sqs2 = self.msqs.stats_facet("foo", "bar").stats_facet("foo", "baz")
        self.assertTrue(isinstance(sqs2, SearchQuerySet))
        self.assertEqual(len(sqs2.query.stats), 1)

        sqs3 = self.msqs.stats_facet("foo", "bar").stats_facet("moof", "baz")
        self.assertTrue(isinstance(sqs3, SearchQuerySet))
        self.assertEqual(len(sqs3.query.stats), 2)

    def test_narrow(self):
        sqs = self.msqs.narrow("foo:moof")
        self.assertTrue(isinstance(sqs, SearchQuerySet))
        self.assertEqual(len(sqs.query.narrow_queries), 1)

    def test_clone(self):
        results = self.msqs.filter(foo="bar", foo__lt="10")

        clone = results._clone()
        self.assertTrue(isinstance(clone, SearchQuerySet))
        self.assertEqual(str(clone.query), str(results.query))
        self.assertEqual(clone._result_cache, [])
        self.assertEqual(clone._result_count, None)
        self.assertEqual(clone._cache_full, False)
        self.assertEqual(clone._using, results._using)

    def test_using(self):
        sqs = SearchQuerySet(using="default")
        self.assertNotEqual(sqs.query, None)
        self.assertEqual(sqs.query._using, "default")

    def test_chaining(self):
        sqs = self.msqs.filter(content="foo")
        self.assertTrue(isinstance(sqs, SearchQuerySet))
        self.assertEqual(len(sqs.query.query_filter), 1)

        # A second instance should inherit none of the changes from above.
        sqs = self.msqs.filter(content="bar")
        self.assertTrue(isinstance(sqs, SearchQuerySet))
        self.assertEqual(len(sqs.query.query_filter), 1)

    def test_none(self):
        sqs = self.msqs.none()
        self.assertTrue(isinstance(sqs, EmptySearchQuerySet))
        self.assertEqual(len(sqs), 0)

    def test___and__(self):
        sqs1 = self.msqs.filter(content="foo")
        sqs2 = self.msqs.filter(content="bar")
        sqs = sqs1 & sqs2
        self.assertTrue(isinstance(sqs, SearchQuerySet))
        self.assertEqual(len(sqs.query.query_filter), 2)

    def test___or__(self):
        sqs1 = self.msqs.filter(content="foo")
        sqs2 = self.msqs.filter(content="bar")
        sqs = sqs1 | sqs2
        self.assertTrue(isinstance(sqs, SearchQuerySet))
        self.assertEqual(len(sqs.query.query_filter), 2)

    def test_and_or(self):
        """
        Combining AND queries with OR should give
        AND(OR(a, b), OR(c, d))
        """
        sqs1 = self.msqs.filter(content="foo").filter(content="oof")
        sqs2 = self.msqs.filter(content="bar").filter(content="rab")
        sqs = sqs1 | sqs2

        self.assertEqual(sqs.query.query_filter.connector, "OR")
        self.assertEqual(repr(sqs.query.query_filter.children[0]), repr(sqs1.query.query_filter))
        self.assertEqual(repr(sqs.query.query_filter.children[1]), repr(sqs2.query.query_filter))

    def test_or_and(self):
        """
        Combining OR queries with AND should give
        OR(AND(a, b), AND(c, d))
        """
        sqs1 = self.msqs.filter(content="foo").filter_or(content="oof")
        sqs2 = self.msqs.filter(content="bar").filter_or(content="rab")
        sqs = sqs1 & sqs2

        self.assertEqual(sqs.query.query_filter.connector, "AND")
        self.assertEqual(repr(sqs.query.query_filter.children[0]), repr(sqs1.query.query_filter))
        self.assertEqual(repr(sqs.query.query_filter.children[1]), repr(sqs2.query.query_filter))
class SearchQuerySetTestCase(TestCase):
    """Exercise SearchQuerySet end-to-end against the mock search backends.

    setUp swaps the default connection's UnifiedIndex for one built from
    mock indexes, repopulates the mock "index" from MockModel, and forces
    settings.DEBUG on so connections['default'].queries records every
    backend hit -- the tests assert query counts as well as results.

    Fix applied in this revision: the Python-2-only ``except FacetingError,
    e:`` clause in test_date_facets is now ``except FacetingError as e:``
    (valid on Python 2.6+ and required on Python 3).
    """

    fixtures = ['bulk_data.json']

    def setUp(self):
        super(SearchQuerySetTestCase, self).setUp()

        # Stow.
        self.old_unified_index = connections['default']._index
        self.ui = UnifiedIndex()
        self.bmmsi = BasicMockModelSearchIndex()
        self.cpkmmsi = CharPKMockModelSearchIndex()
        self.ui.build(indexes=[self.bmmsi, self.cpkmmsi])
        connections['default']._index = self.ui

        # Update the "index".
        backend = connections['default'].get_backend()
        backend.clear()
        backend.update(self.bmmsi, MockModel.objects.all())

        self.msqs = SearchQuerySet()

        # Stow DEBUG so query logging is active for the assertions below.
        self.old_debug = settings.DEBUG
        settings.DEBUG = True

        reset_search_queries()

    def tearDown(self):
        # Restore.
        connections['default']._index = self.old_unified_index
        settings.DEBUG = self.old_debug
        super(SearchQuerySetTestCase, self).tearDown()

    def test_len(self):
        self.assertEqual(len(self.msqs), 23)

    def test_repr(self):
        reset_search_queries()
        self.assertEqual(len(connections['default'].queries), 0)
        # repr() truncates after REPR_OUTPUT_SIZE elements and costs one query.
        self.assertEqual(repr(self.msqs), "[<SearchResult: core.mockmodel (pk=u'1')>, <SearchResult: core.mockmodel (pk=u'2')>, <SearchResult: core.mockmodel (pk=u'3')>, <SearchResult: core.mockmodel (pk=u'4')>, <SearchResult: core.mockmodel (pk=u'5')>, <SearchResult: core.mockmodel (pk=u'6')>, <SearchResult: core.mockmodel (pk=u'7')>, <SearchResult: core.mockmodel (pk=u'8')>, <SearchResult: core.mockmodel (pk=u'9')>, <SearchResult: core.mockmodel (pk=u'10')>, <SearchResult: core.mockmodel (pk=u'11')>, <SearchResult: core.mockmodel (pk=u'12')>, <SearchResult: core.mockmodel (pk=u'13')>, <SearchResult: core.mockmodel (pk=u'14')>, <SearchResult: core.mockmodel (pk=u'15')>, <SearchResult: core.mockmodel (pk=u'16')>, <SearchResult: core.mockmodel (pk=u'17')>, <SearchResult: core.mockmodel (pk=u'18')>, <SearchResult: core.mockmodel (pk=u'19')>, '...(remaining elements truncated)...']")
        self.assertEqual(len(connections['default'].queries), 1)

    def test_iter(self):
        reset_search_queries()
        self.assertEqual(len(connections['default'].queries), 0)
        msqs = self.msqs.all()
        results = [int(res.pk) for res in msqs]
        self.assertEqual(results, [res.pk for res in MOCK_SEARCH_RESULTS[:23]])
        self.assertEqual(len(connections['default'].queries), 3)

    def test_slice(self):
        reset_search_queries()
        self.assertEqual(len(connections['default'].queries), 0)
        results = self.msqs.all()
        self.assertEqual([int(res.pk) for res in results[1:11]], [res.pk for res in MOCK_SEARCH_RESULTS[1:11]])
        self.assertEqual(len(connections['default'].queries), 1)

        reset_search_queries()
        self.assertEqual(len(connections['default'].queries), 0)
        results = self.msqs.all()
        self.assertEqual(int(results[22].pk), MOCK_SEARCH_RESULTS[22].pk)
        self.assertEqual(len(connections['default'].queries), 1)

    def test_manual_iter(self):
        results = self.msqs.all()

        reset_search_queries()
        self.assertEqual(len(connections['default'].queries), 0)
        check = [result.pk for result in results._manual_iter()]
        self.assertEqual(check, [u'1', u'2', u'3', u'4', u'5', u'6', u'7', u'8', u'9', u'10', u'11', u'12', u'13', u'14', u'15', u'16', u'17', u'18', u'19', u'20', u'21', u'22', u'23'])
        self.assertEqual(len(connections['default'].queries), 3)

        reset_search_queries()
        self.assertEqual(len(connections['default'].queries), 0)

        # Test to ensure we properly fill the cache, even if we get fewer
        # results back (not a handled model) than the hit count indicates.
        # This will hang indefinitely if broken.
        old_ui = self.ui
        self.ui.build(indexes=[self.cpkmmsi])
        connections['default']._index = self.ui
        self.cpkmmsi.update()

        results = self.msqs.all()
        loaded = [result.pk for result in results._manual_iter()]
        self.assertEqual(loaded, [u'sometext', u'1234'])
        self.assertEqual(len(connections['default'].queries), 1)

        connections['default']._index = old_ui

    def test_fill_cache(self):
        reset_search_queries()
        self.assertEqual(len(connections['default'].queries), 0)
        results = self.msqs.all()
        self.assertEqual(len(results._result_cache), 0)
        self.assertEqual(len(connections['default'].queries), 0)
        results._fill_cache(0, 10)
        self.assertEqual(len([result for result in results._result_cache if result is not None]), 10)
        self.assertEqual(len(connections['default'].queries), 1)
        results._fill_cache(10, 20)
        self.assertEqual(len([result for result in results._result_cache if result is not None]), 20)
        self.assertEqual(len(connections['default'].queries), 2)

        reset_search_queries()
        self.assertEqual(len(connections['default'].queries), 0)

        # Test to ensure we properly fill the cache, even if we get fewer
        # results back (not a handled model) than the hit count indicates.
        sqs = SearchQuerySet().all()
        sqs.query.backend = MixedMockSearchBackend('default')
        results = sqs
        self.assertEqual(len([result for result in results._result_cache if result is not None]), 0)
        self.assertEqual([int(result.pk) for result in results._result_cache if result is not None], [])
        self.assertEqual(len(connections['default'].queries), 0)
        results._fill_cache(0, 10)
        self.assertEqual(len([result for result in results._result_cache if result is not None]), 9)
        self.assertEqual([int(result.pk) for result in results._result_cache if result is not None], [1, 2, 3, 4, 5, 6, 7, 8, 10])
        self.assertEqual(len(connections['default'].queries), 2)
        results._fill_cache(10, 20)
        self.assertEqual(len([result for result in results._result_cache if result is not None]), 17)
        self.assertEqual([int(result.pk) for result in results._result_cache if result is not None], [1, 2, 3, 4, 5, 6, 7, 8, 10, 11, 12, 15, 16, 17, 18, 19, 20])
        self.assertEqual(len(connections['default'].queries), 4)
        results._fill_cache(20, 30)
        self.assertEqual(len([result for result in results._result_cache if result is not None]), 20)
        self.assertEqual([int(result.pk) for result in results._result_cache if result is not None], [1, 2, 3, 4, 5, 6, 7, 8, 10, 11, 12, 15, 16, 17, 18, 19, 20, 21, 22, 23])
        self.assertEqual(len(connections['default'].queries), 6)

    def test_cache_is_full(self):
        reset_search_queries()
        self.assertEqual(len(connections['default'].queries), 0)
        self.assertEqual(self.msqs._cache_is_full(), False)
        results = self.msqs.all()
        fire_the_iterator_and_fill_cache = [result for result in results]
        self.assertEqual(results._cache_is_full(), True)
        self.assertEqual(len(connections['default'].queries), 3)

    def test_all(self):
        sqs = self.msqs.all()
        self.assertTrue(isinstance(sqs, SearchQuerySet))

    def test_filter(self):
        sqs = self.msqs.filter(content='foo')
        self.assertTrue(isinstance(sqs, SearchQuerySet))
        self.assertEqual(len(sqs.query.query_filter), 1)

    def test_exclude(self):
        sqs = self.msqs.exclude(content='foo')
        self.assertTrue(isinstance(sqs, SearchQuerySet))
        self.assertEqual(len(sqs.query.query_filter), 1)

    def test_order_by(self):
        sqs = self.msqs.order_by('foo')
        self.assertTrue(isinstance(sqs, SearchQuerySet))
        self.assertTrue('foo' in sqs.query.order_by)

    def test_models(self):
        # Stow.
        old_unified_index = connections['default']._index
        ui = UnifiedIndex()
        bmmsi = BasicMockModelSearchIndex()
        bammsi = BasicAnotherMockModelSearchIndex()
        ui.build(indexes=[bmmsi, bammsi])
        connections['default']._index = ui

        msqs = SearchQuerySet()

        sqs = msqs.all()
        self.assertTrue(isinstance(sqs, SearchQuerySet))
        self.assertEqual(len(sqs.query.models), 0)

        sqs = msqs.models(MockModel)
        self.assertTrue(isinstance(sqs, SearchQuerySet))
        self.assertEqual(len(sqs.query.models), 1)

        sqs = msqs.models(MockModel, AnotherMockModel)
        self.assertTrue(isinstance(sqs, SearchQuerySet))
        self.assertEqual(len(sqs.query.models), 2)

        # This will produce a warning.
        ui.build(indexes=[bmmsi])
        sqs = msqs.models(AnotherMockModel)
        self.assertTrue(isinstance(sqs, SearchQuerySet))
        self.assertEqual(len(sqs.query.models), 1)

    def test_result_class(self):
        sqs = self.msqs.all()
        self.assertTrue(issubclass(sqs.query.result_class, SearchResult))

        # Custom class.
        class IttyBittyResult(object):
            pass

        sqs = self.msqs.result_class(IttyBittyResult)
        self.assertTrue(issubclass(sqs.query.result_class, IttyBittyResult))

        # Reset to default.
        sqs = self.msqs.result_class(None)
        self.assertTrue(issubclass(sqs.query.result_class, SearchResult))

    def test_boost(self):
        sqs = self.msqs.boost('foo', 10)
        self.assertTrue(isinstance(sqs, SearchQuerySet))
        self.assertEqual(len(sqs.query.boost.keys()), 1)

    def test_highlight(self):
        sqs = self.msqs.highlight()
        self.assertTrue(isinstance(sqs, SearchQuerySet))
        self.assertEqual(sqs.query.highlight, True)

    def test_spelling(self):
        # Test the case where spelling support is disabled.
        sqs = self.msqs.filter(content='Indx')
        self.assertTrue(isinstance(sqs, SearchQuerySet))
        self.assertEqual(sqs.spelling_suggestion(), None)
        self.assertEqual(sqs.spelling_suggestion('indexy'), None)

    def test_raw_search(self):
        self.assertEqual(len(self.msqs.raw_search('foo')), 23)
        self.assertEqual(len(self.msqs.raw_search('(content__exact:hello AND content__exact:world)')), 23)

    def test_load_all(self):
        # Models with character primary keys.
        sqs = SearchQuerySet()
        sqs.query.backend = CharPKMockSearchBackend('charpk')
        results = sqs.load_all().all()
        self.assertEqual(len(results._result_cache), 0)
        results._fill_cache(0, 2)
        self.assertEqual(len([result for result in results._result_cache if result is not None]), 2)

        # If nothing is handled, you get nothing.
        old_ui = connections['default']._index
        ui = UnifiedIndex()
        ui.build(indexes=[])
        connections['default']._index = ui

        sqs = self.msqs.load_all()
        self.assertTrue(isinstance(sqs, SearchQuerySet))
        self.assertEqual(len(sqs), 0)

        connections['default']._index = old_ui

        # For full tests, see the solr_backend.

    def test_load_all_read_queryset(self):
        # Stow.
        old_ui = connections['default']._index
        ui = UnifiedIndex()
        gafmmsi = GhettoAFifthMockModelSearchIndex()
        ui.build(indexes=[gafmmsi])
        connections['default']._index = ui
        gafmmsi.update()

        sqs = SearchQuerySet()
        results = sqs.load_all().all()
        results.query.backend = ReadQuerySetMockSearchBackend('default')
        results._fill_cache(0, 2)

        # The deleted result isn't returned
        self.assertEqual(len([result for result in results._result_cache if result is not None]), 1)

        # Register a SearchIndex with a read_queryset that returns deleted items
        rqstsi = TextReadQuerySetTestSearchIndex()
        ui.build(indexes=[rqstsi])
        rqstsi.update()

        sqs = SearchQuerySet()
        results = sqs.load_all().all()
        results.query.backend = ReadQuerySetMockSearchBackend('default')
        results._fill_cache(0, 2)

        # Both the deleted and not deleted items are returned
        self.assertEqual(len([result for result in results._result_cache if result is not None]), 2)

        # Restore.
        connections['default']._index = old_ui

    def test_auto_query(self):
        sqs = self.msqs.auto_query('test search -stuff')
        self.assertTrue(isinstance(sqs, SearchQuerySet))
        self.assertEqual(repr(sqs.query.query_filter), '<SQ: AND content__contains=test search -stuff>')

        sqs = self.msqs.auto_query('test "my thing" search -stuff')
        self.assertTrue(isinstance(sqs, SearchQuerySet))
        self.assertEqual(repr(sqs.query.query_filter), '<SQ: AND content__contains=test "my thing" search -stuff>')

        sqs = self.msqs.auto_query('test "my thing" search \'moar quotes\' -stuff')
        self.assertTrue(isinstance(sqs, SearchQuerySet))
        self.assertEqual(repr(sqs.query.query_filter), '<SQ: AND content__contains=test "my thing" search \'moar quotes\' -stuff>')

        sqs = self.msqs.auto_query('test "my thing" search \'moar quotes\' "foo -stuff')
        self.assertTrue(isinstance(sqs, SearchQuerySet))
        self.assertEqual(repr(sqs.query.query_filter), '<SQ: AND content__contains=test "my thing" search \'moar quotes\' "foo -stuff>')

        sqs = self.msqs.auto_query('test - stuff')
        self.assertTrue(isinstance(sqs, SearchQuerySet))
        self.assertEqual(repr(sqs.query.query_filter), "<SQ: AND content__contains=test - stuff>")

        # Ensure bits in exact matches get escaped properly as well.
        sqs = self.msqs.auto_query('"pants:rule"')
        self.assertTrue(isinstance(sqs, SearchQuerySet))
        self.assertEqual(repr(sqs.query.query_filter), '<SQ: AND content__contains="pants:rule">')

        # Now with a different fieldname
        sqs = self.msqs.auto_query('test search -stuff', fieldname='title')
        self.assertTrue(isinstance(sqs, SearchQuerySet))
        self.assertEqual(repr(sqs.query.query_filter), "<SQ: AND title__contains=test search -stuff>")

        sqs = self.msqs.auto_query('test "my thing" search -stuff', fieldname='title')
        self.assertTrue(isinstance(sqs, SearchQuerySet))
        self.assertEqual(repr(sqs.query.query_filter), '<SQ: AND title__contains=test "my thing" search -stuff>')

    def test_count(self):
        self.assertEqual(self.msqs.count(), 23)

    def test_facet_counts(self):
        self.assertEqual(self.msqs.facet_counts(), {})

    def test_best_match(self):
        self.assertTrue(isinstance(self.msqs.best_match(), SearchResult))

    def test_latest(self):
        self.assertTrue(isinstance(self.msqs.latest('pub_date'), SearchResult))

    def test_more_like_this(self):
        mock = MockModel()
        mock.id = 1
        self.assertEqual(len(self.msqs.more_like_this(mock)), 23)

    def test_facets(self):
        sqs = self.msqs.facet('foo')
        self.assertTrue(isinstance(sqs, SearchQuerySet))
        self.assertEqual(len(sqs.query.facets), 1)

        sqs2 = self.msqs.facet('foo').facet('bar')
        self.assertTrue(isinstance(sqs2, SearchQuerySet))
        self.assertEqual(len(sqs2.query.facets), 2)

    def test_date_facets(self):
        try:
            sqs = self.msqs.date_facet('foo', start_date=datetime.date(2008, 2, 25), end_date=datetime.date(2009, 2, 25), gap_by='smarblaph')
            self.fail()
        # FIX: was the Python-2-only form "except FacetingError, e:".
        except FacetingError as e:
            self.assertEqual(str(e), "The gap_by ('smarblaph') must be one of the following: year, month, day, hour, minute, second.")

        sqs = self.msqs.date_facet('foo', start_date=datetime.date(2008, 2, 25), end_date=datetime.date(2009, 2, 25), gap_by='month')
        self.assertTrue(isinstance(sqs, SearchQuerySet))
        self.assertEqual(len(sqs.query.date_facets), 1)

        sqs2 = self.msqs.date_facet('foo', start_date=datetime.date(2008, 2, 25), end_date=datetime.date(2009, 2, 25), gap_by='month').date_facet('bar', start_date=datetime.date(2007, 2, 25), end_date=datetime.date(2009, 2, 25), gap_by='year')
        self.assertTrue(isinstance(sqs2, SearchQuerySet))
        self.assertEqual(len(sqs2.query.date_facets), 2)
def employer_students(request, extra_context=None):
    """Build the template context for the employer "browse students" page.

    AJAX requests run a student search: pick the base student list from
    ``request.GET['student_list']``, convert it to a haystack
    ``SearchQuerySet``, apply the GET-parameter filters, then paginate and
    record display statistics. Non-AJAX requests build the initial page
    context (forms, enums, messages).

    Returns the context dict either way; ``extra_context`` entries override.

    Fixes vs. the previous revision:
      * ``request.GET.has_key(x)`` -> ``x in request.GET`` (``has_key`` is
        deprecated in Python 2 and removed in Python 3).
      * bare ``except:`` on the Event lookup narrowed to ``except Exception``
        so SystemExit/KeyboardInterrupt are no longer swallowed.
      * ``request.GET['older_than_21']`` guarded with ``.get(..., 'N')`` so a
        missing parameter means "no filter" instead of a KeyError.
      * ``map(lambda ...)`` -> list comprehension (``map`` returns a lazy,
        always-truthy iterator on Python 3, which would break the
        ``if domains`` test).
      * guarded ``parts[-2]`` access against single-word list names.
    """
    context = {}
    if request.is_ajax():
        student_list = request.GET['student_list']
        student_list_id = request.GET['student_list_id']
        recruiter = request.user.recruiter

        if student_list == student_enums.GENERAL_STUDENT_LISTS[0][1]:
            # "All students": the search index already tracks visibility.
            students = SearchQuerySet().models(Student).filter(visible=True)
        else:
            if student_list == student_enums.GENERAL_STUDENT_LISTS[1][1]:
                students = recruiter.employer.starred_students.all()
            elif student_list == student_enums.GENERAL_STUDENT_LISTS[2][1]:
                # Current (undelivered) resume book; create lazily on first use.
                try:
                    resume_book = ResumeBook.objects.get(recruiter=recruiter, delivered=False)
                except ResumeBook.DoesNotExist:
                    resume_book = ResumeBook.objects.create(recruiter=recruiter)
                students = resume_book.students.visible()
            else:
                parts = student_list.split(" ")
                is_event_list = (parts[-1] in ("RSVPs", "Attendees")
                                 or (len(parts) >= 2 and parts[-2] == "Resume" and parts[-1] == "Drop"))
                if is_event_list:
                    try:
                        e = Event.objects.get(id=student_list_id)
                    except Exception:
                        # Any lookup failure (missing event, bad id) -> 404.
                        raise Http404
                    if parts[-1] == "RSVPs":
                        students = Student.objects.visible().filter(rsvp__in=e.rsvp_set.filter(attending=True), profile_created=True)
                    elif parts[-1] == "Attendees":
                        students = Student.objects.visible().filter(attendee__in=e.attendee_set.all(), profile_created=True)
                    else:  # "... Resume Drop"
                        students = Student.objects.visible().filter(droppedresume__in=e.droppedresume_set.all(), profile_created=True)
                else:
                    # Named resume book selected by id.
                    students = ResumeBook.objects.get(id=student_list_id).students.visible()
            # Convert the DB queryset into a search queryset so the filters
            # below work uniformly on every branch.
            students = SearchQuerySet().models(Student).filter(obj_id__in=[student.id for student in students])

        am_filtering = False

        # Minimum-score thresholds: the GET parameter name doubles as the
        # indexed field name, filtered with a __gte lookup.
        for param in ('gpa', 'act', 'sat_t', 'sat_m', 'sat_v', 'sat_w'):
            if param in request.GET:
                am_filtering = True
                students = students.filter(**{param + '__gte': request.GET[param]})

        # Multi-select filters: '~'-separated GET values -> __in lookups.
        # Tuple order preserves the original filter application order.
        for param, lookup in (
                ('degree_programs', 'degree_program__in'),
                ('graduation_years', 'graduation_year__in'),
                ('employment_types', 'looking_for__in'),
                ('previous_employers', 'previous_employers__in'),
                ('industries_of_interest', 'industries_of_interest__in'),
                ('languages', 'languages__in'),
                ('campus_orgs', 'campus_involvement__in'),
                ('countries_of_citizenship', 'countries_of_citizenship__in')):
            if param in request.GET:
                am_filtering = True
                students = students.filter(**{lookup: request.GET[param].split('~')})

        # .get() with 'N' default: a missing param means "don't filter".
        if request.GET.get('older_than_21', 'N') != 'N':
            am_filtering = True
            students = students.filter(older_than_21=True)

        if 'schools' in request.GET:
            am_filtering = True
            school_ids = request.GET['schools'].split('~')
            schools = School.objects.filter(id__in=school_ids)
            domain_names = DomainName.objects.filter(school__in=schools)
            domains = [dn.domain for dn in domain_names]
            if domains and students:
                # OR together one email-domain clause per school domain.
                domain_q = SQ(email__contains=domains[0])
                for domain in domains[1:]:
                    domain_q = domain_q | SQ(email__contains=domain)
                students = students.filter(domain_q)

        if 'courses' in request.GET:
            am_filtering = True
            courses = request.GET['courses'].split('~')
            students = students.filter(SQ(first_major__in=courses) | SQ(second_major__in=courses))

        if 'query' in request.GET:
            students = search(students, request.GET['query'])

        # Highlight the results
        students = students.highlight()

        results_per_page = int(request.GET['results_per_page'])
        start_index = results_per_page * (int(request.GET['page']) - 1)
        count = students.count()
        if start_index >= count:
            start_index = 0
        ordered_results = order_results(students, request)[start_index:start_index + results_per_page]

        ordered_result_objects = []
        for search_result in ordered_results:
            if search_result.highlighted:
                ordered_result_objects.append((search_result.object, search_result.highlighted['text'][0]))
            else:
                ordered_result_objects.append((search_result.object, ""))

        # Pad to the full result count so the paginator reports the right
        # total while only the current page's objects are materialized.
        padded_ordered_results = [''] * count
        for offset, result_object in enumerate(ordered_result_objects):
            padded_ordered_results[start_index + offset] = result_object

        paginator = DiggPaginator(padded_ordered_results, results_per_page, body=3, padding=1, margin=2)
        context['filtering'] = am_filtering
        try:
            page = paginator.page(request.GET['page'])
        except EmptyPage:
            page = paginator.page(1)
        context['page'] = page
        context['results'] = process_results(request.user.recruiter, request.META['has_at_least_premium'], page)
        context['current_student_list'] = request.GET['student_list']
        context['total_results_num'] = count

        # I don't like this method of statistics
        if request.user.recruiter.employer.name != "Umeqo":
            for student, highlighted_text, is_in_resume_book, is_starred, comment, num_of_events_attended, visible, school in context['results']:
                if student:
                    student.studentstatistics.shown_in_results_count += 1
                    student.studentstatistics.save()

        resume_book = ResumeBook.objects.get(recruiter=request.user.recruiter, delivered=False)
        if len(resume_book.students.visible()) >= s.RESUME_BOOK_CAPACITY:
            context['resume_book_capacity_reached'] = True
        context['TEMPLATE'] = 'employer_students_results.html'
    else:
        page_messages = {
            'NO_STUDENTS_SELECTED_MESSAGE': messages.no_students_selected,
            'WAIT_UNTIL_RESUME_BOOK_IS_READY_MESSAGE': messages.wait_until_resume_book_is_ready
        }
        context['page_messages'] = page_messages
        context['query'] = request.GET.get('query', '')
        # Passing the employer id to generate the appropriate student list choices
        context['student_filtering_form'] = StudentFilteringForm(initial={
            'has_at_least_premium': request.META['has_at_least_premium'],
            'recruiter_id': request.user.recruiter.id,
            'ordering': request.user.recruiter.recruiterpreferences.default_student_result_ordering,
            'results_per_page': request.user.recruiter.recruiterpreferences.default_student_results_per_page})
        context['student_search_form'] = StudentSearchForm()
        context['added'] = employer_enums.ADDED
        context['starred'] = employer_enums.STARRED
        context['email_delivery_type'] = core_enums.EMAIL
        context['in_resume_book_student_list'] = student_enums.GENERAL_STUDENT_LISTS[2][1]
        context['resume_book_capacity'] = s.RESUME_BOOK_CAPACITY
        context['TEMPLATE'] = 'employer_students.html'
    context.update(extra_context or {})
    return context
# NOTE(review): whitespace-mangled paste -- statements collapsed onto single
# physical lines; kept byte-identical below, only these comments added.
# This is a SECOND, modernized copy of SearchQuerySetTestCase (duplicate class
# name in one module -- the later definition would shadow the earlier one;
# presumably two file revisions were concatenated, verify upstream). It differs
# from the first copy: no DEBUG stow, test_repr uses assertRegexpMatches, and
# "except FacetingError as e" replaces the Py2-only comma form. The class is
# TRUNCATED here: test_clone's body continues past the end of this chunk.
class SearchQuerySetTestCase(TestCase): fixtures = ['bulk_data.json'] def setUp(self): super(SearchQuerySetTestCase, self).setUp() # Stow. self.old_unified_index = connections['default']._index self.ui = UnifiedIndex() self.bmmsi = BasicMockModelSearchIndex() self.cpkmmsi = CharPKMockModelSearchIndex() self.ui.build(indexes=[self.bmmsi, self.cpkmmsi]) connections['default']._index = self.ui # Update the "index". backend = connections['default'].get_backend() backend.clear() backend.update(self.bmmsi, MockModel.objects.all()) self.msqs = SearchQuerySet() # Stow. reset_search_queries() def tearDown(self): # Restore. connections['default']._index = self.old_unified_index super(SearchQuerySetTestCase, self).tearDown() def test_len(self): self.assertEqual(len(self.msqs), 23) def test_repr(self): reset_search_queries() self.assertEqual(len(connections['default'].queries), 0) self.assertRegexpMatches( repr(self.msqs), r'^<SearchQuerySet: query=<test_haystack.mocks.MockSearchQuery object' r' at 0x[0-9A-Fa-f]+>, using=None>$') def test_iter(self): reset_search_queries() self.assertEqual(len(connections['default'].queries), 0) msqs = self.msqs.all() results = [int(res.pk) for res in msqs] self.assertEqual(results, [res.pk for res in MOCK_SEARCH_RESULTS[:23]]) self.assertEqual(len(connections['default'].queries), 3) def test_slice(self): reset_search_queries() self.assertEqual(len(connections['default'].queries), 0) results = self.msqs.all() self.assertEqual([int(res.pk) for res in results[1:11]], [res.pk for res in MOCK_SEARCH_RESULTS[1:11]]) self.assertEqual(len(connections['default'].queries), 1) reset_search_queries() self.assertEqual(len(connections['default'].queries), 0) results = self.msqs.all() self.assertEqual(int(results[22].pk), MOCK_SEARCH_RESULTS[22].pk) self.assertEqual(len(connections['default'].queries), 1) def test_manual_iter(self): results = self.msqs.all() reset_search_queries() self.assertEqual(len(connections['default'].queries), 0) check = [result.pk 
for result in results._manual_iter()] self.assertEqual(check, [ u'1', u'2', u'3', u'4', u'5', u'6', u'7', u'8', u'9', u'10', u'11', u'12', u'13', u'14', u'15', u'16', u'17', u'18', u'19', u'20', u'21', u'22', u'23' ]) self.assertEqual(len(connections['default'].queries), 3) reset_search_queries() self.assertEqual(len(connections['default'].queries), 0) # Test to ensure we properly fill the cache, even if we get fewer # results back (not a handled model) than the hit count indicates. # This will hang indefinitely if broken. old_ui = self.ui self.ui.build(indexes=[self.cpkmmsi]) connections['default']._index = self.ui self.cpkmmsi.update() results = self.msqs.all() loaded = [result.pk for result in results._manual_iter()] self.assertEqual(loaded, [u'sometext', u'1234']) self.assertEqual(len(connections['default'].queries), 1) connections['default']._index = old_ui def test_fill_cache(self): reset_search_queries() self.assertEqual(len(connections['default'].queries), 0) results = self.msqs.all() self.assertEqual(len(results._result_cache), 0) self.assertEqual(len(connections['default'].queries), 0) results._fill_cache(0, 10) self.assertEqual( len([ result for result in results._result_cache if result is not None ]), 10) self.assertEqual(len(connections['default'].queries), 1) results._fill_cache(10, 20) self.assertEqual( len([ result for result in results._result_cache if result is not None ]), 20) self.assertEqual(len(connections['default'].queries), 2) reset_search_queries() self.assertEqual(len(connections['default'].queries), 0) # Test to ensure we properly fill the cache, even if we get fewer # results back (not a handled model) than the hit count indicates. 
sqs = SearchQuerySet().all() sqs.query.backend = MixedMockSearchBackend('default') results = sqs self.assertEqual( len([ result for result in results._result_cache if result is not None ]), 0) self.assertEqual([ int(result.pk) for result in results._result_cache if result is not None ], []) self.assertEqual(len(connections['default'].queries), 0) results._fill_cache(0, 10) self.assertEqual( len([ result for result in results._result_cache if result is not None ]), 9) self.assertEqual([ int(result.pk) for result in results._result_cache if result is not None ], [1, 2, 3, 4, 5, 6, 7, 8, 10]) self.assertEqual(len(connections['default'].queries), 2) results._fill_cache(10, 20) self.assertEqual( len([ result for result in results._result_cache if result is not None ]), 17) self.assertEqual([ int(result.pk) for result in results._result_cache if result is not None ], [1, 2, 3, 4, 5, 6, 7, 8, 10, 11, 12, 15, 16, 17, 18, 19, 20]) self.assertEqual(len(connections['default'].queries), 4) results._fill_cache(20, 30) self.assertEqual( len([ result for result in results._result_cache if result is not None ]), 20) self.assertEqual([ int(result.pk) for result in results._result_cache if result is not None ], [ 1, 2, 3, 4, 5, 6, 7, 8, 10, 11, 12, 15, 16, 17, 18, 19, 20, 21, 22, 23 ]) self.assertEqual(len(connections['default'].queries), 6) def test_cache_is_full(self): reset_search_queries() self.assertEqual(len(connections['default'].queries), 0) self.assertEqual(self.msqs._cache_is_full(), False) results = self.msqs.all() fire_the_iterator_and_fill_cache = [result for result in results] self.assertEqual(results._cache_is_full(), True) self.assertEqual(len(connections['default'].queries), 3) def test_all(self): sqs = self.msqs.all() self.assertTrue(isinstance(sqs, SearchQuerySet)) def test_filter(self): sqs = self.msqs.filter(content='foo') self.assertTrue(isinstance(sqs, SearchQuerySet)) self.assertEqual(len(sqs.query.query_filter), 1) def test_exclude(self): sqs = 
self.msqs.exclude(content='foo') self.assertTrue(isinstance(sqs, SearchQuerySet)) self.assertEqual(len(sqs.query.query_filter), 1) def test_order_by(self): sqs = self.msqs.order_by('foo') self.assertTrue(isinstance(sqs, SearchQuerySet)) self.assertTrue('foo' in sqs.query.order_by) def test_models(self): # Stow. old_unified_index = connections['default']._index ui = UnifiedIndex() bmmsi = BasicMockModelSearchIndex() bammsi = BasicAnotherMockModelSearchIndex() ui.build(indexes=[bmmsi, bammsi]) connections['default']._index = ui msqs = SearchQuerySet() sqs = msqs.all() self.assertTrue(isinstance(sqs, SearchQuerySet)) self.assertEqual(len(sqs.query.models), 0) sqs = msqs.models(MockModel) self.assertTrue(isinstance(sqs, SearchQuerySet)) self.assertEqual(len(sqs.query.models), 1) sqs = msqs.models(MockModel, AnotherMockModel) self.assertTrue(isinstance(sqs, SearchQuerySet)) self.assertEqual(len(sqs.query.models), 2) # This will produce a warning. ui.build(indexes=[bmmsi]) sqs = msqs.models(AnotherMockModel) self.assertTrue(isinstance(sqs, SearchQuerySet)) self.assertEqual(len(sqs.query.models), 1) def test_result_class(self): sqs = self.msqs.all() self.assertTrue(issubclass(sqs.query.result_class, SearchResult)) # Custom class. class IttyBittyResult(object): pass sqs = self.msqs.result_class(IttyBittyResult) self.assertTrue(issubclass(sqs.query.result_class, IttyBittyResult)) # Reset to default. sqs = self.msqs.result_class(None) self.assertTrue(issubclass(sqs.query.result_class, SearchResult)) def test_boost(self): sqs = self.msqs.boost('foo', 10) self.assertTrue(isinstance(sqs, SearchQuerySet)) self.assertEqual(len(sqs.query.boost.keys()), 1) def test_highlight(self): sqs = self.msqs.highlight() self.assertTrue(isinstance(sqs, SearchQuerySet)) self.assertEqual(sqs.query.highlight, True) def test_spelling(self): # Test the case where spelling support is disabled. 
sqs = self.msqs.filter(content='Indx') self.assertTrue(isinstance(sqs, SearchQuerySet)) self.assertEqual(sqs.spelling_suggestion(), None) self.assertEqual(sqs.spelling_suggestion('indexy'), None) def test_raw_search(self): self.assertEqual(len(self.msqs.raw_search('foo')), 23) self.assertEqual( len( self.msqs.raw_search( '(content__exact:hello AND content__exact:world)')), 23) def test_load_all(self): # Models with character primary keys. sqs = SearchQuerySet() sqs.query.backend = CharPKMockSearchBackend('charpk') results = sqs.load_all().all() self.assertEqual(len(results._result_cache), 0) results._fill_cache(0, 2) self.assertEqual( len([ result for result in results._result_cache if result is not None ]), 2) # If nothing is handled, you get nothing. old_ui = connections['default']._index ui = UnifiedIndex() ui.build(indexes=[]) connections['default']._index = ui sqs = self.msqs.load_all() self.assertTrue(isinstance(sqs, SearchQuerySet)) self.assertEqual(len(sqs), 0) connections['default']._index = old_ui # For full tests, see the solr_backend. def test_load_all_read_queryset(self): # Stow. 
old_ui = connections['default']._index ui = UnifiedIndex() gafmmsi = GhettoAFifthMockModelSearchIndex() ui.build(indexes=[gafmmsi]) connections['default']._index = ui gafmmsi.update() sqs = SearchQuerySet() results = sqs.load_all().all() results.query.backend = ReadQuerySetMockSearchBackend('default') results._fill_cache(0, 2) # The deleted result isn't returned self.assertEqual( len([ result for result in results._result_cache if result is not None ]), 1) # Register a SearchIndex with a read_queryset that returns deleted items rqstsi = TextReadQuerySetTestSearchIndex() ui.build(indexes=[rqstsi]) rqstsi.update() sqs = SearchQuerySet() results = sqs.load_all().all() results.query.backend = ReadQuerySetMockSearchBackend('default') results._fill_cache(0, 2) # Both the deleted and not deleted items are returned self.assertEqual( len([ result for result in results._result_cache if result is not None ]), 2) # Restore. connections['default']._index = old_ui def test_auto_query(self): sqs = self.msqs.auto_query('test search -stuff') self.assertTrue(isinstance(sqs, SearchQuerySet)) self.assertEqual(repr(sqs.query.query_filter), '<SQ: AND content__contains=test search -stuff>') sqs = self.msqs.auto_query('test "my thing" search -stuff') self.assertTrue(isinstance(sqs, SearchQuerySet)) self.assertEqual( repr(sqs.query.query_filter), '<SQ: AND content__contains=test "my thing" search -stuff>') sqs = self.msqs.auto_query( 'test "my thing" search \'moar quotes\' -stuff') self.assertTrue(isinstance(sqs, SearchQuerySet)) self.assertEqual( repr(sqs.query.query_filter), '<SQ: AND content__contains=test "my thing" search \'moar quotes\' -stuff>' ) sqs = self.msqs.auto_query( 'test "my thing" search \'moar quotes\' "foo -stuff') self.assertTrue(isinstance(sqs, SearchQuerySet)) self.assertEqual( repr(sqs.query.query_filter), '<SQ: AND content__contains=test "my thing" search \'moar quotes\' "foo -stuff>' ) sqs = self.msqs.auto_query('test - stuff') self.assertTrue(isinstance(sqs, 
SearchQuerySet)) self.assertEqual(repr(sqs.query.query_filter), "<SQ: AND content__contains=test - stuff>") # Ensure bits in exact matches get escaped properly as well. sqs = self.msqs.auto_query('"pants:rule"') self.assertTrue(isinstance(sqs, SearchQuerySet)) self.assertEqual(repr(sqs.query.query_filter), '<SQ: AND content__contains="pants:rule">') # Now with a different fieldname sqs = self.msqs.auto_query('test search -stuff', fieldname='title') self.assertTrue(isinstance(sqs, SearchQuerySet)) self.assertEqual(repr(sqs.query.query_filter), "<SQ: AND title__contains=test search -stuff>") sqs = self.msqs.auto_query('test "my thing" search -stuff', fieldname='title') self.assertTrue(isinstance(sqs, SearchQuerySet)) self.assertEqual( repr(sqs.query.query_filter), '<SQ: AND title__contains=test "my thing" search -stuff>') def test_count(self): self.assertEqual(self.msqs.count(), 23) def test_facet_counts(self): self.assertEqual(self.msqs.facet_counts(), {}) def test_best_match(self): self.assertTrue(isinstance(self.msqs.best_match(), SearchResult)) def test_latest(self): self.assertTrue(isinstance(self.msqs.latest('pub_date'), SearchResult)) def test_more_like_this(self): mock = MockModel() mock.id = 1 self.assertEqual(len(self.msqs.more_like_this(mock)), 23) def test_facets(self): sqs = self.msqs.facet('foo') self.assertTrue(isinstance(sqs, SearchQuerySet)) self.assertEqual(len(sqs.query.facets), 1) sqs2 = self.msqs.facet('foo').facet('bar') self.assertTrue(isinstance(sqs2, SearchQuerySet)) self.assertEqual(len(sqs2.query.facets), 2) def test_date_facets(self): try: sqs = self.msqs.date_facet('foo', start_date=datetime.date(2008, 2, 25), end_date=datetime.date(2009, 2, 25), gap_by='smarblaph') self.fail() except FacetingError as e: self.assertEqual( str(e), "The gap_by ('smarblaph') must be one of the following: year, month, day, hour, minute, second." 
) sqs = self.msqs.date_facet('foo', start_date=datetime.date(2008, 2, 25), end_date=datetime.date(2009, 2, 25), gap_by='month') self.assertTrue(isinstance(sqs, SearchQuerySet)) self.assertEqual(len(sqs.query.date_facets), 1) sqs2 = self.msqs.date_facet('foo', start_date=datetime.date(2008, 2, 25), end_date=datetime.date(2009, 2, 25), gap_by='month').date_facet( 'bar', start_date=datetime.date(2007, 2, 25), end_date=datetime.date(2009, 2, 25), gap_by='year') self.assertTrue(isinstance(sqs2, SearchQuerySet)) self.assertEqual(len(sqs2.query.date_facets), 2) def test_query_facets(self): sqs = self.msqs.query_facet('foo', '[bar TO *]') self.assertTrue(isinstance(sqs, SearchQuerySet)) self.assertEqual(len(sqs.query.query_facets), 1) sqs2 = self.msqs.query_facet('foo', '[bar TO *]').query_facet( 'bar', '[100 TO 499]') self.assertTrue(isinstance(sqs2, SearchQuerySet)) self.assertEqual(len(sqs2.query.query_facets), 2) # Test multiple query facets on a single field sqs3 = self.msqs.query_facet('foo', '[bar TO *]').query_facet( 'bar', '[100 TO 499]').query_facet('foo', '[1000 TO 1499]') self.assertTrue(isinstance(sqs3, SearchQuerySet)) self.assertEqual(len(sqs3.query.query_facets), 3) def test_stats(self): sqs = self.msqs.stats_facet('foo', 'bar') self.assertTrue(isinstance(sqs, SearchQuerySet)) self.assertEqual(len(sqs.query.stats), 1) sqs2 = self.msqs.stats_facet('foo', 'bar').stats_facet('foo', 'baz') self.assertTrue(isinstance(sqs2, SearchQuerySet)) self.assertEqual(len(sqs2.query.stats), 1) sqs3 = self.msqs.stats_facet('foo', 'bar').stats_facet('moof', 'baz') self.assertTrue(isinstance(sqs3, SearchQuerySet)) self.assertEqual(len(sqs3.query.stats), 2) def test_narrow(self): sqs = self.msqs.narrow('foo:moof') self.assertTrue(isinstance(sqs, SearchQuerySet)) self.assertEqual(len(sqs.query.narrow_queries), 1) def test_clone(self): results = self.msqs.filter(foo='bar', foo__lt='10') clone = results._clone() self.assertTrue(isinstance(clone, SearchQuerySet)) 
self.assertEqual(str(clone.query), str(results.query)) self.assertEqual(clone._result_cache, []) self.assertEqual(clone._result_count, None) self.assertEqual(clone._cache_full, False) self.assertEqual(clone._using, results._using) def test_using(self): sqs = SearchQuerySet(using='default') self.assertNotEqual(sqs.query, None) self.assertEqual(sqs.query._using, 'default') def test_chaining(self): sqs = self.msqs.filter(content='foo') self.assertTrue(isinstance(sqs, SearchQuerySet)) self.assertEqual(len(sqs.query.query_filter), 1) # A second instance should inherit none of the changes from above. sqs = self.msqs.filter(content='bar') self.assertTrue(isinstance(sqs, SearchQuerySet)) self.assertEqual(len(sqs.query.query_filter), 1) def test_none(self): sqs = self.msqs.none() self.assertTrue(isinstance(sqs, EmptySearchQuerySet)) self.assertEqual(len(sqs), 0) def test___and__(self): sqs1 = self.msqs.filter(content='foo') sqs2 = self.msqs.filter(content='bar') sqs = sqs1 & sqs2 self.assertTrue(isinstance(sqs, SearchQuerySet)) self.assertEqual(len(sqs.query.query_filter), 2) def test___or__(self): sqs1 = self.msqs.filter(content='foo') sqs2 = self.msqs.filter(content='bar') sqs = sqs1 | sqs2 self.assertTrue(isinstance(sqs, SearchQuerySet)) self.assertEqual(len(sqs.query.query_filter), 2) def test_and_or(self): """ Combining AND queries with OR should give AND(OR(a, b), OR(c, d)) """ sqs1 = self.msqs.filter(content='foo').filter(content='oof') sqs2 = self.msqs.filter(content='bar').filter(content='rab') sqs = sqs1 | sqs2 self.assertEqual(sqs.query.query_filter.connector, 'OR') self.assertEqual(repr(sqs.query.query_filter.children[0]), repr(sqs1.query.query_filter)) self.assertEqual(repr(sqs.query.query_filter.children[1]), repr(sqs2.query.query_filter)) def test_or_and(self): """ Combining OR queries with AND should give OR(AND(a, b), AND(c, d)) """ sqs1 = self.msqs.filter(content='foo').filter_or(content='oof') sqs2 = 
self.msqs.filter(content='bar').filter_or(content='rab') sqs = sqs1 & sqs2 self.assertEqual(sqs.query.query_filter.connector, 'AND') self.assertEqual(repr(sqs.query.query_filter.children[0]), repr(sqs1.query.query_filter)) self.assertEqual(repr(sqs.query.query_filter.children[1]), repr(sqs2.query.query_filter))
class SearchQuerySetTestCase(TestCase):
    """Exercises SearchQuerySet against the dummy and mock search backends.

    Fixes over the previous revision: the Python 2-only ``except X, e``
    syntax (a SyntaxError on Python 3, and inconsistent with the ``as e``
    form used elsewhere in this file) is now ``except X as e``, and the
    deprecated ``self.assert_`` alias is replaced with ``assertTrue``.
    Assertion values and statement order are unchanged.
    """

    def setUp(self):
        super(SearchQuerySetTestCase, self).setUp()
        # Three querysets: a no-op dummy, a plain mock, and a "mixed" mock
        # whose backend returns fewer results than its hit count claims.
        self.bsqs = SearchQuerySet(query=DummySearchQuery(backend=DummySearchBackend()))
        self.msqs = SearchQuerySet(query=MockSearchQuery(backend=MockSearchBackend()))
        self.mmsqs = SearchQuerySet(query=MockSearchQuery(backend=MixedMockSearchBackend()))

        # Stow.
        self.old_debug = settings.DEBUG
        settings.DEBUG = True
        self.old_site = haystack.site
        test_site = SearchSite()
        test_site.register(MockModel)
        test_site.register(CharPKMockModel)
        haystack.site = test_site

        backends.reset_search_queries()

    def tearDown(self):
        # Restore.
        haystack.site = self.old_site
        settings.DEBUG = self.old_debug
        super(SearchQuerySetTestCase, self).tearDown()

    def test_len(self):
        # Dummy always returns 0.
        self.assertEqual(len(self.bsqs), 0)
        self.assertEqual(len(self.msqs), 100)

    def test_repr(self):
        self.assertEqual(repr(self.bsqs), '[]')
        backends.reset_search_queries()
        self.assertEqual(len(backends.queries), 0)
        self.assertEqual(repr(self.msqs), "[<SearchResult: core.MockModel (pk=0)>, <SearchResult: core.MockModel (pk=1)>, <SearchResult: core.MockModel (pk=2)>, <SearchResult: core.MockModel (pk=3)>, <SearchResult: core.MockModel (pk=4)>, <SearchResult: core.MockModel (pk=5)>, <SearchResult: core.MockModel (pk=6)>, <SearchResult: core.MockModel (pk=7)>, <SearchResult: core.MockModel (pk=8)>, <SearchResult: core.MockModel (pk=9)>, <SearchResult: core.MockModel (pk=10)>, <SearchResult: core.MockModel (pk=11)>, <SearchResult: core.MockModel (pk=12)>, <SearchResult: core.MockModel (pk=13)>, <SearchResult: core.MockModel (pk=14)>, <SearchResult: core.MockModel (pk=15)>, <SearchResult: core.MockModel (pk=16)>, <SearchResult: core.MockModel (pk=17)>, <SearchResult: core.MockModel (pk=18)>, '...(remaining elements truncated)...']")
        self.assertEqual(len(backends.queries), 1)

    def test_iter(self):
        # Dummy always returns [].
        self.assertEqual([result for result in self.bsqs.all()], [])

        backends.reset_search_queries()
        self.assertEqual(len(backends.queries), 0)
        msqs = self.msqs.all()
        results = [result for result in msqs]
        self.assertEqual(results, MOCK_SEARCH_RESULTS)
        self.assertEqual(len(backends.queries), 10)

    def test_slice(self):
        backends.reset_search_queries()
        self.assertEqual(len(backends.queries), 0)
        results = self.msqs.all()
        self.assertEqual(results[1:11], MOCK_SEARCH_RESULTS[1:11])
        self.assertEqual(len(backends.queries), 1)

        backends.reset_search_queries()
        self.assertEqual(len(backends.queries), 0)
        results = self.msqs.all()
        self.assertEqual(results[50], MOCK_SEARCH_RESULTS[50])
        self.assertEqual(len(backends.queries), 1)

    def test_manual_iter(self):
        results = self.msqs.all()

        backends.reset_search_queries()
        self.assertEqual(len(backends.queries), 0)

        for offset, result in enumerate(results._manual_iter()):
            self.assertEqual(result, MOCK_SEARCH_RESULTS[offset])

        self.assertEqual(len(backends.queries), 10)

        backends.reset_search_queries()
        self.assertEqual(len(backends.queries), 0)

        # Test to ensure we properly fill the cache, even if we get fewer
        # results back (not in the SearchSite) than the hit count indicates.
        # This will hang indefinitely if broken.
        results = self.mmsqs.all()
        loaded = [result.pk for result in results._manual_iter()]
        self.assertEqual(loaded, [0, 1, 2, 3, 4, 5, 6, 7, 8, 10, 11, 12, 15, 16, 17, 18, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29])
        self.assertEqual(len(backends.queries), 8)

    def test_fill_cache(self):
        backends.reset_search_queries()
        self.assertEqual(len(backends.queries), 0)
        results = self.msqs.all()
        self.assertEqual(len(results._result_cache), 0)
        self.assertEqual(len(backends.queries), 0)
        results._fill_cache(0, 10)
        self.assertEqual(len([result for result in results._result_cache if result is not None]), 10)
        self.assertEqual(len(backends.queries), 1)
        results._fill_cache(10, 20)
        self.assertEqual(len([result for result in results._result_cache if result is not None]), 20)
        self.assertEqual(len(backends.queries), 2)

        backends.reset_search_queries()
        self.assertEqual(len(backends.queries), 0)

        # Test to ensure we properly fill the cache, even if we get fewer
        # results back (not in the SearchSite) than the hit count indicates.
        results = self.mmsqs.all()
        self.assertEqual(len([result for result in results._result_cache if result is not None]), 0)
        self.assertEqual([result.pk for result in results._result_cache if result is not None], [])
        self.assertEqual(len(backends.queries), 0)
        results._fill_cache(0, 10)
        self.assertEqual(len([result for result in results._result_cache if result is not None]), 9)
        self.assertEqual([result.pk for result in results._result_cache if result is not None], [0, 1, 2, 3, 4, 5, 6, 7, 8])
        self.assertEqual(len(backends.queries), 2)
        results._fill_cache(10, 20)
        self.assertEqual(len([result for result in results._result_cache if result is not None]), 17)
        self.assertEqual([result.pk for result in results._result_cache if result is not None], [0, 1, 2, 3, 4, 5, 6, 7, 8, 10, 11, 12, 15, 16, 17, 18, 19])
        self.assertEqual(len(backends.queries), 4)
        results._fill_cache(20, 30)
        self.assertEqual(len([result for result in results._result_cache if result is not None]), 27)
        self.assertEqual([result.pk for result in results._result_cache if result is not None], [0, 1, 2, 3, 4, 5, 6, 7, 8, 10, 11, 12, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29])
        self.assertEqual(len(backends.queries), 6)

    def test_cache_is_full(self):
        # Dummy always has a count of 0 and an empty _result_cache, hence True.
        self.assertEqual(self.bsqs._cache_is_full(), False)
        results = self.bsqs.all()
        fire_the_iterator_and_fill_cache = [result for result in results]
        self.assertEqual(results._cache_is_full(), True)

        backends.reset_search_queries()
        self.assertEqual(len(backends.queries), 0)
        self.assertEqual(self.msqs._cache_is_full(), False)
        results = self.msqs.all()
        fire_the_iterator_and_fill_cache = [result for result in results]
        self.assertEqual(results._cache_is_full(), True)
        self.assertEqual(len(backends.queries), 10)

    def test_all(self):
        sqs = self.bsqs.all()
        self.assertTrue(isinstance(sqs, SearchQuerySet))

    def test_filter(self):
        sqs = self.bsqs.filter(content='foo')
        self.assertTrue(isinstance(sqs, SearchQuerySet))
        self.assertEqual(len(sqs.query.query_filter), 1)

    def test_exclude(self):
        sqs = self.bsqs.exclude(content='foo')
        self.assertTrue(isinstance(sqs, SearchQuerySet))
        self.assertEqual(len(sqs.query.query_filter), 1)

    def test_order_by(self):
        sqs = self.bsqs.order_by('foo')
        self.assertTrue(isinstance(sqs, SearchQuerySet))
        self.assertTrue('foo' in sqs.query.order_by)

    def test_models(self):
        mock_index_site = SearchSite()
        mock_index_site.register(MockModel)
        mock_index_site.register(AnotherMockModel)

        bsqs = SearchQuerySet(site=mock_index_site)

        sqs = bsqs.all()
        self.assertTrue(isinstance(sqs, SearchQuerySet))
        self.assertEqual(len(sqs.query.models), 0)

        sqs = bsqs.models(MockModel)
        self.assertTrue(isinstance(sqs, SearchQuerySet))
        self.assertEqual(len(sqs.query.models), 1)

        sqs = bsqs.models(MockModel, AnotherMockModel)
        self.assertTrue(isinstance(sqs, SearchQuerySet))
        self.assertEqual(len(sqs.query.models), 2)

        # This will produce a warning.
        mock_index_site.unregister(AnotherMockModel)
        sqs = bsqs.models(AnotherMockModel)
        self.assertTrue(isinstance(sqs, SearchQuerySet))
        self.assertEqual(len(sqs.query.models), 1)

    def test_result_class(self):
        sqs = self.bsqs.all()
        self.assertTrue(issubclass(sqs.query.result_class, SearchResult))

        # Custom class.
        class IttyBittyResult(object):
            pass

        sqs = self.bsqs.result_class(IttyBittyResult)
        self.assertTrue(issubclass(sqs.query.result_class, IttyBittyResult))

        # Reset to default.
        sqs = self.bsqs.result_class(None)
        self.assertTrue(issubclass(sqs.query.result_class, SearchResult))

    def test_boost(self):
        sqs = self.bsqs.boost('foo', 10)
        self.assertTrue(isinstance(sqs, SearchQuerySet))
        self.assertEqual(len(sqs.query.boost.keys()), 1)

    def test_highlight(self):
        sqs = self.bsqs.highlight()
        self.assertTrue(isinstance(sqs, SearchQuerySet))
        self.assertEqual(sqs.query.highlight, True)

    def test_spelling(self):
        # Test the case where spelling support is disabled.
        sqs = self.bsqs.filter(content='Indx')
        self.assertTrue(isinstance(sqs, SearchQuerySet))
        self.assertEqual(sqs.spelling_suggestion(), None)
        self.assertEqual(sqs.spelling_suggestion('indexy'), None)

    def test_raw_search(self):
        self.assertEqual(len(self.bsqs.raw_search('foo')), 0)
        self.assertEqual(len(self.bsqs.raw_search('(content__exact hello AND content__exact world)')), 1)

    def test_load_all(self):
        # Models with character primary keys
        sqs = SearchQuerySet(query=MockSearchQuery(backend=CharPKMockSearchBackend()))
        results = sqs.load_all().all()
        self.assertEqual(len(results._result_cache), 0)
        results._fill_cache(0, 2)
        self.assertEqual(len([result for result in results._result_cache if result is not None]), 2)

        # If nothing is registered, you get nothing.
        haystack.site.unregister(MockModel)
        haystack.site.unregister(CharPKMockModel)
        sqs = self.msqs.load_all()
        self.assertTrue(isinstance(sqs, SearchQuerySet))
        self.assertEqual(len(sqs), 0)

        # For full tests, see the solr_backend.

    def test_auto_query(self):
        sqs = self.bsqs.auto_query('test search -stuff')
        self.assertTrue(isinstance(sqs, SearchQuerySet))
        self.assertEqual(repr(sqs.query.query_filter), '<SQ: AND (content__exact=test AND content__exact=search AND NOT (content__exact=stuff))>')

        sqs = self.bsqs.auto_query('test "my thing" search -stuff')
        self.assertTrue(isinstance(sqs, SearchQuerySet))
        self.assertEqual(repr(sqs.query.query_filter), '<SQ: AND (content__exact=my thing AND content__exact=test AND content__exact=search AND NOT (content__exact=stuff))>')

        sqs = self.bsqs.auto_query('test "my thing" search \'moar quotes\' -stuff')
        self.assertTrue(isinstance(sqs, SearchQuerySet))
        self.assertEqual(repr(sqs.query.query_filter), "<SQ: AND (content__exact=my thing AND content__exact=test AND content__exact=search AND content__exact='moar AND content__exact=quotes' AND NOT (content__exact=stuff))>")

        sqs = self.bsqs.auto_query('test "my thing" search \'moar quotes\' "foo -stuff')
        self.assertTrue(isinstance(sqs, SearchQuerySet))
        self.assertEqual(repr(sqs.query.query_filter), '<SQ: AND (content__exact=my thing AND content__exact=test AND content__exact=search AND content__exact=\'moar AND content__exact=quotes\' AND content__exact="foo AND NOT (content__exact=stuff))>')

        sqs = self.bsqs.auto_query('test - stuff')
        self.assertTrue(isinstance(sqs, SearchQuerySet))
        self.assertEqual(repr(sqs.query.query_filter), '<SQ: AND (content__exact=test AND content__exact=- AND content__exact=stuff)>')

        # Ensure bits in exact matches get escaped properly as well.
        sqs = self.bsqs.auto_query('"pants:rule"')
        self.assertTrue(isinstance(sqs, SearchQuerySet))
        self.assertEqual(repr(sqs.query.query_filter), '<SQ: AND content__exact=pants:rule>')

    def test_count(self):
        self.assertEqual(self.bsqs.count(), 0)

    def test_facet_counts(self):
        self.assertEqual(self.bsqs.facet_counts(), {})

    def test_best_match(self):
        self.assertTrue(isinstance(self.msqs.best_match(), SearchResult))

    def test_latest(self):
        self.assertTrue(isinstance(self.msqs.latest('pub_date'), SearchResult))

    def test_more_like_this(self):
        mock = MockModel()
        mock.id = 1

        self.assertEqual(len(self.msqs.more_like_this(mock)), 100)

    def test_facets(self):
        sqs = self.bsqs.facet('foo')
        self.assertTrue(isinstance(sqs, SearchQuerySet))
        self.assertEqual(len(sqs.query.facets), 1)

        sqs2 = self.bsqs.facet('foo').facet('bar')
        self.assertTrue(isinstance(sqs2, SearchQuerySet))
        self.assertEqual(len(sqs2.query.facets), 2)

    def test_date_facets(self):
        # An unknown gap_by must raise FacetingError with a helpful message.
        try:
            sqs = self.bsqs.date_facet('foo', start_date=datetime.date(2008, 2, 25), end_date=datetime.date(2009, 2, 25), gap_by='smarblaph')
            self.fail()
        except FacetingError as e:
            self.assertEqual(str(e), "The gap_by ('smarblaph') must be one of the following: year, month, day, hour, minute, second.")

        sqs = self.bsqs.date_facet('foo', start_date=datetime.date(2008, 2, 25), end_date=datetime.date(2009, 2, 25), gap_by='month')
        self.assertTrue(isinstance(sqs, SearchQuerySet))
        self.assertEqual(len(sqs.query.date_facets), 1)

        sqs2 = self.bsqs.date_facet('foo', start_date=datetime.date(2008, 2, 25), end_date=datetime.date(2009, 2, 25), gap_by='month').date_facet('bar', start_date=datetime.date(2007, 2, 25), end_date=datetime.date(2009, 2, 25), gap_by='year')
        self.assertTrue(isinstance(sqs2, SearchQuerySet))
        self.assertEqual(len(sqs2.query.date_facets), 2)
def _students_for_list(request, student_list, student_list_id, recruiter):
    """Resolve a named student list to a SearchQuerySet of Students.

    ``student_list`` is the display name chosen in the UI; ``student_list_id``
    is the Event/ResumeBook pk backing the event- and book-based lists.
    Raises Http404 when an event-backed list references a missing event.
    """
    if student_list == student_enums.GENERAL_STUDENT_LISTS[0][1]:
        # "All students": query the search index directly.
        return SearchQuerySet().models(Student).filter(visible=True)

    if student_list == student_enums.GENERAL_STUDENT_LISTS[1][1]:
        students = recruiter.employer.starred_students.all()
    elif student_list == student_enums.GENERAL_STUDENT_LISTS[2][1]:
        # The recruiter's current (undelivered) resume book, created on demand.
        try:
            resume_book = ResumeBook.objects.get(recruiter=recruiter, delivered=False)
        except ResumeBook.DoesNotExist:
            resume_book = ResumeBook.objects.create(recruiter=recruiter)
        students = resume_book.students.visible()
    else:
        parts = student_list.split(" ")
        # Event-backed lists: "... RSVPs", "... Attendees", "... Resume Drop".
        if parts[-1] == "RSVPs" or parts[-1] == "Attendees" or \
                (parts[-1] == "Drop" and parts[-2] == "Resume"):
            try:
                event = Event.objects.get(id=student_list_id)
            except (Event.DoesNotExist, ValueError):
                # Narrowed from a bare except: missing or malformed id -> 404.
                raise Http404
            if parts[-1] == "RSVPs":
                students = Student.objects.visible().filter(
                    rsvp__in=event.rsvp_set.filter(attending=True),
                    profile_created=True)
            elif parts[-1] == "Attendees":
                students = Student.objects.visible().filter(
                    attendee__in=event.attendee_set.all(),
                    profile_created=True)
            else:
                # Only remaining event case: "Resume Drop".
                students = Student.objects.visible().filter(
                    droppedresume__in=event.droppedresume_set.all(),
                    profile_created=True)
        else:
            # A saved resume book selected by id.
            students = ResumeBook.objects.get(id=student_list_id).students.visible()

    # Mirror the DB-backed list into the search index so the score/keyword
    # filters can be applied uniformly to every list type.
    return SearchQuerySet().models(Student).filter(
        obj_id__in=[student.id for student in students])


def _apply_student_filters(request, students):
    """Apply the GET-supplied screening filters to ``students``.

    Returns ``(students, am_filtering)`` where ``am_filtering`` records
    whether at least one filter was applied. Replaces the deprecated
    ``QueryDict.has_key`` calls with ``in`` and collapses the copy-pasted
    per-field filters into data-driven loops.
    """
    am_filtering = False

    # Minimum-score filters: GET parameter name matches the index field name.
    for param in ("gpa", "act", "sat_t", "sat_m", "sat_v", "sat_w"):
        if param in request.GET:
            am_filtering = True
            students = students.filter(**{param + "__gte": request.GET[param]})

    # Multi-value filters: "~"-separated GET values become __in lookups.
    multi_value_params = (
        ("degree_programs", "degree_program"),
        ("graduation_years", "graduation_year"),
        ("employment_types", "looking_for"),
        ("previous_employers", "previous_employers"),
        ("industries_of_interest", "industries_of_interest"),
        ("languages", "languages"),
        ("campus_orgs", "campus_involvement"),
        ("countries_of_citizenship", "countries_of_citizenship"),
    )
    for param, field in multi_value_params:
        if param in request.GET:
            am_filtering = True
            students = students.filter(
                **{field + "__in": request.GET[param].split("~")})

    # A missing key now means "don't filter" (was an unhandled KeyError).
    if request.GET.get("older_than_21", "N") != "N":
        am_filtering = True
        students = students.filter(older_than_21=True)

    if "schools" in request.GET:
        am_filtering = True
        school_ids = request.GET["schools"].split("~")
        schools = School.objects.filter(id__in=school_ids)
        domain_names = DomainName.objects.filter(school__in=schools)
        domains = [domain_name.domain for domain_name in domain_names]
        # Match students by email domain. Note the truthiness test on
        # `students` evaluates the search queryset, as before.
        if domains and students:
            students = students.filter(
                reduce(operator.or_, (SQ(email__contains=domain) for domain in domains)))

    if "courses" in request.GET:
        am_filtering = True
        courses = request.GET["courses"].split("~")
        students = students.filter(
            SQ(first_major__in=courses) | SQ(second_major__in=courses))

    return students, am_filtering


def employer_students(request, extra_context=None):
    """Employer-facing student search view.

    AJAX requests return the context for a filtered, ordered, paginated
    page of results; non-AJAX requests return the context for the search
    page shell (forms, enums, capacity constants).
    """
    context = {}
    if request.is_ajax():
        student_list = request.GET["student_list"]
        student_list_id = request.GET["student_list_id"]
        recruiter = request.user.recruiter

        students = _students_for_list(request, student_list, student_list_id, recruiter)
        students, am_filtering = _apply_student_filters(request, students)

        if "query" in request.GET:
            students = search(students, request.GET["query"])

        # Highlight the results.
        students = students.highlight()

        results_per_page = int(request.GET["results_per_page"])
        start_index = results_per_page * (int(request.GET["page"]) - 1)
        count = students.count()
        if start_index >= count:
            start_index = 0
        ordered_results = order_results(students, request)[start_index:start_index + results_per_page]

        ordered_result_objects = []
        for search_result in ordered_results:
            if search_result.highlighted:
                highlight = search_result.highlighted["text"][0]
            else:
                highlight = ""
            ordered_result_objects.append((search_result.object, highlight))

        # Pad to the full hit count so the paginator reports every page
        # while only the current page's slice is populated.
        padded_ordered_results = [""] * count
        for offset, result_object in enumerate(ordered_result_objects):
            padded_ordered_results[start_index + offset] = result_object

        paginator = DiggPaginator(padded_ordered_results, results_per_page, body=3, padding=1, margin=2)
        context["filtering"] = am_filtering
        try:
            page = paginator.page(request.GET["page"])
        except EmptyPage:
            page = paginator.page(1)
        context["page"] = page
        context["results"] = process_results(request.user.recruiter, request.META["has_at_least_premium"], page)
        context["current_student_list"] = request.GET["student_list"]
        context["total_results_num"] = count

        # TODO: per-request writes are a poor way to collect statistics.
        if request.user.recruiter.employer.name != "Umeqo":
            for (student, highlighted_text, is_in_resume_book, is_starred,
                 comment, num_of_events_attended, visible, school) in context["results"]:
                if student:
                    student.studentstatistics.shown_in_results_count += 1
                    student.studentstatistics.save()

        resume_book = ResumeBook.objects.get(recruiter=request.user.recruiter, delivered=False)
        if len(resume_book.students.visible()) >= s.RESUME_BOOK_CAPACITY:
            context["resume_book_capacity_reached"] = True
        context["TEMPLATE"] = "employer_students_results.html"
        context.update(extra_context or {})
        return context
    else:
        page_messages = {
            "NO_STUDENTS_SELECTED_MESSAGE": messages.no_students_selected,
            "WAIT_UNTIL_RESUME_BOOK_IS_READY_MESSAGE": messages.wait_until_resume_book_is_ready,
        }
        context["page_messages"] = page_messages
        context["query"] = request.GET.get("query", "")
        # Passing the employer id to generate the appropriate student list choices.
        context["student_filtering_form"] = StudentFilteringForm(
            initial={
                "has_at_least_premium": request.META["has_at_least_premium"],
                "recruiter_id": request.user.recruiter.id,
                "ordering": request.user.recruiter.recruiterpreferences.default_student_result_ordering,
                "results_per_page": request.user.recruiter.recruiterpreferences.default_student_results_per_page,
            }
        )
        context["student_search_form"] = StudentSearchForm()
        context["added"] = employer_enums.ADDED
        context["starred"] = employer_enums.STARRED
        context["email_delivery_type"] = core_enums.EMAIL
        context["in_resume_book_student_list"] = student_enums.GENERAL_STUDENT_LISTS[2][1]
        context["resume_book_capacity"] = s.RESUME_BOOK_CAPACITY
        context["TEMPLATE"] = "employer_students.html"
        context.update(extra_context or {})
        return context
class SearchQuerySetTestCase(TestCase): def setUp(self): super(SearchQuerySetTestCase, self).setUp() self.bsqs = SearchQuerySet(query=DummySearchQuery(backend=DummySearchBackend())) self.msqs = SearchQuerySet(query=MockSearchQuery(backend=MockSearchBackend())) self.mmsqs = SearchQuerySet(query=MockSearchQuery(backend=MixedMockSearchBackend())) # Stow. self.old_site = haystack.site test_site = SearchSite() test_site.register(MockModel) haystack.site = test_site def tearDown(self): # Restore. haystack.site = self.old_site super(SearchQuerySetTestCase, self).tearDown() def test_len(self): # Dummy always returns 0. self.assertEqual(len(self.bsqs), 0) self.assertEqual(len(self.msqs), 100) def test_iter(self): # Dummy always returns []. self.assertEqual([result for result in self.bsqs.all()], []) msqs = self.msqs.all() results = [result for result in msqs] self.assertEqual(results, MOCK_SEARCH_RESULTS) def test_slice(self): self.assertEqual(self.msqs.all()[1:11], MOCK_SEARCH_RESULTS[1:11]) self.assertEqual(self.msqs.all()[50], MOCK_SEARCH_RESULTS[50]) def test_manual_iter(self): results = self.msqs.all() for offset, result in enumerate(results._manual_iter()): self.assertEqual(result, MOCK_SEARCH_RESULTS[offset]) # Test to ensure we properly fill the cache, even if we get fewer # results back (not in the SearchSite) than the hit count indicates. # This will hang indefinitely if broken. results = self.mmsqs.all() self.assertEqual([result.pk for result in results._manual_iter()], [0, 1, 2, 3, 4, 5, 6, 7, 8, 10, 11, 12, 13, 15, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29]) def test_fill_cache(self): results = self.msqs.all() self.assertEqual(len(results._result_cache), 0) results._fill_cache() self.assertEqual(len(results._result_cache), 10) results._fill_cache() self.assertEqual(len(results._result_cache), 20) # Test to ensure we properly fill the cache, even if we get fewer # results back (not in the SearchSite) than the hit count indicates. 
# NOTE(review): interior of a haystack 1.x-era SearchQuerySet test case whose
# class header lies before this chunk; the first statements continue a method
# (apparently a _fill_cache test against self.mmsqs) begun earlier. Kept
# byte-identical: these tests pin exact cache sizes and pk sequences, and they
# use APIs that only exist in that haystack version (SearchSite registration,
# haystack.site, _load_all_querysets) plus the deprecated unittest assert_().
results = self.mmsqs.all() self.assertEqual(len(results._result_cache), 0) self.assertEqual([result.pk for result in results._result_cache], []) results._fill_cache() self.assertEqual(len(results._result_cache), 10) self.assertEqual([result.pk for result in results._result_cache], [0, 1, 2, 3, 4, 5, 6, 7, 8, 10]) results._fill_cache() self.assertEqual(len(results._result_cache), 20) self.assertEqual([result.pk for result in results._result_cache], [0, 1, 2, 3, 4, 5, 6, 7, 8, 10, 11, 12, 13, 15, 17, 18, 19, 20, 21, 22]) results._fill_cache() self.assertEqual(len(results._result_cache), 27) self.assertEqual([result.pk for result in results._result_cache], [0, 1, 2, 3, 4, 5, 6, 7, 8, 10, 11, 12, 13, 15, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29]) def test_cache_is_full(self): # Dummy always has a count of 0 and an empty _result_cache, hence True. self.assertEqual(self.bsqs._cache_is_full(), True) self.assertEqual(self.msqs._cache_is_full(), False) results = self.msqs.all() fire_the_iterator_and_fill_cache = [result for result in results] self.assertEqual(results._cache_is_full(), True) def test_all(self): sqs = self.bsqs.all() self.assert_(isinstance(sqs, SearchQuerySet)) def test_filter(self): sqs = self.bsqs.filter(content='foo') self.assert_(isinstance(sqs, SearchQuerySet)) self.assertEqual(len(sqs.query.query_filters), 1) def test_exclude(self): sqs = self.bsqs.exclude(content='foo') self.assert_(isinstance(sqs, SearchQuerySet)) self.assertEqual(len(sqs.query.query_filters), 1) def test_order_by(self): sqs = self.bsqs.order_by('foo') self.assert_(isinstance(sqs, SearchQuerySet)) self.assert_('foo' in sqs.query.order_by) def test_models(self): mock_index_site = SearchSite() mock_index_site.register(MockModel) mock_index_site.register(AnotherMockModel) bsqs = SearchQuerySet(site=mock_index_site) sqs = bsqs.all() self.assert_(isinstance(sqs, SearchQuerySet)) self.assertEqual(len(sqs.query.models), 0) sqs = bsqs.models(MockModel) self.assert_(isinstance(sqs, 
SearchQuerySet)) self.assertEqual(len(sqs.query.models), 1) sqs = bsqs.models(MockModel, AnotherMockModel) self.assert_(isinstance(sqs, SearchQuerySet)) self.assertEqual(len(sqs.query.models), 2) def test_boost(self): sqs = self.bsqs.boost('foo', 10) self.assert_(isinstance(sqs, SearchQuerySet)) self.assertEqual(len(sqs.query.boost.keys()), 1) def test_highlight(self): sqs = self.bsqs.highlight() self.assert_(isinstance(sqs, SearchQuerySet)) self.assertEqual(sqs.query.highlight, True) def test_spelling(self): # Test the case where spelling support is disabled. sqs = self.bsqs.filter(content='Indx') self.assert_(isinstance(sqs, SearchQuerySet)) self.assertEqual(sqs.spelling_suggestion(), None) def test_raw_search(self): self.assertEqual(len(self.bsqs.raw_search('foo')), 0) self.assertEqual(len(self.bsqs.raw_search('content__exact hello AND content__exact world')), 1) def test_load_all(self): # If nothing is registered, you get nothing. haystack.site.unregister(MockModel) sqs = self.msqs.load_all() self.assert_(isinstance(sqs, SearchQuerySet)) self.assertEqual(len(sqs), 0) # For full tests, see the solr_backend. def test_load_all_queryset(self): sqs = self.msqs.load_all() self.assertEqual(len(sqs._load_all_querysets), 0) sqs = sqs.load_all_queryset(MockModel, MockModel.objects.filter(id__gt=1)) self.assert_(isinstance(sqs, SearchQuerySet)) self.assertEqual(len(sqs._load_all_querysets), 1) # For full tests, see the solr_backend. 
def test_auto_query(self): sqs = self.bsqs.auto_query('test search -stuff') self.assert_(isinstance(sqs, SearchQuerySet)) self.assertEqual([repr(the_filter) for the_filter in sqs.query.query_filters], ['<QueryFilter: AND content__exact=test>', '<QueryFilter: AND content__exact=search>', '<QueryFilter: NOT content__exact=stuff>']) sqs = self.bsqs.auto_query('test "my thing" search -stuff') self.assert_(isinstance(sqs, SearchQuerySet)) self.assertEqual([repr(the_filter) for the_filter in sqs.query.query_filters], ['<QueryFilter: AND content__exact=my thing>', '<QueryFilter: AND content__exact=test>', '<QueryFilter: AND content__exact=search>', '<QueryFilter: NOT content__exact=stuff>']) sqs = self.bsqs.auto_query('test "my thing" search \'moar quotes\' -stuff') self.assert_(isinstance(sqs, SearchQuerySet)) self.assertEqual([repr(the_filter) for the_filter in sqs.query.query_filters], ['<QueryFilter: AND content__exact=my thing>', '<QueryFilter: AND content__exact=moar quotes>', '<QueryFilter: AND content__exact=test>', '<QueryFilter: AND content__exact=search>', '<QueryFilter: NOT content__exact=stuff>']) sqs = self.bsqs.auto_query('test "my thing" search \'moar quotes\' "foo -stuff') self.assert_(isinstance(sqs, SearchQuerySet)) self.assertEqual([repr(the_filter) for the_filter in sqs.query.query_filters], ['<QueryFilter: AND content__exact=my thing>', '<QueryFilter: AND content__exact=moar quotes>', '<QueryFilter: AND content__exact=test>', '<QueryFilter: AND content__exact=search>', '<QueryFilter: AND content__exact="foo>', '<QueryFilter: NOT content__exact=stuff>']) sqs = self.bsqs.auto_query('test - stuff') self.assert_(isinstance(sqs, SearchQuerySet)) self.assertEqual([repr(the_filter) for the_filter in sqs.query.query_filters], ['<QueryFilter: AND content__exact=test>', '<QueryFilter: AND content__exact=->', '<QueryFilter: AND content__exact=stuff>']) def test_count(self): self.assertEqual(self.bsqs.count(), 0) def test_facet_counts(self): 
self.assertEqual(self.bsqs.facet_counts(), {}) def test_best_match(self): self.assert_(isinstance(self.msqs.best_match(), SearchResult)) def test_latest(self): self.assert_(isinstance(self.msqs.latest('pub_date'), SearchResult)) def test_more_like_this(self): mock = MockModel() mock.id = 1 self.assertEqual(len(self.msqs.more_like_this(mock)), 100) def test_facets(self): sqs = self.bsqs.facet('foo') self.assert_(isinstance(sqs, SearchQuerySet)) self.assertEqual(len(sqs.query.facets), 1) sqs2 = self.bsqs.facet('foo').facet('bar') self.assert_(isinstance(sqs2, SearchQuerySet)) self.assertEqual(len(sqs2.query.facets), 2) def test_date_facets(self): sqs = self.bsqs.date_facet('foo', start_date=datetime.date(2008, 2, 25), end_date=datetime.date(2009, 2, 25), gap_by='month') self.assert_(isinstance(sqs, SearchQuerySet)) self.assertEqual(len(sqs.query.date_facets), 1) sqs2 = self.bsqs.date_facet('foo', start_date=datetime.date(2008, 2, 25), end_date=datetime.date(2009, 2, 25), gap_by='month').date_facet('bar', start_date=datetime.date(2007, 2, 25), end_date=datetime.date(2009, 2, 25), gap_by='year') self.assert_(isinstance(sqs2, SearchQuerySet)) self.assertEqual(len(sqs2.query.date_facets), 2) def test_query_facets(self): sqs = self.bsqs.query_facet('foo', '[bar TO *]') self.assert_(isinstance(sqs, SearchQuerySet)) self.assertEqual(len(sqs.query.query_facets), 1) sqs2 = self.bsqs.query_facet('foo', '[bar TO *]').query_facet('bar', '[100 TO 499]') self.assert_(isinstance(sqs2, SearchQuerySet)) self.assertEqual(len(sqs2.query.query_facets), 2) def test_narrow(self): sqs = self.bsqs.narrow('foo:moof') self.assert_(isinstance(sqs, SearchQuerySet)) self.assertEqual(len(sqs.query.narrow_queries), 1) def test_clone(self): results = self.msqs.filter(foo='bar', foo__lt='10') clone = results._clone() self.assert_(isinstance(clone, SearchQuerySet)) self.assertEqual(clone.site, results.site) self.assertEqual(str(clone.query), str(results.query)) self.assertEqual(clone._result_cache, 
[]) self.assertEqual(clone._result_count, None) self.assertEqual(clone._cache_full, False) def test_chaining(self): sqs = self.bsqs.filter(content='foo') self.assert_(isinstance(sqs, SearchQuerySet)) self.assertEqual(len(sqs.query.query_filters), 1) # A second instance should inherit none of the changes from above. sqs = self.bsqs.filter(content='bar') self.assert_(isinstance(sqs, SearchQuerySet)) self.assertEqual(len(sqs.query.query_filters), 1) def test_none(self): sqs = self.bsqs.none() self.assert_(isinstance(sqs, EmptySearchQuerySet)) self.assertEqual(len(sqs), 0)
# NOTE(review): modern haystack (2.x+) SearchQuerySet test case using the
# connections/UnifiedIndex machinery; exercises caching, iteration, slicing,
# filtering/faceting, and SQ combination. Kept byte-identical: the assertions
# pin exact backend query counts and cache contents (including char-pk and
# UUID-pk indexes), so any reordering would change what is being verified.
class SearchQuerySetTestCase(TestCase): fixtures = ['base_data.json', 'bulk_data.json'] def setUp(self): super(SearchQuerySetTestCase, self).setUp() # Stow. self.old_unified_index = connections['default']._index self.ui = UnifiedIndex() self.bmmsi = BasicMockModelSearchIndex() self.cpkmmsi = CharPKMockModelSearchIndex() self.uuidmmsi = SimpleMockUUIDModelIndex() self.ui.build(indexes=[self.bmmsi, self.cpkmmsi, self.uuidmmsi]) connections['default']._index = self.ui # Update the "index". backend = connections['default'].get_backend() backend.clear() backend.update(self.bmmsi, MockModel.objects.all()) self.msqs = SearchQuerySet() # Stow. reset_search_queries() def tearDown(self): # Restore. connections['default']._index = self.old_unified_index super(SearchQuerySetTestCase, self).tearDown() def test_len(self): self.assertEqual(len(self.msqs), 23) def test_repr(self): reset_search_queries() self.assertEqual(len(connections['default'].queries), 0) self.assertRegexpMatches(repr(self.msqs), r'^<SearchQuerySet: query=<test_haystack.mocks.MockSearchQuery object' r' at 0x[0-9A-Fa-f]+>, using=None>$') def test_iter(self): reset_search_queries() self.assertEqual(len(connections['default'].queries), 0) msqs = self.msqs.all() results = [int(res.pk) for res in iter(msqs)] self.assertEqual(results, [res.pk for res in MOCK_SEARCH_RESULTS[:23]]) self.assertEqual(len(connections['default'].queries), 3) def test_slice(self): reset_search_queries() self.assertEqual(len(connections['default'].queries), 0) results = self.msqs.all() self.assertEqual([int(res.pk) for res in results[1:11]], [res.pk for res in MOCK_SEARCH_RESULTS[1:11]]) self.assertEqual(len(connections['default'].queries), 1) reset_search_queries() self.assertEqual(len(connections['default'].queries), 0) results = self.msqs.all() self.assertEqual(int(results[22].pk), MOCK_SEARCH_RESULTS[22].pk) self.assertEqual(len(connections['default'].queries), 1) def test_manual_iter(self): results = self.msqs.all() 
reset_search_queries() self.assertEqual(len(connections['default'].queries), 0) check = [result.pk for result in results._manual_iter()] self.assertEqual(check, [u'1', u'2', u'3', u'4', u'5', u'6', u'7', u'8', u'9', u'10', u'11', u'12', u'13', u'14', u'15', u'16', u'17', u'18', u'19', u'20', u'21', u'22', u'23']) self.assertEqual(len(connections['default'].queries), 3) reset_search_queries() self.assertEqual(len(connections['default'].queries), 0) # Test to ensure we properly fill the cache, even if we get fewer # results back (not a handled model) than the hit count indicates. # This will hang indefinitely if broken. # CharPK testing old_ui = self.ui self.ui.build(indexes=[self.cpkmmsi]) connections['default']._index = self.ui self.cpkmmsi.update() results = self.msqs.all() loaded = [result.pk for result in results._manual_iter()] self.assertEqual(loaded, [u'sometext', u'1234']) self.assertEqual(len(connections['default'].queries), 1) #UUID testing self.ui.build(indexes=[self.uuidmmsi]) connections['default']._index = self.ui self.uuidmmsi.update() results = self.msqs.all() loaded = [result.pk for result in results._manual_iter()] self.assertEqual(loaded, [u'53554c58-7051-4350-bcc9-dad75eb248a9', u'77554c58-7051-4350-bcc9-dad75eb24888']) connections['default']._index = old_ui def test_cache_is_full(self): reset_search_queries() self.assertEqual(len(connections['default'].queries), 0) self.assertEqual(self.msqs._cache_is_full(), False) results = self.msqs.all() fire_the_iterator_and_fill_cache = list(results) self.assertEqual(23, len(fire_the_iterator_and_fill_cache)) self.assertEqual(results._cache_is_full(), True) self.assertEqual(len(connections['default'].queries), 4) def test_all(self): sqs = self.msqs.all() self.assertTrue(isinstance(sqs, SearchQuerySet)) def test_filter(self): sqs = self.msqs.filter(content='foo') self.assertTrue(isinstance(sqs, SearchQuerySet)) self.assertEqual(len(sqs.query.query_filter), 1) def test_exclude(self): sqs = 
self.msqs.exclude(content='foo') self.assertTrue(isinstance(sqs, SearchQuerySet)) self.assertEqual(len(sqs.query.query_filter), 1) def test_order_by(self): sqs = self.msqs.order_by('foo') self.assertTrue(isinstance(sqs, SearchQuerySet)) self.assertTrue('foo' in sqs.query.order_by) def test_models(self): # Stow. old_unified_index = connections['default']._index ui = UnifiedIndex() bmmsi = BasicMockModelSearchIndex() bammsi = BasicAnotherMockModelSearchIndex() ui.build(indexes=[bmmsi, bammsi]) connections['default']._index = ui msqs = SearchQuerySet() sqs = msqs.all() self.assertTrue(isinstance(sqs, SearchQuerySet)) self.assertEqual(len(sqs.query.models), 0) sqs = msqs.models(MockModel) self.assertTrue(isinstance(sqs, SearchQuerySet)) self.assertEqual(len(sqs.query.models), 1) sqs = msqs.models(MockModel, AnotherMockModel) self.assertTrue(isinstance(sqs, SearchQuerySet)) self.assertEqual(len(sqs.query.models), 2) # This will produce a warning. ui.build(indexes=[bmmsi]) sqs = msqs.models(AnotherMockModel) self.assertTrue(isinstance(sqs, SearchQuerySet)) self.assertEqual(len(sqs.query.models), 1) def test_result_class(self): sqs = self.msqs.all() self.assertTrue(issubclass(sqs.query.result_class, SearchResult)) # Custom class. class IttyBittyResult(object): pass sqs = self.msqs.result_class(IttyBittyResult) self.assertTrue(issubclass(sqs.query.result_class, IttyBittyResult)) # Reset to default. 
sqs = self.msqs.result_class(None) self.assertTrue(issubclass(sqs.query.result_class, SearchResult)) def test_boost(self): sqs = self.msqs.boost('foo', 10) self.assertTrue(isinstance(sqs, SearchQuerySet)) self.assertEqual(len(sqs.query.boost.keys()), 1) def test_highlight(self): sqs = self.msqs.highlight() self.assertTrue(isinstance(sqs, SearchQuerySet)) self.assertEqual(sqs.query.highlight, True) def test_spelling_override(self): sqs = self.msqs.filter(content='not the spellchecking query') self.assertEqual(sqs.query.spelling_query, None) sqs = self.msqs.set_spelling_query('override') self.assertTrue(isinstance(sqs, SearchQuerySet)) self.assertEqual(sqs.query.spelling_query, 'override') def test_spelling_suggestions(self): # Test the case where spelling support is disabled. sqs = self.msqs.filter(content='Indx') self.assertTrue(isinstance(sqs, SearchQuerySet)) self.assertEqual(sqs.spelling_suggestion(), None) self.assertEqual(sqs.spelling_suggestion('indexy'), None) def test_raw_search(self): self.assertEqual(len(self.msqs.raw_search('foo')), 23) self.assertEqual(len(self.msqs.raw_search('(content__exact:hello AND content__exact:world)')), 23) def test_load_all(self): # Models with character primary keys. sqs = SearchQuerySet() sqs.query.backend = CharPKMockSearchBackend('charpk') results = sqs.load_all().all() self.assertEqual(len(results._result_cache), 0) results._fill_cache(0, 2) self.assertEqual(len([result for result in results._result_cache if result is not None]), 2) # Models with uuid primary keys. sqs = SearchQuerySet() sqs.query.backend = UUIDMockSearchBackend('uuid') results = sqs.load_all().all() self.assertEqual(len(results._result_cache), 0) results._fill_cache(0, 2) self.assertEqual(len([result for result in results._result_cache if result is not None]), 2) # If nothing is handled, you get nothing. 
old_ui = connections['default']._index ui = UnifiedIndex() ui.build(indexes=[]) connections['default']._index = ui sqs = self.msqs.load_all() self.assertTrue(isinstance(sqs, SearchQuerySet)) self.assertEqual(len(sqs), 0) connections['default']._index = old_ui # For full tests, see the solr_backend. def test_load_all_read_queryset(self): # Stow. old_ui = connections['default']._index ui = UnifiedIndex() gafmmsi = GhettoAFifthMockModelSearchIndex() ui.build(indexes=[gafmmsi]) connections['default']._index = ui gafmmsi.update() sqs = SearchQuerySet() results = sqs.load_all().all() results.query.backend = ReadQuerySetMockSearchBackend('default') results._fill_cache(0, 2) # The deleted result isn't returned self.assertEqual(len([result for result in results._result_cache if result is not None]), 1) # Register a SearchIndex with a read_queryset that returns deleted items rqstsi = TextReadQuerySetTestSearchIndex() ui.build(indexes=[rqstsi]) rqstsi.update() sqs = SearchQuerySet() results = sqs.load_all().all() results.query.backend = ReadQuerySetMockSearchBackend('default') results._fill_cache(0, 2) # Both the deleted and not deleted items are returned self.assertEqual(len([result for result in results._result_cache if result is not None]), 2) # Restore. 
connections['default']._index = old_ui def test_auto_query(self): sqs = self.msqs.auto_query('test search -stuff') self.assertTrue(isinstance(sqs, SearchQuerySet)) self.assertEqual(repr(sqs.query.query_filter), '<SQ: AND content__content=test search -stuff>') sqs = self.msqs.auto_query('test "my thing" search -stuff') self.assertTrue(isinstance(sqs, SearchQuerySet)) self.assertEqual(repr(sqs.query.query_filter), '<SQ: AND content__content=test "my thing" search -stuff>') sqs = self.msqs.auto_query('test "my thing" search \'moar quotes\' -stuff') self.assertTrue(isinstance(sqs, SearchQuerySet)) self.assertEqual(repr(sqs.query.query_filter), '<SQ: AND content__content=test "my thing" search \'moar quotes\' -stuff>') sqs = self.msqs.auto_query('test "my thing" search \'moar quotes\' "foo -stuff') self.assertTrue(isinstance(sqs, SearchQuerySet)) self.assertEqual(repr(sqs.query.query_filter), '<SQ: AND content__content=test "my thing" search \'moar quotes\' "foo -stuff>') sqs = self.msqs.auto_query('test - stuff') self.assertTrue(isinstance(sqs, SearchQuerySet)) self.assertEqual(repr(sqs.query.query_filter), "<SQ: AND content__content=test - stuff>") # Ensure bits in exact matches get escaped properly as well. 
sqs = self.msqs.auto_query('"pants:rule"') self.assertTrue(isinstance(sqs, SearchQuerySet)) self.assertEqual(repr(sqs.query.query_filter), '<SQ: AND content__content="pants:rule">') # Now with a different fieldname sqs = self.msqs.auto_query('test search -stuff', fieldname='title') self.assertTrue(isinstance(sqs, SearchQuerySet)) self.assertEqual(repr(sqs.query.query_filter), "<SQ: AND title__content=test search -stuff>") sqs = self.msqs.auto_query('test "my thing" search -stuff', fieldname='title') self.assertTrue(isinstance(sqs, SearchQuerySet)) self.assertEqual(repr(sqs.query.query_filter), '<SQ: AND title__content=test "my thing" search -stuff>') def test_count(self): self.assertEqual(self.msqs.count(), 23) def test_facet_counts(self): self.assertEqual(self.msqs.facet_counts(), {}) def test_best_match(self): self.assertTrue(isinstance(self.msqs.best_match(), SearchResult)) def test_latest(self): self.assertTrue(isinstance(self.msqs.latest('pub_date'), SearchResult)) def test_more_like_this(self): mock = MockModel() mock.id = 1 self.assertEqual(len(self.msqs.more_like_this(mock)), 23) def test_facets(self): sqs = self.msqs.facet('foo') self.assertTrue(isinstance(sqs, SearchQuerySet)) self.assertEqual(len(sqs.query.facets), 1) sqs2 = self.msqs.facet('foo').facet('bar') self.assertTrue(isinstance(sqs2, SearchQuerySet)) self.assertEqual(len(sqs2.query.facets), 2) def test_date_facets(self): try: sqs = self.msqs.date_facet('foo', start_date=datetime.date(2008, 2, 25), end_date=datetime.date(2009, 2, 25), gap_by='smarblaph') self.fail() except FacetingError as e: self.assertEqual(str(e), "The gap_by ('smarblaph') must be one of the following: year, month, day, hour, minute, second.") sqs = self.msqs.date_facet('foo', start_date=datetime.date(2008, 2, 25), end_date=datetime.date(2009, 2, 25), gap_by='month') self.assertTrue(isinstance(sqs, SearchQuerySet)) self.assertEqual(len(sqs.query.date_facets), 1) sqs2 = self.msqs.date_facet('foo', start_date=datetime.date(2008, 
2, 25), end_date=datetime.date(2009, 2, 25), gap_by='month').date_facet('bar', start_date=datetime.date(2007, 2, 25), end_date=datetime.date(2009, 2, 25), gap_by='year') self.assertTrue(isinstance(sqs2, SearchQuerySet)) self.assertEqual(len(sqs2.query.date_facets), 2) def test_query_facets(self): sqs = self.msqs.query_facet('foo', '[bar TO *]') self.assertTrue(isinstance(sqs, SearchQuerySet)) self.assertEqual(len(sqs.query.query_facets), 1) sqs2 = self.msqs.query_facet('foo', '[bar TO *]').query_facet('bar', '[100 TO 499]') self.assertTrue(isinstance(sqs2, SearchQuerySet)) self.assertEqual(len(sqs2.query.query_facets), 2) # Test multiple query facets on a single field sqs3 = self.msqs.query_facet('foo', '[bar TO *]').query_facet('bar', '[100 TO 499]').query_facet('foo', '[1000 TO 1499]') self.assertTrue(isinstance(sqs3, SearchQuerySet)) self.assertEqual(len(sqs3.query.query_facets), 3) def test_stats(self): sqs = self.msqs.stats_facet('foo', 'bar') self.assertTrue(isinstance(sqs, SearchQuerySet)) self.assertEqual(len(sqs.query.stats), 1) sqs2 = self.msqs.stats_facet('foo', 'bar').stats_facet('foo', 'baz') self.assertTrue(isinstance(sqs2, SearchQuerySet)) self.assertEqual(len(sqs2.query.stats), 1) sqs3 = self.msqs.stats_facet('foo', 'bar').stats_facet('moof', 'baz') self.assertTrue(isinstance(sqs3, SearchQuerySet)) self.assertEqual(len(sqs3.query.stats), 2) def test_narrow(self): sqs = self.msqs.narrow('foo:moof') self.assertTrue(isinstance(sqs, SearchQuerySet)) self.assertEqual(len(sqs.query.narrow_queries), 1) def test_clone(self): results = self.msqs.filter(foo='bar', foo__lt='10') clone = results._clone() self.assertTrue(isinstance(clone, SearchQuerySet)) self.assertEqual(str(clone.query), str(results.query)) self.assertEqual(clone._result_cache, []) self.assertEqual(clone._result_count, None) self.assertEqual(clone._cache_full, False) self.assertEqual(clone._using, results._using) def test_using(self): sqs = SearchQuerySet(using='default') 
self.assertNotEqual(sqs.query, None) self.assertEqual(sqs.query._using, 'default') def test_chaining(self): sqs = self.msqs.filter(content='foo') self.assertTrue(isinstance(sqs, SearchQuerySet)) self.assertEqual(len(sqs.query.query_filter), 1) # A second instance should inherit none of the changes from above. sqs = self.msqs.filter(content='bar') self.assertTrue(isinstance(sqs, SearchQuerySet)) self.assertEqual(len(sqs.query.query_filter), 1) def test_none(self): sqs = self.msqs.none() self.assertTrue(isinstance(sqs, EmptySearchQuerySet)) self.assertEqual(len(sqs), 0) def test___and__(self): sqs1 = self.msqs.filter(content='foo') sqs2 = self.msqs.filter(content='bar') sqs = sqs1 & sqs2 self.assertTrue(isinstance(sqs, SearchQuerySet)) self.assertEqual(len(sqs.query.query_filter), 2) def test___or__(self): sqs1 = self.msqs.filter(content='foo') sqs2 = self.msqs.filter(content='bar') sqs = sqs1 | sqs2 self.assertTrue(isinstance(sqs, SearchQuerySet)) self.assertEqual(len(sqs.query.query_filter), 2) def test_and_or(self): """ Combining AND queries with OR should give AND(OR(a, b), OR(c, d)) """ sqs1 = self.msqs.filter(content='foo').filter(content='oof') sqs2 = self.msqs.filter(content='bar').filter(content='rab') sqs = sqs1 | sqs2 self.assertEqual(sqs.query.query_filter.connector, 'OR') self.assertEqual(repr(sqs.query.query_filter.children[0]), repr(sqs1.query.query_filter)) self.assertEqual(repr(sqs.query.query_filter.children[1]), repr(sqs2.query.query_filter)) def test_or_and(self): """ Combining OR queries with AND should give OR(AND(a, b), AND(c, d)) """ sqs1 = self.msqs.filter(content='foo').filter_or(content='oof') sqs2 = self.msqs.filter(content='bar').filter_or(content='rab') sqs = sqs1 & sqs2 self.assertEqual(sqs.query.query_filter.connector, 'AND') self.assertEqual(repr(sqs.query.query_filter.children[0]), repr(sqs1.query.query_filter)) self.assertEqual(repr(sqs.query.query_filter.children[1]), repr(sqs2.query.query_filter))
class SearchQuerySetTestCase(TestCase): fixtures = ['bulk_data.json'] def setUp(self): super(SearchQuerySetTestCase, self).setUp() # Stow. self.old_unified_index = connections['default']._index self.ui = UnifiedIndex() self.bmmsi = BasicMockModelSearchIndex() self.cpkmmsi = CharPKMockModelSearchIndex() self.ui.build(indexes=[self.bmmsi, self.cpkmmsi]) connections['default']._index = self.ui # Update the "index". backend = connections['default'].get_backend() backend.clear() backend.update(self.bmmsi, MockModel.objects.all()) self.msqs = SearchQuerySet() # Stow. self.old_debug = settings.DEBUG settings.DEBUG = True reset_search_queries() def tearDown(self): # Restore. connections['default']._index = self.old_unified_index settings.DEBUG = self.old_debug super(SearchQuerySetTestCase, self).tearDown() def test_len(self): self.assertEqual(len(self.msqs), 23) def test_repr(self): reset_search_queries() self.assertEqual(len(connections['default'].queries), 0) self.assertEqual( repr(self.msqs), "[<SearchResult: core.mockmodel (pk=u'1')>, <SearchResult: core.mockmodel (pk=u'2')>, <SearchResult: core.mockmodel (pk=u'3')>, <SearchResult: core.mockmodel (pk=u'4')>, <SearchResult: core.mockmodel (pk=u'5')>, <SearchResult: core.mockmodel (pk=u'6')>, <SearchResult: core.mockmodel (pk=u'7')>, <SearchResult: core.mockmodel (pk=u'8')>, <SearchResult: core.mockmodel (pk=u'9')>, <SearchResult: core.mockmodel (pk=u'10')>, <SearchResult: core.mockmodel (pk=u'11')>, <SearchResult: core.mockmodel (pk=u'12')>, <SearchResult: core.mockmodel (pk=u'13')>, <SearchResult: core.mockmodel (pk=u'14')>, <SearchResult: core.mockmodel (pk=u'15')>, <SearchResult: core.mockmodel (pk=u'16')>, <SearchResult: core.mockmodel (pk=u'17')>, <SearchResult: core.mockmodel (pk=u'18')>, <SearchResult: core.mockmodel (pk=u'19')>, '...(remaining elements truncated)...']" ) self.assertEqual(len(connections['default'].queries), 1) def test_iter(self): reset_search_queries() 
self.assertEqual(len(connections['default'].queries), 0) msqs = self.msqs.all() results = [int(res.pk) for res in msqs] self.assertEqual(results, [res.pk for res in MOCK_SEARCH_RESULTS[:23]]) self.assertEqual(len(connections['default'].queries), 3) def test_slice(self): reset_search_queries() self.assertEqual(len(connections['default'].queries), 0) results = self.msqs.all() self.assertEqual([int(res.pk) for res in results[1:11]], [res.pk for res in MOCK_SEARCH_RESULTS[1:11]]) self.assertEqual(len(connections['default'].queries), 1) reset_search_queries() self.assertEqual(len(connections['default'].queries), 0) results = self.msqs.all() self.assertEqual(int(results[22].pk), MOCK_SEARCH_RESULTS[22].pk) self.assertEqual(len(connections['default'].queries), 1) def test_manual_iter(self): results = self.msqs.all() reset_search_queries() self.assertEqual(len(connections['default'].queries), 0) check = [result.pk for result in results._manual_iter()] self.assertEqual(check, [ u'1', u'2', u'3', u'4', u'5', u'6', u'7', u'8', u'9', u'10', u'11', u'12', u'13', u'14', u'15', u'16', u'17', u'18', u'19', u'20', u'21', u'22', u'23' ]) self.assertEqual(len(connections['default'].queries), 3) reset_search_queries() self.assertEqual(len(connections['default'].queries), 0) # Test to ensure we properly fill the cache, even if we get fewer # results back (not a handled model) than the hit count indicates. # This will hang indefinitely if broken. 
old_ui = self.ui self.ui.build(indexes=[self.cpkmmsi]) connections['default']._index = self.ui self.cpkmmsi.update() results = self.msqs.all() loaded = [result.pk for result in results._manual_iter()] self.assertEqual(loaded, [u'sometext', u'1234']) self.assertEqual(len(connections['default'].queries), 1) connections['default']._index = old_ui def test_fill_cache(self): reset_search_queries() self.assertEqual(len(connections['default'].queries), 0) results = self.msqs.all() self.assertEqual(len(results._result_cache), 0) self.assertEqual(len(connections['default'].queries), 0) results._fill_cache(0, 10) self.assertEqual( len([ result for result in results._result_cache if result is not None ]), 10) self.assertEqual(len(connections['default'].queries), 1) results._fill_cache(10, 20) self.assertEqual( len([ result for result in results._result_cache if result is not None ]), 20) self.assertEqual(len(connections['default'].queries), 2) reset_search_queries() self.assertEqual(len(connections['default'].queries), 0) # Test to ensure we properly fill the cache, even if we get fewer # results back (not a handled model) than the hit count indicates. 
sqs = SearchQuerySet().all() sqs.query.backend = MixedMockSearchBackend('default') results = sqs self.assertEqual( len([ result for result in results._result_cache if result is not None ]), 0) self.assertEqual([ int(result.pk) for result in results._result_cache if result is not None ], []) self.assertEqual(len(connections['default'].queries), 0) results._fill_cache(0, 10) self.assertEqual( len([ result for result in results._result_cache if result is not None ]), 9) self.assertEqual([ int(result.pk) for result in results._result_cache if result is not None ], [1, 2, 3, 4, 5, 6, 7, 8, 10]) self.assertEqual(len(connections['default'].queries), 2) results._fill_cache(10, 20) self.assertEqual( len([ result for result in results._result_cache if result is not None ]), 17) self.assertEqual([ int(result.pk) for result in results._result_cache if result is not None ], [1, 2, 3, 4, 5, 6, 7, 8, 10, 11, 12, 15, 16, 17, 18, 19, 20]) self.assertEqual(len(connections['default'].queries), 4) results._fill_cache(20, 30) self.assertEqual( len([ result for result in results._result_cache if result is not None ]), 20) self.assertEqual([ int(result.pk) for result in results._result_cache if result is not None ], [ 1, 2, 3, 4, 5, 6, 7, 8, 10, 11, 12, 15, 16, 17, 18, 19, 20, 21, 22, 23 ]) self.assertEqual(len(connections['default'].queries), 6) def test_cache_is_full(self): reset_search_queries() self.assertEqual(len(connections['default'].queries), 0) self.assertEqual(self.msqs._cache_is_full(), False) results = self.msqs.all() fire_the_iterator_and_fill_cache = [result for result in results] self.assertEqual(results._cache_is_full(), True) self.assertEqual(len(connections['default'].queries), 3) def test_all(self): sqs = self.msqs.all() self.assertTrue(isinstance(sqs, SearchQuerySet)) def test_filter(self): sqs = self.msqs.filter(content='foo') self.assertTrue(isinstance(sqs, SearchQuerySet)) self.assertEqual(len(sqs.query.query_filter), 1) def test_exclude(self): sqs = 
self.msqs.exclude(content='foo') self.assertTrue(isinstance(sqs, SearchQuerySet)) self.assertEqual(len(sqs.query.query_filter), 1) def test_order_by(self): sqs = self.msqs.order_by('foo') self.assertTrue(isinstance(sqs, SearchQuerySet)) self.assertTrue('foo' in sqs.query.order_by) def test_models(self): # Stow. old_unified_index = connections['default']._index ui = UnifiedIndex() bmmsi = BasicMockModelSearchIndex() bammsi = BasicAnotherMockModelSearchIndex() ui.build(indexes=[bmmsi, bammsi]) connections['default']._index = ui msqs = SearchQuerySet() sqs = msqs.all() self.assertTrue(isinstance(sqs, SearchQuerySet)) self.assertEqual(len(sqs.query.models), 0) sqs = msqs.models(MockModel) self.assertTrue(isinstance(sqs, SearchQuerySet)) self.assertEqual(len(sqs.query.models), 1) sqs = msqs.models(MockModel, AnotherMockModel) self.assertTrue(isinstance(sqs, SearchQuerySet)) self.assertEqual(len(sqs.query.models), 2) # This will produce a warning. ui.build(indexes=[bmmsi]) sqs = msqs.models(AnotherMockModel) self.assertTrue(isinstance(sqs, SearchQuerySet)) self.assertEqual(len(sqs.query.models), 1) def test_result_class(self): sqs = self.msqs.all() self.assertTrue(issubclass(sqs.query.result_class, SearchResult)) # Custom class. class IttyBittyResult(object): pass sqs = self.msqs.result_class(IttyBittyResult) self.assertTrue(issubclass(sqs.query.result_class, IttyBittyResult)) # Reset to default. sqs = self.msqs.result_class(None) self.assertTrue(issubclass(sqs.query.result_class, SearchResult)) def test_boost(self): sqs = self.msqs.boost('foo', 10) self.assertTrue(isinstance(sqs, SearchQuerySet)) self.assertEqual(len(sqs.query.boost.keys()), 1) def test_highlight(self): sqs = self.msqs.highlight() self.assertTrue(isinstance(sqs, SearchQuerySet)) self.assertEqual(sqs.query.highlight, True) def test_spelling(self): # Test the case where spelling support is disabled. 
sqs = self.msqs.filter(content='Indx') self.assertTrue(isinstance(sqs, SearchQuerySet)) self.assertEqual(sqs.spelling_suggestion(), None) self.assertEqual(sqs.spelling_suggestion('indexy'), None) def test_raw_search(self): self.assertEqual(len(self.msqs.raw_search('foo')), 23) self.assertEqual( len( self.msqs.raw_search( '(content__exact:hello AND content__exact:world)')), 23) def test_load_all(self): # Models with character primary keys. sqs = SearchQuerySet() sqs.query.backend = CharPKMockSearchBackend('charpk') results = sqs.load_all().all() self.assertEqual(len(results._result_cache), 0) results._fill_cache(0, 2) self.assertEqual( len([ result for result in results._result_cache if result is not None ]), 2) # If nothing is handled, you get nothing. old_ui = connections['default']._index ui = UnifiedIndex() ui.build(indexes=[]) connections['default']._index = ui sqs = self.msqs.load_all() self.assertTrue(isinstance(sqs, SearchQuerySet)) self.assertEqual(len(sqs), 0) connections['default']._index = old_ui # For full tests, see the solr_backend. def test_load_all_read_queryset(self): # Stow. 
old_ui = connections['default']._index ui = UnifiedIndex() gafmmsi = GhettoAFifthMockModelSearchIndex() ui.build(indexes=[gafmmsi]) connections['default']._index = ui gafmmsi.update() sqs = SearchQuerySet() results = sqs.load_all().all() results.query.backend = ReadQuerySetMockSearchBackend('default') results._fill_cache(0, 2) # The deleted result isn't returned self.assertEqual( len([ result for result in results._result_cache if result is not None ]), 1) # Register a SearchIndex with a read_queryset that returns deleted items rqstsi = TextReadQuerySetTestSearchIndex() ui.build(indexes=[rqstsi]) rqstsi.update() sqs = SearchQuerySet() results = sqs.load_all().all() results.query.backend = ReadQuerySetMockSearchBackend('default') results._fill_cache(0, 2) # Both the deleted and not deleted items are returned self.assertEqual( len([ result for result in results._result_cache if result is not None ]), 2) # Restore. connections['default']._index = old_ui def test_auto_query(self): sqs = self.msqs.auto_query('test search -stuff') self.assertTrue(isinstance(sqs, SearchQuerySet)) self.assertEqual( repr(sqs.query.query_filter), '<SQ: AND (content__contains=test AND content__contains=search AND NOT (content__contains=stuff))>' ) sqs = self.msqs.auto_query('test "my thing" search -stuff') self.assertTrue(isinstance(sqs, SearchQuerySet)) self.assertEqual( repr(sqs.query.query_filter), '<SQ: AND (content__exact=my thing AND content__contains=test AND content__contains=search AND NOT (content__contains=stuff))>' ) sqs = self.msqs.auto_query( 'test "my thing" search \'moar quotes\' -stuff') self.assertTrue(isinstance(sqs, SearchQuerySet)) self.assertEqual( repr(sqs.query.query_filter), "<SQ: AND (content__exact=my thing AND content__contains=test AND content__contains=search AND content__contains='moar AND content__contains=quotes' AND NOT (content__contains=stuff))>" ) sqs = self.msqs.auto_query( 'test "my thing" search \'moar quotes\' "foo -stuff') 
self.assertTrue(isinstance(sqs, SearchQuerySet)) self.assertEqual( repr(sqs.query.query_filter), '<SQ: AND (content__exact=my thing AND content__contains=test AND content__contains=search AND content__contains=\'moar AND content__contains=quotes\' AND content__contains="foo AND NOT (content__contains=stuff))>' ) sqs = self.msqs.auto_query('test - stuff') self.assertTrue(isinstance(sqs, SearchQuerySet)) self.assertEqual( repr(sqs.query.query_filter), '<SQ: AND (content__contains=test AND content__contains=- AND content__contains=stuff)>' ) # Ensure bits in exact matches get escaped properly as well. sqs = self.msqs.auto_query('"pants:rule"') self.assertTrue(isinstance(sqs, SearchQuerySet)) self.assertEqual(repr(sqs.query.query_filter), '<SQ: AND content__exact=pants:rule>') # Now with a different fieldname sqs = self.msqs.auto_query('test search -stuff', fieldname='title') self.assertTrue(isinstance(sqs, SearchQuerySet)) self.assertEqual( repr(sqs.query.query_filter), '<SQ: AND (title__contains=test AND title__contains=search AND NOT (title__contains=stuff))>' ) sqs = self.msqs.auto_query('test "my thing" search -stuff', fieldname='title') self.assertTrue(isinstance(sqs, SearchQuerySet)) self.assertEqual( repr(sqs.query.query_filter), '<SQ: AND (title__exact=my thing AND title__contains=test AND title__contains=search AND NOT (title__contains=stuff))>' ) def test_count(self): self.assertEqual(self.msqs.count(), 23) def test_facet_counts(self): self.assertEqual(self.msqs.facet_counts(), {}) def test_best_match(self): self.assertTrue(isinstance(self.msqs.best_match(), SearchResult)) def test_latest(self): self.assertTrue(isinstance(self.msqs.latest('pub_date'), SearchResult)) def test_more_like_this(self): mock = MockModel() mock.id = 1 self.assertEqual(len(self.msqs.more_like_this(mock)), 23) def test_facets(self): sqs = self.msqs.facet('foo') self.assertTrue(isinstance(sqs, SearchQuerySet)) self.assertEqual(len(sqs.query.facets), 1) sqs2 = 
self.msqs.facet('foo').facet('bar') self.assertTrue(isinstance(sqs2, SearchQuerySet)) self.assertEqual(len(sqs2.query.facets), 2) def test_date_facets(self): try: sqs = self.msqs.date_facet('foo', start_date=datetime.date(2008, 2, 25), end_date=datetime.date(2009, 2, 25), gap_by='smarblaph') self.fail() except FacetingError, e: self.assertEqual( str(e), "The gap_by ('smarblaph') must be one of the following: year, month, day, hour, minute, second." ) sqs = self.msqs.date_facet('foo', start_date=datetime.date(2008, 2, 25), end_date=datetime.date(2009, 2, 25), gap_by='month') self.assertTrue(isinstance(sqs, SearchQuerySet)) self.assertEqual(len(sqs.query.date_facets), 1) sqs2 = self.msqs.date_facet('foo', start_date=datetime.date(2008, 2, 25), end_date=datetime.date(2009, 2, 25), gap_by='month').date_facet( 'bar', start_date=datetime.date(2007, 2, 25), end_date=datetime.date(2009, 2, 25), gap_by='year') self.assertTrue(isinstance(sqs2, SearchQuerySet)) self.assertEqual(len(sqs2.query.date_facets), 2)
def regulation_results_page(self, request):
    """Render the regulations search results page.

    Validates the requested regulation filters, ordering and query string,
    runs a haystack full-text search over SectionParagraph documents,
    builds per-regulation hit counts, paginates the hits and returns a
    TemplateResponse.  Side effect: stores the result payload on
    ``self.results`` so ``get_context`` can see it.
    """
    all_regs = Part.objects.order_by('part_number')
    regs = validate_regs_list(request)
    order = validate_order(request)
    search_query = request.GET.get('q', '').strip()
    # NOTE(review): 'total_results' is initialized here but never updated;
    # the computed figure is stored under 'total_count' below — confirm
    # which key the template actually reads.
    payload = {
        'search_query': search_query,
        'results': [],
        'total_results': 0,
        'regs': regs,
        'all_regs': [],
    }
    # Bail out early on an empty or single-character query (the query
    # arrives URL-encoded, hence the unquote before measuring length).
    if not search_query or len(urllib.parse.unquote(search_query)) == 1:
        self.results = payload
        return TemplateResponse(
            request,
            self.get_template(request),
            self.get_context(request))
    sqs = SearchQuerySet().filter(content=search_query)
    # Per-regulation hit counts for the filter sidebar, computed against
    # the unfiltered query so counts do not change with the selection.
    payload.update({
        'all_regs': [{
            'letter_code': reg.letter_code,
            'id': reg.part_number,
            'num_results': sqs.filter(
                part=reg.part_number).models(SectionParagraph).count(),
            'selected': reg.part_number in regs}
            for reg in all_regs]
    })
    payload.update({'total_count': sum(
        [reg['num_results'] for reg in payload['all_regs']])})
    # Narrow to the selected regulation(s), if any.
    if len(regs) == 1:
        sqs = sqs.filter(part=regs[0])
    elif regs:
        sqs = sqs.filter(part__in=regs)
    if order == 'regulation':
        sqs = sqs.order_by('part', 'section_order')
    sqs = sqs.highlight(
        pre_tags=['<strong>'],
        post_tags=['</strong>']).models(SectionParagraph)
    for hit in sqs:
        try:
            snippet = Markup(" ".join(hit.highlighted))
        except TypeError as e:
            # hit.highlighted can be None/non-iterable for some hits;
            # log and drop the hit rather than fail the whole page.
            logger.warning(
                "Query string {} produced a TypeError: {}".format(
                    search_query, e))
            continue
        letter_code = LETTER_CODES.get(hit.part)
        hit_payload = {
            'id': hit.paragraph_id,
            'part': hit.part,
            'reg': 'Regulation {}'.format(letter_code),
            'label': hit.title,
            'snippet': snippet,
            'url': "{}{}/{}/#{}".format(
                self.parent().url, hit.part, hit.section_label,
                hit.paragraph_id),
        }
        payload['results'].append(hit_payload)
    payload.update({'current_count': sqs.count()})
    self.results = payload
    context = self.get_context(request)
    num_results = validate_num_results(request)
    paginator = Paginator(payload['results'], num_results)
    page_number = validate_page_number(request, paginator)
    paginated_page = paginator.page(page_number)
    context.update({
        'current_count': payload['current_count'],
        'total_count': payload['total_count'],
        'paginator': paginator,
        'current_page': page_number,
        'num_results': num_results,
        'order': order,
        'results': paginated_page,
        'show_filters': any(
            reg['selected'] is True for reg in payload['all_regs'])
    })
    return TemplateResponse(
        request, self.get_template(request), context)
def regulation_results_page(self, request):
    """Render the regulations search results page (haystack backend).

    Delegates to the Elasticsearch-DSL implementation when the
    ELASTICSEARCH_DSL_REGULATIONS flag is on.  Otherwise validates the
    requested filters/order/query, searches SectionParagraph documents,
    builds per-regulation counts, paginates and returns a
    TemplateResponse.  Side effect: stores the payload on ``self.results``
    for ``get_context``.
    """
    if flag_enabled('ELASTICSEARCH_DSL_REGULATIONS'):
        return self.regulation_results_page_es7(request)
    all_regs = Part.objects.order_by('part_number')
    regs = validate_regs_list(request)
    order = validate_order(request)
    search_query = request.GET.get('q', '').strip()
    payload = {
        'search_query': search_query,
        'results': [],
        'total_results': 0,
        'regs': regs,
        'all_regs': [],
    }
    # Bail out early on an empty or single-character (URL-decoded) query.
    if not search_query or len(urllib.parse.unquote(search_query)) == 1:
        self.results = payload
        return TemplateResponse(request,
                                self.get_template(request),
                                self.get_context(request))
    sqs = SearchQuerySet().filter(content=search_query)
    # Per-regulation hit counts for the sidebar, against the unfiltered
    # query so counts stay stable regardless of the current selection.
    payload.update({
        'all_regs': [{
            'short_name': reg.short_name,
            'id': reg.part_number,
            'num_results': sqs.filter(
                part=reg.part_number).models(SectionParagraph).count(),
            'selected': reg.part_number in regs
        } for reg in all_regs]
    })
    payload.update({
        'total_count': sum(
            [reg['num_results'] for reg in payload['all_regs']])
    })
    # Build the part_number -> short_name map once.  The previous code
    # called all_regs.get(part_number=hit.part) inside the hit loop,
    # issuing one database query per search result (N+1).
    short_names = {reg.part_number: reg.short_name for reg in all_regs}
    # Narrow to the selected regulation(s), if any.
    if len(regs) == 1:
        sqs = sqs.filter(part=regs[0])
    elif regs:
        sqs = sqs.filter(part__in=regs)
    if order == 'regulation':
        sqs = sqs.order_by('part', 'section_order')
    sqs = sqs.highlight(pre_tags=['<strong>'],
                        post_tags=['</strong>']).models(SectionParagraph)
    for hit in sqs:
        try:
            snippet = Markup(" ".join(hit.highlighted))
        except TypeError as e:
            # hit.highlighted can be None/non-iterable for some hits;
            # log and skip the hit rather than fail the whole page.
            logger.warning(
                "Query string {} produced a TypeError: {}".format(
                    search_query, e))
            continue
        # Raises KeyError (previously Part.DoesNotExist) if the index
        # holds a part with no matching Part row; both indicate a stale
        # search index.
        short_name = short_names[hit.part]
        hit_payload = {
            'id': hit.paragraph_id,
            'part': hit.part,
            'reg': short_name,
            'label': hit.title,
            'snippet': snippet,
            'url': "{}{}/{}/#{}".format(self.parent().url,
                                        hit.part,
                                        hit.section_label,
                                        hit.paragraph_id),
        }
        payload['results'].append(hit_payload)
    payload.update({'current_count': sqs.count()})
    self.results = payload
    context = self.get_context(request)
    num_results = validate_num_results(request)
    paginator = Paginator(payload['results'], num_results)
    page_number = validate_page_number(request, paginator)
    paginated_page = paginator.page(page_number)
    context.update({
        'current_count': payload['current_count'],
        'total_count': payload['total_count'],
        'paginator': paginator,
        'current_page': page_number,
        'num_results': num_results,
        'order': order,
        'results': paginated_page,
        'show_filters': any(reg['selected'] is True
                            for reg in payload['all_regs'])
    })
    return TemplateResponse(request, self.get_template(request), context)
class SearchQuerySetTestCase(TestCase):
    """Tests for SearchQuerySet (slicing, caching, chaining, faceting).

    setUp swaps the "default" connection's UnifiedIndex for one built from
    mock indexes and loads 23 MockModel documents into the fake backend;
    tearDown restores the stowed index.  Many tests assert on
    len(connections["default"].queries), so the placement of each
    reset_search_queries() call is significant.
    """

    fixtures = ["base_data.json", "bulk_data.json"]

    def setUp(self):
        super(SearchQuerySetTestCase, self).setUp()

        # Stow.
        self.old_unified_index = connections["default"]._index
        self.ui = UnifiedIndex()
        self.bmmsi = BasicMockModelSearchIndex()
        self.cpkmmsi = CharPKMockModelSearchIndex()
        self.uuidmmsi = SimpleMockUUIDModelIndex()
        self.ui.build(indexes=[self.bmmsi, self.cpkmmsi, self.uuidmmsi])
        connections["default"]._index = self.ui

        # Update the "index".
        backend = connections["default"].get_backend()
        backend.clear()
        backend.update(self.bmmsi, MockModel.objects.all())

        self.msqs = SearchQuerySet()

        # Stow.
        reset_search_queries()

    def tearDown(self):
        # Restore.
        connections["default"]._index = self.old_unified_index
        super(SearchQuerySetTestCase, self).tearDown()

    def test_len(self):
        self.assertEqual(len(self.msqs), 23)

    def test_repr(self):
        reset_search_queries()
        self.assertEqual(len(connections["default"].queries), 0)
        self.assertRegexpMatches(
            repr(self.msqs),
            r"^<SearchQuerySet: query=<test_haystack.mocks.MockSearchQuery object"
            r" at 0x[0-9A-Fa-f]+>, using=None>$",
        )

    def test_iter(self):
        reset_search_queries()
        self.assertEqual(len(connections["default"].queries), 0)
        msqs = self.msqs.all()
        results = [int(res.pk) for res in iter(msqs)]
        self.assertEqual(results, [res.pk for res in MOCK_SEARCH_RESULTS[:23]])
        self.assertEqual(len(connections["default"].queries), 3)

    def test_slice(self):
        reset_search_queries()
        self.assertEqual(len(connections["default"].queries), 0)
        results = self.msqs.all()
        self.assertEqual(
            [int(res.pk) for res in results[1:11]],
            [res.pk for res in MOCK_SEARCH_RESULTS[1:11]],
        )
        # A slice is satisfied with a single backend query.
        self.assertEqual(len(connections["default"].queries), 1)

        reset_search_queries()
        self.assertEqual(len(connections["default"].queries), 0)
        results = self.msqs.all()
        self.assertEqual(int(results[22].pk), MOCK_SEARCH_RESULTS[22].pk)
        self.assertEqual(len(connections["default"].queries), 1)

    def test_manual_iter(self):
        results = self.msqs.all()

        reset_search_queries()
        self.assertEqual(len(connections["default"].queries), 0)
        check = [result.pk for result in results._manual_iter()]
        self.assertEqual(
            check,
            [
                "1",
                "2",
                "3",
                "4",
                "5",
                "6",
                "7",
                "8",
                "9",
                "10",
                "11",
                "12",
                "13",
                "14",
                "15",
                "16",
                "17",
                "18",
                "19",
                "20",
                "21",
                "22",
                "23",
            ],
        )
        self.assertEqual(len(connections["default"].queries), 3)

        reset_search_queries()
        self.assertEqual(len(connections["default"].queries), 0)

        # Test to ensure we properly fill the cache, even if we get fewer
        # results back (not a handled model) than the hit count indicates.
        # This will hang indefinitely if broken.

        # CharPK testing
        old_ui = self.ui
        self.ui.build(indexes=[self.cpkmmsi])
        connections["default"]._index = self.ui
        self.cpkmmsi.update()

        results = self.msqs.all()
        loaded = [result.pk for result in results._manual_iter()]
        self.assertEqual(loaded, ["sometext", "1234"])
        self.assertEqual(len(connections["default"].queries), 1)

        # UUID testing
        self.ui.build(indexes=[self.uuidmmsi])
        connections["default"]._index = self.ui
        self.uuidmmsi.update()

        results = self.msqs.all()
        loaded = [result.pk for result in results._manual_iter()]
        self.assertEqual(
            loaded,
            [
                "53554c58-7051-4350-bcc9-dad75eb248a9",
                "77554c58-7051-4350-bcc9-dad75eb24888",
            ],
        )

        connections["default"]._index = old_ui

    def test_cache_is_full(self):
        reset_search_queries()
        self.assertEqual(len(connections["default"].queries), 0)
        self.assertEqual(self.msqs._cache_is_full(), False)
        results = self.msqs.all()
        # Iterating the queryset fully is what populates the cache.
        fire_the_iterator_and_fill_cache = list(results)
        self.assertEqual(23, len(fire_the_iterator_and_fill_cache))
        self.assertEqual(results._cache_is_full(), True)
        self.assertEqual(len(connections["default"].queries), 4)

    def test_all(self):
        sqs = self.msqs.all()
        self.assertTrue(isinstance(sqs, SearchQuerySet))

    def test_filter(self):
        sqs = self.msqs.filter(content="foo")
        self.assertTrue(isinstance(sqs, SearchQuerySet))
        self.assertEqual(len(sqs.query.query_filter), 1)

    def test_exclude(self):
        sqs = self.msqs.exclude(content="foo")
        self.assertTrue(isinstance(sqs, SearchQuerySet))
        self.assertEqual(len(sqs.query.query_filter), 1)

    def test_order_by(self):
        sqs = self.msqs.order_by("foo")
        self.assertTrue(isinstance(sqs, SearchQuerySet))
        self.assertTrue("foo" in sqs.query.order_by)

    def test_models(self):
        # Stow.
        old_unified_index = connections["default"]._index
        ui = UnifiedIndex()
        bmmsi = BasicMockModelSearchIndex()
        bammsi = BasicAnotherMockModelSearchIndex()
        ui.build(indexes=[bmmsi, bammsi])
        connections["default"]._index = ui

        msqs = SearchQuerySet()

        sqs = msqs.all()
        self.assertTrue(isinstance(sqs, SearchQuerySet))
        self.assertEqual(len(sqs.query.models), 0)

        sqs = msqs.models(MockModel)
        self.assertTrue(isinstance(sqs, SearchQuerySet))
        self.assertEqual(len(sqs.query.models), 1)

        sqs = msqs.models(MockModel, AnotherMockModel)
        self.assertTrue(isinstance(sqs, SearchQuerySet))
        self.assertEqual(len(sqs.query.models), 2)

        # This will produce a warning.
        ui.build(indexes=[bmmsi])
        sqs = msqs.models(AnotherMockModel)
        self.assertTrue(isinstance(sqs, SearchQuerySet))
        self.assertEqual(len(sqs.query.models), 1)

    def test_result_class(self):
        sqs = self.msqs.all()
        self.assertTrue(issubclass(sqs.query.result_class, SearchResult))

        # Custom class.
        class IttyBittyResult(object):
            pass

        sqs = self.msqs.result_class(IttyBittyResult)
        self.assertTrue(issubclass(sqs.query.result_class, IttyBittyResult))

        # Reset to default.
        sqs = self.msqs.result_class(None)
        self.assertTrue(issubclass(sqs.query.result_class, SearchResult))

    def test_boost(self):
        sqs = self.msqs.boost("foo", 10)
        self.assertTrue(isinstance(sqs, SearchQuerySet))
        self.assertEqual(len(sqs.query.boost.keys()), 1)

    def test_highlight(self):
        sqs = self.msqs.highlight()
        self.assertTrue(isinstance(sqs, SearchQuerySet))
        self.assertEqual(sqs.query.highlight, True)

    def test_spelling_override(self):
        sqs = self.msqs.filter(content="not the spellchecking query")
        self.assertEqual(sqs.query.spelling_query, None)
        sqs = self.msqs.set_spelling_query("override")
        self.assertTrue(isinstance(sqs, SearchQuerySet))
        self.assertEqual(sqs.query.spelling_query, "override")

    def test_spelling_suggestions(self):
        # Test the case where spelling support is disabled.
        sqs = self.msqs.filter(content="Indx")
        self.assertTrue(isinstance(sqs, SearchQuerySet))
        self.assertEqual(sqs.spelling_suggestion(), None)
        self.assertEqual(sqs.spelling_suggestion("indexy"), None)

    def test_raw_search(self):
        self.assertEqual(len(self.msqs.raw_search("foo")), 23)
        self.assertEqual(
            len(
                self.msqs.raw_search("(content__exact:hello AND content__exact:world)")
            ),
            23,
        )

    def test_load_all(self):
        # Models with character primary keys.
        sqs = SearchQuerySet()
        sqs.query.backend = CharPKMockSearchBackend("charpk")
        results = sqs.load_all().all()
        self.assertEqual(len(results._result_cache), 0)
        results._fill_cache(0, 2)
        self.assertEqual(
            len([result for result in results._result_cache if result is not None]), 2
        )

        # Models with uuid primary keys.
        sqs = SearchQuerySet()
        sqs.query.backend = UUIDMockSearchBackend("uuid")
        results = sqs.load_all().all()
        self.assertEqual(len(results._result_cache), 0)
        results._fill_cache(0, 2)
        self.assertEqual(
            len([result for result in results._result_cache if result is not None]), 2
        )

        # If nothing is handled, you get nothing.
        old_ui = connections["default"]._index
        ui = UnifiedIndex()
        ui.build(indexes=[])
        connections["default"]._index = ui

        sqs = self.msqs.load_all()
        self.assertTrue(isinstance(sqs, SearchQuerySet))
        self.assertEqual(len(sqs), 0)

        connections["default"]._index = old_ui

    # For full tests, see the solr_backend.

    def test_load_all_read_queryset(self):
        # Stow.
        old_ui = connections["default"]._index
        ui = UnifiedIndex()
        gafmmsi = GhettoAFifthMockModelSearchIndex()
        ui.build(indexes=[gafmmsi])
        connections["default"]._index = ui
        gafmmsi.update()

        sqs = SearchQuerySet()
        results = sqs.load_all().all()
        results.query.backend = ReadQuerySetMockSearchBackend("default")
        results._fill_cache(0, 2)

        # The deleted result isn't returned
        self.assertEqual(
            len([result for result in results._result_cache if result is not None]), 1
        )

        # Register a SearchIndex with a read_queryset that returns deleted items
        rqstsi = TextReadQuerySetTestSearchIndex()
        ui.build(indexes=[rqstsi])
        rqstsi.update()

        sqs = SearchQuerySet()
        results = sqs.load_all().all()
        results.query.backend = ReadQuerySetMockSearchBackend("default")
        results._fill_cache(0, 2)

        # Both the deleted and not deleted items are returned
        self.assertEqual(
            len([result for result in results._result_cache if result is not None]), 2
        )

        # Restore.
        connections["default"]._index = old_ui

    def test_auto_query(self):
        sqs = self.msqs.auto_query("test search -stuff")
        self.assertTrue(isinstance(sqs, SearchQuerySet))
        self.assertEqual(
            repr(sqs.query.query_filter),
            "<SQ: AND content__content=test search -stuff>",
        )

        sqs = self.msqs.auto_query('test "my thing" search -stuff')
        self.assertTrue(isinstance(sqs, SearchQuerySet))
        self.assertEqual(
            repr(sqs.query.query_filter),
            '<SQ: AND content__content=test "my thing" search -stuff>',
        )

        sqs = self.msqs.auto_query("test \"my thing\" search 'moar quotes' -stuff")
        self.assertTrue(isinstance(sqs, SearchQuerySet))
        self.assertEqual(
            repr(sqs.query.query_filter),
            "<SQ: AND content__content=test \"my thing\" search 'moar quotes' -stuff>",
        )

        sqs = self.msqs.auto_query('test "my thing" search \'moar quotes\' "foo -stuff')
        self.assertTrue(isinstance(sqs, SearchQuerySet))
        self.assertEqual(
            repr(sqs.query.query_filter),
            '<SQ: AND content__content=test "my thing" search \'moar quotes\' "foo -stuff>',
        )

        sqs = self.msqs.auto_query("test - stuff")
        self.assertTrue(isinstance(sqs, SearchQuerySet))
        self.assertEqual(
            repr(sqs.query.query_filter), "<SQ: AND content__content=test - stuff>"
        )

        # Ensure bits in exact matches get escaped properly as well.
        sqs = self.msqs.auto_query('"pants:rule"')
        self.assertTrue(isinstance(sqs, SearchQuerySet))
        self.assertEqual(
            repr(sqs.query.query_filter), '<SQ: AND content__content="pants:rule">'
        )

        # Now with a different fieldname
        sqs = self.msqs.auto_query("test search -stuff", fieldname="title")
        self.assertTrue(isinstance(sqs, SearchQuerySet))
        self.assertEqual(
            repr(sqs.query.query_filter), "<SQ: AND title__content=test search -stuff>"
        )

        sqs = self.msqs.auto_query('test "my thing" search -stuff', fieldname="title")
        self.assertTrue(isinstance(sqs, SearchQuerySet))
        self.assertEqual(
            repr(sqs.query.query_filter),
            '<SQ: AND title__content=test "my thing" search -stuff>',
        )

    def test_count(self):
        self.assertEqual(self.msqs.count(), 23)

    def test_facet_counts(self):
        self.assertEqual(self.msqs.facet_counts(), {})

    def test_best_match(self):
        self.assertTrue(isinstance(self.msqs.best_match(), SearchResult))

    def test_latest(self):
        self.assertTrue(isinstance(self.msqs.latest("pub_date"), SearchResult))

    def test_more_like_this(self):
        mock = MockModel()
        mock.id = 1

        self.assertEqual(len(self.msqs.more_like_this(mock)), 23)

    def test_facets(self):
        sqs = self.msqs.facet("foo")
        self.assertTrue(isinstance(sqs, SearchQuerySet))
        self.assertEqual(len(sqs.query.facets), 1)

        sqs2 = self.msqs.facet("foo").facet("bar")
        self.assertTrue(isinstance(sqs2, SearchQuerySet))
        self.assertEqual(len(sqs2.query.facets), 2)

    def test_date_facets(self):
        # An invalid gap_by must raise with a helpful message.
        try:
            sqs = self.msqs.date_facet(
                "foo",
                start_date=datetime.date(2008, 2, 25),
                end_date=datetime.date(2009, 2, 25),
                gap_by="smarblaph",
            )
            self.fail()
        except FacetingError as e:
            self.assertEqual(
                str(e),
                "The gap_by ('smarblaph') must be one of the following: year, month, day, hour, minute, second.",
            )

        sqs = self.msqs.date_facet(
            "foo",
            start_date=datetime.date(2008, 2, 25),
            end_date=datetime.date(2009, 2, 25),
            gap_by="month",
        )
        self.assertTrue(isinstance(sqs, SearchQuerySet))
        self.assertEqual(len(sqs.query.date_facets), 1)

        sqs2 = self.msqs.date_facet(
            "foo",
            start_date=datetime.date(2008, 2, 25),
            end_date=datetime.date(2009, 2, 25),
            gap_by="month",
        ).date_facet(
            "bar",
            start_date=datetime.date(2007, 2, 25),
            end_date=datetime.date(2009, 2, 25),
            gap_by="year",
        )
        self.assertTrue(isinstance(sqs2, SearchQuerySet))
        self.assertEqual(len(sqs2.query.date_facets), 2)

    def test_query_facets(self):
        sqs = self.msqs.query_facet("foo", "[bar TO *]")
        self.assertTrue(isinstance(sqs, SearchQuerySet))
        self.assertEqual(len(sqs.query.query_facets), 1)

        sqs2 = self.msqs.query_facet("foo", "[bar TO *]").query_facet(
            "bar", "[100 TO 499]"
        )
        self.assertTrue(isinstance(sqs2, SearchQuerySet))
        self.assertEqual(len(sqs2.query.query_facets), 2)

        # Test multiple query facets on a single field
        sqs3 = (
            self.msqs.query_facet("foo", "[bar TO *]")
            .query_facet("bar", "[100 TO 499]")
            .query_facet("foo", "[1000 TO 1499]")
        )
        self.assertTrue(isinstance(sqs3, SearchQuerySet))
        self.assertEqual(len(sqs3.query.query_facets), 3)

    def test_stats(self):
        sqs = self.msqs.stats_facet("foo", "bar")
        self.assertTrue(isinstance(sqs, SearchQuerySet))
        self.assertEqual(len(sqs.query.stats), 1)

        # A repeated field merges into the existing stats entry.
        sqs2 = self.msqs.stats_facet("foo", "bar").stats_facet("foo", "baz")
        self.assertTrue(isinstance(sqs2, SearchQuerySet))
        self.assertEqual(len(sqs2.query.stats), 1)

        sqs3 = self.msqs.stats_facet("foo", "bar").stats_facet("moof", "baz")
        self.assertTrue(isinstance(sqs3, SearchQuerySet))
        self.assertEqual(len(sqs3.query.stats), 2)

    def test_narrow(self):
        sqs = self.msqs.narrow("foo:moof")
        self.assertTrue(isinstance(sqs, SearchQuerySet))
        self.assertEqual(len(sqs.query.narrow_queries), 1)

    def test_clone(self):
        results = self.msqs.filter(foo="bar", foo__lt="10")

        clone = results._clone()
        self.assertTrue(isinstance(clone, SearchQuerySet))
        self.assertEqual(str(clone.query), str(results.query))
        self.assertEqual(clone._result_cache, [])
        self.assertEqual(clone._result_count, None)
        self.assertEqual(clone._cache_full, False)
        self.assertEqual(clone._using, results._using)

    def test_using(self):
        sqs = SearchQuerySet(using="default")
        self.assertNotEqual(sqs.query, None)
        self.assertEqual(sqs.query._using, "default")

    def test_chaining(self):
        sqs = self.msqs.filter(content="foo")
        self.assertTrue(isinstance(sqs, SearchQuerySet))
        self.assertEqual(len(sqs.query.query_filter), 1)

        # A second instance should inherit none of the changes from above.
        sqs = self.msqs.filter(content="bar")
        self.assertTrue(isinstance(sqs, SearchQuerySet))
        self.assertEqual(len(sqs.query.query_filter), 1)

    def test_none(self):
        sqs = self.msqs.none()
        self.assertTrue(isinstance(sqs, EmptySearchQuerySet))
        self.assertEqual(len(sqs), 0)

    def test___and__(self):
        sqs1 = self.msqs.filter(content="foo")
        sqs2 = self.msqs.filter(content="bar")
        sqs = sqs1 & sqs2

        self.assertTrue(isinstance(sqs, SearchQuerySet))
        self.assertEqual(len(sqs.query.query_filter), 2)

    def test___or__(self):
        sqs1 = self.msqs.filter(content="foo")
        sqs2 = self.msqs.filter(content="bar")
        sqs = sqs1 | sqs2

        self.assertTrue(isinstance(sqs, SearchQuerySet))
        self.assertEqual(len(sqs.query.query_filter), 2)

    def test_and_or(self):
        """
        Combining AND queries with OR should give AND(OR(a, b), OR(c, d))
        """
        sqs1 = self.msqs.filter(content="foo").filter(content="oof")
        sqs2 = self.msqs.filter(content="bar").filter(content="rab")
        sqs = sqs1 | sqs2

        self.assertEqual(sqs.query.query_filter.connector, "OR")
        self.assertEqual(
            repr(sqs.query.query_filter.children[0]), repr(sqs1.query.query_filter)
        )
        self.assertEqual(
            repr(sqs.query.query_filter.children[1]), repr(sqs2.query.query_filter)
        )

    def test_or_and(self):
        """
        Combining OR queries with AND should give OR(AND(a, b), AND(c, d))
        """
        sqs1 = self.msqs.filter(content="foo").filter_or(content="oof")
        sqs2 = self.msqs.filter(content="bar").filter_or(content="rab")
        sqs = sqs1 & sqs2

        self.assertEqual(sqs.query.query_filter.connector, "AND")
        self.assertEqual(
            repr(sqs.query.query_filter.children[0]), repr(sqs1.query.query_filter)
        )
        self.assertEqual(
            repr(sqs.query.query_filter.children[1]),
            repr(sqs2.query.query_filter)
        )
class SearchQuerySetTestCase(TestCase):
    """Legacy (SearchSite-era haystack) tests for SearchQuerySet.

    setUp builds querysets over Dummy/Mock/MixedMock backends, swaps in a
    fresh SearchSite with MockModel and CharPKMockModel registered, and
    enables DEBUG so backends.queries records each backend hit; tearDown
    restores both.  Assertions on len(backends.queries) depend on exact
    statement order.

    Fix: the two ``except FacetingError, e:`` clauses used Python-2-only
    syntax (a SyntaxError on Python 3); rewritten as
    ``except FacetingError as e:``, which is valid on Python 2.6+ as well.
    """

    def setUp(self):
        super(SearchQuerySetTestCase, self).setUp()
        self.bsqs = SearchQuerySet(query=DummySearchQuery(backend=DummySearchBackend()))
        self.msqs = SearchQuerySet(query=MockSearchQuery(backend=MockSearchBackend()))
        self.mmsqs = SearchQuerySet(query=MockSearchQuery(backend=MixedMockSearchBackend()))

        # Stow.
        self.old_debug = settings.DEBUG
        settings.DEBUG = True
        self.old_site = haystack.site
        test_site = SearchSite()
        test_site.register(MockModel)
        test_site.register(CharPKMockModel)
        haystack.site = test_site

        backends.reset_search_queries()

    def tearDown(self):
        # Restore.
        haystack.site = self.old_site
        settings.DEBUG = self.old_debug
        super(SearchQuerySetTestCase, self).tearDown()

    def test_len(self):
        # Dummy always returns 0.
        self.assertEqual(len(self.bsqs), 0)
        self.assertEqual(len(self.msqs), 100)

    def test_repr(self):
        self.assertEqual(repr(self.bsqs), '[]')
        backends.reset_search_queries()
        self.assertEqual(len(backends.queries), 0)
        self.assertEqual(repr(self.msqs), "[<SearchResult: core.MockModel (pk=0)>, <SearchResult: core.MockModel (pk=1)>, <SearchResult: core.MockModel (pk=2)>, <SearchResult: core.MockModel (pk=3)>, <SearchResult: core.MockModel (pk=4)>, <SearchResult: core.MockModel (pk=5)>, <SearchResult: core.MockModel (pk=6)>, <SearchResult: core.MockModel (pk=7)>, <SearchResult: core.MockModel (pk=8)>, <SearchResult: core.MockModel (pk=9)>, <SearchResult: core.MockModel (pk=10)>, <SearchResult: core.MockModel (pk=11)>, <SearchResult: core.MockModel (pk=12)>, <SearchResult: core.MockModel (pk=13)>, <SearchResult: core.MockModel (pk=14)>, <SearchResult: core.MockModel (pk=15)>, <SearchResult: core.MockModel (pk=16)>, <SearchResult: core.MockModel (pk=17)>, <SearchResult: core.MockModel (pk=18)>, '...(remaining elements truncated)...']")
        self.assertEqual(len(backends.queries), 1)

    def test_iter(self):
        # Dummy always returns [].
        self.assertEqual([result for result in self.bsqs.all()], [])

        backends.reset_search_queries()
        self.assertEqual(len(backends.queries), 0)
        msqs = self.msqs.all()
        results = [result for result in msqs]
        self.assertEqual(results, MOCK_SEARCH_RESULTS)
        self.assertEqual(len(backends.queries), 10)

    def test_slice(self):
        backends.reset_search_queries()
        self.assertEqual(len(backends.queries), 0)
        results = self.msqs.all()
        self.assertEqual(results[1:11], MOCK_SEARCH_RESULTS[1:11])
        # A slice is satisfied with a single backend query.
        self.assertEqual(len(backends.queries), 1)

        backends.reset_search_queries()
        self.assertEqual(len(backends.queries), 0)
        results = self.msqs.all()
        self.assertEqual(results[50], MOCK_SEARCH_RESULTS[50])
        self.assertEqual(len(backends.queries), 1)

    def test_manual_iter(self):
        results = self.msqs.all()

        backends.reset_search_queries()
        self.assertEqual(len(backends.queries), 0)

        for offset, result in enumerate(results._manual_iter()):
            self.assertEqual(result, MOCK_SEARCH_RESULTS[offset])

        self.assertEqual(len(backends.queries), 10)

        backends.reset_search_queries()
        self.assertEqual(len(backends.queries), 0)

        # Test to ensure we properly fill the cache, even if we get fewer
        # results back (not in the SearchSite) than the hit count indicates.
        # This will hang indefinitely if broken.
        results = self.mmsqs.all()
        loaded = [result.pk for result in results._manual_iter()]
        self.assertEqual(loaded, [0, 1, 2, 3, 4, 5, 6, 7, 8, 10, 11, 12, 15, 16, 17, 18, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29])
        self.assertEqual(len(backends.queries), 8)

    def test_fill_cache(self):
        backends.reset_search_queries()
        self.assertEqual(len(backends.queries), 0)
        results = self.msqs.all()
        self.assertEqual(len(results._result_cache), 0)
        self.assertEqual(len(backends.queries), 0)
        results._fill_cache(0, 10)
        self.assertEqual(len([result for result in results._result_cache if result is not None]), 10)
        self.assertEqual(len(backends.queries), 1)
        results._fill_cache(10, 20)
        self.assertEqual(len([result for result in results._result_cache if result is not None]), 20)
        self.assertEqual(len(backends.queries), 2)

        backends.reset_search_queries()
        self.assertEqual(len(backends.queries), 0)

        # Test to ensure we properly fill the cache, even if we get fewer
        # results back (not in the SearchSite) than the hit count indicates.
        results = self.mmsqs.all()
        self.assertEqual(len([result for result in results._result_cache if result is not None]), 0)
        self.assertEqual([result.pk for result in results._result_cache if result is not None], [])
        self.assertEqual(len(backends.queries), 0)
        results._fill_cache(0, 10)
        self.assertEqual(len([result for result in results._result_cache if result is not None]), 9)
        self.assertEqual([result.pk for result in results._result_cache if result is not None], [0, 1, 2, 3, 4, 5, 6, 7, 8])
        self.assertEqual(len(backends.queries), 2)
        results._fill_cache(10, 20)
        self.assertEqual(len([result for result in results._result_cache if result is not None]), 17)
        self.assertEqual([result.pk for result in results._result_cache if result is not None], [0, 1, 2, 3, 4, 5, 6, 7, 8, 10, 11, 12, 15, 16, 17, 18, 19])
        self.assertEqual(len(backends.queries), 4)
        results._fill_cache(20, 30)
        self.assertEqual(len([result for result in results._result_cache if result is not None]), 27)
        self.assertEqual([result.pk for result in results._result_cache if result is not None], [0, 1, 2, 3, 4, 5, 6, 7, 8, 10, 11, 12, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29])
        self.assertEqual(len(backends.queries), 6)

    def test_cache_is_full(self):
        # Dummy always has a count of 0 and an empty _result_cache, hence True.
        self.assertEqual(self.bsqs._cache_is_full(), False)
        results = self.bsqs.all()
        fire_the_iterator_and_fill_cache = [result for result in results]
        self.assertEqual(results._cache_is_full(), True)

        backends.reset_search_queries()
        self.assertEqual(len(backends.queries), 0)
        self.assertEqual(self.msqs._cache_is_full(), False)
        results = self.msqs.all()
        fire_the_iterator_and_fill_cache = [result for result in results]
        self.assertEqual(results._cache_is_full(), True)
        self.assertEqual(len(backends.queries), 10)

    def test_all(self):
        sqs = self.bsqs.all()
        self.assert_(isinstance(sqs, SearchQuerySet))

    def test_filter(self):
        sqs = self.bsqs.filter(content='foo')
        self.assert_(isinstance(sqs, SearchQuerySet))
        self.assertEqual(len(sqs.query.query_filter), 1)

    def test_exclude(self):
        sqs = self.bsqs.exclude(content='foo')
        self.assert_(isinstance(sqs, SearchQuerySet))
        self.assertEqual(len(sqs.query.query_filter), 1)

    def test_order_by(self):
        sqs = self.bsqs.order_by('foo')
        self.assert_(isinstance(sqs, SearchQuerySet))
        self.assert_('foo' in sqs.query.order_by)

    def test_models(self):
        mock_index_site = SearchSite()
        mock_index_site.register(MockModel)
        mock_index_site.register(AnotherMockModel)

        bsqs = SearchQuerySet(site=mock_index_site)

        sqs = bsqs.all()
        self.assert_(isinstance(sqs, SearchQuerySet))
        self.assertEqual(len(sqs.query.models), 0)

        sqs = bsqs.models(MockModel)
        self.assert_(isinstance(sqs, SearchQuerySet))
        self.assertEqual(len(sqs.query.models), 1)

        sqs = bsqs.models(MockModel, AnotherMockModel)
        self.assert_(isinstance(sqs, SearchQuerySet))
        self.assertEqual(len(sqs.query.models), 2)

        # This will produce a warning.
        mock_index_site.unregister(AnotherMockModel)
        sqs = bsqs.models(AnotherMockModel)
        self.assert_(isinstance(sqs, SearchQuerySet))
        self.assertEqual(len(sqs.query.models), 1)

    def test_boost(self):
        sqs = self.bsqs.boost('foo', 10)
        self.assert_(isinstance(sqs, SearchQuerySet))
        self.assertEqual(len(sqs.query.boost.keys()), 1)

    def test_highlight(self):
        sqs = self.bsqs.highlight()
        self.assert_(isinstance(sqs, SearchQuerySet))
        self.assertEqual(sqs.query.highlight, True)

    def test_spelling(self):
        # Test the case where spelling support is disabled.
        sqs = self.bsqs.filter(content='Indx')
        self.assert_(isinstance(sqs, SearchQuerySet))
        self.assertEqual(sqs.spelling_suggestion(), None)
        self.assertEqual(sqs.spelling_suggestion('indexy'), None)

    def test_raw_search(self):
        self.assertEqual(len(self.bsqs.raw_search('foo')), 0)
        self.assertEqual(len(self.bsqs.raw_search('(content__exact hello AND content__exact world)')), 1)

    def test_load_all(self):
        # Models with character primary keys
        sqs = SearchQuerySet(query=MockSearchQuery(backend=CharPKMockSearchBackend()))
        results = sqs.load_all().all()
        self.assertEqual(len(results._result_cache), 0)
        results._fill_cache(0, 2)
        self.assertEqual(len([result for result in results._result_cache if result is not None]), 2)

        # If nothing is registered, you get nothing.
        haystack.site.unregister(MockModel)
        haystack.site.unregister(CharPKMockModel)
        sqs = self.msqs.load_all()
        self.assert_(isinstance(sqs, SearchQuerySet))
        self.assertEqual(len(sqs), 0)

    # For full tests, see the solr_backend.

    def test_auto_query(self):
        sqs = self.bsqs.auto_query('test search -stuff')
        self.assert_(isinstance(sqs, SearchQuerySet))
        self.assertEqual(repr(sqs.query.query_filter), '<SQ: AND (content__exact=test AND content__exact=search AND NOT (content__exact=stuff))>')

        sqs = self.bsqs.auto_query('test "my thing" search -stuff')
        self.assert_(isinstance(sqs, SearchQuerySet))
        self.assertEqual(repr(sqs.query.query_filter), '<SQ: AND (content__exact=my thing AND content__exact=test AND content__exact=search AND NOT (content__exact=stuff))>')

        sqs = self.bsqs.auto_query('test "my thing" search \'moar quotes\' -stuff')
        self.assert_(isinstance(sqs, SearchQuerySet))
        self.assertEqual(repr(sqs.query.query_filter), "<SQ: AND (content__exact=my thing AND content__exact=test AND content__exact=search AND content__exact='moar AND content__exact=quotes' AND NOT (content__exact=stuff))>")

        sqs = self.bsqs.auto_query('test "my thing" search \'moar quotes\' "foo -stuff')
        self.assert_(isinstance(sqs, SearchQuerySet))
        self.assertEqual(repr(sqs.query.query_filter), '<SQ: AND (content__exact=my thing AND content__exact=test AND content__exact=search AND content__exact=\'moar AND content__exact=quotes\' AND content__exact="foo AND NOT (content__exact=stuff))>')

        sqs = self.bsqs.auto_query('test - stuff')
        self.assert_(isinstance(sqs, SearchQuerySet))
        self.assertEqual(repr(sqs.query.query_filter), '<SQ: AND (content__exact=test AND content__exact=- AND content__exact=stuff)>')

        # Ensure bits in exact matches get escaped properly as well.
        sqs = self.bsqs.auto_query('"pants:rule"')
        self.assert_(isinstance(sqs, SearchQuerySet))
        self.assertEqual(repr(sqs.query.query_filter), '<SQ: AND content__exact=pants:rule>')

    def test_count(self):
        self.assertEqual(self.bsqs.count(), 0)

    def test_facet_counts(self):
        self.assertEqual(self.bsqs.facet_counts(), {})

    def test_best_match(self):
        self.assert_(isinstance(self.msqs.best_match(), SearchResult))

    def test_latest(self):
        self.assert_(isinstance(self.msqs.latest('pub_date'), SearchResult))

    def test_more_like_this(self):
        mock = MockModel()
        mock.id = 1

        self.assertEqual(len(self.msqs.more_like_this(mock)), 100)

    def test_facets(self):
        sqs = self.bsqs.facet('foo')
        self.assert_(isinstance(sqs, SearchQuerySet))
        self.assertEqual(len(sqs.query.facets), 1)

        sqs2 = self.bsqs.facet('foo').facet('bar')
        self.assert_(isinstance(sqs2, SearchQuerySet))
        self.assertEqual(len(sqs2.query.facets), 2)

    def test_date_facets(self):
        # An invalid gap_by must raise with a helpful message.
        try:
            sqs = self.bsqs.date_facet('foo', start_date=datetime.date(2008, 2, 25), end_date=datetime.date(2009, 2, 25), gap_by='smarblaph')
            self.fail()
        except FacetingError as e:  # was py2-only "except FacetingError, e:"
            self.assertEqual(str(e), "The gap_by ('smarblaph') must be one of the following: year, month, day, hour, minute, second.")

        sqs = self.bsqs.date_facet('foo', start_date=datetime.date(2008, 2, 25), end_date=datetime.date(2009, 2, 25), gap_by='month')
        self.assert_(isinstance(sqs, SearchQuerySet))
        self.assertEqual(len(sqs.query.date_facets), 1)

        sqs2 = self.bsqs.date_facet('foo', start_date=datetime.date(2008, 2, 25), end_date=datetime.date(2009, 2, 25), gap_by='month').date_facet('bar', start_date=datetime.date(2007, 2, 25), end_date=datetime.date(2009, 2, 25), gap_by='year')
        self.assert_(isinstance(sqs2, SearchQuerySet))
        self.assertEqual(len(sqs2.query.date_facets), 2)
# NOTE(review): this chunk is whitespace-mangled — each physical line below packs
# many statements that were originally separate, indented lines. Code is left
# byte-identical; only full-line comments are added at safe line boundaries.
# SearchQuerySetTestCase: setUp builds two SearchQuerySets — self.bsqs over a
# dummy backend (always 0 results) and self.msqs over a mock backend (100 fixed
# results, MOCK_SEARCH_RESULTS). The class appears to continue past this chunk
# (test_chaining may have further assertions) — TODO confirm against full file.
class SearchQuerySetTestCase(TestCase): def setUp(self): super(SearchQuerySetTestCase, self).setUp() self.bsqs = SearchQuerySet(query=DummySearchQuery(backend=DummySearchBackend())) self.msqs = SearchQuerySet(query=MockSearchQuery(backend=MockSearchBackend())) def test_len(self): # Dummy always returns 0. self.assertEqual(len(self.bsqs), 0) self.assertEqual(len(self.msqs), 100) def test_iter(self): # Dummy always returns []. self.assertEqual([result for result in self.bsqs.all()], []) msqs = self.msqs.all() results = [result for result in msqs] self.assertEqual(results, MOCK_SEARCH_RESULTS) def test_slice(self): self.assertEqual(self.msqs.all()[1:11], MOCK_SEARCH_RESULTS[1:11]) self.assertEqual(self.msqs.all()[50], MOCK_SEARCH_RESULTS[50]) def test_manual_iter(self): results = self.msqs.all() for offset, result in enumerate(results._manual_iter()): self.assertEqual(result, MOCK_SEARCH_RESULTS[offset]) def test_fill_cache(self): results = self.msqs.all() self.assertEqual(len(results._result_cache), 0) results._fill_cache() self.assertEqual(len(results._result_cache), 20) def test_cache_is_full(self): # Dummy always has a count of 0 and an empty _result_cache, hence True. 
# Continues test_cache_is_full, then filter/exclude/order_by/models/boost/
# highlight/raw_search/load_all and the start of test_auto_query. NOTE(review):
# test_auto_query's first query string ('test search -stuff') is split across
# this line and the next by the mangling — do not insert text between them.
self.assertEqual(self.bsqs._cache_is_full(), True) self.assertEqual(self.msqs._cache_is_full(), False) results = self.msqs.all() fire_the_iterator_and_fill_cache = [result for result in results] self.assertEqual(results._cache_is_full(), True) def test_all(self): sqs = self.bsqs.all() self.assert_(isinstance(sqs, SearchQuerySet)) def test_filter(self): sqs = self.bsqs.filter(content='foo') self.assert_(isinstance(sqs, SearchQuerySet)) self.assertEqual(len(sqs.query.query_filters), 1) def test_exclude(self): sqs = self.bsqs.exclude(content='foo') self.assert_(isinstance(sqs, SearchQuerySet)) self.assertEqual(len(sqs.query.query_filters), 1) def test_order_by(self): sqs = self.bsqs.order_by('foo') self.assert_(isinstance(sqs, SearchQuerySet)) self.assert_('foo' in sqs.query.order_by) def test_models(self): mock_index_site = SearchSite() mock_index_site.register(MockModel) mock_index_site.register(AnotherMockModel) bsqs = SearchQuerySet(site=mock_index_site) sqs = bsqs.all() self.assert_(isinstance(sqs, SearchQuerySet)) self.assertEqual(len(sqs.query.models), 0) sqs = bsqs.models(MockModel) self.assert_(isinstance(sqs, SearchQuerySet)) self.assertEqual(len(sqs.query.models), 1) sqs = bsqs.models(MockModel, AnotherMockModel) self.assert_(isinstance(sqs, SearchQuerySet)) self.assertEqual(len(sqs.query.models), 2) def test_boost(self): sqs = self.bsqs.boost(foo=10) self.assert_(isinstance(sqs, SearchQuerySet)) self.assertEqual(len(sqs.query.boost.keys()), 1) def test_highlight(self): sqs = self.bsqs.highlight() self.assert_(isinstance(sqs, SearchQuerySet)) self.assert_(sqs.query.highlight, True) def test_raw_search(self): self.assertEqual(len(self.bsqs.raw_search('foo')), 0) self.assertEqual(len(self.bsqs.raw_search('content__exact hello AND content__exact world')), 1) def test_load_all(self): sqs = self.msqs.load_all() self.assert_(isinstance(sqs, SearchQuerySet)) self.assertEqual(sqs[0].object.foo, 'bar') def test_auto_query(self): sqs = self.bsqs.auto_query('test 
search -stuff') self.assert_(isinstance(sqs, SearchQuerySet)) self.assertEqual([repr(the_filter) for the_filter in sqs.query.query_filters], ['<QueryFilter: AND content__exact=test>', '<QueryFilter: AND content__exact=search>', '<QueryFilter: NOT content__exact=stuff>']) sqs = self.bsqs.auto_query('test "my thing" search -stuff') self.assert_(isinstance(sqs, SearchQuerySet)) self.assertEqual([repr(the_filter) for the_filter in sqs.query.query_filters], ['<QueryFilter: AND content__exact=my thing>', '<QueryFilter: AND content__exact=test>', '<QueryFilter: AND content__exact=search>', '<QueryFilter: NOT content__exact=stuff>']) sqs = self.bsqs.auto_query('test "my thing" search \'moar quotes\' -stuff') self.assert_(isinstance(sqs, SearchQuerySet)) self.assertEqual([repr(the_filter) for the_filter in sqs.query.query_filters], ['<QueryFilter: AND content__exact=my thing>', '<QueryFilter: AND content__exact=moar quotes>', '<QueryFilter: AND content__exact=test>', '<QueryFilter: AND content__exact=search>', '<QueryFilter: NOT content__exact=stuff>']) sqs = self.bsqs.auto_query('test "my thing" search \'moar quotes\' "foo -stuff') self.assert_(isinstance(sqs, SearchQuerySet)) self.assertEqual([repr(the_filter) for the_filter in sqs.query.query_filters], ['<QueryFilter: AND content__exact=my thing>', '<QueryFilter: AND content__exact=moar quotes>', '<QueryFilter: AND content__exact=test>', '<QueryFilter: AND content__exact=search>', '<QueryFilter: AND content__exact="foo>', '<QueryFilter: NOT content__exact=stuff>']) def test_count(self): self.assertEqual(self.bsqs.count(), 0) def test_facet_counts(self): self.assertEqual(self.bsqs.facet_counts(), {}) def test_best_match(self): self.assert_(isinstance(self.msqs.best_match(), SearchResult)) def test_latest(self): self.assert_(isinstance(self.msqs.latest('pub_date'), SearchResult)) def test_more_like_this(self): mock = MockModel() mock.id = 1 self.assertEqual(len(self.msqs.more_like_this(mock)), 100) def test_facets(self): 
# Facet accumulation tests. NOTE(review): this class's test_date_facets uses a
# gap='/MONTH' keyword while the other class in this file uses gap_by='month' —
# presumably two API generations kept side by side; verify against the library
# version actually in use. Also covers query_facet, narrow, _clone, chaining.
sqs = self.bsqs.facet('foo') self.assert_(isinstance(sqs, SearchQuerySet)) self.assertEqual(len(sqs.query.facets), 1) sqs2 = self.bsqs.facet('foo').facet('bar') self.assert_(isinstance(sqs2, SearchQuerySet)) self.assertEqual(len(sqs2.query.facets), 2) def test_date_facets(self): sqs = self.bsqs.date_facet('foo', start_date=datetime.date(2008, 2, 25), end_date=datetime.date(2009, 2, 25), gap='/MONTH') self.assert_(isinstance(sqs, SearchQuerySet)) self.assertEqual(len(sqs.query.date_facets), 1) sqs2 = self.bsqs.date_facet('foo', start_date=datetime.date(2008, 2, 25), end_date=datetime.date(2009, 2, 25), gap='/MONTH').date_facet('bar', start_date=datetime.date(2007, 2, 25), end_date=datetime.date(2009, 2, 25), gap='/YEAR') self.assert_(isinstance(sqs2, SearchQuerySet)) self.assertEqual(len(sqs2.query.date_facets), 2) def test_query_facets(self): sqs = self.bsqs.query_facet('foo', '[bar TO *]') self.assert_(isinstance(sqs, SearchQuerySet)) self.assertEqual(len(sqs.query.query_facets), 1) sqs2 = self.bsqs.query_facet('foo', '[bar TO *]').query_facet('bar', '[100 TO 499]') self.assert_(isinstance(sqs2, SearchQuerySet)) self.assertEqual(len(sqs2.query.query_facets), 2) def test_narrow(self): sqs = self.bsqs.narrow('foo:moof') self.assert_(isinstance(sqs, SearchQuerySet)) self.assertEqual(len(sqs.query.narrow_queries), 1) def test_clone(self): results = self.msqs.filter(foo='bar', foo__lt='10') clone = results._clone() self.assert_(isinstance(clone, SearchQuerySet)) self.assertEqual(clone.site, results.site) self.assertEqual(str(clone.query), str(results.query)) self.assertEqual(clone._result_cache, []) self.assertEqual(clone._result_count, None) self.assertEqual(clone._cache_full, False) def test_chaining(self): sqs = self.bsqs.filter(content='foo') self.assert_(isinstance(sqs, SearchQuerySet)) self.assertEqual(len(sqs.query.query_filters), 1) # A second instance should inherit none of the changes from above. 
# Chaining must be non-destructive: the second filter() starts fresh from
# self.bsqs, so it again holds exactly one query filter.
sqs = self.bsqs.filter(content='bar') self.assert_(isinstance(sqs, SearchQuerySet)) self.assertEqual(len(sqs.query.query_filters), 1)