def test_stored_fields(self):
    """Stored fields are empty with no index registered, populated afterwards."""
    # Swap in an empty unified index so nothing is registered yet.
    stowed_index = connections['default']._index
    ui = UnifiedIndex()
    ui.build(indexes=[])
    connections['default']._index = ui

    # Without registering, we should receive an empty dict.
    self.assertEqual(self.no_data_sr.get_stored_fields(), {})
    self.assertEqual(self.extra_data_sr.get_stored_fields(), {})
    self.assertEqual(self.no_overwrite_data_sr.get_stored_fields(), {})

    from searchstack import indexes

    class TestSearchIndex(indexes.SearchIndex, indexes.Indexable):
        stored = indexes.CharField(model_attr='author', document=True)

        def get_model(self):
            return MockModel

    # Register the index & try again.
    ui.document_field = 'stored'
    ui.build(indexes=[TestSearchIndex()])

    self.assertEqual(self.no_data_sr.get_stored_fields(), {'stored': None})
    self.assertEqual(self.extra_data_sr.get_stored_fields(),
                     {'stored': 'I am stored data. How fun.'})
    self.assertEqual(self.no_overwrite_data_sr.get_stored_fields(),
                     {'stored': 'I am stored data. How fun.'})

    # Put the original unified index back.
    connections['default']._index = stowed_index
class SearchFormTestCase(TestCase):
    """Exercise ``SearchForm`` against a mock-backed index."""

    def setUp(self):
        super(SearchFormTestCase, self).setUp()

        # Stow the live unified index and install one built from mocks.
        self.old_unified_index = connections['default']._index
        self.ui = UnifiedIndex()
        self.bmmsi = BasicMockModelSearchIndex()
        self.bammsi = BasicAnotherMockModelSearchIndex()
        self.ui.build(indexes=[self.bmmsi, self.bammsi])
        connections['default']._index = self.ui

        # Rebuild the backend "index" from scratch.
        backend = connections['default'].get_backend()
        backend.clear()
        backend.update(self.bmmsi, MockModel.objects.all())

        self.sqs = SearchQuerySet()

    def tearDown(self):
        connections['default']._index = self.old_unified_index
        super(SearchFormTestCase, self).tearDown()

    def test_unbound(self):
        form = SearchForm({}, searchqueryset=self.sqs)

        self.assertEqual(form.errors, {})
        self.assertEqual(form.is_valid(), True)

        # Searching an empty form shouldn't blow up.
        result = form.search()
        self.assertTrue(isinstance(result, EmptySearchQuerySet))
class ResultsPerPageTestCase(TestCase):
    """Verify per-URL ``results_per_page`` overrides on the search view."""

    fixtures = ['initial_data.json']
    urls = 'test_searchstack.results_per_page_urls'

    def setUp(self):
        super(ResultsPerPageTestCase, self).setUp()

        # Stow the real unified index; install a mock-only one.
        self.old_unified_index = connections['default']._index
        self.ui = UnifiedIndex()
        self.bmmsi = BasicMockModelSearchIndex()
        self.bammsi = BasicAnotherMockModelSearchIndex()
        self.ui.build(indexes=[self.bmmsi, self.bammsi])
        connections['default']._index = self.ui

        # Rebuild the backend "index" from scratch.
        backend = connections['default'].get_backend()
        backend.clear()
        backend.update(self.bmmsi, MockModel.objects.all())

    def tearDown(self):
        connections['default']._index = self.old_unified_index
        super(ResultsPerPageTestCase, self).tearDown()

    def test_custom_results_per_page(self):
        # First URL is wired for one result per page.
        response = self.client.get('/search/', {'q': 'haystack'})
        self.assertEqual(response.status_code, 200)
        self.assertEqual(len(response.context[-1]['page'].object_list), 1)
        self.assertEqual(response.context[-1]['paginator'].per_page, 1)

        # Second URL is wired for two results per page.
        response = self.client.get('/search2/', {'q': 'hello world'})
        self.assertEqual(response.status_code, 200)
        self.assertEqual(len(response.context[-1]['page'].object_list), 2)
        self.assertEqual(response.context[-1]['paginator'].per_page, 2)
class FacetedSearchFormTestCase(TestCase):
    """Exercise ``FacetedSearchForm``: selected-facet parsing and narrowing."""

    def setUp(self):
        super(FacetedSearchFormTestCase, self).setUp()

        # Stow the real unified index; install a mock-only one.
        self.old_unified_index = connections['default']._index
        self.ui = UnifiedIndex()
        self.bmmsi = BasicMockModelSearchIndex()
        self.bammsi = BasicAnotherMockModelSearchIndex()
        self.ui.build(indexes=[self.bmmsi, self.bammsi])
        connections['default']._index = self.ui

        # Rebuild the backend "index" from scratch.
        backend = connections['default'].get_backend()
        backend.clear()
        backend.update(self.bmmsi, MockModel.objects.all())

        self.sqs = SearchQuerySet()

    def tearDown(self):
        connections['default']._index = self.old_unified_index
        super(FacetedSearchFormTestCase, self).tearDown()

    def test_init_with_selected_facets(self):
        # Default: no facets selected.
        form = FacetedSearchForm({}, searchqueryset=self.sqs)
        self.assertEqual(form.errors, {})
        self.assertEqual(form.is_valid(), True)
        self.assertEqual(form.selected_facets, [])

        # Explicit empty list behaves the same.
        form = FacetedSearchForm({}, selected_facets=[], searchqueryset=self.sqs)
        self.assertEqual(form.errors, {})
        self.assertEqual(form.is_valid(), True)
        self.assertEqual(form.selected_facets, [])

        # A single facet is kept as-is.
        form = FacetedSearchForm({}, selected_facets=['author:daniel'], searchqueryset=self.sqs)
        self.assertEqual(form.errors, {})
        self.assertEqual(form.is_valid(), True)
        self.assertEqual(form.selected_facets, ['author:daniel'])

        # Multiple facets preserve order.
        form = FacetedSearchForm({}, selected_facets=['author:daniel', 'author:chris'], searchqueryset=self.sqs)
        self.assertEqual(form.errors, {})
        self.assertEqual(form.is_valid(), True)
        self.assertEqual(form.selected_facets, ['author:daniel', 'author:chris'])

    def test_search(self):
        # No facets selected -> nothing narrowed.
        form = FacetedSearchForm({'q': 'test'}, selected_facets=[], searchqueryset=self.sqs)
        narrowed = form.search()
        self.assertEqual(narrowed.query.narrow_queries, set())

        # Test the "skip no-colon" bits: malformed facets are ignored.
        form = FacetedSearchForm({'q': 'test'}, selected_facets=['authordaniel'], searchqueryset=self.sqs)
        narrowed = form.search()
        self.assertEqual(narrowed.query.narrow_queries, set())

        # Well-formed facets become narrow queries.
        form = FacetedSearchForm({'q': 'test'}, selected_facets=['author:daniel'], searchqueryset=self.sqs)
        narrowed = form.search()
        self.assertEqual(narrowed.query.narrow_queries, set(['author:"daniel"']))

        form = FacetedSearchForm({'q': 'test'}, selected_facets=['author:daniel', 'author:chris'], searchqueryset=self.sqs)
        narrowed = form.search()
        self.assertEqual(narrowed.query.narrow_queries, set(['author:"daniel"', 'author:"chris"']))
def test_models(self):
    """``models()`` should restrict the query to the given model classes.

    Fix: the original stowed ``connections['default']._index`` but never
    restored it, leaking the mock unified index into every later test in the
    process. Sibling tests (e.g. ``test_load_all_read_queryset``) all restore;
    this now does too.
    """
    # Stow.
    old_unified_index = connections['default']._index
    ui = UnifiedIndex()
    bmmsi = BasicMockModelSearchIndex()
    bammsi = BasicAnotherMockModelSearchIndex()
    ui.build(indexes=[bmmsi, bammsi])
    connections['default']._index = ui

    msqs = SearchQuerySet()

    sqs = msqs.all()
    self.assertTrue(isinstance(sqs, SearchQuerySet))
    self.assertEqual(len(sqs.query.models), 0)

    sqs = msqs.models(MockModel)
    self.assertTrue(isinstance(sqs, SearchQuerySet))
    self.assertEqual(len(sqs.query.models), 1)

    sqs = msqs.models(MockModel, AnotherMockModel)
    self.assertTrue(isinstance(sqs, SearchQuerySet))
    self.assertEqual(len(sqs.query.models), 2)

    # This will produce a warning (AnotherMockModel is no longer registered)
    # but the model restriction is still recorded.
    ui.build(indexes=[bmmsi])
    sqs = msqs.models(AnotherMockModel)
    self.assertTrue(isinstance(sqs, SearchQuerySet))
    self.assertEqual(len(sqs.query.models), 1)

    # Restore.
    connections['default']._index = old_unified_index
def test_load_all_read_queryset(self):
    """``load_all()`` honours each index's ``read_queryset`` when filling objects."""
    # Stow the live unified index; install one with the "ghetto" index.
    previous_ui = connections['default']._index
    ui = UnifiedIndex()
    gafmmsi = GhettoAFifthMockModelSearchIndex()
    ui.build(indexes=[gafmmsi])
    connections['default']._index = ui
    gafmmsi.update()

    results = SearchQuerySet().load_all().all()
    results.query.backend = ReadQuerySetMockSearchBackend('default')
    results._fill_cache(0, 2)

    # The deleted result isn't returned by the default read_queryset.
    hydrated = [result for result in results._result_cache if result is not None]
    self.assertEqual(len(hydrated), 1)

    # Register a SearchIndex with a read_queryset that returns deleted items.
    rqstsi = TextReadQuerySetTestSearchIndex()
    ui.build(indexes=[rqstsi])
    rqstsi.update()

    results = SearchQuerySet().load_all().all()
    results.query.backend = ReadQuerySetMockSearchBackend('default')
    results._fill_cache(0, 2)

    # Both the deleted and not-deleted items are returned this time.
    hydrated = [result for result in results._result_cache if result is not None]
    self.assertEqual(len(hydrated), 2)

    # Restore.
    connections['default']._index = previous_ui
def setUp(self):
    """Install a mock index on the 'solr' connection and rename the internal fields.

    NOTE(review): this mutates module-level ``constants`` — presumably the matching
    tearDown restores them; confirm outside this chunk.
    """
    check_solr()
    super(AlteredInternalNamesTestCase, self).setUp()

    # Stow the real unified index; install one with a single mock index.
    self.old_ui = connections['solr'].get_unified_index()
    ui = UnifiedIndex()
    ui.build(indexes=[MockModelSearchIndex()])
    connections['solr']._index = ui

    # Rename the internal bookkeeping fields for this test case.
    constants.ID = 'my_id'
    constants.DJANGO_CT = 'my_django_ct'
    constants.DJANGO_ID = 'my_django_id'
class FacetedSearchViewTestCase(TestCase):
    """Exercise ``FacetedSearchView``: empty queries, default form, facet parsing."""

    def setUp(self):
        super(FacetedSearchViewTestCase, self).setUp()

        # Stow the real unified index; install a mock-only one.
        self.old_unified_index = connections['default']._index
        self.ui = UnifiedIndex()
        self.bmmsi = BasicMockModelSearchIndex()
        self.bammsi = BasicAnotherMockModelSearchIndex()
        self.ui.build(indexes=[self.bmmsi, self.bammsi])
        connections['default']._index = self.ui

        # Rebuild the backend "index" from scratch.
        backend = connections['default'].get_backend()
        backend.clear()
        backend.update(self.bmmsi, MockModel.objects.all())

    def tearDown(self):
        connections['default']._index = self.old_unified_index
        super(FacetedSearchViewTestCase, self).tearDown()

    def _view_for_querystring(self, qs):
        # Build a FacetedSearchView wired up with a GET querystring.
        view = FacetedSearchView()
        view.request = HttpRequest()
        view.request.GET = QueryDict(qs)
        view.form = view.build_form()
        return view

    def test_search_no_query(self):
        response = self.client.get(reverse('searchstack_faceted_search'))
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.context['facets'], {})

    def test_empty_results(self):
        view = self._view_for_querystring('')
        self.assertTrue(isinstance(view.get_results(), EmptySearchQuerySet))

    def test_default_form(self):
        view = self._view_for_querystring('')
        self.assertTrue(isinstance(view.form, FacetedSearchForm))

    def test_list_selected_facets(self):
        view = self._view_for_querystring('')
        self.assertEqual(view.form.selected_facets, [])

        view = self._view_for_querystring('selected_facets=author:daniel&selected_facets=author:chris')
        self.assertEqual(view.form.selected_facets, ['author:daniel', 'author:chris'])
class ModelSearchFormTestCase(TestCase):
    """Exercise ``ModelSearchForm`` and the ``model_choices`` helper."""

    def setUp(self):
        super(ModelSearchFormTestCase, self).setUp()

        # Stow the real unified index; install a mock-only one.
        self.old_unified_index = connections['default']._index
        self.ui = UnifiedIndex()
        self.bmmsi = BasicMockModelSearchIndex()
        self.bammsi = BasicAnotherMockModelSearchIndex()
        self.ui.build(indexes=[self.bmmsi, self.bammsi])
        connections['default']._index = self.ui

        # Rebuild the backend "index" from scratch.
        backend = connections['default'].get_backend()
        backend.clear()
        backend.update(self.bmmsi, MockModel.objects.all())

        self.sqs = SearchQuerySet()

    def tearDown(self):
        connections['default']._index = self.old_unified_index
        super(ModelSearchFormTestCase, self).tearDown()

    def test_models_regression_1(self):
        # Regression for issue #1.
        msf = ModelSearchForm({
            'query': 'test',
            'models': ['core.mockmodel', 'core.anothermockmodel'],
        }, searchqueryset=self.sqs)

        self.assertEqual(msf.fields['models'].choices,
                         [('core.anothermockmodel', 'Another mock models'),
                          ('core.mockmodel', 'Mock models')])
        self.assertEqual(msf.errors, {})
        self.assertEqual(msf.is_valid(), True)

        sqs_with_models = msf.search()
        self.assertEqual(len(sqs_with_models.query.models), 2)

    def test_model_choices(self):
        choices = model_choices()
        self.assertEqual(len(choices), 2)
        self.assertEqual([option[1] for option in choices],
                         ['Another mock models', 'Mock models'])

    def test_model_choices_unicode(self):
        # Non-ASCII verbose names must survive the round trip.
        stowed_verbose_name_plural = MockModel._meta.verbose_name_plural
        MockModel._meta.verbose_name_plural = '☃'

        choices = model_choices()
        self.assertEqual(len(choices), 2)
        self.assertEqual([option[1] for option in choices],
                         ['Another mock models', '☃'])

        MockModel._meta.verbose_name_plural = stowed_verbose_name_plural
def test_read_queryset(self):
    """An index's ``read_queryset`` controls which objects a result can load."""
    # The model is flagged deleted so not returned by the default manager.
    deleted1 = SearchResult('core', 'afifthmockmodel', 2, 2)
    self.assertEqual(deleted1.object, None)

    # Swap in a unified index whose test index uses the soft-delete manager.
    stowed_index = connections['default']._index
    ui = UnifiedIndex()
    ui.document_field = 'author'
    ui.build(indexes=[ReadQuerySetTestSearchIndex()])
    connections['default']._index = ui

    # The soft delete manager returns the object.
    deleted2 = SearchResult('core', 'afifthmockmodel', 2, 2)
    self.assertNotEqual(deleted2.object, None)
    self.assertEqual(deleted2.object.author, 'sam2')

    # Put the original unified index back.
    connections['default']._index = stowed_index
def test_load_all(self):
    """``load_all()`` hydrates char-PK models and is empty with no indexes."""
    # Models with character primary keys.
    sqs = SearchQuerySet()
    sqs.query.backend = CharPKMockSearchBackend('charpk')
    results = sqs.load_all().all()
    self.assertEqual(len(results._result_cache), 0)

    results._fill_cache(0, 2)
    hydrated = [result for result in results._result_cache if result is not None]
    self.assertEqual(len(hydrated), 2)

    # If nothing is handled, you get nothing.
    stowed_ui = connections['default']._index
    ui = UnifiedIndex()
    ui.build(indexes=[])
    connections['default']._index = ui

    empty = self.msqs.load_all()
    self.assertTrue(isinstance(empty, SearchQuerySet))
    self.assertEqual(len(empty), 0)

    connections['default']._index = stowed_ui
class BasicSearchViewTestCase(TestCase):
    """Exercise the function-based ``basic_search`` view."""

    fixtures = ['initial_data.json']

    def setUp(self):
        super(BasicSearchViewTestCase, self).setUp()

        # Stow the real unified index; install a mock-only one.
        self.old_unified_index = connections['default']._index
        self.ui = UnifiedIndex()
        self.bmmsi = BasicMockModelSearchIndex()
        self.bammsi = BasicAnotherMockModelSearchIndex()
        self.ui.build(indexes=[self.bmmsi, self.bammsi])
        connections['default']._index = self.ui

        # Rebuild the backend "index" from scratch.
        backend = connections['default'].get_backend()
        backend.clear()
        backend.update(self.bmmsi, MockModel.objects.all())

    def tearDown(self):
        connections['default']._index = self.old_unified_index
        super(BasicSearchViewTestCase, self).tearDown()

    def test_search_no_query(self):
        response = self.client.get(reverse('searchstack_basic_search'))
        self.assertEqual(response.status_code, 200)

    def test_search_query(self):
        response = self.client.get(reverse('searchstack_basic_search'), {'q': 'haystack'})
        self.assertEqual(response.status_code, 200)

        context = response.context[-1]
        self.assertEqual(type(context['form']), ModelSearchForm)
        self.assertEqual(len(context['page'].object_list), 3)
        self.assertEqual(context['page'].object_list[0].content_type(), 'core.mockmodel')
        self.assertEqual(context['page'].object_list[0].pk, '1')
        self.assertEqual(context['query'], 'haystack')

    def test_invalid_page(self):
        # An out-of-range page number should 404.
        response = self.client.get(reverse('searchstack_basic_search'),
                                   {'q': 'haystack', 'page': '165233'})
        self.assertEqual(response.status_code, 404)
class PickleSearchQuerySetTestCase(TestCase):
    """A fully-evaluated ``SearchQuerySet`` must round-trip through pickle."""

    fixtures = ['initial_data.json']

    def setUp(self):
        super(PickleSearchQuerySetTestCase, self).setUp()

        # Stow the real unified index; install a mock-only one.
        self.old_unified_index = connections['default']._index
        self.ui = UnifiedIndex()
        self.bmmsi = BasicMockModelSearchIndex()
        self.cpkmmsi = CharPKMockModelSearchIndex()
        self.ui.build(indexes=[self.bmmsi, self.cpkmmsi])
        connections['default']._index = self.ui

        # Rebuild the backend "index" from scratch.
        backend = connections['default'].get_backend()
        backend.clear()
        backend.update(self.bmmsi, MockModel.objects.all())

        self.msqs = SearchQuerySet()

        # Stow.
        reset_search_queries()

    def tearDown(self):
        # Restore.
        connections['default']._index = self.old_unified_index
        super(PickleSearchQuerySetTestCase, self).tearDown()

    def test_pickling(self):
        results = self.msqs.all()

        # Iterate once so the result cache is fully populated before pickling.
        for _ in results:
            pass

        in_a_pickle = pickle.dumps(results)
        like_a_cuke = pickle.loads(in_a_pickle)
        self.assertEqual(len(like_a_cuke), len(results))
        self.assertEqual(like_a_cuke[0].id, results[0].id)
class SearchIndexTestCase(TestCase):
    """Exercise ``SearchIndex``: field declaration, prepare(), CRUD and inheritance.

    Fixes applied:
    - two bare ``except:`` clauses narrowed to ``except Exception:`` so they no
      longer swallow ``SystemExit``/``KeyboardInterrupt``;
    - ``test_proper_fields`` re-checked ``fields['extra']`` after asserting
      ``'hello' in self.cmi.fields`` (copy-paste bug) — now checks ``fields['hello']``;
    - ``test_thread_safety`` now asserts no worker exceptions before reading the
      queue, so a failed worker fails fast instead of hanging on ``Queue.get``.
    """

    fixtures = ['initial_data.json']

    def setUp(self):
        super(SearchIndexTestCase, self).setUp()
        self.sb = connections['default'].get_backend()
        self.mi = GoodMockSearchIndex()
        self.cmi = GoodCustomMockSearchIndex()
        self.cnmi = GoodNullableMockSearchIndex()
        self.gfmsi = GoodFacetedMockSearchIndex()

        # Fake the unified index so only ``self.mi`` is registered.
        self.old_unified_index = connections['default']._index
        self.ui = UnifiedIndex()
        self.ui.build(indexes=[self.mi])
        connections['default']._index = self.ui

        self.sample_docs = {
            'core.mockmodel.1': {
                'text': 'Indexed!\n1',
                'django_id': '1',
                'django_ct': 'core.mockmodel',
                'extra': 'Stored!\n1',
                'author': 'daniel1',
                'pub_date': datetime.datetime(2009, 3, 17, 6, 0),
                'id': 'core.mockmodel.1'
            },
            'core.mockmodel.2': {
                'text': 'Indexed!\n2',
                'django_id': '2',
                'django_ct': 'core.mockmodel',
                'extra': 'Stored!\n2',
                'author': 'daniel2',
                'pub_date': datetime.datetime(2009, 3, 17, 7, 0),
                'id': 'core.mockmodel.2'
            },
            'core.mockmodel.3': {
                'text': 'Indexed!\n3',
                'django_id': '3',
                'django_ct': 'core.mockmodel',
                'extra': 'Stored!\n3',
                'author': 'daniel3',
                'pub_date': datetime.datetime(2009, 3, 17, 8, 0),
                'id': 'core.mockmodel.3'
            }
        }

    def tearDown(self):
        connections['default']._index = self.old_unified_index
        super(SearchIndexTestCase, self).tearDown()

    def test_no_contentfield_present(self):
        self.assertRaises(SearchFieldError, BadSearchIndex1)

    def test_too_many_contentfields_present(self):
        self.assertRaises(SearchFieldError, BadSearchIndex2)

    def test_contentfield_present(self):
        try:
            mi = GoodMockSearchIndex()
        except Exception:  # narrowed from a bare except
            self.fail()

    def test_proper_fields(self):
        self.assertEqual(len(self.mi.fields), 4)
        self.assertTrue('text' in self.mi.fields)
        self.assertTrue(isinstance(self.mi.fields['text'], indexes.CharField))
        self.assertTrue('author' in self.mi.fields)
        self.assertTrue(isinstance(self.mi.fields['author'], indexes.CharField))
        self.assertTrue('pub_date' in self.mi.fields)
        self.assertTrue(isinstance(self.mi.fields['pub_date'], indexes.DateTimeField))
        self.assertTrue('extra' in self.mi.fields)
        self.assertTrue(isinstance(self.mi.fields['extra'], indexes.CharField))

        self.assertEqual(len(self.cmi.fields), 7)
        self.assertTrue('text' in self.cmi.fields)
        self.assertTrue(isinstance(self.cmi.fields['text'], indexes.CharField))
        self.assertTrue('author' in self.cmi.fields)
        self.assertTrue(isinstance(self.cmi.fields['author'], indexes.CharField))
        self.assertTrue('author_exact' in self.cmi.fields)
        self.assertTrue(isinstance(self.cmi.fields['author_exact'], indexes.FacetCharField))
        self.assertTrue('pub_date' in self.cmi.fields)
        self.assertTrue(isinstance(self.cmi.fields['pub_date'], indexes.DateTimeField))
        self.assertTrue('pub_date_exact' in self.cmi.fields)
        self.assertTrue(isinstance(self.cmi.fields['pub_date_exact'], indexes.FacetDateTimeField))
        self.assertTrue('extra' in self.cmi.fields)
        self.assertTrue(isinstance(self.cmi.fields['extra'], indexes.CharField))
        self.assertTrue('hello' in self.cmi.fields)
        # Fixed copy-paste bug: this previously re-checked fields['extra'].
        self.assertTrue(isinstance(self.cmi.fields['hello'], indexes.CharField))

    def test_index_queryset(self):
        self.assertEqual(len(self.cmi.index_queryset()), 3)

    def test_read_queryset(self):
        self.assertEqual(len(self.cmi.read_queryset()), 2)

    def test_build_queryset(self):
        # The custom SearchIndex.build_queryset returns the same records as
        # the read_queryset.
        self.assertEqual(len(self.cmi.build_queryset()), 2)

        # Store a reference to the original method.
        old_guf = self.mi.__class__.get_updated_field

        self.mi.__class__.get_updated_field = lambda self: 'pub_date'

        # With an updated field, we should get filtered results.
        sd = datetime.datetime(2009, 3, 17, 7, 0)
        self.assertEqual(len(self.mi.build_queryset(start_date=sd)), 2)

        ed = datetime.datetime(2009, 3, 17, 7, 59)
        self.assertEqual(len(self.mi.build_queryset(end_date=ed)), 2)

        sd = datetime.datetime(2009, 3, 17, 6, 0)
        ed = datetime.datetime(2009, 3, 17, 6, 59)
        self.assertEqual(len(self.mi.build_queryset(start_date=sd, end_date=ed)), 1)

        # Remove the updated field for the next test.
        del self.mi.__class__.get_updated_field

        # The default should return all 3 even if we specify a start date
        # because there is no updated field specified.
        self.assertEqual(len(self.mi.build_queryset(start_date=sd)), 3)

        # Restore the original attribute.
        self.mi.__class__.get_updated_field = old_guf

    def test_prepare(self):
        mock = MockModel()
        mock.pk = 20
        mock.author = 'daniel%s' % mock.id
        mock.pub_date = datetime.datetime(2009, 1, 31, 4, 19, 0)

        self.assertEqual(len(self.mi.prepare(mock)), 7)
        self.assertEqual(sorted(self.mi.prepare(mock).keys()),
                         ['author', 'django_ct', 'django_id', 'extra', 'id', 'pub_date', 'text'])

    def test_custom_prepare(self):
        mock = MockModel()
        mock.pk = 20
        mock.author = 'daniel%s' % mock.id
        mock.pub_date = datetime.datetime(2009, 1, 31, 4, 19, 0)

        self.assertEqual(len(self.cmi.prepare(mock)), 11)
        self.assertEqual(sorted(self.cmi.prepare(mock).keys()),
                         ['author', 'author_exact', 'django_ct', 'django_id', 'extra',
                          'hello', 'id', 'pub_date', 'pub_date_exact', 'text', 'whee'])

        self.assertEqual(len(self.cmi.full_prepare(mock)), 11)
        self.assertEqual(sorted(self.cmi.full_prepare(mock).keys()),
                         ['author', 'author_exact', 'django_ct', 'django_id', 'extra',
                          'hello', 'id', 'pub_date', 'pub_date_exact', 'text', 'whee'])

    def test_thread_safety(self):
        # This is a regression. ``SearchIndex`` used to write to
        # ``self.prepared_data``, which would leak between threads if things
        # went too fast.
        exceptions = []

        def threaded_prepare(index_queue, index, model):
            try:
                index.queue = index_queue
                prepped = index.prepare(model)
            except Exception as e:
                exceptions.append(e)
                raise

        class ThreadedSearchIndex(GoodMockSearchIndex):
            def prepare_author(self, obj):
                if obj.pk == 20:
                    time.sleep(0.1)
                else:
                    time.sleep(0.5)

                index_queue.put(self.prepared_data['author'])
                return self.prepared_data['author']

        tmi = ThreadedSearchIndex()
        index_queue = queue.Queue()
        mock_1 = MockModel()
        mock_1.pk = 20
        mock_1.author = 'foo'
        mock_1.pub_date = datetime.datetime(2009, 1, 31, 4, 19, 0)
        mock_2 = MockModel()
        mock_2.pk = 21
        mock_2.author = 'daniel%s' % mock_2.id
        mock_2.pub_date = datetime.datetime(2009, 1, 31, 4, 19, 0)

        th1 = Thread(target=threaded_prepare, args=(index_queue, tmi, mock_1))
        th2 = Thread(target=threaded_prepare, args=(index_queue, tmi, mock_2))

        th1.start()
        th2.start()
        th1.join()
        th2.join()

        # Fail fast if a worker raised; otherwise a failed worker never puts a
        # value and the ``get()`` calls below would block forever.
        self.assertEqual(exceptions, [])

        mock_1_result = index_queue.get()
        mock_2_result = index_queue.get()
        self.assertEqual(mock_1_result, 'foo')
        self.assertEqual(mock_2_result, 'daniel21')

    def test_custom_prepare_author(self):
        mock = MockModel()
        mock.pk = 20
        mock.author = 'daniel%s' % mock.id
        mock.pub_date = datetime.datetime(2009, 1, 31, 4, 19, 0)

        self.assertEqual(len(self.cmi.prepare(mock)), 11)
        self.assertEqual(sorted(self.cmi.prepare(mock).keys()),
                         ['author', 'author_exact', 'django_ct', 'django_id', 'extra',
                          'hello', 'id', 'pub_date', 'pub_date_exact', 'text', 'whee'])

        self.assertEqual(len(self.cmi.full_prepare(mock)), 11)
        self.assertEqual(sorted(self.cmi.full_prepare(mock).keys()),
                         ['author', 'author_exact', 'django_ct', 'django_id', 'extra',
                          'hello', 'id', 'pub_date', 'pub_date_exact', 'text', 'whee'])
        self.assertEqual(self.cmi.prepared_data['author'], "Hi, I'm daniel20")
        self.assertEqual(self.cmi.prepared_data['author_exact'], "Hi, I'm daniel20")

    def test_custom_model_attr(self):
        mock = MockModel()
        mock.pk = 20
        mock.author = 'daniel%s' % mock.id
        mock.pub_date = datetime.datetime(2009, 1, 31, 4, 19, 0)

        self.assertEqual(len(self.cmi.prepare(mock)), 11)
        self.assertEqual(sorted(self.cmi.prepare(mock).keys()),
                         ['author', 'author_exact', 'django_ct', 'django_id', 'extra',
                          'hello', 'id', 'pub_date', 'pub_date_exact', 'text', 'whee'])

        self.assertEqual(len(self.cmi.full_prepare(mock)), 11)
        self.assertEqual(sorted(self.cmi.full_prepare(mock).keys()),
                         ['author', 'author_exact', 'django_ct', 'django_id', 'extra',
                          'hello', 'id', 'pub_date', 'pub_date_exact', 'text', 'whee'])
        self.assertEqual(self.cmi.prepared_data['hello'], 'World!')

    def test_custom_index_fieldname(self):
        mock = MockModel()
        mock.pk = 20
        mock.author = 'daniel%s' % mock.id
        mock.pub_date = datetime.datetime(2009, 1, 31, 4, 19, 0)

        cofnmi = GoodOverriddenFieldNameMockSearchIndex()
        self.assertEqual(len(cofnmi.prepare(mock)), 6)
        self.assertEqual(sorted(cofnmi.prepare(mock).keys()),
                         ['django_ct', 'django_id', 'hello', 'id', 'more_content', 'name_s'])
        self.assertEqual(cofnmi.prepared_data['name_s'], 'daniel20')
        self.assertEqual(cofnmi.get_content_field(), 'more_content')

    def test_get_content_field(self):
        self.assertEqual(self.mi.get_content_field(), 'text')

    def test_update(self):
        self.sb.clear()
        self.assertEqual(self.sb.search('*')['hits'], 0)
        self.mi.update()
        self.assertEqual(self.sb.search('*')['hits'], 3)
        self.sb.clear()

    def test_update_object(self):
        self.sb.clear()
        self.assertEqual(self.sb.search('*')['hits'], 0)

        mock = MockModel()
        mock.pk = 20
        mock.author = 'daniel%s' % mock.id
        mock.pub_date = datetime.datetime(2009, 1, 31, 4, 19, 0)

        self.mi.update_object(mock)
        self.assertEqual([(res.content_type(), res.pk) for res in self.sb.search('*')['results']],
                         [('core.mockmodel', '20')])
        self.sb.clear()

    def test_remove_object(self):
        self.mi.update()
        self.assertEqual(self.sb.search('*')['hits'], 3)

        mock = MockModel()
        mock.pk = 20
        mock.author = 'daniel%s' % mock.id
        mock.pub_date = datetime.datetime(2009, 1, 31, 4, 19, 0)

        self.mi.update_object(mock)
        self.assertEqual(self.sb.search('*')['hits'], 4)

        self.mi.remove_object(mock)
        self.assertEqual([(res.content_type(), res.pk) for res in self.sb.search('*')['results']],
                         [('core.mockmodel', '1'), ('core.mockmodel', '2'), ('core.mockmodel', '3')])

        # Put it back so we can test passing kwargs.
        mock = MockModel()
        mock.pk = 20
        mock.author = 'daniel%s' % mock.id
        mock.pub_date = datetime.datetime(2009, 1, 31, 4, 19, 0)

        self.mi.update_object(mock)
        self.assertEqual(self.sb.search('*')['hits'], 4)

        self.mi.remove_object(mock, commit=False)
        self.assertEqual([(res.content_type(), res.pk) for res in self.sb.search('*')['results']],
                         [('core.mockmodel', '1'), ('core.mockmodel', '2'),
                          ('core.mockmodel', '3'), ('core.mockmodel', '20')])
        self.sb.clear()

    def test_clear(self):
        self.mi.update()
        self.assertGreater(self.sb.search('*')['hits'], 0)
        self.mi.clear()
        self.assertEqual(self.sb.search('*')['hits'], 0)

    def test_reindex(self):
        self.mi.reindex()
        self.assertEqual([(res.content_type(), res.pk) for res in self.sb.search('*')['results']],
                         [('core.mockmodel', '1'), ('core.mockmodel', '2'), ('core.mockmodel', '3')])
        self.sb.clear()

    def test_inheritance(self):
        try:
            agmi = AltGoodMockSearchIndex()
        except Exception:  # narrowed from a bare except
            self.fail()

        self.assertEqual(len(agmi.fields), 5)
        self.assertTrue('text' in agmi.fields)
        self.assertTrue(isinstance(agmi.fields['text'], indexes.CharField))
        self.assertTrue('author' in agmi.fields)
        self.assertTrue(isinstance(agmi.fields['author'], indexes.CharField))
        self.assertTrue('pub_date' in agmi.fields)
        self.assertTrue(isinstance(agmi.fields['pub_date'], indexes.DateTimeField))
        self.assertTrue('extra' in agmi.fields)
        self.assertTrue(isinstance(agmi.fields['extra'], indexes.CharField))
        self.assertTrue('additional' in agmi.fields)
        self.assertTrue(isinstance(agmi.fields['additional'], indexes.CharField))

    def test_proper_field_resolution(self):
        mrofsc = MROFieldsSearchChild()
        mock = MockModel()
        mock.pk = 20
        mock.author = 'daniel%s' % mock.id
        mock.pub_date = datetime.datetime(2009, 1, 31, 4, 19, 0)
        mock.test_a = 'This is A'
        mock.test_b = 'This is B'

        self.assertEqual(len(mrofsc.fields), 1)
        prepped_data = mrofsc.prepare(mock)
        self.assertEqual(len(prepped_data), 4)
        self.assertEqual(prepped_data['text'], 'This is A')

    def test_load_all_queryset(self):
        self.assertEqual([obj.id for obj in self.cmi.load_all_queryset()], [2, 3])

    def test_nullable(self):
        mock = MockModel()
        mock.pk = 20
        mock.author = None
        mock.pub_date = datetime.datetime(2009, 1, 31, 4, 19, 0)

        prepared_data = self.cnmi.prepare(mock)
        self.assertEqual(len(prepared_data), 6)
        self.assertEqual(sorted(prepared_data.keys()),
                         ['author', 'author_exact', 'django_ct', 'django_id', 'id', 'text'])

        # full_prepare drops the null fields entirely.
        prepared_data = self.cnmi.full_prepare(mock)
        self.assertEqual(len(prepared_data), 4)
        self.assertEqual(sorted(prepared_data.keys()),
                         ['django_ct', 'django_id', 'id', 'text'])

    def test_custom_facet_fields(self):
        mock = MockModel()
        mock.pk = 20
        mock.author = 'daniel'
        mock.pub_date = datetime.datetime(2009, 1, 31, 4, 19, 0)

        prepared_data = self.gfmsi.prepare(mock)
        self.assertEqual(len(prepared_data), 8)
        self.assertEqual(sorted(prepared_data.keys()),
                         ['author', 'author_foo', 'django_ct', 'django_id', 'id',
                          'pub_date', 'pub_date_exact', 'text'])

        prepared_data = self.gfmsi.full_prepare(mock)
        self.assertEqual(len(prepared_data), 8)
        self.assertEqual(sorted(prepared_data.keys()),
                         ['author', 'author_foo', 'django_ct', 'django_id', 'id',
                          'pub_date', 'pub_date_exact', 'text'])
        self.assertEqual(prepared_data['author_foo'], "Hi, I'm daniel")
        # NOTE(review): the index presumably hardcodes this facet value — it does
        # not match ``mock.pub_date``; confirm against GoodFacetedMockSearchIndex.
        self.assertEqual(prepared_data['pub_date_exact'], '2010-10-26T01:54:32')
class ManagementCommandTestCase(TestCase):
    """Exercise clear_index / update_index / rebuild_index against live Solr."""

    fixtures = ['bulk_data.json']

    def setUp(self):
        super(ManagementCommandTestCase, self).setUp()
        self.solr = pysolr.Solr(settings.SEARCHSTACK_CONNECTIONS['solr']['URL'])

        # Stow the real unified index; install one with a single mock index.
        self.old_ui = connections['solr'].get_unified_index()
        self.ui = UnifiedIndex()
        self.smmi = SolrMockSearchIndex()
        self.ui.build(indexes=[self.smmi])
        connections['solr']._index = self.ui

    def tearDown(self):
        connections['solr']._index = self.old_ui
        super(ManagementCommandTestCase, self).tearDown()

    def _hits(self):
        # Current number of documents visible in Solr.
        return self.solr.search('*:*').hits

    def test_basic_commands(self):
        call_command('clear_index', interactive=False, verbosity=0)
        self.assertEqual(self._hits(), 0)

        # Without a commit nothing becomes visible.
        call_command('update_index', verbosity=0, commit=False)
        self.assertEqual(self._hits(), 0)

        call_command('update_index', verbosity=0)
        self.assertEqual(self._hits(), 23)

        call_command('clear_index', interactive=False, verbosity=0)
        self.assertEqual(self._hits(), 0)

        call_command('rebuild_index', interactive=False, verbosity=0, commit=False)
        self.assertEqual(self._hits(), 0)

        call_command('rebuild_index', interactive=False, verbosity=0, commit=True)
        self.assertEqual(self._hits(), 23)

        call_command('clear_index', interactive=False, verbosity=0, commit=False)
        self.assertEqual(self._hits(), 23)

    def test_remove(self):
        call_command('clear_index', interactive=False, verbosity=0)
        self.assertEqual(self._hits(), 0)

        call_command('update_index', verbosity=0)
        self.assertEqual(self._hits(), 23)

        # Remove several instances, two of which will fit in the same block:
        MockModel.objects.get(pk=1).delete()
        MockModel.objects.get(pk=2).delete()
        MockModel.objects.get(pk=8).delete()
        self.assertEqual(self._hits(), 23)

        # Plain ``update_index`` doesn't fix it.
        call_command('update_index', verbosity=0)
        self.assertEqual(self._hits(), 23)

        # Remove without commit also doesn't affect queries:
        call_command('update_index', remove=True, verbosity=0, batchsize=2, commit=False)
        self.assertEqual(self._hits(), 23)

        # … but remove with commit does:
        call_command('update_index', remove=True, verbosity=0, batchsize=2)
        self.assertEqual(self._hits(), 20)

    def test_age(self):
        call_command('clear_index', interactive=False, verbosity=0)
        self.assertEqual(self._hits(), 0)

        start = datetime.datetime.now() - datetime.timedelta(hours=3)
        end = datetime.datetime.now()

        mock = MockModel.objects.get(pk=1)
        mock.pub_date = datetime.datetime.now() - datetime.timedelta(hours=2)
        mock.save()
        self.assertEqual(MockModel.objects.filter(pub_date__range=(start, end)).count(), 1)

        call_command('update_index', age=3, verbosity=0)
        self.assertEqual(self._hits(), 1)

    def test_age_with_time_zones(self):
        """Haystack should use django.utils.timezone.now on Django 1.4+"""
        from django.utils.timezone import now as django_now

        from searchstack.management.commands.update_index import now as searchstack_now

        self.assertIs(searchstack_now, django_now,
                      msg="update_index should use django.utils.timezone.now")

        with patch("searchstack.management.commands.update_index.now") as m:
            m.return_value = django_now()
            self.test_age()
            assert m.called

    def test_dates(self):
        call_command('clear_index', interactive=False, verbosity=0)
        self.assertEqual(self._hits(), 0)

        start = datetime.datetime.now() - datetime.timedelta(hours=5, minutes=30)
        end = datetime.datetime.now() - datetime.timedelta(hours=2)

        mock_1 = MockModel.objects.get(pk=1)
        mock_1.pub_date = datetime.datetime.now() - datetime.timedelta(hours=5, minutes=1)
        mock_1.save()
        mock_2 = MockModel.objects.get(pk=2)
        mock_2.pub_date = datetime.datetime.now() - datetime.timedelta(hours=3)
        mock_2.save()
        mock_3 = MockModel.objects.get(pk=3)
        mock_3.pub_date = datetime.datetime.now() - datetime.timedelta(hours=1)
        mock_3.save()
        self.assertEqual(MockModel.objects.filter(pub_date__range=(start, end)).count(), 2)

        call_command('update_index', start_date=start.isoformat(),
                     end_date=end.isoformat(), verbosity=0)
        self.assertEqual(self._hits(), 2)

    def test_multiprocessing(self):
        call_command('clear_index', interactive=False, verbosity=0)
        self.assertEqual(self._hits(), 0)

        # FIXME: this one is failing intermittently, possibly a bug in Java code
        # Disable until test infrastructure is updated for Solr 5
        # call_command('update_index', verbosity=2, workers=2, batchsize=5)
        # self.assertEqual(self.solr.search('*:*').hits, 23)

        call_command('clear_index', interactive=False, verbosity=0)
        self.assertEqual(self._hits(), 0)

        # Workers without a commit still leave nothing visible.
        call_command('update_index', verbosity=2, workers=2, batchsize=5, commit=False)
        self.assertEqual(self._hits(), 0)
class SearchModelAdminTestCase(TestCase):
    """Exercise the search-enabled ModelAdmin changelist against a live Solr backend.

    Verifies that plain changelist views issue no search queries, that a ``q``
    parameter routes through the search backend (exactly 3 logged queries), and
    that the change view is untouched by the search machinery.
    """

    fixtures = ['bulk_data.json']

    def setUp(self):
        super(SearchModelAdminTestCase, self).setUp()
        # With the models setup, you get the proper bits.
        # Stow.
        self.old_ui = connections['solr'].get_unified_index()
        self.ui = UnifiedIndex()
        smmsi = SolrMockModelSearchIndex()
        self.ui.build(indexes=[smmsi])
        connections['solr']._index = self.ui
        # Wipe it clean.
        clear_solr_index()
        # Force indexing of the content.
        smmsi.update(using='solr')
        # Admin views require an authenticated superuser.
        User.objects.create_superuser(
            username='******',
            password='******',
            email='*****@*****.**',
        )

    def tearDown(self):
        # Restore.
        connections['solr']._index = self.old_ui
        super(SearchModelAdminTestCase, self).tearDown()

    def test_usage(self):
        reset_search_queries()
        self.assertEqual(len(connections['solr'].queries), 0)
        self.assertEqual(self.client.login(username='******', password='******'), True)
        # First, non-search behavior.
        resp = self.client.get('/admin/core/mockmodel/')
        self.assertEqual(resp.status_code, 200)
        # No search queries should have been issued for a plain changelist.
        self.assertEqual(len(connections['solr'].queries), 0)
        self.assertEqual(resp.context['cl'].full_result_count, 23)
        # Then search behavior.
        resp = self.client.get('/admin/core/mockmodel/', data={'q': 'Haystack'})
        self.assertEqual(resp.status_code, 200)
        self.assertEqual(len(connections['solr'].queries), 3)
        self.assertEqual(resp.context['cl'].full_result_count, 23)
        # Ensure they aren't search results.
        self.assertEqual(isinstance(resp.context['cl'].result_list[0], MockModel), True)
        result_pks = [i.pk for i in resp.context['cl'].result_list]
        self.assertIn(5, result_pks)
        # Make sure only changelist is affected.
        change_url = '/admin/core/mockmodel/1/'
        if DJANGO_VERSION >= (1, 9, 0):
            # in Django 1.9 change view urls changed
            change_url += 'change/'
        resp = self.client.get(change_url)
        self.assertEqual(resp.status_code, 200)
        # Query count unchanged: the change view never touches the backend.
        self.assertEqual(len(connections['solr'].queries), 3)
        self.assertEqual(resp.context['original'].id, 1)
class BaseSearchQueryTestCase(TestCase):
    """Unit tests for ``BaseSearchQuery``: filter composition, limits, facets,
    cloning and query logging, using the mock backends for anything that runs."""

    fixtures = ['bulk_data.json']

    def setUp(self):
        super(BaseSearchQueryTestCase, self).setUp()
        # A fresh, backend-less query object per test.
        self.bsq = BaseSearchQuery()

    def test_get_count(self):
        # The base class defines no backend behavior, so counting must raise.
        self.bsq.add_filter(SQ(foo='bar'))
        self.assertRaises(NotImplementedError, self.bsq.get_count)

    def test_build_query(self):
        self.bsq.add_filter(SQ(foo='bar'))
        self.assertRaises(NotImplementedError, self.bsq.build_query)

    def test_add_filter(self):
        # Filters combine into a single SQ tree; verify AND/OR/NOT composition via repr.
        self.assertEqual(len(self.bsq.query_filter), 0)
        self.bsq.add_filter(SQ(foo='bar'))
        self.assertEqual(len(self.bsq.query_filter), 1)
        self.bsq.add_filter(SQ(foo__lt='10'))
        self.bsq.add_filter(~SQ(claris='moof'))
        self.bsq.add_filter(SQ(claris='moof'), use_or=True)
        self.assertEqual(repr(self.bsq.query_filter), '<SQ: OR ((foo__contains=bar AND foo__lt=10 AND NOT (claris__contains=moof)) OR claris__contains=moof)>')
        self.bsq.add_filter(SQ(claris='moof'))
        self.assertEqual(repr(self.bsq.query_filter), '<SQ: AND (((foo__contains=bar AND foo__lt=10 AND NOT (claris__contains=moof)) OR claris__contains=moof) AND claris__contains=moof)>')
        self.bsq.add_filter(SQ(claris='wtf mate'))
        self.assertEqual(repr(self.bsq.query_filter), '<SQ: AND (((foo__contains=bar AND foo__lt=10 AND NOT (claris__contains=moof)) OR claris__contains=moof) AND claris__contains=moof AND claris__contains=wtf mate)>')

    def test_add_order_by(self):
        self.assertEqual(len(self.bsq.order_by), 0)
        self.bsq.add_order_by('foo')
        self.assertEqual(len(self.bsq.order_by), 1)

    def test_clear_order_by(self):
        self.bsq.add_order_by('foo')
        self.assertEqual(len(self.bsq.order_by), 1)
        self.bsq.clear_order_by()
        self.assertEqual(len(self.bsq.order_by), 0)

    def test_add_model(self):
        # Only Django models are accepted; arbitrary classes raise AttributeError.
        self.assertEqual(len(self.bsq.models), 0)
        self.assertRaises(AttributeError, self.bsq.add_model, object)
        self.assertEqual(len(self.bsq.models), 0)
        self.bsq.add_model(MockModel)
        self.assertEqual(len(self.bsq.models), 1)
        self.bsq.add_model(AnotherMockModel)
        self.assertEqual(len(self.bsq.models), 2)

    def test_set_limits(self):
        self.assertEqual(self.bsq.start_offset, 0)
        self.assertEqual(self.bsq.end_offset, None)
        self.bsq.set_limits(10, 50)
        self.assertEqual(self.bsq.start_offset, 10)
        self.assertEqual(self.bsq.end_offset, 50)

    def test_clear_limits(self):
        self.bsq.set_limits(10, 50)
        self.assertEqual(self.bsq.start_offset, 10)
        self.assertEqual(self.bsq.end_offset, 50)
        self.bsq.clear_limits()
        # Cleared limits revert to the defaults: offset 0, open-ended end.
        self.assertEqual(self.bsq.start_offset, 0)
        self.assertEqual(self.bsq.end_offset, None)

    def test_add_boost(self):
        self.assertEqual(self.bsq.boost, {})
        self.bsq.add_boost('foo', 10)
        self.assertEqual(self.bsq.boost, {'foo': 10})

    def test_add_highlight(self):
        self.assertEqual(self.bsq.highlight, False)
        self.bsq.add_highlight()
        self.assertEqual(self.bsq.highlight, True)

    def test_more_like_this(self):
        # MockSearchBackend('mlt') serves the canned MOCK_SEARCH_RESULTS set.
        mock = MockModel()
        mock.id = 1
        msq = MockSearchQuery()
        msq.backend = MockSearchBackend('mlt')
        ui = connections['default'].get_unified_index()
        bmmsi = BasicMockModelSearchIndex()
        ui.build(indexes=[bmmsi])
        bmmsi.update()
        msq.more_like_this(mock)
        self.assertEqual(msq.get_count(), 23)
        self.assertEqual(int(msq.get_results()[0].pk), MOCK_SEARCH_RESULTS[0].pk)

    def test_add_field_facet(self):
        self.bsq.add_field_facet('foo')
        self.assertEqual(self.bsq.facets, {'foo': {}})
        self.bsq.add_field_facet('bar')
        self.assertEqual(self.bsq.facets, {'foo': {}, 'bar': {}})

    def test_add_date_facet(self):
        # gap_amount defaults to 1 for every gap_by granularity.
        self.bsq.add_date_facet('foo', start_date=datetime.date(2009, 2, 25), end_date=datetime.date(2009, 3, 25), gap_by='day')
        self.assertEqual(self.bsq.date_facets, {'foo': {'gap_by': 'day', 'start_date': datetime.date(2009, 2, 25), 'end_date': datetime.date(2009, 3, 25), 'gap_amount': 1}})
        self.bsq.add_date_facet('bar', start_date=datetime.date(2008, 1, 1), end_date=datetime.date(2009, 12, 1), gap_by='month')
        self.assertEqual(self.bsq.date_facets, {'foo': {'gap_by': 'day', 'start_date': datetime.date(2009, 2, 25), 'end_date': datetime.date(2009, 3, 25), 'gap_amount': 1}, 'bar': {'gap_by': 'month', 'start_date': datetime.date(2008, 1, 1), 'end_date': datetime.date(2009, 12, 1), 'gap_amount': 1}})

    def test_add_query_facet(self):
        # Query facets accumulate as an ordered list; duplicate fields are kept.
        self.bsq.add_query_facet('foo', 'bar')
        self.assertEqual(self.bsq.query_facets, [('foo', 'bar')])
        self.bsq.add_query_facet('moof', 'baz')
        self.assertEqual(self.bsq.query_facets, [('foo', 'bar'), ('moof', 'baz')])
        self.bsq.add_query_facet('foo', 'baz')
        self.assertEqual(self.bsq.query_facets, [('foo', 'bar'), ('moof', 'baz'), ('foo', 'baz')])

    def test_add_stats(self):
        self.bsq.add_stats_query('foo', ['bar'])
        self.assertEqual(self.bsq.stats, {'foo': ['bar']})
        self.bsq.add_stats_query('moof', ['bar', 'baz'])
        self.assertEqual(self.bsq.stats, {'foo': ['bar'], 'moof': ['bar', 'baz']})

    def test_add_narrow_query(self):
        # Narrow queries are a set, so duplicates would collapse.
        self.bsq.add_narrow_query('foo:bar')
        self.assertEqual(self.bsq.narrow_queries, set(['foo:bar']))
        self.bsq.add_narrow_query('moof:baz')
        self.assertEqual(self.bsq.narrow_queries, set(['foo:bar', 'moof:baz']))

    def test_set_result_class(self):
        # Assert that we're defaulting to ``SearchResult``.
        self.assertTrue(issubclass(self.bsq.result_class, SearchResult))

        # Custom class.
        class IttyBittyResult(object):
            pass

        self.bsq.set_result_class(IttyBittyResult)
        self.assertTrue(issubclass(self.bsq.result_class, IttyBittyResult))

        # Reset to default.
        self.bsq.set_result_class(None)
        self.assertTrue(issubclass(self.bsq.result_class, SearchResult))

    def test_run(self):
        # Stow.
        self.old_unified_index = connections['default']._index
        self.ui = UnifiedIndex()
        self.bmmsi = BasicMockModelSearchIndex()
        self.bammsi = BasicAnotherMockModelSearchIndex()
        self.ui.build(indexes=[self.bmmsi, self.bammsi])
        connections['default']._index = self.ui

        # Update the "index".
        backend = connections['default'].get_backend()
        backend.clear()
        backend.update(self.bmmsi, MockModel.objects.all())

        msq = connections['default'].get_query()
        self.assertEqual(len(msq.get_results()), 23)
        self.assertEqual(int(msq.get_results()[0].pk), MOCK_SEARCH_RESULTS[0].pk)

        # Restore.
        connections['default']._index = self.old_unified_index

    def test_clone(self):
        # Populate every clonable attribute, then verify the copy carries them all.
        self.bsq.add_filter(SQ(foo='bar'))
        self.bsq.add_filter(SQ(foo__lt='10'))
        self.bsq.add_filter(~SQ(claris='moof'))
        self.bsq.add_filter(SQ(claris='moof'), use_or=True)
        self.bsq.add_order_by('foo')
        self.bsq.add_model(MockModel)
        self.bsq.add_boost('foo', 2)
        self.bsq.add_highlight()
        self.bsq.add_field_facet('foo')
        self.bsq.add_date_facet('foo', start_date=datetime.date(2009, 1, 1), end_date=datetime.date(2009, 1, 31), gap_by='day')
        self.bsq.add_query_facet('foo', 'bar')
        self.bsq.add_stats_query('foo', 'bar')
        self.bsq.add_narrow_query('foo:bar')

        clone = self.bsq._clone()
        self.assertTrue(isinstance(clone, BaseSearchQuery))
        self.assertEqual(len(clone.query_filter), 2)
        self.assertEqual(len(clone.order_by), 1)
        self.assertEqual(len(clone.models), 1)
        self.assertEqual(len(clone.boost), 1)
        self.assertEqual(clone.highlight, True)
        self.assertEqual(len(clone.facets), 1)
        self.assertEqual(len(clone.date_facets), 1)
        self.assertEqual(len(clone.query_facets), 1)
        self.assertEqual(len(clone.narrow_queries), 1)
        self.assertEqual(clone.start_offset, self.bsq.start_offset)
        self.assertEqual(clone.end_offset, self.bsq.end_offset)
        self.assertEqual(clone.backend.__class__, self.bsq.backend.__class__)

    def test_log_query(self):
        # Query logging must be active only when DEBUG=True.
        reset_search_queries()
        self.assertEqual(len(connections['default'].queries), 0)

        # Stow.
        self.old_unified_index = connections['default']._index
        self.ui = UnifiedIndex()
        self.bmmsi = BasicMockModelSearchIndex()
        self.ui.build(indexes=[self.bmmsi])
        connections['default']._index = self.ui

        # Update the "index".
        backend = connections['default'].get_backend()
        backend.clear()
        self.bmmsi.update()

        with self.settings(DEBUG=False):
            msq = connections['default'].get_query()
            self.assertEqual(len(msq.get_results()), 23)
            # Nothing logged while DEBUG is off.
            self.assertEqual(len(connections['default'].queries), 0)

        with self.settings(DEBUG=True):
            # Redefine it to clear out the cached results.
            msq2 = connections['default'].get_query()
            self.assertEqual(len(msq2.get_results()), 23)
            self.assertEqual(len(connections['default'].queries), 1)
            self.assertEqual(connections['default'].queries[0]['query_string'], '')

            msq3 = connections['default'].get_query()
            msq3.add_filter(SQ(foo='bar'))
            len(msq3.get_results())
            self.assertEqual(len(connections['default'].queries), 2)
            self.assertEqual(connections['default'].queries[0]['query_string'], '')
            self.assertEqual(connections['default'].queries[1]['query_string'], '')

        # Restore.
        connections['default']._index = self.old_unified_index
class AppModelManagementCommandTestCase(TestCase):
    """Test ``update_index`` app/model label handling against Solr.

    The fixture set indexes 23 MockModel and 2 MockTag documents (25 total);
    each variation below clears the core, runs ``update_index`` with a
    different label spelling, and asserts the resulting document count.
    """

    fixtures = ['initial_data.json', 'bulk_data.json']

    def setUp(self):
        super(AppModelManagementCommandTestCase, self).setUp()
        self.solr = pysolr.Solr(settings.SEARCHSTACK_CONNECTIONS['solr']['URL'])

        # Stow.
        self.old_ui = connections['solr'].get_unified_index()
        self.ui = UnifiedIndex()
        self.smmi = SolrMockSearchIndex()
        self.smtmi = SolrMockTagSearchIndex()
        self.ui.build(indexes=[self.smmi, self.smtmi])
        connections['solr']._index = self.ui

    def tearDown(self):
        connections['solr']._index = self.old_ui
        super(AppModelManagementCommandTestCase, self).tearDown()

    def test_app_model_variations(self):
        call_command('clear_index', interactive=False, verbosity=0)
        self.assertEqual(self.solr.search('*:*').hits, 0)

        # No labels: everything registered gets indexed.
        call_command('update_index', verbosity=0)
        self.assertEqual(self.solr.search('*:*').hits, 25)

        call_command('clear_index', interactive=False, verbosity=0)
        self.assertEqual(self.solr.search('*:*').hits, 0)

        # App label alone indexes both models in the app.
        call_command('update_index', 'core', interactive=False, verbosity=0)
        self.assertEqual(self.solr.search('*:*').hits, 25)

        call_command('clear_index', interactive=False, verbosity=0)
        self.assertEqual(self.solr.search('*:*').hits, 0)

        # Unknown app labels must fail loudly.
        self.assertRaises(ImproperlyConfigured, call_command, 'update_index', 'fake_app_thats_not_there', interactive=False)

        # Mixing a registered app with one that has no indexes is fine.
        call_command('update_index', 'core', 'discovery', interactive=False, verbosity=0)
        self.assertEqual(self.solr.search('*:*').hits, 25)

        call_command('clear_index', interactive=False, verbosity=0)
        self.assertEqual(self.solr.search('*:*').hits, 0)

        # An app with no registered indexes contributes nothing.
        call_command('update_index', 'discovery', interactive=False, verbosity=0)
        self.assertEqual(self.solr.search('*:*').hits, 0)

        call_command('clear_index', interactive=False, verbosity=0)
        self.assertEqual(self.solr.search('*:*').hits, 0)

        # ``app.Model`` labels restrict indexing to that single model.
        call_command('update_index', 'core.MockModel', interactive=False, verbosity=0)
        self.assertEqual(self.solr.search('*:*').hits, 23)

        call_command('clear_index', interactive=False, verbosity=0)
        self.assertEqual(self.solr.search('*:*').hits, 0)

        call_command('update_index', 'core.MockTag', interactive=False, verbosity=0)
        self.assertEqual(self.solr.search('*:*').hits, 2)

        call_command('clear_index', interactive=False, verbosity=0)
        self.assertEqual(self.solr.search('*:*').hits, 0)

        # Multiple ``app.Model`` labels combine.
        call_command('update_index', 'core.MockTag', 'core.MockModel', interactive=False, verbosity=0)
        self.assertEqual(self.solr.search('*:*').hits, 25)
class SearchViewTestCase(TestCase):
    """Tests for the function-style ``SearchView``: querying, pagination,
    initial form data, thread safety and spelling-suggestion plumbing."""

    fixtures = ['initial_data.json']

    def setUp(self):
        super(SearchViewTestCase, self).setUp()

        # Stow.
        self.old_unified_index = connections['default']._index
        self.ui = UnifiedIndex()
        self.bmmsi = BasicMockModelSearchIndex()
        self.bammsi = BasicAnotherMockModelSearchIndex()
        self.ui.build(indexes=[self.bmmsi, self.bammsi])
        connections['default']._index = self.ui

        # Update the "index".
        backend = connections['default'].get_backend()
        backend.clear()
        backend.update(self.bmmsi, MockModel.objects.all())

    def tearDown(self):
        connections['default']._index = self.old_unified_index
        super(SearchViewTestCase, self).tearDown()

    def test_search_no_query(self):
        response = self.client.get(reverse('searchstack_search'))
        self.assertEqual(response.status_code, 200)

    def test_search_query(self):
        response = self.client.get(reverse('searchstack_search'), {'q': 'haystack'})
        self.assertEqual(response.status_code, 200)
        # The view exposes 'page' (not Django's generic 'page_obj') in context.
        self.assertIn('page', response.context)
        self.assertNotIn('page_obj', response.context)
        self.assertEqual(len(response.context[-1]['page'].object_list), 3)
        self.assertEqual(response.context[-1]['page'].object_list[0].content_type(), 'core.mockmodel')
        self.assertEqual(response.context[-1]['page'].object_list[0].pk, '1')

    def test_invalid_page(self):
        # An out-of-range page number must 404, not error out.
        response = self.client.get(reverse('searchstack_search'), {'q': 'haystack', 'page': '165233'})
        self.assertEqual(response.status_code, 404)

    def test_empty_results(self):
        sv = SearchView()
        sv.request = HttpRequest()
        sv.form = sv.build_form()
        self.assertTrue(isinstance(sv.get_results(), EmptySearchQuerySet))

    def test_initial_data(self):
        # A custom form_class should be instantiated with its declared initial data.
        sv = SearchView(form_class=InitialedSearchForm)
        sv.request = HttpRequest()
        form = sv.build_form()
        self.assertTrue(isinstance(form, InitialedSearchForm))
        self.assertEqual(form.fields['q'].initial, 'Search for...')
        para = form.as_p()
        self.assertTrue('<label for="id_q">Search:</label>' in para)
        self.assertTrue('value="Search for..."' in para)

    def test_pagination(self):
        # Pages are 1-based: page 0 and pages past the end both 404.
        response = self.client.get(reverse('searchstack_search'), {'q': 'haystack', 'page': 0})
        self.assertEqual(response.status_code, 404)
        response = self.client.get(reverse('searchstack_search'), {'q': 'haystack', 'page': 1})
        self.assertEqual(response.status_code, 200)
        self.assertEqual(len(response.context[-1]['page'].object_list), 3)
        response = self.client.get(reverse('searchstack_search'), {'q': 'haystack', 'page': 2})
        self.assertEqual(response.status_code, 404)

    def test_thread_safety(self):
        exceptions = []

        def threaded_view(resp_queue, view, request):
            # Sleep so both threads are in-flight simultaneously before resolving.
            import time
            time.sleep(2)
            try:
                inst = view(request)
                resp_queue.put(request.GET['name'])
            except Exception as e:
                exceptions.append(e)
                raise

        class ThreadedSearchView(SearchView):
            def __call__(self, request):
                print("Name: %s" % request.GET['name'])
                return super(ThreadedSearchView, self).__call__(request)

        view = search_view_factory(view_class=ThreadedSearchView)
        resp_queue = queue.Queue()
        request_1 = HttpRequest()
        request_1.GET = {'name': 'foo'}
        request_2 = HttpRequest()
        request_2.GET = {'name': 'bar'}

        th1 = Thread(target=threaded_view, args=(resp_queue, view, request_1))
        th2 = Thread(target=threaded_view, args=(resp_queue, view, request_2))

        th1.start()
        th2.start()
        th1.join()
        th2.join()

        # Each thread must have seen its own request — identical results would
        # mean shared mutable view state.
        foo = resp_queue.get()
        bar = resp_queue.get()
        self.assertNotEqual(foo, bar)

    def test_spelling(self):
        # Stow.
        from django.conf import settings
        old = settings.SEARCHSTACK_CONNECTIONS['default'].get('INCLUDE_SPELLING', None)
        # NOTE(review): the view is exercised without ever toggling INCLUDE_SPELLING,
        # and ``output`` is never asserted on — this only checks create_response()
        # doesn't blow up. Presumably intentional smoke coverage; confirm.
        sv = SearchView()
        sv.query = 'Nothing'
        sv.results = []
        sv.build_page = lambda: (None, None)
        output = sv.create_response()
        # Restore
        settings.SEARCHSTACK_CONNECTIONS['default']['INCLUDE_SPELLING'] = old
        if old is None:
            del settings.SEARCHSTACK_CONNECTIONS['default']['INCLUDE_SPELLING']
class SearchQuerySetTestCase(TestCase):
    """Core ``SearchQuerySet`` behavior against the mock backend: iteration,
    slicing, cache filling, chaining, faceting and SQ combination semantics.

    Many assertions count ``connections['default'].queries`` after specific
    operation sequences, so statement order within each test is significant.
    """

    fixtures = ['initial_data.json', 'bulk_data.json']

    def setUp(self):
        super(SearchQuerySetTestCase, self).setUp()

        # Stow.
        self.old_unified_index = connections['default']._index
        self.ui = UnifiedIndex()
        self.bmmsi = BasicMockModelSearchIndex()
        self.cpkmmsi = CharPKMockModelSearchIndex()
        self.ui.build(indexes=[self.bmmsi, self.cpkmmsi])
        connections['default']._index = self.ui

        # Update the "index".
        backend = connections['default'].get_backend()
        backend.clear()
        backend.update(self.bmmsi, MockModel.objects.all())

        self.msqs = SearchQuerySet()

        # Stow.
        reset_search_queries()

    def tearDown(self):
        # Restore.
        connections['default']._index = self.old_unified_index
        super(SearchQuerySetTestCase, self).tearDown()

    def test_len(self):
        self.assertEqual(len(self.msqs), 23)

    def test_repr(self):
        reset_search_queries()
        self.assertEqual(len(connections['default'].queries), 0)
        # NOTE(review): assertRegexpMatches is a deprecated alias of assertRegex
        # (removed in Python 3.12) — worth migrating alongside the rest of the suite.
        self.assertRegexpMatches(repr(self.msqs),
                                 r'^<SearchQuerySet: query=<test_searchstack.mocks.MockSearchQuery object'
                                 r' at 0x[0-9A-Fa-f]+>, using=None>$')

    def test_iter(self):
        reset_search_queries()
        self.assertEqual(len(connections['default'].queries), 0)
        msqs = self.msqs.all()
        results = [int(res.pk) for res in msqs]
        self.assertEqual(results, [res.pk for res in MOCK_SEARCH_RESULTS[:23]])
        # Full iteration over 23 results fills the cache in 3 backend hits.
        self.assertEqual(len(connections['default'].queries), 3)

    def test_slice(self):
        reset_search_queries()
        self.assertEqual(len(connections['default'].queries), 0)
        results = self.msqs.all()
        self.assertEqual([int(res.pk) for res in results[1:11]], [res.pk for res in MOCK_SEARCH_RESULTS[1:11]])
        # A bounded slice costs exactly one backend query.
        self.assertEqual(len(connections['default'].queries), 1)

        reset_search_queries()
        self.assertEqual(len(connections['default'].queries), 0)
        results = self.msqs.all()
        self.assertEqual(int(results[22].pk), MOCK_SEARCH_RESULTS[22].pk)
        self.assertEqual(len(connections['default'].queries), 1)

    def test_manual_iter(self):
        results = self.msqs.all()

        reset_search_queries()
        self.assertEqual(len(connections['default'].queries), 0)
        check = [result.pk for result in results._manual_iter()]
        self.assertEqual(check, ['1', '2', '3', '4', '5', '6', '7', '8', '9', '10', '11', '12', '13', '14', '15', '16', '17', '18', '19', '20', '21', '22', '23'])
        self.assertEqual(len(connections['default'].queries), 3)

        reset_search_queries()
        self.assertEqual(len(connections['default'].queries), 0)

        # Test to ensure we properly fill the cache, even if we get fewer
        # results back (not a handled model) than the hit count indicates.
        # This will hang indefinitely if broken.
        old_ui = self.ui
        self.ui.build(indexes=[self.cpkmmsi])
        connections['default']._index = self.ui
        self.cpkmmsi.update()

        results = self.msqs.all()
        loaded = [result.pk for result in results._manual_iter()]
        self.assertEqual(loaded, ['sometext', '1234'])
        self.assertEqual(len(connections['default'].queries), 1)

        connections['default']._index = old_ui

    def test_fill_cache(self):
        reset_search_queries()
        self.assertEqual(len(connections['default'].queries), 0)
        results = self.msqs.all()
        self.assertEqual(len(results._result_cache), 0)
        self.assertEqual(len(connections['default'].queries), 0)
        results._fill_cache(0, 10)
        self.assertEqual(len([result for result in results._result_cache if result is not None]), 10)
        self.assertEqual(len(connections['default'].queries), 1)
        results._fill_cache(10, 20)
        self.assertEqual(len([result for result in results._result_cache if result is not None]), 20)
        self.assertEqual(len(connections['default'].queries), 2)

        reset_search_queries()
        self.assertEqual(len(connections['default'].queries), 0)

        # Test to ensure we properly fill the cache, even if we get fewer
        # results back (not a handled model) than the hit count indicates.
        sqs = SearchQuerySet().all()
        sqs.query.backend = MixedMockSearchBackend('default')
        results = sqs
        self.assertEqual(len([result for result in results._result_cache if result is not None]), 0)
        self.assertEqual([int(result.pk) for result in results._result_cache if result is not None], [])
        self.assertEqual(len(connections['default'].queries), 0)
        # The mixed backend drops unhandled hits (e.g. pk 9, 13, 14), so each
        # window needs extra queries to top the cache up.
        results._fill_cache(0, 10)
        self.assertEqual(len([result for result in results._result_cache if result is not None]), 9)
        self.assertEqual([int(result.pk) for result in results._result_cache if result is not None], [1, 2, 3, 4, 5, 6, 7, 8, 10])
        self.assertEqual(len(connections['default'].queries), 2)
        results._fill_cache(10, 20)
        self.assertEqual(len([result for result in results._result_cache if result is not None]), 17)
        self.assertEqual([int(result.pk) for result in results._result_cache if result is not None], [1, 2, 3, 4, 5, 6, 7, 8, 10, 11, 12, 15, 16, 17, 18, 19, 20])
        self.assertEqual(len(connections['default'].queries), 4)
        results._fill_cache(20, 30)
        self.assertEqual(len([result for result in results._result_cache if result is not None]), 20)
        self.assertEqual([int(result.pk) for result in results._result_cache if result is not None], [1, 2, 3, 4, 5, 6, 7, 8, 10, 11, 12, 15, 16, 17, 18, 19, 20, 21, 22, 23])
        self.assertEqual(len(connections['default'].queries), 6)

    def test_cache_is_full(self):
        reset_search_queries()
        self.assertEqual(len(connections['default'].queries), 0)
        self.assertEqual(self.msqs._cache_is_full(), False)
        results = self.msqs.all()
        fire_the_iterator_and_fill_cache = [result for result in results]
        self.assertEqual(results._cache_is_full(), True)
        self.assertEqual(len(connections['default'].queries), 3)

    def test_all(self):
        sqs = self.msqs.all()
        self.assertTrue(isinstance(sqs, SearchQuerySet))

    def test_filter(self):
        sqs = self.msqs.filter(content='foo')
        self.assertTrue(isinstance(sqs, SearchQuerySet))
        self.assertEqual(len(sqs.query.query_filter), 1)

    def test_exclude(self):
        sqs = self.msqs.exclude(content='foo')
        self.assertTrue(isinstance(sqs, SearchQuerySet))
        self.assertEqual(len(sqs.query.query_filter), 1)

    def test_order_by(self):
        sqs = self.msqs.order_by('foo')
        self.assertTrue(isinstance(sqs, SearchQuerySet))
        self.assertTrue('foo' in sqs.query.order_by)

    def test_models(self):
        # Stow.
        old_unified_index = connections['default']._index
        ui = UnifiedIndex()
        bmmsi = BasicMockModelSearchIndex()
        bammsi = BasicAnotherMockModelSearchIndex()
        ui.build(indexes=[bmmsi, bammsi])
        connections['default']._index = ui

        msqs = SearchQuerySet()

        sqs = msqs.all()
        self.assertTrue(isinstance(sqs, SearchQuerySet))
        self.assertEqual(len(sqs.query.models), 0)

        sqs = msqs.models(MockModel)
        self.assertTrue(isinstance(sqs, SearchQuerySet))
        self.assertEqual(len(sqs.query.models), 1)

        sqs = msqs.models(MockModel, AnotherMockModel)
        self.assertTrue(isinstance(sqs, SearchQuerySet))
        self.assertEqual(len(sqs.query.models), 2)

        # This will produce a warning.
        ui.build(indexes=[bmmsi])
        sqs = msqs.models(AnotherMockModel)
        self.assertTrue(isinstance(sqs, SearchQuerySet))
        self.assertEqual(len(sqs.query.models), 1)

    def test_result_class(self):
        sqs = self.msqs.all()
        self.assertTrue(issubclass(sqs.query.result_class, SearchResult))

        # Custom class.
        class IttyBittyResult(object):
            pass

        sqs = self.msqs.result_class(IttyBittyResult)
        self.assertTrue(issubclass(sqs.query.result_class, IttyBittyResult))

        # Reset to default.
        sqs = self.msqs.result_class(None)
        self.assertTrue(issubclass(sqs.query.result_class, SearchResult))

    def test_boost(self):
        sqs = self.msqs.boost('foo', 10)
        self.assertTrue(isinstance(sqs, SearchQuerySet))
        self.assertEqual(len(sqs.query.boost.keys()), 1)

    def test_highlight(self):
        sqs = self.msqs.highlight()
        self.assertTrue(isinstance(sqs, SearchQuerySet))
        self.assertEqual(sqs.query.highlight, True)

    def test_spelling(self):
        # Test the case where spelling support is disabled.
        sqs = self.msqs.filter(content='Indx')
        self.assertTrue(isinstance(sqs, SearchQuerySet))
        self.assertEqual(sqs.spelling_suggestion(), None)
        self.assertEqual(sqs.spelling_suggestion('indexy'), None)

    def test_raw_search(self):
        self.assertEqual(len(self.msqs.raw_search('foo')), 23)
        self.assertEqual(len(self.msqs.raw_search('(content__exact:hello AND content__exact:world)')), 23)

    def test_load_all(self):
        # Models with character primary keys.
        sqs = SearchQuerySet()
        sqs.query.backend = CharPKMockSearchBackend('charpk')
        results = sqs.load_all().all()
        self.assertEqual(len(results._result_cache), 0)
        results._fill_cache(0, 2)
        self.assertEqual(len([result for result in results._result_cache if result is not None]), 2)

        # If nothing is handled, you get nothing.
        old_ui = connections['default']._index
        ui = UnifiedIndex()
        ui.build(indexes=[])
        connections['default']._index = ui

        sqs = self.msqs.load_all()
        self.assertTrue(isinstance(sqs, SearchQuerySet))
        self.assertEqual(len(sqs), 0)

        connections['default']._index = old_ui

        # For full tests, see the solr_backend.

    def test_load_all_read_queryset(self):
        # Stow.
        old_ui = connections['default']._index
        ui = UnifiedIndex()
        gafmmsi = GhettoAFifthMockModelSearchIndex()
        ui.build(indexes=[gafmmsi])
        connections['default']._index = ui
        gafmmsi.update()

        sqs = SearchQuerySet()
        results = sqs.load_all().all()
        results.query.backend = ReadQuerySetMockSearchBackend('default')
        results._fill_cache(0, 2)

        # The deleted result isn't returned
        self.assertEqual(len([result for result in results._result_cache if result is not None]), 1)

        # Register a SearchIndex with a read_queryset that returns deleted items
        rqstsi = TextReadQuerySetTestSearchIndex()
        ui.build(indexes=[rqstsi])
        rqstsi.update()

        sqs = SearchQuerySet()
        results = sqs.load_all().all()
        results.query.backend = ReadQuerySetMockSearchBackend('default')
        results._fill_cache(0, 2)

        # Both the deleted and not deleted items are returned
        self.assertEqual(len([result for result in results._result_cache if result is not None]), 2)

        # Restore.
        connections['default']._index = old_ui

    def test_auto_query(self):
        sqs = self.msqs.auto_query('test search -stuff')
        self.assertTrue(isinstance(sqs, SearchQuerySet))
        self.assertEqual(repr(sqs.query.query_filter), '<SQ: AND content__contains=test search -stuff>')

        sqs = self.msqs.auto_query('test "my thing" search -stuff')
        self.assertTrue(isinstance(sqs, SearchQuerySet))
        self.assertEqual(repr(sqs.query.query_filter), '<SQ: AND content__contains=test "my thing" search -stuff>')

        sqs = self.msqs.auto_query('test "my thing" search \'moar quotes\' -stuff')
        self.assertTrue(isinstance(sqs, SearchQuerySet))
        self.assertEqual(repr(sqs.query.query_filter), '<SQ: AND content__contains=test "my thing" search \'moar quotes\' -stuff>')

        # An unbalanced double quote is passed through as-is.
        sqs = self.msqs.auto_query('test "my thing" search \'moar quotes\' "foo -stuff')
        self.assertTrue(isinstance(sqs, SearchQuerySet))
        self.assertEqual(repr(sqs.query.query_filter), '<SQ: AND content__contains=test "my thing" search \'moar quotes\' "foo -stuff>')

        sqs = self.msqs.auto_query('test - stuff')
        self.assertTrue(isinstance(sqs, SearchQuerySet))
        self.assertEqual(repr(sqs.query.query_filter), "<SQ: AND content__contains=test - stuff>")

        # Ensure bits in exact matches get escaped properly as well.
        sqs = self.msqs.auto_query('"pants:rule"')
        self.assertTrue(isinstance(sqs, SearchQuerySet))
        self.assertEqual(repr(sqs.query.query_filter), '<SQ: AND content__contains="pants:rule">')

        # Now with a different fieldname
        sqs = self.msqs.auto_query('test search -stuff', fieldname='title')
        self.assertTrue(isinstance(sqs, SearchQuerySet))
        self.assertEqual(repr(sqs.query.query_filter), "<SQ: AND title__contains=test search -stuff>")

        sqs = self.msqs.auto_query('test "my thing" search -stuff', fieldname='title')
        self.assertTrue(isinstance(sqs, SearchQuerySet))
        self.assertEqual(repr(sqs.query.query_filter), '<SQ: AND title__contains=test "my thing" search -stuff>')

    def test_count(self):
        self.assertEqual(self.msqs.count(), 23)

    def test_facet_counts(self):
        self.assertEqual(self.msqs.facet_counts(), {})

    def test_best_match(self):
        self.assertTrue(isinstance(self.msqs.best_match(), SearchResult))

    def test_latest(self):
        self.assertTrue(isinstance(self.msqs.latest('pub_date'), SearchResult))

    def test_more_like_this(self):
        mock = MockModel()
        mock.id = 1

        self.assertEqual(len(self.msqs.more_like_this(mock)), 23)

    def test_facets(self):
        sqs = self.msqs.facet('foo')
        self.assertTrue(isinstance(sqs, SearchQuerySet))
        self.assertEqual(len(sqs.query.facets), 1)

        sqs2 = self.msqs.facet('foo').facet('bar')
        self.assertTrue(isinstance(sqs2, SearchQuerySet))
        self.assertEqual(len(sqs2.query.facets), 2)

    def test_date_facets(self):
        try:
            sqs = self.msqs.date_facet('foo', start_date=datetime.date(2008, 2, 25), end_date=datetime.date(2009, 2, 25), gap_by='smarblaph')
            self.fail()
        except FacetingError as e:
            self.assertEqual(str(e), "The gap_by ('smarblaph') must be one of the following: year, month, day, hour, minute, second.")

        sqs = self.msqs.date_facet('foo', start_date=datetime.date(2008, 2, 25), end_date=datetime.date(2009, 2, 25), gap_by='month')
        self.assertTrue(isinstance(sqs, SearchQuerySet))
        self.assertEqual(len(sqs.query.date_facets), 1)

        sqs2 = self.msqs.date_facet('foo', start_date=datetime.date(2008, 2, 25), end_date=datetime.date(2009, 2, 25), gap_by='month').date_facet('bar', start_date=datetime.date(2007, 2, 25), end_date=datetime.date(2009, 2, 25), gap_by='year')
        self.assertTrue(isinstance(sqs2, SearchQuerySet))
        self.assertEqual(len(sqs2.query.date_facets), 2)

    def test_query_facets(self):
        sqs = self.msqs.query_facet('foo', '[bar TO *]')
        self.assertTrue(isinstance(sqs, SearchQuerySet))
        self.assertEqual(len(sqs.query.query_facets), 1)

        sqs2 = self.msqs.query_facet('foo', '[bar TO *]').query_facet('bar', '[100 TO 499]')
        self.assertTrue(isinstance(sqs2, SearchQuerySet))
        self.assertEqual(len(sqs2.query.query_facets), 2)

        # Test multiple query facets on a single field
        sqs3 = self.msqs.query_facet('foo', '[bar TO *]').query_facet('bar', '[100 TO 499]').query_facet('foo', '[1000 TO 1499]')
        self.assertTrue(isinstance(sqs3, SearchQuerySet))
        self.assertEqual(len(sqs3.query.query_facets), 3)

    def test_stats(self):
        # Repeated stats on the same field collapse into one entry.
        sqs = self.msqs.stats_facet('foo', 'bar')
        self.assertTrue(isinstance(sqs, SearchQuerySet))
        self.assertEqual(len(sqs.query.stats), 1)

        sqs2 = self.msqs.stats_facet('foo', 'bar').stats_facet('foo', 'baz')
        self.assertTrue(isinstance(sqs2, SearchQuerySet))
        self.assertEqual(len(sqs2.query.stats), 1)

        sqs3 = self.msqs.stats_facet('foo', 'bar').stats_facet('moof', 'baz')
        self.assertTrue(isinstance(sqs3, SearchQuerySet))
        self.assertEqual(len(sqs3.query.stats), 2)

    def test_narrow(self):
        sqs = self.msqs.narrow('foo:moof')
        self.assertTrue(isinstance(sqs, SearchQuerySet))
        self.assertEqual(len(sqs.query.narrow_queries), 1)

    def test_clone(self):
        results = self.msqs.filter(foo='bar', foo__lt='10')

        clone = results._clone()
        self.assertTrue(isinstance(clone, SearchQuerySet))
        self.assertEqual(str(clone.query), str(results.query))
        # The clone shares the query but starts with a cold cache.
        self.assertEqual(clone._result_cache, [])
        self.assertEqual(clone._result_count, None)
        self.assertEqual(clone._cache_full, False)
        self.assertEqual(clone._using, results._using)

    def test_using(self):
        sqs = SearchQuerySet(using='default')
        self.assertNotEqual(sqs.query, None)
        self.assertEqual(sqs.query._using, 'default')

    def test_chaining(self):
        sqs = self.msqs.filter(content='foo')
        self.assertTrue(isinstance(sqs, SearchQuerySet))
        self.assertEqual(len(sqs.query.query_filter), 1)

        # A second instance should inherit none of the changes from above.
        sqs = self.msqs.filter(content='bar')
        self.assertTrue(isinstance(sqs, SearchQuerySet))
        self.assertEqual(len(sqs.query.query_filter), 1)

    def test_none(self):
        sqs = self.msqs.none()
        self.assertTrue(isinstance(sqs, EmptySearchQuerySet))
        self.assertEqual(len(sqs), 0)

    def test___and__(self):
        sqs1 = self.msqs.filter(content='foo')
        sqs2 = self.msqs.filter(content='bar')
        sqs = sqs1 & sqs2

        self.assertTrue(isinstance(sqs, SearchQuerySet))
        self.assertEqual(len(sqs.query.query_filter), 2)

    def test___or__(self):
        sqs1 = self.msqs.filter(content='foo')
        sqs2 = self.msqs.filter(content='bar')
        sqs = sqs1 | sqs2

        self.assertTrue(isinstance(sqs, SearchQuerySet))
        self.assertEqual(len(sqs.query.query_filter), 2)

    def test_and_or(self):
        """
        Combining AND queries with OR should give
        AND(OR(a, b), OR(c, d))
        """
        sqs1 = self.msqs.filter(content='foo').filter(content='oof')
        sqs2 = self.msqs.filter(content='bar').filter(content='rab')
        sqs = sqs1 | sqs2

        self.assertEqual(sqs.query.query_filter.connector, 'OR')
        self.assertEqual(repr(sqs.query.query_filter.children[0]), repr(sqs1.query.query_filter))
        self.assertEqual(repr(sqs.query.query_filter.children[1]), repr(sqs2.query.query_filter))

    def test_or_and(self):
        """
        Combining OR queries with AND should give
        OR(AND(a, b), AND(c, d))
        """
        sqs1 = self.msqs.filter(content='foo').filter_or(content='oof')
        sqs2 = self.msqs.filter(content='bar').filter_or(content='rab')
        sqs = sqs1 & sqs2

        self.assertEqual(sqs.query.query_filter.connector, 'AND')
        self.assertEqual(repr(sqs.query.query_filter.children[0]), repr(sqs1.query.query_filter))
        self.assertEqual(repr(sqs.query.query_filter.children[1]), repr(sqs2.query.query_filter))