def test_cron_updates_counts(self):
    q = question(save=True)
    self.refresh()

    eq_(q.num_votes_past_week, 0)

    # NB: Pull the document from the index here and later; a Question
    # object would have data from the database and not the index.
    document = (QuestionMappingType.search()
                .filter(id=q.id))[0]
    eq_(document['question_num_votes_past_week'], 0)

    vote = questionvote(question=q, anonymous_id='abc123')
    vote.save()
    q.num_votes_past_week = 0
    q.save()

    update_weekly_votes()
    self.refresh()

    q = Question.objects.get(pk=q.pk)
    eq_(1, q.num_votes_past_week)

    document = (QuestionMappingType.search()
                .filter(id=q.id))[0]
    eq_(document['question_num_votes_past_week'], 1)
def test_added(self):
    search = QuestionMappingType.search()

    # Create a question--that adds one document to the index.
    q = question(title=u'Does this test work?', save=True)
    self.refresh()
    query = dict(('%s__match' % field, 'test')
                 for field in QuestionMappingType.get_query_fields())
    eq_(search.query(should=True, **query).count(), 1)

    # Create an answer for the question. It shouldn't be searchable
    # until the answer is saved.
    a = answer(content=u'There\'s only one way to find out!',
               question=q)
    self.refresh()
    query = dict(('%s__match' % field, 'only')
                 for field in QuestionMappingType.get_query_fields())
    eq_(search.query(should=True, **query).count(), 0)

    a.save()
    self.refresh()
    query = dict(('%s__match' % field, 'only')
                 for field in QuestionMappingType.get_query_fields())
    eq_(search.query(should=True, **query).count(), 1)

    # Make sure that there's only one question document in the
    # index--creating an answer should have updated the existing
    # one.
    eq_(search.count(), 1)
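# A minimal sketch of what the query dicts built above expand to. The
# field list here is illustrative only; the real list comes from
# QuestionMappingType.get_query_fields().
fields = ['question_title', 'question_content', 'question_answer_content']
query = dict(('%s__match' % field, 'test') for field in fields)
# query == {'question_title__match': 'test',
#           'question_content__match': 'test',
#           'question_answer_content__match': 'test'}
# search.query(should=True, **query) then ORs those match clauses together.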
def test_cron_updates_counts(self):
    q = QuestionFactory()
    self.refresh()

    eq_(q.num_votes_past_week, 0)

    # NB: Pull the document from the index here and later; a Question
    # object would have data from the database and not the index.
    document = (QuestionMappingType.search().filter(id=q.id))[0]
    eq_(document["question_num_votes_past_week"], 0)

    QuestionVoteFactory(question=q, anonymous_id="abc123")
    q.num_votes_past_week = 0
    q.save()

    call_command("update_weekly_votes")
    self.refresh()

    q = Question.objects.get(pk=q.pk)
    eq_(1, q.num_votes_past_week)

    document = (QuestionMappingType.search().filter(id=q.id))[0]
    eq_(document["question_num_votes_past_week"], 1)
def update_question_vote_chunk(data):
    """Update num_votes_past_week for a number of questions."""
    # First we recalculate num_votes_past_week in the db.
    log.info("Calculating past week votes for %s questions." % len(data))

    ids = ",".join(map(str, data))
    sql = """
        UPDATE questions_question q
        SET num_votes_past_week = (
            SELECT COUNT(created)
            FROM questions_questionvote qv
            WHERE qv.question_id = q.id
            AND qv.created >= DATE(SUBDATE(NOW(), 7))
        )
        WHERE q.id IN (%s);
        """ % ids

    cursor = connection.cursor()
    cursor.execute(sql)
    if not transaction.get_connection().in_atomic_block:
        transaction.commit()

    # Next we update our index with the changes we made directly in
    # the db.
    if data and settings.ES_LIVE_INDEXING:
        # Get the data we just updated from the database.
        sql = """
            SELECT id, num_votes_past_week
            FROM questions_question
            WHERE id in (%s);
            """ % ids
        cursor = connection.cursor()
        cursor.execute(sql)

        # Since this returns (id, num_votes_past_week) tuples, we can
        # convert that directly to a dict.
        id_to_num = dict(cursor.fetchall())

        try:
            # Fetch all the documents we need to update.
            from kitsune.questions.models import QuestionMappingType
            from kitsune.search import es_utils
            es_docs = es_utils.get_documents(QuestionMappingType, data)

            # For each document, update the data and stick it back in
            # the index.
            for doc in es_docs:
                # Note: Need to keep this in sync with
                # Question.extract_document.
                num = id_to_num[int(doc["id"])]
                doc["question_num_votes_past_week"] = num
                QuestionMappingType.index(doc, id_=doc["id"])
        except ES_EXCEPTIONS:
            # Something happened with ES, so let's push index updating
            # into an index_task which retries when it fails because
            # of ES issues.
            index_task.delay(to_class_path(QuestionMappingType),
                             list(id_to_num.keys()))
def handle(self, **options):
    # Set up logging so it doesn't send Ricky email.
    logging.basicConfig(level=logging.ERROR)

    # Get a list of ids of questions we're going to go change. We need
    # a list of ids so that we can feed it to the update, but then
    # also know what we need to update in the index.
    days_180 = datetime.now() - timedelta(days=180)
    q_ids = list(
        Question.objects.filter(is_archived=False)
        .filter(created__lte=days_180)
        .values_list("id", flat=True))

    if q_ids:
        log.info("Updating %d questions", len(q_ids))

        sql = """
            UPDATE questions_question
            SET is_archived = 1
            WHERE id IN (%s)
            """ % ",".join(map(str, q_ids))

        cursor = connection.cursor()
        cursor.execute(sql)
        if not transaction.get_connection().in_atomic_block:
            transaction.commit()

        if settings.ES_LIVE_INDEXING:
            try:
                # So... the first time this runs, it'll handle 160K
                # questions or so which stresses everything. Thus we
                # do it in chunks because otherwise this won't work.
                #
                # After we've done this for the first time, we can nix
                # the chunking code.
                from kitsune.search.utils import chunked

                for chunk in chunked(q_ids, 100):
                    # Fetch all the documents we need to update.
                    es_docs = get_documents(QuestionMappingType, chunk)

                    log.info("Updating %d index documents", len(es_docs))

                    documents = []

                    # For each document, update the data and stick it
                    # back in the index.
                    for doc in es_docs:
                        doc["question_is_archived"] = True
                        doc["indexed_on"] = int(time.time())
                        documents.append(doc)

                    QuestionMappingType.bulk_index(documents)
            except ES_EXCEPTIONS:
                # Something happened with ES, so let's push index
                # updating into an index_task which retries when it
                # fails because of ES issues.
                index_task.delay(to_class_path(QuestionMappingType), q_ids)
def auto_archive_old_questions():
    """Archive all questions that were created over 180 days ago"""
    # Set up logging so it doesn't send Ricky email.
    logging.basicConfig(level=logging.ERROR)

    # Get a list of ids of questions we're going to go change. We need
    # a list of ids so that we can feed it to the update, but then
    # also know what we need to update in the index.
    days_180 = datetime.now() - timedelta(days=180)
    q_ids = list(Question.objects.filter(is_archived=False)
                 .filter(created__lte=days_180)
                 .values_list('id', flat=True))

    if q_ids:
        log.info('Updating %d questions', len(q_ids))

        sql = """
            UPDATE questions_question
            SET is_archived = 1
            WHERE id IN (%s)
            """ % ','.join(map(str, q_ids))

        cursor = connection.cursor()
        cursor.execute(sql)
        if not transaction.get_connection().in_atomic_block:
            transaction.commit()

        if settings.ES_LIVE_INDEXING:
            try:
                # So... the first time this runs, it'll handle 160K
                # questions or so which stresses everything. Thus we
                # do it in chunks because otherwise this won't work.
                #
                # After we've done this for the first time, we can nix
                # the chunking code.
                from kitsune.search.utils import chunked

                for chunk in chunked(q_ids, 100):
                    # Fetch all the documents we need to update.
                    es_docs = get_documents(QuestionMappingType, chunk)

                    log.info('Updating %d index documents', len(es_docs))

                    documents = []

                    # For each document, update the data and stick it
                    # back in the index.
                    for doc in es_docs:
                        doc[u'question_is_archived'] = True
                        doc[u'indexed_on'] = int(time.time())
                        documents.append(doc)

                    QuestionMappingType.bulk_index(documents)
            except ES_EXCEPTIONS:
                # Something happened with ES, so let's push index
                # updating into an index_task which retries when it
                # fails because of ES issues.
                index_task.delay(QuestionMappingType, q_ids)
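# The archiving helpers above import chunked() from
# kitsune.search.utils, which isn't shown in this section. A minimal
# sketch of what such a helper plausibly looks like--the real
# implementation may differ in signature or laziness:
def chunked(iterable, chunk_size):
    """Yield successive chunk_size-sized lists from iterable."""
    items = list(iterable)
    for i in range(0, len(items), chunk_size):
        yield items[i:i + chunk_size]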
def update_question_vote_chunk(data):
    """Update num_votes_past_week for a number of questions."""
    # First we recalculate num_votes_past_week in the db.
    log.info('Calculating past week votes for %s questions.' % len(data))

    ids = ','.join(map(str, data))
    sql = """
        UPDATE questions_question q
        SET num_votes_past_week = (
            SELECT COUNT(created)
            FROM questions_questionvote qv
            WHERE qv.question_id = q.id
            AND qv.created >= DATE(SUBDATE(NOW(), 7))
        )
        WHERE q.id IN (%s);
        """ % ids

    cursor = connection.cursor()
    cursor.execute(sql)
    if not transaction.get_connection().in_atomic_block:
        transaction.commit()

    # Next we update our index with the changes we made directly in
    # the db.
    if data and settings.ES_LIVE_INDEXING:
        # Get the data we just updated from the database.
        sql = """
            SELECT id, num_votes_past_week
            FROM questions_question
            WHERE id in (%s);
            """ % ids
        cursor = connection.cursor()
        cursor.execute(sql)

        # Since this returns (id, num_votes_past_week) tuples, we can
        # convert that directly to a dict.
        id_to_num = dict(cursor.fetchall())

        try:
            # Fetch all the documents we need to update.
            from kitsune.questions.models import QuestionMappingType
            from kitsune.search import es_utils
            es_docs = es_utils.get_documents(QuestionMappingType, data)

            # For each document, update the data and stick it back in
            # the index.
            for doc in es_docs:
                # Note: Need to keep this in sync with
                # Question.extract_document.
                num = id_to_num[int(doc[u'id'])]
                doc[u'question_num_votes_past_week'] = num
                QuestionMappingType.index(doc, id_=doc['id'])
        except ES_EXCEPTIONS:
            # Something happened with ES, so let's push index updating
            # into an index_task which retries when it fails because
            # of ES issues.
            index_task.delay(QuestionMappingType, id_to_num.keys())
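# The tests earlier in this section call update_weekly_votes(), which
# presumably fans the full id list out to update_question_vote_chunk
# in batches. A hedged sketch of that caller--the queryset, batch
# size, and celery-style .delay() are assumptions, not the actual
# cron job:
def update_weekly_votes():
    """Recalculate num_votes_past_week for all questions, in chunks."""
    q_ids = list(Question.objects.values_list('id', flat=True))
    for chunk in chunked(q_ids, 100):
        # Assumes update_question_vote_chunk is a celery task; if it
        # is a plain function, call it directly instead.
        update_question_vote_chunk.delay(chunk)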
def opensearch_suggestions(request):
    """A simple search view that returns OpenSearch suggestions."""
    content_type = "application/x-suggestions+json"

    term = request.GET.get("q")
    if not term:
        return HttpResponseBadRequest(content_type=content_type)

    locale = locale_or_default(request.LANGUAGE_CODE)

    # FIXME: Rewrite this using the simple search business logic.
    # This currently returns templates (amongst other things) which
    # is totally wrong.
    try:
        query = dict(("%s__match" % field, term)
                     for field in DocumentMappingType.get_query_fields())
        # Upgrade the query to an analyzer-aware one.
        query = es_utils.es_query_with_analyzer(query, locale)

        wiki_s = (
            DocumentMappingType.search()
            .filter(document_is_archived=False)
            .filter(document_locale=locale)
            .values_dict("document_title", "url")
            .query(or_=query)[:5])

        query = dict(("%s__match" % field, term)
                     for field in QuestionMappingType.get_query_fields())
        question_s = (
            QuestionMappingType.search()
            .filter(question_has_helpful=True)
            .values_dict("question_title", "url")
            .query(or_=query)[:5])

        results = list(chain(question_s, wiki_s))
    except ES_EXCEPTIONS:
        # If we have ES problems, we just send back an empty result
        # set.
        results = []

    def urlize(r):
        return u"%s://%s%s" % ("https" if request.is_secure() else "http",
                               request.get_host(),
                               r["url"][0])

    def titleize(r):
        # NB: Elasticsearch returns an array of strings as the value,
        # so we mimic that and then pull out the first (and only)
        # string.
        return r.get("document_title",
                     r.get("question_title", [_("No title")]))[0]

    data = [term,
            [titleize(r) for r in results],
            [],
            [urlize(r) for r in results]]
    return HttpResponse(json.dumps(data), content_type=content_type)
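# The data list built above follows the OpenSearch suggestions
# format: [query term, completions, descriptions (unused here), URLs].
# An illustrative payload--the titles and URLs are made up:
example_suggestions = [
    u'crash',
    [u'Firefox crashes', u'Why does Firefox keep crashing?'],
    [],
    [u'https://support.mozilla.org/en-US/kb/firefox-crashes',
     u'https://support.mozilla.org/en-US/questions/12345'],
]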
def change_and_reindex(self, orm, is_archived, is_locked):
    """Archive or lock all questions that were created over 180 days ago"""
    # Get a list of ids of questions we're going to go change. We need
    # a list of ids so that we can feed it to the update, but then
    # also know what we need to update in the index.
    days_180 = datetime.now() - timedelta(days=180)

    assert is_archived != is_locked

    f = Q(created__lte=days_180)
    if is_archived:
        f |= Q(is_locked=True)
    if is_locked:
        f |= Q(is_archived=True)

    # Capture the ids before the update--the update changes the
    # is_archived/is_locked values the filter matches on, so querying
    # afterwards would miss some rows.
    q_ids = list(
        orm.Question.objects.filter(f).values_list('id', flat=True))

    # Update the DB
    (orm.Question.objects.filter(f)
     .update(is_archived=is_archived, is_locked=is_locked))

    # Using the efficient .update() of query sets doesn't emit any
    # signals, so live indexing won't automatically happen. This
    # does it manually.
    if settings.ES_LIVE_INDEXING:
        try:
            # This is going to process about 200K questions in
            # production, so it will take a while and stress
            # everything. To alleviate this stress, it is
            # divided into chunks.
            for chunk in chunked(q_ids, 1000):
                # Fetch all the documents we need to update.
                es_docs = get_documents(QuestionMappingType, chunk)

                documents = []

                # For each document, update the data and stick it
                # back in the index.
                for doc in es_docs:
                    doc[u'question_is_locked'] = is_locked
                    doc[u'question_is_archived'] = is_archived
                    doc[u'indexed_on'] = int(time.time())
                    documents.append(doc)

                if documents:
                    QuestionMappingType.bulk_index(documents)
        except ES_EXCEPTIONS:
            # Something happened with ES, so let's push index
            # updating into an index_task which retries when it
            # fails because of ES issues.
            index_task.delay(QuestionMappingType, q_ids)
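# A hedged sketch of how a South data migration might wire up the
# helper above--the class and method bodies here are assumptions for
# illustration, not the actual migration:
class Migration(DataMigration):

    def forwards(self, orm):
        # Archive (and unlock) questions older than 180 days.
        self.change_and_reindex(orm, is_archived=True, is_locked=False)

    def backwards(self, orm):
        # Revert to the old behavior: lock instead of archive.
        self.change_and_reindex(orm, is_archived=False, is_locked=True)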
def test_questions_tags(self):
    """Make sure that adding tags to a Question causes it to
    refresh the index.

    """
    tag = 'hiphop'
    eq_(QuestionMappingType.search().filter(question_tag=tag).count(), 0)
    q = QuestionFactory()
    self.refresh()
    eq_(QuestionMappingType.search().filter(question_tag=tag).count(), 0)
    q.tags.add(tag)
    self.refresh()
    eq_(QuestionMappingType.search().filter(question_tag=tag).count(), 1)
    q.tags.remove(tag)
    self.refresh()
    eq_(QuestionMappingType.search().filter(question_tag=tag).count(), 0)
def test_questions_tags(self):
    """Make sure that adding tags to a Question causes it to
    refresh the index.

    """
    tag = u'hiphop'
    eq_(QuestionMappingType.search().filter(question_tag=tag).count(), 0)
    q = question(save=True)
    self.refresh()
    eq_(QuestionMappingType.search().filter(question_tag=tag).count(), 0)
    q.tags.add(tag)
    self.refresh()
    eq_(QuestionMappingType.search().filter(question_tag=tag).count(), 1)
    q.tags.remove(tag)
    self.refresh()
    eq_(QuestionMappingType.search().filter(question_tag=tag).count(), 0)
def suggestions(request):
    """A simple search view that returns OpenSearch suggestions."""
    content_type = 'application/x-suggestions+json'

    term = request.GET.get('q')
    if not term:
        return HttpResponseBadRequest(content_type=content_type)

    site = Site.objects.get_current()
    locale = locale_or_default(request.LANGUAGE_CODE)
    try:
        query = dict(('{0!s}__match'.format(field), term)
                     for field in DocumentMappingType.get_query_fields())
        # Upgrade the query to an analyzer-aware one.
        query = es_utils.es_query_with_analyzer(query, locale)

        wiki_s = (DocumentMappingType.search()
                  .filter(document_is_archived=False)
                  .filter(document_locale=locale)
                  .values_dict('document_title', 'url')
                  .query(or_=query)[:5])

        query = dict(('{0!s}__match'.format(field), term)
                     for field in QuestionMappingType.get_query_fields())
        question_s = (QuestionMappingType.search()
                      .filter(question_has_helpful=True)
                      .values_dict('question_title', 'url')
                      .query(or_=query)[:5])

        results = list(chain(question_s, wiki_s))
    except ES_EXCEPTIONS:
        # If we have ES problems, we just send back an empty result
        # set.
        results = []

    def urlize(r):
        return u'https://{0!s}{1!s}'.format(site, r['url'])

    def titleize(r):
        # Fall back to the question title when the result is not a
        # wiki document.
        return r.get('document_title', r.get('question_title'))

    data = [term,
            [titleize(r) for r in results],
            [],
            [urlize(r) for r in results]]
    return HttpResponse(json.dumps(data), content_type=content_type)
def suggestions(request):
    """A simple search view that returns OpenSearch suggestions."""
    content_type = 'application/x-suggestions+json'

    term = request.GET.get('q')
    if not term:
        return HttpResponseBadRequest(content_type=content_type)

    site = Site.objects.get_current()
    locale = locale_or_default(request.LANGUAGE_CODE)
    try:
        query = dict(('%s__match' % field, term)
                     for field in DocumentMappingType.get_query_fields())
        # Upgrade the query to an analyzer-aware one.
        query = es_utils.es_query_with_analyzer(query, locale)

        wiki_s = (DocumentMappingType.search()
                  .filter(document_is_archived=False)
                  .filter(document_locale=locale)
                  .values_dict('document_title', 'url')
                  .query(or_=query)[:5])

        query = dict(('%s__match' % field, term)
                     for field in QuestionMappingType.get_query_fields())
        question_s = (QuestionMappingType.search()
                      .filter(question_has_helpful=True)
                      .values_dict('question_title', 'url')
                      .query(or_=query)[:5])

        results = list(chain(question_s, wiki_s))
    except ES_EXCEPTIONS:
        # If we have ES problems, we just send back an empty result
        # set.
        results = []

    def urlize(r):
        return u'https://%s%s' % (site, r['url'])

    def titleize(r):
        # Fall back to the question title when the result is not a
        # wiki document.
        return r.get('document_title', r.get('question_title'))

    data = [term,
            [titleize(r) for r in results],
            [],
            [urlize(r) for r in results]]
    return HttpResponse(json.dumps(data), content_type=content_type)
def test_case_insensitive_search(self):
    """Ensure the default searcher is case insensitive."""
    q = QuestionFactory(title="lolrus", content="I am the lolrus.")
    AnswerVoteFactory(answer__question=q)
    self.refresh()

    # This is an AND operation
    result = QuestionMappingType.search().query(
        question_title__match="LOLRUS",
        question_content__match="LOLRUS")
    assert result.count() > 0
def test_case_insensitive_search(self):
    """Ensure the default searcher is case insensitive."""
    answervote(
        answer=answer(question=question(title="lolrus",
                                        content="I am the lolrus.",
                                        save=True),
                      save=True),
        helpful=True,
    ).save()
    self.refresh()
    result = QuestionMappingType.search().query(
        question_title__text="LOLRUS",
        question_content__text="LOLRUS")
    assert result.count() > 0
def test_question_is_unindexed_on_creator_delete(self):
    search = QuestionMappingType.search()

    q = question(title=u'Does this work?', save=True)
    self.refresh()
    eq_(search.query(question_title__text='work').count(), 1)

    q.creator.delete()
    self.refresh()
    eq_(search.query(question_title__text='work').count(), 0)
def test_question_no_answers_deleted(self):
    search = QuestionMappingType.search()

    q = question(title=u'Does this work?', save=True)
    self.refresh()
    eq_(search.query(question_title__text='work').count(), 1)

    q.delete()
    self.refresh()
    eq_(search.query(question_title__text='work').count(), 0)
def test_question_is_unindexed_on_creator_delete(self):
    search = QuestionMappingType.search()

    q = question(title=u'Does this work?', save=True)
    self.refresh()
    eq_(search.query(question_title__match='work').count(), 1)

    q.creator.delete()
    self.refresh()
    eq_(search.query(question_title__match='work').count(), 0)
def test_question_is_unindexed_on_creator_delete(self):
    search = QuestionMappingType.search()

    q = QuestionFactory(title='Does this work?')
    self.refresh()
    eq_(search.query(question_title__match='work').count(), 1)

    q.creator.delete()
    self.refresh()
    eq_(search.query(question_title__match='work').count(), 0)
def test_question_no_answers_deleted(self):
    search = QuestionMappingType.search()

    q = question(title=u'Does this work?', save=True)
    self.refresh()
    eq_(search.query(question_title__match='work').count(), 1)

    q.delete()
    self.refresh()
    eq_(search.query(question_title__match='work').count(), 0)
def test_question_no_answers_deleted(self):
    search = QuestionMappingType.search()

    q = QuestionFactory(title='Does this work?')
    self.refresh()
    eq_(search.query(question_title__match='work').count(), 1)

    q.delete()
    self.refresh()
    eq_(search.query(question_title__match='work').count(), 0)
def test_question_spam_is_unindexed(self):
    search = QuestionMappingType.search()

    q = question(title=u'I am spam', save=True)
    self.refresh()
    eq_(search.query(question_title__match='spam').count(), 1)

    q.is_spam = True
    q.save()
    self.refresh()
    eq_(search.query(question_title__match='spam').count(), 0)
def test_case_insensitive_search(self):
    """Ensure the default searcher is case insensitive."""
    answervote(
        answer=answer(question=question(title='lolrus',
                                        content='I am the lolrus.',
                                        save=True),
                      save=True),
        helpful=True).save()
    self.refresh()

    result = QuestionMappingType.search().query(
        question_title__match='LOLRUS',
        question_content__match='LOLRUS')
    assert result.count() > 0
def test_question_spam_is_unindexed(self):
    search = QuestionMappingType.search()

    q = QuestionFactory(title='I am spam')
    self.refresh()
    eq_(search.query(question_title__match='spam').count(), 1)

    q.is_spam = True
    q.save()
    self.refresh()
    eq_(search.query(question_title__match='spam').count(), 0)
def test_answer_spam_is_unindexed(self):
    search = QuestionMappingType.search()

    a = AnswerFactory(content='I am spam')
    self.refresh()
    eq_(search.query(question_answer_content__match='spam').count(), 1)

    a.is_spam = True
    a.save()
    self.refresh()
    eq_(search.query(question_answer_content__match='spam').count(), 0)
def test_answer_spam_is_unindexed(self):
    search = QuestionMappingType.search()

    a = answer(content=u'I am spam', save=True)
    self.refresh()
    eq_(search.query(question_answer_content__match='spam').count(), 1)

    a.is_spam = True
    a.save()
    self.refresh()
    eq_(search.query(question_answer_content__match='spam').count(), 0)
def test_question_products(self):
    """Make sure that adding products to a Question causes it to
    refresh the index.

    """
    p = product(slug=u'desktop', save=True)
    eq_(QuestionMappingType.search().filter(product=p.slug).count(), 0)
    q = question(save=True)
    self.refresh()
    eq_(QuestionMappingType.search().filter(product=p.slug).count(), 0)
    q.products.add(p)
    self.refresh()
    eq_(QuestionMappingType.search().filter(product=p.slug).count(), 1)
    q.products.remove(p)
    self.refresh()
    # Make sure the question itself is still there and that we didn't
    # accidentally delete it through screwed up signal handling:
    eq_(QuestionMappingType.search().filter().count(), 1)
    eq_(QuestionMappingType.search().filter(product=p.slug).count(), 0)
def test_question_topics(self):
    """Make sure that adding topics to a Question causes it to
    refresh the index.

    """
    t = topic(slug=u"hiphop", save=True)
    eq_(QuestionMappingType.search().filter(topic=t.slug).count(), 0)
    q = question(save=True)
    self.refresh()
    eq_(QuestionMappingType.search().filter(topic=t.slug).count(), 0)
    q.topics.add(t)
    self.refresh()
    eq_(QuestionMappingType.search().filter(topic=t.slug).count(), 1)
    q.topics.clear()
    self.refresh()
    # Make sure the question itself is still there and that we didn't
    # accidentally delete it through screwed up signal handling:
    eq_(QuestionMappingType.search().filter().count(), 1)
    eq_(QuestionMappingType.search().filter(topic=t.slug).count(), 0)
def suggestions(request):
    """A simple search view that returns OpenSearch suggestions."""
    mimetype = 'application/x-suggestions+json'

    term = request.GET.get('q')
    if not term:
        return HttpResponseBadRequest(mimetype=mimetype)

    site = Site.objects.get_current()
    locale = locale_or_default(request.LANGUAGE_CODE)
    try:
        query = dict(('%s__text' % field, term)
                     for field in DocumentMappingType.get_query_fields())
        wiki_s = (DocumentMappingType.search()
                  .filter(document_is_archived=False)
                  .filter(document_locale=locale)
                  .values_dict('document_title', 'url')
                  .query(or_=query)[:5])

        query = dict(('%s__text' % field, term)
                     for field in QuestionMappingType.get_query_fields())
        question_s = (QuestionMappingType.search()
                      .filter(question_has_helpful=True)
                      .values_dict('question_title', 'url')
                      .query(or_=query)[:5])

        results = list(chain(question_s, wiki_s))
    except ES_EXCEPTIONS:
        # If we have ES problems, we just send back an empty result
        # set.
        results = []

    urlize = lambda r: u'https://%s%s' % (site, r['url'])
    titleize = lambda r: (r['document_title'] if 'document_title' in r
                          else r['question_title'])
    data = [term,
            [titleize(r) for r in results],
            [],
            [urlize(r) for r in results]]
    return HttpResponse(json.dumps(data), mimetype=mimetype)
def test_question_questionvote(self):
    search = QuestionMappingType.search()

    # Create a question and verify it doesn't show up in a
    # query for num_votes__gt=0.
    q = QuestionFactory(title='model makers will inherit the earth')
    self.refresh()
    eq_(search.filter(question_num_votes__gt=0).count(), 0)

    # Add a QuestionVote--it should show up now.
    QuestionVoteFactory(question=q)
    self.refresh()
    eq_(search.filter(question_num_votes__gt=0).count(), 1)
def test_question_questionvote(self):
    search = QuestionMappingType.search()

    # Create a question and verify it doesn't show up in a
    # query for num_votes__gt=0.
    q = question(title=u'model makers will inherit the earth', save=True)
    self.refresh()
    eq_(search.filter(question_num_votes__gt=0).count(), 0)

    # Add a QuestionVote--it should show up now.
    questionvote(question=q, save=True)
    self.refresh()
    eq_(search.filter(question_num_votes__gt=0).count(), 1)
def test_question_topics(self):
    """Make sure that adding topics to a Question causes it to
    refresh the index.

    """
    p = product(save=True)
    t = topic(slug=u'hiphop', product=p, save=True)
    eq_(QuestionMappingType.search().filter(topic=t.slug).count(), 0)
    q = question(save=True)
    self.refresh()
    eq_(QuestionMappingType.search().filter(topic=t.slug).count(), 0)
    q.topics.add(t)
    self.refresh()
    eq_(QuestionMappingType.search().filter(topic=t.slug).count(), 1)
    q.topics.clear()
    self.refresh()
    # Make sure the question itself is still there and that we didn't
    # accidentally delete it through screwed up signal handling:
    eq_(QuestionMappingType.search().filter().count(), 1)
    eq_(QuestionMappingType.search().filter(topic=t.slug).count(), 0)
def test_question_one_answer_deleted(self):
    search = QuestionMappingType.search()

    q = QuestionFactory(title='are model makers the new pink?')
    a = AnswerFactory(content='yes.', question=q)
    self.refresh()

    # Question and its answers are a single document--so the index
    # count should be only 1.
    eq_(search.query(question_title__match='pink').count(), 1)

    # After deleting the answer, the question document should remain.
    a.delete()
    self.refresh()
    eq_(search.query(question_title__match='pink').count(), 1)

    # Delete the question and it should be removed from the index.
    q.delete()
    self.refresh()
    eq_(search.query(question_title__match='pink').count(), 0)
def test_added(self):
    search = QuestionMappingType.search()

    # Create a question--that adds one document to the index.
    q = QuestionFactory(title='Does this test work?')
    self.refresh()
    eq_(search.count(), 1)
    eq_(search.query(question_title__match='test').count(), 1)

    # No answers exist yet, so none should be searchable.
    eq_(search.query(question_answer_content__match='only').count(), 0)

    # Create an answer for the question. It should be searchable now.
    AnswerFactory(content="There's only one way to find out!", question=q)
    self.refresh()
    eq_(search.query(question_answer_content__match='only').count(), 1)

    # Make sure that there's only one question document in the
    # index--creating an answer should have updated the existing one.
    eq_(search.count(), 1)
def test_question_is_reindexed_on_username_change(self):
    search = QuestionMappingType.search()

    u = user(username='******', save=True)

    q = question(creator=u, title=u'Hello', save=True)
    a = answer(creator=u, content=u'I love you', save=True)
    self.refresh()
    eq_(search.query(question_title__match='hello')[0]['question_creator'],
        u'dexter')
    query = search.query(question_answer_content__match='love')
    eq_(query[0]['question_answer_creator'], [u'dexter'])

    # Change the username and verify the index.
    u.username = '******'
    u.save()
    self.refresh()
    eq_(search.query(question_title__match='hello')[0]['question_creator'],
        u'walter')
    query = search.query(question_answer_content__match='love')
    eq_(query[0]['question_answer_creator'], [u'walter'])
def test_question_one_answer_deleted(self):
    search = QuestionMappingType.search()

    q = question(title=u'are model makers the new pink?', save=True)
    a = answer(content=u'yes.', question=q, save=True)
    self.refresh()

    # Question and its answers are a single document--so the
    # index count should be only 1.
    eq_(search.query(question_title__text='pink').count(), 1)

    # After deleting the answer, the question document should
    # remain.
    a.delete()
    self.refresh()
    eq_(search.query(question_title__text='pink').count(), 1)

    # Delete the question and it should be removed from the
    # index.
    q.delete()
    self.refresh()
    eq_(search.query(question_title__text='pink').count(), 0)
def test_question_is_reindexed_on_username_change(self):
    search = QuestionMappingType.search()

    u = user(username='******', save=True)

    question(creator=u, title=u'Hello', save=True)
    answer(creator=u, content=u'I love you', save=True)
    self.refresh()
    eq_(search.query(question_title__match='hello')[0]['question_creator'],
        u'dexter')
    query = search.query(question_answer_content__match='love')
    eq_(query[0]['question_answer_creator'], [u'dexter'])

    # Change the username and verify the index.
    u.username = '******'
    u.save()
    self.refresh()
    eq_(search.query(question_title__match='hello')[0]['question_creator'],
        u'walter')
    query = search.query(question_answer_content__match='love')
    eq_(query[0]['question_answer_creator'], [u'walter'])
def test_question_is_reindexed_on_username_change(self):
    search = QuestionMappingType.search()

    u = UserFactory(username='******')

    QuestionFactory(creator=u, title='Hello')
    AnswerFactory(creator=u, content='I love you')
    self.refresh()
    eq_(search.query(question_title__match='hello')[0]['question_creator'],
        'dexter')
    query = search.query(question_answer_content__match='love')
    eq_(query[0]['question_answer_creator'], ['dexter'])

    # Change the username and verify the index.
    u.username = '******'
    u.save()
    self.refresh()
    eq_(search.query(question_title__match='hello')[0]['question_creator'],
        'walter')
    query = search.query(question_answer_content__match='love')
    eq_(query[0]['question_answer_creator'], ['walter'])
def get_doctypes(self):
    return [QuestionMappingType.get_mapping_type_name()]
def search(request, template=None):
    """ES-specific search view"""

    # JSON-specific variables
    is_json = (request.GET.get('format') == 'json')
    callback = request.GET.get('callback', '').strip()
    mimetype = 'application/x-javascript' if callback else 'application/json'

    # Search "Expires" header format
    expires_fmt = '%A, %d %B %Y %H:%M:%S GMT'

    # Check callback is valid
    if is_json and callback and not jsonp_is_valid(callback):
        return HttpResponse(
            json.dumps({'error': _('Invalid callback function.')}),
            mimetype=mimetype, status=400)

    language = locale_or_default(
        request.GET.get('language', request.LANGUAGE_CODE))
    r = request.GET.copy()
    a = request.GET.get('a', '0')

    # Search default values
    try:
        category = (map(int, r.getlist('category')) or
                    settings.SEARCH_DEFAULT_CATEGORIES)
    except ValueError:
        category = settings.SEARCH_DEFAULT_CATEGORIES
    r.setlist('category', category)

    # Basic form
    if a == '0':
        r['w'] = r.get('w', constants.WHERE_BASIC)
    # Advanced form
    if a == '2':
        r['language'] = language
        r['a'] = '1'

    # TODO: Rewrite so SearchForm is unbound initially and we can use
    # `initial` on the form fields.
    if 'include_archived' not in r:
        r['include_archived'] = False

    search_form = SearchForm(r)
    search_form.set_allowed_forums(request.user)

    if not search_form.is_valid() or a == '2':
        if is_json:
            return HttpResponse(
                json.dumps({'error': _('Invalid search data.')}),
                mimetype=mimetype,
                status=400)

        t = template if request.MOBILE else 'search/form.html'
        search_ = render(request, t, {
            'advanced': a, 'request': request,
            'search_form': search_form})
        search_['Cache-Control'] = 'max-age=%s' % \
            (settings.SEARCH_CACHE_PERIOD * 60)
        search_['Expires'] = (
            (datetime.utcnow() +
             timedelta(minutes=settings.SEARCH_CACHE_PERIOD))
            .strftime(expires_fmt))
        return search_

    cleaned = search_form.cleaned_data

    if request.MOBILE and cleaned['w'] == constants.WHERE_BASIC:
        cleaned['w'] = constants.WHERE_WIKI

    page = max(smart_int(request.GET.get('page')), 1)
    offset = (page - 1) * settings.SEARCH_RESULTS_PER_PAGE

    lang = language.lower()
    if settings.LANGUAGES.get(lang):
        lang_name = settings.LANGUAGES[lang]
    else:
        lang_name = ''

    # We use a regular S here because we want to search across
    # multiple doctypes.
    searcher = (AnalyzerS().es(urls=settings.ES_URLS)
                .indexes(es_utils.READ_INDEX))

    wiki_f = F(model='wiki_document')
    question_f = F(model='questions_question')
    discussion_f = F(model='forums_thread')

    # Start - wiki filters

    if cleaned['w'] & constants.WHERE_WIKI:
        # Category filter
        if cleaned['category']:
            wiki_f &= F(document_category__in=cleaned['category'])

        # Locale filter
        wiki_f &= F(document_locale=language)

        # Product filter
        products = cleaned['product']
        for p in products:
            wiki_f &= F(product=p)

        # Topics filter
        topics = cleaned['topics']
        for t in topics:
            wiki_f &= F(topic=t)

        # Archived bit
        if a == '0' and not cleaned['include_archived']:
            # Default to NO for basic search:
            cleaned['include_archived'] = False
        if not cleaned['include_archived']:
            wiki_f &= F(document_is_archived=False)

    # End - wiki filters

    # Start - support questions filters

    if cleaned['w'] & constants.WHERE_SUPPORT:
        # Solved is set by default if using basic search
        if a == '0' and not cleaned['has_helpful']:
            cleaned['has_helpful'] = constants.TERNARY_YES

        # These filters are ternary, they can be either YES, NO, or OFF
        ternary_filters = ('is_locked', 'is_solved', 'has_answers',
                           'has_helpful')
        d = dict(('question_%s' % filter_name,
                  _ternary_filter(cleaned[filter_name]))
                 for filter_name in ternary_filters
                 if cleaned[filter_name])
        if d:
            question_f &= F(**d)

        if cleaned['asked_by']:
            question_f &= F(question_creator=cleaned['asked_by'])

        if cleaned['answered_by']:
            question_f &= F(question_answer_creator=cleaned['answered_by'])

        q_tags = [t.strip() for t in cleaned['q_tags'].split(',')]
        for t in q_tags:
            if t:
                question_f &= F(question_tag=t)

        # Product filter
        products = cleaned['product']
        for p in products:
            question_f &= F(product=p)

        # Topics filter
        topics = cleaned['topics']
        for t in topics:
            question_f &= F(topic=t)

    # End - support questions filters

    # Start - discussion forum filters

    if cleaned['w'] & constants.WHERE_DISCUSSION:
        if cleaned['author']:
            discussion_f &= F(post_author_ord=cleaned['author'])

        if cleaned['thread_type']:
            if constants.DISCUSSION_STICKY in cleaned['thread_type']:
                discussion_f &= F(post_is_sticky=1)

            if constants.DISCUSSION_LOCKED in cleaned['thread_type']:
                discussion_f &= F(post_is_locked=1)

        valid_forum_ids = [
            f.id for f in Forum.authorized_forums_for_user(request.user)]

        forum_ids = None
        if cleaned['forum']:
            forum_ids = [f for f in cleaned['forum'] if f in valid_forum_ids]

        # If we removed all the forums they wanted to look at or if
        # they didn't specify, then we filter on the list of all
        # forums they're authorized to look at.
        if not forum_ids:
            forum_ids = valid_forum_ids

        discussion_f &= F(post_forum_id__in=forum_ids)

    # End - discussion forum filters

    # Created filter
    unix_now = int(time.time())
    interval_filters = (
        ('created', cleaned['created'], cleaned['created_date']),
        ('updated', cleaned['updated'], cleaned['updated_date']))
    for filter_name, filter_option, filter_date in interval_filters:
        if filter_option == constants.INTERVAL_BEFORE:
            before = {filter_name + '__gte': 0,
                      filter_name + '__lte': max(filter_date, 0)}

            discussion_f &= F(**before)
            question_f &= F(**before)
        elif filter_option == constants.INTERVAL_AFTER:
            after = {filter_name + '__gte': min(filter_date, unix_now),
                     filter_name + '__lte': unix_now}

            discussion_f &= F(**after)
            question_f &= F(**after)

    # In basic search, we limit questions to those from the last
    # SEARCH_DEFAULT_MAX_QUESTION_AGE seconds.
    if a == '0':
        start_date = unix_now - settings.SEARCH_DEFAULT_MAX_QUESTION_AGE
        question_f &= F(created__gte=start_date)

    # Note: num_voted (with a d) is a different field than num_votes
    # (with an s). The former is a dropdown and the latter is an
    # integer value.
    if cleaned['num_voted'] == constants.INTERVAL_BEFORE:
        question_f &= F(question_num_votes__lte=max(cleaned['num_votes'], 0))
    elif cleaned['num_voted'] == constants.INTERVAL_AFTER:
        question_f &= F(question_num_votes__gte=cleaned['num_votes'])

    # Done with all the filtery stuff--time to generate results

    # Combine all the filters and add to the searcher
    doctypes = []
    final_filter = F()
    if cleaned['w'] & constants.WHERE_WIKI:
        doctypes.append(DocumentMappingType.get_mapping_type_name())
        final_filter |= wiki_f

    if cleaned['w'] & constants.WHERE_SUPPORT:
        doctypes.append(QuestionMappingType.get_mapping_type_name())
        final_filter |= question_f

    if cleaned['w'] & constants.WHERE_DISCUSSION:
        doctypes.append(ThreadMappingType.get_mapping_type_name())
        final_filter |= discussion_f

    searcher = searcher.doctypes(*doctypes)
    searcher = searcher.filter(final_filter)

    if 'explain' in request.GET and request.GET['explain'] == '1':
        searcher = searcher.explain()

    documents = ComposedList()

    try:
        cleaned_q = cleaned['q']

        # Set up the highlights. Show the entire field highlighted.
        searcher = searcher.highlight(
            'question_content',  # support forum
            'document_summary',  # kb
            'post_content',  # contributor forum
            pre_tags=['<b>'],
            post_tags=['</b>'],
            number_of_fragments=0)

        # Set up boosts
        searcher = searcher.boost(
            question_title=4.0,
            question_content=3.0,
            question_answer_content=3.0,
            post_title=2.0,
            post_content=1.0,
            document_title=6.0,
            document_content=1.0,
            document_keywords=8.0,
            document_summary=2.0,

            # Text phrases in document titles and content get an extra
            # boost.
            document_title__text_phrase=10.0,
            document_content__text_phrase=8.0)

        # Apply sortby for advanced search of questions
        if cleaned['w'] == constants.WHERE_SUPPORT:
            sortby = cleaned['sortby']
            try:
                searcher = searcher.order_by(
                    *constants.SORT_QUESTIONS[sortby])
            except IndexError:
                # Skip index errors because they imply the user is
                # sending us sortby values that aren't valid.
                pass

        # Apply sortby for advanced search of kb documents
        if cleaned['w'] == constants.WHERE_WIKI:
            sortby = cleaned['sortby_documents']
            try:
                searcher = searcher.order_by(
                    *constants.SORT_DOCUMENTS[sortby])
            except IndexError:
                # Skip index errors because they imply the user is
                # sending us sortby values that aren't valid.
                pass

        # Build the query
        if cleaned_q:
            query_fields = chain(*[cls.get_query_fields()
                                   for cls in get_mapping_types()])
            query = {}
            # Create text and text_phrase queries for every field
            # we want to search.
            for field in query_fields:
                for query_type in ['text', 'text_phrase']:
                    query['%s__%s' % (field, query_type)] = cleaned_q

            # Transform the query to use locale aware analyzers.
            query = es_utils.es_query_with_analyzer(query, language)

            searcher = searcher.query(should=True, **query)

        num_results = min(searcher.count(), settings.SEARCH_MAX_RESULTS)

        # TODO - Can ditch the ComposedList here, but we need
        # something that paginate can use to figure out the paging.
        documents = ComposedList()
        documents.set_count(('results', searcher), num_results)

        results_per_page = settings.SEARCH_RESULTS_PER_PAGE
        pages = paginate(request, documents, results_per_page)

        # If we know there aren't any results, let's cheat and in
        # doing that, not hit ES again.
        if num_results == 0:
            searcher = []
        else:
            # Get the documents we want to show and add them to
            # docs_for_page
            documents = documents[offset:offset + results_per_page]

            if len(documents) == 0:
                # If the user requested a page that's beyond the
                # pagination, then documents is an empty list and
                # there are no results to show.
                searcher = []
            else:
                bounds = documents[0][1]
                searcher = searcher.values_dict()[bounds[0]:bounds[1]]

        results = []
        for i, doc in enumerate(searcher):
            rank = i + offset

            if doc['model'] == 'wiki_document':
                summary = _build_es_excerpt(doc)
                if not summary:
                    summary = doc['document_summary']
                result = {
                    'title': doc['document_title'],
                    'type': 'document'}

            elif doc['model'] == 'questions_question':
                summary = _build_es_excerpt(doc)
                if not summary:
                    # We're excerpting only question_content, so if
                    # the query matched question_title or
                    # question_answer_content, then there won't be any
                    # question_content excerpts. In that case, just
                    # show the question--but only the first 500
                    # characters.
                    summary = bleach.clean(
                        doc['question_content'], strip=True)[:500]

                result = {
                    'title': doc['question_title'],
                    'type': 'question',
                    'is_solved': doc['question_is_solved'],
                    'num_answers': doc['question_num_answers'],
                    'num_votes': doc['question_num_votes'],
                    'num_votes_past_week': doc['question_num_votes_past_week']}

            else:
                summary = _build_es_excerpt(doc, first_only=True)
                result = {
                    'title': doc['post_title'],
                    'type': 'thread'}

            result['url'] = doc['url']
            result['object'] = ObjectDict(doc)
            result['search_summary'] = summary
            result['rank'] = rank
            result['score'] = doc._score
            result['explanation'] = escape(format_explanation(
                doc._explanation))
            results.append(result)

    except ES_EXCEPTIONS as exc:
        # Handle timeout and all those other transient errors with a
        # "Search Unavailable" rather than a Django error page.
        if is_json:
            return HttpResponse(
                json.dumps({'error': _('Search Unavailable')}),
                mimetype=mimetype, status=503)

        # Cheating here: Convert from 'Timeout()' to 'timeout' so
        # we have less code, but still have good stats.
        exc_bucket = repr(exc).lower().strip('()')
        statsd.incr('search.esunified.{0}'.format(exc_bucket))

        import logging
        logging.exception(exc)

        t = 'search/mobile/down.html' if request.MOBILE else 'search/down.html'
        return render(request, t, {'q': cleaned['q']}, status=503)

    items = [(k, v) for k in search_form.fields
             for v in r.getlist(k) if v and k != 'a']
    items.append(('a', '2'))

    if is_json:
        # Models are not json serializable.
        for r in results:
            del r['object']
        data = {}
        data['results'] = results
        data['total'] = len(results)
        data['query'] = cleaned['q']
        if not results:
            data['message'] = _('No pages matched the search criteria')
        json_data = json.dumps(data)
        if callback:
            json_data = callback + '(' + json_data + ');'

        return HttpResponse(json_data, mimetype=mimetype)

    fallback_results = None
    if num_results == 0:
        fallback_results = _fallback_results(language, cleaned['product'])

    results_ = render(request, template, {
        'num_results': num_results,
        'results': results,
        'fallback_results': fallback_results,
        'q': cleaned['q'],
        'w': cleaned['w'],
        'product': Product.objects.filter(slug__in=cleaned['product']),
        'products': Product.objects.filter(visible=True),
        'pages': pages,
        'search_form': search_form,
        'lang_name': lang_name})
    results_['Cache-Control'] = 'max-age=%s' % \
        (settings.SEARCH_CACHE_PERIOD * 60)
    results_['Expires'] = (
        (datetime.utcnow() +
         timedelta(minutes=settings.SEARCH_CACHE_PERIOD))
        .strftime(expires_fmt))
    results_.set_cookie(settings.LAST_SEARCH_COOKIE, urlquote(cleaned['q']),
                        max_age=3600, secure=False, httponly=False)
    return results_
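# With format=json, the happy path above serializes a dict shaped
# roughly like this--all values are invented for illustration:
example_json_response = {
    'results': [
        {'title': u'Firefox crashes',
         'type': 'document',
         'url': '/en-US/kb/firefox-crashes',
         'search_summary': u'What to do when <b>Firefox</b> crashes...',
         'rank': 0,
         'score': 1.73,
         'explanation': ''},
    ],
    'total': 1,
    'query': u'crash',
}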
def generate_simple_search(search_form, language, with_highlights=False):
    """Generates an S given a form

    :arg search_form: a validated SimpleSearch form
    :arg language: the language code
    :arg with_highlights: whether or not to ask for highlights

    :returns: a fully formed S

    """
    # We use a regular S here because we want to search across
    # multiple doctypes.
    searcher = (
        es_utils.AnalyzerS().es(
            urls=settings.ES_URLS,
            timeout=settings.ES_TIMEOUT,
            use_ssl=settings.ES_USE_SSL,
            http_auth=settings.ES_HTTP_AUTH,
            connection_class=RequestsHttpConnection,
        )
        .indexes(es_utils.read_index("default")))

    cleaned = search_form.cleaned_data

    doctypes = []
    final_filter = es_utils.F()
    cleaned_q = cleaned["q"]
    products = cleaned["product"]

    # Handle wiki filters
    if cleaned["w"] & constants.WHERE_WIKI:
        wiki_f = es_utils.F(
            model="wiki_document",
            document_category__in=settings.SEARCH_DEFAULT_CATEGORIES,
            document_locale=language,
            document_is_archived=False,
        )
        for p in products:
            wiki_f &= es_utils.F(product=p)

        doctypes.append(DocumentMappingType.get_mapping_type_name())
        final_filter |= wiki_f

    # Handle question filters
    if cleaned["w"] & constants.WHERE_SUPPORT:
        question_f = es_utils.F(
            model="questions_question",
            question_is_archived=False,
            question_has_helpful=True,
        )
        for p in products:
            question_f &= es_utils.F(product=p)

        doctypes.append(QuestionMappingType.get_mapping_type_name())
        final_filter |= question_f

    # Build a filter for those filters and add the other bits to
    # finish the search
    searcher = searcher.doctypes(*doctypes)
    searcher = searcher.filter(final_filter)

    if cleaned["explain"]:
        searcher = searcher.explain()

    if with_highlights:
        # Set up the highlights. Show the entire field highlighted.
        searcher = searcher.highlight(
            "question_content",  # support forum
            "document_summary",  # kb
            pre_tags=["<b>"],
            post_tags=["</b>"],
            number_of_fragments=0,
        )

    searcher = apply_boosts(searcher)

    # Build the query
    query_fields = chain(*[
        cls.get_query_fields()
        for cls in [DocumentMappingType, QuestionMappingType]
    ])
    query = {}
    # Create match and match_phrase queries for every field
    # we want to search.
    for field in query_fields:
        for query_type in ["match", "match_phrase"]:
            query["%s__%s" % (field, query_type)] = cleaned_q

    # Transform the query to use locale aware analyzers.
    query = es_utils.es_query_with_analyzer(query, language)

    searcher = searcher.query(should=True, **query)
    return searcher
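# A hedged sketch of calling generate_simple_search() from a view.
# The form data and the slicing are illustrative only, not the actual
# view wiring:
search_form = SimpleSearchForm({"q": "firefox crashes",
                                "w": constants.WHERE_BASIC},
                               auto_id=False)
if search_form.is_valid():
    searcher = generate_simple_search(search_form, "en-US",
                                      with_highlights=True)
    results = searcher[:10]  # slicing executes the ES query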
def simple_search(request, template=None): """ES-specific simple search view. This view is for end user searching of the Knowledge Base and Support Forum. Filtering options are limited to: * product (`product=firefox`, for example, for only Firefox results) * document type (`w=2`, for esample, for Support Forum questions only) """ # Redirect to old Advanced Search URLs (?a={1,2}) to the new URL. a = request.GET.get("a") if a in ["1", "2"]: new_url = reverse("search.advanced") + "?" + request.GET.urlencode() return HttpResponseRedirect(new_url) # JSON-specific variables is_json = request.GET.get("format") == "json" callback = request.GET.get("callback", "").strip() content_type = "application/x-javascript" if callback else "application/json" # Check callback is valid if is_json and callback and not jsonp_is_valid(callback): return HttpResponse( json.dumps({"error": _("Invalid callback function.")}), content_type=content_type, status=400 ) language = locale_or_default(request.GET.get("language", request.LANGUAGE_CODE)) r = request.GET.copy() # TODO: Do we really need to add this to the URL if it isn't already there? r["w"] = r.get("w", constants.WHERE_BASIC) # TODO: Break out a separate simple search form. search_form = SimpleSearchForm(r, auto_id=False) if not search_form.is_valid(): if is_json: return HttpResponse(json.dumps({"error": _("Invalid search data.")}), content_type=content_type, status=400) t = template if request.MOBILE else "search/form.html" search_ = render(request, t, {"advanced": False, "request": request, "search_form": search_form}) cache_period = settings.SEARCH_CACHE_PERIOD search_["Cache-Control"] = "max-age=%s" % (cache_period * 60) search_["Expires"] = (datetime.utcnow() + timedelta(minutes=cache_period)).strftime(EXPIRES_FMT) return search_ cleaned = search_form.cleaned_data # On mobile, we default to just wiki results. if request.MOBILE and cleaned["w"] == constants.WHERE_BASIC: cleaned["w"] = constants.WHERE_WIKI page = max(smart_int(request.GET.get("page")), 1) offset = (page - 1) * settings.SEARCH_RESULTS_PER_PAGE lang = language.lower() if settings.LANGUAGES_DICT.get(lang): lang_name = settings.LANGUAGES_DICT[lang] else: lang_name = "" # We use a regular S here because we want to search across # multiple doctypes. searcher = AnalyzerS().es(urls=settings.ES_URLS).indexes(es_utils.read_index("default")) wiki_f = F(model="wiki_document") question_f = F(model="questions_question") cleaned_q = cleaned["q"] products = cleaned["product"] if not products and "all_products" not in request.GET: lowered_q = cleaned_q.lower() if "thunderbird" in lowered_q: products.append("thunderbird") elif "android" in lowered_q: products.append("mobile") elif "ios" in lowered_q or "ipad" in lowered_q or "ipod" in lowered_q or "iphone" in lowered_q: products.append("ios") elif "firefox os" in lowered_q: products.append("firefox-os") elif "firefox" in lowered_q: products.append("firefox") # Start - wiki filters if cleaned["w"] & constants.WHERE_WIKI: # Category filter wiki_f &= F(document_category__in=settings.SEARCH_DEFAULT_CATEGORIES) # Locale filter wiki_f &= F(document_locale=language) # Product filter for p in products: wiki_f &= F(product=p) # Archived bit wiki_f &= F(document_is_archived=False) # End - wiki filters # Start - support questions filters if cleaned["w"] & constants.WHERE_SUPPORT: # Has helpful answers is set by default if using basic search cleaned["has_helpful"] = constants.TERNARY_YES # No archived questions in default search. 
cleaned["is_archived"] = constants.TERNARY_NO # These filters are ternary, they can be either YES, NO, or OFF ternary_filters = ("has_helpful", "is_archived") d = dict( ("question_%s" % filter_name, _ternary_filter(cleaned[filter_name])) for filter_name in ternary_filters if cleaned[filter_name] ) if d: question_f &= F(**d) # Product filter for p in products: question_f &= F(product=p) # End - support questions filters # Done with all the filtery stuff--time to generate results # Combine all the filters and add to the searcher doctypes = [] final_filter = F() if cleaned["w"] & constants.WHERE_WIKI: doctypes.append(DocumentMappingType.get_mapping_type_name()) final_filter |= wiki_f if cleaned["w"] & constants.WHERE_SUPPORT: doctypes.append(QuestionMappingType.get_mapping_type_name()) final_filter |= question_f searcher = searcher.doctypes(*doctypes) searcher = searcher.filter(final_filter) if "explain" in request.GET and request.GET["explain"] == "1": searcher = searcher.explain() documents = ComposedList() try: # Set up the highlights. Show the entire field highlighted. searcher = searcher.highlight( "question_content", # support forum "document_summary", # kb pre_tags=["<b>"], post_tags=["</b>"], number_of_fragments=0, ) # Set up boosts searcher = searcher.boost( question_title=4.0, question_content=3.0, question_answer_content=3.0, document_title=6.0, document_content=1.0, document_keywords=8.0, document_summary=2.0, # Text phrases in document titles and content get an extra # boost. document_title__match_phrase=10.0, document_content__match_phrase=8.0, ) # Build the query query_fields = chain(*[cls.get_query_fields() for cls in [DocumentMappingType, QuestionMappingType]]) query = {} # Create match and match_phrase queries for every field # we want to search. for field in query_fields: for query_type in ["match", "match_phrase"]: query["%s__%s" % (field, query_type)] = cleaned_q # Transform the query to use locale aware analyzers. query = es_utils.es_query_with_analyzer(query, language) searcher = searcher.query(should=True, **query) num_results = min(searcher.count(), settings.SEARCH_MAX_RESULTS) # TODO - Can ditch the ComposedList here, but we need # something that paginate can use to figure out the paging. documents = ComposedList() documents.set_count(("results", searcher), num_results) results_per_page = settings.SEARCH_RESULTS_PER_PAGE pages = paginate(request, documents, results_per_page) # If we know there aren't any results, let's cheat and in # doing that, not hit ES again. if num_results == 0: searcher = [] else: # Get the documents we want to show and add them to # docs_for_page documents = documents[offset : offset + results_per_page] if len(documents) == 0: # If the user requested a page that's beyond the # pagination, then documents is an empty list and # there are no results to show. searcher = [] else: bounds = documents[0][1] searcher = searcher[bounds[0] : bounds[1]] results = [] for i, doc in enumerate(searcher): rank = i + offset if doc["model"] == "wiki_document": summary = _build_es_excerpt(doc) if not summary: summary = doc["document_summary"] result = {"title": doc["document_title"], "type": "document"} elif doc["model"] == "questions_question": summary = _build_es_excerpt(doc) if not summary: # We're excerpting only question_content, so if # the query matched question_title or # question_answer_content, then there won't be any # question_content excerpts. In that case, just # show the question--but only the first 500 # characters. 
summary = bleach.clean(doc["question_content"], strip=True)[:500] result = { "title": doc["question_title"], "type": "question", "is_solved": doc["question_is_solved"], "num_answers": doc["question_num_answers"], "num_votes": doc["question_num_votes"], "num_votes_past_week": doc["question_num_votes_past_week"], } result["url"] = doc["url"] result["object"] = doc result["search_summary"] = summary result["rank"] = rank result["score"] = doc.es_meta.score result["explanation"] = escape(format_explanation(doc.es_meta.explanation)) result["id"] = doc["id"] results.append(result) except ES_EXCEPTIONS as exc: # Handle timeout and all those other transient errors with a # "Search Unavailable" rather than a Django error page. if is_json: return HttpResponse(json.dumps({"error": _("Search Unavailable")}), content_type=content_type, status=503) # Cheating here: Convert from 'Timeout()' to 'timeout' so # we have less code, but still have good stats. exc_bucket = repr(exc).lower().strip("()") statsd.incr("search.esunified.{0}".format(exc_bucket)) log.exception(exc) t = "search/mobile/down.html" if request.MOBILE else "search/down.html" return render(request, t, {"q": cleaned["q"]}, status=503) items = [(k, v) for k in search_form.fields for v in r.getlist(k) if v and k != "a"] items.append(("a", "2")) fallback_results = None if num_results == 0: fallback_results = _fallback_results(language, cleaned["product"]) product = Product.objects.filter(slug__in=cleaned["product"]) if product: product_titles = [_(p.title, "DB: products.Product.title") for p in product] else: product_titles = [_("All Products")] product_titles = ", ".join(product_titles) data = { "num_results": num_results, "results": results, "fallback_results": fallback_results, "product_titles": product_titles, "q": cleaned["q"], "w": cleaned["w"], "lang_name": lang_name, } if is_json: # Models are not json serializable. for r in data["results"]: del r["object"] data["total"] = len(data["results"]) data["products"] = [{"slug": p.slug, "title": p.title} for p in Product.objects.filter(visible=True)] if product: data["product"] = product[0].slug pages = Paginator(pages) data["pagination"] = dict( number=pages.pager.number, num_pages=pages.pager.paginator.num_pages, has_next=pages.pager.has_next(), has_previous=pages.pager.has_previous(), max=pages.max, span=pages.span, dotted_upper=pages.pager.dotted_upper, dotted_lower=pages.pager.dotted_lower, page_range=pages.pager.page_range, url=pages.pager.url, ) if not results: data["message"] = _("No pages matched the search criteria") json_data = json.dumps(data) if callback: json_data = callback + "(" + json_data + ");" return HttpResponse(json_data, content_type=content_type) data.update( { "product": product, "products": Product.objects.filter(visible=True), "pages": pages, "search_form": search_form, "advanced": False, } ) results_ = render(request, template, data) cache_period = settings.SEARCH_CACHE_PERIOD results_["Cache-Control"] = "max-age=%s" % (cache_period * 60) results_["Expires"] = (datetime.utcnow() + timedelta(minutes=cache_period)).strftime(EXPIRES_FMT) results_.set_cookie(settings.LAST_SEARCH_COOKIE, urlquote(cleaned["q"]), max_age=3600, secure=False, httponly=False) return results_
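# Both the basic and advanced search views above filter on "ternary"
# form fields (YES / NO / OFF) through a _ternary_filter helper that is
# defined elsewhere in the module. A minimal sketch of what it
# presumably does -- an assumption based on how the callers use it, not
# the verified implementation:
def _ternary_filter(ternary_value):
    """Map a TERNARY_YES/TERNARY_NO form value to the indexed boolean."""
    # Callers skip the filter entirely when the field is OFF (falsy),
    # so this only ever sees YES or NO.
    return ternary_value == constants.TERNARY_YES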
def simple_search(request, template=None): """ES-specific simple search view. This view is for end-user searching of the Knowledge Base and Support Forum. Filtering options are limited to: * product (`product=firefox`, for example, for only Firefox results) * document type (`w=2`, for example, for Support Forum questions only) """ # Redirect old Advanced Search URLs (?a={1,2}) to the new URL. a = request.GET.get('a') if a in ['1', '2']: new_url = reverse('search.advanced') + '?' + request.GET.urlencode() return HttpResponseRedirect(new_url) # JSON-specific variables is_json = (request.GET.get('format') == 'json') callback = request.GET.get('callback', '').strip() content_type = ( 'application/x-javascript' if callback else 'application/json') # Check callback is valid if is_json and callback and not jsonp_is_valid(callback): return HttpResponse( json.dumps({'error': _('Invalid callback function.')}), content_type=content_type, status=400) language = locale_or_default( request.GET.get('language', request.LANGUAGE_CODE)) r = request.GET.copy() # TODO: Do we really need to add this to the URL if it isn't already there? r['w'] = r.get('w', constants.WHERE_BASIC) # TODO: Break out a separate simple search form. search_form = SimpleSearchForm(r, auto_id=False) if not search_form.is_valid(): if is_json: return HttpResponse( json.dumps({'error': _('Invalid search data.')}), content_type=content_type, status=400) t = template if request.MOBILE else 'search/form.html' search_ = render(request, t, { 'advanced': False, 'request': request, 'search_form': search_form}) cache_period = settings.SEARCH_CACHE_PERIOD search_['Cache-Control'] = 'max-age={0!s}'.format((cache_period * 60)) search_['Expires'] = ( (datetime.utcnow() + timedelta(minutes=cache_period)) .strftime(EXPIRES_FMT)) return search_ cleaned = search_form.cleaned_data # On mobile, we default to just wiki results. if request.MOBILE and cleaned['w'] == constants.WHERE_BASIC: cleaned['w'] = constants.WHERE_WIKI page = max(smart_int(request.GET.get('page')), 1) offset = (page - 1) * settings.SEARCH_RESULTS_PER_PAGE lang = language.lower() if settings.LANGUAGES_DICT.get(lang): lang_name = settings.LANGUAGES_DICT[lang] else: lang_name = '' # We use a regular S here because we want to search across # multiple doctypes. searcher = (AnalyzerS().es(urls=settings.ES_URLS) .indexes(es_utils.read_index('default'))) wiki_f = F(model='wiki_document') question_f = F(model='questions_question') cleaned_q = cleaned['q'] products = cleaned['product'] if not products and 'all_products' not in request.GET: lowered_q = cleaned_q.lower() if 'thunderbird' in lowered_q: products.append('thunderbird') elif 'android' in lowered_q: products.append('mobile') elif ('ios' in lowered_q or 'ipad' in lowered_q or 'ipod' in lowered_q or 'iphone' in lowered_q): products.append('ios') elif 'firefox os' in lowered_q: products.append('firefox-os') elif 'firefox' in lowered_q: products.append('firefox') # Start - wiki filters if cleaned['w'] & constants.WHERE_WIKI: # Category filter wiki_f &= F(document_category__in=settings.SEARCH_DEFAULT_CATEGORIES) # Locale filter wiki_f &= F(document_locale=language) # Product filter for p in products: wiki_f &= F(product=p) # Archived bit wiki_f &= F(document_is_archived=False) # End - wiki filters # Start - support questions filters if cleaned['w'] & constants.WHERE_SUPPORT: # Has helpful answers is set by default if using basic search cleaned['has_helpful'] = constants.TERNARY_YES # No archived questions in default search. 
cleaned['is_archived'] = constants.TERNARY_NO # These filters are ternary, they can be either YES, NO, or OFF ternary_filters = ('has_helpful', 'is_archived') d = dict(('question_{0!s}'.format(filter_name), _ternary_filter(cleaned[filter_name])) for filter_name in ternary_filters if cleaned[filter_name]) if d: question_f &= F(**d) # Product filter for p in products: question_f &= F(product=p) # End - support questions filters # Done with all the filtery stuff--time to generate results # Combine all the filters and add to the searcher doctypes = [] final_filter = F() if cleaned['w'] & constants.WHERE_WIKI: doctypes.append(DocumentMappingType.get_mapping_type_name()) final_filter |= wiki_f if cleaned['w'] & constants.WHERE_SUPPORT: doctypes.append(QuestionMappingType.get_mapping_type_name()) final_filter |= question_f searcher = searcher.doctypes(*doctypes) searcher = searcher.filter(final_filter) if 'explain' in request.GET and request.GET['explain'] == '1': searcher = searcher.explain() documents = ComposedList() try: # Set up the highlights. Show the entire field highlighted. searcher = searcher.highlight( 'question_content', # support forum 'document_summary', # kb pre_tags=['<b>'], post_tags=['</b>'], number_of_fragments=0) # Set up boosts searcher = searcher.boost( question_title=4.0, question_content=3.0, question_answer_content=3.0, document_title=6.0, document_content=1.0, document_keywords=8.0, document_summary=2.0, # Text phrases in document titles and content get an extra # boost. document_title__match_phrase=10.0, document_content__match_phrase=8.0) # Build the query query_fields = chain(*[ cls.get_query_fields() for cls in [ DocumentMappingType, QuestionMappingType ] ]) query = {} # Create match and match_phrase queries for every field # we want to search. for field in query_fields: for query_type in ['match', 'match_phrase']: query['{0!s}__{1!s}'.format(field, query_type)] = cleaned_q # Transform the query to use locale aware analyzers. query = es_utils.es_query_with_analyzer(query, language) searcher = searcher.query(should=True, **query) num_results = min(searcher.count(), settings.SEARCH_MAX_RESULTS) # TODO - Can ditch the ComposedList here, but we need # something that paginate can use to figure out the paging. documents = ComposedList() documents.set_count(('results', searcher), num_results) results_per_page = settings.SEARCH_RESULTS_PER_PAGE pages = paginate(request, documents, results_per_page) # If we know there aren't any results, let's cheat and in # doing that, not hit ES again. if num_results == 0: searcher = [] else: # Get the documents we want to show and add them to # docs_for_page documents = documents[offset:offset + results_per_page] if len(documents) == 0: # If the user requested a page that's beyond the # pagination, then documents is an empty list and # there are no results to show. searcher = [] else: bounds = documents[0][1] searcher = searcher[bounds[0]:bounds[1]] results = [] for i, doc in enumerate(searcher): rank = i + offset if doc['model'] == 'wiki_document': summary = _build_es_excerpt(doc) if not summary: summary = doc['document_summary'] result = { 'title': doc['document_title'], 'type': 'document'} elif doc['model'] == 'questions_question': summary = _build_es_excerpt(doc) if not summary: # We're excerpting only question_content, so if # the query matched question_title or # question_answer_content, then there won't be any # question_content excerpts. In that case, just # show the question--but only the first 500 # characters. 
summary = bleach.clean( doc['question_content'], strip=True)[:500] result = { 'title': doc['question_title'], 'type': 'question', 'is_solved': doc['question_is_solved'], 'num_answers': doc['question_num_answers'], 'num_votes': doc['question_num_votes'], 'num_votes_past_week': doc['question_num_votes_past_week']} result['url'] = doc['url'] result['object'] = doc result['search_summary'] = summary result['rank'] = rank result['score'] = doc.es_meta.score result['explanation'] = escape(format_explanation( doc.es_meta.explanation)) result['id'] = doc['id'] results.append(result) except ES_EXCEPTIONS as exc: # Handle timeout and all those other transient errors with a # "Search Unavailable" rather than a Django error page. if is_json: return HttpResponse(json.dumps({'error': _('Search Unavailable')}), content_type=content_type, status=503) # Cheating here: Convert from 'Timeout()' to 'timeout' so # we have less code, but still have good stats. exc_bucket = repr(exc).lower().strip('()') statsd.incr('search.esunified.{0}'.format(exc_bucket)) log.exception(exc) t = 'search/mobile/down.html' if request.MOBILE else 'search/down.html' return render(request, t, {'q': cleaned['q']}, status=503) items = [(k, v) for k in search_form.fields for v in r.getlist(k) if v and k != 'a'] items.append(('a', '2')) fallback_results = None if num_results == 0: fallback_results = _fallback_results(language, cleaned['product']) product = Product.objects.filter(slug__in=cleaned['product']) if product: product_titles = [_(p.title, 'DB: products.Product.title') for p in product] else: product_titles = [_('All Products')] product_titles = ', '.join(product_titles) data = { 'num_results': num_results, 'results': results, 'fallback_results': fallback_results, 'product_titles': product_titles, 'q': cleaned['q'], 'w': cleaned['w'], 'lang_name': lang_name, } if is_json: # Models are not json serializable. for r in data['results']: del r['object'] data['total'] = len(data['results']) data['products'] = ([{'slug': p.slug, 'title': p.title} for p in Product.objects.filter(visible=True)]) if product: data['product'] = product[0].slug pages = Paginator(pages) data['pagination'] = dict( number=pages.pager.number, num_pages=pages.pager.paginator.num_pages, has_next=pages.pager.has_next(), has_previous=pages.pager.has_previous(), max=pages.max, span=pages.span, dotted_upper=pages.pager.dotted_upper, dotted_lower=pages.pager.dotted_lower, page_range=pages.pager.page_range, url=pages.pager.url, ) if not results: data['message'] = _('No pages matched the search criteria') json_data = json.dumps(data) if callback: json_data = callback + '(' + json_data + ');' return HttpResponse(json_data, content_type=content_type) data.update({ 'product': product, 'products': Product.objects.filter(visible=True), 'pages': pages, 'search_form': search_form, 'advanced': False, }) results_ = render(request, template, data) cache_period = settings.SEARCH_CACHE_PERIOD results_['Cache-Control'] = 'max-age={0!s}'.format((cache_period * 60)) results_['Expires'] = ( (datetime.utcnow() + timedelta(minutes=cache_period)) .strftime(EXPIRES_FMT)) results_.set_cookie(settings.LAST_SEARCH_COOKIE, urlquote(cleaned['q']), max_age=3600, secure=False, httponly=False) return results_
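# simple_search rejects JSONP requests whose callback fails
# jsonp_is_valid, which is not defined in this file. A hedged sketch of
# the kind of check it presumably performs -- the regex below is an
# illustrative identifier check, not the verified implementation:
import re

_CALLBACK_RE = re.compile(r'^[a-zA-Z_$][a-zA-Z0-9_$]*$')

def jsonp_is_valid(callback):
    # Only accept identifier-style callbacks so the JSONP response
    # cannot be abused to inject arbitrary script.
    return bool(_CALLBACK_RE.match(callback))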
def get_query_fields(self): return QuestionMappingType.get_query_fields()
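# For illustration, the query-building loops in the views above expand
# each query field into elasticutils-style match and match_phrase
# actions. A standalone example (the field names and query string are
# just samples):
query_fields = ['question_title', 'question_content']
query = {}
for field in query_fields:
    for query_type in ['match', 'match_phrase']:
        query['%s__%s' % (field, query_type)] = 'crash on startup'
# query is now:
# {'question_title__match': 'crash on startup',
#  'question_title__match_phrase': 'crash on startup',
#  'question_content__match': 'crash on startup',
#  'question_content__match_phrase': 'crash on startup'}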
def generate_simple_search(search_form, language, with_highlights=False): """Generates an S given a form :arg search_form: a validated SimpleSearch form :arg language: the language code :arg with_highlights: whether or not to ask for highlights :returns: a fully formed S """ # We use a regular S here because we want to search across # multiple doctypes. searcher = ( es_utils.AnalyzerS().es( urls=settings.ES_URLS, timeout=settings.ES_TIMEOUT, use_ssl=settings.ES_USE_SSL, http_auth=settings.ES_HTTP_AUTH, ) .indexes(es_utils.read_index('default')) ) cleaned = search_form.cleaned_data doctypes = [] final_filter = es_utils.F() cleaned_q = cleaned['q'] products = cleaned['product'] # Handle wiki filters if cleaned['w'] & constants.WHERE_WIKI: wiki_f = es_utils.F(model='wiki_document', document_category__in=settings.SEARCH_DEFAULT_CATEGORIES, document_locale=language, document_is_archived=False) for p in products: wiki_f &= es_utils.F(product=p) doctypes.append(DocumentMappingType.get_mapping_type_name()) final_filter |= wiki_f # Handle question filters if cleaned['w'] & constants.WHERE_SUPPORT: question_f = es_utils.F(model='questions_question', question_is_archived=False, question_has_helpful=True) for p in products: question_f &= es_utils.F(product=p) doctypes.append(QuestionMappingType.get_mapping_type_name()) final_filter |= question_f # Build a filter for those filters and add the other bits to # finish the search searcher = searcher.doctypes(*doctypes) searcher = searcher.filter(final_filter) if cleaned['explain']: searcher = searcher.explain() if with_highlights: # Set up the highlights. Show the entire field highlighted. searcher = searcher.highlight( 'question_content', # support forum 'document_summary', # kb pre_tags=['<b>'], post_tags=['</b>'], number_of_fragments=0 ) searcher = apply_boosts(searcher) # Build the query query_fields = chain(*[ cls.get_query_fields() for cls in [ DocumentMappingType, QuestionMappingType ] ]) query = {} # Create match and match_phrase queries for every field # we want to search. for field in query_fields: for query_type in ['match', 'match_phrase']: query['%s__%s' % (field, query_type)] = cleaned_q # Transform the query to use locale aware analyzers. query = es_utils.es_query_with_analyzer(query, language) searcher = searcher.query(should=True, **query) return searcher
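# generate_simple_search returns an unexecuted, unsliced S, so callers
# can count, slice, and iterate it themselves. A hedged usage sketch
# mirroring the count/slice pattern in simple_search above (the form
# data and locale are illustrative):
form = SimpleSearchForm({'q': 'firefox crashes', 'w': constants.WHERE_BASIC},
                        auto_id=False)
if form.is_valid():
    searcher = generate_simple_search(form, 'en-US', with_highlights=True)
    num_results = min(searcher.count(), settings.SEARCH_MAX_RESULTS)
    # Only pull the first page of documents from ES.
    page_docs = searcher[:settings.SEARCH_RESULTS_PER_PAGE]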
def advanced_search(request): """Elasticsearch-specific Advanced search view""" to_json = JSONRenderer().render template = 'search/results.html' # 1. Prep request. r = request.GET.copy() # TODO: Figure out how to get rid of 'a' and do it. # It's basically used to switch between showing the form and the results. a = request.GET.get('a', '2') # TODO: This is so the 'a=1' stays in the URL for pagination. r['a'] = 1 language = locale_or_default( request.GET.get('language', request.LANGUAGE_CODE)) r['language'] = language lang = language.lower() lang_name = settings.LANGUAGES_DICT.get(lang) or '' # 2. Build form. search_form = AdvancedSearchForm(r, auto_id=False) search_form.set_allowed_forums(request.user) # 3. Validate request. # Note: a == 2 means "show the form"--that's all we use it for now. if a == '2' or not search_form.is_valid(): if request.IS_JSON: return HttpResponse(json.dumps( {'error': _('Invalid search data.')}), content_type=request.CONTENT_TYPE, status=400) t = 'search/form.html' data = { 'advanced': True, 'request': request, 'search_form': search_form } # get value for search input from last search term. last_search = request.COOKIES.get(settings.LAST_SEARCH_COOKIE) # If there is any cached input from last search, pass it to template if last_search and 'q' not in r: cached_field = urlquote(last_search) data.update({'cached_field': cached_field}) return cache_control(render(request, t, data), settings.SEARCH_CACHE_PERIOD) # 4. Generate search. cleaned = search_form.cleaned_data # We use a regular S here because we want to search across # multiple doctypes. searcher = (AnalyzerS().es( urls=settings.ES_URLS, timeout=settings.ES_TIMEOUT, use_ssl=settings.ES_USE_SSL, http_auth=settings.ES_HTTP_AUTH, connection_class=RequestsHttpConnection).indexes( es_utils.read_index('default'))) doctypes = [] final_filter = F() unix_now = int(time.time()) interval_filters = (('created', cleaned['created'], cleaned['created_date']), ('updated', cleaned['updated'], cleaned['updated_date'])) # Start - wiki search configuration if cleaned['w'] & constants.WHERE_WIKI: wiki_f = F(model='wiki_document') # Category filter if cleaned['category']: wiki_f &= F(document_category__in=cleaned['category']) # Locale filter wiki_f &= F(document_locale=language) # Product filter products = cleaned['product'] for p in products: wiki_f &= F(product=p) # Topics filter topics = cleaned['topics'] for t in topics: wiki_f &= F(topic=t) # Archived bit if not cleaned['include_archived']: wiki_f &= F(document_is_archived=False) # Apply sortby sortby = cleaned['sortby_documents'] try: searcher = searcher.order_by(*constants.SORT_DOCUMENTS[sortby]) except IndexError: # Skip index errors because they imply the user is sending us sortby values # that aren't valid. 
pass doctypes.append(DocumentMappingType.get_mapping_type_name()) final_filter |= wiki_f # End - wiki search configuration # Start - support questions configuration if cleaned['w'] & constants.WHERE_SUPPORT: question_f = F(model='questions_question') # These filters are ternary, they can be either YES, NO, or OFF ternary_filters = ('is_locked', 'is_solved', 'has_answers', 'has_helpful', 'is_archived') d = dict(('question_%s' % filter_name, _ternary_filter(cleaned[filter_name])) for filter_name in ternary_filters if cleaned[filter_name]) if d: question_f &= F(**d) if cleaned['asked_by']: question_f &= F(question_creator=cleaned['asked_by']) if cleaned['answered_by']: question_f &= F(question_answer_creator=cleaned['answered_by']) q_tags = [t.strip() for t in cleaned['q_tags'].split(',')] for t in q_tags: if t: question_f &= F(question_tag=t) # Product filter products = cleaned['product'] for p in products: question_f &= F(product=p) # Topics filter topics = cleaned['topics'] for t in topics: question_f &= F(topic=t) # Note: num_voted (with a d) is a different field than num_votes # (with an s). The former is a dropdown and the latter is an # integer value. if cleaned['num_voted'] == constants.INTERVAL_BEFORE: question_f &= F( question_num_votes__lte=max(cleaned['num_votes'], 0)) elif cleaned['num_voted'] == constants.INTERVAL_AFTER: question_f &= F(question_num_votes__gte=cleaned['num_votes']) # Apply sortby sortby = cleaned['sortby'] try: searcher = searcher.order_by(*constants.SORT_QUESTIONS[sortby]) except IndexError: # Skip index errors because they imply the user is sending us sortby values # that aren't valid. pass # Apply created and updated filters for filter_name, filter_option, filter_date in interval_filters: if filter_option == constants.INTERVAL_BEFORE: before = { filter_name + '__gte': 0, filter_name + '__lte': max(filter_date, 0) } question_f &= F(**before) elif filter_option == constants.INTERVAL_AFTER: after = { filter_name + '__gte': min(filter_date, unix_now), filter_name + '__lte': unix_now } question_f &= F(**after) doctypes.append(QuestionMappingType.get_mapping_type_name()) final_filter |= question_f # End - support questions configuration # Start - discussion forum configuration if cleaned['w'] & constants.WHERE_DISCUSSION: discussion_f = F(model='forums_thread') if cleaned['author']: discussion_f &= F(post_author_ord=cleaned['author']) if cleaned['thread_type']: if constants.DISCUSSION_STICKY in cleaned['thread_type']: discussion_f &= F(post_is_sticky=1) if constants.DISCUSSION_LOCKED in cleaned['thread_type']: discussion_f &= F(post_is_locked=1) valid_forum_ids = [ f.id for f in Forum.authorized_forums_for_user(request.user) ] forum_ids = None if cleaned['forum']: forum_ids = [f for f in cleaned['forum'] if f in valid_forum_ids] # If we removed all the forums they wanted to look at or if # they didn't specify, then we filter on the list of all # forums they're authorized to look at. 
if not forum_ids: forum_ids = valid_forum_ids discussion_f &= F(post_forum_id__in=forum_ids) # Apply created and updated filters for filter_name, filter_option, filter_date in interval_filters: if filter_option == constants.INTERVAL_BEFORE: before = { filter_name + '__gte': 0, filter_name + '__lte': max(filter_date, 0) } discussion_f &= F(**before) elif filter_option == constants.INTERVAL_AFTER: after = { filter_name + '__gte': min(filter_date, unix_now), filter_name + '__lte': unix_now } discussion_f &= F(**after) doctypes.append(ThreadMappingType.get_mapping_type_name()) final_filter |= discussion_f # End - discussion forum configuration # Done with all the filtery stuff--time to generate results searcher = searcher.doctypes(*doctypes) searcher = searcher.filter(final_filter) if 'explain' in request.GET and request.GET['explain'] == '1': searcher = searcher.explain() cleaned_q = cleaned['q'] # Set up the highlights. Show the entire field highlighted. searcher = searcher.highlight( 'question_content', # support forum 'document_summary', # kb 'post_content', # contributor forum pre_tags=['<b>'], post_tags=['</b>'], number_of_fragments=0) searcher = apply_boosts(searcher) # Build the query if cleaned_q: query_fields = chain(*[ cls.get_query_fields() for cls in [DocumentMappingType, ThreadMappingType, QuestionMappingType] ]) query = {} # Create a simple_query_string query for every field we want to search. for field in query_fields: query['%s__sqs' % field] = cleaned_q # Transform the query to use locale aware analyzers. query = es_utils.es_query_with_analyzer(query, language) searcher = searcher.query(should=True, **query) searcher = searcher[:settings.SEARCH_MAX_RESULTS] # 5. Generate output pages = paginate(request, searcher, settings.SEARCH_RESULTS_PER_PAGE) if pages.paginator.count == 0: # If we know there aren't any results, show fallback_results. fallback_results = _fallback_results(language, cleaned['product']) results = [] else: fallback_results = None results = build_results_list(pages, request.IS_JSON) items = [(k, v) for k in search_form.fields for v in r.getlist(k) if v and k != 'a'] items.append(('a', '2')) product = Product.objects.filter(slug__in=cleaned['product']) if product: product_titles = [ pgettext('DB: products.Product.title', p.title) for p in product ] else: product_titles = [_('All Products')] # FIXME: This is probably bad l10n. 
product_titles = ', '.join(product_titles) data = { 'num_results': pages.paginator.count, 'results': results, 'fallback_results': fallback_results, 'product_titles': product_titles, 'q': cleaned['q'], 'w': cleaned['w'], 'lang_name': lang_name, 'advanced': True, 'products': Product.objects.filter(visible=True) } if request.IS_JSON: data['total'] = len(data['results']) data['products'] = [{ 'slug': p.slug, 'title': p.title } for p in data['products']] if product: data['product'] = product[0].slug pages = Paginator(pages) data['pagination'] = dict( number=pages.pager.number, num_pages=pages.pager.paginator.num_pages, has_next=pages.pager.has_next(), has_previous=pages.pager.has_previous(), max=pages.max, span=pages.span, dotted_upper=pages.pager.dotted_upper, dotted_lower=pages.pager.dotted_lower, page_range=pages.pager.page_range, url=pages.pager.url, ) if not results: data['message'] = _('No pages matched the search criteria') json_data = to_json(data) if request.JSON_CALLBACK: json_data = request.JSON_CALLBACK + '(' + json_data + ');' return HttpResponse(json_data, content_type=request.CONTENT_TYPE) data.update({ 'product': product, 'pages': pages, 'search_form': search_form }) resp = cache_control(render(request, template, data), settings.SEARCH_CACHE_PERIOD) resp.set_cookie(settings.LAST_SEARCH_COOKIE, urlquote(cleaned['q']), max_age=3600, secure=False, httponly=False) return resp
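# The created/updated interval logic above is duplicated between the
# question and discussion-forum branches. A hedged refactoring sketch
# that mirrors the inline logic (a hypothetical helper, not part of the
# original source):
def _interval_filter_kwargs(filter_name, filter_option, filter_date, unix_now):
    """Return F() kwargs covering the requested date interval."""
    if filter_option == constants.INTERVAL_BEFORE:
        # Everything from the epoch up to the given date (clamped to >= 0).
        return {filter_name + '__gte': 0,
                filter_name + '__lte': max(filter_date, 0)}
    if filter_option == constants.INTERVAL_AFTER:
        # Everything from the given date (clamped to <= now) up to now.
        return {filter_name + '__gte': min(filter_date, unix_now),
                filter_name + '__lte': unix_now}
    return {}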
def advanced_search(request, template=None): """Elasticsearch-specific Advanced search view""" to_json = JSONRenderer().render # 1. Prep request. r = request.GET.copy() # TODO: Figure out how to get rid of 'a' and do it. # It's basically used to switch between showing the form and the results. a = request.GET.get('a', '2') # TODO: This is so the 'a=1' stays in the URL for pagination. r['a'] = 1 language = locale_or_default(request.GET.get('language', request.LANGUAGE_CODE)) r['language'] = language lang = language.lower() lang_name = settings.LANGUAGES_DICT.get(lang) or '' # 2. Build form. search_form = AdvancedSearchForm(r, auto_id=False) search_form.set_allowed_forums(request.user) # 3. Validate request. # Note: a == 2 means "show the form"--that's all we use it for now. if a == '2' or not search_form.is_valid(): if request.IS_JSON: return HttpResponse( json.dumps({'error': _('Invalid search data.')}), content_type=request.CONTENT_TYPE, status=400) t = template if request.MOBILE else 'search/form.html' data = {'advanced': True, 'request': request, 'search_form': search_form} # get value for search input from last search term. last_search = request.COOKIES.get(settings.LAST_SEARCH_COOKIE) # If there is any cached input from last search, pass it to template if last_search and 'q' not in r: cached_field = urlquote(last_search) data.update({'cached_field': cached_field}) return cache_control( render(request, t, data), settings.SEARCH_CACHE_PERIOD) # 4. Generate search. cleaned = search_form.cleaned_data # On mobile, we default to just wiki results. if request.MOBILE and cleaned['w'] == constants.WHERE_BASIC: cleaned['w'] = constants.WHERE_WIKI # We use a regular S here because we want to search across # multiple doctypes. searcher = (AnalyzerS().es(urls=settings.ES_URLS) .indexes(es_utils.read_index('default'))) doctypes = [] final_filter = F() unix_now = int(time.time()) interval_filters = ( ('created', cleaned['created'], cleaned['created_date']), ('updated', cleaned['updated'], cleaned['updated_date']) ) # Start - wiki search configuration if cleaned['w'] & constants.WHERE_WIKI: wiki_f = F(model='wiki_document') # Category filter if cleaned['category']: wiki_f &= F(document_category__in=cleaned['category']) # Locale filter wiki_f &= F(document_locale=language) # Product filter products = cleaned['product'] for p in products: wiki_f &= F(product=p) # Topics filter topics = cleaned['topics'] for t in topics: wiki_f &= F(topic=t) # Archived bit if not cleaned['include_archived']: wiki_f &= F(document_is_archived=False) # Apply sortby sortby = cleaned['sortby_documents'] try: searcher = searcher.order_by(*constants.SORT_DOCUMENTS[sortby]) except IndexError: # Skip index errors because they imply the user is sending us sortby values # that aren't valid. 
pass doctypes.append(DocumentMappingType.get_mapping_type_name()) final_filter |= wiki_f # End - wiki search configuration # Start - support questions configuration if cleaned['w'] & constants.WHERE_SUPPORT: question_f = F(model='questions_question') # These filters are ternary, they can be either YES, NO, or OFF ternary_filters = ('is_locked', 'is_solved', 'has_answers', 'has_helpful', 'is_archived') d = dict(('question_%s' % filter_name, _ternary_filter(cleaned[filter_name])) for filter_name in ternary_filters if cleaned[filter_name]) if d: question_f &= F(**d) if cleaned['asked_by']: question_f &= F(question_creator=cleaned['asked_by']) if cleaned['answered_by']: question_f &= F(question_answer_creator=cleaned['answered_by']) q_tags = [t.strip() for t in cleaned['q_tags'].split(',')] for t in q_tags: if t: question_f &= F(question_tag=t) # Product filter products = cleaned['product'] for p in products: question_f &= F(product=p) # Topics filter topics = cleaned['topics'] for t in topics: question_f &= F(topic=t) # Note: num_voted (with a d) is a different field than num_votes # (with an s). The former is a dropdown and the latter is an # integer value. if cleaned['num_voted'] == constants.INTERVAL_BEFORE: question_f &= F(question_num_votes__lte=max(cleaned['num_votes'], 0)) elif cleaned['num_voted'] == constants.INTERVAL_AFTER: question_f &= F(question_num_votes__gte=cleaned['num_votes']) # Apply sortby sortby = cleaned['sortby'] try: searcher = searcher.order_by(*constants.SORT_QUESTIONS[sortby]) except IndexError: # Skip index errors because they imply the user is sending us sortby values # that aren't valid. pass # Apply created and updated filters for filter_name, filter_option, filter_date in interval_filters: if filter_option == constants.INTERVAL_BEFORE: before = {filter_name + '__gte': 0, filter_name + '__lte': max(filter_date, 0)} question_f &= F(**before) elif filter_option == constants.INTERVAL_AFTER: after = {filter_name + '__gte': min(filter_date, unix_now), filter_name + '__lte': unix_now} question_f &= F(**after) doctypes.append(QuestionMappingType.get_mapping_type_name()) final_filter |= question_f # End - support questions configuration # Start - discussion forum configuration if cleaned['w'] & constants.WHERE_DISCUSSION: discussion_f = F(model='forums_thread') if cleaned['author']: discussion_f &= F(post_author_ord=cleaned['author']) if cleaned['thread_type']: if constants.DISCUSSION_STICKY in cleaned['thread_type']: discussion_f &= F(post_is_sticky=1) if constants.DISCUSSION_LOCKED in cleaned['thread_type']: discussion_f &= F(post_is_locked=1) valid_forum_ids = [f.id for f in Forum.authorized_forums_for_user(request.user)] forum_ids = None if cleaned['forum']: forum_ids = [f for f in cleaned['forum'] if f in valid_forum_ids] # If we removed all the forums they wanted to look at or if # they didn't specify, then we filter on the list of all # forums they're authorized to look at. 
if not forum_ids: forum_ids = valid_forum_ids discussion_f &= F(post_forum_id__in=forum_ids) # Apply created and updated filters for filter_name, filter_option, filter_date in interval_filters: if filter_option == constants.INTERVAL_BEFORE: before = {filter_name + '__gte': 0, filter_name + '__lte': max(filter_date, 0)} discussion_f &= F(**before) elif filter_option == constants.INTERVAL_AFTER: after = {filter_name + '__gte': min(filter_date, unix_now), filter_name + '__lte': unix_now} discussion_f &= F(**after) doctypes.append(ThreadMappingType.get_mapping_type_name()) final_filter |= discussion_f # End - discussion forum configuration # Done with all the filtery stuff--time to generate results searcher = searcher.doctypes(*doctypes) searcher = searcher.filter(final_filter) if 'explain' in request.GET and request.GET['explain'] == '1': searcher = searcher.explain() cleaned_q = cleaned['q'] # Set up the highlights. Show the entire field highlighted. searcher = searcher.highlight( 'question_content', # support forum 'document_summary', # kb 'post_content', # contributor forum pre_tags=['<b>'], post_tags=['</b>'], number_of_fragments=0) searcher = apply_boosts(searcher) # Build the query if cleaned_q: query_fields = chain(*[ cls.get_query_fields() for cls in [ DocumentMappingType, ThreadMappingType, QuestionMappingType ] ]) query = {} # Create a simple_query_string query for every field we want to search. for field in query_fields: query['%s__sqs' % field] = cleaned_q # Transform the query to use locale aware analyzers. query = es_utils.es_query_with_analyzer(query, language) searcher = searcher.query(should=True, **query) searcher = searcher[:settings.SEARCH_MAX_RESULTS] # 5. Generate output pages = paginate(request, searcher, settings.SEARCH_RESULTS_PER_PAGE) if pages.paginator.count == 0: # If we know there aren't any results, show fallback_results. fallback_results = _fallback_results(language, cleaned['product']) results = [] else: fallback_results = None results = build_results_list(pages, request.IS_JSON) items = [(k, v) for k in search_form.fields for v in r.getlist(k) if v and k != 'a'] items.append(('a', '2')) product = Product.objects.filter(slug__in=cleaned['product']) if product: product_titles = [pgettext('DB: products.Product.title', p.title) for p in product] else: product_titles = [_('All Products')] # FIXME: This is probably bad l10n. 
product_titles = ', '.join(product_titles) data = { 'num_results': pages.paginator.count, 'results': results, 'fallback_results': fallback_results, 'product_titles': product_titles, 'q': cleaned['q'], 'w': cleaned['w'], 'lang_name': lang_name, 'advanced': True, 'products': Product.objects.filter(visible=True) } if request.IS_JSON: data['total'] = len(data['results']) data['products'] = [{'slug': p.slug, 'title': p.title} for p in data['products']] if product: data['product'] = product[0].slug pages = Paginator(pages) data['pagination'] = dict( number=pages.pager.number, num_pages=pages.pager.paginator.num_pages, has_next=pages.pager.has_next(), has_previous=pages.pager.has_previous(), max=pages.max, span=pages.span, dotted_upper=pages.pager.dotted_upper, dotted_lower=pages.pager.dotted_lower, page_range=pages.pager.page_range, url=pages.pager.url, ) if not results: data['message'] = _('No pages matched the search criteria') json_data = to_json(data) if request.JSON_CALLBACK: json_data = request.JSON_CALLBACK + '(' + json_data + ');' return HttpResponse(json_data, content_type=request.CONTENT_TYPE) data.update({ 'product': product, 'pages': pages, 'search_form': search_form }) resp = cache_control(render(request, template, data), settings.SEARCH_CACHE_PERIOD) resp.set_cookie(settings.LAST_SEARCH_COOKIE, urlquote(cleaned['q']), max_age=3600, secure=False, httponly=False) return resp
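# apply_boosts is called by generate_simple_search and both
# advanced_search variants but defined elsewhere. Judging from the
# inline .boost(...) call in the first simple_search version, it
# presumably centralizes those same field weights; a sketch under that
# assumption, not the verified implementation:
def apply_boosts(searcher):
    """Return the searcher with the standard field boosts applied."""
    return searcher.boost(
        question_title=4.0,
        question_content=3.0,
        question_answer_content=3.0,
        document_title=6.0,
        document_content=1.0,
        document_keywords=8.0,
        document_summary=2.0,
        # Phrase matches in KB titles and content get an extra boost.
        document_title__match_phrase=10.0,
        document_content__match_phrase=8.0)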