def top_contributors_l10n(start=None, end=None, locale=None, product=None,
                          count=10, page=1, use_cache=True):
    """Get the top l10n contributors for the KB."""
    if use_cache:
        cache_key = u'{}_{}_{}_{}_{}_{}'.format(start, end, locale, product,
                                                count, page)
        cache_key = hashlib.sha1(cache_key.encode('utf-8')).hexdigest()
        cache_key = u'top_contributors_l10n_{}'.format(cache_key)
        cached = cache.get(cache_key, None)
        if cached:
            return cached

    # Get the user ids and contribution count of the top contributors.
    query = RevisionMetricsMappingType.search()
    if locale is None:
        # If there is no locale specified, exclude en-US only. The rest are
        # l10n.
        query = query.filter(~F(locale=settings.WIKI_DEFAULT_LANGUAGE))
    query = _apply_filters(query, start, end, locale, product)

    revisions = [q.id for q in query.all()[:HUGE_NUMBER]]
    users = (User.objects
             .filter(created_revisions__in=revisions)
             .annotate(query_count=Count('created_revisions'))
             .order_by('-query_count'))
    counts = _get_creator_counts(users, count, page)

    if use_cache:
        cache.set(cache_key, counts, 60 * 15)  # 15 minutes
    return counts
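
# Example usage (illustrative only; assumes Django settings, the cache
# backend, and the ES index are configured). A caller might fetch the
# first page of the top ten German l10n contributors like so:
#
#   counts = top_contributors_l10n(locale='de', count=10, page=1)
#
# With use_cache=True (the default), repeated calls with the same
# arguments within 15 minutes are served from the cache under a key
# derived from the SHA-1 hash of the argument tuple.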
def top_contributors_l10n(start=None, end=None, locale=None,
                          product=None, count=10):
    """Get the top l10n contributors for the KB."""
    # Get the user ids and contribution count of the top contributors.
    query = (RevisionMetricsMappingType.search()
             .facet('creator_id', filtered=True, size=count))
    if locale is None:
        # If there is no locale specified, exclude en-US only. The rest are
        # l10n.
        query = query.filter(~F(locale=settings.WIKI_DEFAULT_LANGUAGE))
    query = _apply_filters(query, start, end, locale, product)

    return _get_creator_counts(query, count)
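
# Sketch of the shared helper both variants above rely on. This is an
# assumption for illustration -- the real _apply_filters is defined
# elsewhere in this module -- but given the call sites it plausibly
# narrows the query by date range, locale, and product, roughly:
#
#   def _apply_filters(query, start, end, locale, product):
#       if start is not None:
#           query = query.filter(created__gte=start)
#       if end is not None:
#           query = query.filter(created__lt=end)
#       if locale is not None:
#           query = query.filter(locale=locale)
#       if product is not None:
#           query = query.filter(product=product)
#       return query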
def advanced_search(request):
    """Elasticsearch-specific Advanced search view"""
    to_json = JSONRenderer().render
    template = 'search/results.html'

    # 1. Prep request.
    r = request.GET.copy()
    # TODO: Figure out how to get rid of 'a' and do it. It basically
    # is used to switch between showing the form or results.
    a = request.GET.get('a', '2')
    # TODO: This is so the 'a=1' stays in the URL for pagination.
    r['a'] = 1

    language = locale_or_default(
        request.GET.get('language', request.LANGUAGE_CODE))
    r['language'] = language
    lang = language.lower()
    lang_name = settings.LANGUAGES_DICT.get(lang) or ''

    # 2. Build form.
    search_form = AdvancedSearchForm(r, auto_id=False)
    search_form.set_allowed_forums(request.user)

    # 3. Validate request.
    # Note: a == 2 means "show the form"--that's all we use it for now.
    if a == '2' or not search_form.is_valid():
        if request.IS_JSON:
            return HttpResponse(
                json.dumps({'error': _('Invalid search data.')}),
                content_type=request.CONTENT_TYPE,
                status=400)

        t = 'search/form.html'
        data = {'advanced': True,
                'request': request,
                'search_form': search_form}

        # Get the value for the search input from the last search term.
        last_search = request.COOKIES.get(settings.LAST_SEARCH_COOKIE)
        # If there is any cached input from the last search, pass it to
        # the template.
        if last_search and 'q' not in r:
            cached_field = urlquote(last_search)
            data.update({'cached_field': cached_field})

        return cache_control(
            render(request, t, data), settings.SEARCH_CACHE_PERIOD)

    # 4. Generate search.
    cleaned = search_form.cleaned_data

    # We use a regular S here because we want to search across
    # multiple doctypes.
    searcher = (AnalyzerS().es(urls=settings.ES_URLS,
                               timeout=settings.ES_TIMEOUT,
                               use_ssl=settings.ES_USE_SSL,
                               http_auth=settings.ES_HTTP_AUTH,
                               connection_class=RequestsHttpConnection)
                .indexes(es_utils.read_index('default')))

    doctypes = []
    final_filter = F()
    unix_now = int(time.time())
    interval_filters = (
        ('created', cleaned['created'], cleaned['created_date']),
        ('updated', cleaned['updated'], cleaned['updated_date']))

    # Start - wiki search configuration
    if cleaned['w'] & constants.WHERE_WIKI:
        wiki_f = F(model='wiki_document')

        # Category filter
        if cleaned['category']:
            wiki_f &= F(document_category__in=cleaned['category'])

        # Locale filter
        wiki_f &= F(document_locale=language)

        # Product filter
        products = cleaned['product']
        for p in products:
            wiki_f &= F(product=p)

        # Topics filter
        topics = cleaned['topics']
        for t in topics:
            wiki_f &= F(topic=t)

        # Archived bit
        if not cleaned['include_archived']:
            wiki_f &= F(document_is_archived=False)

        # Apply sortby
        sortby = cleaned['sortby_documents']
        try:
            searcher = searcher.order_by(*constants.SORT_DOCUMENTS[sortby])
        except IndexError:
            # Skip index errors because they imply the user is sending
            # us sortby values that aren't valid.
            pass

        doctypes.append(DocumentMappingType.get_mapping_type_name())
        final_filter |= wiki_f
    # End - wiki search configuration

    # Start - support questions configuration
    if cleaned['w'] & constants.WHERE_SUPPORT:
        question_f = F(model='questions_question')

        # These filters are ternary: they can be either YES, NO, or OFF.
        ternary_filters = ('is_locked', 'is_solved', 'has_answers',
                           'has_helpful', 'is_archived')
        d = dict(('question_%s' % filter_name,
                  _ternary_filter(cleaned[filter_name]))
                 for filter_name in ternary_filters
                 if cleaned[filter_name])
        if d:
            question_f &= F(**d)

        if cleaned['asked_by']:
            question_f &= F(question_creator=cleaned['asked_by'])

        if cleaned['answered_by']:
            question_f &= F(question_answer_creator=cleaned['answered_by'])

        q_tags = [t.strip() for t in cleaned['q_tags'].split(',')]
        for t in q_tags:
            if t:
                question_f &= F(question_tag=t)

        # Product filter
        products = cleaned['product']
        for p in products:
            question_f &= F(product=p)

        # Topics filter
        topics = cleaned['topics']
        for t in topics:
            question_f &= F(topic=t)

        # Note: num_voted (with a d) is a different field than num_votes
        # (with an s). The former is a dropdown and the latter is an
        # integer value.
        if cleaned['num_voted'] == constants.INTERVAL_BEFORE:
            question_f &= F(
                question_num_votes__lte=max(cleaned['num_votes'], 0))
        elif cleaned['num_voted'] == constants.INTERVAL_AFTER:
            question_f &= F(question_num_votes__gte=cleaned['num_votes'])

        # Apply sortby
        sortby = cleaned['sortby']
        try:
            searcher = searcher.order_by(*constants.SORT_QUESTIONS[sortby])
        except IndexError:
            # Skip index errors because they imply the user is sending
            # us sortby values that aren't valid.
            pass

        # Apply created and updated filters
        for filter_name, filter_option, filter_date in interval_filters:
            if filter_option == constants.INTERVAL_BEFORE:
                before = {filter_name + '__gte': 0,
                          filter_name + '__lte': max(filter_date, 0)}
                question_f &= F(**before)
            elif filter_option == constants.INTERVAL_AFTER:
                after = {filter_name + '__gte': min(filter_date, unix_now),
                         filter_name + '__lte': unix_now}
                question_f &= F(**after)

        doctypes.append(QuestionMappingType.get_mapping_type_name())
        final_filter |= question_f
    # End - support questions configuration

    # Start - discussion forum configuration
    if cleaned['w'] & constants.WHERE_DISCUSSION:
        discussion_f = F(model='forums_thread')

        if cleaned['author']:
            discussion_f &= F(post_author_ord=cleaned['author'])

        if cleaned['thread_type']:
            if constants.DISCUSSION_STICKY in cleaned['thread_type']:
                discussion_f &= F(post_is_sticky=1)
            if constants.DISCUSSION_LOCKED in cleaned['thread_type']:
                discussion_f &= F(post_is_locked=1)

        valid_forum_ids = [
            f.id for f in Forum.authorized_forums_for_user(request.user)]

        forum_ids = None
        if cleaned['forum']:
            forum_ids = [f for f in cleaned['forum'] if f in valid_forum_ids]

        # If we removed all the forums they wanted to look at or if
        # they didn't specify, then we filter on the list of all
        # forums they're authorized to look at.
        if not forum_ids:
            forum_ids = valid_forum_ids

        discussion_f &= F(post_forum_id__in=forum_ids)

        # Apply created and updated filters
        for filter_name, filter_option, filter_date in interval_filters:
            if filter_option == constants.INTERVAL_BEFORE:
                before = {filter_name + '__gte': 0,
                          filter_name + '__lte': max(filter_date, 0)}
                discussion_f &= F(**before)
            elif filter_option == constants.INTERVAL_AFTER:
                after = {filter_name + '__gte': min(filter_date, unix_now),
                         filter_name + '__lte': unix_now}
                discussion_f &= F(**after)

        doctypes.append(ThreadMappingType.get_mapping_type_name())
        final_filter |= discussion_f
    # End - discussion forum configuration

    # Done with all the filtery stuff--time to generate results

    searcher = searcher.doctypes(*doctypes)
    searcher = searcher.filter(final_filter)

    if 'explain' in request.GET and request.GET['explain'] == '1':
        searcher = searcher.explain()

    cleaned_q = cleaned['q']

    # Set up the highlights. Show the entire field highlighted.
    searcher = searcher.highlight(
        'question_content',  # support forum
        'document_summary',  # kb
        'post_content',  # contributor forum
        pre_tags=['<b>'],
        post_tags=['</b>'],
        number_of_fragments=0)

    searcher = apply_boosts(searcher)

    # Build the query
    if cleaned_q:
        query_fields = chain(*[
            cls.get_query_fields() for cls in
            [DocumentMappingType, ThreadMappingType, QuestionMappingType]])
        query = {}
        # Create a simple_query_string query for every field we want to
        # search.
        for field in query_fields:
            query['%s__sqs' % field] = cleaned_q

        # Transform the query to use locale-aware analyzers.
        query = es_utils.es_query_with_analyzer(query, language)

        searcher = searcher.query(should=True, **query)

    searcher = searcher[:settings.SEARCH_MAX_RESULTS]

    # 5. Generate output.
    pages = paginate(request, searcher, settings.SEARCH_RESULTS_PER_PAGE)

    if pages.paginator.count == 0:
        # If we know there aren't any results, show fallback_results.
        fallback_results = _fallback_results(language, cleaned['product'])
        results = []
    else:
        fallback_results = None
        results = build_results_list(pages, request.IS_JSON)

    items = [(k, v) for k in search_form.fields
             for v in r.getlist(k) if v and k != 'a']
    items.append(('a', '2'))

    product = Product.objects.filter(slug__in=cleaned['product'])
    if product:
        product_titles = [pgettext('DB: products.Product.title', p.title)
                          for p in product]
    else:
        product_titles = [_('All Products')]

    # FIXME: This is probably bad l10n.
    product_titles = ', '.join(product_titles)

    data = {
        'num_results': pages.paginator.count,
        'results': results,
        'fallback_results': fallback_results,
        'product_titles': product_titles,
        'q': cleaned['q'],
        'w': cleaned['w'],
        'lang_name': lang_name,
        'advanced': True,
        'products': Product.objects.filter(visible=True)}

    if request.IS_JSON:
        data['total'] = len(data['results'])
        data['products'] = [{'slug': p.slug, 'title': p.title}
                            for p in data['products']]

        if product:
            data['product'] = product[0].slug

        pages = Paginator(pages)
        data['pagination'] = dict(
            number=pages.pager.number,
            num_pages=pages.pager.paginator.num_pages,
            has_next=pages.pager.has_next(),
            has_previous=pages.pager.has_previous(),
            max=pages.max,
            span=pages.span,
            dotted_upper=pages.pager.dotted_upper,
            dotted_lower=pages.pager.dotted_lower,
            page_range=pages.pager.page_range,
            url=pages.pager.url)

        if not results:
            data['message'] = _('No pages matched the search criteria')

        json_data = to_json(data)
        if request.JSON_CALLBACK:
            json_data = request.JSON_CALLBACK + '(' + json_data + ');'

        return HttpResponse(json_data, content_type=request.CONTENT_TYPE)

    data.update({
        'product': product,
        'pages': pages,
        'search_form': search_form})

    resp = cache_control(
        render(request, template, data), settings.SEARCH_CACHE_PERIOD)
    resp.set_cookie(settings.LAST_SEARCH_COOKIE, urlquote(cleaned['q']),
                    max_age=3600, secure=False, httponly=False)
    return resp
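
# Example requests (illustrative; the parameter names come from the view
# above, the values and the URL path are made up for this sketch):
#
#   /search/advanced?a=1&w=1&q=crash&language=de&sortby_documents=1
#       -> KB-only results for "crash" in German, with a document sort.
#   /search/advanced?a=2
#       -> renders the advanced search form instead of results.
#
# Adding &explain=1 to either makes the view attach ES scoring
# explanations to the generated query.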
def search(request, template=None):
    """ES-specific search view"""

    # JSON-specific variables
    is_json = (request.GET.get('format') == 'json')
    callback = request.GET.get('callback', '').strip()
    mimetype = 'application/x-javascript' if callback else 'application/json'

    # Search "Expires" header format
    expires_fmt = '%A, %d %B %Y %H:%M:%S GMT'

    # Check callback is valid
    if is_json and callback and not jsonp_is_valid(callback):
        return HttpResponse(
            json.dumps({'error': _('Invalid callback function.')}),
            mimetype=mimetype, status=400)

    language = locale_or_default(
        request.GET.get('language', request.LANGUAGE_CODE))
    r = request.GET.copy()
    a = request.GET.get('a', '0')

    # Search default values
    try:
        category = (map(int, r.getlist('category')) or
                    settings.SEARCH_DEFAULT_CATEGORIES)
    except ValueError:
        category = settings.SEARCH_DEFAULT_CATEGORIES
    r.setlist('category', category)

    # Basic form
    if a == '0':
        r['w'] = r.get('w', constants.WHERE_BASIC)
    # Advanced form
    if a == '2':
        r['language'] = language
        r['a'] = '1'

    # TODO: Rewrite so SearchForm is unbound initially and we can use
    # `initial` on the form fields.
    if 'include_archived' not in r:
        r['include_archived'] = False

    search_form = SearchForm(r)
    search_form.set_allowed_forums(request.user)

    if not search_form.is_valid() or a == '2':
        if is_json:
            return HttpResponse(
                json.dumps({'error': _('Invalid search data.')}),
                mimetype=mimetype, status=400)

        t = template if request.MOBILE else 'search/form.html'
        search_ = render(request, t, {
            'advanced': a, 'request': request, 'search_form': search_form})
        search_['Cache-Control'] = 'max-age=%s' % \
            (settings.SEARCH_CACHE_PERIOD * 60)
        search_['Expires'] = (
            datetime.utcnow() +
            timedelta(minutes=settings.SEARCH_CACHE_PERIOD)
        ).strftime(expires_fmt)
        return search_

    cleaned = search_form.cleaned_data

    if request.MOBILE and cleaned['w'] == constants.WHERE_BASIC:
        cleaned['w'] = constants.WHERE_WIKI

    page = max(smart_int(request.GET.get('page')), 1)
    offset = (page - 1) * settings.SEARCH_RESULTS_PER_PAGE

    lang = language.lower()
    if settings.LANGUAGES.get(lang):
        lang_name = settings.LANGUAGES[lang]
    else:
        lang_name = ''

    # We use a regular S here because we want to search across
    # multiple doctypes.
    searcher = (AnalyzerS().es(urls=settings.ES_URLS)
                .indexes(es_utils.READ_INDEX))

    wiki_f = F(model='wiki_document')
    question_f = F(model='questions_question')
    discussion_f = F(model='forums_thread')

    # Start - wiki filters
    if cleaned['w'] & constants.WHERE_WIKI:
        # Category filter
        if cleaned['category']:
            wiki_f &= F(document_category__in=cleaned['category'])

        # Locale filter
        wiki_f &= F(document_locale=language)

        # Product filter
        products = cleaned['product']
        for p in products:
            wiki_f &= F(product=p)

        # Topics filter
        topics = cleaned['topics']
        for t in topics:
            wiki_f &= F(topic=t)

        # Archived bit
        if a == '0' and not cleaned['include_archived']:
            # Default to NO for basic search:
            cleaned['include_archived'] = False
        if not cleaned['include_archived']:
            wiki_f &= F(document_is_archived=False)
    # End - wiki filters

    # Start - support questions filters
    if cleaned['w'] & constants.WHERE_SUPPORT:
        # Solved is set by default if using basic search
        if a == '0' and not cleaned['has_helpful']:
            cleaned['has_helpful'] = constants.TERNARY_YES

        # These filters are ternary: they can be either YES, NO, or OFF.
        ternary_filters = ('is_locked', 'is_solved', 'has_answers',
                           'has_helpful')
        d = dict(('question_%s' % filter_name,
                  _ternary_filter(cleaned[filter_name]))
                 for filter_name in ternary_filters
                 if cleaned[filter_name])
        if d:
            question_f &= F(**d)

        if cleaned['asked_by']:
            question_f &= F(question_creator=cleaned['asked_by'])

        if cleaned['answered_by']:
            question_f &= F(question_answer_creator=cleaned['answered_by'])

        q_tags = [t.strip() for t in cleaned['q_tags'].split(',')]
        for t in q_tags:
            if t:
                question_f &= F(question_tag=t)

        # Product filter
        products = cleaned['product']
        for p in products:
            question_f &= F(product=p)

        # Topics filter
        topics = cleaned['topics']
        for t in topics:
            question_f &= F(topic=t)
    # End - support questions filters

    # Start - discussion forum filters
    if cleaned['w'] & constants.WHERE_DISCUSSION:
        if cleaned['author']:
            discussion_f &= F(post_author_ord=cleaned['author'])

        if cleaned['thread_type']:
            if constants.DISCUSSION_STICKY in cleaned['thread_type']:
                discussion_f &= F(post_is_sticky=1)
            if constants.DISCUSSION_LOCKED in cleaned['thread_type']:
                discussion_f &= F(post_is_locked=1)

        valid_forum_ids = [
            f.id for f in Forum.authorized_forums_for_user(request.user)]

        forum_ids = None
        if cleaned['forum']:
            forum_ids = [f for f in cleaned['forum'] if f in valid_forum_ids]

        # If we removed all the forums they wanted to look at or if
        # they didn't specify, then we filter on the list of all
        # forums they're authorized to look at.
        if not forum_ids:
            forum_ids = valid_forum_ids

        discussion_f &= F(post_forum_id__in=forum_ids)
    # End - discussion forum filters

    # Created and updated filters
    unix_now = int(time.time())
    interval_filters = (
        ('created', cleaned['created'], cleaned['created_date']),
        ('updated', cleaned['updated'], cleaned['updated_date']))
    for filter_name, filter_option, filter_date in interval_filters:
        if filter_option == constants.INTERVAL_BEFORE:
            before = {filter_name + '__gte': 0,
                      filter_name + '__lte': max(filter_date, 0)}
            discussion_f &= F(**before)
            question_f &= F(**before)
        elif filter_option == constants.INTERVAL_AFTER:
            after = {filter_name + '__gte': min(filter_date, unix_now),
                     filter_name + '__lte': unix_now}
            discussion_f &= F(**after)
            question_f &= F(**after)

    # In basic search, we limit questions to those created within the
    # last SEARCH_DEFAULT_MAX_QUESTION_AGE seconds.
    if a == '0':
        start_date = unix_now - settings.SEARCH_DEFAULT_MAX_QUESTION_AGE
        question_f &= F(created__gte=start_date)

    # Note: num_voted (with a d) is a different field than num_votes
    # (with an s). The former is a dropdown and the latter is an
    # integer value.
    if cleaned['num_voted'] == constants.INTERVAL_BEFORE:
        question_f &= F(question_num_votes__lte=max(cleaned['num_votes'], 0))
    elif cleaned['num_voted'] == constants.INTERVAL_AFTER:
        question_f &= F(question_num_votes__gte=cleaned['num_votes'])

    # Done with all the filtery stuff--time to generate results

    # Combine all the filters and add to the searcher
    doctypes = []
    final_filter = F()

    if cleaned['w'] & constants.WHERE_WIKI:
        doctypes.append(DocumentMappingType.get_mapping_type_name())
        final_filter |= wiki_f

    if cleaned['w'] & constants.WHERE_SUPPORT:
        doctypes.append(QuestionMappingType.get_mapping_type_name())
        final_filter |= question_f

    if cleaned['w'] & constants.WHERE_DISCUSSION:
        doctypes.append(ThreadMappingType.get_mapping_type_name())
        final_filter |= discussion_f

    searcher = searcher.doctypes(*doctypes)
    searcher = searcher.filter(final_filter)

    if 'explain' in request.GET and request.GET['explain'] == '1':
        searcher = searcher.explain()

    documents = ComposedList()

    try:
        cleaned_q = cleaned['q']

        # Set up the highlights. Show the entire field highlighted.
        searcher = searcher.highlight(
            'question_content',  # support forum
            'document_summary',  # kb
            'post_content',  # contributor forum
            pre_tags=['<b>'],
            post_tags=['</b>'],
            number_of_fragments=0)

        # Set up boosts
        searcher = searcher.boost(
            question_title=4.0,
            question_content=3.0,
            question_answer_content=3.0,
            post_title=2.0,
            post_content=1.0,
            document_title=6.0,
            document_content=1.0,
            document_keywords=8.0,
            document_summary=2.0,

            # Text phrases in document titles and content get an extra
            # boost.
            document_title__text_phrase=10.0,
            document_content__text_phrase=8.0)

        # Apply sortby for advanced search of questions
        if cleaned['w'] == constants.WHERE_SUPPORT:
            sortby = cleaned['sortby']
            try:
                searcher = searcher.order_by(
                    *constants.SORT_QUESTIONS[sortby])
            except IndexError:
                # Skip index errors because they imply the user is
                # sending us sortby values that aren't valid.
                pass

        # Apply sortby for advanced search of kb documents
        if cleaned['w'] == constants.WHERE_WIKI:
            sortby = cleaned['sortby_documents']
            try:
                searcher = searcher.order_by(
                    *constants.SORT_DOCUMENTS[sortby])
            except IndexError:
                # Skip index errors because they imply the user is
                # sending us sortby values that aren't valid.
                pass

        # Build the query
        if cleaned_q:
            query_fields = chain(*[cls.get_query_fields()
                                   for cls in get_mapping_types()])
            query = {}
            # Create text and text_phrase queries for every field
            # we want to search.
            for field in query_fields:
                for query_type in ['text', 'text_phrase']:
                    query['%s__%s' % (field, query_type)] = cleaned_q

            # Transform the query to use locale-aware analyzers.
            query = es_utils.es_query_with_analyzer(query, language)

            searcher = searcher.query(should=True, **query)

        num_results = min(searcher.count(), settings.SEARCH_MAX_RESULTS)

        # TODO - Can ditch the ComposedList here, but we need
        # something that paginate can use to figure out the paging.
        documents = ComposedList()
        documents.set_count(('results', searcher), num_results)

        results_per_page = settings.SEARCH_RESULTS_PER_PAGE
        pages = paginate(request, documents, results_per_page)

        # If we know there aren't any results, let's cheat and in
        # doing that, not hit ES again.
        if num_results == 0:
            searcher = []
        else:
            # Get the documents we want to show and add them to
            # docs_for_page
            documents = documents[offset:offset + results_per_page]

            if len(documents) == 0:
                # If the user requested a page that's beyond the
                # pagination, then documents is an empty list and
                # there are no results to show.
                searcher = []
            else:
                bounds = documents[0][1]
                searcher = searcher.values_dict()[bounds[0]:bounds[1]]

        results = []
        for i, doc in enumerate(searcher):
            rank = i + offset

            if doc['model'] == 'wiki_document':
                summary = _build_es_excerpt(doc)
                if not summary:
                    summary = doc['document_summary']
                result = {
                    'title': doc['document_title'],
                    'type': 'document'}

            elif doc['model'] == 'questions_question':
                summary = _build_es_excerpt(doc)
                if not summary:
                    # We're excerpting only question_content, so if
                    # the query matched question_title or
                    # question_answer_content, then there won't be any
                    # question_content excerpts. In that case, just
                    # show the question--but only the first 500
                    # characters.
                    summary = bleach.clean(
                        doc['question_content'], strip=True)[:500]
                result = {
                    'title': doc['question_title'],
                    'type': 'question',
                    'is_solved': doc['question_is_solved'],
                    'num_answers': doc['question_num_answers'],
                    'num_votes': doc['question_num_votes'],
                    'num_votes_past_week': doc['question_num_votes_past_week']}

            else:
                summary = _build_es_excerpt(doc, first_only=True)
                result = {
                    'title': doc['post_title'],
                    'type': 'thread'}

            result['url'] = doc['url']
            result['object'] = ObjectDict(doc)
            result['search_summary'] = summary
            result['rank'] = rank
            result['score'] = doc._score
            result['explanation'] = escape(format_explanation(
                doc._explanation))
            results.append(result)

    except ES_EXCEPTIONS as exc:
        # Handle timeouts and other transient errors with a
        # "Search Unavailable" message rather than a Django error page.
        if is_json:
            return HttpResponse(
                json.dumps({'error': _('Search Unavailable')}),
                mimetype=mimetype, status=503)

        # Cheating here: Convert from 'Timeout()' to 'timeout' so
        # we have less code, but still have good stats.
        exc_bucket = repr(exc).lower().strip('()')
        statsd.incr('search.esunified.{0}'.format(exc_bucket))

        import logging
        logging.exception(exc)

        t = 'search/mobile/down.html' if request.MOBILE else 'search/down.html'
        return render(request, t, {'q': cleaned['q']}, status=503)

    items = [(k, v) for k in search_form.fields
             for v in r.getlist(k) if v and k != 'a']
    items.append(('a', '2'))

    if is_json:
        # Models are not json serializable.
        for r in results:
            del r['object']
        data = {}
        data['results'] = results
        data['total'] = len(results)
        data['query'] = cleaned['q']
        if not results:
            data['message'] = _('No pages matched the search criteria')
        json_data = json.dumps(data)
        if callback:
            json_data = callback + '(' + json_data + ');'

        return HttpResponse(json_data, mimetype=mimetype)

    fallback_results = None
    if num_results == 0:
        fallback_results = _fallback_results(language, cleaned['product'])

    results_ = render(request, template, {
        'num_results': num_results,
        'results': results,
        'fallback_results': fallback_results,
        'q': cleaned['q'],
        'w': cleaned['w'],
        'product': Product.objects.filter(slug__in=cleaned['product']),
        'products': Product.objects.filter(visible=True),
        'pages': pages,
        'search_form': search_form,
        'lang_name': lang_name})
    results_['Cache-Control'] = 'max-age=%s' % \
        (settings.SEARCH_CACHE_PERIOD * 60)
    results_['Expires'] = (
        datetime.utcnow() +
        timedelta(minutes=settings.SEARCH_CACHE_PERIOD)
    ).strftime(expires_fmt)
    results_.set_cookie(settings.LAST_SEARCH_COOKIE, urlquote(cleaned['q']),
                        max_age=3600, secure=False, httponly=False)
    return results_
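
# The view above rejects JSONP requests whose callback fails
# jsonp_is_valid. That helper is defined elsewhere; a minimal sketch of
# such a validator (an assumption, not the actual implementation) would
# whitelist JavaScript-identifier-like names so the callback can't
# inject script into the response:
#
#   import re
#
#   _CALLBACK_RE = re.compile(r'^[a-zA-Z_$][a-zA-Z0-9_$.]*$')
#
#   def jsonp_is_valid(func):
#       return bool(_CALLBACK_RE.match(func))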
def simple_search(request, template=None):
    """ES-specific simple search view.

    This view is for end-user searching of the Knowledge Base and
    Support Forum. Filtering options are limited to:
    * product (`product=firefox`, for example, for only Firefox results)
    * document type (`w=2`, for example, for Support Forum questions only)
    """

    # Redirect old Advanced Search URLs (?a={1,2}) to the new URL.
    a = request.GET.get('a')
    if a in ['1', '2']:
        new_url = reverse('search.advanced') + '?' + request.GET.urlencode()
        return HttpResponseRedirect(new_url)

    # JSON-specific variables
    is_json = (request.GET.get('format') == 'json')
    callback = request.GET.get('callback', '').strip()
    content_type = (
        'application/x-javascript' if callback else 'application/json')

    # Check callback is valid
    if is_json and callback and not jsonp_is_valid(callback):
        return HttpResponse(
            json.dumps({'error': _('Invalid callback function.')}),
            content_type=content_type, status=400)

    language = locale_or_default(
        request.GET.get('language', request.LANGUAGE_CODE))

    r = request.GET.copy()

    # TODO: Do we really need to add this to the URL if it isn't
    # already there?
    r['w'] = r.get('w', constants.WHERE_BASIC)

    # TODO: Break out a separate simple search form.
    search_form = SimpleSearchForm(r, auto_id=False)

    if not search_form.is_valid():
        if is_json:
            return HttpResponse(
                json.dumps({'error': _('Invalid search data.')}),
                content_type=content_type,
                status=400)

        t = template if request.MOBILE else 'search/form.html'
        search_ = render(request, t, {
            'advanced': False,
            'request': request,
            'search_form': search_form})
        cache_period = settings.SEARCH_CACHE_PERIOD
        search_['Cache-Control'] = 'max-age=%s' % (cache_period * 60)
        search_['Expires'] = (
            (datetime.utcnow() + timedelta(minutes=cache_period))
            .strftime(EXPIRES_FMT))
        return search_

    cleaned = search_form.cleaned_data

    # On mobile, we default to just wiki results.
    if request.MOBILE and cleaned['w'] == constants.WHERE_BASIC:
        cleaned['w'] = constants.WHERE_WIKI

    page = max(smart_int(request.GET.get('page')), 1)
    offset = (page - 1) * settings.SEARCH_RESULTS_PER_PAGE

    lang = language.lower()
    if settings.LANGUAGES_DICT.get(lang):
        lang_name = settings.LANGUAGES_DICT[lang]
    else:
        lang_name = ''

    # We use a regular S here because we want to search across
    # multiple doctypes.
    searcher = (AnalyzerS().es(urls=settings.ES_URLS)
                .indexes(es_utils.read_index('default')))

    wiki_f = F(model='wiki_document')
    question_f = F(model='questions_question')

    # Start - wiki filters
    if cleaned['w'] & constants.WHERE_WIKI:
        # Category filter
        wiki_f &= F(document_category__in=settings.SEARCH_DEFAULT_CATEGORIES)

        # Locale filter
        wiki_f &= F(document_locale=language)

        # Product filter
        products = cleaned['product']
        for p in products:
            wiki_f &= F(product=p)

        # Archived bit
        wiki_f &= F(document_is_archived=False)
    # End - wiki filters

    # Start - support questions filters
    if cleaned['w'] & constants.WHERE_SUPPORT:
        # Has helpful answers is set by default if using basic search
        cleaned['has_helpful'] = constants.TERNARY_YES

        # No archived questions in default search.
        cleaned['is_archived'] = constants.TERNARY_NO

        # These filters are ternary: they can be either YES, NO, or OFF.
        ternary_filters = ('has_helpful', 'is_archived')
        d = dict(('question_%s' % filter_name,
                  _ternary_filter(cleaned[filter_name]))
                 for filter_name in ternary_filters
                 if cleaned[filter_name])
        if d:
            question_f &= F(**d)

        # Product filter
        products = cleaned['product']
        for p in products:
            question_f &= F(product=p)
    # End - support questions filters

    # Done with all the filtery stuff--time to generate results

    # Combine all the filters and add to the searcher
    doctypes = []
    final_filter = F()

    if cleaned['w'] & constants.WHERE_WIKI:
        doctypes.append(DocumentMappingType.get_mapping_type_name())
        final_filter |= wiki_f

    if cleaned['w'] & constants.WHERE_SUPPORT:
        doctypes.append(QuestionMappingType.get_mapping_type_name())
        final_filter |= question_f

    searcher = searcher.doctypes(*doctypes)
    searcher = searcher.filter(final_filter)

    if 'explain' in request.GET and request.GET['explain'] == '1':
        searcher = searcher.explain()

    documents = ComposedList()

    try:
        cleaned_q = cleaned['q']

        # Set up the highlights. Show the entire field highlighted.
        searcher = searcher.highlight(
            'question_content',  # support forum
            'document_summary',  # kb
            pre_tags=['<b>'],
            post_tags=['</b>'],
            number_of_fragments=0)

        # Set up boosts
        searcher = searcher.boost(
            question_title=4.0,
            question_content=3.0,
            question_answer_content=3.0,
            document_title=6.0,
            document_content=1.0,
            document_keywords=8.0,
            document_summary=2.0,

            # Text phrases in document titles and content get an extra
            # boost.
            document_title__match_phrase=10.0,
            document_content__match_phrase=8.0)

        # Build the query
        query_fields = chain(*[
            cls.get_query_fields() for cls in
            [DocumentMappingType, QuestionMappingType]])
        query = {}
        # Create match and match_phrase queries for every field
        # we want to search.
        for field in query_fields:
            for query_type in ['match', 'match_phrase']:
                query['%s__%s' % (field, query_type)] = cleaned_q

        # Transform the query to use locale-aware analyzers.
        query = es_utils.es_query_with_analyzer(query, language)

        searcher = searcher.query(should=True, **query)

        num_results = min(searcher.count(), settings.SEARCH_MAX_RESULTS)

        # TODO - Can ditch the ComposedList here, but we need
        # something that paginate can use to figure out the paging.
        documents = ComposedList()
        documents.set_count(('results', searcher), num_results)

        results_per_page = settings.SEARCH_RESULTS_PER_PAGE
        pages = paginate(request, documents, results_per_page)

        # If we know there aren't any results, let's cheat and in
        # doing that, not hit ES again.
        if num_results == 0:
            searcher = []
        else:
            # Get the documents we want to show and add them to
            # docs_for_page
            documents = documents[offset:offset + results_per_page]

            if len(documents) == 0:
                # If the user requested a page that's beyond the
                # pagination, then documents is an empty list and
                # there are no results to show.
                searcher = []
            else:
                bounds = documents[0][1]
                searcher = searcher[bounds[0]:bounds[1]]

        results = []
        for i, doc in enumerate(searcher):
            rank = i + offset

            if doc['model'] == 'wiki_document':
                summary = _build_es_excerpt(doc)
                if not summary:
                    summary = doc['document_summary']
                result = {'title': doc['document_title'],
                          'type': 'document'}

            elif doc['model'] == 'questions_question':
                summary = _build_es_excerpt(doc)
                if not summary:
                    # We're excerpting only question_content, so if
                    # the query matched question_title or
                    # question_answer_content, then there won't be any
                    # question_content excerpts. In that case, just
                    # show the question--but only the first 500
                    # characters.
                    summary = bleach.clean(
                        doc['question_content'], strip=True)[:500]
                result = {
                    'title': doc['question_title'],
                    'type': 'question',
                    'is_solved': doc['question_is_solved'],
                    'num_answers': doc['question_num_answers'],
                    'num_votes': doc['question_num_votes'],
                    'num_votes_past_week': doc['question_num_votes_past_week']}

            result['url'] = doc['url']
            result['object'] = doc
            result['search_summary'] = summary
            result['rank'] = rank
            result['score'] = doc.es_meta.score
            result['explanation'] = escape(
                format_explanation(doc.es_meta.explanation))
            results.append(result)

    except ES_EXCEPTIONS as exc:
        # Handle timeouts and other transient errors with a
        # "Search Unavailable" message rather than a Django error page.
        if is_json:
            return HttpResponse(
                json.dumps({'error': _('Search Unavailable')}),
                content_type=content_type, status=503)

        # Cheating here: Convert from 'Timeout()' to 'timeout' so
        # we have less code, but still have good stats.
        exc_bucket = repr(exc).lower().strip('()')
        statsd.incr('search.esunified.{0}'.format(exc_bucket))

        log.exception(exc)

        t = 'search/mobile/down.html' if request.MOBILE else 'search/down.html'
        return render(request, t, {'q': cleaned['q']}, status=503)

    items = [(k, v) for k in search_form.fields
             for v in r.getlist(k) if v and k != 'a']
    items.append(('a', '2'))

    fallback_results = None
    if num_results == 0:
        fallback_results = _fallback_results(language, cleaned['product'])

    product = Product.objects.filter(slug__in=cleaned['product'])
    if product:
        product_titles = [_(p.title, 'DB: products.Product.title')
                          for p in product]
    else:
        product_titles = [_('All Products')]

    product_titles = ', '.join(product_titles)

    data = {
        'num_results': num_results,
        'results': results,
        'fallback_results': fallback_results,
        'product_titles': product_titles,
        'q': cleaned['q'],
        'w': cleaned['w'],
        'lang_name': lang_name}

    if is_json:
        # Models are not json serializable.
        for r in data['results']:
            del r['object']
        data['total'] = len(data['results'])
        data['products'] = [{'slug': p.slug, 'title': p.title}
                            for p in Product.objects.filter(visible=True)]

        if product:
            data['product'] = product[0].slug

        pages = Paginator(pages)
        data['pagination'] = dict(
            number=pages.pager.number,
            num_pages=pages.pager.paginator.num_pages,
            has_next=pages.pager.has_next(),
            has_previous=pages.pager.has_previous(),
            max=pages.max,
            span=pages.span,
            dotted_upper=pages.pager.dotted_upper,
            dotted_lower=pages.pager.dotted_lower,
            page_range=pages.pager.page_range,
            url=pages.pager.url)

        if not results:
            data['message'] = _('No pages matched the search criteria')

        json_data = json.dumps(data)
        if callback:
            json_data = callback + '(' + json_data + ');'

        return HttpResponse(json_data, content_type=content_type)

    data.update({
        'product': product,
        'products': Product.objects.filter(visible=True),
        'pages': pages,
        'search_form': search_form,
        'advanced': False})
    results_ = render(request, template, data)
    cache_period = settings.SEARCH_CACHE_PERIOD
    results_['Cache-Control'] = 'max-age=%s' % (cache_period * 60)
    results_['Expires'] = (
        (datetime.utcnow() + timedelta(minutes=cache_period))
        .strftime(EXPIRES_FMT))
    results_.set_cookie(settings.LAST_SEARCH_COOKIE, urlquote(cleaned['q']),
                        max_age=3600, secure=False, httponly=False)
    return results_
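
# Both search views build their YES/NO/OFF question filters through
# _ternary_filter. The helper is defined elsewhere in this module; a
# minimal sketch consistent with its call sites (an assumption, not the
# verified implementation) maps the ternary constant onto the boolean
# the ES filter expects:
#
#   def _ternary_filter(filter_value):
#       # TERNARY_YES -> True, TERNARY_NO -> False; OFF values never
#       # reach this point because callers skip falsy cleaned values.
#       return filter_value == constants.TERNARY_YES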