Example #1
def search(request):
    """Search through the experiments for a search term."""
    form = SearchForm(request.POST or None)
    if not form.is_valid():
        return render_to_response("search.html", {"form": form},
                                  context_instance=RequestContext(request))
    results = set()
    if form.cleaned_data['expt_type']:
        expt_type = form.cleaned_data.pop('expt_type')
        experiments = Experiment.objects.filter(expt_type__type_name=expt_type)
        results = _intersect_unless_empty(results, experiments)

    if form.cleaned_data['transcription_factor']:
        try:
            tf = json.loads('"%s"' % form.cleaned_data['transcription_factor'])
            experiments = Experiment.objects.filter(transcription_factor__tf=tf)
            form.cleaned_data.pop('transcription_factor')
        except ValueError:
            tfs = json.loads(form.cleaned_data.pop('transcription_factor'))
            experiments = set()
            for tf in tfs:
                experiments = experiments.union(set(Experiment.objects.filter(transcription_factor__tf=tf)))
        results = _intersect_unless_empty(results, experiments)

    if form.cleaned_data['tissue_name']:
        tissue = form.cleaned_data.pop('tissue_name')
        experiments = Experiment.objects.filter(experimental_tissues=tissue)
        results = _intersect_unless_empty(results, experiments)

    for key, value in form.cleaned_data.iteritems():
        if value:
            these_results = Experiment.objects.filter(**{key: value})
            results = _intersect_unless_empty(results, these_results)
    return HttpResponse(str(list(results)))
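
Note: Example #1 depends on a helper named _intersect_unless_empty that is not shown on this page. A minimal sketch of what such a helper presumably does, based only on how it is called above (an assumption, not the project's actual code): intersect the accumulated results with the new queryset, unless the accumulator is still empty, in which case the new results seed it.

def _intersect_unless_empty(results, queryset):
    """Sketch: intersect results with queryset; an empty accumulator is seeded instead."""
    new_results = set(queryset)
    if not results:
        return new_results
    return results & new_results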
Example #2
def search(request):
    result = []
    link = {}  # template context holding the token

    token = request.COOKIES.get('TOKEN', "")
    link['token'] = token

    if request.method == 'POST':
        form = SearchForm(request.POST)

        if form.is_valid():
            category = form.cleaned_data['category']
            search_keyword = form.cleaned_data['search_keyword']

            if category:
                result = search_category(category, search_keyword)

        return render(request, 'search/index.html', {
            'result': result,
            'link': link
        })
    else:
        form = SearchForm()
    return render(request, 'search/index.html', {'form': form, 'link': link})
Example #3
File: views.py Project: mikpanko/grakon
def search(request):
    res_list = []
    for res in RESOURCE_CHOICES:
        res_list.append({'title': res[0], 'value': res[1]})

    form = SearchForm(request.GET)
    location_path = None

    if form.is_valid():
        last_name = form.cleaned_data['last_name']
        country = form.cleaned_data['country']
        region = form.cleaned_data['region']
        district = form.cleaned_data['district']
        resource = form.cleaned_data['resource']

        location = Location.objects.get(country=None)

        if country:
            qs = Location.objects.filter(country=country)
            if qs.count() > 0:
                location = qs[:1].get()

        if region:
            qs = Location.objects.filter(region=region)
            if qs.count() > 0:
                location = qs[:1].get()

        if district:
            qs = Location.objects.filter(district=district)
            if qs.count() > 0:
                location = qs[:1].get()

        # Filter by part of last_name
        qfilter = Q()
        if last_name != '':
            qfilter &= Q(last_name__icontains=last_name)

        if resource and resource!='all' and resource!='':
            qfilter &= Q(provided_resources__resource__icontains=resource)

        # Filter by resource
        participants = location.get_entities('participants', qfilter)
        ctx = table_data(request, 'participants', participants)

        if country or region or district:
            location_path = location.path()

        ctx['last_name'] = last_name
        ctx['country'] = country
        ctx['region'] = region
        ctx['district'] = district
        ctx['resource'] = resource
    else:
        participants = Location.objects.get(country=None).get_entities('participants')
        ctx = table_data(request, 'participants', participants)

    ctx['locationWidget'] = LocationSelectWidget().render('location_select', location_path or [])
    ctx['resources'] = res_list

    return render_to_response('search.html', context_instance=RequestContext(request, ctx))
Example #4
def search(request):
    form = SearchForm(request.GET)
    if form.is_valid():
        query = form.cleaned_data['q']

        has_real_name_access = request.user.has_perm(
            'demoscene.view_releaser_real_names')
        (name_results, results,
         resultset) = form.search(with_real_names=has_real_name_access)

        if len(name_results) == 1 and len(results) == 0:
            messages.success(request, "One match found for '%s'" % query)
            return HttpResponseRedirect(
                name_results[0].instance.get_absolute_url())
        page = get_page(results, request.GET.get('page', '1'))
    else:
        query = None
        page = None
        name_results = None
        resultset = None
    return render(
        request, 'search/search.html', {
            'form': form,
            'query': query,
            'global_search_query': query,
            'name_results': name_results,
            'page': page,
            'resultset': resultset,
        })
Example #5
def index(request):
    accept = request.META.get('HTTP_ACCEPT_LANGUAGE', '')
    user_langs = get_user_languages_from_request(request)
    
    if 'q' in request.REQUEST:
        form = SearchForm(request.user, user_langs, request.REQUEST)
    else:
        form = SearchForm(request.user, user_langs)
    
    qs = SearchQuerySet().none()
    
    display_mode = 'all'
    
    if form.is_valid():
        qs = form.search_qs(SearchQuerySet().models(Video))
        display_mode = form.cleaned_data.get('display', 'all')
        
    if settings.HAYSTACK_SEARCH_ENGINE == 'dummy' and settings.DEBUG:
        q = request.REQUEST.get('q', '')
        qs = Video.objects.filter(title__icontains=q)
    
    context = {
        'query': request.REQUEST.get('q', ''),
        'form': form,
        'display_mode': display_mode
    }
        
    return object_list(request, queryset=qs,
                       paginate_by=30,
                       template_name='search/index.html',
                       template_object_name='result',
                       extra_context=context)   
Example #6
def index(request):
    # if this is a POST request we need to process the form data
    # dump()
    if request.method == 'POST':
        # create a form instance and populate it with data from the request:
        form = SearchForm(request.POST)
        # check whether it's valid:
        if form.is_valid():
            print("here")
            # process the data in form.cleaned_data as required
            # ...
            # redirect to a new URL:
            zipcode = form.cleaned_data["zipcode"]
            hosp_type = form.cleaned_data["hosp_type"]
            emergency = form.cleaned_data["emergency"]
            criteria = form.cleaned_data["criteria"]
            print(form.errors)
            data = rank(zipcode, hosp_type, emergency, criteria)

            return render(request, "search/result.html", {'form': form, 'data': data})
        # else:
        #     print("invalid form")
        #     print(form.errors)
    # if a GET (or any other method) we'll create a blank form
    else:
        form = SearchForm()

    return render(request, "search/index_.html", {'form': form})
Example #7
def search(request):
	form = SearchForm(request.GET)
	if form.is_valid():
		query = form.cleaned_data['q']

		has_real_name_access = request.user.has_perm('demoscene.view_releaser_real_names')
		(name_results, results, resultset) = form.search(with_real_names=has_real_name_access)

		if len(name_results) == 1 and len(results) == 0:
			messages.success(request, "One match found for '%s'" % query)
			return HttpResponseRedirect(name_results[0].instance.get_absolute_url())
		page = get_page(results, request.GET.get('page', '1'))
	else:
		query = None
		page = None
		name_results = None
		resultset = None
	return render(request, 'search/search.html', {
		'form': form,
		'query': query,
		'global_search_query': query,
		'name_results': name_results,
		'page': page,
		'resultset': resultset,
	})
Example #8
    def __init__(self,
                 form: SearchForm,
                 save_request: bool = False,
                 highlight: bool = True):
        logger = logging.getLogger(__name__)

        self._response = None
        self._error = False
        self._form = form
        self._highlight = highlight

        try:
            response = requests.get(form.url, params={'form': form.to_json()})
            response.raise_for_status()

            self._response = response.json()
        except requests.exceptions.Timeout:
            logger.error("Search Request Connection Timeout")
            self._error = True
        except requests.exceptions.HTTPError:
            self._error = True
            logger.error("Http Error occured")
        except requests.exceptions.RequestException:
            logger.error("Some unknown request exception occured")
            self._error = True

        if self._response is None:
            self._error = True
        elif settings.SAVE_SEARCH_QUERIES and save_request and form.interesting:
            SearchQuery.objects.create(query=form.to_dict())
Example #9
def search(request):
    form = SearchForm(request.GET)
    if form.is_valid():
        query = form.cleaned_data['q']

        has_real_name_access = request.user.has_perm(
            'demoscene.view_releaser_real_names')

        page_number = request.GET.get('page', '1')
        # Make sure page request is an int. If not, deliver first page.
        try:
            page_number = int(page_number)
        except ValueError:
            page_number = 1

        results, page = form.search(with_real_names=has_real_name_access,
                                    page_number=page_number)
    else:
        query = ''
        page = None
        results = None
    return render(request, 'search/search.html', {
        'form': form,
        'query': query,
        'results': results,
        'page': page,
    })
Example #10
File: rpc.py Project: oaleeapp/EguoWorld
 def search(self, rdata, user):
     form = SearchForm(rdata)
     output = render_page(rdata.get('page', 1), form.queryset(), 20)
     output['sidebar'] = render_to_string('search/_sidebar.html', {
         'form': form,
         'rdata': rdata,
     })
     return output
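
Note: Example #10 (and the later rpc.py examples) builds its output through a render_page helper that is not shown here. A minimal sketch of the shape it appears to have, assuming a standard Django Paginator underneath (the names and dictionary keys below are assumptions, not the projects' actual code):

from django.core.paginator import EmptyPage, PageNotAnInteger, Paginator

def render_page(page, queryset, per_page, display_views=None):
    """Sketch: paginate queryset and return a dict the RPC layer can serialize."""
    paginator = Paginator(queryset, per_page)
    try:
        page_obj = paginator.page(page)
    except (EmptyPage, PageNotAnInteger):
        page_obj = paginator.page(1)
    output = {
        'page': page_obj.number,
        'pages': paginator.num_pages,
        'items': list(page_obj.object_list),
    }
    if display_views is not None:
        output['display_views'] = display_views
    return output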
Example #11
 def count_faceted_results(self, vocab, term):
     """Return count of matching indexed records by facet."""
     facet_query = "{0}_exact:{1}".format(vocab, term)
     form = SearchForm(
         selected_facets=[facet_query],
         repo_slug=self.repo.slug,
         sortby=LoreSortingFields.DEFAULT_SORTING_FIELD
     )
     return form.search().count()
Example #12
File: views.py Project: erdnaxe/aube
def search(request):
    """ La page de recherche standard """
    search_form = SearchForm(request.GET or None)
    if search_form.is_valid():
        return render(
            request, 'search/index.html',
            get_results(search_form.cleaned_data.get('q', ''), request,
                        search_form.cleaned_data))
    return render(request, 'search/search.html', {'search_form': search_form})
Example #13
def search_home_page_url(request):
    form = SearchForm(
        request.POST or None
    )  # if the request is a POST, bind that data to the form; with no data, render an empty form
    if form.is_valid():
        form.save(commit=True)
        # form = SearchForm()  # resets the form so the fields are empty after saving; no longer needed, since submitting now redirects to another page
        return redirect("/search/result_url")
    context = {'form': form}
    return render(request, "search/search.html", context)
Example #14
 def search(self, query, sorting=LoreSortingFields.DEFAULT_SORTING_FIELD):
     """
     Helper function to perform a search
     """
     form = SearchForm(
         data={"q": query},
         repo_slug=self.repo.slug,
         sortby=sorting
     )
     return form.search()
Example #15
    def get_context_data(self, **kwargs):
        data = super(MainView, self).get_context_data(**kwargs)
        posts = Post.objects.filter(parent=None).order_by('-date_created')[:15]

        form = SearchForm(self.request.GET or None)
        if form.is_valid():
            posts = form.search()

        data['posts'] = posts
        data['form'] = form
        return data
Example #16
def product_search_view(request):
    post_data = request.POST.copy()
    form = SearchForm(data=post_data)
    params = request.GET.copy()
    if form.is_valid():
        q = form.cleaned_data['q']
        category_slug = form.cleaned_data.get('category_slug', '')
        url = reverse('catalogue_category', kwargs={'category_slug': category_slug})
        params['search'] = q
        return HttpResponseRedirect(url + "?" + urllib.urlencode(params))
    return HttpResponseRedirect(post_data['from'] + "?" + urllib.urlencode(params))
Example #17
def toolbar(context, on):
    request = context.get("request", None)
    if request:
        search_form = SearchForm(data=request.GET)
    else:
        search_form = SearchForm()
    return {
        'on': on,
        'graph': context["graph"],
        'node_type': context.get("node_type", None),
        'search_form': search_form
    }
Example #18
    def get_context_data(self, **kwargs):
        data = super(PostListView, self).get_context_data(**kwargs)
        posts = Post.objects.filter(parent=None).order_by('-date_created')

        form = SearchForm(self.request.GET or None)
        if form.is_valid():
            posts = form.search()

        offset = int(self.request.GET.get('offset',0))

        data['posts'] = posts[offset:offset+15]
        return data
Example #19
 def test_search1(self):
     for title in self.titles:
         video = Video.objects.all()[0]
         sqs = SearchQuerySet().models(Video)
         video.title = title
         video.save()
         reset_solr()
         
         result = SearchForm.apply_query(video.title, sqs)
         self.assertTrue(video in [item.object for item in result], u"Failed to find video by title: %s" % title)
         
         result = SearchForm.apply_query(u'BBC', sqs)
         self.assertTrue(video in [item.object for item in result], u"Failed to find video by 'BBC' with title: %s" % title)              
Example #20
def search(request, tag_name=None, template=None):
    # If the form is invalid we still want to have a query.
    query = request.REQUEST.get('q', '')

    search_opts = {
            'meta': ('versions', 'categories', 'tags', 'platforms'),
            'version': None,
            }

    form = SearchForm(request)
    form.is_valid()  # Let the form try to clean data.

    category = form.cleaned_data.get('cat')

    if category == 'collections':
        return _collections(request)
    elif category == 'personas':
        return _personas(request)

    # TODO: Let's change the form values to something less gross when
    # Remora dies in a fire.
    query = form.cleaned_data['q']

    addon_type = form.cleaned_data.get('atype', 0)
    tag = tag_name if tag_name is not None else form.cleaned_data.get('tag')
    if tag_name:
        search_opts['show_personas'] = True
    page = form.cleaned_data['page']
    sort = form.cleaned_data.get('sort')

    search_opts['version'] = form.cleaned_data.get('lver')
    search_opts['limit'] = form.cleaned_data.get('pp', DEFAULT_NUM_RESULTS)
    search_opts['platform'] = form.cleaned_data.get('pid', amo.PLATFORM_ALL)
    search_opts['sort'] = sort
    search_opts['app'] = request.APP.id
    search_opts['offset'] = (page - 1) * search_opts['limit']

    if category:
        search_opts['category'] = category
    elif addon_type:
        search_opts['type'] = addon_type

    search_opts['tag'] = tag

    client = SearchClient()

    try:
        results = client.query(query, **search_opts)
    except SearchError, e:
        log.error('Sphinx Error: %s' % e)
        return jingo.render(request, 'search/down.html', locals(), status=503)
Example #21
File: views.py Project: phrac/partflux
def results(request):
    url = urllib.unquote_plus(request.get_full_path())
    print url
    searchform = SearchForm(request.GET)
    
    if not "q" in request.GET:
        return redirect('search.views.index')
    
    if searchform.is_valid():
        q = searchform.cleaned_data['q']
        selected_facets = request.GET.getlist("selected_facets")
        remove_facets = request.GET.getlist("remove_facets")
        applied_facets = {}
        
        if remove_facets:
            for r in remove_facets:
                # drop any facets flagged for removal (filter() returns a new sequence)
                selected_facets = [a for a in selected_facets if a != r]
                applied_facets = {k: v for k, v in applied_facets.items() if k != r}

        if selected_facets:
            for f in selected_facets:
                k, v = f.split(":")
                myurl = url.replace("&selected_facets=%s" % f, "")
                tag = "%s : %s" % (k.upper(), v)
                applied_facets[tag] = myurl 
                print applied_facets
        

        if q:
            sqs = SearchQuerySet().models(Part).models(Company).facet('brand').facet('category').facet('with_distributors').facet('with_image').auto_query(q)
            
            for facet in selected_facets:
                sqs = sqs.narrow(facet)

        try:                                                                    
            page = request.GET.get('page', 1)
        except PageNotAnInteger:
            page = 1

        p = Paginator(sqs, 10, request=request)
        results_list = p.page(page)

    return render_to_response('search/results.html',
                              { 
                                  'results_list': results_list, 
                                  'query': q,
                                  'facets': sqs.facet_counts(),
                                  'applied_facets': applied_facets,
                              },
                              context_instance=RequestContext(request))
Example #22
def test_search_form_validity(city, start_date, min_rent, max_rent,
                              num_of_roomates, num_of_rooms, validity,
                              request):
    city = request.getfixturevalue(city)
    form = SearchForm(
        data={
            'city': city,
            'start_date': start_date,
            'min_rent': min_rent,
            'max_rent': max_rent,
            'num_of_roomates': num_of_roomates,
            'num_of_rooms': num_of_rooms,
        })
    assert form.is_valid() is validity
Example #23
File: rpc.py Project: crodjer/mirosubs
    def search(self, rdata, user, testing=False):
        sqs = SearchQuerySet().result_class(VideoSearchResult) \
                .models(Video)

        q = rdata.get('q')
        if q:
            sqs = SearchForm.apply_query(q, sqs)
            form = SearchForm(rdata, sqs=sqs)
        else:
            form = SearchForm(rdata)

        if form.is_valid():
            qs = form.search_qs(sqs)
        else:
            qs = SearchQuerySet().none()

        #result = [item.object for item in qs]
        #qs1 = Video.objects.filter(title__contains=rdata['q'])
        #for o in qs1:
        #    if not o in result:
        #        print o.title

        display_views = form.get_display_views()
        output = render_page(rdata.get('page', 1),
                             qs,
                             20,
                             display_views=display_views)
        output['sidebar'] = render_to_string('search/_sidebar.html',
                                             dict(form=form, rdata=rdata))

        if testing:
            output['sqs'] = qs

        return output
Example #24
 def test_results_page(self):
     form_data = {
         "user_latitude": "64.14624",
         "user_longitude": "-21.94259",
         "user_address": "",
         "radius": "500",
         "search_preset": self.search_preset_id,
         "no_private": "on"
     }
     form = SearchForm(form_data)
     if not form.is_valid():
         self.fail("La validation du formulaire a échoué.")
     form.clean()
     response = self.client.post('/', data=form_data, follow=True)
     self.assertEqual(response.status_code, 200)
     self.assertTemplateUsed(response, "base.html")
     self.assertContains(response, "Voici votre localisation ")
     self.assertContains(response, "Latitude : 64.14624")
     self.assertContains(response, "Longitude : -21.94259")
     self.assertContains(response, "Adresse : ")
     self.assertContains(
         response, 'Recherche : "Boulangerie / Pâtisserie"')
     self.assertContains(response, " dans un rayon de 500 mètres.")
     self.assertContains(response, "Exclusion des résultats à accès privé.")
     # Same test, but including private results.
     form_data = {
         "user_latitude": "64.14624",
         "user_longitude": "-21.94259",
         "user_address": "",
         "radius": "500",
         "search_preset": self.search_preset_id,
         "no_private": "False"
     }
     form = SearchForm(form_data)
     if not form.is_valid():
         self.fail("La validation du formulaire a échoué.")
     form.clean()
     response = self.client.post('/', data=form_data, follow=True)
     self.assertEqual(response.status_code, 200)
     self.assertTemplateUsed(response, "base.html")
     self.assertContains(response, "Voici votre localisation ")
     self.assertContains(response, "Latitude : 64.14624")
     self.assertContains(response, "Longitude : -21.94259")
     self.assertContains(response, "Adresse : ")
     self.assertContains(
         response, 'Recherche : "Boulangerie / Pâtisserie"')
     self.assertContains(response, " dans un rayon de 500 mètres.")
     self.assertContains(response, "Inclusion des résultats à accès privé.")
     return
Example #25
 def test_light_search(self):
     form_data = {
         "user_latitude": "64.14624",
         "user_longitude": "-21.94259",
         "user_address": "",
         "radius": "500",
         "search_preset": self.search_preset_id,
         "no_private": "on"
     }
     form = SearchForm(form_data)
     if not form.is_valid():
         self.fail("Validation of the form failed.")
     form.clean()
     response = self.client.post('/light/', data=form_data, follow=True)
     self.assertEqual(response.status_code, 200)
     self.assertTemplateUsed(response, "base_light.html")
     self.assertContains(response, "Voici votre localisation ")
     self.assertContains(response, "Latitude : 64.14624")
     self.assertContains(response, "Longitude : -21.94259")
     self.assertContains(response, "Adresse : ")
     self.assertContains(
         response, 'Recherche : "Boulangerie / Pâtisserie"')
     self.assertContains(response, " dans un rayon de 500 mètres.")
     self.assertContains(response, "Exclusion des résultats à accès privé.")
     self.assertContains(response, "Nom : City Hall of Reykjavik")
     self.assertContains(response, "Distance : 11990937 mètres")
     self.assertContains(response, "Direction : 107,6° E →")
     self.assertContains(response,
                         ('Téléphone : <a href="tel:+354 411 1111">'
                          '+354 411 1111</a>'))
     self.assertContains(response, 'Adresse estimée : ')
     # Same test, but including private results.
     form_data = {
         "user_latitude": "64.14624",
         "user_longitude": "-21.94259",
         "user_address": "",
         "radius": "500",
         "search_preset": self.search_preset_id,
         "no_private": "False"
     }
     form = SearchForm(form_data)
     if not form.is_valid():
         self.fail("Validation of the form failed.")
     form.clean()
     response = self.client.post('/light/', data=form_data, follow=True)
     self.assertEqual(response.status_code, 200)
     self.assertTemplateUsed(response, "base_light.html")
     self.assertContains(response, "Inclusion des résultats à accès privé.")
     return
Example #26
    def get_context_data(self, **kwargs):
        context = super(BestTalksView, self).get_context_data(**kwargs)

        search_form = SearchForm()
        context['search_form'] = search_form

        page = 1
        if "page" in self.kwargs:
            page = int(self.kwargs["page"])

        sort = "wilsonscore_rank"

        results_total, results_ids = search_talks(page=page, sort=sort)
        search_results = Talk.published_objects.filter(pk__in=results_ids)

        num_pages = math.ceil(results_total / self.paginate_by)
        if num_pages > 500:
            num_pages = 500
        pagination = {
            "is_paginated":
            True if results_total > self.paginate_by else False,
            "number": page,
            "num_pages": num_pages,
            "has_previous": True if page > 1 else False,
            "previous_page_number": page - 1,
            "has_next": True if page < num_pages else False,
            "next_page_number": page + 1,
        }
        context['pagination'] = pagination
        context['object_list'] = search_results

        return context
Example #27
def home(request):
    form = SearchForm()
    topics = FoodTopic.objects.order_by('-total_collects').all()[:4]
    return render(request, 'home/index.tpl', {
        'search_form': form,
        'topics': topics
    })
Example #28
def toolbar(context, on):
    request = context.get("request", None)
    if request:
        search_form = SearchForm(data=request.GET)
    else:
        search_form = SearchForm()
    return {
        'on': on,
        'graph': context["graph"],
        'node_type': context.get("node_type", None),
        'nodes': context.get("nodes", None),
        'csv_results': context.get("csv_results", None),
        'search_form': search_form,
        'ENABLE_CLONING': settings.ENABLE_CLONING,
        'OPTIONS': context.get("OPTIONS", None)
    }
Example #29
File: views.py Project: slavkoBV/Academy
def thesis_list(request, id, slug):
    conference = get_object_or_404(Conference, pk=id)
    theses = conference.thesis_set.all()
    form = ThesisFilterForm()
    search_form = SearchForm(search_text='Прізвище автора або назва статті')
    sort_param = None
    q = request.GET.get('q', '')
    thesis_message = ''
    if q:
        search_params = ('title', 'author__participant__user__lastname')
        theses = search_objects(q, theses, search_params, sort_param)
        store(request, q)
    section = request.GET.get('section', '')
    if section:
        form = ThesisFilterForm(request.GET)
        if form.is_valid():
            if request.GET.get('section') != 'all':
                theses = theses.filter(section=request.GET.get('section'))
    number_of_search_result = str(len(theses))

    if len(number_of_search_result) == 2 and number_of_search_result.startswith('1'):
        thesis_message = 'доповідей'
    else:
        for i in thesis_message_dict.keys():
            if number_of_search_result[-1] in i:
                thesis_message = thesis_message_dict[i]
    context = paginate(theses, 4, request, {'theses': theses}, var_name='theses')
    context['conference'] = conference
    context['form'] = form
    context['search_form'] = search_form
    context['q'] = q
    context['section'] = section
    context['number_of_search_results'] = number_of_search_result
    context['thesis_message'] = thesis_message
    return render(request, 'conference_app/thesis_list.html', context)
Example #30
def index(request):
    site = current_site(request)
    if request.GET:
        return HttpResponseRedirect(
            site['BASE_URL'] + '%s#/?%s' %
            (reverse('search:index'), urlencode(request.GET)))
    return {'form': SearchForm(sqs=VideoIndex.public())}
Example #31
def get_data(request):
    """ This method is meant to get all results from all selected locations whitch match the searched string
        :param request - request object sent by the browser
        :returns Dictionary with products from every selected vendor"""
    search_string = request.POST.get('search_string')
    products = {}
    search_data = []
    search_on = request.POST.getlist('search_on')
    if not search_on:
        search_on = request.POST.getlist('search_on[]')
    locations = SearchPlace.objects.filter(id__in=search_on)
    start = time.time()
    for location in locations:
        url = "%s%s" % (location.url, search_string)
        search_data.append([url, location])
    num_cores = cpu_count()
    pool = Pool(processes=num_cores)
    pool_outputs = pool.map(function_wrapper, search_data)

    for product in pool_outputs:
        if not products:
            products = product
        else:
            products.update(product)
    stop = time.time()
    print(stop - start)
    return render(request, 'results.html', {
        'data': products,
        'form': SearchForm()
    })
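
Note: Example #31 fans its vendor requests out over a multiprocessing Pool via a function_wrapper helper that is not shown. A hypothetical sketch of its shape, inferred only from how its return value is merged into products above (the fetching and parsing below are assumptions, not the project's actual code):

import requests

def function_wrapper(search_data):
    """Hypothetical: fetch one vendor's search page and key the result by its SearchPlace."""
    url, location = search_data
    response = requests.get(url, timeout=10)
    # the real project presumably parses individual products out of the response here
    return {location: response.text}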
Example #32
def home(request):
    form = SearchForm()
    return render(request, "home.html", {
        'form': form,
        'extra': {
            'hide_search_bar': True
        }
    })
Example #33
    def get_context_data(self, **kwargs):
        context = super(DetailTalkView, self).get_context_data(**kwargs)

        # Views +1 (autoplay)
        talk = self.get_object()
        talk.view_count += 1

        # Update talk to the database
        talk.updated = timezone.now()
        talk.save()

        watched = None
        if self.request.user.is_authenticated:
            try:
                watched = TalkWatch.objects.get(user=self.request.user,
                                                talk=talk)
            except ObjectDoesNotExist:
                pass
        context['watched'] = watched

        favorited = None
        if self.request.user.is_authenticated:
            try:
                favorited = TalkFavorite.objects.get(user=self.request.user,
                                                     talk=talk)
            except ObjectDoesNotExist:
                pass
        context['favorited'] = favorited

        liked = None
        if self.request.user.is_authenticated:
            try:
                liked = TalkLike.objects.get(user=self.request.user, talk=talk)
            except ObjectDoesNotExist:
                pass
        context['liked'] = liked

        disliked = None
        if self.request.user.is_authenticated:
            try:
                disliked = TalkDislike.objects.get(user=self.request.user,
                                                   talk=talk)
            except ObjectDoesNotExist:
                pass
        context['disliked'] = disliked

        search_form = SearchForm()
        context['search_form'] = search_form

        results_total, results_ids = search_talks(page=1, sort="hacker_hot")
        hot_talks = Talk.published_objects.filter(pk__in=results_ids)[:4]
        context['hot_talks'] = hot_talks

        results_total, results_ids = search_more_like_this(talk)
        context['related_talks'] = Talk.published_objects.filter(
            pk__in=results_ids)

        return context
Example #34
def home(request):
    """
        The main page of Goose. Shows the search form, validate it
        and redirect to "results" if it's correct.
    """
    base_template = "base.html"
    if request.method == "POST":
        form = SearchForm(request.POST)
        if form.is_valid():
            form.clean()
            user_latitude = form.cleaned_data["latitude"]
            user_longitude = form.cleaned_data["longitude"]
            calculated_address = form.cleaned_data["calculated_address"]
            request.session["search_form"] = {
                "user_latitude": user_latitude,
                "user_longitude": user_longitude,
                "user_address": calculated_address,
                "radius": form.cleaned_data["radius"],
                "search_preset_id": form.cleaned_data["search_preset"].id,
                "no_private": form.cleaned_data["no_private"],
            }
            return redirect("results")
    else:
        form = SearchForm()
    return render(request, "search/home.html", locals())
Example #35
 def test_search(self):
     reset_solr()
     sqs = SearchQuerySet().models(Video)
     qs = Video.objects.exclude(title='')
     self.assertTrue(qs.count())
     
     for video in qs:
         result = SearchForm.apply_query(video.title, sqs)
         self.assertTrue(video in [item.object for item in result])
Example #36
def search(request):
    if request.method == 'POST': # If the form has been submitted...
        request.session['_simple_post'] = request.POST
        return HttpResponseRedirect('/search_results')
    else:
        form = SearchForm() # An unbound form
    return render(request, 'search/search.html', {
            'form': form,
    })
Example #37
    def test_search(self):
        reset_solr()
        sqs = VideoIndex.public()
        qs = Video.objects.exclude(title='')
        self.assertTrue(qs.count())

        for video in qs:
            result = SearchForm.apply_query(video.title, sqs)
            self.assertTrue(video in [item.object for item in result])
Example #38
    def test_search_funcionality(self):
        NewsFactory(subject='Test Search')
        EventFactory(title='Test Search')
        response = self.client.get('search', {'query': 'search'})
        self.assertEquals(200, response.status_code)

        self.assertEquals(1, len(response.context['news']))
        self.assertEquals(1, len(response.context['events']))

        response = self.client.post('search', {'query': '1'})

        from search.forms import SearchForm

        form = SearchForm({'query': '1234'})
        self.assertTrue(form.is_valid())

        form = SearchForm({'query': '1'})
        self.assertFalse(form.is_valid())
Example #39
def search(request):
    if request.method == "GET":

        form = SearchForm(request.GET)
        if form.is_valid():
            pass

        return render(
            request, "search/search.html", {
                'form': form,
                'categories': Category.objects.all(),
                'paper_hosts': PaperHost.objects.order_by('name')
            })
    elif request.method == "POST":
        form = SearchForm(request.POST)
        return render_search_result(request, form)

    return HttpResponseNotFound()
Example #40
def results(request):
    start = time.time()
    searchform = SearchForm(request.GET)

    if not "q" in request.GET:
        return render_to_response('search/search.html',
                                  context_instance=RequestContext(request))

    if searchform.is_valid():
        q = searchform.cleaned_data['q']

        if q:
            try:
                """ Redirect to a lot number if found """
                report = Report.objects.get(lot_number=q)
                return redirect('reports.views.report',
                                lot_number=report.lot_number)
            except Report.DoesNotExist:
                """ No lot # could be found, perform a search """
                sqs = SearchQuerySet().auto_query(q)
                results = sqs.filter(content=AutoQuery(q))

            try:
                page = request.GET.get('page', 1)
            except PageNotAnInteger:
                page = 1

            p = Paginator(sqs, 20, request=request)
            results_list = p.page(page)

        else:
            sqs = None
            results_list = []

        end = time.time()
        runtime = end - start

    return render_to_response('search/search.html', {
        'results_list': results_list,
        'query': q,
        'runtime': runtime,
    },
                              context_instance=RequestContext(request))
Example #41
    def test_prepare_query(self):
        queries = [
            'сварщики по Москве', 'сварщики из москвы', 'сварщики, москва'
        ]
        expect = 'сварщик москв'

        for query in queries:
            with self.subTest(query_string=query):
                result = SearchForm().prepare_query(query)
                self.assertEqual(result, expect)
Example #42
def search_box(request):
    q = request.GET.get('q', '')
    category_searched = request.GET.get('category_searched', '')
    active_departments = Department.active.all()

    form = SearchForm({
        'q': q,
        'category_searched': category_searched,
    })
    return {'form': form, 'departments': active_departments}
Example #43
    def get_context_data(self, **kwargs):
        context = super(TopicListView, self).get_context_data(**kwargs)

        search_form = SearchForm()
        context['search_form'] = search_form

        topics_list = Topic.published_objects.filter(parent_topic=None)
        context['object_list'] = topics_list

        return context
Example #44
def index(request):
    context = RequestContext(request)
    gmap = maps.Map(opts = {
        'mapTypeId': maps.MapTypeId.ROADMAP,
        'zoom': 7,
        'mapTypeControlOptions': {
             'style': maps.MapTypeControlStyle.DROPDOWN_MENU
        },
    })
    
    converted=""
    if request.method == 'POST':
        form = SearchForm(request.POST)
        
        if form.is_valid():
            new_obj = Destination()
            new_obj.category_id = 1
            searchlocation = form.cleaned_data['searchfor']
            geoloc = Geocoder.geocode(searchlocation)[0]
            converted = str(geoloc)
            print(converted)
            lat, lng = geoloc.coordinates
            marker = maps.Marker(opts = {
                'map': gmap,
                'position': maps.LatLng(lat, lng),
            })
            maps.event.addListener(marker, 'mouseover', 'myobj.markerOver')
            maps.event.addListener(marker, 'mouseout', 'myobj.markerOut')
            info = maps.InfoWindow({
                'content': 'Hello!',
                'disableAutoPan': True
            })
            info.open(gmap, marker)
            #new_obj.save();
            form = SearchForm()
        else:
            print(form.errors)
    else:
        form = SearchForm()

    template_vars = {'form': SearchForm(initial={'map': gmap}), 'converted': converted}
    return render_to_response('search/index.html', template_vars, context)
Example #45
def results(request):
    start = time.time()
    searchform = SearchForm(request.GET)
    
    if not "q" in request.GET:
        return render_to_response('search/search.html', context_instance=RequestContext(request))
    
    if searchform.is_valid():
        q = searchform.cleaned_data['q']

        if q:
            try:
                """ Redirect to a lot number if found """
                report = Report.objects.get(lot_number=q)
                return redirect('reports.views.report', lot_number=report.lot_number)
            except Report.DoesNotExist:
                """ No lot # could be found, perform a search """
                sqs = SearchQuerySet().auto_query(q)
                results = sqs.filter(content=AutoQuery(q))
            
            try:                                                                    
                page = request.GET.get('page', 1)
            except PageNotAnInteger:
                page = 1
    
            p = Paginator(sqs, 20, request=request)
            results_list = p.page(page)

        else:
            sqs = None
            results_list = []
        
        end = time.time()
        runtime = end-start

    return render_to_response('search/search.html',
                              { 
                                  'results_list': results_list, 
                                  'query': q,
                                  'runtime': runtime,
                              },
                              context_instance=RequestContext(request))
Example #46
def search(request):

    if request.method == 'POST':
        form = SearchForm(request.POST)
        if form.is_valid():
            
            #startPos and endPos are the lat/long for the address passed in via the form (javascript)
            startAddress = form.cleaned_data['startAddress']
            startLat = float(form.cleaned_data['startLat'])
            startLong = float(form.cleaned_data['startLong'])
            endAddress = form.cleaned_data['endAddress']
            endLat = float(form.cleaned_data['endLat'])
            endLong = float(form.cleaned_data['endLong'])
            distance = float(form.cleaned_data['distance'])
            results = runSearch(startAddress, startLat, startLong, endAddress, endLat, endLong, distance)
            
            query = reduce(operator.or_, (Q(pk=x) for x in results))
            results = Trip.objects.filter(query)
            
            return direct_to_template(request, 'search.html', { 'results': results, 'authenticated' : request.user.is_authenticated() })


    # handle the typical ajax search request
    elif request.method == 'GET':
        startAddress = request.GET.get('startAddress' ,'')
        startLat = request.GET.get('startLat', '')
        startLong = request.GET.get('startLong', '')
        endAddress = request.GET.get('endAddress' ,'')
        endLat = request.GET.get('endLat' ,'')
        endLong = request.GET.get('endLong', '')
        distance = request.GET.get('distance', '')
        results = runSearch(startAddress, startLat, startLong, endAddress, endLat, endLong, distance)

        query = reduce(operator.or_, (Q(pk=x) for x in results))
        results = Trip.objects.filter(query)
        
        return HttpResponse(results)

    else:
        form = SearchForm()
        return direct_to_template(request, 'search.html', { 'form' : form } )
Example #47
File: feeds.py Project: Jnull/FlightDeck
    def get_object(self, request):
        form = SearchForm(request.GET)
        form.is_valid()
        self.query = query = form.cleaned_data

        t = query.get('type')
        self.search_type = _TYPES.get(t)
        self.search_query = query.get('q', '')

        filters = {}
        if t:
            filters['type'] = t

        if query.get('author'):
            filters['author'] = query['author'].id

        if query.get('copies'):
            filters['copies_count'] = query['copies']

        return package_search(self.search_query, user=request.user,
                **filters).order_by('-created_at')[:20]
Example #48
def search(request):
    if request.method == "POST":
        searchform = SearchForm(request.POST)
        if searchform.is_valid():
            query = searchform.data.get('query')
            query_list = query.lower().split(',')
            if len(query_list) > 1:
                try:
                    qs = [list(chain(*[skill.profile_set.all() for skill in Skill.objects.filter(name__contains=tag_clean(qry))])) for qry in query_list]
                    results = list(set(qs[0]).intersection(*qs))
                except Exception, e:
                    results = None
            else:
                try:
                    results = list(chain(Skill.objects.get(name__contains=query_list[0]).profile_set.all()))
                except Exception, e:
                    if e.__class__ == Skill.MultipleObjectsReturned:
                        results = list(chain(*[skill.profile_set.all() for skill in Skill.objects.filter(name__contains=query_list[0])]))
                    else:
                        results = None
            searchform = SearchForm({'query':query})
Example #49
File: rpc.py Project: adncentral/unisubs
    def search(self, rdata, user, testing=False):
        sqs = SearchQuerySet().result_class(VideoSearchResult) \
                .models(Video)
        
        rdata['q'] = rdata['q'] or u' '
        q = rdata.get('q')

        if q:
            sqs = SearchForm.apply_query(q, sqs)
            form = SearchForm(rdata, sqs=sqs)
        else:
            form = SearchForm(rdata)
        
        if form.is_valid():
            qs = form.search_qs(sqs)
        else:
            qs = SearchQuerySet().none()    

        #result = [item.object for item in qs]
        #qs1 = Video.objects.filter(title__contains=rdata['q'])
        #for o in qs1:
        #    if not o in result:
        #        print o.title
        
        display_views = form.get_display_views()
        output = render_page(rdata.get('page', 1), qs, 20, display_views=display_views)
        output['sidebar'] = render_to_string('search/_sidebar.html', dict(form=form, rdata=rdata))
        
        if testing:
            output['sqs'] = qs
        
        return output
Example #50
File: views.py Project: wanshot/trdist
def search(request, page=1):
    paged_results = []
    count = 0
    if "page" in request.GET:
        page = request.GET["page"]

    if "keyword" in request.GET:
        form = SearchForm(request.GET)
        if form.is_valid():
            query = create_search_query(form.cleaned_data["keyword"])
            search_results = Search.objects.filter(text__search=query, status=1).order_by("update_date").all()
            if search_results:
                paginator = Paginator(search_results, settings.ITEMS_PER_PAGE)
                try:
                    paged_results = paginator.page(page)
                    count = paginator.count
                except PageNotAnInteger:
                    raise Http404
                except EmptyPage:
                    raise Http404
                else:
                    results = []
                    target_models = get_search_target_models()
                    for target in target_models:
                        model_name = target().__class__.__name__
                        target_results = target.objects.filter(
                            pk__in=[item.model_pk for item in paged_results if item.model == model_name]
                        )
                        for item in target_results:
                            results.append(item)
                    results.sort(key=lambda x: x.update_date, reverse=True)
                    paged_results.object_list = results
    else:
        form = SearchForm()
    return render_to_response(
        "search/index.html",
        {"form": form, "count": count, "results": paged_results},
        context_instance=RequestContext(request),
    )
Example #51
def home(request):
    """
        This function is called to display the Home page
        @param request : Contains the query parameters
        This function just accepts the GET method
    """
    if request.method == 'GET': # For the GET method
        form = SearchForm()  # We create an empty form
    elif request.method == 'POST':
        # create a form instance and populate it with data from the request:
        formulaire = SearchForm(request.POST)
        # check whether it's valid:
        if formulaire.is_valid():
            day_month = request.POST.get('day_month')
            day_day = request.POST.get('day_day')
            day_year = request.POST.get('day_year')
            hour_hour = request.POST.get('hour_hour')
            hour_minute = request.POST.get('hour_minute')
            station = request.POST.get('station')
            hour = "%s/%s/%s %s:%s" % (day_year, day_month, day_day, hour_hour, hour_minute)
            timestamp = date2Timestamp(hour)
            prev = previsions(timestamp, station)
    return Response(locals(), template_name='home.html') # Return the response
Example #52
File: views.py Project: fligtar/zamboni
def search(request, tag_name=None):
    # If the form is invalid we still want to have a query.
    query = request.REQUEST.get('q', '')

    search_opts = {
            'meta': ('versions', 'categories', 'tags'),
            'version': None,
            }

    form = SearchForm(request)
    form.is_valid()  # Let the form try to clean data.

    # TODO(davedash): remove this feature when we remove Application for
    # the search advanced form
    # Redirect if appid != request.APP.id

    appid = form.cleaned_data['appid']

    if request.APP.id != appid:
        new_app = amo.APP_IDS.get(appid)
        return HttpResponseRedirect(
                urlresolvers.get_app_redirect(new_app))

    category = form.cleaned_data.get('cat')

    if category == 'collections':
        return _collections(request)
    elif category == 'personas':
        return _personas(request)

    # TODO: Let's change the form values to something less gross when
    # Remora dies in a fire.
    query = form.cleaned_data['q']

    addon_type = form.cleaned_data.get('atype', 0)
    tag = tag_name if tag_name is not None else form.cleaned_data.get('tag')
    page = form.cleaned_data['page']
    last_updated = form.cleaned_data.get('lup')
    sort = form.cleaned_data.get('sort')

    search_opts['version'] = form.cleaned_data.get('lver')
    search_opts['limit'] = form.cleaned_data.get('pp', DEFAULT_NUM_RESULTS)
    search_opts['platform'] = form.cleaned_data.get('pid', amo.PLATFORM_ALL)
    search_opts['sort'] = sort
    search_opts['app'] = request.APP.id
    search_opts['offset'] = (page - 1) * search_opts['limit']

    delta_dict = {
            '1 day ago': timedelta(days=1),
            '1 week ago': timedelta(days=7),
            '1 month ago': timedelta(days=30),
            '3 months ago': timedelta(days=90),
            '6 months ago': timedelta(days=180),
            '1 year ago': timedelta(days=365),
            }

    delta = delta_dict.get(last_updated)

    if delta:
        search_opts['before'] = int(
                time.mktime((datetime.now() - delta).timetuple()))

    if category:
        search_opts['category'] = category
    elif addon_type:
        search_opts['type'] = addon_type

    search_opts['tag'] = tag

    client = SearchClient()

    try:
        results = client.query(query, **search_opts)
    except SearchError:
        return jingo.render(request, 'search/down.html', locals(), status=503)

    version_filters = client.meta['versions']

    # If we are filtering by a version, make sure we explicitly list it.
    if search_opts['version']:
        try:
            version_filters += (version_int(search_opts['version']),)
        except UnicodeEncodeError:
            pass  # We didn't want to list you anyway.

    versions = _get_versions(request, client.meta['versions'],
                             search_opts['version'])
    categories = _get_categories(request, client.meta['categories'],
                                 addon_type, category)
    tags = _get_tags(request, client.meta['tags'], tag)
    sort_tabs = _get_sorts(request, sort)

    pager = amo.utils.paginate(request, results, search_opts['limit'])

    return jingo.render(request, 'search/results.html', {
                'pager': pager, 'query': query, 'tag': tag,
                'versions': versions, 'categories': categories, 'tags': tags,
                'sort_tabs': sort_tabs, 'sort': sort})
Example #53
File: views.py Project: GPHemsley/kuma
def search(request):
    """Performs search or displays the search form."""

    # JSON-specific variables
    is_json = (request.GET.get('format') == 'json')
    callback = request.GET.get('callback', '').strip()
    mimetype = 'application/x-javascript' if callback else 'application/json'

    # Search "Expires" header format
    expires_fmt = '%A, %d %B %Y %H:%M:%S GMT'

    # Check callback is valid
    if is_json and callback and not jsonp_is_valid(callback):
        return HttpResponse(
            json.dumps({'error': _('Invalid callback function.')}),
            mimetype=mimetype, status=400)

    language = locale_or_default(request.GET.get('language', request.locale))
    r = request.GET.copy()
    a = request.GET.get('a', '0')

    # Search default values
    try:
        category = map(int, r.getlist('category')) or \
                   settings.SEARCH_DEFAULT_CATEGORIES
    except ValueError:
        category = settings.SEARCH_DEFAULT_CATEGORIES
    r.setlist('category', [x for x in category if x > 0])
    exclude_category = [abs(x) for x in category if x < 0]

    try:
        fx = map(int, r.getlist('fx')) or [v.id for v in FIREFOX_VERSIONS]
    except ValueError:
        fx = [v.id for v in FIREFOX_VERSIONS]
    #r.setlist('fx', fx)

    try:
        os = map(int, r.getlist('os')) or [o.id for o in OPERATING_SYSTEMS]
    except ValueError:
        os = [o.id for o in OPERATING_SYSTEMS]
    #r.setlist('os', os)

    # Basic form
    if a == '0':
        r['w'] = r.get('w', constants.WHERE_BASIC)
    # Advanced form
    if a == '2':
        r['language'] = language
        r['a'] = '1'

    search_form = SearchForm(r)

    if not search_form.is_valid() or a == '2':
        if is_json:
            return HttpResponse(
                json.dumps({'error': _('Invalid search data.')}),
                mimetype=mimetype,
                status=400)

        search_ = jingo.render(request, 'search/form.html',
                            {'advanced': a, 'request': request,
                             'search_form': search_form})
        search_['Cache-Control'] = 'max-age=%s' % \
                                   (settings.SEARCH_CACHE_PERIOD * 60)
        search_['Expires'] = (datetime.utcnow() +
                              timedelta(
                                minutes=settings.SEARCH_CACHE_PERIOD)) \
                              .strftime(expires_fmt)
        return search_

    cleaned = search_form.cleaned_data
    search_locale = (sphinx_locale(language),)

    page = max(smart_int(request.GET.get('page')), 1)
    offset = (page - 1) * settings.SEARCH_RESULTS_PER_PAGE

    # get language name for display in template
    lang = language.lower()
    if settings.LANGUAGES.get(lang):
        lang_name = settings.LANGUAGES[lang]
    else:
        lang_name = ''

    documents = []
    filters_w = []
    filters_q = []
    filters_f = []

    # wiki filters
    # Version and OS filters
    if cleaned['fx']:
        filters_w.append({
            'filter': 'fx',
            'value': cleaned['fx'],
        })

    if cleaned['os']:
        filters_w.append({
            'filter': 'os',
            'value': cleaned['os'],
        })

    # Category filter
    if cleaned['category']:
        filters_w.append({
            'filter': 'category',
            'value': cleaned['category'],
        })

    if exclude_category:
        filters_w.append({
            'filter': 'category',
            'value': exclude_category,
            'exclude': True,
        })

    # Locale filter
    filters_w.append({
        'filter': 'locale',
        'value': search_locale,
    })

    # Tags filter
    tags = [crc32(t.strip()) for t in cleaned['tags'].split()]
    if tags:
        for t in tags:
            filters_w.append({
                'filter': 'tag',
                'value': (t,),
                })
    # End of wiki filters

    """
    # Support questions specific filters
    if cleaned['w'] & constants.WHERE_SUPPORT:

        # Solved is set by default if using basic search
        if a == '0' and not cleaned['is_solved']:
            cleaned['is_solved'] = constants.TERNARY_YES

        # These filters are ternary, they can be either YES, NO, or OFF
        toggle_filters = ('is_locked', 'is_solved', 'has_answers',
                          'has_helpful')
        for filter_name in toggle_filters:
            if cleaned[filter_name] == constants.TERNARY_YES:
                filters_q.append({
                    'filter': filter_name,
                    'value': (True,),
                })
            if cleaned[filter_name] == constants.TERNARY_NO:
                filters_q.append({
                    'filter': filter_name,
                    'value': (False,),
                })

        if cleaned['asked_by']:
            filters_q.append({
                'filter': 'question_creator',
                'value': (crc32(cleaned['asked_by']),),
            })

        if cleaned['answered_by']:
            filters_q.append({
                'filter': 'answer_creator',
                'value': (crc32(cleaned['answered_by']),),
            })

        q_tags = [crc32(t.strip()) for t in cleaned['q_tags'].split()]
        if q_tags:
            for t in q_tags:
                filters_q.append({
                    'filter': 'tag',
                    'value': (t,),
                    })

    # Discussion forum specific filters
    if cleaned['w'] & constants.WHERE_DISCUSSION:
        if cleaned['author']:
            filters_f.append({
                'filter': 'author_ord',
                'value': (crc32(cleaned['author']),),
            })

        if cleaned['thread_type']:
            if constants.DISCUSSION_STICKY in cleaned['thread_type']:
                filters_f.append({
                    'filter': 'is_sticky',
                    'value': (1,),
                })

            if constants.DISCUSSION_LOCKED in cleaned['thread_type']:
                filters_f.append({
                    'filter': 'is_locked',
                    'value': (1,),
                })

        if cleaned['forum']:
            filters_f.append({
                'filter': 'forum_id',
                'value': cleaned['forum'],
            })
    """
    # Filters common to support and discussion forums
    # Created filter
    unix_now = int(time.time())
    interval_filters = (
#        ('created', cleaned['created'], cleaned['created_date']),
        ('updated', cleaned['updated'], cleaned['updated_date']),
#        ('question_votes', cleaned['num_voted'], cleaned['num_votes'])
    )
    for filter_name, filter_option, filter_date in interval_filters:
        if filter_option == constants.INTERVAL_BEFORE:
            before = {
                'range': True,
                'filter': filter_name,
                'min': 0,
                'max': max(filter_date, 0),
            }
            if filter_name != 'question_votes':
                filters_f.append(before)
            filters_q.append(before)
        elif filter_option == constants.INTERVAL_AFTER:
            after = {
                'range': True,
                'filter': filter_name,
                'min': min(filter_date, unix_now),
                'max': unix_now,
            }
            if filter_name != 'question_votes':
                filters_f.append(after)
            filters_q.append(after)

    sortby = smart_int(request.GET.get('sortby'))
    try:
        if cleaned['w'] & constants.WHERE_WIKI:
            wc = WikiClient()  # Wiki SearchClient instance
            # Execute the query and append to documents
            documents += wc.query(cleaned['q'], filters_w)

        if cleaned['w'] & constants.WHERE_SUPPORT:
            qc = QuestionsClient()  # Support question SearchClient instance

            # Sort results by
            try:
                qc.set_sort_mode(constants.SORT_QUESTIONS[sortby][0],
                                 constants.SORT_QUESTIONS[sortby][1])
            except IndexError:
                pass

            documents += qc.query(cleaned['q'], filters_q)

        if cleaned['w'] & constants.WHERE_DISCUSSION:
            dc = DiscussionClient()  # Discussion forums SearchClient instance

            # Sort results by
            try:
                dc.groupsort = constants.GROUPSORT[sortby]
            except IndexError:
                pass

            documents += dc.query(cleaned['q'], filters_f)

    except SearchError:
        if is_json:
            return HttpResponse(json.dumps({'error':
                                             _('Search Unavailable')}),
                                mimetype=mimetype, status=503)

        return jingo.render(request, 'search/down.html', {}, status=503)

    pages = paginate(request, documents, settings.SEARCH_RESULTS_PER_PAGE)

    results = []
    for i in range(offset, offset + settings.SEARCH_RESULTS_PER_PAGE):
        try:
            if documents[i]['attrs'].get('category', False) != False:
                wiki_page = Document.objects.get(pk=documents[i]['id'])
                summary = wiki_page.current_revision.summary

                result = {'search_summary': summary,
                          'url': wiki_page.get_absolute_url(),
                          'title': wiki_page.title,
                          'type': 'document', }
                results.append(result)
            elif documents[i]['attrs'].get('question_creator', False) != False:
                question = Question.objects.get(
                    pk=documents[i]['attrs']['question_id'])

                excerpt = qc.excerpt(question.content, cleaned['q'])
                summary = jinja2.Markup(excerpt)

                result = {'search_summary': summary,
                          'url': question.get_absolute_url(),
                          'title': question.title,
                          'type': 'question', }
                results.append(result)
            else:
                thread = Thread.objects.get(
                    pk=documents[i]['attrs']['thread_id'])
                post = Post.objects.get(pk=documents[i]['id'])

                excerpt = dc.excerpt(post.content, cleaned['q'])
                summary = jinja2.Markup(excerpt)

                result = {'search_summary': summary,
                          'url': thread.get_absolute_url(),
                          'title': thread.title,
                          'type': 'thread', }
                results.append(result)
        except IndexError:
            break
        except ObjectDoesNotExist:
            continue

    items = [(k, v) for k in search_form.fields for
             v in r.getlist(k) if v and k != 'a']
    items.append(('a', '2'))

    refine_query = u'?%s' % urlencode(items)

    if is_json:
        data = {}
        data['results'] = results
        data['total'] = len(results)
        data['query'] = cleaned['q']
        if not results:
            data['message'] = _('No pages matched the search criteria')
        json_data = json.dumps(data)
        if callback:
            json_data = callback + '(' + json_data + ');'

        return HttpResponse(json_data, mimetype=mimetype)

    results_ = jingo.render(request, 'search/results.html',
        {'num_results': len(documents), 'results': results, 'q': cleaned['q'],
         'pages': pages, 'w': cleaned['w'], 'refine_query': refine_query,
         'search_form': search_form, 'lang_name': lang_name, })
    results_['Cache-Control'] = 'max-age=%s' % \
                                (settings.SEARCH_CACHE_PERIOD * 60)
    results_['Expires'] = (datetime.utcnow() +
                           timedelta(minutes=settings.SEARCH_CACHE_PERIOD)) \
                           .strftime(expires_fmt)
    return results_
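
The views in this listing lean on a small smart_int helper that never appears in the excerpts. A minimal sketch of what such a helper could look like, assuming a fallback-to-zero contract (the signature is an assumption, not taken from the listing):

def smart_int(value, fallback=0):
    """Parse value as an int, returning fallback for None or junk input.

    Hypothetical sketch: the views call smart_int(request.GET.get('page')),
    so a missing or malformed 'page' parameter must not raise.
    """
    try:
        return int(value)
    except (TypeError, ValueError):
        return fallback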
Example #54
def search_with_sphinx(request, template=None):
    """Sphinx-specific search view"""

    # Time ES and Sphinx separately. See bug 723930.
    # TODO: Remove this once Sphinx is gone.
    start = time.time()

    # JSON-specific variables
    is_json = (request.GET.get('format') == 'json')
    callback = request.GET.get('callback', '').strip()
    mimetype = 'application/x-javascript' if callback else 'application/json'

    if waffle.flag_is_active(request, 'elasticsearch'):
        engine = 'elastic'
    else:
        engine = 'sphinx'

    # Search "Expires" header format
    expires_fmt = '%A, %d %B %Y %H:%M:%S GMT'

    # Check callback is valid
    if is_json and callback and not jsonp_is_valid(callback):
        return HttpResponse(
            json.dumps({'error': _('Invalid callback function.')}),
            mimetype=mimetype, status=400)

    language = locale_or_default(request.GET.get('language', request.locale))
    r = request.GET.copy()
    a = request.GET.get('a', '0')

    # Search default values
    try:
        category = map(int, r.getlist('category')) or \
                   settings.SEARCH_DEFAULT_CATEGORIES
    except ValueError:
        category = settings.SEARCH_DEFAULT_CATEGORIES
    r.setlist('category', category)

    # Basic form
    if a == '0':
        r['w'] = r.get('w', constants.WHERE_BASIC)
    # Advanced form
    if a == '2':
        r['language'] = language
        r['a'] = '1'

    # TODO: Rewrite so SearchForm is unbound initially and we can use `initial`
    # on the form fields.
    if 'include_archived' not in r:
        r['include_archived'] = False

    search_form = SearchForm(r)

    if not search_form.is_valid() or a == '2':
        if is_json:
            return HttpResponse(
                json.dumps({'error': _('Invalid search data.')}),
                mimetype=mimetype,
                status=400)

        t = template if request.MOBILE else 'search/form.html'
        search_ = jingo.render(request, t,
                               {'advanced': a, 'request': request,
                                'search_form': search_form})
        search_['Cache-Control'] = 'max-age=%s' % \
                                   (settings.SEARCH_CACHE_PERIOD * 60)
        search_['Expires'] = (datetime.utcnow() +
                              timedelta(
                                minutes=settings.SEARCH_CACHE_PERIOD)) \
                              .strftime(expires_fmt)
        return search_

    cleaned = search_form.cleaned_data

    page = max(smart_int(request.GET.get('page')), 1)
    offset = (page - 1) * settings.SEARCH_RESULTS_PER_PAGE

    # get language name for display in template
    lang = language.lower()
    if settings.LANGUAGES.get(lang):
        lang_name = settings.LANGUAGES[lang]
    else:
        lang_name = ''

    wiki_s = wiki_searcher(request)
    question_s = question_searcher(request)
    discussion_s = discussion_searcher(request)

    documents = []

    # wiki filters
    # Category filter
    if cleaned['category']:
        wiki_s = wiki_s.filter(category__in=cleaned['category'])

    # Locale filter
    wiki_s = wiki_s.filter(locale=language)

    # Product filter
    products = cleaned['product']
    for p in products:
        wiki_s = wiki_s.filter(tag=p)

    # Tags filter
    tags = [t.strip() for t in cleaned['tags'].split()]
    for t in tags:
        wiki_s = wiki_s.filter(tag=t)

    # Archived bit
    if a == '0' and not cleaned['include_archived']:
        # Default to NO for basic search:
        cleaned['include_archived'] = False
    if not cleaned['include_archived']:
        wiki_s = wiki_s.filter(is_archived=False)
    # End of wiki filters

    # Support questions specific filters
    if cleaned['w'] & constants.WHERE_SUPPORT:

        # Solved is set by default if using basic search
        if a == '0' and not cleaned['has_helpful']:
            cleaned['has_helpful'] = constants.TERNARY_YES

        # These filters are ternary, they can be either YES, NO, or OFF
        ternary_filters = ('is_locked', 'is_solved', 'has_answers',
                           'has_helpful')
        d = dict((filter_name, _ternary_filter(cleaned[filter_name]))
                 for filter_name in ternary_filters
                 if cleaned[filter_name])
        if d:
            question_s = question_s.filter(**d)

        if cleaned['asked_by']:
            question_s = question_s.filter(
                question_creator=cleaned['asked_by'])

        if cleaned['answered_by']:
            question_s = question_s.filter(
                answer_creator=cleaned['answered_by'])

        q_tags = [t.strip() for t in cleaned['q_tags'].split()]
        for t in q_tags:
            question_s = question_s.filter(tag=t)

    # Discussion forum specific filters
    if cleaned['w'] & constants.WHERE_DISCUSSION:
        if cleaned['author']:
            discussion_s = discussion_s.filter(author_ord=cleaned['author'])

        if cleaned['thread_type']:
            if constants.DISCUSSION_STICKY in cleaned['thread_type']:
                discussion_s = discussion_s.filter(is_sticky=1)

            if constants.DISCUSSION_LOCKED in cleaned['thread_type']:
                discussion_s = discussion_s.filter(is_locked=1)

        if cleaned['forum']:
            discussion_s = discussion_s.filter(forum_id__in=cleaned['forum'])

    # Filters common to support and discussion forums
    # Created filter
    unix_now = int(time.time())
    interval_filters = (
        ('created', cleaned['created'], cleaned['created_date']),
        ('updated', cleaned['updated'], cleaned['updated_date']),
        ('question_votes', cleaned['num_voted'], cleaned['num_votes']))
    for filter_name, filter_option, filter_date in interval_filters:
        if filter_option == constants.INTERVAL_BEFORE:
            before = {filter_name + '__gte': 0,
                      filter_name + '__lte': max(filter_date, 0)}

            if filter_name != 'question_votes':
                discussion_s = discussion_s.filter(**before)
            question_s = question_s.filter(**before)
        elif filter_option == constants.INTERVAL_AFTER:
            after = {filter_name + '__gte': min(filter_date, unix_now),
                     filter_name + '__lte': unix_now}

            if filter_name != 'question_votes':
                discussion_s = discussion_s.filter(**after)
            question_s = question_s.filter(**after)

    sortby = smart_int(request.GET.get('sortby'))
    try:
        max_results = settings.SEARCH_MAX_RESULTS
        cleaned_q = cleaned['q']

        if cleaned['w'] & constants.WHERE_WIKI:
            if cleaned_q:
                wiki_s = wiki_s.query(cleaned_q)
            wiki_s = wiki_s[:max_results]
            # Execute the query and append to documents
            documents += [('wiki', (pair[0], pair[1]))
                          for pair in enumerate(wiki_s.object_ids())]

        if cleaned['w'] & constants.WHERE_SUPPORT:
            # Sort results by
            try:
                question_s = question_s.order_by(
                    *constants.SORT_QUESTIONS[sortby])
            except IndexError:
                pass

            if engine == 'elastic':
                highlight_fields = ['title', 'question_content',
                                    'answer_content']
            else:
                highlight_fields = ['content']

            question_s = question_s.highlight(
                *highlight_fields,
                before_match='<b>',
                after_match='</b>',
                limit=settings.SEARCH_SUMMARY_LENGTH)

            if cleaned_q:
                question_s = question_s.query(cleaned_q)
            question_s = question_s[:max_results]
            documents += [('question', (pair[0], pair[1]))
                          for pair in enumerate(question_s.object_ids())]

        if cleaned['w'] & constants.WHERE_DISCUSSION:
            # Sort results by
            try:
                # Note that the first attribute needs to be the same
                # here and in forums/models.py discussion_search.
                discussion_s = discussion_s.group_by(
                    'thread_id', constants.GROUPSORT[sortby])
            except IndexError:
                pass

            discussion_s = discussion_s.highlight(
                'content',
                before_match='<b>',
                after_match='</b>',
                limit=settings.SEARCH_SUMMARY_LENGTH)

            if cleaned_q:
                discussion_s = discussion_s.query(cleaned_q)
            discussion_s = discussion_s[:max_results]
            documents += [('discussion', (pair[0], pair[1]))
                          for pair in enumerate(discussion_s.object_ids())]

        pages = paginate(request, documents, settings.SEARCH_RESULTS_PER_PAGE)

        # Build a dict of { type_ -> list of indexes } for the specific
        # docs that we're going to display on this page.  This makes it
        # easy for us to slice the appropriate search Ss so we're limiting
        # our db hits to just the items we're showing.
        documents_dict = {}
        for doc in documents[offset:offset + settings.SEARCH_RESULTS_PER_PAGE]:
            documents_dict.setdefault(doc[0], []).append(doc[1][0])

        docs_for_page = []
        for kind, search_s in [('wiki', wiki_s),
                                ('question', question_s),
                                ('discussion', discussion_s)]:
            if kind not in documents_dict:
                continue

            # documents_dict[type_] is a list of indexes--one for each
            # object id search result for that type_.  We use the values
            # at the beginning and end of the list for slice boundaries.
            begin = documents_dict[kind][0]
            end = documents_dict[kind][-1] + 1

            search_s = search_s[begin:end]

            if engine == 'elastic':
                # If we're doing elasticsearch, then we need to update
                # the _s variables to point to the sliced versions of
                # S so that, when we iterate over them in the
                # following list comp, we hang onto the version that
                # does the query, so we can call excerpt() on it
                # later.
                #
                # We only need to do this with elasticsearch.  For Sphinx,
                # search_s at this point is an ObjectResults and not an S
                # because we've already acquired object_ids on it.  Thus
                # if we update the _s variables, we'd be pointing to the
                # ObjectResults and not the S and then excerpting breaks.
                #
                # Ugh.
                if kind == 'wiki':
                    wiki_s = search_s
                elif kind == 'question':
                    question_s = search_s
                elif kind == 'discussion':
                    discussion_s = search_s

            docs_for_page += [(kind, doc) for doc in search_s]

        results = []
        for i, docinfo in enumerate(docs_for_page):
            rank = i + offset
            type_, doc = docinfo
            try:
                if type_ == 'wiki':
                    summary = doc.current_revision.summary
                    result = {
                        'url': doc.get_absolute_url(),
                        'title': doc.title,
                        'type': 'document',
                        'object': doc}
                elif type_ == 'question':
                    summary = _build_excerpt(question_s, doc)
                    result = {
                        'url': doc.get_absolute_url(),
                        'title': doc.title,
                        'type': 'question',
                        'object': doc,
                        'is_solved': doc.is_solved,
                        'num_answers': doc.num_answers,
                        'num_votes': doc.num_votes,
                        'num_votes_past_week': doc.num_votes_past_week}
                else:
                    if engine == 'elastic':
                        thread = doc
                    else:
                        thread = Thread.objects.get(pk=doc.thread_id)

                    summary = _build_excerpt(discussion_s, doc)
                    result = {
                        'url': thread.get_absolute_url(),
                        'title': thread.title,
                        'type': 'thread',
                        'object': thread}
                result['search_summary'] = summary
                result['rank'] = rank
                results.append(result)
            except IndexError:
                break
            except ObjectDoesNotExist:
                continue

    except (SearchError, ESTimeoutError, ESMaxRetryError, ESException), exc:
        # Handle timeout and all those other transient errors with a
        # "Search Unavailable" rather than a Django error page.
        if is_json:
            return HttpResponse(json.dumps({'error':
                                             _('Search Unavailable')}),
                                mimetype=mimetype, status=503)

        if isinstance(exc, SearchError):
            statsd.incr('search.%s.searcherror' % engine)
        elif isinstance(exc, ESTimeoutError):
            statsd.incr('search.%s.timeouterror' % engine)
        elif isinstance(exc, ESMaxRetryError):
            statsd.incr('search.%s.maxretryerror' % engine)
        elif isinstance(exc, ESException):
            statsd.incr('search.%s.elasticsearchexception' % engine)

        t = 'search/mobile/down.html' if request.MOBILE else 'search/down.html'
        return jingo.render(request, t, {'q': cleaned['q']}, status=503)
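
Example #54 and the examples that follow filter on _ternary_filter(...), a helper the listing never shows. A plausible minimal sketch, assuming it simply maps the TERNARY_YES/TERNARY_NO constants used above onto booleans (hypothetical, not the project's actual code):

def _ternary_filter(ternary_value):
    # Hypothetical sketch: callers only pass TERNARY_YES or TERNARY_NO
    # (OFF values are skipped before the call), so a boolean is enough.
    return ternary_value == constants.TERNARY_YES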
Example #55
File: views.py  Project: klrmn/kitsune
def search(request, template=None):
    """ES-specific search view"""

    if (waffle.flag_is_active(request, 'esunified') or
        request.GET.get('esunified')):
        return search_with_es_unified(request, template)

    start = time.time()

    # JSON-specific variables
    is_json = (request.GET.get('format') == 'json')
    callback = request.GET.get('callback', '').strip()
    mimetype = 'application/x-javascript' if callback else 'application/json'

    # Search "Expires" header format
    expires_fmt = '%A, %d %B %Y %H:%M:%S GMT'

    # Check callback is valid
    if is_json and callback and not jsonp_is_valid(callback):
        return HttpResponse(
            json.dumps({'error': _('Invalid callback function.')}),
            mimetype=mimetype, status=400)

    language = locale_or_default(request.GET.get('language', request.locale))
    r = request.GET.copy()
    a = request.GET.get('a', '0')

    # Search default values
    try:
        category = (map(int, r.getlist('category')) or
                    settings.SEARCH_DEFAULT_CATEGORIES)
    except ValueError:
        category = settings.SEARCH_DEFAULT_CATEGORIES
    r.setlist('category', category)

    # Basic form
    if a == '0':
        r['w'] = r.get('w', constants.WHERE_BASIC)
    # Advanced form
    if a == '2':
        r['language'] = language
        r['a'] = '1'

    # TODO: Rewrite so SearchForm is unbound initially and we can use
    # `initial` on the form fields.
    if 'include_archived' not in r:
        r['include_archived'] = False

    search_form = SearchForm(r)

    if not search_form.is_valid() or a == '2':
        if is_json:
            return HttpResponse(
                json.dumps({'error': _('Invalid search data.')}),
                mimetype=mimetype,
                status=400)

        t = template if request.MOBILE else 'search/form.html'
        search_ = jingo.render(request, t,
                               {'advanced': a, 'request': request,
                                'search_form': search_form})
        search_['Cache-Control'] = 'max-age=%s' % \
                                   (settings.SEARCH_CACHE_PERIOD * 60)
        search_['Expires'] = (datetime.utcnow() +
                              timedelta(
                                minutes=settings.SEARCH_CACHE_PERIOD)) \
                              .strftime(expires_fmt)
        return search_

    cleaned = search_form.cleaned_data

    page = max(smart_int(request.GET.get('page')), 1)
    offset = (page - 1) * settings.SEARCH_RESULTS_PER_PAGE

    lang = language.lower()
    if settings.LANGUAGES.get(lang):
        lang_name = settings.LANGUAGES[lang]
    else:
        lang_name = ''

    wiki_s = Document.search()
    question_s = Question.search()
    discussion_s = Thread.search()

    # wiki filters
    # Category filter
    if cleaned['category']:
        wiki_s = wiki_s.filter(document_category__in=cleaned['category'])

    # Locale filter
    wiki_s = wiki_s.filter(document_locale=language)

    # Product filter
    products = cleaned['product']
    for p in products:
        wiki_s = wiki_s.filter(document_tag=p)

    # Tags filter
    tags = [t.strip() for t in cleaned['tags'].split()]
    for t in tags:
        wiki_s = wiki_s.filter(document_tag=t)

    # Archived bit
    if a == '0' and not cleaned['include_archived']:
        # Default to NO for basic search:
        cleaned['include_archived'] = False
    if not cleaned['include_archived']:
        wiki_s = wiki_s.filter(document_is_archived=False)
    # End of wiki filters

    # Support questions specific filters
    if cleaned['w'] & constants.WHERE_SUPPORT:

        # Solved is set by default if using basic search
        if a == '0' and not cleaned['has_helpful']:
            cleaned['has_helpful'] = constants.TERNARY_YES

        # These filters are ternary, they can be either YES, NO, or OFF
        ternary_filters = ('is_locked', 'is_solved', 'has_answers',
                           'has_helpful')
        d = dict(('question_%s' % filter_name,
                  _ternary_filter(cleaned[filter_name]))
                 for filter_name in ternary_filters if cleaned[filter_name])
        if d:
            question_s = question_s.filter(**d)

        if cleaned['asked_by']:
            question_s = question_s.filter(
                question_creator=cleaned['asked_by'])

        if cleaned['answered_by']:
            question_s = question_s.filter(
                question_answer_creator=cleaned['answered_by'])

        q_tags = [t.strip() for t in cleaned['q_tags'].split(',')]
        for t in q_tags:
            if t:
                question_s = question_s.filter(question_tag=t)

    # Discussion forum specific filters
    if cleaned['w'] & constants.WHERE_DISCUSSION:
        if cleaned['author']:
            discussion_s = discussion_s.filter(
                post_author_ord=cleaned['author'])

        if cleaned['thread_type']:
            if constants.DISCUSSION_STICKY in cleaned['thread_type']:
                discussion_s = discussion_s.filter(post_is_sticky=1)

            if constants.DISCUSSION_LOCKED in cleaned['thread_type']:
                discussion_s = discussion_s.filter(post_is_locked=1)

        if cleaned['forum']:
            discussion_s = discussion_s.filter(
                post_forum_id__in=cleaned['forum'])

    # Filters common to support and discussion forums
    # Created filter
    unix_now = int(time.time())
    interval_filters = (
        ('created', cleaned['created'], cleaned['created_date']),
        ('updated', cleaned['updated'], cleaned['updated_date']))
    for filter_name, filter_option, filter_date in interval_filters:
        if filter_option == constants.INTERVAL_BEFORE:
            before = {filter_name + '__gte': 0,
                      filter_name + '__lte': max(filter_date, 0)}

            discussion_s = discussion_s.filter(**before)
            question_s = question_s.filter(**before)
        elif filter_option == constants.INTERVAL_AFTER:
            after = {filter_name + '__gte': min(filter_date, unix_now),
                     filter_name + '__lte': unix_now}

            discussion_s = discussion_s.filter(**after)
            question_s = question_s.filter(**after)

    # Note: num_voted (with a d) is a different field than num_votes
    # (with an s). The former is a dropdown and the latter is an
    # integer value.
    if cleaned['num_voted'] == constants.INTERVAL_BEFORE:
        question_s = question_s.filter(
            question_num_votes__lte=max(cleaned['num_votes'], 0))
    elif cleaned['num_voted'] == constants.INTERVAL_AFTER:
        question_s = question_s.filter(
            question_num_votes__gte=cleaned['num_votes'])

    # Done with all the filtery stuff--time to generate results

    documents = ComposedList()
    sortby = smart_int(request.GET.get('sortby'))
    try:
        max_results = settings.SEARCH_MAX_RESULTS
        cleaned_q = cleaned['q']

        if cleaned['w'] & constants.WHERE_WIKI:
            if cleaned_q:
                wiki_s = wiki_s.query(cleaned_q)

            # For a front-page non-advanced search, we want to cap the kb
            # at 10 results.
            if a == '0':
                wiki_max_results = 10
            else:
                wiki_max_results = max_results
            documents.set_count(('wiki', wiki_s),
                                min(wiki_s.count(), wiki_max_results))

        if cleaned['w'] & constants.WHERE_SUPPORT:
            # Sort results by
            try:
                question_s = question_s.order_by(
                    *constants.SORT_QUESTIONS[sortby])
            except IndexError:
                pass

            question_s = question_s.highlight(
                'question_title', 'question_content',
                'question_answer_content',
                before_match='<b>',
                after_match='</b>',
                limit=settings.SEARCH_SUMMARY_LENGTH)

            if cleaned_q:
                question_s = question_s.query(cleaned_q)
            documents.set_count(('question', question_s),
                                min(question_s.count(), max_results))

        if cleaned['w'] & constants.WHERE_DISCUSSION:
            discussion_s = discussion_s.highlight(
                'discussion_content',
                before_match='<b>',
                after_match='</b>',
                limit=settings.SEARCH_SUMMARY_LENGTH)

            if cleaned_q:
                discussion_s = discussion_s.query(cleaned_q)
            documents.set_count(('forum', discussion_s),
                                min(discussion_s.count(), max_results))

        results_per_page = settings.SEARCH_RESULTS_PER_PAGE
        pages = paginate(request, documents, results_per_page)
        num_results = len(documents)

        # Get the documents we want to show and add them to
        # docs_for_page.
        documents = documents[offset:offset + results_per_page]
        docs_for_page = []
        for (kind, search_s), bounds in documents:
            search_s = search_s.values_dict()[bounds[0]:bounds[1]]
            docs_for_page += [(kind, doc) for doc in search_s]

        results = []
        for i, docinfo in enumerate(docs_for_page):
            rank = i + offset
            # Type here is something like 'wiki', ... while doc here
            # is an ES result document.
            type_, doc = docinfo

            if type_ == 'wiki':
                summary = doc['document_summary']
                result = {
                    'url': doc['url'],
                    'title': doc['document_title'],
                    'type': 'document',
                    'object': ObjectDict(doc)}
            elif type_ == 'question':
                summary = _build_es_excerpt(doc)
                result = {
                    'url': doc['url'],
                    'title': doc['question_title'],
                    'type': 'question',
                    'object': ObjectDict(doc),
                    'is_solved': doc['question_is_solved'],
                    'num_answers': doc['question_num_answers'],
                    'num_votes': doc['question_num_votes'],
                    'num_votes_past_week': doc['question_num_votes_past_week']}
            else:
                summary = _build_es_excerpt(doc)
                result = {
                    'url': doc['url'],
                    'title': doc['post_title'],
                    'type': 'thread',
                    'object': ObjectDict(doc)}
            result['search_summary'] = summary
            result['rank'] = rank
            result['score'] = doc._score
            results.append(result)

    except (ESTimeoutError, ESMaxRetryError, ESException), exc:
        # Handle timeout and all those other transient errors with a
        # "Search Unavailable" rather than a Django error page.
        if is_json:
            return HttpResponse(json.dumps({'error':
                                             _('Search Unavailable')}),
                                mimetype=mimetype, status=503)

        if isinstance(exc, ESTimeoutError):
            statsd.incr('search.es.timeouterror')
        elif isinstance(exc, ESMaxRetryError):
            statsd.incr('search.es.maxretryerror')
        elif isinstance(exc, ESException):
            statsd.incr('search.es.elasticsearchexception')

        t = 'search/mobile/down.html' if request.MOBILE else 'search/down.html'
        return jingo.render(request, t, {'q': cleaned['q']}, status=503)
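
Several of the ES-backed examples build their summaries with _build_es_excerpt(doc), which is also not part of the listing. A rough sketch, under the assumption that the search library exposes highlight fragments on the result as a _highlight dict of field -> snippet list (both the attribute name and the joining behavior are guesses):

from itertools import chain

import jinja2


def _build_es_excerpt(result):
    # Hypothetical sketch: join whatever highlight fragments came back into
    # one markup-safe summary string, mirroring how the result loops above
    # drop the value straight into result['search_summary'].
    fragments = (m.strip() for m in chain(*result._highlight.values()) if m)
    return jinja2.Markup(u'...'.join(fragments))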
Example #56
def home(request):
    title = "Search text"
    form = SearchForm(request.POST or None)

    context = {
        "title": title,
        "form": form
    }

    if form.is_valid():
        instance = form.save(commit=False)
        instance.save()

        message = "You will get search results for: %s via %s soon" % (instance.searching_text, instance.email)
        context = {
            "title": "Thank you",
            "message": message,
        }

        with ix.searcher() as searcher:
            query = QueryParser("text", ix.schema).parse(instance.searching_text)
            # Get a collector object
            c = searcher.collector(limit=None)
            # Wrap it in a TimeLimitedCollector and set the time limit to 10 seconds
            tlc = TimeLimitCollector(c, timelimit=instance.t_limit)
            # Try searching
            try:
                searcher.search_with_collector(query, tlc)
            except TimeLimit:
                pass
            # You can still get partial results from the collector
            results = tlc.results()
            lst = []
            for i in range(0, len(results)):
                st = ''
                st += 'Book: '
                st += results[i]["book"]
                st += ', chapter: '
                st += results[i]["chapter"]
                st += ', page: '
                st += str(results[i]["page"])
                lst.append(st)



        # with ix.searcher() as searcher:
        #     query = QueryParser("text", ix.schema).parse(instance.searching_text)
        #     results = searcher.search(query)
        #     lst = []
        #     for i in range(0, len(results)):
        #         st = ''
        #         st += 'Book: '
        #         st += results[i]["book"]
        #         st += ', chapter: '
        #         st += results[i]["chapter"]
        #         st += ', page: '
        #         st += str(results[i]["page"])
        #         lst.append(st)

        logging.basicConfig(format=u'%(levelname)-8s [%(asctime)s] %(message)s', level=logging.DEBUG,
                            filename=u'mylog.log')
        time_diff = datetime.datetime.now(timezone.utc) - instance.timestamp
        logging.info(time_diff.total_seconds())

        subject = 'Search results for: ' + form.cleaned_data.get('searching_text')
        message = 'Search results for: ' + form.cleaned_data.get('searching_text') + '\n'
        for i in range(0, len(lst)):
            message += str(i+1)+') '
            message += lst[i]
            message += '\n'
        from_email = settings.EMAIL_HOST_USER
        to_email = form.cleaned_data.get('email')
        send_mail(subject,
                  message,
                  from_email,
                  [to_email],
                  fail_silently=True)
    return render(request, "home.html", context)
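
Example #56 assumes a pre-built Whoosh index ix with book, chapter, page and text fields. A minimal sketch of how such an index might be created (the directory name and sample document are assumptions):

from whoosh.fields import NUMERIC, TEXT, Schema
from whoosh.index import create_in

# Hypothetical setup sketch: store book/chapter/page so the result loop above
# can read results[i]["book"] etc., and index the full text for querying.
schema = Schema(book=TEXT(stored=True), chapter=TEXT(stored=True),
                page=NUMERIC(stored=True), text=TEXT)
ix = create_in("indexdir", schema)  # "indexdir" must already exist on disk
writer = ix.writer()
writer.add_document(book=u"Sample book", chapter=u"1", page=1,
                    text=u"Some searchable body text.")
writer.commit()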
Example #57
File: views.py  Project: klrmn/kitsune
def search_with_es_unified(request, template=None):
    """ES-specific search view"""

    # TODO: Remove this once elastic search bucketed code is gone.
    start = time.time()

    # JSON-specific variables
    is_json = (request.GET.get('format') == 'json')
    callback = request.GET.get('callback', '').strip()
    mimetype = 'application/x-javascript' if callback else 'application/json'

    # Search "Expires" header format
    expires_fmt = '%A, %d %B %Y %H:%M:%S GMT'

    # Check callback is valid
    if is_json and callback and not jsonp_is_valid(callback):
        return HttpResponse(
            json.dumps({'error': _('Invalid callback function.')}),
            mimetype=mimetype, status=400)

    language = locale_or_default(request.GET.get('language', request.locale))
    r = request.GET.copy()
    a = request.GET.get('a', '0')

    # Search default values
    try:
        category = (map(int, r.getlist('category')) or
                    settings.SEARCH_DEFAULT_CATEGORIES)
    except ValueError:
        category = settings.SEARCH_DEFAULT_CATEGORIES
    r.setlist('category', category)

    # Basic form
    if a == '0':
        r['w'] = r.get('w', constants.WHERE_BASIC)
    # Advanced form
    if a == '2':
        r['language'] = language
        r['a'] = '1'

    # TODO: Rewrite so SearchForm is unbound initially and we can use
    # `initial` on the form fields.
    if 'include_archived' not in r:
        r['include_archived'] = False

    search_form = SearchForm(r)

    if not search_form.is_valid() or a == '2':
        if is_json:
            return HttpResponse(
                json.dumps({'error': _('Invalid search data.')}),
                mimetype=mimetype,
                status=400)

        t = template if request.MOBILE else 'search/form.html'
        search_ = jingo.render(request, t,
                               {'advanced': a, 'request': request,
                                'search_form': search_form})
        search_['Cache-Control'] = 'max-age=%s' % \
                                   (settings.SEARCH_CACHE_PERIOD * 60)
        search_['Expires'] = (datetime.utcnow() +
                              timedelta(
                                minutes=settings.SEARCH_CACHE_PERIOD)) \
                              .strftime(expires_fmt)
        return search_

    cleaned = search_form.cleaned_data

    page = max(smart_int(request.GET.get('page')), 1)
    offset = (page - 1) * settings.SEARCH_RESULTS_PER_PAGE

    lang = language.lower()
    if settings.LANGUAGES.get(lang):
        lang_name = settings.LANGUAGES[lang]
    else:
        lang_name = ''

    # Woah! object?! Yeah, so what happens is that Sphilastic is
    # really an elasticutils.S and that requires a Django ORM model
    # argument. That argument only gets used if you want object
    # results--for every hit it gets back from ES, it creates an
    # object of the type of the Django ORM model you passed in. We use
    # object here to satisfy the need for a type in the constructor
    # and make sure we don't ever ask for object results.
    searcher = Sphilastic(object)

    wiki_f = F(model='wiki_document')
    question_f = F(model='questions_question')
    discussion_f = F(model='forums_thread')

    # Start - wiki filters

    if cleaned['w'] & constants.WHERE_WIKI:
        # Category filter
        if cleaned['category']:
            wiki_f &= F(document_category__in=cleaned['category'])

        # Locale filter
        wiki_f &= F(document_locale=language)

        # Product filter
        products = cleaned['product']
        for p in products:
            wiki_f &= F(document_tag=p)

        # Tags filter
        tags = [t.strip() for t in cleaned['tags'].split()]
        for t in tags:
            wiki_f &= F(document_tag=t)

        # Archived bit
        if a == '0' and not cleaned['include_archived']:
            # Default to NO for basic search:
            cleaned['include_archived'] = False
        if not cleaned['include_archived']:
            wiki_f &= F(document_is_archived=False)

    # End - wiki filters

    # Start - support questions filters

    if cleaned['w'] & constants.WHERE_SUPPORT:

        # Solved is set by default if using basic search
        if a == '0' and not cleaned['has_helpful']:
            cleaned['has_helpful'] = constants.TERNARY_YES

        # These filters are ternary, they can be either YES, NO, or OFF
        ternary_filters = ('is_locked', 'is_solved', 'has_answers',
                           'has_helpful')
        d = dict(('question_%s' % filter_name,
                  _ternary_filter(cleaned[filter_name]))
                 for filter_name in ternary_filters if cleaned[filter_name])
        if d:
            question_f &= F(**d)

        if cleaned['asked_by']:
            question_f &= F(question_creator=cleaned['asked_by'])

        if cleaned['answered_by']:
            question_f &= F(question_answer_creator=cleaned['answered_by'])

        q_tags = [t.strip() for t in cleaned['q_tags'].split(',')]
        for t in q_tags:
            if t:
                question_f &= F(question_tag=t)

    # End - support questions filters

    # Start - discussion forum filters

    if cleaned['w'] & constants.WHERE_DISCUSSION:
        if cleaned['author']:
            discussion_f &= F(post_author_ord=cleaned['author'])

        if cleaned['thread_type']:
            if constants.DISCUSSION_STICKY in cleaned['thread_type']:
                discussion_f &= F(post_is_sticky=1)

            if constants.DISCUSSION_LOCKED in cleaned['thread_type']:
                discussion_f &= F(post_is_locked=1)

        if cleaned['forum']:
            discussion_f &= F(post_forum_id__in=cleaned['forum'])

    # End - discussion forum filters

    # Created filter
    unix_now = int(time.time())
    interval_filters = (
        ('created', cleaned['created'], cleaned['created_date']),
        ('updated', cleaned['updated'], cleaned['updated_date']))
    for filter_name, filter_option, filter_date in interval_filters:
        if filter_option == constants.INTERVAL_BEFORE:
            before = {filter_name + '__gte': 0,
                      filter_name + '__lte': max(filter_date, 0)}

            discussion_f &= F(**before)
            question_f &= F(**before)
        elif filter_option == constants.INTERVAL_AFTER:
            after = {filter_name + '__gte': min(filter_date, unix_now),
                     filter_name + '__lte': unix_now}

            discussion_f &= F(**after)
            question_f &= F(**after)

    # Note: num_voted (with a d) is a different field than num_votes
    # (with an s). The former is a dropdown and the latter is an
    # integer value.
    if cleaned['num_voted'] == constants.INTERVAL_BEFORE:
        question_f &= F(question_num_votes__lte=max(cleaned['num_votes'], 0))
    elif cleaned['num_voted'] == constants.INTERVAL_AFTER:
        question_f &= F(question_num_votes__gte=cleaned['num_votes'])

    # Done with all the filtery stuff--time to generate results

    # Combine all the filters and add to the searcher
    final_filter = F()
    if cleaned['w'] & constants.WHERE_WIKI:
        final_filter |= wiki_f

    if cleaned['w'] & constants.WHERE_SUPPORT:
        final_filter |= question_f

    if cleaned['w'] & constants.WHERE_DISCUSSION:
        final_filter |= discussion_f

    searcher = searcher.filter(final_filter)

    documents = ComposedList()
    try:
        cleaned_q = cleaned['q']

        # Set up the highlights
        searcher = searcher.highlight(
            'question_title', 'question_content', 'question_answer_content',
            'discussion_content',
            before_match='<b>',
            after_match='</b>',
            limit=settings.SEARCH_SUMMARY_LENGTH)

        # Set up weights
        searcher = searcher.weight(
            question_title__text=4, question_content__text=3,
            question_answer_content__text=3,
            post_title__text=2, post_content__text=1,
            document_title__text=6, document_content__text=1,
            document_keywords__text=4, document_summary__text=2)

        # Apply sortby, but only for advanced search for questions
        if a == '1' and cleaned['w'] & constants.WHERE_SUPPORT:
            sortby = smart_int(request.GET.get('sortby'))
            try:
                searcher = searcher.order_by(
                    *constants.SORT_QUESTIONS[sortby])
            except IndexError:
                # Skip index errors because they imply the user is
                # sending us sortby values that aren't valid.
                pass

        # Build the query
        if cleaned_q:
            query_fields = chain(*[cls.get_query_fields()
                                   for cls in get_search_models()])

            query = dict((field, cleaned_q) for field in query_fields)

            searcher = searcher.query(or_=query)

        num_results = min(searcher.count(), settings.SEARCH_MAX_RESULTS)

        # TODO - Can ditch the ComposedList here, but we need
        # something that paginate can use to figure out the paging.
        documents = ComposedList()
        documents.set_count(('results', searcher), num_results)

        results_per_page = settings.SEARCH_RESULTS_PER_PAGE
        pages = paginate(request, documents, results_per_page)

        # If we know there aren't any results, let's cheat and in
        # doing that, not hit ES again.
        if num_results == 0:
            searcher = []
        else:
            # Get the documents we want to show and add them to
            # docs_for_page
            documents = documents[offset:offset + results_per_page]

            if len(documents) == 0:
                # If the user requested a page that's beyond the
                # pagination, then documents is an empty list and
                # there are no results to show.
                searcher = []
            else:
                bounds = documents[0][1]
                searcher = searcher.values_dict()[bounds[0]:bounds[1]]

        results = []
        for i, doc in enumerate(searcher):
            rank = i + offset

            if doc['model'] == 'wiki_document':
                summary = doc['document_summary']
                result = {
                    'title': doc['document_title'],
                    'type': 'document'}

            elif doc['model'] == 'questions_question':
                summary = _build_es_excerpt(doc)
                result = {
                    'title': doc['question_title'],
                    'type': 'question',
                    'is_solved': doc['question_is_solved'],
                    'num_answers': doc['question_num_answers'],
                    'num_votes': doc['question_num_votes'],
                    'num_votes_past_week': doc['question_num_votes_past_week']}

            else:
                summary = _build_es_excerpt(doc)
                result = {
                    'title': doc['post_title'],
                    'type': 'thread'}

            result['url'] = doc['url']
            result['object'] = ObjectDict(doc)
            result['search_summary'] = summary
            result['rank'] = rank
            result['score'] = doc._score
            results.append(result)

    except (ESTimeoutError, ESMaxRetryError, ESException), exc:
        # Handle timeout and all those other transient errors with a
        # "Search Unavailable" rather than a Django error page.
        if is_json:
            return HttpResponse(json.dumps({'error':
                                             _('Search Unavailable')}),
                                mimetype=mimetype, status=503)

        if isinstance(exc, ESTimeoutError):
            statsd.incr('search.esunified.timeouterror')
        elif isinstance(exc, ESMaxRetryError):
            statsd.incr('search.esunified.maxretryerror')
        elif isinstance(exc, ESException):
            statsd.incr('search.esunified.elasticsearchexception')

        import logging
        logging.exception(exc)

        t = 'search/mobile/down.html' if request.MOBILE else 'search/down.html'
        return jingo.render(request, t, {'q': cleaned['q']}, status=503)
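
The ES-backed examples wrap each raw result dict in ObjectDict before handing it to the template. The class is not shown; a likely minimal sketch (attribute-style access over a plain dict, behavior guessed, not copied from the project):

class ObjectDict(dict):
    # Hypothetical sketch: lets templates use result.object.document_title
    # style access on what is really just the ES result dict.
    def __getattr__(self, name):
        try:
            return self[name]
        except KeyError:
            raise AttributeError(name)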
Example #58
def search(request, template=None):
    """ES-specific search view"""

    # JSON-specific variables
    is_json = (request.GET.get('format') == 'json')
    callback = request.GET.get('callback', '').strip()
    mimetype = 'application/x-javascript' if callback else 'application/json'

    # Search "Expires" header format
    expires_fmt = '%A, %d %B %Y %H:%M:%S GMT'

    # Check callback is valid
    if is_json and callback and not jsonp_is_valid(callback):
        return HttpResponse(
            json.dumps({'error': _('Invalid callback function.')}),
            mimetype=mimetype, status=400)

    language = locale_or_default(
        request.GET.get('language', request.LANGUAGE_CODE))
    r = request.GET.copy()
    a = request.GET.get('a', '0')

    # Search default values
    try:
        category = (map(int, r.getlist('category')) or
                    settings.SEARCH_DEFAULT_CATEGORIES)
    except ValueError:
        category = settings.SEARCH_DEFAULT_CATEGORIES
    r.setlist('category', category)

    # Basic form
    if a == '0':
        r['w'] = r.get('w', constants.WHERE_BASIC)
    # Advanced form
    if a == '2':
        r['language'] = language
        r['a'] = '1'

    # TODO: Rewrite so SearchForm is unbound initially and we can use
    # `initial` on the form fields.
    if 'include_archived' not in r:
        r['include_archived'] = False

    search_form = SearchForm(r)

    if not search_form.is_valid() or a == '2':
        if is_json:
            return HttpResponse(
                json.dumps({'error': _('Invalid search data.')}),
                mimetype=mimetype,
                status=400)

        t = template if request.MOBILE else 'search/form.html'
        search_ = render(request, t, {
            'advanced': a, 'request': request,
            'search_form': search_form})
        search_['Cache-Control'] = 'max-age=%s' % \
                                   (settings.SEARCH_CACHE_PERIOD * 60)
        search_['Expires'] = (datetime.utcnow() +
                              timedelta(
                                minutes=settings.SEARCH_CACHE_PERIOD)) \
                              .strftime(expires_fmt)
        return search_

    cleaned = search_form.cleaned_data

    if request.MOBILE and cleaned['w'] == constants.WHERE_BASIC:
        cleaned['w'] = constants.WHERE_WIKI

    page = max(smart_int(request.GET.get('page')), 1)
    offset = (page - 1) * settings.SEARCH_RESULTS_PER_PAGE

    lang = language.lower()
    if settings.LANGUAGES.get(lang):
        lang_name = settings.LANGUAGES[lang]
    else:
        lang_name = ''

    # We use a regular S here because we want to search across
    # multiple doctypes.
    searcher = (UntypedS().es(urls=settings.ES_URLS)
                          .indexes(es_utils.READ_INDEX))

    wiki_f = F(model='wiki_document')
    question_f = F(model='questions_question')
    discussion_f = F(model='forums_thread')

    # Start - wiki filters

    if cleaned['w'] & constants.WHERE_WIKI:
        # Category filter
        if cleaned['category']:
            wiki_f &= F(document_category__in=cleaned['category'])

        # Locale filter
        wiki_f &= F(document_locale=language)

        # Product filter
        products = cleaned['product']
        for p in products:
            wiki_f &= F(product=p)

        # Topics filter
        topics = cleaned['topics']
        for t in topics:
            wiki_f &= F(topic=t)

        # Archived bit
        if a == '0' and not cleaned['include_archived']:
            # Default to NO for basic search:
            cleaned['include_archived'] = False
        if not cleaned['include_archived']:
            wiki_f &= F(document_is_archived=False)

    # End - wiki filters

    # Start - support questions filters

    if cleaned['w'] & constants.WHERE_SUPPORT:
        # Solved is set by default if using basic search
        if a == '0' and not cleaned['has_helpful']:
            cleaned['has_helpful'] = constants.TERNARY_YES

        # These filters are ternary, they can be either YES, NO, or OFF
        ternary_filters = ('is_locked', 'is_solved', 'has_answers',
                           'has_helpful')
        d = dict(('question_%s' % filter_name,
                  _ternary_filter(cleaned[filter_name]))
                 for filter_name in ternary_filters if cleaned[filter_name])
        if d:
            question_f &= F(**d)

        if cleaned['asked_by']:
            question_f &= F(question_creator=cleaned['asked_by'])

        if cleaned['answered_by']:
            question_f &= F(question_answer_creator=cleaned['answered_by'])

        q_tags = [t.strip() for t in cleaned['q_tags'].split(',')]
        for t in q_tags:
            if t:
                question_f &= F(question_tag=t)

        # Product filter
        products = cleaned['product']
        for p in products:
            question_f &= F(product=p)

        # Topics filter
        topics = cleaned['topics']
        for t in topics:
            question_f &= F(topic=t)

    # End - support questions filters

    # Start - discussion forum filters

    if cleaned['w'] & constants.WHERE_DISCUSSION:
        if cleaned['author']:
            discussion_f &= F(post_author_ord=cleaned['author'])

        if cleaned['thread_type']:
            if constants.DISCUSSION_STICKY in cleaned['thread_type']:
                discussion_f &= F(post_is_sticky=1)

            if constants.DISCUSSION_LOCKED in cleaned['thread_type']:
                discussion_f &= F(post_is_locked=1)

        if cleaned['forum']:
            discussion_f &= F(post_forum_id__in=cleaned['forum'])

    # End - discussion forum filters

    # Created filter
    unix_now = int(time.time())
    interval_filters = (
        ('created', cleaned['created'], cleaned['created_date']),
        ('updated', cleaned['updated'], cleaned['updated_date']))
    for filter_name, filter_option, filter_date in interval_filters:
        if filter_option == constants.INTERVAL_BEFORE:
            before = {filter_name + '__gte': 0,
                      filter_name + '__lte': max(filter_date, 0)}

            discussion_f &= F(**before)
            question_f &= F(**before)
        elif filter_option == constants.INTERVAL_AFTER:
            after = {filter_name + '__gte': min(filter_date, unix_now),
                     filter_name + '__lte': unix_now}

            discussion_f &= F(**after)
            question_f &= F(**after)

    # In basic search, we limit questions from the last
    # SEARCH_DEFAULT_MAX_QUESTION_AGE seconds.
    if a == '0':
        start_date = unix_now - settings.SEARCH_DEFAULT_MAX_QUESTION_AGE
        question_f &= F(created__gte=start_date)

    # Note: num_voted (with a d) is a different field than num_votes
    # (with an s). The former is a dropdown and the latter is an
    # integer value.
    if cleaned['num_voted'] == constants.INTERVAL_BEFORE:
        question_f &= F(question_num_votes__lte=max(cleaned['num_votes'], 0))
    elif cleaned['num_voted'] == constants.INTERVAL_AFTER:
        question_f &= F(question_num_votes__gte=cleaned['num_votes'])

    # Done with all the filtery stuff--time to generate results

    # Combine all the filters and add to the searcher
    doctypes = []
    final_filter = F()
    if cleaned['w'] & constants.WHERE_WIKI:
        doctypes.append(DocumentMappingType.get_mapping_type_name())
        final_filter |= wiki_f

    if cleaned['w'] & constants.WHERE_SUPPORT:
        doctypes.append(QuestionMappingType.get_mapping_type_name())
        final_filter |= question_f

    if cleaned['w'] & constants.WHERE_DISCUSSION:
        doctypes.append(ThreadMappingType.get_mapping_type_name())
        final_filter |= discussion_f

    searcher = searcher.doctypes(*doctypes)
    searcher = searcher.filter(final_filter)

    if 'explain' in request.GET and request.GET['explain'] == '1':
        searcher = searcher.explain()

    documents = ComposedList()

    try:
        cleaned_q = cleaned['q']

        # Set up the highlights
        # First 500 characters of content in one big fragment
        searcher = searcher.highlight(
            'question_content', 'discussion_content', 'document_summary',
            pre_tags=['<b>'],
            post_tags=['</b>'],
            number_of_fragments=0,
            fragment_size=500)

        # Set up boosts
        searcher = searcher.boost(
            question_title=4.0,
            question_content=3.0,
            question_answer_content=3.0,
            post_title=2.0,
            post_content=1.0,
            document_title=6.0,
            document_content=1.0,
            document_keywords=8.0,
            document_summary=2.0,

            # Text phrases in document titles and content get an extra
            # boost.
            document_title__text_phrase=10.0,
            document_content__text_phrase=8.0)

        # Apply sortby for advanced search of questions
        if cleaned['w'] == constants.WHERE_SUPPORT:
            sortby = cleaned['sortby']
            try:
                searcher = searcher.order_by(
                    *constants.SORT_QUESTIONS[sortby])
            except IndexError:
                # Skip index errors because they imply the user is
                # sending us sortby values that aren't valid.
                pass

        # Apply sortby for advanced search of kb documents
        if cleaned['w'] == constants.WHERE_WIKI:
            sortby = cleaned['sortby_documents']
            try:
                searcher = searcher.order_by(
                    *constants.SORT_DOCUMENTS[sortby])
            except IndexError:
                # Skip index errors because they imply the user is
                # sending us sortby values that aren't valid.
                pass

        # Build the query
        if cleaned_q:
            query_fields = chain(*[cls.get_query_fields()
                                   for cls in get_mapping_types()])

            query = {}
            # Create text and text_phrase queries for every field
            # we want to search.
            for field in query_fields:
                for query_type in ['text', 'text_phrase']:
                    query['%s__%s' % (field, query_type)] = cleaned_q

            searcher = searcher.query(should=True, **query)

        num_results = min(searcher.count(), settings.SEARCH_MAX_RESULTS)

        # TODO - Can ditch the ComposedList here, but we need
        # something that paginate can use to figure out the paging.
        documents = ComposedList()
        documents.set_count(('results', searcher), num_results)
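        # The ComposedList holds no hits, just the count registered under the
        # ('results', searcher) key, which is enough for paginate() to work
        # out the page boundaries.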

        results_per_page = settings.SEARCH_RESULTS_PER_PAGE
        pages = paginate(request, documents, results_per_page)

        # Facets
        product_facets = {}

        # If we already know there are no results, skip hitting ES again.
        if num_results == 0:
            searcher = []
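            # Replacing searcher with an empty list makes the results loop
            # below a no-op, so we avoid a second round trip to ES.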
        else:
            # Get the documents we want to show and add them to
            # docs_for_page
            documents = documents[offset:offset + results_per_page]

            if len(documents) == 0:
                # If the user requested a page that's beyond the
                # pagination, then documents is an empty list and
                # there are no results to show.
                searcher = []
            else:
                bounds = documents[0][1]
                searcher = searcher.values_dict()[bounds[0]:bounds[1]]
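                # Slicing the ComposedList yields (key, (start, stop)) tuples;
                # values_dict() then fetches just that window of hits as
                # plain dicts instead of model instances.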

                # If we are doing basic search, we show product facets.
                if a == '0':
                    pfc = searcher.facet(
                        'product', filtered=True).facet_counts()
                    product_facets = dict(
                        [(p['term'], p['count']) for p in pfc['product']])
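                    # filtered=True computes the product facet counts against
                    # the current filters rather than the whole index.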

        results = []
        for i, doc in enumerate(searcher):
            rank = i + offset

            if doc['model'] == 'wiki_document':
                summary = _build_es_excerpt(doc)
                if not summary:
                    summary = doc['document_summary']
                result = {
                    'title': doc['document_title'],
                    'type': 'document'}

            elif doc['model'] == 'questions_question':
                summary = _build_es_excerpt(doc)
                if not summary:
                    # We're excerpting only question_content, so if
                    # the query matched question_title or
                    # question_answer_content, then there won't be any
                    # question_content excerpts. In that case, just
                    # show the question--but only the first 500
                    # characters.
                    summary = bleach.clean(
                        doc['question_content'], strip=True)[:500]

                result = {
                    'title': doc['question_title'],
                    'type': 'question',
                    'is_solved': doc['question_is_solved'],
                    'num_answers': doc['question_num_answers'],
                    'num_votes': doc['question_num_votes'],
                    'num_votes_past_week': doc['question_num_votes_past_week']}

            else:
                summary = _build_es_excerpt(doc)
                result = {
                    'title': doc['post_title'],
                    'type': 'thread'}

            result['url'] = doc['url']
            result['object'] = ObjectDict(doc)
            result['search_summary'] = summary
            result['rank'] = rank
            result['score'] = doc._score
            result['explanation'] = escape(format_explanation(
                    doc._explanation))
            results.append(result)

    except ES_EXCEPTIONS as exc:
        # Handle timeout and all those other transient errors with a
        # "Search Unavailable" rather than a Django error page.
        if is_json:
            return HttpResponse(json.dumps({'error':
                                             _('Search Unavailable')}),
                                mimetype=mimetype, status=503)

        # Cheating here: Convert from 'Timeout()' to 'timeout' so
        # we have less code, but still have good stats.
        exc_bucket = repr(exc).lower().strip('()')
        statsd.incr('search.esunified.{0}'.format(exc_bucket))
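        # e.g. repr(exc) == 'Timeout()' becomes the bucket 'timeout' and is
        # counted as 'search.esunified.timeout'.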

        import logging
        logging.exception(exc)

        t = 'search/mobile/down.html' if request.MOBILE else 'search/down.html'
        return render(request, t, {'q': cleaned['q']}, status=503)

    items = [(k, v) for k in search_form.fields for
             v in r.getlist(k) if v and k != 'a']
    items.append(('a', '2'))
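    # items rebuilds the non-empty query parameters with 'a' forced to '2';
    # it isn't referenced in this excerpt, so it presumably feeds an
    # advanced-search link elsewhere in the view.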

    if is_json:
        # Models are not json serializable.
        for result in results:
            del result['object']
        data = {}
        data['results'] = results
        data['total'] = len(results)
        data['query'] = cleaned['q']
        if not results:
            data['message'] = _('No pages matched the search criteria')
        json_data = json.dumps(data)
        if callback:
            json_data = callback + '(' + json_data + ');'
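            # JSONP: wrap the JSON payload in the caller-supplied callback so
            # the response can be consumed cross-origin via a <script> tag.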

        return HttpResponse(json_data, mimetype=mimetype)

    fallback_results = None
    if num_results == 0:
        fallback_results = _fallback_results(language, cleaned['product'])

    results_ = render(request, template, {
        'num_results': num_results,
        'results': results,
        'fallback_results': fallback_results,
        'q': cleaned['q'],
        'w': cleaned['w'],
        'product': cleaned['product'],
        'products': Product.objects.filter(visible=True),
        'product_facets': product_facets,
        'pages': pages,
        'search_form': search_form,
        'lang_name': lang_name, })
    results_['Cache-Control'] = 'max-age=%s' % \
                                (settings.SEARCH_CACHE_PERIOD * 60)
    results_['Expires'] = (datetime.utcnow() +
                           timedelta(minutes=settings.SEARCH_CACHE_PERIOD)) \
                           .strftime(expires_fmt)
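    # Cache-Control takes seconds while SEARCH_CACHE_PERIOD is in minutes
    # (hence the * 60); the Expires header mirrors the same caching window.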
    results_.set_cookie(settings.LAST_SEARCH_COOKIE, urlquote(cleaned['q']),
                        max_age=3600, secure=False, httponly=False)

    return results_