def search(request):
    """Autocomplete search over characters and users.

    Redirects straight to the profile/character page on a single hit;
    raises Http404 for non-GET requests or queries shorter than 3 chars.
    """
    if request.method != 'GET':
        raise Http404()
    # Bug fix: .get('q') returns None when 'q' is absent, so len(query)
    # crashed with a TypeError. Default to '' instead.
    query = request.GET.get('q', '')
    if len(query) < 3:
        raise Http404()
    from haystack.query import SearchQuerySet
    response = SearchQuerySet().autocomplete(name_auto=query)
    if response.count() == 0:
        return render_to_response(
            'search.html',
            {'results': None},
            context_instance=RequestContext(request)
        )
    from django.contrib.auth.models import User
    if response.count() > 1:
        return render_to_response(
            'search.html',
            {
                'result_characters': response.models(Character),
                'result_users': response.models(User),
                'results': response,
            },
            context_instance=RequestContext(request)
        )
    # Exactly one hit: jump straight to the matching page.
    if response.models(User):
        return HttpResponseRedirect('/accounts/profile/%s/' % response[0].object.id)
    return HttpResponseRedirect('/character/%s/' % response[0].object.character_id)
def get_queryset(self):
    """Full-text company search with keyboard-layout and spelling fallbacks.

    Order of attempts: raw query -> punto-switcher re-typed query (plus its
    spelling suggestion) -> spelling suggestion for the original query.
    """

    # Punto-switcher helper: re-type the query as if it had been entered
    # with the wrong (latin) keyboard layout. Relies on _trans_table,
    # which is assigned below before this closure is ever called.
    def fix_layout(s):
        return ''.join([_trans_table.get(c, c) for c in s])

    q = self.request.GET.get('q')
    city = self.request.GET.get('city')
    if q:
        results = SearchQuerySet().filter(content=q, city=city).values_list('company', flat=True)
        if results.count() == 0:
            # Punto switcher: map each latin char to its russian key-mate.
            _eng_chars = "~!@#$%^&qwertyuiop[]asdfghjkl;'zxcvbnm,./QWERTYUIOP{}ASDFGHJKL:\"|ZXCVBNM<>?"
            _rus_chars = "ё!\"№;%:?йцукенгшщзхъфывапролджэячсмитьбю.ЙЦУКЕНГШЩЗХЪФЫВАПРОЛДЖЭ/ЯЧСМИТЬБЮ,"
            _trans_table = dict(zip(_eng_chars, _rus_chars))
            # Bug fix: the pattern must be a raw string — '\s' in a plain
            # literal is an invalid escape sequence.
            if re.fullmatch(r'[a-zA-Z0-9\s]*', q):
                self.q_punto = fix_layout(q)
                results = SearchQuerySet().filter(
                    content=self.q_punto, city=city
                ).values_list('company', flat=True)
                if results.count() == 0:
                    sqs = SearchQuerySet().auto_query(self.q_punto)
                    self.q_spell = sqs.spelling_suggestion()
                    results = SearchQuerySet().filter(
                        content=self.q_spell, city=city
                    ).values_list('company', flat=True)
            if results.count() == 0:
                # Last resort: spelling suggestion for the original query.
                sqs = SearchQuerySet().auto_query(q)
                self.q_spell = sqs.spelling_suggestion()
                results = SearchQuerySet().filter(
                    content=self.q_spell, city=city
                ).values_list('company', flat=True)
        search_results = Company.objects.filter(
            pk__in=set(results)
        ).order_by('-yellow', '-blue', '-priority', 'name')
        self.count = search_results.count()
    else:
        # NOTE(review): '' keeps the original return type for the no-query
        # case even though it is not a queryset — templates iterate it as
        # empty; confirm callers before changing to Company.objects.none().
        search_results = ''
    return search_results
def project_search_fulltext(request, text, format=None):
    """Full-text project search; 404 when nothing matches.

    Bug fix: removed a leftover debug print of the result count.
    """
    result_list = SearchQuerySet().filter(content=AutoQuery(text))
    result_list = SearchQuerySetWrapper(result_list)
    if result_list.count() == 0:
        return Response(status=status.HTTP_404_NOT_FOUND)
    serializer = ProjectSerializer(result_list, many=True)
    return Response(serializer.data)
def handle(self, *args, **options):
    """Management command: look up index records by original species name."""
    print('test searching')
    hits = SearchQuerySet().filter(
        original_species_name=options['name']
    ).models(BiologicalCollectionRecord)
    print(hits.count())
    if hits.count() > 0:
        for hit in hits:
            print(hit.original_species_name)
    else:
        print('No object with original species name: {} is found'.format(
            options['name']))
def journey(request):
    """Resolve 'from'/'to' localities from slugs or free-text search."""
    origin = request.GET.get('from')
    from_q = request.GET.get('from_q')
    destination = request.GET.get('to')
    to_q = request.GET.get('to_q')

    if origin:
        origin = get_object_or_404(Locality, slug=origin)

    from_options = None
    if from_q:
        from_options = SearchQuerySet().models(Locality).filter(
            content=from_q).load_all()
        if from_options.count() == 1:
            # Unambiguous match: treat it as the chosen origin.
            origin = from_options[0].object
            from_options = None
        elif origin not in from_options:
            origin = None

    if destination:
        destination = get_object_or_404(Locality, slug=destination)

    to_options = None
    if to_q:
        to_options = SearchQuerySet().models(Locality).filter(
            content=to_q).load_all()
        if to_options.count() == 1:
            destination = to_options[0].object
            to_options = None
        elif destination not in to_options:
            destination = None

    # Journey lookup is currently disabled in this view.
    journeys = None

    return render(request, 'journey.html', {
        'from': origin,
        'from_q': from_q or origin or '',
        'from_options': from_options,
        'to': destination,
        'to_q': to_q or destination or '',
        'to_options': to_options,
        'journeys': journeys,
    })
def search_results_json(req, term='', context_models=''):
    """JSON-ready autocomplete results grouped by index priority."""
    all_results = []
    term = req.GET.get('term', '')
    context_models = req.GET.get('model', '').split(',')
    p = {}

    # Indexes whose model was named in ?model=... sort first; the rest
    # follow their declared PRIORITY.
    def _priority(index):
        if index[0].__name__.lower() in context_models:
            return 0
        return index[1].PRIORITY

    for index in sorted(_get_indexes(), key=_priority):
        results = SearchQuerySet().filter(content=term).models(index[0])
        results_count = results.count()
        # Top five hits per index, each tagged with the index's total count.
        for hit in results[:5]:
            all_results.append(_create_category(
                hit.display, hit.index_name, term, hit.model_name,
                hit.url, results_count))
    all_results.append({'category': 'Wiki', 'term': term,
                        'search_slug': 'wiki'})
    return json_response(all_results)
def current_bills_sorted(self):
    """Top 100 current-congress bills for this committee, by proscore."""
    from haystack.query import SearchQuerySet
    bills = SearchQuerySet().using("bill").filter(
        indexed_model_name__in=["Bill"],
        congress=CURRENT_CONGRESS,
        committees=self.id,
    ).order_by('-proscore')
    return {
        "count": bills.count(),
        "bills": [hit.object for hit in bills[0:100]],
    }
def cruce1(form):
    """Count indexed records matching the form's 'estado' value."""
    estado = form.cleaned_data['estado']
    sqs = SearchQuerySet().filter(estado=estado)
    return {'total': sqs.count(), 'estado': estado, 'results': sqs}
def search_api_view(request):
    """Paged JSON search endpoint with optional sort/order parameters."""
    result = SearchQuerySet().filter(content=request.GET.get('q', ''))
    offset = int(request.GET.get('offset', '0'))
    limit = int(request.GET.get('limit', '10'))
    sort = request.GET.get('sort', "")
    order = request.GET.get('order', "")
    if sort and order:
        # 'desc' flips the ordering; anything else sorts ascending.
        prefix = "-" if order == 'desc' else ""
        result = result.order_by(prefix + sort)
    total = result.count()
    page = result[offset:(offset + limit)]
    rows = [{
        'score': x.score,
        'reply_serial_no': x.object.reply_serial_no,
        'key': x.object.key,
        'director': x.object.director,
        'year': x.object.year,
        'question_short': strip_tags(x.object.question)[0:100] + "...",
        "answer_short": strip_tags(x.object.answer)[0:100] + "...",
        "member": x.object.member,
    } for x in page][0:100]
    return HttpResponse(json.dumps({'total': total, 'rows': rows}))
def filter_queryset(self, request, queryset, view):
    """Optionally full-text-filter `queryset` via the 'q' query param."""
    is_search = False
    search_queryset = SearchQuerySet().models(queryset.model)
    for key, value in request.query_params.iteritems():
        if key == 'q':
            # 'q' means do a full-text search of the document fields.
            # Raw() passes the search string unaltered to Woosh, trading
            # backend agnosticism for the rich query language:
            # https://pythonhosted.org/Whoosh/querylang.html
            search_queryset = search_queryset.filter(content=Raw(value))
            is_search = True
        elif key == 'parent' and value == '':
            # Empty string means query for null parent.
            queryset = queryset.filter(parent=None)
    if not is_search:
        return queryset
    # TODO: Call highlight() on the SearchQuerySet and somehow pass the
    # highlighted result to the serializer.
    # http://django-haystack.readthedocs.org/en/latest/searchqueryset_api.html#SearchQuerySet.highlight
    matching_pks = search_queryset.values_list('pk', flat=True)
    # len(matching_pks) is cheap, but Haystack fetches the actual pks in
    # batches of HAYSTACK_ITERATOR_LOAD_PER_QUERY (10), each batch taking
    # ~0.1s. One big slice forces Haystack to hand over every pk at once.
    big_slice = max(ITERATOR_LOAD_PER_QUERY, search_queryset.count())
    matching_pks = list(matching_pks[:big_slice])
    # Will still be filtered by KpiObjectPermissionsFilter.filter_queryset()
    # TODO: Preserve ordering of search results
    return queryset.filter(pk__in=matching_pks)
def home(request):
    """Home page; when ?query= is present, render movie search results."""
    movies = Movie.objects.all()
    shortcomments = ShortComment.objects.all()
    comments = Comment.objects.all()
    form = SearchForm()
    if 'query' in request.GET:
        form = SearchForm(request.GET)
        if form.is_valid():
            cd = form.cleaned_data
            results = SearchQuerySet().models(Movie).filter(
                content=cd['query']).load_all()
            # Count total results.
            total_results = results.count()
            return render(request, 'movies/movie/list.html',
                          {'form': form,
                           'cd': cd,
                           'results': results,
                           'total_results': total_results})
    return render(request, 'account/home.html',
                  {'section': 'home',
                   'movies': movies,
                   'shortcomments': shortcomments,
                   'comments': comments})
def read(self, request):
    """Paged exercise search returning a JSON-ready dict."""
    baseq = q = request.GET['q']
    if q == '':
        return {}
    q += '*'
    p = int(request.GET.get('p', 1))
    show = 10
    init = (p - 1) * show
    end = init + show
    if False:
        # Dead branch kept for reference: plain DB full-text search.
        q = ' '.join(['+' + i + '*' for i in request.GET['q'].split(' ')])
        exercises = Exercise.objects.filter(content__search=q)
        data = []
        for e in exercises[p * show:(p + 1) * show]:
            data.append({'sections': e.sections(), 'path': e.path,
                         'title': e.title,
                         'description': e.description[:255], 'id': e.id})
        count = exercises.count()
    else:
        s = SearchQuerySet().raw_search(q).highlight()
        count = s.count()
        data = []
        for e in s[init:end]:
            data.append({'sections': e.object.sections(),
                         'path': e.object.path,
                         'title': e.title,
                         'description': e.highlighted[0],
                         'id': e.object.id})
    return {'query': q,
            'page': p + 1,
            'results': count,
            'exercises': data,
            'more': '%s?q=%s&p=%d' % (reverse('exercises_list_json'),
                                      baseq, p + 1) if end < count else False}
def get_search(self, request, **kwargs):
    """Tastypie endpoint: search Tag objects with pagination and filters."""
    self.method_check(request, allowed=['get'])
    self.throttle_check(request)

    # Pagination. Bug fix: use floor division — '/' yields a float page
    # number on Python 3, which breaks Paginator.page().
    limit = int(request.GET.get('limit', 20))
    offset = int(request.GET.get('offset', 0))
    page = (offset // limit) + 1

    q_args = {}
    # Optional full-text query.
    if 'q' in request.GET and request.GET['q'] != '':
        q_args['content'] = AutoQuery(remove_accents(request.GET['q']))
    if 'tag' in request.GET:
        q_args['tag_exact'] = remove_accents(request.GET.get('tag').lower())
    if 'id' in request.GET:
        q_args['object_id'] = int(request.GET.get('id'))
    if 'followed' in request.GET:
        # Restrict to tags followed by the given user.
        uid = int(request.GET['followed'])
        tag_ids = [f.object_id for f in Follow.objects.filter(
            content_type__model='tag', user__id=uid)]
        q_args['obj_id__in'] = tag_ids

    sqs = SearchQuerySet().models(Tag).load_all().filter(**q_args)
    paginator = Paginator(sqs, limit)
    try:
        page = paginator.page(page)
    except InvalidPage:
        raise Http404("Sorry, no results on that page.")

    objects = []
    if 'order_by' in request.GET:
        order_by = request.GET.get('order_by').split(',')
    for result in page.object_list:
        bundle = self.build_bundle(obj=result.object, request=request)
        bundle = self.full_dehydrate(bundle)
        objects.append(bundle)

    object_list = {
        'meta': {
            'limit': limit,
            'next': page.has_next(),
            'previous': page.has_previous(),
            'total_count': sqs.count(),
            'offset': offset,
        },
        'objects': objects,
    }
    response = self.create_response(request, object_list)
    self.log_throttled_access(request)
    return response
def search_question_detail(request):
    """Detail search endpoint, used as /search/detail/?q=query

    **Permissions:** AllowAny

    Bug fixes vs. the original: `if q is ''` used identity comparison,
    which is unreliable for strings; three intermediate SearchQuerySets
    were dead code (each immediately overwritten); leftover debug prints
    and a json.loads debug loop were removed.
    """
    q = request.GET.get('q', '')
    if not q:
        return Response(status=status.HTTP_400_BAD_REQUEST)
    # Only the title query was ever serialized in the original.
    sqs = SearchQuerySet().models(Question).filter(
        title__contains=q).order_by('created')
    serializer = QuestionSearchSerializer(sqs, many=True)
    return Response(data=serializer.data, status=status.HTTP_200_OK)
def search(request):
    """Video search with pagination.

    Bug fix: the original returned None (an invalid HTTP response) when no
    query was submitted or the form was invalid; now it renders the empty
    form page instead.
    """
    form = SearchForm()
    if 'query' in request.GET:
        form = SearchForm(request.GET)
        if form.is_valid():
            cd = form.cleaned_data
            results = SearchQuerySet().models(Video).filter(
                content=cd['query']).load_all()
            # count total results
            total_results = results.count()
            paginator = Paginator(results, 80)  # 80 posts in each page
            page = request.GET.get('page')
            try:
                vidz = paginator.page(page)
            except PageNotAnInteger:
                # If page is not an integer deliver the first page
                vidz = paginator.page(1)
            except EmptyPage:
                # If page is out of range deliver last page of results
                vidz = paginator.page(paginator.num_pages)
            return render(
                request, 'videos/search.html', {
                    'form': form,
                    'cd': cd,
                    'total_results': total_results,
                    'results': vidz
                })
    return render(request, 'videos/search.html', {'form': form})
def get(self, request, *args, **kwargs):
    """Return the number of annotations matching the request filters."""
    annotations = SearchQuerySet().models(Annotation)
    annotations = filter_annotations(self.request, annotations)
    return Response({'count': annotations.count()})
def get(self, request, keyword=None, format=None):
    """Search replies by keyword, word-by-word, with offset/limit paging."""
    result = []
    total = 0
    offset = int(request.GET.get('offset', '0'))
    limit = int(request.GET.get('limit', '10'))
    if keyword is not None:
        result = SearchQuerySet()
        # Every word of the keyword must match exactly.
        for word in keyword.split(' '):
            result = result.filter(content__exact=word)
        result = result.models(Reply)
        total = result.count()
        result = result[offset:(offset + limit)]
        for r in result:
            print(r.score)
        result = [r.object for r in result]
        # Trim long HTML fields down to short plain-text previews.
        for reply in result:
            reply.question = strip_tags(reply.question)[0:100]
            reply.answer = strip_tags(reply.answer)[0:100]
    serializer = ReplySerializer(result, many=True)
    return JsonResponse(
        {
            'data': serializer.data,
            'total': total,
            'limit': limit,
            'offset': offset,
        },
        safe=False)
def test_items_are_searchable(self):
    """A newly created item should be findable by one of its tags."""
    self.api_login()
    response = self.client.post(
        '/api/items/',
        {'url': EXAMPLE_COM, 'tags': ['test-tag', 'second-tag']},
        format='json')
    self.assertTrue('id' in response.data)
    matches = SearchQuerySet().filter(owner_id=1).auto_query('second-tag')
    self.assertEqual(matches.count(), 1,
                     'New item is not in the searchable by tag')
def get_search_results(modelcls, q, limit=DEFAULT_LIMIT):
    """Run an auto-query over `modelcls` and return (total_count, objects).

    Returns the total hit count plus at most `limit` loaded objects.

    Fixes vs. the original: the `sresults` local was computed but never
    used, and the results were read from the private `_result_cache`
    attribute; slicing the queryset directly is the supported API.
    """
    sqs = SearchQuerySet().models(modelcls)
    sqs = sqs.auto_query(q).filter().load_all()
    total_results_count = sqs.count()
    results = [hit.object for hit in sqs[:limit] if hit]
    return total_results_count, results
def post_search(request):
    '''
    Blog post search view.

    Renders the search page with results when a valid query was submitted,
    otherwise with just the empty form.

    Fix vs. the original: removed a dead loop that assigned each hit's
    object to an unused local variable.
    '''
    form = SearchForm()
    if 'query' in request.GET:
        form = SearchForm(request.GET)
        if form.is_valid():
            cd = form.cleaned_data
            results = SearchQuerySet().models(Post).filter(
                content=cd['query'])  # .load_all()
            # count total results
            total_results = results.count()
            return render(
                request, 'myblog/post/search.html', {
                    'form': form,
                    'cd': cd,
                    'results': results,
                    'total_results': total_results
                })
    return render(request, 'myblog/post/search.html', {'form': form})
def search_view(request):
    """Site search over several boosted fields, with an AJAX shortcut."""
    q = AutoQuery(request.GET.get("q", ""))
    sqs = SearchQuerySet().filter(
        SQ(name=q) | SQ(teaser=q) | SQ(additional_info=q) |
        SQ(authors=q) | SQ(subjects=q) | SQ(school_types=q)
    )
    # Field boosts: name matches count double.
    sqs.query.boost_fields = {
        "name": 2,
        "teaser": 1.5,
        "additional_info": 1,
        "authors": 1,
    }

    if request.is_ajax():
        payload = [{"title": hit.name, "url": hit.url} for hit in sqs[:10]]
        return JsonResponse(payload, safe=False)

    # If there are no results - display random Content objects.
    suggestions = get_random_content(2, 2, 2) if sqs.count() == 0 else []

    ctx = {
        "results": Content.objects.filter(
            pk__in=sqs.values_list("pk", flat=True)),
        "mlt": suggestions,
    }
    return render(request, "dll/search.html", ctx)
def product_search(request):
    """Product search with 2-per-page pagination.

    Bug fix: the final pagination handler was a bare `except:`, which also
    swallowed unrelated errors (KeyboardInterrupt included); narrowed to
    EmptyPage, matching the sibling search views.
    """
    form = SearchForm()
    if 'query' in request.GET:
        form = SearchForm(request.GET)
        page = request.GET.get('page')
        if form.is_valid():
            cd = form.cleaned_data
            results = SearchQuerySet().models(Product).filter(
                content=cd['query']).load_all()
            paginator = Paginator(results, 2)
            page = request.GET.get('page')
            try:
                prods = paginator.page(page)
            except PageNotAnInteger:
                prods = paginator.page(1)
            except EmptyPage:
                # Out-of-range pages fall back to the last page.
                prods = paginator.page(paginator.num_pages)
            total_results = results.count()
            return render(
                request, 'front/search.html', {
                    'form': form,
                    'cd': cd,
                    'page': page,
                    'prods': prods,
                    'total_results': total_results
                })
    return render(request, 'front/search.html', {'form': form})
def post_search(request):
    """Post search; renders the same template whether or not a search ran."""
    try:
        cd = None
        results = None
        total_results = None
        if 'query' in request.GET:
            form = SearchForm(request.GET)
            if form.is_valid():
                cd = form.cleaned_data
                results = SearchQuerySet().models(Post).filter(
                    content=cd['query']).load_all()
                total_results = results.count()
        else:
            form = SearchForm()
        return render(request, 'blog/post/search.html',
                      {'form': form,
                       'cd': cd,
                       'results': results,
                       'total_results': total_results})
    except RuntimeError:
        # Fall back to rendering whatever state we had when the search
        # backend failed.
        return render(request, 'blog/post/search.html',
                      {'form': form,
                       'cd': cd,
                       'results': results,
                       'total_results': total_results})
def search_view(request):
    """Paginated generic search; fires the `search` signal when a query ran."""
    search_query = request.GET.get('q', '')
    if search_query:
        search_results = SearchQuerySet().filter(content=search_query)
    else:
        search_results = []

    form = SearchForm(initial={'q': search_query})
    paginator = Paginator(search_results, settings.ORB_PAGINATOR_DEFAULT)

    # Make sure page request is an int. If not, deliver first page.
    try:
        page = int(request.GET.get('page', '1'))
    except ValueError:
        page = 1
    try:
        results = paginator.page(page)
    except (EmptyPage, InvalidPage):
        results = paginator.page(paginator.num_pages)

    if search_query:
        search.send(sender=search_results,
                    query=search_query,
                    no_results=search_results.count(),
                    request=request,
                    page=page)

    return render(request, 'orb/search.html', {
        'form': form,
        'query': search_query,
        'page': results,
        'total_results': paginator.count,
    })
def get_box(request):
    """Return JSON places located inside the given bounding box."""
    long1 = float(request.GET.get('long1', 0))
    lat1 = float(request.GET.get('lat1', 0))
    long2 = float(request.GET.get('long2', 0))
    lat2 = float(request.GET.get('lat2', 0))
    bottom_left = Point(long1, lat1)
    top_right = Point(long2, lat2)
    max_dist = D(mi=20)
    sqs = SearchQuerySet().within('location', bottom_left, top_right)
    print(top_right, bottom_left)
    data = {'counter': 0, 'places': []}
    if sqs.count() == 0:
        return HttpResponse(content=json.dumps(data))
    for elem in sqs.all():
        # Skip hits whose object has no longitude recorded.
        if elem.object.long:
            data['counter'] += 1
            data['places'].append({
                'long': float(elem.object.long),
                'lat': float(elem.object.lat),
                'title': elem.object.name,
                'level': elem.object.level,
            })
    return HttpResponse(content=json.dumps(data))
def list(self, request):
    """DRF list endpoint backed by the search index rather than the ORM."""
    limit = int(request.GET['limit']) if 'limit' in request.GET else 100
    offset = int(request.GET['offset']) if 'offset' in request.GET else 0
    lower, upper = offset, offset + limit

    from haystack.query import SearchQuerySet

    # Create new queryset
    qs = SearchQuerySet().models(self.model)
    result_list = [row for row in qs.values(*self.fields)[lower:upper]]

    # Rewrite relative api_url values into absolute URLs.
    hostname = request._request.META['HTTP_HOST']
    scheme = 'https://' if request._request.is_secure() else 'http://'
    for row in result_list:
        if 'api_url' in row:
            row['api_url'] = "{}{}{}".format(scheme, hostname, row['api_url'])

    self.paginate_queryset(result_list)
    self.paginator.count = qs.count()
    self.paginator.display_page_controls = True
    return self.get_paginated_response(result_list)
def handle(self, *args, **options): available_models = get_all_indexed_models() models_to_check = get_models_to_check(args, available_models) # Now we know which models to check, do that: for model_name in models_to_check: model_details = available_models[model_name] qs = model_details['index'].build_queryset() sqs = SearchQuerySet( using=model_details['backend'].connection_alias ).models(model_details['model']) msg = "Checking {0} ({1} in the DB, {2} in the search index))" print msg.format(model_name, qs.count(), sqs.count()) # Get all the primary keys from the database: pks_in_database = set( unicode(pk) for pk in qs.values_list('pk', flat=True) ) # Then go through every search result for that # model, and check that the primary key is one # that's in the database: for search_result in sqs: if search_result.pk not in pks_in_database: msg = "stale search entry for primary key {0} (text: {1})" print " ", msg.format(search_result.pk, search_result.text) if options['delete']: model_details['index'].remove_object(search_result.id) print " removed!"
def get_data(self, context):
    """Collect selected articles tagged with this sub-category's title."""
    category = Sub_Category.objects.get(pk=self.sid)
    self.keyword = category.title
    res = {'articles': []}
    sqs = SearchQuerySet().models(Article).filter(
        tags=self.keyword, is_selection=True)
    paginator = Paginator(sqs, self.size)
    try:
        articles = paginator.page(self.page)
    except Exception:
        # Out-of-range / invalid page: return the empty payload.
        return res
    article_ids = [item.article_id for item in articles.object_list]
    for row in APIArticle.objects.filter(pk__in=article_ids):
        res['articles'].append(row.v4_toDict())
    res.update({
        'stat': {
            'all_count': sqs.count(),
            'is_sub': True,
            'group_id': category.group_id,
        },
    })
    return res
def get_evaluations(request):
    """Return a page of evaluated, displayed items with their rubric scores."""
    qs = SearchQuerySet().filter(
        evaluated_rubrics__in=[0] + list(
            Rubric.objects.values_list("id", flat=True))
    ).narrow("is_displayed:true")

    size = int(request.REQUEST.get("size", 100))
    start = int(request.REQUEST.get("start", 0))
    total_items = qs.count()

    items = []
    if 0 <= start < total_items:
        for result in qs[start:start + size]:
            stored = result.get_stored_fields()
            item = dict(title=stored["title"], url=stored["url"])
            # rubric_1..rubric_7 map to evaluation_score_rubric_0..6.
            for i in range(7):
                item["rubric_%d" % (i + 1)] = \
                    stored["evaluation_score_rubric_%d" % i]
            items.append(item)
    return dict(items=items, total_items=total_items)
def post_search(request):
    """Post search view: render results for ?query=..., or the bare form."""
    form = SearchForm()
    context = {'form': form}
    # The form is submitted via GET so the query appears in the URL; when
    # present, rebuild the bound form from request.GET.
    if 'query' in request.GET:
        form = SearchForm(request.GET)
        if form.is_valid():
            cd = form.cleaned_data
            # Search every indexed Post whose main content matches the
            # query; load_all() eagerly fetches the related Post objects.
            results = SearchQuerySet().models(Post).filter(
                content=cd['query']).load_all()
            # Total number of hits.
            total_results = results.count()
            context.update({
                'cd': cd,
                'results': results,
                'total_results': total_results,
            })
    return render(request, 'blog/post/search.html', context)
def search_results_json(req, term='', context_models=''):
    """Build the JSON payload for search autocomplete, plus a Wiki entry."""
    all_results = []
    term = req.GET.get('term', '')
    context_models = req.GET.get('model', '').split(',')
    p = {}
    # Context-model indexes sort first; everything else by PRIORITY.
    indexes = sorted(
        _get_indexes(),
        key=lambda index: 0 if index[0].__name__.lower() in context_models
        else index[1].PRIORITY)
    for index in indexes:
        results = SearchQuerySet().filter(content=term).models(index[0])
        results_count = results.count()
        # Top five hits per index, tagged with the index's total count.
        for hit in results[:5]:
            all_results.append(
                _create_category(hit.display, hit.index_name, term,
                                 hit.model_name, hit.url, results_count))
    all_results.append({
        'category': 'Wiki',
        'term': term,
        'search_slug': 'wiki',
    })
    return json_response(all_results)
def get_queryset(self):
    """Search video transcripts and arrange the hits into rows of three.

    NOTE(review): despite the name this returns a template context dict,
    and `num_videos` is only bound when a query matched — confirm callers
    never reach the context build without a query.
    """
    query = self.request.GET.get('q')
    query_list, results, results3 = None, None, []
    if (self.request.method == "GET") and query:
        query_list = query.split()
        results = SearchQuerySet().filter(
            sub_script__contains=query).load_all()
        # Swap thumbnails for the medium-quality variant and expose the
        # match score as a percentage.
        for idx in range(results.count()):
            results[idx].thumbnail = results[idx].thumbnail.replace(
                '/default.', '/mqdefault.')
            results[idx].num_matches = int(results[idx].score * 100)
        # At most 20 rows of three videos each.
        results_limit = min(20, int(len(results) / 3))
        num_videos = results_limit * 3
        for idx in range(0, results_limit):
            results3.append([results[3 * idx],
                             results[3 * idx + 1],
                             results[3 * idx + 2]])
    context = {
        'query': query_list,
        'num_videos': num_videos,
        'videos': results3
    }
    self.request.session['query_list'] = query_list
    return context
def sentence(request, sentence_id):
    """Show a sentence plus index-based 'more like this' suggestions.

    Fixes vs. the original: the bare `except:` (which also hid programming
    errors such as an invalid pk) was narrowed to DoesNotExist, and a
    leftover debug print of the suggestion count was removed.
    """
    try:
        sentence = Sentence.objects.get(pk=sentence_id)
    except Sentence.DoesNotExist:
        raise Http404

    # Get more sentences like this.
    more = SearchQuerySet().all().more_like_this(sentence)

    dic = {
        'object': sentence,
        'more': more,
    }
    return render_to_response("sentences/sentence_detail.html", dic,
                              context_instance=RequestContext(request))
def post_search(request):
    """Post search: paginate matches for a query, list all posts when the
    form is invalid, and render an empty state without a query."""
    form = SearchForm()
    if 'query' in request.GET:
        form = SearchForm(request.GET)
        if form.is_valid():
            cd = form.cleaned_data
            results = SearchQuerySet().models(Post).filter(
                content=cd['query']).load_all()
            total_results = results.count()
            paginator = Paginator(results, 2)
            page = request.GET.get('page')
            try:
                posts = paginator.page(page)
            except PageNotAnInteger:
                posts = paginator.page(1)
            except EmptyPage:
                posts = paginator.page(paginator.num_pages)
        else:
            # Invalid form: fall back to listing every indexed post.
            cd = {}
            results = SearchQuerySet().models(Post).all()
            total_results = results.count()
            # posts = {}
            # page = {}
            paginator = Paginator(results, 2)
            page = request.GET.get('page')
            try:
                posts = paginator.page(page)
            except PageNotAnInteger:
                posts = paginator.page(1)
            except EmptyPage:
                posts = paginator.page(paginator.num_pages)
    else:
        # No query submitted at all: render an empty state.
        cd = {}
        results = {}
        total_results = {}
        posts = {}
        page = {}
    return render(request, 'blog/post/search.html',
                  {'form': form,
                   'cd': cd,
                   'results': results,
                   'total_results': total_results,
                   'posts': posts,
                   'page': page})
def journey(request):
    """Resolve 'from'/'to' localities and list journeys linking them."""
    origin = request.GET.get('from')
    from_q = request.GET.get('from_q')
    destination = request.GET.get('to')
    to_q = request.GET.get('to_q')

    if origin:
        origin = get_object_or_404(Locality, slug=origin)

    from_options = None
    if from_q:
        from_options = SearchQuerySet().models(Locality).filter(
            content=from_q).load_all()
        if from_options.count() == 1:
            # Unambiguous match: treat it as the chosen origin.
            origin = from_options[0].object
            from_options = None
        elif origin not in from_options:
            origin = None

    if destination:
        destination = get_object_or_404(Locality, slug=destination)

    to_options = None
    if to_q:
        to_options = SearchQuerySet().models(Locality).filter(
            content=to_q).load_all()
        if to_options.count() == 1:
            destination = to_options[0].object
            to_options = None
        elif destination not in to_options:
            destination = None

    # Journeys exist only once both endpoints are resolved.
    if origin and destination:
        journeys = Journey.objects.filter(
            stopusageusage__stop__locality=origin
        ).filter(stopusageusage__stop__locality=destination)
    else:
        journeys = None

    return render(request, 'journey.html', {
        'from': origin,
        'from_q': from_q or origin or '',
        'from_options': from_options,
        'to': destination,
        'to_q': to_q or destination or '',
        'to_options': to_options,
        'journeys': journeys,
    })
def _FindNameMatches(self, query):
    """Find all the matches for this query.

    Args:
      query: the query to match.

    Returns:
      A list of CommonName objects matching the query.
    """
    matches = SearchQuerySet().filter(text__exact=query)
    if matches.count() == 0:
        logging.debug('No exact match for "%s"', query)
        return []
    logging.debug('%s exact matches for "%s" found', matches.count(), query)
    return [match.object for match in matches]
def solr_counts():
    """Summary counts from Solr: active, registrations, recent creations.

    Returns False when Solr cannot be reached.
    """
    latest_q = SearchQuerySet().filter(latest=True)
    registrations_q = latest_q.filter(category="entity_status::ACT")
    last_week_q = SearchQuerySet().filter(
        create_timestamp__gte=datetime.now() - timedelta(days=7))
    last_month_q = SearchQuerySet().filter(
        create_timestamp__gte=datetime.now() - timedelta(days=30))
    try:
        return {
            "active": latest_q.count(),
            "registrations": registrations_q.count(),
            "last_month": last_month_q.count(),
            "last_week": last_week_q.count(),
        }
    except SolrError:
        LOGGER.exception("Error when retrieving quickload counts from Solr")
        return False
def view(request):
    """Render the hit count for a search across products/services/contacts."""
    terms = request.GET.get('term')
    matches = SearchQuerySet().auto_query(terms).models(
        Product, Service, Contact)
    return render_to_response(
        'common/search.html',
        dict(terms=terms, count=matches.count()),
        context_instance=RequestContext(request))
def count_annotations(self, obj):
    """Number of indexed annotations attached to this timeseries object."""
    return SearchQuerySet().models(Annotation).filter(
        the_model_name__exact='timeseries',
        the_model_pk__exact=obj.pk,
    ).count()
def search_results(request, search_string, unit='pct', fiscal_year=2009):
    """Render the generic program list for a full-text search."""
    search = unquote(search_string)
    # NOTE(review): the index is queried with the still-quoted
    # search_string, while the unquoted value is only displayed —
    # confirm this asymmetry is intentional.
    programs = SearchQuerySet().filter(content=search_string)
    result_count = programs.count()
    table_data = generic_program_table(programs, fiscal_year, unit)
    return render_to_response('generic_program_list.html', {
        'table_data': table_data,
        'fiscal_year': fiscal_year,
        'unit': unit,
        'search_string': search,
        'result_count': result_count,
    })
def get_context_data(self, **kwargs):
    """Profile context: activity counts, recent results, emails, lists."""
    user = self.object
    context = {}
    count_types = OrderedDict()
    fields_or_lookup = (
        {'collaborators__contains': user.username},
        {'fullname_and_username__contains': user.username},
    )
    # Per-type counter models are currently disabled:
    # {'wiki': WikiCollabCount, 'ticket': TicketCollabCount}
    counter_class = {}
    types = ['thread']
    # types.extend(['ticket', 'wiki', 'changeset', 'attachment'])
    messages = Message.objects.filter(from_address__user__pk=user.pk)
    for type in types:
        CounterClass = counter_class.get(type)
        if CounterClass:
            try:
                counter = CounterClass.objects.get(author=user.username)
            except CounterClass.DoesNotExist:
                count_types[trans(type)] = 0
            else:
                count_types[trans(type)] = counter.count
        elif type == 'thread':
            count_types[trans(type)] = messages.count()
        else:
            # Fall back to counting index hits for this type.
            sqs = SearchQuerySet()
            for filter_or in fields_or_lookup:
                sqs = sqs.filter_or(type=type, **filter_or)
            count_types[trans(type)] = sqs.count()
    context['type_count'] = count_types

    # Ten most recently modified non-thread results involving the user.
    sqs = SearchQuerySet()
    for filter_or in fields_or_lookup:
        sqs = sqs.filter_or(**filter_or).exclude(type='thread')
    context['results'] = sqs.order_by('-modified', '-created')[:10]

    email_pks = [addr.pk for addr in user.emails.iterator()]
    query = Message.objects.filter(from_address__in=email_pks)
    query = query.order_by('-received_time')
    context['emails'] = query[:10]

    count_by = 'thread__mailinglist__name'
    context['list_activity'] = dict(
        messages.values_list(count_by)
        .annotate(Count(count_by))
        .order_by(count_by))

    context.update(kwargs)
    return super(UserProfileDetailView, self).get_context_data(**context)
def microsite(request, microsite):
    """Microsite landing page: featured items, facets, keyword cloud.

    All local variable names are significant: the template receives them
    via locals().
    """
    microsite = get_object_or_404(Microsite, slug=microsite)
    page_title = u"%s Home" % microsite.name
    breadcrumbs = [{
        "url": reverse("materials:microsite",
                       kwargs=dict(microsite=microsite.slug)),
        "title": page_title,
    }]

    query = SearchQuerySet().narrow("is_displayed:true")
    query = query.narrow("microsites:%i" % microsite.id)
    query = query.order_by("-rating")
    query = query.facet("indexed_topics").facet("keywords").facet(
        "grade_levels").facet("course_material_types")

    # Top eight rated items for the landing grid.
    items = []
    results = query[0:8]
    for result in results:
        items.append(populate_item_from_search_result(result))

    facets = query.facet_counts()["fields"]

    topics = []
    topic_counts = dict(facets["indexed_topics"])
    for topic, tree_info in tree_item_iterator(microsite.topics.all()):
        topic.count = topic_counts.get(str(topic.id), 0)
        topics.append((topic, tree_info))

    grade_levels = []
    grade_level_counts = dict(facets["grade_levels"])
    for level in GradeLevel.objects.all():
        level.count = grade_level_counts.get(str(level.id), 0)
        grade_levels.append(level)

    course_material_types = []
    course_material_type_counts = dict(facets["course_material_types"])
    for material_type in CourseMaterialType.objects.all():
        material_type.count = course_material_type_counts.get(
            str(material_type.id), 0)
        course_material_types.append(material_type)

    # Keyword tag cloud, only when the query matched anything.
    keywords = query.count() and facets.get("keywords", []) or []
    if len(keywords) > MAX_TOP_KEYWORDS:
        keywords = keywords[:MAX_TOP_KEYWORDS]
    keywords = get_tag_cloud(dict(keywords), 3, 0, 0)
    for keyword in keywords:
        name = get_name_from_slug(Keyword, keyword["slug"]) or \
               get_name_from_slug(Tag, keyword["slug"]) or \
               keyword["slug"]
        keyword["name"] = name

    featured_k12 = SearchQuerySet().filter(
        workflow_state=PUBLISHED_STATE, featured=True,
        grade_levels__in=(1, 2),
        microsites=microsite.id).order_by("-featured_on").load_all()[:3]
    featured_k12 = [r.object for r in featured_k12 if r]

    featured_highered = SearchQuerySet().filter(
        workflow_state=PUBLISHED_STATE, featured=True,
        grade_levels=3,
        microsites=microsite.id).order_by("-featured_on").load_all()[:3]
    featured_highered = [r.object for r in featured_highered if r]

    slides = Slide.objects.filter(microsite=microsite)
    resource_number = SearchQuerySet().filter(
        workflow_state=PUBLISHED_STATE, microsites=microsite.id).count()

    return direct_to_template(
        request, "materials/microsites/%s.html" % microsite.slug, locals())
def get_context_data(self, **kwargs):
    """Profile context: per-type counts, recent results, emails, lists."""
    user = self.object
    context = {}
    count_types = OrderedDict()
    fields_or_lookup = (
        {'collaborators__contains': user.username},
        {'fullname_and_username__contains': user.username},
    )
    counter_class = {
        'wiki': WikiCollabCount,
        'ticket': TicketCollabCount,
    }
    messages = Message.objects.filter(from_address__user__pk=user.pk)
    for type in ['thread', 'ticket', 'wiki', 'changeset', 'attachment']:
        CounterClass = counter_class.get(type)
        if CounterClass:
            # Pre-aggregated counter models, when available.
            try:
                counter = CounterClass.objects.get(author=user.username)
            except CounterClass.DoesNotExist:
                count_types[trans(type)] = 0
            else:
                count_types[trans(type)] = counter.count
        elif type == 'thread':
            count_types[trans(type)] = messages.count()
        else:
            # Fall back to counting index hits for this type.
            sqs = SearchQuerySet()
            for filter_or in fields_or_lookup:
                sqs = sqs.filter_or(type=type, **filter_or)
            count_types[trans(type)] = sqs.count()
    context['type_count'] = count_types

    # Ten most recently modified non-thread results involving the user.
    sqs = SearchQuerySet()
    for filter_or in fields_or_lookup:
        sqs = sqs.filter_or(**filter_or).exclude(type='thread')
    context['results'] = sqs.order_by('-modified', '-created')[:10]

    email_pks = [addr.pk for addr in user.emails.iterator()]
    query = Message.objects.filter(from_address__in=email_pks)
    query = query.order_by('-received_time')
    context['emails'] = query[:10]

    count_by = 'thread__mailinglist__name'
    context['list_activity'] = dict(
        messages.values_list(count_by)
        .annotate(Count(count_by))
        .order_by(count_by))

    context.update(kwargs)
    return super(UserProfileDetailView, self).get_context_data(**context)
def api_question_result(request):
    """Search questions and return a JSON-serializable result dict.

    GET parameters:
        pageNum  -- 1-based page number
        pageSize -- items per page
        title    -- search text; empty string returns all questions

    Returns a dict with ``code``/``msg`` status, ``pageInfo`` pagination
    metadata and a ``data`` list of question entries.
    """
    # FIX: query-string values are strings; the original used them
    # directly in arithmetic (``(pageNum - 1) * pageSize``), raising
    # TypeError.  Convert the numeric parameters up front.
    page_num = int(request.GET['pageNum'].strip())
    page_size = int(request.GET['pageSize'].strip())
    query = request.GET['title'].strip()

    if query == '':
        posts = SearchQuerySet().using('question').models(Question).all()
    else:
        posts = SearchQuerySet().using('question').models(Question).filter(
            content=query)
    result_num = posts.count()

    json_dict = {}
    json_dict['code'] = 0
    json_dict['msg'] = "操作成功"

    total_page = math.ceil(result_num / page_size)  # round up to whole pages
    page_info = {}
    page_info['pageNums'] = page_num
    page_info['pageSize'] = page_size
    page_info['pageTotal'] = total_page
    page_info['pageCount'] = 0
    json_dict['pageInfo'] = page_info

    # The last page may be shorter than page_size, so slice to the end.
    # FIX: the original compared the raw 'pageNums' GET string (a likely
    # typo for 'pageNum', and a str-vs-int comparison) against total_page.
    if page_num == total_page:
        returned_posts = posts[(page_num - 1) * page_size:]
    else:
        returned_posts = posts[(page_num - 1) * page_size:page_num * page_size]

    data = []
    for item in returned_posts:
        item_info = {
            'title': item.title,
            'sourceName': '',
            'createTime': '',
            'level': '',
            'labels': '',
            'ansName': '',
            'answer': '',
            'answerNums': '',
            'content': item.question_text,
            'readNum': '',
            'praiseNum': '',
            'status': '',
            'imgUrl': '',
            'language': '',
            'languageCode': '',
        }
        data.append({'id': item.id, 'contentType': 1, 'info': item_info})
    json_dict['data'] = data
    return json_dict
def cruce8(form):
    """Count Venezuelan results of tipo 4-8 and group them by state.

    Returns ``{'total': overall count, 'yacimientos': {state: results}}``.
    ``form`` is accepted for interface compatibility but not used.
    """
    base = SearchQuerySet().filter(pais='Venezuela').filter(tipo__in=[4, 5, 6, 7, 8])
    total = base.count()
    yacimientos = {estado: base.filter(estado=estado) for estado in ESTADOS}
    return {'total': total, 'yacimientos': yacimientos}
def post_search(request):
    """Search blog posts, paginated two per page.

    Three outcomes: a valid submitted query searches matching posts; an
    invalid form falls back to listing every post; no 'query' parameter
    at all renders the empty search form.
    """
    form = SearchForm()
    if 'query' in request.GET:
        form = SearchForm(request.GET)
        if form.is_valid():
            cd = form.cleaned_data
            results = SearchQuerySet().models(Post).filter(content=cd['query']).load_all()
        else:
            # Invalid form: fall back to listing every indexed post.
            cd = {}
            results = SearchQuerySet().models(Post).all()
        total_results = results.count()
        page = request.GET.get('page')
        # FIX: the pagination try/except block was duplicated verbatim in
        # both branches; extracted into _paginate_posts.
        posts = _paginate_posts(results, page)
    else:
        # No search submitted: render empty placeholders.
        cd = {}
        results = {}
        total_results = {}
        posts = {}
        page = {}
    return render(request, 'blog/post/search.html',
                  {'form': form,
                   'cd': cd,
                   'results': results,
                   'total_results': total_results,
                   'posts': posts,
                   'page': page})


def _paginate_posts(results, page):
    """Return the requested page (2 items each), clamping bad page values."""
    paginator = Paginator(results, 2)
    try:
        return paginator.page(page)
    except PageNotAnInteger:
        # Non-numeric page parameter: show the first page.
        return paginator.page(1)
    except EmptyPage:
        # Out-of-range page parameter: show the last page.
        return paginator.page(paginator.num_pages)
def cruce1(form):
    """Filter the search index by the state chosen on ``form``.

    Returns the matching result set, its size, and the state name.
    """
    estado = form.cleaned_data['estado']
    resultados = SearchQuerySet().filter(estado=estado)
    total = resultados.count()
    return {'total': total, 'estado': estado, 'results': resultados}
def get_queryset(self):
    """Search visible documents, record the match count on ``self.count``,
    and return at most ``self.limit`` results.

    An invalid (or absent) search form simply skips the content filter,
    returning all visible documents.
    """
    results = SearchQuerySet().models(docs_models.Document)
    self.form = docs_forms.DocumentSearchForm(data=self.request.GET)
    if self.form.is_valid():
        search_text = self.form.cleaned_data.get('q', '')
        results = results.filter(content=search_text)
    # Hidden documents are never exposed, searched or not.
    results = results.filter(visible=True)
    self.count = results.count()
    return results[:self.limit]
def search_api(request):
    """JSON search endpoint.

    Honors optional ``bytype`` (major type or subtype), full-text ``q``,
    ``sort`` order, and ``start``/``limit`` paging; responds with the
    matching rows plus per-type facet counts.
    """
    query = request.REQUEST.get("q", "")
    start = int(request.REQUEST.get("start", 0))
    limit = int(request.REQUEST.get(
        "limit", getattr(settings, "HAYSTACK_SEARCH_RESULTS_PER_PAGE", 25)))
    sort = request.REQUEST.get("sort", "relevance")
    requested_type = request.REQUEST.get("bytype")

    sqs = SearchQuerySet()
    if requested_type is not None:
        if requested_type in ["map", "layer", "contact", "group"]:
            # One of the major types (not a sub type).
            sqs = sqs.narrow("type:%s" % requested_type)
        elif requested_type in ["vector", "raster"]:
            # One of the sub types.
            sqs = sqs.narrow("subtype:%s" % requested_type)
    if query:
        sqs = sqs.filter(content=AutoQuery(query))
    sqs = sqs.facet("type").facet("subtype")

    # Map the sort keyword to an index field; unknown keywords (including
    # the default "relevance") leave the natural relevance ordering.
    order_by_sort = {"newest": "-date", "oldest": "date",
                     "alphaaz": "title", "alphaza": "-title"}
    order_field = order_by_sort.get(sort.lower())
    if order_field:
        sqs = sqs.order_by(order_field)

    results = []
    for offset, hit in enumerate(sqs[start:start + limit]):
        row = json.loads(hit.json)
        row.update({"iid": offset + start})
        results.append(row)

    facets = sqs.facet_counts()
    counts = {"map": 0, "layer": 0, "vector": 0,
              "raster": 0, "contact": 0, "group": 0}
    for facet_name, facet_count in facets.get("fields", {}).get("type", []):
        counts[facet_name] = facet_count
    for facet_name, facet_count in facets.get("fields", {}).get("subtype", []):
        counts[facet_name] = facet_count

    payload = {
        "success": True,
        "total": sqs.count(),
        "rows": results,
        "counts": counts,
    }
    return HttpResponse(json.dumps(payload), mimetype="application/json")
def test_items_are_searchable(self):
    """Posting a new tagged item makes it findable in the search index."""
    self.api_login()
    payload = {'url': EXAMPLE_COM, 'tags': ['test-tag', 'second-tag']}
    response = self.client.post('/api/items/', payload, format='json')
    self.assertTrue('id' in response.data)
    matches = SearchQuerySet().filter(owner_id=1).auto_query('second-tag')
    self.assertEqual(matches.count(), 1,
                     'New item is not in the searchable by tag')
def testAutoStart(self):
    """Start, index, then stop — and verify the daemon comes back up
    on its own while the index still answers queries.
    """
    self.testStart()
    self._build_index()
    self.testStop()
    godfather_hits = SearchQuerySet().filter(title="Godfather")
    self.assertEqual(godfather_hits.count(), 2)
    self.assertTrue(self.daemon.is_runing())  # sic: method name as defined
    self.assertTrue(self.daemon.is_active())
def random(self):
    """Return a random validated object dated within the next 90 days,
    or ``None`` when nothing matches.
    """
    # Evaluate "now" once so both bounds use the same instant (the
    # original called datetime.now() twice).
    now = datetime.datetime.now()
    sqs = SearchQuerySet()
    sqs = sqs.filter(
        validated=True,
        date__gte=now,
        date__lte=now + datetime.timedelta(days=90),
    )
    last = sqs.count() - 1
    # FIX: the original used ``last > 0``, which returned None when
    # exactly one result matched (count 1 -> last 0).
    if last >= 0:
        index = randint(0, last)
        return sqs[index].object
def cruce8(form):
    """Tally Venezuelan results of tipo 4-8, overall and per state.

    ``form`` is accepted for interface compatibility but not used.
    """
    venezuela = SearchQuerySet().filter(pais='Venezuela').filter(tipo__in=[4, 5, 6, 7, 8])
    total = venezuela.count()
    yacimientos = {}
    for nombre_estado in ESTADOS:
        yacimientos[nombre_estado] = venezuela.filter(estado=nombre_estado)
    return {'total': total, 'yacimientos': yacimientos}
def post_search(request):
    """Render the blog search page.

    Runs the search only when a 'query' parameter is present and the
    form validates; otherwise the result placeholders stay None.
    """
    cd, results, total_results = None, None, None
    if 'query' not in request.GET:
        form = SearchForm()
    else:
        form = SearchForm(request.GET)
        if form.is_valid():
            cd = form.cleaned_data
            matched = SearchQuerySet().models(Post).filter(content=cd['query'])
            results = matched.load_all()
            total_results = results.count()
    context = {'form': form,
               'cd': cd,
               'results': results,
               'total_results': total_results}
    return render(request, 'blog/search.html', context)
def search(self):
    """Run the base form search, then narrow by the optional channel,
    usermask and date-range fields, newest first.

    NOTE(review): every ``print`` below issues an extra ``count()``
    query purely for debug output — likely development leftovers.
    """
    # First, store the SearchQuerySet received from other processing.
    sqs = super(UrlSearchForm, self).search()
    # Workaround to return all results when there is no searchword (instead of returning 0 results)
    if not self.cleaned_data.get('q'):
        # .all() doesn't work?! so .exclude() something very unlikely to actually filter away any results... stupid
        sqs = SearchQuerySet().exclude(when=timezone.now())
    # Filter channel if requested
    if self.cleaned_data['channel']:
        print("filtering %s results by channel %s" % (sqs.count(), self.cleaned_data['channel']))
        sqs = sqs.filter(channel=self.cleaned_data['channel'])
    # Filter usermask if requested
    if self.cleaned_data['usermask']:
        print("filtering %s results by usermask %s" % (sqs.count(), self.cleaned_data['usermask']))
        sqs = sqs.filter(usermask__icontains=self.cleaned_data['usermask'])
    # Filter start_date if requested
    if self.cleaned_data['start_date']:
        print("filtering %s results by start_date %s" % (sqs.count(), self.cleaned_data['start_date']))
        sqs = sqs.filter(when__gte=self.cleaned_data['start_date'])
    # Filter end_date if requested
    if self.cleaned_data['end_date']:
        # add timestamp to end_date to include the whole end_date day,
        # because datefields are compared as 0am on the given date,
        # which is what we want for start_date but not for end_date)
        end_time = datetime.datetime.combine(self.cleaned_data['end_date'], datetime.time.max)
        print("filtering %s results by end_date %s" % (sqs.count(), self.cleaned_data['end_date']))
        sqs = sqs.filter(when__lte=end_time)
    # Order results by date
    sqs = sqs.order_by('-when')
    # Return results
    print("returning %s results" % sqs.count())
    return sqs