Пример #1
0
def get_diplomaticletter_locations():
    """Return (location, count) facet pairs for DiplomaticLetter documents."""
    sqs = SearchQuerySet().models(models.DiplomaticLetter)
    sqs = sqs.facet('locations', limit=10000, mincount=1)
    try:
        return sqs.facet_counts()['fields']['locations']
    except KeyError:
        # Facet field absent from the response (e.g. nothing indexed yet).
        return []
Пример #2
0
def get_corpusdiplomaticum_areanames():
    """Return (areaName, count) facet pairs for Corpus Diplomaticum contracts."""
    sqs = SearchQuerySet().models(models.CorpusDiplomaticumContract)
    sqs = sqs.facet('areaName', limit=10000, mincount=1)
    try:
        return sqs.facet_counts()['fields']['areaName']
    except KeyError:
        # Facet field absent from the response (e.g. nothing indexed yet).
        return []
Пример #3
0
def get_placard_governors():
    """Return (value, count) facet pairs for the Placard 'locations' facet.

    NOTE(review): despite the "governors" name, this facets on 'locations' —
    confirm the field name is intended.
    """
    sqs = SearchQuerySet().models(models.Placard)
    sqs = sqs.facet('locations', limit=10000, mincount=1)
    try:
        return sqs.facet_counts()['fields']['locations']
    except KeyError:
        # Facet field absent from the response (e.g. nothing indexed yet).
        return []
Пример #4
0
def tags(request, tags=''):
    """List the current user's items that carry ALL of the comma-separated
    *tags*, minus the profile's excluded tags, newest first, with tag facets.

    Redirects to the index when no tags are given.
    """
    if tags == '':
        return redirect(reverse('index'))
    # Drop empty segments from e.g. trailing commas in the URL.
    tag_list = [tag for tag in tags.split(',') if tag != '']

    # Due to a bug (or feature?) in Whoosh or haystack, we can't filter for all tags at once,
    # the .filter(tags=[...]) method cannot handle spaces apparently
    # It however works with tags__in, but that is an OR
    sqs = SearchQuerySet().filter(owner_id=request.user.id)
    for tag in tag_list:
        # One filter per tag: chaining ANDs the single-element OR filters.
        sqs = sqs.filter(tags__in=[tag])

    profile = request.user.userprofile

    if not profile.show_excluded:
        excluded_tags = profile.excluded_tags.names()
        for tag in excluded_tags:
            sqs = sqs.exclude(tags__in=[tag])

    sqs = sqs.order_by('-created').facet('tags')

    facets = sqs.facet_counts()
    result_objects = [result.object for result in sqs]
    # Tag(name, count, tag_list): tag_list lets the template mark active tags.
    tag_objects = [Tag(name, count, tag_list) for name, count in facets.get('fields', {}).get('tags', [])]
    return TemplateResponse(request, 'readme/item_list.html', {
        'current_item_list': result_objects,
        'tags': tag_objects,
        'tag_names': json.dumps([tag.name for tag in tag_objects]),
        'user': request.user,
    })
Пример #5
0
def get_appendix_europeannames():
    """Return (name, count) facet pairs of European person names in Appendix."""
    sqs = SearchQuerySet().models(models.Appendix)
    sqs = sqs.facet('person_names_european_list', limit=10000, mincount=1)
    try:
        return sqs.facet_counts()['fields']['person_names_european_list']
    except KeyError:
        # Facet field absent from the response (e.g. nothing indexed yet).
        return []
Пример #6
0
def get_documenttypes_appendix():
    """Return (documenttype, count) facet pairs for Appendix documents."""
    sqs = SearchQuerySet().models(models.Appendix)
    sqs = sqs.facet('documenttypes_list', limit=10000, mincount=1)
    try:
        return sqs.facet_counts()['fields']['documenttypes_list']
    except KeyError:
        # Facet field absent from the response (e.g. nothing indexed yet).
        return []
Пример #7
0
    def get_searchqueryset(self, form):
        """Build and return the Haystack SearchQuerySet for this view.

        Applied in order: the model restriction, the draft-hiding narrow for
        non-staff users, each facet class, the (valid) form's filters, and
        each facet class's narrowing taken from the request's GET params.
        Facet counts are parsed into the facet classes as a side effect, and
        the final queryset is cached on ``self.searchqueryset``.
        """
        sqs = SearchQuerySet()
        if self.model:
            sqs = sqs.models(self.model)
        # FIXME: Move somewhere more sensible
        if settings.PORTAL_HIDE_DRAFTS and not self.request.user.is_staff:
            # Non-staff users only ever see published resources.
            sqs = sqs.narrow("publication_status:%d" %
                             models.Resource.PUBLISHED)

        for facet in self.facetclasses:
            sqs = facet.apply(sqs)

        # apply the query
        if form.is_valid():
            sqs = form.filter(sqs)
        for facetclass in self.facetclasses:
            sqs = facetclass.narrow(
                sqs, self.request.GET.getlist(facetclass.paramname))
        counts = sqs.facet_counts()
        current = sqs.query.narrow_queries
        for facetclass in self.facetclasses:
            # Each facet class pulls its own counts out of the response.
            facetclass.parse(counts, current)

        # FIXME: Find way around assigning the sqs to the instance,
        # but it seems to be difficult to prevent it from running
        # multiple times otherwise, e.g. when checking for a spelling
        # suggestion.
        self.searchqueryset = sqs
        return sqs
Пример #8
0
    def get_searchqueryset(self, form):
        """Build and return the Haystack SearchQuerySet for this view.

        Applied in order: the model restriction, the draft-hiding narrow for
        non-staff users, each facet class, the (valid) form's filters, and
        each facet class's narrowing taken from the request's GET params.
        Facet counts are parsed into the facet classes as a side effect, and
        the final queryset is cached on ``self.searchqueryset``.
        """
        sqs = SearchQuerySet()
        if self.model:
            sqs = sqs.models(self.model)
        # FIXME: Move somewhere more sensible
        if settings.PORTAL_HIDE_DRAFTS and not self.request.user.is_staff:
            # Non-staff users only ever see published resources.
            sqs = sqs.narrow("publication_status:%d" % models.Resource.PUBLISHED)

        for facet in self.facetclasses:
            sqs = facet.apply(sqs)

        # apply the query
        if form.is_valid():
            sqs = form.filter(sqs)
        for facetclass in self.facetclasses:
            sqs = facetclass.narrow(sqs, self.request.GET.getlist(
                facetclass.paramname))
        counts = sqs.facet_counts()
        current = sqs.query.narrow_queries
        for facetclass in self.facetclasses:
            # Each facet class pulls its own counts out of the response.
            facetclass.parse(counts, current)

        # FIXME: Find way around assigning the sqs to the instance,
        # but it seems to be difficult to prevent it from running
        # multiple times otherwise, e.g. when checking for a spelling
        # suggestion.
        self.searchqueryset = sqs
        return sqs
Пример #9
0
 def _get_facet_group(self, facet_field):
     """Return [{'name': value, 'count': n}, ...] for every value of *facet_field*."""
     sqs = SearchQuerySet().filter(django_ct='works.work').facet(facet_field)
     pairs = sqs.facet_counts()['fields'][facet_field]
     return [{'name': value, 'count': total} for value, total in pairs]
Пример #10
0
def get_marginalia_placenames():
    """Return (place name, count) facet pairs for JournalEntry documents."""
    sqs = SearchQuerySet().models(models.JournalEntry)
    sqs = sqs.facet('place_names_list', limit=10000, mincount=1)
    try:
        return sqs.facet_counts()['fields']['place_names_list']
    except KeyError:
        # Facet field absent from the response (e.g. nothing indexed yet).
        return []
Пример #11
0
def get_resolution_sources():
    """Return (source, count) facet pairs for Resolution documents.

    Raises a descriptive Exception (pointing at the Solr URL) when the
    facet is missing, which usually means Solr is down or unschema'd.
    NOTE: uses Python 2 ``except X, e`` syntax — this module is Python 2.
    """
    qry = SearchQuerySet().models(models.Resolution).facet('source')
    try:
        return qry.facet_counts()['fields']['source']
    except KeyError, error:
        from django.conf import settings
        # 'error' and 'solr_url' are interpolated via locals() below.
        solr_url = settings.HAYSTACK_SOLR_URL
        raise Exception('%(error)s\n Check if solr is running on %(solr_url)s; try "fab build_solr_schema"' % locals())
Пример #12
0
def get_facets_for_field(field, model=None):
    """Return the (value, count) facet list for *field* over displayed items.

    Restricts to *model* when given; returns [] if the facet is absent.
    """
    sqs = SearchQuerySet()
    if model is not None:
        sqs = sqs.models(model)
    sqs = sqs.narrow("is_displayed:true").facet(field)
    return sqs.facet_counts().get("fields", {}).get(field, [])
Пример #13
0
def get_dehaan_indexTerms():
    """Return (index term, count) facet pairs for DeHaan documents.

    The index terms live in a facet called 'vessel_names_list' — per the
    original note, that name was reused "for reasons of quick coding".
    """
    sqs = SearchQuerySet().models(models.DeHaan)
    sqs = sqs.facet('vessel_names_list', limit=10000, mincount=1)
    try:
        return sqs.facet_counts()['fields']['vessel_names_list']
    except KeyError:
        # Facet field absent from the response (e.g. nothing indexed yet).
        return []
Пример #14
0
def view_analytics(request):
    """Render the analytics page with attack/country facet counts and IP total."""
    service_sqs = SearchQuerySet().facet('attacked_service')
    context = RequestContext(request, {
        'attacked_service_facet': service_sqs.facet_counts()
    })

    country_sqs = SearchQuerySet().facet('country_code')
    context.update({'country_code_facet': country_sqs.facet_counts()})

    ip_sqs = SearchQuerySet().facet('ip')
    context.update({'ip_count': ip_sqs.count()})

    return render_to_response("analytics_view.html", context)
Пример #15
0
 def get_context_data(self, **kwargs):
     """Add 'status' facet counts and this curator's total record count."""
     context = super(CuratorWorkflowView, self).get_context_data(**kwargs)
     # Only primary publications assigned to the requesting curator.
     sqs = SearchQuerySet().filter(assigned_curator=self.request.user,
                                   is_primary=True).facet('status')
     context.update(facets=sqs.facet_counts(),
                    total_number_of_records=Publication.objects.filter(
                        assigned_curator=self.request.user).count())
     return context
Пример #16
0
def microsite(request, microsite):
    """Render a microsite home page: top items, topic/grade/type facet
    counts, a keyword tag cloud, featured items and slides.

    NOTE: the template context is built from ``locals()`` at the end, so
    every local variable name here is part of the template contract —
    do not rename locals in this function.
    """
    microsite = get_object_or_404(Microsite, slug=microsite)

    page_title = u"%s Home" % microsite.name
    breadcrumbs = [{"url": reverse("materials:microsite", kwargs=dict(microsite=microsite.slug)), "title": page_title}]

    query = SearchQuerySet().narrow("is_displayed:true")
    query = query.narrow("microsites:%i" % microsite.id)
    query = query.order_by("-rating")
    query = query.facet("indexed_topics").facet("keywords").facet("grade_levels").facet("course_material_types")

    # Top eight items by rating.
    items = []
    results = query[0:8]
    for result in results:
        items.append(populate_item_from_search_result(result))

    facets = query.facet_counts()["fields"]

    topics = []
    topic_counts = dict(facets["indexed_topics"])
    for topic, tree_info in tree_item_iterator(microsite.topics.all()):
        # Facet keys are stringified ids; missing topics get a zero count.
        topic.count = topic_counts.get(str(topic.id), 0)
        topics.append((topic, tree_info))

    grade_levels = []
    grade_level_counts = dict(facets["grade_levels"])
    for level in GradeLevel.objects.all():
        level.count = grade_level_counts.get(str(level.id), 0)
        grade_levels.append(level)

    course_material_types = []
    course_material_type_counts = dict(facets["course_material_types"])
    for material_type in CourseMaterialType.objects.all():
        material_type.count = course_material_type_counts.get(str(material_type.id), 0)
        course_material_types.append(material_type)

    # Keyword tag cloud, capped at MAX_TOP_KEYWORDS entries.
    keywords = query.count() and facets.get("keywords", []) or []
    if len(keywords) > MAX_TOP_KEYWORDS:
        keywords = keywords[:MAX_TOP_KEYWORDS]
    keywords = get_tag_cloud(dict(keywords), 3, 0, 0)
    for keyword in keywords:
        # Resolve a display name from either Keyword or Tag; fall back to slug.
        name = get_name_from_slug(Keyword, keyword["slug"]) or \
               get_name_from_slug(Tag, keyword["slug"]) or \
               keyword["slug"]
        keyword["name"] = name

    # grade_levels__in=(1, 2): K-12 levels; grade_levels=3: higher ed.
    featured_k12 = SearchQuerySet().filter(workflow_state=PUBLISHED_STATE, featured=True, grade_levels__in=(1, 2), microsites=microsite.id).order_by("-featured_on").load_all()[:3]
    featured_k12 = [r.object for r in featured_k12 if r]

    featured_highered = SearchQuerySet().filter(workflow_state=PUBLISHED_STATE, featured=True, grade_levels=3, microsites=microsite.id).order_by("-featured_on").load_all()[:3]
    featured_highered = [r.object for r in featured_highered if r]

    slides = Slide.objects.filter(microsite=microsite)

    resource_number = SearchQuerySet().filter(workflow_state=PUBLISHED_STATE, microsites=microsite.id).count()

    # All locals above become the template context.
    return direct_to_template(request, "materials/microsites/%s.html" % microsite.slug, locals())
Пример #17
0
    def projectsheet_search(self, request, **kwargs):
        """Search project sheets.

        With ``?auto=prefix`` returns tag facet values (with counts) that
        start with the prefix; otherwise returns a paginated list of
        dehydrated project sheet bundles, narrowed by any ``?facet=`` tags
        and filtered by ``?q=``.
        """
        self.method_check(request, allowed=['get'])
        self.throttle_check(request)
        self.is_authenticated(request)

        # Query params
        query = request.GET.get('q', '')
        autocomplete = request.GET.get('auto', None)
        selected_facets = request.GET.getlist('facet', None)
        order = request.GET.getlist('order', None)
        sqs = SearchQuerySet().models(self.Meta.object_class).facet('tags')

        # narrow down QS with facets
        if selected_facets:
            for facet in selected_facets:
                sqs = sqs.narrow('tags:%s' % (facet))

        # A: if autocomplete, we return only a list of tags
        # starting with "auto" along with their count.
        if autocomplete is not None:
            tags = sqs.facet_counts()
            tags = tags['fields']['tags']
            if len(autocomplete) > 0:
                # Empty ?auto= returns all tags; otherwise prefix-match.
                tags = [t for t in tags if t[0].startswith(autocomplete)]
            tags = [{'name': t[0], 'count': t[1]} for t in tags]
            object_list = {
                'objects': tags,
            }
        # B: else, we return a list of projectsheets
        else:
            # launch query
            if query != "":
                sqs = sqs.auto_query(query)

            uri = reverse('api_projectsheet_search',
                          kwargs={'api_name': self.api_name,
                                  'resource_name': self._meta.resource_name})
            paginator = Paginator(request.GET, sqs, resource_uri=uri)

            objects = []
            for result in paginator.page()['objects']:
                if result:
                    bundle = self.build_bundle(obj=result.object,
                                               request=request)
                    bundle = self.full_dehydrate(bundle, for_list=True)

                    objects.append(bundle)

            object_list = {
                'meta': paginator.page()['meta'],
                'objects': objects,
            }

        self.log_throttled_access(request)
        return self.create_response(request, object_list)
Пример #18
0
    def get_context_data(self, **kwargs):
        """Add a JSON list of the current user's tag names to the context."""
        context = super(TagNamesToContextMixin, self).get_context_data(**kwargs)

        sqs = SearchQuerySet().filter(owner_id=self.request.user.id).facet('tags')
        tag_facets = sqs.facet_counts().get('fields', {}).get('tags', [])

        # Drop null facet values before serializing.
        names = [name for name, _count in tag_facets if name is not None]
        context['tag_names'] = json.dumps(names)
        return context
Пример #19
0
 def get_collections(self):
     """Return [{'collection': Collection, 'count': n}] built from the
     'collection' facet, skipping facet values with no Collection row."""
     from models import Collection
     collections = []
     search_query_set = SearchQuerySet().filter(django_ct='works.work').facet('collection')
     for x in search_query_set.facet_counts()['fields']['collection']:
         try:
             # x is a (collection name, count) facet pair.
             collections.append({'collection': Collection.objects.get(name=x[0]), 'count': x[1]})
         except ObjectDoesNotExist:
             # The search index can lag the DB; log and skip stale values.
             log.debug(x[0]+' collection does not exist')
     return collections
Пример #20
0
def get_subjects(firstletter=None):
    """Return a list of (subject, number_of_resolutions) facet pairs.

    When *firstletter* is given, only subjects whose name starts with it
    are returned. Returns [] when the facet is missing from the response.
    """
    try:
        qry = SearchQuerySet().models(models.Resolution).facet(
            'subject', limit=10000, mincount=1)
        subjects = qry.facet_counts()['fields']['subject']
        if firstletter:
            # BUG FIX: facet entries are (name, count) tuples, so match on
            # x[0]; the original called x.lower() on the tuple, which raised
            # AttributeError instead of filtering.
            subjects = [x for x in subjects
                        if x[0].lower().startswith(firstletter)]
        return subjects
    except KeyError:
        return []
Пример #21
0
def search_api(request):
    """JSON search API: filtered, faceted, sorted, paginated results plus
    per-type counts. Python 2 / old-Django era (request.REQUEST, mimetype).
    """
    query = request.REQUEST.get("q", "")
    start = int(request.REQUEST.get("start", 0))
    limit = int(request.REQUEST.get("limit", getattr(settings, "HAYSTACK_SEARCH_RESULTS_PER_PAGE", 25)))
    sort = request.REQUEST.get("sort", "relevance")
    type = request.REQUEST.get("bytype")

    sqs = SearchQuerySet()

    if type is not None:
        if type in ["map", "layer", "contact", "group"]:
            # Type is one of our Major Types (not a sub type)
            sqs = sqs.narrow("type:%s" % type)
        elif type in ["vector", "raster"]:
            # Type is one of our sub types
            sqs = sqs.narrow("subtype:%s" % type)

    if query:
        sqs = sqs.filter(content=AutoQuery(query))

    sqs = sqs.facet("type").facet("subtype")

    # Default (no match below) is relevance ordering.
    if sort.lower() == "newest":
        sqs = sqs.order_by("-date")
    elif sort.lower() == "oldest":
        sqs = sqs.order_by("date")
    elif sort.lower() == "alphaaz":
        sqs = sqs.order_by("title")
    elif sort.lower() == "alphaza":
        sqs = sqs.order_by("-title")

    results = []

    # Slice the requested page; 'iid' is the absolute result index.
    for i, result in enumerate(sqs[start:start + limit]):
        data = json.loads(result.json)
        data.update({"iid": i + start})
        results.append(data)

    facets = sqs.facet_counts()
    counts = {"map": 0, "layer": 0, "vector": 0, "raster": 0, "contact": 0, "group": 0}

    for t, c in facets.get("fields", {}).get("type", []):
        counts[t] = c

    for t, c in facets.get("fields", {}).get("subtype", []):
        counts[t] = c

    data = {
        "success": True,
        "total": sqs.count(),
        "rows": results,
        "counts": counts,
    }

    return HttpResponse(json.dumps(data), mimetype="application/json")
Пример #22
0
    def file_search(self, request, **kwargs):
        """Implement file searching within a bucket.

        With ``?auto=prefix`` returns tag facet values (with counts) that
        start with the prefix; otherwise returns dehydrated BucketFile
        bundles, narrowed by any ``?facet=`` tags and filtered by ``?q=``.
        """
        self.method_check(request, allowed=['get'])
        self.throttle_check(request)
        self.is_authenticated(request)

        # URL params
        bucket_id = kwargs['bucket_id']
        # Query params
        query = request.GET.get('q', '')
        autocomplete = request.GET.get('auto', None)
        selected_facets = request.GET.getlist('facet', None)
        order = request.GET.getlist('order', '-pub_date')

        sqs = SearchQuerySet().models(BucketFile).filter(
            bucket=bucket_id).order_by(order).facet('tags')

        # 1st narrow down QS
        if selected_facets:
            for facet in selected_facets:
                sqs = sqs.narrow('tags:%s' % (facet))
        # A: if autocomplete, we return only a list of tags starting
        # with "auto" along with their count.
        if autocomplete is not None:
            tags = sqs.facet_counts()
            tags = tags['fields']['tags']
            if len(autocomplete) > 0:
                # Empty ?auto= returns all tags; otherwise prefix-match.
                tags = [t for t in tags if t[0].startswith(autocomplete)]
            tags = [
                {'name': t[0], 'count':t[1]} for t in tags
            ]
            object_list = {
                'objects': tags,
            }

        # B: else, we return a list of files
        else:
            if query != "":
                sqs = sqs.auto_query(query)

            objects = []
            # Building object list
            for result in sqs:
                bundle = self.build_bundle(obj=result.object, request=request)
                bundle = self.full_dehydrate(bundle)
                objects.append(bundle)

            object_list = {
                'objects': objects,
            }

        self.log_throttled_access(request)
        return self.create_response(request, object_list)
Пример #23
0
 def list_sets(self, microsite):
     """Return OAI-style Set objects, one per collection.

     With a *microsite*, only collections that actually appear in that
     microsite's 'collection' facet are listed; otherwise all collections.
     """
     sets = []
     if microsite:
         query = SearchQuerySet().narrow("microsites:%s" % microsite.id).facet("collection")
         # Facet values are collection ids; map them back to DB rows.
         collections_facets = query.facet_counts().get("fields", {}).get("collection", [])
         collections_qs = Collection.objects.filter(id__in=[c[0] for c in collections_facets]).values_list(
             "slug", "name"
         )
     else:
         collections_qs = Collection.objects.values_list("slug", "name")
     for slug, name in collections_qs:
         sets.append(Set("collection:%s" % slug, name))
     return sets
Пример #24
0
def results(request):
    """Search results view: validates the form, applies/removes selected
    facets, runs the Part/Company query and renders a paginated page.
    """
    url = urllib.unquote_plus(request.get_full_path())
    searchform = SearchForm(request.GET)

    if not "q" in request.GET:
        return redirect('search.views.index')

    if searchform.is_valid():
        q = searchform.cleaned_data['q']
        selected_facets = request.GET.getlist("selected_facets")
        remove_facets = request.GET.getlist("remove_facets")
        applied_facets = {}

        if remove_facets:
            # BUG FIX: the original called filter(...) and discarded the
            # result, so facets listed in remove_facets were never removed.
            for r in remove_facets:
                selected_facets = [f for f in selected_facets if f != r]

        if selected_facets:
            for f in selected_facets:
                k, v = f.split(":")
                # URL with this facet stripped, used as the "remove" link.
                myurl = url.replace("&selected_facets=%s" % f, "")
                tag = "%s : %s" % (k.upper(), v)
                applied_facets[tag] = myurl

        if q:
            sqs = SearchQuerySet().models(Part).models(Company).facet('brand').facet('category').facet('with_distributors').facet('with_image').auto_query(q)

            for facet in selected_facets:
                sqs = sqs.narrow(facet)

        # NOTE(review): PageNotAnInteger cannot be raised by GET.get(); the
        # guard is kept for compatibility but is effectively a no-op.
        try:
            page = request.GET.get('page', 1)
        except PageNotAnInteger:
            page = 1

        p = Paginator(sqs, 10, request=request)
        results_list = p.page(page)

    return render_to_response('search/results.html',
                              {
                                  'results_list': results_list,
                                  'query': q,
                                  'facets': sqs.facet_counts(),
                                  'applied_facets': applied_facets,
                              },
                              context_instance=RequestContext(request))
Пример #25
0
class LanguageView(MapDataMixin, RepositoryInfoMixin, ListView):
    """List records whose subject language matches the ISO code in the URL."""
    model = Record
    template_name = 'item_list_view.html'

    def get_context_data(self, **kwargs):
        """Build records, pager, collection filters and facet data for the
        language given by self.kwargs['query'] (an ISO language code)."""
        context = super(LanguageView, self).get_context_data(**kwargs)
        query = self.kwargs['query']

        try:
            self.queryset = SearchQuerySet().filter(
                element_type='subject.language').filter(element_data=query).facet('element_type')
        except Exception as e:
            # NOTE(review): errors are only printed; self.queryset may stay
            # unset and the loop below would then fail — confirm intent.
            print e

        language_name = ISOLanguageNameIndex.objects.filter(code=query)
        if language_name:
            language_name = language_name[0].print_name

        records = []
        collection_filters = set()
        page = 0
        pager = []
        """ Building presentation data here rather than in template. Iterating over MetadataElement in search results."""
        for i in self.queryset:
            r = {}
            r['page'] = len(pager) + 1
            r['element_type'] = i.element_type
            r['collection'] = i.collection
            collection_filters.add(r['collection'])
            r['title'] = i.object.record.get_title()
            r['url'] = i.object.record.get_absolute_url()
            r['languages'] = i.object.record.list_subject_languages_as_tuples()
            r['description'] = i.object.record.get_metadata_item(
                'description')[0].element_data
            records.append(r)
            # One pager entry per ten records.
            page = page + 1
            if page % 10 == 0:
                pager.append(len(pager) + 1)

        if page % 10 > 0:
            pager.append(len(pager) + 1)

        context['records'] = records
        context['pager'] = pager
        context['collection_filters'] = list(collection_filters)
        context['size'] = self.queryset.count()
        context['facets'] = self.queryset.facet_counts()['fields']['element_type']
        context['page_title'] = language_name + ' (' + query + ') ' + ' language'
        return context
Пример #26
0
 def get(self, request, *args, **kwargs):
     """Return facet counts for kwargs['facet_key'] on the named 'main' model.

     ?order= and ?size= override the default facet options (count / 20).
     """
     facet_key = kwargs.get('facet_key')
     model_cls = apps.get_model(app_label='main', model_name=kwargs.get('model'))
     sqs = SearchQuerySet().models(model_cls)

     facet_options = {
         u'order': u'count',
         u'size': 20,
     }
     for param, value in request.GET.items():
         if param == 'order':
             facet_options['order'] = value
         if param == 'size':
             facet_options['size'] = value

     sqs = sqs.facet(facet_key, **facet_options)
     return Response(sqs.facet_counts())
Пример #27
0
def featured_counts(obj):
    """Map each 'featured' facet value (spaces and hyphens stripped) to its
    count; None when the facet is missing from the response."""
    try:
        sqs = SearchQuerySet().facet("featured").narrow("featured:[* TO *]")

        counts = {}
        for value, total in sqs.facet_counts()['fields']['featured']:
            # Normalize the facet value into a template-friendly key.
            counts[value.replace(' ', '').replace('-', '')] = total
        return counts
    except KeyError:
        return None
Пример #28
0
def get_facets_for_field(field, model=None, facet_limit=None, facet_mincount=None):
    """Return the (value, count) facet list for *field* over displayed items.

    *model* may be a single model or a tuple/list of models; *facet_limit*
    and *facet_mincount* tune the facet query when given.
    """
    sqs = SearchQuerySet()
    if model is not None:
        if isinstance(model, (tuple, list)):
            sqs = sqs.models(*model)
        else:
            sqs = sqs.models(model)
    sqs = sqs.narrow("is_displayed:true").facet(field)
    if facet_limit:
        sqs = sqs.facet_limit(facet_limit)
    if facet_mincount:
        sqs = sqs.facet_mincount(facet_mincount)
    return sqs.facet_counts().get("fields", {}).get(field, [])
Пример #29
0
def featured_counts(obj):
    """Map each 'featured' facet value (spaces and hyphens stripped) to its
    count; None when the facet is missing from the response.
    """
    try:
        featured_counts = {}
        sqs = SearchQuerySet()
        sqs = sqs.facet("featured")
        sqs = sqs.narrow("featured:[* TO *]")

        for featured in sqs.facet_counts()['fields']['featured']:
            # Normalize the facet value into a template-friendly key.
            key = featured[0].replace(' ', '')
            key = key.replace('-', '')
            featured_counts[key] = featured[1]
        return featured_counts
    # BUG FIX: was a bare `except:` that swallowed every error (including
    # KeyboardInterrupt); narrowed to KeyError — the only failure the
    # missing-facet case can produce — matching the sibling implementation.
    except KeyError:
        return None
Пример #30
0
def person(request, pk):
    """Render a person's page with non-zero resource-type facet counts and
    a prebuilt search URL for their authored resources."""
    person = Person.objects.get(id=pk)

    sqs = SearchQuerySet().filter(authors=Exact(person.complete_name())).facet("type")

    facets = sqs.facet_counts()
    # Keep only types that actually have results.
    facets = {k: v for k, v in facets["fields"]["type"] if v > 0}

    # Base search URL; the template appends a concrete type value.
    search_url = (reverse('bioresources:search_view') +
                  "?q=*&authors=" + person.complete_name() + "&type=")

    return render(request, 'resources/person.html', {
        "person": person, "facets": facets, "search_url": search_url,
        "nameMap": {"person": Person._meta.verbose_name_plural,
                    Resource.RESOURCE_TYPES.PUBLICATION: Publication._meta.verbose_name_plural},
        "sidebarleft": 1, })
Пример #31
0
def facet_counts(obj, facetName):
    """Map each value of *facetName* (spaces and hyphens stripped) to its
    count; None when the facet is missing from the response.
    """
    try:
        counts = {}

        sqs = SearchQuerySet()
        sqs = sqs.facet(facetName)
        sqs = sqs.narrow(facetName + ":[* TO *]")

        for x in sqs.facet_counts()['fields'][facetName]:
            # Normalize the facet value into a template-friendly key.
            key = x[0].replace(' ', '')
            key = key.replace('-', '')
            counts[key] = x[1]
        return counts
    # BUG FIX: was a bare `except:` that swallowed every error (including
    # KeyboardInterrupt); narrowed to KeyError — the only failure the
    # missing-facet case can produce — matching the sibling implementation.
    except KeyError:
        return None
Пример #32
0
def facet_counts(obj, facetName):
    """Map each value of *facetName* (spaces and hyphens stripped) to its
    count; None when the facet is missing from the response."""
    try:
        sqs = SearchQuerySet().facet(facetName).narrow(facetName + ":[* TO *]")

        result = {}
        for value, total in sqs.facet_counts()['fields'][facetName]:
            # Normalize the facet value into a template-friendly key.
            result[value.replace(' ', '').replace('-', '')] = total
        return result
    except KeyError:
        return None
Пример #33
0
def infrastructure_counts(obj):
    """Map each 'infrastructure' facet value (spaces and hyphens stripped)
    to its count; None when the facet is missing from the response."""
    try:
        sqs = SearchQuerySet().facet("infrastructure")
        sqs = sqs.narrow("infrastructure:[* TO *]")

        counts = {}
        for value, total in sqs.facet_counts()['fields']['infrastructure']:
            # Normalize the facet value into a template-friendly key.
            counts[value.replace(' ', '').replace('-', '')] = total
        return counts
    except KeyError:
        return None
Пример #34
0
def infrastructure_counts(obj):
    """Map each 'infrastructure' facet value (spaces and hyphens stripped)
    to its count; None when the facet is missing from the response.
    """
    try:
        infrastructure_counts = {}

        sqs = SearchQuerySet()
        sqs = sqs.facet("infrastructure")
        sqs = sqs.narrow("infrastructure:[* TO *]")

        for infrastructure in sqs.facet_counts()['fields']['infrastructure']:
            # Normalize the facet value into a template-friendly key.
            key = infrastructure[0].replace(' ', '')
            key = key.replace('-', '')
            infrastructure_counts[key] = infrastructure[1]
        return infrastructure_counts
    # BUG FIX: was a bare `except:` that swallowed every error (including
    # KeyboardInterrupt); narrowed to KeyError — the only failure the
    # missing-facet case can produce — matching the sibling implementation.
    except KeyError:
        return None
Пример #35
0
def autocomplete(request, field):
    """Return JSON ``{'options': [...]}`` of facet values for *field* whose
    autocomplete index matches the ``?term=`` query parameter.

    Redirects anonymous users to the login page.
    """
    if not request.user.is_authenticated():
        return HttpResponseRedirect('/accounts/login/')

    term = request.GET.get('term')

    # e.g. 'country_auto' is the autocomplete index for the 'country' field.
    kwargs = {field + '_auto': term}
    # CLEANUP: the original built an unused SearchQuerySet() first and then
    # immediately overwrote it; the dead assignment has been removed.
    sqs = SearchQuerySet().facet(field).filter(**kwargs)

    # ROBUSTNESS: a missing facet (e.g. empty index) previously raised
    # KeyError / HTTP 500; now yields an empty option list instead.
    try:
        result_counts = sqs.facet_counts()['fields'][field]
    except KeyError:
        result_counts = []

    result = {'options': []}

    for result_count in result_counts:
        result['options'].append(result_count[0])

    return HttpResponse(json.dumps(result), mimetype='application/json; charset=utf8')
Пример #36
0
def green_browse(request):
    """Render the 'green' microsite browse page: topic/grade/type facet
    counts, a keyword tag cloud, and the seven most recently added items.

    NOTE: the template context is built from ``locals()`` at the end, so
    every local variable name here is part of the template contract —
    do not rename locals in this function.
    """
    microsite = get_object_or_404(Microsite, slug="green")

    query = SearchQuerySet().narrow("is_displayed:true")
    query = query.narrow("microsites:%i" % microsite.id)
    query = query.facet("indexed_topics").facet("keywords").facet("grade_levels").facet("course_material_types")

    facets = query.facet_counts()["fields"]

    topics = []
    topic_counts = dict(facets["indexed_topics"])
    for topic, tree_info in tree_item_iterator(microsite.topics.all()):
        # Facet keys are stringified ids; missing topics get a zero count.
        topic.count = topic_counts.get(str(topic.id), 0)
        topics.append((topic, tree_info))

    grade_levels = []
    grade_level_counts = dict(facets["grade_levels"])
    for level in GradeLevel.objects.all():
        level.count = grade_level_counts.get(str(level.id), 0)
        grade_levels.append(level)

    course_material_types = []
    course_material_type_counts = dict(facets["course_material_types"])
    for material_type in CourseMaterialType.objects.all():
        material_type.count = course_material_type_counts.get(str(material_type.id), 0)
        course_material_types.append(material_type)

    # Keyword tag cloud, capped at MAX_TOP_KEYWORDS entries.
    keywords = query.count() and facets.get("keywords", []) or []
    if len(keywords) > MAX_TOP_KEYWORDS:
        keywords = keywords[:MAX_TOP_KEYWORDS]
    keywords = get_tag_cloud(dict(keywords), 3, 0, 0)
    for keyword in keywords:
        # Resolve a display name from either Keyword or Tag; fall back to slug.
        name = get_name_from_slug(Keyword, keyword["slug"]) or \
               get_name_from_slug(Tag, keyword["slug"]) or \
               keyword["slug"]
        keyword["name"] = name

    # Fresh queryset for the "recently added" strip.
    query = SearchQuerySet().narrow("is_displayed:true")
    query = query.narrow("microsites:%i" % microsite.id)
    query = query.order_by("-published_on").load_all()
    recently_added = [r.object for r in query[:7]]

    # All locals above become the template context.
    return direct_to_template(request, "materials/microsites/green-browse.html", locals())
Пример #37
0
def species(request, selected_Genus=None):
    """Render the species page with an alphabetical list of genera and
    their document counts from the 'genus' facet.
    """
    # Call solr for a faceted list of genus values.
    sqs = SearchQuerySet().facet('genus')

    # BUG FIX: haystack keys facet_counts()['fields'] by the facet name, so
    # the lookup must use 'genus' — the original read 'Genus' (capitalized)
    # and raised KeyError.
    genus_list = []
    for genus_count in sqs.facet_counts()['fields']['genus']:
        genus_list.append({
            'name': genus_count[0],
            'count': genus_count[1]
        })

    # Sort the list alphabetically by name
    genus_list.sort(key=lambda x: x['name'])

    return render_to_response('data/template.species.html',
                               context_instance=RequestContext(request,
                               {'genus_list': genus_list,
                               'is_page_data': True}))
Пример #38
0
class ContributorView(MapDataMixin, RepositoryInfoMixin, ListView):
    """List records whose contributor matches the (slugified) name in the URL."""
    model = Record
    template_name = 'item_list_view.html'

    def get_context_data(self, **kwargs):
        """Build records, pager, collection filters and facet data for the
        contributor given by self.kwargs['query'] (a slugified name)."""
        context = super(ContributorView, self).get_context_data(**kwargs)
        query = self.kwargs['query']
        query = query.replace('-', ' ')  # unslugify
        self.queryset = SearchQuerySet().filter(element_type__startswith='contributor').filter(element_data=query).facet('element_type').facet('collection')

        collection_filters = set()
        records = []
        page = 0
        pager = []
        """ Building presentation data here rather than in template. Iterating over MetadataElement in search results."""
        for i in self.queryset:
            r = {}
            r['page'] = len(pager) + 1
            r['element_type'] = i.element_type
            r['collection'] = i.collection
            collection_filters.add(r['collection'])
            r['title'] = i.object.record.get_title()
            r['url'] = i.object.record.get_absolute_url()
            r['languages'] = i.object.record.list_subject_languages_as_tuples()
            r['description'] = i.object.record.get_metadata_item(
                'description')[0].element_data
            records.append(r)
            # One pager entry per ten records.
            page = page + 1
            if page % 10 == 0:
                pager.append(len(pager) + 1)

        if page % 10 > 0:
            pager.append(len(pager) + 1)

        context['records'] = records
        context['pager'] = pager
        context['size'] = self.queryset.count()
        context['collection_filters'] = list(collection_filters)
        context['facets'] = self.queryset.facet_counts()['fields']['element_type']
        context['page_title'] = query
        return context
Пример #39
0
def all_notes(request, project_slug=None):
    """Render every Note, optionally narrowed by topic/project facets.

    Topic and project ids arrive as comma-separated lists in the query
    string; a truthy ``filter`` parameter switches to the filtered
    template. Facet counts for topics and projects are resolved to model
    instances for display.
    """
    ctx = {'filtered': False}
    template = 'all-notes.html'

    if request.GET.get('filter'):
        template = 'filtered-notes.html'
        ctx['filtered'] = True

    qs = SearchQuerySet().models(Note)

    narrow_clauses = []
    topic_param = request.GET.get('topic')
    if topic_param:
        narrow_clauses.append(' AND '.join(
            'related_topic_id:%s' % t for t in topic_param.split(',')))
    project_param = request.GET.get('project')
    if project_param:
        narrow_clauses.append(' AND '.join(
            'project_id:%s' % p for p in project_param.split(',')))
    if narrow_clauses:
        qs = qs.narrow(' AND '.join(narrow_clauses))

    qs = qs.facet('related_topic_id').facet('project_id')
    fields = qs.facet_counts()['fields']
    # Facet entries are (id, count) pairs; order by descending count.
    topic_facets = sorted(fields['related_topic_id'],
                          key=lambda f: f[1], reverse=True)
    project_facets = sorted(fields['project_id'],
                            key=lambda f: f[1], reverse=True)

    # Resolve the top 16 topics and split them into two display columns.
    topic_facets = [(Topic.objects.get(id=t_id), t_count)
                    for t_id, t_count in topic_facets[:16]]
    ctx['topic_facets_1'] = topic_facets[:8]
    ctx['topic_facets_2'] = topic_facets[8:] if len(topic_facets) > 8 else []

    ctx['project_facets'] = [(Project.objects.get(id=p_id), p_count)
                             for p_id, p_count in project_facets]
    ctx['notes'] = qs

    return render_to_response(
        template, ctx, context_instance=RequestContext(request))
Пример #40
0
    def get_facet_counts(self):
        """Return haystack facet counts for PostAssertion results.

        Applies the currently selected facets, then narrows by an optional
        date range and by any autocomplete facet parameters present in the
        request's GET data.
        """
        sqs = SearchQuerySet().models(PostAssertion)
        sqs = self._apply_facets_to_queryset(sqs)

        params = self.request.GET

        date_from = params.get('date_from')
        date_to = params.get('date_to')
        if date_from or date_to:
            # Dates are indexed negated, hence the -1 factor; an open end
            # falls back to the form's MIN/MAX bounds.
            lower = (-1 * int(date_from)) if date_from \
                else PromrepFacetedSearchForm.MIN_DATE
            upper = (-1 * int(date_to)) if date_to \
                else PromrepFacetedSearchForm.MAX_DATE
            sqs = sqs.narrow('date:[{} TO {}]'.format(lower, upper))

        for field in PromrepFacetedSearchForm.AUTOCOMPLETE_FACETS:
            value = params.get(field)
            if value:
                sqs = sqs.narrow('{}:{}'.format(field, value))

        return sqs.facet_counts()
Пример #41
0
    def get(self, request):
        """Return keyword autocomplete suggestions as JSON.

        Reads the partial keyword from the ``term`` query parameter,
        facets on the ``keywords`` field, and returns the matching terms
        ordered by ascending facet count as a list of
        ``{"label": ..., "value": ...}`` objects.
        """
        kw = self.request.GET.get("term")

        sqs = SearchQuerySet().filter(
            keywords__startswith=kw).facet("keywords")

        keywords = []

        for term, count in sqs.facet_counts()['fields']['keywords']:
            # The facet list can include terms outside the prefix filter,
            # so re-check the prefix here.
            if term.startswith(kw):
                keywords.append((term, count))

        # Bug fix: the previous code called sorted() and discarded the
        # returned list, so the results were never actually sorted.
        # Sort in place by facet count instead.
        keywords.sort(key=lambda x: x[1])

        results = []

        for label, count in keywords:
            results.append({"label": label, "value": label})

        return HttpResponse(
            json.dumps(results),
            content_type='application/json')
Пример #42
0
def facetSearchView(request, *args, **kwargs):
    """Return the faceted data corresponding to the given query.

    Facets Material search results on material_type, categories and
    language, then collapses the raw category facet values onto their
    original category IDs (summing counts that map to the same ID)
    before returning the counts.
    """
    facet_fields = ('material_type', 'categories', 'language')
    query = {}
    or_fields = getOrFields(request)
    # support both 'type' & 'material_type'
    if request.GET.get('type', None):
        query['material_type'] = request.GET['type']
    allow_model = models.Material
    sqs = SearchQuerySet().models(allow_model).filter(**query)
    # add custom filtering with categories
    sqs = queryCategory(request, sqs, 'categories' in or_fields)
    # request facet counts for every configured field
    for field in facet_fields:
        sqs = sqs.facet(field)
    counts = sqs.facet_counts()
    # Collapse assigned-category facet values onto their raw category IDs,
    # accumulating counts of values that map to the same ID. '0' collects
    # values that cannot be mapped back to a category.
    cat_dict = {}
    for value, num in counts['fields']['categories']:
        raw_id = models.restoreAssignedCategory(value)
        raw_id = str(raw_id[0]) if raw_id else '0'
        # Bug fix: dict.has_key() was removed in Python 3; use get().
        cat_dict[raw_id] = cat_dict.get(raw_id, 0) + num
    counts['fields']['categories'] = [
        [int(key), cat_dict[key]] for key in cat_dict]

    return Response(counts)
Пример #43
0
def _parse_facets(qd):
    """
    Given a request's GET QueryDict, returns a quadruple of:
    * A SearchQuerySet constrained by the request.GET vars, with facets
      requested for every displayable field.
    * a list of potential choices for new constraints (one dict per
      field in settings.FACET_DISPLAY).
    * a list of constraints that have been applied (dicts with
      value/key/type/display plus a 'remove_link' querystring).
    * a querystring that reproduces these constraints (without pagination
      or sorting).
    """
    sqs = SearchQuerySet()

    constraints = []
    # Process constraints, and execute facets
    for name, field in settings.ALL_FIELDS.iteritems():
        if field['document']:
            # Full-text ("document") field: apply the query terms with
            # highlighting. 'document_' acts as a shared fallback key.
            terms = qd.get(field['name'], qd.get('document_', None))
            if terms:
                sqs = sqs.auto_query(terms).highlight()
                constraints.append({
                    'value': terms,
                    'key': name,
                    'type': 'document',
                    'display': field['display_name'],
                })
        elif field['faceted']:
            # Per-field facet limit, passed through as a raw Solr param.
            sqs = sqs.raw_params(**{
                'f.%s_exact.facet.limit' % field['name']: field['facet_limit']
             })
            if field['type'] == 'date':
                sqs, start, end = _narrow_range(sqs, field, qd)
                if start:
                    constraints.append({
                        'value': start.strftime("%Y-%m-%d"),
                        'key': name + '__gte',
                        'type': 'date',
                        'display': "%s - more than" % field['display_name'],
                    })
                if end:
                    constraints.append({
                        'value': end.strftime("%Y-%m-%d"),
                        'key': name + '__lte',
                        'type': 'date',
                        'display': "%s - less than" % field['display_name'],
                    })
                if start or end or name in settings.FACET_DISPLAY:
                    # Default missing endpoints to the extreme indexed
                    # values, then facet by day with a gap of roughly
                    # 1/100th of the span (at least one day).
                    if not end:
                        end = utils.d_to_dt(getattr(SearchQuerySet().order_by("-" + name)[0], name))
                    if not start:
                        start = utils.d_to_dt(getattr(SearchQuerySet().order_by(name)[0], name))
                    span = max(1, (end - start).days)
                    gap = max(1, int(span / 100))
                    sqs = sqs.date_facet(name, start, end, 'day', gap)

            elif field['type'] in ('int', 'float', 'latitude', 'longitude'):
                sqs, start, end = _narrow_range(sqs, field, qd)
                if start:
                    constraints.append({
                        'value': start,
                        'key': name + '__gte',
                        'type': 'min_max',
                        'display': "%s - more than" % field['display_name'],
                    })
                if end:
                    constraints.append({
                        'value': end,
                        'key': name + '__lte',
                        'type': 'min_max',
                        'display': "%s - less than" % field['display_name'],
                    })
                if start or end or name in settings.FACET_DISPLAY:
                    sqs = sqs.facet(name)
#                    if not end:
#                        end = getattr(SearchQuerySet().order_by("-" + name)[0], name)
#                    if not start:
#                        start = getattr(SearchQuerySet().order_by(name)[0], name)
#                    span = max(1, (end - start))
#                    gap = max(1, int(span / 100))
#                    exact = "%s_exact" % name
#                    sqs = sqs.raw_params(**{
#                        "facet.range": exact,
#                        "f.%s.facet.range.start" % exact: start,
#                        "f.%s.facet.range.end" % exact: end,
#                        "f.%s.facet.range.gap" % exact: gap,
#                    })
            else:
                # Plain text facet: narrow by an exact (quoted) match.
                val = qd.get(name, None)
                if val is not None:
                    constraints.append({
                        'value': val,
                        'key': name,
                        'type': 'text',
                        'display': field['display_name'],
                    })
                    sqs = sqs.narrow('%s:"%s"' % (name + "_exact", val))
                if val is not None or name in settings.FACET_DISPLAY:
                    sqs = sqs.facet(name)

    # Constraint removal links: each constraint gets the querystring of
    # all the OTHER constraints, i.e. the link that removes just itself.
    queryargs = dict((p['key'], p['value']) for p in constraints)
    querystr = urllib.urlencode(queryargs)
    for param in constraints:
        # remove key
        value = queryargs.pop(param['key'])
        remainder = urllib.urlencode(queryargs)
        param.update({'remove_link': remainder})
        # restore key
        queryargs[param['key']] = value
    
    # Choices: one entry per displayable field, shaped by field type.
    counts = sqs.facet_counts()
    choices = []
    for name in settings.FACET_DISPLAY:
        field = settings.ALL_FIELDS[name]
        choice = {'field': field}
        if field['document']:
            choice.update({
                'type': 'document',
                'value': queryargs.get(name, ''),
            })
        elif field['type'] in ('text', 'char'):
            facets = sorted((k, c) for k, c in counts['fields'][name] if c > 0)
            if facets:
                choice.update({
                    'choices': facets,
                    'type': 'text',
                    'value': queryargs.get(name, '')
                })
        elif field['type'] in ('int', 'float', 'latitude', 'longitude'):
            facets = sorted([(int(k), c) for k,c in counts['fields'][name] if c > 0])
            if facets:
                choice.update({
                    'type': 'min_max',
                    'counts': [c for k,c in facets],
                    'vals': [k for k,c in facets],
                    'min_value': facets[0][0],
                    'max_value': facets[-1][0],
                    'chosen_min': queryargs.get(name + '__gte', ''),
                    'chosen_max': queryargs.get(name + '__lte', ''),
                })
        elif field['type'] == 'date':
            # Date facets come back as ISO strings with counts; skip
            # unparseable keys (Solr also returns 'gap'/'end' metadata
            # entries alongside the actual date buckets).
            facets = sorted(counts['dates'].get(name, {}).iteritems())
            if facets:
                val_counts = []
                vals = []
                last_dt = None
                for d,c in facets:
                    if c > 0:
                        try:
                            last_dt = utils.iso_to_datetime(d)
                            val_counts.append(c)
                            vals.append(last_dt.strftime('%Y-%m-%d'))
                        except (TypeError, ValueError):
                            pass
                if vals and last_dt:
                    # Close the histogram with a zero-count endpoint: the
                    # smallest of the facet range end, the newest indexed
                    # value + 1 day, and the last bucket + one gap.
                    max_value = min(
                        utils.iso_to_datetime(counts['dates'][name]['end']),
                        utils.d_to_dt(
                            getattr(SearchQuerySet().order_by('-' + name)[0], name)
                        ) + datetime.timedelta(days=1),
                        last_dt + datetime.timedelta(
                            days=int(re.sub('[^\d]', '', counts['dates'][name]['gap']))
                        ),
                    )
                    vals.append(max_value.strftime('%Y-%m-%d'))
                    val_counts.append(0)
                    choice.update({
                        'type': 'date',
                        'counts': val_counts,
                        'vals': vals,
                        'min_value': vals[0],
                        'max_value': vals[-1],
                        'chosen_min': queryargs.get(name + '__gte', ''),
                        'chosen_max': queryargs.get(name + '__lte', ''),
                    })
        choices.append(choice)
    return sqs, choices, constraints, querystr
Пример #44
0
def index(request):
    """Landing/search page.

    Runs the free-text query with any selected exact-match facets,
    aggregates per-resource-type facet counts onto the ``resources``
    table, and offers spelling suggestions when nothing matched.
    """
    db = request.GET.get("db", "all")
    search = request.GET.get("search", "").strip()

    # Exact-match constraints for any facet fields present in the request.
    selected = {
        x: Exact(request.GET[x])
        for x in ["authors", "affiliations", "taxon"] if x in request.GET
    }
    if search:
        sqs = SearchQuerySet().filter(content=search, **selected).facet("type")
    else:
        sqs = SearchQuerySet().filter(**selected).facet("type")
        search = "*"

    params = dict(request.GET)
    params["q"] = [search]

    # Only facet on fields the user has not already pinned.
    for ft in ["authors", "affiliations", "taxon"]:
        if ft not in selected:
            sqs = sqs.facet(ft, limit=5)

    facets = sqs.facet_counts()
    if "fields" in facets:
        # facet entries are (value, count) pairs; missing types default
        # to a count of 0.
        rdata = defaultdict(int, facets["fields"]["type"])
    else:
        rdata = defaultdict(int)
    count = 0
    for r in resources:
        # Facet keys come back as strings, so look up by str(type).
        # Bug fix: the total previously used the unconverted key
        # (rdata[r["type"]]), which the defaultdict silently resolved
        # to 0, so `count` could miss every hit.
        n = rdata[str(r["type"])]
        r["count"] = n
        count += n

    suggestions = []
    if count == 0:
        suggestions = SearchQuerySet().auto_query(search).spelling_suggestion()
        if suggestions:
            suggestions = [
                x.strip() for x in suggestions.replace("(", " ").split(")")
                if x.strip()
            ]

    # The type facet is rendered via `resources`, not the sidebar.
    if "fields" in facets:
        del facets["fields"]["type"]
    else:
        facets["fields"] = {}

    return render(
        request, 'index.html', {
            "stats": resources,
            "search": search if search != "*" else "",
            "selected": selected,
            "db": db,
            "suggestions": suggestions,
            "querystring": params,
            "sidebarleft": facets["fields"],
            "sidebarrigth": {
                "news": [{
                    "title": "n1",
                    "text": "lalala"
                }]
            }
        })
Пример #45
0
def _location_data(request):
    """
    Convert Solr facet counts to JSON object for consumption by location
    dialogue when creating/editing Saved Searches.

    """
    loc_data = {'countries': {'None': {'states': {'None': {'cities': []}}}}}
    sqs = SearchQuerySet().facet("full_loc").facet("country").facet_limit(-1)
    facet_fields = sqs.facet_counts()['fields']

    # Each full_loc facet value encodes key::value pairs joined by '@@'.
    # Facet entries are (value, count) pairs; only the value is needed.
    locs = [dict(atom.split('::') for atom in value.split('@@'))
            for value, _count in facet_fields['full_loc']]
    # `locs` now looks like:
    #
    # [{'city': 'Topeka',
    #   'country': 'United States',
    #   'location': 'Topeka, KS',
    #   'state': 'Kansas'},
    #  {'city': 'Indianapolis',
    #   'country': 'United States',
    #   'location': 'Indianapolis, IN',
    #   'state': 'Indiana'},
    #   ...
    #   etc.]

    # Next two iterations structure the data in `locs` to be hierarchical, e.g.
    # {
    #	 "countries": {
    #		 "United States": {
    #			 "states": {
    #				 "Indiana": {"cities": ["Indy", "Carmel", "Richmond"]},
    #				 "Texas"  : {"cities": ["Dallas", "Houston", "Amarillo"]},
    #				 "Ohio"   : {"cities": ["Cleveland", "Columbus", "Cincy"]}
    #			 }
    #		 },
    #		 "Argentina": {
    #                    "states": {
    #                            "None": {"cities": ["Sao Paulo", "Buenos Ares"]}
    #		 }
    # 	 }
    # }
    #
    # There are "None" keys at every geographical level (country/state/city) to
    # capture entries that do not have values in those fields. Mostly this
    # applies to non-Canadian/US locations.

    # Bug fix: country facet entries are (value, count) tuples, so unpack
    # the country name instead of keying the dict by the whole tuple.
    for country, _count in facet_fields['country']:
        loc_data['countries'][country] = {'states': {'None': {'cities': []}}}

    for loc in locs:
        states = loc_data['countries'][loc['country']]['states']
        # Bug fix: the previous code appended to states['cities'] (a
        # KeyError — the states dict has no 'cities' key) instead of the
        # city list of the matching state.
        states.setdefault(loc['state'], {'cities': []})
        states[loc['state']]['cities'].append(loc['city'])

    return json.dumps(loc_data)
Пример #46
0
class SearchQuerySetTestCase(TestCase):
    """Unit tests for SearchQuerySet behaviour against mock backends.

    Three querysets are exercised:

    * ``bsqs``  -- dummy backend, always returns zero results.
    * ``msqs``  -- mock backend, returns 100 results.
    * ``mmsqs`` -- "mixed" mock backend that yields fewer results than its
      hit count claims (some hits are not registered in the SearchSite),
      used to exercise result-cache filling edge cases.
    """
    def setUp(self):
        super(SearchQuerySetTestCase, self).setUp()
        self.bsqs = SearchQuerySet(query=DummySearchQuery(backend=DummySearchBackend()))
        self.msqs = SearchQuerySet(query=MockSearchQuery(backend=MockSearchBackend()))
        self.mmsqs = SearchQuerySet(query=MockSearchQuery(backend=MixedMockSearchBackend()))

        # Stow.
        self.old_debug = settings.DEBUG
        settings.DEBUG = True
        self.old_site = haystack.site
        test_site = SearchSite()
        test_site.register(MockModel)
        test_site.register(CharPKMockModel)
        haystack.site = test_site

        backends.reset_search_queries()

    def tearDown(self):
        # Restore.
        haystack.site = self.old_site
        settings.DEBUG = self.old_debug
        super(SearchQuerySetTestCase, self).tearDown()

    def test_len(self):
        # Dummy always returns 0.
        self.assertEqual(len(self.bsqs), 0)

        self.assertEqual(len(self.msqs), 100)

    def test_repr(self):
        self.assertEqual(repr(self.bsqs), '[]')

        backends.reset_search_queries()
        self.assertEqual(len(backends.queries), 0)
        self.assertEqual(repr(self.msqs), "[<SearchResult: core.MockModel (pk=0)>, <SearchResult: core.MockModel (pk=1)>, <SearchResult: core.MockModel (pk=2)>, <SearchResult: core.MockModel (pk=3)>, <SearchResult: core.MockModel (pk=4)>, <SearchResult: core.MockModel (pk=5)>, <SearchResult: core.MockModel (pk=6)>, <SearchResult: core.MockModel (pk=7)>, <SearchResult: core.MockModel (pk=8)>, <SearchResult: core.MockModel (pk=9)>, <SearchResult: core.MockModel (pk=10)>, <SearchResult: core.MockModel (pk=11)>, <SearchResult: core.MockModel (pk=12)>, <SearchResult: core.MockModel (pk=13)>, <SearchResult: core.MockModel (pk=14)>, <SearchResult: core.MockModel (pk=15)>, <SearchResult: core.MockModel (pk=16)>, <SearchResult: core.MockModel (pk=17)>, <SearchResult: core.MockModel (pk=18)>, '...(remaining elements truncated)...']")
        self.assertEqual(len(backends.queries), 1)

    def test_iter(self):
        # Dummy always returns [].
        self.assertEqual([result for result in self.bsqs.all()], [])

        backends.reset_search_queries()
        self.assertEqual(len(backends.queries), 0)
        msqs = self.msqs.all()
        results = [result for result in msqs]
        self.assertEqual(results, MOCK_SEARCH_RESULTS)
        # 100 results at a batch size of 10 -> 10 backend queries.
        self.assertEqual(len(backends.queries), 10)

    def test_slice(self):
        """Slicing or indexing should hit the backend exactly once."""
        backends.reset_search_queries()
        self.assertEqual(len(backends.queries), 0)
        results = self.msqs.all()
        self.assertEqual(results[1:11], MOCK_SEARCH_RESULTS[1:11])
        self.assertEqual(len(backends.queries), 1)

        backends.reset_search_queries()
        self.assertEqual(len(backends.queries), 0)
        results = self.msqs.all()
        self.assertEqual(results[50], MOCK_SEARCH_RESULTS[50])
        self.assertEqual(len(backends.queries), 1)

    def test_manual_iter(self):
        results = self.msqs.all()

        backends.reset_search_queries()
        self.assertEqual(len(backends.queries), 0)

        for offset, result in enumerate(results._manual_iter()):
            self.assertEqual(result, MOCK_SEARCH_RESULTS[offset])

        self.assertEqual(len(backends.queries), 10)

        backends.reset_search_queries()
        self.assertEqual(len(backends.queries), 0)
        # Test to ensure we properly fill the cache, even if we get fewer
        # results back (not in the SearchSite) than the hit count indicates.
        # This will hang indefinitely if broken.
        results = self.mmsqs.all()
        loaded = [result.pk for result in results._manual_iter()]
        self.assertEqual(loaded, [0, 1, 2, 3, 4, 5, 6, 7, 8, 10, 11, 12, 15, 16, 17, 18, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29])
        self.assertEqual(len(backends.queries), 8)

    def test_fill_cache(self):
        """_fill_cache should batch backend queries and tolerate gaps."""
        backends.reset_search_queries()
        self.assertEqual(len(backends.queries), 0)
        results = self.msqs.all()
        self.assertEqual(len(results._result_cache), 0)
        self.assertEqual(len(backends.queries), 0)
        results._fill_cache(0, 10)
        self.assertEqual(len([result for result in results._result_cache if result is not None]), 10)
        self.assertEqual(len(backends.queries), 1)
        results._fill_cache(10, 20)
        self.assertEqual(len([result for result in results._result_cache if result is not None]), 20)
        self.assertEqual(len(backends.queries), 2)

        backends.reset_search_queries()
        self.assertEqual(len(backends.queries), 0)
        # Test to ensure we properly fill the cache, even if we get fewer
        # results back (not in the SearchSite) than the hit count indicates.
        results = self.mmsqs.all()
        self.assertEqual(len([result for result in results._result_cache if result is not None]), 0)
        self.assertEqual([result.pk for result in results._result_cache if result is not None], [])
        self.assertEqual(len(backends.queries), 0)
        results._fill_cache(0, 10)
        self.assertEqual(len([result for result in results._result_cache if result is not None]), 9)
        self.assertEqual([result.pk for result in results._result_cache if result is not None], [0, 1, 2, 3, 4, 5, 6, 7, 8])
        self.assertEqual(len(backends.queries), 2)
        results._fill_cache(10, 20)
        self.assertEqual(len([result for result in results._result_cache if result is not None]), 17)
        self.assertEqual([result.pk for result in results._result_cache if result is not None], [0, 1, 2, 3, 4, 5, 6, 7, 8, 10, 11, 12, 15, 16, 17, 18, 19])
        self.assertEqual(len(backends.queries), 4)
        results._fill_cache(20, 30)
        self.assertEqual(len([result for result in results._result_cache if result is not None]), 27)
        self.assertEqual([result.pk for result in results._result_cache if result is not None], [0, 1, 2, 3, 4, 5, 6, 7, 8, 10, 11, 12, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29])
        self.assertEqual(len(backends.queries), 6)

    def test_cache_is_full(self):
        # Dummy always has a count of 0 and an empty _result_cache, hence True.
        self.assertEqual(self.bsqs._cache_is_full(), False)
        results = self.bsqs.all()
        fire_the_iterator_and_fill_cache = [result for result in results]
        self.assertEqual(results._cache_is_full(), True)

        backends.reset_search_queries()
        self.assertEqual(len(backends.queries), 0)
        self.assertEqual(self.msqs._cache_is_full(), False)
        results = self.msqs.all()
        fire_the_iterator_and_fill_cache = [result for result in results]
        self.assertEqual(results._cache_is_full(), True)
        self.assertEqual(len(backends.queries), 10)

    def test_all(self):
        sqs = self.bsqs.all()
        self.assert_(isinstance(sqs, SearchQuerySet))

    def test_filter(self):
        sqs = self.bsqs.filter(content='foo')
        self.assert_(isinstance(sqs, SearchQuerySet))
        self.assertEqual(len(sqs.query.query_filter), 1)

    def test_exclude(self):
        sqs = self.bsqs.exclude(content='foo')
        self.assert_(isinstance(sqs, SearchQuerySet))
        self.assertEqual(len(sqs.query.query_filter), 1)

    def test_order_by(self):
        sqs = self.bsqs.order_by('foo')
        self.assert_(isinstance(sqs, SearchQuerySet))
        self.assert_('foo' in sqs.query.order_by)

    def test_models(self):
        """models() should only count models registered with the site."""
        mock_index_site = SearchSite()
        mock_index_site.register(MockModel)
        mock_index_site.register(AnotherMockModel)

        bsqs = SearchQuerySet(site=mock_index_site)

        sqs = bsqs.all()
        self.assert_(isinstance(sqs, SearchQuerySet))
        self.assertEqual(len(sqs.query.models), 0)

        sqs = bsqs.models(MockModel)
        self.assert_(isinstance(sqs, SearchQuerySet))
        self.assertEqual(len(sqs.query.models), 1)

        sqs = bsqs.models(MockModel, AnotherMockModel)
        self.assert_(isinstance(sqs, SearchQuerySet))
        self.assertEqual(len(sqs.query.models), 2)

        # This will produce a warning.
        mock_index_site.unregister(AnotherMockModel)
        sqs = bsqs.models(AnotherMockModel)
        self.assert_(isinstance(sqs, SearchQuerySet))
        self.assertEqual(len(sqs.query.models), 1)

    def test_result_class(self):
        sqs = self.bsqs.all()
        self.assertTrue(issubclass(sqs.query.result_class, SearchResult))

        # Custom class.
        class IttyBittyResult(object):
            pass

        sqs = self.bsqs.result_class(IttyBittyResult)
        self.assertTrue(issubclass(sqs.query.result_class, IttyBittyResult))

        # Reset to default.
        sqs = self.bsqs.result_class(None)
        self.assertTrue(issubclass(sqs.query.result_class, SearchResult))

    def test_boost(self):
        sqs = self.bsqs.boost('foo', 10)
        self.assert_(isinstance(sqs, SearchQuerySet))
        self.assertEqual(len(sqs.query.boost.keys()), 1)

    def test_highlight(self):
        sqs = self.bsqs.highlight()
        self.assert_(isinstance(sqs, SearchQuerySet))
        self.assertEqual(sqs.query.highlight, True)

    def test_spelling(self):
        # Test the case where spelling support is disabled.
        sqs = self.bsqs.filter(content='Indx')
        self.assert_(isinstance(sqs, SearchQuerySet))
        self.assertEqual(sqs.spelling_suggestion(), None)
        self.assertEqual(sqs.spelling_suggestion('indexy'), None)

    def test_raw_search(self):
        self.assertEqual(len(self.bsqs.raw_search('foo')), 0)
        self.assertEqual(len(self.bsqs.raw_search('(content__exact hello AND content__exact world)')), 1)

    def test_load_all(self):
        """load_all() resolves results to model instances where possible."""
        # Models with character primary keys
        sqs = SearchQuerySet(query=MockSearchQuery(backend=CharPKMockSearchBackend()))
        results = sqs.load_all().all()
        self.assertEqual(len(results._result_cache), 0)
        results._fill_cache(0, 2)
        self.assertEqual(len([result for result in results._result_cache if result is not None]), 2)

        # If nothing is registered, you get nothing.
        haystack.site.unregister(MockModel)
        haystack.site.unregister(CharPKMockModel)
        sqs = self.msqs.load_all()
        self.assert_(isinstance(sqs, SearchQuerySet))
        self.assertEqual(len(sqs), 0)

        # For full tests, see the solr_backend.

    def test_auto_query(self):
        """auto_query parses quoting and '-' exclusion into SQ filters."""
        sqs = self.bsqs.auto_query('test search -stuff')
        self.assert_(isinstance(sqs, SearchQuerySet))
        self.assertEqual(repr(sqs.query.query_filter), '<SQ: AND (content__exact=test AND content__exact=search AND NOT (content__exact=stuff))>')

        sqs = self.bsqs.auto_query('test "my thing" search -stuff')
        self.assert_(isinstance(sqs, SearchQuerySet))
        self.assertEqual(repr(sqs.query.query_filter), '<SQ: AND (content__exact=my thing AND content__exact=test AND content__exact=search AND NOT (content__exact=stuff))>')

        sqs = self.bsqs.auto_query('test "my thing" search \'moar quotes\' -stuff')
        self.assert_(isinstance(sqs, SearchQuerySet))
        self.assertEqual(repr(sqs.query.query_filter), "<SQ: AND (content__exact=my thing AND content__exact=test AND content__exact=search AND content__exact='moar AND content__exact=quotes' AND NOT (content__exact=stuff))>")

        sqs = self.bsqs.auto_query('test "my thing" search \'moar quotes\' "foo -stuff')
        self.assert_(isinstance(sqs, SearchQuerySet))
        self.assertEqual(repr(sqs.query.query_filter), '<SQ: AND (content__exact=my thing AND content__exact=test AND content__exact=search AND content__exact=\'moar AND content__exact=quotes\' AND content__exact="foo AND NOT (content__exact=stuff))>')

        sqs = self.bsqs.auto_query('test - stuff')
        self.assert_(isinstance(sqs, SearchQuerySet))
        self.assertEqual(repr(sqs.query.query_filter), '<SQ: AND (content__exact=test AND content__exact=- AND content__exact=stuff)>')

        # Ensure bits in exact matches get escaped properly as well.
        sqs = self.bsqs.auto_query('"pants:rule"')
        self.assert_(isinstance(sqs, SearchQuerySet))
        self.assertEqual(repr(sqs.query.query_filter), '<SQ: AND content__exact=pants:rule>')

    def test_count(self):
        self.assertEqual(self.bsqs.count(), 0)

    def test_facet_counts(self):
        self.assertEqual(self.bsqs.facet_counts(), {})

    def test_best_match(self):
        self.assert_(isinstance(self.msqs.best_match(), SearchResult))

    def test_latest(self):
        self.assert_(isinstance(self.msqs.latest('pub_date'), SearchResult))

    def test_more_like_this(self):
        mock = MockModel()
        mock.id = 1

        self.assertEqual(len(self.msqs.more_like_this(mock)), 100)

    def test_facets(self):
        sqs = self.bsqs.facet('foo')
        self.assert_(isinstance(sqs, SearchQuerySet))
        self.assertEqual(len(sqs.query.facets), 1)

        sqs2 = self.bsqs.facet('foo').facet('bar')
        self.assert_(isinstance(sqs2, SearchQuerySet))
        self.assertEqual(len(sqs2.query.facets), 2)

    def test_date_facets(self):
        """date_facet() must reject invalid gap_by values, accept valid ones."""
        try:
            sqs = self.bsqs.date_facet('foo', start_date=datetime.date(2008, 2, 25), end_date=datetime.date(2009, 2, 25), gap_by='smarblaph')
            self.fail()
        except FacetingError, e:
            self.assertEqual(str(e), "The gap_by ('smarblaph') must be one of the following: year, month, day, hour, minute, second.")

        sqs = self.bsqs.date_facet('foo', start_date=datetime.date(2008, 2, 25), end_date=datetime.date(2009, 2, 25), gap_by='month')
        self.assert_(isinstance(sqs, SearchQuerySet))
        self.assertEqual(len(sqs.query.date_facets), 1)

        sqs2 = self.bsqs.date_facet('foo', start_date=datetime.date(2008, 2, 25), end_date=datetime.date(2009, 2, 25), gap_by='month').date_facet('bar', start_date=datetime.date(2007, 2, 25), end_date=datetime.date(2009, 2, 25), gap_by='year')
        self.assert_(isinstance(sqs2, SearchQuerySet))
        self.assertEqual(len(sqs2.query.date_facets), 2)
Пример #47
0
class SearchQuerySetTestCase(TestCase):
    """Exercise SearchQuerySet against the haystack-2.x connections/UnifiedIndex
    stack, backed by mock search backends and the bulk_data fixture.

    Most tests assert on ``connections['default'].queries`` to verify how many
    backend round-trips an operation performs; queries are only recorded while
    ``settings.DEBUG`` is True, which setUp forces.
    """

    fixtures = ['bulk_data.json']

    def setUp(self):
        """Swap in a fresh UnifiedIndex of mock indexes and populate the backend."""
        super(SearchQuerySetTestCase, self).setUp()

        # Stow.
        self.old_unified_index = connections['default']._index
        self.ui = UnifiedIndex()
        self.bmmsi = BasicMockModelSearchIndex()
        self.cpkmmsi = CharPKMockModelSearchIndex()
        self.ui.build(indexes=[self.bmmsi, self.cpkmmsi])
        connections['default']._index = self.ui

        # Update the "index".
        backend = connections['default'].get_backend()
        backend.clear()
        backend.update(self.bmmsi, MockModel.objects.all())

        self.msqs = SearchQuerySet()

        # Stow. DEBUG must be True for query logging to happen at all.
        self.old_debug = settings.DEBUG
        settings.DEBUG = True

        reset_search_queries()

    def tearDown(self):
        """Undo the index/DEBUG swaps from setUp."""
        # Restore.
        connections['default']._index = self.old_unified_index
        settings.DEBUG = self.old_debug
        super(SearchQuerySetTestCase, self).tearDown()

    def test_len(self):
        self.assertEqual(len(self.msqs), 23)

    def test_repr(self):
        # repr() should fill enough of the cache in a single backend query.
        reset_search_queries()
        self.assertEqual(len(connections['default'].queries), 0)
        self.assertEqual(repr(self.msqs), "[<SearchResult: core.mockmodel (pk=u'1')>, <SearchResult: core.mockmodel (pk=u'2')>, <SearchResult: core.mockmodel (pk=u'3')>, <SearchResult: core.mockmodel (pk=u'4')>, <SearchResult: core.mockmodel (pk=u'5')>, <SearchResult: core.mockmodel (pk=u'6')>, <SearchResult: core.mockmodel (pk=u'7')>, <SearchResult: core.mockmodel (pk=u'8')>, <SearchResult: core.mockmodel (pk=u'9')>, <SearchResult: core.mockmodel (pk=u'10')>, <SearchResult: core.mockmodel (pk=u'11')>, <SearchResult: core.mockmodel (pk=u'12')>, <SearchResult: core.mockmodel (pk=u'13')>, <SearchResult: core.mockmodel (pk=u'14')>, <SearchResult: core.mockmodel (pk=u'15')>, <SearchResult: core.mockmodel (pk=u'16')>, <SearchResult: core.mockmodel (pk=u'17')>, <SearchResult: core.mockmodel (pk=u'18')>, <SearchResult: core.mockmodel (pk=u'19')>, '...(remaining elements truncated)...']")
        self.assertEqual(len(connections['default'].queries), 1)

    def test_iter(self):
        # Full iteration over 23 hits batches into 3 backend queries.
        reset_search_queries()
        self.assertEqual(len(connections['default'].queries), 0)
        msqs = self.msqs.all()
        results = [int(res.pk) for res in msqs]
        self.assertEqual(results, [res.pk for res in MOCK_SEARCH_RESULTS[:23]])
        self.assertEqual(len(connections['default'].queries), 3)

    def test_slice(self):
        # A slice should cost exactly one backend query.
        reset_search_queries()
        self.assertEqual(len(connections['default'].queries), 0)
        results = self.msqs.all()
        self.assertEqual([int(res.pk) for res in results[1:11]], [res.pk for res in MOCK_SEARCH_RESULTS[1:11]])
        self.assertEqual(len(connections['default'].queries), 1)

        # Single-item indexing also costs exactly one backend query.
        reset_search_queries()
        self.assertEqual(len(connections['default'].queries), 0)
        results = self.msqs.all()
        self.assertEqual(int(results[22].pk), MOCK_SEARCH_RESULTS[22].pk)
        self.assertEqual(len(connections['default'].queries), 1)

    def test_manual_iter(self):
        results = self.msqs.all()

        reset_search_queries()
        self.assertEqual(len(connections['default'].queries), 0)

        check = [result.pk for result in results._manual_iter()]
        self.assertEqual(check, [u'1', u'2', u'3', u'4', u'5', u'6', u'7', u'8', u'9', u'10', u'11', u'12', u'13', u'14', u'15', u'16', u'17', u'18', u'19', u'20', u'21', u'22', u'23'])

        self.assertEqual(len(connections['default'].queries), 3)

        reset_search_queries()
        self.assertEqual(len(connections['default'].queries), 0)

        # Test to ensure we properly fill the cache, even if we get fewer
        # results back (not a handled model) than the hit count indicates.
        # This will hang indefinitely if broken.
        old_ui = self.ui
        self.ui.build(indexes=[self.cpkmmsi])
        connections['default']._index = self.ui
        self.cpkmmsi.update()

        results = self.msqs.all()
        loaded = [result.pk for result in results._manual_iter()]
        self.assertEqual(loaded, [u'sometext', u'1234'])
        self.assertEqual(len(connections['default'].queries), 1)

        connections['default']._index = old_ui

    def test_fill_cache(self):
        reset_search_queries()
        self.assertEqual(len(connections['default'].queries), 0)
        results = self.msqs.all()
        self.assertEqual(len(results._result_cache), 0)
        self.assertEqual(len(connections['default'].queries), 0)
        results._fill_cache(0, 10)
        self.assertEqual(len([result for result in results._result_cache if result is not None]), 10)
        self.assertEqual(len(connections['default'].queries), 1)
        results._fill_cache(10, 20)
        self.assertEqual(len([result for result in results._result_cache if result is not None]), 20)
        self.assertEqual(len(connections['default'].queries), 2)

        reset_search_queries()
        self.assertEqual(len(connections['default'].queries), 0)

        # Test to ensure we properly fill the cache, even if we get fewer
        # results back (not a handled model) than the hit count indicates.
        sqs = SearchQuerySet().all()
        sqs.query.backend = MixedMockSearchBackend('default')
        results = sqs
        self.assertEqual(len([result for result in results._result_cache if result is not None]), 0)
        self.assertEqual([int(result.pk) for result in results._result_cache if result is not None], [])
        self.assertEqual(len(connections['default'].queries), 0)
        results._fill_cache(0, 10)
        self.assertEqual(len([result for result in results._result_cache if result is not None]), 9)
        self.assertEqual([int(result.pk) for result in results._result_cache if result is not None], [1, 2, 3, 4, 5, 6, 7, 8, 10])
        self.assertEqual(len(connections['default'].queries), 2)
        results._fill_cache(10, 20)
        self.assertEqual(len([result for result in results._result_cache if result is not None]), 17)
        self.assertEqual([int(result.pk) for result in results._result_cache if result is not None], [1, 2, 3, 4, 5, 6, 7, 8, 10, 11, 12, 15, 16, 17, 18, 19, 20])
        self.assertEqual(len(connections['default'].queries), 4)
        results._fill_cache(20, 30)
        self.assertEqual(len([result for result in results._result_cache if result is not None]), 20)
        self.assertEqual([int(result.pk) for result in results._result_cache if result is not None], [1, 2, 3, 4, 5, 6, 7, 8, 10, 11, 12, 15, 16, 17, 18, 19, 20, 21, 22, 23])
        self.assertEqual(len(connections['default'].queries), 6)

    def test_cache_is_full(self):
        reset_search_queries()
        self.assertEqual(len(connections['default'].queries), 0)
        self.assertEqual(self.msqs._cache_is_full(), False)
        results = self.msqs.all()
        fire_the_iterator_and_fill_cache = [result for result in results]
        self.assertEqual(results._cache_is_full(), True)
        self.assertEqual(len(connections['default'].queries), 3)

    def test_all(self):
        sqs = self.msqs.all()
        self.assertTrue(isinstance(sqs, SearchQuerySet))

    def test_filter(self):
        sqs = self.msqs.filter(content='foo')
        self.assertTrue(isinstance(sqs, SearchQuerySet))
        self.assertEqual(len(sqs.query.query_filter), 1)

    def test_exclude(self):
        sqs = self.msqs.exclude(content='foo')
        self.assertTrue(isinstance(sqs, SearchQuerySet))
        self.assertEqual(len(sqs.query.query_filter), 1)

    def test_order_by(self):
        sqs = self.msqs.order_by('foo')
        self.assertTrue(isinstance(sqs, SearchQuerySet))
        self.assertTrue('foo' in sqs.query.order_by)

    def test_models(self):
        # Stow.
        old_unified_index = connections['default']._index
        ui = UnifiedIndex()
        bmmsi = BasicMockModelSearchIndex()
        bammsi = BasicAnotherMockModelSearchIndex()
        ui.build(indexes=[bmmsi, bammsi])
        connections['default']._index = ui

        msqs = SearchQuerySet()

        sqs = msqs.all()
        self.assertTrue(isinstance(sqs, SearchQuerySet))
        self.assertEqual(len(sqs.query.models), 0)

        sqs = msqs.models(MockModel)
        self.assertTrue(isinstance(sqs, SearchQuerySet))
        self.assertEqual(len(sqs.query.models), 1)

        sqs = msqs.models(MockModel, AnotherMockModel)
        self.assertTrue(isinstance(sqs, SearchQuerySet))
        self.assertEqual(len(sqs.query.models), 2)

        # This will produce a warning.
        ui.build(indexes=[bmmsi])
        sqs = msqs.models(AnotherMockModel)
        self.assertTrue(isinstance(sqs, SearchQuerySet))
        self.assertEqual(len(sqs.query.models), 1)

    def test_result_class(self):
        sqs = self.msqs.all()
        self.assertTrue(issubclass(sqs.query.result_class, SearchResult))

        # Custom class.
        class IttyBittyResult(object):
            pass

        sqs = self.msqs.result_class(IttyBittyResult)
        self.assertTrue(issubclass(sqs.query.result_class, IttyBittyResult))

        # Reset to default.
        sqs = self.msqs.result_class(None)
        self.assertTrue(issubclass(sqs.query.result_class, SearchResult))

    def test_boost(self):
        sqs = self.msqs.boost('foo', 10)
        self.assertTrue(isinstance(sqs, SearchQuerySet))
        self.assertEqual(len(sqs.query.boost.keys()), 1)

    def test_highlight(self):
        sqs = self.msqs.highlight()
        self.assertTrue(isinstance(sqs, SearchQuerySet))
        self.assertEqual(sqs.query.highlight, True)

    def test_spelling(self):
        # Test the case where spelling support is disabled.
        sqs = self.msqs.filter(content='Indx')
        self.assertTrue(isinstance(sqs, SearchQuerySet))
        self.assertEqual(sqs.spelling_suggestion(), None)
        self.assertEqual(sqs.spelling_suggestion('indexy'), None)

    def test_raw_search(self):
        self.assertEqual(len(self.msqs.raw_search('foo')), 23)
        self.assertEqual(len(self.msqs.raw_search('(content__exact:hello AND content__exact:world)')), 23)

    def test_load_all(self):
        # Models with character primary keys.
        sqs = SearchQuerySet()
        sqs.query.backend = CharPKMockSearchBackend('charpk')
        results = sqs.load_all().all()
        self.assertEqual(len(results._result_cache), 0)
        results._fill_cache(0, 2)
        self.assertEqual(len([result for result in results._result_cache if result is not None]), 2)

        # If nothing is handled, you get nothing.
        old_ui = connections['default']._index
        ui = UnifiedIndex()
        ui.build(indexes=[])
        connections['default']._index = ui

        sqs = self.msqs.load_all()
        self.assertTrue(isinstance(sqs, SearchQuerySet))
        self.assertEqual(len(sqs), 0)

        connections['default']._index = old_ui

        # For full tests, see the solr_backend.

    def test_load_all_read_queryset(self):
        # Stow.
        old_ui = connections['default']._index
        ui = UnifiedIndex()
        gafmmsi = GhettoAFifthMockModelSearchIndex()
        ui.build(indexes=[gafmmsi])
        connections['default']._index = ui
        gafmmsi.update()

        sqs = SearchQuerySet()
        results = sqs.load_all().all()
        results.query.backend = ReadQuerySetMockSearchBackend('default')
        results._fill_cache(0, 2)

        # The deleted result isn't returned
        self.assertEqual(len([result for result in results._result_cache if result is not None]), 1)

        # Register a SearchIndex with a read_queryset that returns deleted items
        rqstsi = TextReadQuerySetTestSearchIndex()
        ui.build(indexes=[rqstsi])
        rqstsi.update()

        sqs = SearchQuerySet()
        results = sqs.load_all().all()
        results.query.backend = ReadQuerySetMockSearchBackend('default')
        results._fill_cache(0, 2)

        # Both the deleted and not deleted items are returned
        self.assertEqual(len([result for result in results._result_cache if result is not None]), 2)

        # Restore.
        connections['default']._index = old_ui

    def test_auto_query(self):
        sqs = self.msqs.auto_query('test search -stuff')
        self.assertTrue(isinstance(sqs, SearchQuerySet))
        self.assertEqual(repr(sqs.query.query_filter), '<SQ: AND content__contains=test search -stuff>')

        sqs = self.msqs.auto_query('test "my thing" search -stuff')
        self.assertTrue(isinstance(sqs, SearchQuerySet))
        self.assertEqual(repr(sqs.query.query_filter), '<SQ: AND content__contains=test "my thing" search -stuff>')

        sqs = self.msqs.auto_query('test "my thing" search \'moar quotes\' -stuff')
        self.assertTrue(isinstance(sqs, SearchQuerySet))
        self.assertEqual(repr(sqs.query.query_filter), '<SQ: AND content__contains=test "my thing" search \'moar quotes\' -stuff>')

        sqs = self.msqs.auto_query('test "my thing" search \'moar quotes\' "foo -stuff')
        self.assertTrue(isinstance(sqs, SearchQuerySet))
        self.assertEqual(repr(sqs.query.query_filter), '<SQ: AND content__contains=test "my thing" search \'moar quotes\' "foo -stuff>')

        sqs = self.msqs.auto_query('test - stuff')
        self.assertTrue(isinstance(sqs, SearchQuerySet))
        self.assertEqual(repr(sqs.query.query_filter), "<SQ: AND content__contains=test - stuff>")

        # Ensure bits in exact matches get escaped properly as well.
        sqs = self.msqs.auto_query('"pants:rule"')
        self.assertTrue(isinstance(sqs, SearchQuerySet))
        self.assertEqual(repr(sqs.query.query_filter), '<SQ: AND content__contains="pants:rule">')

        # Now with a different fieldname
        sqs = self.msqs.auto_query('test search -stuff', fieldname='title')
        self.assertTrue(isinstance(sqs, SearchQuerySet))
        self.assertEqual(repr(sqs.query.query_filter), "<SQ: AND title__contains=test search -stuff>")

        sqs = self.msqs.auto_query('test "my thing" search -stuff', fieldname='title')
        self.assertTrue(isinstance(sqs, SearchQuerySet))
        self.assertEqual(repr(sqs.query.query_filter), '<SQ: AND title__contains=test "my thing" search -stuff>')

    def test_count(self):
        self.assertEqual(self.msqs.count(), 23)

    def test_facet_counts(self):
        self.assertEqual(self.msqs.facet_counts(), {})

    def test_best_match(self):
        self.assertTrue(isinstance(self.msqs.best_match(), SearchResult))

    def test_latest(self):
        self.assertTrue(isinstance(self.msqs.latest('pub_date'), SearchResult))

    def test_more_like_this(self):
        mock = MockModel()
        mock.id = 1

        self.assertEqual(len(self.msqs.more_like_this(mock)), 23)

    def test_facets(self):
        sqs = self.msqs.facet('foo')
        self.assertTrue(isinstance(sqs, SearchQuerySet))
        self.assertEqual(len(sqs.query.facets), 1)

        sqs2 = self.msqs.facet('foo').facet('bar')
        self.assertTrue(isinstance(sqs2, SearchQuerySet))
        self.assertEqual(len(sqs2.query.facets), 2)

    def test_date_facets(self):
        # NOTE(review): Python-2-only ``except ..., e`` syntax; needs
        # ``except FacetingError as e`` before any Python 3 migration.
        try:
            sqs = self.msqs.date_facet('foo', start_date=datetime.date(2008, 2, 25), end_date=datetime.date(2009, 2, 25), gap_by='smarblaph')
            self.fail()
        except FacetingError, e:
            self.assertEqual(str(e), "The gap_by ('smarblaph') must be one of the following: year, month, day, hour, minute, second.")

        sqs = self.msqs.date_facet('foo', start_date=datetime.date(2008, 2, 25), end_date=datetime.date(2009, 2, 25), gap_by='month')
        self.assertTrue(isinstance(sqs, SearchQuerySet))
        self.assertEqual(len(sqs.query.date_facets), 1)

        sqs2 = self.msqs.date_facet('foo', start_date=datetime.date(2008, 2, 25), end_date=datetime.date(2009, 2, 25), gap_by='month').date_facet('bar', start_date=datetime.date(2007, 2, 25), end_date=datetime.date(2009, 2, 25), gap_by='year')
        self.assertTrue(isinstance(sqs2, SearchQuerySet))
        self.assertEqual(len(sqs2.query.date_facets), 2)
Пример #48
0
class SearchQuerySetTestCase(TestCase):
    """Exercise SearchQuerySet against the older haystack-1.x SearchSite API,
    using dummy/mock/mixed mock backends.

    ``self.bsqs`` is a dummy queryset (always empty), ``self.msqs`` a 100-hit
    mock, and ``self.mmsqs`` a mixed mock that returns fewer results than its
    hit count claims. Backend query counts are asserted via
    ``backends.queries``, which records only while ``settings.DEBUG`` is True.
    """

    def setUp(self):
        """Build the three querysets and swap in a registered test SearchSite."""
        super(SearchQuerySetTestCase, self).setUp()
        self.bsqs = SearchQuerySet(query=DummySearchQuery(backend=DummySearchBackend()))
        self.msqs = SearchQuerySet(query=MockSearchQuery(backend=MockSearchBackend()))
        self.mmsqs = SearchQuerySet(query=MockSearchQuery(backend=MixedMockSearchBackend()))

        # Stow. DEBUG must be True for backends.queries to record anything.
        self.old_debug = settings.DEBUG
        settings.DEBUG = True
        self.old_site = haystack.site
        test_site = SearchSite()
        test_site.register(MockModel)
        test_site.register(CharPKMockModel)
        haystack.site = test_site

        backends.reset_search_queries()

    def tearDown(self):
        """Undo the site/DEBUG swaps from setUp."""
        # Restore.
        haystack.site = self.old_site
        settings.DEBUG = self.old_debug
        super(SearchQuerySetTestCase, self).tearDown()

    def test_len(self):
        # Dummy always returns 0.
        self.assertEqual(len(self.bsqs), 0)

        self.assertEqual(len(self.msqs), 100)

    def test_repr(self):
        self.assertEqual(repr(self.bsqs), '[]')

        # repr() should fill enough of the cache in a single backend query.
        backends.reset_search_queries()
        self.assertEqual(len(backends.queries), 0)
        self.assertEqual(repr(self.msqs), "[<SearchResult: core.MockModel (pk=0)>, <SearchResult: core.MockModel (pk=1)>, <SearchResult: core.MockModel (pk=2)>, <SearchResult: core.MockModel (pk=3)>, <SearchResult: core.MockModel (pk=4)>, <SearchResult: core.MockModel (pk=5)>, <SearchResult: core.MockModel (pk=6)>, <SearchResult: core.MockModel (pk=7)>, <SearchResult: core.MockModel (pk=8)>, <SearchResult: core.MockModel (pk=9)>, <SearchResult: core.MockModel (pk=10)>, <SearchResult: core.MockModel (pk=11)>, <SearchResult: core.MockModel (pk=12)>, <SearchResult: core.MockModel (pk=13)>, <SearchResult: core.MockModel (pk=14)>, <SearchResult: core.MockModel (pk=15)>, <SearchResult: core.MockModel (pk=16)>, <SearchResult: core.MockModel (pk=17)>, <SearchResult: core.MockModel (pk=18)>, '...(remaining elements truncated)...']")
        self.assertEqual(len(backends.queries), 1)

    def test_iter(self):
        # Dummy always returns [].
        self.assertEqual([result for result in self.bsqs.all()], [])

        # Full iteration over 100 hits batches into 10 backend queries.
        backends.reset_search_queries()
        self.assertEqual(len(backends.queries), 0)
        msqs = self.msqs.all()
        results = [result for result in msqs]
        self.assertEqual(results, MOCK_SEARCH_RESULTS)
        self.assertEqual(len(backends.queries), 10)

    def test_slice(self):
        # A slice should cost exactly one backend query.
        backends.reset_search_queries()
        self.assertEqual(len(backends.queries), 0)
        results = self.msqs.all()
        self.assertEqual(results[1:11], MOCK_SEARCH_RESULTS[1:11])
        self.assertEqual(len(backends.queries), 1)

        # Single-item indexing also costs exactly one backend query.
        backends.reset_search_queries()
        self.assertEqual(len(backends.queries), 0)
        results = self.msqs.all()
        self.assertEqual(results[50], MOCK_SEARCH_RESULTS[50])
        self.assertEqual(len(backends.queries), 1)

    def test_manual_iter(self):
        results = self.msqs.all()

        backends.reset_search_queries()
        self.assertEqual(len(backends.queries), 0)

        for offset, result in enumerate(results._manual_iter()):
            self.assertEqual(result, MOCK_SEARCH_RESULTS[offset])

        self.assertEqual(len(backends.queries), 10)

        backends.reset_search_queries()
        self.assertEqual(len(backends.queries), 0)
        # Test to ensure we properly fill the cache, even if we get fewer
        # results back (not in the SearchSite) than the hit count indicates.
        # This will hang indefinitely if broken.
        results = self.mmsqs.all()
        loaded = [result.pk for result in results._manual_iter()]
        self.assertEqual(loaded, [0, 1, 2, 3, 4, 5, 6, 7, 8, 10, 11, 12, 15, 16, 17, 18, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29])
        self.assertEqual(len(backends.queries), 8)

    def test_fill_cache(self):
        backends.reset_search_queries()
        self.assertEqual(len(backends.queries), 0)
        results = self.msqs.all()
        self.assertEqual(len(results._result_cache), 0)
        self.assertEqual(len(backends.queries), 0)
        results._fill_cache(0, 10)
        self.assertEqual(len([result for result in results._result_cache if result is not None]), 10)
        self.assertEqual(len(backends.queries), 1)
        results._fill_cache(10, 20)
        self.assertEqual(len([result for result in results._result_cache if result is not None]), 20)
        self.assertEqual(len(backends.queries), 2)

        backends.reset_search_queries()
        self.assertEqual(len(backends.queries), 0)
        # Test to ensure we properly fill the cache, even if we get fewer
        # results back (not in the SearchSite) than the hit count indicates.
        results = self.mmsqs.all()
        self.assertEqual(len([result for result in results._result_cache if result is not None]), 0)
        self.assertEqual([result.pk for result in results._result_cache if result is not None], [])
        self.assertEqual(len(backends.queries), 0)
        results._fill_cache(0, 10)
        self.assertEqual(len([result for result in results._result_cache if result is not None]), 9)
        self.assertEqual([result.pk for result in results._result_cache if result is not None], [0, 1, 2, 3, 4, 5, 6, 7, 8])
        self.assertEqual(len(backends.queries), 2)
        results._fill_cache(10, 20)
        self.assertEqual(len([result for result in results._result_cache if result is not None]), 17)
        self.assertEqual([result.pk for result in results._result_cache if result is not None], [0, 1, 2, 3, 4, 5, 6, 7, 8, 10, 11, 12, 15, 16, 17, 18, 19])
        self.assertEqual(len(backends.queries), 4)
        results._fill_cache(20, 30)
        self.assertEqual(len([result for result in results._result_cache if result is not None]), 27)
        self.assertEqual([result.pk for result in results._result_cache if result is not None], [0, 1, 2, 3, 4, 5, 6, 7, 8, 10, 11, 12, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29])
        self.assertEqual(len(backends.queries), 6)

    def test_cache_is_full(self):
        # Dummy always has a count of 0 and an empty _result_cache, hence True.
        self.assertEqual(self.bsqs._cache_is_full(), False)
        results = self.bsqs.all()
        fire_the_iterator_and_fill_cache = [result for result in results]
        self.assertEqual(results._cache_is_full(), True)

        backends.reset_search_queries()
        self.assertEqual(len(backends.queries), 0)
        self.assertEqual(self.msqs._cache_is_full(), False)
        results = self.msqs.all()
        fire_the_iterator_and_fill_cache = [result for result in results]
        self.assertEqual(results._cache_is_full(), True)
        self.assertEqual(len(backends.queries), 10)

    def test_all(self):
        sqs = self.bsqs.all()
        self.assert_(isinstance(sqs, SearchQuerySet))

    def test_filter(self):
        sqs = self.bsqs.filter(content='foo')
        self.assert_(isinstance(sqs, SearchQuerySet))
        self.assertEqual(len(sqs.query.query_filter), 1)

    def test_exclude(self):
        sqs = self.bsqs.exclude(content='foo')
        self.assert_(isinstance(sqs, SearchQuerySet))
        self.assertEqual(len(sqs.query.query_filter), 1)

    def test_order_by(self):
        sqs = self.bsqs.order_by('foo')
        self.assert_(isinstance(sqs, SearchQuerySet))
        self.assert_('foo' in sqs.query.order_by)

    def test_models(self):
        mock_index_site = SearchSite()
        mock_index_site.register(MockModel)
        mock_index_site.register(AnotherMockModel)

        bsqs = SearchQuerySet(site=mock_index_site)

        sqs = bsqs.all()
        self.assert_(isinstance(sqs, SearchQuerySet))
        self.assertEqual(len(sqs.query.models), 0)

        sqs = bsqs.models(MockModel)
        self.assert_(isinstance(sqs, SearchQuerySet))
        self.assertEqual(len(sqs.query.models), 1)

        sqs = bsqs.models(MockModel, AnotherMockModel)
        self.assert_(isinstance(sqs, SearchQuerySet))
        self.assertEqual(len(sqs.query.models), 2)

        # This will produce a warning.
        mock_index_site.unregister(AnotherMockModel)
        sqs = bsqs.models(AnotherMockModel)
        self.assert_(isinstance(sqs, SearchQuerySet))
        self.assertEqual(len(sqs.query.models), 1)

    def test_boost(self):
        sqs = self.bsqs.boost('foo', 10)
        self.assert_(isinstance(sqs, SearchQuerySet))
        self.assertEqual(len(sqs.query.boost.keys()), 1)

    def test_highlight(self):
        sqs = self.bsqs.highlight()
        self.assert_(isinstance(sqs, SearchQuerySet))
        self.assertEqual(sqs.query.highlight, True)

    def test_spelling(self):
        # Test the case where spelling support is disabled.
        sqs = self.bsqs.filter(content='Indx')
        self.assert_(isinstance(sqs, SearchQuerySet))
        self.assertEqual(sqs.spelling_suggestion(), None)
        self.assertEqual(sqs.spelling_suggestion('indexy'), None)

    def test_raw_search(self):
        self.assertEqual(len(self.bsqs.raw_search('foo')), 0)
        self.assertEqual(len(self.bsqs.raw_search('(content__exact hello AND content__exact world)')), 1)

    def test_load_all(self):
        # Models with character primary keys
        sqs = SearchQuerySet(query=MockSearchQuery(backend=CharPKMockSearchBackend()))
        results = sqs.load_all().all()
        self.assertEqual(len(results._result_cache), 0)
        results._fill_cache(0, 2)
        self.assertEqual(len([result for result in results._result_cache if result is not None]), 2)

        # If nothing is registered, you get nothing.
        haystack.site.unregister(MockModel)
        haystack.site.unregister(CharPKMockModel)
        sqs = self.msqs.load_all()
        self.assert_(isinstance(sqs, SearchQuerySet))
        self.assertEqual(len(sqs), 0)

        # For full tests, see the solr_backend.

    def test_auto_query(self):
        sqs = self.bsqs.auto_query('test search -stuff')
        self.assert_(isinstance(sqs, SearchQuerySet))
        self.assertEqual(repr(sqs.query.query_filter), '<SQ: AND (content__exact=test AND content__exact=search AND NOT (content__exact=stuff))>')

        sqs = self.bsqs.auto_query('test "my thing" search -stuff')
        self.assert_(isinstance(sqs, SearchQuerySet))
        self.assertEqual(repr(sqs.query.query_filter), '<SQ: AND (content__exact=my thing AND content__exact=test AND content__exact=search AND NOT (content__exact=stuff))>')

        sqs = self.bsqs.auto_query('test "my thing" search \'moar quotes\' -stuff')
        self.assert_(isinstance(sqs, SearchQuerySet))
        self.assertEqual(repr(sqs.query.query_filter), "<SQ: AND (content__exact=my thing AND content__exact=test AND content__exact=search AND content__exact='moar AND content__exact=quotes' AND NOT (content__exact=stuff))>")

        sqs = self.bsqs.auto_query('test "my thing" search \'moar quotes\' "foo -stuff')
        self.assert_(isinstance(sqs, SearchQuerySet))
        self.assertEqual(repr(sqs.query.query_filter), '<SQ: AND (content__exact=my thing AND content__exact=test AND content__exact=search AND content__exact=\'moar AND content__exact=quotes\' AND content__exact="foo AND NOT (content__exact=stuff))>')

        sqs = self.bsqs.auto_query('test - stuff')
        self.assert_(isinstance(sqs, SearchQuerySet))
        self.assertEqual(repr(sqs.query.query_filter), '<SQ: AND (content__exact=test AND content__exact=- AND content__exact=stuff)>')

        # Ensure bits in exact matches get escaped properly as well.
        sqs = self.bsqs.auto_query('"pants:rule"')
        self.assert_(isinstance(sqs, SearchQuerySet))
        self.assertEqual(repr(sqs.query.query_filter), '<SQ: AND content__exact=pants:rule>')

    def test_count(self):
        self.assertEqual(self.bsqs.count(), 0)

    def test_facet_counts(self):
        self.assertEqual(self.bsqs.facet_counts(), {})

    def test_best_match(self):
        self.assert_(isinstance(self.msqs.best_match(), SearchResult))

    def test_latest(self):
        self.assert_(isinstance(self.msqs.latest('pub_date'), SearchResult))

    def test_more_like_this(self):
        mock = MockModel()
        mock.id = 1

        self.assertEqual(len(self.msqs.more_like_this(mock)), 100)

    def test_facets(self):
        sqs = self.bsqs.facet('foo')
        self.assert_(isinstance(sqs, SearchQuerySet))
        self.assertEqual(len(sqs.query.facets), 1)

        sqs2 = self.bsqs.facet('foo').facet('bar')
        self.assert_(isinstance(sqs2, SearchQuerySet))
        self.assertEqual(len(sqs2.query.facets), 2)

    def test_date_facets(self):
        # NOTE(review): Python-2-only ``except ..., e`` syntax; needs
        # ``except FacetingError as e`` before any Python 3 migration.
        try:
            sqs = self.bsqs.date_facet('foo', start_date=datetime.date(2008, 2, 25), end_date=datetime.date(2009, 2, 25), gap_by='smarblaph')
            self.fail()
        except FacetingError, e:
            self.assertEqual(str(e), "The gap_by ('smarblaph') must be one of the following: year, month, day, hour, minute, second.")

        sqs = self.bsqs.date_facet('foo', start_date=datetime.date(2008, 2, 25), end_date=datetime.date(2009, 2, 25), gap_by='month')
        self.assert_(isinstance(sqs, SearchQuerySet))
        self.assertEqual(len(sqs.query.date_facets), 1)

        sqs2 = self.bsqs.date_facet('foo', start_date=datetime.date(2008, 2, 25), end_date=datetime.date(2009, 2, 25), gap_by='month').date_facet('bar', start_date=datetime.date(2007, 2, 25), end_date=datetime.date(2009, 2, 25), gap_by='year')
        self.assert_(isinstance(sqs2, SearchQuerySet))
        self.assertEqual(len(sqs2.query.date_facets), 2)
Пример #49
0
    def get_context(self, request, *args, **kwargs):
        """Build the template context for a faceted activity search.

        Reads ``q`` and per-facet id lists from request.GET, runs a haystack
        query over live ActivityPage objects, computes facet counts, narrows
        by the selected facets, and paginates the results.
        """
        # (facet name, (facet model, <flag>, facet size)).
        # The third element is passed as ``size=`` to sqs.facet() below; the
        # middle boolean is presumably consumed by get_all_facets — TODO confirm.
        facet_map = (
            ('building_block', (ActivityBuildingBlock, False, 10)),
            ('school_subject', (ActivitySchoolSubject, False, 25)),
            ('topic', (ActivityTopic, True, 25)),
            ('grade_level', (ActivityGradeLevel, False, 10)),
            ('age_range', (ActivityAgeRange, False, 10)),
            ('student_characteristics', (ActivityStudentCharacteristics, False,
                                         10)),  # noqa: E501
            ('activity_type', (ActivityType, False, 10)),
            ('teaching_strategy', (ActivityTeachingStrategy, False, 25)),
            ('blooms_taxonomy_level', (ActivityBloomsTaxonomyLevel, False,
                                       25)),  # noqa: E501
            ('activity_duration', (ActivityDuration, False, 10)),
            ('jump_start_coalition', (ActivityJumpStartCoalition, False, 25)),
            ('council_for_economic_education', (ActivityCouncilForEconEd,
                                                False, 25)),  # noqa: E501
        )
        search_query = request.GET.get('q', '')  # haystack cleans this string
        sqs = SearchQuerySet().models(ActivityPage).filter(live=True)
        total_activities = sqs.count()
        # Load selected facets: keep only integer ids from the querystring and
        # build a Solr-style "facet_exact:a OR facet_exact:b" narrow query.
        selected_facets = {}
        facet_queries = {}

        for facet, facet_config in facet_map:
            sqs = sqs.facet(str(facet), size=facet_config[2])
            if facet in request.GET and request.GET.get(facet):
                selected_facets[facet] = [
                    int(value) for value in request.GET.getlist(facet)
                    if value.isdigit()
                ]
                facet_queries[facet] = facet + '_exact:' + (
                    " OR " + facet + "_exact:").join(
                        [str(value) for value in selected_facets[facet]])

        payload = {
            'search_query': search_query,
            'results': [],
            'total_results': 0,
            'total_activities': total_activities,
            'selected_facets': selected_facets,
            'facet_queries': facet_queries,
            'all_facets': {},
        }

        # Apply search query if it exists, but don't apply facets
        if search_query:
            sqs = sqs.filter(content=search_query).order_by(
                '-_score', '-date')  # noqa: E501
        else:
            sqs = sqs.order_by('-date')

        # Get all facets and their counts *before* narrowing, so every facet
        # option stays visible in the UI.
        facet_counts = sqs.facet_counts()
        all_facets = self.get_all_facets(facet_map, sqs, facet_counts,
                                         facet_queries,
                                         selected_facets)  # noqa: E501

        # List all facet blocks that need to be expanded
        always_expanded = {'building_block', 'topic', 'school_subject'}
        conditionally_expanded = {
            facet_name
            for facet_name, facet_items in all_facets.items()
            if any(facet['selected'] is True for facet in facet_items)
        }
        expanded_facets = always_expanded.union(set(conditionally_expanded))

        payload.update({
            'facet_counts': facet_counts,
            'all_facets': all_facets,
            'expanded_facets': expanded_facets,
        })

        # Apply all the active facet values to our search results
        for facet_narrow_query in facet_queries.values():
            sqs = sqs.narrow(facet_narrow_query)

        results = [activity.object for activity in sqs]
        total_results = sqs.count()

        payload.update({
            'results': results,
            'total_results': total_results,
        })
        self.results = payload
        # Paginate with validated per-page size and page number from the request.
        results_per_page = validate_results_per_page(request)
        paginator = Paginator(payload['results'], results_per_page)
        current_page = validate_page_number(request, paginator)
        paginated_page = paginator.page(current_page)

        context = super(ActivityIndexPage, self).get_context(request)
        context.update({
            'facet_counts': facet_counts,
            'facets': all_facets,
            'activities': paginated_page,
            'total_results': total_results,
            'results_per_page': results_per_page,
            'current_page': current_page,
            'paginator': paginator,
            'show_filters': bool(facet_queries),
        })
        return context
Пример #50
0
def _location_data(request):
    """
    Convert Solr facet counts to a JSON object for consumption by the
    location dialogue when creating/editing Saved Searches.

    :param request: current HttpRequest (unused; kept for the view
        calling convention).
    :return: JSON string shaped like::

        {
            "countries": {
                "United States": {
                    "states": {
                        "Indiana": {"cities": ["Indy", "Carmel"]},
                        "None":   {"cities": [...]},
                    }
                },
                "None": {"states": {"None": {"cities": []}}},
            }
        }

    There are "None" keys at every geographical level to capture entries
    that do not have values in those fields (mostly non-Canadian/US
    locations); Solr is assumed to emit the literal string "None" for
    missing components — TODO confirm against the index schema.
    """
    # Seed with "None" buckets so locations lacking a country/state still
    # have somewhere to land.
    loc_data = {'countries': {'None': {'states': {'None': {'cities': []}}}}}
    sqs = SearchQuerySet().facet("full_loc").facet("country").facet_limit(-1)
    facet_fields = sqs.facet_counts()['fields']

    # Facet counts are (value, count) tuples; only the values matter here.
    # Each full_loc value looks like
    #   "city::Topeka@@country::United States@@location::Topeka, KS@@state::Kansas"
    # and parses into e.g.
    #   {'city': 'Topeka', 'country': 'United States',
    #    'location': 'Topeka, KS', 'state': 'Kansas'}
    locs = [dict(atom.split('::') for atom in value.split('@@'))
            for value, _count in facet_fields['full_loc']]

    for country, _count in facet_fields['country']:
        # Fixed: the original keyed this dict on the whole (value, count)
        # tuple, which made the string-keyed lookups below raise KeyError.
        loc_data['countries'][country] = {'states': {'None': {'cities': []}}}

    for loc in locs:
        state = loc['state']
        # Drill down to the 'states' mapping for this location's country.
        states = loc_data['countries'][loc['country']]['states']
        states.setdefault(state, {'cities': []})
        # Fixed: append to the state's city list; the original appended to
        # states['cities'], i.e. a nonexistent key on the states mapping.
        states[state]['cities'].append(loc['city'])

    return json.dumps(loc_data)
Пример #51
0
class SearchQuerySetTestCase(TestCase):
    """Unit tests for ``SearchQuerySet`` chaining, caching, and faceting.

    Runs entirely against in-memory test doubles: ``DummySearchBackend``
    (always returns zero results), ``MockSearchBackend`` (returns the
    fixed ``MOCK_SEARCH_RESULTS``), and ``MixedMockSearchBackend`` (drops
    some hits so the result cache must tolerate fewer objects than the
    hit count).  A throwaway ``SearchSite`` is swapped in for the
    duration of each test so no real index is touched.
    """

    def setUp(self):
        super(SearchQuerySetTestCase, self).setUp()
        # Three querysets, one per backend double (see class docstring).
        self.bsqs = SearchQuerySet(query=DummySearchQuery(backend=DummySearchBackend()))
        self.msqs = SearchQuerySet(query=MockSearchQuery(backend=MockSearchBackend()))
        self.mmsqs = SearchQuerySet(query=MockSearchQuery(backend=MixedMockSearchBackend()))
        
        # Stow.
        self.old_site = haystack.site
        test_site = SearchSite()
        test_site.register(MockModel)
        haystack.site = test_site
    
    def tearDown(self):
        # Restore.
        haystack.site = self.old_site
        super(SearchQuerySetTestCase, self).tearDown()
    
    def test_len(self):
        # Dummy always returns 0.
        self.assertEqual(len(self.bsqs), 0)
        
        self.assertEqual(len(self.msqs), 100)
    
    def test_iter(self):
        # Dummy always returns [].
        self.assertEqual([result for result in self.bsqs.all()], [])
        
        msqs = self.msqs.all()
        results = [result for result in msqs]
        self.assertEqual(results, MOCK_SEARCH_RESULTS)
    
    def test_slice(self):
        self.assertEqual(self.msqs.all()[1:11], MOCK_SEARCH_RESULTS[1:11])
        self.assertEqual(self.msqs.all()[50], MOCK_SEARCH_RESULTS[50])
    
    def test_manual_iter(self):
        results = self.msqs.all()
        
        for offset, result in enumerate(results._manual_iter()):
            self.assertEqual(result, MOCK_SEARCH_RESULTS[offset])
        
        # Test to ensure we properly fill the cache, even if we get fewer
        # results back (not in the SearchSite) than the hit count indicates.
        # This will hang indefinitely if broken.
        results = self.mmsqs.all()
        self.assertEqual([result.pk for result in results._manual_iter()], [0, 1, 2, 3, 4, 5, 6, 7, 8, 10, 11, 12, 13, 15, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29])
    
    def test_fill_cache(self):
        # Each _fill_cache() call should pull one page (10 hits) into the
        # result cache.
        results = self.msqs.all()
        self.assertEqual(len(results._result_cache), 0)
        results._fill_cache()
        self.assertEqual(len(results._result_cache), 10)
        results._fill_cache()
        self.assertEqual(len(results._result_cache), 20)
        
        # Test to ensure we properly fill the cache, even if we get fewer
        # results back (not in the SearchSite) than the hit count indicates.
        results = self.mmsqs.all()
        self.assertEqual(len(results._result_cache), 0)
        self.assertEqual([result.pk for result in results._result_cache], [])
        results._fill_cache()
        self.assertEqual(len(results._result_cache), 10)
        self.assertEqual([result.pk for result in results._result_cache], [0, 1, 2, 3, 4, 5, 6, 7, 8, 10])
        results._fill_cache()
        self.assertEqual(len(results._result_cache), 20)
        self.assertEqual([result.pk for result in results._result_cache], [0, 1, 2, 3, 4, 5, 6, 7, 8, 10, 11, 12, 13, 15, 17, 18, 19, 20, 21, 22])
        results._fill_cache()
        self.assertEqual(len(results._result_cache), 27)
        self.assertEqual([result.pk for result in results._result_cache], [0, 1, 2, 3, 4, 5, 6, 7, 8, 10, 11, 12, 13, 15, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29])
    
    def test_cache_is_full(self):
        # Dummy always has a count of 0 and an empty _result_cache, hence True.
        self.assertEqual(self.bsqs._cache_is_full(), True)
        
        self.assertEqual(self.msqs._cache_is_full(), False)
        results = self.msqs.all()
        fire_the_iterator_and_fill_cache = [result for result in results]
        self.assertEqual(results._cache_is_full(), True)
    
    def test_all(self):
        sqs = self.bsqs.all()
        self.assert_(isinstance(sqs, SearchQuerySet))
    
    def test_filter(self):
        sqs = self.bsqs.filter(content='foo')
        self.assert_(isinstance(sqs, SearchQuerySet))
        self.assertEqual(len(sqs.query.query_filters), 1)
    
    def test_exclude(self):
        sqs = self.bsqs.exclude(content='foo')
        self.assert_(isinstance(sqs, SearchQuerySet))
        self.assertEqual(len(sqs.query.query_filters), 1)
    
    def test_order_by(self):
        sqs = self.bsqs.order_by('foo')
        self.assert_(isinstance(sqs, SearchQuerySet))
        self.assert_('foo' in sqs.query.order_by)
    
    def test_models(self):
        # Uses a local SearchSite so the registered-model count is known.
        mock_index_site = SearchSite()
        mock_index_site.register(MockModel)
        mock_index_site.register(AnotherMockModel)
        
        bsqs = SearchQuerySet(site=mock_index_site)
        
        sqs = bsqs.all()
        self.assert_(isinstance(sqs, SearchQuerySet))
        self.assertEqual(len(sqs.query.models), 0)
        
        sqs = bsqs.models(MockModel)
        self.assert_(isinstance(sqs, SearchQuerySet))
        self.assertEqual(len(sqs.query.models), 1)
        
        sqs = bsqs.models(MockModel, AnotherMockModel)
        self.assert_(isinstance(sqs, SearchQuerySet))
        self.assertEqual(len(sqs.query.models), 2)
    
    def test_boost(self):
        sqs = self.bsqs.boost('foo', 10)
        self.assert_(isinstance(sqs, SearchQuerySet))
        self.assertEqual(len(sqs.query.boost.keys()), 1)
    
    def test_highlight(self):
        sqs = self.bsqs.highlight()
        self.assert_(isinstance(sqs, SearchQuerySet))
        self.assertEqual(sqs.query.highlight, True)
    
    def test_spelling(self):
        # Test the case where spelling support is disabled.
        sqs = self.bsqs.filter(content='Indx')
        self.assert_(isinstance(sqs, SearchQuerySet))
        self.assertEqual(sqs.spelling_suggestion(), None)
    
    def test_raw_search(self):
        self.assertEqual(len(self.bsqs.raw_search('foo')), 0)
        self.assertEqual(len(self.bsqs.raw_search('content__exact hello AND content__exact world')), 1)
    
    def test_load_all(self):
        # If nothing is registered, you get nothing.
        haystack.site.unregister(MockModel)
        sqs = self.msqs.load_all()
        self.assert_(isinstance(sqs, SearchQuerySet))
        self.assertEqual(len(sqs), 0)
        
        # For full tests, see the solr_backend.
    
    def test_load_all_queryset(self):
        sqs = self.msqs.load_all()
        self.assertEqual(len(sqs._load_all_querysets), 0)
        
        sqs = sqs.load_all_queryset(MockModel, MockModel.objects.filter(id__gt=1))
        self.assert_(isinstance(sqs, SearchQuerySet))
        self.assertEqual(len(sqs._load_all_querysets), 1)
        
        # For full tests, see the solr_backend.
    
    def test_auto_query(self):
        # Leading '-' on a standalone token negates it; quoted phrases
        # (double quotes only) become a single exact filter.
        sqs = self.bsqs.auto_query('test search -stuff')
        self.assert_(isinstance(sqs, SearchQuerySet))
        self.assertEqual([repr(the_filter) for the_filter in sqs.query.query_filters], ['<QueryFilter: AND content__exact=test>', '<QueryFilter: AND content__exact=search>', '<QueryFilter: NOT content__exact=stuff>'])
        
        sqs = self.bsqs.auto_query('test "my thing" search -stuff')
        self.assert_(isinstance(sqs, SearchQuerySet))
        self.assertEqual([repr(the_filter) for the_filter in sqs.query.query_filters], ['<QueryFilter: AND content__exact=my thing>', '<QueryFilter: AND content__exact=test>', '<QueryFilter: AND content__exact=search>', '<QueryFilter: NOT content__exact=stuff>'])
        
        sqs = self.bsqs.auto_query('test "my thing" search \'moar quotes\' -stuff')
        self.assert_(isinstance(sqs, SearchQuerySet))
        self.assertEqual([repr(the_filter) for the_filter in sqs.query.query_filters], ['<QueryFilter: AND content__exact=my thing>', '<QueryFilter: AND content__exact=moar quotes>', '<QueryFilter: AND content__exact=test>', '<QueryFilter: AND content__exact=search>', '<QueryFilter: NOT content__exact=stuff>'])
        
        sqs = self.bsqs.auto_query('test "my thing" search \'moar quotes\' "foo -stuff')
        self.assert_(isinstance(sqs, SearchQuerySet))
        self.assertEqual([repr(the_filter) for the_filter in sqs.query.query_filters], ['<QueryFilter: AND content__exact=my thing>', '<QueryFilter: AND content__exact=moar quotes>', '<QueryFilter: AND content__exact=test>', '<QueryFilter: AND content__exact=search>', '<QueryFilter: AND content__exact="foo>', '<QueryFilter: NOT content__exact=stuff>'])
        
        sqs = self.bsqs.auto_query('test - stuff')
        self.assert_(isinstance(sqs, SearchQuerySet))
        self.assertEqual([repr(the_filter) for the_filter in sqs.query.query_filters], ['<QueryFilter: AND content__exact=test>', '<QueryFilter: AND content__exact=->', '<QueryFilter: AND content__exact=stuff>'])
    
    def test_count(self):
        self.assertEqual(self.bsqs.count(), 0)
    
    def test_facet_counts(self):
        self.assertEqual(self.bsqs.facet_counts(), {})
    
    def test_best_match(self):
        self.assert_(isinstance(self.msqs.best_match(), SearchResult))
    
    def test_latest(self):
        self.assert_(isinstance(self.msqs.latest('pub_date'), SearchResult))
    
    def test_more_like_this(self):
        mock = MockModel()
        mock.id = 1
        
        self.assertEqual(len(self.msqs.more_like_this(mock)), 100)
    
    def test_facets(self):
        sqs = self.bsqs.facet('foo')
        self.assert_(isinstance(sqs, SearchQuerySet))
        self.assertEqual(len(sqs.query.facets), 1)
        
        sqs2 = self.bsqs.facet('foo').facet('bar')
        self.assert_(isinstance(sqs2, SearchQuerySet))
        self.assertEqual(len(sqs2.query.facets), 2)
    
    def test_date_facets(self):
        sqs = self.bsqs.date_facet('foo', start_date=datetime.date(2008, 2, 25), end_date=datetime.date(2009, 2, 25), gap_by='month')
        self.assert_(isinstance(sqs, SearchQuerySet))
        self.assertEqual(len(sqs.query.date_facets), 1)
        
        sqs2 = self.bsqs.date_facet('foo', start_date=datetime.date(2008, 2, 25), end_date=datetime.date(2009, 2, 25), gap_by='month').date_facet('bar', start_date=datetime.date(2007, 2, 25), end_date=datetime.date(2009, 2, 25), gap_by='year')
        self.assert_(isinstance(sqs2, SearchQuerySet))
        self.assertEqual(len(sqs2.query.date_facets), 2)
    
    def test_query_facets(self):
        sqs = self.bsqs.query_facet('foo', '[bar TO *]')
        self.assert_(isinstance(sqs, SearchQuerySet))
        self.assertEqual(len(sqs.query.query_facets), 1)
        
        sqs2 = self.bsqs.query_facet('foo', '[bar TO *]').query_facet('bar', '[100 TO 499]')
        self.assert_(isinstance(sqs2, SearchQuerySet))
        self.assertEqual(len(sqs2.query.query_facets), 2)
    
    def test_narrow(self):
        sqs = self.bsqs.narrow('foo:moof')
        self.assert_(isinstance(sqs, SearchQuerySet))
        self.assertEqual(len(sqs.query.narrow_queries), 1)
    
    def test_clone(self):
        # A clone shares the query definition but gets a fresh, empty
        # result cache.
        results = self.msqs.filter(foo='bar', foo__lt='10')
        
        clone = results._clone()
        self.assert_(isinstance(clone, SearchQuerySet))
        self.assertEqual(clone.site, results.site)
        self.assertEqual(str(clone.query), str(results.query))
        self.assertEqual(clone._result_cache, [])
        self.assertEqual(clone._result_count, None)
        self.assertEqual(clone._cache_full, False)
    
    def test_chaining(self):
        sqs = self.bsqs.filter(content='foo')
        self.assert_(isinstance(sqs, SearchQuerySet))
        self.assertEqual(len(sqs.query.query_filters), 1)
        
        # A second instance should inherit none of the changes from above.
        sqs = self.bsqs.filter(content='bar')
        self.assert_(isinstance(sqs, SearchQuerySet))
        self.assertEqual(len(sqs.query.query_filters), 1)
    
    def test_none(self):
        sqs = self.bsqs.none()
        self.assert_(isinstance(sqs, EmptySearchQuerySet))
        self.assertEqual(len(sqs), 0)
Пример #52
0
class SearchQuerySetTestCase(TestCase):
    """Integration-style tests for ``SearchQuerySet`` on the 'default'
    connection.

    Builds a ``UnifiedIndex`` over mock models, populates the backend
    from the ``bulk_data.json`` fixture (23 ``MockModel`` rows), and
    exercises iteration, slicing, cache filling, faceting, and
    ``auto_query`` parsing.  ``settings.DEBUG`` is forced on so that
    ``connections['default'].queries`` records each backend hit,
    letting tests assert exactly how many queries an operation issues.
    """
    fixtures = ['bulk_data.json']

    def setUp(self):
        super(SearchQuerySetTestCase, self).setUp()

        # Stow.
        self.old_unified_index = connections['default']._index
        self.ui = UnifiedIndex()
        self.bmmsi = BasicMockModelSearchIndex()
        self.cpkmmsi = CharPKMockModelSearchIndex()
        self.ui.build(indexes=[self.bmmsi, self.cpkmmsi])
        connections['default']._index = self.ui

        # Update the "index".
        backend = connections['default'].get_backend()
        backend.clear()
        backend.update(self.bmmsi, MockModel.objects.all())

        self.msqs = SearchQuerySet()

        # Stow.
        self.old_debug = settings.DEBUG
        settings.DEBUG = True

        reset_search_queries()

    def tearDown(self):
        # Restore.
        connections['default']._index = self.old_unified_index
        settings.DEBUG = self.old_debug
        super(SearchQuerySetTestCase, self).tearDown()

    def test_len(self):
        self.assertEqual(len(self.msqs), 23)

    def test_repr(self):
        # repr() shows at most the first page of results plus a
        # truncation marker, and should cost exactly one backend query.
        reset_search_queries()
        self.assertEqual(len(connections['default'].queries), 0)
        self.assertEqual(
            repr(self.msqs),
            "[<SearchResult: core.mockmodel (pk=u'1')>, <SearchResult: core.mockmodel (pk=u'2')>, <SearchResult: core.mockmodel (pk=u'3')>, <SearchResult: core.mockmodel (pk=u'4')>, <SearchResult: core.mockmodel (pk=u'5')>, <SearchResult: core.mockmodel (pk=u'6')>, <SearchResult: core.mockmodel (pk=u'7')>, <SearchResult: core.mockmodel (pk=u'8')>, <SearchResult: core.mockmodel (pk=u'9')>, <SearchResult: core.mockmodel (pk=u'10')>, <SearchResult: core.mockmodel (pk=u'11')>, <SearchResult: core.mockmodel (pk=u'12')>, <SearchResult: core.mockmodel (pk=u'13')>, <SearchResult: core.mockmodel (pk=u'14')>, <SearchResult: core.mockmodel (pk=u'15')>, <SearchResult: core.mockmodel (pk=u'16')>, <SearchResult: core.mockmodel (pk=u'17')>, <SearchResult: core.mockmodel (pk=u'18')>, <SearchResult: core.mockmodel (pk=u'19')>, '...(remaining elements truncated)...']"
        )
        self.assertEqual(len(connections['default'].queries), 1)

    def test_iter(self):
        reset_search_queries()
        self.assertEqual(len(connections['default'].queries), 0)
        msqs = self.msqs.all()
        results = [int(res.pk) for res in msqs]
        self.assertEqual(results, [res.pk for res in MOCK_SEARCH_RESULTS[:23]])
        self.assertEqual(len(connections['default'].queries), 3)

    def test_slice(self):
        reset_search_queries()
        self.assertEqual(len(connections['default'].queries), 0)
        results = self.msqs.all()
        self.assertEqual([int(res.pk) for res in results[1:11]],
                         [res.pk for res in MOCK_SEARCH_RESULTS[1:11]])
        self.assertEqual(len(connections['default'].queries), 1)

        reset_search_queries()
        self.assertEqual(len(connections['default'].queries), 0)
        results = self.msqs.all()
        self.assertEqual(int(results[22].pk), MOCK_SEARCH_RESULTS[22].pk)
        self.assertEqual(len(connections['default'].queries), 1)

    def test_manual_iter(self):
        results = self.msqs.all()

        reset_search_queries()
        self.assertEqual(len(connections['default'].queries), 0)

        check = [result.pk for result in results._manual_iter()]
        self.assertEqual(check, [
            u'1', u'2', u'3', u'4', u'5', u'6', u'7', u'8', u'9', u'10', u'11',
            u'12', u'13', u'14', u'15', u'16', u'17', u'18', u'19', u'20',
            u'21', u'22', u'23'
        ])

        self.assertEqual(len(connections['default'].queries), 3)

        reset_search_queries()
        self.assertEqual(len(connections['default'].queries), 0)

        # Test to ensure we properly fill the cache, even if we get fewer
        # results back (not a handled model) than the hit count indicates.
        # This will hang indefinitely if broken.
        old_ui = self.ui
        self.ui.build(indexes=[self.cpkmmsi])
        connections['default']._index = self.ui
        self.cpkmmsi.update()

        results = self.msqs.all()
        loaded = [result.pk for result in results._manual_iter()]
        self.assertEqual(loaded, [u'sometext', u'1234'])
        self.assertEqual(len(connections['default'].queries), 1)

        connections['default']._index = old_ui

    def test_fill_cache(self):
        reset_search_queries()
        self.assertEqual(len(connections['default'].queries), 0)
        results = self.msqs.all()
        self.assertEqual(len(results._result_cache), 0)
        self.assertEqual(len(connections['default'].queries), 0)
        results._fill_cache(0, 10)
        self.assertEqual(
            len([
                result for result in results._result_cache
                if result is not None
            ]), 10)
        self.assertEqual(len(connections['default'].queries), 1)
        results._fill_cache(10, 20)
        self.assertEqual(
            len([
                result for result in results._result_cache
                if result is not None
            ]), 20)
        self.assertEqual(len(connections['default'].queries), 2)

        reset_search_queries()
        self.assertEqual(len(connections['default'].queries), 0)

        # Test to ensure we properly fill the cache, even if we get fewer
        # results back (not a handled model) than the hit count indicates.
        sqs = SearchQuerySet().all()
        sqs.query.backend = MixedMockSearchBackend('default')
        results = sqs
        self.assertEqual(
            len([
                result for result in results._result_cache
                if result is not None
            ]), 0)
        self.assertEqual([
            int(result.pk)
            for result in results._result_cache if result is not None
        ], [])
        self.assertEqual(len(connections['default'].queries), 0)
        results._fill_cache(0, 10)
        self.assertEqual(
            len([
                result for result in results._result_cache
                if result is not None
            ]), 9)
        self.assertEqual([
            int(result.pk)
            for result in results._result_cache if result is not None
        ], [1, 2, 3, 4, 5, 6, 7, 8, 10])
        self.assertEqual(len(connections['default'].queries), 2)
        results._fill_cache(10, 20)
        self.assertEqual(
            len([
                result for result in results._result_cache
                if result is not None
            ]), 17)
        self.assertEqual([
            int(result.pk)
            for result in results._result_cache if result is not None
        ], [1, 2, 3, 4, 5, 6, 7, 8, 10, 11, 12, 15, 16, 17, 18, 19, 20])
        self.assertEqual(len(connections['default'].queries), 4)
        results._fill_cache(20, 30)
        self.assertEqual(
            len([
                result for result in results._result_cache
                if result is not None
            ]), 20)
        self.assertEqual([
            int(result.pk)
            for result in results._result_cache if result is not None
        ], [
            1, 2, 3, 4, 5, 6, 7, 8, 10, 11, 12, 15, 16, 17, 18, 19, 20, 21, 22,
            23
        ])
        self.assertEqual(len(connections['default'].queries), 6)

    def test_cache_is_full(self):
        reset_search_queries()
        self.assertEqual(len(connections['default'].queries), 0)
        self.assertEqual(self.msqs._cache_is_full(), False)
        results = self.msqs.all()
        fire_the_iterator_and_fill_cache = [result for result in results]
        self.assertEqual(results._cache_is_full(), True)
        self.assertEqual(len(connections['default'].queries), 3)

    def test_all(self):
        sqs = self.msqs.all()
        self.assertTrue(isinstance(sqs, SearchQuerySet))

    def test_filter(self):
        sqs = self.msqs.filter(content='foo')
        self.assertTrue(isinstance(sqs, SearchQuerySet))
        self.assertEqual(len(sqs.query.query_filter), 1)

    def test_exclude(self):
        sqs = self.msqs.exclude(content='foo')
        self.assertTrue(isinstance(sqs, SearchQuerySet))
        self.assertEqual(len(sqs.query.query_filter), 1)

    def test_order_by(self):
        sqs = self.msqs.order_by('foo')
        self.assertTrue(isinstance(sqs, SearchQuerySet))
        self.assertTrue('foo' in sqs.query.order_by)

    def test_models(self):
        # Stow.
        old_unified_index = connections['default']._index
        ui = UnifiedIndex()
        bmmsi = BasicMockModelSearchIndex()
        bammsi = BasicAnotherMockModelSearchIndex()
        ui.build(indexes=[bmmsi, bammsi])
        connections['default']._index = ui

        msqs = SearchQuerySet()

        sqs = msqs.all()
        self.assertTrue(isinstance(sqs, SearchQuerySet))
        self.assertEqual(len(sqs.query.models), 0)

        sqs = msqs.models(MockModel)
        self.assertTrue(isinstance(sqs, SearchQuerySet))
        self.assertEqual(len(sqs.query.models), 1)

        sqs = msqs.models(MockModel, AnotherMockModel)
        self.assertTrue(isinstance(sqs, SearchQuerySet))
        self.assertEqual(len(sqs.query.models), 2)

        # This will produce a warning.
        ui.build(indexes=[bmmsi])
        sqs = msqs.models(AnotherMockModel)
        self.assertTrue(isinstance(sqs, SearchQuerySet))
        self.assertEqual(len(sqs.query.models), 1)

    def test_result_class(self):
        sqs = self.msqs.all()
        self.assertTrue(issubclass(sqs.query.result_class, SearchResult))

        # Custom class.
        class IttyBittyResult(object):
            pass

        sqs = self.msqs.result_class(IttyBittyResult)
        self.assertTrue(issubclass(sqs.query.result_class, IttyBittyResult))

        # Reset to default.
        sqs = self.msqs.result_class(None)
        self.assertTrue(issubclass(sqs.query.result_class, SearchResult))

    def test_boost(self):
        sqs = self.msqs.boost('foo', 10)
        self.assertTrue(isinstance(sqs, SearchQuerySet))
        self.assertEqual(len(sqs.query.boost.keys()), 1)

    def test_highlight(self):
        sqs = self.msqs.highlight()
        self.assertTrue(isinstance(sqs, SearchQuerySet))
        self.assertEqual(sqs.query.highlight, True)

    def test_spelling(self):
        # Test the case where spelling support is disabled.
        sqs = self.msqs.filter(content='Indx')
        self.assertTrue(isinstance(sqs, SearchQuerySet))
        self.assertEqual(sqs.spelling_suggestion(), None)
        self.assertEqual(sqs.spelling_suggestion('indexy'), None)

    def test_raw_search(self):
        self.assertEqual(len(self.msqs.raw_search('foo')), 23)
        self.assertEqual(
            len(
                self.msqs.raw_search(
                    '(content__exact:hello AND content__exact:world)')), 23)

    def test_load_all(self):
        # Models with character primary keys.
        sqs = SearchQuerySet()
        sqs.query.backend = CharPKMockSearchBackend('charpk')
        results = sqs.load_all().all()
        self.assertEqual(len(results._result_cache), 0)
        results._fill_cache(0, 2)
        self.assertEqual(
            len([
                result for result in results._result_cache
                if result is not None
            ]), 2)

        # If nothing is handled, you get nothing.
        old_ui = connections['default']._index
        ui = UnifiedIndex()
        ui.build(indexes=[])
        connections['default']._index = ui

        sqs = self.msqs.load_all()
        self.assertTrue(isinstance(sqs, SearchQuerySet))
        self.assertEqual(len(sqs), 0)

        connections['default']._index = old_ui

        # For full tests, see the solr_backend.

    def test_load_all_read_queryset(self):
        # Stow.
        old_ui = connections['default']._index
        ui = UnifiedIndex()
        gafmmsi = GhettoAFifthMockModelSearchIndex()
        ui.build(indexes=[gafmmsi])
        connections['default']._index = ui
        gafmmsi.update()

        sqs = SearchQuerySet()
        results = sqs.load_all().all()
        results.query.backend = ReadQuerySetMockSearchBackend('default')
        results._fill_cache(0, 2)

        # The deleted result isn't returned
        self.assertEqual(
            len([
                result for result in results._result_cache
                if result is not None
            ]), 1)

        # Register a SearchIndex with a read_queryset that returns deleted items
        rqstsi = TextReadQuerySetTestSearchIndex()
        ui.build(indexes=[rqstsi])
        rqstsi.update()

        sqs = SearchQuerySet()
        results = sqs.load_all().all()
        results.query.backend = ReadQuerySetMockSearchBackend('default')
        results._fill_cache(0, 2)

        # Both the deleted and not deleted items are returned
        self.assertEqual(
            len([
                result for result in results._result_cache
                if result is not None
            ]), 2)

        # Restore.
        connections['default']._index = old_ui

    def test_auto_query(self):
        # Double-quoted phrases become __exact filters, bare words become
        # __contains, and a leading '-' negates the token.
        sqs = self.msqs.auto_query('test search -stuff')
        self.assertTrue(isinstance(sqs, SearchQuerySet))
        self.assertEqual(
            repr(sqs.query.query_filter),
            '<SQ: AND (content__contains=test AND content__contains=search AND NOT (content__contains=stuff))>'
        )

        sqs = self.msqs.auto_query('test "my thing" search -stuff')
        self.assertTrue(isinstance(sqs, SearchQuerySet))
        self.assertEqual(
            repr(sqs.query.query_filter),
            '<SQ: AND (content__exact=my thing AND content__contains=test AND content__contains=search AND NOT (content__contains=stuff))>'
        )

        sqs = self.msqs.auto_query(
            'test "my thing" search \'moar quotes\' -stuff')
        self.assertTrue(isinstance(sqs, SearchQuerySet))
        self.assertEqual(
            repr(sqs.query.query_filter),
            "<SQ: AND (content__exact=my thing AND content__contains=test AND content__contains=search AND content__contains='moar AND content__contains=quotes' AND NOT (content__contains=stuff))>"
        )

        sqs = self.msqs.auto_query(
            'test "my thing" search \'moar quotes\' "foo -stuff')
        self.assertTrue(isinstance(sqs, SearchQuerySet))
        self.assertEqual(
            repr(sqs.query.query_filter),
            '<SQ: AND (content__exact=my thing AND content__contains=test AND content__contains=search AND content__contains=\'moar AND content__contains=quotes\' AND content__contains="foo AND NOT (content__contains=stuff))>'
        )

        sqs = self.msqs.auto_query('test - stuff')
        self.assertTrue(isinstance(sqs, SearchQuerySet))
        self.assertEqual(
            repr(sqs.query.query_filter),
            '<SQ: AND (content__contains=test AND content__contains=- AND content__contains=stuff)>'
        )

        # Ensure bits in exact matches get escaped properly as well.
        sqs = self.msqs.auto_query('"pants:rule"')
        self.assertTrue(isinstance(sqs, SearchQuerySet))
        self.assertEqual(repr(sqs.query.query_filter),
                         '<SQ: AND content__exact=pants:rule>')

        # Now with a different fieldname
        sqs = self.msqs.auto_query('test search -stuff', fieldname='title')
        self.assertTrue(isinstance(sqs, SearchQuerySet))
        self.assertEqual(
            repr(sqs.query.query_filter),
            '<SQ: AND (title__contains=test AND title__contains=search AND NOT (title__contains=stuff))>'
        )

        sqs = self.msqs.auto_query('test "my thing" search -stuff',
                                   fieldname='title')
        self.assertTrue(isinstance(sqs, SearchQuerySet))
        self.assertEqual(
            repr(sqs.query.query_filter),
            '<SQ: AND (title__exact=my thing AND title__contains=test AND title__contains=search AND NOT (title__contains=stuff))>'
        )

    def test_count(self):
        self.assertEqual(self.msqs.count(), 23)

    def test_facet_counts(self):
        self.assertEqual(self.msqs.facet_counts(), {})

    def test_best_match(self):
        self.assertTrue(isinstance(self.msqs.best_match(), SearchResult))

    def test_latest(self):
        self.assertTrue(isinstance(self.msqs.latest('pub_date'), SearchResult))

    def test_more_like_this(self):
        mock = MockModel()
        mock.id = 1

        self.assertEqual(len(self.msqs.more_like_this(mock)), 23)

    def test_facets(self):
        sqs = self.msqs.facet('foo')
        self.assertTrue(isinstance(sqs, SearchQuerySet))
        self.assertEqual(len(sqs.query.facets), 1)

        sqs2 = self.msqs.facet('foo').facet('bar')
        self.assertTrue(isinstance(sqs2, SearchQuerySet))
        self.assertEqual(len(sqs2.query.facets), 2)

    def test_date_facets(self):
        # An unknown gap_by must raise FacetingError with a helpful
        # message.  NOTE: Python 2-only except syntax below.
        try:
            sqs = self.msqs.date_facet('foo',
                                       start_date=datetime.date(2008, 2, 25),
                                       end_date=datetime.date(2009, 2, 25),
                                       gap_by='smarblaph')
            self.fail()
        except FacetingError, e:
            self.assertEqual(
                str(e),
                "The gap_by ('smarblaph') must be one of the following: year, month, day, hour, minute, second."
            )

        sqs = self.msqs.date_facet('foo',
                                   start_date=datetime.date(2008, 2, 25),
                                   end_date=datetime.date(2009, 2, 25),
                                   gap_by='month')
        self.assertTrue(isinstance(sqs, SearchQuerySet))
        self.assertEqual(len(sqs.query.date_facets), 1)

        sqs2 = self.msqs.date_facet('foo',
                                    start_date=datetime.date(2008, 2, 25),
                                    end_date=datetime.date(2009, 2, 25),
                                    gap_by='month').date_facet(
                                        'bar',
                                        start_date=datetime.date(2007, 2, 25),
                                        end_date=datetime.date(2009, 2, 25),
                                        gap_by='year')
        self.assertTrue(isinstance(sqs2, SearchQuerySet))
        self.assertEqual(len(sqs2.query.date_facets), 2)
Пример #53
0
def debug_facets(facets):
    """Pretty-print the complete (unlimited, index-sorted) counts for the
    given facet field names against the whole index."""
    results = SearchQuerySet().all()
    for field_name in facets:
        # limit=-1 means no cap on returned facet values; sort by term.
        results = results.facet(field_name, sort='index', limit=-1)
    pprint(results.facet_counts())
Пример #54
0
    def get_queryset(self, *args, **kwargs):
        """Build the faceted, geo-filtered SearchQuerySet for this request.

        Reads query params: ``q`` (free text), ``lat``/``lng`` plus a
        distance-unit param (e.g. ``km=5``) for radius search,
        ``w``/``s``/``n``/``e`` for bounding-box search,
        ``selected_facets`` (comma-separated ``field_exact:value`` pairs)
        and ``order_by``.  Group facets are only honoured when the user is
        a member or owner of the group; otherwise results are narrowed to
        the public group.  Collected facet data is stored on
        ``self.facets`` as a side effect.
        """
        qs = SearchQuerySet().all()
        qs = qs.facet('categories')

        # Group facets are only meaningful for logged-in users.
        if self.request.user.is_authenticated():
            qs = qs.facet('groups')

        if self.request.query_params.get('q'):
            qs = qs.auto_query(self.request.query_params.get('q'))

        # A radius is supplied as "<unit>=<value>", e.g. "km=5"; the last
        # matching param wins.
        distance = None
        try:
            for k, v in self.request.query_params.items():
                if k in D.UNITS.keys():
                    distance = {k: v}
        except Exception as e:
            logging.error(e)

        point = None

        try:
            point = Point(float(self.request.query_params['lng']),
                          float(self.request.query_params['lat']))
        except Exception as e:
            logging.error(e)

        if distance and point:
            qs = qs or SearchQuerySet()
            qs = qs.dwithin('location', point,
                            D(**distance)).distance('location', point)

        # Optional bounding box: all four corners must be present.
        try:
            if self.request.query_params.get('w') and self.request.query_params.get('s')\
                    and self.request.query_params.get('n') and self.request.query_params.get('e'):
                bottom_left = Point(float(self.request.query_params['w']),
                                    float(self.request.query_params['s']))
                top_right = Point(float(self.request.query_params['e']),
                                  float(self.request.query_params['n']))
                qs = qs.within('location', bottom_left, top_right)
        except (KeyError, ValueError):
            # Malformed coordinates: skip the bbox filter instead of
            # failing the whole request (was a bare "except: pass").
            pass

        param_facet_url = self.request.query_params.get('selected_facets',
                                                        '').split(',')

        flag_group = False
        for param_facet in param_facet_url:
            if ":" not in param_facet:
                continue
            field, value = param_facet.split(":", 1)
            # Only honour "<name>_exact:<value>" facets.  rpartition avoids
            # the IndexError the original split('_')[1] raised for field
            # names without an underscore.
            base_field, _, modifier = field.rpartition('_')
            if value and modifier == 'exact':
                try:
                    # Validate that the user belongs to the group
                    if base_field == 'groups':
                        InterestGroup.objects.\
                            filter(Q(memberships__user=self.request.user) | Q(owner=self.request.user)).\
                            get(slug=value)
                        flag_group = True
                    qs = qs.narrow('%s:"%s"' % (field, qs.query.clean(value)))
                except InterestGroup.DoesNotExist:
                    # The user has no permission to filter by this group.
                    pass

        self.facets = []
        group_facet = get_facet_by_name(param_facet_url,
                                        qs.facet_counts()['fields'].items(),
                                        'groups', self.request.user)
        if group_facet:
            self.facets.append(group_facet)

        order_by = self.request.query_params.get('order_by')
        if order_by:
            # BUG FIX: the original compared with "is" (identity), so this
            # branch never matched and distance ordering was unguarded.
            if order_by == 'distance':
                # Ordering by distance only makes sense for a radius search.
                if distance and point:
                    qs = qs.order_by(order_by)
            else:
                qs = qs.order_by(order_by)

        # Without a validated group facet, only public results are visible.
        if not flag_group:
            qs = qs.narrow('%s:"%s"' %
                           ('groups_exact', qs.query.clean('public')))

        self.facets.append(
            get_facet_by_name(param_facet_url,
                              qs.facet_counts()['fields'].items(),
                              'categories'))
        return qs
Пример #55
0
def ajaxresponse(request):
    """AJAX profile search: return rendered result divs as an HTML fragment.

    Reads ``name``/``branch``/``year_of_passing`` form fields plus the
    ``branch_facet``/``year_of_passing_facet`` facet selections and an
    ``offset`` for paging (20 results per page).
    """
    print("Incoming query to ajax = [" + request.get_full_path() + "]")

    context = {}
    context['request'] = request

    name = request.GET.get("name", '')
    branch = request.GET.get("branch", '')
    year = request.GET.get("year_of_passing", '')
    offset = request.GET.get("offset", '0')

    branch_facet = request.GET.get("branch_facet", '')
    year_facet = request.GET.get("year_of_passing_facet", '')

    sqs = SearchQuerySet().facet('branch')
    sqs = sqs.facet('year_of_passing')

    if name or branch or year:
        context['form'] = ProfileSearchBasicForm(request.GET)
        sqs = sqs.auto_query(name + branch + year)
    else:
        context['form'] = ProfileSearchBasicForm()

    context['facets'] = sqs.facet_counts()

    # When one facet is selected, recompute the other facet's counts
    # against the narrowed result set.
    if branch_facet:
        context['facets']['fields']['year_of_passing'] = sqs.auto_query(
            branch_facet).facet_counts()['fields']['year_of_passing']
        context['branch_facet_selected'] = branch_facet
    else:
        context['branch_facet_selected'] = ''

    if year_facet:
        context['facets']['fields']['branch'] = sqs.auto_query(
            year_facet).facet_counts()['fields']['branch']
        context['year_facet_selected'] = year_facet
    else:
        context['year_facet_selected'] = ''

    offsetvalue = int(offset) if offset != '' else 0
    results = sqs.auto_query(branch_facet + year_facet).order_by('name')
    context['resultcount'] = results.count()
    results = results[offsetvalue:offsetvalue + 20]

    querystring = ""
    if name != '':
        querystring += ('&name=' + name)
    if branch != '':
        querystring += ('&branch=' + branch)
    if year != '':
        querystring += ('&year_of_passing=' + year)

    if branch_facet != '':
        querystring += ('&branch_facet=' + branch_facet)
    if year_facet != '':
        # BUG FIX: this parameter is read back above as
        # "year_of_passing_facet", so emit the same key (the original
        # wrote "&year_facet=", losing the selection on the next request).
        querystring += ('&year_of_passing_facet=' + year_facet)

    print("Outgoing query string from ajax = [" + querystring + "]")
    context['querystring'] = querystring

    # Build the fragment with list + join instead of quadratic
    # string concatenation.
    parts = []
    if not results:
        parts.append("<p>Sorry, no results found.</p>\n")
    else:
        for result in results:
            parts.append("<div class=\"result_div\">\n")
            parts.append("<h2 class=\"name section-subtitle\">" + result.name + "</h2>\n")
            if result.branch:
                parts.append("<p class=\"branch\">" + result.branch + "</p>\n")
            if result.year_of_passing:
                parts.append("<p class=\"year_of_passing\">" + str(
                    result.year_of_passing) + "</p>\n")
            if result.branch:
                parts.append("<p class=\"city\">" + result.branch + "</p>\n")
            parts.append("</div>\n")
    return HttpResponse("".join(parts))
Пример #56
0
def haystack_search_api(request, format="json", **kwargs):
    """
    View that drives the search api.

    Builds a Haystack SearchQuerySet from the request's query parameters
    (q, id, category, kw, type, start_date/end_date, extent bbox, sort,
    startIndex, limit), normalizes each hit into a resource wrapper, and
    returns results plus facet counts in the requested format:
    "html" returns (data, items) for the explore page, "raw" returns a
    JSON HttpResponse, anything else goes through _search_json.
    """
    from haystack.inputs import Raw
    from haystack.query import SearchQuerySet, SQ

    # Retrieve Query Params
    id = request.REQUEST.get("id", None)
    query = request.REQUEST.get('q', None)
    category = request.REQUEST.get("category", None)
    limit = int(
        request.REQUEST.get(
            "limit", getattr(settings, "HAYSTACK_SEARCH_RESULTS_PER_PAGE",
                             20)))
    startIndex = int(request.REQUEST.get("startIndex", 0))
    sort = request.REQUEST.get("sort", "relevance")
    type_facets = request.REQUEST.get("type", None)
    format = request.REQUEST.get("format", format)
    date_start = request.REQUEST.get("start_date", None)
    date_end = request.REQUEST.get("end_date", None)
    keyword = request.REQUEST.get("kw", None)
    service = request.REQUEST.get("service", None)
    local = request.REQUEST.get("local", None)

    # Geospatial Elements
    bbox = request.REQUEST.get("extent", None)

    ts = time()
    sqs = SearchQuerySet()

    # Hard cap on page size regardless of what the client asked for.
    limit = min(limit, 500)

    # Filter by ID
    if id:
        sqs = sqs.narrow("django_id:%s" % id)

    # Filter by Type and subtype
    if type_facets is not None:
        # The public API says "owner"; the index field says "user".
        type_facets = type_facets.replace("owner", "user").split(",")
        subtype_facets = ["vector", "raster"]
        types = []
        subtypes = []
        for type in type_facets:
            if type in ["map", "layer", "user", "document", "group"]:
                # Type is one of our Major Types (not a sub type)
                types.append(type)
            elif type in subtype_facets:
                subtypes.append(type)

        # Requesting a subtype excludes the subtypes that were NOT asked for.
        if len(subtypes) > 0:
            for sub_type in subtype_facets:
                if sub_type not in subtypes:
                    sqs = sqs.exclude(subtype='%s' % sub_type)

        if len(types) > 0:
            sqs = sqs.narrow("type:%s" % ','.join(map(str, types)))

    # Filter by Query Params
    # haystack bug? if boosted fields aren't included in the
    # query, then the score won't be affected by the boost
    if query:
        if query.startswith('"') or query.startswith('\''):
            #Match exact phrase
            phrase = query.replace('"', '')
            sqs = sqs.filter(
                SQ(title__exact=phrase) | SQ(abstract__exact=phrase)
                | SQ(content__exact=phrase))
        else:
            # Word-by-word query with manual AND/OR handling: each word is
            # matched against title, abstract and content.
            words = query.split()
            for word in range(0, len(words)):
                if word == 0:
                    sqs = sqs.filter(
                        SQ(title=Raw(words[word]))
                        | SQ(abstract=Raw(words[word]))
                        | SQ(content=Raw(words[word])))
                elif words[word] in ["AND", "OR"]:
                    pass
                elif words[word - 1] == "OR":  #previous word OR this word
                    sqs = sqs.filter_or(
                        SQ(title=Raw(words[word]))
                        | SQ(abstract=Raw(words[word]))
                        | SQ(content=Raw(words[word])))
                else:  #previous word AND this word
                    sqs = sqs.filter(
                        SQ(title=Raw(words[word]))
                        | SQ(abstract=Raw(words[word]))
                        | SQ(content=Raw(words[word])))

    # filter by cateory
    if category:
        sqs = sqs.narrow('category:%s' % category)

    #filter by keyword
    if keyword:
        sqs = sqs.narrow('keywords:%s' % keyword)

    if date_start:
        sqs = sqs.filter(SQ(date__gte=date_start))

    if date_end:
        sqs = sqs.filter(SQ(date__lte=date_end))
    """
    ### Code to filter on temporal extent start/end dates instead

    if date_start or date_end:
        #Exclude results with no dates at all
        sqs = sqs.filter(
            SQ(temporal_extent_start=Raw("[* TO *]")) | SQ(temporal_extent_end=Raw("[* TO *]"))
        )
    if temporal_start and temporal_end:
        #Return anything with a start date < date_end or an end date > date_start
        sqs = sqs.filter(
            SQ(temporal_extent_end__gte=date_start) | SQ(temporal_extent_start__lte=date_end)
        )
    elif temporal_start:
        #Exclude anything with an end date <date_start
        sqs = sqs.exclude(
            SQ(temporal_extent_end__lte=date_start)
        )
    elif temporal_end:
        #Exclude anything with a start date > date_end
        sqs = sqs.exclude(
            SQ(temporal_extent_start__gte=date_end)
        )
    """

    # Bounding-box intersection test: keep any result with at least one
    # corner inside the window, or whose bbox fully contains the window.
    if bbox:
        left, right, bottom, top = bbox.split(',')
        sqs = sqs.filter(
            # first check if the bbox has at least one point inside the window
            SQ(bbox_left__gte=left) & SQ(bbox_left__lte=right)
            & SQ(bbox_top__gte=bottom) & SQ(bbox_top__lte=top)
            |  #check top_left is inside the window
            SQ(bbox_right__lte=right) & SQ(bbox_right__gte=left)
            & SQ(bbox_top__lte=top) & SQ(bbox_top__gte=bottom)
            |  #check top_right is inside the window
            SQ(bbox_bottom__gte=bottom) & SQ(bbox_bottom__lte=top)
            & SQ(bbox_right__lte=right) & SQ(bbox_right__gte=left)
            |  #check bottom_right is inside the window
            SQ(bbox_top__lte=top) & SQ(bbox_top__gte=bottom)
            & SQ(bbox_left__gte=left) & SQ(bbox_left__lte=right)
            |  #check bottom_left is inside the window
            # then check if the bbox is including the window
            SQ(bbox_left__lte=left) & SQ(bbox_right__gte=right)
            & SQ(bbox_bottom__lte=bottom) & SQ(bbox_top__gte=top))

    # Filter by permissions
    '''
    ### Takes too long with many results.
    ### Instead, show all results but disable links on restricted ones.

    for i, result in enumerate(sqs):
        if result.type == 'layer':
            if not request.user.has_perm('layers.view_layer',obj = result.object):
                sqs = sqs.exclude(id = result.id)
        if result.type == 'map':
            if not request.user.has_perm('maps.view_map',obj = result.object):
                sqs = sqs.exclude(id = result.id)
    '''

    #filter by service
    '''
    if service:
        sqs = sqs.narrow('service:%s' % service)

    if local:
        sqs = sqs.narrow('local:%s' % local)
    '''

    # Apply Sort
    # TODO: Handle for Revised sort types
    # [relevance, alphabetically, rating, created, updated, popularity]
    if sort.lower() == "newest":
        sqs = sqs.order_by("-modified")
    elif sort.lower() == "oldest":
        sqs = sqs.order_by("modified")
    elif sort.lower() == "alphaaz":
        sqs = sqs.order_by("title_sortable")
    elif sort.lower() == "alphaza":
        sqs = sqs.order_by("-title_sortable")
    elif sort.lower() == "popularity":
        sqs = sqs.order_by("-popular_count")
    else:
        sqs = sqs.order_by("-_score")

    # Setup Search Results
    results = []
    items = []

    # Build the result based on the limit
    for i, result in enumerate(sqs[startIndex:startIndex + limit]):
        logger.info(result)
        data = result.get_stored_fields()
        resource = None
        # Serialize datetimes as ISO-8601-ish strings for JSON output.
        if "modified" in data:
            data["modified"] = data["modified"].strftime(
                "%Y-%m-%dT%H:%M:%S.%f")
        if "temporal_extent_start" in data and data[
                "temporal_extent_start"] is not None:
            data["temporal_extent_start"] = data[
                "temporal_extent_start"].strftime("%Y-%m-%dT%H:%M:%S.%f")
        if "temporal_extent_end" in data and data[
                "temporal_extent_end"] is not None:
            data["temporal_extent_end"] = data["temporal_extent_end"].strftime(
                "%Y-%m-%dT%H:%M:%S.%f")
        # Wrap each hit in the matching normalizer; note each branch does a
        # per-result database fetch by primary key.
        if data['type'] == "map":
            resource = MapNormalizer(Map.objects.get(pk=data['oid']))
        elif data['type'] == "layer":
            resource = LayerNormalizer(Layer.objects.get(pk=data['oid']))
        elif data['type'] == "user":
            resource = OwnerNormalizer(Profile.objects.get(pk=data['oid']))
        elif data['type'] == "document":
            resource = DocumentNormalizer(Document.objects.get(pk=data['oid']))
        elif data[
                'type'] == "group" and "geonode.contrib.groups" in settings.INSTALLED_APPS:
            resource = GroupNormalizer(Group.objects.get(pk=data['oid']))
        if resource:
            resource.rating = data["rating"] if "rating" in data else 0
        results.append(data)
        items.append(resource)

    # Setup Facet Counts
    sqs = sqs.facet("type").facet("subtype")

    sqs = sqs.facet('category')

    sqs = sqs.facet('keywords')

    sqs = sqs.facet('service')

    sqs = sqs.facet('local')

    facet_counts = sqs.facet_counts()

    # Prepare Search Results
    data = {
        "success":
        True,
        "total":
        sqs.count(),
        "query_info": {
            "q": query,
            "startIndex": startIndex,
            "limit": limit,
            "sort": sort,
            "type": type_facets,
        },
        "results":
        results,
        "facets":
        dict(
            facet_counts.get("fields")['type'] +
            facet_counts.get('fields')['subtype']) if sqs.count() > 0 else [],
        "categories": {
            facet[0]: facet[1]
            for facet in facet_counts.get('fields')['category']
        } if sqs.count() > 0 else {},
        "keywords": {
            facet[0]: facet[1]
            for facet in facet_counts.get('fields')['keywords']
        } if sqs.count() > 0 else {},
    }

    # Return Results
    ts1 = time() - ts

    if format == "html":  #Send to search/explore page
        return data, items
    elif format == "raw":
        return HttpResponse(json.dumps(data), mimetype="application/json")
    else:
        query = query_from_request(request, kwargs)
        return _search_json(query, items, data["facets"], ts1)
Пример #57
0
def races_formatted_search(sport='',
                           viewport=None,
                           start_date='',
                           end_date='',
                           distances=None,
                           search_expr='',
                           format='JSON'):
    """Search validated races and return results formatted for the map UI.

    :param sport: exact sport value to filter on (optional).
    :param viewport: map bounds as [lat_lo, lng_lo, lat_hi, lng_hi].
    :param start_date: inclusive lower date bound, 'YYYY-MM-DD'.
    :param end_date: inclusive upper date bound, 'YYYY-MM-DD'.
    :param distances: iterable of distance-category values to keep.
    :param search_expr: free-text query.
    :param format: 'dict' returns a dict; anything else a JSON string.
    :returns: count, race markers, rendered HTML rows and facet counts.
    """
    # FIX: the original used mutable default arguments (viewport=[],
    # distances=[]); normalise None to a fresh list instead.
    viewport = [] if viewport is None else viewport
    distances = [] if distances is None else distances

    sqs = SearchQuerySet()

    sqs = sqs.filter(validated="true")
    if sport:
        sqs = sqs.filter(sport__exact=sport)

    # Dates are required for the month facet below, so fall back to a
    # wide default window when the caller gives no bounds.
    # TODO : fix
    if start_date:
        sqs = sqs.filter(date__gte=start_date)
    else:
        start_date = '2010-01-01'

    if end_date:
        sqs = sqs.filter(date__lte=end_date)
    else:
        end_date = '2020-01-01'

    if distances:
        sqs = sqs.filter(distance_cat__in=distances)
    if search_expr:
        sqs = sqs.filter(content=search_expr)

    # Restrict to the current map viewport when all four bounds are given.
    if len(viewport) == 4:
        lat_lo, lng_lo, lat_hi, lng_hi = viewport

        if lat_lo and lng_lo and lat_hi and lng_hi:
            downtown_bottom_left = Point(float(lng_lo), float(lat_lo))
            downtown_top_right = Point(float(lng_hi), float(lat_hi))

            sqs = sqs.within('location', downtown_bottom_left, downtown_top_right)

    sqs = sqs.order_by('date')
    sqs = sqs.facet('distance_cat').facet('administrative_area_level_1').facet('administrative_area_level_2')

    # Month-granularity date facet used for the per-month spacers below.
    sqs = sqs.date_facet(field='date',
                         start_date=datetime.strptime(start_date, '%Y-%m-%d'),
                         end_date=datetime.strptime(end_date, '%Y-%m-%d'),
                         gap_by='month')

    facet = sqs.facet_counts()

    if len(facet) > 0:
        # datetime keys are not JSON-serialisable; convert to ISO strings.
        facet['dates']['date'] = [(f[0].isoformat(), f[1])
                                  for f in facet['dates']['date']]

    # build the JSON response
    races = []
    result_html = []

    # De-duplicate events (one event can hold several races) while walking
    # the date-ordered results; insert a spacer row whenever the month
    # changes.  month_iterator tracks the matching date-facet bucket.
    seen = {}
    rank = 1
    prev_month = cur_month = 0
    month_iterator = 0

    for sr in sqs:
        cur_month = sr.date.strftime('%m%Y')
        stored = sr.get_stored_fields()
        event_id = stored['event_id']
        event_title = stored['event_title']
        location = stored['location']
        rendered = stored['rendered']

        if event_id in seen:
            continue

        seen[event_id] = 1
        race_data = {'id': int(event_id),
                     'name': event_title,
                     'score': str(sr.score),
                     'rankClass': "secondary",
                     'lat': str(location.get_coords()[1]),
                     'lng': str(location.get_coords()[0]),
                     'date': sr.date.strftime('%Y%m%d'),
                     }

        races.append(race_data)

        if prev_month != cur_month:
            result_html.append(render_to_string('html_utils/result_month_spacer.html',
                                                {'last_date': sr.date,
                                                 'count':  facet['dates']['date'][month_iterator][1]
                                                 }))
            month_iterator += 1

        result_html.append(rendered)

        prev_month = cur_month
        rank += 1

    if not races:
        result_html = render_to_string('html_utils/search_no_result_alert.html', {'search_expr': search_expr})

    data = {'count': sqs.count(),
            'races': races,
            'html': result_html,
            'facet': facet,
            }
    if format.lower() == 'dict':
        return data
    return dumps(data)
Пример #58
0
class SearchQuerySetTestCase(TestCase):
    fixtures = ['bulk_data.json']

    def setUp(self):
        """Install a fresh UnifiedIndex over two mock indexes, populate the
        backend with all MockModel rows, and reset the query log."""
        super(SearchQuerySetTestCase, self).setUp()

        # Stow.
        self.old_unified_index = connections['default']._index
        self.ui = UnifiedIndex()
        self.bmmsi = BasicMockModelSearchIndex()
        self.cpkmmsi = CharPKMockModelSearchIndex()
        self.ui.build(indexes=[self.bmmsi, self.cpkmmsi])
        connections['default']._index = self.ui

        # Update the "index".
        backend = connections['default'].get_backend()
        backend.clear()
        backend.update(self.bmmsi, MockModel.objects.all())

        self.msqs = SearchQuerySet()

        # Clear the recorded-query log so per-test query counts start at 0.
        reset_search_queries()

    def tearDown(self):
        """Restore the UnifiedIndex stowed in setUp()."""
        connections['default']._index = self.old_unified_index
        super(SearchQuerySetTestCase, self).tearDown()

    def test_len(self):
        # len() reflects the backend hit count for the full fixture set.
        self.assertEqual(len(self.msqs), 23)

    def test_repr(self):
        """repr() must describe the queryset without hitting the backend."""
        reset_search_queries()
        self.assertEqual(len(connections['default'].queries), 0)
        self.assertRegexpMatches(
            repr(self.msqs),
            r'^<SearchQuerySet: query=<test_haystack.mocks.MockSearchQuery object'
            r' at 0x[0-9A-Fa-f]+>, using=None>$')

    def test_iter(self):
        """Full iteration yields every mock result in order and costs
        exactly three backend queries (cache fills)."""
        reset_search_queries()
        self.assertEqual(len(connections['default'].queries), 0)
        msqs = self.msqs.all()
        results = [int(res.pk) for res in msqs]
        self.assertEqual(results, [res.pk for res in MOCK_SEARCH_RESULTS[:23]])
        self.assertEqual(len(connections['default'].queries), 3)

    def test_slice(self):
        """Slicing and single-item indexing each cost one backend query."""
        reset_search_queries()
        self.assertEqual(len(connections['default'].queries), 0)
        results = self.msqs.all()
        self.assertEqual([int(res.pk) for res in results[1:11]],
                         [res.pk for res in MOCK_SEARCH_RESULTS[1:11]])
        self.assertEqual(len(connections['default'].queries), 1)

        reset_search_queries()
        self.assertEqual(len(connections['default'].queries), 0)
        results = self.msqs.all()
        self.assertEqual(int(results[22].pk), MOCK_SEARCH_RESULTS[22].pk)
        self.assertEqual(len(connections['default'].queries), 1)

    def test_manual_iter(self):
        """_manual_iter() streams every hit and must terminate even when
        fewer usable results come back than the hit count promised."""
        results = self.msqs.all()

        reset_search_queries()
        self.assertEqual(len(connections['default'].queries), 0)

        check = [result.pk for result in results._manual_iter()]
        self.assertEqual(check, [
            u'1', u'2', u'3', u'4', u'5', u'6', u'7', u'8', u'9', u'10', u'11',
            u'12', u'13', u'14', u'15', u'16', u'17', u'18', u'19', u'20',
            u'21', u'22', u'23'
        ])

        self.assertEqual(len(connections['default'].queries), 3)

        reset_search_queries()
        self.assertEqual(len(connections['default'].queries), 0)

        # Test to ensure we properly fill the cache, even if we get fewer
        # results back (not a handled model) than the hit count indicates.
        # This will hang indefinitely if broken.
        old_ui = self.ui
        self.ui.build(indexes=[self.cpkmmsi])
        connections['default']._index = self.ui
        self.cpkmmsi.update()

        results = self.msqs.all()
        loaded = [result.pk for result in results._manual_iter()]
        self.assertEqual(loaded, [u'sometext', u'1234'])
        self.assertEqual(len(connections['default'].queries), 1)

        connections['default']._index = old_ui

    def test_fill_cache(self):
        """_fill_cache() populates the result cache window by window, one
        backend query per window, and tolerates unhandled-model gaps."""
        reset_search_queries()
        self.assertEqual(len(connections['default'].queries), 0)
        results = self.msqs.all()
        self.assertEqual(len(results._result_cache), 0)
        self.assertEqual(len(connections['default'].queries), 0)
        results._fill_cache(0, 10)
        self.assertEqual(
            len([
                result for result in results._result_cache
                if result is not None
            ]), 10)
        self.assertEqual(len(connections['default'].queries), 1)
        results._fill_cache(10, 20)
        self.assertEqual(
            len([
                result for result in results._result_cache
                if result is not None
            ]), 20)
        self.assertEqual(len(connections['default'].queries), 2)

        reset_search_queries()
        self.assertEqual(len(connections['default'].queries), 0)

        # Test to ensure we properly fill the cache, even if we get fewer
        # results back (not a handled model) than the hit count indicates.
        sqs = SearchQuerySet().all()
        sqs.query.backend = MixedMockSearchBackend('default')
        results = sqs
        self.assertEqual(
            len([
                result for result in results._result_cache
                if result is not None
            ]), 0)
        self.assertEqual([
            int(result.pk)
            for result in results._result_cache if result is not None
        ], [])
        self.assertEqual(len(connections['default'].queries), 0)
        # The mixed backend drops some pks (e.g. 9, 13, 14), so each window
        # needs extra queries to top the cache up.
        results._fill_cache(0, 10)
        self.assertEqual(
            len([
                result for result in results._result_cache
                if result is not None
            ]), 9)
        self.assertEqual([
            int(result.pk)
            for result in results._result_cache if result is not None
        ], [1, 2, 3, 4, 5, 6, 7, 8, 10])
        self.assertEqual(len(connections['default'].queries), 2)
        results._fill_cache(10, 20)
        self.assertEqual(
            len([
                result for result in results._result_cache
                if result is not None
            ]), 17)
        self.assertEqual([
            int(result.pk)
            for result in results._result_cache if result is not None
        ], [1, 2, 3, 4, 5, 6, 7, 8, 10, 11, 12, 15, 16, 17, 18, 19, 20])
        self.assertEqual(len(connections['default'].queries), 4)
        results._fill_cache(20, 30)
        self.assertEqual(
            len([
                result for result in results._result_cache
                if result is not None
            ]), 20)
        self.assertEqual([
            int(result.pk)
            for result in results._result_cache if result is not None
        ], [
            1, 2, 3, 4, 5, 6, 7, 8, 10, 11, 12, 15, 16, 17, 18, 19, 20, 21, 22,
            23
        ])
        self.assertEqual(len(connections['default'].queries), 6)

    def test_cache_is_full(self):
        """_cache_is_full() is False before and True after full iteration."""
        reset_search_queries()
        self.assertEqual(len(connections['default'].queries), 0)
        self.assertEqual(self.msqs._cache_is_full(), False)
        results = self.msqs.all()
        # Exhausting the iterator forces the cache to fill completely.
        fire_the_iterator_and_fill_cache = [result for result in results]
        self.assertEqual(results._cache_is_full(), True)
        self.assertEqual(len(connections['default'].queries), 3)

    def test_all(self):
        """.all() hands back a SearchQuerySet clone."""
        cloned = self.msqs.all()
        self.assertTrue(isinstance(cloned, SearchQuerySet))

    def test_filter(self):
        """A single .filter() adds exactly one node to the query filter."""
        filtered = self.msqs.filter(content='foo')
        self.assertTrue(isinstance(filtered, SearchQuerySet))
        self.assertEqual(len(filtered.query.query_filter), 1)

    def test_exclude(self):
        """A single .exclude() also adds exactly one filter node."""
        excluded = self.msqs.exclude(content='foo')
        self.assertTrue(isinstance(excluded, SearchQuerySet))
        self.assertEqual(len(excluded.query.query_filter), 1)

    def test_order_by(self):
        """.order_by() records the field name on the underlying query."""
        ordered = self.msqs.order_by('foo')
        self.assertTrue(isinstance(ordered, SearchQuerySet))
        self.assertTrue('foo' in ordered.query.order_by)

    def test_models(self):
        """.models() restricts the query to the given model classes."""
        # Stow the active index and install one that knows both models.
        stowed_index = connections['default']._index
        unified = UnifiedIndex()
        basic_index = BasicMockModelSearchIndex()
        another_index = BasicAnotherMockModelSearchIndex()
        unified.build(indexes=[basic_index, another_index])
        connections['default']._index = unified

        queryset = SearchQuerySet()

        everything = queryset.all()
        self.assertTrue(isinstance(everything, SearchQuerySet))
        self.assertEqual(len(everything.query.models), 0)

        one_model = queryset.models(MockModel)
        self.assertTrue(isinstance(one_model, SearchQuerySet))
        self.assertEqual(len(one_model.query.models), 1)

        two_models = queryset.models(MockModel, AnotherMockModel)
        self.assertTrue(isinstance(two_models, SearchQuerySet))
        self.assertEqual(len(two_models.query.models), 2)

        # This will produce a warning.
        unified.build(indexes=[basic_index])
        unindexed = queryset.models(AnotherMockModel)
        self.assertTrue(isinstance(unindexed, SearchQuerySet))
        self.assertEqual(len(unindexed.query.models), 1)

    def test_result_class(self):
        """result_class() swaps the result type; passing None restores the
        SearchResult default."""
        default_sqs = self.msqs.all()
        self.assertTrue(issubclass(default_sqs.query.result_class, SearchResult))

        # Custom class.
        class IttyBittyResult(object):
            pass

        custom_sqs = self.msqs.result_class(IttyBittyResult)
        self.assertTrue(issubclass(custom_sqs.query.result_class, IttyBittyResult))

        restored = self.msqs.result_class(None)
        self.assertTrue(issubclass(restored.query.result_class, SearchResult))

    def test_boost(self):
        """.boost() records exactly one boosted term on the query."""
        boosted = self.msqs.boost('foo', 10)
        self.assertTrue(isinstance(boosted, SearchQuerySet))
        self.assertEqual(len(boosted.query.boost.keys()), 1)

    def test_highlight(self):
        """.highlight() flips the highlight flag on the query."""
        highlighted = self.msqs.highlight()
        self.assertTrue(isinstance(highlighted, SearchQuerySet))
        self.assertEqual(highlighted.query.highlight, True)

    def test_spelling(self):
        """With spelling support disabled, suggestions are always None."""
        misspelled = self.msqs.filter(content='Indx')
        self.assertTrue(isinstance(misspelled, SearchQuerySet))
        self.assertEqual(misspelled.spelling_suggestion(), None)
        self.assertEqual(misspelled.spelling_suggestion('indexy'), None)

    def test_raw_search(self):
        """Raw queries are passed through untouched; both forms match all
        23 mock documents."""
        self.assertEqual(len(self.msqs.raw_search('foo')), 23)
        compound = self.msqs.raw_search(
            '(content__exact:hello AND content__exact:world)')
        self.assertEqual(len(compound), 23)

    def test_load_all(self):
        """load_all() resolves results to model instances; unhandled models
        yield nothing."""
        # Models with character primary keys.
        sqs = SearchQuerySet()
        sqs.query.backend = CharPKMockSearchBackend('charpk')
        results = sqs.load_all().all()
        self.assertEqual(len(results._result_cache), 0)
        results._fill_cache(0, 2)
        self.assertEqual(
            len([
                result for result in results._result_cache
                if result is not None
            ]), 2)

        # If nothing is handled, you get nothing.
        old_ui = connections['default']._index
        ui = UnifiedIndex()
        ui.build(indexes=[])
        connections['default']._index = ui

        sqs = self.msqs.load_all()
        self.assertTrue(isinstance(sqs, SearchQuerySet))
        self.assertEqual(len(sqs), 0)

        connections['default']._index = old_ui

        # For full tests, see the solr_backend.

    def test_load_all_read_queryset(self):
        """load_all() honours the index's read_queryset(): deleted rows are
        dropped unless read_queryset() deliberately includes them."""
        # Stow.
        old_ui = connections['default']._index
        ui = UnifiedIndex()
        gafmmsi = GhettoAFifthMockModelSearchIndex()
        ui.build(indexes=[gafmmsi])
        connections['default']._index = ui
        gafmmsi.update()

        sqs = SearchQuerySet()
        results = sqs.load_all().all()
        results.query.backend = ReadQuerySetMockSearchBackend('default')
        results._fill_cache(0, 2)

        # The deleted result isn't returned
        self.assertEqual(
            len([
                result for result in results._result_cache
                if result is not None
            ]), 1)

        # Register a SearchIndex with a read_queryset that returns deleted items
        rqstsi = TextReadQuerySetTestSearchIndex()
        ui.build(indexes=[rqstsi])
        rqstsi.update()

        sqs = SearchQuerySet()
        results = sqs.load_all().all()
        results.query.backend = ReadQuerySetMockSearchBackend('default')
        results._fill_cache(0, 2)

        # Both the deleted and not deleted items are returned
        self.assertEqual(
            len([
                result for result in results._result_cache
                if result is not None
            ]), 2)

        # Restore.
        connections['default']._index = old_ui

    def test_auto_query(self):
        """auto_query() should fold raw user input into a single filter on
        the chosen field (default: content), keeping quoting intact."""
        sqs = self.msqs.auto_query('test search -stuff')
        self.assertTrue(isinstance(sqs, SearchQuerySet))
        self.assertEqual(repr(sqs.query.query_filter),
                         '<SQ: AND content__contains=test search -stuff>')

        sqs = self.msqs.auto_query('test "my thing" search -stuff')
        self.assertTrue(isinstance(sqs, SearchQuerySet))
        self.assertEqual(
            repr(sqs.query.query_filter),
            '<SQ: AND content__contains=test "my thing" search -stuff>')

        sqs = self.msqs.auto_query(
            'test "my thing" search \'moar quotes\' -stuff')
        self.assertTrue(isinstance(sqs, SearchQuerySet))
        self.assertEqual(
            repr(sqs.query.query_filter),
            '<SQ: AND content__contains=test "my thing" search \'moar quotes\' -stuff>'
        )

        # An unbalanced double quote is passed through verbatim.
        sqs = self.msqs.auto_query(
            'test "my thing" search \'moar quotes\' "foo -stuff')
        self.assertTrue(isinstance(sqs, SearchQuerySet))
        self.assertEqual(
            repr(sqs.query.query_filter),
            '<SQ: AND content__contains=test "my thing" search \'moar quotes\' "foo -stuff>'
        )

        sqs = self.msqs.auto_query('test - stuff')
        self.assertTrue(isinstance(sqs, SearchQuerySet))
        self.assertEqual(repr(sqs.query.query_filter),
                         "<SQ: AND content__contains=test - stuff>")

        # Ensure bits in exact matches get escaped properly as well.
        sqs = self.msqs.auto_query('"pants:rule"')
        self.assertTrue(isinstance(sqs, SearchQuerySet))
        self.assertEqual(repr(sqs.query.query_filter),
                         '<SQ: AND content__contains="pants:rule">')

        # Now with a different fieldname
        sqs = self.msqs.auto_query('test search -stuff', fieldname='title')
        self.assertTrue(isinstance(sqs, SearchQuerySet))
        self.assertEqual(repr(sqs.query.query_filter),
                         "<SQ: AND title__contains=test search -stuff>")

        sqs = self.msqs.auto_query('test "my thing" search -stuff',
                                   fieldname='title')
        self.assertTrue(isinstance(sqs, SearchQuerySet))
        self.assertEqual(
            repr(sqs.query.query_filter),
            '<SQ: AND title__contains=test "my thing" search -stuff>')

    def test_count(self):
        """The mock backend reports 23 hits for an unfiltered queryset."""
        expected_total = 23
        self.assertEqual(self.msqs.count(), expected_total)

    def test_facet_counts(self):
        """With no facets requested, facet_counts() is an empty dict."""
        counts = self.msqs.facet_counts()
        self.assertEqual(counts, {})

    def test_best_match(self):
        """best_match() hands back a single SearchResult instance."""
        match = self.msqs.best_match()
        self.assertTrue(isinstance(match, SearchResult))

    def test_latest(self):
        """latest() on a date field yields a single SearchResult."""
        newest = self.msqs.latest('pub_date')
        self.assertTrue(isinstance(newest, SearchResult))

    def test_more_like_this(self):
        """more_like_this() returns the backend's full mock result set."""
        sample = MockModel()
        sample.id = 1

        similar = self.msqs.more_like_this(sample)
        self.assertEqual(len(similar), 23)

    def test_facets(self):
        """Each facet() call registers one more field facet on the query."""
        single = self.msqs.facet('foo')
        self.assertTrue(isinstance(single, SearchQuerySet))
        self.assertEqual(len(single.query.facets), 1)

        chained = self.msqs.facet('foo').facet('bar')
        self.assertTrue(isinstance(chained, SearchQuerySet))
        self.assertEqual(len(chained.query.facets), 2)

    def test_date_facets(self):
        """date_facet() rejects unknown gap_by values and accumulates one
        entry per faceted field otherwise."""
        # An unrecognised gap_by must raise a descriptive FacetingError.
        with self.assertRaises(FacetingError) as ctx:
            self.msqs.date_facet('foo',
                                 start_date=datetime.date(2008, 2, 25),
                                 end_date=datetime.date(2009, 2, 25),
                                 gap_by='smarblaph')
        self.assertEqual(
            str(ctx.exception),
            "The gap_by ('smarblaph') must be one of the following: year, month, day, hour, minute, second."
        )

        monthly = self.msqs.date_facet('foo',
                                       start_date=datetime.date(2008, 2, 25),
                                       end_date=datetime.date(2009, 2, 25),
                                       gap_by='month')
        self.assertTrue(isinstance(monthly, SearchQuerySet))
        self.assertEqual(len(monthly.query.date_facets), 1)

        # Chaining a second field adds a second date facet.
        chained = monthly.date_facet('bar',
                                     start_date=datetime.date(2007, 2, 25),
                                     end_date=datetime.date(2009, 2, 25),
                                     gap_by='year')
        self.assertTrue(isinstance(chained, SearchQuerySet))
        self.assertEqual(len(chained.query.date_facets), 2)

    def test_query_facets(self):
        """Every query_facet() call appends, even when a field repeats."""
        cases = (
            (self.msqs.query_facet('foo', '[bar TO *]'), 1),
            (self.msqs.query_facet('foo', '[bar TO *]')
                 .query_facet('bar', '[100 TO 499]'), 2),
            # Multiple query facets on a single field are all retained.
            (self.msqs.query_facet('foo', '[bar TO *]')
                 .query_facet('bar', '[100 TO 499]')
                 .query_facet('foo', '[1000 TO 1499]'), 3),
        )
        for sqs, expected in cases:
            self.assertTrue(isinstance(sqs, SearchQuerySet))
            self.assertEqual(len(sqs.query.query_facets), expected)

    def test_stats(self):
        """stats_facet() keys stats by field, merging repeated fields."""
        one_field = self.msqs.stats_facet('foo', 'bar')
        self.assertTrue(isinstance(one_field, SearchQuerySet))
        self.assertEqual(len(one_field.query.stats), 1)

        # Faceting the same field twice folds into the existing entry.
        same_field = self.msqs.stats_facet('foo', 'bar').stats_facet('foo', 'baz')
        self.assertTrue(isinstance(same_field, SearchQuerySet))
        self.assertEqual(len(same_field.query.stats), 1)

        # A distinct field gets its own entry.
        two_fields = self.msqs.stats_facet('foo', 'bar').stats_facet('moof', 'baz')
        self.assertTrue(isinstance(two_fields, SearchQuerySet))
        self.assertEqual(len(two_fields.query.stats), 2)

    def test_narrow(self):
        """narrow() records one raw narrowing query on the underlying query."""
        narrowed = self.msqs.narrow('foo:moof')
        self.assertTrue(isinstance(narrowed, SearchQuerySet))
        self.assertEqual(len(narrowed.query.narrow_queries), 1)

    def test_clone(self):
        """_clone() copies the query but starts with an empty result cache."""
        original = self.msqs.filter(foo='bar', foo__lt='10')
        duplicate = original._clone()

        self.assertTrue(isinstance(duplicate, SearchQuerySet))
        # Same underlying query and connection...
        self.assertEqual(str(duplicate.query), str(original.query))
        self.assertEqual(duplicate._using, original._using)
        # ...but none of the cached result state comes along.
        self.assertEqual(duplicate._result_cache, [])
        self.assertEqual(duplicate._result_count, None)
        self.assertEqual(duplicate._cache_full, False)

    def test_using(self):
        """Constructing with using= pins the query to that connection."""
        pinned = SearchQuerySet(using='default')
        self.assertNotEqual(pinned.query, None)
        self.assertEqual(pinned.query._using, 'default')

    def test_chaining(self):
        """Filters applied on one chain must not leak into a fresh chain."""
        first = self.msqs.filter(content='foo')
        self.assertTrue(isinstance(first, SearchQuerySet))
        self.assertEqual(len(first.query.query_filter), 1)

        # A second chain off self.msqs starts from a clean slate.
        second = self.msqs.filter(content='bar')
        self.assertTrue(isinstance(second, SearchQuerySet))
        self.assertEqual(len(second.query.query_filter), 1)

    def test_none(self):
        """none() yields an empty queryset of the Empty* variety."""
        empty = self.msqs.none()
        self.assertTrue(isinstance(empty, EmptySearchQuerySet))
        self.assertEqual(len(empty), 0)

    def test___and__(self):
        """The & operator merges the filters of both querysets."""
        left = self.msqs.filter(content='foo')
        right = self.msqs.filter(content='bar')
        combined = left & right

        self.assertTrue(isinstance(combined, SearchQuerySet))
        self.assertEqual(len(combined.query.query_filter), 2)

    def test___or__(self):
        """The | operator merges the filters of both querysets."""
        left = self.msqs.filter(content='foo')
        right = self.msqs.filter(content='bar')
        combined = left | right

        self.assertTrue(isinstance(combined, SearchQuerySet))
        self.assertEqual(len(combined.query.query_filter), 2)

    def test_and_or(self):
        """
        Combining AND queries with OR should give
            AND(OR(a, b), OR(c, d))
        """
        left = self.msqs.filter(content='foo').filter(content='oof')
        right = self.msqs.filter(content='bar').filter(content='rab')
        merged = left | right

        self.assertEqual(merged.query.query_filter.connector, 'OR')
        # Each operand's filter tree survives intact as a child of the OR.
        for child, source in zip(merged.query.query_filter.children,
                                 (left, right)):
            self.assertEqual(repr(child), repr(source.query.query_filter))

    def test_or_and(self):
        """
        Combining OR queries with AND should give
            OR(AND(a, b), AND(c, d))
        """
        left = self.msqs.filter(content='foo').filter_or(content='oof')
        right = self.msqs.filter(content='bar').filter_or(content='rab')
        merged = left & right

        self.assertEqual(merged.query.query_filter.connector, 'AND')
        # Each operand's filter tree survives intact as a child of the AND.
        for child, source in zip(merged.query.query_filter.children,
                                 (left, right)):
            self.assertEqual(repr(child), repr(source.query.query_filter))
# Example #59 (scraped-sample separator; originally bare, non-comment text
# that would raise NameError at import -- converted to a comment)
class SearchQuerySetTestCase(TestCase):
    """Exercise SearchQuerySet chaining, caching, slicing, and faceting
    against the mock search backend (23 canned hits in MOCK_SEARCH_RESULTS).

    setUp stows the live UnifiedIndex and installs mock indexes; tearDown
    restores it so other test cases are unaffected.
    """

    fixtures = ["base_data.json", "bulk_data.json"]

    def setUp(self):
        super(SearchQuerySetTestCase, self).setUp()

        # Stow.
        self.old_unified_index = connections["default"]._index
        self.ui = UnifiedIndex()
        self.bmmsi = BasicMockModelSearchIndex()
        self.cpkmmsi = CharPKMockModelSearchIndex()
        self.ui.build(indexes=[self.bmmsi, self.cpkmmsi])
        connections["default"]._index = self.ui

        # Update the "index".
        backend = connections["default"].get_backend()
        backend.clear()
        backend.update(self.bmmsi, MockModel.objects.all())

        self.msqs = SearchQuerySet()

        # Stow.
        reset_search_queries()

    def tearDown(self):
        # Restore.
        connections["default"]._index = self.old_unified_index
        super(SearchQuerySetTestCase, self).tearDown()

    def test_len(self):
        self.assertEqual(len(self.msqs), 23)

    def test_repr(self):
        reset_search_queries()
        self.assertEqual(len(connections["default"].queries), 0)
        # FIX: assertRegexpMatches is a deprecated alias removed in
        # Python 3.12; assertRegex is the supported spelling (2.7+/3.2+).
        self.assertRegex(
            repr(self.msqs),
            r"^<SearchQuerySet: query=<test_haystack.mocks.MockSearchQuery object" r" at 0x[0-9A-Fa-f]+>, using=None>$",
        )

    def test_iter(self):
        reset_search_queries()
        self.assertEqual(len(connections["default"].queries), 0)
        msqs = self.msqs.all()
        results = [int(res.pk) for res in iter(msqs)]
        self.assertEqual(results, [res.pk for res in MOCK_SEARCH_RESULTS[:23]])
        self.assertEqual(len(connections["default"].queries), 3)

    def test_slice(self):
        reset_search_queries()
        self.assertEqual(len(connections["default"].queries), 0)
        results = self.msqs.all()
        self.assertEqual([int(res.pk) for res in results[1:11]], [res.pk for res in MOCK_SEARCH_RESULTS[1:11]])
        # A slice should only cost a single backend query.
        self.assertEqual(len(connections["default"].queries), 1)

        reset_search_queries()
        self.assertEqual(len(connections["default"].queries), 0)
        results = self.msqs.all()
        self.assertEqual(int(results[22].pk), MOCK_SEARCH_RESULTS[22].pk)
        self.assertEqual(len(connections["default"].queries), 1)

    def test_manual_iter(self):
        results = self.msqs.all()

        reset_search_queries()
        self.assertEqual(len(connections["default"].queries), 0)

        check = [result.pk for result in results._manual_iter()]
        self.assertEqual(
            check,
            [
                "1",
                "2",
                "3",
                "4",
                "5",
                "6",
                "7",
                "8",
                "9",
                "10",
                "11",
                "12",
                "13",
                "14",
                "15",
                "16",
                "17",
                "18",
                "19",
                "20",
                "21",
                "22",
                "23",
            ],
        )

        self.assertEqual(len(connections["default"].queries), 3)

        reset_search_queries()
        self.assertEqual(len(connections["default"].queries), 0)

        # Test to ensure we properly fill the cache, even if we get fewer
        # results back (not a handled model) than the hit count indicates.
        # This will hang indefinitely if broken.
        old_ui = self.ui
        self.ui.build(indexes=[self.cpkmmsi])
        connections["default"]._index = self.ui
        self.cpkmmsi.update()

        results = self.msqs.all()
        loaded = [result.pk for result in results._manual_iter()]
        self.assertEqual(loaded, ["sometext", "1234"])
        self.assertEqual(len(connections["default"].queries), 1)

        connections["default"]._index = old_ui

    def test_cache_is_full(self):
        reset_search_queries()
        self.assertEqual(len(connections["default"].queries), 0)
        self.assertEqual(self.msqs._cache_is_full(), False)
        results = self.msqs.all()
        # Exhausting the iterator is what fills the cache.
        fire_the_iterator_and_fill_cache = list(results)
        self.assertEqual(23, len(fire_the_iterator_and_fill_cache))
        self.assertEqual(results._cache_is_full(), True)
        self.assertEqual(len(connections["default"].queries), 4)

    def test_all(self):
        sqs = self.msqs.all()
        self.assertTrue(isinstance(sqs, SearchQuerySet))

    def test_filter(self):
        sqs = self.msqs.filter(content="foo")
        self.assertTrue(isinstance(sqs, SearchQuerySet))
        self.assertEqual(len(sqs.query.query_filter), 1)

    def test_exclude(self):
        sqs = self.msqs.exclude(content="foo")
        self.assertTrue(isinstance(sqs, SearchQuerySet))
        self.assertEqual(len(sqs.query.query_filter), 1)

    def test_order_by(self):
        sqs = self.msqs.order_by("foo")
        self.assertTrue(isinstance(sqs, SearchQuerySet))
        self.assertTrue("foo" in sqs.query.order_by)

    def test_models(self):
        # Stow.
        old_unified_index = connections["default"]._index
        ui = UnifiedIndex()
        bmmsi = BasicMockModelSearchIndex()
        bammsi = BasicAnotherMockModelSearchIndex()
        ui.build(indexes=[bmmsi, bammsi])
        connections["default"]._index = ui

        msqs = SearchQuerySet()

        sqs = msqs.all()
        self.assertTrue(isinstance(sqs, SearchQuerySet))
        self.assertEqual(len(sqs.query.models), 0)

        sqs = msqs.models(MockModel)
        self.assertTrue(isinstance(sqs, SearchQuerySet))
        self.assertEqual(len(sqs.query.models), 1)

        sqs = msqs.models(MockModel, AnotherMockModel)
        self.assertTrue(isinstance(sqs, SearchQuerySet))
        self.assertEqual(len(sqs.query.models), 2)

        # This will produce a warning.
        ui.build(indexes=[bmmsi])
        sqs = msqs.models(AnotherMockModel)
        self.assertTrue(isinstance(sqs, SearchQuerySet))
        self.assertEqual(len(sqs.query.models), 1)

    def test_result_class(self):
        sqs = self.msqs.all()
        self.assertTrue(issubclass(sqs.query.result_class, SearchResult))

        # Custom class.
        class IttyBittyResult(object):
            pass

        sqs = self.msqs.result_class(IttyBittyResult)
        self.assertTrue(issubclass(sqs.query.result_class, IttyBittyResult))

        # Reset to default.
        sqs = self.msqs.result_class(None)
        self.assertTrue(issubclass(sqs.query.result_class, SearchResult))

    def test_boost(self):
        sqs = self.msqs.boost("foo", 10)
        self.assertTrue(isinstance(sqs, SearchQuerySet))
        self.assertEqual(len(sqs.query.boost.keys()), 1)

    def test_highlight(self):
        sqs = self.msqs.highlight()
        self.assertTrue(isinstance(sqs, SearchQuerySet))
        self.assertEqual(sqs.query.highlight, True)

    def test_spelling_override(self):
        sqs = self.msqs.filter(content="not the spellchecking query")
        self.assertEqual(sqs.query.spelling_query, None)
        sqs = self.msqs.set_spelling_query("override")
        self.assertTrue(isinstance(sqs, SearchQuerySet))
        self.assertEqual(sqs.query.spelling_query, "override")

    def test_spelling_suggestions(self):
        # Test the case where spelling support is disabled.
        sqs = self.msqs.filter(content="Indx")
        self.assertTrue(isinstance(sqs, SearchQuerySet))
        self.assertEqual(sqs.spelling_suggestion(), None)
        self.assertEqual(sqs.spelling_suggestion("indexy"), None)

    def test_raw_search(self):
        self.assertEqual(len(self.msqs.raw_search("foo")), 23)
        self.assertEqual(len(self.msqs.raw_search("(content__exact:hello AND content__exact:world)")), 23)

    def test_load_all(self):
        # Models with character primary keys.
        sqs = SearchQuerySet()
        sqs.query.backend = CharPKMockSearchBackend("charpk")
        results = sqs.load_all().all()
        self.assertEqual(len(results._result_cache), 0)
        results._fill_cache(0, 2)
        self.assertEqual(len([result for result in results._result_cache if result is not None]), 2)

        # If nothing is handled, you get nothing.
        old_ui = connections["default"]._index
        ui = UnifiedIndex()
        ui.build(indexes=[])
        connections["default"]._index = ui

        sqs = self.msqs.load_all()
        self.assertTrue(isinstance(sqs, SearchQuerySet))
        self.assertEqual(len(sqs), 0)

        connections["default"]._index = old_ui

        # For full tests, see the solr_backend.

    def test_load_all_read_queryset(self):
        # Stow.
        old_ui = connections["default"]._index
        ui = UnifiedIndex()
        gafmmsi = GhettoAFifthMockModelSearchIndex()
        ui.build(indexes=[gafmmsi])
        connections["default"]._index = ui
        gafmmsi.update()

        sqs = SearchQuerySet()
        results = sqs.load_all().all()
        results.query.backend = ReadQuerySetMockSearchBackend("default")
        results._fill_cache(0, 2)

        # The deleted result isn't returned
        self.assertEqual(len([result for result in results._result_cache if result is not None]), 1)

        # Register a SearchIndex with a read_queryset that returns deleted items
        rqstsi = TextReadQuerySetTestSearchIndex()
        ui.build(indexes=[rqstsi])
        rqstsi.update()

        sqs = SearchQuerySet()
        results = sqs.load_all().all()
        results.query.backend = ReadQuerySetMockSearchBackend("default")
        results._fill_cache(0, 2)

        # Both the deleted and not deleted items are returned
        self.assertEqual(len([result for result in results._result_cache if result is not None]), 2)

        # Restore.
        connections["default"]._index = old_ui

    def test_auto_query(self):
        sqs = self.msqs.auto_query("test search -stuff")
        self.assertTrue(isinstance(sqs, SearchQuerySet))
        self.assertEqual(repr(sqs.query.query_filter), "<SQ: AND content__content=test search -stuff>")

        sqs = self.msqs.auto_query('test "my thing" search -stuff')
        self.assertTrue(isinstance(sqs, SearchQuerySet))
        self.assertEqual(repr(sqs.query.query_filter), '<SQ: AND content__content=test "my thing" search -stuff>')

        sqs = self.msqs.auto_query("test \"my thing\" search 'moar quotes' -stuff")
        self.assertTrue(isinstance(sqs, SearchQuerySet))
        self.assertEqual(
            repr(sqs.query.query_filter), "<SQ: AND content__content=test \"my thing\" search 'moar quotes' -stuff>"
        )

        sqs = self.msqs.auto_query('test "my thing" search \'moar quotes\' "foo -stuff')
        self.assertTrue(isinstance(sqs, SearchQuerySet))
        self.assertEqual(
            repr(sqs.query.query_filter),
            '<SQ: AND content__content=test "my thing" search \'moar quotes\' "foo -stuff>',
        )

        sqs = self.msqs.auto_query("test - stuff")
        self.assertTrue(isinstance(sqs, SearchQuerySet))
        self.assertEqual(repr(sqs.query.query_filter), "<SQ: AND content__content=test - stuff>")

        # Ensure bits in exact matches get escaped properly as well.
        sqs = self.msqs.auto_query('"pants:rule"')
        self.assertTrue(isinstance(sqs, SearchQuerySet))
        self.assertEqual(repr(sqs.query.query_filter), '<SQ: AND content__content="pants:rule">')

        # Now with a different fieldname
        sqs = self.msqs.auto_query("test search -stuff", fieldname="title")
        self.assertTrue(isinstance(sqs, SearchQuerySet))
        self.assertEqual(repr(sqs.query.query_filter), "<SQ: AND title__content=test search -stuff>")

        sqs = self.msqs.auto_query('test "my thing" search -stuff', fieldname="title")
        self.assertTrue(isinstance(sqs, SearchQuerySet))
        self.assertEqual(repr(sqs.query.query_filter), '<SQ: AND title__content=test "my thing" search -stuff>')

    def test_count(self):
        self.assertEqual(self.msqs.count(), 23)

    def test_facet_counts(self):
        self.assertEqual(self.msqs.facet_counts(), {})

    def test_best_match(self):
        self.assertTrue(isinstance(self.msqs.best_match(), SearchResult))

    def test_latest(self):
        self.assertTrue(isinstance(self.msqs.latest("pub_date"), SearchResult))

    def test_more_like_this(self):
        mock = MockModel()
        mock.id = 1

        self.assertEqual(len(self.msqs.more_like_this(mock)), 23)

    def test_facets(self):
        sqs = self.msqs.facet("foo")
        self.assertTrue(isinstance(sqs, SearchQuerySet))
        self.assertEqual(len(sqs.query.facets), 1)

        sqs2 = self.msqs.facet("foo").facet("bar")
        self.assertTrue(isinstance(sqs2, SearchQuerySet))
        self.assertEqual(len(sqs2.query.facets), 2)

    def test_date_facets(self):
        try:
            sqs = self.msqs.date_facet(
                "foo", start_date=datetime.date(2008, 2, 25), end_date=datetime.date(2009, 2, 25), gap_by="smarblaph"
            )
            self.fail()
        except FacetingError as e:
            self.assertEqual(
                str(e), "The gap_by ('smarblaph') must be one of the following: year, month, day, hour, minute, second."
            )

        sqs = self.msqs.date_facet(
            "foo", start_date=datetime.date(2008, 2, 25), end_date=datetime.date(2009, 2, 25), gap_by="month"
        )
        self.assertTrue(isinstance(sqs, SearchQuerySet))
        self.assertEqual(len(sqs.query.date_facets), 1)

        sqs2 = self.msqs.date_facet(
            "foo", start_date=datetime.date(2008, 2, 25), end_date=datetime.date(2009, 2, 25), gap_by="month"
        ).date_facet("bar", start_date=datetime.date(2007, 2, 25), end_date=datetime.date(2009, 2, 25), gap_by="year")
        self.assertTrue(isinstance(sqs2, SearchQuerySet))
        self.assertEqual(len(sqs2.query.date_facets), 2)

    def test_query_facets(self):
        sqs = self.msqs.query_facet("foo", "[bar TO *]")
        self.assertTrue(isinstance(sqs, SearchQuerySet))
        self.assertEqual(len(sqs.query.query_facets), 1)

        sqs2 = self.msqs.query_facet("foo", "[bar TO *]").query_facet("bar", "[100 TO 499]")
        self.assertTrue(isinstance(sqs2, SearchQuerySet))
        self.assertEqual(len(sqs2.query.query_facets), 2)

        # Test multiple query facets on a single field
        sqs3 = (
            self.msqs.query_facet("foo", "[bar TO *]")
            .query_facet("bar", "[100 TO 499]")
            .query_facet("foo", "[1000 TO 1499]")
        )
        self.assertTrue(isinstance(sqs3, SearchQuerySet))
        self.assertEqual(len(sqs3.query.query_facets), 3)

    def test_stats(self):
        sqs = self.msqs.stats_facet("foo", "bar")
        self.assertTrue(isinstance(sqs, SearchQuerySet))
        self.assertEqual(len(sqs.query.stats), 1)

        sqs2 = self.msqs.stats_facet("foo", "bar").stats_facet("foo", "baz")
        self.assertTrue(isinstance(sqs2, SearchQuerySet))
        self.assertEqual(len(sqs2.query.stats), 1)

        sqs3 = self.msqs.stats_facet("foo", "bar").stats_facet("moof", "baz")
        self.assertTrue(isinstance(sqs3, SearchQuerySet))
        self.assertEqual(len(sqs3.query.stats), 2)

    def test_narrow(self):
        sqs = self.msqs.narrow("foo:moof")
        self.assertTrue(isinstance(sqs, SearchQuerySet))
        self.assertEqual(len(sqs.query.narrow_queries), 1)

    def test_clone(self):
        results = self.msqs.filter(foo="bar", foo__lt="10")

        clone = results._clone()
        self.assertTrue(isinstance(clone, SearchQuerySet))
        self.assertEqual(str(clone.query), str(results.query))
        self.assertEqual(clone._result_cache, [])
        self.assertEqual(clone._result_count, None)
        self.assertEqual(clone._cache_full, False)
        self.assertEqual(clone._using, results._using)

    def test_using(self):
        sqs = SearchQuerySet(using="default")
        self.assertNotEqual(sqs.query, None)
        self.assertEqual(sqs.query._using, "default")

    def test_chaining(self):
        sqs = self.msqs.filter(content="foo")
        self.assertTrue(isinstance(sqs, SearchQuerySet))
        self.assertEqual(len(sqs.query.query_filter), 1)

        # A second instance should inherit none of the changes from above.
        sqs = self.msqs.filter(content="bar")
        self.assertTrue(isinstance(sqs, SearchQuerySet))
        self.assertEqual(len(sqs.query.query_filter), 1)

    def test_none(self):
        sqs = self.msqs.none()
        self.assertTrue(isinstance(sqs, EmptySearchQuerySet))
        self.assertEqual(len(sqs), 0)

    def test___and__(self):
        sqs1 = self.msqs.filter(content="foo")
        sqs2 = self.msqs.filter(content="bar")
        sqs = sqs1 & sqs2

        self.assertTrue(isinstance(sqs, SearchQuerySet))
        self.assertEqual(len(sqs.query.query_filter), 2)

    def test___or__(self):
        sqs1 = self.msqs.filter(content="foo")
        sqs2 = self.msqs.filter(content="bar")
        sqs = sqs1 | sqs2

        self.assertTrue(isinstance(sqs, SearchQuerySet))
        self.assertEqual(len(sqs.query.query_filter), 2)

    def test_and_or(self):
        """
        Combining AND queries with OR should give
            AND(OR(a, b), OR(c, d))
        """
        sqs1 = self.msqs.filter(content="foo").filter(content="oof")
        sqs2 = self.msqs.filter(content="bar").filter(content="rab")
        sqs = sqs1 | sqs2

        self.assertEqual(sqs.query.query_filter.connector, "OR")
        self.assertEqual(repr(sqs.query.query_filter.children[0]), repr(sqs1.query.query_filter))
        self.assertEqual(repr(sqs.query.query_filter.children[1]), repr(sqs2.query.query_filter))

    def test_or_and(self):
        """
        Combining OR queries with AND should give
            OR(AND(a, b), AND(c, d))
        """
        sqs1 = self.msqs.filter(content="foo").filter_or(content="oof")
        sqs2 = self.msqs.filter(content="bar").filter_or(content="rab")
        sqs = sqs1 & sqs2

        self.assertEqual(sqs.query.query_filter.connector, "AND")
        self.assertEqual(repr(sqs.query.query_filter.children[0]), repr(sqs1.query.query_filter))
        self.assertEqual(repr(sqs.query.query_filter.children[1]), repr(sqs2.query.query_filter))
# Example #60 (scraped-sample separator; originally bare, non-comment text
# that would raise NameError at import -- converted to a comment)
class SearchQuerySetTestCase(TestCase):
    fixtures = ["base_data.json", "bulk_data.json"]

    def setUp(self):
        """Swap a mock ``UnifiedIndex`` into the default connection and seed the backend.

        Keeps a reference to the real index so ``tearDown`` can restore it.
        """
        super(SearchQuerySetTestCase, self).setUp()

        # Stow the real index and install our own with three mock indexes.
        self.old_unified_index = connections["default"]._index
        self.ui = UnifiedIndex()
        self.bmmsi = BasicMockModelSearchIndex()
        self.cpkmmsi = CharPKMockModelSearchIndex()
        self.uuidmmsi = SimpleMockUUIDModelIndex()
        self.ui.build(indexes=[self.bmmsi, self.cpkmmsi, self.uuidmmsi])
        connections["default"]._index = self.ui

        # Update the "index": only MockModel rows are indexed up front; the
        # CharPK/UUID indexes are populated per-test as needed.
        backend = connections["default"].get_backend()
        backend.clear()
        backend.update(self.bmmsi, MockModel.objects.all())

        self.msqs = SearchQuerySet()

        # Reset the per-connection query log so every test starts from zero.
        reset_search_queries()

    def tearDown(self):
        """Put back the real ``UnifiedIndex`` stowed away in ``setUp``."""
        # Restore.
        connections["default"]._index = self.old_unified_index
        super(SearchQuerySetTestCase, self).tearDown()

    def test_len(self):
        """len() of the queryset should equal the 23 indexed mock results."""
        self.assertEqual(23, len(self.msqs))

    def test_repr(self):
        """repr() must not hit the backend and should show the query plus ``using``."""
        reset_search_queries()
        self.assertEqual(len(connections["default"].queries), 0)
        # assertRegexpMatches was deprecated in Python 3.2 and removed in 3.12;
        # assertRegex is the supported spelling.
        self.assertRegex(
            repr(self.msqs),
            r"^<SearchQuerySet: query=<test_haystack.mocks.MockSearchQuery object"
            r" at 0x[0-9A-Fa-f]+>, using=None>$",
        )

    def test_iter(self):
        """Iterating should yield all 23 results at the cost of three backend queries."""
        reset_search_queries()
        self.assertEqual(0, len(connections["default"].queries))
        everything = self.msqs.all()
        seen = [int(hit.pk) for hit in everything]
        self.assertEqual(seen, [hit.pk for hit in MOCK_SEARCH_RESULTS[:23]])
        self.assertEqual(3, len(connections["default"].queries))

    def test_slice(self):
        """Slicing and single-item indexing should each cost exactly one backend query."""
        reset_search_queries()
        self.assertEqual(0, len(connections["default"].queries))
        everything = self.msqs.all()
        expected = [hit.pk for hit in MOCK_SEARCH_RESULTS[1:11]]
        self.assertEqual([int(hit.pk) for hit in everything[1:11]], expected)
        self.assertEqual(1, len(connections["default"].queries))

        reset_search_queries()
        self.assertEqual(0, len(connections["default"].queries))
        everything = self.msqs.all()
        self.assertEqual(MOCK_SEARCH_RESULTS[22].pk, int(everything[22].pk))
        self.assertEqual(1, len(connections["default"].queries))

    def test_manual_iter(self):
        """_manual_iter() should stream every hit and fill the cache correctly."""
        everything = self.msqs.all()

        reset_search_queries()
        self.assertEqual(0, len(connections["default"].queries))

        seen = [hit.pk for hit in everything._manual_iter()]
        # PKs come back as the strings "1" .. "23".
        self.assertEqual(seen, [str(i) for i in range(1, 24)])

        self.assertEqual(3, len(connections["default"].queries))

        reset_search_queries()
        self.assertEqual(0, len(connections["default"].queries))

        # Test to ensure we properly fill the cache, even if we get fewer
        # results back (not a handled model) than the hit count indicates.
        # This will hang indefinitely if broken.

        # CharPK testing
        previous_ui = self.ui
        self.ui.build(indexes=[self.cpkmmsi])
        connections["default"]._index = self.ui
        self.cpkmmsi.update()

        everything = self.msqs.all()
        seen = [hit.pk for hit in everything._manual_iter()]
        self.assertEqual(seen, ["sometext", "1234"])
        self.assertEqual(1, len(connections["default"].queries))

        # UUID testing
        self.ui.build(indexes=[self.uuidmmsi])
        connections["default"]._index = self.ui
        self.uuidmmsi.update()

        everything = self.msqs.all()
        seen = [hit.pk for hit in everything._manual_iter()]
        self.assertEqual(
            seen,
            [
                "53554c58-7051-4350-bcc9-dad75eb248a9",
                "77554c58-7051-4350-bcc9-dad75eb24888",
            ],
        )

        connections["default"]._index = previous_ui

    def test_cache_is_full(self):
        """Consuming the whole iterator should mark the result cache as full."""
        reset_search_queries()
        self.assertEqual(0, len(connections["default"].queries))
        self.assertFalse(self.msqs._cache_is_full())
        everything = self.msqs.all()
        consumed = list(everything)  # drain the iterator to populate the cache
        self.assertEqual(len(consumed), 23)
        self.assertTrue(everything._cache_is_full())
        self.assertEqual(4, len(connections["default"].queries))

    def test_all(self):
        """all() hands back a SearchQuerySet clone."""
        self.assertIsInstance(self.msqs.all(), SearchQuerySet)

    def test_filter(self):
        """filter() adds exactly one node to the query filter tree."""
        filtered = self.msqs.filter(content="foo")
        self.assertIsInstance(filtered, SearchQuerySet)
        self.assertEqual(1, len(filtered.query.query_filter))

    def test_exclude(self):
        """exclude() also adds exactly one node to the query filter tree."""
        excluded = self.msqs.exclude(content="foo")
        self.assertIsInstance(excluded, SearchQuerySet)
        self.assertEqual(1, len(excluded.query.query_filter))

    def test_order_by(self):
        """order_by() records the field on the underlying query."""
        ordered = self.msqs.order_by("foo")
        self.assertIsInstance(ordered, SearchQuerySet)
        self.assertIn("foo", ordered.query.order_by)

    def test_models(self):
        """models() restricts the query to the requested model classes."""
        # Stow the active index and install one handling two mock models.
        previous_ui = connections["default"]._index
        ui = UnifiedIndex()
        bmmsi = BasicMockModelSearchIndex()
        bammsi = BasicAnotherMockModelSearchIndex()
        ui.build(indexes=[bmmsi, bammsi])
        connections["default"]._index = ui

        msqs = SearchQuerySet()

        unrestricted = msqs.all()
        self.assertIsInstance(unrestricted, SearchQuerySet)
        self.assertEqual(0, len(unrestricted.query.models))

        single = msqs.models(MockModel)
        self.assertIsInstance(single, SearchQuerySet)
        self.assertEqual(1, len(single.query.models))

        double = msqs.models(MockModel, AnotherMockModel)
        self.assertIsInstance(double, SearchQuerySet)
        self.assertEqual(2, len(double.query.models))

        # This will produce a warning.
        ui.build(indexes=[bmmsi])
        unregistered = msqs.models(AnotherMockModel)
        self.assertIsInstance(unregistered, SearchQuerySet)
        self.assertEqual(1, len(unregistered.query.models))

    def test_result_class(self):
        """result_class() swaps the hydration class; passing None restores SearchResult."""
        default_sqs = self.msqs.all()
        self.assertTrue(issubclass(default_sqs.query.result_class, SearchResult))

        # Custom class.
        class TinyResult(object):
            pass

        custom_sqs = self.msqs.result_class(TinyResult)
        self.assertTrue(issubclass(custom_sqs.query.result_class, TinyResult))

        # Reset to default.
        reset_sqs = self.msqs.result_class(None)
        self.assertTrue(issubclass(reset_sqs.query.result_class, SearchResult))

    def test_boost(self):
        """boost() registers one boosted term on the query."""
        boosted = self.msqs.boost("foo", 10)
        self.assertIsInstance(boosted, SearchQuerySet)
        self.assertEqual(1, len(boosted.query.boost.keys()))

    def test_highlight(self):
        """highlight() flips the highlight flag on the query."""
        highlighted = self.msqs.highlight()
        self.assertIsInstance(highlighted, SearchQuerySet)
        self.assertEqual(True, highlighted.query.highlight)

    def test_spelling_override(self):
        """set_spelling_query() overrides the spelling query independently of filters."""
        filtered = self.msqs.filter(content="not the spellchecking query")
        self.assertEqual(None, filtered.query.spelling_query)
        overridden = self.msqs.set_spelling_query("override")
        self.assertIsInstance(overridden, SearchQuerySet)
        self.assertEqual("override", overridden.query.spelling_query)

    def test_spelling_suggestions(self):
        """With spelling support disabled, suggestions must come back as None."""
        misspelled = self.msqs.filter(content="Indx")
        self.assertIsInstance(misspelled, SearchQuerySet)
        self.assertEqual(None, misspelled.spelling_suggestion())
        self.assertEqual(None, misspelled.spelling_suggestion("indexy"))

    def test_raw_search(self):
        """raw_search() passes the query string straight through to the backend."""
        self.assertEqual(23, len(self.msqs.raw_search("foo")))
        raw = self.msqs.raw_search("(content__exact:hello AND content__exact:world)")
        self.assertEqual(23, len(raw))

    def test_load_all(self):
        """load_all() hydrates model objects for char and UUID primary keys."""
        # Models with character primary keys.
        charpk_sqs = SearchQuerySet()
        charpk_sqs.query.backend = CharPKMockSearchBackend("charpk")
        hydrated = charpk_sqs.load_all().all()
        self.assertEqual(0, len(hydrated._result_cache))
        hydrated._fill_cache(0, 2)
        self.assertEqual(
            2, len([hit for hit in hydrated._result_cache if hit is not None])
        )

        # Models with uuid primary keys.
        uuid_sqs = SearchQuerySet()
        uuid_sqs.query.backend = UUIDMockSearchBackend("uuid")
        hydrated = uuid_sqs.load_all().all()
        self.assertEqual(0, len(hydrated._result_cache))
        hydrated._fill_cache(0, 2)
        self.assertEqual(
            2, len([hit for hit in hydrated._result_cache if hit is not None])
        )

        # If nothing is handled, you get nothing.
        previous_ui = connections["default"]._index
        empty_ui = UnifiedIndex()
        empty_ui.build(indexes=[])
        connections["default"]._index = empty_ui

        empty_sqs = self.msqs.load_all()
        self.assertIsInstance(empty_sqs, SearchQuerySet)
        self.assertEqual(0, len(empty_sqs))

        connections["default"]._index = previous_ui

        # For full tests, see the solr_backend.

    def test_load_all_read_queryset(self):
        """load_all() should honour a SearchIndex's ``read_queryset`` when hydrating.

        The first index's read_queryset filters out deleted rows, so the
        deleted hit hydrates to None; the second index's read_queryset keeps
        deleted rows, so both hits survive hydration.
        """
        # Stow.
        old_ui = connections["default"]._index
        ui = UnifiedIndex()
        gafmmsi = GhettoAFifthMockModelSearchIndex()
        ui.build(indexes=[gafmmsi])
        connections["default"]._index = ui
        gafmmsi.update()

        sqs = SearchQuerySet()
        results = sqs.load_all().all()
        results.query.backend = ReadQuerySetMockSearchBackend("default")
        results._fill_cache(0, 2)

        # The deleted result isn't returned
        self.assertEqual(
            len([result for result in results._result_cache if result is not None]), 1
        )

        # Register a SearchIndex with a read_queryset that returns deleted items
        rqstsi = TextReadQuerySetTestSearchIndex()
        ui.build(indexes=[rqstsi])
        rqstsi.update()

        sqs = SearchQuerySet()
        results = sqs.load_all().all()
        results.query.backend = ReadQuerySetMockSearchBackend("default")
        results._fill_cache(0, 2)

        # Both the deleted and not deleted items are returned
        self.assertEqual(
            len([result for result in results._result_cache if result is not None]), 2
        )

        # Restore.
        connections["default"]._index = old_ui

    def test_auto_query(self):
        """auto_query() wraps the raw text in a __content filter on the given field.

        Table-driven: each case is (query text, fieldname or None, expected repr).
        Quoted phrases, stray dashes, and colons inside exact matches must all
        survive verbatim.
        """
        cases = [
            ("test search -stuff", None,
             "<SQ: AND content__content=test search -stuff>"),
            ('test "my thing" search -stuff', None,
             '<SQ: AND content__content=test "my thing" search -stuff>'),
            ("test \"my thing\" search 'moar quotes' -stuff", None,
             "<SQ: AND content__content=test \"my thing\" search 'moar quotes' -stuff>"),
            ('test "my thing" search \'moar quotes\' "foo -stuff', None,
             '<SQ: AND content__content=test "my thing" search \'moar quotes\' "foo -stuff>'),
            ("test - stuff", None,
             "<SQ: AND content__content=test - stuff>"),
            # Ensure bits in exact matches get escaped properly as well.
            ('"pants:rule"', None,
             '<SQ: AND content__content="pants:rule">'),
            # Now with a different fieldname
            ("test search -stuff", "title",
             "<SQ: AND title__content=test search -stuff>"),
            ('test "my thing" search -stuff', "title",
             '<SQ: AND title__content=test "my thing" search -stuff>'),
        ]
        for text, fieldname, expected in cases:
            if fieldname is None:
                sqs = self.msqs.auto_query(text)
            else:
                sqs = self.msqs.auto_query(text, fieldname=fieldname)
            self.assertIsInstance(sqs, SearchQuerySet)
            self.assertEqual(expected, repr(sqs.query.query_filter))

    def test_count(self):
        """count() should report the 23 indexed mock results."""
        self.assertEqual(23, self.msqs.count())

    def test_facet_counts(self):
        """With no facets requested, facet_counts() is an empty dict."""
        self.assertEqual({}, self.msqs.facet_counts())

    def test_best_match(self):
        """best_match() should hand back a single SearchResult."""
        self.assertIsInstance(self.msqs.best_match(), SearchResult)

    def test_latest(self):
        """latest() by a date field should hand back a single SearchResult."""
        self.assertIsInstance(self.msqs.latest("pub_date"), SearchResult)

    def test_more_like_this(self):
        """more_like_this() for a model instance returns the full mock result set."""
        sample = MockModel()
        sample.id = 1

        self.assertEqual(23, len(self.msqs.more_like_this(sample)))

    def test_facets(self):
        """Each facet() call adds one field facet; calls chain additively."""
        single = self.msqs.facet("foo")
        self.assertIsInstance(single, SearchQuerySet)
        self.assertEqual(1, len(single.query.facets))

        chained = self.msqs.facet("foo").facet("bar")
        self.assertIsInstance(chained, SearchQuerySet)
        self.assertEqual(2, len(chained.query.facets))

    def test_date_facets(self):
        """date_facet() validates gap_by up front and accumulates one facet per call."""
        # An unknown gap_by unit must be rejected with a descriptive error.
        # assertRaises as a context manager replaces the older
        # try/self.fail()/except pattern and cannot silently pass.
        with self.assertRaises(FacetingError) as cm:
            self.msqs.date_facet(
                "foo",
                start_date=datetime.date(2008, 2, 25),
                end_date=datetime.date(2009, 2, 25),
                gap_by="smarblaph",
            )
        self.assertEqual(
            str(cm.exception),
            "The gap_by ('smarblaph') must be one of the following: year, month, day, hour, minute, second.",
        )

        sqs = self.msqs.date_facet(
            "foo",
            start_date=datetime.date(2008, 2, 25),
            end_date=datetime.date(2009, 2, 25),
            gap_by="month",
        )
        self.assertIsInstance(sqs, SearchQuerySet)
        self.assertEqual(len(sqs.query.date_facets), 1)

        sqs2 = self.msqs.date_facet(
            "foo",
            start_date=datetime.date(2008, 2, 25),
            end_date=datetime.date(2009, 2, 25),
            gap_by="month",
        ).date_facet(
            "bar",
            start_date=datetime.date(2007, 2, 25),
            end_date=datetime.date(2009, 2, 25),
            gap_by="year",
        )
        self.assertIsInstance(sqs2, SearchQuerySet)
        self.assertEqual(len(sqs2.query.date_facets), 2)

    def test_query_facets(self):
        """query_facet() accumulates one entry per call, even on the same field."""
        one = self.msqs.query_facet("foo", "[bar TO *]")
        self.assertIsInstance(one, SearchQuerySet)
        self.assertEqual(1, len(one.query.query_facets))

        two = one.query_facet("bar", "[100 TO 499]")
        self.assertIsInstance(two, SearchQuerySet)
        self.assertEqual(2, len(two.query.query_facets))

        # Test multiple query facets on a single field
        three = two.query_facet("foo", "[1000 TO 1499]")
        self.assertIsInstance(three, SearchQuerySet)
        self.assertEqual(3, len(three.query.query_facets))

    def test_stats(self):
        """stats_facet() keys stats by field: repeats on a field merge, new fields add."""
        one = self.msqs.stats_facet("foo", "bar")
        self.assertIsInstance(one, SearchQuerySet)
        self.assertEqual(1, len(one.query.stats))

        # A second facet on the same field does not add a new stats entry.
        same_field = self.msqs.stats_facet("foo", "bar").stats_facet("foo", "baz")
        self.assertIsInstance(same_field, SearchQuerySet)
        self.assertEqual(1, len(same_field.query.stats))

        other_field = self.msqs.stats_facet("foo", "bar").stats_facet("moof", "baz")
        self.assertIsInstance(other_field, SearchQuerySet)
        self.assertEqual(2, len(other_field.query.stats))

    def test_narrow(self):
        """narrow() records one narrow query on the underlying query object."""
        narrowed = self.msqs.narrow("foo:moof")
        self.assertIsInstance(narrowed, SearchQuerySet)
        self.assertEqual(1, len(narrowed.query.narrow_queries))

    def test_clone(self):
        """_clone() copies the query state but starts with an empty result cache."""
        original = self.msqs.filter(foo="bar", foo__lt="10")

        duplicate = original._clone()
        self.assertIsInstance(duplicate, SearchQuerySet)
        self.assertEqual(str(original.query), str(duplicate.query))
        self.assertEqual([], duplicate._result_cache)
        self.assertEqual(None, duplicate._result_count)
        self.assertEqual(False, duplicate._cache_full)
        self.assertEqual(original._using, duplicate._using)

    def test_using(self):
        """Constructing with using= pins the query to that connection alias."""
        pinned = SearchQuerySet(using="default")
        self.assertNotEqual(None, pinned.query)
        self.assertEqual("default", pinned.query._using)

    def test_chaining(self):
        """Each filter() call derives an independent clone; changes never leak back."""
        first = self.msqs.filter(content="foo")
        self.assertIsInstance(first, SearchQuerySet)
        self.assertEqual(1, len(first.query.query_filter))

        # A second, separately-derived queryset inherits none of the above.
        second = self.msqs.filter(content="bar")
        self.assertIsInstance(second, SearchQuerySet)
        self.assertEqual(1, len(second.query.query_filter))

    def test_none(self):
        """none() should hand back an empty queryset of the Empty* flavour."""
        empty = self.msqs.none()
        self.assertIsInstance(empty, EmptySearchQuerySet)
        self.assertEqual(0, len(empty))

    def test___and__(self):
        """The & operator should AND the two filter trees together."""
        lhs = self.msqs.filter(content="foo")
        rhs = self.msqs.filter(content="bar")
        combined = lhs & rhs

        self.assertIsInstance(combined, SearchQuerySet)
        self.assertEqual(2, len(combined.query.query_filter))

    def test___or__(self):
        """The | operator should OR the two filter trees together."""
        lhs = self.msqs.filter(content="foo")
        rhs = self.msqs.filter(content="bar")
        combined = lhs | rhs

        self.assertIsInstance(combined, SearchQuerySet)
        self.assertEqual(2, len(combined.query.query_filter))

    def test_and_or(self):
        """
        Combining AND queries with OR should give
            AND(OR(a, b), OR(c, d))
        """
        lhs = self.msqs.filter(content="foo").filter(content="oof")
        rhs = self.msqs.filter(content="bar").filter(content="rab")
        merged = lhs | rhs

        self.assertEqual("OR", merged.query.query_filter.connector)
        # Each original AND subtree survives intact as one child of the OR node.
        self.assertEqual(
            repr(lhs.query.query_filter), repr(merged.query.query_filter.children[0])
        )
        self.assertEqual(
            repr(rhs.query.query_filter), repr(merged.query.query_filter.children[1])
        )

    def test_or_and(self):
        """
        Combining OR queries with AND should give
            OR(AND(a, b), AND(c, d))
        """
        lhs = self.msqs.filter(content="foo").filter_or(content="oof")
        rhs = self.msqs.filter(content="bar").filter_or(content="rab")
        merged = lhs & rhs

        self.assertEqual("AND", merged.query.query_filter.connector)
        # Each original OR subtree survives intact as one child of the AND node.
        self.assertEqual(
            repr(lhs.query.query_filter), repr(merged.query.query_filter.children[0])
        )
        self.assertEqual(
            repr(rhs.query.query_filter), repr(merged.query.query_filter.children[1])
        )