Example #1
File: views.py Project: comses/catalog
 def get(self, request, format=None):
     query = request.GET.get('q', '').strip()
     sqs = SearchQuerySet().models(self.model_class)
     if query:
         sqs = sqs.autocomplete(name=query)
     data = [{'id': int(result.pk), 'name': result.name} for result in sqs]
     return Response(json.dumps(data))
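The `autocomplete(name=query)` call above only matches partial input if the index declares `name` as an n-gram field. A minimal index sketch consistent with that call (the model and module names are assumptions, not taken from comses/catalog):

# search_indexes.py (hypothetical sketch)
from haystack import indexes
from myapp.models import Thing  # assumed model exposing a "name" attribute

class ThingIndex(indexes.SearchIndex, indexes.Indexable):
    text = indexes.CharField(document=True, use_template=True)
    # EdgeNgramField is what lets SearchQuerySet().autocomplete(name=...) match prefixes
    name = indexes.EdgeNgramField(model_attr='name')

    def get_model(self):
        return Thing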
Example #2
    def test_load_all_read_queryset(self):
        # Stow.
        old_ui = connections["default"]._index
        ui = UnifiedIndex()
        gafmmsi = GhettoAFifthMockModelSearchIndex()
        ui.build(indexes=[gafmmsi])
        connections["default"]._index = ui
        gafmmsi.update()

        sqs = SearchQuerySet()
        results = sqs.load_all().all()
        results.query.backend = ReadQuerySetMockSearchBackend("default")
        results._fill_cache(0, 2)

        # The deleted result isn't returned
        self.assertEqual(len([result for result in results._result_cache if result is not None]), 1)

        # Register a SearchIndex with a read_queryset that returns deleted items
        rqstsi = TextReadQuerySetTestSearchIndex()
        ui.build(indexes=[rqstsi])
        rqstsi.update()

        sqs = SearchQuerySet()
        results = sqs.load_all().all()
        results.query.backend = ReadQuerySetMockSearchBackend("default")
        results._fill_cache(0, 2)

        # Both the deleted and not deleted items are returned
        self.assertEqual(len([result for result in results._result_cache if result is not None]), 2)

        # Restore.
        connections["default"]._index = old_ui
Example #3
    def search(self, query=None, *args, **kwargs):
        """
            Uses haystack to query news. 
            Returns a SearchQuerySet
        """
        sqs = SearchQuerySet()
        user = kwargs.get('user', None)

        # check to see if there is impersonation
        if hasattr(user,'impersonated_user'):
            if isinstance(user.impersonated_user, User):
                user = user.impersonated_user

        is_an_admin = user.profile.is_superuser

        if query:
            sqs = sqs.auto_query(sqs.query.clean(query)) 
            if user:
                if not is_an_admin:
                    return []
        else:
            sqs = sqs.all()
            if user:
                if not is_an_admin:
                    return []

        return sqs.models(self.model).order_by('-update_dt')
Example #4
File: views.py Project: comses/catalog
    def get(self, request, relation=None, name=None, year=None):

        filter_criteria = request.session.get("filter_criteria", {})
        filter_criteria.update(date_published__gte=year + "-01-01T00:00:00Z",
                               date_published__lte=year + "-12-31T00:00:00Z")
        if relation == RelationClassifier.JOURNAL.value:
            filter_criteria.update(container__name=name)
        elif relation == RelationClassifier.SPONSOR.value:
            filter_criteria.update(sponsors__name=name)
        elif relation == RelationClassifier.PLATFORM.value:
            filter_criteria.update(platforms__name=name)
        elif relation == RelationClassifier.MODELDOCUMENDTATION.value:
            filter_criteria.update(model_documentation__name=name)
        elif relation == RelationClassifier.AUTHOR.value:
            filter_criteria.update(authors__name__exact=name.replace("/", " "))

        sqs = SearchQuerySet()
        sqs = sqs.filter(**filter_criteria).models(Publication)
        pubs_pk = queryset_gen(sqs)
        pubs = Publication.api.primary(pk__in=pubs_pk)
        paginator = CatalogPagination()
        result_page = paginator.paginate_queryset(pubs, request)
        serializer = PublicationListSerializer(result_page, many=True)
        response = paginator.get_paginated_response(serializer.data)
        return Response({'json': json.dumps(response)}, template_name="publication/list.html")
Example #5
    def test_models(self):
        # Stow.
        old_unified_index = connections["default"]._index
        ui = UnifiedIndex()
        bmmsi = BasicMockModelSearchIndex()
        bammsi = BasicAnotherMockModelSearchIndex()
        ui.build(indexes=[bmmsi, bammsi])
        connections["default"]._index = ui

        msqs = SearchQuerySet()

        sqs = msqs.all()
        self.assertTrue(isinstance(sqs, SearchQuerySet))
        self.assertEqual(len(sqs.query.models), 0)

        sqs = msqs.models(MockModel)
        self.assertTrue(isinstance(sqs, SearchQuerySet))
        self.assertEqual(len(sqs.query.models), 1)

        sqs = msqs.models(MockModel, AnotherMockModel)
        self.assertTrue(isinstance(sqs, SearchQuerySet))
        self.assertEqual(len(sqs.query.models), 2)

        # This will produce a warning.
        ui.build(indexes=[bmmsi])
        sqs = msqs.models(AnotherMockModel)
        self.assertTrue(isinstance(sqs, SearchQuerySet))
        self.assertEqual(len(sqs.query.models), 1)
Example #6
File: views.py Project: Saworieza/pombola
    def get_global_context(self, context):
        # Find all the models to search over...
        models = set(
            self.search_sections[section]['model']
            for section in self.search_sections
        )

        show_top_hits = (self.page == '1' or not self.page)

        top_hits_ids = []

        if show_top_hits:
            context['top_hits'] = []
            for section, max_for_top_hits in SearchBaseView.top_hits_under.items():
                data = self.get_section_data(section)
                if data['results_count'] <= max_for_top_hits:
                    context['top_hits'] += data['results']
            top_hits_ids = set(r.id for r in context['top_hits'])

        sqs = SearchQuerySet().models(*list(models))
        # Exclude anything that will already have been shown in the top hits:
        for top_hit_id in top_hits_ids:
            sqs = sqs.exclude(id=top_hit_id)
        sqs = sqs. \
            exclude(hidden=True). \
            filter(content=AutoQuery(self.query)). \
            highlight()
        context['results'] = self.get_paginated_results(sqs)
        return context
Example #7
def autocomplete(request):
    """Return autocomple JSON results"""

    term = request.GET.get("term", "").strip()
    response_data = []

    if len(term):

        # Does not work - probably because the FLAG_PARTIAL is not set on Xapian
        # (trying to set it in settings.py as documented appears to have no effect)
        # sqs = SearchQuerySet().autocomplete(name_auto=term)

        # Split the search term up into little bits
        terms = re.split(r"\s+", term)

        # Build up a query based on the bits
        sqs = SearchQuerySet()
        for bit in terms:
            # print "Adding '%s' to the '%s' query" % (bit,term)
            sqs = sqs.filter_and(name_auto__startswith=sqs.query.clean(bit))

        # collate the results into json for the autocomplete js
        for result in sqs.all()[0:10]:
            response_data.append({"url": result.object.get_absolute_url(), "label": result.object.name})

    # send back the results as JSON
    return HttpResponse(simplejson.dumps(response_data), mimetype="application/json")
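Because each bit is ANDed into the query, the loop above can also be written as a single `filter()` call over combined `SQ` objects; a minimal equivalent sketch (same assumed `name_auto` EdgeNgram field on the index):

import operator
import re
from functools import reduce
from haystack.query import SQ, SearchQuerySet

term = "jan smith"  # example input
sqs = SearchQuerySet()
# One startswith clause per whitespace-separated bit, ANDed together
bits = [SQ(name_auto__startswith=sqs.query.clean(bit)) for bit in re.split(r"\s+", term) if bit]
if bits:
    sqs = sqs.filter(reduce(operator.and_, bits))
results = sqs[:10]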
Example #8
 def search_filter(self, filters=None, *args, **kwargs):
     sqs = SearchQuerySet()
     user = kwargs.get('user', None)
     groups = []
     if user and user.is_authenticated():
         groups = [g.pk for g in user.group_set.all()]
     admin = bool(user and user.is_authenticated() and user.profile.is_superuser)

     # permission filters
     if user:
         if not admin:
             if not user.is_anonymous():
                 # (status+status_detail+(anon OR user)) OR (who_can_view__exact)
                 anon_query = Q(allow_anonymous_view=True)
                 user_query = Q(allow_user_view=True)
                 sec1_query = Q(status=True, status_detail='active')
                 user_perm_q = Q(users_can_view__in=[user.pk])
                 group_perm_q = Q(groups_can_view__in=groups)
                 
                 query = reduce(operator.or_, [anon_query, user_query])
                 query = reduce(operator.and_, [sec1_query, query])
                 query = reduce(operator.or_, [query, user_perm_q, group_perm_q])
                 sqs = sqs.filter(query)
             else:
                 sqs = sqs.filter(allow_anonymous_view=True)
     else:
         sqs = sqs.filter(allow_anonymous_view=True)
     
     # custom filters
     for filter in filters or []:
         sqs = sqs.filter(content='"%s"' % filter)
     
     return sqs.models(self.model)
Example #9
File: search.py Project: alexdiao/3805
def personal_search(request):
    """    
    The user can search any item within his own collections and can search **only shared items** of other users
    
    TODO: Build a hash table to store item_id in the result of user_item to reduce time from O(n^2) to O(n)
    
    Reference: http://docs.haystacksearch.org/dev/searchqueryset_api.html#field-lookups
    """
    #Two parameters to tune
    RESULTS_PER_PAGE = 10
    load_all = False
    
    query = request.GET['q'].strip()  # Haystack only accepts the key name 'q'
    user_id = int(request.GET['pid'])
    if query == '':
        sqs = EmptySearchQuerySet()
    else:
        searchqueryset = SearchQuerySet()
        if user_id == request.user.pk:
            pronoun = '我'
            own_items = User_Item.objects.filter(user__pk=request.user.pk)
        else:
            pronoun = Profile.objects.get(pk=user_id).name
            own_items = User_Item.objects.filter(user__pk=user_id).exclude(status=1)
            
        own_items_ids = []
        for oi in own_items:
            own_items_ids.append(int(oi.item_id))
        sqs = searchqueryset.auto_query(query).filter(primary_key__in=own_items_ids)
        
    if load_all:
        sqs = sqs.load_all()
    paginator = Paginator(sqs, RESULTS_PER_PAGE)
    try:
        page = paginator.page(request.GET.get('page', 1))
        feeds_id = ''
        for result in page.object_list:
            feeds_id += str(result.object.id) + ','
        feeds_id = feeds_id[:-1]
        topics_of_item_dict = get_topics_of_item(feeds_id, request.user.pk)
        friends_of_item_dict = get_friends_of_item(feeds_id, request.user.pk)
        user_item_status_dict = get_user_items(feeds_id, request.user.pk)
    except InvalidPage:
        raise Http404
    context = {
            'query': query,
            'page': page,
            'page_type':'search',
            'topics_of_item_dict':topics_of_item_dict,
            'friends_of_item_dict':friends_of_item_dict,
            'user_item_status_dict':user_item_status_dict,
            'paginator': paginator,
            'suggestion': None,
            'pronoun': pronoun,
            'num_results': len(sqs),
            'user_id': user_id
        }
    from django.template import add_to_builtins
    add_to_builtins('haystack.templatetags.highlight')
    return render_to_response('main/search/personal_search_results.html', context, context_instance=RequestContext(request))
Example #10
 def current_bills_sorted(self):
     from haystack.query import SearchQuerySet
     qs = SearchQuerySet().using("bill").filter(indexed_model_name__in=["Bill"], congress=CURRENT_CONGRESS, committees=self.id).order_by('-proscore')
     return {
         "count": qs.count(),
         "bills": [ b.object for b in qs[0:100] ],
         }
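The `.using("bill")` call routes the query to a named Haystack connection, so a matching alias must exist in settings; a sketch of the expected shape (engine and URL values are placeholders, not taken from this project):

# settings.py (sketch) -- connection alias used by using("bill") above
HAYSTACK_CONNECTIONS = {
    'default': {
        'ENGINE': 'haystack.backends.solr_backend.SolrEngine',
        'URL': 'http://127.0.0.1:8983/solr/default',
    },
    'bill': {
        'ENGINE': 'haystack.backends.solr_backend.SolrEngine',
        'URL': 'http://127.0.0.1:8983/solr/bill',
    },
}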
Example #11
File: utils.py Project: BIGGANI/tendenci
def _specific_view(user, obj):
    """
    determines if a user has specific permissions to view the object.
    note this is based only on:

    (users_can_view contains user)
    +
    (groups_can_view contains one of user's groups)
    """
    sqs = SearchQuerySet()
    sqs = sqs.models(obj.__class__)

    groups = [g.pk for g in user.group_set.all()]

    q_primary_key = SQ(primary_key=obj.pk)
    q_groups = SQ(groups_can_view__in=groups)
    q_users = SQ(users_can_view__in=[user.pk])

    if groups:
        sqs = sqs.filter(q_primary_key & (q_groups | q_users))
    else:
        sqs = sqs.filter(q_primary_key & q_users)

    if sqs:
        return True

    return False
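The `users_can_view__in` / `groups_can_view__in` lookups assume the index exposes those relations as multi-valued fields; a minimal sketch of such an index (the model and its M2M accessors are assumptions, not tendenci's actual index):

# search_indexes.py (sketch) -- multi-valued permission fields queried above
from haystack import indexes
from myapp.models import Article  # hypothetical model with users_can_view/groups_can_view M2M fields

class ArticleIndex(indexes.SearchIndex, indexes.Indexable):
    text = indexes.CharField(document=True, use_template=True)
    primary_key = indexes.IntegerField(model_attr='pk')
    users_can_view = indexes.MultiValueField(null=True)
    groups_can_view = indexes.MultiValueField(null=True)

    def get_model(self):
        return Article

    def prepare_users_can_view(self, obj):
        return [u.pk for u in obj.users_can_view.all()]

    def prepare_groups_can_view(self, obj):
        return [g.pk for g in obj.groups_can_view.all()]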
Example #12
 def get(self, request):
     #takes a latitude and longitude and returns artifacts within 2 miles
     try:
         lon = float(request.GET.get('lon', None))
         lat = float(request.GET.get('lat', None))
     except (TypeError, ValueError):
         return HttpResponse('You Must Specify a latitude and longitude', content_type='application/json', status=status.HTTP_400_BAD_REQUEST)
     username = request.user.username
     pnt = Point(lon, lat)
     # Within a two miles.
     max_dist = D(mi=2)
     # 'location' is the fieldname from our ``SearchIndex``...
     # Do the radius query.
     sqs = SearchQuerySet().distance('location',pnt).order_by('distance')
     within = sqs.dwithin('location', pnt, max_dist)
     if len(within) != len(sqs) and len(within) !=0 and len(sqs) !=0:
         closest_not_within = pnt.distance(sqs[len(within)])*100*0.6214
     else: closest_not_within='null'
     the_data = []
     for result in sqs:
         # get number of pickups as well as whether or not the user has already picked up
         already_picked_up = (username in result.pickup_count)
         _pickup_count = count_m2m(result.pickup_count)
         # get number of upvotes as well as whether or not the user has already upvoted
         already_upvoted = (username in result.upvoted)
         upvote_count = count_m2m(result.upvoted) 
         # make a point 
         artifactpnt = Point(float(result.longitude), float(result.latitude))
         loopy_data = {'author': result.author, 'upvote_count': upvote_count, 'already_upvoted': already_upvoted, 'already_picked_up': already_picked_up, 'longitude': result.longitude, 'radius': result.radius, 'latitude': result.latitude, 'pub_date': str(result.pub_date), 'pickup_count': _pickup_count, 'distance': (artifactpnt.distance(pnt) *100)* 0.6214, 'pk': result.pk}
         the_data += [loopy_data]
     the_data = [json.dumps(the_data + [{'closest_not_within':closest_not_within}])]
     return HttpResponse(the_data, content_type='application/json', status=status.HTTP_200_OK)
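`distance('location', pnt)` and `dwithin(...)` rely on a spatial-capable backend (Solr or Elasticsearch) and a `LocationField` named `location` on the index; a minimal sketch of the assumed index (model and attribute names are assumptions):

# search_indexes.py (sketch) -- the 'location' field used by distance()/dwithin() above
from haystack import indexes
from haystack.utils.geo import Point
from myapp.models import Artifact  # hypothetical model with longitude/latitude columns

class ArtifactIndex(indexes.SearchIndex, indexes.Indexable):
    text = indexes.CharField(document=True, use_template=True)
    location = indexes.LocationField()

    def get_model(self):
        return Artifact

    def prepare_location(self, obj):
        # Haystack expects Point(longitude, latitude)
        return Point(float(obj.longitude), float(obj.latitude))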
Example #13
    def __init__(self, *args, user, report_ids=(), **kwargs):
        sqs = SearchQuerySet().models(Report)

        # Ensure anonymous/public users cannot see non-public reports in all
        # cases.
        if not user.is_active:
            if report_ids:
                sqs = sqs.filter(SQ(id__in=report_ids) | SQ(is_public=True))
            else:
                sqs = sqs.filter(is_public=True)

        super().__init__(*args, searchqueryset=sqs, **kwargs)

        self.user = user
        self.report_ids = report_ids

        # Only certain fields on this form can be used by members of the
        # public.
        if not user.is_active:
            for name in list(self.fields.keys()):
                if name not in self.public_fields:
                    self.fields.pop(name)

        if user.is_anonymous():
            if report_ids:
                source_field = self.fields['source']
                source_choices = source_field.choices
                source_field.choices = [
                    (value, label) for (value, label) in source_choices if value != 'invited']
            else:
                self.fields.pop('source')
Example #14
File: views.py Project: markpasc/bee
    def build_form(self, form_kwargs=None):
        log = logging.getLogger(".".join((__name__, "PostSearch")))
        request = self.request

        log.debug("which author has domain %r?", request.META["HTTP_HOST"])
        try:
            self.author = User.objects.get(authorsite__site__domain=request.META["HTTP_HOST"])
        except User.DoesNotExist:
            log.debug("    no such author! no results at all!")
            self.author = None
            sqs = SearchQuerySet().none()
        else:
            sqs = SearchQuerySet().filter(author_pk=self.author.pk)
            # What visibility of posts can the searcher see?
            if request.user.is_anonymous():
                log.debug("    viewer is anonymous, so only %s's public posts", self.author.username)
                sqs = sqs.filter(private=0)
            elif request.user.pk == self.author.pk:
                log.debug("    viewer is %s, so all their posts", self.author.username)
            else:
                # TODO: honor trust groups instead of giving everyone else only public posts
                log.debug("    viewer is logged in as somebody else, so only %s's public posts", self.author.username)
                sqs = sqs.filter(private=0)

        self.searchqueryset = sqs

        return super(PostSearch, self).build_form(form_kwargs)
Example #15
def search(request):
    sqs = SearchQuerySet().filter(content=Fuzzy(request.GET['q']))
    if request.GET['id'] != '':
        sqs = sqs.exclude(django_id=int(request.GET['id']))
    res_list = sorted([x for x in sqs], key=lambda x: x.score, reverse=True)
    ser = serializers.serialize('json', [x.object for x in res_list])
    return HttpResponse(ser, content_type="application/json")
Example #16
File: forms.py Project: CARocha/amunse
class SearchForm(forms.Form):
    q = forms.CharField(required=False, label=_('Search'))
    
    def __init__(self, *args, **kwargs):
        self.searchqueryset = kwargs.pop('searchqueryset', None)
        self.load_all = kwargs.pop('load_all', False)
        
        if self.searchqueryset is None:
            self.searchqueryset = SearchQuerySet()
        
        super(SearchForm, self).__init__(*args, **kwargs)
    
    def search(self):
        if self.is_valid():
            sqs = self.searchqueryset.auto_query(self.cleaned_data['q'])
            
            if self.load_all:
                sqs = sqs.load_all()
            
            return sqs
        else:
            return []
    
    def get_suggestion(self):
        if not self.is_valid():
            return None
        
        return self.searchqueryset.spelling_suggestion(self.cleaned_data['q'])
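A minimal sketch of how a plain Django view might drive this form (URL wiring and template name are assumptions):

# views.py (sketch) -- using the SearchForm above
from django.shortcuts import render
from haystack.query import SearchQuerySet

def search(request):
    form = SearchForm(request.GET or None, searchqueryset=SearchQuerySet(), load_all=True)
    results = form.search()  # returns [] when the form is unbound or invalid
    return render(request, 'search/search.html', {'form': form, 'results': results})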
Example #17
    def all_results(self):
        from inventory.models import Product

        sqs = SearchQuerySet().load_all().models(Product)
        sqs = sqs.filter(shop_id=self.shop.id)

        return sqs
Example #18
    def get_search(self, request, **kwargs):
        self.method_check(request, allowed=['get'])
        self.is_authenticated(request)
        self.throttle_check(request)

        sqs = SearchQuerySet().models(Tracks).load_all()
        sqs = sqs.auto_query(request.GET.get('q', ''))
        paginator = Paginator(sqs, RESULTS_PER_PAGE)

        try:
            page = paginator.page(int(request.GET.get('page', 1)))
        except InvalidPage:
            raise Http404("No such page exists.")
        except ValueError:
            raise Http404("Invalid page number.")

        objects = []

        for result in page.object_list:
            if result is None:
                continue

            bundle = self.build_bundle(obj=result.object, request=request)
            bundle = self.full_dehydrate(bundle)
            objects.append(bundle)

        self.log_throttled_access(request)

        return self.create_response(request, {'objects': objects})
Example #19
    def get_related(self):
        query_string = u' '.join(self.tags.names())
        if query_string:
            query_set = SearchQuerySet().exclude(django_id=self.pk)
            return query_set.filter(content=query_string, type='thread')

        return tuple()
Example #20
def get_evaluations(request):

    qs = SearchQuerySet().filter(
        evaluated_rubrics__in=[0] + list(Rubric.objects.values_list("id", flat=True))
    ).narrow("is_displayed:true")
    size = int(request.REQUEST.get("size", 100))
    start = int(request.REQUEST.get("start", 0))

    total_items = qs.count()

    items = []

    if start < total_items and start >= 0:
        for r in qs[start:start+size]:
            fields = r.get_stored_fields()
            items.append(dict(
                title=fields["title"],
                url=fields["url"],
                rubric_1=fields["evaluation_score_rubric_0"],
                rubric_2=fields["evaluation_score_rubric_1"],
                rubric_3=fields["evaluation_score_rubric_2"],
                rubric_4=fields["evaluation_score_rubric_3"],
                rubric_5=fields["evaluation_score_rubric_4"],
                rubric_6=fields["evaluation_score_rubric_5"],
                rubric_7=fields["evaluation_score_rubric_6"],
            ))

    return dict(items=items, total_items=total_items)
Example #21
    def search(self, query=None, *args, **kwargs):
        """
        Uses haystack to query corporate memberships.
        Returns a SearchQuerySet
        """
        from corporate_memberships.models import CorporateMembership
        from perms.utils import is_admin

        user = kwargs.get('user', None)
        if user.is_anonymous():
            return SearchQuerySet().models().none()

        is_an_admin = is_admin(user)

        sqs = SearchQuerySet().models(CorporateMembership)

        if query:
            sqs = sqs.filter(content=sqs.query.clean(query))
        else:
            sqs = sqs.all()

        if not is_an_admin:
            # reps__contain
            sqs = sqs.filter(Q(content='rep\:%s' % user.username) |
                             Q(creator=user) |
                             Q(owner=user)).filter(status_detail='active')

        return sqs
Example #22
File: utils.py Project: goetzk/tendenci
def _specific_view(user, obj):
    """
    determines if a user has specific permissions to view the object.
    note this is based only on:

    (users_can_view contains user)
    +
    (groups_can_view contains one of user's groups)
    """
    sqs = SearchQuerySet()
    sqs = sqs.models(obj.__class__)

    groups = [g.pk for g in user.group_set.all()]

    q_primary_key = SQ(primary_key=obj.pk)
    q_groups = SQ(groups_can_view__in=groups)
    q_users = SQ(users_can_view__in=[user.pk])

    if groups:
        sqs = sqs.filter(q_primary_key & (q_groups | q_users))
    else:
        sqs = sqs.filter(q_primary_key & q_users)

    if sqs:
        # Make sure the index isn't doing something unexpected with the query,
        # like when the Whoosh StopFilter caused the primary_key portion of the
        # query to be ignored.
        assert len(sqs) == 1, "Index returned an unexpected result set when searching for view permissions on an object"
        return True

    return False
Example #23
    def setUp(self):
        super(LiveSolrMoreLikeThisTestCase, self).setUp()
        self.sqs = SearchQuerySet()

        # Wipe it clean.
        self.sqs.query.backend.clear()

        # With the models registered, you get the proper bits.
        import haystack
        from haystack.sites import SearchSite

        # Stow.
        self.old_site = haystack.site
        test_site = SearchSite()
        test_site.register(MockModel, SolrMockModelSearchIndex)
        test_site.register(AnotherMockModel, SolrAnotherMockModelSearchIndex)
        haystack.site = test_site

        # Force indexing of the content.
        for mock in MockModel.objects.all():
            mock.save()

        # Force indexing of the content.
        for mock in AnotherMockModel.objects.all():
            mock.save()

        self.sqs = SearchQuerySet()
Example #24
    def get_urls(self):
        # Build SQS
        sqs = SearchQuerySet()
        for facet in settings.OSCAR_SEARCH_FACETS["fields"].values():
            sqs = sqs.facet(facet["field"])
        for facet in settings.OSCAR_SEARCH_FACETS["queries"].values():
            for query in facet["queries"]:
                sqs = sqs.query_facet(facet["field"], query[1])

        # The form class has to be passed to the __init__ method as that is how
        # Haystack works.  It's slightly different to normal CBVs.
        urlpatterns = patterns(
            "",
            url(r"^$", self.search_view(form_class=forms.MultiFacetedSearchForm), name="search"),
            url(r"^suggest/$", self.suggest_view.as_view(), name="suggest"),
            url(
                r"^default/$",
                search_view_factory(
                    view_class=views.FacetedSearchView,
                    form_class=forms.PriceRangeSearchForm,
                    searchqueryset=sqs,
                    template="search/results.html",
                ),
                name="search_default",
            ),
        )
        return self.post_process_urls(urlpatterns)
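The facet-building loop above only depends on the shape of `OSCAR_SEARCH_FACETS`; a settings sketch consistent with how it is read here (the concrete facets are illustrative, not Oscar's shipped defaults):

# settings.py (sketch) -- shape expected by the facet-building loop above
OSCAR_SEARCH_FACETS = {
    "fields": {
        "product_class": {"name": "Type", "field": "product_class"},
        "rating": {"name": "Rating", "field": "rating"},
    },
    "queries": {
        "price_range": {
            "name": "Price range",
            "field": "price",
            "queries": [
                # (display label, Solr range query passed to query_facet)
                ("0 to 20", "[0 TO 20]"),
                ("20 to 40", "[20 TO 40]"),
                ("40+", "[40 TO *]"),
            ],
        },
    },
}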
Example #25
    def get_searchqueryset(self, form):
        """Get the Haystack searchqueryset (which we treat as
        a regular Django queryset)."""
        sqs = SearchQuerySet()
        if self.model:
            sqs = sqs.models(self.model)
        # FIXME: Move somewhere more sensible
        if settings.PORTAL_HIDE_DRAFTS and not self.request.user.is_staff:
            sqs = sqs.narrow("publication_status:%d" % models.Resource.PUBLISHED)

        for facet in self.facetclasses:
            sqs = facet.apply(sqs)

        # apply the query
        if form.is_valid():
            sqs = form.filter(sqs)
        for facetclass in self.facetclasses:
            sqs = facetclass.narrow(sqs, self.request.GET.getlist(
                facetclass.paramname))
        counts = sqs.facet_counts()
        current = sqs.query.narrow_queries
        for facetclass in self.facetclasses:
            facetclass.parse(counts, current)

        # FIXME: Find way around assigning the sqs to the instance,
        # but it seems to be difficult to prevent it from running
        # multiple times otherwise, e.g. when checking for a spelling
        # suggestion.
        self.searchqueryset = sqs
        return sqs
Example #26
class LiveSearchQuerySetTestCase(HaystackBackendTestCase, TestCase):
    """
    SearchQuerySet specific tests
    """
    fixtures = ['base_data.json']

    def get_index(self):
        return MockSearchIndex()

    def setUp(self):
        super(LiveSearchQuerySetTestCase, self).setUp()

        self.backend.update(self.index, MockModel.objects.all())
        self.sq = connections['default'].get_query()
        self.sqs = SearchQuerySet()

    def test_result_class(self):
        # Assert that we're defaulting to ``SearchResult``.
        sqs = self.sqs.all()
        self.assertTrue(isinstance(sqs[0], SearchResult))

        # Custom class.
        sqs = self.sqs.result_class(MockSearchResult).all()
        self.assertTrue(isinstance(sqs[0], MockSearchResult))

        # Reset to default.
        sqs = self.sqs.result_class(None).all()
        self.assertTrue(isinstance(sqs[0], SearchResult))

    def test_facet(self):
        self.assertEqual(len(self.sqs.facet('name').facet_counts()['fields']['name']), 3)
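For reference, `facet_counts()` returns a dict keyed by facet type, which is what the assertion above indexes into; a representative value (counts are illustrative, assuming three distinct `name` values in the index):

# Representative facet_counts() result relied on by test_facet above
counts = {
    'fields': {'name': [('daniel', 5), ('chris', 3), ('joe', 1)]},
    'dates': {},
    'queries': {},
}
assert len(counts['fields']['name']) == 3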
Example #27
    def handle_save(self, sender, instance, **kwargs):
        if sender not in self.index_models:
            return

        # If IgniteUser was saved and changed name, then update all its authored projects owner_name:
        if sender == IgniteUser:
            owner_projects_qs = Project.objects.filter(owner=instance)
            base_owner_projects_sq = SearchQuerySet()
            base_owner_projects_sq = base_owner_projects_sq.models(Project)
            for owner_project_instance in owner_projects_qs:
                # get owner project from haystack index:
                owner_projects_sq = base_owner_projects_sq.filter(id=owner_project_instance.id)
                if owner_projects_sq.count() == 0:
                    # continue till get to the first project found in haystack.
                    # Note: all projects should be found in haystack always, so this would never happen.
                    continue
                old_owner_name = owner_projects_sq[0].owner_name
                # if indexed owner_name is different from the current user name:
                if old_owner_name != instance.name:
                    using_backends = self.connection_router.for_write(instance=owner_project_instance)
                    for using in using_backends:
                        try:
                            index = self.connections[using].get_unified_index().get_index(Project)
                            if hasattr(index, 'update_owner_projects'):
                                # update the owner_name of the owner projects:
                                index.update_owner_projects(owner=instance, using=using)
                        except NotHandled:
                            # TODO: Maybe log it or let the exception bubble?
                            pass
                break

        else:
            super(IgniteSignalProcessor, self).handle_save(sender, instance, **kwargs)
Example #28
File: views.py Project: ukata/vegbasket
def get_box(request):
    
    long1 = float(request.GET.get('long1',0))
    lat1 = float(request.GET.get('lat1',0))
    
    
    long2 = float(request.GET.get('long2',0))
    lat2 = float(request.GET.get('lat2',0))    
    bl = Point(long1, lat1)
    tr = Point(long2, lat2)
    
    
    max_dist = D(mi=20)
    sqs = SearchQuerySet().within('location', bl, tr)
    print (tr,bl)
    data = {'counter':0 ,'places':[]}

    if sqs.count()==0:
        return HttpResponse(content=json.dumps(data))
    
    
    
    for elem in sqs.all():
        if elem.object.long:
            data['counter'] += 1
            place = {'long':0, 'lat':0, 'title':''}
            place['long'] = float(elem.object.long)
            place['lat'] = float(elem.object.lat)
            place['title'] = elem.object.name
            place['level'] = elem.object.level
            data['places'].append(place)
    return HttpResponse(content=json.dumps(data))
Example #29
File: forms.py Project: ygneo/daguerro
    def search(self):
        if not hasattr(self, "cleaned_data"):
            return self.no_query_found()

        search_fields = [key for key, value in self.cleaned_data.iteritems() if value == True]
        if 'title' not in search_fields:
            sqs = SearchQuerySet()
        else:
            sqs = super(SearchOptionsForm, self).search()
            # title is a document field and has been used for filtering in super method search()
            search_fields = [key for key in search_fields if key != 'title']

        query = sqs.query.clean(self.cleaned_data.pop('q'))
        galleries = [g.id for g in self.cleaned_data.get('galleries', [])]
        search_galleries = self.cleaned_data.get('search_galleries_choice', "ALL")

        query_words = query.split()
        for key in search_fields:
            if key == "tags":
                sqs = sqs.filter_or(tags__in=[word.lower() for word in query_words])
            else:
                sqs = self._filter_or_query_words(sqs, key, query_words)

        if search_galleries == 'SELECTED':
            sqs = sqs.filter_and(galleries_ids__in=galleries)

        return sqs
Example #30
def get_response(project_uri, query_string, include_n3=True):
    d = {
        'results': list(),
    }

    project_graph = projects.get_project_graph(project_uri)
    graph = Graph()

    query_set = SearchQuerySet().models(Text).filter(
        content=AutoQuery(query_string), project__exact=project_uri
    )

    highlighter = Highlighter(query_string, html_tag='span', css_class=CSS_RESULT_MATCH_CLASS)
    title_highlighter = TitleHighlighter(query_string, html_tag='span', css_class=CSS_RESULT_MATCH_CLASS)

    d['spelling_suggestion'] = query_set.spelling_suggestion()

    for result in query_set:
        text_uri = URIRef(result.get_stored_fields()['identifier'])

        if annotations.has_annotation_link(project_graph, text_uri) or projects.is_top_level_project_resource(project_uri, text_uri):
            d['results'].append(search_result_to_dict(result, project_uri, highlighter, title_highlighter))

            if include_n3:
                graph += utils.metadata_triples(project_graph, text_uri)

    if include_n3:
        d['n3'] = graph.serialize(format='n3')

    return d
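The highlighters built above follow Haystack's `Highlighter` API (`TitleHighlighter` is project-specific, so only the base class is shown); a minimal usage sketch with placeholder text:

# Minimal Highlighter usage consistent with the view above
from haystack.utils.highlighting import Highlighter

# css_class here stands in for CSS_RESULT_MATCH_CLASS used above
highlighter = Highlighter("manuscript", html_tag="span", css_class="result-match")
snippet = highlighter.highlight("An early manuscript of the text was digitized in 1998.")
# "manuscript" is now wrapped in <span class="result-match"> tags inside snippet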
Example #31
from aristotle_mdr.views.search import PermissionSearchView

from haystack.views import search_view_factory
from haystack.query import SearchQuerySet

urlpatterns = [
    url(r'^', include('aristotle_mdr.urls')),
    url(
        r'^extension_test/',
        include('extension_test.extension_urls',
                app_name="extension_test",
                namespace="extension_test")),
    url(r'^fail_search/?',
        search_view_factory(view_class=PermissionSearchView,
                            template='search/search.html',
                            searchqueryset=SearchQuerySet(),
                            form_class=PermissionSearchForm),
        name='fail_search'),
    url(
        r'^',
        include('aristotle_mdr.contrib.links.urls',
                app_name="aristotle_mdr_links",
                namespace="aristotle_mdr_links")),
    url(
        r'^publish/',
        include('aristotle_mdr.contrib.self_publish.urls',
                app_name="aristotle_self_publish",
                namespace="aristotle_self_publish")),
    url(
        r'^',
        include('aristotle_mdr.contrib.slots.urls',
Example #32
def get_estimates_common(request, data):
    """ Append common page content to the argument data
    :param request:  web request
    :param data: A dict that contains page render data.
    :return: the results of the search query
    """
    post = None
    if request.method == "POST":
        post = request.POST

    # Ensure timeout for session manually.
    # We could have used set_expiry but then we do not get sessions
    # to automatically expire when browser closes.
    last_access = request.session.get('estimates_last_access_time', 0.0)
    import time
    current_time = time.time()
    if last_access < (current_time - (20 * 60.0)):
        request.session.pop("estimates_post_data", None)
    request.session['estimates_last_access_time'] = current_time

    # Get post from session if this is not a POST request.
    if post is None:
        # If the user had made queries before during this session, recover the state here.
        post = request.session.get("estimates_post_data")
    else:
        # Store the POST data for possible use later in this session.
        request.session["estimates_post_data"] = post

    query = {}

    def is_checked(prefix, element, reset):
        """
        Helper function that checks post data (if any) to see if
        the checkbox corresponding to the given element is checked.
        :param prefix: the prefix used by the view to identify the
        model of the element.
        :param element: the model object whose check state is needed.
        :param reset: the POST parameter name that may flag a full reset of the
        checkboxes. If this parameter is set to a "Reset to default" value,
        then the result of this function will be 1.
        :return: 1 if the checkbox is checked, 0 otherwise
        """
        if post is not None and prefix + str(element.pk) not in post and \
                post.get(reset) != "Reset to default":
            return 0
        else:
            return 1

    EstimateManager.cache()
    # Check which Flags (nations) are selected and include the selection in the query.
    nations = [[
        x.name, x.pk,
        is_checked("checkbox_nation_", x, "submit_nation")
    ] for x in EstimateManager.nations.values()]
    data['nations'] = nations
    query["nation__in"] = [nation[0] for nation in nations if nation[2] == 1]
    data['all_nations_selected'] = len(nations) == len(query["nation__in"])

    export_regions = {}
    for area, regions in EstimateManager.export_hierarchy.iteritems():
        children = [[[x.name, x.pk],
                     is_checked("eregion-button-", x, "submit_regions")]
                    for x in regions]
        checked = is_checked("earea-button-", area, "submit_regions")
        if len(regions) == 1:
            children[0][1] = checked
        export_regions[(area, checked)] = children

    import_regions = {}
    for area, regions in EstimateManager.import_hierarchy.iteritems():
        children = [[[x.name, x.pk],
                     is_checked("dregion-button-", x, "submit_regions")]
                    for x in regions]
        checked = is_checked("darea-button-", area, "submit_regions")
        if len(regions) == 1:
            children[0][1] = checked
        import_regions[(area, checked)] = children

    data['export_regions'] = collections.OrderedDict(
        sorted(export_regions.items(), key=lambda x: x[0][0].name))
    data['import_regions'] = collections.OrderedDict(
        sorted(import_regions.items(), key=lambda x: x[0][0].name))

    def query_region(query_key, regions_dict, all_selected_key):
        """
        Obtain a list of the names of selected regions in the regions_dict
        :param query_key: The key used when inserting this list on the query
        dict
        :param regions_dict: A dictionary with keys given by Area and whose
        values are lists of the regions in that area in the format
        [[name, pk], checked]
        :param all_selected_key: The key to set a boolean value which indicates
        whether all regions are selected.
        :return:
        """
        from itertools import chain
        # Flatten the regions so that we may generate the corresponding query term.
        flat = list(chain.from_iterable(regions_dict.values()))
        query[query_key] = [region[0][0] for region in flat if region[1] == 1]
        data[all_selected_key] = len(flat) == len(query[query_key])

    query_region("embarkation_region__in", export_regions,
                 "all_embarkations_selected")
    query_region("disembarkation_region__in", import_regions,
                 "all_disembarkations_selected")

    year_form = None
    # Ensure that GET requests or Reset POST requests yield a fresh copy of the form with default values.
    if post is not None and not post.get("submit_year") == "Reset to default":
        year_form = EstimateYearForm(post)

    if year_form is not None and year_form.is_valid():
        query["year__gte"] = year_form.cleaned_data["frame_from_year"]
        query["year__lte"] = year_form.cleaned_data["frame_to_year"]
    else:
        if year_form is not None:
            import logging
            logging.getLogger('voyages').error(year_form.errors)
        year_form = EstimateYearForm(
            initial={
                'frame_from_year': globals.default_first_year,
                'frame_to_year': globals.default_last_year
            })
        query["year__gte"] = globals.default_first_year
        query["year__lte"] = globals.default_last_year

    data['year_form'] = year_form
    data['query'] = query
    data['post'] = post

    return SearchQuerySet().models(Estimate).filter(**query).load_all()
Example #33
     print
     
 def _test_solr(self):
     print '=== SOLR ==='
     sb = backend.SearchBackend()
     try:
         video = Video.objects.all()[:1].get()
         update_search_index(Video, video.pk)
         sb.conn.commit()
     except (IOError, SolrError), e:
         raise Exception('Solr is unavailable')
     except Video.DoesNotExist:
         raise Exception('Database is empty to test Solr')
     
     # failing on nf, will check later
     sqs_count = SearchQuerySet().count()#(content=video.title)
     #sqs = SearchQuerySet().filter(content=video.title)
     assert sqs_count, 'Solr is unavailable. Can\'t find video'
     
     print 'OK'
     print 
     
 def _test_memcached(self):
     print '=== CACHE ==='
     print 'backend: ', settings.CACHE_BACKEND
     val = random.random()
     key = 'test-cache-%s' % base64.b64encode(str(random.random()))
     
     cache.set(key, val)
     assert val == cache.get(key), u'Cache is unavailable. Can\'t get value' 
     
Example #34
def perform_search(search, lang):
    items = search['items']
    search_terms = {}
    custom_terms = []
    sqs = SearchQuerySet()
    for item in items:
        term = item['searchTerm']
        operator = _operators_dict[item['op']]
        is_list = isinstance(term, list)
        if is_list and not operator.list_type:
            term = term[0]
        skip = False
        if operator.front_end_op_str == _op_contains.front_end_op_str:
            m = re.match(u'^\s*["\u201c](\*?)([^\*]*)(\*?)["\u201d]\s*$', term)
            if m:
                # Change to exact match and remove quotes.
                # Make sure we sanitize the input.
                term = sqs.query.clean(m.group(2))
                operator = _op_eq
                # Here we are using Solr's format, which is not very portable,
                # but at this stage this project is very dependent on Solr anyway.
                # If the search is really for a full exact match, then we search
                # on the plaintext_exact variant of the field. If it is a "contains"
                # the exact search terms, then we use the plaintext variant instead.
                custom_terms.append(u'var_' + unicode(item['varName']) +
                                    '_plaintext' +
                                    ('_exact' if len(m.group(1)) +
                                     len(m.group(3)) == 0 else '') + ':("' +
                                    term + '")')
                skip = True
        if not skip:
            search_terms[u'var_' + unicode(item['varName']) + u'__' +
                         unicode(operator.back_end_op_str)] = term
    search_terms[u'var_intra_american_voyage__exact'] = json.loads(
        search_terms.get(u'var_intra_american_voyage__exact', 'false'))
    result = sqs.models(Voyage).filter(**search_terms)
    for ct in custom_terms:
        result = result.filter(content=Raw(ct, clean=True))
    order_fields = search.get('orderBy')
    if order_fields:
        remaped_fields = []
        for field in order_fields:
            # Remap field names if they are plain text or language dependent.
            order_by_field = u'var_' + unicode(field['name'])
            if order_by_field.endswith('_partial'):
                # Partial dates are encoded in a way that is terrible for sorting MM,DD,YYYY.
                # Therefore we use the original Date value (which defaults month, day to 1).
                order_by_field = order_by_field[0:-8]
            if order_by_field.endswith('_lang'):
                order_by_field += '_' + lang + '_exact'
            elif order_by_field in translated_field_list:
                order_by_field += '_lang_' + lang + '_exact'
            elif order_by_field in plain_text_suffix_list:
                order_by_field += '_plaintext_exact'
            if field['direction'] == 'desc':
                order_by_field = '-' + order_by_field
            elif order_by_field.endswith('_exact'):
                remaped_fields.append('eq(' + order_by_field + ', \' \')')
            remaped_fields.append(order_by_field)
        result = result.order_by(*remaped_fields)
    return result
Example #35
    def get(self, request):
        frm = ItemSearchForm(request.GET)
        self.object_list = Item.objects.none()

        if frm.is_valid():

            cdata = frm.clean()
            sqs = SearchQuerySet().filter(published=True)
            psqs = sqs

            # No blank values, please
            for key in cdata.iterkeys():
                if isinstance(cdata[key], basestring):
                    cdata[key] = cdata[key].strip()

            if cdata['categories']:
                sqs = sqs.filter(categories__in=[
                    x.strip() for x in cdata['categories'].split(' ')
                ])
            if cdata['title']:
                sqs = sqs.filter(title=cdata['title'])
            if cdata['artist']:
                sqs = sqs.filter(artist=cdata['artist'])
            if cdata['date_from']:
                sqs = sqs.filter(date_from__gte=cdata['date_from'])
            if cdata['date_to']:
                sqs = sqs.filter(date_to__lte=cdata['date_to'])
            if cdata['origin_city']:
                sqs = sqs.filter(origin_city=cdata['origin_city'])
            if cdata['origin_country']:
                sqs = sqs.filter(origin_country=cdata['origin_country'])
            if cdata['materials']:
                sqs = sqs.filter(materials__in=[
                    x.strip() for x in cdata['materials'].split(' ')
                ])
            if cdata['video_only']:
                sqs = sqs.filter(video_only=True)

            # fulltext search
            if cdata['q']:
                sqs = sqs.filter(content=cdata['q'])

            # No search data entered
            if psqs == sqs:
                return redirect(reverse('item_search'))

            # Assigning a list to self.object_list won't work, it needs a QuerySet.
            # We're basically loading the items twice :(
            try:
                results = list(
                    sqs.order_by('score')[:1000]
                )  # slicing the array prevents multiple queries to Solr.
            except KeyError:
                # In case we do not have score, will happen with Whoosh
                results = list(sqs[:1000])
            ids = [
                x.object.id for x in results
            ]  # sqs.values_list('django_id', flat=True) won't work with Haystack.
            self.object_list = Item.objects.filter(id__in=ids)

        self.parent_category = None
        self.current_category = None
        self.child_categories = None
        context = self.get_context_data(object_list=self.object_list)
        return (self.render_to_response(context))
Example #36
    def build_haystack_filters(self, parameters):
        from haystack.inputs import Raw
        from haystack.query import SearchQuerySet, SQ  # noqa

        sqs = None

        # Retrieve Query Params

        # Text search
        query = parameters.get('q', None)

        # Types and subtypes to filter (map, layer, vector, etc)
        type_facets = parameters.getlist("type__in", [])

        # If coming from explore page, add type filter from resource_name
        resource_filter = self._meta.resource_name.rstrip("s")
        if resource_filter != "base" and resource_filter not in type_facets:
            type_facets.append(resource_filter)

        # Publication date range (start,end)
        date_end = parameters.get("date__lte", None)
        date_start = parameters.get("date__gte", None)

        # Topic category filter
        category = parameters.getlist("category__identifier__in")

        # Keyword filter
        keywords = parameters.getlist("keywords__slug__in")

        # Region filter
        regions = parameters.getlist("regions__name__in")

        # Owner filters
        owner = parameters.getlist("owner__username__in")

        # Sort order
        sort = parameters.get("order_by", "relevance")

        # Geospatial Elements
        bbox = parameters.get("extent", None)

        # Filter by Type and subtype
        if type_facets is not None:

            types = []
            subtypes = []

            for type in type_facets:
                if type in ["map", "layer", "document", "user"]:
                    # Type is one of our Major Types (not a sub type)
                    types.append(type)
                elif type in LAYER_SUBTYPES.keys():
                    subtypes.append(type)

            if len(subtypes) > 0:
                types.append("layer")
                sqs = SearchQuerySet().narrow("subtype:%s" %
                                              ','.join(map(str, subtypes)))

            if len(types) > 0:
                sqs = (SearchQuerySet() if sqs is None else sqs).narrow(
                    "type:%s" % ','.join(map(str, types)))

        # Filter by Query Params
        # haystack bug? if boosted fields aren't included in the
        # query, then the score won't be affected by the boost
        if query:
            if query.startswith('"') or query.startswith('\''):
                # Match exact phrase
                phrase = query.replace('"', '')
                sqs = (SearchQuerySet() if sqs is None else sqs).filter(
                    SQ(title__exact=phrase) | SQ(description__exact=phrase)
                    | SQ(content__exact=phrase))
            else:
                words = [
                    w for w in re.split(r'\W', query, flags=re.UNICODE) if w
                ]
                for i, search_word in enumerate(words):
                    if i == 0:
                        sqs = (SearchQuerySet() if sqs is None else sqs) \
                            .filter(
                            SQ(title=Raw(search_word)) |
                            SQ(description=Raw(search_word)) |
                            SQ(content=Raw(search_word))
                        )
                    elif search_word in ["AND", "OR"]:
                        pass
                    elif words[i - 1] == "OR":  # previous word OR this word
                        sqs = sqs.filter_or(
                            SQ(title=Raw(search_word))
                            | SQ(description=Raw(search_word))
                            | SQ(content=Raw(search_word)))
                    else:  # previous word AND this word
                        sqs = sqs.filter(
                            SQ(title=Raw(search_word))
                            | SQ(description=Raw(search_word))
                            | SQ(content=Raw(search_word)))

        # filter by category
        if category:
            sqs = (SearchQuerySet() if sqs is None else sqs).narrow(
                'category:%s' % ','.join(map(str, category)))

        # filter by keyword: use filter_or with keywords_exact
        # not using exact leads to fuzzy matching and too many results
        # using narrow with exact leads to zero results if multiple keywords
        # selected
        if keywords:
            for keyword in keywords:
                sqs = (SearchQuerySet() if sqs is None else sqs).filter_or(
                    keywords_exact=keyword)

        # filter by regions: use filter_or with regions_exact
        # not using exact leads to fuzzy matching and too many results
        # using narrow with exact leads to zero results if multiple keywords
        # selected
        if regions:
            for region in regions:
                sqs = (SearchQuerySet() if sqs is None else sqs).filter_or(
                    regions_exact__exact=region)

        # filter by owner
        if owner:
            sqs = (SearchQuerySet() if sqs is None else sqs).narrow(
                "owner__username:%s" % ','.join(map(str, owner)))

        # filter by date
        if date_start:
            sqs = (SearchQuerySet() if sqs is None else sqs).filter(
                SQ(date__gte=date_start))

        if date_end:
            sqs = (SearchQuerySet() if sqs is None else sqs).filter(
                SQ(date__lte=date_end))

        # Filter by geographic bounding box
        if bbox:
            left, bottom, right, top = bbox.split(',')
            sqs = (SearchQuerySet() if sqs is None else sqs).exclude(
                SQ(bbox_top__lte=bottom) | SQ(bbox_bottom__gte=top)
                | SQ(bbox_left__gte=right) | SQ(bbox_right__lte=left))

        # Apply sort
        if sort.lower() == "-date":
            sqs = (SearchQuerySet() if sqs is None else sqs).order_by("-date")
        elif sort.lower() == "date":
            sqs = (SearchQuerySet() if sqs is None else sqs).order_by("date")
        elif sort.lower() == "title":
            sqs = (SearchQuerySet()
                   if sqs is None else sqs).order_by("title_sortable")
        elif sort.lower() == "-title":
            sqs = (SearchQuerySet()
                   if sqs is None else sqs).order_by("-title_sortable")
        elif sort.lower() == "-popular_count":
            sqs = (SearchQuerySet()
                   if sqs is None else sqs).order_by("-popular_count")
        else:
            sqs = (SearchQuerySet() if sqs is None else sqs).order_by("-date")

        return sqs
Example #37
def deploy_checks(request=None):
    passed = []
    failed = []

    # cache something now to see if it's still there further down.
    randval = random.randint(1, 1000000)
    cache.set('check_things_cache_test', randval, 60)

    # Django database
    try:
        n = Semester.objects.all().count()
        if n > 0:
            passed.append(('Main database connection', 'okay'))
        else:
            failed.append(('Main database connection',
                           "Can't find any coredata.Semester objects"))
    except django.db.utils.OperationalError:
        failed.append(
            ('Main database connection', "can't connect to database"))
    except django.db.utils.ProgrammingError:
        failed.append(('Main database connection', "database tables missing"))

    # non-BMP Unicode in database
    try:
        l = LogEntry.objects.create(userid='ggbaker',
                                    description='Test Unicode \U0001F600',
                                    related_object=Semester.objects.first())
    except OperationalError:
        failed.append(('Unicode handling in database',
                       'non-BMP character not supported by connection'))
    else:
        l = LogEntry.objects.get(id=l.id)
        if '\U0001F600' in l.description:
            passed.append(('Unicode handling in database', 'okay'))
        else:
            failed.append(('Unicode handling in database',
                           'non-BMP character not stored correctly'))

    # Celery tasks
    celery_okay = False
    try:
        if settings.USE_CELERY:
            try:
                from coredata.tasks import ping
            except ImportError:
                failed.append(
                    ('Celery task',
                     "Couldn't import task: probably missing MySQLdb module"))
            else:
                try:
                    t = ping.apply_async()
                except kombu.exceptions.OperationalError:
                    failed.append(
                        ('Celery task',
                         'Kombu error. Probably RabbitMQ not running.'))
                else:
                    res = t.get(timeout=5)
                    if res == True:
                        passed.append(('Celery task', 'okay'))
                        celery_okay = True
                    else:
                        failed.append(
                            ('Celery task', 'got incorrect result from task'))
        else:
            failed.append(('Celery task', 'celery disabled in settings'))
    except celery.exceptions.TimeoutError:
        failed.append(
            ('Celery task',
             "didn't get result before timeout: celeryd maybe not running"))
    except socket.error:
        failed.append(('Celery task', "can't communicate with broker"))
    except NotImplementedError:
        failed.append(('Celery task', 'celery disabled'))
    except django.db.utils.ProgrammingError:
        failed.append(('Celery task', 'celery DB tables missing'))
    except django.db.utils.OperationalError:
        failed.append(('Celery task', 'djkombu tables missing: try migrating'))

    # celery beat
    try:
        from coredata.tasks import BEAT_TEST_FILE, BEAT_FILE_MAX_AGE
        beatfile_age = time.time() - os.stat(BEAT_TEST_FILE).st_mtime
        if beatfile_age < BEAT_FILE_MAX_AGE:
            passed.append(('Celery beat', 'okay'))
        else:
            failed.append((
                'Celery beat',
                'marker file is old: celery beat likely not processing tasks'))
    except OSError:
        failed.append((
            'Celery beat',
            'marker file is missing: celery beat likely not processing tasks'))

    # Django cache
    # (has a subprocess do something to make sure we're in a persistent shared cache, not DummyCache)
    subprocess.call(
        ['python3', 'manage.py', 'check_things', '--cache_subcall'])
    cache_okay = False
    res = cache.get('check_things_cache_test')
    if res == randval:
        failed.append((
            'Django cache',
            'other processes not sharing cache: dummy/local probably being used instead of memcached'
        ))
    elif res is None:
        failed.append(
            ('Django cache', 'unable to retrieve anything from cache'))
    elif res != randval + 1:
        failed.append(('Django cache', 'unknown result'))
    else:
        passed.append(('Django cache', 'okay'))
        cache_okay = True

    # Reporting DB connection
    try:
        db = SIMSConn()
        db.execute("SELECT last_name FROM ps_names WHERE emplid=301355288", ())
        result = list(db)
        if len(result) == 0:
            failed.append(('Reporting DB connection',
                           'query inexplicably returned nothing'))
        else:
            # whoever this is, they have non-ASCII in their name: let's hope they don't change it.
            lname = result[0][0]
            if not isinstance(lname, str):
                failed.append(
                    ('Reporting DB connection',
                     'string result not a string: check Unicode decoding'))
            elif lname[1] != u'\u00e4':
                failed.append(('Reporting DB connection',
                               'returned incorrectly-decoded Unicode'))
            else:
                passed.append(('Reporting DB connection', 'okay'))
    except SIMSProblem as e:
        failed.append(
            ('Reporting DB connection', 'SIMSProblem, %s' % (str(e))))
    except ImportError:
        failed.append(
            ('Reporting DB connection', "couldn't import DB2 module"))

    # compression enabled?
    if settings.COMPRESS_ENABLED:
        passed.append(('Asset compression enabled', 'okay'))
    else:
        failed.append(('Asset compression enabled', 'disabled in settings'))

    # Haystack searching
    from haystack.query import SearchQuerySet
    try:
        res = SearchQuerySet().filter(text='cmpt')
        if res:
            passed.append(('Haystack search', 'okay'))
        else:
            failed.append((
                'Haystack search',
                'nothing found: maybe update_index, or wait for search server to fully start'
            ))
    except IOError:
        failed.append(('Haystack search', "can't read/write index"))

    # photo fetching
    if cache_okay and celery_okay:
        try:
            res = do_photo_fetch(['301222726'])
            if '301222726' not in res:  # I don't know who 301222726 is, but he/she is real.
                failed.append(
                    ('Photo fetching', "didn't find photo we expect to exist"))
            else:
                passed.append(('Photo fetching', 'okay'))
        except (KeyError, Unit.DoesNotExist, django.db.utils.ProgrammingError):
            failed.append(('Photo fetching', 'photo password not set'))
        except urllib.error.HTTPError as e:
            failed.append(
                ('Photo fetching',
                 'failed to fetch photo (%s). Maybe wrong password?' % (e)))
    else:
        failed.append(
            ('Photo fetching', 'not testing since memcached or celery failed'))

    # emplid/userid API
    emplid = userid_to_emplid('ggbaker')
    if not emplid:
        failed.append(('Emplid API', 'no emplid returned'))
    elif isinstance(emplid, str) and not emplid.startswith('2000'):
        failed.append(('Emplid API', 'incorrect emplid returned'))
    else:
        passed.append(('Emplid API', 'okay'))

    # Piwik API
    #if not request:
    #    failed.append(('Piwik API', "can only check in web frontend with valid request object"))
    #elif not settings.PIWIK_URL or not settings.PIWIK_TOKEN:
    #    failed.append(('Piwik API', "not configured in secrets.py"))
    #else:
    #    # try to re-log this request in piwik and see what happens
    #    from piwik_middleware.tracking import PiwikTrackerLogic, urllib_errors
    #    tracking_logic = PiwikTrackerLogic()
    #    kwargs = tracking_logic.get_track_kwargs(request)
    #    try:
    #        tracking_logic.do_track_page_view(fail_silently=False, **kwargs)
    #    except urllib_errors as e:
    #        failed.append(('Piwik API', "API call failed: %s" % (e)))
    #    else:
    #        passed.append(('Piwik API', 'okay'))

    # Backup server
    #if not settings.BACKUP_SERVER or not settings.BACKUP_USER or not settings.BACKUP_PATH or not settings.BACKUP_PASSPHRASE:
    #    failed.append(('Backup server', 'Backup server settings not all present'))
    #else:
    #    from coredata.management.commands.backup_remote import do_check
    #    try:
    #        do_check()
    #    except RuntimeError as e:
    #        failed.append(('Backup server', unicode(e)))
    #    passed.append(('Backup server', 'okay'))

    # certificates
    bad_cert = 0
    res = _check_cert('/etc/stunnel/stunnel.pem')
    if res:
        failed.append(('Stunnel cert', res))
        bad_cert += 1
    res = _check_cert('/etc/nginx/cert.pem')
    if res:
        failed.append(('SSL PEM', res))
        bad_cert += 1
    res = _check_cert('/etc/nginx/cert.key')
    if res:
        failed.append(('SSL KEY', res))
        bad_cert += 1

    if bad_cert == 0:
        passed.append((
            'Certificates',
            'All okay, but maybe check http://www.digicert.com/help/ or https://www.ssllabs.com/ssltest/'
        ))

    # SVN database
    if settings.SVN_DB_CONNECT:
        from courselib.svn import SVN_TABLE, _db_conn
        import MySQLdb
        try:
            db = _db_conn()
            db.execute('SELECT count(*) FROM ' + SVN_TABLE, ())
            n = list(db)[0][0]
            if n > 0:
                passed.append(('SVN database', 'okay'))
            else:
                failed.append(('SVN database', "couldn't access records"))
        except MySQLdb.OperationalError:
            failed.append(('SVN database', "can't connect to database"))
    else:
        failed.append(('SVN database', 'SVN_DB_CONNECT not set in secrets.py'))

    # file creation in the necessary places
    dirs_to_check = [
        (settings.DB_BACKUP_DIR, 'DB backup dir'),
        (settings.SUBMISSION_PATH, 'submitted files path'),
        (os.path.join(settings.COMPRESS_ROOT,
                      'CACHE'), 'compressed media root'),
    ]
    for directory, label in dirs_to_check:
        res = _check_file_create(directory)
        if res is None:
            passed.append(('File creation in ' + label, 'okay'))
        else:
            failed.append(('File creation in ' + label, res))

    # are any services listening publicly that shouldn't?
    hostname = socket.gethostname()
    ports = [
        25,  # mail server
        #4369, # epmd, erlang port mapper daemon is okay to listen externally and won't start with ERL_EPMD_ADDRESS set. http://serverfault.com/questions/283913/turn-off-epmd-listening-port-4369-in-ubuntu-rabbitmq
        45130,  # beam? rabbitmq something
        4000,  # main DB stunnel
        50000,  # reporting DB
        8000,  # gunicorn
        11211,  # memcached
        9200,
        9300,  # elasticsearch
    ]
    connected = []
    for p in ports:
        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        try:
            s.connect((hostname, p))
        except socket.error:
            # couldn't connect: good
            pass
        else:
            connected.append(p)
        finally:
            s.close()

    if connected:
        failed.append(
            ('Ports listening externally',
             'got connections to port ' + ','.join(str(p) for p in connected)))
    else:
        passed.append(('Ports listening externally', 'okay'))

    # is the server time close to real-time?
    import ntplib
    c = ntplib.NTPClient()
    response = c.request('0.ca.pool.ntp.org')
    if abs(response.offset) > 0.1:
        failed.append(
            ('Server time',
             'Time is %g seconds off NTP pool.' % (response.offset, )))
    else:
        passed.append(('Server time', 'okay'))

    # library sanity
    err = bitfield_check()
    if err:
        failed.append(('Library sanity', 'django-bitfield: ' + err))
    else:
        err = cache_check()
        if err:
            failed.append(('Library sanity', 'django cache: ' + err))
        else:
            passed.append(('Library sanity', 'okay'))

    # github-flavoured markdown subprocess
    from courselib.markup import markdown_to_html
    try:
        # checks that script runs; does github-flavour correctly; does Unicode correctly.
        html = markdown_to_html(
            'test *markup*\n\n```python\nprint(1)\n```\n\u2605\U0001F600')
        expected = ('<p>test <em>markup</em></p>\n<pre lang="python"><code>print(1)\n'
                    '</code></pre>\n<p>\u2605\U0001F600</p>')
        if html.strip() == expected:
            passed.append(('Markdown subprocess', 'okay'))
        else:
            failed.append(('Markdown subprocess',
                           'markdown script returned incorrect markup'))
    except OSError:
        failed.append((
            'Markdown subprocess',
            'failed to start ruby command: ruby package probably not installed'
        ))
    except RuntimeError:
        failed.append(('Markdown subprocess', 'markdown script failed'))

    return passed, failed
Example #38
0
# -*- coding: utf-8 -*-
from __future__ import absolute_import

from django.conf import settings

from haystack.query import SearchQuerySet

from .models import Product

__all__ = ['product_search']


product_search = SearchQuerySet().models(Product) \
    .filter(is_archived=False) \
    .facet('sites').facet('categories').facet('owner').facet('price').facet('pro_owner') \
    .narrow('sites:%s' % settings.SITE_ID)
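The module-level product_search queryset above only declares the facets and the site narrowing; the counts are read at query time. A minimal sketch of a view built on top of it follows; the view name and template path are assumptions, not code from the original project.

# Hypothetical consumer of the product_search queryset defined above.
from django.shortcuts import render


def product_search_view(request):
    # all() clones the shared module-level queryset so it isn't mutated per request
    sqs = product_search.all()
    q = request.GET.get('q', '').strip()
    if q:
        sqs = sqs.auto_query(q)
    # facet_counts() is standard Haystack API; it returns a dict such as
    # {'fields': {'categories': [('books', 12), ...], ...}, 'dates': {}, 'queries': {}}
    facets = sqs.facet_counts()
    return render(request, 'search/product_search.html',
                  {'results': sqs, 'facets': facets})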
Example #39
0
def search(request):
    articles = SearchQuerySet().autocomplete(
        content_auto=request.POST.get('search_text', ''))
    return render(request, 'search/ajax_search.html', {'articles': articles})
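The autocomplete(content_auto=...) call above (and the similar one in Example #44) presupposes a SearchIndex that exposes an EdgeNgramField named content_auto. A minimal sketch; the Article model and its title attribute are assumptions rather than code from the original project.

# Hypothetical index definition backing the autocomplete queries above.
from haystack import indexes

from .models import Article


class ArticleIndex(indexes.SearchIndex, indexes.Indexable):
    text = indexes.CharField(document=True, use_template=True)
    # EdgeNgramField stores edge n-grams, which is what lets
    # SearchQuerySet().autocomplete(content_auto=...) match prefixes.
    content_auto = indexes.EdgeNgramField(model_attr='title')

    def get_model(self):
        return Article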
Example #40
0
 def get_queryset(self):
     self.result = self.request.GET.get('query')
     return SearchQuerySet().filter(text=self.result)
Example #41
0
from django.conf.urls.defaults import *
from haystack.query import SearchQuerySet

from oscar.core.loading import import_module

import_module('search.views', ['Suggestions', 'MultiFacetedSearchView'],
              locals())
import_module('search.forms', ['MultiFacetedSearchForm'], locals())
import_module('search.search_indexes', ['ProductIndex'], locals())

sqs = SearchQuerySet()
for field_name, field in ProductIndex.fields.items():
    if field.faceted is True:
        # Ensure we facet the results set by the defined facetable fields
        sqs = sqs.facet(field_name)

urlpatterns = patterns(
    'search.apps.views',
    url(r'^suggest/$', Suggestions.as_view(), name='oscar-search-suggest'),
    url(r'^$',
        MultiFacetedSearchView(form_class=MultiFacetedSearchForm,
                               searchqueryset=sqs),
        name='oscar-search'),
)
Example #42
0
 def test_update_index(self):
     self._add_message()
     self.assertEqual(SearchQuerySet().count(), 0)
     # Update the index
     update_index()
     self.assertEqual(SearchQuerySet().count(), 1)
Example #43
0
    def handle(self, *args, **options):

        ind = BaseResourceIndex()
        if len(options['resource_ids']) > 0:  # an array of resource short_id to check.
            for rid in options['resource_ids']:
                print("updating resource {}".format(rid))
                try:
                    r = BaseResource.objects.get(short_id=rid)
                    # if ind.should_update(r):  # always True
                    ind.update_object(r)
                except BaseResource.DoesNotExist:
                    print("resource {} does not exist in Django".format(rid))
                except Exception as e:
                    print("resource {} generated exception {}".format(
                        rid, str(e)))

        else:

            sqs = SearchQuerySet().all()
            print("SOLR count = {}".format(sqs.count()))
            dqs = BaseResource.objects.filter(
                Q(raccess__discoverable=True) | Q(raccess__public=True))
            print("Django count = {}".format(dqs.count()))

            # what is in Django that isn't in SOLR
            found_in_solr = set()
            for r in list(sqs):
                found_in_solr.add(r.short_id)  # enable fast matching

            django_indexed = 0
            django_replaced = 0
            django_refreshed = 0

            for r in dqs:
                try:
                    resource = get_resource_by_shortkey(r.short_id,
                                                        or_404=False)
                    repl = False
                    if hasattr(resource, 'metadata') and \
                            resource.metadata is not None and \
                            resource.metadata.relations is not None:
                        repl = resource.metadata.relations.filter(
                            type='isReplacedBy').exists()
                    if not repl:
                        django_indexed += 1
                    else:
                        django_replaced += 1
                except BaseResource.DoesNotExist:
                    # race condition in processing while in production
                    print("resource {} no longer found in Django.".format(
                        r.short_id))
                    continue
                except Exception as e:
                    print("resource {} generated exception {}".format(
                        r.short_id, str(e)))

                if r.short_id not in found_in_solr:
                    print("{} {} NOT FOUND in SOLR: adding to index".format(
                        r.short_id, resource.discovery_content_type))
                    try:
                        ind.update_object(r)
                        django_refreshed += 1
                    except Exception as e:
                        print("resource {} generated exception {}".format(
                            r.short_id, str(e)))

                # # This always returns True whether or not SOLR needs updating
                # # This is likely a Haystack bug.
                # elif ind.should_update(r):
                # update everything to be safe.

                elif options['force']:
                    print("{} {}: refreshing index (forced)".format(
                        r.short_id, resource.discovery_content_type))
                    try:
                        ind.update_object(r)
                        django_refreshed += 1
                    except Exception as e:
                        print("resource {} generated exception {}".format(
                            r.short_id, str(e)))

            print(
                "Django contains {} discoverable resources and {} replaced resources"
                .format(django_indexed, django_replaced))
            print("{} resources in Django refreshed in SOLR".format(
                django_refreshed))

            # what is in SOLR that isn't in Django:
            sqs = SearchQuerySet().all()  # refresh for changes from above
            solr_indexed = 0
            solr_replaced = 0
            solr_deleted = 0
            for r in sqs:
                try:
                    resource = get_resource_by_shortkey(r.short_id,
                                                        or_404=False)
                    repl = False
                    if hasattr(resource,
                               'metadata') and resource.metadata is not None:
                        repl = resource.metadata.relations.filter(
                            type='isReplacedBy').exists()
                    if not repl:
                        solr_indexed += 1
                    else:
                        solr_replaced += 1
                except BaseResource.DoesNotExist:
                    # 'resource' is unbound here (the lookup raised), so don't reference it
                    print(
                        "SOLR resource {} NOT FOUND in Django; removing from SOLR"
                        .format(r.short_id))
                    ind.remove_object(r)
                    solr_deleted += 1
                    continue

            print(
                "SOLR contains {} discoverable resources and {} replaced resources"
                .format(solr_indexed, solr_replaced))
            print("{} resources not in Django removed from SOLR".format(
                solr_deleted))
Example #44
0
def autocomplete(request):
    sqs = SearchQuerySet().autocomplete(
        content_auto=request.GET.get('query', ''))
    template = loader.get_template('reviews/autocomplete_template.html')
    return HttpResponse(template.render({'reviews': sqs}, request))
Example #45
0
 def test_query__in_empty_list(self):
     """Confirm that an empty list avoids a Elasticsearch exception"""
     sqs = SearchQuerySet(using='elasticsearch').filter(id__in=[])
     self.assertEqual(sqs.query.build_query(), u'id:(!*:*)')
Example #46
0
def do_site_search(q, allow_redirect=False):
    if q.strip() == "":
        return []
    
    results = []
    
    from bill.models import Bill
    from vote.models import Vote
    if "pass" in q or "fail" in q or "vote" in q:
        results.append({
            "title": "Tracking Federal Legislation",
            "href": "/start",
            "noun": "feeds",
            "results": [
                {"href": f.link,
                 "label": f.title,
                 "obj": f,
                 "feed": f,
                 "secondary": False }
                for f in (
                    Bill.EnactedBillsFeed(), Bill.ActiveBillsExceptIntroductionsFeed(), Bill.ComingUpFeed(), Vote.AllVotesFeed(),
                    )
                ]
            })
    
    from haystack.query import SearchQuerySet
    from events.models import Feed
    
    results.append({
        "title": "Members of Congress, Presidents, and Vice Presidents",
        "href": "/congress/members/all",
        "qsarg": "name",
        "noun": "Members of Congress, Presidents, or Vice Presidents",
        "results": [
            {"href": p.object.get_absolute_url(),
             "label": p.object.name,
             "obj": p.object,
             "feed": p.object.get_feed(),
             "secondary": p.object.get_current_role() == None }
            for p in SearchQuerySet().using("person").filter(indexed_model_name__in=["Person"], content=q).order_by('-is_currently_serving', '-score')[0:9]]
        })
       
    # Skipping states for now because we might want to go to the district maps or to
    # the state's main page for state legislative information.
    #import us
    #results.append(("States", "/congress/members", "most_recent_role_state", "states",
    #    sorted([{"href": "/congress/members/%s" % s, "label": us.statenames[s] }
    #        for s in us.statenames
    #        if us.statenames[s].lower().startswith(q.lower())
    #        ], key=lambda p : p["label"])))
    
    from committee.models import Committee
    results.append({
        "title": "Congressional Committees",
        "href": "/congress/committees",
        "noun": "committees in Congress",
        "results": sorted([
            {"href": c.get_absolute_url(),
             "label": c.fullname,
             "feed": c.get_feed(),
             "obj": c,
             "secondary": c.committee != None}
            for c in Committee.objects.filter(name__icontains=q, obsolete=False)
            ], key=lambda c : c["label"])
        })
       
    from settings import CURRENT_CONGRESS
    from bill.search import parse_bill_citation
    bill = parse_bill_citation(q)
    if not bill or not allow_redirect:
        from haystack.inputs import AutoQuery
        bills = [\
            {"href": b.object.get_absolute_url(),
             "label": b.object.title,
             "obj": b.object,
             "feed": b.object.get_feed() if b.object.is_alive else None,
             "secondary": b.object.congress != CURRENT_CONGRESS }
            for b in SearchQuerySet().using("bill").filter(indexed_model_name__in=["Bill"], content=AutoQuery(q)).order_by('-current_status_date')[0:9]]
    else:
        #bills = [{"href": bill.get_absolute_url(), "label": bill.title, "obj": bill, "secondary": bill.congress != CURRENT_CONGRESS }]
        return HttpResponseRedirect(bill.get_absolute_url())
    results.append({
        "title": "Bills and Resolutions (Federal)",
        "href": "/congress/bills/browse",
        "qsarg": "congress=__ALL__&text",
        "noun": "federal bills or resolutions",
        "results": bills})

    
    results.append({
        "title": "State Legislation",
        "href": "/states/bills/browse",
        "qsarg": "text",
        "noun": "state legislation",
        "results": [
            {"href": p.object.get_absolute_url(),
             "label": p.object.short_display_title,
             "obj": p.object,
             "feed": Feed(feedname="states_bill:%d" % p.object.id),
             "secondary": True }
            for p in SearchQuerySet().using('states').filter(indexed_model_name__in=["StateBill"], content=q)[0:9]]
            })

    # subject terms, but exclude subject terms that look like committee names because
    # that is confusing to also see with committee results
    from bill.models import BillTerm, TermType
    results.append({
        "title": "Subject Areas (Federal Legislation)",
        "href": "/congress/bills",
        "noun": "subject areas",
        "results": [
            {"href": p.get_absolute_url(),
             "label": p.name,
             "obj": p,
             "feed": p.get_feed(),
             "secondary": not p.is_top_term() }
            for p in BillTerm.objects.filter(name__icontains=q, term_type=TermType.new).exclude(name__contains=" Committee on ")[0:9]]
        })
    
    # in each group, make sure the secondary results are placed last, but otherwise preserve order
    for grp in results:
        for i, obj in enumerate(grp["results"]):
           obj["index"] = i
        grp["results"].sort(key = lambda o : (o.get("secondary", False), o["index"]))
    
    # sort categories first by whether all results are secondary results, then by number of matches (fewest first, if greater than zero)
    results.sort(key = lambda c : (
        len([d for d in c["results"] if d.get("secondary", False) == False]) == 0,
        len(c["results"]) == 0,
        len(c["results"])))
        
    return results
Example #47
0
 def test_narrow_sq(self):
     sqs = SearchQuerySet(using='elasticsearch').narrow(SQ(foo='moof'))
     self.assertTrue(isinstance(sqs, SearchQuerySet))
     self.assertEqual(len(sqs.query.narrow_queries), 1)
     self.assertEqual(sqs.query.narrow_queries.pop(), 'foo:(moof)')
Example #48
0
def search(request):

    if request.method == 'GET':
        q = request.GET.get('q')
        models = request.GET.getlist('models')
        fields = request.GET.getlist('fields')

        tu_results = []
        bs_results = []
        bp_results = []

        if 'question' in fields and 'answer' not in fields:
            if 'tossup' in models:
                tu_results = SearchQuerySet().filter(
                    tossup_text=q).models(Tossup)

            if 'bonus' in models:
                bs_results = SearchQuerySet().filter(
                    leadin_text=q).models(Bonus)
                bp_results = SearchQuerySet().filter(
                    part_text=q).models(BonusPart)

        elif 'answer' in fields and 'question' not in fields:
            if 'tossup' in models:
                print('getting tossups for {} with only answers'.format(q))
                tu_results = SearchQuerySet().filter(
                    tossup_answer=q).models(Tossup)

            if 'bonus' in models:
                bp_results = SearchQuerySet().filter(
                    bonus_answer=q).models(BonusPart)

        elif 'answer' in fields and 'question' in fields:
            if 'tossup' in models:
                print('getting tossups for {} with questions and answers'.format(q))
                tu_results = SearchQuerySet().filter(content=q).models(Tossup)

            if 'bonus' in models:
                print('getting bonuses for {} with questions and answers'.format(q))
                bs_results = SearchQuerySet().filter(content=q).models(Bonus)
                bp_results = SearchQuerySet().filter(
                    content=q).models(BonusPart)

        tossups_json = json.loads(
            serializers.serialize('json', [r.object for r in tu_results]))
        for tossup in tossups_json:
            tossup['fields']['tournament_name'] = \
                Tournament.objects.get(id=tossup['fields']['tournament']).tournament_name
            tossup['fields']['author'] = Packet.objects.get(
                id=tossup['fields']['packet']).author
            tossup['id'] = tossup.pop('pk')
            tossup['fields']['tour_id'] = tossup['fields'].pop('tournament')
            tossup['fields']['pack_id'] = tossup['fields'].pop('packet')
            for key, value in tossup['fields'].items():
                tossup[key] = value
            del tossup['fields']

        # print q, tossups_json

        bonuses = [r.object for r in bs_results]

        for r in bp_results:
            if r.object.bonus not in bonuses:
                bonuses.append(r.object.bonus)

        bonuses_json = json.loads(serializers.serialize('json', bonuses))
        for bonus in bonuses_json:
            bonus_parts = BonusPart.objects.filter(bonus__id=bonus['pk'])
            bpart_json = json.loads(serializers.serialize('json', bonus_parts))
            for bonus_part in bpart_json:
                for key, value in bonus_part['fields'].items():
                    bonus_part[key] = value
                del bonus_part['fields']
            bonus['fields']['bonus_parts'] = bpart_json
            bonus['fields']['tournament_name'] = \
                Tournament.objects.get(id=bonus['fields']['tournament']).tournament_name
            bonus['fields']['author'] = Packet.objects.get(
                id=bonus['fields']['packet']).author
            bonus['id'] = bonus.pop('pk')
            bonus['fields']['tour_id'] = bonus['fields'].pop('tournament')
            bonus['fields']['pack_id'] = bonus['fields'].pop('packet')
            for key, value in bonus['fields'].items():
                bonus[key] = value
            del bonus['fields']

        return HttpResponse(json.dumps({
            'tossups': tossups_json,
            'bonuses': bonuses_json
        }),
                            content_type='application/json')
Example #49
0
def map_search_endpoint(request, filter_group_id=None):
    """ Maps API search endpoint using haystack search results. For parameters see ``MAP_SEARCH_PARAMETERS``
        returns JSON with the contents of type ``HaystackMapResult``
        
        @param filter_group_id: Will filter all items by group relation, where applicable 
                (i.e. users are filtered by group memberships for that group, events as events in that group)
    """
    implicit_ignore_location = not any([
        loc_param in request.GET
        for loc_param in ['sw_lon', 'sw_lat', 'ne_lon', 'ne_lat']
    ])
    params = _collect_parameters(request.GET, MAP_SEARCH_PARAMETERS)
    query = force_text(params['q'])
    limit = params['limit']
    page = params['page']
    item_id = params['item']

    if params.get('cloudfiles', False):
        return map_cloudfiles_endpoint(request, query, limit, page)

    # TODO: set to  params['external'] after the external switch button is in frontend!
    external = settings.COSINNUS_EXTERNAL_CONTENT_ENABLED

    prefer_own_portal = getattr(settings, 'MAP_API_HACKS_PREFER_OWN_PORTAL',
                                False)

    if not is_number(limit) or limit < 0:
        return HttpResponseBadRequest(
            '``limit`` param must be a positive number or 0!')
    limit = min(limit, SERVER_SIDE_SEARCH_LIMIT)
    if not is_number(page) or page < 0:
        return HttpResponseBadRequest(
            '``page`` param must be a positive number or 0!')

    # filter for requested model types
    model_list = [
        klass for klass, param_name in list(SEARCH_MODEL_NAMES.items())
        if params.get(param_name, False)
    ]

    sqs = SearchQuerySet().models(*model_list)

    # filter for map bounds (Points are constructed with (lon, lat)!!!)
    if not params['ignore_location'] and not implicit_ignore_location:
        sqs = sqs.within('location', Point(params['sw_lon'], params['sw_lat']),
                         Point(params['ne_lon'], params['ne_lat']))
    # filter for user's own content
    if params['mine'] and request.user.is_authenticated:
        user_id = request.user.id
        sqs = sqs.filter_and(
            Q(creator=user_id) | Q(user_id=user_id) | Q(group_members=user_id))
    # filter for search terms
    if query:
        sqs = sqs.auto_query(query)

    # group-filtered-map view for on-group pages
    if filter_group_id:
        group = get_object_or_None(get_cosinnus_group_model(),
                                   id=filter_group_id)
        if group:
            filtered_groups = [filter_group_id]
            # get child projects of this group
            filtered_groups += [
                subproject.id for subproject in group.get_children()
                if subproject.is_active
            ]
            sqs = sqs.filter_and(
                Q(membership_groups__in=filtered_groups)
                | Q(group__in=filtered_groups))

    # filter topics
    topics = ensure_list_of_ints(params.get('topics', ''))
    if topics:
        sqs = sqs.filter_and(mt_topics__in=topics)
    if settings.COSINNUS_ENABLE_SDGS:
        sdgs = ensure_list_of_ints(params.get('sdgs', ''))
        if sdgs:
            sqs = sqs.filter_and(sdgs__in=sdgs)
    if settings.COSINNUS_MANAGED_TAGS_ENABLED:
        managed_tags = ensure_list_of_ints(params.get('managed_tags', ''))
        if managed_tags:
            sqs = sqs.filter_and(managed_tags__in=managed_tags)
    # filter for portal visibility
    sqs = filter_searchqueryset_for_portal(
        sqs,
        restrict_multiportals_to_current=prefer_own_portal,
        external=external)
    # filter for read access by this user
    sqs = filter_searchqueryset_for_read_access(sqs, request.user)
    # filter events by upcoming status and exclude hidden proxies
    if params['events'] and Event is not None:
        sqs = filter_event_searchqueryset_by_upcoming(sqs).exclude(
            is_hidden_group_proxy=True)

    # filter all default user groups if the new dashboard is being used (they count as "on platform" and aren't shown)
    if getattr(settings, 'COSINNUS_USE_V2_DASHBOARD', False):
        sqs = sqs.exclude(is_group_model=True,
                          slug__in=get_default_user_group_slugs())

    # skip score sorting and rely only on natural ordering?
    skip_score_sorting = False
    # if we have no query-boosted results, use *only* our custom sorting (haystack's is very random)
    if not query:
        sort_args = ['-local_boost']
        # if we only look at conferences, order them by their from_date, future first!
        if prefer_own_portal:
            sort_args = ['-portal'] + sort_args
        """
        # this would be the way to force-sort a content type by a natural ordering instead of score if its the only type being shown
        if params.get('conferences', False) and sum([1 if params.get(content_key, False) else 0 for content_key in MAP_CONTENT_TYPE_SEARCH_PARAMETERS.keys()]) == 1:
            sort_args = ['-from_date'] + sort_args
            skip_score_sorting = True
        sqs = sqs.order_by(*sort_args)
        """

    # sort results into one list per model
    total_count = sqs.count()
    sqs = sqs[limit * page:limit * (page + 1)]
    results = []

    for i, result in enumerate(sqs):
        if skip_score_sorting:
            # if we skip score sorting and only rely on the natural ordering, we make up fake high scores
            result.score = 100000 - (limit * page) - i
        elif not query:
            # if we have no query-boosted results, use *only* our custom sorting (haystack's is very random)
            result.score = result.local_boost
            if prefer_own_portal and is_number(result.portal) and int(
                    result.portal) == CosinnusPortal.get_current().id:
                result.score += 100.0
        results.append(HaystackMapResult(result, user=request.user))

    # if the requested item (direct select) is not in the queryset snippet
    # (might happen because of an old URL), then mix it in as first item and drop the last
    if item_id:
        item_id = str(item_id)
        if not any([res['id'] == item_id for res in results]):
            item_result = get_searchresult_by_itemid(item_id, request.user)
            if item_result:
                results = [HaystackMapResult(item_result, user=request.user)
                           ] + results[:-1]

    page_obj = None
    if results:
        page_obj = {
            'index': page,
            'count': len(results),
            'total_count': total_count,
            'start': (limit * page) + 1,
            'end': (limit * page) + len(results),
            'has_next': total_count > (limit * (page + 1)),
            'has_previous': page > 0,
        }

    data = {
        'results': results,
        'page': page_obj,
    }
    return JsonResponse(data)
Example #50
0
 def test_query__in(self):
     sqs = SearchQuerySet(using='elasticsearch').filter(id__in=[1, 2, 3])
     self.assertEqual(sqs.query.build_query(), u'id:("1" OR "2" OR "3")')
Example #51
0
from core.views import CustomRegistrationView
from data_set_manager.api import (AssayResource, AttributeOrderResource,
                                  AttributeResource, InvestigationResource,
                                  ProtocolReferenceParameterResource,
                                  ProtocolReferenceResource, ProtocolResource,
                                  PublicationResource, StudyResource)
from data_set_manager.urls import data_set_manager_router
from file_store.urls import file_store_router
from tool_manager.urls import django_docker_engine_url, tool_manager_router
from user_files_manager.urls import (user_files_csv_url, user_files_router,
                                     user_files_url)

logger = logging.getLogger(__name__)

# NG: facets for Haystack
sqs = (SearchQuerySet().using("core").models(DataSet).facet(
    'measurement').facet('technology').highlight())

# NG: added for tastypie URL
v1_api = Api(api_name='v1')

v1_api.register(AnalysisResource())
v1_api.register(ProjectResource())
v1_api.register(StudyResource())
v1_api.register(AssayResource())
v1_api.register(DataSetResource())
v1_api.register(AttributeOrderResource())
v1_api.register(NodeResource())
v1_api.register(NodeSetResource())
v1_api.register(NodeSetListResource())
v1_api.register(NodePairResource())
v1_api.register(NodeRelationshipResource())
Example #52
0
File: views.py Project: ltfred/site
class MySearchView(SearchView):
    context_object_name = "search_list"
    paginate_by = getattr(settings, "BASE_PAGE_BY", None)
    paginate_orphans = getattr(settings, "BASE_ORPHANS", 0)
    queryset = SearchQuerySet().order_by("-views")
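Wiring the class-based view above into a URLconf is the remaining step; a minimal sketch assuming a recent Django where django.urls.path is available (the URL pattern and name are assumptions).

# Hypothetical urls.py entry for MySearchView.
from django.urls import path

from .views import MySearchView

urlpatterns = [
    path('search/', MySearchView.as_view(), name='haystack_search'),
]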
Example #53
0
def vocabulary_search(request):  # view for search in vocabularies - remembers selection (vocabulary - class - property)
    # get query parameter
    if 'q' in request.GET:
        q_in = request.GET['q']
    else:
        q_in = ''

    if 'page' in request.GET:
        try:
            page = int(request.GET['page'])
        except ValueError:
            page = 1
    else:
        page = 1

    # translate non-English terms
    if 'translate' in request.GET:
        translate = True
        # create a unique translator object to be used
        translator = Translator(MS_TRANSLATOR_UID, MS_TRANSLATOR_SECRET)
        q = translator.translate(text=q_in, to_lang='en', from_lang=None)
        if q.startswith("TranslateApiException:"):
            q = q_in
    else:
        translate = False
        q = q_in

    # get requested type
    if 'type' in request.GET:
        tp = request.GET['type']
    else:
        tp = "vocabularies"

    # load the query set
    if tp == "vocabularies":
        clsname = 'Vocabulary'
        sqs = SearchQuerySet().models(Vocabulary).filter(content=q)
    elif tp == "classes":
        clsname = 'VocabularyClass'
        sqs = SearchQuerySet().models(VocabularyClass).filter(content=q)
    elif tp == "properties":
        clsname = 'VocabularyProperty'
        sqs = SearchQuerySet().models(VocabularyProperty).filter(content=q)
    else:
        raise Http404

    # remove non existing objects (may have been deleted but are still indexed)
    obj_set = []
    for res in sqs:
        if res.object:
            obj_set.append(res)

    # search only inside a vocabulary
    if request.GET.get('definedBy'):
        defined_by = int(request.GET.get('definedBy'))
        obj_set_old = obj_set[:]
        obj_set = []
        for res in obj_set_old:
            try:
                if res.object.vocabulary.id == defined_by:
                    obj_set.append(res)
            except AttributeError:
                continue # only return classes or properties
    else:
        defined_by = None

    # order the results
    if tp == "vocabularies":
        qs = sorted(obj_set, key=attrgetter('object.lodRanking'), reverse=True)  # order objects manually
    elif tp == "classes":
        qs = sorted(obj_set, key=attrgetter('object.vocabulary.lodRanking'), reverse=True)
    elif tp == "properties":
        qs = sorted(obj_set, key=attrgetter('object.vocabulary.lodRanking'), reverse=True)

    # paginate the results
    paginator = Paginator(qs, 15)
    page_object = paginator.page(page)

    # pass parameters and render the search template
    params = {'q': q, 'type': tp, 'query': True, 'translate': translate,
              'page_obj': page_object, 'url': "/vocabularies/?q=" + q + '&type=' + tp}

    if defined_by:
        params['vocabulary_define'] = Vocabulary.objects.get(pk=defined_by)

    return render(request, 'search/search.html', params)
Example #54
0
def _query_results(query, person):
    """
    Actually build the query results for this person.

    Make sure any result.content_type values are reflected in RESULT_TYPE_DISPLAY for display to the user.
    """
    if len(query) < 2:
        return []

    query = query.replace('@sfu.ca', '') # hack to make email addresses searchable as userids
    query = Clean(query)

    # offerings person was a member of (coredata.CourseOffering)
    if person:
        members = Member.objects.filter(person=person).exclude(role='DROP').select_related('offering')
        offering_slugs = set(m.offering.slug for m in members)
        offering_results = SearchQuerySet().models(CourseOffering).filter(text=query) # offerings that match the query
        offering_results = offering_results.filter(slug__in=offering_slugs) # ... and this person was in
    else:
        members = []
        offering_results = []

    # pages this person can view (pages.Page)
    page_acl = set(['ALL'])
    for m in members:
        # builds a set of offering_slug+"_"+acl_value strings, which will match the permission_key field in the index
        member_acl = set("%s_%s" % (m.offering.slug, acl) for acl in ACL_ROLES[m.role] if acl != 'ALL')
        page_acl |= member_acl

    page_results = SearchQuerySet().models(Page).filter(text=query) # pages that match the query
    page_results = page_results.filter(permission_key__in=page_acl) # ... and are visible to this user

    # discussion this person can view (discussion.DiscussionTopic)
    if person:
        discuss_results = SearchQuerySet().models(DiscussionTopic).filter(text=query) # discussions that match the query
        discuss_results = discuss_results.filter(slug__in=offering_slugs) # ... and this person was in
    else:
        discuss_results = []

    # students taught by instructor (coredata.Member)
    instr_members = Member.objects.filter(person=person, role__in=['INST','TA']).exclude(offering__component='CAN') \
        .select_related('offering')
    if person and instr_members:
        offering_slugs = set(m.offering.slug for m in instr_members)
        member_results = SearchQuerySet().models(Member).filter(text=query) # members that match the query
        member_results = member_results.filter(offering_slug__in=offering_slugs) # ... and this person was the instructor for
        member_results = member_results.load_all()
    else:
        member_results = []

    # combine and limit to best results
    results = itertools.chain(
        offering_results[:MAX_RESULTS],
        page_results[:MAX_RESULTS],
        member_results[:MAX_RESULTS],
        discuss_results[:MAX_RESULTS],
        )
    results = (r for r in results if r is not None)
    results = list(results)
    results.sort(key=lambda result: -result.score)
    results = results[:MAX_RESULTS] # (list before this could be n*MAX_RESULTS long)

    return results
Example #55
0
 def get_popular_videos(cls, sort='-week_views'):
     return SearchQuerySet().result_class(VideoSearchResult) \
         .models(Video).order_by(sort)
Example #56
0
    def __init__(self, haystack_result, obj, user, *args, **kwargs):
        message_url = None
        if not settings.COSINNUS_IS_INTEGRATED_PORTAL and not 'cosinnus_message' in settings.COSINNUS_DISABLED_COSINNUS_APPS:
            if settings.COSINNUS_ROCKET_ENABLED:
                message_url = reverse('cosinnus:message-write-group',
                                      kwargs={'slug': obj.slug})
            else:
                group_admins = list(obj.actual_admins)
                message_url = message_group_admins_url(obj, group_admins)

        kwargs.update({
            'is_member':
            check_ug_membership(user, obj),
            'is_pending':
            check_ug_pending(user, obj),
            'is_invited':
            check_ug_invited_pending(user, obj),
            'action_url_1':
            _prepend_url(user, obj.portal) +
            group_aware_reverse('cosinnus:group-microsite',
                                kwargs={'group': obj},
                                skip_domain=True) + '?join=1',
            'action_url_2': (_prepend_url(user, obj.portal) +
                             message_url) if message_url else None,
            'youtube_url':
            obj.video,
            'twitter_username':
            obj.twitter_username,
            'flickr_url':
            obj.flickr_url,
            'website_url':
            obj.website,
            'contact':
            linebreaksbr(escape(obj.contact_info)),
            'followed':
            obj.is_user_following(user),
            'starred':
            obj.is_user_starring(user)
        })
        """ TODO: check all read permissions on related objects! """

        # collect upcoming and visible project/group events
        sqs = SearchQuerySet().models(SEARCH_MODEL_NAMES_REVERSE['events'])
        sqs = sqs.filter_and(group=obj.id)
        sqs = filter_searchqueryset_for_read_access(sqs, user)
        sqs = filter_event_searchqueryset_by_upcoming(sqs)
        sqs = sqs.order_by('from_date')
        kwargs.update(
            {'events': [HaystackEventMapCard(result) for result in sqs]})

        # collect administrator users. these are *not* filtered by visibility, as project admins are always visible!
        sqs = SearchQuerySet().models(SEARCH_MODEL_NAMES_REVERSE['people'])
        sqs = sqs.filter_and(admin_groups=obj.id)
        #sqs = filter_searchqueryset_for_read_access(sqs, user)
        sqs = sqs.order_by('title')

        # private users are not visible to anonymous users, BUT they are visible to logged in users!
        # because if a user chose to make his group visible, he has to take authorship responsibilities
        if not user.is_authenticated:
            sqs = filter_searchqueryset_for_read_access(sqs, user)

        kwargs.update(
            {'admins': [HaystackUserMapCard(result) for result in sqs]})

        if settings.COSINNUS_ORGANIZATIONS_ENABLED:
            sqs = SearchQuerySet().models(
                SEARCH_MODEL_NAMES_REVERSE['organizations'])
            sqs = sqs.filter_and(groups=obj.id)
            sqs = filter_searchqueryset_for_read_access(sqs, user)
            sqs = sqs.order_by('title')

            kwargs.update({
                'organizations':
                [HaystackOrganizationMapCard(result) for result in sqs]
            })

        return super(DetailedBaseGroupMapResult,
                     self).__init__(haystack_result, obj, user, *args,
                                    **kwargs)
Example #57
0
 def obj_get(self, request=None, **kwargs):
     '''
     Retrieves a detailed search item
     '''
     return SearchQuerySet().filter(id=kwargs['id'])[0]
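Indexing with [0] raises IndexError when the requested id is not in the search index. A slightly more defensive variant is sketched below, assuming this method lives on a Tastypie resource so tastypie.exceptions.NotFound is an appropriate error to raise.

 def obj_get(self, request=None, **kwargs):
     '''
     Retrieves a detailed search item, raising NotFound instead of an
     IndexError when the id is missing from the index.
     '''
     from tastypie.exceptions import NotFound

     # slicing a SearchQuerySet returns a plain list of results
     results = SearchQuerySet().filter(id=kwargs['id'])[:1]
     if not results:
         raise NotFound("No search result with id %r" % kwargs['id'])
     return results[0]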
Example #58
0
 def get_latest_videos(cls):
     return SearchQuerySet().result_class(VideoSearchResult) \
         .models(Video).order_by('-created')
Example #59
0
File: views.py Project: marcthomas/snipt
def search(request,
           template='search/search.html',
           load_all=True,
           form_class=ModelSearchForm,
           searchqueryset=None,
           context_class=RequestContext,
           extra_context=None,
           results_per_page=None):

    query = ''
    results = EmptySearchQuerySet()

    if request.GET.get('q'):

        searchqueryset = SearchQuerySet() \
            .filter(Q(public=True) | Q(author=request.user)) \
            .order_by('-pub_date')

        if request.user.is_authenticated() and \
                'mine-only' in request.GET:
            searchqueryset = SearchQuerySet().filter(author=request.user) \
                .order_by('-pub_date')

        elif request.user.is_authenticated() and \
                ('author' in request.GET and
                    request.GET.get('author')):

            author = request.GET.get('author')

            if author == request.user.username:
                searchqueryset = SearchQuerySet().filter(author=request.user) \
                    .order_by('-pub_date')

            else:
                team = get_object_or_None(Team, slug=author)

                if team and team.user_is_member(request.user):
                    searchqueryset = SearchQuerySet().filter(author=team) \
                        .order_by('-pub_date')

        form = ModelSearchForm(request.GET,
                               searchqueryset=searchqueryset,
                               load_all=load_all)

        if form.is_valid():
            query = form.cleaned_data['q']
            results = form.search()
    else:
        form = form_class(searchqueryset=searchqueryset, load_all=load_all)

    paginator = Paginator(results, results_per_page or RESULTS_PER_PAGE)

    try:
        page = paginator.page(int(request.GET.get('page', 1)))
    except InvalidPage:
        raise Http404("No such page of results!")

    context = {
        'form': form,
        'has_snipts': True,
        'page': page,
        'paginator': paginator,
        'query': query,
        'suggestion': None,
    }

    if results.query.backend.include_spelling:
        context['suggestion'] = form.get_suggestion()

    if extra_context:
        context.update(extra_context)

    return render(request, template, context)
Example #60
0
 def get_featured_videos(cls):
     return SearchQuerySet().result_class(VideoSearchResult) \
         .models(Video).filter(featured__gt=datetime.datetime(datetime.MINYEAR, 1, 1)) \
         .order_by('-featured')